blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8b09651871eecbe00cda47f5046f0f5323e6249f | 9597e11aa9f9a57acea98361c2ba48f9d26332f2 | /google/cloud/datastore_v1/services/datastore/__init__.py | a8a82886464003611545e938eb86ad0ed4a5a667 | [
"Apache-2.0"
] | permissive | renovate-bot/python-datastore | 9fa781d788d7874e487afe0146bec87cd63db725 | 671dc4b2b49d185d49d6a3ae04ff12b926933ae4 | refs/heads/master | 2023-08-22T02:29:20.933250 | 2021-08-19T18:56:58 | 2021-08-19T18:56:58 | 238,816,263 | 1 | 0 | Apache-2.0 | 2020-02-07T00:55:36 | 2020-02-07T00:55:36 | null | UTF-8 | Python | false | false | 751 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import DatastoreClient
from .async_client import DatastoreAsyncClient
__all__ = (
"DatastoreClient",
"DatastoreAsyncClient",
)
| [
"noreply@github.com"
] | renovate-bot.noreply@github.com |
9d761823ff03159795f7873a7b69271d2ebda3a7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-8290.py | f24ba4df34aeac9a8e74719afe47b10bc56b0530 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,299 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
    k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
6fa22ce338d3f8d57ecec002067180eaafbed94e | 4908b1d34d69c1cb652f25049552562574e1075f | /2020/Day-12/Rain_Risk/vector.py | cfe2acf5df39b6354ac84b44bc21a18195edfe11 | [
"MIT"
] | permissive | sreekesari-vangeepuram/adventofcode | 3d4ad98a25a30640182d928538b421e00ad8259d | 645531be0208affe042ac0328105b9ef3cfc9dbf | refs/heads/main | 2023-07-26T13:36:03.036721 | 2021-08-11T08:27:25 | 2021-08-11T08:27:25 | 317,850,039 | 1 | 0 | MIT | 2021-08-11T08:27:26 | 2020-12-02T12:08:13 | Go | UTF-8 | Python | false | false | 2,493 | py | class vector:
def __init__(self, x, y, pointing_direction):
self.x = x
self.y = y
self.direction = pointing_direction
def get_pos(self):
return (self.x, self.y, self.direction)
def change_position(self, ins):
d = self.direction
if d == 'E':
if ins[0] == 'E' or ins[0] == 'F': self.x += ins[1]
elif ins[0] == 'W': self.x -= ins[1]
elif ins[0] == 'N': self.y += ins[1]
elif ins[0] == 'S': self.y -= ins[1]
elif d == 'W':
if ins[0] == 'W' or ins[0] == 'F': self.x -= ins[1]
elif ins[0] == 'E': self.x += ins[1]
elif ins[0] == 'N': self.y += ins[1]
elif ins[0] == 'S': self.y -= ins[1]
elif d == 'N':
if ins[0] == 'N' or ins[0] == 'F': self.y += ins[1]
elif ins[0] == 'W': self.x -= ins[1]
elif ins[0] == 'E': self.x += ins[1]
elif ins[0] == 'S': self.y -= ins[1]
elif d == 'S':
if ins[0] == 'S' or ins[0] == 'F': self.y -= ins[1]
elif ins[0] == 'W': self.x -= ins[1]
elif ins[0] == 'N': self.y += ins[1]
elif ins[0] == 'E': self.x += ins[1]
def change_direction(self, ins):
d = self.direction
if ins[0] == 'R':
if d == 'E':
if ins[1] == 90:
self.direction = 'S'
elif ins[1] == 180:
self.direction = 'W'
elif ins[1] == 270:
self.direction = 'N'
elif d == 'W':
if ins[1] == 90:
self.direction = 'N'
elif ins[1] == 180:
self.direction = 'E'
elif ins[1] == 270:
self.direction = 'S'
elif d == 'N':
if ins[1] == 90:
self.direction = 'E'
elif ins[1] == 180:
self.direction = 'S'
elif ins[1] == 270:
self.direction = 'W'
elif d == 'S':
if ins[1] == 90:
self.direction = 'W'
elif ins[1] == 180:
self.direction = 'N'
elif ins[1] == 270:
self.direction = 'E'
elif ins[0] == 'L':
if d == 'E':
if ins[1] == 90:
self.direction = 'N'
elif ins[1] == 180:
self.direction = 'W'
elif ins[1] == 270:
self.direction = 'S'
elif d == 'W':
if ins[1] == 90:
self.direction = 'S'
elif ins[1] == 180:
self.direction = 'E'
elif ins[1] == 270:
self.direction = 'N'
elif d == 'N':
if ins[1] == 90:
self.direction = 'W'
elif ins[1] == 180:
self.direction = 'S'
elif ins[1] == 270:
self.direction = 'E'
elif d == 'S':
if ins[1] == 90:
self.direction = 'E'
elif ins[1] == 180:
self.direction = 'N'
elif ins[1] == 270:
self.direction = 'W'
def manhattan_distance(self):
return abs(self.x)+abs(self.y)
| [
"kesari.vangeepuram@gmail.com"
] | kesari.vangeepuram@gmail.com |
980323cf75ba26bfab0963870210bfedc895fa2d | ecff4b18a49ce5952c5f9125dc027cebdecf10a8 | /azure-mgmt-resource/azure/mgmt/resource/resources/models/dependency.py | c5db8825cb5df20c1b8bb96a3551b7981d632c2e | [
"Apache-2.0"
] | permissive | jehine-MSFT/azure-sdk-for-python | a56c18020ecd5f4c245c093fd6a33e1b1d7c95e1 | 6d0f94b39406eab374906c683bd2150217132a9c | refs/heads/master | 2020-12-06T19:17:38.153819 | 2016-04-08T21:03:16 | 2016-04-08T21:03:16 | 55,809,131 | 0 | 0 | null | 2016-04-08T20:54:00 | 2016-04-08T20:54:00 | null | UTF-8 | Python | false | false | 2,007 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dependency(Model):
"""
Deployment dependency information.
:param depends_on: Gets the list of dependencies.
:type depends_on: list of :class:`BasicDependency
<azure.mgmt.resource.resources.models.BasicDependency>`
:param id: Gets or sets the ID of the dependency.
:type id: str
:param resource_type: Gets or sets the dependency resource type.
:type resource_type: str
:param resource_name: Gets or sets the dependency resource name.
:type resource_name: str
"""
_attribute_map = {
'depends_on': {'key': 'dependsOn', 'type': '[BasicDependency]'},
'id': {'key': 'id', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
}
def __init__(self, depends_on=None, id=None, resource_type=None, resource_name=None, **kwargs):
self.depends_on = depends_on
self.id = id
self.resource_type = resource_type
self.resource_name = resource_name
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
5002a8c9e0c3a567b44de79a2522f9cb8e1dd214 | 614cd6a5336a6fe177d13a7cfd841a344f5d7a97 | /albumy/albumy/to_test/test.py | 3511682210d8dcaa4aef6ab8852c1c93a666dd3d | [
"MIT"
] | permissive | shidashui/MyFlaskWeb | 40bb80dea6aa9cc2729ee8e767062cbc519a472a | b861c76813e15250036477fca6570cef1d957af1 | refs/heads/master | 2022-12-10T08:32:53.657838 | 2019-08-26T15:29:03 | 2019-08-26T15:29:03 | 193,836,836 | 3 | 0 | MIT | 2022-12-08T05:55:00 | 2019-06-26T05:42:12 | JavaScript | UTF-8 | Python | false | false | 1,327 | py | import os
import PIL
from PIL import Image
from itsdangerous import Serializer
def A():
a = Serializer('mima')
token_a = a.dumps('test')
print(token_a)
b = Serializer('mima')
c = b.loads(token_a)
print(c)
roles_permissions_map = {
'Locked': ['FOLLOW', 'COLLECT'],
'User': ['FOLLOW', 'COLLECT','COMMENT','UPLOAD'],
'Moderator':['FOLLOW','COLLECT','COMMENT','UPLOAD','MODERATE'],
'Administrator':['FOLLOW','COLLECT','COMMENT','UPLOAD','MODERATE','ADMINISTER']
}
for key, value in roles_permissions_map.items():
print(key,value)
def B():
a = 'abc.img'
ext = os.path.splitext(a)
print(ext)
image = 'Bing_0001.jpeg'
def resize_image(image, filename, base_width):
filename, ext = os.path.splitext(filename)
img = Image.open(image)
if img.size[0] <= base_width:
return filename + ext
print(img.size)
w_percent = (base_width / float(img.size[0]))
print(w_percent)
h_size = int((float(img.size[1]) * float(w_percent)))
print(h_size)
img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
filename += '_test' + ext
img.save(filename, optimize=True, quality=85)
return filename
if __name__ == '__main__':
resize_image(image,image,400) | [
"164635470@qq.com"
] | 164635470@qq.com |
0c5c2c43ad0c32f185649374d6b21c1754c06ef4 | 113f803b721984992bdc8f2177056b0f60de546a | /ex32/ex32.py | 36131ac8165a1e6521e3dcd80f8cca1ae549099b | [] | no_license | Na-Young-Lee/16PFA-Na-Young-Lee | ddb215b0dc9cb0572b96aa90d8db71fbbea13c13 | 6cdcea12fd46a5218f9b6a7cd4ac5ee5e347cbb7 | refs/heads/master | 2021-01-17T15:15:27.429510 | 2016-06-01T11:37:20 | 2016-06-01T11:37:20 | 53,923,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # -*- coding:utf8
# http://learnpythonthehardway.org/book/ex32.html
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# this first kind of for-loop goes through a list #loop
for number in the_count:
print("This is count %d" % number)
# same as above
for fruit in fruits:
print("A fruit of type: %s" % fruit)
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print("I got %r" % i)
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0,6):
print("Adding %d to the list." % i)
# append is a function that lists understand
elements.append(i)
# Or, in Python, you can also do it like this:
# list comprehension
elements2 = [i for i in range(0,6)]
# now we can print them out too
for i in elements:
print ("Element was: %d" % i)
| [
"CAD Client"
] | CAD Client |
e5153619e02b587888247f69275781bbf39dd455 | a1951be7f25ad3dd7f1b6ae186d95dcaca2f0c0c | /selfpy/chap10/classvar_basic.py | a1ae05904dffbb1780c5d545b697f34855fcd116 | [] | no_license | kazscape/teach_yourself_python | 7742371c074e434ad77f4dd866f426d233b08e8b | 52802c12a99edaf9d061a14b34f09c0ff60bb190 | refs/heads/master | 2023-02-03T08:46:44.280067 | 2020-12-25T08:45:58 | 2020-12-25T08:45:58 | 323,297,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | class Area:
PI = 3.14
@classmethod
def circle(cls, radius):
return radius * radius * cls.PI
if __name__ == '__main__':
print(Area.PI)
print(Area.circle(10)) | [
"yamauchi.kazuharu@fixer.co.jp"
] | yamauchi.kazuharu@fixer.co.jp |
96c547b852180157735d158a6db1edc39912fd5a | 622079460ebb4ba9b80f55624622e45fcbf064d1 | /DataAnalysis/day1_5/sec01.py | a64bf9b1c39d80d6886234dff8b43729c0d6e730 | [
"MIT"
] | permissive | yunjung-lee/class_python_numpy | 60bf24442e3f72a002ef25566e6982303dc0e12d | 589817c8bbca85d70596e4097c0ece093b5353c3 | refs/heads/master | 2020-04-02T09:50:08.854798 | 2018-10-23T10:53:00 | 2018-10-23T10:53:00 | 154,311,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,206 | py | count = '23,456'
#print(int(count))
#ValueError: invalid literal for int() with base 10: '23,456' => the error occurs because the "," keeps the string from being recognized as a number
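# (Illustrative addition, not part of the original script.) A common fix for the
# ValueError above is to drop the thousands separator before converting:
# print(int(count.replace(',', '')))   # -> 23456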
import re
from bs4 import BeautifulSoup
html = """
<ul>
<li> <a href="www.naver.com">naver</a></li>
<li> <a href="https://www.naver.com">naver</a></li>
<li> <a href="https://www.daum.com">daum</a></li>
<li> <a href="http://www.naver.com">naver</a></li>
</ul>
"""
# # use a regular expression to extract only the elements whose href attribute starts with https
# # https: communication that encrypts documents in transit so they cannot be snooped on (comparatively safe)
# soup = BeautifulSoup(html, "html.parser")
# li=soup.find_all(href=re.compile("^https://"))
# #print(li)
# for e in li :
# print(e.attrs['href'])
# # urljoin : resolves a relative address
# # a way to specify a web address with a relative path
# # absolute path : the whole address is written out
# # relative path : the location is expressed against a base (which is required)
# # ../ moves up one level in the path
# from urllib.parse import urljoin
# base="http://example.com/html/a.html"
# print(urljoin(base,"b.html"))
# #"http://example.com/html/sub/c.html"
# print(urljoin(base,"sub/c.html"))
# #"http://example.com/index.html"
# print(urljoin(base,"../index.html"))
# #"http://example.com/img/sky.png"
# print(urljoin(base,"../img/sky.png"))
#
# print(urljoin(base,"http://other.com/test"))
# # because it uses http://, urljoin ignores the base and the URL is used as-is.
# # because it uses //, urljoin ignores the base and the URL is used as-is.
# print(urljoin(base,"//other.com/test"))
#
"""
1. HTTP communication
-HTTP is a communication protocol
-The client reaches the URL through its address
http://www.naver.com: the computer (server) named www at naver.com
-The server sends the index.html document (the home-page document) to the client
-A program on the client that interprets the delivered HTML document (the web browser) renders the result on screen
-During the exchange between server and client, the server sends the client a response code together with the HTML document
-Representative response codes: 200 (OK), 4xx (wrong or missing page address, etc.), 5xx (internal server error)
-Cookie and session information is created
 -Cookie example: the "autocomplete" checkbox setting for the ID input field, etc. (stored on the client PC)
 -Session: the cookie-style information is stored on the server
-Practice addresses
 main page:  http://www.hanbit.co.kr/index.html
 login page: http://www.hanbit.co.kr/member/login.html
 my page:    http://www.hanbit.co.kr/myhanbit/myhanbit.html
 login form fields: id m_id, password m_passwd
 1) enter the id/password (rendered on screen, login.html)
 2) press the login button (rendered on screen)
 3) login processing (not rendered; id/pw <-> DB server lookup, login_proc.php)
 4) logged-in state (rendered on screen)
"""
# log in to the site with Python -> extract the personal info -> print it to the screen
#info = {"id":"test","pw":"1234"}
import requests
USER = "python96"
PASS = "gg244055"
session = requests.session() # create a session object (transmitter - line - receiver)
# session : the server and the client are connected
# keeping the session : the connection state is maintained
# a SESSION is used when the client connects to the server for the purpose of exchanging data.
login_info = {
"m_id":USER,
"m_passwd":PASS
}
url_login="http://www.hanbit.co.kr/member/login_proc.php"
# the actual login-processing page : not directly reachable (login only succeeds when the ID and PW arrive together)
# try to establish the session
res = session.post(url_login, data = login_info)
print(res)
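# (Illustrative addition, not part of the original script.) requests exposes the
# numeric HTTP response code described in the docstring above as res.status_code;
# res.raise_for_status() would raise an HTTPError for the 4xx/5xx cases.
print("login response code:", res.status_code)  # 200 means the request was OK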
url_mypage = "http://www.hanbit.co.kr/myhanbit/myhanbit.html"
res = session.get(url_mypage)
print(res)
#print(res.text)
soup = BeautifulSoup(res.text, 'html.parser')
mileage = soup.select_one("#container > div > div.sm_mymileage > dl.mileage_section1 > dd > span").string
print("마일리지 :" +mileage+"점")
soup = BeautifulSoup(res.text, 'html.parser')
ecoin = soup.select_one("#container > div > div.sm_mymileage > dl.mileage_section2 > dd > span").string
print("이코인 :" +ecoin+"원")
| [
"python966@gmail.com"
] | python966@gmail.com |
c55f0e64b63446f992db9226cc9d25f28a8f4509 | 25219f56a7958d2fdbd2c08172ef7b91e31e2b5a | /calibration_scalars/PFISR/201302/PLCal30/20130221.003/do_Cal.py | 9dad646897bf2fa7ec6ee3566a88572613e98052 | [] | no_license | amisr/overspread | d82d3adc8f6d4981b3b13f54e39e69411711e3ee | c26a736c9b356f55fd7845ad5a093504e684f434 | refs/heads/main | 2023-04-03T10:47:13.975207 | 2021-03-30T23:43:22 | 2021-03-30T23:43:22 | 431,037,982 | 1 | 0 | null | 2021-11-23T19:02:13 | 2021-11-23T09:29:47 | Python | UTF-8 | Python | false | false | 6,557 | py | import datetime
import os
import numpy
import scipy
import matplotlib.pyplot as plt
import tables
from scipy.optimize import leastsq
import scipy.io as sio
def get_BS_angle(az,el):
az_bs = 15.0*scipy.pi/180.0
el_bs = 74.0*scipy.pi/180.0
k = numpy.array([[scipy.cos(el)*scipy.cos(az)],
[scipy.cos(el)*scipy.sin(az)],
[scipy.sin(el)]])
tk = rotmat(k,3,az_bs)
tk2 = rotmat(tk,2,scipy.pi/2.0-el_bs)
alphaBS=90.0-scipy.arcsin(tk2[2])*180.0/scipy.pi
return alphaBS
def rotmat(input, dir, angle):
if dir == 1:
rotmat = numpy.array([ [1,0,0],
[0, scipy.cos(angle), scipy.sin(angle)],
[0, -scipy.sin(angle), scipy.cos(angle)]])
if dir == 2:
rotmat = numpy.array([ [scipy.cos(angle), 0, -scipy.sin(angle)],
[0, 1, 0],
[scipy.sin(angle), 0, scipy.cos(angle)]])
if dir == 3:
rotmat = numpy.array([ [scipy.cos(angle), scipy.sin(angle), 0],
[-scipy.sin(angle), scipy.cos(angle), 0],
[0, 0, 1]])
return scipy.dot(rotmat,input)
def func(x,a,b,c):
return a*scipy.power(scipy.cos(x*scipy.pi/180.0+c), b)
def residual(p, y, x):
a,b,c = p
return y - func(x,a,b,c)
if __name__ == '__main__':
now = datetime.datetime.now()
date = now.strftime("%m.%d.%Y")
#change experiment month here
exp = 'cal-201302'
GL = numpy.loadtxt('GratingLimits.txt')
read_from_file = True
if read_from_file == True:
# From processed file
#fname = '/Volumes/ISR_DATA_02/processed_data/PFISR/2010/04/Lyons30/20100406.001/20100406.001_ac_2min.h5'
# From raw file
filelist = []
filepath = '/Volumes/AMISR_017/Data AMISR Poker/20130221.003/'
dirList = os.listdir(filepath)
for fname in dirList:
if fname.endswith('.dt0.h5'):
filelist.append(fname)
h5file=tables.openFile(filepath + filelist[0])
output={}
for array in h5file.listNodes('/',classname = 'Array'):
output[array.name]=array.read()
for group in h5file.walkGroups("/"):
output[group._v_pathname]={}
for array in h5file.listNodes(group, classname = 'Array'):
output[group._v_pathname][array.name]=array.read()
h5file.close()
try:
BM = numpy.array(output['BeamCodes'])
except:
BM = numpy.array(output['/Setup']['BeamcodeMap'])
else:
BM = numpy.loadtxt('BeamCodeMap.txt')
###Change filelist here###
#fname = ['filelist_ac.txt']
fname = ['filelist_lp.txt']
#get filelist
FILES = []
for files in fname:
f = open(files)
FPATH = f.readline().strip()
FILES = f.readlines()
FILES = [line.strip() for line in FILES]
FILES = [FPATH + line for line in FILES]
thbs = scipy.linspace(0,40,100)
A = 0.98395
B = 3.8781
oldksys = A * scipy.power(scipy.cos(thbs*scipy.pi/180.0),B)
ksysbco= BM[:,0]
ksystab= BM[:,3]*1e19
#print ksystab
fig = plt.figure()
alphaBS = scipy.zeros(len(FILES))
aGL = scipy.zeros(len(FILES))
KSYScorr = scipy.zeros(len(FILES))
KSYS = scipy.zeros(len(FILES))
eKSYS = scipy.zeros(len(FILES))
for aa in range(len(FILES)):
f = open(FILES[aa])
TLINE1 = numpy.array([value for value in f.readline().split()],dtype='float')
TLINE2 = numpy.array([value for value in f.readline().split()],dtype='float')
az = TLINE1[1]*scipy.pi/180.0
el = TLINE1[2]*scipy.pi/180.0
alphaBS[aa] = get_BS_angle(az,el)
a = numpy.min(scipy.absolute(az*180.0/scipy.pi-GL[:,0]))
I = scipy.absolute(az*180.0/scipy.pi-GL[:,0]).argmin()
aGL[aa] = GL[I,2]-alphaBS[aa]
KSYScorr[aa] = TLINE2[0]
I = scipy.where(TLINE1[0] == ksysbco)
if not I:
xxx
            KSYS[aa] = KSYScorr[aa]*A*scipy.power(scipy.cos(alphaBS[aa]*scipy.pi/180.0), B)
else:
KSYS[aa] = ksystab[I]*KSYScorr[aa]
print 'Beam: %d, %f, %f, %f, %f\n' % (TLINE1[0], az*180/scipy.pi, el*180/scipy.pi,alphaBS[aa], KSYS[aa])
eKSYS[aa] = TLINE2[1]*KSYS[aa]
plt.plot(alphaBS[aa],KSYS[aa],'k.', hold=True)
plt.plot([alphaBS[aa], alphaBS[aa]], [KSYS[aa]-eKSYS[aa], KSYS[aa]+eKSYS[aa]], 'b', hold=True)
for i in range(len(ksysbco)):
I = scipy.where(BM[:,0] == ksysbco[i])
az = scipy.float64(BM[I,1]*scipy.pi/180.0)
el = scipy.float64(BM[I,2]*scipy.pi/180.0)
if el>0:
tbs = get_BS_angle(az,el)
tksys = ksystab[i]
plt.plot(tbs,tksys,'rx',hold=True)
plt.plot(thbs,oldksys,'r-', hold=True)
plt.xlabel('Angle off Boresight')
plt.ylabel('Ksys')
sc = scipy.ones(len(alphaBS))
alphaBS[0] = 0
num = numpy.unique(alphaBS)
y = scipy.zeros(len(num))
for x in range(len(num)):
I = scipy.where(num[x] == alphaBS)
y[x] = scipy.median(KSYS[I])
#initial guess
[a,b,c],flag = leastsq(residual,[0,0,0],args=(y,num))
yn = a*scipy.power(scipy.cos(thbs*scipy.pi/180.0+c), b) #*.5265
plt.plot(thbs,yn,'k')
plt.plot(thbs,yn*1.1, '--k')
plt.plot(thbs,yn*.9,'--k')
#load AC
try:
dat = sio.loadmat('cal-201302-filelist_ac.txt_2.01_3.07-03.04.2013.mat')
acx = dat['x'][0]
acCal = acx[0]*scipy.power(scipy.cos(thbs*scipy.pi/180.0+acx[2]),acx[1])
plt.plot(thbs,acCal,'g')
except:
pass
#load previous month
try:
dat = sio.loadmat('cal-201206-filelist_lp.txt_5.55_12.16-09.28.2012.mat')
acx = dat['x'][0]
acCal = acx[0]*scipy.power(scipy.cos(thbs*scipy.pi/180.0+acx[2]),acx[1])
plt.plot(thbs,acCal,'m')
except:
pass
plt.title('x=%2.3f %2.3f %2.3f' % (a,b,c))
plt.xlim((numpy.min(thbs)-1, numpy.max(thbs)-1))
oname = '%s-%s_%2.2f_%2.2f-%s' % (exp,fname[0], a,b,date)
sio.savemat(oname +'.mat',{'x':[a,b,c]},oned_as='row')
plt.savefig(oname + '.png', dpi=200)
plt.show()
| [
"fitter@heaviside.local"
] | fitter@heaviside.local |
9933ea10946149d245387f2a9a519a90495e4858 | 155cbccc3ef3b8cba80629f2a26d7e76968a639c | /thelma/repositories/rdb/schema/tables/target.py | c21e56dfe6f9f876b42d6da2123e6a18e7e5cd8a | [
"MIT"
] | permissive | papagr/TheLMA | 1fc65f0a7d3a4b7f9bb2d201259efe5568c2bf78 | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | refs/heads/master | 2022-12-24T20:05:28.229303 | 2020-09-26T13:57:48 | 2020-09-26T13:57:48 | 279,159,864 | 1 | 0 | MIT | 2020-07-12T22:40:36 | 2020-07-12T22:40:35 | null | UTF-8 | Python | false | false | 868 | py | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Target table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Table
__docformat__ = "reStructuredText en"
__all__ = ['create_table']
def create_table(metadata, transcript_tbl, molecule_design_tbl):
"Table factory."
tbl = Table('target', metadata,
Column('target_id', Integer, primary_key=True),
Column('molecule_design_id', Integer,
ForeignKey(molecule_design_tbl.c.molecule_design_id),
nullable=False),
Column('transcript_id', Integer,
ForeignKey(transcript_tbl.c.transcript_id), nullable=False)
)
return tbl
| [
"fogathmann@gmail.com"
] | fogathmann@gmail.com |
0fcf8214c7084c69e3dd3f68532b58fcb26ac6e3 | bc54edd6c2aec23ccfe36011bae16eacc1598467 | /simscale_sdk/models/one_of_velocity_field_selection_velocity_type.py | 03a7e6c7257973e8d346a387920c8da883e7e035 | [
"MIT"
] | permissive | SimScaleGmbH/simscale-python-sdk | 4d9538d5efcadae718f12504fb2c7051bbe4b712 | 6fe410d676bf53df13c461cb0b3504278490a9bb | refs/heads/master | 2023-08-17T03:30:50.891887 | 2023-08-14T08:09:36 | 2023-08-14T08:09:36 | 331,949,105 | 17 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,118 | py | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class OneOfVelocityFieldSelectionVelocityType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str'
}
attribute_map = {
'type': 'type'
}
discriminator_value_class_map = {
'ABSOLUTE': 'AbsoluteHarmonicVelocityFieldType',
'RELATIVE': 'RelativeHarmonicVelocityFieldType'
}
def __init__(self, type='RELATIVE', local_vars_configuration=None): # noqa: E501
"""OneOfVelocityFieldSelectionVelocityType - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self.discriminator = 'type'
self.type = type
@property
def type(self):
"""Gets the type of this OneOfVelocityFieldSelectionVelocityType. # noqa: E501
Schema name: RelativeHarmonicVelocityFieldType # noqa: E501
:return: The type of this OneOfVelocityFieldSelectionVelocityType. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this OneOfVelocityFieldSelectionVelocityType.
Schema name: RelativeHarmonicVelocityFieldType # noqa: E501
:param type: The type of this OneOfVelocityFieldSelectionVelocityType. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OneOfVelocityFieldSelectionVelocityType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OneOfVelocityFieldSelectionVelocityType):
return True
return self.to_dict() != other.to_dict()
| [
"simscale"
] | simscale |
f74f89014de6b788ad040b5d142c3e87489d3fb6 | 990b92264109dc01dbfddeb6f5e75675037fd829 | /app/api/json_responses.py | aa53ddbc8b10a312799484ff3ca05a950f83b22d | [
"Apache-2.0"
] | permissive | CitoEngine/cito_engine | 20efa189abab1b684b60b260c1ea9ed16f6ea0f2 | 95852dd109d86a344726d7b11ed1132d4e48426b | refs/heads/master | 2020-05-21T15:04:24.011603 | 2019-02-08T04:51:42 | 2019-02-08T04:51:42 | 17,123,947 | 9 | 13 | Apache-2.0 | 2019-02-08T04:51:43 | 2014-02-24T03:17:04 | Python | UTF-8 | Python | false | false | 647 | py | import json
from django.http import HttpResponseBadRequest, HttpResponse
def json_error(msg):
return HttpResponseBadRequest('{"status": "error", "reason": "%s"}' % msg,
content_type="application/json")
def json_ok(msg):
return HttpResponse('{"status": "ok", "reason": "%s"}' % msg,
content_type="application/json")
def json_warning(msg):
return HttpResponse('{"status": "warning", "reason": "%s"}' % msg,
content_type="application/json")
def json_response(msg_dict):
return HttpResponse(json.dumps(msg_dict), content_type="application/json") | [
"cyrus@extremeunix.com"
] | cyrus@extremeunix.com |
3a31448503cb98d12724ebcc831f4fb83774e782 | 2f989d067213e7a1e19904d482a8f9c15590804c | /lib/python3.4/site-packages/storages/backends/mogile.py | 5a31f663af9af93674069dd8a01eb71499f442fa | [
"MIT"
] | permissive | levabd/smart4-portal | beb1cf8847134fdf169ab01c38eed7e874c66473 | 2c18ba593ce7e9a1e17c3559e6343a14a13ab88c | refs/heads/master | 2023-02-18T05:49:40.612697 | 2022-08-02T09:35:34 | 2022-08-02T09:35:34 | 116,001,098 | 0 | 1 | MIT | 2023-02-15T21:34:01 | 2018-01-02T10:00:07 | Roff | UTF-8 | Python | false | false | 3,904 | py | from __future__ import print_function
import mimetypes
from django.conf import settings
from django.core.cache import cache
from django.utils.text import force_text
from django.http import HttpResponse, HttpResponseNotFound
from django.core.exceptions import ImproperlyConfigured
from storages.compat import urlparse, BytesIO, Storage
try:
import mogilefs
except ImportError:
raise ImproperlyConfigured("Could not load mogilefs dependency.\
\nSee http://mogilefs.pbworks.com/Client-Libraries")
class MogileFSStorage(Storage):
"""MogileFS filesystem storage"""
def __init__(self, base_url=settings.MEDIA_URL):
# the MOGILEFS_MEDIA_URL overrides MEDIA_URL
if hasattr(settings, 'MOGILEFS_MEDIA_URL'):
self.base_url = settings.MOGILEFS_MEDIA_URL
else:
self.base_url = base_url
for var in ('MOGILEFS_TRACKERS', 'MOGILEFS_DOMAIN',):
if not hasattr(settings, var):
raise ImproperlyConfigured("You must define %s to use the MogileFS backend." % var)
self.trackers = settings.MOGILEFS_TRACKERS
self.domain = settings.MOGILEFS_DOMAIN
self.client = mogilefs.Client(self.domain, self.trackers)
def get_mogile_paths(self, filename):
return self.client.get_paths(filename)
# The following methods define the Backend API
def filesize(self, filename):
        raise NotImplementedError
#return os.path.getsize(self._get_absolute_path(filename))
def path(self, filename):
paths = self.get_mogile_paths(filename)
if paths:
return self.get_mogile_paths(filename)[0]
else:
return None
def url(self, filename):
return urlparse.urljoin(self.base_url, filename).replace('\\', '/')
def open(self, filename, mode='rb'):
        raise NotImplementedError
#return open(self._get_absolute_path(filename), mode)
def exists(self, filename):
return filename in self.client
def save(self, filename, raw_contents, max_length=None):
filename = self.get_available_name(filename, max_length)
if not hasattr(self, 'mogile_class'):
self.mogile_class = None
# Write the file to mogile
success = self.client.send_file(filename, BytesIO(raw_contents), self.mogile_class)
if success:
print("Wrote file to key %s, %s@%s" % (filename, self.domain, self.trackers[0]))
else:
print("FAILURE writing file %s" % (filename))
return force_text(filename.replace('\\', '/'))
def delete(self, filename):
self.client.delete(filename)
def serve_mogilefs_file(request, key=None):
"""
Called when a user requests an image.
Either reproxy the path to perlbal, or serve the image outright
"""
# not the best way to do this, since we create a client each time
mimetype = mimetypes.guess_type(key)[0] or "application/x-octet-stream"
client = mogilefs.Client(settings.MOGILEFS_DOMAIN, settings.MOGILEFS_TRACKERS)
if hasattr(settings, "SERVE_WITH_PERLBAL") and settings.SERVE_WITH_PERLBAL:
# we're reproxying with perlbal
# check the path cache
path = cache.get(key)
if not path:
path = client.get_paths(key)
cache.set(key, path, 60)
if path:
response = HttpResponse(content_type=mimetype)
response['X-REPROXY-URL'] = path[0]
else:
response = HttpResponseNotFound()
else:
# we don't have perlbal, let's just serve the image via django
file_data = client[key]
if file_data:
response = HttpResponse(file_data, mimetype=mimetype)
else:
response = HttpResponseNotFound()
return response
| [
"levabd@gmail.com"
] | levabd@gmail.com |
73a03dbe897ddbbceb91c33f3e51473d20761723 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_438/run_cfg.py | 143de067caaee38065ed63af168dcc03c9de543a | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544840/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_652.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_653.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_654.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_655.root',
'/store/cmst3/user/cmgtools/CMG/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_656.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
26bfaaa35b9290e0391bef9a2347fed6f0d202b4 | ed6c149d5d661c034bc59a7811104a69cfd3fd0e | /version2/calculate_STD_Month.py | 0b9d07bf243d922158c81c9f58bc2e26962bffaf | [] | no_license | changshun/Wind_Python | edcc87bd4c076fadd8d77a1d3e79a84b29f35b8f | f97c33d4cbb8607c9d86f46a4990dac862dff361 | refs/heads/master | 2021-04-06T08:31:43.347857 | 2017-11-16T02:53:59 | 2017-11-16T02:53:59 | 124,359,994 | 1 | 4 | null | 2018-03-08T08:21:38 | 2018-03-08T08:21:38 | null | UTF-8 | Python | false | false | 5,937 | py | #coding=utf-8
import sys
import MySQLdb
import datetime
import pandas as pd
from deal_with_day_data import *
reload(sys)
sys.setdefaultencoding('utf8')
def calculate_daily_return_date_list(stock_code):
all_close_price_list,all_date_list = get_date_close_list(stock_code,table_name="table_day_data")
temp_1 = all_close_price_list[0:-1]
temp_2 = all_close_price_list[1:]
# temp_1 = [5,5,5,5,5]
# temp_2 = [7.5,7.5,7.5,7.5,7.5]
# temp_daily_return_list = map(lambda (a,b):(b-a)/a, zip(temp_1,temp_2))
# daily_return_list = [0] + temp_daily_return_list
temp_daily_return_list = []
for i in range(len(temp_1)):
if temp_1[i] != None and temp_2[i] != None:
temp_daily_return = (temp_2[i] - temp_1[i]) / temp_1[i]
else:
temp_daily_return = None
temp_daily_return_list.append(temp_daily_return)
# temp_1 = pd.Series(temp_1)
# temp_2 = pd.Series(temp_2)
    # # this works when both lists contain complete data, but if any value is None the calculation goes wrong
    # # so use pandas to handle it
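    # (Illustrative note, not part of the original code.) With pandas the same
    # calculation could be written in vectorised form, and None/NaN values simply
    # propagate as NaN instead of needing the explicit loop above:
    #   s1 = pd.Series(temp_1, dtype=float)
    #   s2 = pd.Series(temp_2, dtype=float)
    #   temp_daily_return_list = ((s2 - s1) / s1).tolist()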
daily_return_list = [0] + temp_daily_return_list
return daily_return_list,all_date_list
def STD_function(input_list):
temp = pd.Series(input_list)
std = temp.std()
return std
def insert_data_into_table(table_name,insert_column,stock,date_list,data_list):
db = MySQLdb.connect(host='127.0.0.1', user='root', passwd='zjz4818774', db='invest_after_calculation', port=3306,charset='utf8')
cursor = db.cursor()
try:
if len(date_list)==len(data_list):
for i in range(len(date_list)):
# print table_name,insert_column,data_list[i],date_list[i],stock
if data_list[i] != None and pd.isnull(data_list[i]) == False:
sql = "UPDATE "+table_name+" SET "+insert_column+"="+str(data_list[i])+" WHERE trade_date='"+date_list[i]+" 00:00:00'"+" AND stock_code='"+stock+"'"
print sql
cursor.execute(sql)
else:
print "this data is None , we don not need to update it"
else:
print "ERROR: insert data into table...len(date_list)!=len(data_list)"
db.commit()
print "update data successfully"
except MySQLdb.Error, e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
print "but this error will not cause wrong data, everything is ok"
cursor.close()
db.close()
##################################### Compute the 3- and 6-month std #########################################################
############################################ unified, packaged function ###############################
def calculate_std_month(stock_code,time_length):
std_month_list = [0 for i in range(time_length-1) ]
# stock_code = "000002.SZ"
daily_return_list,all_date_list = calculate_daily_return_date_list(stock_code)
    # get the lists of month-start (BOM) and month-end (EOM) dates from 2010 to 2016
all_BOM,all_EOM = get_all_BOM_EOM()
BOM_index_list = []
EOM_index_list = []
for i in range(len(all_BOM)):
BOM_index_list.append(all_date_list.index(all_BOM[i]))
for i in range(len(all_EOM)):
EOM_index_list.append(all_date_list.index(all_EOM[i]))
# print EOM_index_list
if len(BOM_index_list)==len(EOM_index_list):
print "len(BOM_index_list)==len(EOM_index_list)"
for i in range(len(BOM_index_list)-(time_length-1)):
            # EOM_index is a position within all_close_price_list and all_date_list; the i in EOM_index_list[i] is a position within EOM_index_list
            # EOM_index_list[i+2]+1: the +1 is needed because slicing "takes the head but not the tail" (the end index is exclusive)
std_month = STD_function(daily_return_list[BOM_index_list[i]:(EOM_index_list[i+(time_length-1)]+1)])
std_month_list.append(std_month)
else:
print "calculate_std_month执行出错error"
return
insert_data_into_table("table_month_data",str(time_length)+"_month_std",stock_code,all_EOM,std_month_list)
return std_month_list
############################################### Drive the calculation and storage #########################################################
############################################## unified, packaged function ########################################################
def calculate_and_store_std_month(symbols,time_length):
db = MySQLdb.connect(host='127.0.0.1', user='root', passwd='zjz4818774', db='invest_after_calculation', port=3306,charset='utf8')
cursor = db.cursor()
sql = "alter table table_month_data add "+str(time_length)+"_month_std double"
try:
cursor.execute(sql)
print "add key:"+str(time_length)+"_month_std successfully"
except MySQLdb.Error, e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
print "but this error will not cause wrong data, everything is ok"
cursor.close()
db.close()
    # symbols=['000002.SZ','000008.SZ','000009.SZ','000060.SZ','000063.SZ','000069.SZ','000100.SZ','000156.SZ','000157.SZ']  # stock codes assigned directly, for testing
for symbol in symbols:
calculate_std_month(symbol,time_length)
if __name__ == "__main__":
    # symbols=['000002.SZ','000008.SZ','000009.SZ','000060.SZ','000063.SZ','000069.SZ','000100.SZ','000156.SZ','000157.SZ']  # stock codes assigned directly, for testing
from stocks_pool_for_HZ300 import select_good_stocks
symbols = select_good_stocks()
# a_list = [5 for i in range(100)]+ [0 for i in range(100)]
# print STD_function(a_list)
# print pd.Series(a_list).std()
calculate_and_store_std_month(symbols,1)
calculate_and_store_std_month(symbols,3)
calculate_and_store_std_month(symbols,6)
| [
"584392383@qq.com"
] | 584392383@qq.com |
1d636b5b499388ffc28a88f1f1e0afbf042662d1 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/ipykernel-4.3.1-py27_0/lib/python2.7/site-packages/ipykernel/parentpoller.py | 227614d44e0cf146bcb8c42edf673ea0ab86a5be | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 3,969 | py | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
import ctypes
except:
ctypes = None
import os
import platform
import signal
import time
try:
from _thread import interrupt_main # Py 3
except ImportError:
from thread import interrupt_main # Py 2
from threading import Thread
from IPython.utils.warn import warn
class ParentPollerUnix(Thread):
""" A Unix-specific daemon thread that terminates the program immediately
when the parent process no longer exists.
"""
def __init__(self):
super(ParentPollerUnix, self).__init__()
self.daemon = True
def run(self):
# We cannot use os.waitpid because it works only for child processes.
from errno import EINTR
while True:
try:
if os.getppid() == 1:
os._exit(1)
time.sleep(1.0)
except OSError as e:
if e.errno == EINTR:
continue
raise
class ParentPollerWindows(Thread):
""" A Windows-specific daemon thread that listens for a special event that
signals an interrupt and, optionally, terminates the program immediately
when the parent process no longer exists.
"""
def __init__(self, interrupt_handle=None, parent_handle=None):
""" Create the poller. At least one of the optional parameters must be
provided.
Parameters
----------
interrupt_handle : HANDLE (int), optional
If provided, the program will generate a Ctrl+C event when this
handle is signaled.
parent_handle : HANDLE (int), optional
If provided, the program will terminate immediately when this
handle is signaled.
"""
assert(interrupt_handle or parent_handle)
super(ParentPollerWindows, self).__init__()
if ctypes is None:
raise ImportError("ParentPollerWindows requires ctypes")
self.daemon = True
self.interrupt_handle = interrupt_handle
self.parent_handle = parent_handle
def run(self):
""" Run the poll loop. This method never returns.
"""
try:
from _winapi import WAIT_OBJECT_0, INFINITE
except ImportError:
from _subprocess import WAIT_OBJECT_0, INFINITE
# Build the list of handle to listen on.
handles = []
if self.interrupt_handle:
handles.append(self.interrupt_handle)
if self.parent_handle:
handles.append(self.parent_handle)
arch = platform.architecture()[0]
c_int = ctypes.c_int64 if arch.startswith('64') else ctypes.c_int
# Listen forever.
while True:
result = ctypes.windll.kernel32.WaitForMultipleObjects(
len(handles), # nCount
(c_int * len(handles))(*handles), # lpHandles
False, # bWaitAll
INFINITE) # dwMilliseconds
if WAIT_OBJECT_0 <= result < len(handles):
handle = handles[result - WAIT_OBJECT_0]
if handle == self.interrupt_handle:
# check if signal handler is callable
# to avoid 'int not callable' error (Python issue #23395)
if callable(signal.getsignal(signal.SIGINT)):
interrupt_main()
elif handle == self.parent_handle:
os._exit(1)
elif result < 0:
# wait failed, just give up and stop polling.
warn("""Parent poll failed. If the frontend dies,
the kernel may be left running. Please let us know
about your system (bitness, Python, etc.) at
ipython-dev@scipy.org""")
return
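# Example (illustrative sketch, not part of the original module): on Unix a
# kernel process could simply start the poller as a daemon thread:
#     poller = ParentPollerUnix()
#     poller.start()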
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
283ed7d85ec7bd24039b2553c5fb906115ec1c38 | ad212b92beac17c4d061848c1dcd443d02a168c8 | /python/1641_count_sorted_vowel_strings/dfs.py | e1bbd040e0ec74005cea31f9eda206125a247880 | [] | no_license | 21eleven/leetcode-solutions | 5ec97e4391c8ebaa77f4404a1155f3ef464953b3 | 35c91e6f5f5ed348186b8641e6fc49c825322d32 | refs/heads/master | 2023-03-03T10:22:41.726612 | 2021-02-13T21:02:13 | 2021-02-13T21:02:13 | 260,374,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | """
1641. Count Sorted Vowel Strings
Medium
Given an integer n, return the number of strings of length n that consist only of vowels (a, e, i, o, u) and are lexicographically sorted.
A string s is lexicographically sorted if for all valid i, s[i] is the same as or comes before s[i+1] in the alphabet.
Example 1:
Input: n = 1
Output: 5
Explanation: The 5 sorted strings that consist of vowels only are ["a","e","i","o","u"].
Example 2:
Input: n = 2
Output: 15
Explanation: The 15 sorted strings that consist of vowels only are
["aa","ae","ai","ao","au","ee","ei","eo","eu","ii","io","iu","oo","ou","uu"].
Note that "ea" is not a valid string since 'e' comes after 'a' in the alphabet.
Example 3:
Input: n = 33
Output: 66045
Constraints:
1 <= n <= 50
"""
class Solution:
def countVowelStrings(self, n: int) -> int:
idx = {"a":0, 'e':1, 'i':2, 'o':3, 'u':4}
vowels = ['a', 'e', 'i', 'o', 'u']
def childs(c, l):
if c == 1:
return len(vowels[idx[l]:])
else:
num = 0
for char in vowels[idx[l]:]:
num += childs(c-1,char)
return num
return childs(n, 'a')
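# Quick sanity check (not part of the original solution):
#     Solution().countVowelStrings(2)  # -> 15, matching Example 2 above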
| [
"noahlidell@gmail.com"
] | noahlidell@gmail.com |
656b0068685aa37f43d2d0b3272a203bb951a7c1 | 027b07d8a1aa019e61446469b371e0873181e0d2 | /pl1.py | 34c20ec31f53c4b474e8aee98c16930de6cd6733 | [] | no_license | sangeetha19399/pythonguvipgm | 803a71ecc0712c22ebdfd749bbda8ebedbd2ba8c | 7d15d395fd4187f498e0da26e09312e5fe0c63b3 | refs/heads/master | 2020-05-23T02:02:12.813076 | 2019-07-19T06:18:32 | 2019-07-19T06:18:32 | 186,595,069 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | str1=input()
str2=str1[::-1]
print(str2)
| [
"noreply@github.com"
] | sangeetha19399.noreply@github.com |
836faf7996943e286a897673cb06f09a6aada929 | 896c7df96be679ec24e09f920a19c933ccc4e234 | /summary.py | 444469d30c5a986ec11dbb40b70fdad7713279a2 | [] | no_license | faircloth-lab/uce-finder | 3c4ae775b284a65d99f7130108f90da0aaf8b64f | 3f9fc961918e13de5e4455b2aa607ea561e497a6 | refs/heads/master | 2021-01-23T15:42:06.008075 | 2013-05-01T00:57:02 | 2013-05-01T00:57:02 | 9,782,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,755 | py | #!/usr/bin/env python
# encoding: utf-8
"""
summary.py
Created by Brant Faircloth on 2008-07-05.
Copyright (c) 2008 Brant Faircloth. All rights reserved.
This program scans MAF files for conserved elements and stores
those results in an sqlite database
"""
import os
import re
import pdb # remove at some point
import time
import numpy
import sqlite3
import sequence
import argparse
import bx.align.maf
#import multiprocessing
def get_args():
"""Get arguments from CLI"""
parser = argparse.ArgumentParser(
description="""summary.py""")
parser.add_argument(
"--maf",
required=True,
default=None,
help="""The path to the directory containing maf file(s)"""
)
parser.add_argument(
"--db",
required=True,
default=None,
help="""The name of the output SQLITE database to hold results"""
)
parser.add_argument(
"--metadata-key",
required=True,
dest="metadata",
type=str,
help="""The primary species in the alignment (e.g. the one on top in the MAF file)"""
)
parser.add_argument(
"--alignment-length",
dest="align",
type=int,
default=25,
help="""The minimum acceptable alignment length"""
)
parser.add_argument(
"--consensus-length",
dest="consensus",
type=int,
default=25,
help="""The minimum acceptable consensus length"""
)
return parser.parse_args()
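# Example invocation (hypothetical paths; a sketch, not part of the original script):
#     python summary.py --maf /path/to/maf_dir --db conserved.sqlite \
#         --metadata-key galGal3 --alignment-length 25 --consensus-length 25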
def spScreen(a, minAlignLength):
'''screen alignments to ensure they meet minAlignLength'''
for spp in a.components:
if len(a.components[0].text) > minAlignLength:
return a
def alignMetadata(counter, candAlign, cons, refPosition, altPosition, metadataKey):
'''get metdata for alignment based on species in metadataKey'''
for seq in candAlign.components:
name = seq.src
metadata = {}
#pdb.set_trace()
if name.split('.')[0] == metadataKey:
metadata['target_spp'] = name.split('.')[0]
metadata['target_chromo'] = '.'.join(name.split('.')[1:])
metadata['target_start'] = seq.forward_strand_start
metadata['target_len'] = seq.size
metadata['target_end'] = seq.forward_strand_end
metadata['target_strand'] = seq.strand
metadata['cons'] = cons
metadata['cons_len'] = len(cons)
# add values to metadata, making up for 0 indexing
metadata['target_cons_start'] = metadata['target_start'] + 1 + refPosition[0]
metadata['target_cons_end'] = metadata['target_start'] + refPosition[1]
metadata['query_spp'] = candAlign.components[1].src.split('.')[0]
metadata['query_chromo'] = '.'.join(candAlign.components[1].src.split('.')[1:])
metadata['query_strand'] = candAlign.components[1].strand
metadata['query_len'] = candAlign.components[1].size
# deal with forward and reverse strand weirdness
#pdb.set_trace()
if metadata['query_strand'] == '+':
metadata['query_start'] = candAlign.components[1].start
metadata['query_end'] = candAlign.components[1].start + candAlign.components[1].size
metadata['query_cons_start'] = candAlign.components[1].start + 1 + altPosition[0]
metadata['query_cons_end'] = candAlign.components[1].start + altPosition[1]
else:
metadata['query_end'] = candAlign.components[1].src_size - candAlign.components[1].start
metadata['query_start'] = metadata['query_end'] - (candAlign.components[1].size - 1)
metadata['query_cons_end'] = candAlign.components[1].src_size - (candAlign.components[1].start + altPosition[0])
metadata['query_cons_start'] = candAlign.components[1].src_size - (candAlign.components[1].start + altPosition[1] - 1)
metadata['target_cons_map'] = (('%s:%s-%s') % (metadata['target_chromo'], metadata['target_cons_start'], metadata['target_cons_end']))
metadata['query_cons_map'] = (('%s:%s-%s') % (metadata['query_chromo'], metadata['query_cons_start'], metadata['query_cons_end']))
break
#pdb.set_trace()
return metadata
def createCons(candAlign):
'''stack sequence and return dumb (but smart!) consensus with
metadata'''
for seq in range(len(candAlign.components)):
if seq == 0:
zString = candAlign.components[seq].text
zString = numpy.array(list(zString))
seqArray = zString
else:
nzString = candAlign.components[seq].text
nzString = numpy.array(list(nzString))
seqArray = numpy.vstack((seqArray, nzString))
#pdb.set_trace()
seqStack = sequence.stack(seqArray)
consensus = seqStack.consensus()
return consensus
def filterCons(unfilteredConsensus, minConsensusLength, iterate=False):
'''filter out short, gappy, mismatching alignments (most of them)'''
# find masked|unmasked block > minConsensusLength
searchString = (('[ACGT]{%i,}') % (minConsensusLength))
pattern = re.compile(searchString)
if not iterate:
masked = pattern.search(unfilteredConsensus)
if masked:
return list(masked.group())
else:
return False
else:
masked = pattern.findall(unfilteredConsensus)
if masked:
return masked
else:
return False
def positioner(candAlign, cons):
'''return correct positions of the conserved area relative to the reference seq
by degapping while also dealing with repeat-masked sequence in the conserved area'''
# strip gap character from reference seq
pattern = re.compile('-+')
cleanCandAlign = pattern.sub('', candAlign.text)
# deal with upper/lowercase issues btw reference <--> alt and
# repeat-masked bases
caseUnawareCons = []
for letter in cons:
if letter.isupper():
bracket = (('[%s%s]') % (letter, letter.lower()))
caseUnawareCons.append(bracket)
else:
bracket = (('[%s%s]') % (letter, letter.upper()))
caseUnawareCons.append(bracket)
caseUnawareCons = ''.join(caseUnawareCons)
# find position of conserved sequence relative to gapless
# candAlign
pattern = re.compile(caseUnawareCons)
position = pattern.search(cleanCandAlign)
return position.span()
def createConsTable(cur):
'''create a table to hold the results'''
try:
# if previous tables exist, drop them
# TODO: fix createDbase() to drop tables safely
cur.execute('''DROP TABLE cons''')
except:
pass
# create the primers results table
cur.execute('''CREATE TABLE cons (
id INTEGER PRIMARY KEY AUTOINCREMENT,
target_spp text NOT NULL,
target_chromo text NOT NULL,
target_start int NOT NULL,
target_end int NOT NULL,
target_len int NOT NULL,
target_strand text NOT NULL,
target_cons_start int NOT NULL,
target_cons_end int NOT NULL,
target_cons_map text NOT NULL,
query_spp text NOT NULL,
query_chromo text NOT NULL,
query_start int NOT NULL,
query_end int NOT NULL,
query_len int UNSIGNED NOT NULL,
query_cons_start int UNSIGNED NOT NULL,
query_cons_end int UNSIGNED NOT NULL,
query_cons_map text NOT NULL,
query_strand text NOT NULL,
cons text NOT NULL,
cons_len int UNSIGNED NOT NULL,
duplicate int)''')
def store(cur, metadata):
'''store the results in sqlite'''
cur.execute('''insert into cons (
target_spp,
target_chromo,
target_start,
target_end,
target_len,
target_strand,
target_cons_start,
target_cons_end,
target_cons_map,
query_spp,
query_chromo,
query_start,
query_end,
query_len,
query_cons_start,
query_cons_end,
query_cons_map,
query_strand,
cons,
cons_len)
values
(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
(metadata['target_spp'],
metadata['target_chromo'],
metadata['target_start'],
metadata['target_end'],
metadata['target_len'],
metadata['target_strand'],
metadata['target_cons_start'],
metadata['target_cons_end'],
metadata['target_cons_map'],
metadata['query_spp'],
metadata['query_chromo'],
metadata['query_start'],
metadata['query_end'],
metadata['query_len'],
metadata['query_cons_start'],
metadata['query_cons_end'],
metadata['query_cons_map'],
metadata['query_strand'],
metadata['cons'],
metadata['cons_len']))
def worker(input, minConsensusLength, minAlignLength, metadataKey, cur):
# We need a separate connection for each mysql cursor or they start
# running into locking problems and things go poorly. Creating a new
# connection for each worker process is the easiest solution.
# Connection pooling (DB-API) didn't work well here.
file = open(input, 'rU')
parser = bx.align.maf.Reader(file)
a = parser.next()
# keep only those alignments longer than minAlignLength
print input
counter = 0
while a:
#print counter
counter += 1
candAlign = spScreen(a, minAlignLength)
if candAlign:
# create sequence stack and stack -> dumb consensus
unfilteredConsensus = createCons(candAlign)
# filter out consensi with < 1 contiguous block of minConsensus
conserved = filterCons(unfilteredConsensus, minConsensusLength, True)
#pdb.set_trace()
if conserved:
for cons in conserved:
#print '%s: ****Valid consensus****' % counter
#print cons
# find 'real' positions in reference sequence (galGal3 here)
# by degapping
refPosition = positioner(candAlign.components[0], cons)
# find 'real' positions in alternate sequence (anoCar1 here)
# by degapping
altPosition = positioner(candAlign.components[1], cons)
# get sequence metadata
metadata = alignMetadata(counter, candAlign, cons, refPosition, altPosition, metadataKey)
# store start, totalLength, end, consensus somewhere
# insert records to dbase
store(cur, metadata)
a = parser.next()
# close the MAF reader
parser.close()
# close the file
file.close()
def file_gen(directory):
'''create an iterable list of filenames in the appropriate directory'''
for f in os.listdir(directory):
if os.path.splitext(f)[1] == '.maf' and os.path.isfile(os.path.join(directory, f)):
yield os.path.join(directory, f)
def main():
start = time.time()
args = get_args()
# connect to our dbase
conn = sqlite3.connect(args.db)
cur = conn.cursor()
createConsTable(cur)
files = file_gen(args.maf)
#pdb.set_trace()
print 'Not using multiprocessing'
try:
while files:
worker(files.next(), args.consensus, args.align, args.metadata, cur)
except StopIteration:
pass
# commit any remaining changes
conn.commit()
cur.close()
conn.close()
# finish up execution time
end = time.time()
execution = (end - start) / 60.
print 'Time for execution = %f min.' % (execution)
if __name__ == '__main__':
main()
| [
"brant@faircloth-lab.org"
] | brant@faircloth-lab.org |
decb665f6168dde3b4141a3dbdd4194e8aecb9ce | ddd466457316662a1455bae429740eb3c8411444 | /python_bbq/file_handling/bytearray_write.py | 27bc2693f0830c56592588d15585fbaf6fe72429 | [] | no_license | fingerman/python_fundamentals | 9ef46e51d6e9b8328e9c949fa0f807f30bd6e482 | 1fb604220922530d1171200a3cf3a927c028a6ed | refs/heads/master | 2023-01-09T12:02:26.712810 | 2020-01-22T16:12:32 | 2020-01-22T16:12:32 | 151,728,846 | 0 | 0 | null | 2022-12-27T15:34:12 | 2018-10-05T13:58:10 | Python | UTF-8 | Python | false | false | 243 | py | from os import strerror
data = bytearray(ord('a'))
for i in range(len(data)):
data[i] = 10 + i
try:
bf = open('file.bin', 'wb')
bf.write(data)
bf.close()
except IOError as e:
print("I/O error occurred: ", strerror(e.errno))
| [
"adamov.george@gmail.com"
] | adamov.george@gmail.com |
6cb26206676192374732d0f5be7a013e92ae98c4 | e56f68855d85dddf9d4aba496976308647c6c9c6 | /alphacsc/other/sporco/sporco/cnvrep.py | fad3dc0170b74bd135ccaff97c8d018268a5488d | [
"BSD-3-Clause"
] | permissive | vishalbelsare/alphacsc | a8c8526e213af4e302362f9d5f0b9329bd930e0f | 7b7f2f3b0456ab338e95924c76828a26b3b8e4b2 | refs/heads/master | 2023-05-26T10:16:15.652732 | 2021-11-17T00:00:18 | 2021-11-17T00:00:18 | 158,184,721 | 0 | 0 | BSD-3-Clause | 2021-11-26T10:46:29 | 2018-11-19T08:11:13 | Python | UTF-8 | Python | false | false | 36,416 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2015-2017 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Classes and functions that support working with convolutional
representations"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import range
import pprint
import functools
import numpy as np
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
class CSC_ConvRepIndexing(object):
"""Manage the inference of problem dimensions and the roles of
:class:`numpy.ndarray` indices for convolutional representations in
convolutional sparse coding problems (e.g. :class:`.admm.cbpdn.ConvBPDN`
and related classes).
"""
def __init__(self, D, S, dimK=None, dimN=2):
"""Initialise a ConvRepIndexing object representing dimensions of S
(input signal), D (dictionary), and X (coefficient array) in a
convolutional representation. These dimensions are inferred
from the input `D` and `S` as well as from parameters `dimN` and
`dimK`. Management and inferrence of these problem dimensions
is not entirely straightforward because :class:`.admm.cbpdn.ConvBPDN`
and related classes make use *internally* of S, D, and X arrays
with a standard layout (described below), but *input* `S` and `D`
are allowed to deviate from this layout for the convenience of
the user.
The most fundamental parameter is `dimN`, which specifies the
dimensionality of the spatial/temporal samples being
represented (e.g. `dimN` = 2 for representations of 2D
images). This should be common to *input* S and D, and is also
common to *internal* S, D, and X. The remaining dimensions of
input `S` can correspond to multiple channels (e.g. for RGB
images) and/or multiple signals (e.g. the array contains
multiple independent images). If input `S` contains two
additional dimensions (in addition to the `dimN` spatial
dimensions), then those are considered to correspond, in
order, to channel and signal indices. If there is only a
single additional dimension, then determination whether it
represents a channel or signal index is more complicated. The
rule for making this determination is as follows:
* if `dimK` is set to 0 or 1 instead of the default ``None``, then
that value is taken as the number of signal indices in input `S`
and any remaining indices are taken as channel indices (i.e. if
`dimK` = 0 then dimC = 1 and if `dimK` = 1 then dimC = 0).
* if `dimK` is ``None`` then the number of channel dimensions is
determined from the number of dimensions in the input dictionary
`D`. Input `D` should have at least `dimN` + 1 dimensions, with the
final dimension indexing dictionary filters. If it has exactly
`dimN` + 1 dimensions then it is a single-channel dictionary,
and input `S` is also assumed to be single-channel, with the
additional index in `S` assigned as a signal index (i.e. dimK = 1).
Conversely, if input `D` has `dimN` + 2 dimensions it is a
multi-channel dictionary, and the additional index in `S` is
assigned as a channel index (i.e. dimC = 1).
Note that it is an error to specify `dimK` = 1 if input `S`
has `dimN` + 1 dimensions and input `D` has `dimN` + 2
dimensions since a multi-channel dictionary requires a
multi-channel signal. (The converse is not true: a
multi-channel signal can be decomposed using a single-channel
dictionary.)
The *internal* data layout for S (signal), D (dictionary), and
X (coefficient array) is (multi-channel dictionary)
::
sptl. chn sig flt
S(N0, N1, ..., C, K, 1)
D(N0, N1, ..., C, 1, M)
X(N0, N1, ..., 1, K, M)
or (single-channel dictionary)
::
sptl. chn sig flt
S(N0, N1, ..., C, K, 1)
D(N0, N1, ..., 1, 1, M)
X(N0, N1, ..., C, K, M)
where
* Nv = [N0, N1, ...] and N = N0 x N1 x ... are the vector of sizes
of the spatial/temporal indices and the total number of
spatial/temporal samples respectively
* C is the number of channels in S
* K is the number of signals in S
* M is the number of filters in D
It should be emphasised that dimC and `dimK` may take on values
0 or 1, and represent the number of channel and signal
dimensions respectively *in input S*. In the internal layout
of S there is always a dimension allocated for channels and
signals. The number of channel dimensions in input `D` and the
corresponding size of that index are represented by dimCd
and Cd respectively.
Parameters
----------
D : array_like
Input dictionary
S : array_like
Input signal
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial/temporal dimensions of signal samples
"""
# Determine whether dictionary is single- or multi-channel
self.dimCd = D.ndim - (dimN + 1)
if self.dimCd == 0:
self.Cd = 1
else:
self.Cd = D.shape[-2]
# Numbers of spatial, channel, and signal dimensions in
# external S are dimN, dimC, and dimK respectively. These need
# to be calculated since inputs D and S do not already have
# the standard data layout above, i.e. singleton dimensions
# will not be present
if dimK is None:
rdim = S.ndim - dimN
if rdim == 0:
(dimC, dimK) = (0, 0)
elif rdim == 1:
dimC = self.dimCd # Assume S has same number of channels as D
dimK = S.ndim - dimN - dimC # Assign remaining channels to K
else:
(dimC, dimK) = (1, 1)
else:
dimC = S.ndim - dimN - dimK # Assign remaining channels to C
self.dimN = dimN # Number of spatial dimensions
self.dimC = dimC # Number of channel dimensions in S
self.dimK = dimK # Number of signal dimensions in S
# Number of channels in S
if self.dimC == 1:
self.C = S.shape[dimN]
else:
self.C = 1
Cx = self.C - self.Cd + 1
# Ensure that multi-channel dictionaries used with a signal with a
# matching number of channels
if self.Cd > 1 and self.C != self.Cd:
raise ValueError("Multi-channel dictionary with signal with "
"mismatched number of channels (Cd=%d, C=%d)" %
(self.Cd, self.C))
# Number of signals in S
if self.dimK == 1:
self.K = S.shape[self.dimN + self.dimC]
else:
self.K = 1
# Number of filters
self.M = D.shape[-1]
# Shape of spatial indices and number of spatial samples
self.Nv = S.shape[0:dimN]
self.N = np.prod(np.array(self.Nv))
# Axis indices for each component of X and internal S and D
self.axisN = tuple(range(0, dimN))
self.axisC = dimN
self.axisK = dimN + 1
self.axisM = dimN + 2
# Shapes of internal S, D, and X
self.shpD = D.shape[0:dimN] + (self.Cd,) + (1,) + (self.M,)
self.shpS = self.Nv + (self.C,) + (self.K,) + (1,)
self.shpX = self.Nv + (Cx,) + (self.K,) + (self.M,)
def __str__(self):
"""Return string representation of object."""
return pprint.pformat(vars(self))
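# Example (illustrative sketch, not part of the original module): for an RGB
# image S with S.shape == (512, 512, 3) and a multi-channel dictionary D with
# D.shape == (8, 8, 3, 64), CSC_ConvRepIndexing(D, S) infers C == Cd == 3,
# K == 1, M == 64, and internal shapes shpS == (512, 512, 3, 1, 1) and
# shpX == (512, 512, 1, 1, 64).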
class DictionarySize(object):
"""Compute dictionary size parameters from a dictionary size
specification tuple as in the dsz argument of :func:`bcrop`."""
def __init__(self, dsz, dimN=2):
"""Initialise a DictionarySize object.
Parameters
----------
dsz : tuple
Dictionary size specification (using the same format as the
`dsz` argument of :func:`bcrop`)
dimN : int, optional (default 2)
Number of spatial dimensions
"""
self.dsz = dsz
if isinstance(dsz[0], tuple):
# Multi-scale dictionary specification
if isinstance(dsz[0][0], tuple):
self.ndim = len(dsz[0][0])
self.nchn = 0
for c in range(0, len(dsz[0])):
self.nchn += dsz[0][c][-2]
else:
self.ndim = len(dsz[0])
if self.ndim == dimN + 1:
self.nchn = 1
else:
self.nchn = dsz[0][-2]
mxsz = np.zeros((dimN,), dtype=int)
self.nflt = 0
for m in range(0, len(dsz)):
if isinstance(dsz[m][0], tuple):
# Separate channel specification
for c in range(0, len(dsz[m])):
mxsz = np.maximum(mxsz, dsz[m][c][0:dimN])
self.nflt += dsz[m][0][-1]
else:
# Combined channel specification
mxsz = np.maximum(mxsz, dsz[m][0:dimN])
self.nflt += dsz[m][-1]
self.mxsz = tuple(mxsz)
else:
# Single scale dictionary specification
self.ndim = len(dsz)
self.mxsz = dsz[0:dimN]
self.nflt = dsz[-1]
if self.ndim == dimN + 1:
self.nchn = 1
else:
self.nchn = dsz[-2]
def __str__(self):
"""Return string representation of object."""
return pprint.pformat(vars(self))
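# Example (sketch): DictionarySize((8, 8, 64)) describes a single-channel 2D
# dictionary of 64 8x8 filters, giving ndim == 3, mxsz == (8, 8), nchn == 1
# and nflt == 64.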
class CDU_ConvRepIndexing(object):
"""Manage the inference of problem dimensions and the roles of
:class:`numpy.ndarray` indices for convolutional representations
in convolutional dictionary update problems (e.g.
:class:`.ConvCnstrMODBase` and derived classes).
"""
def __init__(self, dsz, S, dimK=None, dimN=2):
"""Initialise a ConvRepIndexing object representing dimensions
of S (input signal), D (dictionary), and X (coefficient array)
in a convolutional representation. These dimensions are inferred
from the input `dsz` and `S` as well as from parameters `dimN`
and `dimK`. Management and inference of these problem
dimensions is not entirely straightforward because
:class:`.ConvCnstrMODBase` and related classes make use
*internally* of S, D, and X arrays with a standard layout
(described below), but *input* `S` and `dsz` are allowed to
deviate from this layout for the convenience of the user. Note
that S, D, and X refer to the names of signal, dictionary, and
coefficient map arrays in :class:`.admm.cbpdn.ConvBPDN`; the
corresponding variable names in :class:`.ConvCnstrMODBase` are
S, X, and Z.
The most fundamental parameter is `dimN`, which specifies the
dimensionality of the spatial/temporal samples being represented
(e.g. `dimN` = 2 for representations of 2D images). This should
be common to *input* `S` and `dsz`, and is also common to
*internal* S, D, and X. The remaining dimensions of input `S`
can correspond to multiple channels (e.g. for RGB images) and/or
multiple signals (e.g. the array contains multiple independent
images). If input `S` contains two additional dimensions (in
addition to the `dimN` spatial dimensions), then those are
considered to correspond, in order, to channel and signal
indices. If there is only a single additional dimension, then
determining whether it represents a channel or signal index is
more complicated. The rule for making this determination is as
follows:
* if `dimK` is set to 0 or 1 instead of the default ``None``, then
that value is taken as the number of signal indices in input `S`
and any remaining indices are taken as channel indices (i.e. if
`dimK` = 0 then dimC = 1 and if `dimK` = 1 then dimC = 0).
* if `dimK` is ``None`` then the number of channel dimensions
is determined from the number of dimensions specified in the
input dictionary size `dsz`. Input `dsz` should specify at
least `dimN` + 1 dimensions, with the final dimension
indexing dictionary filters. If it has exactly `dimN` + 1
dimensions then it is a single-channel dictionary, and input
`S` is also assumed to be single-channel, with the
additional index in `S` assigned as a signal index
(i.e. `dimK` = 1). Conversely, if input `dsz` specified
`dimN` + 2 dimensions it is a multi-channel dictionary, and
the additional index in `S` is assigned as a channel index
(i.e. dimC = 1).
Note that it is an error to specify `dimK` = 1 if input `S`
has `dimN` + 1 dimensions and input `dsz` specified `dimN` + 2
dimensions since a multi-channel dictionary requires a
multi-channel signal. (The converse is not true: a
multi-channel signal can be decomposed using a single-channel
dictionary.)
The *internal* data layout for S (signal), D (dictionary), and
X (coefficient array) is (multi-channel dictionary)
::
sptl. chn sig flt
S(N0, N1, ..., C, K, 1)
D(N0, N1, ..., C, 1, M)
X(N0, N1, ..., 1, K, M)
or (single-channel dictionary)
::
sptl. chn sig flt
S(N0, N1, ..., C, K, 1)
D(N0, N1, ..., 1, 1, M)
X(N0, N1, ..., C, K, M)
where
* Nv = [N0, N1, ...] and N = N0 x N1 x ... are the vector of sizes
of the spatial/temporal indices and the total number of
spatial/temporal samples respectively
* C is the number of channels in S
* K is the number of signals in S
* M is the number of filters in D
It should be emphasised that dimC and dimK may take on values
0 or 1, and represent the number of channel and signal
dimensions respectively *in input S*. In the internal layout
of S there is always a dimension allocated for channels and
signals. The number of channel dimensions in input `D` and the
corresponding size of that index are represented by dimCd
and Cd respectively.
Parameters
----------
dsz : tuple
Dictionary size specification (using the same format as the
`dsz` argument of :func:`bcrop`)
S : array_like
Input signal
dimK : 0, 1, or None, optional (default None)
Number of dimensions in input signal corresponding to multiple
independent signals
dimN : int, optional (default 2)
Number of spatial/temporal dimensions of signal samples
"""
# Extract properties of dictionary size specification tuple
ds = DictionarySize(dsz, dimN)
self.dimCd = ds.ndim - dimN - 1
self.Cd = ds.nchn
self.M = ds.nflt
self.dsz = dsz
# Numbers of spatial, channel, and signal dimensions in
# external S are dimN, dimC, and dimK respectively. These need
# to be calculated since inputs D and S do not already have
# the standard data layout above, i.e. singleton dimensions
# will not be present
if dimK is None:
rdim = S.ndim - dimN
if rdim == 0:
(dimC, dimK) = (0, 0)
elif rdim == 1:
dimC = self.dimCd # Assume S has same number of channels as D
dimK = S.ndim - dimN - dimC # Assign remaining channels to K
else:
(dimC, dimK) = (1, 1)
else:
dimC = S.ndim - dimN - dimK # Assign remaining channels to C
self.dimN = dimN # Number of spatial dimensions
self.dimC = dimC # Number of channel dimensions in S
self.dimK = dimK # Number of signal dimensions in S
# Number of channels in S
if self.dimC == 1:
self.C = S.shape[dimN]
else:
self.C = 1
self.Cx = self.C - self.Cd + 1
# Ensure that multi-channel dictionaries used with a signal with a
# matching number of channels
if self.Cd > 1 and self.C != self.Cd:
raise ValueError("Multi-channel dictionary with signal with "
"mismatched number of channels (Cd=%d, C=%d)" %
(self.Cd, self.C))
# Number of signals in S
if self.dimK == 1:
self.K = S.shape[self.dimN + self.dimC]
else:
self.K = 1
# Shape of spatial indices and number of spatial samples
self.Nv = S.shape[0:dimN]
self.N = np.prod(np.array(self.Nv))
# Axis indices for each component of X and internal S and D
self.axisN = tuple(range(0, dimN))
self.axisC = dimN
self.axisK = dimN + 1
self.axisM = dimN + 2
# Shapes of internal S, D, and X
self.shpD = self.Nv + (self.Cd,) + (1,) + (self.M,)
self.shpS = self.Nv + (self.C,) + (self.K,) + (1,)
self.shpX = self.Nv + (self.Cx,) + (self.K,) + (self.M,)
def __str__(self):
"""Return string representation of object."""
return pprint.pformat(vars(self))
def stdformD(D, Cd, M, dimN=2):
"""Reshape dictionary array (`D` in :mod:`.admm.cbpdn` module, `X` in
:mod:`.admm.ccmod` module) to internal standard form.
Parameters
----------
D : array_like
Dictionary array
Cd : int
Size of dictionary channel index
M : int
Number of filters in dictionary
dimN : int, optional (default 2)
Number of problem spatial indices
Returns
-------
Dr : ndarray
Reshaped dictionary array
"""
return D.reshape(D.shape[0:dimN] + (Cd,) + (1,) + (M,))
def l1Wshape(W, cri):
r"""Get appropriate internal shape (see :class:`CSC_ConvRepIndexing`) for
an :math:`\ell_1` norm weight array `W`, as in option ``L1Weight`` in
:class:`.admm.cbpdn.ConvBPDN.Options` and related options classes. The
external shape of `W` depends on the external shape of input data array
`S` and the size of the final axis (i.e. the number of filters) in
dictionary array `D`. The internal shape of the weight array `W` is
required to be compatible for multiplication with the internal sparse
representation array `X`. The simplest criterion for ensuring that the
external `W` is compatible with `S` is to ensure that `W` has shape
``S.shape + D.shape[-1:]``, except that non-singleton dimensions may
be replaced with singleton dimensions. If `W` has a single additional
axis that is neither a spatial axis nor a filter axis, it is assigned
as a channel or multi-signal axis depending on the corresponding
assignment in `S`.
Parameters
----------
W : array_like
Weight array
cri : :class:`CSC_ConvRepIndexing` object
Object specifying convolutional representation dimensions
Returns
-------
shp : tuple
Appropriate internal weight array shape
"""
# Number of dimensions in input array `S`
sdim = cri.dimN + cri.dimC + cri.dimK
if W.ndim < sdim:
if W.size == 1:
# Weight array is a scalar
shpW = (1,) * (cri.dimN+3)
else:
# Invalid weight array shape
raise ValueError('weight array must be scalar or have at least '
'the same number of dimensions as input array')
elif W.ndim == sdim:
# Weight array has the same number of dimensions as the input array
shpW = W.shape + (1,) * (3-cri.dimC-cri.dimK)
else:
# Weight array has more dimensions than the input array
if W.ndim == cri.dimN + 3:
# Weight array is already of the appropriate shape
shpW = W.shape
else:
# Assume that the final axis in the input array is the filter
# index
shpW = W.shape[0:-1] + (1,) * (2-cri.dimC-cri.dimK) + W.shape[-1:]
return shpW
def mskWshape(W, cri):
"""Get appropriate internal shape (see :class:`CSC_ConvRepIndexing` and
:class:`CDU_ConvRepIndexing`) for data fidelity term mask array `W`. The
external shape of `W` depends on the external shape of input data array
`S`. The simplest criterion for ensuring that the external `W` is
compatible with `S` is to ensure that `W` has the same shape as `S`,
except that non-singleton dimensions in `S` may be singleton dimensions
in `W`. If `W` has a single non-spatial axis, it is assigned as a channel
or multi-signal axis depending on the corresponding assignment in `S`.
Parameters
----------
W : array_like
Data fidelity term weight/mask array
cri : :class:`CSC_ConvRepIndexing` object or :class:`CDU_ConvRepIndexing`\
object
Object specifying convolutional representation dimensions
Returns
-------
shp : tuple
Appropriate internal mask array shape
"""
ckdim = W.ndim - cri.dimN
if ckdim >= 2:
# Both C and K axes are present in W
shpW = W.shape
elif ckdim == 1:
# Exactly one of C or K axes is present in W
if cri.C == 1 and cri.K > 1:
# Input S has a single channel and multiple signals
shpW = W.shape[0:cri.dimN] + (1, W.shape[cri.dimN])
elif cri.C > 1 and cri.K == 1:
# Input S has multiple channels and a single signal
shpW = W.shape[0:cri.dimN] + (W.shape[cri.dimN], 1)
else:
# Input S has multiple channels and signals: resolve ambiguity
# by taking extra axis in W as a channel axis
shpW = W.shape[0:cri.dimN] + (W.shape[cri.dimN], 1)
else:
# Neither C nor K axis is present in W
shpW = W.shape + (1, 1)
if ckdim > 2:
return shpW
else:
return shpW + (1,)
def zeromean(v, dsz, dimN=2):
"""Subtract mean value from each filter in the input array v. The
`dsz` parameter specifies the support sizes of each filter using the
same format as the `dsz` parameter of :func:`bcrop`. Support sizes
must be taken into account to ensure that the mean values are
computed over the correct number of samples, ignoring the
zero-padded region in which the filter is embedded.
Parameters
----------
v : array_like
Input dictionary array
dsz : tuple
Filter support size(s)
dimN : int, optional (default 2)
Number of spatial dimensions
Returns
-------
vz : ndarray
Dictionary array with filter means subtracted
"""
vz = v.copy()
if isinstance(dsz[0], tuple):
# Multi-scale dictionary specification
axisN = tuple(range(0, dimN))
m0 = 0 # Initial index of current block of equi-sized filters
# Iterate over distinct filter sizes
for mb in range(0, len(dsz)):
# Determine end index of current block of filters
if isinstance(dsz[mb][0], tuple):
m1 = m0 + dsz[mb][0][-1]
c0 = 0 # Init. idx. of current channel-block of equi-sized flt.
for cb in range(0, len(dsz[mb])):
c1 = c0 + dsz[mb][cb][-2]
# Construct slice corresponding to cropped part of
# current block of filters in output array and set from
# input array
cbslc = tuple([slice(0, x) for x in dsz[mb][cb][0:dimN]]) \
+ (slice(c0, c1),) + (Ellipsis,) + (slice(m0, m1),)
vz[cbslc] -= np.mean(v[cbslc], axisN)
c0 = c1 # Update initial index for start of next block
else:
m1 = m0 + dsz[mb][-1]
# Construct slice corresponding to cropped part of
# current block of filters in output array and set from
# input array
mbslc = tuple([slice(0, x) for x in dsz[mb][0:-1]]) + \
(Ellipsis,) + (slice(m0, m1),)
vz[mbslc] -= np.mean(v[mbslc], axisN)
m0 = m1 # Update initial index for start of next block
else:
# Single scale dictionary specification
axisN = tuple(range(0, dimN))
axnslc = tuple([slice(0, x) for x in dsz[0:dimN]])
vz[axnslc] -= np.mean(v[axnslc], axisN)
return vz
def normalise(v, dimN=2):
r"""Normalise vectors, corresponding to slices along specified number
of initial spatial dimensions of an array, to have unit
:math:`\ell_2` norm. The remaining axes enumerate the distinct
vectors to be normalised.
Parameters
----------
v : array_like
Array with components to be normalised
dimN : int, optional (default 2)
Number of initial dimensions over which norm should be computed
Returns
-------
vnrm : ndarray
Normalised array
"""
axisN = tuple(range(0, dimN))
vn = np.sqrt(np.sum(v**2, axisN, keepdims=True))
vn[vn == 0] = 1.0
return np.asarray(v / vn, dtype=v.dtype)
def zpad(v, Nv):
"""Zero-pad initial axes of array to specified size. Padding is
applied to the right, top, etc. of the array indices.
Parameters
----------
v : array_like
Array to be padded
Nv : tuple
Sizes to which each of initial indices should be padded
Returns
-------
vp : ndarray
Padded array
"""
vp = np.zeros(Nv + v.shape[len(Nv):], dtype=v.dtype)
axnslc = tuple([slice(0, x) for x in v.shape])
vp[axnslc] = v
return vp
def bcrop(v, dsz, dimN=2):
"""Crop specified number of initial spatial dimensions of dictionary
array to specified size. Parameter `dsz` must be a tuple having one
of the following forms (the examples assume two spatial/temporal
dimensions). If all filters are of the same size, then
::
(flt_rows, filt_cols, num_filt)
may be used when the dictionary has a single channel, and
::
(flt_rows, filt_cols, num_chan, num_filt)
should be used for a multi-channel dictionary. If the filters are
not all of the same size, then
::
(
(flt_rows1, filt_cols1, num_filt1),
(flt_rows2, filt_cols2, num_filt2),
...
)
may be used for a single-channel dictionary. A multi-channel
dictionary may be specified in the form
::
(
(flt_rows1, filt_cols1, num_chan, num_filt1),
(flt_rows2, filt_cols2, num_chan, num_filt2),
...
)
or
::
(
(
(flt_rows11, filt_cols11, num_chan11, num_filt1),
(flt_rows21, filt_cols21, num_chan21, num_filt1),
...
)
(
(flt_rows12, filt_cols12, num_chan12, num_filt2),
(flt_rows22, filt_cols22, num_chan22, num_filt2),
...
)
...
)
depending on whether the filters for each channel are of the same
size or not. The total number of dictionary filters is either
num_filt in the first two forms, or the sum of num_filt1,
num_filt2, etc. in the other form. If the filters are not
two-dimensional, then the dimensions above vary accordingly, i.e.,
there may be fewer or more filter spatial dimensions than
flt_rows, filt_cols, e.g.
::
(flt_rows, num_filt)
for one-dimensional signals, or
::
(flt_rows, filt_cols, filt_planes, num_filt)
for three-dimensional signals.
Parameters
----------
v : array_like
Dictionary array to be cropped
dsz : tuple
Filter support size(s)
dimN : int, optional (default 2)
Number of spatial dimensions
Returns
-------
vc : ndarray
Cropped dictionary array
"""
if isinstance(dsz[0], tuple):
# Multi-scale dictionary specification
maxsz = np.zeros((dimN,), dtype=int) # Max. support size
# Iterate over dsz to determine max. support size
for mb in range(0, len(dsz)):
if isinstance(dsz[mb][0], tuple):
for cb in range(0, len(dsz[mb])):
maxsz = np.maximum(maxsz, dsz[mb][cb][0:dimN])
else:
maxsz = np.maximum(maxsz, dsz[mb][0:dimN])
# Init. cropped array
vc = np.zeros(tuple(maxsz) + v.shape[dimN:], dtype=v.dtype)
m0 = 0 # Initial index of current block of equi-sized filters
# Iterate over distinct filter sizes
for mb in range(0, len(dsz)):
# Determine end index of current block of filters
if isinstance(dsz[mb][0], tuple):
m1 = m0 + dsz[mb][0][-1]
c0 = 0 # Init. idx. of current channel-block of equi-sized flt.
for cb in range(0, len(dsz[mb])):
c1 = c0 + dsz[mb][cb][-2]
# Construct slice corresponding to cropped part of
# current block of filters in output array and set from
# input array
cbslc = tuple([slice(0, x) for x in
dsz[mb][cb][0:dimN]]) + (slice(c0, c1),) + \
(Ellipsis,) + (slice(m0, m1),)
vc[cbslc] = v[cbslc]
c0 = c1 # Update initial index for start of next block
else:
m1 = m0 + dsz[mb][-1]
# Construct slice corresponding to cropped part of
# current block of filters in output array and set from
# input array
mbslc = tuple([slice(0, x) for x in dsz[mb][0:-1]]) + \
(Ellipsis,) + (slice(m0, m1),)
vc[mbslc] = v[mbslc]
m0 = m1 # Update initial index for start of next block
return vc
else:
# Single scale dictionary specification
axnslc = tuple([slice(0, x) for x in dsz[0:dimN]])
return v[axnslc]
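# Example (sketch): with dsz = ((8, 8, 16), (12, 12, 32)) and dimN == 2, bcrop
# crops the spatial axes of v to the 12x12 maximum support; the first 16
# filters keep only their 8x8 support (zero elsewhere) and the remaining 32
# keep the full 12x12 support.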
def Pcn(x, dsz, Nv, dimN=2, dimC=1, crp=False, zm=False):
"""Constraint set projection for convolutional dictionary update
problem.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
crp : bool, optional (default False)
Flag indicating whether the result should be cropped to the support
of the largest filter in the dictionary.
zm : bool, optional (default False)
Flag indicating whether the projection function should include
filter mean subtraction
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
if crp:
zpadfn = lambda x: x
else:
zpadfn = lambda x: zpad(x, Nv)
if zm:
zmeanfn = lambda x: zeromean(x, dsz, dimN)
else:
zmeanfn = lambda x: x
return normalise(zmeanfn(zpadfn(bcrop(x, dsz, dimN))), dimN+dimC)
def getPcn(dsz, Nv, dimN=2, dimC=1, crp=False, zm=False):
"""Construct the constraint set projection function for convolutional
dictionary update problem.
Parameters
----------
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
crp : bool, optional (default False)
Flag indicating whether the result should be cropped to the support
of the largest filter in the dictionary.
zm : bool, optional (default False)
Flag indicating whether the projection function should include
filter mean subtraction
Returns
-------
fn : function
Constraint set projection function
"""
fncdict = {(False, False): _Pcn, (False, True): _Pcn_zm,
(True, False): _Pcn_crp, (True, True): _Pcn_zm_crp}
fnc = fncdict[(crp, zm)]
return functools.partial(fnc, dsz=dsz, Nv=Nv, dimN=dimN, dimC=dimC)
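# Example (sketch): Pcn_fn = getPcn(dsz, Nv, zm=True) returns a partial
# function; Pcn_fn(x) then applies the same projection as
# Pcn(x, dsz, Nv, zm=True) above, i.e. cropping to the filter supports,
# zero-padding back to Nv, per-filter mean subtraction, and normalisation.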
def _Pcn(x, dsz, Nv, dimN=2, dimC=1):
"""
Projection onto dictionary update constraint set: support projection and
normalisation. The result has the full spatial dimensions of the input.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
return normalise(zpad(bcrop(x, dsz, dimN), Nv), dimN+dimC)
def _Pcn_zm(x, dsz, Nv, dimN=2, dimC=1):
"""
Projection onto dictionary update constraint set: support projection,
mean subtraction, and normalisation. The result has the full spatial
dimensions of the input.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
return normalise(zeromean(zpad(bcrop(x, dsz, dimN), Nv), dsz), dimN+dimC)
def _Pcn_crp(x, dsz, Nv, dimN=2, dimC=1):
"""
Projection onto dictionary update constraint set: support projection and
normalisation. The result is cropped to the support of the largest filter
in the dictionary.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
return normalise(bcrop(x, dsz, dimN), dimN+dimC)
def _Pcn_zm_crp(x, dsz, Nv, dimN=2, dimC=1):
"""
Projection onto dictionary update constraint set: support projection,
mean subtraction, and normalisation. The result is cropped to the support
of the largest filter in the dictionary.
Parameters
----------
x : array_like
Input array
dsz : tuple
Filter support size(s), specified using the same format as the `dsz`
parameter of :func:`bcrop`.
Nv : tuple
Sizes of problem spatial indices
dimN : int, optional (default 2)
Number of problem spatial indices
dimC : int, optional (default 1)
Number of problem channel indices
Returns
-------
y : ndarray
Projection of input onto constraint set
"""
return normalise(zeromean(bcrop(x, dsz, dimN), dsz, dimN), dimN+dimC)
| [
"tom.dupre-la-tour@m4x.org"
] | tom.dupre-la-tour@m4x.org |
42912e1f5af5f0656862bc4f53c86682ffc290b7 | 21b632797ed6257b13574c341cdd14e6534728a9 | /ryu/lib/packet/tcp.py | 826894426713eb562daa717d6049d91a59e4c61e | [
"Apache-2.0"
] | permissive | MrCocoaCat/ryu | 0473f04e2a840e027e9002f8a6af81745eaf7094 | 9e9571991a73380099b7ba7c6f37e0e587080a6a | refs/heads/master | 2021-06-19T18:09:52.833590 | 2020-05-12T08:17:21 | 2020-05-12T08:17:21 | 163,072,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,400 | py | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import logging
import six
from ryu.lib import stringify
from . import packet_base
from . import packet_utils
from . import bgp
from . import openflow
from . import zebra
LOG = logging.getLogger(__name__)
# TCP Option Kind Numbers
TCP_OPTION_KIND_END_OF_OPTION_LIST = 0 # End of Option List
TCP_OPTION_KIND_NO_OPERATION = 1 # No-Operation
TCP_OPTION_KIND_MAXIMUM_SEGMENT_SIZE = 2 # Maximum Segment Size
TCP_OPTION_KIND_WINDOW_SCALE = 3 # Window Scale
TCP_OPTION_KIND_SACK_PERMITTED = 4 # SACK Permitted
TCP_OPTION_KIND_SACK = 5 # SACK
TCP_OPTION_KIND_TIMESTAMPS = 8 # Timestamps
TCP_OPTION_KIND_USER_TIMEOUT = 28 # User Timeout Option
TCP_OPTION_KIND_AUTHENTICATION = 29 # TCP Authentication Option (TCP-AO)
TCP_FIN = 0x001
TCP_SYN = 0x002
TCP_RST = 0x004
TCP_PSH = 0x008
TCP_ACK = 0x010
TCP_URG = 0x020
TCP_ECE = 0x040
TCP_CWR = 0x080
TCP_NS = 0x100
class tcp(packet_base.PacketBase):
"""TCP (RFC 793) header encoder/decoder class.
An instance has the following attributes at least.
Most of them are the same as the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== ====================
Attribute Description
============== ====================
src_port Source Port
dst_port Destination Port
seq Sequence Number
ack Acknowledgement Number
offset Data Offset \
(0 means automatically-calculate when encoding)
bits Control Bits
window_size Window
csum Checksum \
(0 means automatically-calculate when encoding)
urgent Urgent Pointer
option List of ``TCPOption`` sub-classes or an bytearray
containing options. \
None if no options.
============== ====================
"""
_PACK_STR = '!HHIIBBHHH'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, src_port=1, dst_port=1, seq=0, ack=0, offset=0,
bits=0, window_size=0, csum=0, urgent=0, option=None):
super(tcp, self).__init__()
self.src_port = src_port
self.dst_port = dst_port
self.seq = seq
self.ack = ack
self.offset = offset
self.bits = bits
self.window_size = window_size
self.csum = csum
self.urgent = urgent
self.option = option
def __len__(self):
return self.offset * 4
def has_flags(self, *flags):
"""Check if flags are set on this packet.
returns True if all of the passed flags are set
Example::
>>> pkt = tcp.tcp(bits=(tcp.TCP_SYN | tcp.TCP_ACK))
>>> pkt.has_flags(tcp.TCP_SYN, tcp.TCP_ACK)
True
"""
mask = sum(flags)
return (self.bits & mask) == mask
@staticmethod
def get_payload_type(src_port, dst_port):
from ryu.ofproto.ofproto_common import OFP_TCP_PORT, OFP_SSL_PORT_OLD
if bgp.TCP_SERVER_PORT in [src_port, dst_port]:
return bgp.BGPMessage
elif(src_port in [OFP_TCP_PORT, OFP_SSL_PORT_OLD] or
dst_port in [OFP_TCP_PORT, OFP_SSL_PORT_OLD]):
return openflow.openflow
elif src_port == zebra.ZEBRA_PORT:
return zebra._ZebraMessageFromZebra
elif dst_port == zebra.ZEBRA_PORT:
return zebra.ZebraMessage
else:
return None
@classmethod
def parser(cls, buf):
(src_port, dst_port, seq, ack, offset, bits, window_size,
csum, urgent) = struct.unpack_from(cls._PACK_STR, buf)
offset >>= 4
bits &= 0x3f
length = offset * 4
if length > tcp._MIN_LEN:
option_buf = buf[tcp._MIN_LEN:length]
try:
option = []
while option_buf:
opt, option_buf = TCPOption.parser(option_buf)
option.append(opt)
except struct.error:
LOG.warning(
'Encounter an error during parsing TCP option field.'
'Skip parsing TCP option.')
option = buf[tcp._MIN_LEN:length]
else:
option = None
msg = cls(src_port, dst_port, seq, ack, offset, bits,
window_size, csum, urgent, option)
return msg, cls.get_payload_type(src_port, dst_port), buf[length:]
def serialize(self, payload, prev):
offset = self.offset << 4
h = bytearray(struct.pack(
tcp._PACK_STR, self.src_port, self.dst_port, self.seq,
self.ack, offset, self.bits, self.window_size, self.csum,
self.urgent))
if self.option:
if isinstance(self.option, (list, tuple)):
option_buf = bytearray()
for opt in self.option:
option_buf.extend(opt.serialize())
h.extend(option_buf)
mod = len(option_buf) % 4
else:
h.extend(self.option)
mod = len(self.option) % 4
if mod:
h.extend(bytearray(4 - mod))
if self.offset:
offset = self.offset << 2
if len(h) < offset:
h.extend(bytearray(offset - len(h)))
if self.offset == 0:
self.offset = len(h) >> 2
offset = self.offset << 4
struct.pack_into('!B', h, 12, offset)
if self.csum == 0:
total_length = len(h) + len(payload)
self.csum = packet_utils.checksum_ip(prev, total_length,
h + payload)
struct.pack_into('!H', h, 16, self.csum)
return six.binary_type(h)
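# Example (illustrative, not part of the original module): building a SYN
# segment carrying an MSS option; offset and csum are left at 0 so that they
# are filled in automatically by serialize():
#     syn = tcp(src_port=54321, dst_port=80, bits=TCP_SYN,
#               option=[TCPOptionMaximumSegmentSize(max_seg_size=1460)])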
class TCPOption(stringify.StringifyMixin):
_KINDS = {}
_KIND_PACK_STR = '!B' # kind
NO_BODY_OFFSET = 1 # kind(1 byte)
WITH_BODY_OFFSET = 2 # kind(1 byte) + length(1 byte)
cls_kind = None
cls_length = None
def __init__(self, kind=None, length=None):
self.kind = self.cls_kind if kind is None else kind
self.length = self.cls_length if length is None else length
@classmethod
def register(cls, kind, length):
def _register(subcls):
subcls.cls_kind = kind
subcls.cls_length = length
cls._KINDS[kind] = subcls
return subcls
return _register
@classmethod
def parse(cls, buf):
# For no body TCP Options
return cls(cls.cls_kind, cls.cls_length), buf[cls.cls_length:]
@classmethod
def parser(cls, buf):
(kind,) = struct.unpack_from(cls._KIND_PACK_STR, buf)
subcls = cls._KINDS.get(kind)
if not subcls:
subcls = TCPOptionUnknown
return subcls.parse(buf)
def serialize(self):
# For no body TCP Options
return struct.pack(self._KIND_PACK_STR, self.cls_kind)
class TCPOptionUnknown(TCPOption):
_PACK_STR = '!BB' # kind, length
def __init__(self, value, kind, length):
super(TCPOptionUnknown, self).__init__(kind, length)
self.value = value if value is not None else b''
@classmethod
def parse(cls, buf):
(kind, length) = struct.unpack_from(cls._PACK_STR, buf)
value = buf[2:length]
return cls(value, kind, length), buf[length:]
def serialize(self):
self.length = self.WITH_BODY_OFFSET + len(self.value)
return struct.pack(self._PACK_STR,
self.kind, self.length) + self.value
@TCPOption.register(TCP_OPTION_KIND_END_OF_OPTION_LIST,
TCPOption.NO_BODY_OFFSET)
class TCPOptionEndOfOptionList(TCPOption):
pass
@TCPOption.register(TCP_OPTION_KIND_NO_OPERATION,
TCPOption.NO_BODY_OFFSET)
class TCPOptionNoOperation(TCPOption):
pass
@TCPOption.register(TCP_OPTION_KIND_MAXIMUM_SEGMENT_SIZE, 4)
class TCPOptionMaximumSegmentSize(TCPOption):
_PACK_STR = '!BBH' # kind, length, max_seg_size
def __init__(self, max_seg_size, kind=None, length=None):
super(TCPOptionMaximumSegmentSize, self).__init__(kind, length)
self.max_seg_size = max_seg_size
@classmethod
def parse(cls, buf):
(_, _, max_seg_size) = struct.unpack_from(cls._PACK_STR, buf)
return cls(max_seg_size,
cls.cls_kind, cls.cls_length), buf[cls.cls_length:]
def serialize(self):
return struct.pack(self._PACK_STR,
self.kind, self.length, self.max_seg_size)
@TCPOption.register(TCP_OPTION_KIND_WINDOW_SCALE, 3)
class TCPOptionWindowScale(TCPOption):
_PACK_STR = '!BBB' # kind, length, shift_cnt
def __init__(self, shift_cnt, kind=None, length=None):
super(TCPOptionWindowScale, self).__init__(kind, length)
self.shift_cnt = shift_cnt
@classmethod
def parse(cls, buf):
(_, _, shift_cnt) = struct.unpack_from(cls._PACK_STR, buf)
return cls(shift_cnt,
cls.cls_kind, cls.cls_length), buf[cls.cls_length:]
def serialize(self):
return struct.pack(self._PACK_STR,
self.kind, self.length, self.shift_cnt)
@TCPOption.register(TCP_OPTION_KIND_SACK_PERMITTED, 2)
class TCPOptionSACKPermitted(TCPOption):
_PACK_STR = '!BB' # kind, length
def serialize(self):
return struct.pack(self._PACK_STR, self.kind, self.length)
@TCPOption.register(TCP_OPTION_KIND_SACK,
2) # variable length. 2 is the length except blocks.
class TCPOptionSACK(TCPOption):
_PACK_STR = '!BB' # kind, length
_BLOCK_PACK_STR = '!II' # Left Edge of Block, Right Edge of Block
def __init__(self, blocks, kind=None, length=None):
super(TCPOptionSACK, self).__init__(kind, length)
# blocks is a list of tuple as followings.
# self.blocks = [
# ('Left Edge of 1st Block', 'Right Edge of 1st Block'),
# ...
# ('Left Edge of nth Block', 'Right Edge of nth Block')
# ]
self.blocks = blocks
@classmethod
def parse(cls, buf):
(_, length) = struct.unpack_from(cls._PACK_STR, buf)
blocks_buf = buf[2:length]
blocks = []
while blocks_buf:
lr_block = struct.unpack_from(cls._BLOCK_PACK_STR, blocks_buf)
blocks.append(lr_block) # (left, right)
blocks_buf = blocks_buf[8:]
return cls(blocks, cls.cls_kind, length), buf[length:]
def serialize(self):
buf = bytearray()
for left, right in self.blocks:
buf += struct.pack(self._BLOCK_PACK_STR, left, right)
self.length = self.cls_length + len(buf)
return struct.pack(self._PACK_STR, self.kind, self.length) + buf
@TCPOption.register(TCP_OPTION_KIND_TIMESTAMPS, 10)
class TCPOptionTimestamps(TCPOption):
_PACK_STR = '!BBII' # kind, length, ts_val, ts_ecr
def __init__(self, ts_val, ts_ecr, kind=None, length=None):
super(TCPOptionTimestamps, self).__init__(kind, length)
self.ts_val = ts_val
self.ts_ecr = ts_ecr
@classmethod
def parse(cls, buf):
(_, _, ts_val, ts_ecr) = struct.unpack_from(cls._PACK_STR, buf)
return cls(ts_val, ts_ecr,
cls.cls_kind, cls.cls_length), buf[cls.cls_length:]
def serialize(self):
return struct.pack(self._PACK_STR,
self.kind, self.length, self.ts_val, self.ts_ecr)
@TCPOption.register(TCP_OPTION_KIND_USER_TIMEOUT, 4)
class TCPOptionUserTimeout(TCPOption):
_PACK_STR = '!BBH' # kind, length, granularity(1bit)|user_timeout(15bit)
def __init__(self, granularity, user_timeout, kind=None, length=None):
super(TCPOptionUserTimeout, self).__init__(kind, length)
self.granularity = granularity
self.user_timeout = user_timeout
@classmethod
def parse(cls, buf):
(_, _, body) = struct.unpack_from(cls._PACK_STR, buf)
granularity = body >> 15
user_timeout = body & 0x7fff
return cls(granularity, user_timeout,
cls.cls_kind, cls.cls_length), buf[cls.cls_length:]
def serialize(self):
body = (self.granularity << 15) | self.user_timeout
return struct.pack(self._PACK_STR, self.kind, self.length, body)
@TCPOption.register(TCP_OPTION_KIND_AUTHENTICATION,
4) # variable length. 4 is the length except MAC.
class TCPOptionAuthentication(TCPOption):
_PACK_STR = '!BBBB' # kind, length, key_id, r_next_key_id
def __init__(self, key_id, r_next_key_id, mac, kind=None, length=None):
super(TCPOptionAuthentication, self).__init__(kind, length)
self.key_id = key_id
self.r_next_key_id = r_next_key_id
self.mac = mac
@classmethod
def parse(cls, buf):
(_, length,
key_id, r_next_key_id) = struct.unpack_from(cls._PACK_STR, buf)
mac = buf[4:length]
return cls(key_id, r_next_key_id, mac,
cls.cls_kind, length), buf[length:]
def serialize(self):
self.length = self.cls_length + len(self.mac)
return struct.pack(self._PACK_STR, self.kind, self.length,
self.key_id, self.r_next_key_id) + self.mac
| [
"MrCocoaCat@aliyun.com"
] | MrCocoaCat@aliyun.com |
5f0b62ede497790cede3fa82a2b1a000680a071c | 232ff549e297d4237caf271624716a09e9fbb2d4 | /venv1/bin/pip3 | 7064cc2af3c7014353d1b4f26cbfdccb13706fe0 | [] | no_license | csdnak/python3 | 2dd54dd6fd352f3de42111e08311f22b84e0f043 | 97e2673f20532c7cf89bf66f66673d25a2790c6d | refs/heads/master | 2020-08-31T16:22:32.188784 | 2019-11-25T10:55:23 | 2019-11-25T10:55:23 | 218,730,620 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/home/student/PycharmProjects/untitled/venv1/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"you@example.com"
] | you@example.com | |
fe5794ead0dddb72d93f5b6d535c961ab22d2382 | d57c59070bb167a167b95971e32731c48587cb59 | /backend/wallet/api/v1/serializers.py | 38828a3947e6a817d0937cd1a1c5c264662471dd | [] | no_license | crowdbotics-apps/blind-school-27613 | 2ec76994185f6f15bd8ec0fe48b4a86f10e8a882 | ad3549e18f1841bdac12a558aa59fa97cf9a6691 | refs/heads/master | 2023-05-11T19:18:34.670974 | 2021-05-31T10:12:31 | 2021-05-31T10:12:31 | 372,459,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | from rest_framework import serializers
from wallet.models import (
PaymentTransaction,
PaymentMethod,
TaskerWallet,
TaskerPaymentAccount,
CustomerWallet,
)
class TaskerPaymentAccountSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerPaymentAccount
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
class TaskerWalletSerializer(serializers.ModelSerializer):
class Meta:
model = TaskerWallet
fields = "__all__"
class CustomerWalletSerializer(serializers.ModelSerializer):
class Meta:
model = CustomerWallet
fields = "__all__"
class PaymentTransactionSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentTransaction
fields = "__all__"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e22248db541b874c00354eccfb4666f37e0fc02e | d864fd6c3d1452aa3cffb31ecb20fc14c46a5e91 | /ml_logger/log_client.py | 842e1532bb5078d31c8913b4ce27366ccaeac714 | [] | no_license | jonasrothfuss/ml_logger | 2c9e5592eb24bc70ae80e8d0c9e08e6e7054f3d7 | 2000b38177e3c4892e4fee74d769c1fc0a659424 | refs/heads/master | 2020-03-26T20:42:15.846183 | 2019-06-03T09:09:58 | 2019-06-03T09:09:58 | 145,340,704 | 1 | 0 | null | 2018-08-19T22:03:04 | 2018-08-19T22:03:03 | null | UTF-8 | Python | false | false | 3,940 | py | import os
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
from ml_logger.serdes import serialize, deserialize
from ml_logger.server import LogEntry, LoadEntry, PingData, LoggingServer, ALLOWED_TYPES, Signal, LogOptions, \
RemoveEntry
class LogClient:
local_server = None
def __init__(self, url: str = None, max_workers=None):
if url.startswith("file://"):
self.local_server = LoggingServer(data_dir=url[6:])
elif os.path.isabs(url):
self.local_server = LoggingServer(data_dir=url)
elif url.startswith('http://'):
self.url = url
self.ping_url = os.path.join(url, "ping")
else:
# todo: add https://, and s3://
raise TypeError('log url need to begin with `/`, `file://` or `http://`.')
if max_workers:
self.session = FuturesSession(ThreadPoolExecutor(max_workers=max_workers))
else:
self.session = FuturesSession()
def _get(self, key, dtype):
if self.local_server:
return self.local_server.load(key, dtype)
else:
json = LoadEntry(key, dtype)._asdict()
# note: reading stuff from the server is always synchronous via the result call.
res = self.session.get(self.url, json=json).result()
result = deserialize(res.text)
return result
def _post(self, key, data, dtype, options: LogOptions = None):
if self.local_server:
self.local_server.log(key, data, dtype, options)
else:
# todo: make the json serialization more robust. Not priority b/c this' client-side.
json = LogEntry(key, serialize(data), dtype, options)._asdict()
self.session.post(self.url, json=json)
def _delete(self, key):
if self.local_server:
self.local_server.remove(key)
else:
# todo: make the json serialization more robust. Not priority b/c this' client-side.
json = RemoveEntry(key)._asdict()
self.session.delete(self.url, json=json)
def ping(self, exp_key, status, _duplex=True, burn=True):
# todo: add configuration for early termination
if self.local_server:
signals = self.local_server.ping(exp_key, status)
return deserialize(signals) if _duplex else None
else:
# todo: make the json serialization more robust. Not priority b/c this' client-side.
ping_data = PingData(exp_key, status, burn=burn)._asdict()
req = self.session.post(self.ping_url, json=ping_data)
if _duplex:
response = req.result()
# note: I wonder if we should raise if the response is non-ok.
return deserialize(response.text) if response.ok else None
# send signals to the worker
def send_signal(self, exp_key, signal=None):
options = LogOptions(overwrite=True)
channel = os.path.join(exp_key, "__signal.pkl")
self._post(channel, signal, dtype="log", options=options)
# Reads binary data
def read(self, key):
return self._get(key, dtype="read")
    # Reads pickled data
def read_pkl(self, key):
return self._get(key, dtype="read_pkl")
def read_np(self, key):
return self._get(key, dtype="read_np")
# appends data
def log(self, key, data, **options):
self._post(key, data, dtype="log", options=LogOptions(**options))
# appends text
def log_text(self, key, text):
self._post(key, text, dtype="text")
# sends out images
def send_image(self, key, data):
assert data.dtype in ALLOWED_TYPES, "image data must be one of {}".format(ALLOWED_TYPES)
self._post(key, data, dtype="image")
    # appends raw bytes
def log_buffer(self, key, buf):
self._post(key, buf, dtype="byte")
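# Minimal usage sketch (illustrative, not part of the original module); the path
# and keys below are placeholders for any url accepted by __init__ above:
#   client = LogClient(url="/tmp/ml-logger-demo")             # local, file-backed
#   client.log_text("runs/demo/notes.md", "hello\n")          # append text
#   client.log("runs/demo/metrics.pkl", dict(loss=1.0), overwrite=True)
#   client.send_signal("runs/demo", signal="stop")            # posts to __signal.pkl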
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
b8735dea132ec85a36c4534b5df4368acaafad08 | eeb469954b768095f2b8ad2376f1a114a3adb3fa | /119.py | bfe18b6ba71c7a352c3f8459e86deaca6bf97484 | [
"MIT"
] | permissive | RafaelHuang87/Leet-Code-Practice | ef18dda633932e3cce479f7d5411552d43da0259 | 7754dcee38ffda18a5759113ef06d7becf4fe728 | refs/heads/master | 2020-07-18T20:09:10.311141 | 2020-02-11T09:56:39 | 2020-02-11T09:56:39 | 206,305,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | class Solution:
def getRow(self, rowIndex: int) -> [int]:
res = []
        for i in range(rowIndex + 1):  # build rows 0..rowIndex; the last one is the answer
temp = [1] * (i + 1)
res.append(temp)
for j in range(1, i):
res[i][j] = res[i - 1][j - 1] + res[i - 1][j]
return res[-1]
| [
"rafaelhuang@163.com"
] | rafaelhuang@163.com |
894ab3357bb45c22c65174d315ee30e93b7d6729 | 96ec8ea87fb2cfdd2d850a0471c9820f92152847 | /九章算法/动态规划/Longest Common Subsequence.py | 84c87a0e25d7fdbdd5276d45c2bada2c2fc4d0a7 | [] | no_license | bitterengsci/algorithm | ae0b9159fd21cc30c9865f981f9c18cf9c6898d7 | bf70d038b70c51edc6ddd6bfef1720fb5f9f2567 | refs/heads/master | 2023-08-10T10:22:18.774232 | 2023-07-31T21:04:11 | 2023-07-31T21:04:11 | 186,261,880 | 95 | 46 | null | 2023-07-31T21:04:12 | 2019-05-12T13:57:27 | Python | UTF-8 | Python | false | false | 843 | py | class Solution:
"""
@param A: A string
@param B: A string
@return: The length of longest common subsequence of A and B
"""
def longestCommonSubsequence(self, A, B):
dp = [[0] * (len(B)+1) for _ in range(len(A)+1)]
# initialization
for j in range(len(B)+1):
dp[0][j] = 0
for i in range(len(A)+1):
dp[i][0] = 0
        # state transition
# f[i][j] = max{f[i-1][j], f[i][j-1], f[i-1][j-1]+1 | A[i-1]=B[j-1]}
for i in range(1, len(A)+1):
for j in range(1, len(B)+1):
if A[i-1] == B[j-1] :
dp[i][j] = max(dp[i-1][j], dp[i][j-1], dp[i-1][j-1] + 1)
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
return dp[-1][-1] | [
"yanran2012@gmail.com"
] | yanran2012@gmail.com |
2e828986bbdd0616c1f339a48799eba5fca072c6 | 422943b00b6dab14ce72e6ed9e92c1c5c02883b0 | /updateTests.py | bf89b1d620c9e6705fe1c16beba02c1e9dc8f5d3 | [] | no_license | grid-control/testsuite | 412c376325feae3c744fd4ca4c32f254ffd9e6df | 3d2a8d3234dc1c83f7e2f71aa5704f8abe0d42de | refs/heads/master | 2021-08-06T08:16:12.209352 | 2019-07-11T13:37:56 | 2019-07-11T13:37:56 | 96,886,340 | 0 | 1 | null | 2017-08-14T12:09:12 | 2017-07-11T11:33:42 | Shell | UTF-8 | Python | false | false | 943 | py | import os
for (root, dirs, files) in os.walk('.'):
def findTestFwk(dn):
if 'testfwk.py' in os.listdir(dn):
return dn
return findTestFwk(os.path.join(dn, '..'))
for fn in files:
if fn.startswith('TEST_') and fn.endswith('.py'):
fn = os.path.join(root, fn)
print(fn)
lines = open(fn).readlines()
for idx, line in enumerate(lines):
if line.startswith('# - prolog marker'):
break
else:
raise Exception('No prolog marker found in %r!' % fn)
fp = open(fn, 'w')
fp.write("""#!/usr/bin/env python
__import__('sys').path.append(__import__('os').path.join(__import__('os').path.dirname(__file__), %r))
__import__('testfwk').setup(__file__)
""" % findTestFwk(root).replace(root, '').lstrip('/'))
fp.write('# - prolog marker\n')
fp.write(str.join('', lines[idx+1:]))
if not lines[-1].startswith('run_test(') and not (fn.endswith('fuzz.py') or fn.endswith('scale.py')):
print('run_test missing')
| [
"stober@cern.ch"
] | stober@cern.ch |
5e42f67ea15cac565bc2f72d4be281f11441b0a7 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit65.py | 9ea013b14becf0bac967ec4fc7786a2d686f9843 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,433 | py | # qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
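# Worked examples for the two helpers above (illustrative):
#   bitwise_dot("011", "110") == '1'    # 0*1 + 1*1 + 1*0 = 1, then mod 2
#   bitwise_xor("110", "010") == "001"  # per-index XOR is [1, 0, 0], joined in reverse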
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
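# Illustrative note: for n = 2 and f(rep) = rep[0], the loop above marks the inputs
# "10" and "11"; each marked input contributes one X-sandwiched multi-controlled
# Toffoli, so the target qubit flips exactly on the states where f(x) == "1".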
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[2]) # number=7
prog.x(input_qubit[2]) # number=8
prog.cx(input_qubit[0],input_qubit[2]) # number=9
prog.cx(input_qubit[2],input_qubit[1]) # number=6
prog.cx(input_qubit[2],input_qubit[1]) # number=4
prog.z(input_qubit[2]) # number=3
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
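# Example (illustrative): bernstein_test_1("001") == "0", since 011 . 001 = 1 and 1 XOR 1 = 0.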
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit65.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
00f0ed14558402c0d3ef771f6e79a3f3375bd9a6 | 0316925e2bad29d60f0dcccdf91277fd8f03ef09 | /q015.py | 286af0b71dec819126a696d98ce11b5eb6fe2589 | [] | no_license | nomadlife/project-euler | 426614df7b2c9368a4db59954dc1df2902a44d6b | 9bc09843637a361fa93c7abb20ac990f973b08e5 | refs/heads/master | 2021-07-03T00:52:07.760948 | 2021-06-26T14:28:32 | 2021-06-26T14:28:32 | 100,214,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # Q015 Lattice paths
# How many such routes are there through a 20×20 grid?
def grid(r,c):
if r==1 or c==1:
return c+r
if (r,c) not in cache:
cache[r,c] = grid(r,c-1) + grid(r-1,c)
return cache[r,c]
cache={}
print(grid(20,20))
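# Sanity check (illustrative): the route count equals the central binomial
# coefficient C(40, 20) = 137846528820, i.e. grid(20, 20) == math.comb(40, 20).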
| [
"joonwoo3816@gmail.com"
] | joonwoo3816@gmail.com |
13c46f9d5b80299f0706ab94baace17772ea3776 | 23bdb9a3102b9aad3ec20419593bbc3b1b7b3f66 | /piecrust/constants.py | 0b2fbbcf2a285f5d0c06563f396591e71adbc8c9 | [
"BSD-3-Clause"
] | permissive | toastdriven/piecrust | ec961998627145c0a6f312137564b1737fdb8113 | 95dadd7614a2fe7fe9186243f998f66c0909eb21 | refs/heads/master | 2015-08-06T10:31:46.352977 | 2011-10-18T07:55:18 | 2011-10-18T07:55:18 | 2,465,163 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Enable all basic ORM filters but do not allow filtering across relationships.
ALL = 1
# Enable all ORM filters, including across relationships
ALL_WITH_RELATIONS = 2
# Ripped from Django.
QUERY_TERMS = dict([(x, None) for x in (
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
)])
LOOKUP_SEP = '__'
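# Illustrative: a filter key such as "user__name__istartswith" splits on LOOKUP_SEP
# into ("user", "name", "istartswith"), and the trailing piece would be checked
# against QUERY_TERMS.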
| [
"daniel@toastdriven.com"
] | daniel@toastdriven.com |
e18ab1abab1a1499c8df298fcb95380e0105243f | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc039/C/4891512.py | 2563e2d210a483f9606a5a131073753a575e0dcf | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | def pianist_takahashi(S: str)->int:
scales = [
'Do', 'Do#', 'Re', 'Re#', 'Mi',
'Fa', 'Fa#', 'So', 'So#', 'La', 'La#', 'Si'
]
keyboard = 'WBWBWWBWBWBW'
for i, scale in enumerate(scales):
if (S).startswith(keyboard[i:] + keyboard[:i]):
return scale
return 'Unknown'
if __name__ == "__main__":
S = input()
ans = pianist_takahashi(S)
print(ans) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
bc4cbaa20cc1ae062653619f557fe8e7c1106adc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s890347924.py | e08156f14ab959ff622a85fa9f2278635cf92fe4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from collections import defaultdict, Counter
H, W, N = map(int, input().split())
D = defaultdict(int)
for _ in range(N):
a, b = map(int, input().split())
for i in range(a - 2, a + 1):
if 1 <= i <= H - 2:
for j in range(b - 2, b + 1):
if 1 <= j <= W - 2:
D[(i, j)] += 1
print((H - 2) * (W - 2) - len(D))
c = Counter(D.values())
for i in range(1, 10):
print(c.get(i, 0)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c71b4a1f83067c3a098d6a37edad98bc9a120439 | a611d9ecfae0ecaed9848bf0474e8d2249764c6c | /bundestag/link_crawler.py | 25a20ec56200e4b138fa99fc4d49894fc5ca9f3a | [] | no_license | JokusPokus/TINART-finetuning | 8317ad9f1a42e10e032ab77ce998300dc605ff57 | 667ccfae1ddae1eece070bef2934160d816a9a24 | refs/heads/master | 2023-01-28T04:51:32.051599 | 2020-12-02T15:36:39 | 2020-12-02T15:36:39 | 313,454,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,176 | py | """
Crawls links to parliament proceeding protocols for the current voting period.
"""
import requests
from bs4 import BeautifulSoup
from typing import Dict, List
# metadata for the HTTP requests
HEADERS = {
'authority': 'www.bundestag.de',
'accept': '*/*',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.bundestag.de/services/opendata',
'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7,nl;q=0.6,es;q=0.5',
'cookie': 'INGRESSCOOKIE=1602572682.198.40.821660; CM_SESSIONID=4A5EDDBABCC058A39020EBCA61518DB9.cae-live-1',
'dnt': '1',
'sec-gpc': '1',
}
# Link to the web page where XML protocols can be accessed.
# Note that this link might change in the future.
SOURCE = 'https://www.bundestag.de/ajax/filterlist/de/services/opendata/543410-543410'
OUTPUT_DIRECTORY = ".\\bundestag\\resource_links\\"
class Params:
"""
Defines the parameters passed to the HTTP request
"""
limit = ("limit", "10")
noFilterSet = ("noFilterSet", "true")
def __init__(self, offset):
self.offset = ("offset", str(offset))
def to_tuple(self):
return (
self.limit,
self.noFilterSet,
self.offset
)
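    # Illustrative: Params(20).to_tuple() returns
    # (("limit", "10"), ("noFilterSet", "true"), ("offset", "20")),
    # the key/value pair form that requests.get() accepts for query parameters below.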
class HTMLParser:
"""
Crawls the links to the XML files containing the parliament protocols
of the current legislative period.
Stores the links in a txt file in the output directory.
"""
def __init__(self,
headers: Dict = HEADERS,
source: str = SOURCE,
output_directory: str = OUTPUT_DIRECTORY):
"""
:param headers: metadata passed to the HTTP request
:param source: link to the web page where the XML protocols can be accessed
:param output_directory: directory where the list of links shall be stored
"""
self.headers = headers
self.source = source
self.output_directory = output_directory
@staticmethod
def _get_links(doc_string: str) -> List[str]:
"""
Takes an html string and returns a list with links to XML resources.
:param doc_string: the HTML content of an HTTP response in string format
:return: a list of strings, each representing a link to an XML resource
"""
soup = BeautifulSoup(doc_string, features="html.parser")
links = []
for a in soup.find_all("a"):
new_link = "bundestag.de" + a.get("href")
links.append(new_link)
return links
@staticmethod
def _has_link(doc_string: str) -> bool:
"""
Checks whether given html string contains an "a" element.
"""
soup = BeautifulSoup(doc_string, features="html.parser")
has_link = bool(soup.a and soup.a["href"])
return has_link
def _append_links(self, links: List[str]):
"""
Takes a list of links in string format and appends them to a text file,
each link in a new line.
The text file is saved into the parser's output directory.
:param links: list of links in string format
"""
with open(self.output_directory + "resource_links.txt", "a+") as links_file:
for link in links:
links_file.write(link + "\n")
def write_links_to_file(self):
"""
Crawls the whole source website for links to xml resources
and writes the links to a text file stored in the output directory.
"""
offset = 0
while True:
params = Params(offset).to_tuple()
response = requests.get(self.source, headers=self.headers, params=params)
if not self._has_link(response.content):
break
links = self._get_links(response.content)
self._append_links(links)
offset += 10
if __name__ == "__main__":
parser = HTMLParser()
parser.write_links_to_file()
| [
"jakob.schmitt@code.berlin"
] | jakob.schmitt@code.berlin |
bd53a8dbbf7296ec1350fc2ed8664fd62d46a3fb | ee682ade4fcafb5648a99ceb66a2d4b7bb186cb3 | /app/members/admin.py | 3a05a99558505445f0f1ca38c8377de03a63a575 | [] | no_license | gambler1541/hw-django | a7ea4ad743cb435c2f9ecbf3ff86ce3adf036069 | 99639fb3da7639bffe8b22fe9fc7ac91260f05ed | refs/heads/master | 2020-03-21T04:19:08.601227 | 2018-06-21T01:10:35 | 2018-06-21T01:10:35 | 138,102,510 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.contrib import admin
from members.models import InstagramUser, UserInfo
admin.site.register(InstagramUser)
admin.site.register(UserInfo)
| [
"gambler1541@gmail.com"
] | gambler1541@gmail.com |
a06f196d9eb015652c6a0673b127a7bf4774c5e6 | ef5a8987516fac23b35eb19200bd0b392172089e | /rlkit/torch/sac/sac.py | 07e3d1255b740f3b7e3cab937583bd788cfa64ff | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | NagisaZj/maesn_baseline | 8be2dc57b2b846ca5a68783ee346542b17a5acec | bfcc761dacbc602390d063d8b4770d10655ec7ff | refs/heads/master | 2023-04-07T10:38:09.551354 | 2020-09-29T08:28:11 | 2020-09-29T08:28:11 | 254,028,985 | 0 | 0 | NOASSERTION | 2023-03-16T01:18:39 | 2020-04-08T08:27:31 | Python | UTF-8 | Python | false | false | 129,427 | py | from collections import OrderedDict
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.core.rl_algorithm import MetaRLAlgorithm,ExpAlgorithmIter,ExpAlgorithmFin, ExpAlgorithmFin2,ExpAlgorithmFin3
class PEARLSoftActorCritic(MetaRLAlgorithm):
def __init__(
self,
env,
train_tasks,
eval_tasks,
latent_dim,
nets,
policy_lr=1e-3,
qf_lr=1e-3,
vf_lr=1e-3,
context_lr=1e-3,
kl_lambda=1.,
policy_mean_reg_weight=1e-3,
policy_std_reg_weight=1e-3,
policy_pre_activation_weight=0.,
optimizer_class=optim.Adam,
recurrent=False,
use_information_bottleneck=True,
use_next_obs_in_context=False,
sparse_rewards=False,
soft_target_tau=1e-2,
plotter=None,
render_eval_paths=False,
**kwargs
):
super().__init__(
env=env,
agent=nets[0],
train_tasks=train_tasks,
eval_tasks=eval_tasks,
**kwargs
)
self.soft_target_tau = soft_target_tau
self.policy_mean_reg_weight = policy_mean_reg_weight
self.policy_std_reg_weight = policy_std_reg_weight
self.policy_pre_activation_weight = policy_pre_activation_weight
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.recurrent = recurrent
self.latent_dim = latent_dim
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.vib_criterion = nn.MSELoss()
self.l2_reg_criterion = nn.MSELoss()
self.kl_lambda = kl_lambda
self.use_information_bottleneck = use_information_bottleneck
self.sparse_rewards = sparse_rewards
self.use_next_obs_in_context = use_next_obs_in_context
self.qf1, self.qf2, self.vf = nets[1:]
self.target_vf = self.vf.copy()
self.policy_optimizer = optimizer_class(
self.agent.policy.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.vf_optimizer = optimizer_class(
self.vf.parameters(),
lr=vf_lr,
)
self.context_optimizer = optimizer_class(
self.agent.context_encoder.parameters(),
lr=context_lr,
)
###### Torch stuff #####
@property
def networks(self):
return self.agent.networks + [self.agent] + [self.qf1, self.qf2, self.vf, self.target_vf]
def training_mode(self, mode):
for net in self.networks:
net.train(mode)
def to(self, device=None):
if device == None:
device = ptu.device
for net in self.networks:
net.to(device)
##### Data handling #####
def unpack_batch(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
r = batch['sparse_rewards'][None, ...]
else:
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
return [o, a, r, no, t]
def sample_sac(self, indices):
''' sample batch of training data from a list of tasks for training the actor-critic '''
# this batch consists of transitions sampled randomly from replay buffer
# rewards are always dense
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)) for idx in indices]
unpacked = [self.unpack_batch(batch) for batch in batches]
# group like elements together
unpacked = [[x[i] for x in unpacked] for i in range(len(unpacked[0]))]
unpacked = [torch.cat(x, dim=0) for x in unpacked]
return unpacked
def sample_context(self, indices):
''' sample batch of context from a list of tasks from the replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(self.enc_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=self.recurrent)) for idx in indices]
context = [self.unpack_batch(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
# full context consists of [obs, act, rewards, next_obs, terms]
# if dynamics don't change across tasks, don't include next_obs
# don't include terminals in context
if self.use_next_obs_in_context:
context = torch.cat(context[:-1], dim=2)
else:
context = torch.cat(context[:-2], dim=2)
return context
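    # Illustrative shape note: the returned context tensor is
    # (num_tasks, embedding_batch_size, obs_dim + action_dim + 1), with obs_dim
    # more features appended when use_next_obs_in_context is True.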
##### Training #####
def _do_training(self, indices):
mb_size = self.embedding_mini_batch_size
num_updates = self.embedding_batch_size // mb_size
# sample context batch
context_batch = self.sample_context(indices)
# zero out context and hidden encoder state
self.agent.clear_z(num_tasks=len(indices))
# do this in a loop so we can truncate backprop in the recurrent encoder
for i in range(num_updates):
context = context_batch[:, i * mb_size: i * mb_size + mb_size, :]
self._take_step(indices, context)
# stop backprop
self.agent.detach_z()
def _do_training_fit(self, indices):
mb_size = self.embedding_mini_batch_size
num_updates = self.embedding_batch_size // mb_size
# sample context batch
context_batch = self.sample_context(indices)
# zero out context and hidden encoder state
self.agent.clear_z(num_tasks=len(indices))
# do this in a loop so we can truncate backprop in the recurrent encoder
for i in range(num_updates):
context = context_batch[:, i * mb_size: i * mb_size + mb_size, :]
self._take_step_fit(indices, context)
# stop backprop
self.agent.detach_z()
def _min_q(self, obs, actions, task_z):
q1 = self.qf1(obs, actions, task_z.detach())
q2 = self.qf2(obs, actions, task_z.detach())
min_q = torch.min(q1, q2)
return min_q
def _update_target_network(self):
ptu.soft_update_from_to(self.vf, self.target_vf, self.soft_target_tau)
def _take_step(self, indices, context):
num_tasks = len(indices)
# data is (task, batch, feat)
obs, actions, rewards, next_obs, terms = self.sample_sac(indices)
# run inference in networks
policy_outputs, task_z = self.agent(obs, context)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
# flattens out the task dimension
t, b, _ = obs.size()
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1(obs, actions, task_z)
q2_pred = self.qf2(obs, actions, task_z)
v_pred = self.vf(obs, task_z.detach())
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(next_obs, task_z)
# KL constraint on z if probabilistic
self.context_optimizer.zero_grad()
if self.use_information_bottleneck:
kl_div = self.agent.compute_kl_div()
kl_loss = self.kl_lambda * kl_div
kl_loss.backward(retain_graph=True)
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
rewards_flat = rewards.view(self.batch_size * num_tasks, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(self.batch_size * num_tasks, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
min_q_new_actions = self._min_q(obs, new_actions, task_z)
# vf update
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean**2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std**2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value**2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
# save some statistics for eval
if self.eval_statistics is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics = OrderedDict()
if self.use_information_bottleneck:
z_mean = np.mean(ptu.get_numpy(self.agent.z_means))
z_sig = np.mean(ptu.get_numpy(self.agent.z_vars))
self.eval_statistics['Z mean train'] = z_mean
self.eval_statistics['Z variance train'] = z_sig
self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
def _take_step_fit(self, indices, context):
num_tasks = len(indices)
# data is (task, batch, feat)
obs, actions, rewards, next_obs, terms = self.sample_sac(indices)
# run inference in networks
policy_outputs, task_z = self.agent(obs, context)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
# flattens out the task dimension
t, b, _ = obs.size()
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1(obs, actions, task_z)
q2_pred = self.qf2(obs, actions, task_z)
v_pred = self.vf(obs, task_z.detach())
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(next_obs, task_z)
# KL constraint on z if probabilistic
self.context_optimizer.zero_grad()
if self.use_information_bottleneck:
kl_div = self.agent.compute_kl_div()
kl_loss = self.kl_lambda * kl_div
kl_loss.backward(retain_graph=True)
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
rewards_flat = rewards.view(self.batch_size * num_tasks, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(self.batch_size * num_tasks, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
#self.qf1_optimizer.step()
#self.qf2_optimizer.step()
self.context_optimizer.step()
def get_epoch_snapshot(self, epoch):
# NOTE: overriding parent method which also optionally saves the env
snapshot = OrderedDict(
qf1=self.qf1.state_dict(),
qf2=self.qf2.state_dict(),
policy=self.agent.policy.state_dict(),
vf=self.vf.state_dict(),
target_vf=self.target_vf.state_dict(),
context_encoder=self.agent.context_encoder.state_dict(),
)
return snapshot
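# Illustrative note (not in the original source): `nets` is expected in the order
# [agent, qf1, qf2, vf]; nets[0] becomes the PEARL agent and nets[1:] are unpacked
# into the two Q-functions and the value function in __init__ above. A hypothetical
# construction therefore looks like
#   algo = PEARLSoftActorCritic(env, train_tasks, eval_tasks, latent_dim,
#                               nets=[agent, qf1, qf2, vf], **algo_kwargs)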
class ExpSACIter(ExpAlgorithmIter):
def __init__(
self,
env,
train_tasks,
eval_tasks,
nets,
nets_exp,
encoder,
policy_lr=1e-3,
qf_lr=1e-3,
vf_lr=1e-3,
context_lr=1e-3,
kl_lambda=1.,
policy_mean_reg_weight=1e-3,
policy_std_reg_weight=1e-3,
policy_pre_activation_weight=0.,
optimizer_class=optim.Adam,
recurrent=False,
use_information_bottleneck=True,
use_next_obs_in_context=False,
sparse_rewards=False,
soft_target_tau=1e-2,
plotter=None,
render_eval_paths=False,
use_info_in_context=False,
entropy_weight=1e-2,
intrinsic_reward_weight=1e-1,
**kwargs
):
super().__init__(
env=env,
agent=nets[0],
agent_exp=nets_exp[0],
train_tasks=train_tasks,
eval_tasks=eval_tasks,
encoder=encoder,
**kwargs
)
self.intrinsic_reward_weight = intrinsic_reward_weight
self.entropy_weight = entropy_weight
self.soft_target_tau = soft_target_tau
self.policy_mean_reg_weight = policy_mean_reg_weight
self.policy_std_reg_weight = policy_std_reg_weight
self.policy_pre_activation_weight = policy_pre_activation_weight
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.recurrent = recurrent
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.qf_exp_criterion = nn.MSELoss()
self.vf_exp_criterion = nn.MSELoss()
self.vib_criterion = nn.MSELoss()
self.l2_reg_criterion = nn.MSELoss()
self.kl_lambda = kl_lambda
self.use_information_bottleneck = use_information_bottleneck
self.sparse_rewards = sparse_rewards
self.use_next_obs_in_context = use_next_obs_in_context
self.qf1, self.qf2, self.vf = nets[1:]
self.qf1_exp, self.qf2_exp, self.vf_exp = nets_exp[1:]
self.target_vf = self.vf.copy()
self.target_exp_vf = self.vf_exp.copy()
self.policy_optimizer = optimizer_class(
self.agent.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.vf_optimizer = optimizer_class(
self.vf.parameters(),
lr=vf_lr,
)
self.policy_exp_optimizer = optimizer_class(
self.exploration_agent.parameters(),
lr=policy_lr,
)
self.qf1_exp_optimizer = optimizer_class(
self.qf1_exp.parameters(),
lr=qf_lr,
)
self.qf2_exp_optimizer = optimizer_class(
self.qf2_exp.parameters(),
lr=qf_lr,
)
self.vf_exp_optimizer = optimizer_class(
self.vf_exp.parameters(),
lr=vf_lr,
)
self.context_optimizer = optimizer_class(
self.context_encoder.parameters(),
lr=context_lr,
)
###### Torch stuff #####
@property
def networks(self):
return [self.context_encoder, self.agent.policy] + [self.qf1, self.qf2, self.vf, self.target_vf] + [self.exploration_agent.policy] + [self.qf1_exp, self.qf2_exp, self.vf_exp, self.target_exp_vf]
def training_mode(self, mode):
for net in self.networks:
net.train(mode)
def to(self, device=None):
if device == None:
device = ptu.device
for net in self.networks:
net.to(device)
##### Data handling #####
def unpack_batch(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
r = batch['sparse_rewards'][None, ...]
else:
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
return [o, a, r, no, t]
def unpack_batch_context(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
r = batch['sparse_rewards'][None, ...]
else:
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
info = batch['env_infos'][None,...]
return [o, a, r, no, t,info]
def sample_sac(self, indices):
''' sample batch of training data from a list of tasks for training the actor-critic '''
# this batch consists of transitions sampled randomly from replay buffer
# rewards are always dense
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)) for idx in indices]
unpacked = [self.unpack_batch(batch) for batch in batches]
# group like elements together
unpacked = [[x[i] for x in unpacked] for i in range(len(unpacked[0]))]
unpacked = [torch.cat(x, dim=0) for x in unpacked]
return unpacked
def sample_context(self, indices,sequence=False):
''' sample batch of context from a list of tasks from the replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(self.enc_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=sequence)) for idx in indices]
context = [self.unpack_batch_context(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
context_unbatched = context
# full context consists of [obs, act, rewards, next_obs, terms]
# if dynamics don't change across tasks, don't include next_obs
# don't include terminals in context
if self.use_next_obs_in_context:
context = torch.cat(context[:-2], dim=2)
else:
context = torch.cat(context[:-3], dim=2)
return context, context_unbatched
##### Training #####
def _do_training(self, indices, num_iter):
mb_size = self.embedding_mini_batch_size
num_updates = self.embedding_batch_size // mb_size
# sample context batch
context_batch,context_unbatched = self.sample_context(indices,False)
_,context_unbatched = self.sample_context(indices,True)
# zero out context and hidden encoder state
self.agent.clear_z(num_tasks=len(indices))
self.exploration_agent.clear_z(num_tasks=len(indices))
# do this in a loop so we can truncate backprop in the recurrent encoder
for i in range(num_updates):
context = context_batch[:, i * mb_size: i * mb_size + mb_size, :]
if num_iter<500:
self._take_step(indices, context,context_unbatched)
else:
self._take_step_exp(indices, context, context_unbatched)
# stop backprop
self.agent.detach_z()
def _min_q_exp(self, obs, actions):
#print(obs.shape,actions.shape)
self.qf1_exp.inner_reset(num_tasks=obs.shape[0])
self.qf2_exp.inner_reset(num_tasks=obs.shape[0])
q1 = self.qf1_exp(torch.cat([obs, actions],dim=2))
q2 = self.qf2_exp(torch.cat([obs, actions],dim=2))
min_q = torch.min(q1, q2)
return min_q
def _min_q(self, obs, actions, task_z):
q1 = self.qf1(obs, actions, task_z.detach())
q2 = self.qf2(obs, actions, task_z.detach())
min_q = torch.min(q1, q2)
return min_q
def _update_target_network(self):
ptu.soft_update_from_to(self.vf, self.target_vf, self.soft_target_tau)
def _update_target_network_exp(self):
ptu.soft_update_from_to(self.vf_exp, self.target_exp_vf, self.soft_target_tau)
def _take_step(self, indices, context,context_unbatched):
num_tasks = len(indices)
# data is (task, batch, feat)
obs, actions, rewards, next_obs, terms = self.sample_sac(indices)
rewards_traj = rewards
# run inference in networks
policy_outputs, task_z = self.agent(obs, context)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
# flattens out the task dimension
t, b, _ = obs.size()
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1(obs, actions, task_z)
q2_pred = self.qf2(obs, actions, task_z)
v_pred = self.vf(obs, task_z.detach())
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(next_obs, task_z)
# KL constraint on z if probabilistic
self.context_optimizer.zero_grad()
if self.use_information_bottleneck:
kl_div = self.agent.compute_kl_div()
kl_loss = self.kl_lambda * kl_div
kl_loss.backward(retain_graph=True)
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
rewards_flat = rewards.view(self.batch_size * num_tasks, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(self.batch_size * num_tasks, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
min_q_new_actions = self._min_q(obs, new_actions, task_z)
# vf update
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
# save some statistics for eval
if self.eval_statistics is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics = OrderedDict()
if self.use_information_bottleneck:
z_mean = np.mean(np.abs(ptu.get_numpy(self.agent.z_means[0])))
z_sig = np.mean(ptu.get_numpy(self.agent.z_vars[0]))
self.eval_statistics['Z mean train'] = z_mean
self.eval_statistics['Z variance train'] = z_sig
self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
def _take_step_exp(self, indices, context,context_unbatched):
obs, actions, rewards, next_obs, terms, er = context_unbatched
self.exploration_agent.reset_RNN(num_tasks=obs.shape[0])
self.qf1_exp.inner_reset(num_tasks=obs.shape[0])
self.qf2_exp.inner_reset(num_tasks=obs.shape[0])
self.vf_exp.inner_reset(num_tasks=obs.shape[0])
self.target_exp_vf.inner_reset(num_tasks=obs.shape[0])
policy_outputs,er = self.exploration_agent(obs, context)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
t, b, _ = obs.size()
rewards = rewards.view(t*b,-1)
#obs = obs.view(t * b, -1)
#actions = actions.view(t * b, -1)
#next_obs = next_obs.view(t * b, -1)
rew = er * self.intrinsic_reward_weight + rewards
#print(z_mean.shape, z_mean_next.shape, obs.shape, t, b)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1_exp(torch.cat([obs, actions],dim=2))
q2_pred = self.qf2_exp(torch.cat([obs, actions],dim=2))
v_pred = self.vf_exp(obs)
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_exp_vf(next_obs)
# KL constraint on z if probabilistic
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_exp_optimizer.zero_grad()
self.qf2_exp_optimizer.zero_grad()
rewards = rew.detach()
rewards_flat = rewards.view(t * b, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(t * b, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
self.qf1_exp_optimizer.step()
self.qf2_exp_optimizer.step()
# compute min Q on the new actions
new_actions = new_actions.view(t , b, -1)
min_q_new_actions = self._min_q_exp(obs, new_actions)
# vf update
# print(min_q_new_actions)
# print(log_pi)
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_exp_criterion(v_pred, v_target.detach())
self.vf_exp_optimizer.zero_grad()
vf_loss.backward()
self.vf_exp_optimizer.step()
self._update_target_network_exp()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_exp_optimizer.zero_grad()
policy_loss.backward()
self.policy_exp_optimizer.step()
# save some statistics for eval
def get_epoch_snapshot(self, epoch):
# NOTE: overriding parent method which also optionally saves the env
snapshot = OrderedDict(
qf1=self.qf1.state_dict(),
qf2=self.qf2.state_dict(),
policy=self.agent.state_dict(),
vf=self.vf.state_dict(),
target_vf=self.target_vf.state_dict(),
context_encoder=self.agent.context_encoder.state_dict(),
qf1_exp=self.qf1_exp.state_dict(),
qf2_exp=self.qf2_exp.state_dict(),
policy_exp=self.exploration_agent.state_dict(),
vf_exp=self.vf_exp.state_dict(),
target_vf_exp=self.target_exp_vf.state_dict(),
)
return snapshot
class ExpSACRew(ExpAlgorithmIter):
def __init__(
self,
env,
train_tasks,
eval_tasks,
nets,
nets_exp,
encoder,
policy_lr=1e-3,
qf_lr=1e-3,
vf_lr=1e-3,
context_lr=1e-3,
kl_lambda=1.,
policy_mean_reg_weight=1e-3,
policy_std_reg_weight=1e-3,
policy_pre_activation_weight=0.,
optimizer_class=optim.Adam,
recurrent=False,
use_information_bottleneck=True,
use_next_obs_in_context=False,
sparse_rewards=False,
soft_target_tau=1e-2,
plotter=None,
render_eval_paths=False,
use_info_in_context=False,
entropy_weight=1e-2,
intrinsic_reward_weight=1e-1,
**kwargs
):
super().__init__(
env=env,
agent=nets[0],
agent_exp=nets_exp[0],
train_tasks=train_tasks,
eval_tasks=eval_tasks,
encoder=encoder,
**kwargs
)
self.intrinsic_reward_weight = intrinsic_reward_weight
self.entropy_weight = entropy_weight
self.soft_target_tau = soft_target_tau
self.policy_mean_reg_weight = policy_mean_reg_weight
self.policy_std_reg_weight = policy_std_reg_weight
self.policy_pre_activation_weight = policy_pre_activation_weight
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.recurrent = recurrent
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.qf_exp_criterion = nn.MSELoss()
self.vf_exp_criterion = nn.MSELoss()
self.vib_criterion = nn.MSELoss()
self.l2_reg_criterion = nn.MSELoss()
self.pred_loss = nn.MSELoss()
self.kl_lambda = kl_lambda
self.use_information_bottleneck = use_information_bottleneck
self.sparse_rewards = sparse_rewards
self.use_next_obs_in_context = use_next_obs_in_context
self.qf1, self.qf2, self.vf = nets[1:]
self.qf1_exp, self.qf2_exp, self.vf_exp, self.rew_predictor = nets_exp[1:]
self.target_vf = self.vf.copy()
self.target_exp_vf = self.vf_exp.copy()
self.policy_optimizer = optimizer_class(
self.agent.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.vf_optimizer = optimizer_class(
self.vf.parameters(),
lr=vf_lr,
)
self.policy_exp_optimizer = optimizer_class(
self.exploration_agent.parameters(),
lr=policy_lr,
)
self.qf1_exp_optimizer = optimizer_class(
self.qf1_exp.parameters(),
lr=qf_lr,
)
self.qf2_exp_optimizer = optimizer_class(
self.qf2_exp.parameters(),
lr=qf_lr,
)
self.vf_exp_optimizer = optimizer_class(
self.vf_exp.parameters(),
lr=vf_lr,
)
self.context_optimizer = optimizer_class(
self.context_encoder.parameters(),
lr=context_lr,
)
self.predictor_optimizer = optimizer_class(
self.rew_predictor.parameters(),
lr=context_lr,
)
###### Torch stuff #####
@property
def networks(self):
return [self.context_encoder, self.agent.policy] + [self.qf1, self.qf2, self.vf, self.target_vf] + [self.exploration_agent.policy] + [self.qf1_exp, self.qf2_exp, self.vf_exp, self.target_exp_vf,self.rew_predictor]
def training_mode(self, mode):
for net in self.networks:
net.train(mode)
def to(self, device=None):
if device == None:
device = ptu.device
for net in self.networks:
net.to(device)
##### Data handling #####
def unpack_batch(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
r = batch['sparse_rewards'][None, ...]
else:
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
return [o, a, r, no, t]
def unpack_batch_context(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
r = batch['sparse_rewards'][None, ...]
else:
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
info = batch['env_infos'][None,...]
return [o, a, r, no, t,info]
def sample_sac(self, indices):
''' sample batch of training data from a list of tasks for training the actor-critic '''
# this batch consists of transitions sampled randomly from replay buffer
# rewards are always dense
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)) for idx in indices]
unpacked = [self.unpack_batch(batch) for batch in batches]
# group like elements together
unpacked = [[x[i] for x in unpacked] for i in range(len(unpacked[0]))]
unpacked = [torch.cat(x, dim=0) for x in unpacked]
return unpacked
def sample_context(self, indices,sequence=False):
''' sample batch of context from a list of tasks from the replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(self.enc_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=sequence)) for idx in indices]
context = [self.unpack_batch_context(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
context_unbatched = context
# full context consists of [obs, act, rewards, next_obs, terms]
# if dynamics don't change across tasks, don't include next_obs
# don't include terminals in context
if self.use_next_obs_in_context:
context = torch.cat(context[:-2], dim=2)
else:
context = torch.cat(context[:-3], dim=2)
return context, context_unbatched
def pred_context(self, context):
        ''' build reward-predictor inputs from an already-sampled context batch '''
        # prepend a zero reward and drop the last step, so step t is paired with the reward from step t-1
r_0 = ptu.zeros(context[2].shape[0],1,context[2].shape[2])
tmp = torch.cat([r_0,context[2]],dim=1)[:,:-1,:]
# full context consists of [obs, act, rewards, next_obs, terms]
# if dynamics don't change across tasks, don't include next_obs
# don't include terminals in context
contextr = torch.cat([context[0],context[1],tmp], dim=2)
return contextr
##### Training #####
def _do_training(self, indices, num_iter):
mb_size = self.embedding_mini_batch_size
num_updates = self.embedding_batch_size // mb_size
# sample context batch
context_batch,context_unbatched = self.sample_context(indices,False)
_,context_unbatched = self.sample_context(indices,True)
context_pred = self.pred_context(context_unbatched)
# zero out context and hidden encoder state
self.agent.clear_z(num_tasks=len(indices))
self.exploration_agent.clear_z(num_tasks=len(indices))
# do this in a loop so we can truncate backprop in the recurrent encoder
for i in range(num_updates):
context = context_batch[:, i * mb_size: i * mb_size + mb_size, :]
if num_iter<500:
self._take_step(indices, context,context_unbatched,context_pred)
else:
self._take_step_exp(indices, context, context_unbatched,context_pred)
# stop backprop
self.agent.detach_z()
def _min_q_exp(self, obs, actions):
#print(obs.shape,actions.shape)
self.qf1_exp.inner_reset(num_tasks=obs.shape[0])
self.qf2_exp.inner_reset(num_tasks=obs.shape[0])
q1 = self.qf1_exp(torch.cat([obs, actions],dim=2))
q2 = self.qf2_exp(torch.cat([obs, actions],dim=2))
min_q = torch.min(q1, q2)
return min_q
def _min_q(self, obs, actions, task_z):
q1 = self.qf1(obs, actions, task_z.detach())
q2 = self.qf2(obs, actions, task_z.detach())
min_q = torch.min(q1, q2)
return min_q
def _update_target_network(self):
ptu.soft_update_from_to(self.vf, self.target_vf, self.soft_target_tau)
def _update_target_network_exp(self):
ptu.soft_update_from_to(self.vf_exp, self.target_exp_vf, self.soft_target_tau)
def _take_step(self, indices, context,context_unbatched,context_pred):
num_tasks = len(indices)
self.rew_predictor.inner_reset(context_pred.shape[0])
rew_pred = self.rew_predictor(context_pred)
rew = context_unbatched[2].contiguous()
#print(rew_pred.shape)
rew = rew.view(rew_pred.shape[0],-1)
loss = self.pred_loss(rew,rew_pred)
self.predictor_optimizer.zero_grad()
loss.backward()
self.predictor_optimizer.step()
# data is (task, batch, feat)
obs, actions, rewards, next_obs, terms = self.sample_sac(indices)
rewards_traj = rewards
# run inference in networks
policy_outputs, task_z = self.agent(obs, context)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
# flattens out the task dimension
t, b, _ = obs.size()
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1(obs, actions, task_z)
q2_pred = self.qf2(obs, actions, task_z)
v_pred = self.vf(obs, task_z.detach())
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(next_obs, task_z)
# KL constraint on z if probabilistic
self.context_optimizer.zero_grad()
if self.use_information_bottleneck:
kl_div = self.agent.compute_kl_div()
kl_loss = self.kl_lambda * kl_div
kl_loss.backward(retain_graph=True)
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
rewards_flat = rewards.view(self.batch_size * num_tasks, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(self.batch_size * num_tasks, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
min_q_new_actions = self._min_q(obs, new_actions, task_z)
# vf update
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
# save some statistics for eval
if self.eval_statistics is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics = OrderedDict()
if self.use_information_bottleneck:
z_mean = np.mean(np.abs(ptu.get_numpy(self.agent.z_means[0])))
z_sig = np.mean(ptu.get_numpy(self.agent.z_vars[0]))
self.eval_statistics['Z mean train'] = z_mean
self.eval_statistics['Z variance train'] = z_sig
self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
def _take_step_exp(self, indices, context,context_unbatched,context_pred):
obs, actions, rewards, next_obs, terms, er = context_unbatched
pred_rew = self.rew_predictor(context_pred)
self.exploration_agent.reset_RNN(num_tasks=obs.shape[0])
self.qf1_exp.inner_reset(num_tasks=obs.shape[0])
self.qf2_exp.inner_reset(num_tasks=obs.shape[0])
self.vf_exp.inner_reset(num_tasks=obs.shape[0])
self.target_exp_vf.inner_reset(num_tasks=obs.shape[0])
self.rew_predictor.inner_reset(context_pred.shape[0])
policy_outputs,er = self.exploration_agent(obs, context,cal_rew=False)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
t, b, _ = obs.size()
rewards = rewards.contiguous()
rewards = rewards.view(t*b,-1)
#obs = obs.view(t * b, -1)
#actions = actions.view(t * b, -1)
#next_obs = next_obs.view(t * b, -1)
#er = er.view(t * b, -1)
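# exploration reward: squared reward-prediction error (scaled by intrinsic_reward_weight) added on top of the extrinsic reward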
rew = (rewards-pred_rew)**2 * self.intrinsic_reward_weight + rewards
rew = rew.detach()
#print(z_mean.shape, z_mean_next.shape, obs.shape, t, b)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1_exp(torch.cat([obs, actions],dim=2))
q2_pred = self.qf2_exp(torch.cat([obs, actions],dim=2))
v_pred = self.vf_exp(obs)
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_exp_vf(next_obs)
# KL constraint on z if probabilistic
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_exp_optimizer.zero_grad()
self.qf2_exp_optimizer.zero_grad()
rewards = rew.detach()
rewards_flat = rewards.view(t * b, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(t * b, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
self.qf1_exp_optimizer.step()
self.qf2_exp_optimizer.step()
# compute min Q on the new actions
new_actions = new_actions.view(t , b, -1)
min_q_new_actions = self._min_q_exp(obs, new_actions)
# vf update
# print(min_q_new_actions)
# print(log_pi)
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_exp_criterion(v_pred, v_target.detach())
self.vf_exp_optimizer.zero_grad()
vf_loss.backward()
self.vf_exp_optimizer.step()
self._update_target_network_exp()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_exp_optimizer.zero_grad()
policy_loss.backward()
self.policy_exp_optimizer.step()
# save some statistics for eval
def get_epoch_snapshot(self, epoch):
# NOTE: overriding parent method which also optionally saves the env
snapshot = OrderedDict(
qf1=self.qf1.state_dict(),
qf2=self.qf2.state_dict(),
policy=self.agent.state_dict(),
vf=self.vf.state_dict(),
target_vf=self.target_vf.state_dict(),
context_encoder=self.agent.context_encoder.state_dict(),
qf1_exp=self.qf1_exp.state_dict(),
qf2_exp=self.qf2_exp.state_dict(),
policy_exp=self.exploration_agent.state_dict(),
vf_exp=self.vf_exp.state_dict(),
target_vf_exp=self.target_exp_vf.state_dict(),
)
return snapshot
class ExpSACFin(ExpAlgorithmFin):
def __init__(
self,
env,
train_tasks,
eval_tasks,
nets,
nets_exp,
encoder,
latent_dim,
policy_lr=1e-3,
qf_lr=1e-3,
vf_lr=1e-3,
context_lr=1e-3,
kl_lambda=1.,
policy_mean_reg_weight=1e-3,
policy_std_reg_weight=1e-3,
policy_pre_activation_weight=0.,
optimizer_class=optim.Adam,
recurrent=False,
use_information_bottleneck=True,
use_next_obs_in_context=False,
sparse_rewards=False,
soft_target_tau=1e-2,
plotter=None,
render_eval_paths=False,
use_info_in_context=False,
entropy_weight=1e-2,
intrinsic_reward_weight=1e-1,
use_kl_div_intrinsic=False,
gradient_from_Q=False,
prediction_reward_scale=1,
intrinsic_reward_decay = 1,
kl_min_weight = 1e-3,
pie_hidden_dim = 5,
consider_dynamics=0,
prediction_transition_scale=1,
**kwargs
):
super().__init__(
env=env,
agent=None,
agent_exp=nets_exp[0],
train_tasks=train_tasks,
eval_tasks=eval_tasks,
encoder=encoder,
**kwargs
)
self.use_kl_div_intrinsic = use_kl_div_intrinsic
self.intrinsic_reward_weight = intrinsic_reward_weight
self.entropy_weight = entropy_weight
self.soft_target_tau = soft_target_tau
self.policy_mean_reg_weight = policy_mean_reg_weight
self.policy_std_reg_weight = policy_std_reg_weight
self.policy_pre_activation_weight = policy_pre_activation_weight
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.latent_dim = latent_dim
self.recurrent = recurrent
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.qf_exp_criterion = nn.MSELoss()
self.vf_exp_criterion = nn.MSELoss()
self.vib_criterion = nn.MSELoss()
self.l2_reg_criterion = nn.MSELoss()
self.pred_loss = nn.MSELoss()
self.kl_lambda = kl_lambda
self.prediction_reward_scale = prediction_reward_scale
self.prediction_transition_scale = prediction_transition_scale
self.use_information_bottleneck = use_information_bottleneck
self.sparse_rewards = sparse_rewards
self.use_next_obs_in_context = use_next_obs_in_context
self.gradient_from_Q = gradient_from_Q
self.intrinsic_reward_decay = intrinsic_reward_decay
self.consider_dynamics = consider_dynamics
self.qf1_exp, self.qf2_exp, self.vf_exp, self.rew_decoder, self.transition_decoder = nets_exp[1:]
self.target_exp_vf = self.vf_exp.copy()
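# the target value network starts as a copy of vf_exp and is soft-updated during training (see _update_target_network_exp)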
self.policy_exp_optimizer = optimizer_class(
self.exploration_agent.parameters(),
lr=policy_lr,
)
self.qf1_exp_optimizer = optimizer_class(
self.qf1_exp.parameters(),
lr=qf_lr,
)
self.qf2_exp_optimizer = optimizer_class(
self.qf2_exp.parameters(),
lr=qf_lr,
)
self.vf_exp_optimizer = optimizer_class(
self.vf_exp.parameters(),
lr=vf_lr,
)
self.context_optimizer = optimizer_class(
self.context_encoder.parameters(),
lr=context_lr,
)
self.rew_optimizer = optimizer_class(
self.rew_decoder.parameters(),
lr=context_lr,
)
self.transition_optimizer = optimizer_class(
self.transition_decoder.parameters(),
lr=context_lr,
)
###### Torch stuff #####
@property
def networks(self):
return [self.context_encoder] + [self.exploration_agent.policy] + [self.qf1_exp, self.qf2_exp, self.vf_exp, self.target_exp_vf,self.rew_decoder,self.transition_decoder]
def training_mode(self, mode):
for net in self.networks:
net.train(mode)
def to(self, device=None):
if device is None:
device = ptu.device
for net in self.networks:
net.to(device)
##### Data handling #####
def unpack_batch(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
sr = batch['sparse_rewards'][None, ...]
else:
sr=None
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
return [o, a, r, no, t,sr]
def unpack_batch_context(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
sparse_r = batch['sparse_rewards'][None, ...]
r = batch['rewards'][None, ...]
if not sparse_reward:
sparse_r = r
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
info = batch['env_infos'][None,...]
#print(o[0,:5],a[0,:5],r[0],sparse_r[0],no[0,:5])
return [o, a, sparse_r, no, t,info,r]
def sample_sac(self, indices):
''' sample batch of training data from a list of tasks for training the actor-critic '''
# this batch consists of transitions sampled randomly from replay buffer
# rewards are always dense
if self.use_per:
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)[0]) for idx in indices]
else:
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)) for
idx in indices]
unpacked = [self.unpack_batch(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
unpacked = [[x[i] for x in unpacked] for i in range(len(unpacked[0]))]
unpacked = [torch.cat(x, dim=0) for x in unpacked]
return unpacked
def sample_context(self, indices,sequence=False):
''' sample batch of context from a list of tasks from the replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(self.enc_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=sequence)) for idx in indices]
context = [self.unpack_batch_context(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
context_unbatched = context
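# context keeps [obs, act, (sparse) rewards] and, if enabled, next_obs; terminals, env_infos and dense rewards are dropped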
if self.use_next_obs_in_context:
context = torch.cat(context[:-3], dim=2)
else:
context = torch.cat(context[:-4], dim=2)
return context,context_unbatched
def pred_context(self, context):
''' assemble the autoregressive prediction context: each step pairs the current observation with the previous action and previous reward (zero-padded at t=0, so the sequence is one step longer than the input) '''
r_0 = ptu.zeros(context[2].shape[0],1,context[2].shape[2])
tmp = torch.cat([r_0,context[2]],dim=1)
a_0 = ptu.zeros(context[1].shape[0], 1, context[1].shape[2])
tmp2 = torch.cat([a_0, context[1]], dim=1)
tmp3 = torch.cat([torch.unsqueeze(context[0][:,0,:],1),context[3]],dim=1)
# full context consists of [obs, act, rewards, next_obs, terms]
# if dynamics don't change across tasks, don't include next_obs
# don't include terminals in context
contextr = torch.cat([tmp3,tmp2,tmp], dim=2)
return contextr
##### Training #####
def _do_training(self, indices, mode):
mb_size = self.embedding_mini_batch_size
num_updates = self.embedding_batch_size // mb_size
# sample context batch
_,context_unbatched = self.sample_context(indices,True)
context_pred = self.pred_context(context_unbatched)
context = self.sample_sac(indices)
# zero out context and hidden encoder state
# do this in a loop so we can truncate backprop in the recurrent encoder
for i in range(num_updates):
self._take_step_exp(indices, context_unbatched,context_pred,context)
# stop backprop
def _min_q_exp(self, obs, actions,z_mean_prev, z_var_prev):
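# clipped double-Q: take the elementwise minimum of the two exploration critics to curb overestimation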
#print(obs.shape,actions.shape)
q1 = self.qf1_exp(torch.cat([obs, actions,z_mean_prev, z_var_prev],dim=1))
q2 = self.qf2_exp(torch.cat([obs, actions,z_mean_prev, z_var_prev],dim=1))
min_q = torch.min(q1, q2)
return min_q
def _update_target_network_exp(self):
ptu.soft_update_from_to(self.vf_exp, self.target_exp_vf, self.soft_target_tau)
def compute_kl(self,means,vars):
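# per-dimension Gaussian KL against a standard normal prior (vars is treated as the posterior scale), summed over latent dims and averaged over the batch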
std_mean = ptu.zeros(means.size())
std_var = ptu.ones(means.size())
tem = vars / std_var
kl_div = tem ** 2 - 2 * torch.log(tem) + ((std_mean - means) / std_var) ** 2 - 1
kl_div = torch.sum(kl_div, dim=1, keepdim=True) / 2
kl_div = torch.mean(kl_div)
return kl_div
def compute_intrinsic(self,z_mean_prev, z_var_prev,z_mean_post,z_var_post):
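# per-sample KL between the previous and updated posterior over z; serves as an information-gain style intrinsic signal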
tem = z_var_post / z_var_prev
kl_div = tem ** 2 - 2 * torch.log(tem) + ((z_mean_prev - z_mean_post) / z_var_prev) ** 2 - 1
kl_div = torch.sum(kl_div, dim=1, keepdim=True) / 2
return kl_div
def _take_step(self, indices,context_pred):
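# this step trains only the context encoder and the reward decoder: reconstruct rewards from a sampled latent and regularize the posterior toward the prior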
z_s = self.context_encoder.forward_seq(context_pred[:,:-1,:])
z_mean = z_s[:,:self.latent_dim]
z_var = torch.nn.functional.softplus(z_s[:,self.latent_dim:])
#print(z_mean.shape,z_var.shape)
z_dis = torch.distributions.Normal(z_mean,torch.sqrt(z_var))
z_sample = z_dis.rsample()
obs, actions, rewards, next_obs, terms = self.sample_sac(indices)
t,b,_ = obs.size()
obs = obs.view(t*b,-1)
actions = actions.view(t * b, -1)
rewards = rewards.view(t * b, -1)
rewards = rewards * self.prediction_reward_scale
z_sample = z_sample.view(t*b,-1)
rew_pred = self.rew_decoder.forward(z_sample,obs,actions)
self.context_optimizer.zero_grad()
self.rew_optimizer.zero_grad()
loss = self.pred_loss(rewards,rew_pred)
loss.backward(retain_graph=True)
kl_div = self.compute_kl(z_mean,z_var)
kl_loss = kl_div * self.kl_lambda
kl_loss.backward()
self.context_optimizer.step()
self.rew_optimizer.step()
if self.eval_statistics is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics = OrderedDict()
z_mean = np.mean(np.abs(ptu.get_numpy(z_mean)))
z_sig = np.mean(ptu.get_numpy(z_var))
self.eval_statistics['Z mean train'] = z_mean
self.eval_statistics['Z variance train'] = z_sig
self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics['reward prediction loss'] = ptu.get_numpy(loss)
def _take_step_exp(self, indices,context_unbatched,context_pred,context):
self.context_optimizer.zero_grad()
if self.prediction:
t, b, _ = context_pred.size()
z_s = self.context_encoder.forward_seq(context_pred)
z_s = z_s.view(t,b,-1)
z_s_pre = z_s[:,:-1,:]
z_s_pre = z_s_pre.contiguous()
z_s_pre = z_s_pre.view(t*(b-1),-1)
z_mean = z_s_pre[:, :self.latent_dim]
z_var = torch.nn.functional.softplus(z_s_pre[:, self.latent_dim:])
# print(z_mean.shape,z_var.shape)
z_dis = torch.distributions.Normal(z_mean, torch.sqrt(z_var))
z_sample = z_dis.rsample()
#z_sample = z_sample[-1:,:].repeat(t*(b-1),1)
obs, actions, rewards, next_obs, terms, sparse_r = context
if self.sparse_rewards:
rewards = sparse_r
#obs, actions, rewards, next_obs, terms = context
#_,cu = self.sample_context(indices,False)
#obs, actions, _, next_obs, terms, info,rewards = cu
t, b, _ = obs.size()
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
rewards = rewards.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
z_sample = z_sample.view(t * b, -1)
rew_pred = self.rew_decoder.forward(z_sample, obs, actions)
self.rew_optimizer.zero_grad()
loss = self.pred_loss(rewards, rew_pred)* self.prediction_reward_scale
loss.backward(retain_graph=True)
kl_div = self.compute_kl(z_mean, z_var)
kl_loss = kl_div * self.kl_lambda
kl_loss.backward(retain_graph=True)
self.rew_optimizer.step()
if self.consider_dynamics:
self.transition_optimizer.zero_grad()
trans_pred = self.transition_decoder.forward(z_sample, obs, actions)
trans_loss = self.pred_loss(next_obs, trans_pred)* self.prediction_transition_scale
trans_loss.backward(retain_graph=True)
self.transition_optimizer.step()
if self.eval_statistics is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics = OrderedDict()
z_mean = np.mean(np.abs(ptu.get_numpy(z_mean)))
z_sig = np.mean(ptu.get_numpy(z_var))
self.eval_statistics['Z mean train'] = z_mean
self.eval_statistics['Z variance train'] = z_sig
self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics['reward prediction loss'] = ptu.get_numpy(loss)
if self.consider_dynamics:
self.eval_statistics['transition prediction loss'] = ptu.get_numpy(trans_loss)
#obs, actions, _, next_obs, terms, info, agent_rew = context_unbatched
obs, actions, agent_rew, next_obs, terms, sr = context
if self.sparse_rewards:
pred_rewardss = sr
else:
pred_rewardss = agent_rew
obs = obs.contiguous()
t,b,_ = context_pred.size()
if not self.prediction:
z_s = self.context_encoder.forward_seq(context_pred)
z_s = z_s.view(t,b,-1)
z_mean = z_s[:, :,:self.latent_dim]
z_var = torch.nn.functional.softplus(z_s[:,:, self.latent_dim:])
#print(z_mean.shape,z_var.shape)
z_mean_prev, z_var_prev,z_mean_post,z_var_post = z_mean[:,:-1,:],z_var[:,:-1,:],z_mean[:,1:,:],z_var[:,1:,:]
z_mean_prev, z_var_prev, z_mean_post, z_var_post = z_mean_prev.contiguous(), z_var_prev.contiguous(), z_mean_post.contiguous(), z_var_post.contiguous()
z_mean_prev, z_var_prev,z_mean_post,z_var_post = z_mean_prev.view(t*(b-1),-1), z_var_prev.view(t*(b-1),-1),z_mean_post.view(t*(b-1),-1),z_var_post.view(t*(b-1),-1)
policy_outputs,_ = self.exploration_agent(obs.view(t*(b-1),-1), z_mean_prev,z_var_prev)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
t, b, _ = obs.size()
agent_rew = agent_rew.contiguous()
pred_rewardss = pred_rewardss.contiguous()
agent_rew = agent_rew.view(t*b,-1)
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
pred_rewardss = pred_rewardss.view(t * b, -1)
#info = info.view(t * b, -1)
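# exploration reward: either information gain between consecutive posteriors or decoder prediction error, optionally decayed along the trajectory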
if self.intrinsic_reward_weight>0:
if self.use_kl_div_intrinsic:
intrinsic_reward = self.compute_intrinsic(z_mean_prev, z_var_prev, z_mean_post, z_var_post).detach()
else:
if not self.prediction:
z_dis = torch.distributions.Normal(z_mean_prev, torch.sqrt(z_var_prev))
z_sample = z_dis.rsample()
pred_rew = self.rew_decoder.forward(z_sample, obs, actions)
intrinsic_reward = (pred_rew - pred_rewardss) ** 2
if self.consider_dynamics:
pred_trans = self.transition_decoder.forward(z_sample, obs, actions)
intrinsic_reward = intrinsic_reward + torch.mean((pred_trans - next_obs) ** 2,dim=1,keepdim=True)
intrinsic_reward = intrinsic_reward.view(t * b, -1)
if self.intrinsic_reward_decay !=1:
intrinsic_reward = intrinsic_reward * torch.unsqueeze(ptu.from_numpy(self.intrinsic_reward_decay **np.linspace(0,t*b-1,t*b)),1)
rew = intrinsic_reward * self.intrinsic_reward_weight + agent_rew
else:
rew = agent_rew
rew = rew.detach()
#print(z_mean.shape, z_mean_next.shape, obs.shape, t, b)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1_exp(torch.cat([obs, actions,z_mean_prev, z_var_prev],dim=1))
q2_pred = self.qf2_exp(torch.cat([obs, actions,z_mean_prev, z_var_prev],dim=1))
v_pred = self.vf_exp(torch.cat([obs,z_mean_prev.detach(), z_var_prev.detach()],dim=1))
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_exp_vf(torch.cat([next_obs,z_mean_post,z_var_post],dim=1))
# KL constraint on z if probabilistic
if not self.gradient_from_Q:
self.context_optimizer.step()
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_exp_optimizer.zero_grad()
self.qf2_exp_optimizer.zero_grad()
rewards = rew.detach()
rewards_flat = rewards.view(t * b, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(t * b, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward(retain_graph=True)
if self.gradient_from_Q:
kl_div = self.compute_kl(z_mean_prev, z_var_prev)
kl_loss = kl_div * self.kl_lambda
kl_loss.backward(retain_graph=True)
self.qf1_exp_optimizer.step()
self.qf2_exp_optimizer.step()
if self.gradient_from_Q:
self.context_optimizer.step()
# compute min Q on the new actions
new_actions = new_actions.view(t * b, -1)
min_q_new_actions = self._min_q_exp(obs, new_actions,z_mean_prev.detach(), z_var_prev.detach())
# vf update
# print(min_q_new_actions)
# print(log_pi)
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_exp_criterion(v_pred, v_target.detach())
self.vf_exp_optimizer.zero_grad()
vf_loss.backward()
self.vf_exp_optimizer.step()
self._update_target_network_exp()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_exp_optimizer.zero_grad()
policy_loss.backward()
self.policy_exp_optimizer.step()
# save some statistics for eval
if self.eval_statistics_2 is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics_2 = OrderedDict()
self.eval_statistics_2['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics_2['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics_2['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
if self.gradient_from_Q:
self.eval_statistics_2['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics_2['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics_2.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
def get_epoch_snapshot(self, epoch):
# NOTE: overriding parent method which also optionally saves the env
snapshot = OrderedDict(
context_encoder=self.context_encoder.state_dict(),
qf1_exp=self.qf1_exp.state_dict(),
qf2_exp=self.qf2_exp.state_dict(),
policy_exp=self.exploration_agent.state_dict(),
vf_exp=self.vf_exp.state_dict(),
target_vf_exp=self.target_exp_vf.state_dict(),
)
return snapshot
class ExpSACFin2(ExpAlgorithmFin2):
def __init__(
self,
env,
train_tasks,
eval_tasks,
nets,
nets_exp,
encoder,
latent_dim,
policy_lr=1e-3,
qf_lr=1e-3,
vf_lr=1e-3,
context_lr=1e-3,
kl_lambda=1.,
policy_mean_reg_weight=1e-3,
policy_std_reg_weight=1e-3,
policy_pre_activation_weight=0.,
optimizer_class=optim.Adam,
recurrent=False,
use_information_bottleneck=True,
use_next_obs_in_context=False,
sparse_rewards=False,
soft_target_tau=1e-2,
plotter=None,
render_eval_paths=False,
use_info_in_context=False,
entropy_weight=1e-2,
intrinsic_reward_weight=1e-1,
use_kl_div_intrinsic=False,
gradient_from_Q=False,
prediction_reward_scale=1,
intrinsic_reward_decay = 1,
kl_min_weight=5,
pie_hidden_dim=15,
consider_dynamics=0,
prediction_transition_scale=1,
**kwargs
):
super().__init__(
env=env,
agent=nets[0],
agent_exp=nets_exp[0],
train_tasks=train_tasks,
eval_tasks=eval_tasks,
encoder=encoder,
**kwargs
)
self.use_kl_div_intrinsic = use_kl_div_intrinsic
self.intrinsic_reward_weight = intrinsic_reward_weight
self.entropy_weight = entropy_weight
self.soft_target_tau = soft_target_tau
self.policy_mean_reg_weight = policy_mean_reg_weight
self.policy_std_reg_weight = policy_std_reg_weight
self.policy_pre_activation_weight = policy_pre_activation_weight
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.latent_dim = latent_dim
self.recurrent = recurrent
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.qf_exp_criterion = nn.MSELoss()
self.vf_exp_criterion = nn.MSELoss()
self.vib_criterion = nn.MSELoss()
self.l2_reg_criterion = nn.MSELoss()
self.pred_loss = nn.MSELoss()
self.kl_lambda = kl_lambda
self.prediction_reward_scale = prediction_reward_scale
self.consider_dynamics = consider_dynamics
self.prediction_transition_scale = prediction_transition_scale
self.use_information_bottleneck = use_information_bottleneck
self.sparse_rewards = sparse_rewards
self.use_next_obs_in_context = use_next_obs_in_context
self.gradient_from_Q = gradient_from_Q
self.intrinsic_reward_decay = intrinsic_reward_decay
self.kl_min_weight = kl_min_weight
self.qf1, self.qf2, self.vf = nets[1:]
self.qf1_exp, self.qf2_exp, self.vf_exp, self.rew_decoder, self.transition_decoder = nets_exp[1:]
self.target_exp_vf = self.vf_exp.copy()
self.target_vf = self.vf.copy()
self.policy_exp_optimizer = optimizer_class(
self.exploration_agent.parameters(),
lr=policy_lr,
)
self.qf1_exp_optimizer = optimizer_class(
self.qf1_exp.parameters(),
lr=qf_lr,
)
self.qf2_exp_optimizer = optimizer_class(
self.qf2_exp.parameters(),
lr=qf_lr,
)
self.vf_exp_optimizer = optimizer_class(
self.vf_exp.parameters(),
lr=vf_lr,
)
self.policy_optimizer = optimizer_class(
self.agent.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.vf_optimizer = optimizer_class(
self.vf.parameters(),
lr=vf_lr,
)
self.context_optimizer = optimizer_class(
self.context_encoder.parameters(),
lr=context_lr,
)
self.rew_optimizer = optimizer_class(
self.rew_decoder.parameters(),
lr=context_lr,
)
self.transition_optimizer = optimizer_class(
self.transition_decoder.parameters(),
lr=context_lr,
)
###### Torch stuff #####
@property
def networks(self):
return [self.context_encoder] + [self.exploration_agent.policy] + [self.qf1_exp, self.qf2_exp, self.vf_exp, self.target_exp_vf,self.rew_decoder,self.transition_decoder] + [self.agent.policy,self.qf1, self.qf2, self.vf, self.target_vf]
def training_mode(self, mode):
for net in self.networks:
net.train(mode)
def to(self, device=None):
if device is None:
device = ptu.device
for net in self.networks:
net.to(device)
##### Data handling #####
def unpack_batch(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
sr = batch['sparse_rewards'][None, ...]
else:
sr = batch['rewards'][None, ...]
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
return [o, a, r, no, t, sr]
def unpack_batch_context(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
sparse_r = batch['sparse_rewards'][None, ...]
r = batch['rewards'][None, ...]
if not sparse_reward:
sparse_r = r
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
info = batch['env_infos'][None, ...]
# print(o[0,:5],a[0,:5],r[0],sparse_r[0],no[0,:5])
return [o, a, sparse_r, no, t, info, r]
def sample_sac(self, indices):
''' sample batch of training data from a list of tasks for training the actor-critic '''
# this batch consists of transitions sampled randomly from replay buffer
# rewards are always dense
if self.use_per:
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)[0]) for
idx in indices]
else:
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)) for
idx in indices]
unpacked = [self.unpack_batch(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
unpacked = [[x[i] for x in unpacked] for i in range(len(unpacked[0]))]
unpacked = [torch.cat(x, dim=0) for x in unpacked]
return unpacked
def sample_context(self, indices, sequence=False):
''' sample batch of context from a list of tasks from the replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(
self.enc_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=sequence)) for idx
in indices]
context = [self.unpack_batch_context(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
context_unbatched = context
if self.use_next_obs_in_context:
context = torch.cat(context[:-3], dim=2)
else:
context = torch.cat(context[:-4], dim=2)
return context, context_unbatched
def pred_context(self, context):
''' assemble the autoregressive prediction context: each step pairs the current observation with the previous action and previous reward (zero-padded at t=0, so the sequence is one step longer than the input) '''
r_0 = ptu.zeros(context[2].shape[0], 1, context[2].shape[2])
tmp = torch.cat([r_0, context[2]], dim=1)
a_0 = ptu.zeros(context[1].shape[0], 1, context[1].shape[2])
tmp2 = torch.cat([a_0, context[1]], dim=1)
tmp3 = torch.cat([torch.unsqueeze(context[0][:, 0, :], 1), context[3]], dim=1)
# full context consists of [obs, act, rewards, next_obs, terms]
# if dynamics don't change across tasks, don't include next_obs
# don't include terminals in context
contextr = torch.cat([tmp3, tmp2, tmp], dim=2)
return contextr
def sample_exp(self, indices,sequence=True):
''' sample a batch of context for each task from the exploration replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(self.exp_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=sequence)) for idx in indices]
context = [self.unpack_batch_context(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
context_unbatched = context
if self.use_next_obs_in_context:
context = torch.cat(context[:-3], dim=2)
else:
context = torch.cat(context[:-4], dim=2)
return context,context_unbatched
##### Training #####
def _do_training(self, indices):
mb_size = self.embedding_mini_batch_size
num_updates = self.embedding_batch_size // mb_size
# sample context batch
#_,exp_context_unbatched = self.sample_exp(indices,True)
#exp_context_pred = self.pred_context(exp_context_unbatched)
_, context_unbatched = self.sample_context(indices, False)
context_pred = self.pred_context(context_unbatched)
context = self.sample_sac(indices)
# zero out context and hidden encoder state
# do this in a loop so we can truncate backprop in the recurrent encoder
for i in range(num_updates):
#self._take_step(indices, context_unbatched,context_pred)
self._take_step_exp(indices, context_unbatched,context_pred,context)
# stop backprop
def _min_q_exp(self, obs,actions,z_mean,z_var):
#print(obs.shape,actions.shape)
q1 = self.qf1_exp(torch.cat([ obs,actions,z_mean,z_var],dim=1))
q2 = self.qf2_exp(torch.cat([ obs,actions,z_mean,z_var],dim=1))
min_q = torch.min(q1, q2)
return min_q
def _min_q(self, obs, actions,z):
#print(obs.shape,actions.shape)
q1 = self.qf1(torch.cat([obs, actions,z],dim=1))
q2 = self.qf2(torch.cat([obs, actions,z],dim=1))
min_q = torch.min(q1, q2)
return min_q
def _update_target_network_exp(self):
ptu.soft_update_from_to(self.vf_exp, self.target_exp_vf, self.soft_target_tau)
def _update_target_network(self):
ptu.soft_update_from_to(self.vf, self.target_vf, self.soft_target_tau)
def compute_kl(self,means,vars):
std_mean = ptu.zeros(means.size())
std_var = ptu.ones(means.size())
tem = vars / std_var
kl_div = tem ** 2 - 2 * torch.log(tem) + ((std_mean - means) / std_var) ** 2 - 1
kl_div = torch.sum(kl_div, dim=1, keepdim=True) / 2
kl_div = torch.mean(kl_div)
return kl_div
def compute_intrinsic(self,z_mean_prev, z_var_prev,z_mean_post,z_var_post):
tem = z_var_post / z_var_prev
kl_div = tem ** 2 - 2 * torch.log(tem) + ((z_mean_prev - z_mean_post) / z_var_prev) ** 2 - 1
kl_div = torch.sum(kl_div, dim=1, keepdim=True) / 2
return kl_div
def _take_step(self, indices, context_unbatched,context_pred,context):
t,b,_ = context_pred.size()
num_tasks = len(indices)
z_s = self.context_encoder.forward_seq(context_pred)
z_s = z_s.view(t,b,-1)
z_mean = z_s[:,:-1,:self.latent_dim]
z_var = torch.nn.functional.softplus(z_s[:,:-1,self.latent_dim:])
z_mean_post = z_s[:, 1:, :self.latent_dim]
z_var_post = torch.nn.functional.softplus(z_s[:, 1:, self.latent_dim:])
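# 'prev' posterior drops the last transition, 'post' drops the first; the pair feeds the posterior-consistency term below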
z_dis = torch.distributions.Normal(z_mean,torch.sqrt(z_var))
z_sample = z_dis.rsample()
obs, actions, rewards, next_obs, terms = self.sample_sac(indices)
t,b,_ = obs.size()
obs = obs.view(t*b,-1)
actions = actions.view(t * b, -1)
rewards = rewards.view(t * b, -1)
z_sample = z_sample.view(t * b, -1)
# run inference in networks
policy_outputs = self.agent(obs, z_sample.detach())
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
# flattens out the task dimension
next_obs = next_obs.view(t * b, -1)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1(obs, actions, z_sample)
q2_pred = self.qf2(obs, actions, z_sample)
v_pred = self.vf(obs, z_sample.detach())
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(next_obs, z_sample)
# KL constraint on z if probabilistic
self.context_optimizer.zero_grad()
kl_div = self.compute_kl(z_mean,z_var)
kl_loss = self.kl_lambda * kl_div
kl_loss.backward(retain_graph=True)
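# additional penalty on large shifts between consecutive posteriors, weighted by kl_min_weight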
kl_min_loss = self.compute_intrinsic(z_mean.contiguous().view(t*b,-1),z_var.contiguous().view(t*b,-1),z_mean_post.contiguous().view(t*b,-1),z_var_post.contiguous().view(t*b,-1))
kl_min_loss = torch.mean(kl_min_loss) * self.kl_min_weight
kl_min_loss.backward(retain_graph=True)
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
rewards_flat = rewards.view(self.batch_size * num_tasks, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(self.batch_size * num_tasks, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
min_q_new_actions = self._min_q(obs, new_actions, z_sample.detach())
# vf update
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
# save some statistics for eval
if self.eval_statistics is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics = OrderedDict()
if self.use_information_bottleneck:
z_mean = np.mean(np.abs(ptu.get_numpy(self.agent.z_means[0])))
z_sig = np.mean(ptu.get_numpy(self.agent.z_vars[0]))
self.eval_statistics['Z mean train'] = z_mean
self.eval_statistics['Z variance train'] = z_sig
self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics['KL Min Loss'] = ptu.get_numpy(kl_min_loss)
self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
def _take_step_exp(self, indices,context_unbatched,context_pred,context):
t, b, _ = context_pred.size()
b = b - 1
context_pred_pre = context_pred [:,:-1,:]
#context_pred = context_pred.contiguous()
z_s = self.context_encoder.forward_seq(context_pred_pre)
#z_s = z_s.view(t, b, -1)
z_mean = z_s[:, :self.latent_dim]
z_var = torch.nn.functional.softplus(z_s[:, self.latent_dim:])
# print(z_mean.shape,z_var.shape)
z_dis = torch.distributions.Normal(z_mean, torch.sqrt(z_var))
z_sample = z_dis.rsample()
z_sample_pearl = z_sample
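# the exploitation agent is conditioned on a sampled latent (PEARL-style); the exploration agent further below is conditioned on the posterior mean and variance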
obs, actions, agent_rew, next_obs, terms, sr = context
if self.sparse_rewards:
pred_rewardss = sr
else:
pred_rewardss = agent_rew
t, b, _ = obs.size()
#agent_rew = agent_rew.contiguous()
#pred_rewardss = pred_rewardss.contiguous()
agent_rew = agent_rew.view(t * b, -1)
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
pred_rewardss = pred_rewardss.view(t * b, -1)
rewards_flat = agent_rew.detach()
q1_pred = self.qf1(torch.cat([obs, actions, z_sample_pearl], dim=1))
q2_pred = self.qf2(torch.cat([obs, actions, z_sample_pearl], dim=1))
v_pred = self.vf(torch.cat([obs, z_sample_pearl.detach()], dim=1))
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(torch.cat([next_obs, z_sample_pearl], dim=1))
# KL constraint on z if probabilistic
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
self.context_optimizer.zero_grad()
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(t * b, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward(retain_graph=True)
kl_div = self.compute_kl(z_mean, z_var)
kl_loss = kl_div * self.kl_lambda
kl_loss.backward(retain_graph=True)
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
policy_outputs, _ = self.agent(obs, z_sample_pearl.detach())
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
new_actions = new_actions.view(t * b, -1)
min_q_new_actions = self._min_q(obs, new_actions, z_sample_pearl.detach())
# vf update
# print(min_q_new_actions)
# print(log_pi)
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
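# fit the reward decoder (and optionally the transition decoder) on the detached latent; its prediction error supplies the intrinsic reward used below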
rew_pred = self.rew_decoder.forward(z_sample_pearl.detach(), obs, actions)
self.rew_optimizer.zero_grad()
rew_loss = self.pred_loss(pred_rewardss, rew_pred) * self.prediction_reward_scale
rew_loss.backward()
self.rew_optimizer.step()
if self.consider_dynamics:
self.transition_optimizer.zero_grad()
trans_pred = self.transition_decoder.forward(z_sample_pearl.detach(), obs, actions)
trans_loss = self.pred_loss(next_obs, trans_pred) * self.prediction_transition_scale
trans_loss.backward()
self.transition_optimizer.step()
policy_outputs, _ = self.exploration_agent(obs, z_mean.detach(), z_var.detach())
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
context_post = context_pred[:, 1:, :]
context_post = context_post.contiguous()
z_s_post = self.context_encoder.forward_seq(context_post)
z_mean_post = z_s_post[:, :self.latent_dim]
z_var_post = torch.nn.functional.softplus(z_s_post[:, self.latent_dim:])
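# intrinsic reward for the exploration agent: posterior information gain or decoder prediction error, with optional per-step decay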
if self.intrinsic_reward_weight > 0:
if self.use_kl_div_intrinsic:
intrinsic_reward = self.compute_intrinsic(z_mean, z_var, z_mean_post, z_var_post).detach()
else:
pred_rew = self.rew_decoder.forward(z_sample.detach(), obs, actions)
intrinsic_reward = (pred_rew - pred_rewardss) ** 2
if self.consider_dynamics:
pred_trans = self.transition_decoder.forward(z_sample.detach(), obs, actions)
intrinsic_reward = intrinsic_reward + torch.mean((pred_trans - next_obs) ** 2, dim=1, keepdim=True)
intrinsic_reward = intrinsic_reward.view(t * b, -1)
if self.intrinsic_reward_decay != 1:
intrinsic_reward = intrinsic_reward * torch.unsqueeze(
ptu.from_numpy(self.intrinsic_reward_decay ** np.linspace(0, t * b - 1, t * b)), 1)
rew = intrinsic_reward * self.intrinsic_reward_weight + agent_rew
else:
rew = agent_rew
rew = rew.detach()
# print(z_mean.shape, z_mean_next.shape, obs.shape, t, b)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred_exp = self.qf1_exp(torch.cat([obs, actions, z_mean.detach(), z_var.detach()], dim=1))
q2_pred_exp = self.qf2_exp(torch.cat([obs, actions, z_mean.detach(), z_var.detach()], dim=1))
v_pred_exp = self.vf_exp(torch.cat([obs, z_mean.detach(), z_var.detach()], dim=1))
# get targets for use in V and Q updates
with torch.no_grad():
#print(next_obs.shape,z_mean_post.shape)
target_v_values = self.target_exp_vf(torch.cat([next_obs, z_mean_post, z_var_post], dim=1))
# KL constraint on z if probabilistic
self.qf1_exp_optimizer.zero_grad()
self.qf2_exp_optimizer.zero_grad()
rewards_flat = rew
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(t * b, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss_exp = torch.mean((q1_pred_exp - q_target) ** 2) + torch.mean((q2_pred_exp - q_target) ** 2)
qf_loss_exp.backward()
self.qf1_exp_optimizer.step()
self.qf2_exp_optimizer.step()
# compute min Q on the new actions
new_actions = new_actions.view(t * b, -1)
min_q_new_actions = self._min_q_exp(obs, new_actions, z_mean.detach(), z_var.detach())
# vf update
# print(min_q_new_actions)
# print(log_pi)
v_target = min_q_new_actions - log_pi
vf_loss_exp = self.vf_exp_criterion(v_pred_exp, v_target.detach())
self.vf_exp_optimizer.zero_grad()
vf_loss_exp.backward()
self.vf_exp_optimizer.step()
self._update_target_network_exp()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss_exp = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss_exp = policy_loss_exp + policy_reg_loss
self.policy_exp_optimizer.zero_grad()
policy_loss_exp.backward()
self.policy_exp_optimizer.step()
if self.eval_statistics_2 is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics_2 = OrderedDict()
self.eval_statistics_2['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics_2['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics_2['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics_2['QF Loss Exp'] = np.mean(ptu.get_numpy(qf_loss_exp))
self.eval_statistics_2['VF Loss Exp'] = np.mean(ptu.get_numpy(vf_loss_exp))
self.eval_statistics_2['Policy Loss Exp'] = np.mean(ptu.get_numpy(
policy_loss_exp
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Q Predictions Exp',
ptu.get_numpy(q1_pred_exp),
))
self.eval_statistics_2['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics_2['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics_2.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'V Predictions Exp',
ptu.get_numpy(v_pred_exp),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
self.eval_statistics_2['Z mean train'] = np.mean(ptu.get_numpy(z_mean))
self.eval_statistics_2['Z variance train'] = np.mean(ptu.get_numpy(z_var))
self.eval_statistics_2['reward prediction loss'] = ptu.get_numpy(rew_loss)
if self.consider_dynamics:
self.eval_statistics_2['transition prediction loss'] = ptu.get_numpy(trans_loss)
def get_epoch_snapshot(self, epoch):
# NOTE: overriding parent method which also optionally saves the env
snapshot = OrderedDict(
context_encoder=self.context_encoder.state_dict(),
qf1_exp=self.qf1_exp.state_dict(),
qf2_exp=self.qf2_exp.state_dict(),
policy_exp=self.exploration_agent.state_dict(),
vf_exp=self.vf_exp.state_dict(),
target_vf_exp=self.target_exp_vf.state_dict(),
qf1=self.qf1.state_dict(),
qf2=self.qf2.state_dict(),
policy=self.agent.policy.state_dict(),
vf=self.vf.state_dict(),
target_vf=self.target_vf.state_dict(),
)
return snapshot
class ExpSACFin3(ExpAlgorithmFin3):
def __init__(
self,
env,
train_tasks,
eval_tasks,
nets,
nets_exp,
encoder,
latent_dim,
policy_lr=1e-3,
qf_lr=1e-3,
vf_lr=1e-3,
context_lr=1e-3,
kl_lambda=1.,
policy_mean_reg_weight=1e-3,
policy_std_reg_weight=1e-3,
policy_pre_activation_weight=0.,
optimizer_class=optim.Adam,
recurrent=False,
use_information_bottleneck=True,
use_next_obs_in_context=False,
sparse_rewards=False,
soft_target_tau=1e-2,
plotter=None,
render_eval_paths=False,
use_info_in_context=False,
entropy_weight=1e-2,
intrinsic_reward_weight=1e-1,
use_kl_div_intrinsic=False,
gradient_from_Q=False,
prediction_reward_scale=1,
intrinsic_reward_decay = 1,
kl_min_weight=5,
pie_hidden_dim=15,
consider_dynamics=0,
prediction_transition_scale=1,
**kwargs
):
super().__init__(
env=env,
agent=nets[0],
agent_exp=nets_exp[0],
train_tasks=train_tasks,
eval_tasks=eval_tasks,
encoder=encoder,
**kwargs
)
self.use_kl_div_intrinsic = use_kl_div_intrinsic
self.intrinsic_reward_weight = intrinsic_reward_weight
self.entropy_weight = entropy_weight
self.soft_target_tau = soft_target_tau
self.policy_mean_reg_weight = policy_mean_reg_weight
self.policy_std_reg_weight = policy_std_reg_weight
self.policy_pre_activation_weight = policy_pre_activation_weight
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.latent_dim = latent_dim
self.recurrent = recurrent
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.qf_exp_criterion = nn.MSELoss()
self.vf_exp_criterion = nn.MSELoss()
self.vib_criterion = nn.MSELoss()
self.l2_reg_criterion = nn.MSELoss()
self.pred_loss = nn.MSELoss()
self.kl_lambda = kl_lambda
self.prediction_reward_scale = prediction_reward_scale
self.consider_dynamics = consider_dynamics
self.prediction_transition_scale = prediction_transition_scale
self.use_information_bottleneck = use_information_bottleneck
self.sparse_rewards = sparse_rewards
self.use_next_obs_in_context = use_next_obs_in_context
self.gradient_from_Q = gradient_from_Q
self.intrinsic_reward_decay = intrinsic_reward_decay
self.kl_min_weight = kl_min_weight
self.qf1, self.qf2, self.vf = nets[1:]
self.qf1_exp, self.qf2_exp, self.vf_exp, self.rew_decoder, self.transition_decoder = nets_exp[1:]
self.target_exp_vf = self.vf_exp.copy()
self.target_vf = self.vf.copy()
self.policy_exp_optimizer = optimizer_class(
self.exploration_agent.parameters(),
lr=policy_lr,
)
self.qf1_exp_optimizer = optimizer_class(
self.qf1_exp.parameters(),
lr=qf_lr,
)
self.qf2_exp_optimizer = optimizer_class(
self.qf2_exp.parameters(),
lr=qf_lr,
)
self.vf_exp_optimizer = optimizer_class(
self.vf_exp.parameters(),
lr=vf_lr,
)
self.policy_optimizer = optimizer_class(
self.agent.parameters(),
lr=policy_lr,
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
lr=qf_lr,
)
self.vf_optimizer = optimizer_class(
self.vf.parameters(),
lr=vf_lr,
)
self.context_optimizer = optimizer_class(
self.context_encoder.parameters(),
lr=context_lr,
)
self.rew_optimizer = optimizer_class(
self.rew_decoder.parameters(),
lr=context_lr,
)
self.transition_optimizer = optimizer_class(
self.transition_decoder.parameters(),
lr=context_lr,
)
###### Torch stuff #####
@property
def networks(self):
return [self.context_encoder] + [self.exploration_agent.policy] + [self.qf1_exp, self.qf2_exp, self.vf_exp, self.target_exp_vf,self.rew_decoder,self.transition_decoder] + [self.agent.policy,self.qf1, self.qf2, self.vf, self.target_vf]
def training_mode(self, mode):
for net in self.networks:
net.train(mode)
def to(self, device=None):
if device is None:
device = ptu.device
for net in self.networks:
net.to(device)
##### Data handling #####
def unpack_batch(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
if sparse_reward:
sr = batch['sparse_rewards'][None, ...]
else:
sr = batch['rewards'][None, ...]
r = batch['rewards'][None, ...]
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
return [o, a, r, no, t, sr]
def unpack_batch_context(self, batch, sparse_reward=False):
''' unpack a batch and return individual elements '''
o = batch['observations'][None, ...]
a = batch['actions'][None, ...]
sparse_r = batch['sparse_rewards'][None, ...]
r = batch['rewards'][None, ...]
if not sparse_reward:
sparse_r = r
no = batch['next_observations'][None, ...]
t = batch['terminals'][None, ...]
info = batch['env_infos'][None, ...]
# print(o[0,:5],a[0,:5],r[0],sparse_r[0],no[0,:5])
return [o, a, sparse_r, no, t, info, r]
def sample_sac(self, indices):
''' sample batch of training data from a list of tasks for training the actor-critic '''
# this batch consists of transitions sampled randomly from replay buffer
# rewards are always dense
if self.use_per:
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)[0]) for
idx in indices]
else:
batches = [ptu.np_to_pytorch_batch(self.replay_buffer.random_batch(idx, batch_size=self.batch_size)) for
idx in indices]
unpacked = [self.unpack_batch(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
unpacked = [[x[i] for x in unpacked] for i in range(len(unpacked[0]))]
unpacked = [torch.cat(x, dim=0) for x in unpacked]
return unpacked
def sample_context(self, indices, sequence=False):
''' sample batch of context from a list of tasks from the replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(
self.enc_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=sequence)) for idx
in indices]
context = [self.unpack_batch_context(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
context_unbatched = context
if self.use_next_obs_in_context:
context = torch.cat(context[:-3], dim=2)
else:
context = torch.cat(context[:-4], dim=2)
return context, context_unbatched
def pred_context(self, context):
''' assemble the autoregressive prediction context: each step pairs the current observation with the previous action and previous reward (zero-padded at t=0, so the sequence is one step longer than the input) '''
r_0 = ptu.zeros(context[2].shape[0], 1, context[2].shape[2])
tmp = torch.cat([r_0, context[2]], dim=1)
a_0 = ptu.zeros(context[1].shape[0], 1, context[1].shape[2])
tmp2 = torch.cat([a_0, context[1]], dim=1)
tmp3 = torch.cat([torch.unsqueeze(context[0][:, 0, :], 1), context[3]], dim=1)
# full context consists of [obs, act, rewards, next_obs, terms]
# if dynamics don't change across tasks, don't include next_obs
# don't include terminals in context
contextr = torch.cat([tmp3, tmp2, tmp], dim=2)
return contextr
def sample_exp(self, indices,sequence=True):
''' sample a batch of context for each task from the exploration replay buffer '''
# make method work given a single task index
if not hasattr(indices, '__iter__'):
indices = [indices]
batches = [ptu.np_to_pytorch_batch(self.exp_replay_buffer.random_batch(idx, batch_size=self.embedding_batch_size, sequence=sequence)) for idx in indices]
context = [self.unpack_batch_context(batch, sparse_reward=self.sparse_rewards) for batch in batches]
# group like elements together
context = [[x[i] for x in context] for i in range(len(context[0]))]
context = [torch.cat(x, dim=0) for x in context]
context_unbatched = context
if self.use_next_obs_in_context:
context = torch.cat(context[:-3], dim=2)
else:
context = torch.cat(context[:-4], dim=2)
return context,context_unbatched
##### Training #####
def _do_training(self, indices):
mb_size = self.embedding_mini_batch_size
num_updates = self.embedding_batch_size // mb_size
# sample context batch
#_,exp_context_unbatched = self.sample_exp(indices,True)
#exp_context_pred = self.pred_context(exp_context_unbatched)
_, context_unbatched = self.sample_context(indices, False)
context_pred = self.pred_context(context_unbatched)
context = self.sample_sac(indices)
# zero out context and hidden encoder state
# do this in a loop so we can truncate backprop in the recurrent encoder
for i in range(num_updates):
#self._take_step(indices, context_unbatched,context_pred)
self._take_step_exp(indices, context_unbatched,context_pred,context)
# stop backprop
def _min_q_exp(self, obs,actions,z_mean,z_var):
#print(obs.shape,actions.shape)
q1 = self.qf1_exp(torch.cat([ obs,actions,z_mean,z_var],dim=1))
q2 = self.qf2_exp(torch.cat([ obs,actions,z_mean,z_var],dim=1))
min_q = torch.min(q1, q2)
return min_q
def _min_q(self, obs, actions,z):
#print(obs.shape,actions.shape)
q1 = self.qf1(torch.cat([obs, actions,z],dim=1))
q2 = self.qf2(torch.cat([obs, actions,z],dim=1))
min_q = torch.min(q1, q2)
return min_q
def _update_target_network_exp(self):
ptu.soft_update_from_to(self.vf_exp, self.target_exp_vf, self.soft_target_tau)
def _update_target_network(self):
ptu.soft_update_from_to(self.vf, self.target_vf, self.soft_target_tau)
def compute_kl(self,means,vars):
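        # KL-style divergence of the inferred Gaussian (means, vars) from a
        # standard normal prior, summed over latent dimensions and averaged over the batch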
std_mean = ptu.zeros(means.size())
std_var = ptu.ones(means.size())
tem = vars / std_var
kl_div = tem ** 2 - 2 * torch.log(tem) + ((std_mean - means) / std_var) ** 2 - 1
kl_div = torch.sum(kl_div, dim=1, keepdim=True) / 2
kl_div = torch.mean(kl_div)
return kl_div
def compute_intrinsic(self,z_mean_prev, z_var_prev,z_mean_post,z_var_post):
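        # per-sample divergence of the posterior Gaussian from the previous (prior)
        # Gaussian; kept per sample so it can serve as an intrinsic reward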
tem = z_var_post / z_var_prev
kl_div = tem ** 2 - 2 * torch.log(tem) + ((z_mean_prev - z_mean_post) / z_var_prev) ** 2 - 1
kl_div = torch.sum(kl_div, dim=1, keepdim=True) / 2
return kl_div
def _take_step(self, indices, context_unbatched,context_pred,context):
t,b,_ = context_pred.size()
num_tasks = len(indices)
z_s = self.context_encoder.forward_seq(context_pred)
z_s = z_s.view(t,b,-1)
z_mean = z_s[:,:-1,:self.latent_dim]
z_var = torch.nn.functional.softplus(z_s[:,:-1,self.latent_dim:])
        z_mean_post = z_s[:, 1:, :self.latent_dim]
z_var_post = torch.nn.functional.softplus(z_s[:, 1:, self.latent_dim:])
z_dis = torch.distributions.Normal(z_mean,torch.sqrt(z_var))
z_sample = z_dis.rsample()
obs, actions, rewards, next_obs, terms = self.sample_sac(indices)
t,b,_ = obs.size()
obs = obs.view(t*b,-1)
actions = actions.view(t * b, -1)
rewards = rewards.view(t * b, -1)
z_sample = z_sample.view(t * b, -1)
# run inference in networks
policy_outputs = self.agent(obs, z_sample.detach())
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
# flattens out the task dimension
next_obs = next_obs.view(t * b, -1)
# Q and V networks
# encoder will only get gradients from Q nets
q1_pred = self.qf1(obs, actions, z_sample)
q2_pred = self.qf2(obs, actions, z_sample)
v_pred = self.vf(obs, z_sample.detach())
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(next_obs, z_sample)
# KL constraint on z if probabilistic
self.context_optimizer.zero_grad()
kl_div = self.compute_kl(z_mean,z_var)
kl_loss = self.kl_lambda * kl_div
kl_loss.backward(retain_graph=True)
kl_min_loss = self.compute_intrinsic(z_mean.contiguous().view(t*b,-1),z_var.contiguous().view(t*b,-1),z_mean_post.contiguous().view(t*b,-1),z_var_post.contiguous().view(t*b,-1))
kl_min_loss = torch.mean(kl_min_loss) * self.kl_min_weight
kl_min_loss.backward(retain_graph=True)
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
rewards_flat = rewards.view(self.batch_size * num_tasks, -1)
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(self.batch_size * num_tasks, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
min_q_new_actions = self._min_q(obs, new_actions, z_sample.detach())
# vf update
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
# save some statistics for eval
if self.eval_statistics is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics = OrderedDict()
if self.use_information_bottleneck:
z_mean = np.mean(np.abs(ptu.get_numpy(self.agent.z_means[0])))
z_sig = np.mean(ptu.get_numpy(self.agent.z_vars[0]))
self.eval_statistics['Z mean train'] = z_mean
self.eval_statistics['Z variance train'] = z_sig
self.eval_statistics['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics['KL Min Loss'] = ptu.get_numpy(kl_min_loss)
self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
def _take_step_exp(self, indices,context_unbatched,context_pred,context):
t, b, _ = context_pred.size()
b = b - 1
context_pred_pre = context_pred [:,:-1,:]
#context_pred = context_pred.contiguous()
z_s = self.context_encoder.forward_seq(context_pred_pre)
#z_s = z_s.view(t, b, -1)
z_mean = z_s[:, :self.latent_dim]
z_var = torch.nn.functional.softplus(z_s[:, self.latent_dim:])
# print(z_mean.shape,z_var.shape)
z_dis = torch.distributions.Normal(z_mean, torch.sqrt(z_var))
z_sample = z_dis.rsample()
z_sample_pearl = z_sample
obs, actions, agent_rew, next_obs, terms, sr = context
if self.sparse_rewards:
pred_rewardss = sr
else:
pred_rewardss = agent_rew
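        # train the reward decoder to predict the (possibly sparse) reward from (z, obs, action)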
rew_pred = self.rew_decoder.forward(z_sample_pearl.detach(), obs, actions)
self.rew_optimizer.zero_grad()
rew_loss = self.pred_loss(pred_rewardss, rew_pred) * self.prediction_reward_scale
rew_loss.backward()
self.rew_optimizer.step()
if self.consider_dynamics:
self.transition_optimizer.zero_grad()
trans_pred = self.transition_decoder.forward(z_sample_pearl.detach(), obs, actions)
trans_loss = self.pred_loss(next_obs, trans_pred) * self.prediction_transition_scale
trans_loss.backward()
self.transition_optimizer.step()
if self.intrinsic_reward_weight > 0:
if self.use_kl_div_intrinsic:
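                # NOTE: z_mean_post / z_var_post are not defined in this method
                # (only the prior z_mean / z_var are computed above), so this
                # branch would raise a NameError if use_kl_div_intrinsic were enabled.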
intrinsic_reward = self.compute_intrinsic(z_mean, z_var, z_mean_post, z_var_post).detach()
else:
pred_rew = self.rew_decoder.forward(z_sample.detach(), obs, actions)
intrinsic_reward = (pred_rew - pred_rewardss) ** 2
if self.consider_dynamics:
pred_trans = self.transition_decoder.forward(z_sample.detach(), obs, actions)
intrinsic_reward = intrinsic_reward + torch.mean((pred_trans - next_obs) ** 2, dim=1, keepdim=True)
intrinsic_reward = intrinsic_reward.view(t * b, -1)
if self.intrinsic_reward_decay != 1:
intrinsic_reward = intrinsic_reward * torch.unsqueeze(
ptu.from_numpy(self.intrinsic_reward_decay ** np.linspace(0, t * b - 1, t * b)), 1)
rew = intrinsic_reward * self.intrinsic_reward_weight + agent_rew
else:
rew = agent_rew
rew = rew.detach()
agent_rew = rew
t, b, _ = obs.size()
#agent_rew = agent_rew.contiguous()
#pred_rewardss = pred_rewardss.contiguous()
agent_rew = agent_rew.view(t * b, -1)
obs = obs.view(t * b, -1)
actions = actions.view(t * b, -1)
next_obs = next_obs.view(t * b, -1)
pred_rewardss = pred_rewardss.view(t * b, -1)
rewards_flat = agent_rew.detach()
q1_pred = self.qf1(torch.cat([obs, actions, z_sample_pearl], dim=1))
q2_pred = self.qf2(torch.cat([obs, actions, z_sample_pearl], dim=1))
v_pred = self.vf(torch.cat([obs, z_sample_pearl.detach()], dim=1))
# get targets for use in V and Q updates
with torch.no_grad():
target_v_values = self.target_vf(torch.cat([next_obs, z_sample_pearl], dim=1))
# KL constraint on z if probabilistic
# qf and encoder update (note encoder does not get grads from policy or vf)
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
self.context_optimizer.zero_grad()
# scale rewards for Bellman update
rewards_flat = rewards_flat * self.reward_scale
terms_flat = terms.view(t * b, -1)
q_target = rewards_flat + (1. - terms_flat) * self.discount * target_v_values
qf_loss = torch.mean((q1_pred - q_target) ** 2) + torch.mean((q2_pred - q_target) ** 2)
qf_loss.backward(retain_graph=True)
kl_div = self.compute_kl(z_mean, z_var)
kl_loss = kl_div * self.kl_lambda
kl_loss.backward(retain_graph=True)
self.qf1_optimizer.step()
self.qf2_optimizer.step()
self.context_optimizer.step()
# compute min Q on the new actions
policy_outputs, _ = self.agent(obs, z_sample_pearl.detach())
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
new_actions = new_actions.view(t * b, -1)
min_q_new_actions = self._min_q(obs, new_actions, z_sample_pearl.detach())
# vf update
# print(min_q_new_actions)
# print(log_pi)
v_target = min_q_new_actions - log_pi
vf_loss = self.vf_criterion(v_pred, v_target.detach())
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
self._update_target_network()
# policy update
# n.b. policy update includes dQ/da
log_policy_target = min_q_new_actions
policy_loss = (
log_pi - log_policy_target
).mean()
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
pre_tanh_value = policy_outputs[-1]
pre_activation_reg_loss = self.policy_pre_activation_weight * (
(pre_tanh_value ** 2).sum(dim=1).mean()
)
policy_reg_loss = mean_reg_loss + std_reg_loss + pre_activation_reg_loss
policy_loss = policy_loss + policy_reg_loss
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
if self.eval_statistics_2 is None:
# eval should set this to None.
# this way, these statistics are only computed for one batch.
self.eval_statistics_2 = OrderedDict()
self.eval_statistics_2['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
self.eval_statistics_2['VF Loss'] = np.mean(ptu.get_numpy(vf_loss))
self.eval_statistics_2['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
            # NOTE: the exploration-policy statistics below are not computed in
            # this method (qf_loss_exp, vf_loss_exp and policy_loss_exp are never
            # defined), so logging them would raise a NameError; they are left
            # commented out.
            # self.eval_statistics_2['QF Loss Exp'] = np.mean(ptu.get_numpy(qf_loss_exp))
            # self.eval_statistics_2['VF Loss Exp'] = np.mean(ptu.get_numpy(vf_loss_exp))
            # self.eval_statistics_2['Policy Loss Exp'] = np.mean(ptu.get_numpy(
            #     policy_loss_exp
            # ))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Q Predictions',
ptu.get_numpy(q1_pred),
))
            # 'Q Predictions Exp' (q1_pred_exp) is likewise never computed here,
            # so it is left commented out.
            # self.eval_statistics_2.update(create_stats_ordered_dict(
            #     'Q Predictions Exp',
            #     ptu.get_numpy(q1_pred_exp),
            # ))
self.eval_statistics_2['KL Divergence'] = ptu.get_numpy(kl_div)
self.eval_statistics_2['KL Loss'] = ptu.get_numpy(kl_loss)
self.eval_statistics_2.update(create_stats_ordered_dict(
'V Predictions',
ptu.get_numpy(v_pred),
))
            # 'V Predictions Exp' (v_pred_exp) is likewise never computed here,
            # so it is left commented out.
            # self.eval_statistics_2.update(create_stats_ordered_dict(
            #     'V Predictions Exp',
            #     ptu.get_numpy(v_pred_exp),
            # ))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Policy mu',
ptu.get_numpy(policy_mean),
))
self.eval_statistics_2.update(create_stats_ordered_dict(
'Policy log std',
ptu.get_numpy(policy_log_std),
))
self.eval_statistics_2['Z mean train'] = np.mean(ptu.get_numpy(z_mean))
self.eval_statistics_2['Z variance train'] = np.mean(ptu.get_numpy(z_var))
self.eval_statistics_2['reward prediction loss'] = ptu.get_numpy(rew_loss)
if self.consider_dynamics:
            self.eval_statistics_2['transition prediction loss'] = ptu.get_numpy(trans_loss)
def get_epoch_snapshot(self, epoch):
# NOTE: overriding parent method which also optionally saves the env
snapshot = OrderedDict(
context_encoder=self.context_encoder.state_dict(),
qf1_exp=self.qf1_exp.state_dict(),
qf2_exp=self.qf2_exp.state_dict(),
policy_exp=self.exploration_agent.state_dict(),
vf_exp=self.vf_exp.state_dict(),
target_vf_exp=self.target_exp_vf.state_dict(),
qf1=self.qf1.state_dict(),
qf2=self.qf2.state_dict(),
policy=self.agent.policy.state_dict(),
vf=self.vf.state_dict(),
target_vf=self.target_vf.state_dict(),
)
return snapshot | [
"1170863106@qq.com"
] | 1170863106@qq.com |
f6607a1124301e9ff2d135311f8538737bc61608 | bbf874cf4abb20e7ec5c66e808e97ae6f2043c3f | /0x0B-python-input_output/14-pascal_triangle.py | c7d2d1a3c815f704485b95a44abecae4c2d99415 | [] | no_license | imperfectskillz/holbertonschool-higher_level_programming | 105fd80c2bea8fbb60eb786ce9019b3f63188342 | 704e99b29125d6449db32b9d52ede443318df620 | refs/heads/master | 2021-09-14T10:44:22.551896 | 2018-05-12T03:38:59 | 2018-05-12T03:38:59 | 113,130,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!/usr/bin/python3
"""
module contains pascal triangle
"""
def pascal_triangle(n):
"""
    Return Pascal's triangle as a list of the first n rows (empty list if n <= 0)
"""
if n <= 0:
return []
result = []
for i in range(n):
result.append([])
result[i].append(1)
for j in range(1, i):
result[i].append(result[i - 1][j - 1] + result[i - 1][j])
if (i > 0):
result[i].append(1)
return result
| [
"j.choi.89@gmail.com"
] | j.choi.89@gmail.com |
e14d0438eaf2707bc5170fde8bfa1f64838a51d7 | 8aeeba99ae8f56b8fcd68ba06e6a1a69890dcca5 | /pydpocl/plan_to_json.py | 72d79e9d01b449fc39b5d406e2f27256ccda16eb | [] | no_license | drwiner/BiPOCL | e867b76bb65cce3208be4746ec71a31f98be5c90 | 4350cc7921b8a03e0c60400de967d828caf3006a | refs/heads/master | 2021-05-06T13:03:33.350835 | 2018-01-29T18:07:56 | 2018-01-29T18:07:56 | 113,220,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,887 | py | """
Read plan solution, use time table to coordinate times
"""
import json
from CacheGroundSteps import plan_single_example
from plan_to_lists import make_partial_ordered_list
# from Ground_Compiler_Library.GElm import GStep, GLiteral
dist_dict = {}
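# Each line of distances.txt is assumed to be whitespace-separated, with the two
# location names at tokens 0 and 2 and the distance as the last token.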
with open("distances.txt", 'r') as fol:
for line in fol:
fol_sp = line.split()
dist_dict[(fol_sp[0], fol_sp[2])] = float(fol_sp[-1])
NAV_MOVE_RATE = {"strut": 1.5}
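# NAV_ARGS maps each navigation step type to the argument indices of its origin
# and destination locations; FAB_SEGMENTS maps the named segments 'start', 'mid'
# and 'end' to (start_fraction, end_fraction) of the step's duration.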
NAV_ARGS = {"strut": (1,2)}
FAB_SEGMENTS = {"strut": dict()}
FAB_SEGMENTS["strut"]["start"] = (0, .4)
FAB_SEGMENTS["strut"]["mid"] = (.4, .6)
FAB_SEGMENTS["strut"]["end"] = (.6, 1)
FAB_SEGMENTS["strut"]["slow-max"] = 3.5
FAB_SEGMENTS["strut"]["fast-max"] = 2
FAB_GAMEOBJECT_ARG = {"strut": 0, "turn-to": 0}
FAB_STEPS = ["strut", "turn-to"]
# ANIM_MAP = {"strut": "Stroll"}
FAB_TYPES = {"strut": "navigate"}
ORIENTS = {"front": '0', "behind": "180", "behind-right": 210, "behind-left": 150}
# FAB_TYPES = {"strut": nav_step_to_xml, "turn-to": "animate"}
CAM_STEPS = ["cam"]
# NAV_ACTIONS = ["strut"]
def step_extraction(plan_steps):
fab_steps = []
cam_steps = []
for step in plan_steps:
t = step.schema.split("-")[0]
if t in FAB_STEPS:
fab_steps.append(step)
elif t in CAM_STEPS:
cam_steps.append(step)
return fab_steps, cam_steps
### FABULA ### action for navigating
def nav_act_to_json(state, step, step_token, begin_time):
# state: state space representation provided to each fabula action to determine location
which_arg = FAB_GAMEOBJECT_ARG[step_token]
gameobj = step.Args[which_arg].name
# NAV ARGS: for each step type, we would need to specifically annotate origina and destination locations
origin, dest = NAV_ARGS[step_token]
# the duration is based on the distance plus the move rate. This is "real world" move rate, not how long shown
duration = dist_dict[(step.Args[origin].name, step.Args[dest].name)] / NAV_MOVE_RATE[step_token]
location = "None"
# find starting location in current state
for lit in state:
if lit.name != "at" or lit.truth is False:
continue
if lit.Args[0] != step.Args[which_arg]:
continue
location = lit.Args[1].name
break
ending_loc = "None"
# WARNING: hack used here is that any navigation step has a single true "at" literal describing end location
for eff in step.effects:
if eff.name == "at" and eff.Args[0].name == gameobj and eff.truth is True:
ending_loc = eff.Args[1].name
break
json_obj = {
"method_used": "nav_act",
"name": step.schema,
"step_id": step.schema.split("-")[2].split("[")[0],
"step_num": step.stepnumber,
"type": FAB_TYPES[step_token],
"start": begin_time,
"duration": duration,
"start_pos_name": location,
"end_pos_name": ending_loc,
"animation_name": step_token,
"gameobject_name": gameobj
}
return json_obj, duration
def stationary_act_to_json():
pass
def transition_state(state, step):
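    # Apply the step's effects to the state: an effect replaces any existing
    # proposition with the same predicate and arguments, and untouched
    # conditions carry over unchanged.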
new_state = []
do_not_transition = []
for effect in set(step.effects):
same_condition = [condition for condition in state if condition.sameProposition(effect)]
if len(same_condition) > 1:
raise ValueError("should not be 2 of the same proposition. "
"\n\n A proposition is a predicate name and equivalent arguments, but where truth value can differ.")
if len(same_condition) == 0:
# this proposition isn't one found in previous states
new_state.append(effect)
continue
new_state.append(effect)
do_not_transition.append(same_condition[0])
for condition in state:
if condition in do_not_transition:
continue
new_state.append(condition)
return set(new_state)
def fabula_to_json(init, steps):
before_time = 0
before_state = init
clips = []
for step in steps:
step_tokens = step.schema.split("-")[:2]
# step_with_num = step_tokens[0] + "-" + step_tokens[1]
xml_method = FAB_METHODS[step_tokens[0]]
# e.g.: nav_step_to_xml (state, step, step_token, begin_time, duration)
sub_root, step_duration = xml_method(before_state, step, step_tokens[0], before_time)
before_state = transition_state(before_state, step)
before_time += step_duration
clips.append(sub_root)
return clips
def fabula_to_json_po(init, nested_po_steps):
before_time = 0
before_state = init
clips = []
for po_list in nested_po_steps:
max_step_duration = 0
for step in po_list:
step_tokens = step.schema.split("-")[:2]
# step_with_num = step_tokens[0] + "-" + step_tokens[1]
xml_method = FAB_METHODS[step_tokens[0]]
# e.g.: nav_step_to_xml (state, step, step_token, begin_time, duration)
sub_root, step_duration = xml_method(before_state, step, step_tokens[0], before_time)
before_state = transition_state(before_state, step)
clips.append(sub_root)
if step_duration > max_step_duration:
max_step_duration = step_duration
        # advance time by the longest step in this partially-ordered group
        before_time += max_step_duration
return clips
def get_ref_to_fab_json_obj(fab_step_ref, fab_clips):
fab_step_id = str(fab_step_ref.root.ID)[19:23]
fab_json = None
same_step_num = []
for clip in fab_clips:
fs_id = clip["step_id"]
if fs_id == fab_step_id:
# this match implies we have found the clip corresponding to the reference fabula step
fab_json = clip
break
if clip["step_num"] == fab_step_ref.stepnumber:
same_step_num.append(clip)
if fab_json is None:
# idea 1: find the step reference in the past discourse step, and start search from there
        # WARNING: instead, hack used: just find the first step with the same stepnumber
try:
fab_json = same_step_num[0]
except:
raise ValueError("no fab step with same stepnum found")
# find the "nearest" clip with same stepnumber
return fab_json
def discourse_to_json(steps, fab_clips):
clips = []
before_time = 0
for step in steps:
# cam - shot - segment [fab-step, orientation]
# cam - virtual - shot [cam-name, fab-step, segment]
step_tokens = step.schema.split("-")
# gets the first fabula step reference in the arguments
fab_step_ref = get_steps_in_args(step.Args)[0]
# get a reference to the fabula json object
fab_json = get_ref_to_fab_json_obj(fab_step_ref, fab_clips)
type_of_action_being_filmed = fab_json["type"]
type_of_camera_being_used = step_tokens[1]
# json method depends on type of action filmed (stationary or navigate) and type of camera (cam or virtual)
json_method = DISC_METHODS[type_of_action_being_filmed][type_of_camera_being_used]
step_root, this_duration = json_method(before_time, step_tokens, fab_json, step.Args[0].name)
# step_root = xml_method(step.schema, before_time, this_duration, this_fab_start_time, orient, target, fab_xml)
clips.append(step_root)
before_time += this_duration
return clips
def get_steps_in_args(args):
return [arg for arg in args if hasattr(arg, "stepnumber")]
def nav_virtual_shot_to_json(before_time, step_tokens, fab_json, camName):
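    # The clip's duration and its fabula start offset are derived from the
    # segment's (start_fraction, end_fraction) share of the underlying
    # navigation step's duration.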
# cam - virtual - shot [cam-name, fab-step, segment]
segment = step_tokens[-1].split('\'')[-2]
target = fab_json['gameobject_name']
s, f = FAB_SEGMENTS[fab_json["animation_name"]][segment]
fab_duration = fab_json["duration"]
this_duration = f * float(fab_duration) - s * float(fab_duration)
this_fab_start_time = fab_json["start"] + s * float(fab_duration)
step_root = {
"method_used": "nav_virtual_shot_to_json",
"name": " ".join(step_tokens),
"type": "nav_virtual",
"start": before_time,
"duration": this_duration,
"fab_step": fab_json["name"],
"fab_start": this_fab_start_time,
"aim_target": target,
"camera_name": camName
}
return step_root, this_duration
def nav_cam_shot_to_json(before_time, step_tokens, fab_json, camName=None):
# cam - shot - segment [fab-step, orientation]
segment = step_tokens[2]
orient = step_tokens[2].split('[')[0]
target = fab_json["gameobject_name"]
# fab_with_num = fab_tokens[0] + "-" + fab_tokens[1]
step_type = fab_json["animation_name"]
s, f = FAB_SEGMENTS[step_type][segment]
fab_duration = fab_json["duration"]
this_duration = f * float(fab_duration) - s * fab_duration
this_fab_start_time = fab_json["start"] + s * fab_duration
fab_location_start = fab_json["start_pos_name"]
fab_location_end = fab_json["end_pos_name"]
dist = dist_dict[(fab_location_start, fab_location_end)]
step_root = {
"method_used": "nav_cam_shot_to_json",
"name": ' '.join(step_tokens),
"type": "nav_cam",
"start": before_time,
"duration": this_duration,
"fab_step_name": fab_json["name"],
"fab_start": this_fab_start_time,
"start_pos_name": fab_location_start,
"start_dist_offset": s * dist,
"end_pos_name": fab_location_end,
"end_dist_offset": f * dist,
"orient": orient,
"aim_target": target,
"camera_name": camName
}
return step_root, this_duration
def fab_step_to_id(gstep):
return gstep.schema.split("-")[2].split("[")[0]
import pickle
def upload(plan_steps, name):
# n = re.sub('[^A-Za-z0-9]+', '', name)
print(name)
with open(name, 'wb') as afile:
pickle.dump(plan_steps, afile)
def reload(name):
# n = re.sub('[^A-Za-z0-9]+', '', name)
afile = open(name, "rb")
plan_steps = pickle.load(afile)
afile.close()
return plan_steps
FAB_METHODS = {
"strut": nav_act_to_json,
"turn-to": stationary_act_to_json
}
DISC_METHODS = {
"navigate":
{
"virtual": nav_virtual_shot_to_json,
"shot": nav_cam_shot_to_json
},
"stationary":
{
"virtual": None,
"shot": None
}
}
if __name__ == '__main__':
domain_file = 'D:/documents/python/cinepydpocl/pydpocl/Ground_Compiler_Library/domains/Unity_Domain_Arrive1.pddl'
problem_file = 'D:/documents/python/cinepydpocl/pydpocl/Ground_Compiler_Library/domains/Unity_Arrive_Problem.pddl'
# plan_output, gsteps = plan_single_example(domain_file, problem_file)
# plan_steps = [step for step in plan_output[0].OrderingGraph.topoSort()]
# upload(plan_steps, "plan")
# plan_steps = reload("cached_plan_CA5.pkl")
# plan_steps = reload("cached_plan_Arrive.pkl")
plan = reload("full_plan_Arrive.pkl")
fab_partial_ordering = make_partial_ordered_list(plan, FAB_STEPS)
#
# print("organize into partial order")
# # plan_steps = [step for step in plan_output.OrderingGraph.topoSort()]
# with open("arrival_plan.txt", 'w') as wtp:
# for step in plan_steps:
# wtp.write(str(step))
# wtp.write("\n")
#
# print("ok")
plan_steps = [step for step in plan.OrderingGraph.topoSort()]
fab_steps, disc_steps = step_extraction(plan_steps)
fab_list = fabula_to_json_po(set(plan_steps[0].effects), fab_partial_ordering)
# fab_list = fabula_to_json(set(plan_steps[0].effects), fab_steps)
disc_list = discourse_to_json(disc_steps, fab_list)
# tree_string = etree.tostring(root, pretty_print=True)
# fab_string = json.dumps(fab_xml)
# fab_string = etree.tostring(fab_xml, pretty_print=True)
with open("fabula.json", 'w') as tout:
tout.write(json.dumps(fab_list, indent=4))
# disc_string = etree.tostring(disc_xml, pretty_print=True)
with open("discourse.json", 'w') as tout:
tout.write(json.dumps(disc_list, indent=4)) | [
"drwiner131@gmail.com"
] | drwiner131@gmail.com |
58be4518bcc02c88e3fe519fe8cc98faaf47f62f | 75fa11b13ddab8fd987428376f5d9c42dff0ba44 | /smoke-test/tests/test_stateful_ingestion.py | a10cf13a08029da9a728f7d6532ae0e04b1eefc9 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | RyanHolstien/datahub | 163d0ff6b4636919ed223ee63a27cba6db2d0156 | 8cf299aeb43fa95afb22fefbc7728117c727f0b3 | refs/heads/master | 2023-09-04T10:59:12.931758 | 2023-08-21T18:33:10 | 2023-08-21T18:33:10 | 246,685,891 | 0 | 0 | Apache-2.0 | 2021-02-16T23:48:05 | 2020-03-11T21:43:58 | TypeScript | UTF-8 | Python | false | false | 5,402 | py | from typing import Any, Dict, Optional, cast
from datahub.ingestion.api.committable import StatefulCommittable
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.sql.mysql import MySQLConfig, MySQLSource
from datahub.ingestion.source.state.checkpoint import Checkpoint
from datahub.ingestion.source.state.entity_removal_state import GenericCheckpointState
from datahub.ingestion.source.state.stale_entity_removal_handler import StaleEntityRemovalHandler
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from tests.utils import (
get_gms_url,
get_mysql_password,
get_mysql_url,
get_mysql_username,
)
def test_stateful_ingestion(wait_for_healthchecks):
def create_mysql_engine(mysql_source_config_dict: Dict[str, Any]) -> Any:
mysql_config = MySQLConfig.parse_obj(mysql_source_config_dict)
url = mysql_config.get_sql_alchemy_url()
return create_engine(url)
def create_table(engine: Any, name: str, defn: str) -> None:
create_table_query = text(f"CREATE TABLE IF NOT EXISTS {name}{defn};")
engine.execute(create_table_query)
def drop_table(engine: Any, table_name: str) -> None:
drop_table_query = text(f"DROP TABLE {table_name};")
engine.execute(drop_table_query)
def run_and_get_pipeline(pipeline_config_dict: Dict[str, Any]) -> Pipeline:
pipeline = Pipeline.create(pipeline_config_dict)
pipeline.run()
pipeline.raise_from_status()
return pipeline
def validate_all_providers_have_committed_successfully(pipeline: Pipeline) -> None:
provider_count: int = 0
for name, provider in pipeline.ctx.get_committables():
provider_count += 1
assert isinstance(provider, StatefulCommittable)
stateful_committable = cast(StatefulCommittable, provider)
assert stateful_committable.has_successfully_committed()
assert stateful_committable.state_to_commit
assert provider_count == 1
def get_current_checkpoint_from_pipeline(
pipeline: Pipeline,
) -> Optional[Checkpoint[GenericCheckpointState]]:
# TODO: Refactor to use the helper method in the metadata-ingestion tests, instead of copying it here.
mysql_source = cast(MySQLSource, pipeline.source)
return mysql_source.state_provider.get_current_checkpoint(
StaleEntityRemovalHandler.compute_job_id(
getattr(mysql_source, "platform", "default")
)
)
source_config_dict: Dict[str, Any] = {
"host_port": get_mysql_url(),
"username": get_mysql_username(),
"password": get_mysql_password(),
"database": "datahub",
"stateful_ingestion": {
"enabled": True,
"remove_stale_metadata": True,
"fail_safe_threshold": 100.0,
"state_provider": {
"type": "datahub",
"config": {"datahub_api": {"server": get_gms_url()}},
},
},
}
pipeline_config_dict: Dict[str, Any] = {
"source": {
"type": "mysql",
"config": source_config_dict,
},
"sink": {
"type": "datahub-rest",
"config": {"server": get_gms_url()},
},
"pipeline_name": "mysql_stateful_ingestion_smoke_test_pipeline",
"reporting": [
{
"type": "datahub",
}
],
}
# 1. Setup the SQL engine
mysql_engine = create_mysql_engine(source_config_dict)
# 2. Create test tables for first run of the pipeline.
table_prefix = "stateful_ingestion_test"
table_defs = {
f"{table_prefix}_t1": "(id INT, name VARCHAR(10))",
f"{table_prefix}_t2": "(id INT)",
}
table_names = sorted(table_defs.keys())
for table_name, defn in table_defs.items():
create_table(mysql_engine, table_name, defn)
# 3. Do the first run of the pipeline and get the default job's checkpoint.
pipeline_run1 = run_and_get_pipeline(pipeline_config_dict)
checkpoint1 = get_current_checkpoint_from_pipeline(pipeline_run1)
assert checkpoint1
assert checkpoint1.state
# 4. Drop table t1 created during step 2 + rerun the pipeline and get the checkpoint state.
drop_table(mysql_engine, table_names[0])
pipeline_run2 = run_and_get_pipeline(pipeline_config_dict)
checkpoint2 = get_current_checkpoint_from_pipeline(pipeline_run2)
assert checkpoint2
assert checkpoint2.state
# 5. Perform all assertions on the states
state1 = checkpoint1.state
state2 = checkpoint2.state
difference_urns = list(
state1.get_urns_not_in(type="*", other_checkpoint_state=state2)
)
assert len(difference_urns) == 1
assert (
difference_urns[0]
== "urn:li:dataset:(urn:li:dataPlatform:mysql,datahub.stateful_ingestion_test_t1,PROD)"
)
# 6. Cleanup table t2 as well to prevent other tests that rely on data in the smoke-test world.
drop_table(mysql_engine, table_names[1])
# 7. Validate that all providers have committed successfully.
# NOTE: The following validation asserts for presence of state as well
# and validates reporting.
validate_all_providers_have_committed_successfully(pipeline_run1)
validate_all_providers_have_committed_successfully(pipeline_run2)
| [
"noreply@github.com"
] | RyanHolstien.noreply@github.com |
ffda0a09de33d284ab2df4cc442161c7af34907f | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/leetcode/leetCode/LinkedList/160_IntersectionOfTwoLinkedLists.py | 02ca4ec4485da1426b1045081c588a002f6a40d7 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 557 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
# Refer to:
# https://leetcode.com/discuss/17278/accepted-shortest-explaining-algorithm-comments-improvements
p1, p2 = headA, headB
while(p1 != p2):
p1 = headB if not p1 else p1.next
p2 = headA if not p2 else p2.next
return p1
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
d4035a0d109421eeaa4a7aa57eb5f56252559836 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/118/usersdata/37/25629/submittedfiles/questao2.py | 8a87584abfbf2f20ff1af27fd15c677482690be2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | tom1=1
tom2=2
tom3=3
tom4=4
tom5=5
tom6=6
tom7=7
tom8=8
tom9=9
t0m10=10
tom0=0
l=[]
n=input('Digite n: ')
for i in range(0,n,1):
for i in range(0,m.shape(0),1):
for j in range(0,m.shape(1),1):
            m=m.append(input('Digite 0 ou -1: '))
            if m[i,j]!=0:
d=(m[i,j]-((m[i,j]+1)!=(m[i,j]==0)))
m[i,j]=d
else:
m[i,j]=tom0
l=l.append(m[i,j])
print l
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
94fa1da6010571b7f8dc49b5540b59295bb345fc | ac227cc22d5f5364e5d029a2cef83816a6954590 | /applications/physbam/physbam-lib/Scripts/Archives/pd/sim/CLIENT_LIBRARY.py | e7a5d4c3b369bd10ee2e36f79dfcf8ecb4aacd02 | [
"BSD-3-Clause"
] | permissive | schinmayee/nimbus | 597185bc8bac91a2480466cebc8b337f5d96bd2e | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | refs/heads/master | 2020-03-11T11:42:39.262834 | 2018-04-18T01:28:23 | 2018-04-18T01:28:23 | 129,976,755 | 0 | 0 | BSD-3-Clause | 2018-04-17T23:33:23 | 2018-04-17T23:33:23 | null | UTF-8 | Python | false | false | 4,921 | py | from pd.common import CONFIG
from pd.common import SOCKET
import sys
import time
import os
import socket
import dialog
client=None
try:
client=SOCKET.CLIENT(CONFIG.pdsim_server_host,CONFIG.pdsim_server_port,
(CONFIG.client_private_key_file,CONFIG.client_certificate_file,CONFIG.ca_certificate_file))
# client=SOCKET.CLIENT(os.environ["PSIM_SERVER_HOST"],int(os.environ["PSIM_SERVER_PORT"]),
# (os.environ["PSIM_CLIENT_KEY"],os.environ["PSIM_CLIENT_CERT"],os.environ["PSIM_CA_CERT"]))
except KeyError:
print "You must define the environment variables PSIM_SERVER_HOST and PSIM_SERVER_PORT, PSIM_CLIENT_KEY, \nPSIM_CA_CERT, PSIM_CLIENT_CERT"
sys.exit(1)
except socket.error:
print "Unable to connect to server"
sys.exit(1)
username=os.environ["USER"]
def handle_dialog_code(d,code):
if code in (d.DIALOG_CANCEL, d.DIALOG_ESC):
sys.exit(1)
return 0
def New_Session(d):
# Get memory
(code,answer)=d.inputbox("How much memory (GBytes) will this job take?",init="4")
handle_dialog_code(d,code)
memory=int(answer)
# Get CPUs
(code,answer)=d.inputbox("How many CPUs will this job take?",init="1")
handle_dialog_code(d,code)
cpus=int(answer)
# Get Label
(code,label)=d.inputbox("What do you call this job?",init="")
handle_dialog_code(d,code)
# Try making the job
session_info=client.Create_Session(username,memory,cpus)
id=None
try:
id=session_info["id"]
except:
d.infobox("Failed to get session")
sys.exit(1)
client.Label_Session(id,label)
return id
# query_states is a list of states we want
# users is a list of users we want
def Get_Session(d,query_states=None,users=None):
sessions=client.Session_List()
formatted_choices=[]
for i in sessions.keys():
session=sessions[i]
if query_states and not session["state"] in query_states: continue
if users and not session["username"] in users: continue
statestr=session["state"]
if statestr=="active": statestr="active@%s"%session["machine"]
formatted_choices.append((str(session["id"]),"%-8s %-20s %-200s"%(session["username"],statestr,session["label"])))
if len(formatted_choices)==0:
print "No sessions found with state %s and users %s"%(repr(query_states),repr(users))
sys.exit(1)
(code,session)=d.menu("Choose session", width=230,height=-1,menu_height=0,choices=formatted_choices)
handle_dialog_code(d,code)
return int(session)
def Label_Session(d,id):
label=client.Session_Info(id)["label"]
(code,label)=d.inputbox("What should the label be?",init=label)
handle_dialog_code(d,code)
client.Label_Session(id,label)
def Deactivate_Session(d,id):
(code,state)=d.menu("What state should it get?",width=60,height=-1,menu_height=0,choices=[("inactive","Not running but might be soon"),("done","Pretty much done")])
handle_dialog_code(d,code)
client.Deactivate_Session(id,state)
def Activate_Session(d,id):
while 1:
hosts=client.Host_List()
formatted=[]
for host in hosts.keys():
claims=hosts[host]["claims"]
mem=hosts[host]["max_memory"]
cpu=hosts[host]["max_cpus"]
users=[]
for claim in claims.keys():
cpu-=claims[claim]["cpus"]
mem-=claims[claim]["memory"]
users.append(claims[claim]["user"])
avail_string="Free CPU=%2d Free Mem=%3d claims=%s"%(cpu,mem,", ".join(users))
formatted.append((host,avail_string))
if len(formatted)==0:
print "No hosts"
sys.exit(1)
(code,hostname)=d.menu("Which host you would like?", width=60,height=-1,menu_height=0,choices=formatted)
handle_dialog_code(d,code)
# now try to attach
try:
client.Activate_Session(id,hostname)
d.infobox("Session %d successfully attached to %s"%(id,hostname));
break
except SOCKET.COMMAND_EXCEPTION,e:
d.msgbox("The following error occured when trying to get host:\n\n"+str(e))
def Status(d,id):
info=client.Session_Info(id)
lines=["%20s %d"%("ID:",info["id"]),
"%20s %s"%("Label:",info["label"]),
"%20s %s"%("State:",info["state"]),
"%20s %d"%("Memory (GB):",info["memory"]),
"%20s %d"%("CPUs:",info["cpus"]),
"%20s %s"%("Username:",info["username"]),
"%20s %s"%("Machine:",info["machine"]),
"%20s %s"%("Date Created:",time.ctime(info["created_date"])),
"",
"User Status",
"-----------"]
status=info["user_status"]
stats=status.keys()
stats.sort()
for i in stats:
lines.append(" %20s : %s"%(i,repr(status[i])))
d.msgbox(width=-1,height=-1,text="\n".join(lines))
| [
"quhang@stanford.edu"
] | quhang@stanford.edu |
b7040f5517b2041949cb1948f352b486386a60f6 | d1d79d0c3889316b298852834b346d4246825e66 | /blackbot/core/wss/ttp/art/art_T1069.002-4.py | b2323cce9e6f0197c920a21ed0d2d5a454bf2cc0 | [] | no_license | ammasajan/Atomic-Red-Team-Intelligence-C2 | 78d1ed2de49af71d4c3c74db484e63c7e093809f | 5919804f0bdeb15ea724cd32a48f377bce208277 | refs/heads/master | 2023-07-17T12:48:15.249921 | 2021-08-21T20:10:30 | 2021-08-21T20:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | from blackbot.core.utils import get_path_in_package
from blackbot.core.wss.atomic import Atomic
from terminaltables import SingleTable
import os
import json
class Atomic(Atomic):
def __init__(self):
self.name = 'Discovery/T1069.002-4'
self.controller_type = ''
self.external_id = 'T1069.002'
self.blackbot_id = 'T1069.002-4'
self.version = ''
self.language = 'boo'
self.description = self.get_description()
self.last_updated_by = 'Blackbot, Inc. All Rights reserved'
self.references = ["System.Management.Automation"]
self.options = {
'OutString': {
'Description' : 'Appends Out-String to the PowerShellCode',
'Required' : False,
'Value' : True,
},
'BypassLogging': {
'Description' : 'Bypasses ScriptBlock and Techniques logging',
'Required' : False,
'Value' : True,
},
'BypassAmsi': {
'Description' : 'Bypasses AMSI',
'Required' : False,
'Value' : True,
}
}
def payload(self):
with open(get_path_in_package('core/wss/ttp/art/src/powershell.boo'), 'r') as ttp_src:
src = ttp_src.read()
pwsh_script = get_path_in_package('core/wss/ttp/art/pwsh_ttp/discovery/T1069.002-4')
with open(pwsh_script) as pwsh:
src = src.replace("POWERSHELL_SCRIPT", pwsh.read())
src = src.replace("OUT_STRING", str(self.options["OutString"]["Value"]).lower())
src = src.replace("BYPASS_LOGGING", str(self.options["BypassLogging"]["Value"]).lower())
src = src.replace("BYPASS_AMSI", str(self.options["BypassAmsi"]["Value"]).lower())
return src
def get_description(self):
path = get_path_in_package('core/wss/ttp/art/pwsh_ttp/discovery/T1069.002-4')
with open(path) as text:
head = [next(text) for l in range(4)]
technique_name = head[0].replace('#TechniqueName: ', '').strip('\n')
atomic_name = head[1].replace('#AtomicTestName: ', '').strip('\n')
description = head[2].replace('#Description: ', '').strip('\n')
language = head[3].replace('#Language: ', '').strip('\n')
aux = ''
count = 1
for char in description:
if char == '&':
continue
aux += char
if count % 126 == 0:
aux += '\n'
count += 1
out = '{}: {}\n{}\n\n{}\n'.format(technique_name, language, atomic_name, aux)
return out
| [
"root@uw2artic201.blackbot.net"
] | root@uw2artic201.blackbot.net |
c36fd5af1a819d31260212e83fad532b77b86bca | 7bbc83f3f84d7e5057cb04f6895082ab3e016e90 | /keras/keras09_val2.py | b7f8415c59b79aa0aea11813adddbfec31dea9dc | [] | no_license | osy1223/bit_seoul | 908f6adf007c0a7d0df2659b4fae75eb705acaea | b523d78c7b80d378a2d148b35466304f10bf4af4 | refs/heads/master | 2023-02-02T14:26:40.120989 | 2020-12-18T00:46:04 | 2020-12-18T00:46:04 | 311,279,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | import numpy as np
#1. 데이터 준비
x_train=np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
y_train=np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
# x_val = np.array([11,12,13,14,15])
# y_val = np.array([11,12,13,14,15])
# x_pred=np.array([16,17,18])
x_test = np.array([16,17,18,19,20])
y_test = np.array([16,17,18,19,20])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
#2. Build the model
model = Sequential()
model.add(Dense(30,input_dim=1))
model.add(Dense(500))
model.add(Dense(200))
model.add(Dense(800))
model.add(Dense(900))
model.add(Dense(200))
model.add(Dense(700))
model.add(Dense(1))
#3. Compile and train (in a form the computer can understand)
# model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# when training: pass training data plus validation data (like peeking at the answer sheet while solving the test)
model.fit(x_train, y_train, epochs=100, validation_split=0.2)
#validation_split=0.2 : split off 20% for validation (x and y move together, i.e. they are cut identically)
#4. Evaluate, predict
loss = model.evaluate(x_test,y_test)
print("loss : ", loss)
y_predict = model.predict(x_test)
print("결과물 : ",y_predict)
#Exercise : correct the error of the results. Fine-tuning
#user-defined RMSE function
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test,y_predict))
print("RMSE :", RMSE(y_test,y_predict))
#R2 function
from sklearn.metrics import r2_score
r2=r2_score(y_test, y_predict)
print("R2 : ",r2) | [
"osu1223@gmail.com"
] | osu1223@gmail.com |
2277ec3c58a86509eb47d7f5fc1ddee6e56b7382 | 8dad9c4fc22ba754179a86a5c74467249685b171 | /python/audio/Jean/Device Platform [2017-04-17]/SQLog.py | 18d8e63cf50cc9b34a88894a417b02f104cffcc3 | [] | no_license | ANTZ314/raspi | d3aefab9694c73767e0b243a8539f0c2fbb713a5 | a1688c2c3b0f0372f3c5674c8e098dcbebbfd38a | refs/heads/master | 2021-06-10T18:08:50.308992 | 2021-06-03T14:58:03 | 2021-06-03T14:58:03 | 173,297,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,943 | py | # -*- coding: utf-8 -*-
#
#Created on Mon Mar 13 11:36:27 2017
#
#@author: Jean Louw
#-------------------------------------IMPORTS----------------------------------
import sqlite3
import threading
#------------------------------------------------------------------------------
#--------------------------------SQLOG CLASS START-----------------------------
class SQLog(object):
#-------------------------------------------------------------------------#
# Name: SQLog #
# Function: Contains methods with which to log and retrieve the gunshot #
# application data into and from a SQLite database. #
# Dependencies: SQLite3 (comes pre-installed with Python) #
#-------------------------------------------------------------------------#
__version__ = "2017.3"
#- - - - - - - - - - - - - - COMMON DATABASE FUNCTIONS- - - - - - - - - - - - -
#<summary> Constructor checks for existing db or creates new one </summary>
#<param> Receives name/path of database to be used as filename = STRING</param>
def __init__(self, **kwargs):
self.data = kwargs
self.sql_thread_lock = threading.Lock()
filename = self.data.get('filename', None)
if filename != None: self.connect_to_DB(filename)
else: self.connect_to_DB("gunshot_db.db")
#<summary> Closing function to be called before deleting the class instance. </summary>
def close(self):
self._db.close()
#<summary> Generator function for fetching all tables in a database file </summary>
#<retuns> The string name of each table in the database sequentially </returns>
def list_tables(self):
list_of_tables = self.sql_do("SELECT tbl_name AS 'Table:' FROM sqlite_master WHERE type='table';")
for table in list_of_tables:
yield table[0]
#<summary> Helper function for performing and commiting change actions in the database </summary>
#<param name="sql_string"> This is the SQL format string that is to be executed by the db </param>
def sql_do(self, sql_string):
with self.sql_thread_lock:
res = self._db.execute(sql_string)
self._db.commit() # to commit is to save the change in the db
return res
    #<summary>Connects to the .db file in the filename path and looks for essential tables.
# If the tables are not found it re-directs to create the tables </summary>
#<param name="filename"> filename is a mandatory STRING and houses the database file path </param>
def connect_to_DB(self, filename):
self._db = sqlite3.connect(filename, check_same_thread = False)
self._db.row_factory = sqlite3.Row
if 'Event_Log' not in self.list_tables(): self.create_Event_Log_table()
if 'Status_Log' not in self.list_tables(): self.create_Status_Log_table()
if 'Error_Log' not in self.list_tables(): self.create_Error_Log_table()
if 'Update_Log' not in self.list_tables(): self.create_Update_Log_table()
def set_thread_lock(self,sql_lock):
self.sql_thread_lock = sql_lock
def get_thread_lock(self):
        return self.sql_thread_lock
#- - - - - - - - - - - - - TABLE SPECIFIC FUNCTIONS - - - - - - - - - - - - - -
#- - - - - - - - - - - - - - - EVENT LOG TABLE - - - - - - - - - - - - - - - -
#<summary> Creates a brand new table in the connected db with the name "Event_Log" </summary>
def create_Event_Log_table(self):
print("No event log found. Creating Event_Log table.")
self.sql_do('DROP TABLE IF EXISTS Event_Log')#this is to ensure no duplicates if possible
self.sql_do('''CREATE TABLE Event_Log ( event_id INTEGER PRIMARY KEY NOT NULL,
gunshot_event STRING,
filepath STRING,
api_response INTEGER DEFAULT 0 );''')
#<summary>Generator that returns all of the records of the Event_Log table</summary>
#<returns>Returns a dictionary with the current record's information </returns>
def fetch_all_from_Event_Log_table(self):
alldata = self.sql_do('SELECT * FROM Event_Log')
for record in alldata.fetchall():
yield dict(record)
#<summary>Generator that returns all of the records of the Event_Log table
# whose api repsonse is not equal to 201 (success) </summary>
#<returns>Returns a dictionary with the current record's information</returns>
def fetch_unsent_from_Event_Log_table(self):
alldata = self.sql_do('SELECT * FROM Event_Log WHERE api_response != 201')
for record in alldata.fetchall():
yield dict(record)
#<summary>Fetches the information of a specific Event_Log record by its event_id </summary>
#<param name="event_id">An INTEGER that identifies the record to be fetched </param>
#<returns>A dictionary item with the record's information if it exists, else None </returns>
def fetch_row_from_Event_Log_table(self, event_id):
sql_string = 'SELECT * FROM Event_Log WHERE event_id = {}'.format(event_id)
rowdata = self.sql_do(sql_string)
rowdata = rowdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if rowdata!= None: return dict(rowdata)
else: return rowdata #return None if the result is empty
#<summary> Fetches the information of the last record in Event_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
def fetch_last_from_Event_Log_table(self):
lastdata = self.sql_do('SELECT * FROM Event_Log ORDER BY event_id DESC LIMIT 1;')
lastdata = lastdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if lastdata != None: return dict(lastdata)
else: return lastdata #return None if the result is empty
#<summary>Updates a specific column of a specific record in Event_Log with
# a specific value, and saves the changes. </summary>
#<param name="event_id"> An INTEGER that identifies the record to be updated </param>
#<param name="key"> A STRING name value of the column to update </param>
#<param name="value"> The value of any accepted type to be the updated value </param>
def update_Event_Log_table(self, event_id, key, value):
#if the value passed is a string, it should be encapsulated in quotes before execution.
if isinstance(value, basestring): sql_string = "UPDATE Event_Log SET {0} = '{1}' where event_id = {2}".format(key, value, event_id)
else: sql_string = 'UPDATE Event_Log SET {0} = {1} where event_id = {2}'.format(key, value, event_id)
self.sql_do(sql_string)
# <summary> Inserts a single record into the Event_Log table</summary>
# <param name="**kwargs"> Receives coinciding parameters to the table column names:
# event_data = json parsed string or dictionary object to be parsed of all the event data
# event_file_location = a string showing the cross-platform location of the event sound clip file
# api_response (optional) = the returned integer value from the attempt to upload to the API endpoint
# </param>
# <returns> Returns the SQL string that was processed. </returns>
def insert_Event_Log_table(self, insert_data):
acceptable_data = ('gunshot_event', 'filepath', 'api_response')
value_names = "INSERT INTO Event_Log ( "
value_data = " ) VALUES ( "
#create two strings for the dictionary and its values
for column in insert_data:
if column in acceptable_data:
value_names += column + ","
#if the value passed into column is of string type, encapsulate in quotes for SQL processing
if isinstance(insert_data[column], basestring): value_data += "'" + insert_data[column] + "',"
else: value_data += str(insert_data[column]) + "," #else pass the value directly
#combine the two strings
value_names = value_names[:-1] #trim the last ','
value_data = value_data[:-1] #trim the last ','
sql_string = value_names + value_data + " );"
#pass the string to be processed
self.sql_do(sql_string)
return sql_string
#- - - - - - - - - - - - - - - STATUS LOG TABLE - - - - - - - - - - - - - - - -
#<summary> Creates a brand new table in the connected db with the name "Status_Log" </summary>
def create_Status_Log_table(self):
print("No status log found. Creating Status_Log table.")
self.sql_do('DROP TABLE IF EXISTS Status_Log')#this is to ensure no duplicates if possible
self.sql_do('''CREATE TABLE Status_Log ( event_id INTEGER PRIMARY KEY NOT NULL,
dts STRING,
gps_status INTEGER,
gps_signal INTEGER,
status_3g INTEGER,
signal_3g INTEGER,
mic_status INTEGER,
snr STRING,
battery_status STRING,
cover_status INTEGER,
api_response INTEGER DEFAULT 0);''')
#<summary>Generator that returns all of the records of the Status_Log table</summary>
#<returns>Returns a dictionary with the current record's information </returns>
def fetch_all_from_Status_Log_table(self):
alldata = self.sql_do('SELECT * FROM Status_Log')
for record in alldata.fetchall():
yield dict(record)
#<summary>Generator that returns all of the records of the Status_Log table
    # whose api response is not equal to 201 (success) </summary>
#<returns>Returns a dictionary with the current record's information</returns>
def fetch_unsent_from_Status_Log_table(self):
alldata = self.sql_do('SELECT * FROM Status_Log WHERE api_response != 201')
for record in alldata.fetchall():
yield dict(record)
#<summary>Fetches the information of a specific Status_Log record by its event_id </summary>
#<param name="event_id">An INTEGER that identifies the record to be fetched </param>
#<returns>A dictionary item with the record's information if it exists, else None </returns>
def fetch_row_from_Status_Log_table(self, event_id):
sql_string = 'SELECT * FROM Status_Log WHERE event_id = {}'.format(event_id)
rowdata = self.sql_do(sql_string)
rowdata = rowdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if rowdata!= None: return dict(rowdata)
else: return rowdata #return None if the result is empty
#<summary> Fetches the information of the last record in Status_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
def fetch_last_from_Status_Log_table(self):
lastdata = self.sql_do('SELECT * FROM Status_Log ORDER BY event_id DESC LIMIT 1;')
lastdata = lastdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if lastdata != None: return dict(lastdata)
else: return lastdata #return None if the result is empty
#<summary>Updates a specific column of a specific record in Status_Log with
# a specific value, and saves the changes. </summary>
#<param name="event_id"> An INTEGER that identifies the record to be updated </param>
#<param name="key"> A STRING name value of the column to update </param>
#<param name="value"> The value of any accepted type to be the updated value </param>
def update_Status_Log_table(self, event_id, key, value):
#if the value passed is a string, it should be encapsulated in quotes before execution.
if isinstance(value, basestring): sql_string = "UPDATE Status_Log SET {0} = '{1}' where event_id = {2}".format(key, value, event_id)
else: sql_string = 'UPDATE Status_Log SET {0} = {1} where event_id = {2}'.format(key, value, event_id)
self.sql_do(sql_string)
# <summary> Inserts a single record into the Status_Log table</summary>
# <param name="**kwargs"> Receives coinciding parameters to the table column names:
# status_log_date_time = STRING that states when the data was logged and relevant
# gps_status = BOOLEAN INTEGER indicating working (1) or not (0)
# gps_signal = INTEGER that houses the signal strenght value
# net_3g_status = BOOLEAN INTEGER indicating working (1) or not (0)
# net_3g_signal = INTEGER that houses the signal strenght value
# microphone_status = BOOLEAN INTEGER indicating working (1) or not (0)
# signal_to_noise_ratio = STRING that is the passed float ratio value
# power_status = STRING indicating if empty, charging or full or AC powered
# cover_status = BOOLEAN INTEGER indication open (0) or covered (1)
# api_response (optional) = the returned integer value from the attempt to upload to the API endpoint
# </param>
# <returns> Returns the SQL string that was processed. </returns>
def insert_Status_Log_table(self, insert_data):
acceptable_data = ('dts', 'gps_status', 'gps_signal',
'status_3g', 'signal_3g', 'mic_status',
'snr', 'battery_status', 'cover_status',
'api_response')
value_names = "INSERT INTO Status_Log ( "
value_data = " ) VALUES ( "
#create two strings for the dictionary and its values
for column in insert_data:
if column in acceptable_data:
value_names += column + ","
#if the value passed into column is of string type, encapsulate in quotes for SQL processing
if isinstance(insert_data[column], basestring): value_data += "'" + insert_data[column] + "',"
else: value_data += str(insert_data[column]) + "," #else pass the value directly
#combine the two strings
value_names = value_names[:-1] #trim the last ','
value_data = value_data[:-1] #trim the last ','
sql_string = value_names + value_data + " );"
#pass the string to be processed
self.sql_do(sql_string)
return sql_string
#- - - - - - - - - - - - - - - ERROR LOG TABLE - - - - - - - - - - - - - - - -
#<summary> Creates a brand new table in the connected db with the name "Error_Log" </summary>
def create_Error_Log_table(self):
print("No error log found. Creating Error_Log table.")
self.sql_do('DROP TABLE IF EXISTS Error_Log')
self.sql_do('''CREATE TABLE Error_Log ( event_id INTEGER PRIMARY KEY NOT NULL,
error_details STRING,
error_date_time STRING,
api_response INTEGER DEFAULT 0);''')
#<summary>Generator that returns all of the records of the Error_Log table</summary>
#<returns>Returns a dictionary with the current record's information</returns>
def fetch_all_from_Error_Log_table(self):
alldata = self.sql_do('SELECT * FROM Error_Log')
for record in alldata.fetchall():
yield dict(record)
#<summary>Generator that returns all of the records of the Error_Log table
    # whose api response is not equal to 201 (success) </summary>
#<returns>Returns a dictionary with the current record's information</returns>
def fetch_unsent_from_Error_Log_table(self):
alldata = self.sql_do('SELECT * FROM Error_Log WHERE api_response != 201')
for record in alldata.fetchall():
yield dict(record)
#<summary>Fetches the information of a specific Error_Log record by its event_id </summary>
#<param name="event_id">An INTEGER that identifies the record to be fetched </param>
#<returns>A dictionary item with the record's information if it exists, else None </returns>
def fetch_row_from_Error_Log_table(self, event_id):
sql_string = 'SELECT * FROM Error_Log WHERE event_id = {}'.format(event_id)
rowdata = self.sql_do(sql_string)
rowdata = rowdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if rowdata!= None: return dict(rowdata)
else: return rowdata #return None if the result is empty
#<summary> Fetches the information of the last record in Error_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
def fetch_last_from_Error_Log_table(self):
lastdata = self.sql_do('SELECT * FROM Error_Log ORDER BY event_id DESC LIMIT 1;')
lastdata = lastdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if lastdata != None: return dict(lastdata)
else: return lastdata #return None if the result is empty
#<summary>Updates a specific column of a specific record in Error_Log with
# a specific value, and saves the changes. </summary>
#<param name="event_id"> An INTEGER that identifies the record to be updated </param>
#<param name="key"> A STRING name value of the column to update </param>
#<param name="value"> The value of any accepted type to be the updated value </param>
def update_Error_Log_table(self, event_id, key, value):
#if the value passed is a string, it should be encapsulated in quotes before execution.
if isinstance(value, basestring): sql_string = "UPDATE Error_Log SET {0} = '{1}' where event_id = {2}".format(key, value, event_id)
else: sql_string = 'UPDATE Error_Log SET {0} = {1} where event_id = {2}'.format(key, value, event_id)
self.sql_do(sql_string)
# <summary> Inserts a single record into the Error_Log table</summary>
    # <param name="insert_data"> Receives a dict whose keys coincide with the table column names:
    #    error_details = STRING with all of the error info.
    #    error_date_time = a string with the time and date of the error occurring
# api_response (optional) = the returned integer value from the attempt to upload to the API endpoint
# </param>
# <returns> Returns the SQL string that was processed. </returns>
def insert_Error_Log_table(self, insert_data):
acceptable_data = ('error_details', 'error_date_time', 'api_response')
value_names = "INSERT INTO Error_Log ( "
value_data = " ) VALUES ( "
#create two strings for the dictionary and its values
for column in insert_data:
if column in acceptable_data:
value_names += column + ","
#if the value passed into column is of string type, encapsulate in quotes for SQL processing
if isinstance(insert_data[column], basestring): value_data += "'" + insert_data[column] + "',"
else: value_data += str(insert_data[column]) + "," #else pass the value directly
#combine the two strings
value_names = value_names[:-1] #trim the last ','
value_data = value_data[:-1] #trim the last ','
sql_string = value_names + value_data + " );"
#pass the string to be processed
self.sql_do(sql_string)
return sql_string
#- - - - - - - - - - - - - - - UPDATE LOG TABLE - - - - - - - - - - - - - - - -
#<summary> Creates a brand new table in the connected db with the name "Update_Log" </summary>
def create_Update_Log_table(self):
print("No update log found. Creating Update_Log table.")
self.sql_do('DROP TABLE IF EXISTS Update_Log')
self.sql_do('''CREATE TABLE Update_Log ( event_id INTEGER PRIMARY KEY NOT NULL,
updated_file_name STRING,
module_previous_version STRING,
module_version STRING,
update_version STRING,
update_start_date_time STRING,
update_stop_date_time STRING,
update_status STRING,
api_response INTEGER DEFAULT 0);''')
#<summary>Generator that returns all of the records of the Update_Log table</summary>
#<returns>Returns a dictionary with the current record's information</returns>
def fetch_all_from_Update_Log_table(self):
alldata = self.sql_do('SELECT * FROM Update_Log')
for record in alldata.fetchall():
yield dict(record)
#<summary>Generator that returns all of the records of the Update_Log table
# whose api repsonse is not equal to 201 (success) </summary>
#<returns>Returns a dictionary with the current record's information</returns>
def fetch_unsent_from_Update_Log_table(self):
alldata = self.sql_do('SELECT * FROM Update_Log WHERE api_response != 201')
for record in alldata.fetchall():
yield dict(record)
#<summary>Fetches the information of a specific Update_Log record by its event_id </summary>
#<param name="event_id">An INTEGER that identifies the record to be fetched </param>
#<returns>A dictionary item with the record's information if it exists, else None </returns>
def fetch_row_from_Update_Log_table(self, event_id):
sql_string = 'SELECT * FROM Update_Log WHERE event_id = {}'.format(event_id)
rowdata = self.sql_do(sql_string)
rowdata = rowdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if rowdata!= None: return dict(rowdata)
else: return rowdata #return None if the result is empty
#<summary> Fetches the information of the last record in Update_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
def fetch_last_from_Update_Log_table(self):
lastdata = self.sql_do('SELECT * FROM Update_Log ORDER BY event_id DESC LIMIT 1;')
lastdata = lastdata.fetchone() #HINT: never run fetchone() twice, it returns a null value
if lastdata != None: return dict(lastdata)
else: return lastdata #return None if the result is empty
#<summary>Updates a specific column of a specific record in Update_Log with
# a specific value, and saves the changes. </summary>
#<param name="event_id"> An INTEGER that identifies the record to be updated </param>
#<param name="key"> A STRING name value of the column to update </param>
#<param name="value"> The value of any accepted type to be the updated value </param>
def update_Update_Log_table(self, event_id, key, value):
#if the value passed is a string, it should be encapsulated in quotes before execution.
if isinstance(value, basestring): sql_string = "UPDATE Update_Log SET {0} = '{1}' where event_id = {2}".format(key, value, event_id)
else: sql_string = 'UPDATE Update_Log SET {0} = {1} where event_id = {2}'.format(key, value, event_id)
self.sql_do(sql_string)
# <summary> Inserts a single record into the Update_Log table</summary>
    # <param name="insert_data"> Receives a dict whose keys coincide with the table column names:
    #    updated_file_name = the STRING name value of the file that was updated
    #    module_previous_version = the STRING value of the module's original version
    #    module_version = the STRING value of the module's current version
    #    update_version = the STRING value of the class' updated version
    #    update_start_date_time = STRING value of when the download started
    #    update_stop_date_time = STRING value of when the download ended
# update_status = STRING stating if download has started, is busy or successful
# api_response = the returned integer value from the attempt to upload to the API endpoint
# </param>
# <returns> Returns the SQL string that was processed. </returns>
def insert_Update_Log_table(self, insert_data):
acceptable_data = ('updated_file_name', 'module_previous_version','module_version', 'update_version',
'update_start_date_time', 'update_stop_date_time',
'update_status', 'api_response')
        value_names = "INSERT INTO Update_Log ( "
value_data = " ) VALUES ( "
#create two strings for the dictionary and its values
for column in insert_data:
if column in acceptable_data:
value_names += column + ","
#if the value passed into column is of string type, encapsulate in quotes for SQL processing
if isinstance(insert_data[column], basestring): value_data += "'" + insert_data[column] + "',"
else: value_data += str(insert_data[column]) + "," #else pass the value directly
#combine the two strings
value_names = value_names[:-1] #trim the last ','
value_data = value_data[:-1] #trim the last ','
sql_string = value_names + value_data + " );"
#pass the string to be processed
self.sql_do(sql_string)
return sql_string
#- - - - - - - - - - - - - - - - - -DECORATORS- - - - - - - - - - - - - - - - -
#<summary> Exposes the information of the last record in Event_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
@property
def last_Event_entry(self):
return self.fetch_last_from_Event_Log_table()
#<summary> Exposes the information of the last record in Satus_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
@property
def last_Satus_entry(self):
return self.fetch_last_from_Satus_Log_table()
#<summary> Exposes the information of the last record in Error_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
@property
def last_Error_entry(self):
return self.fetch_last_from_Error_Log_table()
#<summary> Exposes the information of the last record in Update_Log </summary>
#<returns> A dictionary item with the record's information if it exists, else None </returns>
@property
def last_Update_entry(self):
return self.fetch_last_from_Update_Log_table()
@property
def thread_lock(self):
return self.get_thread_lock()
@thread_lock.setter
def thread_lock(self, sqlock):
self.set_thread_lock(sqlock)
#---------------------------------SQLOG CLASS END------------------------------
#<summary>Test code for the class</summary>
def SQLogClassTestCode():
#-----------------------------------------------------------#
# Here I will only try to show the function of the Event_Log#
# table in a test file of our own creation. Please note that#
# the same functions are available for all the tables. #
#-----------------------------------------------------------#
# First we create an instance of the SQLog class within which we
# create or connect to a database with a specific file name
instance_of_SQLog = SQLog(filename = "bubblez.db")
# Simply, we need to first create a dictionary that holds all the data
event_information = {
"gunshot_event" : "JSON EVENT DATA",
"filepath" : "../SOUND_FILES/1.wav",
"api_response" : 202
}
#create a new event entry
instance_of_SQLog.insert_Event_Log_table(event_information)
#the problem is that an event takes a JSON string as event_data
#to achieve this, you merely import JSON as follows:
import json
#good. now we have the dictionary of values:
gunshot_event_data = { 'gun' : 'bazooka', #note how the squiggly bracket is how we make dictionaries
'shots_fired':100,
'screams_in_agony' : True
}
print("gunshot_event_data is of type: " + str(type(gunshot_event_data))) #just to prove it's a dictionary
#finally we parse the values into JSON format
gunshot_event_data = json.dumps(gunshot_event_data)
#amazing! Now it is a JSON string! We then create a dictionary just like above:
event_information = {
"gunshot_event" : gunshot_event_data,
"filepath" : "../SOUND_FILES/1.wav",
"api_response" : 202
}
#Then we pass it into the event logging function
instance_of_SQLog.insert_Event_Log_table(event_information)
#if we want to display the last inserted record of event_data
print("\n\nlast data value:")
last_entry_is_a_dictionary = instance_of_SQLog.last_Event_entry
if last_entry_is_a_dictionary != None: #if the table is empty, it returns None
for each_item in last_entry_is_a_dictionary:
print(str(each_item) + " = " + str(last_entry_is_a_dictionary[each_item]))
#if we want to display the a specific record of event_data
print("\n\nvalue at event_id = 1:")
specific_entry_is_a_dictionary = instance_of_SQLog.fetch_row_from_Event_Log_table(event_id = 1) #the records start at 1 and not at 0 like arrays
if specific_entry_is_a_dictionary != None: #if the table is empty, it returns None
for each_item in specific_entry_is_a_dictionary:
print(str(each_item) + " = " + str(specific_entry_is_a_dictionary[each_item]))
#now if we have detected a change and want to change a specific value in a table
instance_of_SQLog.update_Event_Log_table(event_id = 2, #the record to change
key = 'api_response', #the item to change
value = 201) #its new value
#just as easily, again, we can change the last record value like so:
instance_of_SQLog.update_Event_Log_table(event_id = instance_of_SQLog.last_Event_entry['event_id'], #the record to change
key = 'api_response', #the item to change
value = 201) #its new value
#if we want to list all of the entries that have not been updated to the server we say (api_response != 201):
print("\n\nAll unsent records:")
for each_unupdated_record in instance_of_SQLog.fetch_unsent_from_Event_Log_table():
print("->")
for each_item in each_unupdated_record:
print(" " + str(each_item) + " = " + str(each_unupdated_record[each_item]))
#finally, we can list all of the data in the entire table
print("\n\nAll records ever:")
for each_record in instance_of_SQLog.fetch_all_from_Event_Log_table():
print("->")
for each_item in each_record:
print(" " + str(each_item) + " = " + str(each_record[each_item]))
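    # Added illustration (not in the original test): the banner above notes that the same
    # helpers exist for every table, so an Error_Log record can be written the same way.
    # The keys below follow create_Error_Log_table(); the values are made-up examples.
    error_information = {
        "error_details": "example traceback or error description",
        "error_date_time": "2019-01-01 12:00:00",
        "api_response": 0
    }
    instance_of_SQLog.insert_Error_Log_table(error_information)
    print("\n\nlast error entry:")
    print(instance_of_SQLog.last_Error_entry)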
if __name__ == "__main__": SQLogClassTestCode()
| [
"antony.smith411@gmail.com"
] | antony.smith411@gmail.com |
0e43b23e908a9cd1c8e81a140d24dc97f62181e0 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /K4ifFgsgRitT7MDiz_16.py | 77b1c214fd6ed0e5db1bfb8bf0b05c6fbe946497 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py |
template = 'My {} is: {:{}{}{}}.'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a842165c139624026322c930813f9c75e602bb52 | 0e4a037398cf0a54004d4ab272ebe4ddb296fe7f | /config/settings.py | a4245207b9b55badc3bf291ebc7977ed55f94861 | [] | no_license | sug5806/Blog | 9aacf1c8c1d78688163ae9ef93d23bc252b2b1ec | aa24629722c079f768d7fcde3a937223cb2ff5e0 | refs/heads/develop | 2022-12-17T16:48:09.204870 | 2019-06-01T12:27:30 | 2019-06-01T12:27:30 | 188,932,485 | 0 | 1 | null | 2022-12-08T05:10:54 | 2019-05-28T01:34:59 | Python | UTF-8 | Python | false | false | 3,601 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')95clqh=z^rk6asisdh!!r=w!p-m#_^3cb)n*mt1=9tq$$c9^z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'post',
'ckeditor',
'sslserver',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.naver',
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'common_templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
SITE_ID = 1
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_REDIRECT_URL = '/' | [
"sug5806@gmail.com"
] | sug5806@gmail.com |
77120a6fa5a36871153e38ba22f1a9f83d82483e | 0adf94fc39a02018165b62e93dd83edddd041230 | /.history/Jobs/views/views_1_20190221112754.py | 2132bf17a5068dace16ac781e78dd077e3aeab86 | [] | no_license | SabitDeepto/BrJobs | 1e3baa143331cf46b9c70911c6644d1efd4fffd6 | 1a458c8c667f8093a2325d963e5542655467c7aa | refs/heads/master | 2020-04-24T08:02:26.350007 | 2019-03-17T05:53:30 | 2019-03-17T05:53:30 | 171,818,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | # from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db import transaction
from django.shortcuts import redirect, render
from django.utils.translation import gettext as _
# from .models import SolutionPost
# NOTE (added): UserForm and ProfileForm are assumed to come from this app's forms module;
# adjust this import if they are defined elsewhere.
from .forms import ProfileForm, UserForm
def home(request):
return render(request, 'basic/index.html')
def test(request):
return render(request, 'test.html')
# def update_profile(request, user_id):
# user = User.objects.get(pk=user_id)
# user.profile.bio = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit...'
# user.save()
# @login_required
@transaction.atomic
def update_profile(request):
if request.method == 'POST':
user_form = UserForm(request.POST, instance=request.user)
profile_form = ProfileForm(request.POST, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, _('Your profile was successfully updated!'))
return redirect('settings:profile')
else:
messages.error(request, _('Please correct the error below.'))
else:
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, 'profiles/profile.html', {
'user_form': user_form,
'profile_form': profile_form
}) | [
"deepto69@gmail.com"
] | deepto69@gmail.com |
87193fc338f23a42a20af7ae9ae3eaa872a6f802 | c533017c711b7edf3fb8590504c983286a509804 | /logging_level.py | 8fe5066a5caa1ec87ca7e6a6cb84c5d8893826b2 | [] | no_license | zgotter/Flask | 78dface6b7968087aa19f7221d21d3dbd8793dda | c0ed83812a4cdd7ce70ca900712c6ac40f9c71d0 | refs/heads/master | 2022-06-17T18:41:28.008422 | 2020-05-08T08:05:06 | 2020-05-08T08:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | import logging
# To also write the log to a file, add the filename='test.log' parameter.
logging.basicConfig(filename='test.log', level=logging.DEBUG)
logging.debug("debug")
logging.info("info")
logging.warning("warning")
logging.error("error")
logging.critical("critical")
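# Added note: with level=logging.DEBUG every record above ends up in test.log; raising the
# level (e.g. level=logging.WARNING) would filter out the debug and info records.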
| [
"shkim4738@gmail.com"
] | shkim4738@gmail.com |
2a36a2937b9885b3dd22b6f636af9dbbc8d2d620 | ca66a4283c5137f835377c3ed9a37128fcaed037 | /djangoPIWebsite/pages/urls.py | 27c2ff3beec290d2c3c8ca0f634526b361686a73 | [] | no_license | NamithaKonda09/majorProject | f377f7a77d40939a659a3e59f5f1b771d88889ad | 4eff4ff18fa828c6278b00244ff2e66522e0cd51 | refs/heads/master | 2023-06-04T20:25:38.450271 | 2021-06-24T19:03:46 | 2021-06-24T19:03:46 | 370,240,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.analysis, name='home'),
path('dashboard/', views.dashboard, name='dashboard'),
path('annotate/', views.upload_annotate, name='annotate'),
path('test/', views.upload_test, name='test'),
path('analysis/', views.analysis, name='analysis'),
path('result/', views.result, name='result'),
path('testresult/', views.testresult, name='testresult'),
]
| [
"namithakonda09@gmail.com"
] | namithakonda09@gmail.com |
425c8d1b40bc20b6c30f8231fc233c7a216592df | d66818f4b951943553826a5f64413e90120e1fae | /hackerrank/Algorithms/Circular Array Rotation/solution.py | 2b18de59ae789f0cc2f536aa5aec470e5256052f | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 658 | py | #!/bin/python3
import os
# Complete the circularArrayRotation function below.
def circularArrayRotation(a, k, queries):
l = len(a)
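    # Added note: rotating right by k means the element at result index q comes from the
    # original index (q - k) mod l, written here as (l - k % l + q) % l to stay non-negative.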
return [a[(l - k % l + q) % l] for q in queries]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nkq = input().split()
n = int(nkq[0])
k = int(nkq[1])
q = int(nkq[2])
a = list(map(int, input().rstrip().split()))
queries = []
for _ in range(q):
queries_item = int(input())
queries.append(queries_item)
result = circularArrayRotation(a, k, queries)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
a0447be691ddc7f0c16a8a89de42c18b13880fc5 | 147fd8d2fa164c173d3763b868d9df790045e53e | /file_build_failures.py | 8c193f0e151dfb76a8cc58d1c4eb90cf4613d126 | [
"CC0-1.0"
] | permissive | hroncok/mini-mass-rebuild | d49c0da413d48a3092e0393f02d4f7b3015e5022 | f12cae9ad7277c5524628a8da31c884e172a12e8 | refs/heads/master | 2022-03-11T15:32:55.818761 | 2022-01-31T11:18:12 | 2022-01-31T11:18:38 | 122,631,346 | 5 | 3 | null | 2020-10-30T12:56:06 | 2018-02-23T14:26:24 | Python | UTF-8 | Python | false | false | 2,172 | py | import bugzilla
import pathlib
import time
import sys
import webbrowser
from urllib.parse import urlencode
from textwrap import dedent
BUGZILLA = 'bugzilla.redhat.com'
TRACKER = 1686977 # PYTHON38
def bugzillas():
bzapi = bugzilla.Bugzilla(BUGZILLA)
query = bzapi.build_query(product='Fedora')
query['blocks'] = TRACKER
return sorted(bzapi.query(query), key=lambda b: -b.id)
def bug(bugs, package):
for b in bugs:
if b.component == package:
return b
return None
def open_bz(package):
summary = f"{package} fails to build with Python 3.8 on Fedora 32+"
description = dedent(f"""
{package} fails to build with Python 3.8.0b4 in Fedora 32.
See the build failures at https://koji.fedoraproject.org/koji/search?match=glob&type=package&terms={package}
...
It is not important whether the problem is relevant to Python 3.8, this issue is blocking the Python 3.8 rebuilds.
If this package won't build with 3.8, it won't be installable, along with all its dependent packages, in Fedora 32 and further.
Furthermore, as it fails to install, its dependent packages will fail to install and/or build as well.
Please rebuild the package in Fedora 32 (rawhide).
Let us know here if you have any questions. Thank You!
""")
url_prefix = 'https://bugzilla.redhat.com/enter_bug.cgi?'
params = {
'short_desc': summary,
'comment': description,
'component': package,
'blocked': 'PYTHON38,F32FTBFS,F32FailsToInstall',
'product': 'Fedora',
'version': 'rawhide',
'bug_severity': 'high',
}
webbrowser.open(url_prefix + urlencode(params))
time.sleep(1)
webbrowser.open(f'https://koji.fedoraproject.org/koji/search?match=glob&type=package&terms={package}')
time.sleep(1)
pkgs = pathlib.Path(sys.argv[1]).read_text().splitlines()
print('Getting bugzillas...', end=' ', flush=True)
bugs = bugzillas()
print('..done.')
for pkg in pkgs:
bz = bug(bugs, pkg)
if bz:
print(f'{pkg} bz{bz.id} {bz.status}')
if not bz or bz.status == 'CLOSED':
open_bz(pkg)
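# Added usage note: invoke as `python file_build_failures.py <file with one package name per line>`;
# for each listed package that has no open tracking bug the script opens a pre-filled Bugzilla
# form and the matching Koji search in the web browser.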
| [
"miro@hroncok.cz"
] | miro@hroncok.cz |
dc6c93b1ba2ac42b69d9d29f8212a43187f85984 | 4a59e35a12af911f588224f07aab52d24fd6b044 | /venv/lib/python2.7/site-packages/mbed_host_tests/host_tests_conn_proxy/conn_primitive_remote.py | bc5e0168c2d0d9dc23072be057abf7bb022bd8f6 | [] | no_license | ryankurte/mbed-node | 95caba48404e06c4f21f48a850152c08d911bbc8 | 3584d391fca00fc3cda138c26ae28fdbe5527d83 | refs/heads/master | 2021-01-22T13:23:08.506740 | 2017-11-02T17:40:26 | 2017-11-02T17:40:26 | 100,665,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,698 | py | #!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mbed_host_tests import DEFAULT_BAUD_RATE
from mbed_host_tests.host_tests_conn_proxy.conn_primitive import ConnectorPrimitive
class RemoteConnectorPrimitive(ConnectorPrimitive):
def __init__(self, name, config):
ConnectorPrimitive.__init__(self, name)
self.config = config
self.target_id = self.config.get('target_id', None)
self.grm_host = config.get('grm_host', None)
self.grm_port = int(config.get('grm_port', 8000))
self.grm_module = config.get('grm_module', 'unknown')
self.platform_name = config.get('platform_name', None)
self.baudrate = config.get('baudrate', DEFAULT_BAUD_RATE)
self.image_path = config.get('image_path', None)
self.polling_timeout = int(config.get('polling_timeout', 60))
# Global Resource Mgr tool-kit
self.remote_module = None
self.selected_resource = None
self.client = None
# Initialize remote resource manager
self.__remote_init()
def __remote_init(self):
"""! Initialize DUT using GRM APIs """
# We want to load global resource manager module by name from command line (switch --grm)
try:
self.remote_module = __import__(self.grm_module)
except ImportError as e:
self.logger.prn_err("unable to load global resource manager '%s' module!"% self.grm_module)
self.remote_module = None
return False
self.logger.prn_inf("remote resources initialization: remote(host=%s, port=%s)"% (self.grm_host, self.grm_port))
# Connect to remote global resource manager
self.client = self.remote_module.create(host=self.grm_host, port=self.grm_port)
# First get the resources
resources = self.client.get_resources()
self.logger.prn_inf("remote resources count: %d" % len(resources))
# Query for available resource
# Automatic selection and allocation of a resource
try:
self.selected_resource = self.client.allocate({
"platform_name": self.platform_name
})
except self.remote_module.resources.ResourceError as e:
self.logger.prn_err("can't allocate resource: '%s', reason: %s"% (self.platform_name, str(e)))
return False
# Remote DUT connection, flashing and reset...
try:
self.__remote_disconnect()
self.__remote_flashing(self.image_path)
self.__remote_connect(baudrate=self.baudrate)
self.__remote_reset()
except Exception as e:
self.logger.prn_err(str(e))
return False
return True
def __remote_connect(self, baudrate=DEFAULT_BAUD_RATE, buffer_size=6):
"""! Open remote connection to DUT """
self.logger.prn_inf("opening connection to platform at baudrate='%s, bufferSize=%d'"% (baudrate, buffer_size))
if not self.selected_resource:
raise Exception("remote resource not exists!")
try:
serial_parameters = self.remote_module.SerialParameters(lineMode=False, baudrate=baudrate, bufferSize=buffer_size)
self.selected_resource.openConnection(parameters=serial_parameters)
except self.remote_module.resources.ResourceError as e:
self.logger.prn_inf("openConnection() failed")
raise e
def __remote_disconnect(self):
if not self.selected_resource:
raise Exception("remote resource not exists!")
if self.selected_resource.is_connected:
self.selected_resource.closeConnection()
def __remote_reset(self):
"""! Use GRM remote API to reset DUT """
self.logger.prn_inf("remote resources reset...")
if not self.selected_resource:
raise Exception("remote resource not exists!")
if not self.selected_resource.reset():
raise Exception("remote resources reset failed!")
def __remote_flashing(self, filename, forceflash=False):
"""! Use GRM remote API to flash DUT """
self.logger.prn_inf("remote resources flashing with '%s'..."% filename)
if not self.selected_resource:
raise Exception("remote resource not exists!")
if not self.selected_resource.flash(filename, forceflash=forceflash):
raise Exception("remote resources flashing failed!")
def read(self, count):
"""! Read 'count' bytes of data from DUT """
if not self.selected_resource:
raise Exception("remote resource not exists!")
        data = str()
try:
data = self.selected_resource.read(count)
except self.remote_module.resources.ResourceError as e:
self.logger.prn_err("RemoteConnectorPrimitive.read(%d): %s"% (count, str(e)))
return data
def write(self, payload, log=False):
"""! Write 'payload' to DUT """
if self.selected_resource:
self.selected_resource.write(payload)
if log:
self.logger.prn_txd(payload)
return True
def flush(self):
pass
def connected(self):
return all([self.remote_module,
self.selected_resource,
self.selected_resource.is_connected])
def finish(self):
# Finally once we're done with the resource
# we disconnect and release the allocation
if self.selected_resource:
try:
if self.selected_resource.is_connected:
self.selected_resource.closeConnection()
if self.selected_resource.is_allocated:
self.selected_resource.release()
self.selected_resource = None
except self.remote_module.resources.ResourceError as e:
self.logger.prn_err("RemoteConnectorPrimitive.finish() failed, reason: " + str(e))
def __del__(self):
self.finish()
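# Hedged construction sketch (added, values are illustrative only) showing the config keys
# read in __init__:
#
#   conn = RemoteConnectorPrimitive('remote', {
#       'grm_host': '10.0.0.5', 'grm_port': 8000, 'grm_module': 'my_grm_client',
#       'platform_name': 'K64F', 'image_path': 'build/tests.bin', 'baudrate': 115200})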
| [
"ryan.kurte@trutest.co.nz"
] | ryan.kurte@trutest.co.nz |
3e942e91f7d2d29231fc55e21c086b11c76337b1 | e288180c977c8fccf31c00bb74b7e8f56ee69303 | /vkrb/newsitem/views.py | ca6ce1be70905efd68a1aa31f6f346ac625c8fed | [] | no_license | kaluginadaria/vkrb-back | 32e0c9aef7a647ea2a2e399c8d999622e993a433 | d037baaa9f17cb038d41dda5dfbf1dbb56acdf90 | refs/heads/master | 2022-12-07T23:36:32.902662 | 2019-05-22T15:06:31 | 2019-05-22T15:06:31 | 179,382,015 | 0 | 0 | null | 2022-11-22T02:38:25 | 2019-04-03T22:52:30 | Python | UTF-8 | Python | false | false | 5,188 | py | from django import forms
from django.contrib.contenttypes.models import ContentType
from django_serializer.base_views import (ListView,
DetailsView,
BaseView,
CreateView,
DeleteView)
from django_serializer.exceptions import ServerError
from django_serializer.mixins import ObjectMixin, SerializerMixin
from django_serializer.permissions import (
PermissionsModelMixin,
PermissionsMixin,
)
from vkrb.core.mixins import EventMixin, LimitOffsetFullPaginator
from vkrb.core.utils import get_absolute_bundle_urls, render_to_pdf
from vkrb.favorites.forms import FavoriteForm
from vkrb.favorites.models import FavoriteItem
from vkrb.newsitem.models import NewsItem, CategoryNewsItem
from vkrb.newsitem.serializers import (
NewsItemSerializer,
CategoryNewsItemSerializer,
)
class NewsListView(EventMixin, ListView):
class CategoryForm(forms.Form):
category_id = forms.ModelChoiceField(CategoryNewsItem.objects.all(),
required=False)
def get_serializer_kwargs(self, obj, **kwargs):
serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
serializer_kwargs['user'] = self.request.user
return serializer_kwargs
def get_queryset(self):
queryset = super().get_queryset()
category = self.request_args.get('category_id')
if category:
return queryset.filter(category=category)
return queryset
section = 'news'
args_form = CategoryForm
authorized_permission = (PermissionsModelMixin.Permission.R,)
paginator = LimitOffsetFullPaginator
model = NewsItem
serializer = NewsItemSerializer
class NewsGetView(DetailsView):
authorized_permission = (PermissionsModelMixin.Permission.R,)
model = NewsItem
serializer = NewsItemSerializer
def get_serializer_kwargs(self, obj, **kwargs):
serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
serializer_kwargs['user'] = self.request.user
return serializer_kwargs
class CategoryView(ListView):
authorized_permission = (PermissionsModelMixin.Permission.R,)
model = CategoryNewsItem
serializer = CategoryNewsItemSerializer
paginator = LimitOffsetFullPaginator
def get_queryset(self):
return self.model.objects.all().order_by('order')
class CreatePDFView(ObjectMixin, PermissionsMixin, BaseView):
model = NewsItem
authorized_permission = (PermissionsModelMixin.Permission.R,)
def response_wrapper(self, response):
return response
def get(self, request, *args, **kwargs):
self.check_r_permission(self.request.user)
newsitem = self.get_object()
ctx = {
'photos': newsitem.attachments.all().order_by('attachments__order'),
'newsitem': newsitem,
'css_urls': get_absolute_bundle_urls('pdf', 'css'),
}
return render_to_pdf(template_path='newsitem.html', ctx=ctx)
class FavoriteNewsCreateView(CreateView):
authorized_permission = (PermissionsModelMixin.Permission.R,
PermissionsModelMixin.Permission.W)
serializer = NewsItemSerializer
form_class = FavoriteForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['content_type'] = 'newsitem'
return kwargs
def get_serializer_kwargs(self, obj, **kwargs):
serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
serializer_kwargs['user'] = self.request.user
return serializer_kwargs
def post(self, request, *args, **kwargs):
inst = super().post(request, *args, **kwargs)
return inst.content_object
class FavoriteNewsDeleteView(SerializerMixin, DeleteView):
authorized_permission = (PermissionsModelMixin.Permission.R,
PermissionsModelMixin.Permission.D)
model = FavoriteItem
serializer = NewsItemSerializer
class FavoriteForm(forms.Form):
object_id = forms.IntegerField()
def get_args_form(self):
return self.FavoriteForm
def get_object(self):
content_type = ContentType.objects.get(model='newsitem')
try:
return self.model.objects.get(user=self.request.user,
content_type=content_type,
object_id=self.request_args['object_id'])
except self.model.DoesNotExist:
raise ServerError(ServerError.NOT_FOUND)
def get_serializer_kwargs(self, obj, **kwargs):
serializer_kwargs = super().get_serializer_kwargs(obj, **kwargs)
serializer_kwargs['user'] = self.request.user
return serializer_kwargs
def post(self, request, *args, **kwargs):
super().post(request, *args, **kwargs)
try:
return NewsItem.objects.get(id=self.request_args['object_id'])
except NewsItem.DoesNotExist:
raise ServerError(ServerError.NOT_FOUND)
| [
"d.kalugina@ktsstudio.ru"
] | d.kalugina@ktsstudio.ru |
77516ca559071c396386ca15fe21b648acd1f37d | 4148260054c2cf4605dacb8bdef3605c82eca470 | /temboo/Library/RapidShare/AddRealFolder.py | a08056f99e344635a1c16c36fd020a144dfabb6a | [] | no_license | wimsy/actuarize-web | 0f23d5f00afe3d36d430621cdb497d2e64998416 | 5f43af3019da6fb08cafeec9ff0a89df5196b864 | refs/heads/master | 2021-03-12T19:38:21.887681 | 2012-12-19T01:13:50 | 2012-12-19T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# AddRealFolder
# Creates a new folder in RapidShare.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class AddRealFolder(Choreography):
"""
Create a new instance of the AddRealFolder Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/RapidShare/AddRealFolder')
def new_input_set(self):
return AddRealFolderInputSet()
def _make_result_set(self, result, path):
return AddRealFolderResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return AddRealFolderChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the AddRealFolder
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class AddRealFolderInputSet(InputSet):
"""
Set the value of the Login input for this choreography. ((required, string) Your RapidShare username)
"""
def set_Login(self, value):
InputSet._set_input(self, 'Login', value)
"""
Set the value of the Name input for this choreography. ((required, string) The name of the folder (Max character length is 250 bytes))
"""
def set_Name(self, value):
InputSet._set_input(self, 'Name', value)
"""
Set the value of the Parent input for this choreography. ((optional, integer) The ID of the parent folder. Defaults to 0 for 'root'.)
"""
def set_Parent(self, value):
InputSet._set_input(self, 'Parent', value)
"""
Set the value of the Password input for this choreography. ((required, password) Your RapidShare password)
"""
def set_Password(self, value):
InputSet._set_input(self, 'Password', value)
"""
A ResultSet with methods tailored to the values returned by the AddRealFolder choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class AddRealFolderResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. ((string) The response from RapidShare. The id of the newly created folder should be returned in the response upon a successful execution.)
"""
def get_Response(self):
return self._output.get('Response', None)
class AddRealFolderChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return AddRealFolderResultSet(response, path)
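# Added usage outline (a sketch based only on the classes above): obtain an input set with
# AddRealFolder(session).new_input_set(), populate it via set_Login/set_Password/set_Name
# (and optionally set_Parent), then execute the choreography through the Temboo session to
# receive an AddRealFolderResultSet whose get_Response() holds the new folder id.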
| [
"mike.wimsatt@gmail.com"
] | mike.wimsatt@gmail.com |
29e30f6c5d806f391e7443ee20d901d615f8045e | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/codingbat/codingbat-solutions-master/Python/List-2/sum67.py | fb8d4799059832c94349de8442523ed1b0a77f34 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 538 | py | """
Return the sum of the numbers in the array,
except ignore sections of numbers starting with a 6 and
extending to the next 7 (every 6 will be followed by at least one 7).
Return 0 for no numbers.
sum67([1, 2, 2]) → 5
sum67([1, 2, 2, 6, 99, 99, 7]) → 5
sum67([1, 1, 6, 7, 2]) → 4
"""
def sum67(nums):
record = True
total = 0
for n in nums:
if n == 6:
record = False
if record:
total += n
continue
if n == 7:
record = True
return total
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3355d566237ae53f9ec0cd4778ba796a468515fa | de644b254b17a28f82e9212d80872a3d9eca2149 | /lib/mock/asset/CodeTilesetAsset.py | 1a516b75ee1713f62b14e116e258e5eda5c53cdf | [
"MIT"
] | permissive | pixpil/gii | 506bee02b11eb412016b583d807dcfcc485e189c | ba6d94ada86d82bacae06f165567a02585264440 | refs/heads/master | 2021-12-03T06:30:31.503481 | 2021-11-24T03:02:49 | 2021-11-24T03:02:49 | 431,331,021 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import os.path
import json
from gii.core import AssetManager, AssetLibrary, getProjectPath, app, JSONHelper
from .helper.psd2tileset import TilesetProject
from gii.core.CommonAsset.DataYAMLAsset import DataYAMLAssetManager
def _getModulePath( path ):
import os.path
return os.path.dirname( __file__ ) + '/' + path
class CodeTilesetAssetManager(DataYAMLAssetManager):
def getName(self):
return 'asset_manager.code_tileset'
def acceptAssetFile(self, filepath):
if not os.path.isfile(filepath): return False
if not filepath.endswith( '.code_tileset' ): return False
return True
def postDataLoad( self, node, data ):
tiles = data.get( 'tiles', None )
if not tiles: return
id = 0
for key, value in list(tiles.items()):
id +=1
value[ '_id' ] = id
def importAsset(self, node, reload = False ):
imported = super( CodeTilesetAssetManager, self ).importAsset( node, reload )
node.assetType = 'code_tileset'
return imported
CodeTilesetAssetManager().register()
AssetLibrary.get().setAssetIcon( 'code_tileset', 'cell' )
| [
"tommo.zhou@gmail.com"
] | tommo.zhou@gmail.com |
77f288050a171578be6b3051b61925b4a58bd900 | 75c7004744315a22afdad8a68f20c06b8d3efad0 | /网络设备脚本/思科命令行/路由映射.py | b2091c2660925823710d7ecc04e4199e812979c2 | [
"MIT"
] | permissive | cflw/network_device_script | b13cde8719f23402cdd6acd3ca9048a7d65952aa | c3644e933a3c557c44951a0a1994a49357e49c02 | refs/heads/master | 2023-08-03T11:00:29.188101 | 2023-07-29T13:58:09 | 2023-07-29T13:58:09 | 182,526,402 | 18 | 12 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | from ..基础接口 import 操作
from ..基础接口 import 策略
from ..命令行接口 import 模式
from ..命令行接口 import 命令
from ..命令行接口 import 路由映射
from . import 访问控制列表 as 访问列表
class C路由映射组(路由映射.I路由映射组, 模式.C同级模式):
def __init__(self, a, a名称):
路由映射.I路由映射组.__init__(self, a, a名称)
def f模式_节点(self, a序号 = 10, a动作 = True, a操作 = None):
return C路由映射节点(self, a序号, a动作)
@策略.A自动策略()
class C路由映射节点(路由映射.I路由映射节点):
def __init__(self, a, a序号, a动作):
路由映射.I路由映射节点.__init__(self, a, a序号, a动作)
    def fg进入命令(self):
        """route-map <name> [action] [sequence-number]"""
return f"route-map {self.m名称} {访问列表.f生成允许(self.m动作)} {self.m序号}"
    # Match clauses
@策略.A匹配(策略.E类型.e访问列表)
def f匹配_访问列表(self, a访问列表, a操作 = 操作.E操作.e添加):
if isinstance(a访问列表, 访问列表.I访问控制列表):
v命令 = f"match {a访问列表.m协议} address {a访问列表.m名称}"
else:
v命令 = f"match ip address {str(a访问列表)}"
self.f执行当前模式命令(v命令)
    # Set clauses
@策略.A设置(策略.E类型.e下一跳4)
def f设置_下一跳4(self, a地址, a操作 = 操作.E操作.e添加):
v命令 = f"set ip next-hop {a地址}"
self.f执行当前模式命令(v命令)
@策略.A设置(策略.E类型.e默认下一跳4)
def f设置_默认下一跳4(self, a地址, a操作 = 操作.E操作.e添加):
v命令 = f"set ip default next-hop {a地址}"
self.f执行当前模式命令(v命令)
@策略.A设置(策略.E类型.e出接口)
def f设置_出接口(self, a接口, a操作 = 操作.E操作.e添加):
v命令 = f"set interface {a接口}"
self.f执行当前模式命令(v命令)
@策略.A设置(策略.E类型.e默认出接口)
def f设置_默认出接口(self, a接口, a操作 = 操作.E操作.e添加):
v命令 = f"set default interface {a接口}"
self.f执行当前模式命令(v命令)
| [
"cflw@outlook.com"
] | cflw@outlook.com |
bc1f2755b787437621a08fd37cef0f3df85c95da | c2b8adb8b4062a14bfc7d8c8fa2938359530e028 | /mfes/evaluate_function/eval_sys_poker.py | 6b834b866e2eb3caf02c64e7d4631749ec387747 | [] | no_license | thomas-young-2013/hp-tuner | 1e7d277f3c0135b9032884e3f20b050f19012918 | e606569719a14d8445633e42aedc8296a63a577a | refs/heads/master | 2023-04-15T08:41:02.514912 | 2020-09-14T13:23:55 | 2020-09-14T13:23:55 | 225,173,361 | 0 | 2 | null | 2023-03-24T22:31:25 | 2019-12-01T14:17:29 | Python | UTF-8 | Python | false | false | 405 | py | from __future__ import division, print_function, absolute_import
import os
import sys
from functools import partial
sys.path.append(os.getcwd())
from solnml.datasets.utils import load_train_test_data
from mfes.evaluate_function.sys.combined_evaluator import train as _train
train_node, test_node = load_train_test_data('poker', data_dir='./', task_type=0)
train = partial(_train, data_node=train_node)
| [
"459240868@qq.com"
] | 459240868@qq.com |
bf59c63dc485ae90cd29cba0e2b0f40a6a812578 | 3c31584c1b661195a567ffd2603d30cb2e270493 | /codeforces/706/B.py | 31dd5d8a311b87f310cd96d3b478f008e8e8580c | [] | no_license | ku-nal/Codeforces | c7f621e35b5d4eea1ed11276ee8e91031252ca91 | df43c2fcbcfd1c9f96b6fe79c7abc9ddee054cb7 | refs/heads/main | 2023-04-10T19:00:40.559074 | 2021-04-27T15:15:51 | 2021-04-27T15:15:51 | 362,154,763 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | #===========Template===============
from io import BytesIO, IOBase
import sys,os
inpl=lambda:list(map(int,input().split()))
inpm=lambda:map(int,input().split())
inpi=lambda:int(input())
inp=lambda:input()
rev,ra,l=reversed,range,len
P=print
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
def input(): return sys.stdin.readline().rstrip("\r\n")
#=========I/p O/p ========================================#
from bisect import bisect_left as bl
from bisect import bisect_right as br
import sys,operator,math
from collections import Counter,deque
import random
#==============To chaliye shuru krte he====================#
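# Added note: sort the n1 values once, then answer each of the n queries with bisect_right,
# i.e. the count of stored values less than or equal to the queried number.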
n1=inpi()
li=inpl()
n=inpi()
li.sort()
for i in ra(n):
a=inpi()
P(br(li,a))
| [
"kunalmakwana18@gnu.ac.in"
] | kunalmakwana18@gnu.ac.in |
de272a3b846152173db1088aa944b13b8b03e905 | ec7f476240c8edf88bf9500c5b677113d840d6c6 | /PMModels/Implementations/Backtesting/mean_pnl.py | 3cab0e3ceec638f84a2f5440f990c7308e1d435f | [] | no_license | fagan2888/BehavioralAssignmentSolution | 80b82b71a94d0da68783b00046215755c1dd3715 | 15acead2d6af6e59f64a7d3baf7da5356cea57ec | refs/heads/master | 2021-04-21T08:31:40.453758 | 2018-08-27T06:18:41 | 2018-08-27T06:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
'''
Name: Beier (Benjamin) Liu
Date: 7/5/2018
Remark:
Python 3.6 is recommended
Before running please install packages *numpy, scipy, matplotlib
Using cmd line py -3.6 -m pip install [package_name]
'''
import os, time, logging
import copy, math
import functools, itertools
import numpy as np
from Implementations.compute_mean import *
from Implementations.compute_var import *
'''===================================================================================================
File content:
provide optimization target function fitness_sharpe, like cost
==================================================================================================='''
# Profit and loss per trade
def mean_pnl(trades, freq='annually'):
'''==============================================================================================
Arguments:
trades -- list of objects, past expected returns of strategy
freq -- string, the frequency of computation
Returns:
res -- double,
=============================================================================================='''
# Preparation Phrase
res = 0.0;
# Handling Phrase
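    # Added sketch (assumption: each trade object exposes a numeric `pnl` attribute — that
    # name is a guess, not taken from this codebase):
    #   res = sum(t.pnl for t in trades) / len(trades) if trades else 0.0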
# Checking Phrase
return res
| [
"imbenliu@gmail.com"
] | imbenliu@gmail.com |
021fe2ec6b0f2d796211aa9535bec12b1d96a01e | 12f27e80ee7cf5f5274c98ba2e370defbdafa67b | /data-exporter/pipeline.py | 8dfb1c175af634643b9e62ecb169b82ae425ad77 | [] | no_license | dawnkd/beehive-server | 48f94abfe2392902845f4a682f59fdfefa64dbf7 | 21f61a9c5723554931a9735c2c9398e82186fbd1 | refs/heads/master | 2020-03-19T02:12:53.584756 | 2018-05-31T16:26:55 | 2018-05-31T16:26:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,163 | py | import binascii
import re
import struct
from waggle.coresense.utils import decode_frame as decode_frame_v3
from waggle.protocol.v5.decoder import decode_frame as decode_frame_v5
from waggle.protocol.v5.decoder import convert as convert_v5
def normalize_key(k):
return re.sub('[-_.]+', '_', k).lower()
def normalize_value(v):
if isinstance(v, dict):
return {normalize_key(k2): normalize_value(v2) for k2, v2 in v.items()}
if isinstance(v, list):
return [normalize_value(v2) for v2 in v]
if isinstance(v, float):
return round(v, 3)
return v
def trim_python_repr(s):
if s.startswith("b'"):
return s[2:-1]
return s
def trim_coresense_packet(source):
start = source.index(b'\xaa')
end = source.rindex(b'\x55')
return source[start:end + 1]
def reunpack_if_needed(source):
if source[0] != 0xaa:
return binascii.unhexlify(source.decode())
return source
def decode_coresense_3(source):
source = trim_coresense_packet(source)
source = reunpack_if_needed(source)
return decode_frame_v3(source)
still_raw_sensors = {
'Chemsense',
'Si1145',
}
def decode_coresense_4(source):
source = trim_coresense_packet(source)
source = reunpack_if_needed(source)
unpacked_data = decode_frame_v5(source)
raw_results = {}
for sensor_id, sensor_data in unpacked_data.items():
raw_results.update(sensor_data)
converted_results = {}
for sensor_id, sensor_data in unpacked_data.items():
for key, (value, unit) in convert_v5(sensor_data, sensor_id).items():
converted_results[key] = value
all_results = {}
for k, v in map_readings_4to3(raw_results).items():
all_results[('raw', k)] = v
for k, v in map_readings_4to3(converted_results).items():
if k in still_raw_sensors:
all_results[('raw', k)] = v
else:
all_results[('converted', k)] = v
return all_results
def decode18(data):
bincounts = struct.unpack_from('<16H', data, offset=0)
mtof = [x / 3 for x in struct.unpack_from('<4B', data, offset=32)]
pmvalues = sorted(struct.unpack_from('<3f', data, offset=50))
values = {
'bins': bincounts,
'mtof': mtof,
'pm': {'1': pmvalues[0], '2.5': pmvalues[1], '10': pmvalues[2]},
}
return values
def decode_alphasense_1(source):
return decode18(source)
decoders = {
'coresense:3': decode_coresense_3,
'coresense:4': decode_coresense_4,
'alphasense:1': decode_alphasense_1,
}
def decode(row):
plugin = ':'.join([row.plugin_name, row.plugin_version])
source = binascii.unhexlify(trim_python_repr(row.data))
if plugin not in decoders:
return {}
return decoders[plugin](source)
template_4to3 = {
'APDS-9006-020': {
'intensity': 'lightsense_apds_9006_020_light'
},
'BMP180': {
'pressure': 'metsense_bmp180_pressure',
'temperature': 'metsense_bmp180_temperature',
},
'HIH4030': {
'humidity': 'metsense_hih4030_humidity',
},
'HIH6130': {
'humidity': 'lightsense_hih6130_humidity',
'temperature': 'lightsense_hih6130_temperature',
},
'HMC5883L': {
'magnetic_field.x': 'lightsense_hmc5883l_hx',
'magnetic_field.y': 'lightsense_hmc5883l_hy',
'magnetic_field.z': 'lightsense_hmc5883l_hz',
},
'HTU21D': {
'humidity': 'metsense_htu21d_humidity',
'temperature': 'metsense_htu21d_temperature',
},
'LPS25H': {
'pressure': 'chemsense_lpp',
'temperature': 'chemsense_lpt',
},
'ML8511': {
'intensity': 'lightsense_ml8511',
},
'MLX75305': {
'intensity': 'lightsense_mlx75305',
},
'MMA8452Q': {
'acceleration.x': 'metsense_mma8452q_acc_x',
'acceleration.y': 'metsense_mma8452q_acc_y',
'acceleration.z': 'metsense_mma8452q_acc_z',
},
'SHT25': {
'humidity': 'chemsense_shh',
'temperature': 'chemsense_sht',
},
'Si1145': {
'ir_count': 'chemsense_sir',
'uv_count': 'chemsense_suv',
'visible_light_count': 'chemsense_svl',
},
'TMP421': {
'temperature': 'lightsense_tmp421',
},
'TSL250RD-LS': {
'intensity': 'lightsense_tsl250_light',
},
'TSL260RD': {
'intensity': 'lightsense_tsl260_light',
},
'Coresense ID': {
'mac_address': 'metsense_id',
},
'PR103J2': {
'temperature': 'metsense_pr103j2_temperature',
},
'SPV1840LR5H-B': {
'intensity': 'metsense_spv1840lr5h-b',
},
'TMP112': {
'temperature': 'metsense_tmp112',
},
'TSL250RD-AS': {
'intensity': 'metsense_tsl250rd_light',
},
'TSYS01': {
'temperature': 'metsense_tsys01_temperature',
},
'Chemsense ID': {
'mac_address': 'chemsense_id',
},
'Chemsense': {
'co': 'chemsense_cmo',
'h2s': 'chemsense_h2s',
'no2': 'chemsense_no2',
'o3': 'chemsense_ozo',
'so2': 'chemsense_so2',
'reducing_gases': 'chemsense_irr',
'oxidizing_gases': 'chemsense_iaq',
'at0': 'chemsense_at0',
'at1': 'chemsense_at1',
'at2': 'chemsense_at2',
'at3': 'chemsense_at3',
},
'Alphasense': {
'pm1': 'alphasense_pm1',
'pm2.5': 'alphasense_pm2.5',
'pm10': 'alphasense_pm10',
'bins': 'alphasense_bins',
'sample flow rate': 'alphasense_sample_flow_rate',
'sampling period': 'alphasense_sampling_period',
'id': 'alpha_serial',
'fw': 'alpha_firmware',
},
'PMS7003': {
'10um_particle': 'pms7003_10um_particle',
'1um_particle': 'pms7003_1um_particle',
'2_5um_particle': 'pms7003_2_5um_particle',
'5um_particle': 'pms7003_5um_particle',
'pm10_atm': 'pms7003_pm10_atm',
'pm10_cf1': 'pms7003_pm10_cf1',
'pm1_atm': 'pms7003_pm1_atm',
'pm1_cf1': 'pms7003_pm1_cf1',
'pm25_atm': 'pms7003_pm25_atm',
'pm25_cf1': 'pms7003_pm25_cf1',
'point_3um_particle': 'pms7003_point_3um_particle',
'point_5um_particle': 'pms7003_point_5um_particle',
},
'Net Broadband': {
'rx': 'net_broadband_rx',
'tx': 'net_broadband_tx',
},
'Net LAN': {
'rx': 'net_lan_rx',
'tx': 'net_lan_tx',
},
'Net USB': {
'rx': 'net_usb_rx',
'tx': 'net_usb_tx',
},
}
def stringify(x):
if isinstance(x, tuple) or isinstance(x, list):
return ','.join([stringify(xi) for xi in x])
if isinstance(x, bytes) or isinstance(x, bytearray):
return binascii.hexlify(x).decode()
return str(x)
def map_parameters_4to3(readings, parameters):
output = {}
for p, k in parameters.items():
output[p] = stringify(readings[k])
return output
def map_readings_4to3(readings):
output = {}
for sensor, parameters in template_4to3.items():
try:
output[sensor] = map_parameters_4to3(readings, parameters)
except KeyError:
continue
return output
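# Added usage sketch (not part of the original module): map_readings_4to3 regroups a flat
# dict of v4 keys into the v3 per-sensor layout, e.g.
#   map_readings_4to3({'metsense_htu21d_temperature': 25.0, 'metsense_htu21d_humidity': 40.0})
#   -> {'HTU21D': {'temperature': '25.0', 'humidity': '40.0'}}
# Sensors missing any of their keys are skipped by the KeyError guard above.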
| [
"sean.shahkarami@gmail.com"
] | sean.shahkarami@gmail.com |
dfc33f4392592784b20a4f10a81aaefb5131c5c3 | 1d363dfbe69b79bc1989251f085060232beb12f5 | /tests/test_chemical_package.py | a87f5ed3afd8ddfc87e362e8508edd4dcf84319c | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | CalebBell/thermo | ec602af2316875692e385287c6010e9f206b1bc3 | 8622fada3614179d4372192e0031b4a206384c93 | refs/heads/master | 2023-08-30T05:30:07.552575 | 2023-06-25T01:35:53 | 2023-06-25T01:35:53 | 62,404,647 | 529 | 127 | MIT | 2023-08-11T18:31:21 | 2016-07-01T16:04:56 | Python | UTF-8 | Python | false | false | 6,401 | py | '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import json
from math import *
import pytest
from chemicals import *
from chemicals.utils import hash_any_primitive
from fluids.numerics import *
from thermo import *
@pytest.mark.fuzz
@pytest.mark.slow
def test_ChemicalConstantsPackage_from_json_as_json_large():
create_compounds = []
for k in dippr_compounds():
try:
if search_chemical(k) is not None:
create_compounds.append(k)
except:
pass
obj = ChemicalConstantsPackage.constants_from_IDs(create_compounds)
obj2 = ChemicalConstantsPackage.from_json(json.loads(json.dumps(obj.as_json())))
assert hash(obj) == hash(obj2)
assert obj == obj2
assert id(obj) != id(obj2)
obj = ChemicalConstantsPackage.correlations_from_IDs(create_compounds)
obj2 = PropertyCorrelationsPackage.from_json(json.loads(json.dumps(obj.as_json())))
assert hash(obj) == hash(obj2)
assert obj == obj2
assert id(obj) != id(obj2)
assert obj != int
assert obj != float
def test_ChemicalConstantsPackage_json_version_exported():
constants = ChemicalConstantsPackage(MWs=[18.01528, 106.165], names=['water', 'm-xylene'])
string = json.dumps(constants.as_json())
c2 = ChemicalConstantsPackage.from_json(json.loads(string))
assert 'py/object' in string
assert 'json_version' in string
assert not hasattr(c2, 'json_version')
def test_ChemicalConstantsPackage_json_export_does_not_change_hashes():
# There was a nasty bug where the hashing function was changing its result
# every call
obj = ChemicalConstantsPackage.correlations_from_IDs(['hexane'])
hashes_orig = [hash_any_primitive(getattr(obj, k)) for k in obj.correlations]
copy = obj.as_json()
hashes_after = [hash_any_primitive(getattr(obj, k)) for k in obj.correlations]
assert hashes_orig == hashes_after
def test_ChemicalConstantsPackage_json_export_sane_recursion():
# It might be nice to do something about the duplicate EOSs, but they could be different
# Really still feels like a different structure for that would be better.
obj = ChemicalConstantsPackage.correlations_from_IDs(['methane', 'ethane'])
assert 3 == json.dumps(obj.as_json()).count('VaporPressure')
def test_ChemicalConstantsPackage_json_export_same_output():
obj = ChemicalConstantsPackage.correlations_from_IDs(['hexane'])
obj2 = PropertyCorrelationsPackage.from_json(json.loads(json.dumps(obj.as_json())))
assert hash_any_primitive(obj.constants) == hash_any_primitive(obj2.constants)
for prop in obj.pure_correlations:
assert hash_any_primitive(getattr(obj, prop)) == hash_any_primitive(getattr(obj2, prop))
assert hash_any_primitive(obj.VaporPressures) == hash_any_primitive(obj2.VaporPressures)
assert hash_any_primitive(obj.ViscosityGases) == hash_any_primitive(obj2.ViscosityGases)
assert hash(obj.SurfaceTensionMixture) == hash(obj2.SurfaceTensionMixture)
assert hash(obj.VolumeGasMixture) == hash(obj2.VolumeGasMixture)
for prop in obj.mixture_correlations:
assert hash_any_primitive(getattr(obj, prop)) == hash_any_primitive(getattr(obj2, prop))
assert hash(obj) == hash(obj2)
assert obj == obj2
def test_ChemicalConstantsPackage_wrong_behaviors():
obj = ChemicalConstantsPackage.correlations_from_IDs(['7647-19-0'])
    assert obj.VolumeLiquids[0].eos is None
assert obj != int
assert obj != float
def test_lemmon2000_package():
Ts = (150.0, 200.0, 300.0, 1000.0, 2000.0)
CoolProp_Cps = [29.030484473246823, 29.03511836728048, 29.103801681330573, 33.046833525551676, 36.210748112152906]
for T, Cp in zip(Ts, CoolProp_Cps):
assert_close(Cp, lemmon2000_correlations.HeatCapacityGases[0](T), rtol=2e-7)
def test_compound_index():
obj = ChemicalConstantsPackage(MWs=[18.01528, 106.165], names=['water', 'm-xylene'],
CASs=['7732-18-5', '108-38-3'],
InChI_Keys=['XLYOFNOQVPJJNP-UHFFFAOYSA-N', 'IVSZLXZYQVIEFR-UHFFFAOYSA-N'],
InChIs=['H2O/h1H2', 'C8H10/c1-7-4-3-5-8(2)6-7/h3-6H,1-2H3'],
smiless=['O', 'CC1=CC(=CC=C1)C'], PubChems=[962, 7929],)
assert 0 == obj.compound_index(name='water')
assert 1 == obj.compound_index(name='m-xylene')
assert 1 == obj.compound_index(PubChem=7929)
assert 0 == obj.compound_index(smiles='O')
assert 0 == obj.compound_index(CAS='7732-18-5')
assert 0 == obj.compound_index(InChI='H2O/h1H2')
assert 1 == obj.compound_index(InChI_Key='IVSZLXZYQVIEFR-UHFFFAOYSA-N')
def test_add_ChemicalConstantsPackage():
a = ChemicalConstantsPackage.constants_from_IDs(IDs=['water', 'hexane'])
b = ChemicalConstantsPackage.constants_from_IDs(IDs=['toluene'])
c = a + b
c_good = ChemicalConstantsPackage.constants_from_IDs(IDs=['water', 'hexane', 'toluene'])
assert c == c_good
def test_add_PropertyCorrelationsPackage():
a = ChemicalConstantsPackage.correlations_from_IDs(IDs=['water', 'hexane'])
b = ChemicalConstantsPackage.correlations_from_IDs(IDs=['toluene'])
c = a + b
c_good = ChemicalConstantsPackage.correlations_from_IDs(IDs=['water', 'hexane', 'toluene'])
assert c == c_good
| [
"Caleb.Andrew.Bell@gmail.com"
] | Caleb.Andrew.Bell@gmail.com |
5c377987f3a2b5dc3ed85c65b668b95b75cb4097 | c93f51492cfee3f98040f07d7f4323ec27ac81a5 | /refinery/units/misc/drp.py | b38fff444e00c9713d6f25da6071973808bbc9fe | [
"BSD-3-Clause"
] | permissive | prats84/refinery | cbe9ebfeb570c9c0531e13bbf13ec18801f12aca | 5f961051e9cc1857a06108ce4d36a6799ac9d720 | refs/heads/master | 2023-07-13T02:32:04.998285 | 2021-08-20T09:08:01 | 2021-08-20T09:08:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,630 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from .. import arg, Unit
from ...lib.suffixtree import SuffixTree
from ...lib.types import INF
class stackdepth:
def __init__(self, depth):
self.depth = depth
self.default = sys.getrecursionlimit()
def __enter__(self):
if self.depth > self.default:
sys.setrecursionlimit(self.depth)
return self
def __exit__(self, *args):
sys.setrecursionlimit(self.default)
return False
class drp(Unit):
"""
Detect Repeating Patterns - detects the most prevalent repeating byte pattern
in a chunk of data. The unit computes a suffix tree which may require a lot of
memory for large buffers.
"""
def __init__(
self,
consecutive: arg.switch('-c', help='Assume that the repeating pattern is consecutive when observable.') = False,
min: arg.number('-n', help='Minimum size of the pattern to search for. Default is {default}.') = 1,
max: arg.number('-N', help='Maximum size of the pattern to search for. Default is {default}.') = INF,
len: arg.number('-l', help='Set the exact size of the pattern. This is equivalent to --min=N --max=N.') = None,
all: arg.switch('-a', help='Produce one output for each repeating pattern that was detected.') = False,
threshold: arg.number('-t', help='Patterns must match this performance threshold in percent, lest they be discarded.') = 20,
weight: arg.number('-w', help='Specifies how much longer patterns are favored over small ones. Default is {default}.') = 0,
buffer: arg.number('-b', group='BFR', help='Maximum number of bytes to inspect at once. The default is {default}.') = 1024,
chug : arg.switch('-g', group='BFR', help='Compute the prefix tree for the entire buffer instead of chunking it.') = False
):
if len is not None:
min = max = len
super().__init__(
min=min,
max=max,
all=all,
consecutive=consecutive,
weight=weight,
buffer=buffer,
chug=chug,
threshold=threshold
)
def _get_patterns(self, data):
with stackdepth(len(data)):
tree = SuffixTree(data)
min_size = self.args.min
max_size = self.args.max
patterns = set()
cursor = 0
while cursor < len(data):
node = tree.root
rest = data[cursor:]
remaining = len(rest)
length = 0
offset = None
while node.children and length < remaining:
for child in node.children.values():
if tree.data[child.start] == rest[length]:
node = child
break
if node.start >= cursor:
break
offset = node.start - length
length = node.end + 1 - offset
if offset is None:
cursor += 1
continue
length = min(remaining, length)
if max_size >= length >= min_size:
pattern = rest[:length].tobytes()
patterns.add(pattern)
cursor += length
del tree
return patterns
@staticmethod
def _consecutive_count(data, pattern):
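        # Editorial comment: counts period-aligned occurrences of the pattern
        # (positions k, k+len, k+2*len, ...) and takes the best alignment,
        # approximating how often the pattern repeats consecutively.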
length = len(pattern)
if length == 1:
return data.count(pattern)
view = memoryview(data)
return max(sum(1 for i in range(k, len(view), length) if view[i:i + length] == pattern)
for k in range(len(pattern)))
@staticmethod
def _truncate_pattern(pattern):
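        # Editorial comment: trims a trailing partial repetition so the pattern
        # is reduced to its fundamental period before consecutive occurrences
        # are counted, e.g. b'abcab' -> b'abc' and b'aaa' -> b'a'.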
offset = 0
for byte in pattern[1:]:
if byte == pattern[offset]:
offset += 1
else:
offset = 0
if offset > 0:
pattern = pattern[:-offset]
return pattern
def process(self, data):
memview = memoryview(data)
weight = 1 + (self.args.weight / 10)
if self.args.chug:
patterns = self._get_patterns(memview)
else:
patterns = set()
chunksize = self.args.buffer
for k in range(0, len(memview), chunksize):
patterns |= self._get_patterns(memview[k:k + chunksize])
if not patterns:
raise RuntimeError('unexpected state: no repeating sequences found')
self.log_debug('removing duplicate pattern detections')
duplicates = set()
maxlen = max(len(p) for p in patterns)
for pattern in sorted(patterns, key=len):
for k in range(2, maxlen // len(pattern) + 1):
repeated = pattern * k
if repeated in patterns:
duplicates.add(repeated)
patterns -= duplicates
self.log_debug(F'counting coverage of {len(patterns)} patterns')
pattern_count = {p: data.count(p) for p in patterns}
pattern_performance = dict(pattern_count)
for consecutive in (False, True):
if consecutive:
self.log_debug(F're-counting coverage of {len(patterns)} patterns')
patterns = {self._truncate_pattern(p) for p in patterns}
pattern_performance = {p: self._consecutive_count(data, p) for p in patterns}
self.log_debug('evaluating pattern performance')
for pattern, count in pattern_performance.items():
pattern_performance[pattern] = count * (len(pattern) ** weight)
best_performance = max(pattern_performance.values())
for pattern, performance in pattern_performance.items():
pattern_performance[pattern] = performance / best_performance
self.log_debug('removing patterns below performance threshold')
threshold = self.args.threshold
patterns = {p for p in patterns if pattern_performance[p] * 100 >= threshold}
if not self.args.consecutive:
break
if self.args.all:
for pattern in sorted(patterns, key=pattern_performance.get, reverse=True):
yield self.labelled(pattern, count=pattern_count[pattern])
return
best_patterns = [p for p in patterns if pattern_performance[p] == 1.0]
if len(best_patterns) > 1:
self.log_warn('could not determine unique best repeating pattern, returning the first of these:')
for k, pattern in enumerate(best_patterns):
self.log_warn(F'{k:02d}.: {pattern.hex()}')
yield best_patterns[0]
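# --- Usage sketch (editorial addition, not part of the original unit) ---
# The exact invocation depends on refinery's Unit base class, but based on the
# constructor and process() defined above, usage is roughly:
#
#     unit = drp(min=2, all=True)               # hypothetical direct construction
#     for pattern in unit.process(b"abcabcabcabc"):
#         print(pattern)                        # repeating unit(s), e.g. b"abc"
#
# On the command line the unit is normally placed inside a refinery pipeline
# that feeds it the binary data to analyse.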
| [
"rattle@nullteilerfrei.de"
] | rattle@nullteilerfrei.de |
bd68a56c8021ef5410ed32841b7d9aa4f48a1c15 | f810836bea801f2fa85418ac7f5f5ffb0f3e0bda | /abc/abc152/B - Comparing Strings.py | c51536616f18cb30a6098701c3c10148ce646fb0 | [] | no_license | cocoinit23/atcoder | 0afac334233e5f8c75d447f6adf0ddf3942c3b2c | 39f6f6f4cc893e794d99c514f2e5adc9009ee8ca | refs/heads/master | 2022-08-29T06:01:22.443764 | 2022-07-29T07:20:05 | 2022-07-29T07:20:05 | 226,030,199 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | a, b = map(int, input().split())
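# Both candidates repeat a single digit (str(a) written b times vs. str(b)
# written a times), so the string built from the smaller digit is the
# lexicographically smaller answer.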
if a <= b:
print(str(a) * b)
else:
print(str(b) * a)
| [
"cocoinit23@gmail.com"
] | cocoinit23@gmail.com |
fdb0f1461476422b82e479e5258854ffe5155a90 | a9a8931d6877d6e0f4f11cbd7b50322819e0fe45 | /hpc/REBAGG-WERCS-GN_80.py | f8def9de400fee00afb0aafccf18c7fbffeff645 | [] | no_license | jafetgado/tomerdesign | 8517f9f8266bcf1db64fdf00d12294f682cd412d | a0d0961a11d7d84be5343d374198ab0f5084c2b3 | refs/heads/master | 2022-05-31T18:15:55.045419 | 2020-04-25T05:49:32 | 2020-04-25T05:49:32 | 258,499,679 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,403 | py | """
Template script for hyperparameter tuning with HPC
Evaluates the performance of a strategy for a single
set of hyperparameter combinations)
"""
# Imports
#============#
import numpy as np
import pandas as pd
import joblib
import itertools
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import resreg
import warnings
warnings.filterwarnings("ignore")
# Get dataset and features
#==============================#
aalist = list('ACDEFGHIKLMNPQRSTVWY')
def getAAC(seq):
aac = np.array([seq.count(x) for x in aalist])/len(seq)
return aac
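# Illustration (editorial note): getAAC returns the amino acid composition as
# fractions over the 20 residues in `aalist`; for a toy sequence such as
# "ACCA" the entries for A and C are both 0.5 and every other entry is 0.0.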
data = pd.read_excel('sequence_ogt_topt.xlsx', index_col=0)
aac = np.array([getAAC(seq) for seq in data['sequence']])
ogt = data['ogt'].values.reshape((data.shape[0],1))
X = np.append(aac, ogt, axis=1)
sc = StandardScaler()
X = sc.fit_transform(X)
y = data['topt'].values
# Strategies and hyperparameters
#======================================#
# Hyperparameter range
cl_vals = [25.0, 30.0, None]
ch_vals = [72.2, 60.0]
ks = [5, 10, 15]
deltas = [0.1, 0.5, 1.0]
overs = [0.5, 0.75]
unders = [0.5, 0.75]
sizes = [300, 600]
sample_methods = ['balance', 'extreme', 'average']
size_methods = ['balance', 'variation']
all_params = {}
# Hyperparameter combinations (grid search)
all_params['RO'] = list(itertools.product(cl_vals, ch_vals, sample_methods))
all_params['SMOTER'] = list(itertools.product(cl_vals, ch_vals, sample_methods, ks))
all_params['GN'] = list(itertools.product(cl_vals, ch_vals, sample_methods, deltas))
all_params['WERCS'] = list(itertools.product(cl_vals, ch_vals, overs, unders))
all_params['WERCS-GN'] = list(itertools.product(cl_vals, ch_vals, overs, unders, deltas))
all_params['REBAGG-RO'] = list(itertools.product(cl_vals, ch_vals, size_methods,
sizes))
all_params['REBAGG-SMOTER'] = list(itertools.product(cl_vals, ch_vals, size_methods,
sizes, ks))
all_params['REBAGG-GN'] = list(itertools.product(cl_vals, ch_vals, size_methods,
sizes, deltas))
all_params['REBAGG-WERCS'] = list(itertools.product(cl_vals, ch_vals, sizes, overs,
unders))
all_params['REBAGG-WERCS-GN'] = list(itertools.product(cl_vals, ch_vals, sizes, overs,
unders, deltas))
strategies = list(all_params.keys())
# Evaluate performance for a single strategy and hyperparameter combination
#===========================================================================#
bins = [30, 50, 65, 85] # For splitting target values into bins
m = 100 # Number of regressors in REBAGG ensemble
# Specify strategy and param (instead of a lengthy for loop of combinations)
strategy = 'REBAGG-WERCS-GN' # substituted per job; this run evaluates REBAGG-WERCS-GN
params = all_params[strategy]
param = params[80] # substituted per job; this run evaluates combination index 80
# Run the calculation only for the specified strategy and param
r2_store, mse_store, mcc_store, f1_store = [], [], [], [] # Empty lists for storing results
mse_bins_store = []
# Monte Carlo cross validation (MCCV) loop
for rrr in range(50):
# Resample validation set (uniform distribution)
train_indices, test_indices = resreg.uniform_test_split(X, y, bins=bins,
bin_test_size=70, verbose=False,
random_state=rrr)
X_train, y_train = X[train_indices,:], y[train_indices]
X_test, y_test = X[test_indices,:], y[test_indices]
# Unpack hyperparameters, resample training data, and fit regressors
reg = DecisionTreeRegressor(random_state=rrr) if 'REBAGG' in strategy else \
RandomForestRegressor(n_estimators=10, n_jobs=-1, random_state=rrr)
if strategy=='RO':
cl, ch, sample_method = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
X_train, y_train = resreg.random_oversample(X_train, y_train, relevance,
relevance_threshold=0.5, over=sample_method,
random_state=rrr)
reg.fit(X_train, y_train)
elif strategy=='SMOTER':
cl, ch, sample_method, k = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
X_train, y_train = resreg.smoter(X_train, y_train, relevance,
relevance_threshold=0.5, k=k, over=sample_method,
random_state=rrr)
reg.fit(X_train, y_train)
elif strategy=='GN':
cl, ch, sample_method, delta = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
X_train, y_train = resreg.gaussian_noise(X_train, y_train, relevance,
relevance_threshold=0.5, delta=delta, over=sample_method,
random_state=rrr)
reg.fit(X_train, y_train)
elif strategy=='WERCS':
cl, ch, over, under = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
X_train, y_train = resreg.wercs(X_train, y_train, relevance, over=over,
under=under, noise=False, random_state=rrr)
reg.fit(X_train, y_train)
elif strategy=='WERCS-GN':
cl, ch, over, under, delta = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
X_train, y_train = resreg.wercs(X_train, y_train, relevance, over=over,
under=under, noise=True, delta=delta, random_state=rrr)
reg.fit(X_train, y_train)
elif strategy=='REBAGG-RO':
cl, ch, size_method, s = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
rebagg.fit(X_train, y_train, relevance, relevance_threshold=0.5,
sample_method='random_oversample', size_method=size_method,
random_state=rrr)
elif strategy=='REBAGG-SMOTER':
cl, ch, size_method, s, k = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
rebagg.fit(X_train, y_train, relevance, relevance_threshold=0.5,
sample_method='smoter', size_method=size_method, k=k,
random_state=rrr)
elif strategy=='REBAGG-GN':
cl, ch, size_method, s, delta = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
rebagg.fit(X_train, y_train, relevance, relevance_threshold=0.5,
sample_method='gaussian', size_method=size_method, delta=delta,
random_state=rrr)
elif strategy=='REBAGG-WERCS':
cl, ch, s, over, under = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
rebagg.fit(X_train, y_train, relevance=relevance, sample_method='wercs',
over=over, under=under, random_state=rrr)
elif strategy=='REBAGG-WERCS-GN':
cl, ch, s, over, under, delta = param
relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
rebagg.fit(X_train, y_train, relevance=relevance, sample_method='wercs-gn',
over=over, under=under, delta=delta, random_state=rrr)
# Validate fitted regressors on uniform validation set
if 'REBAGG' in strategy:
y_pred = rebagg.predict(X_test)
else:
y_pred = reg.predict(X_test)
# Evaluate regressor performance on validation set
r2 = r2_score(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
mcc = resreg.matthews_corrcoef(y_test, y_pred, bins)
relevance_true = resreg.sigmoid_relevance(y_test, cl=None, ch=65)
relevance_pred = resreg.sigmoid_relevance(y_pred, cl=None, ch=65)
f1 = resreg.f1_score(y_test, y_pred, error_threshold=5,
relevance_true=relevance_true, relevance_pred=relevance_pred,
relevance_threshold=0.5, k=1e4)
mse_bins = resreg.bin_performance(y_test, y_pred, bins, metric='MSE')
# Store performance results
r2_store.append(r2)
mse_store.append(mse)
mcc_store.append(mcc)
f1_store.append(f1)
mse_bins_store.append(mse_bins)
# Performance statistics
r2_mean, r2_std = np.mean(r2_store), np.std(r2_store)
mse_mean, mse_std = np.mean(mse_store), np.std(mse_store)
f1_mean, f1_std = np.mean(f1_store), np.std(f1_store)
mcc_mean, mcc_std = np.mean(mcc_store), np.std(mcc_store)
mse_bins_store = pd.DataFrame(mse_bins_store)
mse_bins_mean, mse_bins_std = np.mean(mse_bins_store, axis=0), np.std(mse_bins_store, axis=0)
# Combine all performance data and write to excel spreadsheet
means = [r2_mean, mse_mean, f1_mean, mcc_mean] + list(mse_bins_mean)
stds = [r2_std, mse_std, f1_std, mcc_std] + list(mse_bins_std)
store = [param] + means + stds
# Save performance results as a binary file (to be read and analyzed later)
joblib.dump(store, f'hpc/joblib_files/{strategy}_{80}.pkl')
| [
"japhethgado@gmail.com"
] | japhethgado@gmail.com |
8939f3f24ec15e8fa68cd329521e1feaccb25612 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_asset_filters_operations.py | 94385524e45a19be44bb9043148585e1e204c044 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 26,140 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._asset_filters_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AssetFiltersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.media.aio.AzureMediaServices`'s
:attr:`asset_filters` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any
) -> AsyncIterable["_models.AssetFilter"]:
"""List Asset Filters.
List Asset Filters associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AssetFilter or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.media.models.AssetFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AssetFilterCollection]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
asset_name=asset_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AssetFilterCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, account_name: str, asset_name: str, filter_name: str, **kwargs: Any
) -> _models.AssetFilter:
"""Get an Asset Filter.
Get the details of an Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetFilter or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetFilter
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AssetFilter]
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
asset_name=asset_name,
filter_name=filter_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("AssetFilter", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}"} # type: ignore
@overload
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
filter_name: str,
parameters: _models.AssetFilter,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AssetFilter:
"""Create or update an Asset Filter.
Creates or updates an Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
:param parameters: The request parameters. Required.
:type parameters: ~azure.mgmt.media.models.AssetFilter
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetFilter or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetFilter
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
filter_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AssetFilter:
"""Create or update an Asset Filter.
Creates or updates an Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
:param parameters: The request parameters. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetFilter or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetFilter
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
filter_name: str,
parameters: Union[_models.AssetFilter, IO],
**kwargs: Any
) -> _models.AssetFilter:
"""Create or update an Asset Filter.
Creates or updates an Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
        :param parameters: The request parameters. Is either a model type or an IO type. Required.
:type parameters: ~azure.mgmt.media.models.AssetFilter or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetFilter or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetFilter
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AssetFilter]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "AssetFilter")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
asset_name=asset_name,
filter_name=filter_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("AssetFilter", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("AssetFilter", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, asset_name: str, filter_name: str, **kwargs: Any
) -> None:
"""Delete an Asset Filter.
Deletes an Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
asset_name=asset_name,
filter_name=filter_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
filter_name: str,
parameters: _models.AssetFilter,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AssetFilter:
"""Update an Asset Filter.
Updates an existing Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
:param parameters: The request parameters. Required.
:type parameters: ~azure.mgmt.media.models.AssetFilter
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetFilter or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetFilter
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
filter_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AssetFilter:
"""Update an Asset Filter.
Updates an existing Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
:param parameters: The request parameters. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetFilter or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetFilter
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
filter_name: str,
parameters: Union[_models.AssetFilter, IO],
**kwargs: Any
) -> _models.AssetFilter:
"""Update an Asset Filter.
Updates an existing Asset Filter associated with the specified Asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:param filter_name: The Asset Filter name. Required.
:type filter_name: str
        :param parameters: The request parameters. Is either a model type or an IO type. Required.
:type parameters: ~azure.mgmt.media.models.AssetFilter or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetFilter or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetFilter
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AssetFilter]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "AssetFilter")
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
asset_name=asset_name,
filter_name=filter_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("AssetFilter", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/assetFilters/{filterName}"} # type: ignore
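# Usage sketch (editorial addition, not generated code): the operations above
# are reached through the async management client, roughly as follows; exact
# credential handling follows the usual azure-mgmt-* conventions and may
# differ between SDK versions.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.media.aio import AzureMediaServices
#
#     async def show_asset_filters():
#         async with AzureMediaServices(DefaultAzureCredential(), "<subscription-id>") as client:
#             async for f in client.asset_filters.list("<resource-group>", "<account>", "<asset>"):
#                 print(f.name)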
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
6694c98e3b9d6d13e32999cfd268c777850b2bcb | fa1953cb5c96b816b3d7e3df757cea0aa0f973b1 | /src/ensemble.py | d2014f12b77d18bd6b8be9d41617da60a77eca9f | [] | no_license | cttsai1985/Kaggle-Recursion-Cellular | 369aafd89f0ddfa4229f9b19fdba1317bfcf6cb8 | a91740a4ad984588c28a9369f303eba2e6b0bea0 | refs/heads/master | 2020-11-23T23:29:40.218838 | 2019-10-09T07:18:06 | 2019-10-09T07:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | import pandas as pd
import numpy as np
import click
import json
from scipy.optimize import linear_sum_assignment
from scipy.special import softmax
@click.group()
def cli():
print("Ensemble")
def load_one_fold(predict_root, model_name, fold):
test_preds = []
for channel in [
"[1,2,3,4,5]",
"[1,2,3,4,6]",
"[1,2,3,5,6]",
"[1,2,4,5,6]",
"[1,3,4,5,6]",
"[2,3,4,5,6]",
]:
pred = np.load(f"{predict_root}/{channel}/fold_{fold}/{model_name}/pred_test.npy")
test_preds.append(pred)
test_preds = np.asarray(test_preds)
test_preds = test_preds.mean(axis=0)
return test_preds
def load_kfold(predict_root, model_name):
preds = 0
for fold in range(5):
preds += load_one_fold(predict_root, model_name, fold) / 5
return preds
@cli.command()
@click.option('--data_root', type=str, default='/data/')
@click.option('--predict_root', type=str, default='/logs/pseudo/')
@click.option('--group_json', type=str, default='group.json')
def ensemble(
data_root='/data/',
predict_root='/logs/pseudo/',
group_json="group.json",
):
model_names = ['se_resnext50_32x4d']
ensemble_preds = 0
for model_name in model_names:
ensemble_preds += load_kfold(predict_root, model_name)
    # Just a magic scaling constant
ensemble_preds = ensemble_preds / 121
test_df = pd.read_csv(f"{data_root}/test.csv")
ensemble_preds = softmax(ensemble_preds, axis=1)
with open(group_json, 'r') as f:
m = json.load(f)
id_codes = test_df.id_code.values
test_plate_id_to_group_id = m["test_plate_id_to_group_id"]
label_group_list = m["label_group_list"]
plate_ids = [id_code[:-4] for id_code in id_codes]
start_indices = sorted([plate_ids.index(experiment_id) for experiment_id in set(plate_ids)])
start_indices.append(len(plate_ids))
sirnas = []
for i in range(len(start_indices) - 1):
start_id = start_indices[i]
end_id = start_indices[i + 1]
test_plate_id = id_codes[start_id][:-4]
label_group_id = test_plate_id_to_group_id[test_plate_id]
group_labels = label_group_list[label_group_id]
plate_prob = ensemble_preds[start_id:end_id, group_labels]
plate_prob = plate_prob / plate_prob.sum(axis=0, keepdims=True)
row_ind, col_ind = linear_sum_assignment(1 - plate_prob)
col_ind = np.array(group_labels)[col_ind]
sirnas.extend(col_ind)
sub = pd.DataFrame.from_dict(
data={"id_code": id_codes, "sirna": sirnas}
)
sub.to_csv(f"{predict_root}/submission.csv", index=False)
if __name__ == '__main__':
cli()
| [
"ngxbac.dt@gmail.com"
] | ngxbac.dt@gmail.com |
3b4a3829303c86ac2c0fbcbaed7b424dad368359 | 4a6dbf2a39e91cd9ed2404dcc954c3c6aaadf69c | /make_playlist/whitelist.py | 1d0c8eeb11a5fa22a636fbcddf670472534f9417 | [] | no_license | schan27/picify | c51bfd44430049e1e25b09a90ba7c5b618c69909 | a160d3d09743096ad90c7e111fce5096ebd11869 | refs/heads/master | 2023-05-28T06:49:22.734916 | 2023-05-20T05:50:35 | 2023-05-20T05:50:35 | 167,751,806 | 3 | 1 | null | 2023-05-20T05:50:44 | 2019-01-26T23:52:18 | Python | UTF-8 | Python | false | false | 78,698 | py | """Contains a whitelist of allowed words.
As of now these words are just scraped from the following sources:
- http://everynoise.com/genrewords.html
- https://positivethesaurus.com/positive-words-to-describe-music-melody-sound/
- https://www.allmusic.com/moods
"""
whitelist = {
"abschied",
"abstract",
"abyss",
"accordion",
"acerbic",
"acid",
"acoustic",
"action",
"adagio",
"addio",
"adesso",
"adeste",
"adeus",
"adon",
"adoracao",
"adult",
"adventista",
"adventure",
"adventures",
"africa",
"african",
"afrika",
"afrikaans",
"afro",
"afrobeat",
"afrobeats",
"afropop",
"aftermath",
"afternoon",
"afti",
"afto",
"agapas",
"agapi",
"agapo",
"age",
"aggressive",
"aggrotech",
"agnus",
"agora",
"agreeable",
"agua",
"aguila",
"ahava",
"ahora",
"aida",
"aika",
"aime",
"aina",
"ainda",
"ainsi",
"aint",
"air",
"aire",
"airegin",
"aires",
"airport",
"airy",
"aisa",
"aisi",
"akash",
"alabama",
"alap",
"alarm",
"alba",
"albanian",
"albi",
"alborada",
"album",
"aldri",
"aldrig",
"alegr",
"aleluia",
"aleluya",
"alex",
"alfredo",
"algo",
"algu",
"alien",
"alive",
"alla",
"allah",
"alle",
"alleen",
"allegretto",
"allegro",
"allein",
"alleluia",
"allemande",
"aller",
"alles",
"alli",
"allo",
"allons",
"allt",
"alltid",
"allting",
"alma",
"aloha",
"alte",
"alternate",
"alternative",
"altijd",
"amada",
"amado",
"amame",
"aman",
"amanec",
"amanecer",
"amaneci",
"amanh",
"amapola",
"amar",
"amargo",
"amargura",
"amazing",
"amba",
"ambe",
"ambeat",
"ambiance",
"ambient",
"ambitious",
"ambush",
"amen",
"america",
"american",
"americana",
"amico",
"amma",
"amorcito",
"amore",
"amores",
"amour",
"amours",
"amsterdam",
"anadolu",
"anak",
"anand",
"ananda",
"ancient",
"ancora",
"andamos",
"andante",
"andantino",
"andean",
"ando",
"andra",
"andrea",
"anema",
"angels",
"anges",
"anglican",
"angry",
"angst",
"angular",
"anhelo",
"animal",
"animals",
"animated",
"anime",
"anitas",
"anjo",
"anjos",
"ankhon",
"anna",
"annie",
"anniversary",
"annonce",
"announcement",
"anoche",
"ansiedad",
"antara",
"anthem",
"anthem-folk",
"anthropology",
"anti",
"anti-folk",
"antideutsche",
"antiviral",
"antonio",
"anxiety",
"anxious",
"apaixonado",
"apocalypse",
"apocalyptic",
"apopse",
"apos",
"appalachian",
"aprende",
"april",
"aquarela",
"aquela",
"aquellos",
"aqui",
"arab",
"arabesk",
"arabic",
"arbolito",
"arde",
"area",
"arena",
"argentine",
"argentino",
"aria",
"arid",
"ariose",
"arioso",
"arise",
"arkada",
"arkansas",
"armenian",
"army",
"arpa",
"arre",
"arrival",
"arrivederci",
"arroz",
"art",
"arte",
"ascension",
"asesino",
"ashamed",
"ashes",
"asian",
"asleep",
"asmr",
"asot",
"assonance",
"asta",
"astral",
"athletic",
"atl",
"atmospheric",
"attack",
"attention",
"audience",
"auld",
"aunque",
"aunt",
"aurinko",
"aurora",
"ausencia",
"ausente",
"austere",
"australian",
"austro-german",
"austropop",
"automatic",
"autre",
"autumn",
"autumnal",
"avalon",
"avant",
"avant-garde",
"avantgarde",
"avec",
"awake",
"awaken",
"awakening",
"awesome",
"axe",
"ayudante",
"azonto",
"azontobeats",
"baat",
"baba",
"babbling",
"babe",
"babylon",
"bach",
"bachata",
"background",
"bade",
"badman",
"bagpipe",
"bahar",
"bahia",
"baia",
"baiana",
"baila",
"bailando",
"baile",
"bajo",
"bakit",
"balada",
"balan",
"balance",
"bale",
"balearic",
"balfolk",
"bali",
"balkan",
"ball",
"ballad",
"ballada",
"ballade",
"balladen",
"ballata",
"balle",
"ballet",
"ballin",
"ballo",
"ballroom",
"baltic",
"bamba",
"bambini",
"bamboo",
"bana",
"band",
"banda",
"bands",
"bang",
"banging",
"bangla",
"banho",
"banjo",
"bank",
"banks",
"banter",
"bap",
"bara",
"baraja",
"barbara",
"barbershop",
"barbiere",
"barca",
"barcarolle",
"barcelona",
"bare",
"baritone",
"barnemusikk",
"barnmusik",
"baroque",
"barquinho",
"barrio",
"bars",
"basic",
"basin",
"basque",
"bass",
"batak",
"bate",
"batman",
"battle",
"battlefield",
"batuque",
"baubles",
"bay",
"bayou",
"bboy",
"bc",
"beach",
"beale",
"bean",
"bear",
"beast",
"beat",
"beata",
"beatdown",
"beats",
"beau",
"beauty",
"beber",
"bebop",
"because",
"beef",
"been",
"beer",
"beethoven",
"before",
"begin",
"beginning",
"behind",
"behold",
"beija",
"beijo",
"beim",
"being",
"beleza",
"belgian",
"believer",
"belki",
"bell",
"bell-like",
"bella",
"belle",
"belligerent",
"belly",
"belong",
"bemsha",
"bend",
"bendito",
"bene",
"beneath",
"benedictus",
"benevolent",
"benga",
"beni",
"benim",
"bent",
"berceuse",
"berg",
"berge",
"berimbau",
"besito",
"beso",
"besoin",
"beste",
"betcha",
"bethlehem",
"betrayal",
"betty",
"between",
"bewitched",
"beyaz",
"beyond",
"bhairavi",
"bhajan",
"bhala",
"bhalo",
"bhangra",
"bicycle",
"bien",
"bienvenue",
"biet",
"big",
"bila",
"bill",
"billo",
"billy",
"binaural",
"bintang",
"bird",
"birds",
"birmingham",
"birthday",
"bist",
"bitch",
"bitches",
"bitte",
"bitter",
"bittersweet",
"bitti",
"black",
"blackbird",
"blackout",
"blade",
"blanc",
"blanca",
"blank",
"blaskapelle",
"blaze",
"bleak",
"bleed",
"bleeding",
"bleib",
"bless",
"blessed",
"blessing",
"blijf",
"blind",
"blir",
"blissful",
"block",
"bloody",
"blow",
"bluegrass",
"blues",
"blues-rock",
"blustery",
"blut",
"boat",
"boda",
"body",
"boheme",
"bohemian",
"boiadeiro",
"boisterous",
"boku",
"bold",
"bolero",
"boliviano",
"boliyan",
"boll",
"bollywood",
"bomb",
"bomba",
"bombastic",
"bones",
"bong",
"bonito",
"bonjour",
"bonnie",
"bonny",
"bonus",
"boogaloo",
"boogie",
"boogie-woogie",
"book",
"boom",
"booming",
"boomy",
"boots",
"booty",
"bop",
"boquita",
"bored",
"borinquen",
"born",
"bornesange",
"borrachera",
"borracho",
"bosque",
"boss",
"bossa",
"boston",
"bota",
"botellas",
"bottle",
"bottom",
"bounce",
"bouncy",
"bourbon",
"bourr",
"bout",
"bow",
"box",
"boy",
"boyfriend",
"boys",
"boyz",
"brahms",
"brain",
"branca",
"brand",
"brandenburg",
"brash",
"brasil",
"brasileiro",
"brass",
"brassy",
"bravado",
"brave",
"brazil",
"brazilian",
"brazos",
"bread",
"break",
"breakbeat",
"breakcore",
"breakdown",
"breaking",
"breaks",
"breath",
"breathe",
"breathless",
"breathy",
"breeze",
"brega",
"breton",
"breve",
"bricks",
"bridge",
"brigas",
"bright",
"brill",
"bring",
"british",
"britpop",
"brittle",
"broadway",
"broke",
"brooding",
"brook",
"brooklyn",
"brostep",
"brother",
"brought",
"brown",
"bruja",
"brutal",
"bubble",
"bubblegum",
"buck",
"buddha",
"bude",
"budi",
"bueno",
"buenos",
"buffalo",
"bugle",
"building",
"bukan",
"bulan",
"buler",
"bulerias",
"bulgarian",
"bull",
"bullet",
"bunga",
"bunny",
"buon",
"buried",
"burn",
"burning",
"burrito",
"burro",
"bury",
"buscando",
"buss",
"butter",
"butterfly",
"byzantine",
"c-pop",
"c86",
"caballito",
"caballo",
"cabaret",
"cabe",
"cabeza",
"cabin",
"cachita",
"cada",
"cadenas",
"cadence",
"cafe",
"cajun",
"cake",
"cala",
"cali",
"caliente",
"california",
"call",
"calla",
"calle",
"calles",
"calling",
"calm",
"calming",
"calypso",
"cama",
"came",
"camina",
"caminito",
"camino",
"caminos",
"campana",
"campanas",
"campanitas",
"camping",
"campo",
"camptown",
"campy",
"cana",
"canadian",
"canal",
"cancao",
"canci",
"cancion",
"cand",
"candela",
"candle",
"candlelight",
"candy",
"cannonball",
"canon",
"canorous",
"cant",
"canta",
"cantante",
"cantar",
"cantata",
"cantate",
"cantautor",
"cante",
"canterbury",
"canticle",
"cantiga",
"canto",
"cantopop",
"cantor",
"canyon",
"canzon",
"canzona",
"canzone",
"cape",
"capitan",
"capoeira",
"cappella",
"capriccio",
"caprice",
"capricho",
"capricious",
"captain",
"captura",
"caravan",
"carefree",
"careless",
"carga",
"cari",
"caribbean",
"carillon",
"carinhoso",
"carioca",
"carita",
"carlo",
"carmen",
"carnatic",
"carnaval",
"carne",
"carnival",
"caro",
"carol",
"carolina",
"carretero",
"carry",
"carta",
"cartel",
"cartoonish",
"casa",
"cash",
"casinha",
"casita",
"castle",
"catala",
"catalan",
"catchy",
"catfish",
"cathartic",
"cathedral",
"cats",
"catstep",
"cattle",
"caught",
"causa",
"cause",
"cavalleria",
"cavalo",
"cave",
"ccm",
"ceilidh",
"celebration",
"celebratory",
"celestial",
"cello",
"celos",
"celoso",
"celtic",
"celui",
"cenizas",
"center",
"century",
"cerca",
"cerebral",
"cesta",
"chacarera",
"chaconne",
"chai",
"chain",
"chained",
"chak",
"chakra",
"chal",
"chalo",
"chama",
"chamame",
"chamber",
"champeta",
"champions",
"chan",
"chand",
"chandelier",
"chang",
"change",
"changed",
"chango",
"channel",
"chanson",
"chant",
"chante",
"chaos",
"chaparrita",
"chapel",
"chapo",
"chapter",
"charade",
"charan",
"charanga",
"charleston",
"charmaine",
"chase",
"chasing",
"chat",
"chavo",
"chce",
"check",
"cheek",
"cheerful",
"chega",
"cheiro",
"cherie",
"cherokee",
"cherry",
"chez",
"chhod",
"chica",
"chicago",
"chicano",
"chicas",
"chicha",
"chicken",
"child",
"child-like",
"children",
"chilean",
"chilena",
"chileno",
"chill",
"chilled",
"chillhop",
"chillout",
"chillstep",
"chillwave",
"chim",
"chime",
"chimes",
"chinatown",
"chinese",
"chinna",
"chiptune",
"chiquilla",
"chiquitita",
"choclo",
"chod",
"choir",
"chokhe",
"chokher",
"cholo",
"chopin",
"chopped",
"chora",
"choral",
"chorale",
"chorando",
"chorar",
"chori",
"chorinho",
"choro",
"chorus",
"chosen",
"chris",
"christ",
"christe",
"christelijk",
"christian",
"christopher",
"christus",
"chte",
"chula",
"chun",
"chunchaca",
"church",
"chutney",
"chuva",
"chuy",
"chuyen",
"cidade",
"ciebie",
"cielito",
"cinco",
"cindy",
"cine",
"cinq",
"cinta",
"cintaku",
"ciranda",
"circle",
"circles",
"circuit",
"circular",
"citt",
"city",
"ciudad",
"clair",
"claire",
"clap",
"clarinet",
"clarity",
"clark",
"clasico",
"class",
"classic",
"classical",
"classify",
"clave",
"clean",
"clear",
"clinical",
"clock",
"close",
"closer",
"closing",
"cloud",
"clouds",
"club",
"coast",
"cocaine",
"cocktail",
"coco",
"coda",
"coeur",
"coffee",
"coisa",
"coisas",
"cold",
"college",
"colombia",
"colombian",
"colombiano",
"colonel",
"colorado",
"colores",
"colors",
"columbus",
"comandante",
"comedy",
"comic",
"comigo",
"comin",
"coming",
"comme",
"commentary",
"common",
"communion",
"comp",
"compa",
"compadre",
"compadres",
"comparsa",
"compas",
"compassion",
"compay",
"complainte",
"complex",
"complextro",
"complicated",
"composition",
"compositional",
"compro",
"computer",
"concentrate",
"concentration",
"conception",
"concert",
"concertino",
"concerto",
"concierto",
"concise",
"concrete",
"condemned",
"condor",
"confesi",
"confession",
"confident",
"confirmation",
"confrontational",
"conga",
"congo",
"congratulations",
"conmigo",
"conscient",
"conscious",
"consejo",
"consejos",
"consonant",
"conta",
"contemporary",
"contigo",
"continental",
"continuous",
"contra",
"contrabando",
"control",
"conversa",
"cool",
"copacabana",
"coplas",
"coquette",
"cora",
"coracao",
"coraz",
"corazon",
"core",
"corn",
"corner",
"cornetas",
"corre",
"corrente",
"corrido",
"corrina",
"corrosion",
"corsican",
"cosa",
"cosas",
"cose",
"cosita",
"cosmic",
"cosmopolitan",
"cottage",
"cotton",
"countdown",
"counting",
"country",
"coup",
"coupe",
"courante",
"court",
"coventry",
"cover",
"coverchill",
"cowboy",
"cowboys",
"coyote",
"cradle",
"crash",
"crawl",
"cream",
"creation",
"creature",
"creatures",
"credits",
"credo",
"creep",
"creo",
"creole",
"crestina",
"cricket",
"crickets",
"cried",
"crimson",
"cripple",
"crisp",
"cristiano",
"cristo",
"croatian",
"crocodile",
"crois",
"crooked",
"cross",
"crossfire",
"crossing",
"crossover",
"crossroads",
"crow",
"crowd",
"crown",
"cruel",
"crunchy",
"crunk",
"crush",
"crust",
"cruz",
"crying",
"crystal",
"csak",
"cuando",
"cuarteto",
"cuatro",
"cuba",
"cuban",
"cubana",
"cubano",
"cubaton",
"cucaracha",
"cuckoo",
"cueca",
"cuerpo",
"culebra",
"cumbanchero",
"cumberland",
"cumbia",
"cumparsita",
"cumplea",
"cuoi",
"cuore",
"cupid",
"cura",
"curse",
"cyberpunk",
"czas",
"czech",
"czsk",
"daar",
"daddy",
"dado",
"dagen",
"daha",
"daily",
"dalam",
"dale",
"dallas",
"damage",
"dame",
"damn",
"damnation",
"damned",
"dance",
"dance-punk",
"danceable",
"dancehall",
"dancing",
"dang",
"dangdut",
"danger",
"dangerous",
"dani",
"danish",
"dank",
"danke",
"dann",
"danny",
"dans",
"dansa",
"dansband",
"danse",
"danseband",
"dansktop",
"danspunk",
"danza",
"dard",
"dare",
"dari",
"dark",
"darkness",
"darktown",
"darling",
"darn",
"darshan",
"daru",
"date",
"dawn",
"days",
"dchen",
"deal",
"dear",
"dearly",
"death",
"deathcore",
"deathgrind",
"debajo",
"decale",
"december",
"decir",
"deck",
"declamatory",
"dedicated",
"deep",
"deeper",
"deer",
"defiant",
"deilig",
"dein",
"deine",
"deixa",
"deixe",
"dejala",
"dejame",
"dejan",
"dejate",
"deje",
"dekho",
"deli",
"delicate",
"delight",
"delta",
"demain",
"dembow",
"demo",
"demon",
"demonic",
"demons",
"deniz",
"depend",
"depois",
"derecho",
"dernier",
"dert",
"desafinado",
"descant",
"descarga",
"desde",
"desejo",
"desenga",
"desert",
"deserve",
"desi",
"designer",
"desire",
"despacito",
"despedida",
"desperate",
"despertar",
"despierta",
"desprecio",
"desprecios",
"destino",
"destiny",
"destroy",
"destroyer",
"desvelo",
"detached",
"detour",
"detroit",
"deum",
"deus",
"deutsche",
"deutschland",
"deutschrap",
"deux",
"deuxi",
"deva",
"devi",
"devil",
"devotional",
"dhan",
"dhol",
"dhun",
"dialogue",
"diamond",
"diamonds",
"dias",
"dich",
"diciembre",
"dick",
"diddle",
"diddley",
"diego",
"dies",
"dieses",
"dieu",
"diferencia",
"different",
"difficult",
"digital",
"digitally",
"digno",
"dile",
"dime",
"dimmi",
"dina",
"dinah",
"dindi",
"dine",
"ding",
"dinner",
"dios",
"dippermouth",
"dire",
"directo",
"dirt",
"dirty",
"disco",
"discofox",
"disease",
"distance",
"distant",
"ditt",
"ditty",
"divertimento",
"divine",
"dixie",
"dixieland",
"dixit",
"django",
"dmv",
"doamne",
"dobro",
"doce",
"doch",
"doctor",
"does",
"dogs",
"doin",
"dois",
"dolce",
"doll",
"dolor",
"dolores",
"dolphin",
"domani",
"domine",
"dominicano",
"domino",
"dona",
"donald",
"donde",
"dong",
"donna",
"dont",
"doo-wop",
"doom",
"dope",
"dopo",
"dora",
"dost",
"double",
"douce",
"dove",
"downpour",
"downtempo",
"doxology",
"drag",
"dragon",
"drama",
"dramatic",
"drank",
"draw",
"dread",
"dreadlocks",
"dream",
"dreamed",
"dreaming",
"dreamo",
"dreams",
"dreamy",
"drei",
"dress",
"drift",
"drill",
"drink",
"drinking",
"drip",
"drive",
"driving",
"dromen",
"drone",
"droom",
"drop",
"drown",
"drowning",
"drug",
"druggy",
"drugs",
"drum",
"drumfunk",
"drums",
"drunk",
"drunken",
"duas",
"dub",
"dubbing",
"dublin",
"dubstep",
"dubsteppe",
"duel",
"duele",
"duerme",
"duet",
"duetto",
"dulce",
"dulcet",
"dulci",
"dumb",
"dung",
"dunia",
"duniya",
"duong",
"duranguense",
"dure",
"durga",
"duro",
"dusk",
"dust",
"dusty",
"dutch",
"dutty",
"dying",
"dynamic",
"dynamite",
"dzie",
"eagle",
"early",
"earnest",
"earth",
"earthquake",
"earthy",
"east",
"easy",
"ebm",
"ebullient",
"ecce",
"eccentric",
"ecco",
"echame",
"echo",
"eclectic",
"ecstatic",
"ectofolk",
"ecuadoria",
"ecuadorian",
"edelweiss",
"edge",
"edit",
"edited",
"edm",
"eens",
"eerie",
"effects",
"effervescent",
"efter",
"egal",
"egyptian",
"eight",
"eimai",
"einai",
"eine",
"einem",
"einer",
"einmal",
"eins",
"eira",
"eisai",
"ekki",
"elaborate",
"eleanor",
"electric",
"electro",
"electro-industrial",
"electronic",
"electronica",
"electropop",
"electropunk",
"elegant",
"elegiac",
"elegie",
"elegy",
"elevator",
"elise",
"elke",
"ella",
"elle",
"elli",
"elsker",
"elveda",
"embora",
"embrace",
"embraceable",
"emerald",
"emily",
"emmanuel",
"emo",
"emotion",
"empty",
"en",
"enamor",
"enamorado",
"enamore",
"enas",
"enchanted",
"encore",
"encuentro",
"enda",
"ende",
"ending",
"endless",
"endlich",
"enemy",
"energetic",
"energy",
"enfant",
"enfants",
"enga",
"engel",
"engine",
"england",
"english",
"enigmatic",
"enka",
"enna",
"ennai",
"enorme",
"enough",
"enquanto",
"ensam",
"ensemble",
"enta",
"entehno",
"enter",
"entered",
"entering",
"entertainer",
"entertaining",
"entha",
"entierren",
"entre",
"entrega",
"envidia",
"environmental",
"epic",
"epicore",
"epilog",
"epilogue",
"epistrophy",
"era",
"eres",
"erinnerung",
"erotic",
"erotica",
"erste",
"escape",
"escuela",
"eski",
"espa",
"espana",
"espanol",
"esperan",
"esperando",
"esperanza",
"espiritu",
"esque",
"esquecer",
"essa",
"esse",
"esta",
"estaba",
"estar",
"estas",
"este",
"esti",
"estilo",
"esto",
"estou",
"estoy",
"estrada",
"estrela",
"estrella",
"eternal",
"eternity",
"ethereal",
"etsi",
"etude",
"etudes",
"euphonic",
"euphonious",
"euphoric",
"euro",
"eurodance",
"europop",
"euroska",
"even",
"evening",
"evensong",
"evergreen",
"everybody",
"everyone",
"evidence",
"evil",
"exactly",
"exalted",
"exciting",
"exclusive",
"exit",
"exodus",
"exotic",
"exotica",
"experimental",
"exploring",
"explosive",
"extended",
"extra",
"extrano",
"extroverted",
"exuberant",
"eyes",
"face",
"faces",
"facile",
"facts",
"fade",
"faded",
"fado",
"fair",
"fairest",
"fairy",
"fais",
"fait",
"faith",
"faithful",
"fake",
"fall",
"fallaste",
"fallen",
"faller",
"falling",
"falls",
"falsche",
"false",
"familia",
"family",
"famous",
"fancy",
"fandango",
"fandangos",
"fanfare",
"fang",
"fantaisie",
"fantasia",
"fantasie",
"fantasma",
"fantasmas",
"fantasy",
"fare",
"farewell",
"farmer",
"farther",
"farv",
"farvel",
"fascination",
"fashioned",
"fast",
"fate",
"father",
"faust",
"faut",
"favela",
"favorite",
"faze",
"fazer",
"fear",
"fearless",
"feather",
"featuring",
"feed",
"feelin",
"feeling",
"feelings",
"feels",
"feest",
"feiti",
"fekete",
"felek",
"felicidade",
"fell",
"femme",
"feng",
"feral",
"feria",
"fernando",
"festa",
"feuer",
"fever",
"feverish",
"fica",
"ficar",
"fick",
"fiddle",
"fidget",
"fidgety",
"fiebre",
"fiel",
"fields",
"fierce",
"fiery",
"fiesta",
"fight",
"filho",
"fill",
"fille",
"filles",
"filmi",
"fils",
"filter",
"filthstep",
"final",
"finale",
"find",
"finding",
"fine",
"fingerstyle",
"finnish",
"finns",
"fireball",
"fireflies",
"firework",
"fireworks",
"firm",
"five",
"flamenco",
"flamingo",
"flashback",
"flashy",
"flawless",
"flesh",
"fleur",
"flex",
"flick",
"flickan",
"flight",
"float",
"floating",
"flor",
"flow",
"flower",
"flowers",
"flowing",
"flucht",
"fluid",
"flute",
"fluxwork",
"flying",
"focus",
"foggy",
"folclore",
"folhas",
"folk",
"folk-pop",
"folklore",
"folkmusik",
"folks",
"follow",
"following",
"folsom",
"fond",
"fonte",
"food",
"fool",
"foolish",
"fools",
"football",
"footsteps",
"footwork",
"fora",
"force",
"foreign",
"forest",
"forget",
"forgiven",
"forgot",
"forgotten",
"forr",
"forro",
"forte",
"fortune",
"forty",
"forward",
"forza",
"found",
"fountain",
"four",
"fourth",
"fractured",
"frag",
"fragile",
"francais",
"franco-flemish",
"francoton",
"frankie",
"frau",
"frauen",
"freak",
"freakbeat",
"free",
"freedom",
"freestyle",
"freewheeling",
"freeze",
"frei",
"freight",
"freiheit",
"french",
"fresh",
"freunde",
"frevo",
"friend",
"friends",
"frog",
"frozen",
"fuck",
"fucking",
"fuego",
"fuel",
"fuente",
"fuera",
"fuerza",
"fuga",
"fugue",
"fuiste",
"full",
"full-toned",
"fun",
"fundo",
"funeral",
"funereal",
"funk",
"funky",
"funny",
"fuori",
"further",
"fusion",
"fussball",
"future",
"futurepop",
"g",
"gaan",
"gaat",
"gabba",
"gabino",
"gabru",
"gadis",
"gaita",
"galactic",
"galante",
"galbi",
"galego",
"galicia",
"galician",
"gallina",
"gallito",
"gallo",
"gallos",
"galway",
"game",
"gamla",
"gamle",
"gammal",
"gammel",
"ganesh",
"ganesha",
"gang",
"ganga",
"gangsta",
"gangster",
"ganja",
"ganz",
"garage",
"garden",
"garip",
"garota",
"gata",
"gates",
"gather",
"gathering",
"gato",
"gaucho",
"gavil",
"gaviota",
"gavotte",
"gayatri",
"gaze",
"gbvfi",
"gdzie",
"gece",
"geceler",
"geef",
"geek",
"geen",
"gegen",
"geheimnis",
"geht",
"geld",
"gelin",
"gelir",
"generation",
"genesis",
"genom",
"gens",
"gente",
"gentle",
"george",
"georgia",
"german",
"gern",
"geschichte",
"gestern",
"gets",
"gettin",
"getting",
"ghadi",
"gham",
"ghazal",
"ghetto",
"ghettotech",
"ghir",
"ghost",
"ghosts",
"ghum",
"giac",
"giant",
"giati",
"gibi",
"giddy",
"gigi",
"gigue",
"gimme",
"gioconda",
"giorni",
"giorno",
"giovanni",
"girl",
"girlfriend",
"girls",
"gitana",
"gitme",
"giving",
"glad",
"glade",
"glam",
"glass",
"glaub",
"gleeful",
"glitch",
"glocken",
"gloomy",
"gloria",
"glorious",
"glory",
"gnawa",
"goddess",
"gods",
"godt",
"goed",
"goin",
"gold",
"golden",
"goldene",
"golondrina",
"golondrinas",
"golpe",
"gone",
"gong",
"gonna",
"goodbye",
"goodnight",
"gora",
"goregrind",
"gospel",
"gosto",
"gostoso",
"gothic",
"gott",
"gotta",
"govind",
"govinda",
"gozar",
"grace",
"graceful",
"gracia",
"gracias",
"gran",
"granada",
"grand",
"grand-sounding",
"grande",
"grass",
"grateful",
"graveyard",
"gravity",
"greasy",
"greater",
"greek",
"green",
"greensleeves",
"grenade",
"grey",
"grim",
"grimas",
"grime",
"grind",
"grindcore",
"gritty",
"groove",
"groovy",
"grote",
"group",
"grown",
"grunge",
"grupera",
"gruperas",
"guadalajara",
"guaguanc",
"guaguanco",
"guajira",
"guajiro",
"guantanamera",
"guarda",
"guardian",
"guardians",
"gucci",
"guds",
"guero",
"guerra",
"guess",
"guidance",
"guide",
"guiding",
"guilty",
"guitar",
"guitarra",
"guns",
"gurbet",
"guru",
"gusta",
"gustas",
"gusto",
"gute",
"guten",
"guter",
"gutsy",
"gutta",
"gyal",
"gypsy",
"habana",
"habanera",
"habe",
"habibi",
"habits",
"hadi",
"hail",
"haitian",
"hajde",
"hakol",
"hala",
"half",
"halk",
"hall",
"halleluja",
"hallelujah",
"halloween",
"halo",
"halt",
"haluun",
"hamba",
"hamen",
"hana",
"handel",
"hands",
"handsome",
"hang",
"hanging",
"hank",
"hanuman",
"hanya",
"happened",
"happy",
"hard",
"hardcore",
"harder",
"hardest",
"hardstyle",
"hardtechno",
"hare",
"hari",
"hark",
"harlem",
"harmonic",
"harmonica",
"harmonious",
"harmony",
"harp",
"harpsichord",
"harry",
"harsh",
"hart",
"harte",
"hasret",
"hast",
"hasta",
"hate",
"haters",
"hati",
"haunted",
"haunting",
"haus",
"hava",
"havana",
"havet",
"hawa",
"hawaii",
"hawaiian",
"hayat",
"hazme",
"head",
"headbanging",
"healing",
"hear",
"heard",
"heartaches",
"heartbreak",
"heartland",
"hearts",
"heat",
"heaven",
"heavenly",
"heavy",
"hebrew",
"hedonistic",
"heer",
"heimat",
"hela",
"hele",
"helga",
"hell",
"hella",
"hello",
"help",
"helpless",
"helt",
"hemel",
"hermano",
"hermanos",
"hermoso",
"hero",
"heroes",
"heroic",
"herr",
"herre",
"herz",
"herzen",
"hetki",
"heut",
"heute",
"hicaz",
"hickory",
"hidden",
"hideaway",
"hidup",
"hier",
"high-flown",
"high-sounding",
"higher",
"highest",
"highland",
"highlife",
"highway",
"hijos",
"hikari",
"hilang",
"hill",
"hillbilly",
"hills",
"himlen",
"himmel",
"himmelen",
"himno",
"hindi",
"hindustan",
"hindustani",
"hino",
"hip",
"hiplife",
"hist",
"historia",
"historic",
"historical",
"historically",
"hit",
"hjem",
"hliche",
"hobo",
"hoch",
"hodie",
"hoerspiel",
"hojas",
"hoje",
"hokey",
"hola",
"hole",
"holiday",
"holla",
"holler",
"hollow",
"holly",
"hollywood",
"holy",
"hombre",
"homem",
"homenagem",
"homenaje",
"homie",
"homies",
"hommage",
"homophonic",
"honestly",
"honey",
"honeyed",
"honeysuckle",
"hong",
"honky",
"honor",
"hoochie",
"hood",
"hook",
"hooked",
"hoor",
"hop",
"hope",
"hopelessly",
"hora",
"horas",
"hori",
"horn",
"horror",
"horrorcore",
"horse",
"hosanna",
"hospital",
"hostile",
"hotel",
"hound",
"hours",
"house",
"houston",
"howling",
"huayno",
"huella",
"human",
"humorous",
"humpty",
"hund",
"hungarian",
"hungry",
"hunt",
"hunter",
"hurricane",
"hurry",
"hurt",
"hurts",
"husn",
"hustla",
"hustle",
"huwag",
"hvad",
"hvem",
"hvis",
"hvor",
"hvorfor",
"hymn",
"hymn-like",
"hype",
"hyped",
"hyper",
"hyphy",
"hypnotic",
"hypocrites",
"ibig",
"ibiza",
"icelandic",
"idiot",
"idol",
"igen",
"igjen",
"igual",
"ikaw",
"ikke",
"ilha",
"ilta",
"imagine",
"imma",
"immer",
"immortal",
"imperial",
"impossible",
"impressions",
"impromptu",
"improvisation",
"improvisatory",
"improvised",
"inch",
"independent",
"india",
"indian",
"indiana",
"indie",
"indiecoustica",
"indietronica",
"indita",
"indo",
"indonesian",
"indorock",
"indulgent",
"industrial",
"inen",
"infantil",
"infernal",
"infierno",
"infinite",
"inflection",
"informed",
"informer",
"inga",
"ingen",
"ingenting",
"inget",
"ingin",
"inima",
"inmortales",
"inna",
"innan",
"inner",
"innocence",
"innocent",
"inolvidable",
"insensatez",
"insensitive",
"inside",
"inst",
"instrumental",
"insular",
"inte",
"intelligent",
"intense",
"interlude",
"interludio",
"intermezzo",
"intermission",
"internal",
"international",
"interview",
"intimate",
"intonation",
"intricate",
"intro",
"introdu",
"introduction",
"introduzione",
"introspective",
"invasion",
"invierno",
"invisible",
"invocation",
"iraqi",
"irgendwann",
"irish",
"iron",
"ironic",
"irreverent",
"isang",
"ishq",
"iskelma",
"islamic",
"island",
"islands",
"isle",
"isolation",
"israel",
"israeli",
"isso",
"issues",
"italian",
"itsy",
"itunes",
"ivan",
"ivot",
"j-ambient",
"j-core",
"j-dance",
"j-division",
"j-idol",
"j-indie",
"j-metal",
"j-pop",
"j-poprock",
"j-rap",
"j-reggae",
"j-rock",
"jaan",
"jaane",
"jack",
"jacques",
"jagat",
"jagdish",
"jahre",
"jail",
"jako",
"jalousie",
"jam",
"jamaica",
"jamaican",
"jamais",
"jamas",
"jambalaya",
"jame",
"james",
"janani",
"jane",
"jangan",
"jangle",
"janji",
"japanese",
"jaro",
"jatt",
"jatuh",
"jaunty",
"java",
"javier",
"jawaiian",
"jaya",
"jazz",
"jazztronica",
"jazzy",
"jede",
"jeden",
"jeder",
"jedna",
"jednou",
"jeep",
"jeepers",
"jeevan",
"jefe",
"jeito",
"jelly",
"jenny",
"jersey",
"jerusalem",
"jesse",
"jest",
"jeste",
"jestem",
"jesu",
"jesus",
"jeszcze",
"jete",
"jetzt",
"jeune",
"jewish",
"jezus",
"jig",
"jigs",
"jimmy",
"jingle",
"jitterbug",
"joan",
"jodi",
"joga",
"jogo",
"john",
"johnny",
"johnson",
"jolie",
"jolly",
"jones",
"joseph",
"jota",
"jotain",
"joulun",
"jouluy",
"jour",
"journey",
"jours",
"jouw",
"jovem",
"joven",
"jovial",
"joyful",
"joyous",
"jsem",
"juan",
"juana",
"jubilate",
"judaica",
"judas",
"jude",
"judgement",
"juego",
"juice",
"juicy",
"juke",
"jukebox",
"julen",
"julia",
"july",
"jump",
"jumpstyle",
"june",
"jungle",
"junior",
"juste",
"justice",
"k-hop",
"k-indie",
"k-pop",
"kaadhal",
"kabarett",
"kabhi",
"kada",
"kadhal",
"kahan",
"kahit",
"kaikki",
"kaise",
"kako",
"kaksi",
"kalam",
"kali",
"kalle",
"kama",
"kana",
"kane",
"kann",
"kanna",
"kano",
"kansas",
"kapak",
"kapitel",
"kara",
"karadeniz",
"karaoke",
"kardia",
"karneval",
"karo",
"kash",
"kasih",
"kathe",
"katy",
"kaunis",
"kaze",
"kei",
"kein",
"keine",
"keiner",
"kell",
"kembali",
"kenangan",
"keno",
"kentucky",
"keroncong",
"kerran",
"keyboard",
"khali",
"khaliji",
"khayal",
"khong",
"kick",
"kids",
"kiedy",
"kiitos",
"kijk",
"kill",
"killa",
"killer",
"killing",
"kimi",
"kind",
"kinda",
"kinder",
"kinderchor",
"kinderen",
"kinderlein",
"kindermusik",
"kindie",
"kinetic",
"kingdom",
"kingston",
"kirtan",
"kisah",
"kisi",
"kismat",
"kiss",
"kissed",
"kit",
"kita",
"kite",
"kitty",
"kiwi",
"kizomba",
"klapa",
"klavierst",
"klein",
"kleine",
"kleiner",
"kleines",
"klezmer",
"kling",
"knew",
"knock",
"knotty",
"knowing",
"kolme",
"komm",
"kommer",
"kommet",
"kommt",
"kompa",
"komt",
"konzert",
"kool",
"kopf",
"koplo",
"kore",
"korean",
"kotha",
"koto",
"kraj",
"krieg",
"krishna",
"kuch",
"kuchh",
"kudi",
"kuin",
"kuka",
"kundalini",
"kung",
"kurdish",
"kuri",
"kush",
"kwaito",
"kylm",
"kyrie",
"la",
"laat",
"laatste",
"laboratorio",
"lado",
"lagan",
"lagu",
"laid-back",
"laiko",
"laisse",
"laissez",
"lake",
"lakh",
"lakshmi",
"lala",
"lama",
"lamb",
"lamberto",
"lament",
"lamento",
"lamentos",
"lamp",
"land",
"lande",
"landslide",
"lang",
"langt",
"languid",
"lapsille",
"large",
"larghetto",
"largo",
"lark",
"lass",
"lasst",
"late",
"lately",
"latin",
"latino",
"latvian",
"laudate",
"laulu",
"laurita",
"lavender",
"lawdy",
"lay",
"layla",
"lazy",
"lds",
"lead",
"lean",
"leaning",
"learn",
"learning",
"leather",
"leave",
"leaves",
"leaving",
"lebanese",
"leben",
"leef",
"legacy",
"legend",
"legions",
"leil",
"leila",
"leise",
"lejos",
"lekker",
"lembran",
"lesen",
"lester",
"lets",
"letter",
"lettera",
"letters",
"letting",
"lettre",
"letzte",
"leven",
"levenslied",
"lever",
"leyenda",
"leyla",
"liar",
"libertad",
"liberty",
"library",
"libre",
"licht",
"lick",
"liebe",
"lieber",
"liebeslied",
"liebestraum",
"lied",
"lieder",
"liedermacher",
"lief",
"liefde",
"lies",
"lieve",
"lifestyle",
"lift",
"lifted",
"liga",
"light",
"lighters",
"lighthouse",
"lights",
"liian",
"likes",
"lilith",
"lilla",
"lille",
"lilt",
"lilting",
"limbo",
"limburg",
"limehouse",
"limosna",
"linda",
"line",
"lion",
"liquid",
"lisboa",
"list",
"listen",
"listening",
"lite",
"liten",
"literate",
"literature",
"liturgical",
"liturgy",
"live",
"lively",
"livet",
"livets",
"livin",
"living",
"livre",
"liza",
"ljubav",
"ljubavi",
"ljus",
"lkommen",
"llama",
"llaman",
"llanto",
"llar",
"llaves",
"lleg",
"llegando",
"llora",
"llorando",
"llorar",
"lloraras",
"llorare",
"llores",
"llorona",
"lo-fi",
"loaded",
"lobo",
"loca",
"loch",
"locked",
"loco",
"locura",
"lodie",
"lofty",
"lohengrin",
"loin",
"lola",
"loma",
"london",
"lonely",
"lonesome",
"longe",
"longing",
"looking",
"loopable",
"lord",
"lords",
"lose",
"losing",
"loss",
"lotus",
"louca",
"louco",
"loucura",
"louder",
"louie",
"louis",
"louise",
"louisiana",
"lounge",
"loup",
"louvor",
"loved",
"lovely",
"lover",
"lovers",
"loves",
"lovesick",
"loving",
"lowdown",
"loyalty",
"lska",
"lskar",
"ltima",
"ltimo",
"luces",
"lucia",
"lucifer",
"lucille",
"luck",
"lucky",
"lucy",
"lude",
"ludes",
"lugar",
"luk",
"lula",
"lullaby",
"lumi",
"luna",
"lush",
"lustige",
"lute",
"lyric",
"lyrical",
"maailma",
"maailman",
"maak",
"maan",
"maar",
"macabre",
"macarena",
"macdonald",
"mach",
"machine",
"macht",
"madama",
"madame",
"made",
"mademoiselle",
"madre",
"madrid",
"madrigal",
"maggie",
"magic",
"magical",
"magnetic",
"magnificat",
"magniloquent",
"magnolia",
"magnum",
"magyar",
"maha",
"mahal",
"mahalakshmi",
"mahi",
"maid",
"main",
"maine",
"maior",
"mais",
"maison",
"majestic",
"majesty",
"major",
"making",
"makossa",
"mala",
"malague",
"malaguena",
"malai",
"malam",
"malaysian",
"maldita",
"malevolent",
"mali",
"mama",
"maman",
"mambo",
"mame",
"mami",
"mamma",
"mana",
"manasa",
"mand",
"manda",
"mande",
"mandopop",
"mandy",
"manele",
"manera",
"mangal",
"mangalam",
"manguebeat",
"manh",
"manha",
"manhattan",
"mani",
"manic",
"manicero",
"manila",
"manisero",
"mann",
"mano",
"manon",
"mansion",
"mantra",
"manuel",
"manuela",
"many",
"maple",
"maps",
"mara",
"marca",
"march",
"marcha",
"marche",
"marches",
"marching",
"marcia",
"marcus",
"mardi",
"mare",
"margie",
"mari",
"maria",
"mariachi",
"mariage",
"marie",
"marimba",
"mario",
"mariposa",
"maris",
"mark",
"market",
"marriage",
"married",
"marry",
"marsch",
"martha",
"martial",
"mary",
"masal",
"mash",
"masih",
"mask",
"maskandi",
"masquerade",
"mass",
"massage",
"massive",
"mast",
"master",
"masterful",
"mata",
"mater",
"mathcore",
"matia",
"mattinata",
"mausam",
"mauvais",
"mavi",
"mawal",
"maya",
"maybe",
"mazurka",
"mbalax",
"mean",
"meandering",
"meat",
"mechanical",
"media",
"medicine",
"medieval",
"medio",
"medita",
"meditation",
"meditative",
"mediterranean",
"medium",
"medley",
"medo",
"meen",
"meet",
"meeting",
"megamix",
"megh",
"meglio",
"mehr",
"mein",
"meine",
"meio",
"meisje",
"meistersinger",
"mejor",
"melancholia",
"melancholy",
"mele",
"melhor",
"mellifluous",
"mellow",
"melod",
"melodia",
"melodic",
"melodie",
"melodious",
"melody",
"memories",
"memory",
"memphis",
"menacing",
"mene",
"mengapa",
"meni",
"menina",
"menino",
"mensch",
"mental",
"mentirosa",
"menuet",
"menuetto",
"mera",
"merci",
"mercy",
"mere",
"merengue",
"meri",
"mero",
"merry",
"merseybeat",
"mesmo",
"mess",
"messa",
"message",
"messe",
"messiah",
"messianic",
"messy",
"mestre",
"metal",
"metalcore",
"metro",
"metropopolis",
"mets",
"meus",
"mexe",
"mexicali",
"mexican",
"mexicano",
"mexico",
"miami",
"mich",
"michael",
"michelle",
"microhouse",
"middle",
"midnight",
"midsummer",
"miedo",
"miei",
"mientras",
"mies",
"mighty",
"mignon",
"mijn",
"mike",
"milagre",
"milano",
"milenberg",
"miles",
"milestones",
"military",
"milk",
"million",
"milonga",
"mimpi",
"mina",
"mind",
"minden",
"mindful",
"mindfulness",
"mine",
"minh",
"minha",
"minhas",
"mini",
"minimal",
"minor",
"mint",
"minuetto",
"minuit",
"mira",
"miracle",
"mirror",
"mirrors",
"mirza",
"miserere",
"misery",
"misirlou",
"miss",
"missa",
"missing",
"mission",
"mississippi",
"misterioso",
"mistletoe",
"misty",
"miten",
"mitran",
"mitt",
"mixed",
"mizrahi",
"mnie",
"mobb",
"moda",
"moderato",
"modern",
"modernism",
"modinha",
"moeder",
"moet",
"mogu",
"mohabbat",
"mohan",
"mohe",
"moja",
"moje",
"mojo",
"moliendo",
"molly",
"molto",
"moment",
"monastic",
"mond",
"monde",
"mondo",
"mone",
"moneda",
"moner",
"money",
"mong",
"monk",
"monkey",
"mono",
"monophonic",
"monsieur",
"monster",
"monsters",
"montuno",
"monumental",
"mood",
"mooi",
"mooie",
"moombahton",
"moonglow",
"moonlight",
"moonlit",
"morbid",
"morena",
"morenita",
"morgen",
"morgon",
"mori",
"morna",
"morning",
"morro",
"mort",
"mortal",
"morte",
"mosaico",
"most",
"moten",
"motet",
"motets",
"mother",
"motherless",
"motivation",
"motivo",
"moto",
"motoric",
"motown",
"mots",
"moulin",
"mountain",
"mountains",
"mouth",
"move",
"moved",
"moves",
"movie",
"moving",
"mozart",
"mpb",
"muchacha",
"muchachita",
"muchacho",
"muchachos",
"muero",
"muerte",
"mueve",
"muistojen",
"mujer",
"mujeres",
"mujhe",
"mulher",
"munda",
"mundart",
"mundo",
"murda",
"murder",
"murga",
"muriendo",
"murio",
"muruga",
"music",
"musica",
"musical",
"musicality",
"musiikkia",
"musik",
"musikkorps",
"musique",
"muskrat",
"must",
"musta",
"muzak",
"muzica",
"muziek",
"muzigi",
"mwana",
"mwen",
"mysterious",
"mystery",
"mystic",
"mystical",
"naal",
"naam",
"naan",
"naar",
"nach",
"nachdi",
"nachna",
"nacht",
"nacional",
"nada",
"nade",
"nadie",
"nagasaki",
"nagumomu",
"nahi",
"nahin",
"naija",
"naima",
"naive",
"naked",
"namah",
"namo",
"nana",
"nang",
"nani",
"nannu",
"napoletana",
"napoletano",
"narayana",
"narcotic",
"nardis",
"narrative",
"nasheed",
"nashville",
"nasyid",
"natale",
"nation",
"national",
"native",
"nativista",
"natsu",
"natt",
"natty",
"natural",
"natureza",
"navidad",
"ncia",
"ncio",
"ndas",
"nder",
"nearer",
"nearness",
"nebo",
"necesito",
"neden",
"neel",
"neela",
"neem",
"nega",
"negative",
"negra",
"negrita",
"negrito",
"neighborhood",
"neka",
"nella",
"nema",
"nemesis",
"nena",
"neo",
"neo-pagan",
"neo-progressive",
"neo-psychedelic",
"neo-rockabilly",
"neo-synthpop",
"neo-trad",
"neoclassical",
"neofolk",
"neomelodici",
"neon",
"neotango",
"nepali",
"nervous",
"nessun",
"nessuno",
"neue",
"neurofunk",
"neva",
"nevertheless",
"new",
"news",
"next",
"ngay",
"ngel",
"ngen",
"nguoi",
"nhau",
"nhung",
"nice",
"nicht",
"nichts",
"niem",
"niemals",
"niente",
"niet",
"niets",
"nieuwe",
"nieves",
"nigdy",
"nigerian",
"nigga",
"niggas",
"niggaz",
"nightingale",
"nightmare",
"nigin",
"nihilistic",
"nihta",
"niin",
"nije",
"nimm",
"nincs",
"nine",
"ningu",
"ninja",
"ninna",
"ninne",
"ninnu",
"ninos",
"nirvana",
"nisam",
"nisi",
"nite",
"nl",
"nler",
"no",
"nobody",
"noch",
"noche",
"noches",
"nocturnal",
"nocturne",
"nocturnes",
"nocturno",
"noir",
"noise",
"noite",
"noites",
"nomas",
"nombre",
"nomine",
"nooit",
"noor",
"nordic",
"norteno",
"north",
"northern",
"norwegian",
"nosotros",
"nossa",
"nosso",
"nostalgia",
"nostalgias",
"nostalgic",
"notre",
"notte",
"notturno",
"nous",
"nouveau",
"nouvelle",
"nova",
"november",
"novia",
"novo",
"nowhere",
"nozze",
"nste",
"ntate",
"nu",
"nuages",
"nube",
"nuestra",
"nuestro",
"nueva",
"nueve",
"nuevo",
"nuff",
"nuit",
"nuits",
"numb",
"number",
"numbers",
"nunc",
"nunca",
"nursery",
"nutcracker",
"nwobhm",
"nwothm",
"oasis",
"oberkrainer",
"oboe",
"obsesi",
"obsesion",
"obsession",
"occhi",
"ocean",
"oceans",
"october",
"ode",
"oder",
"odio",
"ogni",
"ohio",
"ohne",
"oi",
"ojal",
"ojitos",
"ojos",
"oklahoma",
"oktoberfest",
"old",
"old-time",
"oldu",
"olen",
"oleo",
"olet",
"olha",
"olhos",
"olmaz",
"olsun",
"olur",
"olvidar",
"olvido",
"ominous",
"omnes",
"omri",
"on",
"once",
"onda",
"onde",
"onder",
"onkel",
"onnen",
"onze",
"open",
"opening",
"opera",
"operatic",
"operation",
"operetta",
"opm",
"optimistic",
"opulent",
"oraci",
"oratory",
"orchestra",
"orchestral",
"orchids",
"ordinary",
"organ",
"organic",
"orgcore",
"oriental",
"oriente",
"orilla",
"orleans",
"ornate",
"ornithology",
"orotund",
"orquesta",
"orquestas",
"oshare",
"ostrock",
"otacore",
"otan",
"otello",
"other",
"otra",
"otro",
"oude",
"outlaw",
"outra",
"outraged",
"outrageous",
"outro",
"outside",
"outsider",
"outta",
"ouverture",
"overture",
"pachanga",
"paddy",
"padre",
"paean",
"pagan",
"pagliacci",
"pago",
"pagode",
"paha",
"pahi",
"paillarde",
"pain",
"pais",
"paix",
"pajarillo",
"pajarito",
"pajaro",
"pakhi",
"pakistani",
"palabras",
"palace",
"palavras",
"pale",
"palha",
"pali",
"palo",
"paloma",
"palomita",
"pame",
"pampa",
"pan",
"panama",
"panamanian",
"panchhi",
"pane",
"panic",
"panis",
"pannonica",
"panpipe",
"papa",
"paper",
"papuri",
"para",
"parab",
"parade",
"paradise",
"paraguaya",
"paraguayan",
"paranoid",
"pari",
"paris",
"parle",
"parole",
"parranda",
"parrandero",
"part",
"partida",
"parting",
"partita",
"party",
"pasi",
"pasodobles",
"pass",
"passacaglia",
"passage",
"passe",
"passing",
"passion",
"passionate",
"passione",
"past",
"pastor",
"pastoral",
"pastorale",
"pater",
"path",
"patio",
"patria",
"patriotic",
"pavan",
"pavana",
"pavane",
"payback",
"payphone",
"pays",
"peace",
"peaceful",
"peanut",
"peces",
"peda",
"pedra",
"peer",
"pega",
"pena",
"penas",
"pennies",
"penny",
"pentru",
"people",
"peor",
"peque",
"per",
"perch",
"percussive",
"perd",
"perfect",
"perfidia",
"performance",
"perky",
"permanent",
"pernambuco",
"pero",
"perpetual",
"perro",
"persian",
"perto",
"peruana",
"peruvian",
"pescador",
"pet",
"petit",
"petite",
"petits",
"peut",
"peux",
"phantom",
"philosophical",
"phir",
"phire",
"phoenix",
"phone",
"phool",
"photograph",
"pianissimo",
"piano",
"piccola",
"piccolo",
"picking",
"picture",
"pictures",
"pido",
"piece",
"pieces",
"piedad",
"piedmont",
"pieni",
"piensa",
"pigs",
"pikku",
"pilgrim",
"pimp",
"pimpin",
"pind",
"pine",
"pines",
"pink",
"pinoy",
"piosenka",
"piping",
"pirate",
"pista",
"pitk",
"pitter",
"pixie",
"piya",
"place",
"places",
"plague",
"plain",
"plainsong",
"plaintive",
"plaisir",
"plan",
"planet",
"planets",
"plastic",
"plata",
"play",
"playa",
"playback",
"playful",
"playing",
"plaza",
"pleasant-sounding",
"please",
"pleasure",
"plebes",
"pledging",
"plegaria",
"plena",
"pleure",
"plug",
"plus",
"png",
"pobre",
"pode",
"poder",
"poderoso",
"poeta",
"poetic",
"poetry",
"poignant",
"poinciana",
"point",
"pois",
"poison",
"police",
"polish",
"polka",
"pollera",
"polly",
"polo",
"polonaise",
"polska",
"polynesian",
"polyphonic",
"polyphony",
"pompeii",
"pompous",
"poncho",
"pont",
"ponte",
"ponto",
"pool",
"poor",
"pop",
"poppin",
"poptimism",
"popular",
"populara",
"popurr",
"popurri",
"porque",
"porro",
"port",
"portal",
"porte",
"portland",
"portrait",
"portugues",
"portuguese",
"positive",
"posledn",
"possessed",
"post-bop",
"post-disco",
"post-doom",
"post-grunge",
"post-hardcore",
"post-metal",
"post-punk",
"post-rock",
"post-romantic",
"post-screamo",
"post-teen",
"pouco",
"pound",
"pour",
"pourquoi",
"pourri",
"pout",
"povo",
"power",
"power-pop",
"powerful",
"powwow",
"praeludium",
"praise",
"praises",
"pray",
"prayer",
"prece",
"precious",
"precipitation",
"preciso",
"preghiera",
"prel",
"prelude",
"preludes",
"preludio",
"prem",
"prema",
"premi",
"premier",
"prenda",
"prends",
"presen",
"presence",
"presencia",
"presentaci",
"presentacion",
"president",
"preso",
"pressure",
"presto",
"pretty",
"preverb",
"pride",
"prieta",
"prima",
"primo",
"prin",
"prince",
"printemps",
"prison",
"prisoner",
"privave",
"priya",
"problems",
"prod",
"prodigal",
"product",
"prog",
"progressive",
"project",
"prolog",
"prologue",
"promenade",
"protect",
"protopunk",
"proud",
"provocative",
"psalm",
"psalmen",
"psy",
"psych",
"psych-rock",
"psychedelic",
"psychill",
"psycho",
"psychobilly",
"psytrance",
"pub",
"public",
"pueblito",
"pueblo",
"puede",
"puedo",
"puerta",
"puerto",
"puff",
"pull",
"pulse",
"pulsing",
"pump",
"punch",
"punjab",
"punjabi",
"punk",
"punto",
"puppy",
"pura",
"pure",
"puro",
"purple",
"pursuit",
"push",
"pussy",
"putt",
"pyaar",
"pyar",
"qawwali",
"qing",
"qual",
"qualcosa",
"qualquer",
"quam",
"quan",
"quand",
"quando",
"quanto",
"quartet",
"quarto",
"quase",
"quatre",
"quatuor",
"quebecois",
"queda",
"queen",
"quejas",
"quella",
"quello",
"quem",
"quen",
"quer",
"querendo",
"querer",
"quero",
"quest",
"questa",
"questo",
"quick",
"quien",
"quieras",
"quiere",
"quieren",
"quieres",
"quiero",
"quiet",
"quintet",
"quirky",
"quisiera",
"quoi",
"r&b",
"raag",
"raat",
"rabba",
"rabbit",
"race",
"racks",
"radetzky",
"radhe",
"radio",
"radioactive",
"raga",
"ragam",
"ragazza",
"ragazzo",
"rage",
"ragga",
"ragtime",
"rai",
"raid",
"railroad",
"rain",
"rainbow",
"raindrops",
"rainfall",
"rainforest",
"rainha",
"rainy",
"raise",
"raja",
"rakkauden",
"rakkaus",
"rama",
"rambling",
"rambunctious",
"rame",
"ramshackle",
"ranchera",
"rancho",
"rang",
"rani",
"rap",
"rare",
"rasta",
"rastafari",
"rastaman",
"ration",
"raucous",
"rave",
"rayito",
"raza",
"re:techno",
"reach",
"reading",
"ready",
"realest",
"really",
"reason",
"rebel",
"rebelde",
"rebellious",
"rebetiko",
"reborn",
"reci",
"recitation",
"recitative",
"reckless",
"reconsider",
"record",
"recorda",
"recordando",
"recorded",
"recorder",
"recuerdo",
"recuerdos",
"red",
"redeemed",
"redeemer",
"redneck",
"reel",
"reels",
"refined",
"reflections",
"reflective",
"refrain",
"regalame",
"regalo",
"regarde",
"reggae",
"reggaeton",
"regina",
"regional",
"regn",
"regresa",
"regretful",
"regrets",
"reigns",
"reiki",
"reina",
"reise",
"rejoice",
"relax",
"relaxation",
"relaxative",
"relaxed",
"relaxing",
"religieux",
"religion",
"reloj",
"remaster",
"remastered",
"remasterizado",
"remembering",
"remix",
"renaissance",
"rendez",
"renunciaci",
"represent",
"reprise",
"requiem",
"rerecorded",
"reserved",
"resistance",
"resolute",
"resonance",
"resonant",
"resounding",
"respect",
"rest",
"reste",
"restful",
"restless",
"restrained",
"resurrection",
"retirada",
"retour",
"retro",
"return",
"returning",
"reunion",
"reunited",
"revelation",
"revenge",
"reverberant",
"reverent",
"reviens",
"revival",
"revoluci",
"revolution",
"reyna",
"rhapsodic",
"rhapsody",
"rhythm",
"rhythmic",
"rhythmical",
"rican",
"ricercar",
"rich",
"rico",
"riddim",
"ride",
"rider",
"ridin",
"riding",
"rien",
"rif",
"righteous",
"rigoletto",
"rinc",
"rindu",
"ringing",
"ringtone",
"rio",
"riot",
"rique",
"rise",
"rising",
"ritmo",
"rito",
"ritu",
"ritual",
"river",
"riveting",
"rldens",
"rlek",
"rleken",
"rlekens",
"rlighed",
"rlighet",
"road",
"roar",
"robinson",
"robot",
"rock",
"rock-and-roll",
"rockabilly",
"rockers",
"rocket",
"rockin",
"rockin’",
"rocky",
"roda",
"rodeo",
"roll",
"rollicking",
"rollin",
"rolling",
"romance",
"romanian",
"romantic",
"romanticism",
"romantico",
"romanza",
"romanze",
"romeo",
"rompe",
"rond",
"ronda",
"ronde",
"rondeau",
"rondo",
"roof",
"rooftop",
"room",
"rooster",
"root",
"roots",
"rosa",
"rosalie",
"rosamunde",
"rosario",
"rosary",
"rose",
"rosetta",
"rough",
"round",
"rousing",
"roving",
"rowdy",
"royal",
"rsta",
"rste",
"rude",
"rudolph",
"ruff",
"rugged",
"rule",
"rules",
"rumba",
"rumble",
"run",
"runaway",
"running",
"rural",
"rush",
"rushing",
"russian",
"russiavision",
"rustic",
"sabah",
"sabes",
"sabi",
"sabor",
"sabr",
"sabroso",
"sach",
"sacred",
"sacrifice",
"sacrificio",
"sad",
"sada",
"safar",
"safe",
"saga",
"said",
"sail",
"sailing",
"saint",
"saints",
"sais",
"sakhi",
"sakura",
"salam",
"sale",
"sally",
"salmo",
"salsa",
"saludo",
"salute",
"salvadorena",
"salvation",
"salve",
"sama",
"samba",
"samba-enredo",
"same",
"samen",
"samma",
"samo",
"sampai",
"san",
"sana",
"sancta",
"sanctus",
"sand",
"sandy",
"sang",
"sangen",
"sangre",
"sangue",
"sannonce",
"sano",
"sans",
"sant",
"santa",
"santafesina",
"santiago",
"santo",
"sapo",
"sara",
"sarabande",
"saraswati",
"sarcastic",
"sardonic",
"sarho",
"satan",
"satellite",
"satin",
"satirical",
"satisfied",
"satisfy",
"satu",
"saturday",
"saudade",
"saudades",
"savage",
"save",
"saved",
"savior",
"saviour",
"savoy",
"sawan",
"saxophone",
"sayang",
"sayonara",
"scarborough",
"scared",
"scary",
"scattered",
"scene",
"schatten",
"schau",
"schei",
"schenk",
"scherzo",
"schlaf",
"schlager",
"school",
"schranz",
"schubert",
"schwarz",
"schwarze",
"scientist",
"score",
"scorecore",
"scotland",
"scottish",
"scrapple",
"scratch",
"scream",
"screaming",
"screamo",
"screw",
"screwed",
"search",
"searching",
"seas",
"seasons",
"sechs",
"second",
"secret",
"seek",
"sega",
"segura",
"sehnsucht",
"seigneur",
"sein",
"seis",
"selalu",
"selamat",
"self",
"selfish",
"sempre",
"sena",
"send",
"sende",
"senden",
"senhor",
"senhora",
"seni",
"senin",
"seninle",
"senor",
"sensation",
"sense",
"sensiz",
"sensual",
"sentimental",
"sentimiento",
"senza",
"september",
"sera",
"sere",
"serenade",
"serenata",
"serene",
"serenity",
"serialism",
"serious",
"serpent",
"sertanejo",
"session",
"sete",
"seul",
"sevda",
"sevdah",
"sevdal",
"sevdi",
"seven",
"severe",
"sevgi",
"sevgilim",
"sevilla",
"sevillanas",
"sexo",
"sexual",
"sexy",
"shaam",
"shab",
"shabad",
"shadow",
"shadows",
"shady",
"shake",
"shakti",
"shall",
"shalom",
"shame",
"shang",
"shangri",
"shanti",
"shanty",
"shape",
"share",
"shattered",
"shawty",
"sheep",
"sheik",
"shell",
"shen",
"shenandoah",
"shepherd",
"sher",
"shibuya-kei",
"shim",
"shimmer",
"shimmering",
"shine",
"shir",
"shit",
"shiv",
"shiva",
"shock",
"shoegaze",
"shoo",
"shoot",
"shop",
"shopping",
"shore",
"short",
"shot",
"shots",
"shou",
"should",
"shout",
"show",
"showdown",
"shree",
"shri",
"shrill",
"shut",
"shyam",
"siamo",
"siboney",
"sica",
"sick",
"side",
"sidi",
"sidste",
"siempre",
"siente",
"siento",
"sign",
"sigo",
"silence",
"silencio",
"silent",
"silhouettes",
"silly",
"silm",
"silver",
"silver-toned",
"silvery",
"simple",
"sina",
"sinaloense",
"since",
"sind",
"sinfonia",
"sinfonie",
"sinfonietta",
"sing",
"singable",
"singer-songwriter",
"singers",
"singh",
"singing",
"single",
"sinhala",
"sinua",
"siren",
"siri",
"sista",
"siste",
"sister",
"sita",
"sitar",
"sitting",
"siva",
"size",
"ska",
"skal",
"skate",
"sketch",
"skiffle",
"skin",
"skinhead",
"skip",
"skit",
"skool",
"skull",
"sky",
"skylark",
"slack-key",
"slam",
"slaughter",
"slave",
"slaves",
"slavonic",
"slayer",
"sleaze",
"sleazy",
"sleep",
"sleeping",
"sleepless",
"sleepy",
"sleigh",
"slick",
"slide",
"slokam",
"slovenian",
"slow",
"sludge",
"small",
"smash",
"smile",
"smoke",
"smokestack",
"smooth",
"snake",
"snakes",
"snide",
"snow",
"soaring",
"sober",
"sobre",
"soca",
"soft",
"softly",
"sogni",
"sogno",
"soir",
"sola",
"solamente",
"solar",
"soldiers",
"sole",
"soledad",
"soleil",
"solen",
"soll",
"solo",
"somber",
"sombra",
"sombras",
"somebody",
"someday",
"someone",
"sometimes",
"somewhere",
"sommar",
"sommaren",
"sommer",
"somos",
"son",
"sonata",
"sonatas",
"sonate",
"sonatina",
"sonatine",
"song",
"songful",
"songs",
"sonho",
"sonic",
"sonidera",
"sonidos",
"sono",
"sonorous",
"sons",
"soothing",
"sophisticated",
"soprano",
"sora",
"sorry",
"sotto",
"soukous",
"soul",
"soulful",
"sound",
"soundtrack",
"source",
"sous",
"south",
"southern",
"southside",
"souvenir",
"space",
"spacey",
"spanish",
"sparkling",
"sparse",
"speak",
"speaks",
"special",
"speed",
"speedcore",
"spend",
"spicy",
"spider",
"spin",
"spirit",
"spirits",
"spiritual",
"spit",
"splashing",
"spontaneous",
"spooky",
"spoonful",
"spotlight",
"sprawling",
"sprightly",
"spring",
"springlike",
"spune",
"spytrack",
"squeeze",
"srce",
"sree",
"stabat",
"stablemates",
"staccato",
"stad",
"stadt",
"stagger",
"stai",
"stanbul",
"stand",
"standards",
"standing",
"stanna",
"star",
"stardust",
"starry",
"stars",
"start",
"state",
"stately",
"steady",
"steal",
"steam",
"steampunk",
"steel",
"steelpan",
"steh",
"steht",
"stella",
"stentorian",
"stentorious",
"step",
"stepped",
"stereo",
"sterne",
"stico",
"stilla",
"stille",
"stillness",
"stin",
"stis",
"stockholm",
"stole",
"stomp",
"ston",
"stone",
"stoner",
"stor",
"stora",
"store",
"storia",
"storm",
"stormy",
"story",
"stra",
"straight",
"strain",
"strange",
"stranger",
"strangers",
"strawberry",
"streektaal",
"street",
"street-smart",
"streets",
"streichquartett",
"stress",
"stretch",
"stride",
"striding",
"strike",
"string",
"strip",
"strong",
"stronger",
"stuck",
"stupid",
"sturm",
"style",
"stylish",
"suara",
"suavecito",
"sublime",
"suburban",
"such",
"suena",
"suffer",
"suffocating",
"sufi",
"sufro",
"sugar",
"sugary",
"suicide",
"suis",
"suite",
"sulla",
"sultan",
"summertime",
"summery",
"sunday",
"sunflower",
"sung",
"sungura",
"sunlight",
"sunny",
"suno",
"sunrise",
"sunset",
"sunshine",
"sunt",
"suomi",
"super",
"superman",
"superstar",
"suppertime",
"supreme",
"surat",
"sure",
"surf",
"surrender",
"surrey",
"surrounded",
"survivor",
"surya",
"susie",
"suspenseful",
"suspicious",
"svart",
"swag",
"swaggering",
"swamp",
"swan",
"swanee",
"swang",
"sweat",
"swedish",
"sweet",
"sweet-sounding",
"sweet-toned",
"sweeter",
"sweetheart",
"swing",
"swingin",
"swinging",
"swiss",
"switch",
"sword",
"symphonic",
"symphonie",
"symphonious",
"symphony",
"synthpop",
"system",
"szerelem",
"taas",
"tabla",
"table",
"taboo",
"tack",
"tagalog",
"tahdon",
"tais",
"tait",
"taivas",
"takes",
"tako",
"talent",
"talk",
"talkin",
"talking",
"tambor",
"tambores",
"tambourine",
"tamil",
"tammy",
"tamo",
"tango",
"tangos",
"tanha",
"tannenbaum",
"tannh",
"tant",
"tante",
"tantra",
"tantric",
"tanz",
"tanzen",
"tanzlmusi",
"tanzt",
"tarana",
"tarantella",
"tare",
"tata",
"tate",
"tausend",
"tchaikovsky",
"teach",
"team",
"tear",
"teardrops",
"tears",
"tebe",
"tebi",
"tech",
"technical",
"techno",
"tecnobrega",
"teddy",
"teenage",
"teenager",
"teil",
"tejano",
"tekno",
"telefone",
"telepop",
"tema",
"temple",
"tempo",
"temps",
"temptation",
"tender",
"tenderly",
"tengo",
"tenho",
"tennessee",
"tenor",
"tense",
"tenu",
"tequila",
"tera",
"tere",
"teresa",
"teri",
"terra",
"terre",
"terror",
"terrorcore",
"terug",
"testamento",
"teufel",
"teus",
"texas",
"thai",
"thang",
"thank",
"thankful",
"thanks",
"theatrical",
"thee",
"thelo",
"them",
"thema",
"theme",
"then",
"therapy",
"these",
"theta",
"thillana",
"thin",
"thing",
"things",
"think",
"thinking",
"thinks",
"third",
"thirty",
"those",
"thou",
"thought",
"thoughtful",
"thoughts",
"thousand",
"thrash",
"threatening",
"thriller",
"thrilling",
"throat",
"throbbing",
"through",
"throw",
"throwed",
"thug",
"thuggish",
"thula",
"thumri",
"thunder",
"thunderstorm",
"thung",
"thuong",
"tiada",
"tian",
"tibetan",
"ticket",
"tickle",
"tico",
"tiden",
"tief",
"tiempo",
"tiempos",
"tiene",
"tienes",
"tierra",
"tiger",
"tight",
"tijd",
"till",
"tillana",
"timba",
"timbre",
"timeless",
"tin",
"tine",
"ting",
"tinh",
"tinta",
"tiny",
"tipica",
"tipico",
"tipo",
"tira",
"tired",
"tiroler",
"tisket",
"titanium",
"title",
"titles",
"toccata",
"toch",
"toda",
"today",
"todo",
"todos",
"toen",
"together",
"tokyo",
"toma",
"tomake",
"tomar",
"tomay",
"tombeau",
"tomorrow",
"tonada",
"tonal",
"tone",
"tonight",
"tonite",
"tonk",
"tonto",
"toolroom",
"toon",
"topsy",
"tora",
"tore",
"torero",
"tormenta",
"tormento",
"torn",
"torna",
"toro",
"torture",
"tosca",
"total",
"touch",
"touched",
"tour",
"tous",
"tout",
"toute",
"towards",
"tower",
"town",
"toxic",
"tracestep",
"track",
"tracks",
"trad",
"tradicional",
"traditional",
"tragedia",
"tragic",
"tragoudi",
"traigo",
"trail",
"train",
"training",
"trance",
"trang",
"tranquil",
"tranquility",
"transformation",
"trap",
"trappin",
"tras",
"trash",
"trashy",
"traum",
"traviata",
"treble",
"trees",
"tren",
"tres",
"tribal",
"tribute",
"trickling",
"trio",
"trip",
"trippin",
"trippy",
"tristan",
"triste",
"tristes",
"tristeza",
"triumphant",
"trival",
"trois",
"troisi",
"trombone",
"trong",
"trop",
"tropical",
"troppo",
"trouble",
"trova",
"trovatore",
"truck",
"true",
"truly",
"trumpet",
"trunk",
"trust",
"truth",
"trying",
"tryna",
"tubular",
"tude",
"tudes",
"tudo",
"tuga",
"tuhan",
"tujhe",
"tule",
"tulsa",
"tumbling",
"tumhare",
"tumi",
"tumid",
"tune",
"tuned",
"tuneful",
"tunes",
"tuning",
"tunnel",
"turandot",
"turbo",
"turbulent",
"turf",
"turkey",
"turkish",
"turnt",
"turntablism",
"tusen",
"tutta",
"tutte",
"tutto",
"tuuli",
"tuxedo",
"tuyo",
"twee",
"twenty",
"twilight",
"twin",
"twinkle",
"twist",
"twisted",
"twitch",
"tylko",
"type",
"tytt",
"tzadik",
"ugly",
"uk",
"ultimate",
"umbanda",
"unchained",
"uncle",
"uncompromising",
"undecided",
"underground",
"underneath",
"understated",
"unholy",
"unison",
"united",
"unity",
"universal",
"universe",
"universitario",
"unknown",
"unnai",
"unos",
"unplugged",
"unser",
"unsere",
"unsettling",
"unter",
"until",
"untitled",
"unto",
"untuk",
"uomo",
"up",
"up-tempo",
"upbeat",
"uplift",
"uplifting",
"uptempo",
"uptown",
"urban",
"urbano",
"urgent",
"urlaub",
"uruguaya",
"uruguayo",
"usted",
"utan",
"uthando",
"uusi",
"uzbek",
"uzun",
"vaan",
"vaca",
"vackra",
"vader",
"vagy",
"vagyok",
"vain",
"vais",
"vale",
"valencia",
"valentine",
"valeu",
"vallenato",
"valley",
"vals",
"valsa",
"valse",
"valzer",
"vamo",
"vamonos",
"vamos",
"vampire",
"vanavond",
"vanha",
"vapor",
"vaporwave",
"vara",
"varf",
"variation",
"variationen",
"variations",
"varje",
"varnam",
"vart",
"vater",
"vazge",
"vbs",
"veces",
"vecina",
"veena",
"veer",
"vegas",
"veil",
"veinte",
"veit",
"velha",
"velho",
"velkommen",
"veneno",
"venezuelan",
"venganza",
"vengeance",
"venha",
"veni",
"vent",
"vente",
"vento",
"vera",
"verano",
"verdammt",
"verde",
"verden",
"verdi",
"vereda",
"vergeet",
"verliebt",
"verliefd",
"verr",
"vers",
"verse",
"versi",
"version",
"verso",
"versos",
"vertigo",
"verum",
"very",
"vestido",
"vete",
"veux",
"viaggio",
"viaje",
"viata",
"vibe",
"vibrant",
"vibraphone",
"victim",
"victory",
"vida",
"video",
"vieja",
"viejo",
"viel",
"vielleicht",
"viene",
"vieni",
"vienna",
"viens",
"viento",
"vier",
"viernes",
"vietnamese",
"vieux",
"view",
"viii",
"viimeinen",
"viking",
"vila",
"vilken",
"vill",
"villa",
"village",
"villancicos",
"villera",
"vine",
"vino",
"vintage",
"viola",
"violence",
"violent",
"violin",
"vira",
"viral",
"virgen",
"virgencita",
"virginia",
"virgo",
"virile",
"virtual",
"visa",
"visceral",
"visual",
"vita",
"viva",
"vivace",
"vivaldi",
"vive",
"viver",
"vivere",
"vivir",
"vivo",
"vivre",
"vocal",
"vocaloid",
"voce",
"voel",
"voglio",
"voice",
"voices",
"voici",
"void",
"voidgaze",
"voix",
"volare",
"volatile",
"volim",
"volksmusik",
"volkstumliche",
"volta",
"volte",
"volume",
"volver",
"vontade",
"voodoo",
"voor",
"vorrei",
"voudrais",
"vous",
"vreau",
"vuelo",
"vuelta",
"vuelva",
"vuelve",
"vulgar",
"waar",
"waarom",
"wabash",
"wade",
"wafa",
"wahrheit",
"wailing",
"wait",
"waiting",
"wake",
"wala",
"walang",
"walkin",
"walking",
"walls",
"waltz",
"walzer",
"wandering",
"wang",
"wann",
"warble",
"warm",
"warning",
"warrior",
"warriors",
"warum",
"washed",
"washington",
"wasser",
"waste",
"wasted",
"watch",
"watching",
"water",
"waterloo",
"waterside",
"wave",
"waves",
"wayward",
"wearmada",
"weary",
"wedding",
"weeping",
"weer",
"wees",
"weet",
"weight",
"weihnacht",
"weihnachten",
"weil",
"weird",
"weisst",
"weit",
"welcome",
"well",
"welle",
"welsh",
"welt",
"wenn",
"were",
"wereld",
"west",
"western",
"whatcha",
"whatever",
"wheels",
"whenever",
"wherever",
"which",
"while",
"whimsical",
"whine",
"whiskey",
"whispering",
"whiter",
"whole",
"wiat",
"wicked",
"wieder",
"wiegenlied",
"wiem",
"wife",
"wild",
"wildwood",
"william",
"willie",
"willkommen",
"willow",
"wind",
"window",
"winds",
"wine",
"wing",
"winged",
"wings",
"winter",
"wintry",
"wird",
"wise",
"wish",
"wishing",
"wistful",
"witch",
"witches",
"within",
"without",
"witty",
"wohin",
"woke",
"wolf",
"wolverine",
"wolves",
"woman",
"women",
"wonder",
"wonderful",
"wonky",
"woodland",
"woodnote",
"woods",
"woody",
"word",
"words",
"work",
"working",
"workout",
"works",
"world",
"worlds",
"worried",
"worship",
"worst",
"worth",
"worthy",
"would",
"wrath",
"wreck",
"wrecking",
"wrist",
"writing",
"wrong",
"wry",
"wszystko",
"xiang",
"xiao",
"xico",
"xote",
"xuan",
"yaad",
"yaar",
"yaaro",
"yalan",
"yalanc",
"yall",
"yaln",
"yaman",
"yana",
"yand",
"yang",
"yankee",
"yaral",
"yardbird",
"yarim",
"ycie",
"ye",
"yeah",
"year",
"yearning",
"years",
"yegua",
"yehi",
"yeled",
"yellow",
"yeni",
"yesh",
"yesterday",
"yesterdays",
"yesu",
"yeter",
"yeux",
"yine",
"ying",
"yksi",
"yksin",
"yoga",
"yollar",
"york",
"yorum",
"youm",
"youre",
"youth",
"youths",
"yugoslav",
"yume",
"yung",
"zaman",
"zamba",
"zame",
"zapstep",
"zara",
"zauberfl",
"zehn",
"zeit",
"zero",
"zgar",
"zijn",
"zillertal",
"zillertaler",
"zim",
"zindagi",
"zing",
"zion",
"zivot",
"zlerin",
"znam",
"zolang",
"zolo",
"zombie",
"zomer",
"zonder",
"zone",
"zouglou",
"zouk",
"zydeco",
}
| [
"mwiens91@gmail.com"
] | mwiens91@gmail.com |
984d471f394ee4c497bf778e4fabb2af107c42c0 | da739696c3c88e760d0adb60f31fe069341ecb20 | /backend/home/migrations/0002_load_initial_data.py | 2ee56b403f0d4780b8f43156c355882de841197f | [] | no_license | crowdbotics-apps/app-23018 | 27b1cd94baeb226117429ecdf149bb1970e85c2d | 844910b8f1c6e50609bec1b2f8b2bdaf4f490230 | refs/heads/master | 2023-01-19T19:58:19.772854 | 2020-11-27T20:04:01 | 2020-11-27T20:04:01 | 316,589,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from django.db import migrations
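# Data migration: seeds the default CustomText, HomePage and Site rows for the
# Crowdbotics-generated "app" project right after the initial schema migration.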
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "app"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">app</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "app-23018.botics.co"
site_params = {
"name": "app",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
09bccd733a4e5bc6a895e0032bb27e2a2bd0ca26 | 9e1df555176bae216828c404ad7290c2eb030cbf | /pl_examples/bug_report_model.py | dbea2013d11104b37b432cca31d57808e7b7e328 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | shijianjian/pytorch-lightning | e11be4d4926a1a0c8f605e596bec19926d476876 | b6f3cf5e52dddedec6f7b3e85c0702b75907452c | refs/heads/master | 2023-03-02T14:58:54.139540 | 2021-02-10T05:38:23 | 2021-02-10T05:38:23 | 318,134,795 | 1 | 0 | Apache-2.0 | 2020-12-03T09:05:46 | 2020-12-03T09:05:45 | null | UTF-8 | Python | false | false | 4,217 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------
# --------------------------------------------
# --------------------------------------------
# USE THIS MODEL TO REPRODUCE A BUG YOU REPORT
# --------------------------------------------
# --------------------------------------------
# --------------------------------------------
import os
import torch
from torch.utils.data import Dataset
from pytorch_lightning import Trainer, LightningModule
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
"""
Testing PL Module
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def step(self, x):
x = self.layer(x)
out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
return out
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_step_end(self, training_step_outputs):
return training_step_outputs
def training_epoch_end(self, outputs) -> None:
torch.stack([x["loss"] for x in outputs]).mean()
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"x": loss}
def validation_epoch_end(self, outputs) -> None:
torch.stack([x['x'] for x in outputs]).mean()
def test_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"y": loss}
def test_epoch_end(self, outputs) -> None:
torch.stack([x["y"] for x in outputs]).mean()
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
# NOTE: If you are using a cmd line to run your script,
# provide the cmd line as below.
# opt = "--max_epochs 1 --limit_train_batches 1".split(" ")
# parser = ArgumentParser()
# args = parser.parse_args(opt)
def run_test():
class TestModel(BoringModel):
def on_train_epoch_start(self) -> None:
print('override any method to prove your bug')
# fake data
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
val_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
test_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model, train_data, val_data)
trainer.test(test_dataloaders=test_data)
if __name__ == '__main__':
run_test()
| [
"noreply@github.com"
] | shijianjian.noreply@github.com |
05fd5796d8e76b96a95e09044e3489a193ec8ee4 | c7d6c70132d626f7c572ecc3d74fc117cacc88bf | /eventory/ext/discord/compat.py | b3046a4e72145d75142a94e7c0c4e605b30afa0f | [
"MIT"
] | permissive | siku2/Eventory | b19a79e020fa8a25145889a64bba83c759e6c288 | 8a88b3328ae83b369a185a1ea266031e9eef04e8 | refs/heads/master | 2020-03-07T18:54:39.815670 | 2018-06-24T22:48:42 | 2018-06-24T22:48:42 | 127,656,364 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,690 | py | """This module serves to bridge the differences between the async and rewrite Discord.py versions."""
import logging
from typing import Callable, Dict, Union
import discord
from discord import Client, Colour, Embed, Message, User
from discord.embeds import EmptyEmbed
from discord.ext.commands import Context
from discord.ext.commands.view import StringView
log = logging.getLogger(__name__)
_REWRITE = discord.version_info[:3] >= (1, 0, 0)
if _REWRITE:
log.debug("Using the rewrite version of Discord.py, thank you!")
from discord import DMChannel, TextChannel
DiscordTextChannel = Union[TextChannel, DMChannel]
async def send_message(client: Client, channel: DiscordTextChannel, *args, **kwargs) -> Message:
return await channel.send(*args, **kwargs)
async def edit_message(client: Client, message: Message, *args, **kwargs) -> Message:
return await message.edit(*args, **kwargs)
async def wait_for_message(client: Client, check: Callable[[Message], bool] = None) -> Message:
return await client.wait_for("message", check=check)
async def get_context(client: Client, msg: Message) -> Context:
return await client.get_context(msg)
else:
import warnings
warnings.warn(
"It seems that you're not using the Discord.py rewrite. This extension is written for the rewrite version of Discord.py so it doesn't "
"necessarily run on your version", ImportWarning)
from discord import PrivateChannel, Channel
DiscordTextChannel = Union[PrivateChannel, Channel]
async def send_message(client: Client, channel: DiscordTextChannel, *args, **kwargs) -> Message:
return await client.send_message(channel, *args, **kwargs)
async def edit_message(client: Client, message: Message, *args, **kwargs) -> Message:
return await client.edit_message(message, *args, **kwargs)
async def wait_for_message(client: Client, check: Callable[[Message], bool] = None) -> Message:
return await client.wait_for_message(check=check)
async def get_context(client: Client, msg: Message) -> Context:
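        # Fallback for the pre-rewrite library, which has no Bot.get_context():
        # build the Context by hand: skip the bot's own messages, strip the
        # command prefix, read the invoked word and resolve it to a command.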
view = StringView(msg.content)
ctx = Context(prefix=None, view=view, bot=client, message=msg)
if client._skip_check(msg.author.id, client.user.id):
return ctx
prefix = await client._get_prefix(msg)
invoked_prefix = prefix
if isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
invoked_prefix = discord.utils.find(view.skip_string, prefix)
if invoked_prefix is None:
return ctx
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = client.all_commands.get(invoker)
return ctx
async def add_embed(client: Union[Client, Context], msg: Union[Context, Message, str] = None, description: Union[str, int] = EmptyEmbed,
colour: Union[int, Colour] = EmptyEmbed, *, author: Union[str, Dict, User] = None, footer: Union[str, Dict] = None, **kwargs):
"""Add an Embed to a message.
Args:
client: Discord client
msg: Message to attach the Embed to. You may also pass a Context for convenience.
description: Description of the Embed
colour: Colour for the Embed
author: Author of the Embed.
Providing a string merely sets the name of the author, the dictionary is fed directly to the set_author method and when provided with a
User it uses the name and the avatar_url.
footer: When provided with a string it uses it as the text for the footer and a dictionary is passed to the set_footer function.
"""
if isinstance(client, Context):
ctx = client
client = ctx.bot
if isinstance(msg, str):
colour = description
description = msg
msg = ctx.message
elif not msg:
msg = ctx.message
if isinstance(msg, Context):
msg = msg.message
em = Embed(description=description, colour=colour, **kwargs)
if author:
if isinstance(author, dict):
em.set_author(**author)
elif isinstance(author, User):
em.set_author(name=author.name, icon_url=author.avatar_url)
else:
em.set_author(name=author)
if footer:
if isinstance(footer, dict):
em.set_footer(**footer)
else:
em.set_footer(text=footer)
if msg.author.id == client.user.id:
await edit_message(client, msg, embed=em)
else:
await send_message(client, msg.channel, embed=em)
| [
"siku2@outlook.de"
] | siku2@outlook.de |
9a3fbbf5f56ecbc4200b39bba863e8bb5e2e19ac | 303c941a88a4f8a4bf2d958457719efb08a5a30f | /fail/bj_1115.py | df3aee67f38c8002932b7ea7d26c40ae9e3c69f2 | [] | no_license | Kim-Taesu/Algorithm_python | 4b8d8445a23463c3885ec75dd2a7c275f46c1f3f | 1d358eb7fe4bd04b6ed0ec3754e8be08eece9278 | refs/heads/master | 2020-06-10T17:36:07.260572 | 2020-03-16T05:53:01 | 2020-03-16T05:53:01 | 193,693,829 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import sys
from itertools import permutations
sys.setrecursionlimit(10 ** 6)
input = sys.stdin.readline
N = int(input())
P = list(map(int, input().strip().split(' ')))
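# Brute force: enumerate every ordering of the indices 1..N-1, rebuild the array
# each ordering induces (compute_origin_p) and count how many entries differ
# from the given P, keeping the smallest such count.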
perfect_p_list = set(permutations([i for i in range(1, N)], N - 1))
min_count = sys.maxsize
def compute_origin_p(p_tmp):
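    # Rebuild the length-N array implied by the ordering p_tmp: slot 0 takes the
    # first element, and each element's slot takes its successor in the ordering.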
result = [0] * N
result[0] = p_tmp[0]
for t in range(len(p_tmp) - 1):
result[p_tmp[t]] = p_tmp[t + 1]
return result
for perfect_p_tmp in perfect_p_list:
origin_p = compute_origin_p(perfect_p_tmp)
diff_count = 0
for index in range(N):
if origin_p[index] != P[index]:
diff_count += 1
min_count = min(min_count, diff_count)
print(min_count)
| [
"dkdldoafotn@naver.com"
] | dkdldoafotn@naver.com |
a213b5557a2c932b30db4d8c8d0090a87238a01b | 699abe83fa9d142dcff2cc551d4ac052f7778058 | /real_estate/spiders/otodom.py | 5c22c9b3fef5f231270b2c7ab20b391fdb0df6c8 | [] | no_license | whosethere/otodom_crawler_scraper | 055313b9b47c93454d3b795712a2fa5830562bbe | d97a34fa7ad0c7b05f89d30d5d6436cf6b329a34 | refs/heads/master | 2022-06-25T11:19:37.368177 | 2020-05-09T10:52:25 | 2020-05-09T10:52:25 | 262,549,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,284 | py | import scrapy
from scrapy.linkextractors import LinkExtractor
import json
from pymongo import MongoClient
import base64
import io
from real_estate.items import Otodom
from scrapy.loader import ItemLoader
import datetime
import time
import re
class OtodomSpider(scrapy.Spider):
name = 'otodom'
allowed_domains = ['otodom.pl']
start_urls = ['https://www.otodom.pl/wynajem/mieszkanie/?nrAdsPerPage=72']
def parse(self, response):
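        # Every ad on the results page is an <article> carrying its URL in
        # data-url; follow each ad, then move on to the next results page if any.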
ogloszenie = response.xpath('//article/@data-url').extract()
for url_ogloszenia in ogloszenie:
yield scrapy.Request(url_ogloszenia,
callback=self.parse_ogloszenie,
meta={'url_ogloszenia':url_ogloszenia})
next_page = response.xpath('//li[@class="pager-next"]/a/@href').extract_first()
if next_page:
yield scrapy.Request(response.urljoin(next_page), callback = self.parse)
def parse_ogloszenie(self, response):
link_ogloszenia = response.meta['url_ogloszenia']
typ_oferty = response.xpath('//div[@class="css-1gjwmw9"]/text()').extract()
tytul_ogloszeia = response.xpath('//div[@class="css-1ld8fwi"]/text()').extract()
        lokalizacja = response.xpath('//a[@class="css-12hd9gg"]/text()').extract()  # TODO: post-process the location text
cena = response.xpath('//div[@class="css-1vr19r7"]/text()').extract()
# odds_away = match.xpath('normalize-space(./div/div[2]/market-selections/rate-button[2]/button)').extract_first()
czynsz_dodatkowo = ""
liczba_pokoi =""
licza_pieter =""
ogrzewanie =""
kaucja =""
rodzaj_zabudowy =""
material_budynku =""
stan_wykonczenia =""
powierzchnia =""
pietro =""
okna =""
dostepne_od =""
szczegoly_ogloszenia = response.xpath('normalize-space(//div[@class="css-1ci0qpi"]/ul/li)')
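        # Each detail <li> renders as "Label:<strong>value</strong>"; strip the
        # tags, split on ":" and route the value to the matching field variable.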
for szczegol in response.xpath('(//div[@class="css-1ci0qpi"]/ul/li)'):
szczegol = szczegol.extract()
szczegol = szczegol.replace("<li>", "").replace("<strong>", "").replace("</li>", "").replace("</strong>", "")
co,jakie = szczegol.split(":")
if co == "Czynsz - dodatkowo":
                czynsz_dodatkowo = jakie
if co == "Kaucja":
kaucja = jakie
if co == "Powierzchnia":
powierzchnia = jakie
if co == "Liczba pokoi":
liczba_pokoi = jakie
if co == "Rodzaj zabudowy":
rodzaj_zabudowy = jakie
if co == "Piętro":
pietro = jakie
if co == "Liczba pięter":
                licza_pieter = jakie
if co == "Materiał budynku":
material_budynku = jakie
if co == "Okna":
okna = jakie
if co == "Ogrzewanie":
ogrzewanie = jakie
if co == "Stan wykończenia":
stan_wykonczenia = jakie
if co == "Dostępne od":
dostepne_od = jakie
numer_oferty = response.xpath('//div[@class="css-kos6vh"]/text()').extract_first()
numer_oferty = numer_oferty.split(":")[1]
kiedy_dodano = response.xpath('//div[@class="css-lh1bxu"]/text()')[0].extract()
kiedy_aktualizowano = response.xpath('//div[@class="css-lh1bxu"]/text()')[1].extract()
# tytul_ogloszeia = response.xpath('//div[@class="css-1ld8fwi"]/text()').extract()
        # lokalizacja = response.xpath('//a[@class="css-12hd9gg"]/text()').extract()  # TODO: text processing
# cena = response.xpath('//div[@class="css-1vr19r7"]/text()').extract()
# czynsz_dodatkowo = ""
# liczba_pokoi =""
# licza_pieter =""
# ogrzewanie =""
# kaucja =""
# rodzaj_zabudowy =""
# material_budynku =""
# stan_wykonczenia =""
# powierzchnia =""
# pietro =""
# okna =""
# dostepne_od =""
loader = ItemLoader(item=Otodom(), response=response)
loader.add_value('numer_oferty', numer_oferty)
loader.add_value('kiedy_dodano', kiedy_dodano)
loader.add_value('kiedy_aktualizowano', kiedy_aktualizowano)
loader.add_value('typ_oferty', typ_oferty)
loader.add_value('okna', okna)
loader.add_value('tytul_ogloszeia', tytul_ogloszeia)
loader.add_value('lokalizacja', lokalizacja)
loader.add_value('cena', cena)
loader.add_value('czynsz_dodatkowo', czynsz_dodatkowo)
loader.add_value('liczba_pokoi', liczba_pokoi)
loader.add_value('licza_pieter', licza_pieter)
loader.add_value('pietro', pietro)
loader.add_value('ogrzewanie', ogrzewanie)
loader.add_value('kaucja', kaucja)
loader.add_value('rodzaj_zabudowy', rodzaj_zabudowy)
loader.add_value('material_budynku', material_budynku)
loader.add_value('stan_wykonczenia', stan_wykonczenia)
loader.add_value('powierzchnia', powierzchnia)
loader.add_value('dostepne_od', dostepne_od)
loader.add_value('link_ogloszenia', link_ogloszenia)
yield loader.load_item() | [
"you@example.com"
] | you@example.com |
6440c4965fb42a080990cb5cad4c6b0f20166912 | 518bf342bc4138982af3e2724e75f1d9ca3ba56c | /solutions/1441. Build an Array With Stack Operations/1441.py | c57039a1e6a850ebf9c94019ab817c14266d8309 | [
"MIT"
] | permissive | walkccc/LeetCode | dae85af7cc689882a84ee5011f0a13a19ad97f18 | a27be41c174565d365cbfe785f0633f634a01b2a | refs/heads/main | 2023-08-28T01:32:43.384999 | 2023-08-20T19:00:45 | 2023-08-20T19:00:45 | 172,231,974 | 692 | 302 | MIT | 2023-08-13T14:48:42 | 2019-02-23T15:46:23 | C++ | UTF-8 | Python | false | false | 350 | py | class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
ans = []
i = 0 # Target pointer
num = 1 # Curr num
while i < len(target):
t = target[i]
if t == num:
ans.append("Push")
i += 1
else:
ans.append("Push")
ans.append("Pop")
num += 1
return ans
| [
"me@pengyuc.com"
] | me@pengyuc.com |
db9d8b152dcb7561abcbe6406a615ce46892503e | 3cd1246ff58f26329021f2d13caa62221c91d5a4 | /testdata/python/genexp/ctx.py | c4655f604ff5dc8efc1bdbffbeff45cb8be81c53 | [] | no_license | mwkmwkmwk/unpyc | 0929e15fb37599496930299d7ced0bf1bedd7e99 | 000fdaec159050c94b7ecf6ab57be3950676f778 | refs/heads/master | 2020-12-01T14:01:57.592806 | 2016-03-21T14:11:43 | 2016-03-21T14:12:01 | 230,650,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | a + b + c + (d for e in f)
| [
"koriakin@0x04.net"
] | koriakin@0x04.net |
1c530fe372777c5ddae9fd3aae02fe09b03a7d9e | 0db05f7b843e8450bafd5ae23f8f70f9a9a8c151 | /Src/StdLib/Lib/site-packages/win32comext/shell/test/testShellFolder.py | 82d126408aa600a298de5cbeb8dbf491a19dd598 | [
"BSD-3-Clause",
"Python-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | IronLanguages/ironpython2 | 9c7f85bd8e6bca300e16f8c92f6384cecb979a6a | d00111890ce41b9791cb5bc55aedd071240252c4 | refs/heads/master | 2023-01-21T21:17:59.439654 | 2023-01-13T01:52:15 | 2023-01-13T01:52:15 | 91,620,472 | 1,171 | 288 | Apache-2.0 | 2023-01-13T01:52:16 | 2017-05-17T21:11:51 | Python | UTF-8 | Python | false | false | 581 | py | from win32com.shell import shell
from win32com.shell.shellcon import *
sf = shell.SHGetDesktopFolder()
print "Shell Folder is", sf
names = []
for i in sf: # Magically calls EnumObjects
name = sf.GetDisplayNameOf(i, SHGDN_NORMAL)
names.append(name)
# And get the enumerator manually
enum = sf.EnumObjects(0, SHCONTF_FOLDERS | SHCONTF_NONFOLDERS | SHCONTF_INCLUDEHIDDEN)
num = 0
for i in enum:
num += 1
if num != len(names):
print "Should have got the same number of names!?"
print "Found", len(names), "items on the desktop"
for name in names:
print name
| [
"pawel.jasinski@gmail.com"
] | pawel.jasinski@gmail.com |
8b86f8f4e9e3c5acf3431aa0b9eedf6fc4b41865 | 4e708d886792efad8e9398ace014e325b00d907d | /owo/client/enclosure/__main__.py | ba3958e87436afa6734eae36934a98e989c885a2 | [] | no_license | korkies22/OwO-core | 981ed8afb9f75432d5a4060f63c9fb591767d855 | 56f234db1f8fd18c78fd9dc4a390d37086ab671f | refs/heads/master | 2020-04-11T09:38:27.885985 | 2018-10-01T16:17:09 | 2018-10-01T16:17:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # Copyright 2017 OwO AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from owo.client.enclosure import Enclosure
def main():
enclosure = Enclosure()
try:
enclosure.run()
except Exception as e:
print(e)
finally:
sys.exit()
if __name__ == "__main__":
main()
| [
"j.alban@uniandes.edu.co"
] | j.alban@uniandes.edu.co |
5dfe362221ea0d80f0c73342b2ac4f27431c627e | a0c2bc4d98db71782a925ca2c83579ee949db5aa | /services/test/test_adminAPI_userpersonlinks.py | 3bb3866de754a4fd77cc0ba6a5d7dd893562a47f | [
"MIT"
] | permissive | rmetcalf9/saas_user_management_system | 0220c9711928c38e8e71ede9c175e51d4f4b0bbe | d3908c46614fb1b638553282cd72ba3634277495 | refs/heads/master | 2023-03-17T00:32:57.317015 | 2022-08-03T19:37:50 | 2022-08-03T19:37:50 | 163,972,910 | 1 | 0 | MIT | 2023-03-04T03:07:10 | 2019-01-03T12:54:20 | Python | UTF-8 | Python | false | false | 10,012 | py | from datetime import datetime, timedelta
import pytz
from TestHelperSuperClass import testHelperAPIClient, env, tenantWithNoAuthProviders, sampleInternalAuthProv001_CREATE, internalUSerSufix
from appObj import appObj
from constants import masterTenantName, jwtHeaderName, objectVersionHeaderName, DefaultHasAccountRole, masterTenantDefaultSystemAdminRole
from test_adminAPI import test_api as parent_test_api
from test_adminAPI_users import defaultUserData
import json
import copy
from userPersonCommon import getListOfUserIDsForPersonNoTenantCheck, GetUser
# Test User Person Links functions of the admin API
userPersonLinkApiPath="/userpersonlinks/"
class test_adminAPIUserPersonLinks(parent_test_api):
def assertUserPersonLinkExists(self, UserID, personGUID):
def dbfn(storeConnection):
l = getListOfUserIDsForPersonNoTenantCheck(appObj, personGUID, storeConnection)
      self.assertTrue(UserID in l, msg="UserPersonLink doesn't exist but it should (1)")
userObj = GetUser(appObj, UserID, storeConnection)
if userObj is None:
        self.assertFalse(True, msg="User not found but they should have a UserPersonLink")
      self.assertTrue(personGUID in userObj._associatedPersonsList, msg="UserPersonLink doesn't exist but it should (2)")
appObj.objectStore.executeInsideConnectionContext(dbfn)
def assertUserPersonLinkDosentExists(self, UserID, personGUID):
def dbfn(storeConnection):
l = getListOfUserIDsForPersonNoTenantCheck(appObj, personGUID, storeConnection)
      self.assertFalse(UserID in l, msg="UserPersonLink exists but it shouldn't (1)")
userObj = GetUser(appObj, UserID, storeConnection)
if userObj is None:
return
      self.assertFalse(personGUID in userObj._associatedPersonsList, msg="UserPersonLink exists but it shouldn't (2)")
appObj.objectStore.executeInsideConnectionContext(dbfn)
def test_createUserPersonLink(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": newPerson['guid']
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 201, msg="Create auth failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkExists(newauthDICT["UserID"], newauthDICT["personGUID"])
def test_personInPayloadNotMatchingURL(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": newPerson['guid']
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + 'XX' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 400, msg="Create auth should have failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkDosentExists(newauthDICT["UserID"], newauthDICT["personGUID"])
def test_userInPayloadNotMatchingURL(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": newPerson['guid']
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + 'XX' + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 400, msg="Create auth should have failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkDosentExists(newauthDICT["UserID"], newauthDICT["personGUID"])
def test_createUserPersonLinkPersonInvalid(self):
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": 'INVALID_PERSON'
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 404, msg="Create auth should have failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkDosentExists(newauthDICT["UserID"], newauthDICT["personGUID"])
def test_createUserPersonLinkUserInvalid(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": 'InvalidUser',
"personGUID": newPerson['guid']
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 404, msg="Create auth should have failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkDosentExists(newauthDICT["UserID"], newauthDICT["personGUID"])
def test_createUserPersonLinkAlreadyExists(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": "FORCED-CONSTANT-TESTING-PERSON-GUID"
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 400, msg="Create auth should have failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkExists(newauthDICT["UserID"], newauthDICT["personGUID"])
def test_deleteUserPersonLink(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": newPerson['guid']
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 201, msg="Create auth failed - " + result.get_data(as_text=True))
result = self.testClient.delete(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
content_type='application/json'
)
self.assertEqual(result.status_code, 200, msg="Delete auth failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkDosentExists(newauthDICT["UserID"], newauthDICT["personGUID"])
def test_deleteUserPersonLinkPersonInvalid(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": newPerson['guid']
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
self.assertEqual(result.status_code, 201, msg="Create auth failed - " + result.get_data(as_text=True))
result = self.testClient.delete(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"] + 'XX',
headers={ jwtHeaderName: self.getNormalJWTToken()},
content_type='application/json'
)
self.assertEqual(result.status_code, 400, msg="Delete should have failed - " + result.get_data(as_text=True))
def test_deleteUserPersonLinkUserInvalid(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": newPerson['guid']
}
result = self.testClient.post(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
data=json.dumps(newauthDICT),
content_type='application/json'
)
    self.assertEqual(result.status_code, 201, msg="Create userpersonlink failed - " + result.get_data(as_text=True))
result = self.testClient.delete(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + 'XX' + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
content_type='application/json'
)
self.assertEqual(result.status_code, 400, msg="Delete should have failed - " + result.get_data(as_text=True))
def test_deleteUserPersonLinkDosntExist(self):
newPerson = self.createPersonAndReturnDICT()
newauthDICT = {
"UserID": appObj.defaultUserGUID,
"personGUID": newPerson['guid']
}
result = self.testClient.delete(
self.adminAPIPrefix + '/' + masterTenantName + userPersonLinkApiPath + newauthDICT["UserID"] + '/' + newauthDICT["personGUID"],
headers={ jwtHeaderName: self.getNormalJWTToken()},
content_type='application/json'
)
self.assertEqual(result.status_code, 400, msg="Delete auth should have failed - " + result.get_data(as_text=True))
self.assertUserPersonLinkDosentExists(newauthDICT["UserID"], newauthDICT["personGUID"])
| [
"rmetcalf9@googlemail.com"
] | rmetcalf9@googlemail.com |
58c1d50482bd4ceff2380654a146d6c7476adab9 | 408f985c954ad6f0f3926e02dc18c31336bac194 | /precise_bbcode/core/utils.py | 8388589936970347e3e3e1dbc97d1e1a7f325542 | [
"BSD-3-Clause"
] | permissive | ellmetha/django-precise-bbcode | 176258d9436cc29002d59f29d4964a3bdd05721e | 24306622fc8ebd91c8c79543c18050de0b32f1f1 | refs/heads/main | 2023-08-28T13:35:07.027756 | 2023-07-27T01:07:15 | 2023-07-27T01:07:15 | 13,904,807 | 36 | 16 | BSD-3-Clause | 2023-08-19T18:44:47 | 2013-10-27T16:45:03 | Python | UTF-8 | Python | false | false | 322 | py | from functools import reduce
def replace(data, replacements):
"""
Performs several string substitutions on the initial ``data`` string using
a list of 2-tuples (old, new) defining substitutions and returns the resulting
string.
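
    Example:
        replace('hello world', [('hello', 'hi'), ('world', 'there')])  # -> 'hi there'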
"""
return reduce(lambda a, kv: a.replace(*kv), replacements, data)
| [
"morgan.aubert@zoho.com"
] | morgan.aubert@zoho.com |
33f73c6e22ecded1f56c04ae0d5a569dae6d3013 | e09c0384767e46b79898762450d63d309c3728b5 | /tensorflow/python/keras/metrics_test.py | 88b763d56909d30de4d19db42230eff5eb5766ce | [
"Apache-2.0"
] | permissive | mattli002/tensorflow | bc4a1d4375fd05f708b58a56aee37ae2c08127b5 | efe565bc0981e80a52a97f3961cfba3e87023b42 | refs/heads/master | 2020-04-14T22:23:03.052138 | 2019-01-04T22:08:26 | 2019-01-04T22:13:56 | 164,160,059 | 0 | 0 | Apache-2.0 | 2019-01-04T22:16:44 | 2019-01-04T22:16:44 | null | UTF-8 | Python | false | false | 70,750 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import metrics
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.training.rmsprop import RMSPropOptimizer
@test_util.run_all_in_graph_and_eager_modes
class KerasMeanTest(test.TestCase):
# TODO(b/120949004): Re-enable garbage collection check
# @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_mean(self):
m = metrics.Mean(name='my_mean')
# check config
self.assertEqual(m.name, 'my_mean')
self.assertTrue(m.stateful)
self.assertEqual(m.dtype, dtypes.float32)
self.assertEqual(len(m.variables), 2)
self.evaluate(variables.variables_initializer(m.variables))
# check initial state
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# check __call__()
self.assertEqual(self.evaluate(m(100)), 100)
self.assertEqual(self.evaluate(m.total), 100)
self.assertEqual(self.evaluate(m.count), 1)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
self.assertEqual(self.evaluate(m.count), 3)
# check reset_states()
m.reset_states()
self.assertEqual(self.evaluate(m.total), 0)
self.assertEqual(self.evaluate(m.count), 0)
# Check save and restore config
m2 = metrics.Mean.from_config(m.get_config())
self.assertEqual(m2.name, 'my_mean')
self.assertTrue(m2.stateful)
self.assertEqual(m2.dtype, dtypes.float32)
self.assertEqual(len(m2.variables), 2)
def test_mean_with_sample_weight(self):
m = metrics.Mean(dtype=dtypes.float64)
self.assertEqual(m.dtype, dtypes.float64)
self.evaluate(variables.variables_initializer(m.variables))
# check scalar weight
result_t = m(100, sample_weight=0.5)
self.assertEqual(self.evaluate(result_t), 50 / 0.5)
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
# check weights not scalar and weights rank matches values rank
result_t = m([1, 5], sample_weight=[1, 0.2])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 52 / 1.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
# check weights broadcast
result_t = m([1, 2], sample_weight=0.5)
self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1
self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5
# check weights squeeze
result_t = m([1, 5], sample_weight=[[1], [0.2]])
self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2
# check weights expand
result_t = m([[1], [5]], sample_weight=[1, 0.2])
self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1
self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2
# check values reduced to the dimensions of weight
result_t = m([[[1., 2.], [3., 2.], [0.5, 4.]]], sample_weight=[0.5])
result = np.round(self.evaluate(result_t), decimals=2) # 58.5 / 5.6
self.assertEqual(result, 10.45)
self.assertEqual(np.round(self.evaluate(m.total), decimals=2), 58.54)
self.assertEqual(np.round(self.evaluate(m.count), decimals=2), 5.6)
def test_mean_graph_with_placeholder(self):
with context.graph_mode(), self.cached_session() as sess:
m = metrics.Mean()
v = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
self.evaluate(variables.variables_initializer(m.variables))
# check __call__()
result_t = m(v, sample_weight=w)
result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
self.assertEqual(self.evaluate(m.total), 50)
self.assertEqual(self.evaluate(m.count), 0.5)
self.assertEqual(result, 50 / 0.5)
# check update_state() and result()
result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
self.assertAlmostEqual(result, 52 / 1.7, 2)
def test_save_restore(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
m = metrics.Mean()
checkpoint = checkpointable_utils.Checkpoint(mean=m)
self.evaluate(variables.variables_initializer(m.variables))
# update state
self.evaluate(m(100.))
self.evaluate(m(200.))
# save checkpoint and then add an update
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(m(1000.))
# restore to the same checkpoint mean object
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.evaluate(m(300.))
self.assertEqual(200., self.evaluate(m.result()))
# restore to a different checkpoint mean object
restore_mean = metrics.Mean()
restore_checkpoint = checkpointable_utils.Checkpoint(mean=restore_mean)
status = restore_checkpoint.restore(save_path)
restore_update = restore_mean(300.)
status.assert_consumed().run_restore_ops()
self.evaluate(restore_update)
self.assertEqual(200., self.evaluate(restore_mean.result()))
self.assertEqual(3, self.evaluate(restore_mean.count))
@test_util.run_all_in_graph_and_eager_modes
class KerasAccuracyTest(test.TestCase):
def test_accuracy(self):
acc_obj = metrics.Accuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# Check save and restore config
a2 = metrics.Accuracy.from_config(acc_obj.get_config())
self.assertEqual(a2.name, 'my acc')
self.assertTrue(a2.stateful)
self.assertEqual(len(a2.variables), 2)
self.assertEqual(a2.dtype, dtypes.float32)
# check with sample_weight
result_t = acc_obj([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.96, 2) # 4.5/4.7
def test_binary_accuracy(self):
acc_obj = metrics.BinaryAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check y_pred squeeze
update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertAlmostEqual(result, 0.75, 2) # 3/4
# check y_true squeeze
result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4/6
# check with sample_weight
result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7
def test_binary_accuracy_threshold(self):
acc_obj = metrics.BinaryAccuracy(threshold=0.7)
self.evaluate(variables.variables_initializer(acc_obj.variables))
result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.5, 2)
def test_categorical_accuracy(self):
acc_obj = metrics.CategoricalAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
[[0.1, 0.1, 0.8], [0.05, 0, 0.95]], [[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([[2], [1]],
[[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([[2], [1]], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims(self):
acc_obj = metrics.SparseCategoricalAccuracy(name='my acc')
# check config
self.assertEqual(acc_obj.name, 'my acc')
self.assertTrue(acc_obj.stateful)
self.assertEqual(len(acc_obj.variables), 2)
self.assertEqual(acc_obj.dtype, dtypes.float32)
self.evaluate(variables.variables_initializer(acc_obj.variables))
# verify that correct value is returned
update_op = acc_obj.update_state([2, 1], [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
self.evaluate(update_op)
result = self.evaluate(acc_obj.result())
self.assertEqual(result, 1) # 2/2
# check with sample_weight
result_t = acc_obj([2, 1], [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
[[0.5], [0.2]])
result = self.evaluate(result_t)
self.assertAlmostEqual(result, 0.93, 2) # 2.5/2.7
def test_sparse_categorical_accuracy_mismatched_dims_dynamic(self):
with context.graph_mode(), self.cached_session() as sess:
acc_obj = metrics.SparseCategoricalAccuracy(name='my acc')
self.evaluate(variables.variables_initializer(acc_obj.variables))
t = array_ops.placeholder(dtypes.float32)
p = array_ops.placeholder(dtypes.float32)
w = array_ops.placeholder(dtypes.float32)
result_t = acc_obj(t, p, w)
result = sess.run(
result_t,
feed_dict=({
t: [2, 1],
p: [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
w: [[0.5], [0.2]]
}))
self.assertAlmostEqual(result, 0.71, 2) # 2.5/2.7
@test_util.run_all_in_graph_and_eager_modes
class FalsePositivesTest(test.TestCase):
def test_config(self):
fp_obj = metrics.FalsePositives(name='my_fp', thresholds=[0.4, 0.9])
self.assertEqual(fp_obj.name, 'my_fp')
self.assertEqual(len(fp_obj.variables), 1)
self.assertEqual(fp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fp_obj2 = metrics.FalsePositives.from_config(fp_obj.get_config())
self.assertEqual(fp_obj2.name, 'my_fp')
self.assertEqual(len(fp_obj2.variables), 1)
self.assertEqual(fp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
fp_obj = metrics.FalsePositives()
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(14., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fp_obj.result()
self.assertAllClose([7., 4., 2.], result)
def test_weighted_with_thresholds(self):
fp_obj = metrics.FalsePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0), (5.0, 15.0, 10.0, 0))
result = fp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([125., 42., 12.], self.evaluate(result))
def test_threshold_limit(self):
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
metrics.FalsePositives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegexp(
ValueError,
r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
metrics.FalsePositives(thresholds=[None])
@test_util.run_all_in_graph_and_eager_modes
class FalseNegativesTest(test.TestCase):
def test_config(self):
fn_obj = metrics.FalseNegatives(name='my_fn', thresholds=[0.4, 0.9])
self.assertEqual(fn_obj.name, 'my_fn')
self.assertEqual(len(fn_obj.variables), 1)
self.assertEqual(fn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
fn_obj2 = metrics.FalseNegatives.from_config(fn_obj.get_config())
self.assertEqual(fn_obj2.name, 'my_fn')
self.assertEqual(len(fn_obj2.variables), 1)
self.assertEqual(fn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
fn_obj = metrics.FalseNegatives()
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(5., self.evaluate(result))
def test_unweighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = fn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = fn_obj.result()
self.assertAllClose([1., 4., 6.], result)
def test_weighted_with_thresholds(self):
fn_obj = metrics.FalseNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(fn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((3.0,), (5.0,), (7.0,), (4.0,))
result = fn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([4., 16., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TrueNegativesTest(test.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name='my_tn', thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, 'my_tn')
self.assertEqual(len(tn_obj.variables), 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, 'my_tn')
self.assertEqual(len(tn_obj2.variables), 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose(3., result)
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tn_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tn_obj.result()
self.assertAllClose([2., 5., 7.], result)
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tn_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5., 15., 23.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class TruePositivesTest(test.TestCase):
def test_config(self):
tp_obj = metrics.TruePositives(name='my_tp', thresholds=[0.4, 0.9])
self.assertEqual(tp_obj.name, 'my_tp')
self.assertEqual(len(tp_obj.variables), 1)
self.assertEqual(tp_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tp_obj2 = metrics.TruePositives.from_config(tp_obj.get_config())
self.assertEqual(tp_obj2.name, 'my_tp')
self.assertEqual(len(tp_obj2.variables), 1)
self.assertEqual(tp_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose(7., result)
def test_weighted(self):
tp_obj = metrics.TruePositives()
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = tp_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(12., self.evaluate(result))
def test_unweighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
update_op = tp_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = tp_obj.result()
self.assertAllClose([6., 3., 1.], result)
def test_weighted_with_thresholds(self):
tp_obj = metrics.TruePositives(thresholds=[0.15, 0.5, 0.85])
self.evaluate(variables.variables_initializer(tp_obj.variables))
y_pred = constant_op.constant(((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3), (0, 1, 0.7, 0.3)))
y_true = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0),
(1, 1, 1, 1)))
result = tp_obj(y_true, y_pred, sample_weight=37.)
self.assertAllClose([222., 111., 37.], self.evaluate(result))
@test_util.run_all_in_graph_and_eager_modes
class PrecisionTest(test.TestCase):
def test_config(self):
p_obj = metrics.Precision(name='my_precision', thresholds=[0.4, 0.9])
self.assertEqual(p_obj.name, 'my_precision')
self.assertEqual(len(p_obj.variables), 2)
self.assertEqual([v.name for v in p_obj.variables],
['true_positives:0', 'false_positives:0'])
self.assertEqual(p_obj.thresholds, [0.4, 0.9])
# Check save and restore config
p_obj2 = metrics.Precision.from_config(p_obj.get_config())
self.assertEqual(p_obj2.name, 'my_precision')
self.assertEqual(len(p_obj2.variables), 2)
self.assertEqual(p_obj2.thresholds, [0.4, 0.9])
def test_value_is_idempotent(self):
p_obj = metrics.Precision(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = p_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(p_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_precision = self.evaluate(p_obj.result())
for _ in range(10):
self.assertArrayNear(initial_precision, self.evaluate(p_obj.result()),
1e-3)
def test_unweighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
p_obj = metrics.Precision(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, self.evaluate(result))
def test_div_by_zero(self):
p_obj = metrics.Precision()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
result = p_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
p_obj = metrics.Precision(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[4, 0], [3, 1]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(p_obj.variables))
update_op = p_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_precision = weighted_tp / weighted_positives
self.assertArrayNear([expected_precision, 0], self.evaluate(p_obj.result()),
1e-3)
@test_util.run_all_in_graph_and_eager_modes
class RecallTest(test.TestCase):
def test_config(self):
r_obj = metrics.Recall(name='my_recall', thresholds=[0.4, 0.9])
self.assertEqual(r_obj.name, 'my_recall')
self.assertEqual(len(r_obj.variables), 2)
self.assertEqual([v.name for v in r_obj.variables],
['true_positives:0', 'false_negatives:0'])
self.assertEqual(r_obj.thresholds, [0.4, 0.9])
# Check save and restore config
r_obj2 = metrics.Recall.from_config(r_obj.get_config())
self.assertEqual(r_obj2.name, 'my_recall')
self.assertEqual(len(r_obj2.variables), 2)
self.assertEqual(r_obj2.thresholds, [0.4, 0.9])
def test_value_is_idempotent(self):
r_obj = metrics.Recall(thresholds=[0.3, 0.72])
y_pred = random_ops.random_uniform(shape=(10, 3))
y_true = random_ops.random_uniform(shape=(10, 3))
update_op = r_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(r_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(r_obj.result())
for _ in range(10):
self.assertArrayNear(initial_recall, self.evaluate(r_obj.result()), 1e-3)
def test_unweighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0.5, self.evaluate(result))
def test_unweighted_all_incorrect(self):
r_obj = metrics.Recall(thresholds=[0.5])
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs)
y_true = constant_op.constant(1 - inputs)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertAlmostEqual(0, self.evaluate(result))
def test_weighted(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
y_true = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(
y_true,
y_pred,
sample_weight=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, self.evaluate(result))
def test_div_by_zero(self):
r_obj = metrics.Recall()
y_pred = constant_op.constant([0, 0, 0, 0])
y_true = constant_op.constant([0, 0, 0, 0])
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertEqual(0, self.evaluate(result))
def test_unweighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 0.7])
y_pred = constant_op.constant([1, 0, 0.6, 0], shape=(1, 4))
y_true = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred)
self.assertArrayNear([0.5, 0.], self.evaluate(result), 0)
def test_weighted_with_threshold(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
result = r_obj(y_true, y_pred, sample_weight=weights)
weighted_tp = 0 + 3.
weighted_positives = (0 + 3.) + (4. + 0.)
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(result), 1e-3)
def test_multiple_updates(self):
r_obj = metrics.Recall(thresholds=[0.5, 1.])
y_true = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
y_pred = constant_op.constant([[1, 0], [0.6, 0]],
shape=(2, 2),
dtype=dtypes.float32)
weights = constant_op.constant([[1, 4], [3, 2]],
shape=(2, 2),
dtype=dtypes.float32)
self.evaluate(variables.variables_initializer(r_obj.variables))
update_op = r_obj.update_state(y_true, y_pred, sample_weight=weights)
for _ in range(2):
self.evaluate(update_op)
weighted_tp = (0 + 3.) + (0 + 3.)
weighted_positives = ((0 + 3.) + (4. + 0.)) + ((0 + 3.) + (4. + 0.))
expected_recall = weighted_tp / weighted_positives
self.assertArrayNear([expected_recall, 0], self.evaluate(r_obj.result()),
1e-3)
@test_util.run_all_in_graph_and_eager_modes
class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SensitivityAtSpecificity(
0.4, num_thresholds=100, name='sensitivity_at_specificity_1')
self.assertEqual(s_obj.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.specificity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SensitivityAtSpecificity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'sensitivity_at_specificity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.specificity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_sensitivity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SensitivityAtSpecificity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.8, self.evaluate(result))
def test_unweighted_low_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SensitivityAtSpecificity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.675, self.evaluate(result))
def test_invalid_specificity(self):
with self.assertRaisesRegexp(
ValueError, r'`specificity` must be in the range \[0, 1\].'):
metrics.SensitivityAtSpecificity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
def test_config(self):
s_obj = metrics.SpecificityAtSensitivity(
0.4, num_thresholds=100, name='specificity_at_sensitivity_1')
self.assertEqual(s_obj.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.sensitivity, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
# Check save and restore config
s_obj2 = metrics.SpecificityAtSensitivity.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, 'specificity_at_sensitivity_1')
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.sensitivity, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
def test_value_is_idempotent(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
y_pred = random_ops.random_uniform((10, 3),
maxval=1,
dtype=dtypes.float32,
seed=1)
y_true = random_ops.random_uniform((10, 3),
maxval=2,
dtype=dtypes.int64,
seed=1)
update_op = s_obj.update_state(y_true, y_pred)
self.evaluate(variables.variables_initializer(s_obj.variables))
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_specificity = self.evaluate(s_obj.result())
for _ in range(10):
self.assertAlmostEqual(initial_specificity, self.evaluate(s_obj.result()),
1e-3)
def test_unweighted_all_correct(self):
s_obj = metrics.SpecificityAtSensitivity(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = constant_op.constant(inputs, dtype=dtypes.float32)
y_true = constant_op.constant(inputs)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(1, self.evaluate(result))
def test_unweighted_high_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.8)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_unweighted_low_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = constant_op.constant(label_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred)
self.assertAlmostEqual(0.6, self.evaluate(result))
@parameterized.parameters([dtypes.bool, dtypes.int32, dtypes.float32])
def test_weighted(self, label_dtype):
s_obj = metrics.SpecificityAtSensitivity(0.4)
pred_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y_pred = constant_op.constant(pred_values, dtype=dtypes.float32)
y_true = math_ops.cast(label_values, dtype=label_dtype)
weights = constant_op.constant(weight_values)
self.evaluate(variables.variables_initializer(s_obj.variables))
result = s_obj(y_true, y_pred, sample_weight=weights)
self.assertAlmostEqual(0.4, self.evaluate(result))
def test_invalid_sensitivity(self):
with self.assertRaisesRegexp(
ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
metrics.SpecificityAtSensitivity(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
@test_util.run_all_in_graph_and_eager_modes
class CosineProximityTest(test.TestCase):
def test_config(self):
cosine_obj = metrics.CosineProximity(name='my_cos', dtype=dtypes.int32)
self.assertEqual(cosine_obj.name, 'my_cos')
self.assertEqual(cosine_obj._dtype, dtypes.int32)
# Check save and restore config
cosine_obj2 = metrics.CosineProximity.from_config(cosine_obj.get_config())
self.assertEqual(cosine_obj2.name, 'my_cos')
self.assertEqual(cosine_obj2._dtype, dtypes.int32)
def test_unweighted(self):
cosine_obj = metrics.CosineProximity()
self.evaluate(variables.variables_initializer(cosine_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = cosine_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = cosine_obj.result()
self.assertAllClose(-0.60723, result, atol=1e-5)
def test_weighted(self):
cosine_obj = metrics.CosineProximity()
self.evaluate(variables.variables_initializer(cosine_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = cosine_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(-0.59916, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsoluteErrorTest(test.TestCase):
def test_config(self):
mae_obj = metrics.MeanAbsoluteError(name='my_mae', dtype=dtypes.int32)
self.assertEqual(mae_obj.name, 'my_mae')
self.assertEqual(mae_obj._dtype, dtypes.int32)
# Check save and restore config
mae_obj2 = metrics.MeanAbsoluteError.from_config(mae_obj.get_config())
self.assertEqual(mae_obj2.name, 'my_mae')
self.assertEqual(mae_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(variables.variables_initializer(mae_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mae_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mae_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mae_obj = metrics.MeanAbsoluteError()
self.evaluate(variables.variables_initializer(mae_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanAbsolutePercentageErrorTest(test.TestCase):
def test_config(self):
mape_obj = metrics.MeanAbsolutePercentageError(
name='my_mape', dtype=dtypes.int32)
self.assertEqual(mape_obj.name, 'my_mape')
self.assertEqual(mape_obj._dtype, dtypes.int32)
# Check save and restore config
mape_obj2 = metrics.MeanAbsolutePercentageError.from_config(
mape_obj.get_config())
self.assertEqual(mape_obj2.name, 'my_mape')
self.assertEqual(mape_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(variables.variables_initializer(mape_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mape_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mape_obj.result()
self.assertAllClose(35e7, result, atol=1e-5)
def test_weighted(self):
mape_obj = metrics.MeanAbsolutePercentageError()
self.evaluate(variables.variables_initializer(mape_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(40e7, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredErrorTest(test.TestCase):
def test_config(self):
mse_obj = metrics.MeanSquaredError(name='my_mse', dtype=dtypes.int32)
self.assertEqual(mse_obj.name, 'my_mse')
self.assertEqual(mse_obj._dtype, dtypes.int32)
# Check save and restore config
mse_obj2 = metrics.MeanSquaredError.from_config(mse_obj.get_config())
self.assertEqual(mse_obj2.name, 'my_mse')
self.assertEqual(mse_obj2._dtype, dtypes.int32)
def test_unweighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(variables.variables_initializer(mse_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = mse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = metrics.MeanSquaredError()
self.evaluate(variables.variables_initializer(mse_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class MeanSquaredLogarithmicErrorTest(test.TestCase):
def test_config(self):
msle_obj = metrics.MeanSquaredLogarithmicError(
name='my_msle', dtype=dtypes.int32)
self.assertEqual(msle_obj.name, 'my_msle')
self.assertEqual(msle_obj._dtype, dtypes.int32)
# Check save and restore config
msle_obj2 = metrics.MeanSquaredLogarithmicError.from_config(
msle_obj.get_config())
self.assertEqual(msle_obj2.name, 'my_msle')
self.assertEqual(msle_obj2._dtype, dtypes.int32)
def test_unweighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(variables.variables_initializer(msle_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = msle_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = msle_obj.result()
self.assertAllClose(0.24022, result, atol=1e-5)
def test_weighted(self):
msle_obj = metrics.MeanSquaredLogarithmicError()
self.evaluate(variables.variables_initializer(msle_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.26082, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class HingeTest(test.TestCase):
def test_config(self):
hinge_obj = metrics.Hinge(name='hinge', dtype=dtypes.int32)
self.assertEqual(hinge_obj.name, 'hinge')
self.assertEqual(hinge_obj._dtype, dtypes.int32)
# Check save and restore config
hinge_obj2 = metrics.Hinge.from_config(hinge_obj.get_config())
self.assertEqual(hinge_obj2.name, 'hinge')
self.assertEqual(hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(variables.variables_initializer(hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = hinge_obj.result()
self.assertAllClose(0.65, result, atol=1e-5)
def test_weighted(self):
hinge_obj = metrics.Hinge()
self.evaluate(variables.variables_initializer(hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.65714, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class SquaredHingeTest(test.TestCase):
def test_config(self):
sq_hinge_obj = metrics.SquaredHinge(name='sq_hinge', dtype=dtypes.int32)
self.assertEqual(sq_hinge_obj.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj._dtype, dtypes.int32)
# Check save and restore config
sq_hinge_obj2 = metrics.SquaredHinge.from_config(sq_hinge_obj.get_config())
self.assertEqual(sq_hinge_obj2.name, 'sq_hinge')
self.assertEqual(sq_hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = sq_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = sq_hinge_obj.result()
self.assertAllClose(0.65, result, atol=1e-5)
def test_weighted(self):
sq_hinge_obj = metrics.SquaredHinge()
self.evaluate(variables.variables_initializer(sq_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.65714, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class CategoricalHingeTest(test.TestCase):
def test_config(self):
cat_hinge_obj = metrics.CategoricalHinge(
name='cat_hinge', dtype=dtypes.int32)
self.assertEqual(cat_hinge_obj.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj._dtype, dtypes.int32)
# Check save and restore config
cat_hinge_obj2 = metrics.CategoricalHinge.from_config(
cat_hinge_obj.get_config())
self.assertEqual(cat_hinge_obj2.name, 'cat_hinge')
self.assertEqual(cat_hinge_obj2._dtype, dtypes.int32)
def test_unweighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(variables.variables_initializer(cat_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
update_op = cat_hinge_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = cat_hinge_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
cat_hinge_obj = metrics.CategoricalHinge()
self.evaluate(variables.variables_initializer(cat_hinge_obj.variables))
y_true = constant_op.constant(((0, 1, 0, 1, 0), (0, 0, 1, 1, 1),
(1, 1, 1, 1, 0), (0, 0, 0, 0, 1)))
y_pred = constant_op.constant(((0, 0, 1, 1, 0), (1, 1, 1, 1, 1),
(0, 1, 0, 1, 0), (1, 1, 1, 1, 1)))
sample_weight = constant_op.constant((1., 1.5, 2., 2.5))
result = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.5, self.evaluate(result), atol=1e-5)
@test_util.run_all_in_graph_and_eager_modes
class RootMeanSquaredErrorTest(test.TestCase):
def test_config(self):
rmse_obj = metrics.RootMeanSquaredError(name='rmse', dtype=dtypes.int32)
self.assertEqual(rmse_obj.name, 'rmse')
self.assertEqual(rmse_obj._dtype, dtypes.int32)
rmse_obj2 = metrics.RootMeanSquaredError.from_config(rmse_obj.get_config())
self.assertEqual(rmse_obj2.name, 'rmse')
self.assertEqual(rmse_obj2._dtype, dtypes.int32)
def test_unweighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(variables.variables_initializer(rmse_obj.variables))
y_true = constant_op.constant((2, 4, 6))
y_pred = constant_op.constant((1, 3, 2))
update_op = rmse_obj.update_state(y_true, y_pred)
self.evaluate(update_op)
result = rmse_obj.result()
# error = [-1, -1, -4], square(error) = [1, 1, 16], mean = 18/3 = 6
self.assertAllClose(math.sqrt(6), result, atol=1e-3)
def test_weighted(self):
rmse_obj = metrics.RootMeanSquaredError()
self.evaluate(variables.variables_initializer(rmse_obj.variables))
y_true = constant_op.constant((2, 4, 6, 8))
y_pred = constant_op.constant((1, 3, 2, 3))
sample_weight = constant_op.constant((0, 1, 0, 1))
result = rmse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(math.sqrt(13), self.evaluate(result), atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class TopKCategoricalAccuracyTest(test.TestCase):
def test_config(self):
a_obj = metrics.TopKCategoricalAccuracy(name='topkca', dtype=dtypes.int32)
self.assertEqual(a_obj.name, 'topkca')
self.assertEqual(a_obj._dtype, dtypes.int32)
a_obj2 = metrics.TopKCategoricalAccuracy.from_config(a_obj.get_config())
self.assertEqual(a_obj2.name, 'topkca')
self.assertEqual(a_obj2._dtype, dtypes.int32)
def test_correctness(self):
a_obj = metrics.TopKCategoricalAccuracy()
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([[0, 0, 1], [0, 1, 0]])
y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both the samples match
# With `k` < 5.
a_obj = metrics.TopKCategoricalAccuracy(k=1)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_true = constant_op.constant([[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0]])
y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.TopKCategoricalAccuracy(k=6)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
@test_util.run_all_in_graph_and_eager_modes
class SparseTopKCategoricalAccuracyTest(test.TestCase):
def test_config(self):
a_obj = metrics.SparseTopKCategoricalAccuracy(
name='stopkca', dtype=dtypes.int32)
self.assertEqual(a_obj.name, 'stopkca')
self.assertEqual(a_obj._dtype, dtypes.int32)
a_obj2 = metrics.SparseTopKCategoricalAccuracy.from_config(
a_obj.get_config())
self.assertEqual(a_obj2.name, 'stopkca')
self.assertEqual(a_obj2._dtype, dtypes.int32)
def test_correctness(self):
a_obj = metrics.SparseTopKCategoricalAccuracy()
self.evaluate(variables.variables_initializer(a_obj.variables))
y_true = constant_op.constant([2, 1])
y_pred = constant_op.constant([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
result = a_obj(y_true, y_pred)
self.assertEqual(1, self.evaluate(result)) # both the samples match
# With `k` < 5.
a_obj = metrics.SparseTopKCategoricalAccuracy(k=1)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only sample #2 matches
# With `k` > 5.
y_pred = constant_op.constant([[0.5, 0.9, 0.1, 0.7, 0.6, 0.5, 0.4],
[0.05, 0.95, 0, 0, 0, 0, 0]])
a_obj = metrics.SparseTopKCategoricalAccuracy(k=6)
self.evaluate(variables.variables_initializer(a_obj.variables))
result = a_obj(y_true, y_pred)
self.assertEqual(0.5, self.evaluate(result)) # only 1 sample matches.
@test_util.run_all_in_graph_and_eager_modes
class LogcoshTest(test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
self.expected_results = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(y_true)
def test_config(self):
logcosh_obj = metrics.Logcosh(name='logcosh', dtype=dtypes.int32)
self.assertEqual(logcosh_obj.name, 'logcosh')
self.assertEqual(logcosh_obj._dtype, dtypes.int32)
def test_unweighted(self):
self.setup()
logcosh_obj = metrics.Logcosh()
self.evaluate(variables.variables_initializer(logcosh_obj.variables))
update_op = logcosh_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = logcosh_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
logcosh_obj = metrics.Logcosh()
self.evaluate(variables.variables_initializer(logcosh_obj.variables))
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
result = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
class PoissonTest(test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_results = y_pred - np.multiply(y_true, np.log(y_pred))
self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(y_true)
def test_config(self):
poisson_obj = metrics.Poisson(name='poisson', dtype=dtypes.int32)
self.assertEqual(poisson_obj.name, 'poisson')
self.assertEqual(poisson_obj._dtype, dtypes.int32)
poisson_obj2 = metrics.Poisson.from_config(poisson_obj.get_config())
self.assertEqual(poisson_obj2.name, 'poisson')
self.assertEqual(poisson_obj2._dtype, dtypes.int32)
def test_unweighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(variables.variables_initializer(poisson_obj.variables))
update_op = poisson_obj.update_state(self.y_true, self.y_pred)
self.evaluate(update_op)
result = poisson_obj.result()
expected_result = np.sum(self.expected_results) / self.batch_size
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
self.setup()
poisson_obj = metrics.Poisson()
self.evaluate(variables.variables_initializer(poisson_obj.variables))
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
result = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
sample_weight = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
expected_result = np.multiply(self.expected_results, sample_weight)
expected_result = np.sum(expected_result) / np.sum(sample_weight)
self.assertAllClose(self.evaluate(result), expected_result, atol=1e-3)
def _get_model(compile_metrics):
model_layers = [
layers.Dense(3, activation='relu', kernel_initializer='ones'),
layers.Dense(1, activation='sigmoid', kernel_initializer='ones')]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model.compile(
loss='mae',
metrics=compile_metrics,
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ResetStatesTest(keras_parameterized.TestCase):
def test_reset_states_false_positives(self):
fp_obj = metrics.FalsePositives()
model = _get_model([fp_obj])
x = np.ones((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fp_obj.accumulator), 100.)
def test_reset_states_false_negatives(self):
fn_obj = metrics.FalseNegatives()
model = _get_model([fn_obj])
x = np.zeros((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(fn_obj.accumulator), 100.)
def test_reset_states_true_negatives(self):
tn_obj = metrics.TrueNegatives()
model = _get_model([tn_obj])
x = np.zeros((100, 4))
y = np.zeros((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tn_obj.accumulator), 100.)
def test_reset_states_true_positives(self):
tp_obj = metrics.TruePositives()
model = _get_model([tp_obj])
x = np.ones((100, 4))
y = np.ones((100, 1))
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(tp_obj.accumulator), 100.)
def test_reset_states_precision(self):
p_obj = metrics.Precision()
model = _get_model([p_obj])
x = np.concatenate((np.ones((50, 4)), np.ones((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.zeros((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.tp), 50.)
self.assertEqual(self.evaluate(p_obj.fp), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(p_obj.tp), 50.)
self.assertEqual(self.evaluate(p_obj.fp), 50.)
def test_reset_states_recall(self):
r_obj = metrics.Recall()
model = _get_model([r_obj])
x = np.concatenate((np.ones((50, 4)), np.zeros((50, 4))))
y = np.concatenate((np.ones((50, 1)), np.ones((50, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.tp), 50.)
self.assertEqual(self.evaluate(r_obj.fn), 50.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(r_obj.tp), 50.)
self.assertEqual(self.evaluate(r_obj.fn), 50.)
def test_reset_states_sensitivity_at_specificity(self):
s_obj = metrics.SensitivityAtSpecificity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
def test_reset_states_specificity_at_sensitivity(self):
s_obj = metrics.SpecificityAtSensitivity(0.5, num_thresholds=1)
model = _get_model([s_obj])
x = np.concatenate((np.ones((25, 4)), np.zeros((25, 4)), np.zeros((25, 4)),
np.ones((25, 4))))
y = np.concatenate((np.ones((25, 1)), np.zeros((25, 1)), np.ones((25, 1)),
np.zeros((25, 1))))
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
model.evaluate(x, y)
self.assertEqual(self.evaluate(s_obj.tp), 25.)
self.assertEqual(self.evaluate(s_obj.fp), 25.)
self.assertEqual(self.evaluate(s_obj.fn), 25.)
self.assertEqual(self.evaluate(s_obj.tn), 25.)
if __name__ == '__main__':
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
02f82c3bdce5f373fb441ff9f282e97254250537 | d138deda43e36f6c79c5e3a9ef1cc62c6a92e881 | /python/paddle/fluid/tests/unittests/test_dist_mnist_ring_allreduce.py | 4436064dc28ed1276481378c70aa3b306486e0c8 | [
"Apache-2.0"
] | permissive | seiriosPlus/Paddle | 51afd6f5c85c3ce41dd72953ee659d1539c19f90 | 9602a182b2a4979247c09df1ec283fc39cb4a981 | refs/heads/develop | 2021-08-16T16:05:10.848535 | 2020-12-27T15:15:19 | 2020-12-27T15:15:19 | 123,257,829 | 2 | 0 | Apache-2.0 | 2019-12-10T08:22:01 | 2018-02-28T08:57:42 | C++ | UTF-8 | Python | false | false | 1,177 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test_dist_base import TestDistBase
import paddle
paddle.enable_static()
class TestDistMnistNCCL2(TestDistBase):
def _setup_config(self):
self._sync_mode = True
self._use_reduce = False
self._use_reader_alloc = False
self._nccl2_mode = True
def test_dist_train(self):
import paddle.fluid as fluid
if fluid.core.is_compiled_with_cuda():
self.check_with_place("dist_mnist.py", delta=1e-5)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | seiriosPlus.noreply@github.com |
fd3cf4d40487d172afc5c8e1119786c68a9a7f34 | 630804da2c327c6723a7a808c23381348b1c79be | /Week1/Day1/FirstAssignment.py | 0920e18f448ce3cb1dc2c941f453446b81d409bf | [] | no_license | AjayKarki/DWIT_Training | 9f1bc49ff19acbe96d121e115acd0d1ba9c05ff4 | e8ce49e2c73e29130a352bd9f5fcab4fe86ebf13 | refs/heads/master | 2020-09-23T13:01:31.056432 | 2019-12-25T15:09:39 | 2019-12-25T15:09:39 | 225,506,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py |
first_number = int(input("Enter a number "))
second_number = int(input("Enter another number "))
"""
This is an example of
Multiline
Comment
"""
print("Sum is ", first_number+second_number)
print("Diff is ", first_number-second_number)
print("Prod is ", first_number*second_number)
print("Div is ", first_number/second_number)
print("Mod is ", first_number % second_number) | [
"ajaykarki333@gmail.com"
] | ajaykarki333@gmail.com |
fb66ec4f4fe8f900b439e9ce33725c9fb5f5e3fb | ef4a12140c89ddd01c335000e15479dff85c48c6 | /kleinworth/spiders/spider.py | e969c9d462814f10ac9746d7f20af6c590fd4209 | [] | no_license | SimeonYS/kleinworth | 8f06e717f9f7c1a3377033ddf190b64bc7784f8e | c4575f0daeacf4771359e4b213c6cbf850fe750d | refs/heads/main | 2023-03-19T08:17:54.626925 | 2021-03-15T10:10:36 | 2021-03-15T10:10:36 | 347,920,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import KleinworthItem
from itemloaders.processors import TakeFirst
pattern = r'(\xa0)?'
class KleinworthSpider(scrapy.Spider):
name = 'kleinworth'
start_urls = ['https://www.kleinworthambros.com/en/tags/tag/press-releases/',
'https://www.kleinworthambros.com/en/tags/tag/news/'
]
def parse(self, response):
post_links = response.xpath('//div[contains(@id,"card2")]/@data-href | //div[@class="taxoWrap"]/a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
def parse_post(self, response):
date = response.xpath('//div[@class="sgnews_single_date"]/text()').get()
title = response.xpath('(//h1//text())[last()]').get()
content = response.xpath('//div[@class="intro"]//text()').getall() + response.xpath('//div[@class="sgnews_single_content"]//text()').getall()
content = [p.strip() for p in content if p.strip()]
content = re.sub(pattern, "",' '.join(content))
item = ItemLoader(item=KleinworthItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('link', response.url)
item.add_value('content', content)
item.add_value('date', date)
yield item.load_item()
| [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
cef1aa623ceaeb8e1400245c7f160633065eb08c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/pygame/pygame-physics/pygame_projectile_angle.py | b4956e8676459708bfbca383c3da3aa096cbd52d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:018a844bc0bfcb3938201de01d763672c6bdce35b5b911524f4e2222f2b9b540
size 830
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
f353d4066d0ca5278a6d6dccbf24d5374f660dc5 | 0161a3ab3710ffee631a6b7ec038e68de4c387ce | /imdb/imdb/settings.py | 59a5b6079b2648a7a7d63fa726d413ac08518812 | [] | no_license | tarungoyal1/learn_scrapy | 38536bbe1d732dc7dbdad37fb2e8814d81f838b3 | 839e24faa9bc60b83ffe805d8bb987d92dd07443 | refs/heads/master | 2022-11-14T21:59:55.310898 | 2020-07-11T02:12:08 | 2020-07-11T02:12:08 | 273,656,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | # -*- coding: utf-8 -*-
# Scrapy settings for imdb project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'imdb'
SPIDER_MODULES = ['imdb.spiders']
NEWSPIDER_MODULE = 'imdb.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'imdb (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'imdb.middlewares.ImdbSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'imdb.middlewares.ImdbDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'imdb.pipelines.ImdbPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
FEED_EXPORT_ENCODING = 'utf-8' | [
"tarun13317@gmail.com"
] | tarun13317@gmail.com |
08910e614d2cf4644137974150405fa8943e7a99 | 3fe9d17c2151cb3853986a79e2ce2010fbc27b4d | /utils.py | 2a2a787daa66ee87fc9f20df7944bd59860bec87 | [] | no_license | coolmaksat/graembeds | 82c2d6e1fecb51e33aa0ace349a3d434b608534f | 35ecddb9d8ef1d9fb648186a6195fd0703d9b642 | refs/heads/master | 2020-03-27T20:18:08.315222 | 2018-09-06T13:44:14 | 2018-09-06T13:44:14 | 147,056,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import numpy as np
import math
class Dataset(object):
"""Dataset class for handling data operations.
It requires specific format for graph data files.
Graph should be stored in `data` folder under
its the folder with its name and should contain
three files `train.txt`, `valid.txt`, `test.txt`.
All three files should have the same format where
each line represents a triple (head, relation, tail)
"""
def __init__(self, name):
self.name = name
self.train_triples = []
self.valid_triples = []
self.test_triples = []
self.node_mappings = {}
self.relation_mappings = {}
for triple in self.load('train'):
self.train_triples.append(triple)
for triple in self.load('valid'):
self.valid_triples.append(triple)
for triple in self.load('test'):
self.test_triples.append(triple)
self.train_triple_set = set(self.train_triples)
self.triple_set = set(
self.train_triples + self.valid_triples + self.test_triples)
self.train_triples = np.array(self.train_triples)
self.test_triples = np.array(self.test_triples)
self.valid_triples = np.array(self.valid_triples)
self.train_size = len(self.train_triples)
self.valid_size = len(self.valid_triples)
self.test_size = len(self.test_triples)
self.nb_nodes = len(self.node_mappings)
self.nb_relations = len(self.relation_mappings)
def load(self, part):
"""Reads dataset files (train, valid, test) and
generates triples with integer ids.
        If an entity or relation is not yet in the mapping variables,
        a new id is created and the entity or relation is added
        to the corresponding mapping variable.
Arguments:
part: The name of dataset part (train, valid, test)
"""
with open('data/' + self.name + '/' + part + '.txt') as f:
for line in f:
it = line.strip().split()
if it[0] not in self.node_mappings:
self.node_mappings[it[0]] = len(self.node_mappings)
if it[2] not in self.node_mappings:
self.node_mappings[it[2]] = len(self.node_mappings)
if it[1] not in self.relation_mappings:
self.relation_mappings[it[1]] = len(self.relation_mappings)
yield (
self.node_mappings[it[0]],
self.relation_mappings[it[1]],
self.node_mappings[it[2]])
def batch_generator(self, part, batch_size=256):
triples = getattr(self, part + '_triples')
n = len(triples)
index = np.arange(n)
np.random.shuffle(index)
return Generator(
triples[index], self.nb_nodes, self.train_triple_set,
batch_size=batch_size)
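# NOTE (editorial sketch): `Generator` is referenced above but not defined in this
# excerpt. The class below is an illustrative stand-in consistent with the call
# `Generator(triples, nb_nodes, train_triple_set, batch_size=...)`; the uniform
# negative-sampling strategy is an assumption, not the original implementation.
class Generator(object):
    """Yields (positive, negative) triple batches from a shuffled triple array."""

    def __init__(self, triples, nb_nodes, train_triple_set, batch_size=256):
        self.triples = triples
        self.nb_nodes = nb_nodes
        self.train_triple_set = train_triple_set
        self.batch_size = batch_size

    def __len__(self):
        return int(math.ceil(len(self.triples) / float(self.batch_size)))

    def __iter__(self):
        for start in range(0, len(self.triples), self.batch_size):
            pos = self.triples[start:start + self.batch_size]
            neg = pos.copy()
            for row in neg:
                # Corrupt either head or tail with a random node, resampling
                # until the corrupted triple is not a known training triple.
                idx = 0 if np.random.rand() < 0.5 else 2
                row[idx] = np.random.randint(self.nb_nodes)
                while (row[0], row[1], row[2]) in self.train_triple_set:
                    row[idx] = np.random.randint(self.nb_nodes)
            yield pos, neg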
| [
"coolmaksat@gmail.com"
] | coolmaksat@gmail.com |
a6e3d61630fcd661507496cd82b8e633ac857ffb | 539d003125eebf761ba320223566cd56eeefe247 | /mundiapi/models/get_split_response.py | 4d7b029a9500b863546dae4bbb9a066b8284df12 | [
"MIT"
] | permissive | mundipagg/MundiApi-NodeJS | 6e58afb33510a723574ee06bec107654409910af | f0c67e1f92471a7a0e2d0b0cb1765105f07fb8cb | refs/heads/master | 2023-06-25T23:04:42.429866 | 2023-06-19T16:10:31 | 2023-06-19T16:10:31 | 101,078,084 | 9 | 5 | NOASSERTION | 2023-06-01T17:50:21 | 2017-08-22T15:25:30 | JavaScript | UTF-8 | Python | false | false | 2,179 | py | # -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import mundiapi.models.get_recipient_response
class GetSplitResponse(object):
"""Implementation of the 'GetSplitResponse' model.
Split response
Attributes:
mtype (string): Type
amount (int): Amount
recipient (GetRecipientResponse): Recipient
gateway_id (string): The split rule gateway id
"""
# Create a mapping from Model property names to API property names
_names = {
"mtype":'type',
"amount":'amount',
"gateway_id":'gateway_id',
"recipient":'recipient'
}
def __init__(self,
mtype=None,
amount=None,
gateway_id=None,
recipient=None):
"""Constructor for the GetSplitResponse class"""
# Initialize members of the class
self.mtype = mtype
self.amount = amount
self.recipient = recipient
self.gateway_id = gateway_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
mtype = dictionary.get('type')
amount = dictionary.get('amount')
gateway_id = dictionary.get('gateway_id')
recipient = mundiapi.models.get_recipient_response.GetRecipientResponse.from_dictionary(dictionary.get('recipient')) if dictionary.get('recipient') else None
# Return an object of this model
return cls(mtype,
amount,
gateway_id,
recipient)
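# NOTE (editorial sketch): example of building an instance from an API-style
# payload. The field values below are illustrative only, not real API output:
#
#     split = GetSplitResponse.from_dictionary(
#         {'type': 'flat', 'amount': 500, 'gateway_id': 'gw_123', 'recipient': None})
#     split.mtype       # -> 'flat'
#     split.amount      # -> 500
#     split.recipient   # -> None (no nested recipient payload supplied)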
| [
"noreply@github.com"
] | mundipagg.noreply@github.com |
9d96b2dcf2e0ad95dd30fbddadd9690e92e19b73 | 9dc1c85e7d86d29400af79125e9cd89a82a9b8ab | /myproject/portfolio/migrations/0008_auto_20210411_1227.py | 1ff519e0632e898c671a29b31ad605be406f6c6f | [
"MIT"
] | permissive | borko81/simple_django | e284ff8f79b3e708b4903ba0b774e3a480de9190 | 9dbd2d848cbf0ff0c58e93471853c5b21c769758 | refs/heads/master | 2023-07-14T01:25:13.294095 | 2021-08-16T15:48:00 | 2021-08-16T15:48:00 | 349,369,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 3.1.7 on 2021-04-11 09:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0007_auto_20210402_2045'),
]
operations = [
migrations.AlterField(
model_name='project',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='img/'),
),
]
| [
"bstoilov81@gmail.com"
] | bstoilov81@gmail.com |
47f64b5e8973a03abf00d861f93525f47544fea7 | 84166c246e819c19acc1dcd77a405b85ca554c9b | /app/models.py | 192cbcd040ed2ace4a75d75d5408515272105f1b | [
"MIT"
] | permissive | kenmutuma001/Blog | 992a98a605f7247bade6577b8e02a277b30db294 | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | refs/heads/master | 2020-05-18T13:32:34.689064 | 2019-05-01T16:55:32 | 2019-05-01T16:55:32 | 184,439,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255))
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
email = db.Column(db.String(255), unique=True, index=True)
password_hash = db.Column(db.String(255))
pass_secure = db.Column(db.String(255))
blogs = db.relationship('Blog', backref='blogs', lazy="dynamic")
# comments = db.relationship('Comment', backref='comments', lazy="dynamic")
@property
def password(self):
raise AttributeError('Cant read password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.pass_secure, password)
def __repr__(self):
return f'User {self.username}'
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
user_id = db.relationship('User', backref='users', lazy="dynamic")
def __repr__(self):
return f'User {self.name}'
class Blog(UserMixin, db.Model):
__tablename__ = 'blogs'
id = db.Column(db.Integer, primary_key=True)
post = db.Column(db.String(255))
body = db.Column(db.String(1000))
category = db.Column(db.String(1000))
date_posted = db.Column(db.DateTime, nullable=False,
default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
comments = db.relationship('Comment', backref='comments', lazy="dynamic")
def save_blog(self):
db.session.add(self)
db.session.commit()
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
comment = db.Column(db.String(1000))
date_posted = db.Column(db.DateTime, nullable=False,
default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("blogs.id"))
pitch = db.Column(db.Integer, db.ForeignKey("users.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
class Popular:
'''
News class to define Objects
'''
def __init__(self, author, quote):
self.author = author
self.quote = quote
| [
"santa@northpole.com"
] | santa@northpole.com |
2fdc4462eba62b8677c1fc96b35656c7681cd085 | 14f085fe9db8179dd44c18f00c1184881dcfe21a | /testing/mos_2d.py | c174c00a97eef685b5fb02f156d04f6eef906d24 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSL-1.0",
"BSD-2-Clause",
"MPL-2.0"
] | permissive | devsim/devsim | 7ba495952239d4e9c0170c0a5a89905aa9eb3e1e | 3d979d6a98685b2e51c15eebd20afdc1e643fc3a | refs/heads/main | 2023-08-31T10:40:41.346966 | 2023-08-30T16:42:56 | 2023-08-30T16:42:56 | 8,838,727 | 158 | 69 | Apache-2.0 | 2023-07-15T03:21:34 | 2013-03-17T18:01:17 | C++ | UTF-8 | Python | false | false | 2,939 | py | # Copyright 2013 DEVSIM LLC
#
# SPDX-License-Identifier: Apache-2.0
from devsim.python_packages.simple_physics import *
from devsim.python_packages.ramp import *
from devsim import *
import mos_2d_create
device = "mymos"
silicon_regions=("gate", "bulk")
oxide_regions=("oxide",)
regions = ("gate", "bulk", "oxide")
interfaces = ("bulk_oxide", "gate_oxide")
for i in regions:
CreateSolution(device, i, "Potential")
for i in silicon_regions:
SetSiliconParameters(device, i, 300)
CreateSiliconPotentialOnly(device, i)
for i in oxide_regions:
SetOxideParameters(device, i, 300)
CreateOxidePotentialOnly(device, i, "log_damp")
### Set up contacts
contacts = get_contact_list(device=device)
for i in contacts:
tmp = get_region_list(device=device, contact=i)
r = tmp[0]
print("%s %s" % (r, i))
CreateSiliconPotentialOnlyContact(device, r, i)
set_parameter(device=device, name=GetContactBiasName(i), value=0.0)
for i in interfaces:
CreateSiliconOxideInterface(device, i)
solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)
solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)
#
##write_devices -file gmsh_mos2d_potentialonly.flps -type floops
write_devices(file="gmsh_mos2d_potentialonly", type="vtk")
for i in silicon_regions:
CreateSolution(device, i, "Electrons")
CreateSolution(device, i, "Holes")
set_node_values(device=device, region=i, name="Electrons", init_from="IntrinsicElectrons")
set_node_values(device=device, region=i, name="Holes", init_from="IntrinsicHoles")
CreateSiliconDriftDiffusion(device, i, "mu_n", "mu_p")
for c in contacts:
tmp = get_region_list(device=device, contact=c)
r = tmp[0]
CreateSiliconDriftDiffusionAtContact(device, r, c)
solve(type="dc", absolute_error=1.0e30, relative_error=1e-5, maximum_iterations=30)
for r in silicon_regions:
node_model(device=device, region=r, name="logElectrons", equation="log(Electrons)/log(10)")
write_devices(file="mos_2d_dd.msh", type="devsim")
with open("mos_2d_params.py", "w", encoding="utf-8") as ofh:
ofh.write('import devsim\n')
for p in get_parameter_list():
if p in ('solver_callback', 'direct_solver', 'info'):
continue
v=repr(get_parameter(name=p))
ofh.write('devsim.set_parameter(name="%s", value=%s)\n' % (p, v))
for i in get_device_list():
for p in get_parameter_list(device=i):
v=repr(get_parameter(device=i, name=p))
ofh.write('devsim.set_parameter(device="%s", name="%s", value=%s)\n' % (i, p, v))
for i in get_device_list():
for j in get_region_list(device=i):
for p in get_parameter_list(device=i, region=j):
v=repr(get_parameter(device=i, region=j, name=p))
ofh.write('devsim.set_parameter(device="%s", region="%s", name="%s", value=%s)\n' % (i, j, p, v))
| [
"juan@tcad.com"
] | juan@tcad.com |
c75af74434ea3e57608c0e597caaa3a04ba11afd | c71e5115b895065d2abe4120799ffc28fa729086 | /procon-archive/atcoder.jp/arc091/arc091_a/Main.py | 04345b8d0e5e40d70aee3f64ae87b4124bc40efc | [] | no_license | ken0105/competitive-programming | eb82f92a7b7ad0db601ea341c1441de6c6165064 | f918f85a0ea6dfbe9cac3ef835f80503bb16a75d | refs/heads/master | 2023-06-05T09:55:25.264731 | 2021-06-29T14:38:20 | 2021-06-29T14:38:20 | 328,328,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | if __name__ == '__main__':
n, m = map(int, input().split())
print(abs(n * m - (2 * n + 2 * m - 4)))
| [
"iwata.kenaaa@gmail.com"
] | iwata.kenaaa@gmail.com |
a5649bccb1465ea6b0584343b7c494035ac67e5a | 9b60beb5ef167dc57a700ddaa0481a173e9c24d8 | /my315ok/products/browser/multipgkuptabs.py | cc1156178510f15ad10b490a53baa4de5b455653 | [] | no_license | adam139/my315ok.products | 8e9460f649e6c57575b3a7838e92fa40f147a99d | d335ed679dd68cddc6aee006f873a038b5258508 | refs/heads/master | 2020-12-25T16:48:12.378815 | 2018-04-08T00:35:20 | 2018-04-08T00:35:20 | 23,514,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py |
from five import grok
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from plone.dexterity.interfaces import IDexterityContent
class multipgview(grok.View):
grok.context(IDexterityContent)
grok.require('zope2.View')
grok.name('multipgview') | [
"yuejun.tang@gmail.com"
] | yuejun.tang@gmail.com |
daec25b7662ba33c27388b1ecb296a9bbbed7d46 | 6437a3a4a31ab9ad233d6b2d985beb50ed50de23 | /PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/markdown/extensions/sane_lists.py | fc36c0b8b890e3faacae1b8b8565f9b7039063fb | [] | no_license | sreyemnayr/jss-lost-mode-app | 03ddc472decde3c17a11294d8ee48b02f83b71e7 | 3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa | refs/heads/master | 2021-05-02T08:50:10.580091 | 2018-02-08T20:32:29 | 2018-02-08T20:32:29 | 120,813,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py |
"""
Sane List Extension for Python-Markdown
=======================================
Modify the behavior of Lists in Python-Markdown to act in a sane manner.
In standard Markdown syntax, the following would constitute a single
ordered list. However, with this extension, the output would include
two lists, the first an ordered list and the second an unordered list.
1. ordered
2. list
* unordered
* list
Copyright 2011 - [Waylan Limberg](http://achinghead.com)
"""
import re
import markdown
class SaneOListProcessor(markdown.blockprocessors.OListProcessor):
CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
SIBLING_TAGS = ['ol']
class SaneUListProcessor(markdown.blockprocessors.UListProcessor):
CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
SIBLING_TAGS = ['ul']
class SaneListExtension(markdown.Extension):
""" Add sane lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Override existing Processors. """
md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
def makeExtension(configs={}):
return SaneListExtension(configs=configs)
| [
"ryanmeyersweb@gmail.com"
] | ryanmeyersweb@gmail.com |
252bbfbe0e32a1b8e8dc584e470f0cfd6aec5562 | 431a1f738b1edfba7dad8d10a6b7520d51d917cb | /Samples/UserSamples/2017/jetFakes_Splits/Fakes5_Config.py | 8ef14d82ba983ee7a83490e4228eb327a6245ff4 | [] | no_license | aloeliger/DatacardCreator | 5ce702e46fbb77e843b44d8fe088c2645a4a8f66 | 5c7e890276a5be079ed3b677a471c1dcadcba52d | refs/heads/master | 2022-02-26T19:52:30.563747 | 2022-02-16T20:24:48 | 2022-02-16T20:24:48 | 215,602,523 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.FakeFactorUncertainty import FakeFactorUncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
FakeSample = Sample()
FakeSample.name = 'jetFakes'
FakeSample.path = '/data/aloeliger/SMHTT_Selected_2017_Deep/'
FakeSample.files = ['Fake.root']
FakeSample.definition = ''
FakeSample.uncertainties = [
FakeFactorUncertainty()
]
FakeSample.eventDictionaryInstance = MuTauEventDictionary
FakeSample.CreateEventWeight = FakeSample.CreateEventWeight_Fake
FakeSample.startEntry = 1300000
FakeSample.endEntry = 1620000
| [
"aloelige@cern.ch"
] | aloelige@cern.ch |
17095e42f3f6435a3d69c24c293d79facc2991ee | 5679731cee36c537615d285ed72810f4c6b17380 | /049_GroupAnagrams.py | 6698f9742a7e6f69b93e644c08ed09f4dbb99aab | [] | no_license | manofmountain/LeetCode | 6b76105190a9b62df65a7b56b6def4120498b9fa | 718f688b3d316e8c10ef680d9c21ecd518d062f8 | refs/heads/master | 2021-01-12T03:41:48.318116 | 2017-07-18T12:35:58 | 2017-07-18T12:35:58 | 78,252,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
res = list()
indexTable = {}
index = 0
for str in strs:
keyStr = ''.join(sorted(str))
if keyStr not in indexTable:
res.append([str])
indexTable[keyStr] = index
index += 1
else:
res[indexTable[keyStr]].append(str)
return res
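# NOTE (editorial sketch): example usage, not part of the original solution:
#
#     Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
#     # -> [['eat', 'tea', 'ate'], ['tan', 'nat'], ['bat']]
#     # Groups appear in order of first occurrence because each new sorted key
#     # appends a fresh list to `res`.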
| [
"noreply@github.com"
] | manofmountain.noreply@github.com |
36a555ab5b94a8f951fdc5e96a744c84a3851024 | a8d9ed754db055f9ce6d573ddb5d90d2d91c5beb | /canal/data.py | 4873e6077ffe7ff8922ee71b5a07d6092505a494 | [] | no_license | linkcheng/spark_demo | 6c84ccf33c82045af0ab6c0547d3216a6d43d1df | db212ed138ac19e42d9c8bc9fa5d7e4d0facee6f | refs/heads/master | 2021-09-09T04:58:59.917278 | 2019-12-26T07:49:01 | 2019-12-26T07:49:01 | 145,976,836 | 0 | 0 | null | 2021-09-08T00:56:35 | 2018-08-24T10:15:15 | Jupyter Notebook | UTF-8 | Python | false | false | 3,440 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
@author: Link
@contact: zheng.long@sfy.com
@module: data
@date: 2019-01-02
bin/kafka-console-consumer.sh --bootstrap-server 192.168.30.141:6667,192.168.30.140:6667,192.168.30.139:6667 --topic example
"""
null = None
false = False
true = True
data = {
"data": [
{
"id": "1111111349",
"name": "张三",
"password_digest": "957f8fcecdc4bc8199d8c52c6d998719",
"mobile": "13512341234",
"email": "",
"created_time": "2015-08-27 23:54:08",
"created_ip": "11.11.11.11",
"last_login_time": "2018-11-08 22:59:19",
"last_login_ip": "22.22.22.22",
"old_id": "3056",
"weixin_open_id": null,
"gender": "3",
"person_id": "53633",
"utm_source": "",
"order_cnt": "3",
"bill_status": "2",
"console_remark": "",
"updated_time": "2018-12-26 14:25:27",
"app_source": null,
"is_get_authorize": "0",
"user_address_list": null,
"bankNumber": null,
"biz_event_status": null,
"biz_event_time": null,
"biz_event_data": null,
"invitation_code": null,
"used_invitation_code": null
}
],
"database": "test",
"es": 1545805527000,
"id": 19,
"isDdl": false,
"mysqlType": {
"id": "bigint(20) unsigned",
"name": "varchar(63)",
"password_digest": "varchar(63)",
"mobile": "varchar(63)",
"email": "varchar(127)",
"created_time": "datetime",
"created_ip": "varchar(63)",
"last_login_time": "datetime",
"last_login_ip": "varchar(63)",
"old_id": "bigint(20) unsigned",
"weixin_open_id": "varchar(63)",
"gender": "enum('male','female','other')",
"person_id": "bigint(20) unsigned",
"utm_source": "varchar(127)",
"order_cnt": "smallint(5) unsigned",
"bill_status": "enum('none','has_bill','has_overdue','overdue')",
"console_remark": "varchar(512)",
"updated_time": "timestamp",
"app_source": "varchar(63)",
"is_get_authorize": "tinyint(1)",
"user_address_list": "varchar(500)",
"bankNumber": "varchar(36)",
"biz_event_status": "varchar(32)",
"biz_event_time": "datetime",
"biz_event_data": "varchar(500)",
"invitation_code": "varchar(16)",
"used_invitation_code": "varchar(16)"
},
"old": null,
"sql": "",
"sqlType": {
"id": -5,
"name": 12,
"password_digest": 12,
"mobile": 12,
"email": 12,
"created_time": 93,
"created_ip": 12,
"last_login_time": 93,
"last_login_ip": 12,
"old_id": -5,
"weixin_open_id": 12,
"gender": 4,
"person_id": -5,
"utm_source": 12,
"order_cnt": 5,
"bill_status": 4,
"console_remark": 12,
"updated_time": 93,
"app_source": 12,
"is_get_authorize": -7,
"user_address_list": 12,
"bankNumber": 12,
"biz_event_status": 12,
"biz_event_time": 93,
"biz_event_data": 12,
"invitation_code": 12,
"used_invitation_code": 12
},
"table": "User",
"ts": 1545805527438,
"type": "INSERT"
}
| [
"zheng.long@shoufuyou.com"
] | zheng.long@shoufuyou.com |
cdc5750d92187c21877277e4d8b50b0c76485614 | 9e1bda53da4c5e98190f5f25235f528d692ee5a8 | /.history/my_app/views_20210405182534.py | 224bf0f237e70a0e4e86dd9a6c523ac1dcd617e5 | [] | no_license | Jumayev-A/Project-3 | 3d373181af6a87e3fe319a13d28fcd18941167b7 | 34ddd009726cbba9ae52e74a46d554fd735566e2 | refs/heads/main | 2023-06-10T11:02:06.446151 | 2021-07-07T06:19:11 | 2021-07-07T06:19:11 | 350,375,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | from django.shortcuts import render, redirect
from django.urls import reverse_lazy, reverse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from django.views.generic.edit import DeleteView
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from my_app.models import CategoryModel, BlogModel
from my_app.forms import BlogForm
# Create your views here.
def home(request):
categories = CategoryModel.objects.all()
context = {
'categories':categories,
}
if request.method == 'POST':
q = request.POST.get('q')
if q:
queryset = BlogModel.objects.filter(Q(title__icontains=q) )
context['q']=queryset
print(context)
if len(queryset) == 0:
messages.success(request, 'tapylmady')
return render(request, 'home.html',context)
@login_required(login_url='/account/login/')
def create_category(request):
user = request.user
if request.method == 'POST':
name = request.POST.get('name')
title = request.POST.get('title')
file = request.FILES.get('file')
CategoryModel.objects.create(user=user, name=name, title=title, file=file).save()
print(user)
return redirect('my_app:home')
return render(request, 'create_category.html',{})
class CategoryDeleteView(DeleteView):
model = CategoryModel
success_url = reverse_lazy('my_app:home')
@login_required(login_url='/account/login/')
def update_category(request, pk):
model = CategoryModel.objects.get(id=pk)
if request.method == 'POST':
name = request.POST.get('name')
title = request.POST.get('title')
file = request.FILES.get('file')
model = CategoryModel.objects.get(id=pk)
model.name = name
model.title = title
model.file = file
model.save()
return redirect('my_app:home')
return render(request, 'update_category.html',{'model':model})
def view_blog(request, pk):
page = request.GET.get('page', 1)
blog = BlogModel.objects.filter(category_id=pk)
paginator = Paginator(blog, 3)
try:
blogs = paginator.page(page)
except EmptyPage:
blogs = paginator.page(paginator.num_pages)
except PageNotAnInteger:
blogs = paginator.page(1)
count_pag = paginator.page_range
return render(request, 'blog_list.html',{'count_pag':count_pag,'blogs':blogs,'pk':pk})
@login_required(login_url='/account/login/')
def create_blog(request, pk):
user = request.user
form = BlogForm()
if request.method == 'POST':
form = BlogForm(request.POST, request.FILES)
if form.is_valid():
form = form.save(commit=False)
form.category_id=pk
form.save()
return redirect('my_app:blog_list',pk)
return render(request, 'create_blog.html',{"pk":pk, 'form':form})
class BlogDeleteView(DeleteView):
model = BlogModel
print(model)
print(object)
def get_success_url(self):
return reverse('my_app:blog_list',args = {self.object.category_id})
@login_required(login_url='/account/login/')
def update_blog(request, pk):
model = BlogModel.objects.get(id=pk)
    form = BlogForm(instance=model)
# if request.method == 'POST':
# title = request.POST.get('title')
# description = request.POST.get('description')
# file = request.FILES.get('file')
# model = BlogModel.objects.get(id=pk)
# model.title = title
# model.description = description
# model.file = file
# model.save()
# return redirect('my_app:blog_list', model.category_id)
return render(request, 'update_blog.html',{'form':form})
def blog_detail(request, id):
blog = BlogModel.objects.get(id=id)
    return render(request, 'blog_detail.html', {'blog': blog})
| [
"abdy.jumayev@gmail.com"
] | abdy.jumayev@gmail.com |
21fda4d68e16fb134142cf28caea0fd629dfbfd4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_237/ch87_2019_06_06_23_04_02_860281.py | 7d6be3cdd3cdd575fd5232e368daa05fb936291d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | with open ("churras.txt", "r") as arquivo:
conteudo = arquivo.readlines()
quantidade = conteudo[1::3]
preco = conteudo[2::3]
soma = 0
len_listas = len(quantidade)
    for e in range(len_listas):
        soma += int(quantidade[e]) * float(preco[e][:5])
print(soma)
| [
"you@example.com"
] | you@example.com |
faf65486b715c94e7e440960343f1c1f21ddd3ce | c703b8ac3b5545857f6c95efa2d61eaf7a664021 | /iPERCore/tools/human_pose2d_estimators/utils/pose_utils.py | 4fbca240c879142f00503100fc2829bfa31a860c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | iPERDance/iPERCore | d29681d229b3098b3517b1abf4f7ea65f579de73 | fcf9a18ffd66bf3fdd3eea4153a3bc4785131848 | refs/heads/main | 2023-07-30T15:04:15.835396 | 2023-04-12T14:21:23 | 2023-04-12T14:21:23 | 313,664,064 | 2,520 | 339 | Apache-2.0 | 2023-05-12T03:26:52 | 2020-11-17T15:36:25 | Python | UTF-8 | Python | false | false | 9,392 | py | # original file comes from Copyright (c) 2018 algo, https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/blob/master/modules/pose.py
# this modified file Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import cv2
import numpy as np
from .one_euro_filter import OneEuroFilter
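# Color palette plus the per-joint (jcolors) and per-limb (ecolors) color choices
# consumed by BasePose.draw below.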
colors = {
"pink": [197, 27, 125], # L lower leg
"Violet": [238, 130, 238],
"DarkViolet": [148, 0, 211],
"light_pink": [233, 163, 201], # L upper leg
"light_green": [161, 215, 106], # L lower arm
"green": [77, 146, 33], # L upper arm
"IndianRed": [205, 92, 92],
"RosyBrown2": [238, 180, 180],
"red": [215, 48, 39], # head
"light_red": [252, 146, 114], # head
"light_orange": [252, 141, 89], # chest
"DarkOrange2": [238, 118, 0],
"purple": [118, 42, 131], # R lower leg
"BlueViolet": [138, 43, 226],
"light_purple": [175, 141, 195], # R upper
"light_blue": [145, 191, 219], # R lower arm
"MediumSlateBlue": [123, 104, 238],
"DarkSlateBlue": [72, 61, 139],
"NavyBlue": [0, 0, 128],
"LightSlateBlue": [132, 112, 255],
"blue": [69, 117, 180], # R upper arm
"gray": [130, 130, 130], #
"YellowGreen": [154, 205, 50],
"LightCoral": [240, 128, 128],
"Aqua": [0, 255, 255],
"chocolate": [210, 105, 30],
"white": [255, 255, 255], #
}
jcolors = [
"light_red", "light_pink", "light_green", "red", "pink", "green",
"light_orange", "light_purple", "light_blue", "DarkOrange2", "purple", "blue",
"MediumSlateBlue", "YellowGreen", "LightCoral", "YellowGreen", "green", "LightSlateBlue", "MediumSlateBlue",
"DarkSlateBlue", "DarkSlateBlue", "Violet", "BlueViolet", "NavyBlue", "RosyBrown2", "Aqua", "chocolate"
]
ecolors = {
0: "IndianRed",
1: "RosyBrown2",
2: "light_pink",
3: "pink",
4: "Violet",
5: "DarkViolet",
6: "light_blue",
7: "DarkSlateBlue",
8: "LightSlateBlue",
9: "NavyBlue",
10: "MediumSlateBlue",
11: "blue",
12: "BlueViolet",
13: "DarkSlateBlue",
14: "purple",
15: "Violet",
16: "BlueViolet",
17: "RosyBrown2",
18: "light_green",
19: "YellowGreen",
20: "light_red",
21: "light_pink",
22: "light_green",
23: "pink",
24: "green",
25: "chocolate",
26: "Aqua"
}
class BasePose(object):
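    """
    Shared pose container: bounding-box computation, an OKS-style keypoint
    similarity, frame-to-frame id tracking with optional One Euro smoothing,
    and skeleton drawing. Subclasses such as OpenPoseBody25 define the keypoint
    layout and the per-keypoint variances.
    """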
@classmethod
def get_bbox(cls, keypoints):
found_keypoints = np.zeros((np.count_nonzero(keypoints[:, 0] != -1), 2), dtype=np.float32)
found_kpt_id = 0
for kpt_id in range(cls.num_kpts):
if keypoints[kpt_id, 0] == -1:
continue
found_keypoints[found_kpt_id] = keypoints[kpt_id, 0:2]
found_kpt_id += 1
## (x0, y0, w, h)
# bbox = cv2.boundingRect(found_keypoints)
# (x0, y0, x1, y1)
x0, y0 = np.min(found_keypoints, axis=0)
x1, y1 = np.max(found_keypoints, axis=0)
bbox = np.array([x0, y0, x1, y1], dtype=np.float32)
return bbox
@classmethod
def get_similarity(cls, a, b, threshold=0.5):
num_similar_kpt = 0
for kpt_id in range(cls.num_kpts):
if a.keypoints[kpt_id, 0] != -1 and b.keypoints[kpt_id, 0] != -1:
distance = np.sum((a.keypoints[kpt_id] - b.keypoints[kpt_id]) ** 2)
                area = max((a.bbox[2] - a.bbox[0]) * (a.bbox[3] - a.bbox[1]),
                           (b.bbox[2] - b.bbox[0]) * (b.bbox[3] - b.bbox[1]))  # bbox is stored as (x0, y0, x1, y1)
similarity = np.exp(-distance / (2 * (area + np.spacing(1)) * cls.vars[kpt_id]))
if similarity > threshold:
num_similar_kpt += 1
return num_similar_kpt
@classmethod
def track_poses(cls, previous_poses, current_poses, threshold=3, smooth=False):
"""
        Propagate pose ids from the previous frame's results. An id is propagated
        if there are at least `threshold` similar keypoints between a pose from the previous frame and the current one.
        If such a correspondence is established, the pose keypoints are smoothed across frames.
Args:
previous_poses: poses from previous frame with ids;
current_poses: poses from current frame to assign ids;
threshold: minimal number of similar keypoints between poses;
smooth: smooth pose keypoints between frames.
Returns:
current_poses (list of BasePose): the current poses.
"""
current_poses = sorted(current_poses, key=lambda pose: pose.confidence,
reverse=True) # match confident poses first
mask = np.ones(len(previous_poses), dtype=np.int32)
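        # mask[i] stays 1 while previous pose i has not yet been claimed by a current pose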
for current_pose in current_poses:
best_matched_id = None
best_matched_pose_id = None
best_matched_iou = 0
for id, previous_pose in enumerate(previous_poses):
if not mask[id]:
continue
iou = cls.get_similarity(current_pose, previous_pose)
if iou > best_matched_iou:
best_matched_iou = iou
best_matched_pose_id = previous_pose.id
best_matched_id = id
if best_matched_iou >= threshold:
mask[best_matched_id] = 0
else: # pose not similar to any previous
best_matched_pose_id = None
current_pose.update_id(best_matched_pose_id)
if smooth:
for kpt_id in range(cls.num_kpts):
if current_pose.keypoints[kpt_id, 0] == -1:
continue
# reuse filter if previous pose has valid filter
if (best_matched_pose_id is not None
and previous_poses[best_matched_id].keypoints[kpt_id, 0] != -1):
current_pose.filters[kpt_id] = previous_poses[best_matched_id].filters[kpt_id]
current_pose.keypoints[kpt_id, 0] = current_pose.filters[kpt_id][0](
current_pose.keypoints[kpt_id, 0])
current_pose.keypoints[kpt_id, 1] = current_pose.filters[kpt_id][1](
current_pose.keypoints[kpt_id, 1])
current_pose.bbox = cls.get_bbox(current_pose.keypoints)
return current_poses
def update_id(self, id=None):
self.id = id
if self.id is None:
self.id = self.last_id + 1
self.last_id += 1
def draw(self, img, radius=6):
assert self.keypoints.shape == (self.num_kpts, 3)
for part_id in range(len(self.BODY_PARTS_IDS_RENDER)):
kpt_a_id = self.BODY_PARTS_KPT_IDS[part_id][0]
global_kpt_a_id = self.keypoints[kpt_a_id, 0]
if global_kpt_a_id != -1:
x_a, y_a, s_a = self.keypoints[kpt_a_id]
cv2.circle(img, (int(x_a), int(y_a)), radius, colors[jcolors[kpt_a_id]], -1)
kpt_b_id = self.BODY_PARTS_KPT_IDS[part_id][1]
global_kpt_b_id = self.keypoints[kpt_b_id, 0]
if global_kpt_b_id != -1:
x_b, y_b, s_b = self.keypoints[kpt_b_id]
cv2.circle(img, (int(x_b), int(y_b)), radius, colors[jcolors[kpt_b_id]], -1)
if global_kpt_a_id != -1 and global_kpt_b_id != -1:
cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), colors[ecolors[part_id]], radius // 2)
class OpenPoseBody25(BasePose):
num_kpts = 25
pose_entry_size = 27
kpt_names = [
"Nose", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder", "LElbow", "LWrist", "MidHip", "RHip", "RKnee",
"RAnkle", "LHip", "LKnee", "LAnkle", "REye", "LEye", "REar", "LEar", "LBigToe", "LSmallToe", "LHeel", "RBigToe",
"RSmallToe", "RHeel"
]
BODY_PARTS_KPT_IDS = [
(1, 8), (1, 2), (1, 5), (2, 3), (3, 4), (5, 6),
(6, 7), (8, 9), (9, 10), (10, 11), (8, 12), (12, 13),
(13, 14), (1, 0), (0, 15), (15, 17), (0, 16), (16, 18),
(2, 17), (5, 18), (14, 19), (19, 20), (14, 21), (11, 22),
(22, 23), (11, 24)
]
BODY_PARTS_PAF_IDS = [
(0, 1), (14, 15), (22, 23), (16, 17), (18, 19), (24, 25),
(26, 27), (6, 7), (2, 3), (4, 5), (8, 9), (10, 11),
(12, 13), (30, 31), (32, 33), (36, 37), (34, 35), (38, 39),
(20, 21), (28, 29), (40, 41), (42, 43), (44, 45), (46, 47),
(48, 49), (50, 51)
]
BODY_PARTS_IDS_RENDER = [
(1, 8), (1, 2), (1, 5), (2, 3), (3, 4), (5, 6),
(6, 7), (8, 9), (9, 10), (10, 11), (8, 12), (12, 13),
(13, 14), (1, 0), (0, 15), (15, 17), (0, 16), (16, 18),
(14, 19), (19, 20), (14, 21), (11, 22),
(22, 23), (11, 24)
]
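    # Per-keypoint sigmas in the spirit of the COCO OKS metric, laid out in BODY_25
    # order; the mid-hip and the six foot keypoints are additions (see the TODO notes
    # below). vars = (2 * sigma) ** 2 is the per-keypoint falloff used by
    # BasePose.get_similarity.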
sigmas = np.array([
.26, .79, .79, .72, .62, .79, .72, .62,
# TODO, additional mid-hip
0.79,
1.07, .87, .89, 1.07, .87, .89, .25, .25, .35, .35,
# TODO, additional 6 feets
.35, .35, .25, .35, .35, .25
], dtype=np.float32) / 10.0
vars = (sigmas * 2) ** 2
last_id = -1
def __init__(self, keypoints, confidence):
super().__init__()
self.keypoints = keypoints
self.confidence = confidence
self.bbox = self.get_bbox(self.keypoints)
self.id = None
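        # one (x, y) pair of One Euro filters per keypoint, used by track_poses(..., smooth=True)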
self.filters = [[OneEuroFilter(), OneEuroFilter()] for _ in range(self.num_kpts)]
POSE_CLASS = {
"BODY_25": OpenPoseBody25
}
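# --- Usage sketch ---
# A minimal, illustrative wiring of the classes above; it is an assumption for
# documentation purposes and not part of the original iPERCore API. The
# `estimate_keypoints` argument is a hypothetical callable that, given a frame,
# returns a list of (keypoints, confidence) pairs, where keypoints is a (25, 3)
# float32 array of (x, y, score) with x == -1 marking missing joints.
def _track_video_sketch(frames, estimate_keypoints, pose_cls=OpenPoseBody25):
    previous_poses = []
    for frame in frames:
        # wrap this frame's detections into pose objects
        current_poses = [pose_cls(kpts, conf) for kpts, conf in estimate_keypoints(frame)]
        # propagate ids from the previous frame and smooth keypoints over time
        current_poses = pose_cls.track_poses(previous_poses, current_poses, threshold=3, smooth=True)
        # render the tracked skeletons in place
        for pose in current_poses:
            pose.draw(frame)
        previous_poses = current_poses
    return previous_poses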
| [
"liuwen@shanghaitech.edu.cn"
] | liuwen@shanghaitech.edu.cn |