| column | dtype | stats |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 247 |
| max_stars_repo_name | string | lengths 4 to 125 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 368k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 247 |
| max_issues_repo_name | string | lengths 4 to 125 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 247 |
| max_forks_repo_name | string | lengths 4 to 125 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | lengths 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
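The rows below follow this schema, with `content` holding the filtered source and `original_content` the unfiltered file. As a minimal sketch of how such a table could be inspected, assuming the dump is available as a Hugging Face dataset loadable with the `datasets` library (the dataset name used here is a hypothetical placeholder, not part of the original dump):

```python
# Minimal sketch: iterate rows of a dataset with the schema above.
# Assumption: the data is published as a Hugging Face dataset; the name
# "example-org/python-filtered-code" is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("example-org/python-filtered-code", split="train", streaming=True)

for row in ds:
    # Column names come directly from the schema table above.
    print(
        row["hexsha"][:8],
        row["max_stars_repo_name"],
        row["max_stars_repo_path"],
        "stars:", row["max_stars_count"],
        "| content:", len(row["content"]), "chars",
        "| original:", len(row["original_content"]), "chars",
        "| filtered:remove_function_no_docstring:",
        row["filtered:remove_function_no_docstring"],
    )
    break  # inspect just the first row
```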
---
hexsha: 632076a5b0b4718ee9b622d4f1c6eb2d7a83c415 | size: 2,131 | ext: py | lang: Python
max_stars_repo: join.py @ geekyvyas/Sleep-Tight (20aaaf6d192fbec97bc549e6024fe13247beac19) | licenses ["MIT"] | count 2 | events 2020-12-01T02:53:23.000Z to 2020-12-01T04:37:37.000Z
max_issues_repo: join.py @ geekyvyas/Sleep-Tight (20aaaf6d192fbec97bc549e6024fe13247beac19) | licenses ["MIT"] | count null | events null to null
max_forks_repo: join.py @ geekyvyas/Sleep-Tight (20aaaf6d192fbec97bc549e6024fe13247beac19) | licenses ["MIT"] | count 2 | events 2020-11-27T19:02:57.000Z to 2020-12-01T05:30:10.000Z
content:
import pickle
from firebase import firebase
import pyautogui
import time
firebase = firebase.FirebaseApplication('https://sleep-tight-8a6df.firebaseio.com/', None)
id2 = pickle.load(open("chrome","rb"))
X = firebase.get('/sleep-tight-8a6df/Chrome/'+ str(id2) , 'CX' )
Y = firebase.get('/sleep-tight-8a6df/Chrome/'+ str(id2) , 'CY' )
pyautogui.click(X, Y)
time.sleep(5)
pyautogui.write('https://cuchd.blackboard.com/ultra/course')
pyautogui.keyDown('enter')
time.sleep(10)
id2 = pickle.load(open("sign","rb"))
X = firebase.get('/sleep-tight-8a6df/signin/'+ str(id2) , 'SX' )
Y = firebase.get('/sleep-tight-8a6df/signin/'+ str(id2) , 'SY' )
pyautogui.click(X, Y)
time.sleep(15)
st = "ELT"
i = pickle.load(open(st,"rb"))
saq = int(i)
space(saq)
slass(st)
pyautogui.alert('After clicking ok move your mouse on join session and wait for another prompt.')
time.sleep(5)
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.alert('Done!!!')
time.sleep(2)
pyautogui.click(currentMouseX, currentMouseY)
data = {
'X': currentMouseX,
'Y': currentMouseY
}
result = firebase.post('/sleep-tight-8a6df/jssion/',data)
final = ''.join(key + str(val) for key, val in result.items())
data = str(final)
proxy = data[4:24]
pickle.dump(proxy, open("jesi","wb"))
pyautogui.alert('After clicking ok move your mouse on course room and wait for another prompt.')
time.sleep(4)
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.alert('Done!!!')
time.sleep(2)
data = {
'X': currentMouseX,
'Y': currentMouseY
}
result = firebase.post('/sleep-tight-8a6df/jssion1/',data)
final = ''.join(key + str(val) for key, val in result.items())
data = str(final)
proxy = data[4:24]
pickle.dump(proxy, open("jesin","wb"))
pyautogui.alert('Now Run tropy.py using the command given in github README.md file.')
avg_line_length: 26.6375 | max_line_length: 97 | alphanum_fraction: 0.679493
original_content:
import pickle
from firebase import firebase
import pyautogui
import time
def space(saq):
for j in range(0, saq):
pyautogui.scroll(-100)
def slass(st):
id2 = pickle.load(open(st+"1","rb"))
CX = firebase.get('/sleep-tight-8a6df/'+st+'/'+ str(id2) , 'X' )
CY = firebase.get('/sleep-tight-8a6df/'+st+'/'+ str(id2) , 'Y' )
pyautogui.click(CX, CY)
firebase = firebase.FirebaseApplication('https://sleep-tight-8a6df.firebaseio.com/', None)
id2 = pickle.load(open("chrome","rb"))
X = firebase.get('/sleep-tight-8a6df/Chrome/'+ str(id2) , 'CX' )
Y = firebase.get('/sleep-tight-8a6df/Chrome/'+ str(id2) , 'CY' )
pyautogui.click(X, Y)
time.sleep(5)
pyautogui.write('https://cuchd.blackboard.com/ultra/course')
pyautogui.keyDown('enter')
time.sleep(10)
id2 = pickle.load(open("sign","rb"))
X = firebase.get('/sleep-tight-8a6df/signin/'+ str(id2) , 'SX' )
Y = firebase.get('/sleep-tight-8a6df/signin/'+ str(id2) , 'SY' )
pyautogui.click(X, Y)
time.sleep(15)
st = "ELT"
i = pickle.load(open(st,"rb"))
saq = int(i)
space(saq)
slass(st)
pyautogui.alert('After clicking ok move your mouse on join session and wait for another prompt.')
time.sleep(5)
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.alert('Done!!!')
time.sleep(2)
pyautogui.click(currentMouseX, currentMouseY)
data = {
'X': currentMouseX,
'Y': currentMouseY
}
result = firebase.post('/sleep-tight-8a6df/jssion/',data)
final = ''.join(key + str(val) for key, val in result.items())
data = str(final)
proxy = data[4:24]
pickle.dump(proxy, open("jesi","wb"))
pyautogui.alert('After clicking ok move your mouse on course room and wait for another prompt.')
time.sleep(4)
currentMouseX, currentMouseY = pyautogui.position()
pyautogui.alert('Done!!!')
time.sleep(2)
data = {
'X': currentMouseX,
'Y': currentMouseY
}
result = firebase.post('/sleep-tight-8a6df/jssion1/',data)
final = ''.join(key + str(val) for key, val in result.items())
data = str(final)
proxy = data[4:24]
pickle.dump(proxy, open("jesin","wb"))
pyautogui.alert('Now Run tropy.py using the command given in github README.md file.')
filtered:remove_function_no_docstring: 254 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 46
---
hexsha: 96a0a75dc8ec2587e6eff3eebbee708fbf747ff7 | size: 5,361 | ext: py | lang: Python
max_stars_repo: tests/functional/regressions/test_refactoring_execution.py @ matt-koevort/tartiflette (5777866b133d846ce4f8aa03f735fa81832896cd) | licenses ["MIT"] | count 530 | events 2019-06-04T11:45:36.000Z to 2022-03-31T09:29:56.000Z
max_issues_repo: tests/functional/regressions/test_refactoring_execution.py @ matt-koevort/tartiflette (5777866b133d846ce4f8aa03f735fa81832896cd) | licenses ["MIT"] | count 242 | events 2019-06-04T11:53:08.000Z to 2022-03-28T07:06:27.000Z
max_forks_repo: tests/functional/regressions/test_refactoring_execution.py @ matt-koevort/tartiflette (5777866b133d846ce4f8aa03f735fa81832896cd) | licenses ["MIT"] | count 36 | events 2019-06-21T06:40:27.000Z to 2021-11-04T13:11:16.000Z
content:
import pytest
_EXPECTED = {
"data": {
"dog": {
"name": "Dog",
"nickname": "Doggo",
"barkVolume": 2,
"doesKnowCommand": True,
"isHousetrained": False,
"owner": {"name": "Hooman"},
}
}
}
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
resolvers={
"Query.dog": resolve_query_dog,
"Dog.doesKnowCommand": resolve_dog_does_know_command,
"Dog.isHousetrained": resolve_dog_is_housetrained,
"Dog.owner": resolve_dog_owner,
"Dog.friends": resolve_dog_friends,
"Query.cat": resolve_query_cat,
"Cat.doesKnowCommand": resolve_cat_does_know_command,
"Query.human": resolve_query_human,
"Query.catOrDog": resolve_query_cat_or_dog,
}
)
@pytest.mark.parametrize(
"operation_name,query,variables,expected",
[
(
None,
"""
query {
dog {
name
nickname
barkVolume
doesKnowCommand(dogCommand: DOWN)
isHousetrained(atOtherHomes: true)
owner {
name
}
}
}
""",
None,
_EXPECTED,
),
(
"Dog",
"""
fragment HumanFields on Human {
... on Human {
name
}
}
fragment LightCatOrDogFields on CatOrDog {
... on Cat {
name
nickname
}
... on Dog {
name
nickname
}
}
fragment LightDogFields on Dog {
name
barkVolume
}
fragment DogFields on Dog {
name
doesKnowCommand(dogCommand: DOWN)
isHousetrained(atOtherHomes: true)
owner {
... on Human {
...HumanFields
}
}
friends {
...LightCatOrDogFields
}
}
fragment CatFields on Cat {
name
}
fragment QueryDogFields on Query {
... on Query {
... {
dog {
... on Dog {
...DogFields
}
}
dog {
name
nickname
barkVolume
}
dog {
...LightDogFields
}
}
}
}
query Dog {
... on Query {
...QueryDogFields
}
}
query Cat {
cat {
...CatFields
}
}
""",
None,
{
"data": {
"dog": {
"name": "Dog",
"doesKnowCommand": True,
"isHousetrained": False,
"owner": {"name": "Hooman"},
"friends": [
{"name": "Dog", "nickname": "Doggo"},
{"name": "Cat", "nickname": "Catto"},
],
"nickname": "Doggo",
"barkVolume": 2,
}
}
},
),
(
None,
"""
query CatOrDog {
catOrDog(id: 1) {
... on Dog {
name
}
... on Dog {
nickname
}
... on Cat {
name
}
}
}
""",
None,
{"data": {"catOrDog": {"name": "Dog", "nickname": "Doggo"}}},
),
],
)
avg_line_length: 24.705069 | max_line_length: 73 | alphanum_fraction: 0.403096
original_content:
import pytest
async def resolve_query_dog(parent, args, ctx, info):
return {"name": "Dog", "nickname": "Doggo", "barkVolume": 2}
async def resolve_dog_does_know_command(parent, args, ctx, info):
return True
async def resolve_dog_is_housetrained(parent, args, ctx, info):
return False
async def resolve_dog_owner(parent, args, ctx, info):
return {"name": "Hooman"}
async def resolve_query_cat(parent, args, ctx, info):
return {"name": "Cat", "nickname": "Catto", "meowVolume": 1}
async def resolve_cat_does_know_command(parent, args, ctx, info):
return False
async def resolve_query_human(parent, args, ctx, info):
return {"name": "Hooman"}
async def resolve_query_cat_or_dog(parent, args, ctx, info):
return {"_typename": "Dog", "name": "Dog", "nickname": "Doggo"}
async def resolve_dog_friends(parent, args, ctx, info):
return [
{"_typename": "Dog", "name": "Dog", "nickname": "Doggo"},
{"_typename": "Cat", "name": "Cat", "nickname": "Catto"},
]
_EXPECTED = {
"data": {
"dog": {
"name": "Dog",
"nickname": "Doggo",
"barkVolume": 2,
"doesKnowCommand": True,
"isHousetrained": False,
"owner": {"name": "Hooman"},
}
}
}
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
resolvers={
"Query.dog": resolve_query_dog,
"Dog.doesKnowCommand": resolve_dog_does_know_command,
"Dog.isHousetrained": resolve_dog_is_housetrained,
"Dog.owner": resolve_dog_owner,
"Dog.friends": resolve_dog_friends,
"Query.cat": resolve_query_cat,
"Cat.doesKnowCommand": resolve_cat_does_know_command,
"Query.human": resolve_query_human,
"Query.catOrDog": resolve_query_cat_or_dog,
}
)
@pytest.mark.parametrize(
"operation_name,query,variables,expected",
[
(
None,
"""
query {
dog {
name
nickname
barkVolume
doesKnowCommand(dogCommand: DOWN)
isHousetrained(atOtherHomes: true)
owner {
name
}
}
}
""",
None,
_EXPECTED,
),
(
"Dog",
"""
fragment HumanFields on Human {
... on Human {
name
}
}
fragment LightCatOrDogFields on CatOrDog {
... on Cat {
name
nickname
}
... on Dog {
name
nickname
}
}
fragment LightDogFields on Dog {
name
barkVolume
}
fragment DogFields on Dog {
name
doesKnowCommand(dogCommand: DOWN)
isHousetrained(atOtherHomes: true)
owner {
... on Human {
...HumanFields
}
}
friends {
...LightCatOrDogFields
}
}
fragment CatFields on Cat {
name
}
fragment QueryDogFields on Query {
... on Query {
... {
dog {
... on Dog {
...DogFields
}
}
dog {
name
nickname
barkVolume
}
dog {
...LightDogFields
}
}
}
}
query Dog {
... on Query {
...QueryDogFields
}
}
query Cat {
cat {
...CatFields
}
}
""",
None,
{
"data": {
"dog": {
"name": "Dog",
"doesKnowCommand": True,
"isHousetrained": False,
"owner": {"name": "Hooman"},
"friends": [
{"name": "Dog", "nickname": "Doggo"},
{"name": "Cat", "nickname": "Catto"},
],
"nickname": "Doggo",
"barkVolume": 2,
}
}
},
),
(
None,
"""
query CatOrDog {
catOrDog(id: 1) {
... on Dog {
name
}
... on Dog {
nickname
}
... on Cat {
name
}
}
}
""",
None,
{"data": {"catOrDog": {"name": "Dog", "nickname": "Doggo"}}},
),
],
)
async def test_refactoring_execution(
engine, operation_name, query, variables, expected
):
assert (
await engine.execute(
query, operation_name=operation_name, variables=variables
)
== expected
)
filtered:remove_function_no_docstring: 1,015 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 229
---
hexsha: 16cce0186da20c0727107873fe01ccdc2e82f594 | size: 1,632 | ext: py | lang: Python
max_stars_repo: setup.py @ msftgits/xblock-azure-media-services (ab0a61484611d990d603cc4e3dbeac7c43be435f) | licenses ["MIT"] | count 7 | events 2016-08-12T03:48:18.000Z to 2018-07-30T23:02:29.000Z
max_issues_repo: setup.py @ msftgits/xblock-azure-media-services (ab0a61484611d990d603cc4e3dbeac7c43be435f) | licenses ["MIT"] | count 17 | events 2017-01-31T03:20:20.000Z to 2018-11-02T21:36:09.000Z
max_forks_repo: setup.py @ msftgits/xblock-azure-media-services (ab0a61484611d990d603cc4e3dbeac7c43be435f) | licenses ["MIT"] | count 11 | events 2019-08-13T07:11:32.000Z to 2021-12-30T09:52:03.000Z
content:
# Copyright (c) Microsoft Corporation. All Rights Reserved.
# Licensed under the MIT license. See LICENSE file on the project webpage for details.
"""Setup for azure_media_services XBlock."""
import os
from setuptools import setup
def package_data(pkg, roots):
"""Generic function to find package_data.
All of the files under each of the `roots` will be declared as package
data for package `pkg`.
"""
data = []
for root in roots:
for dirname, __, files in os.walk(os.path.join(pkg, root)):
for fname in files:
data.append(os.path.relpath(os.path.join(dirname, fname), pkg))
return {pkg: data}
setup(
name='azure_media_services-xblock',
version='0.0.1',
description='This XBlock implements a video player that utilizes the Azure Media Services.',
packages=[
'azure_media_services',
],
include_package_data=True,
dependency_links=[
# At the moment of writing PyPI hosts outdated version of xblock-utils, hence git
# Replace dependency links with numbered versions when it's released on PyPI
'git+https://github.com/edx/xblock-utils.git@v1.0.5#egg=xblock-utils==1.0.5',
],
install_requires=[
'PyJWT',
'bleach',
'mako',
'requests>=2.9.1,<3.0.0',
'XBlock>=0.4.10,<2.0.0',
'xblock-utils>=1.0.2,<=1.0.5',
],
entry_points={
'xblock.v1': [
'azure_media_services = azure_media_services:AMSXBlock',
]
},
package_data=package_data("azure_media_services", ["static", "templates", "public", "translations"]),
)
avg_line_length: 29.672727 | max_line_length: 105 | alphanum_fraction: 0.637255
original_content:
# Copyright (c) Microsoft Corporation. All Rights Reserved.
# Licensed under the MIT license. See LICENSE file on the project webpage for details.
"""Setup for azure_media_services XBlock."""
import os
from setuptools import setup
def package_data(pkg, roots):
"""Generic function to find package_data.
All of the files under each of the `roots` will be declared as package
data for package `pkg`.
"""
data = []
for root in roots:
for dirname, __, files in os.walk(os.path.join(pkg, root)):
for fname in files:
data.append(os.path.relpath(os.path.join(dirname, fname), pkg))
return {pkg: data}
setup(
name='azure_media_services-xblock',
version='0.0.1',
description='This XBlock implements a video player that utilizes the Azure Media Services.',
packages=[
'azure_media_services',
],
include_package_data=True,
dependency_links=[
# At the moment of writing PyPI hosts outdated version of xblock-utils, hence git
# Replace dependency links with numbered versions when it's released on PyPI
'git+https://github.com/edx/xblock-utils.git@v1.0.5#egg=xblock-utils==1.0.5',
],
install_requires=[
'PyJWT',
'bleach',
'mako',
'requests>=2.9.1,<3.0.0',
'XBlock>=0.4.10,<2.0.0',
'xblock-utils>=1.0.2,<=1.0.5',
],
entry_points={
'xblock.v1': [
'azure_media_services = azure_media_services:AMSXBlock',
]
},
package_data=package_data("azure_media_services", ["static", "templates", "public", "translations"]),
)
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
---
hexsha: 250e3619ae2c338be24317e38207f7f12ae1b258 | size: 2,206 | ext: py | lang: Python
max_stars_repo: pipeline/coann/brents_bpbio/biostuff/tests/test_blast_line.py @ gturco/find_cns (63e08d17d9c81e250ef2637216fbf947cc295823) | licenses ["MIT"] | count 4 | events 2016-03-21T19:19:24.000Z to 2019-10-23T09:20:13.000Z
max_issues_repo: pipeline/coann/brents_bpbio/biostuff/tests/test_blast_line.py @ hengbingao/find_cns (63e08d17d9c81e250ef2637216fbf947cc295823) | licenses ["MIT"] | count 10 | events 2016-03-21T16:55:29.000Z to 2022-03-22T07:26:03.000Z
max_forks_repo: pipeline/coann/brents_bpbio/biostuff/tests/test_blast_line.py @ hengbingao/find_cns (63e08d17d9c81e250ef2637216fbf947cc295823) | licenses ["MIT"] | count 5 | events 2016-03-02T16:20:05.000Z to 2021-07-28T02:31:23.000Z
content:
from biostuff import BlastLine, BlastFile
some_attrs = ('qstart', 'qstop', 'sstart', 'sstop', 'pctid', 'score', 'query',
'subject')
avg_line_length: 26.261905 | max_line_length: 78 | alphanum_fraction: 0.601995
original_content:
from biostuff import BlastLine, BlastFile
some_attrs = ('qstart', 'qstop', 'sstart', 'sstop', 'pctid', 'score', 'query',
'subject')
def test_blastfile():
f = "tests/data/tabd.blast"
bf = BlastFile(f)
fh = open(f, 'r')
# iterate via python and c and check each line is the same.
for line, b in zip(fh, bf):
bl = BlastLine(line)
assert isinstance(b, BlastLine)
assert bl == b
i = 0
for c in bf:
i += 1
assert isinstance(c, BlastLine)
assert i == len(open(f).readlines())
del bf
def test_blastfile_list():
f = "tests/data/tabd.blast"
blasts = list(BlastFile(f))
assert len(blasts) == len(open(f).readlines())
def test_blastline():
f = "tests/data/tabd.blast"
blasts = []
for line in open(f):
b = BlastLine(line)
blasts.append(BlastLine(line))
yield check_type, blasts, ('qstart', 'qstop', 'sstart', 'sstop',
'nmismatch', 'ngaps'), int
yield check_type, blasts, ('evalue', 'score', 'pctid'), float
yield check_type, blasts, ('query', 'subject'), str
def check_type(blasts, attrs, klass):
for b in blasts:
for attr in attrs:
assert isinstance(getattr(b, attr), klass)
def test_query_subject_props():
f = "tests/data/tabd.blast"
line = BlastLine(open(f).readline())
line.query = "asdf"
line.subject = "dddd"
assert line.query == "asdf"
assert line.subject == "dddd"
assert "asdf" in line.to_blast_line()
assert "dddd" in line.to_blast_line()
def test_to_string():
f = "tests/data/tabd.blast"
for line in open(f):
a = BlastLine(line)
b = BlastLine(a.to_blast_line())
# works better than string comparison because of floats.
for attr in some_attrs:
assert getattr(a, attr) == getattr(b, attr), (a, b, attr)
def test_pickle():
import cPickle
f = "tests/data/tabd.blast"
line = BlastLine(open(f).readline())
d = cPickle.dumps(line, -1)
loaded = cPickle.loads(d)
for k in BlastLine.attrs:
assert getattr(loaded, k) == getattr(line, k)
loaded.query = "asdf"
assert loaded.query != line.query
filtered:remove_function_no_docstring: 1,898 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 161
---
hexsha: 78b965963b0fdbf939b3cdfaf57a5f0c64642a25 | size: 1,433 | ext: py | lang: Python
max_stars_repo: enemy.py @ Harris-Lodi/Alien_Invasion_Game (449f694d5a51c806d5174a1a122dc881a3cb1ca0) | licenses ["MIT"] | count null | events null to null
max_issues_repo: enemy.py @ Harris-Lodi/Alien_Invasion_Game (449f694d5a51c806d5174a1a122dc881a3cb1ca0) | licenses ["MIT"] | count null | events null to null
max_forks_repo: enemy.py @ Harris-Lodi/Alien_Invasion_Game (449f694d5a51c806d5174a1a122dc881a3cb1ca0) | licenses ["MIT"] | count null | events null to null
content:
import pygame
from pygame.sprite import Sprite
# a class to represent a single enemy in the fleet
# init the enemy and it's starting position
# function to check if enemy is at edge of screen
# enemy update function
avg_line_length: 31.844444 | max_line_length: 77 | alphanum_fraction: 0.653873
original_content:
import pygame
from pygame.sprite import Sprite
# a class to represent a single enemy in the fleet
class Enemy(Sprite):
# init the enemy and it's starting position
def __init__(self, ai_game):
# just like with bullet, initialize the Sprite super class
# and set enemy to main game screen
# import settings for enemy from ai_game
super().__init__()
self.screen = ai_game.screen
self.settings = ai_game.settings
# load the enemy image and set it's rect attribute
self.image = pygame.image.load('Images/enemy.bmp')
self.rect = self.image.get_rect()
# start each new enemy near the top left of the screen
self.rect.x = self.rect.width
self.rect.y = self.rect.height
# store the enemies exact horizontal position
self.x = float(self.rect.x)
# function to check if enemy is at edge of screen
def check_edges(self):
# return true if enemy is at edge of screen
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right or self.rect.left <= 0:
return True
# enemy update function
def update(self):
# move enemy to the right or left depending on fleet_direction
self.x += (self.settings.enemy_speed * self.settings.fleet_direction)
# update position and record it to this instance class
self.rect.x = self.x
filtered:remove_function_no_docstring: 1,100 | filtered:remove_class_no_docstring: -1 | filtered:remove_delete_markers: 100
---
hexsha: aa4d461901599ec9db199d26d7650629fd8584e3 | size: 10,822 | ext: py | lang: Python
max_stars_repo: error_report/sentry.py @ biolab/orange-web (f1b1e4fc6acd00dd4033cdf55baef0b8d216428a) | licenses ["Qhull"] | count 17 | events 2016-04-14T17:07:20.000Z to 2021-02-14T09:27:50.000Z
max_issues_repo: error_report/sentry.py @ biolab/orange-web (f1b1e4fc6acd00dd4033cdf55baef0b8d216428a) | licenses ["Qhull"] | count 38 | events 2015-08-28T08:53:20.000Z to 2019-05-10T11:49:24.000Z
max_forks_repo: error_report/sentry.py @ biolab/orange-web (f1b1e4fc6acd00dd4033cdf55baef0b8d216428a) | licenses ["Qhull"] | count 27 | events 2015-01-29T10:44:12.000Z to 2021-12-12T17:21:26.000Z
content:
import copy
import logging
import re
import uuid
from django.conf import settings
from raven import Client
logger = logging.getLogger(__name__)
REPORTS_BASE_URL = 'http://qa.orange.biolab.si/errors/{}'
PYTHON_FOLDERS = [
"site-packages",
"dist-packages",
"Python34.lib",
"anaconda3.lib",
"lib.python3.4",
"orange3",
]
ORANGE_ADDONS = [
'orangecontrib',
'lekbf',
'_textable',
'orangebiodepot',
]
FRAMES_RE = re.compile('File "([^"]+)", line (\d+), in ([^ ]+) (.*)')
DEVICE_RE = re.compile('Python ([\d\.]+) on ([^ ]+) ([^ ]+) (.+) ([^ ]+)$')
# Modules that should not be grouped by
GENERAL_MODULES = [
"Orange.data.domain:232", # domain.index(attr_name)
"sklearn.utils.validation:424", # check_array
"Orange.util:141", # attrgetter(attr)(obj)
"Orange.statistics.util:52", # bincount
]
ORANGE3_DATASETS = ('Orange3-Datasets', "https://2cb16c369f474e799ae384045dbf489e:b35f4e39d8b1417190aeb475e8c3df0a@sentry.io/167538")
ORANGE_SPECTROSCOPY = "https://1cb3697dbfc04f748bae548865f1b1a8:eb0b726e492b44358a277c97c8c631f2@sentry.io/176038"
DSN_3RDPARTY = "https://d077c44bbab1407595c9838ace02aea5:f3f434118ea44e0a9e61c580ca156505@sentry.io/176069"
DSN_TEXTABLE = "https://489e53f2068441f48d0d7bb3f5f066d5:299379ad47a140dfaee2042a6bb4204f@sentry.io/207453"
SINGLE_CELL = "https://3acf738fd9a3458ab76cabcfaa072dcf:6b24664b8a67412382986cd388de965b@sentry.io/209789"
DSN_ORANGE = "https://6f0311046ad2438598ae121cdabd878f:df101b5249ea4c89a82fc1f5da73886d@sentry.io/124497"
# For addons with separate DSNs mapping from namespace to addon name
# must be provided for reporting addon version as release.
NAMESPACE_TO_ADDON = {
'associate': ('Orange3-Associate', "https://cde61b47c74c4f98931264c1112b1bc2:10cfb3b76a16466fb6583a7952c660a8@sentry.io/167541"),
'bioinformatics': ('Orange3-Bioinformatics', "https://2e100fa55b83432e83aa04dc54962e5f@sentry.io/1311211"),
'conformal': ('Orange3-Conformal-Prediction', "https://3cf0bca1e5ed4b6a811c9980f27ed8ee:94015ed538b04bdcb4da2c35f0d792f8@sentry.io/167539"),
'datafusion': ('Orange3-DataFusion', "https://894bd2e1f47a4271834b8fbc019fc90b:e9d52ebb81354ca0b84fa64624f3882a@sentry.io/167542"),
'wbd': ORANGE3_DATASETS,
'datasets': ORANGE3_DATASETS,
'educational': ('Orange3-Educational', "https://93323bc17a094974a830b25abbae01b5:4fd5e7c529e34afd97ceca08ed4f059d@sentry.io/167545"),
'geo': ('Orange3-Geo', "https://f3b7d23593d14247808b70ff964b3956:ff25c1d23d3a4eca849429c731c874d9@sentry.io/167528"),
'imageanalytics': ('Orange3-ImageAnalytics', "https://cc2ef6171aad4b6ba344e2851169db7d:cd21ed3e80ae4f4385b31a24e0d036cf@sentry.io/161064"),
'network': ('Orange3-Network', "https://14706c0ff3e047d999cff64e6100eb25:1dd7b84d0afc449abba1757e3520b0c2@sentry.io/167534"),
'prototypes': ('Orange3-Prototypes', "https://d7440097e7f64e4cbff90dd31fc8876e:dde09f7ba917431884b7eb04c814b824@sentry.io/167530"),
'recommendation': ('Orange3-Recommendation', "https://e447ddb4e80149289bca679121359c03:e4b9a0f1a1414f7d906e56b8e28be9cc@sentry.io/167543"),
'text': ('Orange3-Text', "https://38ffabded40c46b9952b2acebc726866:147d6a5becfa40499b6d79e858fb6ef1@sentry.io/128443"),
'timeseries': ('Orange3-Timeseries', "https://e8f30f9dbaf74635bb10e37abe0b5354:2478a41e2f95463db8ceebfeb060cc99@sentry.io/161065"),
'testing': ('', "https://261797e8fa4544ffb931bc495157d2e3:44e30b93f9f1463a975725f82ca18039@sentry.io/128442"),
'lekbf': ('lekbf', "https://7da121cc693045c688d5ffd2d320e65b:1e2b3e613c85437ba8f005035572b3b7@sentry.io/174357"),
'infrared': ('Orange-Infrared', ORANGE_SPECTROSCOPY),
'spectroscopy': ('Orange-Spectroscopy', ORANGE_SPECTROSCOPY),
'monroe_anal': ('monroe-anal', "https://26940ac80e9f4cf095dd6c90e7e7e674:37d903fdd6364d52be6e50614d5cfccf@sentry.io/242335"),
'spark': ('Orange3-spark', DSN_3RDPARTY),
'tomwer': ('tomwer', DSN_3RDPARTY),
'textable_prototypes': ('Orange3-Textable-Prototypes', DSN_TEXTABLE),
'orangebiodepot': ('orangebiodepot', DSN_3RDPARTY),
'_textable': ('Orange3-Textable', DSN_TEXTABLE),
'variants': ('Orange3-Variants', SINGLE_CELL),
'single_cell': ('Orange3-SingleCell', SINGLE_CELL),
'chem': ('Orange3-Chemoinformatics', "https://a2cfd734538c4892ad3c02679891fa44:1fdd2cbd5bef4c7897d1a10077e9de97@sentry.io/275477"),
}
avg_line_length: 41.305344 | max_line_length: 151 | alphanum_fraction: 0.660044
original_content:
import copy
import logging
import re
import uuid
from django.conf import settings
from raven import Client
logger = logging.getLogger(__name__)
REPORTS_BASE_URL = 'http://qa.orange.biolab.si/errors/{}'
PYTHON_FOLDERS = [
"site-packages",
"dist-packages",
"Python34.lib",
"anaconda3.lib",
"lib.python3.4",
"orange3",
]
ORANGE_ADDONS = [
'orangecontrib',
'lekbf',
'_textable',
'orangebiodepot',
]
FRAMES_RE = re.compile('File "([^"]+)", line (\d+), in ([^ ]+) (.*)')
DEVICE_RE = re.compile('Python ([\d\.]+) on ([^ ]+) ([^ ]+) (.+) ([^ ]+)$')
# Modules that should not be grouped by
GENERAL_MODULES = [
"Orange.data.domain:232", # domain.index(attr_name)
"sklearn.utils.validation:424", # check_array
"Orange.util:141", # attrgetter(attr)(obj)
"Orange.statistics.util:52", # bincount
]
ORANGE3_DATASETS = ('Orange3-Datasets', "https://2cb16c369f474e799ae384045dbf489e:b35f4e39d8b1417190aeb475e8c3df0a@sentry.io/167538")
ORANGE_SPECTROSCOPY = "https://1cb3697dbfc04f748bae548865f1b1a8:eb0b726e492b44358a277c97c8c631f2@sentry.io/176038"
DSN_3RDPARTY = "https://d077c44bbab1407595c9838ace02aea5:f3f434118ea44e0a9e61c580ca156505@sentry.io/176069"
DSN_TEXTABLE = "https://489e53f2068441f48d0d7bb3f5f066d5:299379ad47a140dfaee2042a6bb4204f@sentry.io/207453"
SINGLE_CELL = "https://3acf738fd9a3458ab76cabcfaa072dcf:6b24664b8a67412382986cd388de965b@sentry.io/209789"
DSN_ORANGE = "https://6f0311046ad2438598ae121cdabd878f:df101b5249ea4c89a82fc1f5da73886d@sentry.io/124497"
# For addons with separate DSNs mapping from namespace to addon name
# must be provided for reporting addon version as release.
NAMESPACE_TO_ADDON = {
'associate': ('Orange3-Associate', "https://cde61b47c74c4f98931264c1112b1bc2:10cfb3b76a16466fb6583a7952c660a8@sentry.io/167541"),
'bioinformatics': ('Orange3-Bioinformatics', "https://2e100fa55b83432e83aa04dc54962e5f@sentry.io/1311211"),
'conformal': ('Orange3-Conformal-Prediction', "https://3cf0bca1e5ed4b6a811c9980f27ed8ee:94015ed538b04bdcb4da2c35f0d792f8@sentry.io/167539"),
'datafusion': ('Orange3-DataFusion', "https://894bd2e1f47a4271834b8fbc019fc90b:e9d52ebb81354ca0b84fa64624f3882a@sentry.io/167542"),
'wbd': ORANGE3_DATASETS,
'datasets': ORANGE3_DATASETS,
'educational': ('Orange3-Educational', "https://93323bc17a094974a830b25abbae01b5:4fd5e7c529e34afd97ceca08ed4f059d@sentry.io/167545"),
'geo': ('Orange3-Geo', "https://f3b7d23593d14247808b70ff964b3956:ff25c1d23d3a4eca849429c731c874d9@sentry.io/167528"),
'imageanalytics': ('Orange3-ImageAnalytics', "https://cc2ef6171aad4b6ba344e2851169db7d:cd21ed3e80ae4f4385b31a24e0d036cf@sentry.io/161064"),
'network': ('Orange3-Network', "https://14706c0ff3e047d999cff64e6100eb25:1dd7b84d0afc449abba1757e3520b0c2@sentry.io/167534"),
'prototypes': ('Orange3-Prototypes', "https://d7440097e7f64e4cbff90dd31fc8876e:dde09f7ba917431884b7eb04c814b824@sentry.io/167530"),
'recommendation': ('Orange3-Recommendation', "https://e447ddb4e80149289bca679121359c03:e4b9a0f1a1414f7d906e56b8e28be9cc@sentry.io/167543"),
'text': ('Orange3-Text', "https://38ffabded40c46b9952b2acebc726866:147d6a5becfa40499b6d79e858fb6ef1@sentry.io/128443"),
'timeseries': ('Orange3-Timeseries', "https://e8f30f9dbaf74635bb10e37abe0b5354:2478a41e2f95463db8ceebfeb060cc99@sentry.io/161065"),
'testing': ('', "https://261797e8fa4544ffb931bc495157d2e3:44e30b93f9f1463a975725f82ca18039@sentry.io/128442"),
'lekbf': ('lekbf', "https://7da121cc693045c688d5ffd2d320e65b:1e2b3e613c85437ba8f005035572b3b7@sentry.io/174357"),
'infrared': ('Orange-Infrared', ORANGE_SPECTROSCOPY),
'spectroscopy': ('Orange-Spectroscopy', ORANGE_SPECTROSCOPY),
'monroe_anal': ('monroe-anal', "https://26940ac80e9f4cf095dd6c90e7e7e674:37d903fdd6364d52be6e50614d5cfccf@sentry.io/242335"),
'spark': ('Orange3-spark', DSN_3RDPARTY),
'tomwer': ('tomwer', DSN_3RDPARTY),
'textable_prototypes': ('Orange3-Textable-Prototypes', DSN_TEXTABLE),
'orangebiodepot': ('orangebiodepot', DSN_3RDPARTY),
'_textable': ('Orange3-Textable', DSN_TEXTABLE),
'variants': ('Orange3-Variants', SINGLE_CELL),
'single_cell': ('Orange3-SingleCell', SINGLE_CELL),
'chem': ('Orange3-Chemoinformatics', "https://a2cfd734538c4892ad3c02679891fa44:1fdd2cbd5bef4c7897d1a10077e9de97@sentry.io/275477"),
}
def guess_module(filename):
file_module = filename.replace("\\\\", "\\").replace("/", ".").replace("\\", ".")
for f in PYTHON_FOLDERS + ORANGE_ADDONS:
base, prefixed, module = file_module.partition(f + ".")
if not prefixed:
continue
# fix for addons in dev mode; e.g `orangecontrib.` belongs to module
if f in ORANGE_ADDONS:
module = prefixed + module
for ext in [".py", ".__init__"]:
if module.endswith(ext):
module = module[:-len(ext)]
return module
def extract_frames(stack_trace):
if isinstance(stack_trace, list):
stack_trace = "\n".join(stack_trace)
frames = FRAMES_RE.findall(stack_trace)
return [dict(lineno=lineno,
function=function,
filename=fn if guess_module(fn) is None else None,
module=guess_module(fn),
context_line=line)
for fn, lineno, function, line in frames]
def get_device_info(env):
if isinstance(env, list):
env = "".join(list).strip()
device_info = DEVICE_RE.findall(env)
for py_version, os, os_version, os_build, machine in device_info:
return dict(os=dict(name=os,
version=os_version,
build=os_build),
runtime=dict(name="python",
version=py_version))
def get_exception(ex, st):
if isinstance(ex, list):
ex = "".join(ex)
exc_type, _, exc_message = ex.partition(":")
return dict(
values=[
dict(
stacktrace=dict(
frames=extract_frames(st)
),
type=exc_type,
value=exc_message,
)
]
)
def get_version(v):
if isinstance(v, list):
v = " ".join(v)
return v.partition("0+")[0]
def get_dsn(name, prefix=None):
if name.upper() == "ORANGE":
return DSN_ORANGE
elif name in NAMESPACE_TO_ADDON:
return NAMESPACE_TO_ADDON[name][1]
elif prefix in NAMESPACE_TO_ADDON:
return NAMESPACE_TO_ADDON[prefix][1]
else:
return NAMESPACE_TO_ADDON["testing"][1]
def prep_addon_data(addon, data, duplicated):
# make a copy so we can have different tags
addon_data = copy.deepcopy(data)
# flag duplication status
data["tags"]["addon"] = addon
data["tags"]["duplicated_in_addon"] = duplicated
addon_data["tags"]["duplicated_in_core"] = duplicated
# replace release with addon version
addon_data["tags"]["orange_version"] = data['release']
addon_data["release"] = "unknown"
if addon in NAMESPACE_TO_ADDON:
addon = NAMESPACE_TO_ADDON[addon][0]
for package, version in addon_data['modules'].items():
if addon == package:
addon_data['release'] = get_version(version)
break
return addon_data
def get_dsn_report_pairs(sentry_report):
logger.info("Getting DNS report pairs.")
frames = sentry_report['exception']['values'][0]['stacktrace']['frames']
modules = [f['module'] for f in frames if f.get('module') not in
(None, '', 'Orange.canvas.scheme.widgetsscheme')]
def _filter_modules(names):
return [m for m in modules
if m and any(m.startswith(n + '.') for n in names)]
core_calls = _filter_modules(['Orange'])
addon_calls = _filter_modules(ORANGE_ADDONS)
last_in_addon = _filter_modules(['Orange'] + ORANGE_ADDONS)
last_in_addon = last_in_addon and last_in_addon[-1] in addon_calls
addon, prefix, addon_dsn = None, None, None
if any(addon_calls):
prefix, addon = addon_calls[0].split('.')[:2]
addon_dsn = get_dsn(addon, prefix)
if any(addon_calls) and addon_dsn:
# errors whose stacktrace contains call from addon & core and the
# last call does not come from addon are sent to both issue trackers
duplicated = any(core_calls) and not last_in_addon
yield addon_dsn, prep_addon_data(addon, sentry_report, duplicated)
if duplicated:
yield get_dsn('ORANGE'), sentry_report
else:
sentry_report["tags"]["duplicated_in_addon"] = 'False'
yield get_dsn('ORANGE'), sentry_report
def create_sentry_report(report):
if "Exception" not in report:
return {}
module = report["Module"][0]
widget_module = report.get("Widget Module", [""])[0]
culprit = widget_module or module
machine_id = report["Machine ID"][0]
packages = report.get("Installed Packages", "")
if isinstance(packages, list):
packages = ' '.join(packages)
packages = dict(p.split('==') for p in packages.split(', ') if p)
schema_url = report.get("Widget Scheme", "")
schema_url = REPORTS_BASE_URL.format(schema_url) if schema_url else '<not-provided>'
data = dict(
event_id=uuid.uuid4().hex,
platform="python",
exception=get_exception(report["Exception"], report["Stack Trace"]),
culprit=culprit,
release=get_version(report["Version"]),
user=dict(id=machine_id),
contexts=get_device_info(report["Environment"][0]),
tags=dict(),
modules=packages,
extra={'Schema Url': schema_url, }
)
if module not in GENERAL_MODULES:
# group issues by the module of the last frame
# (unless the last frame is too general)
data["fingerprint"] = [module]
info_msg = "Sentry report created."
if "Exception" in report:
info_msg += " Exception: {}".format(report["Exception"])
logger.info(info_msg)
return data
def send_to_sentry(report):
sentry_report = create_sentry_report(report)
if not sentry_report:
return
for dsn, report in get_dsn_report_pairs(sentry_report):
logger.info("Sending to {}.".format(dsn))
try:
client = Client(dsn, raise_send_errors=True)
client.send(**report)
except Exception as ex:
# There is nothing we can do if sentry is not available
logger.exception("Sending report failed: {}.".format(ex))
else:
logger.info("Report has been sent to sentry.")
filtered:remove_function_no_docstring: 6,023 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 230
---
hexsha: 3f095342eb1ff4768b62daab63c4c5199750e7c7 | size: 1,465 | ext: py | lang: Python
max_stars_repo: amd64-linux/lib/python/mod_AM79C973_commands.py @ qiyancos/Simics-3.0.31 (9bd52d5abad023ee87a37306382a338abf7885f1) | licenses ["BSD-4-Clause", "FSFAP"] | count 1 | events 2020-06-15T10:41:18.000Z to 2020-06-15T10:41:18.000Z
max_issues_repo: amd64-linux/lib/python/mod_AM79C973_commands.py @ qiyancos/Simics-3.0.31 (9bd52d5abad023ee87a37306382a338abf7885f1) | licenses ["BSD-4-Clause", "FSFAP"] | count null | events null to null
max_forks_repo: amd64-linux/lib/python/mod_AM79C973_commands.py @ qiyancos/Simics-3.0.31 (9bd52d5abad023ee87a37306382a338abf7885f1) | licenses ["BSD-4-Clause", "FSFAP"] | count 3 | events 2020-08-10T10:25:02.000Z to 2021-09-12T01:12:09.000Z
content:
# Copyright 2006-2007 Virtutech AB
import sim_commands
sim_commands.new_pci_header_command('AM79C973', None)
sim_commands.new_info_command('AM79C973', get_info)
sim_commands.new_status_command('AM79C973', get_status)
avg_line_length: 38.552632 | max_line_length: 92 | alphanum_fraction: 0.564505
original_content:
# Copyright 2006-2007 Virtutech AB
import sim_commands
def checkbit(a, bit):
if a & (1 << bit):
return 1
else:
return 0
def get_info(obj):
return [ (None, [
("PHY object", obj.phy),
] ) ] + sim_commands.get_pci_info(obj)
def get_status(obj):
csr0 = obj.csr_csr0
csr0a = "INIT=%d STRT=%d STOP=%d TDMD=%d TXON=%d RXON=%d INEA=%d INTR=%d" % (
checkbit(csr0, 0), checkbit(csr0, 1), checkbit(csr0, 2), checkbit(csr0, 3),
checkbit(csr0, 4), checkbit(csr0, 5), checkbit(csr0, 6), checkbit(csr0, 7))
csr0b = "IDON=%d TINT=%d RINT=%d MERR=%d MISS=%d CERR=%d BABL=%d ERR=%d" % (
checkbit(csr0, 8), checkbit(csr0, 9), checkbit(csr0, 10), checkbit(csr0, 11),
checkbit(csr0, 12), checkbit(csr0, 13), checkbit(csr0, 14), checkbit(csr0, 15))
return ([ (None,
[ ("CSR0", csr0a),
("", csr0b),
("CSR1", "0x%x" % obj.csr_csr1),
("CSR2", "0x%x" % obj.csr_csr2),
("CSR3", "BCON=%d ACON=%d BSWP=%d" % (
(checkbit(obj.csr_csr3, 0), checkbit(obj.csr_csr3, 1), checkbit(obj.csr_csr3, 2)))),
("CSR15", "0x%x" % obj.csr_csr15),
("RAP", obj.ioreg_rap) ]),
] + sim_commands.get_pci_status(obj))
sim_commands.new_pci_header_command('AM79C973', None)
sim_commands.new_info_command('AM79C973', get_info)
sim_commands.new_status_command('AM79C973', get_status)
filtered:remove_function_no_docstring: 1,177 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 69
---
hexsha: 1952ef4f5d62b3cb9b445e1bcbb21e0b780982a0 | size: 638 | ext: py | lang: Python
max_stars_repo: Testing/unit_test/pytest_for_python/tests/test_setup_teardown_demo.py @ Ziang-Lu/edX-Software-Object-Oriented-Design (f0d7660c8377c0055e61978bda754a82079f2856) | licenses ["MIT"] | count 1 | events 2018-04-04T21:44:46.000Z to 2018-04-04T21:44:46.000Z
max_issues_repo: Testing/unit_test/pytest_for_python/tests/test_setup_teardown_demo.py @ Ziang-Lu/Software-Development-and-Design (f0d7660c8377c0055e61978bda754a82079f2856) | licenses ["MIT"] | count null | events null to null
max_forks_repo: Testing/unit_test/pytest_for_python/tests/test_setup_teardown_demo.py @ Ziang-Lu/Software-Development-and-Design (f0d7660c8377c0055e61978bda754a82079f2856) | licenses ["MIT"] | count null | events null to null
content:
#!usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pytest setup_module() and teardown_module() demo.
Assumption: creating a user is a very resource-consuming process
=> Thus, we don't want to do user creation every time we run a test.
"""
__author__ = 'Ziang Lu'
import pytest
from pytest_for_python.src.codes import User, is_member, is_prime_member
user = None
avg_line_length: 18.228571 | max_line_length: 72 | alphanum_fraction: 0.725705
original_content:
#!usr/bin/env python3
# -*- coding: utf-8 -*-
"""
pytest setup_module() and teardown_module() demo.
Assumption: creating a user is a very resource-consuming process
=> Thus, we don't want to do user creation every time we run a test.
"""
__author__ = 'Ziang Lu'
import pytest
from pytest_for_python.src.codes import User, is_member, is_prime_member
user = None
def setup_module(module):
global user
user = User(name='Williams', pwd='iamwill')
def test_user_is_member():
assert not is_member(user)
def test_user_is_prime_member():
assert is_prime_member(user)
def teardown_module(module):
user.clean_up()
filtered:remove_function_no_docstring: 175 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 92
---
hexsha: 4fdbc43ae4020bb00409ab430663c9095dfffe93 | size: 12,036 | ext: py | lang: Python
max_stars_repo: sassh/main.py @ joaompinto/sassh (d41df6cc7529f254244e1bff0ca67fc73523e4c1) | licenses ["Apache-2.0"] | count 1 | events 2017-09-23T20:57:05.000Z to 2017-09-23T20:57:05.000Z
max_issues_repo: sassh/main.py @ joaompinto/sassh (d41df6cc7529f254244e1bff0ca67fc73523e4c1) | licenses ["Apache-2.0"] | count 1 | events 2017-08-18T07:32:28.000Z to 2017-08-18T08:02:26.000Z
max_forks_repo: sassh/main.py @ joaompinto/sassh (d41df6cc7529f254244e1bff0ca67fc73523e4c1) | licenses ["Apache-2.0"] | count 1 | events 2019-07-17T13:22:38.000Z to 2019-07-17T13:22:38.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import socket
import errno
from getpass import getpass
from optparse import OptionParser
from sassh.connectionlib import Library, Connection
from sassh.sshclient import SSHClient
from paramiko import SSHException
try:
import pygtk
pygtk.require('2.0')
import gtk
GTK_AVAILABLE = True
except ImportError:
GTK_AVAILABLE = False
EXTRA_HELP = """\
While connected the following key binds are available:
'CTRL-X' followed by 'p' to send the connection password (e.g. for sudo)
; 'CTRL-X' followed by 'n' to generate new password (e.g. when password expired)
"""
class Main():
""" Main class for the application """
def parse_args(self):
""" Parse command line arguments """
parser = OptionParser(epilog=EXTRA_HELP)
parser.add_option("-a", "--add-connection",
action="store", type="string", dest="add_connection",
help="Add connection to the configuration database")
parser.add_option("-d", "--del-connection",
action="store_true", dest="del_connection",
help="Delete host from the configuration database")
parser.add_option("-g", "--get",
action="store", type="string", dest="get_file",
help="Get file from server")
parser.add_option("--put",
action="store", type="string", dest="put_file",
help="Put file from server")
parser.add_option("-k", "--use-key",
action="store_true", dest="set_use_key",
help="Set connection to use key based authentication")
parser.add_option("-l", "--list",
action="store_true", dest="list",
help="List configured connections names")
parser.add_option("-L", "--long-list",
action="store_true", dest="long_list",
help="List configured connections (with details)")
parser.add_option("-p", "--set-password",
action="store", type="string", dest="set_password",
help="Set connection password")
parser.add_option("-r", "--run",
action="store", type="string", dest="run_command",
help="Run command and exit")
parser.add_option("-R", "--run-su",
action="store", type="string", dest="run_su_script",
help="Run script with super user privileges")
parser.add_option("--reset",
action="store_true", dest="reset",
help="Change password for connection")
parser.add_option("-s", "--set-connection",
action="store", type="string", dest="set_connection",
help="Set login information for connection")
parser.add_option("-S", "--set-step-stone",
action="store", type="string", dest="set_step_stone",
help="Set stepping stone connection")
parser.add_option("-t", "--change-tags",
action="store", type="string", dest="change_tags",
help="Change connection tags")
parser.add_option("--super",
action="store_true", dest="super",
help="Perform 'sudo su -' after logging in")
parser.add_option("-w", "--show-connection",
action="store_true", dest="show_connection",
help="Show connection information")
self.options, self.args = parser.parse_args()
def _get_sassh_gpg_pub_key(self):
""" Check that the environment variable SASSH_GPG_PUB_KEY is defined """
sassh_gpg_pub_key = os.getenv('SASSH_GPG_PUB_KEY')
if not sassh_gpg_pub_key:
print """
sassh uses a GPG encrypted file to store connection passwords.
You must generate a GPG keypair with "gpg --gen-key" .
YOU SHOULD PROTECT THE KEY WITH A PASSPHRASE .
Then set your shell's SASSH_GPG_PUB_KEY variable to to the public id as
displayed from "gpg --list-keys", e.g: pub 4096R/7FD63AB0
export SASSH_GPG_PUB_KEY="7FD63AB0"
"""
sys.exit(1)
self.sassh_gpg_pub_key = sassh_gpg_pub_key
def _list_connections(self, pattern, long_list):
""" List all the configured connections """
library = self.host_library
for connection_name in library.connections:
connection = None
if pattern and pattern[0] == '+':
connection = library.getbyname(connection_name)
if not connection.tags or pattern not in connection.tags:
continue
else:
if not connection_name.lower().startswith(pattern.lower()):
continue
if long_list:
connection = connection or library.getbyname(connection_name)
show_fields = connection.name+" "
show_fields += "-a "+connection.url+" "
if connection.use_key:
show_fields += "-k "
if connection.step_stone:
show_fields += "-S "+connection.step_stone+" "
if connection.tags and len(connection.tags) > 1:
show_fields += "-t "+connection.tags
print show_fields
else:
print connection_name
sys.exit(0)
def _process_args(self):
""" Return connection definition after processing cmd arguments """
options, args = self.options, self.args
# Check connection availability and management options
if len(args) < 1 and not (options.list or options.long_list):
print "Usage:"
print " %s connection_name [options]" % sys.argv[0]
print " %s --list" % sys.argv[0]
sys.exit(2)
library = self.host_library
if (options.list or options.long_list):
pattern = args[0] if len(args) > 0 else ''
self._list_connections(pattern, options.long_list)
connection_name = args[0].lower()
if options.set_step_stone:
try:
library.getbyname(options.set_step_stone)
except IOError:
print 'No connection with name %s !' % options.set_step_stone
sys.exit(4)
try:
connection = library.getbyname(connection_name)
except IOError:
if not options.add_connection:
print 'No connection with name %s !' % connection_name
print 'If you want to add it use "--add-connection"'
sys.exit(3)
else:
connection = Connection(connection_name)
else:
if options.add_connection:
print "Connection with name %s is already stored!" % \
connection_name
sys.exit(4)
if options.del_connection:
library.remove(connection)
sys.exit(0)
if options.show_connection:
print "URL", connection.url
if GTK_AVAILABLE:
show_password = '(Copied to th clipboard)'
clipboard = gtk.clipboard_get()
clipboard.set_text(connection.password)
clipboard.store()
else:
show_password = connection.password
print "PASSWORD", show_password
if connection.use_key:
print "USING KEY"
print connection.tags or '+'
sys.exit(0)
if options.reset:
options.set_connection = connection.url
options.password = None
if options.change_tags:
if options.change_tags[0] != '+':
print "Tags format is: +tag1+tag2...+tagN"
sys.exit(4)
connection.change_tags(options.change_tags)
if options.set_step_stone:
connection.step_stone = options.set_step_stone
if options.set_password:
if options.set_use_key:
sys.stderr.write('You are already setting to key authentication!\n')
sys.exit(5)
else:
connection.use_key = False
connection.password = options.set_password
if options.set_use_key:
connection.use_key = True
# Ask for login password if setting a connection url
new_connection_url = options.add_connection or options.set_connection
if new_connection_url:
connection.url = new_connection_url
if not connection.password and not connection.use_key:
options.set_password = True
while True:
print "Type the password for connection %s [%s]: " \
% (connection_name, connection.url)
password1 = getpass()
if len(password1) < 1:
print "Password must be at least 1 chars long!"
print
continue
print "Re-type the password for connection %s [%s]: " \
% (connection_name, connection.url)
password2 = getpass()
if password1 != password2:
print "Passwords do not match!"
print
else:
break
connection.password = password1
only_save = new_connection_url \
or options.set_step_stone \
or options.change_tags \
or options.set_password \
or options.set_use_key
if only_save:
library.save(connection)
return None
else:
return connection
def run(self):
""" parse arguments and call the corresponding execution logic """
stderr = sys.stderr
self.parse_args()
connection = self._process_args()
options = self.options
if not connection: # Connection was changed
return
sshclient = SSHClient(connection, self.host_library)
if options.run_command or options.get_file or options.put_file or options.run_su_script:
sshclient.verbose = False
try:
sshclient.connect()
except SSHException, err:
stderr.write( "SSH error connecting to %s - %s\n"
% (connection.name, err.args[0]))
sys.exit(4)
except socket.timeout:
stderr.write("Connection timeout - unable to connect to %s !\n"
% connection.name)
sys.exit(2)
except socket.error, err:
errorcode = err[0]
if errorcode == errno.ECONNREFUSED:
stderr.write("Connection refused - unable to connect to %s !\n"
% connection.name)
sys.exit(3)
else:
raise
if options.super:
sshclient.perform_sudo()
if options.run_su_script:
sshclient.run_su_script(options.run_su_script)
elif options.run_command:
sshclient.run_command(options.run_command)
elif options.get_file:
sshclient.get_file(options.get_file)
elif options.put_file:
sshclient.put_file(options.put_file)
else:
sshclient.interactive_shell()
avg_line_length: 41.360825 | max_line_length: 97 | alphanum_fraction: 0.547607
original_content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import socket
import errno
from getpass import getpass
from optparse import OptionParser
from sassh.connectionlib import Library, Connection
from sassh.sshclient import SSHClient
from paramiko import SSHException
try:
import pygtk
pygtk.require('2.0')
import gtk
GTK_AVAILABLE = True
except ImportError:
GTK_AVAILABLE = False
EXTRA_HELP = """\
While connected the following key binds are available:
'CTRL-X' followed by 'p' to send the connection password (e.g. for sudo)
; 'CTRL-X' followed by 'n' to generate new password (e.g. when password expired)
"""
class Main():
""" Main class for the application """
def __init__(self):
self._get_sassh_gpg_pub_key()
self.host_library = Library('sassh', self.sassh_gpg_pub_key)
self.options = self.args = None
self.sassh_gpg_pub_key = None
def parse_args(self):
""" Parse command line arguments """
parser = OptionParser(epilog=EXTRA_HELP)
parser.add_option("-a", "--add-connection",
action="store", type="string", dest="add_connection",
help="Add connection to the configuration database")
parser.add_option("-d", "--del-connection",
action="store_true", dest="del_connection",
help="Delete host from the configuration database")
parser.add_option("-g", "--get",
action="store", type="string", dest="get_file",
help="Get file from server")
parser.add_option("--put",
action="store", type="string", dest="put_file",
help="Put file from server")
parser.add_option("-k", "--use-key",
action="store_true", dest="set_use_key",
help="Set connection to use key based authentication")
parser.add_option("-l", "--list",
action="store_true", dest="list",
help="List configured connections names")
parser.add_option("-L", "--long-list",
action="store_true", dest="long_list",
help="List configured connections (with details)")
parser.add_option("-p", "--set-password",
action="store", type="string", dest="set_password",
help="Set connection password")
parser.add_option("-r", "--run",
action="store", type="string", dest="run_command",
help="Run command and exit")
parser.add_option("-R", "--run-su",
action="store", type="string", dest="run_su_script",
help="Run script with super user privileges")
parser.add_option("--reset",
action="store_true", dest="reset",
help="Change password for connection")
parser.add_option("-s", "--set-connection",
action="store", type="string", dest="set_connection",
help="Set login information for connection")
parser.add_option("-S", "--set-step-stone",
action="store", type="string", dest="set_step_stone",
help="Set stepping stone connection")
parser.add_option("-t", "--change-tags",
action="store", type="string", dest="change_tags",
help="Change connection tags")
parser.add_option("--super",
action="store_true", dest="super",
help="Perform 'sudo su -' after logging in")
parser.add_option("-w", "--show-connection",
action="store_true", dest="show_connection",
help="Show connection information")
self.options, self.args = parser.parse_args()
def _get_sassh_gpg_pub_key(self):
""" Check that the environment variable SASSH_GPG_PUB_KEY is defined """
sassh_gpg_pub_key = os.getenv('SASSH_GPG_PUB_KEY')
if not sassh_gpg_pub_key:
print """
sassh uses a GPG encrypted file to store connection passwords.
You must generate a GPG keypair with "gpg --gen-key" .
YOU SHOULD PROTECT THE KEY WITH A PASSPHRASE .
Then set your shell's SASSH_GPG_PUB_KEY variable to to the public id as
displayed from "gpg --list-keys", e.g: pub 4096R/7FD63AB0
export SASSH_GPG_PUB_KEY="7FD63AB0"
"""
sys.exit(1)
self.sassh_gpg_pub_key = sassh_gpg_pub_key
def _list_connections(self, pattern, long_list):
""" List all the configured connections """
library = self.host_library
for connection_name in library.connections:
connection = None
if pattern and pattern[0] == '+':
connection = library.getbyname(connection_name)
if not connection.tags or pattern not in connection.tags:
continue
else:
if not connection_name.lower().startswith(pattern.lower()):
continue
if long_list:
connection = connection or library.getbyname(connection_name)
show_fields = connection.name+" "
show_fields += "-a "+connection.url+" "
if connection.use_key:
show_fields += "-k "
if connection.step_stone:
show_fields += "-S "+connection.step_stone+" "
if connection.tags and len(connection.tags) > 1:
show_fields += "-t "+connection.tags
print show_fields
else:
print connection_name
sys.exit(0)
def _process_args(self):
""" Return connection definition after processing cmd arguments """
options, args = self.options, self.args
# Check connection availability and management options
if len(args) < 1 and not (options.list or options.long_list):
print "Usage:"
print " %s connection_name [options]" % sys.argv[0]
print " %s --list" % sys.argv[0]
sys.exit(2)
library = self.host_library
if (options.list or options.long_list):
pattern = args[0] if len(args) > 0 else ''
self._list_connections(pattern, options.long_list)
connection_name = args[0].lower()
if options.set_step_stone:
try:
library.getbyname(options.set_step_stone)
except IOError:
print 'No connection with name %s !' % options.set_step_stone
sys.exit(4)
try:
connection = library.getbyname(connection_name)
except IOError:
if not options.add_connection:
print 'No connection with name %s !' % connection_name
print 'If you want to add it use "--add-connection"'
sys.exit(3)
else:
connection = Connection(connection_name)
else:
if options.add_connection:
print "Connection with name %s is already stored!" % \
connection_name
sys.exit(4)
if options.del_connection:
library.remove(connection)
sys.exit(0)
if options.show_connection:
print "URL", connection.url
if GTK_AVAILABLE:
show_password = '(Copied to th clipboard)'
clipboard = gtk.clipboard_get()
clipboard.set_text(connection.password)
clipboard.store()
else:
show_password = connection.password
print "PASSWORD", show_password
if connection.use_key:
print "USING KEY"
print connection.tags or '+'
sys.exit(0)
if options.reset:
options.set_connection = connection.url
options.password = None
if options.change_tags:
if options.change_tags[0] != '+':
print "Tags format is: +tag1+tag2...+tagN"
sys.exit(4)
connection.change_tags(options.change_tags)
if options.set_step_stone:
connection.step_stone = options.set_step_stone
if options.set_password:
if options.set_use_key:
sys.stderr.write('You are already setting to key authentication!\n')
sys.exit(5)
else:
connection.use_key = False
connection.password = options.set_password
if options.set_use_key:
connection.use_key = True
# Ask for login password if setting a connection url
new_connection_url = options.add_connection or options.set_connection
if new_connection_url:
connection.url = new_connection_url
if not connection.password and not connection.use_key:
options.set_password = True
while True:
print "Type the password for connection %s [%s]: " \
% (connection_name, connection.url)
password1 = getpass()
if len(password1) < 1:
print "Password must be at least 1 chars long!"
print
continue
print "Re-type the password for connection %s [%s]: " \
% (connection_name, connection.url)
password2 = getpass()
if password1 != password2:
print "Passwords do not match!"
print
else:
break
connection.password = password1
only_save = new_connection_url \
or options.set_step_stone \
or options.change_tags \
or options.set_password \
or options.set_use_key
if only_save:
library.save(connection)
return None
else:
return connection
def run(self):
""" parse arguments and call the corresponding execution logic """
stderr = sys.stderr
self.parse_args()
connection = self._process_args()
options = self.options
if not connection: # Connection was changed
return
sshclient = SSHClient(connection, self.host_library)
if options.run_command or options.get_file or options.put_file or options.run_su_script:
sshclient.verbose = False
try:
sshclient.connect()
except SSHException, err:
stderr.write( "SSH error connecting to %s - %s\n"
% (connection.name, err.args[0]))
sys.exit(4)
except socket.timeout:
stderr.write("Connection timeout - unable to connect to %s !\n"
% connection.name)
sys.exit(2)
except socket.error, err:
errorcode = err[0]
if errorcode == errno.ECONNREFUSED:
stderr.write("Connection refused - unable to connect to %s !\n"
% connection.name)
sys.exit(3)
else:
raise
if options.super:
sshclient.perform_sudo()
if options.run_su_script:
sshclient.run_su_script(options.run_su_script)
elif options.run_command:
sshclient.run_command(options.run_command)
elif options.get_file:
sshclient.get_file(options.get_file)
elif options.put_file:
sshclient.put_file(options.put_file)
else:
sshclient.interactive_shell()
filtered:remove_function_no_docstring: 183 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 27
---
hexsha: 122621fb346426bde1492a27cde9e0a3a348a9cc | size: 2,456 | ext: py | lang: Python
max_stars_repo: tests/etl/epic2op/predictor_server_long.py @ cloud-cds/cds-stack (d68a1654d4f604369a071f784cdb5c42fc855d6e) | licenses ["Apache-2.0"] | count 6 | events 2018-06-27T00:09:55.000Z to 2019-03-07T14:06:53.000Z
max_issues_repo: tests/etl/epic2op/predictor_server_long.py @ cloud-cds/cds-stack (d68a1654d4f604369a071f784cdb5c42fc855d6e) | licenses ["Apache-2.0"] | count 3 | events 2021-03-31T18:37:46.000Z to 2021-06-01T21:49:41.000Z
max_forks_repo: tests/etl/epic2op/predictor_server_long.py @ cloud-cds/cds-stack (d68a1654d4f604369a071f784cdb5c42fc855d6e) | licenses ["Apache-2.0"] | count 3 | events 2020-01-24T16:40:49.000Z to 2021-09-30T02:28:55.000Z
content:
import asyncio
import logging
import os
import json
import etl.io_config.server_protocol as protocol
alert_dns = '127.0.0.1'
predictor_dns = '0.0.0.0'
SRV_LOG_FMT = '%(asctime)s|%(name)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'
logging.basicConfig(level=logging.INFO, format=SRV_LOG_FMT)
loop = asyncio.get_event_loop()
coro = asyncio.start_server(notification_loop, predictor_dns, 8182, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
logging.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
avg_line_length: 26.408602 | max_line_length: 106 | alphanum_fraction: 0.680782
original_content:
import asyncio
import logging
import os
import json
import etl.io_config.server_protocol as protocol
alert_dns = '127.0.0.1'
predictor_dns = '0.0.0.0'
SRV_LOG_FMT = '%(asctime)s|%(name)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'
logging.basicConfig(level=logging.INFO, format=SRV_LOG_FMT)
async def start_predicting(writer, job_tsp):
logging.info("Connecting to server")
reader, writer = await asyncio.open_connection(alert_dns, 31000)
await protocol.write_message(writer, {
'type': 'START',
'time': job_tsp,
'hosp': 'HCGH',
'dns': predictor_dns,
'predictor_id': 0,
'predictor_type': 'active',
'predictor_model': 'long'
})
logging.info("Predicting on patients and sending heart beats")
for i in range(10):
await protocol.write_message(writer, {
'type': 'HB',
'time': job_tsp,
'hosp': 'HCGH',
'dns': predictor_dns,
'predictor_id': 0,
'predictor_type': 'active',
'predictor_model': 'short'
})
await asyncio.sleep(1)
logging.info("heart beats")
await protocol.write_message(writer,
{'type': 'FIN',
'time': job_tsp,
'hosp': 'HCGH',
'predictor_id': 0,
'enc_ids': [37261, 38746],
'predictor_model': 'long'
}
)
writer.close()
async def notification_loop(reader, writer):
# main workflow
addr = writer.transport.get_extra_info('peername')
sock = writer.transport.get_extra_info('socket')
if not addr:
logging.error('Connection made without a valid remote address, (Timeout %s)' % str(sock.gettimeout()))
finished()
return
else:
logging.info('Connection from %s (Timeout %s)' % (str(addr), str(sock.gettimeout())))
while not reader.at_eof():
message = await protocol.read_message(reader, writer)
logging.info("recv: msg {}".format(message))
if message == protocol.CONNECTION_CLOSED:
break
elif message.get('type') == 'ETL':
await start_predicting(writer, message['time'])
logging.info("Closing the client socket")
writer.close()
loop = asyncio.get_event_loop()
coro = asyncio.start_server(notification_loop, predictor_dns, 8182, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
logging.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| 1,692
| 0
| 46
|
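The notification_loop above only reacts to messages whose type is 'ETL', after which the predictor dials back to alert_dns:31000 with START/HB/FIN messages. Below is a minimal driver sketch for poking the server from the ETL side, assuming the repository's etl.io_config.server_protocol module (with the same write_message coroutine used above) is importable; the timestamp value is arbitrary.

import asyncio
import etl.io_config.server_protocol as protocol  # assumed importable, as in the server above

async def send_etl_notification():
    # Connect to the predictor server started above (port 8182).
    reader, writer = await asyncio.open_connection('127.0.0.1', 8182)
    # notification_loop reacts to 'ETL' messages; the predictor then connects
    # back to the alert server on port 31000, so a listener should be running there.
    await protocol.write_message(writer, {'type': 'ETL', 'time': '2018-06-27T00:00:00'})
    writer.close()

if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(send_etl_notification())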
2fd714a8950f2c7b4486141a9f24bd33c7a6eb72
| 14,375
|
py
|
Python
|
ironic_inspector/plugins/standard.py
|
NaohiroTamura/ironic-inspector
|
7b7fba72de46806ce84d6d4758a2343b52b0c96d
|
[
"Apache-2.0"
] | null | null | null |
ironic_inspector/plugins/standard.py
|
NaohiroTamura/ironic-inspector
|
7b7fba72de46806ce84d6d4758a2343b52b0c96d
|
[
"Apache-2.0"
] | null | null | null |
ironic_inspector/plugins/standard.py
|
NaohiroTamura/ironic-inspector
|
7b7fba72de46806ce84d6d4758a2343b52b0c96d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard set of plugins."""
import base64
import datetime
import os
import sys
import netaddr
from oslo_config import cfg
from oslo_utils import units
import six
from ironic_inspector.common.i18n import _, _LC, _LE, _LI, _LW
from ironic_inspector import conf
from ironic_inspector.plugins import base
from ironic_inspector import utils
CONF = cfg.CONF
LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard')
class RootDiskSelectionHook(base.ProcessingHook):
"""Smarter root disk selection using Ironic root device hints.
This hook must always go before SchedulerHook, otherwise root_disk field
might not be updated.
"""
def before_update(self, introspection_data, node_info, **kwargs):
"""Detect root disk from root device hints and IPA inventory."""
hints = node_info.node().properties.get('root_device')
if not hints:
LOG.debug('Root device hints are not provided',
node_info=node_info, data=introspection_data)
return
inventory = introspection_data.get('inventory')
if not inventory:
raise utils.Error(
_('Root device selection requires ironic-python-agent '
'as an inspection ramdisk'),
node_info=node_info, data=introspection_data)
disks = inventory.get('disks', [])
if not disks:
raise utils.Error(_('No disks found'),
node_info=node_info, data=introspection_data)
for disk in disks:
properties = disk.copy()
# Root device hints are in GiB, data from IPA is in bytes
properties['size'] //= units.Gi
for name, value in hints.items():
actual = properties.get(name)
if actual != value:
LOG.debug('Disk %(disk)s does not satisfy hint '
'%(name)s=%(value)s, actual value is %(actual)s',
{'disk': disk.get('name'), 'name': name,
'value': value, 'actual': actual},
node_info=node_info, data=introspection_data)
break
else:
LOG.debug('Disk %(disk)s of size %(size)s satisfies '
'root device hints',
{'disk': disk.get('name'), 'size': disk['size']},
node_info=node_info, data=introspection_data)
introspection_data['root_disk'] = disk
return
raise utils.Error(_('No disks satisfied root device hints'),
node_info=node_info, data=introspection_data)
class SchedulerHook(base.ProcessingHook):
"""Nova scheduler required properties."""
KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
def before_update(self, introspection_data, node_info, **kwargs):
"""Update node with scheduler properties."""
inventory = introspection_data.get('inventory')
errors = []
root_disk = introspection_data.get('root_disk')
if root_disk:
introspection_data['local_gb'] = root_disk['size'] // units.Gi
if CONF.processing.disk_partitioning_spacing:
introspection_data['local_gb'] -= 1
elif inventory:
errors.append(_('root disk is not supplied by the ramdisk and '
'root_disk_selection hook is not enabled'))
if inventory:
try:
introspection_data['cpus'] = int(inventory['cpu']['count'])
introspection_data['cpu_arch'] = six.text_type(
inventory['cpu']['architecture'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing CPU information: %s') %
inventory.get('cpu'))
try:
introspection_data['memory_mb'] = int(
inventory['memory']['physical_mb'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing memory information: %s; '
'introspection requires physical memory size '
'from dmidecode') %
inventory.get('memory'))
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'),
node_info=node_info, data=introspection_data)
missing = [key for key in self.KEYS
if not introspection_data.get(key)]
if missing:
raise utils.Error(
_('The following required parameters are missing: %s') %
missing,
node_info=node_info, data=introspection_data)
if errors:
raise utils.Error(_('The following problems encountered: %s') %
'; '.join(errors),
node_info=node_info, data=introspection_data)
LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
{key: introspection_data.get(key) for key in self.KEYS},
node_info=node_info, data=introspection_data)
overwrite = CONF.processing.overwrite_existing
properties = {key: str(introspection_data[key])
for key in self.KEYS if overwrite or
not node_info.node().properties.get(key)}
node_info.update_properties(**properties)
class ValidateInterfacesHook(base.ProcessingHook):
"""Hook to validate network interfaces."""
def _get_interfaces(self, data=None):
"""Convert inventory to a dict with interfaces.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
result = {}
inventory = data.get('inventory', {})
if inventory:
for iface in inventory.get('interfaces', ()):
name = iface.get('name')
mac = iface.get('mac_address')
ip = iface.get('ipv4_address')
if not name:
LOG.error(_LE('Malformed interface record: %s'),
iface, data=data)
continue
LOG.debug('Found interface %(name)s with MAC "%(mac)s" and '
'IP address "%(ip)s"',
{'name': name, 'mac': mac, 'ip': ip}, data=data)
result[name] = {'ip': ip, 'mac': mac}
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'), data=data)
result = data.get('interfaces')
return result
def _validate_interfaces(self, interfaces, data=None):
"""Validate interfaces on correctness and suitability.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
if not interfaces:
raise utils.Error(_('No interfaces supplied by the ramdisk'),
data=data)
pxe_mac = utils.get_pxe_mac(data)
if not pxe_mac and CONF.processing.add_ports == 'pxe':
LOG.warning(_LW('No boot interface provided in the introspection '
'data, will add all ports with IP addresses'))
result = {}
for name, iface in interfaces.items():
mac = iface.get('mac')
ip = iface.get('ip')
if not mac:
LOG.debug('Skipping interface %s without link information',
name, data=data)
continue
if not utils.is_valid_mac(mac):
LOG.warning(_LW('MAC %(mac)s for interface %(name)s is not '
'valid, skipping'),
{'mac': mac, 'name': name},
data=data)
continue
mac = mac.lower()
if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()):
LOG.debug('Skipping local interface %s', name, data=data)
continue
if (CONF.processing.add_ports == 'pxe' and pxe_mac
and mac != pxe_mac):
LOG.debug('Skipping interface %s as it was not PXE booting',
name, data=data)
continue
elif CONF.processing.add_ports != 'all' and not ip:
LOG.debug('Skipping interface %s as it did not have '
'an IP address assigned during the ramdisk run',
name, data=data)
continue
result[name] = {'ip': ip, 'mac': mac.lower()}
if not result:
raise utils.Error(_('No suitable interfaces found in %s') %
interfaces, data=data)
return result
def before_processing(self, introspection_data, **kwargs):
"""Validate information about network interfaces."""
bmc_address = utils.get_ipmi_address_from_data(introspection_data)
if bmc_address:
introspection_data['ipmi_address'] = bmc_address
else:
LOG.debug('No BMC address provided in introspection data, '
'assuming virtual environment', data=introspection_data)
all_interfaces = self._get_interfaces(introspection_data)
interfaces = self._validate_interfaces(all_interfaces,
introspection_data)
LOG.info(_LI('Using network interface(s): %s'),
', '.join('%s %s' % (name, items)
for (name, items) in interfaces.items()),
data=introspection_data)
introspection_data['all_interfaces'] = all_interfaces
introspection_data['interfaces'] = interfaces
valid_macs = [iface['mac'] for iface in interfaces.values()]
introspection_data['macs'] = valid_macs
def before_update(self, introspection_data, node_info, **kwargs):
"""Drop ports that are not present in the data."""
if CONF.processing.keep_ports == 'present':
expected_macs = {
iface['mac']
for iface in introspection_data['all_interfaces'].values()
}
elif CONF.processing.keep_ports == 'added':
expected_macs = set(introspection_data['macs'])
else:
return
# list is required as we modify underlying dict
for port in list(node_info.ports().values()):
if port.address not in expected_macs:
LOG.info(_LI("Deleting port %(port)s as its MAC %(mac)s is "
"not in expected MAC list %(expected)s"),
{'port': port.uuid,
'mac': port.address,
'expected': list(sorted(expected_macs))},
node_info=node_info, data=introspection_data)
node_info.delete_port(port)
class RamdiskErrorHook(base.ProcessingHook):
"""Hook to process error send from the ramdisk."""
DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f'
| 40.954416
| 79
| 0.559304
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard set of plugins."""
import base64
import datetime
import os
import sys
import netaddr
from oslo_config import cfg
from oslo_utils import units
import six
from ironic_inspector.common.i18n import _, _LC, _LE, _LI, _LW
from ironic_inspector import conf
from ironic_inspector.plugins import base
from ironic_inspector import utils
CONF = cfg.CONF
LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard')
class RootDiskSelectionHook(base.ProcessingHook):
"""Smarter root disk selection using Ironic root device hints.
This hook must always go before SchedulerHook, otherwise root_disk field
might not be updated.
"""
def before_update(self, introspection_data, node_info, **kwargs):
"""Detect root disk from root device hints and IPA inventory."""
hints = node_info.node().properties.get('root_device')
if not hints:
LOG.debug('Root device hints are not provided',
node_info=node_info, data=introspection_data)
return
inventory = introspection_data.get('inventory')
if not inventory:
raise utils.Error(
_('Root device selection requires ironic-python-agent '
'as an inspection ramdisk'),
node_info=node_info, data=introspection_data)
disks = inventory.get('disks', [])
if not disks:
raise utils.Error(_('No disks found'),
node_info=node_info, data=introspection_data)
for disk in disks:
properties = disk.copy()
# Root device hints are in GiB, data from IPA is in bytes
properties['size'] //= units.Gi
for name, value in hints.items():
actual = properties.get(name)
if actual != value:
LOG.debug('Disk %(disk)s does not satisfy hint '
'%(name)s=%(value)s, actual value is %(actual)s',
{'disk': disk.get('name'), 'name': name,
'value': value, 'actual': actual},
node_info=node_info, data=introspection_data)
break
else:
LOG.debug('Disk %(disk)s of size %(size)s satisfies '
'root device hints',
{'disk': disk.get('name'), 'size': disk['size']},
node_info=node_info, data=introspection_data)
introspection_data['root_disk'] = disk
return
raise utils.Error(_('No disks satisfied root device hints'),
node_info=node_info, data=introspection_data)
class SchedulerHook(base.ProcessingHook):
"""Nova scheduler required properties."""
KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
def before_update(self, introspection_data, node_info, **kwargs):
"""Update node with scheduler properties."""
inventory = introspection_data.get('inventory')
errors = []
root_disk = introspection_data.get('root_disk')
if root_disk:
introspection_data['local_gb'] = root_disk['size'] // units.Gi
if CONF.processing.disk_partitioning_spacing:
introspection_data['local_gb'] -= 1
elif inventory:
errors.append(_('root disk is not supplied by the ramdisk and '
'root_disk_selection hook is not enabled'))
if inventory:
try:
introspection_data['cpus'] = int(inventory['cpu']['count'])
introspection_data['cpu_arch'] = six.text_type(
inventory['cpu']['architecture'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing CPU information: %s') %
inventory.get('cpu'))
try:
introspection_data['memory_mb'] = int(
inventory['memory']['physical_mb'])
except (KeyError, ValueError, TypeError):
errors.append(_('malformed or missing memory information: %s; '
'introspection requires physical memory size '
'from dmidecode') %
inventory.get('memory'))
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'),
node_info=node_info, data=introspection_data)
missing = [key for key in self.KEYS
if not introspection_data.get(key)]
if missing:
raise utils.Error(
_('The following required parameters are missing: %s') %
missing,
node_info=node_info, data=introspection_data)
if errors:
raise utils.Error(_('The following problems encountered: %s') %
'; '.join(errors),
node_info=node_info, data=introspection_data)
LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
{key: introspection_data.get(key) for key in self.KEYS},
node_info=node_info, data=introspection_data)
overwrite = CONF.processing.overwrite_existing
properties = {key: str(introspection_data[key])
for key in self.KEYS if overwrite or
not node_info.node().properties.get(key)}
node_info.update_properties(**properties)
class ValidateInterfacesHook(base.ProcessingHook):
"""Hook to validate network interfaces."""
def __init__(self):
if CONF.processing.add_ports not in conf.VALID_ADD_PORTS_VALUES:
LOG.critical(_LC('Accepted values for [processing]add_ports are '
'%(valid)s, got %(actual)s'),
{'valid': conf.VALID_ADD_PORTS_VALUES,
'actual': CONF.processing.add_ports})
sys.exit(1)
if CONF.processing.keep_ports not in conf.VALID_KEEP_PORTS_VALUES:
LOG.critical(_LC('Accepted values for [processing]keep_ports are '
'%(valid)s, got %(actual)s'),
{'valid': conf.VALID_KEEP_PORTS_VALUES,
'actual': CONF.processing.keep_ports})
sys.exit(1)
def _get_interfaces(self, data=None):
"""Convert inventory to a dict with interfaces.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
result = {}
inventory = data.get('inventory', {})
if inventory:
for iface in inventory.get('interfaces', ()):
name = iface.get('name')
mac = iface.get('mac_address')
ip = iface.get('ipv4_address')
if not name:
LOG.error(_LE('Malformed interface record: %s'),
iface, data=data)
continue
LOG.debug('Found interface %(name)s with MAC "%(mac)s" and '
'IP address "%(ip)s"',
{'name': name, 'mac': mac, 'ip': ip}, data=data)
result[name] = {'ip': ip, 'mac': mac}
else:
LOG.warning(_LW('No inventory provided: using old bash ramdisk '
'is deprecated, please switch to '
'ironic-python-agent'), data=data)
result = data.get('interfaces')
return result
def _validate_interfaces(self, interfaces, data=None):
"""Validate interfaces on correctness and suitability.
:return: dict interface name -> dict with keys 'mac' and 'ip'
"""
if not interfaces:
raise utils.Error(_('No interfaces supplied by the ramdisk'),
data=data)
pxe_mac = utils.get_pxe_mac(data)
if not pxe_mac and CONF.processing.add_ports == 'pxe':
LOG.warning(_LW('No boot interface provided in the introspection '
'data, will add all ports with IP addresses'))
result = {}
for name, iface in interfaces.items():
mac = iface.get('mac')
ip = iface.get('ip')
if not mac:
LOG.debug('Skipping interface %s without link information',
name, data=data)
continue
if not utils.is_valid_mac(mac):
LOG.warning(_LW('MAC %(mac)s for interface %(name)s is not '
'valid, skipping'),
{'mac': mac, 'name': name},
data=data)
continue
mac = mac.lower()
if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()):
LOG.debug('Skipping local interface %s', name, data=data)
continue
if (CONF.processing.add_ports == 'pxe' and pxe_mac
and mac != pxe_mac):
LOG.debug('Skipping interface %s as it was not PXE booting',
name, data=data)
continue
elif CONF.processing.add_ports != 'all' and not ip:
LOG.debug('Skipping interface %s as it did not have '
'an IP address assigned during the ramdisk run',
name, data=data)
continue
result[name] = {'ip': ip, 'mac': mac.lower()}
if not result:
raise utils.Error(_('No suitable interfaces found in %s') %
interfaces, data=data)
return result
def before_processing(self, introspection_data, **kwargs):
"""Validate information about network interfaces."""
bmc_address = utils.get_ipmi_address_from_data(introspection_data)
if bmc_address:
introspection_data['ipmi_address'] = bmc_address
else:
LOG.debug('No BMC address provided in introspection data, '
'assuming virtual environment', data=introspection_data)
all_interfaces = self._get_interfaces(introspection_data)
interfaces = self._validate_interfaces(all_interfaces,
introspection_data)
LOG.info(_LI('Using network interface(s): %s'),
', '.join('%s %s' % (name, items)
for (name, items) in interfaces.items()),
data=introspection_data)
introspection_data['all_interfaces'] = all_interfaces
introspection_data['interfaces'] = interfaces
valid_macs = [iface['mac'] for iface in interfaces.values()]
introspection_data['macs'] = valid_macs
def before_update(self, introspection_data, node_info, **kwargs):
"""Drop ports that are not present in the data."""
if CONF.processing.keep_ports == 'present':
expected_macs = {
iface['mac']
for iface in introspection_data['all_interfaces'].values()
}
elif CONF.processing.keep_ports == 'added':
expected_macs = set(introspection_data['macs'])
else:
return
# list is required as we modify underlying dict
for port in list(node_info.ports().values()):
if port.address not in expected_macs:
LOG.info(_LI("Deleting port %(port)s as its MAC %(mac)s is "
"not in expected MAC list %(expected)s"),
{'port': port.uuid,
'mac': port.address,
'expected': list(sorted(expected_macs))},
node_info=node_info, data=introspection_data)
node_info.delete_port(port)
class RamdiskErrorHook(base.ProcessingHook):
"""Hook to process error send from the ramdisk."""
DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f'
def before_processing(self, introspection_data, **kwargs):
error = introspection_data.get('error')
logs = introspection_data.get('logs')
if error or CONF.processing.always_store_ramdisk_logs:
if logs:
self._store_logs(logs, introspection_data)
else:
LOG.debug('No logs received from the ramdisk',
data=introspection_data)
if error:
raise utils.Error(_('Ramdisk reported error: %s') % error,
data=introspection_data)
def _store_logs(self, logs, introspection_data):
if not CONF.processing.ramdisk_logs_dir:
LOG.warning(
_LW('Failed to store logs received from the ramdisk '
'because ramdisk_logs_dir configuration option '
'is not set'),
data=introspection_data)
return
if not os.path.exists(CONF.processing.ramdisk_logs_dir):
os.makedirs(CONF.processing.ramdisk_logs_dir)
time_fmt = datetime.datetime.utcnow().strftime(self.DATETIME_FORMAT)
bmc_address = introspection_data.get('ipmi_address', 'unknown')
file_name = 'bmc_%s_%s' % (bmc_address, time_fmt)
with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name),
'wb') as fp:
fp.write(base64.b64decode(logs))
LOG.info(_LI('Ramdisk logs stored in file %s'), file_name,
data=introspection_data)
| 2,209
| 0
| 81
|
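RamdiskErrorHook._store_logs above expects introspection_data['logs'] to be a base64-encoded blob and writes it to a file named bmc_<ipmi_address>_<timestamp>. Below is a standalone sketch of that encoding and naming using only the standard library; it mirrors the code above rather than calling into ironic-inspector.

import base64
import datetime

DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f'  # same format string as RamdiskErrorHook

def encode_logs(raw_bytes):
    # What the ramdisk side would place into introspection_data['logs'].
    return base64.b64encode(raw_bytes).decode('ascii')

def log_file_name(bmc_address='unknown'):
    # Mirrors the bmc_<address>_<timestamp> naming used by _store_logs above.
    time_fmt = datetime.datetime.utcnow().strftime(DATETIME_FORMAT)
    return 'bmc_%s_%s' % (bmc_address, time_fmt)

if __name__ == '__main__':
    payload = encode_logs(b'ramdisk boot output ...')
    print(log_file_name('192.0.2.1'), '->', len(payload), 'base64 characters')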
5b230dec7eab3cfb02f6bb4a08d27808289fbced
| 366
|
py
|
Python
|
oommfc/evolvers/__init__.py
|
ubermag/oommfc
|
38deb4f6209fff03c4b9d573f84c934af7d078a5
|
[
"BSD-3-Clause"
] | 23
|
2019-09-18T10:58:00.000Z
|
2022-02-07T07:05:49.000Z
|
oommfc/evolvers/__init__.py
|
ubermag/oommfc
|
38deb4f6209fff03c4b9d573f84c934af7d078a5
|
[
"BSD-3-Clause"
] | 43
|
2019-08-22T04:31:36.000Z
|
2022-03-28T09:09:15.000Z
|
oommfc/evolvers/__init__.py
|
ubermag/oommfc
|
38deb4f6209fff03c4b9d573f84c934af7d078a5
|
[
"BSD-3-Clause"
] | 7
|
2020-04-25T13:25:25.000Z
|
2021-12-06T15:06:28.000Z
|
from .cgevolver import CGEvolver
from .eulerevolver import EulerEvolver
from .rungekuttaevolver import RungeKuttaEvolver
from .spintevolver import SpinTEvolver
from .spinxferevolver import SpinXferEvolver
from .uhh_thetaevolver import UHH_ThetaEvolver
from .xf_thermheunevolver import Xf_ThermHeunEvolver
from .xf_thermspinxferevolver import Xf_ThermSpinXferEvolver
| 40.666667
| 60
| 0.89071
|
from .cgevolver import CGEvolver
from .eulerevolver import EulerEvolver
from .rungekuttaevolver import RungeKuttaEvolver
from .spintevolver import SpinTEvolver
from .spinxferevolver import SpinXferEvolver
from .uhh_thetaevolver import UHH_ThetaEvolver
from .xf_thermheunevolver import Xf_ThermHeunEvolver
from .xf_thermspinxferevolver import Xf_ThermSpinXferEvolver
| 0
| 0
| 0
|
db849eea5e1442cdce0205480c96ac7d81db1226
| 803
|
py
|
Python
|
restrictmethodorigin/base.py
|
Ruhshan/django-restrictmethodorigin
|
9c02e1e9851ca7cdc07620ffdb081668ee648e5d
|
[
"MIT"
] | 2
|
2018-03-11T16:09:34.000Z
|
2018-07-16T06:51:53.000Z
|
restrictmethodorigin/base.py
|
Ruhshan/django-restrictmethodorigin
|
9c02e1e9851ca7cdc07620ffdb081668ee648e5d
|
[
"MIT"
] | null | null | null |
restrictmethodorigin/base.py
|
Ruhshan/django-restrictmethodorigin
|
9c02e1e9851ca7cdc07620ffdb081668ee648e5d
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.http import HttpResponseForbidden
target_methods = settings.METHOD_ORIGIN.keys()
http_methods = ['CONNECT', 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']
| 33.458333
| 77
| 0.632628
|
from django.conf import settings
from django.http import HttpResponseForbidden
target_methods = settings.METHOD_ORIGIN.keys()
http_methods = ['CONNECT', 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']
def OriginRestrictor(get_response):
def middleware(request):
forbid = False
for method in http_methods:
if request.method==method and method in target_methods:
allowed_origin = settings.METHOD_ORIGIN[method]
request_origin = request.META['REMOTE_ADDR']
if request_origin not in allowed_origin:
forbid=True
break
if forbid==True:
return HttpResponseForbidden()
response = get_response(request)
return response
return middleware
| 575
| 0
| 23
|
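The OriginRestrictor middleware above reads settings.METHOD_ORIGIN and compares the request's REMOTE_ADDR against the allowed origins for that HTTP method. Below is a sketch of the Django settings such a project might carry; the middleware dotted path is inferred from the file path restrictmethodorigin/base.py and the addresses are placeholders.

# settings.py (sketch) -- addresses below are placeholders
METHOD_ORIGIN = {
    # HTTP method -> REMOTE_ADDR values allowed to use it
    'DELETE': ['127.0.0.1'],
    'POST': ['127.0.0.1', '10.0.0.5'],
}

MIDDLEWARE = [
    # ... Django's default middleware ...
    'restrictmethodorigin.base.OriginRestrictor',  # dotted path inferred from the file above
]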
70a6ef452aec3b684b02e99d069966cee34a952c
| 928
|
py
|
Python
|
Mundo 3/Aula17.Ex85.py
|
uirasiqueira/Exercicios_Python
|
409b7be9cf278e3043149654de7b41be56a3d951
|
[
"MIT"
] | null | null | null |
Mundo 3/Aula17.Ex85.py
|
uirasiqueira/Exercicios_Python
|
409b7be9cf278e3043149654de7b41be56a3d951
|
[
"MIT"
] | null | null | null |
Mundo 3/Aula17.Ex85.py
|
uirasiqueira/Exercicios_Python
|
409b7be9cf278e3043149654de7b41be56a3d951
|
[
"MIT"
] | null | null | null |
''' Create a program where the user can enter seven numeric values and store them
in a single list that keeps the even and odd values separated.
At the end, show the even and odd values in ascending order.'''
'''princ = []
impar= []
par= []
for c in range (0,7):
n = int(input('Digite um número: '))
if n % 2 == 0:
par.append(n)
else:
impar.append(n)
princ.append(sorted(impar[:]))
princ.append(sorted(par[:]))
print(f'Os valores pares digitados foram: {princ[0]}\n'
f'Os valores ímpares digitados foram: {princ[1]}')'''
#guanabara methods
núm = [[], []]
valor = 0
for c in range (1,8):
valor = int(input(f'Digite o {c}ª valor: '))
if valor %2 ==0:
núm[0].append(valor)
else:
núm[1].append(valor)
print('~'*30)
núm[0].sort()
núm[1].sort()
print(f'Os valores pares digitados foram: {núm[0]}')
print(f'Os valores ímpares digitados foram: {núm[1]}')
| 27.294118
| 86
| 0.632543
|
''' Create a program where the user can enter seven numeric values and store them
in a single list that keeps the even and odd values separated.
At the end, show the even and odd values in ascending order.'''
'''princ = []
impar= []
par= []
for c in range (0,7):
n = int(input('Digite um número: '))
if n % 2 == 0:
par.append(n)
else:
impar.append(n)
princ.append(sorted(impar[:]))
princ.append(sorted(par[:]))
print(f'Os valores pares digitados foram: {princ[0]}\n'
f'Os valores ímpares digitados foram: {princ[1]}')'''
#guanabara methods
núm = [[], []]
valor = 0
for c in range (1,8):
valor = int(input(f'Digite o {c}ª valor: '))
if valor %2 ==0:
núm[0].append(valor)
else:
núm[1].append(valor)
print('~'*30)
núm[0].sort()
núm[1].sort()
print(f'Os valores pares digitados foram: {núm[0]}')
print(f'Os valores ímpares digitados foram: {núm[1]}')
| 0
| 0
| 0
|
1e0c4b6f02e04cd28a089a60101106ce4a5f7ec2
| 1,456
|
py
|
Python
|
slack_conn.py
|
eshioji/omibot
|
606377942350a61ec128fbcd6e3ec1cf8d1605b7
|
[
"Apache-2.0"
] | null | null | null |
slack_conn.py
|
eshioji/omibot
|
606377942350a61ec128fbcd6e3ec1cf8d1605b7
|
[
"Apache-2.0"
] | null | null | null |
slack_conn.py
|
eshioji/omibot
|
606377942350a61ec128fbcd6e3ec1cf8d1605b7
|
[
"Apache-2.0"
] | null | null | null |
import time
from slackclient import SlackClient
import common
import config
if __name__ == '__main__':
conn = SlackConn(config.slack_token)
conn.upload_img('/Users/omibot/data/omibot/sentry/Dienstag, 31. Oktober 2017 um 14:15:51/Image2.jpeg', '#allgemein')
| 26.962963
| 120
| 0.538462
|
import time
from slackclient import SlackClient
import common
import config
class SlackConn:
def __init__(self, slack_token):
self.slack_token = slack_token
self.sc = SlackClient(slack_token)
self.listening = True
def post_msg(self, msg, channel=config.general_channel):
ret = self.sc.api_call(
"chat.postMessage",
channel=channel,
text=msg,
as_user=True
)
if not ret['ok']:
raise ValueError(ret)
else:
return ret
def listen(self, on_message):
common.info("Listening")
if self.sc.rtm_connect():
while self.listening:
time.sleep(1)
msgs = self.sc.rtm_read()
for msg in msgs:
if msg['type'] == 'error':
raise ValueError(msg)
elif msg['type'] == 'message':
on_message(msg)
else:
raise ValueError('Connection Failed')
def upload_img(self, img, channel):
self.sc.api_call(
'files.upload',
channels=channel,
as_user=True,
filename=img,
file=open(img, 'rb'),
)
if __name__ == '__main__':
conn = SlackConn(config.slack_token)
conn.upload_img('/Users/omibot/data/omibot/sentry/Dienstag, 31. Oktober 2017 um 14:15:51/Image2.jpeg', '#allgemein')
| 1,061
| -5
| 130
|
14223365b4a249e7ea4ba0ff7d14b335763258c3
| 609
|
py
|
Python
|
easistrain/log_parameters.py
|
woutdenolf/easistrain
|
0484168e33e548af01a5cc649abf815c45b182f1
|
[
"MIT"
] | null | null | null |
easistrain/log_parameters.py
|
woutdenolf/easistrain
|
0484168e33e548af01a5cc649abf815c45b182f1
|
[
"MIT"
] | 11
|
2021-11-10T08:36:22.000Z
|
2022-03-21T08:31:17.000Z
|
easistrain/log_parameters.py
|
EASI-STRESS/easistrain
|
86192d1c4135875daec8e4e4abcb67e372f86efb
|
[
"MIT"
] | 1
|
2021-08-04T14:02:16.000Z
|
2021-08-04T14:02:16.000Z
|
import os
from datetime import datetime
| 38.0625
| 87
| 0.597701
|
import os
from datetime import datetime
def log_parameters(filename, parameters, task_name):
filename = os.path.join(parameters["root_data"], filename)
with open(filename, "w") as fwlog:
fwlog.write(f"{task_name.upper()} LOG FILE\n")
fwlog.write(f"Date and time : {datetime.now()}\n")
fwlog.write(
f"#$#$#$#$#$#$#The arguments used for {task_name.lower()} are below: \n"
)
for name, value in parameters.items():
fwlog.write(f"{name} = {value}\n")
fwlog.write("************____________________________________**************\n")
| 545
| 0
| 23
|
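log_parameters above joins the log filename onto parameters["root_data"], so that key must be present in the dict. Below is a minimal, hypothetical call; the import path follows the file location easistrain/log_parameters.py and every parameter name apart from root_data is an arbitrary example.

from easistrain.log_parameters import log_parameters  # assumes the package is installed

parameters = {
    "root_data": "/tmp",     # directory the log file is written into
    "detector": "example",   # arbitrary example parameter
    "n_scans": 3,            # arbitrary example parameter
}
log_parameters("acquisition.log", parameters, "acquisition")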
761d615503334752d3b3dc0238f8967f8df622ab
| 1,481
|
py
|
Python
|
SC101_Assignment1/draw_line.py
|
kevinfang418/sc-projects
|
c5b9023b137c704de3488fe2f5fe307187d957b6
|
[
"MIT"
] | null | null | null |
SC101_Assignment1/draw_line.py
|
kevinfang418/sc-projects
|
c5b9023b137c704de3488fe2f5fe307187d957b6
|
[
"MIT"
] | null | null | null |
SC101_Assignment1/draw_line.py
|
kevinfang418/sc-projects
|
c5b9023b137c704de3488fe2f5fe307187d957b6
|
[
"MIT"
] | null | null | null |
"""
File: draw_line.py
Name: Kevin Fang
-------------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GLine
from campy.graphics.gwindow import GWindow
from campy.gui.events.mouse import onmouseclicked
# Assign window as constant to create canvas
window = GWindow()
SIZE = 10
# a, b ,c ,d are global variables, so define them as 0 value
a = b = c = d = 0
def main():
"""
This program creates lines on an instance of GWindow class.
    A circle marks the user's first click. When the user clicks the canvas
    a second time, the circle disappears and a line is drawn between the
    two points.
"""
onmouseclicked(set_point)
if __name__ == "__main__":
main()
| 27.425926
| 87
| 0.594868
|
"""
File: draw_line.py
Name: Kevin Fang
-------------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GLine
from campy.graphics.gwindow import GWindow
from campy.gui.events.mouse import onmouseclicked
# Assign window as constant to create canvas
window = GWindow()
SIZE = 10
# a, b ,c ,d are global variables, so define them as 0 value
a = b = c = d = 0
def main():
"""
This program creates lines on an instance of GWindow class.
    A circle marks the user's first click. When the user clicks the canvas
    a second time, the circle disappears and a line is drawn between the
    two points.
"""
onmouseclicked(set_point)
def set_point(event):
# a and b are global variables to store mouse event when everytime mouse clicked
global a, b, c, d
a = event.x
b = event.y
# check c and d are circle (object)
maybe_circle = window.get_object_at(c, d)
# draw circle when c and d are (0, 0)
if c == d == 0:
point = GOval(SIZE, SIZE, x=a-SIZE/2, y=b-SIZE/2)
point.filled = False
window.add(point)
c = a
d = b
# if (c, d) is circle and not (0, 0), we need to draw a line from (c, d) to (a, b)
elif maybe_circle is not None and c != d != 0:
line = GLine(c, d, a, b)
window.add(line)
window.remove(maybe_circle)
c = 0
d = 0
if __name__ == "__main__":
main()
| 710
| 0
| 25
|
b9f156865c4501e3c6f146833e523ef8115f0c71
| 2,473
|
py
|
Python
|
tests/unit/auth/test_registration_processors.py
|
Arjun-sna/flask-forum-api-service
|
9c33c10269a147d7c5225e9c9106ccc43eb31705
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/auth/test_registration_processors.py
|
Arjun-sna/flask-forum-api-service
|
9c33c10269a147d7c5225e9c9106ccc43eb31705
|
[
"BSD-3-Clause"
] | 1
|
2021-11-25T17:25:19.000Z
|
2021-11-25T17:25:19.000Z
|
tests/unit/auth/test_registration_processors.py
|
Arjun-sna/flask-forum-api-service
|
9c33c10269a147d7c5225e9c9106ccc43eb31705
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import get_flashed_messages
from flask_login import current_user
from app.auth.services.registration import (
AutoActivateUserPostProcessor,
AutologinPostProcessor,
SendActivationPostProcessor,
)
from app.core.auth.activation import AccountActivator
from app.utils.settings import app_config
| 29.440476
| 89
| 0.696725
|
from flask import get_flashed_messages
from flask_login import current_user
from app.auth.services.registration import (
AutoActivateUserPostProcessor,
AutologinPostProcessor,
SendActivationPostProcessor,
)
from app.core.auth.activation import AccountActivator
from app.utils.settings import app_config
class TestAutoActivateUserPostProcessor(object):
def test_activates_when_user_activation_isnt_required(
self, unactivated_user, database
):
config = {"ACTIVATE_ACCOUNT": False}
processor = AutoActivateUserPostProcessor(database, config)
processor.post_process(unactivated_user)
assert unactivated_user.activated
def test_doesnt_activate_when_user_activation_is_required(
self, database, unactivated_user
):
config = {"ACTIVATE_ACCOUNT": True}
processor = AutoActivateUserPostProcessor(database, config)
processor.post_process(unactivated_user)
assert not unactivated_user.activated
class TestAutologinPostProcessor(object):
def test_sets_user_as_current_user(
self, Fred, request_context, default_settings
):
app_config["ACTIVATE_ACCOUNT"] = False
processor = AutologinPostProcessor()
processor.post_process(Fred)
expected_message = ("success", "Thanks for registering.")
assert current_user.username == Fred.username
assert (
get_flashed_messages(with_categories=True)[0] == expected_message
)
class TestSendActivationPostProcessor(object):
class SpyingActivator(AccountActivator):
def __init__(self):
self.called = False
self.user = None
def initiate_account_activation(self, user):
self.called = True
self.user = user
def activate_account(self, token):
pass
def test_sends_activation_notice(
self, request_context, unactivated_user, default_settings
):
activator = self.SpyingActivator()
processor = SendActivationPostProcessor(activator)
processor.post_process(unactivated_user)
expected_message = (
"success",
"An account activation email has been sent to notactive@example.com", # noqa
)
assert activator.called
assert activator.user == unactivated_user.email
assert (
get_flashed_messages(with_categories=True)[0] == expected_message
)
| 1,766
| 238
| 150
|
3a6cf595448f1392abc3066f4404fa1077c8347f
| 1,598
|
py
|
Python
|
exercise_fstrings_solution.py
|
annezola/gdi-python
|
a806f0eca2eb17e5a975cce8d0b1d90490dd455e
|
[
"MIT"
] | null | null | null |
exercise_fstrings_solution.py
|
annezola/gdi-python
|
a806f0eca2eb17e5a975cce8d0b1d90490dd455e
|
[
"MIT"
] | null | null | null |
exercise_fstrings_solution.py
|
annezola/gdi-python
|
a806f0eca2eb17e5a975cce8d0b1d90490dd455e
|
[
"MIT"
] | 1
|
2022-01-04T15:26:40.000Z
|
2022-01-04T15:26:40.000Z
|
""" This function should return a string like
"There are NUM planets in the solar system"
where NUM is provided as an argument."""
# Should equal "There are 8 planets in the solar system"
ss1 = solar_system(8)
# Should equal "There are 9 planets in the solar system"
ss2 = solar_system(9)
""" This function should return a string of the format
"On the DAYth day of MONTH in the year YEAR"
where DAY, MONTH, and YEAR are provided.
"""
# Should equal "On the 8th day of July in the year 2019"
date1 = fancy_date("July", 8, 2019)
# Should equal "On the 24th day of June in the year 1984"
date2 = fancy_date("June", 24, 1984)
""" This function should return a string
which starts with the provided place, then
has an @ sign, then the comma-separated lat and lng"""
# Should equal "Tilden Farm @ 37.91, -122.29"
loc1 = location("Tilden Farm", 37.91, -122.29)
# Should equal "Salton Sea @ 33.309, -115.979"
loc2 = location("Salton Sea", 33.309,-115.979)
""" This function should return a string
which starts with the provided item,
then a colon, then a $ sign and the provided cost."""
# Should equal "Avocado toast: $9.99"
menu1 = menu("Avocado toast", 9.99)
# Should equal "Cronut: $3.99"
menu2 = menu("Cronut", 3.99)
| 31.96
| 63
| 0.704005
|
""" This function should return a string like
"There are NUM planets in the solar system"
where NUM is provided as an argument."""
def solar_system(num_planets):
# Replace this line!
return f"There are {num_planets} planets in the solar system"
# Should equal "There are 8 planets in the solar system"
ss1 = solar_system(8)
# Should equal "There are 9 planets in the solar system"
ss2 = solar_system(9)
""" This function should return a string of the format
"On the DAYth day of MONTH in the year YEAR"
where DAY, MONTH, and YEAR are provided.
"""
def fancy_date(month, day, year):
# Replace this line!
return f"On the {day}th day of {month} in the year {year}"
# Should equal "On the 8th day of July in the year 2019"
date1 = fancy_date("July", 8, 2019)
# Should equal "On the 24th day of June in the year 1984"
date2 = fancy_date("June", 24, 1984)
""" This function should return a string
which starts with the provided place, then
has an @ sign, then the comma-separated lat and lng"""
def location(place, lat, lng):
# Replace this line!
return f"{place} @ {lat}, {lng}"
# Should equal "Tilden Farm @ 37.91, -122.29"
loc1 = location("Tilden Farm", 37.91, -122.29)
# Should equal "Salton Sea @ 33.309, -115.979"
loc2 = location("Salton Sea", 33.309,-115.979)
""" This function should return a string
which starts with the provided item,
then a colon, then a $ sign and the provided cost."""
def menu(item, cost):
return f"{item}: ${cost}"
# Should equal "Avocado toast: $9.99"
menu1 = menu("Avocado toast", 9.99)
# Should equal "Cronut: $3.99"
menu2 = menu("Cronut", 3.99)
| 287
| 0
| 88
|
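The menu helper above interpolates the cost directly, so a price such as 3.9 would render as "$3.9". Not part of the original exercise, but an f-string format spec pins the output to two decimals:

def menu_two_decimals(item, cost):
    # Same idea as menu() above, but always shows two decimal places.
    return f"{item}: ${cost:.2f}"

print(menu_two_decimals("Cronut", 3.9))  # Cronut: $3.90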
de902fdb86f709ef7f595a5ba51b0a79c8789d68
| 3,256
|
py
|
Python
|
webradio.py
|
akaessens/raspi_music
|
36c83519036be48cb29db3755a77f5855ba788a0
|
[
"MIT"
] | null | null | null |
webradio.py
|
akaessens/raspi_music
|
36c83519036be48cb29db3755a77f5855ba788a0
|
[
"MIT"
] | null | null | null |
webradio.py
|
akaessens/raspi_music
|
36c83519036be48cb29db3755a77f5855ba788a0
|
[
"MIT"
] | null | null | null |
import logging
import vlc
import xml.etree.ElementTree as ET
import os
import sys
import re
from threading import Timer
from time import sleep
@vlc.CallbackDecorators.LogCb
| 29.071429
| 76
| 0.588452
|
import logging
import vlc
import xml.etree.ElementTree as ET
import os
import sys
import re
from threading import Timer
from time import sleep
@vlc.CallbackDecorators.LogCb
def log_callback(data, level, ctx, fmt, args):
if level > 0:
logging.debug("VLC: " + fmt.decode('UTF-8'), args)
pass
class WebRadio():
filename = os.path.join(sys.path[0], "webradiosources.xml")
def __init__(self, args):
logging.info("WebRadio started.")
logging.debug("WebRadio sources file: " + WebRadio.filename)
with open(WebRadio.filename) as file:
data = file.read()
xmlstring = re.sub(' xmlns="[^"]+"', '', data, count=1)
self.tree = ET.fromstring(xmlstring)
self.media_list = []
self.current = 0
for source in self.tree.findall("source"):
uri = str(source.text)
name = str(source.get("name"))
self.media_list.append((name, uri))
logging.debug("found source: " + name + " - " + uri)
logging.debug("added sources: " + str(len(self.media_list)))
self.vlc_instance = vlc.Instance()
if args["v"] == 2:
self.vlc_instance.log_set(log_callback, None)
else:
self.vlc_instance.log_unset()
self.player = self.vlc_instance.media_player_new()
startup_uri = self.media_list[self.current][1]
self.player.set_mrl(startup_uri)
self.player.play()
self.print_current()
def print_current(self):
logging.debug("source nr : " + str(self.current))
logging.debug("source uri : " + self.media_list[self.current][1])
logging.debug("source name: " + self.media_list[self.current][0])
timer = Timer(0.5, self.print_title)
timer.start()
def print_title(self):
logging.debug("Reading metadata")
cnt = 0
while (not self.player.is_playing() and cnt < 10):
sleep(0.1)
cnt += 1
sleep(0.1)
title = str(self.player.get_media().get_meta(vlc.Meta.Title))
playing = str(self.player.get_media().get_meta(vlc.Meta.NowPlaying))
logging.info("Station: " + title)
logging.info("Playing: " + playing)
def play_pause(self):
logging.debug("play_pause")
self.player.pause()
def prev(self):
logging.debug("prev")
self.current = (self.current - 1) % len(self.media_list)
self.print_current()
self.player.set_mrl(self.media_list[self.current][1])
self.player.play()
def next(self):
logging.debug("next")
self.current = (self.current + 1) % len(self.media_list)
self.print_current()
self.player.set_mrl(self.media_list[self.current][1])
self.player.play()
def stop(self):
logging.info("webradio stopped")
self.player.stop()
def list_stations(self):
logging.info("Listing " + str(len(self.media_list)) + " Stations")
for source in self.media_list:
if self.media_list.index(source) == self.current:
logging.info("* " + source[0] + " - " + source[1])
else:
logging.info(" " + source[0] + " - " + source[1])
| 2,759
| 276
| 45
|
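WebRadio.__init__ above strips a default XML namespace with a regex and then reads <source name="...">url</source> elements from webradiosources.xml. Below is a self-contained sketch of just that parsing step with an inline example document, so the expected file shape is visible without VLC installed; the station names and URLs are made up.

import re
import xml.etree.ElementTree as ET

# Made-up example of the webradiosources.xml shape expected by WebRadio above.
EXAMPLE = """<?xml version="1.0"?>
<sources xmlns="http://example.invalid/webradio">
    <source name="Station One">http://stream.example.invalid/one</source>
    <source name="Station Two">http://stream.example.invalid/two</source>
</sources>"""

# Same namespace-stripping trick as WebRadio.__init__ above.
xmlstring = re.sub(' xmlns="[^"]+"', '', EXAMPLE, count=1)
tree = ET.fromstring(xmlstring)
for source in tree.findall("source"):
    print(source.get("name"), "-", source.text)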
5cae7b4d407c82f31dcbfab59de323eab0edb688
| 433
|
py
|
Python
|
user/admin.py
|
judeakinwale/SMS-backup
|
30636591b43bec94e7406f4c02fde402a5a2e38f
|
[
"MIT"
] | null | null | null |
user/admin.py
|
judeakinwale/SMS-backup
|
30636591b43bec94e7406f4c02fde402a5a2e38f
|
[
"MIT"
] | null | null | null |
user/admin.py
|
judeakinwale/SMS-backup
|
30636591b43bec94e7406f4c02fde402a5a2e38f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from user import models
# Register your models here.
admin.site.register(models.User)
admin.site.register(models.Staff)
admin.site.register(models.CourseAdviser)
admin.site.register(models.Student)
admin.site.register(models.Biodata)
admin.site.register(models.AcademicData)
admin.site.register(models.AcademicHistory)
admin.site.register(models.HealthData)
admin.site.register(models.FamilyData)
| 27.0625
| 43
| 0.831409
|
from django.contrib import admin
from user import models
# Register your models here.
admin.site.register(models.User)
admin.site.register(models.Staff)
admin.site.register(models.CourseAdviser)
admin.site.register(models.Student)
admin.site.register(models.Biodata)
admin.site.register(models.AcademicData)
admin.site.register(models.AcademicHistory)
admin.site.register(models.HealthData)
admin.site.register(models.FamilyData)
| 0
| 0
| 0
|
fb054d420db735d95ad2eb975049ec59b089b6d7
| 1,409
|
py
|
Python
|
dictionaries/ForceBook_dictionary.py
|
MaggieIllustrations/softuni-github-programming
|
f5695cb14602f3d2974359f6d8734332acc650d3
|
[
"MIT"
] | null | null | null |
dictionaries/ForceBook_dictionary.py
|
MaggieIllustrations/softuni-github-programming
|
f5695cb14602f3d2974359f6d8734332acc650d3
|
[
"MIT"
] | null | null | null |
dictionaries/ForceBook_dictionary.py
|
MaggieIllustrations/softuni-github-programming
|
f5695cb14602f3d2974359f6d8734332acc650d3
|
[
"MIT"
] | 1
|
2022-01-14T17:12:44.000Z
|
2022-01-14T17:12:44.000Z
|
line = input()
sides = {}
while line != "Lumpawaroo":
if " | " in line:
args = line.split(" | ")
side = args[0]
user = args[1]
# TODO If you receive forceSide | forceUser, you should check if such forceUser already exists, and if not, add him/her to the corresponding side
if side not in sides:
sides[side] = []
all_values = []
for current_list in sides.values():
all_values += current_list
if user not in all_values:
sides[side].append(user)
else:
args = line.split(" -> ")
user = args[0]
side = args[1]
old_side = ""
for key, value in sides.items():
if user in value:
old_side = key
break
if old_side != "":
sides[old_side].remove(user)
if side not in sides:
sides[side] = []
sides[side].append(user)
else:
if side not in sides:
sides[side] = []
sides[side].append(user)
print(f"{user} joins the {side} side!")
line = input()
sides = dict(sorted(sides.items(), key=lambda x: (-len(x[1]), x[0])))
for side, users in sides.items():
if len(users) == 0:
continue
print(f"Side: {side}, Members: {len(users)}")
for user in sorted(users):
print(f"! {user}")
| 23.881356
| 153
| 0.504613
|
line = input()
sides = {}
while line != "Lumpawaroo":
if " | " in line:
args = line.split(" | ")
side = args[0]
user = args[1]
# TODO If you receive forceSide | forceUser, you should check if such forceUser already exists, and if not, add him/her to the corresponding side
if side not in sides:
sides[side] = []
all_values = []
for current_list in sides.values():
all_values += current_list
if user not in all_values:
sides[side].append(user)
else:
args = line.split(" -> ")
user = args[0]
side = args[1]
old_side = ""
for key, value in sides.items():
if user in value:
old_side = key
break
if old_side != "":
sides[old_side].remove(user)
if side not in sides:
sides[side] = []
sides[side].append(user)
else:
if side not in sides:
sides[side] = []
sides[side].append(user)
print(f"{user} joins the {side} side!")
line = input()
sides = dict(sorted(sides.items(), key=lambda x: (-len(x[1]), x[0])))
for side, users in sides.items():
if len(users) == 0:
continue
print(f"Side: {side}, Members: {len(users)}")
for user in sorted(users):
print(f"! {user}")
| 0
| 0
| 0
|
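The final sort above orders the sides by member count, largest first, and breaks ties alphabetically via the key (-len(x[1]), x[0]). A tiny standalone check of that behaviour with made-up data:

# Made-up sides dict to illustrate the sort key used above.
sides = {"Light": ["a", "b"], "Dark": ["c", "d"], "Gray": ["e"]}
ordered = dict(sorted(sides.items(), key=lambda x: (-len(x[1]), x[0])))
print(list(ordered))  # ['Dark', 'Light', 'Gray'] -- bigger sides first, ties alphabetical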
6ee78f3fabbf50a0a068253b548dc1470a1883e0
| 1,368
|
py
|
Python
|
src/client/client.py
|
MayD524/MDrive
|
36f2d6ea70f2821c31c49371b0483ab1bdc7dc7a
|
[
"MIT"
] | null | null | null |
src/client/client.py
|
MayD524/MDrive
|
36f2d6ea70f2821c31c49371b0483ab1bdc7dc7a
|
[
"MIT"
] | null | null | null |
src/client/client.py
|
MayD524/MDrive
|
36f2d6ea70f2821c31c49371b0483ab1bdc7dc7a
|
[
"MIT"
] | null | null | null |
import requests
import os
url = 'http://admin:SuperAdminPasssword6742344234!!@localhost:8080/'#'http://admin:SuperAdminPasssword6742344234!!@a18e-2601-182-ce00-c860-3c42-c8b2-be91-176.ngrok.io/'
#resp = requests.post(url, data={'newUser': True, 'username': 'new_user', 'password': 'test_pass'})
## makefile : filename
## writefile : filename, data : str
## deletefile : filename
## readfile : filename (gotten from GET request)
## makefolder : foldername
## deletefolder : foldername
## listfolder : foldername
## changedir : foldername
## renamefile : filename, newname : str
## renamefolder : foldername, newname : str
##
"""requests.put(url, data={'deletefile': "4.png"})
img = Image.open('shitpost.png')
requests.post(url, data={'makefile': "4.png"})
resp = requests.put(url, data={"writefile": "4.png", "authToken": "new_user_user_1", "username": "new_user", "data": img.tobytes()})
resp = requests.get(url + "4.png")
image = Image.frombytes('RGBA', img.size, resp.content)
img.save('4.png', format='PNG')"""
#req = requests.post(url, data={"makefile": "test2.txt"})
#print(req.content)
#req = requests.put(url, data={"writefile": "test2.txt", "authToken": "admin_super_0", "username": "admin_super_0", "data": "test helfgsdfgsdfglo world"})
#print(req.content)
req = requests.get(url + "test2.txt")
print(req.content)
| 36.972973
| 169
| 0.681287
|
import requests
import os
url = 'http://admin:SuperAdminPasssword6742344234!!@localhost:8080/'#'http://admin:SuperAdminPasssword6742344234!!@a18e-2601-182-ce00-c860-3c42-c8b2-be91-176.ngrok.io/'
#resp = requests.post(url, data={'newUser': True, 'username': 'new_user', 'password': 'test_pass'})
## makefile : filename
## writefile : filename, data : str
## deletefile : filename
## readfile : filename (gotten from GET request)
## makefolder : foldername
## deletefolder : foldername
## listfolder : foldername
## changedir : foldername
## renamefile : filename, newname : str
## renamefolder : foldername, newname : str
##
"""requests.put(url, data={'deletefile': "4.png"})
img = Image.open('shitpost.png')
requests.post(url, data={'makefile': "4.png"})
resp = requests.put(url, data={"writefile": "4.png", "authToken": "new_user_user_1", "username": "new_user", "data": img.tobytes()})
resp = requests.get(url + "4.png")
image = Image.frombytes('RGBA', img.size, resp.content)
img.save('4.png', format='PNG')"""
#req = requests.post(url, data={"makefile": "test2.txt"})
#print(req.content)
#req = requests.put(url, data={"writefile": "test2.txt", "authToken": "admin_super_0", "username": "admin_super_0", "data": "test helfgsdfgsdfglo world"})
#print(req.content)
req = requests.get(url + "test2.txt")
print(req.content)
| 0
| 0
| 0
|
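The commented-out calls above show the server's form fields: makefile for creation, writefile with authToken, username and data for writes, and a plain GET for reads. Below is a small wrapper sketch built only from those documented fields; the base URL and credentials are placeholders copied from the commented examples, not working values, and a server must be running for the calls to succeed.

import requests

BASE_URL = "http://localhost:8080/"  # placeholder, as in the commented examples above
AUTH = {"authToken": "admin_super_0", "username": "admin_super_0"}  # placeholder credentials

def make_file(name):
    # POST with the 'makefile' field, as in the commented example above.
    return requests.post(BASE_URL, data={"makefile": name})

def write_file(name, text):
    # PUT with 'writefile' plus the auth fields and the payload.
    return requests.put(BASE_URL, data={"writefile": name, "data": text, **AUTH})

def read_file(name):
    # Plain GET of the file name, as done at the bottom of the script above.
    return requests.get(BASE_URL + name).content

if __name__ == "__main__":
    make_file("notes.txt")
    write_file("notes.txt", "hello")
    print(read_file("notes.txt"))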
8bf234cc035bc4ea8b0dd72e26461c4cdb4d1364
| 10,933
|
py
|
Python
|
LM.py
|
StonyBrookNLP/SLDS-Stories
|
2a2bbcb48b860e833c93c34f0389c9f6ea851160
|
[
"MIT"
] | 1
|
2020-10-28T22:30:32.000Z
|
2020-10-28T22:30:32.000Z
|
LM.py
|
StonyBrookNLP/SLDS-Stories
|
2a2bbcb48b860e833c93c34f0389c9f6ea851160
|
[
"MIT"
] | 1
|
2021-05-01T03:28:19.000Z
|
2021-05-01T03:28:19.000Z
|
LM.py
|
StonyBrookNLP/SLDS-Stories
|
2a2bbcb48b860e833c93c34f0389c9f6ea851160
|
[
"MIT"
] | null | null | null |
##############################################
# Switching Linear Dynamical System
# Code for both SLDS generative model as well
# as variational inference code
##############################################
import torch
import torch.nn as nn
import numpy as np
import math
from torch.autograd import Variable
import itertools
import torch.nn.functional as F
import utils
from masked_cross_entropy import masked_cross_entropy
from EncDec import Encoder, Decoder, gather_last, sequence_mask
from data_utils import EOS_TOK, SOS_TOK, PAD_TOK, transform
| 43.557769
| 171
| 0.617214
|
##############################################
# Switching Linear Dynamical System
# Code for both SLDS generative model as well
# as variational inference code
##############################################
import torch
import torch.nn as nn
import numpy as np
import math
from torch.autograd import Variable
import itertools
import torch.nn.functional as F
import utils
from masked_cross_entropy import masked_cross_entropy
from EncDec import Encoder, Decoder, gather_last, sequence_mask
from data_utils import EOS_TOK, SOS_TOK, PAD_TOK, transform
class LM(nn.Module):
def __init__(self, hidden_size, rnn_hidden_size, embd_size, vocab, trans_matrix, layers=2, pretrained=False, dropout=0.0, use_cuda=False):
"""
Args:
hidden_size (int) : size of hidden vector z
embd_size (int) : size of word embeddings
vocab (torchtext.Vocab) : vocabulary object
trans_matrix (Tensor, [num states, num states]) : Transition matrix probs for switching markov chain
pretrained (bool) : use pretrained word embeddings?
"""
super(LM, self).__init__()
#self.hidden_size = self.dec_hsize = hidden_size
#self.encoded_data_size = hidden_size #Vector size to use whenever encoding text into a %&#ing vector
self.hidden_size = hidden_size
self.encoded_data_size = self.dec_hsize = rnn_hidden_size #Vector size to use whenever encoding text into a %&#ing vector
self.trans_matrix = trans_matrix
self.num_states = trans_matrix.shape[0]
self.embd_size=embd_size
self.layers = layers
self.use_cuda = use_cuda
self.vocab_size=len(vocab.stoi.keys())
self.sos_idx = vocab.stoi[SOS_TOK]
self.eos_idx = vocab.stoi[EOS_TOK]
self.pad_idx = vocab.stoi[PAD_TOK]
in_embedding = nn.Embedding(self.vocab_size, self.embd_size, padding_idx=self.pad_idx)
out_embedding = nn.Embedding(self.vocab_size, self.embd_size, padding_idx=self.pad_idx)
if pretrained:
print("Using Pretrained")
in_embedding.weight.data = vocab.vectors
out_embedding.weight.data = vocab.vectors
self.liklihood_rnn= Decoder(self.embd_size, self.dec_hsize, out_embedding, "GRU", self.layers, use_cuda=use_cuda, dropout=dropout)
self.liklihood_logits= nn.Linear(self.dec_hsize, self.vocab_size, bias=False) #Weights to calculate logits, out [batch, vocab_size]
if use_cuda:
self.liklihood_rnn = self.liklihood_rnn.cuda()
self.liklihood_logits = self.liklihood_logits.cuda()
def forward(self, input, seq_lens, gumbel_temp=1.0, state_labels=None):
"""
Args
input (Tensor, [num_sents, batch, seq_len]) : Tensor of input ids for the embeddings lookup
seq_lens (Tensor [num_sents, batch]) : Store the sequence lengths for each batch for packing
state_labels (tensor [num_sents, batch]) : labels for the states if doing supervision
Returns
output logits (Tensor, [num_sents, batch, seq_len, vocab size]) : logits for the output words
state logits (Tensor, [num_sents, batch, num classes]) : logits for state prediction, can be used for supervision and to calc state KL
            Z_kl (Tensor, [batch]) : KL divergence for the Z transitions (calculated for each batch)
"""
#Now Evaluate the Liklihood for each sentence
batch_size = input.size(1)
num_sents = input.size(0)
dhidden=None
data_logits = []
for i in range(num_sents):
logits, dhidden = self.data_liklihood_factor(input[i,:,:], dhidden, seq_lens[i]) # USE IF PASSING PREV HIDDEN STATE TO NEXT
data_logits.append(logits)
data_logits = torch.stack(data_logits, dim=0) #[num_sents, batch, seq, num classes]
return data_logits
#P(X | Z)
def data_liklihood_factor(self, data, dhidden=None, lengths=None):
"""
Output the logits at each timestep (the data liklihood outputs from the rnn)
Args:
data (Tensor, [batch, seq_len]) vocab ids of the data
curr_z (Tensor, [batch, hidden_size])
Ret:
logits (Tensor, [batch, seq_len, vocab size])
"""
#REMEMBER: The data has both BOS and EOS appended to it
dhidden_list = [] #List for storing dhiddens so that the last one before pads can be extracted out
if dhidden is None:
dhidden = torch.zeros(self.layers, data.shape[0], self.dec_hsize).cuda() if self.use_cuda else torch.zeros(self.layers, data.shape[0], self.dec_hsize)
logits = []
for i in range(data.size(1)-1): #dont process last (the eos)
dec_input = data[:, i]
#dec_output is [batch, hidden_dim]
dec_output, dhidden = self.liklihood_rnn(dec_input, dhidden)
logits_t = self.liklihood_logits(dec_output)
logits += [logits_t]
dhidden_list += [dhidden.transpose(0,1)] #list stores [batch, layers, hiddensize]
logits = torch.stack(logits, dim=1) #DIMENSION BE [batch x seq x num_classes]
dhidden_list = torch.stack(dhidden_list, dim=1) #[batch x seq x layers x hidden_size]
dhidden_list = dhidden_list.view(dhidden_list.shape[0], dhidden_list.shape[1], -1) #[batch, seq, layers*hidden_size]
last_dhidden = gather_last(dhidden_list, lengths - 1, use_cuda=self.use_cuda)
last_dhidden = last_dhidden.view(-1, self.layers, self.dec_hsize).transpose(0,1).contiguous() #[layers, batch, hiddensize]
return logits, last_dhidden
def greedy_decode(self, dhidden=None, max_decode=30, top_k=15):
"""
Output the logits at each timestep (the data liklihood outputs from the rnn)
Args:
data (Tensor, [batch, seq_len]) vocab ids of the data
curr_z (Tensor, [batch, hidden_size])
Ret:
outputs - list of indicies
"""
if dhidden is None:
dhidden = torch.zeros(self.layers, 1, self.dec_hsize)
outputs = []
prev_output = Variable(torch.LongTensor(1).zero_() + self.sos_idx)
for i in range(max_decode):
dec_input = prev_output
dec_output, dhidden = self.liklihood_rnn(dec_input, dhidden)
logits_t = self.liklihood_logits(dec_output)
#dec_output is [batch, hidden_dim]
# probs = F.log_softmax(logits_t, dim=1)
# top_vals, top_inds = probs.topk(1, dim=1)
logits_t = self.top_k_logits(logits_t, k=top_k)
probs = F.softmax(logits_t/1.00, dim=1)
top_inds = torch.multinomial(probs, 1)
outputs.append(top_inds.squeeze().item())
prev_output = top_inds[0]
if top_inds.squeeze().item() == self.eos_idx:
break
return outputs, dhidden
def top_k_logits(self,logits, k):
vals,_ = torch.topk(logits,k)
mins = vals[:,-1].unsqueeze(dim=1).expand_as(logits)
return torch.where(logits < mins, torch.ones_like(logits)*-1e10,logits)
def reconstruct(self, input, seq_lens, initial_sents):
"""
Args
input (Tensor, [num_sents, batch, seq_len]) : Tensor of input ids for the embeddings lookup
seq_lens (Tensor [num_sents, batch]) : Store the sequence lengths for each batch for packing
Returns
output logits (Tensor, [num_sents, batch, seq_len, vocab size]) : logits for the output words
state logits (Tensor, [num_sents, batch, num classes]) : logits for state prediction, can be used for supervision and to calc state KL
            Z_kl (Tensor, [batch]) : KL divergence for the Z transitions (calculated for each batch)
"""
batch_size = 1
num_sents = input.size(0)
dhidden=None
outputs = []
for i in range(num_sents):
if i < initial_sents:
_, dhidden = self.data_liklihood_factor(input[i, :, :], dhidden, seq_lens[i])
sent_out = input[i, :, :].squeeze().tolist()
else:
sent_out, dhidden= self.greedy_decode(dhidden)
outputs.append(sent_out)
return outputs
def set_use_cuda(self, value):
self.use_cuda = value
self.liklihood_rnn.use_cuda = value
def interpolate(self, input, seq_lens, initial_sents, num_samples, vocab):
"""
        Args
            input (Tensor, [num_sents, batch, seq_len]) : Tensor of input ids for the embeddings lookup
            seq_lens (Tensor [num_sents, batch]) : Store the sequence lengths for each batch for packing
            initial_sents (int) : number of leading sentences to copy from the input before sampling
            num_samples (int) : number of sampled candidates to compare
            vocab : vocabulary used to print the decoded sentences
        Returns
            best_outputs (list of lists of ints) : of the num_samples candidates, the one whose sampled
                middle sentences give the lowest cross entropy on the fixed final input sentence
"""
batch_size = 1
num_sents = input.size(0)
min_cross_entropy = 10000.0
best_outputs = None
for _ in range(num_samples):
dhidden=None
outputs = []
for i in range(num_sents):
if i < initial_sents:
_, dhidden = self.data_liklihood_factor(input[i, :, :], dhidden, seq_lens[i]) #Just run the sentence through the lm so we can get the dhidden
sent_out = input[i, :, :].squeeze().tolist()
elif i == num_sents-1:
last_logits, dhidden = self.data_liklihood_factor(input[i, :, :], dhidden, seq_lens[i]) #Just run the sentence through the lm so we can get the dhidden
sent_out = input[i, :, :].squeeze().tolist()
else: #Decode a new sentence
sent_out, dhidden= self.greedy_decode(dhidden)
outputs.append(sent_out)
cross_entropy = masked_cross_entropy(last_logits, torch.LongTensor(outputs[-1][1:]).unsqueeze(dim=0), seq_lens[-1] - 1, use_cuda=self.use_cuda).item()
if cross_entropy < min_cross_entropy:
min_cross_entropy = cross_entropy
best_outputs = outputs
print("Cross Entropy: {}".format(min_cross_entropy))
for j, sent in enumerate(best_outputs):
print("{}".format(transform(best_outputs[j], vocab.itos)))
print("--------------------\n\n")
return best_outputs
def set_use_cuda(self, value):
self.use_cuda = value
self.liklihood_rnn.use_cuda = value
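# Illustrative, self-contained sketch (not part of the original file) of the top-k
# filtering + sampling step used by top_k_logits/greedy_decode above. It only assumes a
# working torch install; all names and values below are made up.
import torch
import torch.nn.functional as F

def _top_k_filter_demo(logits, k):
    # Keep the k largest logits per row and push the rest to -1e10 so that softmax
    # assigns them (numerically) zero probability.
    vals, _ = torch.topk(logits, k)
    mins = vals[:, -1].unsqueeze(dim=1).expand_as(logits)
    return torch.where(logits < mins, torch.ones_like(logits) * -1e10, logits)

if __name__ == "__main__":
    logits = torch.tensor([[2.0, 0.5, 1.0, -1.0, 0.0]])
    filtered = _top_k_filter_demo(logits, k=2)   # only indices 0 and 2 survive
    probs = F.softmax(filtered, dim=1)           # a 2-way sampling distribution
    next_token = torch.multinomial(probs, 1)     # sampled id: 0 or 2
    print(filtered, probs, next_token)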
| 357
| 9,988
| 23
|
ce9a67cea15ae75df85394fc0d7f7e0ce45cad6c
| 1,617
|
py
|
Python
|
houttuynia/nn/modules/conv.py
|
speedcell4/houttuynia
|
598ba06d70c1263a6d256991a52e424c03d73130
|
[
"MIT"
] | 1
|
2018-04-24T01:50:39.000Z
|
2018-04-24T01:50:39.000Z
|
houttuynia/nn/modules/conv.py
|
speedcell4/houttuynia
|
598ba06d70c1263a6d256991a52e424c03d73130
|
[
"MIT"
] | 29
|
2018-05-05T02:00:55.000Z
|
2018-07-23T07:03:42.000Z
|
houttuynia/nn/modules/conv.py
|
speedcell4/houttuynia
|
598ba06d70c1263a6d256991a52e424c03d73130
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from houttuynia.nn import init
__all__ = [
'Conv1d', 'Conv2d', 'Conv3d', 'GramConv1',
]
| 29.4
| 115
| 0.648114
|
import torch
from torch import nn
from houttuynia.nn import init
__all__ = [
'Conv1d', 'Conv2d', 'Conv3d', 'GramConv1',
]
class Conv1d(nn.Conv1d):
def reset_parameters(self) -> None:
return init.keras_conv_(self)
class Conv2d(nn.Conv2d):
def reset_parameters(self) -> None:
return init.keras_conv_(self)
class Conv3d(nn.Conv3d):
def reset_parameters(self) -> None:
return init.keras_conv_(self)
class GramConv1(nn.Sequential):
def __init__(self, in_features: int, num_grams: int, out_features: int = None, bias: bool = True) -> None:
if out_features is None:
out_features = in_features
self.num_grams = num_grams
self.in_features = in_features
self.out_features = out_features
self.bias = bias
super(GramConv1, self).__init__(
Conv1d(in_features, out_features, kernel_size=1, stride=1, padding=0, bias=bias),
nn.ReLU(inplace=True),
Conv1d(out_features, out_features, kernel_size=num_grams, stride=1, padding=num_grams // 2, bias=bias),
nn.ReLU(inplace=True),
            Conv1d(out_features, out_features, kernel_size=1, stride=1, padding=0, bias=bias),  # input channels must match the out_features produced by the previous conv
)
self.reset_parameters()
def reset_parameters(self):
self[0].reset_parameters()
self[2].reset_parameters()
self[4].reset_parameters()
def forward(self, inputs: torch.Tensor, dim: int = -1) -> torch.Tensor:
inputs = inputs.transpose(-2, dim)
outputs = super(GramConv1, self).forward(inputs)
return outputs.transpose(-2, dim)
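# Small usage sketch (not part of the original file), assuming the houttuynia package above
# is importable. It just exercises GramConv1.forward: the feature axis is moved to the
# channel axis, convolved, and moved back, so the shape is preserved.
if __name__ == "__main__":
    conv = GramConv1(in_features=16, num_grams=3)   # out_features defaults to in_features
    inputs = torch.randn(4, 20, 16)                 # [batch, seq_len, features]
    outputs = conv(inputs, dim=-1)                  # -> torch.Size([4, 20, 16])
    print(outputs.shape)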
| 1,216
| 19
| 250
|
5a2dcb5b0bad00488d5c4efd76e4f03fb861fc7f
| 2,370
|
py
|
Python
|
src/ecr/ui/cli.py
|
eXceediDeaL/edl-coderunner
|
52f7eedd0727b8a428b61640cd9fad33c083d0fc
|
[
"Apache-2.0"
] | 1
|
2018-11-18T09:30:11.000Z
|
2018-11-18T09:30:11.000Z
|
src/ecr/ui/cli.py
|
eXceediDeaL/edl-coderunner
|
52f7eedd0727b8a428b61640cd9fad33c083d0fc
|
[
"Apache-2.0"
] | 6
|
2018-11-23T10:44:58.000Z
|
2018-12-04T03:44:00.000Z
|
src/ecr/ui/cli.py
|
eXceediDeaL/edl-coderunner
|
52f7eedd0727b8a428b61640cd9fad33c083d0fc
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
from typing import Dict, List, Optional
import click
from pygments.lexers.shell import BashLexer
from prompt_toolkit import prompt, print_formatted_text, PromptSession
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.shortcuts import ProgressBar
from prompt_toolkit.application import run_in_terminal
from . import color
confirmStrToSwitch: Dict[str, SwitchState] = {
"y": SwitchState.Yes,
"n": SwitchState.No,
"o": SwitchState.OK,
"c": SwitchState.Cancel
}
switchToConfirmStr: Dict[SwitchState, str] = {
v: k for k, v in confirmStrToSwitch.items()}
defaultInputCommandSession = PromptSession(
message="> ", lexer=PygmentsLexer(BashLexer), auto_suggest=AutoSuggestFromHistory())
| 34.852941
| 96
| 0.68692
|
from enum import Enum
from typing import Dict, List, Optional
import click
from pygments.lexers.shell import BashLexer
from prompt_toolkit import prompt, print_formatted_text, PromptSession
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.shortcuts import ProgressBar
from prompt_toolkit.application import run_in_terminal
from . import color
class SwitchState(Enum):
Yes: int = 0
No: int = 1
OK: int = 2
Cancel: int = 3
confirmStrToSwitch: Dict[str, SwitchState] = {
"y": SwitchState.Yes,
"n": SwitchState.No,
"o": SwitchState.OK,
"c": SwitchState.Cancel
}
switchToConfirmStr: Dict[SwitchState, str] = {
v: k for k, v in confirmStrToSwitch.items()}
defaultInputCommandSession = PromptSession(
message="> ", lexer=PygmentsLexer(BashLexer), auto_suggest=AutoSuggestFromHistory())
class CLI:
def __init__(self, inputCommandSession: Optional[PromptSession] = None):
self.read = prompt
self.getProgressBar = ProgressBar
self.inputCommandSession: PromptSession = inputCommandSession if inputCommandSession \
else defaultInputCommandSession
self.inputCommand = self.inputCommandSession.prompt
self.edit = click.edit
self.clear = click.clear
def write(self, *values, **kwargs)->None: # pylint: disable=R0201
def func():
print_formatted_text(*values, **kwargs)
run_in_terminal(func)
def info(self, message, end: str = "\n")->None:
self.write(color.useCyan(message), end=end)
def warning(self, message, end: str = "\n")->None:
self.write(color.useYellow(message), end=end)
def error(self, message, end: str = "\n")->None:
self.write(color.useRed(message), end=end)
def ok(self, message, end: str = "\n")->None:
self.write(color.useGreen(message), end=end)
def confirm(self, message: str, choice: List[SwitchState])->SwitchState: # pragma: no cover
swstr = ','.join([switchToConfirmStr[x] for x in choice])
ret = self.read(f"{message} ({swstr}) ")
while ret not in confirmStrToSwitch or confirmStrToSwitch[ret] not in choice:
ret = self.read(
f"Not an acceptable value. Please input again ({swstr}): ")
return confirmStrToSwitch[ret]
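# Illustrative usage (not part of the original file); it assumes an interactive terminal,
# since CLI.confirm ultimately calls prompt_toolkit's prompt().
if __name__ == "__main__":
    cli = CLI()
    answer = cli.confirm("Overwrite existing config?", [SwitchState.Yes, SwitchState.No])
    print("You answered:", answer)   # SwitchState.Yes when the user types "y", and so on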
| 1,262
| 61
| 234
|
37a39c70b751dc22b1734d409c9237e48ff8ee2f
| 2,577
|
py
|
Python
|
Spy-Games/code.py
|
Seema10/ga-learner-dsmp-repo
|
ef41b506eab012960e914c3b3de23be3a2d0e1b6
|
[
"MIT"
] | null | null | null |
Spy-Games/code.py
|
Seema10/ga-learner-dsmp-repo
|
ef41b506eab012960e914c3b3de23be3a2d0e1b6
|
[
"MIT"
] | null | null | null |
Spy-Games/code.py
|
Seema10/ga-learner-dsmp-repo
|
ef41b506eab012960e914c3b3de23be3a2d0e1b6
|
[
"MIT"
] | null | null | null |
# --------------
##File path for the file
file_path
#Code starts here
sample_message= str(read_file(file_path))
print(sample_message)
# --------------
#Code starts here
file_path_1
file_path_2
message_1=read_file(file_path_1)
message_2=read_file(file_path_2)
print("message1", message_1)
print("message2",message_2)
#print(int(message_2)//int(message_1))
secret_msg_1 = fuse_msg(message_1,message_2)
print(secret_msg_1)
# --------------
#Code starts here
file_path_3
message_3 = read_file(file_path_3)
print("message 3:", message_3)
secret_msg_2=substitute_msg(message_3)
print("secret msg2 :",secret_msg_2)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4 = str(read_file(file_path_4))
message_5 = str(read_file(file_path_5))
print("message 4:",message_4)
print("message 5:",message_5)
secret_msg_3 = str(compare_msg(message_4, message_5))
print("secret msg3 :", secret_msg_3)
# --------------
#Code starts here
file_path_6
message_6= str(read_file(file_path_6))
print("message 6 :",message_6)
secret_msg_4 = extract_msg(message_6)
print("secret msg 4:",secret_msg_4)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = " ".join(message_parts)
secret_message = write_file(secret_msg,final_path)
print("secret_msg :")
| 18.673913
| 71
| 0.660458
|
# --------------
##File path for the file
file_path
#Code starts here
def read_file(path):
    file = open(path, "r")
    sentence = file.readline()
    file.close()
    return sentence
sample_message= str(read_file(file_path))
print(sample_message)
# --------------
#Code starts here
file_path_1
file_path_2
message_1=read_file(file_path_1)
message_2=read_file(file_path_2)
print("message1", message_1)
print("message2",message_2)
#print(int(message_2)//int(message_1))
def fuse_msg(message_a, message_b):
quotient=int(message_b)//int(message_a)
return str(quotient)
secret_msg_1 = fuse_msg(message_1,message_2)
print(secret_msg_1)
# --------------
#Code starts here
file_path_3
message_3 = read_file(file_path_3)
print("message 3:", message_3)
def substitute_msg(message_c):
if message_c=='Red' :
sub='Army General'
if message_c=='Green':
sub='Data Scientist'
if message_c=='Blue':
sub='Marine Biologist'
return sub
secret_msg_2=substitute_msg(message_3)
print("secret msg2 :",secret_msg_2)
# --------------
# File path for message 4 and message 5
file_path_4
file_path_5
#Code starts here
message_4 = str(read_file(file_path_4))
message_5 = str(read_file(file_path_5))
print("message 4:",message_4)
print("message 5:",message_5)
def compare_msg(message_d, message_e):
a_list = message_d.split()
b_list = message_e.split()
c_list=[i for i in a_list if i not in b_list]
final_msg= " ".join(c_list)
return final_msg
secret_msg_3 = str(compare_msg(message_4, message_5))
print("secret msg3 :", secret_msg_3)
# --------------
#Code starts here
file_path_6
message_6= str(read_file(file_path_6))
print("message 6 :",message_6)
def extract_msg(message_f):
a_list= message_f.split()
#return a_list
even_Word = lambda x : len(x)%2==0
b_list= filter(even_Word,a_list)
final_msg= " ".join(b_list)
return final_msg
secret_msg_4 = extract_msg(message_6)
print("secret msg 4:",secret_msg_4)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = " ".join(message_parts)
def write_file(secret_msg, path):
file=open(path,"a+")
file.write(secret_msg)
file.close()
secret_message = write_file(secret_msg,final_path)
print("secret_msg :")
| 862
| 0
| 148
|
064bf9548636f1a3c5e4f5616d11b9d576690817
| 2,065
|
py
|
Python
|
tests/test_string.py
|
ARgorithm/TemplateLibrary
|
cde4a2bea81815a14f052ca6cf32db79b3366e99
|
[
"Apache-2.0"
] | 3
|
2021-01-20T20:12:26.000Z
|
2021-02-24T17:23:34.000Z
|
tests/test_string.py
|
ARgorithm/TemplateLibrary
|
cde4a2bea81815a14f052ca6cf32db79b3366e99
|
[
"Apache-2.0"
] | 19
|
2021-01-18T03:29:53.000Z
|
2021-06-19T07:25:43.000Z
|
tests/test_string.py
|
ARgorithm/toolkit
|
cde4a2bea81815a14f052ca6cf32db79b3366e99
|
[
"Apache-2.0"
] | null | null | null |
"""Test string
"""
import ARgorithmToolkit
algo = ARgorithmToolkit.StateSet()
st = ARgorithmToolkit.String('st', algo, "Hello world! 1234")
def test_body():
"""Test string contents
"""
assert st.body == "Hello world! 1234"
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_declare'
assert last_state.content["state_def"]["body"] == "Hello world! 1234"
def test_append():
"""Test string append
"""
global st
st.append(" Hahaha")
assert st.body == "Hello world! 1234 Hahaha"
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_append'
assert last_state.content["state_def"]["element"] == " Hahaha"
st+='xyz'
assert st.body == "Hello world! 1234 Hahahaxyz"
last_state = algo.states[-1]
second_last_state = algo.states[-2]
assert last_state.content["state_type"] == 'string_append'
assert last_state.content["state_def"]["element"] == "xyz"
assert second_last_state.content["state_type"] == 'string_declare'
assert second_last_state.content["state_def"]["body"] == "Hello world! 1234 Hahaha"
assert second_last_state.content["state_def"]["variable_name"] == "st_super"
def test_indexing():
"""Test string indexing
"""
assert st[1] == st.body[1]
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_iter'
assert last_state.content["state_def"]["index"] == 1
subst = st[1:3]
assert isinstance(subst,ARgorithmToolkit.String)
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_declare'
assert last_state.content["state_def"]["variable_name"] == 'st_super_sub'
assert last_state.content["state_def"]["body"] == st.body[1:3]
def test_iteration():
"""Test string iteration
"""
for i,(a,b) in enumerate(zip(st,st.body)):
assert a==b
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_iter'
assert last_state.content["state_def"]["index"] == i
| 35
| 87
| 0.666828
|
"""Test string
"""
import ARgorithmToolkit
algo = ARgorithmToolkit.StateSet()
st = ARgorithmToolkit.String('st', algo, "Hello world! 1234")
def test_body():
"""Test string contents
"""
assert st.body == "Hello world! 1234"
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_declare'
assert last_state.content["state_def"]["body"] == "Hello world! 1234"
def test_append():
"""Test string append
"""
global st
st.append(" Hahaha")
assert st.body == "Hello world! 1234 Hahaha"
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_append'
assert last_state.content["state_def"]["element"] == " Hahaha"
st+='xyz'
assert st.body == "Hello world! 1234 Hahahaxyz"
last_state = algo.states[-1]
second_last_state = algo.states[-2]
assert last_state.content["state_type"] == 'string_append'
assert last_state.content["state_def"]["element"] == "xyz"
assert second_last_state.content["state_type"] == 'string_declare'
assert second_last_state.content["state_def"]["body"] == "Hello world! 1234 Hahaha"
assert second_last_state.content["state_def"]["variable_name"] == "st_super"
def test_indexing():
"""Test string indexing
"""
assert st[1] == st.body[1]
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_iter'
assert last_state.content["state_def"]["index"] == 1
subst = st[1:3]
assert isinstance(subst,ARgorithmToolkit.String)
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_declare'
assert last_state.content["state_def"]["variable_name"] == 'st_super_sub'
assert last_state.content["state_def"]["body"] == st.body[1:3]
def test_iteration():
"""Test string iteration
"""
for i,(a,b) in enumerate(zip(st,st.body)):
assert a==b
last_state = algo.states[-1]
assert last_state.content["state_type"] == 'string_iter'
assert last_state.content["state_def"]["index"] == i
| 0
| 0
| 0
|
62a9fa8ca3c66a2ada506b599b4e445bbfd93542
| 14,282
|
py
|
Python
|
scraper/TA_scrapy/spiders/restoSpiderReview.py
|
elalamik/NLP_Capgemini_Data_Camp
|
31143116e02dad07a379bb81524cdc0e1fe796bd
|
[
"MIT"
] | null | null | null |
scraper/TA_scrapy/spiders/restoSpiderReview.py
|
elalamik/NLP_Capgemini_Data_Camp
|
31143116e02dad07a379bb81524cdc0e1fe796bd
|
[
"MIT"
] | null | null | null |
scraper/TA_scrapy/spiders/restoSpiderReview.py
|
elalamik/NLP_Capgemini_Data_Camp
|
31143116e02dad07a379bb81524cdc0e1fe796bd
|
[
"MIT"
] | 1
|
2021-02-09T18:33:10.000Z
|
2021-02-09T18:33:10.000Z
|
from logzero import logger
import logzero
import logging
import glob
import pandas as pd
# Scrapy packages
import scrapy
import requests
from scrapy.selector import Selector
from TA_scrapy.items import ReviewRestoItem, RestoItem, UserItem
from TA_scrapy.spiders import get_info
# Chromedriver package and options
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
| 44.354037
| 126
| 0.624422
|
from logzero import logger
import logzero
import logging
import glob
import pandas as pd
# Scrapy packages
import scrapy
import requests
from scrapy.selector import Selector
from TA_scrapy.items import ReviewRestoItem, RestoItem, UserItem
from TA_scrapy.spiders import get_info
# Chromedriver package and options
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
class RestoReviewSpider(scrapy.Spider):
name = "RestoReviewSpider"
def __init__(self, directory='./scraped_data/',
root_url='https://www.tripadvisor.co.uk/Restaurants-g191259-Greater_London_England.html',
debug=0, nb_resto=100, maxpage_reviews=50,
scrap_user=1, scrap_website_menu=0,
only_bokan = False, *args, **kwargs):
super(RestoReviewSpider, self).__init__(*args, **kwargs)
# Set logging level
logzero.loglevel(int(debug))
if int(debug) == 0 :
logging.disable(logging.DEBUG)
# Setting the list of already scraped restaurants
existing_jsons = glob.glob("./scraped_data/restaurants/*.json")
logger.warn(f' > FINDING EXISTING JSONS {existing_jsons}')
self.already_scraped_restaurants = []
self.next_file_id = len(existing_jsons) + 1
for json in existing_jsons:
json_df = pd.read_json(json, lines=True)
restaurants = json_df['resto_TA_url'].to_list()
restaurants = [resto.split("https://www.tripadvisor.co.uk")[1] for resto in restaurants]
self.already_scraped_restaurants += restaurants
# User defined parameters
self.directory = directory
self.root_url = root_url
self.maxpage_reviews = int(maxpage_reviews)
self.scrap_user = int(scrap_user)
self.scrap_website_menu = int(scrap_website_menu)
self.nb_resto = int(nb_resto)
        self.only_bokan = only_bokan
        # Assumed value: the price-parsing branch in parse_resto compares against self.currency,
        # which was never defined in the original code; "£" matches the tripadvisor.co.uk pages
        # this spider targets.
        self.currency = "£"
# To track the evolution of scrapping
self.resto_offset = len(self.already_scraped_restaurants)
self.review_offset = self.get_review_offset()
self.main_nb = 0
self.resto_nb = 0
self.review_nb = 0
self.restaurants_ids = []
logger.warn(f"FINDING {self.resto_offset} EXISTING RESTAURANTS")
logger.warn(f"FINDING {self.review_offset} EXISTING REVIEWS")
def get_review_offset(self):
review_offset = 0
existing_reviews = glob.glob("./scraped_data/reviews/*.json")
for json in existing_reviews:
with open(json, "r") as child_file:
for line in child_file:
review_offset += 1
return review_offset
def start_requests(self):
""" Give the urls to follow to scrapy
- function automatically called when using "scrapy crawl my_spider"
"""
# Basic restaurant page on TripAdvisor GreaterLondon
yield scrapy.Request(url=self.root_url, callback=self.parse)
def parse(self, response):
""" MAIN PARSING : Start from a classical reastaurant page
- Usually there are 30 restaurants per page
"""
logger.info(' > PARSING NEW MAIN PAGE OF RESTO ({})'.format(self.main_nb))
self.main_nb += 1
        # Get the list of restaurants on the page (usually about 30)
restaurant_urls = get_info.get_urls_resto_in_main_search_page(response)
restaurant_new_urls = set(restaurant_urls) - set(self.already_scraped_restaurants)
logger.warn(f'> FINDING : {len(restaurant_urls) - len(restaurant_new_urls)} RESTAURANTS ALREADY SCRAPED IN THIS PAGE')
# For each url : follow restaurant url to get the reviews
for restaurant_url in restaurant_new_urls:
logger.info('> New restaurant detected : {}'.format(restaurant_url))
self.resto_nb += 1
if self.resto_nb > self.nb_resto:
return None
if self.only_bokan is False or "bokan" in restaurant_url.lower():
yield response.follow(url=restaurant_url, callback=self.parse_review_page,
cb_kwargs=dict(restaurant_id=self.resto_nb))
# Get next page information
next_page, next_page_number = get_info.get_urls_next_list_of_restos(response)
# Follow the page if we decide to
if get_info.go_to_next_page(next_page, next_page_number, max_page=None):
yield response.follow(next_page, callback=self.parse)
def parse_review_page(self, response, restaurant_id):
""" SECOND PARSING : Given a review page, gets each review url and get to parse it
- Usually there are 10 reviews per page
"""
try:
if "bokan" in response.url:
logger.critical(f"FOUND BOKAN IN URL {response.url}")
except:
pass
logger.info(' > PARSING NEW REVIEW PAGE')
# Parse the restaurant if it has not been parsed yet
if restaurant_id not in self.restaurants_ids:
yield self.parse_resto(response, restaurant_id)
self.restaurants_ids.append(restaurant_id)
# Get the list of reviews on the page
urls_review = get_info.get_urls_reviews_in_review_page(response)
# For each review open the link and parse it into the parse_review method
for url_review in urls_review:
yield response.follow(url=url_review, callback=self.parse_review,
cb_kwargs=dict(restaurant_id=restaurant_id))
# Get next page information
next_page, next_page_number = get_info.get_urls_next_list_of_reviews(response)
# Follow the page if we decide to
if get_info.go_to_next_page(next_page, next_page_number, max_page=self.maxpage_reviews):
yield response.follow(next_page, callback=self.parse_review_page,
cb_kwargs=dict(restaurant_id=restaurant_id))
def parse_resto(self, response, restaurant_id):
""" Create Restaurant Item saved in specific JSON file """
logger.info(' > PARSING NEW RESTO ({})'.format(restaurant_id - 1))
xpath_name = '//h1[@class="_3a1XQ88S"]/text()'
xpath_nb_reviews = '//div[@class="_1ud-0ITN"]/span/a/span/text()'
xpath_price_cuisine = '//span[@class="_13OzAOXO _34GKdBMV"]//a/text()'
xpath_phone_number = '//div[@class="_1ud-0ITN"]/span/span/span/a/text()'
xpath_website = '//a[@class="_2wKz--mA _15QfMZ2L"]/@data-encoded-url'
xpath_ranking = '//*[@id="component_44"]/div/div[2]/span[2]/a/span/b/span/text()'
xpath_ranking_out_of = '//span[@class="_13OzAOXO _2VxaSjVD"]/a/span/text()'
xpath_rating = '//div[@class="_1ud-0ITN"]/span/a/svg/@title'
xpath_address = '//span[@class="_13OzAOXO _2VxaSjVD"]/span[1]/a/text()'
resto_item = RestoItem()
resto_item['restaurant_id'] = restaurant_id + self.resto_offset
resto_item['name'] = response.xpath(xpath_name).get()
resto_item['resto_TA_url'] = response.url
resto_item['nb_reviews'] = response.xpath(xpath_nb_reviews).get()
price_cuisine = response.xpath(xpath_price_cuisine).getall()
# Retrieve price in the right format
raw_price = -1
try:
raw_price = price_cuisine[0]
except:
resto_item['min_price'] = None
resto_item['max_price'] = None
resto_item['cuisine'] = []
else:
try:
min_price, max_price = raw_price.split(' - ')
resto_item['min_price'] = len(min_price)
resto_item['max_price'] = len(max_price)
resto_item['cuisine'] = price_cuisine[1:]
except ValueError:
if raw_price == len(raw_price) * self.currency:
resto_item['min_price'] = len(raw_price)
resto_item['max_price'] = len(raw_price)
resto_item['cuisine'] = price_cuisine[1:]
else:
resto_item['min_price'] = None
resto_item['max_price'] = None
resto_item['cuisine'] = price_cuisine
resto_item['address'] = response.xpath(xpath_address).get()
resto_item['phone_number'] = response.xpath(xpath_phone_number).get()
# Scrap websites and menus depending on user input
if self.scrap_website_menu:
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chrome_options)
driver.get(response.url)
# Catch website (use Selenium as URL is generated by JS)
website = driver.find_element_by_class_name('_2wKz--mA')
website_url = website.get_attribute('href')
if website_url is None:
resto_item['website'] = 'Website not found'
else:
resto_item['website'] = website_url
# Catch menu
menu = driver.find_elements_by_xpath('//span[@class="_13OzAOXO _2VxaSjVD ly1Ix1xT"]/a')
try:
resto_item['menu'] = menu[0].get_attribute('href')
except IndexError:
resto_item['menu'] = 'Menu not found'
else:
resto_item['website'] = 'Website not scraped'
resto_item['menu'] = 'Menu not scraped'
if response.xpath(xpath_ranking).get() is not None and response.xpath(xpath_ranking_out_of).get() is not None:
resto_item['ranking'] = response.xpath(xpath_ranking).get() + response.xpath(xpath_ranking_out_of).get()
else:
resto_item['ranking'] = 'Ranking not found'
resto_item['rating'] = response.xpath(xpath_rating).get().split()[0]
return resto_item
def parse_review(self, response, restaurant_id):
""" FINAL PARSING : Open a specific page with review and client opinion
- Read these data and store them
- Get all the data you can find and that you believe interesting
"""
logger.debug(' > PARSING NEW REVIEW ({})'.format(self.review_nb))
if self.review_nb % 100 == 0:
logger.info(' > PARSING NEW REVIEW ({})'.format(self.review_nb))
self.review_nb += 1
xpath_username = '//div[@class="username mo"]/span/text()'
xpath_date_of_visit = '//div[@class="prw_rup prw_reviews_stay_date_hsx"]/text()'
xpath_date_of_review = '//span[@class="ratingDate relativeDate"]/@title'
xpath_rating = '//div[@class="rating reviewItemInline"]/span[1]/@class'
xpath_title = '//div[@class="quote"]/a/span/text()'
xpath_comment = '(//p[@class="partial_entry"])[1]/text()'
date_of_review = response.xpath(xpath_date_of_review).get()
if date_of_review is None:
xpath_date_of_review = '//span[@class="ratingDate"]/@title'
date_of_review = response.xpath(xpath_date_of_review).get()
review_item = ReviewRestoItem()
review_item['review_id'] = self.review_nb + self.review_offset
review_item['restaurant_id'] = restaurant_id + self.resto_offset
username = response.xpath(xpath_username).get()
review_item['username'] = username
review_item['date_of_visit'] = response.xpath(xpath_date_of_visit).get()
review_item['rating'] = response.xpath(xpath_rating).get()[-2]
review_item['title'] = response.xpath(xpath_title).get()
review_item['comment'] = ' '.join(response.xpath(xpath_comment).getall())
review_item['date_of_review'] = date_of_review
yield review_item
# Scrap user if wanted and username in correct format (no spaces)
if (self.scrap_user != 0) and (" " not in username):
yield response.follow(url="https://www.tripadvisor.co.uk/Profile/" + username,
callback=self.parse_user, cb_kwargs=dict(username=username))
def parse_user(self, response, username):
""" Create User Item saved in specific JSON file """
xpath_fullname = '//span[@class="_2wpJPTNc _345JQp5A"]/text()'
xpath_date_joined = '//span[@class="_1CdMKu4t"]/text()'
xpath_all = '//a[@class="_1q4H5LOk"]/text()'
xpath_nb_followers = '//div[@class="_1aVEDY08"][2]/span[@class="iX3IT_XP"]/text()'
xpath_nb_following = '//div[@class="_1aVEDY08"][3]/span[@class="iX3IT_XP"]/text()'
xpath_location = '//span[@class="_2VknwlEe _3J15flPT default"]/text()'
user_item = UserItem()
user_item['username'] = username
user_item['fullname'] = response.xpath(xpath_fullname).get()
user_item['date_joined'] = response.xpath(xpath_date_joined).get()
user_item['location'] = response.xpath(xpath_location).get()
# Retrieve info about nb of contributions, nb of followers and nb of following
all_infos = response.xpath(xpath_all).getall()
# Assign info to correct field
if len(all_infos) == 3:
user_item['nb_contributions'] = int(all_infos[0].replace(',',''))
user_item['nb_followers'] = int(all_infos[1].replace(',',''))
user_item['nb_following'] = int(all_infos[2].replace(',',''))
elif len(all_infos) == 2:
user_item['nb_contributions'] = int(all_infos[0].replace(',',''))
nb_followers = response.xpath(xpath_nb_followers).get()
if nb_followers is None:
user_item['nb_followers'] = int(all_infos[1].replace(',',''))
user_item['nb_following'] = 0
else:
user_item['nb_followers'] = 0
user_item['nb_following'] = int(all_infos[1].replace(',',''))
elif len(all_infos) == 1:
user_item['nb_contributions'] = int(all_infos[0].replace(',',''))
user_item['nb_followers'] = 0
user_item['nb_following'] = 0
yield user_item
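# Illustrative invocation (not part of the original file): the spider above is normally
# launched from the Scrapy project directory, and the -a flags map onto the __init__
# keyword arguments defined above, e.g.
#
#   scrapy crawl RestoReviewSpider -a nb_resto=10 -a maxpage_reviews=5 -a scrap_user=0
#
# The JSON output locations (./scraped_data/restaurants/*.json, ./scraped_data/reviews/*.json)
# are produced by item pipelines configured outside this file.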
| 2,179
| 11,491
| 23
|
58737f5795ad4f2ff4461e48442256d05959521e
| 2,304
|
py
|
Python
|
keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py
|
sanket4373/keystone
|
7cf7e7497729803f0470167315af9349b88fe0ec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sqlalchemy as sql
from sqlalchemy import orm
from keystone import config
CONF = config.CONF
| 33.391304
| 78
| 0.661892
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sqlalchemy as sql
from sqlalchemy import orm
from keystone import config
CONF = config.CONF
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
endpoint_table = sql.Table('endpoint', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
for endpoint in session.query(endpoint_table).all():
try:
extra = json.loads(endpoint.extra)
legacy_endpoint_id = extra.pop('legacy_endpoint_id')
except KeyError:
# if there is no legacy_endpoint_id, there's nothing to do
pass
else:
q = endpoint_table.update()
q = q.where(endpoint_table.c.id == endpoint.id)
q = q.values({
endpoint_table.c.extra: json.dumps(extra),
endpoint_table.c.legacy_endpoint_id: legacy_endpoint_id})
migrate_engine.execute(q)
session.close()
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
endpoint_table = sql.Table('endpoint', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
for endpoint in session.query(endpoint_table).all():
if endpoint.legacy_endpoint_id is not None:
extra = json.loads(endpoint.extra)
extra['legacy_endpoint_id'] = endpoint.legacy_endpoint_id
q = endpoint_table.update()
q = q.where(endpoint_table.c.id == endpoint.id)
q = q.values({
endpoint_table.c.extra: json.dumps(extra),
endpoint_table.c.legacy_endpoint_id: None})
migrate_engine.execute(q)
session.close()
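# Before/after illustration (not part of the original migration) of what upgrade() does to
# one endpoint row, using made-up values:
#
#   before: extra = '{"legacy_endpoint_id": "abc123", "foo": "bar"}', legacy_endpoint_id = NULL
#   after:  extra = '{"foo": "bar"}',                                 legacy_endpoint_id = 'abc123'
#
# downgrade() reverses this by folding legacy_endpoint_id back into the extra JSON blob.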
| 1,528
| 0
| 46
|
9d26b7db93c9e456352fc078aed365cac899c4ce
| 4,165
|
py
|
Python
|
src/test/test_scene.py
|
jielyu/animations
|
1e7b1f54a5379082e97de4c66332fe3dd6302803
|
[
"MIT"
] | null | null | null |
src/test/test_scene.py
|
jielyu/animations
|
1e7b1f54a5379082e97de4c66332fe3dd6302803
|
[
"MIT"
] | null | null | null |
src/test/test_scene.py
|
jielyu/animations
|
1e7b1f54a5379082e97de4c66332fe3dd6302803
|
[
"MIT"
] | null | null | null |
"""测试场景组件的使用"""
from manimlib.imports import *
class Graph2DExample(GraphScene):
"""二维坐标图实例"""
CONFIG = {
"x_min": -1,
"x_max": 6,
"x_axis_width": 10,
"x_axis_label": "time",
#"x_label_color": RED,
"y_min": -1,
"y_max": 20,
"y_axis_height": 8,
"y_axis_label": "amp",
#"y_label_color": YELLOW,
"y_tick_frequency": 1,
}
class ThreeDExample(ThreeDScene):
"""三维场景实例"""
class MovingCameraExample(MovingCameraScene):
"""运动摄像机实例"""
class SampleSpaceExample(SampleSpaceScene):
"""概率采样空间实例"""
class ZoomedExample(ZoomedScene):
"""缩放摄像机实例"""
class VectorExample(LinearTransformationScene):
"""向量场实例"""
class ConfigSceneExample(Scene):
"""CONFIG参数修改设置实例"""
CONFIG = {
"camera_config": {
"frame_rate": 30,
},
}
class UpdateExample(Scene):
"""更新器设置实例"""
class CoorExample(Scene):
"""三维坐标轴例程"""
| 24.215116
| 78
| 0.543818
|
"""测试场景组件的使用"""
from manimlib.imports import *
class Graph2DExample(GraphScene):
"""二维坐标图实例"""
CONFIG = {
"x_min": -1,
"x_max": 6,
"x_axis_width": 10,
"x_axis_label": "time",
#"x_label_color": RED,
"y_min": -1,
"y_max": 20,
"y_axis_height": 8,
"y_axis_label": "amp",
#"y_label_color": YELLOW,
"y_tick_frequency": 1,
}
def func(self, x):
return x**2
def construct(self):
self.setup_axes(animate=True)
graph = self.get_graph(self.func, color=GREEN, x_min=0, x_max=4)
graph.move_to(DOWN)
self.play(ShowCreation(graph), run_time=2)
self.wait()
class ThreeDExample(ThreeDScene):
"""三维场景实例"""
def construct(self):
axes = ThreeDAxes()
self.add(axes)
self.set_camera_orientation(phi=80 * DEGREES,theta=-60*DEGREES)
self.begin_ambient_camera_rotation(rate=0.1)
self.wait()
class MovingCameraExample(MovingCameraScene):
"""运动摄像机实例"""
def construct(self):
t = TextMobject('Hello, World')
self.play(Write(t))
self.camera.set_frame_center(UR*3)
self.wait()
class SampleSpaceExample(SampleSpaceScene):
"""概率采样空间实例"""
def construct(self):
ss = self.get_sample_space()
self.play(Write(ss))
self.wait()
class ZoomedExample(ZoomedScene):
"""缩放摄像机实例"""
def construct(self):
t = TextMobject('Hello, World')
self.play(Write(t))
self.activate_zooming()
self.wait(5)
class VectorExample(LinearTransformationScene):
"""向量场实例"""
def construct(self):
self.add_vector(UR*2)
self.add_title('Hello')
self.wait(2)
class ConfigSceneExample(Scene):
"""CONFIG参数修改设置实例"""
CONFIG = {
"camera_config": {
"frame_rate": 30,
},
}
def construct(self):
t = TexMobject("A", "{B", "\\over", "C}", "D", "E")
t[0].set_color(RED)
t[1].set_color(ORANGE)
t[2].set_color(YELLOW)
t[3].set_color(GREEN)
t[4].set_color(BLUE)
t[5].set_color(BLUE)
self.play(Write(t))
self.wait(2)
t.shift(LEFT*2)
self.play(Write(t))
self.wait()
class UpdateExample(Scene):
"""更新器设置实例"""
def construct(self):
dot = Dot()
text = TextMobject('Updater')
text.next_to(dot, RIGHT*2, buff=SMALL_BUFF)
self.add(dot, text)
def update(obj):
obj.next_to(dot, RIGHT*2, buff=SMALL_BUFF)
text.add_updater(update)
self.add(text)
self.play(dot.shift, UP * 2)
self.wait()
self.play(dot.shift, DOWN * 2, rate_func=smooth)
self.wait()
text.remove_updater(update)
self.wait()
class CoorExample(Scene):
"""三维坐标轴例程"""
def construct(self):
# NUmberLine
nl = NumberLine()
self.play(Write(nl))
self.wait()
self.remove(nl)
# Axes
coor = Axes()
self.play(Write(coor))
self.wait()
self.remove(coor)
# ThreeDAxes
coor3d = ThreeDAxes()
self.play(Write(coor3d))
self.wait()
self.remove(coor3d)
# NumberPlane
np = NumberPlane()
self.play(Write(np))
self.wait()
self.remove(np)
# ComplexPlane
cp = ComplexPlane(
y_axis_config={"decimal_number_config":{"unit": "i"}},
number_line_config={"include_numbers":True}
)
x_axis = cp[-2]
y_axis = cp[-1]
x_axis.set_color(RED)
y_axis.set_color(PURPLE)
x_labels = x_axis[0]
x_labels.set_color(ORANGE)
y_labels = y_axis[0]
y_labels.set_color(YELLOW)
for y in y_labels:
y.rotate(-PI/2)
x_label = TexMobject("x")
x_label.move_to(cp.c2p(1.8,x_label.get_height()))
y_label = TexMobject("y")
y_label.move_to(cp.c2p(-3.8,3.8))
print(cp.c2p(-1,1))
self.add(cp,x_label,y_label)
self.wait(5)
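# Illustrative note (not part of the original file): with the classic 3b1b manimlib that
# `from manimlib.imports import *` belongs to, a scene from this file is typically rendered
# with something like
#
#   python -m manim test_scene.py CoorExample -pl
#
# where -p previews the result and -l selects low quality; exact flags depend on the
# manimlib version installed.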
| 2,935
| 0
| 269
|
25dc4b4b6fd6e5bca2f99c074b011050a886b11c
| 17,399
|
py
|
Python
|
iPERCore/tools/human_pose2d_estimators/openpose/post_process.py
|
JSssssss/iPERCore
|
510ae3ef5cac9e2fc0cda7a72cdc8b1962719431
|
[
"Apache-2.0"
] | 2,223
|
2020-11-19T02:16:07.000Z
|
2022-03-30T01:54:11.000Z
|
iPERCore/tools/human_pose2d_estimators/openpose/post_process.py
|
JSssssss/iPERCore
|
510ae3ef5cac9e2fc0cda7a72cdc8b1962719431
|
[
"Apache-2.0"
] | 131
|
2020-11-19T06:15:00.000Z
|
2022-01-24T07:52:21.000Z
|
iPERCore/tools/human_pose2d_estimators/openpose/post_process.py
|
JSssssss/iPERCore
|
510ae3ef5cac9e2fc0cda7a72cdc8b1962719431
|
[
"Apache-2.0"
] | 286
|
2020-11-19T07:30:58.000Z
|
2022-03-03T13:23:41.000Z
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import cv2
import torch
import numpy as np
import math
from operator import itemgetter
from .dataset import normalize, pad_width
def infer_fast_post_process(net_outputs, PoseClass):
"""
Args:
net_outputs (dict): the output of the networks, and it contains,
--heatmaps:
--pafs:
PoseClass (type of tools.human_pose2d_estimators.utils.pose_utils.OpenPoseBody25):
Returns:
        outputs (dict): the output results, and it contains the following keys,
--pose_entries:
--all_keypoints:
--current_poses:
"""
heatmaps = net_outputs["heatmaps"]
pafs = net_outputs["pafs"]
pad = net_outputs["pad"]
scale = net_outputs["scale"]
stride = net_outputs["stride"]
upsample_ratio = net_outputs["upsample_ratio"]
height, width = net_outputs["orig_shape"]
num_keypoints = PoseClass.num_kpts
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, PoseClass, demo=True)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
if len(all_keypoints):
all_keypoints[:, 0] = np.clip(all_keypoints[:, 0], 0, width)
all_keypoints[:, 1] = np.clip(all_keypoints[:, 1], 0, height)
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.zeros((num_keypoints, 3), dtype=all_keypoints.dtype)
for kpt_id in range(num_keypoints):
kpt_num_id = int(pose_entries[n][kpt_id])
if kpt_num_id != -1: # keypoint was found
pose_keypoints[kpt_id] = all_keypoints[kpt_num_id, 0:3]
else:
pose_keypoints[kpt_id, 0:2] = -1.0
# print(n, pose_keypoints)
pose = PoseClass(pose_keypoints, pose_entries[n][-2])
current_poses.append(pose)
outputs = {
"pose_entries": pose_entries,
"all_keypoints": all_keypoints,
"current_poses": current_poses
}
return outputs
| 43.389027
| 119
| 0.587103
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import cv2
import torch
import numpy as np
import math
from operator import itemgetter
from .dataset import normalize, pad_width
def infer(net, img, scales, base_height, stride, device, num_kpts=25,
pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1 / 256):
normed_img = normalize(img, img_mean, img_scale)
height, width, _ = normed_img.shape
scales_ratios = [scale * base_height / float(height) for scale in scales]
avg_heatmaps = np.zeros((height, width, num_kpts + 1), dtype=np.float32)
avg_pafs = np.zeros((height, width, num_kpts * 2 + 2), dtype=np.float32)
for ratio in scales_ratios:
scaled_img = cv2.resize(normed_img, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)
min_dims = [base_height, max(scaled_img.shape[1], base_height)]
padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float().to(device)
stages_output = net(tensor_img)
stage2_heatmaps = stages_output[-2]
heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3]:, :]
heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC)
avg_heatmaps = avg_heatmaps + heatmaps / len(scales_ratios)
stage2_pafs = stages_output[-1]
pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :]
pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC)
avg_pafs = avg_pafs + pafs / len(scales_ratios)
outputs = {
"heatmaps": avg_heatmaps,
"pafs": avg_pafs
}
return outputs
def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, device,
pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1 / 256):
height, width, _ = img.shape
scale = net_input_height_size / height
scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
scaled_img = normalize(scaled_img, img_mean, img_scale)
min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
tensor_img = tensor_img.to(device)
stages_output = net(tensor_img)
stage2_heatmaps = stages_output[-2]
heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
stage2_pafs = stages_output[-1]
pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
outputs = {
"heatmaps": heatmaps,
"pafs": pafs,
"pad": pad,
"scale": scale,
"upsample_ratio": upsample_ratio,
"stride": stride,
"orig_shape": (height, width)
}
return outputs
def linspace2d(start, stop, n=10):
points = 1 / (n - 1) * (stop - start)
return points[:, None] * np.arange(n) + start[:, None]
def extract_keypoints(heatmap, all_keypoints, total_keypoint_num):
heatmap[heatmap < 0.1] = 0
heatmap_with_borders = np.pad(heatmap, [(2, 2), (2, 2)], mode='constant')
heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 1:heatmap_with_borders.shape[1] - 1]
heatmap_left = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 2:heatmap_with_borders.shape[1]]
heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 0:heatmap_with_borders.shape[1] - 2]
heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1] - 1]
heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0] - 2, 1:heatmap_with_borders.shape[1] - 1]
heatmap_peaks = (heatmap_center > heatmap_left) & \
(heatmap_center > heatmap_right) & \
(heatmap_center > heatmap_up) & \
(heatmap_center > heatmap_down)
heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0] - 1, 1:heatmap_center.shape[1] - 1]
keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0])) # (w, h)
keypoints = sorted(keypoints, key=itemgetter(0))
suppressed = np.zeros(len(keypoints), np.uint8)
keypoints_with_score_and_id = []
keypoint_num = 0
for i in range(len(keypoints)):
if suppressed[i]:
continue
for j in range(i + 1, len(keypoints)):
if math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 +
(keypoints[i][1] - keypoints[j][1]) ** 2) < 6:
suppressed[j] = 1
keypoint_with_score_and_id = (keypoints[i][0], keypoints[i][1], heatmap[keypoints[i][1], keypoints[i][0]],
total_keypoint_num + keypoint_num)
keypoints_with_score_and_id.append(keypoint_with_score_and_id)
keypoint_num += 1
all_keypoints.append(keypoints_with_score_and_id)
return keypoint_num
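# Tiny synthetic check (not part of the original file) of extract_keypoints above; the
# heatmap size and value are made up, and because of the module's relative import this only
# runs inside the package context. A single bright pixel should come back as one keypoint
# in (x, y, score, id) order.
if __name__ == "__main__":
    demo_heat = np.zeros((30, 30), dtype=np.float32)
    demo_heat[10, 12] = 0.9                      # one clear local maximum
    demo_kpts = []
    found = extract_keypoints(demo_heat, demo_kpts, total_keypoint_num=0)
    print(found, demo_kpts)                      # -> 1 [(12, 10, 0.9, 0)]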
def group_keypoints(all_keypoints_by_type, pafs, PoseClass, min_paf_score=0.05, demo=False):
BODY_PARTS_KPT_IDS = PoseClass.BODY_PARTS_KPT_IDS
BODY_PARTS_PAF_IDS = PoseClass.BODY_PARTS_PAF_IDS
pose_entry_size = PoseClass.pose_entry_size
pose_entries = []
all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist])
for part_id in range(len(BODY_PARTS_PAF_IDS)):
part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]]
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]] # [(x, y, s, part_id), ..., (x, y, s, part_id)]
kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]] # [(x, y, s, part_id), ..., (x, y, s, part_id)]
num_kpts_a = len(kpts_a)
num_kpts_b = len(kpts_b)
# ipdb.set_trace()
if num_kpts_a == 0 and num_kpts_b == 0: # no keypoints for such body part
continue
elif num_kpts_a == 0: # body part has just 'b' keypoints
for i in range(num_kpts_b):
num = 0
for j in range(len(pose_entries)): # check if already in some pose, was added by another body part
if pose_entries[j][kpt_b_id] == kpts_b[i][3]:
num += 1
continue
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_b_id] = kpts_b[i][3] # keypoint idx
pose_entry[-1] = 1 # num keypoints in pose
pose_entry[-2] = kpts_b[i][2] # pose score
pose_entries.append(pose_entry)
continue
elif num_kpts_b == 0: # body part has just 'a' keypoints
for i in range(num_kpts_a):
num = 0
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == kpts_a[i][3]:
num += 1
continue
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_a_id] = kpts_a[i][3]
pose_entry[-1] = 1
pose_entry[-2] = kpts_a[i][2]
pose_entries.append(pose_entry)
continue
# ipdb.set_trace()
connections = []
for i in range(num_kpts_a):
kpt_a = np.array(kpts_a[i][0:2])
for j in range(num_kpts_b):
kpt_b = np.array(kpts_b[j][0:2])
mid_point = [(), ()]
mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)),
int(round((kpt_a[1] + kpt_b[1]) * 0.5)))
mid_point[1] = mid_point[0]
vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]]
vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2)
if vec_norm == 0:
continue
vec[0] /= vec_norm
vec[1] /= vec_norm
cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] +
vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1])
height_n = pafs.shape[0] // 2
success_ratio = 0
                    point_num = 10  # number of points to integrate over along the paf
if cur_point_score > -100:
passed_point_score = 0
passed_point_num = 0
x, y = linspace2d(kpt_a, kpt_b)
for point_idx in range(point_num):
if not demo:
px = int(round(x[point_idx]))
py = int(round(y[point_idx]))
else:
px = int(x[point_idx])
py = int(y[point_idx])
paf = part_pafs[py, px, 0:2]
cur_point_score = vec[0] * paf[0] + vec[1] * paf[1]
if cur_point_score > min_paf_score:
passed_point_score += cur_point_score
passed_point_num += 1
success_ratio = passed_point_num / point_num
ratio = 0
if passed_point_num > 0:
ratio = passed_point_score / passed_point_num
ratio += min(height_n / vec_norm - 1, 0)
if ratio > 0 and success_ratio > 0.8:
score_all = ratio + kpts_a[i][2] + kpts_b[j][2]
connections.append([i, j, ratio, score_all])
# ipdb.set_trace()
if len(connections) > 0:
connections = sorted(connections, key=itemgetter(2), reverse=True)
num_connections = min(num_kpts_a, num_kpts_b)
has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32)
has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32)
filtered_connections = []
for row in range(len(connections)):
if len(filtered_connections) == num_connections:
break
i, j, cur_point_score = connections[row][0:3]
if not has_kpt_a[i] and not has_kpt_b[j]:
filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score])
has_kpt_a[i] = 1
has_kpt_b[j] = 1
connections = filtered_connections
if len(connections) == 0:
continue
# ipdb.set_trace()
if part_id == 0:
pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))]
for i in range(len(connections)):
pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0]
pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1]
pose_entries[i][-1] = 2
pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
# elif part_id == 17 or part_id == 18:
elif part_id == 18 or part_id == 19:
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
for i in range(len(connections)):
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1:
pose_entries[j][kpt_b_id] = connections[i][1]
elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1:
pose_entries[j][kpt_a_id] = connections[i][0]
continue
else:
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
for i in range(len(connections)):
num = 0
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == connections[i][0]:
pose_entries[j][kpt_b_id] = connections[i][1]
num += 1
pose_entries[j][-1] += 1
pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2]
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_a_id] = connections[i][0]
pose_entry[kpt_b_id] = connections[i][1]
pose_entry[-1] = 2
pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
pose_entries.append(pose_entry)
filtered_entries = []
for i in range(len(pose_entries)):
if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2):
continue
filtered_entries.append(pose_entries[i])
pose_entries = np.asarray(filtered_entries)
return pose_entries, all_keypoints
def infer_fast_post_process(net_outputs, PoseClass):
"""
Args:
net_outputs (dict): the output of the networks, and it contains,
--heatmaps:
--pafs:
PoseClass (type of tools.human_pose2d_estimators.utils.pose_utils.OpenPoseBody25):
Returns:
        outputs (dict): the output results, and it contains the following keys,
--pose_entries:
--all_keypoints:
--current_poses:
"""
heatmaps = net_outputs["heatmaps"]
pafs = net_outputs["pafs"]
pad = net_outputs["pad"]
scale = net_outputs["scale"]
stride = net_outputs["stride"]
upsample_ratio = net_outputs["upsample_ratio"]
height, width = net_outputs["orig_shape"]
num_keypoints = PoseClass.num_kpts
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, PoseClass, demo=True)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
if len(all_keypoints):
all_keypoints[:, 0] = np.clip(all_keypoints[:, 0], 0, width)
all_keypoints[:, 1] = np.clip(all_keypoints[:, 1], 0, height)
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.zeros((num_keypoints, 3), dtype=all_keypoints.dtype)
for kpt_id in range(num_keypoints):
kpt_num_id = int(pose_entries[n][kpt_id])
if kpt_num_id != -1: # keypoint was found
pose_keypoints[kpt_id] = all_keypoints[kpt_num_id, 0:3]
else:
pose_keypoints[kpt_id, 0:2] = -1.0
# print(n, pose_keypoints)
pose = PoseClass(pose_keypoints, pose_entries[n][-2])
current_poses.append(pose)
outputs = {
"pose_entries": pose_entries,
"all_keypoints": all_keypoints,
"current_poses": current_poses
}
return outputs
def infer_post_process(net_outputs, PoseClass):
avg_heatmaps = net_outputs["heatmaps"]
avg_pafs = net_outputs["pafs"]
num_keypoints = PoseClass.num_kpts
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(avg_heatmaps[:, :, kpt_idx], all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, avg_pafs, PoseClass, demo=True)
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.zeros((num_keypoints, 3), dtype=all_keypoints.dtype)
for kpt_id in range(num_keypoints):
kpt_num_id = int(pose_entries[n][kpt_id])
if kpt_num_id != -1: # keypoint was found
pose_keypoints[kpt_id] = all_keypoints[kpt_num_id, 0:3]
else:
pose_keypoints[kpt_id, 0:2] = -1.0
# print(n, pose_keypoints)
pose = PoseClass(pose_keypoints, pose_entries[n][-2])
current_poses.append(pose)
outputs = {
"pose_entries": pose_entries,
"all_keypoints": all_keypoints,
"current_poses": current_poses
}
return outputs
| 14,694
| 0
| 138
|
5e0f63bb4a3c9df4fcdd6ef680c2df73f869edf7
| 2,018
|
py
|
Python
|
1.3.BayesianInference/exercises/srcs/7/challenger.py
|
mihaighidoveanu/machine-learning-examples
|
e5a7ab71e52ae2809115eb7d7c943b46ebf394f3
|
[
"MIT"
] | null | null | null |
1.3.BayesianInference/exercises/srcs/7/challenger.py
|
mihaighidoveanu/machine-learning-examples
|
e5a7ab71e52ae2809115eb7d7c943b46ebf394f3
|
[
"MIT"
] | null | null | null |
1.3.BayesianInference/exercises/srcs/7/challenger.py
|
mihaighidoveanu/machine-learning-examples
|
e5a7ab71e52ae2809115eb7d7c943b46ebf394f3
|
[
"MIT"
] | 1
|
2021-05-02T13:12:21.000Z
|
2021-05-02T13:12:21.000Z
|
import numpy as np
import pymc as pm
from matplotlib import pyplot as plt
challenger_data = np.genfromtxt("challenger_data.csv", skip_header=1,
usecols=[1, 2], missing_values="NA",
delimiter=",")
# drop the NA values
challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]
temperature = challenger_data[:, 0]
D = challenger_data[:, 1] # defect or not?
# notice the `value` here. We explain why below.
beta = pm.Normal("beta", 0, 0.001, value=0)
alpha = pm.Normal("alpha", 0, 0.001, value=0)
@pm.deterministic
# connect the probabilities in `p` with our observations through a
# Bernoulli random variable.
observed = pm.Bernoulli("bernoulli_obs", p, value=D, observed=True)
model = pm.Model([observed, beta, alpha])
# Mysterious code to be explained in Chapter 3
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(120000, 100000, 2)
alpha_samples = mcmc.trace('alpha')[:, None] # best to make them 1d
beta_samples = mcmc.trace('beta')[:, None]
# histogram of the samples:
plt.subplot(211)
plt.title(r"Posterior distributions of the variables $\alpha, \beta$")
plt.hist(beta_samples, histtype='stepfilled', bins=35, alpha=0.85,
label=r"posterior of $\beta$", color="#7A68A6", normed=True)
plt.legend()
plt.subplot(212)
plt.hist(alpha_samples, histtype='stepfilled', bins=35, alpha=0.85,
label=r"posterior of $\alpha$", color="#A60628", normed=True)
plt.legend()
plt.show()
prob_31 = logistic(31, beta_samples, alpha_samples)
plt.xlim(0.995, 1)
plt.hist(prob_31, bins=1000, normed=True, histtype='stepfilled')
plt.title("Posterior distribution of probability of defect, given $t = 31$")
plt.xlabel("probability of defect occurring in O-ring")
plt.show()
| 33.081967
| 77
| 0.666501
|
import numpy as np
import pymc as pm
from matplotlib import pyplot as plt
challenger_data = np.genfromtxt("challenger_data.csv", skip_header=1,
usecols=[1, 2], missing_values="NA",
delimiter=",")
# drop the NA values
challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]
temperature = challenger_data[:, 0]
D = challenger_data[:, 1] # defect or not?
def logistic(x, beta, alpha=0):
return 1.0 / (1.0 + np.exp(np.dot(beta, x) + alpha))
# notice the `value` here. We explain why below.
beta = pm.Normal("beta", 0, 0.001, value=0)
alpha = pm.Normal("alpha", 0, 0.001, value=0)
@pm.deterministic
def p(t=temperature, alpha=alpha, beta=beta):
return 1.0 / (1. + np.exp(beta * t + alpha))
# connect the probabilities in `p` with our observations through a
# Bernoulli random variable.
observed = pm.Bernoulli("bernoulli_obs", p, value=D, observed=True)
model = pm.Model([observed, beta, alpha])
# Mysterious code to be explained in Chapter 3
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(120000, 100000, 2)
alpha_samples = mcmc.trace('alpha')[:, None] # best to make them 1d
beta_samples = mcmc.trace('beta')[:, None]
# histogram of the samples:
plt.subplot(211)
plt.title(r"Posterior distributions of the variables $\alpha, \beta$")
plt.hist(beta_samples, histtype='stepfilled', bins=35, alpha=0.85,
label=r"posterior of $\beta$", color="#7A68A6", normed=True)
plt.legend()
plt.subplot(212)
plt.hist(alpha_samples, histtype='stepfilled', bins=35, alpha=0.85,
label=r"posterior of $\alpha$", color="#A60628", normed=True)
plt.legend()
plt.show()
prob_31 = logistic(31, beta_samples, alpha_samples)
plt.xlim(0.995, 1)
plt.hist(prob_31, bins=1000, normed=True, histtype='stepfilled')
plt.title("Posterior distribution of probability of defect, given $t = 31$")
plt.xlabel("probability of defect occurring in O-ring")
plt.show()
| 142
| 0
| 48
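A minimal, self-contained sketch of the logistic defect model used in challenger.py above, evaluated at t = 31 F; the alpha and beta values are illustrative placeholders rather than posterior means from the record's MCMC run.
import numpy as np

def logistic(x, beta, alpha=0.0):
    # same functional form as the record: p = 1 / (1 + exp(beta * x + alpha))
    return 1.0 / (1.0 + np.exp(beta * x + alpha))

beta_hat, alpha_hat = 0.23, -15.0          # illustrative values only
print(logistic(31, beta_hat, alpha_hat))   # rough P(O-ring defect) at 31 F
print(logistic(70, beta_hat, alpha_hat))   # much lower at a warm launch temperature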
|
4f197e4b4b505eb45a77d225b7775d7855e0153e
| 1,351
|
py
|
Python
|
oop 1-1.py
|
Blasco0616/OOP-1-1
|
98ae3c1f5e3dc719d4216bcd163a8e482cba10a2
|
[
"Apache-2.0"
] | null | null | null |
oop 1-1.py
|
Blasco0616/OOP-1-1
|
98ae3c1f5e3dc719d4216bcd163a8e482cba10a2
|
[
"Apache-2.0"
] | null | null | null |
oop 1-1.py
|
Blasco0616/OOP-1-1
|
98ae3c1f5e3dc719d4216bcd163a8e482cba10a2
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
window = Tk()
window.geometry("600x500+30+20")
window.title("Welcome to Python Programming")
btn = Button(window, text = "Click to add name", fg ="blue")
btn.place(x= 80, y= 100)
lbl = Label(window, text = "Student Personal Information", fg = "Blue", bg = "orange")
lbl.place(relx=.5, y =50, anchor="center")
lbl2 = Label(window, text ="Gender", fg="red")
lbl2.place(x= 80,y = 150)
txtfld = Entry(window, bd = 3, font = ("verdana",16))
txtfld.place(x=150,y=100)
v1 = StringVar()
v2 = StringVar()
v1.set(1)
r1 = Radiobutton(window, text="Male",variable=v1)
r1.place(x=80,y=200)
r2 = Radiobutton(window, text="Female",variable=v2)
r2.place(x=200,y=200)
v3 = IntVar()
v4 = IntVar()
v5 = IntVar()
chkbox = Checkbutton(window, text="basketball",variable=v3)
chkbox2 = Checkbutton(window, text="volleyball",variable=v4)
chkbox3 = Checkbutton(window, text="swimming",variable=v5)
chkbox.place(x=80, y=300)
chkbox2.place(x=250, y=300)
chkbox3.place(x=350, y=300)
lbl3 = Label(window, text ="Sports")
lbl3.place(x=80,y=250)
lbl4 = Label(window, text ="Subjects")
lbl4.place(x=80,y=350)
data1 ="arithmetric"
data2 ="writing"
data3 ="math"
lstbox = Listbox(window, height=5, selectmode="multiple")
lstbox.insert(END,data1,data2,data3)
lstbox.place(x=80, y=400)
window.mainloop()
| 26.490196
| 87
| 0.672095
|
from tkinter import *
window = Tk()
window.geometry("600x500+30+20")
window.title("Welcome to Python Programming")
btn = Button(window, text = "Click to add name", fg ="blue")
btn.place(x= 80, y= 100)
lbl = Label(window, text = "Student Personal Information", fg = "Blue", bg = "orange")
lbl.place(relx=.5, y =50, anchor="center")
lbl2 = Label(window, text ="Gender", fg="red")
lbl2.place(x= 80,y = 150)
txtfld = Entry(window, bd = 3, font = ("verdana",16))
txtfld.place(x=150,y=100)
v1 = StringVar()
v2 = StringVar()
v1.set(1)
r1 = Radiobutton(window, text="Male",variable=v1)
r1.place(x=80,y=200)
r2 = Radiobutton(window, text="Female",variable=v2)
r2.place(x=200,y=200)
v3 = IntVar()
v4 = IntVar()
v5 = IntVar()
chkbox = Checkbutton(window, text="basketball",variable=v3)
chkbox2 = Checkbutton(window, text="volleyball",variable=v4)
chkbox3 = Checkbutton(window, text="swimming",variable=v5)
chkbox.place(x=80, y=300)
chkbox2.place(x=250, y=300)
chkbox3.place(x=350, y=300)
lbl3 = Label(window, text ="Sports")
lbl3.place(x=80,y=250)
lbl4 = Label(window, text ="Subjects")
lbl4.place(x=80,y=350)
data1 ="arithmetric"
data2 ="writing"
data3 ="math"
lstbox = Listbox(window, height=5, selectmode="multiple")
lstbox.insert(END,data1,data2,data3)
lstbox.place(x=80, y=400)
window.mainloop()
| 0
| 0
| 0
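In the tkinter record above, the two Gender radio buttons are bound to different variables (v1 and v2), so they do not behave as a mutually exclusive group. A minimal sketch of the usual pattern, sharing one variable with distinct value= options (widget layout here is illustrative):
from tkinter import Tk, StringVar, Radiobutton

root = Tk()
gender = StringVar(value="Male")   # one shared variable for the whole group
Radiobutton(root, text="Male", variable=gender, value="Male").pack()
Radiobutton(root, text="Female", variable=gender, value="Female").pack()
# gender.get() returns whichever option is currently selected
root.mainloop()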
|
df93a21376abc28670852f1dc620b7f1137f0134
| 1,241
|
py
|
Python
|
day-07/problem.py
|
mkemp/aoc-2020
|
01f65bc4aee05f819c3a8f3b04565188fcc17d25
|
[
"MIT"
] | 1
|
2020-12-06T19:33:53.000Z
|
2020-12-06T19:33:53.000Z
|
day-07/problem.py
|
mkemp/aoc-2020
|
01f65bc4aee05f819c3a8f3b04565188fcc17d25
|
[
"MIT"
] | null | null | null |
day-07/problem.py
|
mkemp/aoc-2020
|
01f65bc4aee05f819c3a8f3b04565188fcc17d25
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
forward, reverse = build_mapping_from_input()
# Part 1
print(len(can_contain('shiny gold')))
# Part 2
print(count_bags('shiny gold'))
| 28.860465
| 92
| 0.622885
|
from collections import defaultdict
def build_mapping_from_input():
forward, reverse = defaultdict(dict), defaultdict(dict)
with open('input') as f:
for line in f.read().strip().split('\n'):
container, _, subcontainers = line[:-1].partition(' bags contain ')
for token in subcontainers.replace(' bags', '').replace(' bag', '').split(', '):
if token != 'no other':
count, _, subcontainer = token.partition(' ')
forward[container][subcontainer] = int(count)
reverse[subcontainer][container] = int(count)
return forward, reverse
forward, reverse = build_mapping_from_input()
# Part 1
def can_contain(subcontainer, seen=None):
seen = seen or set()
for container in reverse[subcontainer]:
if container not in seen:
seen.add(container)
can_contain(container, seen)
return seen
print(len(can_contain('shiny gold')))
# Part 2
def count_bags(container):
total = 0
subcontainers = forward.get(container, {})
for subcontainer, count in subcontainers.items():
total += count * (1 + count_bags(subcontainer))
return total
print(count_bags('shiny gold'))
| 993
| 0
| 67
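A self-contained walk-through of the bag-rule parsing from the day-07 record above, run on one hard-coded rule instead of the puzzle input file, to show what lands in the forward and reverse maps.
from collections import defaultdict

line = "light red bags contain 1 bright white bag, 2 muted yellow bags."
forward, reverse = defaultdict(dict), defaultdict(dict)
container, _, subcontainers = line[:-1].partition(' bags contain ')
for token in subcontainers.replace(' bags', '').replace(' bag', '').split(', '):
    if token != 'no other':
        count, _, subcontainer = token.partition(' ')
        forward[container][subcontainer] = int(count)
        reverse[subcontainer][container] = int(count)
print(dict(forward))   # {'light red': {'bright white': 1, 'muted yellow': 2}}
print(dict(reverse))   # {'bright white': {'light red': 1}, 'muted yellow': {'light red': 1}}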
|
4f1fab1f8c93e7902ff058430006c7c05bbf3f66
| 5,969
|
py
|
Python
|
osu-ac/comparer.py
|
ChrisMiuchiz/osu-ac
|
37a62624bf569b2b5774f5ac72198c273ffddc4a
|
[
"MIT"
] | 1
|
2021-04-01T21:39:41.000Z
|
2021-04-01T21:39:41.000Z
|
osu-ac/comparer.py
|
ChrisMiuchiz/osu-ac
|
37a62624bf569b2b5774f5ac72198c273ffddc4a
|
[
"MIT"
] | null | null | null |
osu-ac/comparer.py
|
ChrisMiuchiz/osu-ac
|
37a62624bf569b2b5774f5ac72198c273ffddc4a
|
[
"MIT"
] | null | null | null |
import itertools
import numpy as np
from draw import Draw
from replay import Replay
from config import WHITELIST
class Comparer:
"""
A class for managing a set of replay comparisons.
Attributes:
List replays1: A list of Replay instances to compare against replays2.
List replays2: A list of Replay instances to be compared against. Optional, defaulting to None. No attempt to error check
this is made - if a compare() call is made, the program will throw an AttributeError. Be sure to only call
methods that involve the first set of replays if this argument is not passed.
Integer threshold: If a comparison scores below this value, the result is printed.
See Also:
Investigator
"""
def __init__(self, threshold, replays1, replays2=None):
"""
Initializes a Comparer instance.
Note that the order of the two replay lists has no effect; they are only numbered for consistency.
Comparing 1 to 2 is the same as comparing 2 to 1.
Args:
List replays1: A list of Replay instances to compare against replays2.
List replays2: A list of Replay instances to be compared against. Optional, defaulting to None. No attempt to error check
this is made - if a compare() call is made, the program will throw an AttributeError. Be sure to only call
methods that involve the first set of replays.
Integer threshold: If a comparison scores below this value, the result is printed.
"""
self.replays1 = replays1
self.replays2 = replays2
self.threshold = threshold
def compare(self, mode):
"""
If mode is "double", compares all replays in replays1 against all replays in replays2.
If mode is "single", compares all replays in replays1 against all other replays in replays1 (len(replays1) choose 2 comparisons).
In both cases, prints the result of each comparison according to _print_result.
Args:
String mode: One of either "double" or "single", determining how to choose which replays to compare.
"""
iterator = itertools.product(self.replays1, self.replays2) if mode == "double" else itertools.combinations(self.replays1, 2)
for replay1, replay2 in iterator:
if(self.check_names(replay1.player_name, replay2.player_name)):
continue
result = Comparer._compare_two_replays(replay1, replay2)
self._print_result(result, replay1, replay2)
def check_names(self, player1, player2):
"""
Returns True if both players are in the whitelist or are the same name, False otherwise.
Args:
String player1: The name of the first player.
String player2: The name of the second player.
"""
return ((player1 in WHITELIST and player2 in WHITELIST) or (player1 == player2))
def _print_result(self, result, replay1, replay2):
"""
Prints a human readable version of the result if the average distance
is below the threshold set from the command line.
Args:
Tuple result: A tuple containing (average distance, standard deviation) of a comparison.
Replay replay1: The replay to print the name of and to draw against replay2
Replay replay2: The replay to print the name of and to draw against replay1
"""
mean = result[0]
sigma = result[1]
if(mean > self.threshold):
return
print("{:.1f} similarity, {:.1f} std deviation ({} vs {})".format(mean, sigma, replay1.player_name, replay2.player_name))
answer = input("Would you like to see a visualization of both replays? ")
if answer[0].lower() == "y":
animation = Draw.draw_replays(replay1, replay2)
@staticmethod
def _compare_two_replays(replay1, replay2):
"""
Compares two Replays and return their average distance
and standard deviation of distances.
"""
# get all coordinates in numpy arrays so that they're arranged like:
# [ x_1 x_2 ... x_n
# y_1 y_2 ... y_n ]
# indexed by columns first.
data1 = replay1.as_list_with_timestamps()
data2 = replay2.as_list_with_timestamps()
# interpolate
(data1, data2) = Replay.interpolate(data1, data2)
# remove time from each tuple
data1 = [d[1:] for d in data1]
data2 = [d[1:] for d in data2]
(mu, sigma) = Comparer._compute_data_similarity(data1, data2)
return (mu, sigma)
@staticmethod
def _compute_data_similarity(data1, data2):
"""
Finds the similarity and standard deviation between two datasets.
Args:
List data1: A list of tuples containing the (x, y) coordinate of points
List data2: A list of tuples containing the (x, y) coordinate of points
Returns:
A tuple containing (similarity value, standard deviation) between the two datasets
"""
data1 = np.array(data1)
data2 = np.array(data2)
# switch if the second is longer, so that data1 is always the longest.
if len(data2) > len(data1):
(data1, data2) = (data2, data1)
shortest = len(data2)
distance = data1[:shortest] - data2
# square all numbers and sum over the second axis (add row 2 to row 1),
# finally take the square root of each number to get all distances.
# [ x_1 x_2 ... x_n => [ x_1 ** 2 ... x_n ** 2
# y_1 y_2 ... y_n ] => y_1 ** 2 ... y_n ** 2 ]
# => [ x_1 ** 2 + y_1 ** 2 ... x_n ** 2 + y_n ** 2 ]
# => [ d_1 ... d_2 ]
distance = (distance ** 2).sum(axis=1) ** 0.5
mu, sigma = distance.mean(), distance.std()
return (mu, sigma)
| 39.793333
| 137
| 0.62322
|
import itertools
import numpy as np
from draw import Draw
from replay import Replay
from config import WHITELIST
class Comparer:
"""
A class for managing a set of replay comparisons.
Attributes:
List replays1: A list of Replay instances to compare against replays2.
List replays2: A list of Replay instances to be compared against. Optional, defaulting to None. No attempt to error check
this is made - if a compare() call is made, the program will throw an AttributeError. Be sure to only call
methods that involve the first set of replays if this argument is not passed.
Integer threshold: If a comparison scores below this value, the result is printed.
See Also:
Investigator
"""
def __init__(self, threshold, replays1, replays2=None):
"""
Initializes a Comparer instance.
Note that the order of the two replay lists has no effect; they are only numbered for consistency.
Comparing 1 to 2 is the same as comparing 2 to 1.
Args:
List replays1: A list of Replay instances to compare against replays2.
List replays2: A list of Replay instances to be compared against. Optional, defaulting to None. No attempt to error check
this is made - if a compare() call is made, the program will throw an AttributeError. Be sure to only call
methods that involve the first set of replays.
Integer threshold: If a comparison scores below this value, the result is printed.
"""
self.replays1 = replays1
self.replays2 = replays2
self.threshold = threshold
def compare(self, mode):
"""
If mode is "double", compares all replays in replays1 against all replays in replays2.
If mode is "single", compares all replays in replays1 against all other replays in replays1 (len(replays1) choose 2 comparisons).
In both cases, prints the result of each comparison according to _print_result.
Args:
String mode: One of either "double" or "single", determining how to choose which replays to compare.
"""
iterator = itertools.product(self.replays1, self.replays2) if mode == "double" else itertools.combinations(self.replays1, 2)
for replay1, replay2 in iterator:
if(self.check_names(replay1.player_name, replay2.player_name)):
continue
result = Comparer._compare_two_replays(replay1, replay2)
self._print_result(result, replay1, replay2)
def check_names(self, player1, player2):
"""
Returns True if both players are in the whitelist or are the same name, False otherwise.
Args:
String player1: The name of the first player.
String player2: The name of the second player.
"""
return ((player1 in WHITELIST and player2 in WHITELIST) or (player1 == player2))
def _print_result(self, result, replay1, replay2):
"""
Prints a human readable version of the result if the average distance
is below the threshold set from the command line.
Args:
Tuple result: A tuple containing (average distance, standard deviation) of a comparison.
Replay replay1: The replay to print the name of and to draw against replay2
Replay replay2: The replay to print the name of and to draw against replay1
"""
mean = result[0]
sigma = result[1]
if(mean > self.threshold):
return
print("{:.1f} similarity, {:.1f} std deviation ({} vs {})".format(mean, sigma, replay1.player_name, replay2.player_name))
answer = input("Would you like to see a visualization of both replays? ")
if answer[0].lower() == "y":
animation = Draw.draw_replays(replay1, replay2)
@staticmethod
def _compare_two_replays(replay1, replay2):
"""
Compares two Replays and return their average distance
and standard deviation of distances.
"""
# get all coordinates in numpy arrays so that they're arranged like:
# [ x_1 x_2 ... x_n
# y_1 y_2 ... y_n ]
# indexed by columns first.
data1 = replay1.as_list_with_timestamps()
data2 = replay2.as_list_with_timestamps()
# interpolate
(data1, data2) = Replay.interpolate(data1, data2)
# remove time from each tuple
data1 = [d[1:] for d in data1]
data2 = [d[1:] for d in data2]
(mu, sigma) = Comparer._compute_data_similarity(data1, data2)
return (mu, sigma)
@staticmethod
def _compute_data_similarity(data1, data2):
"""
Finds the similarity and standard deviation between two datasets.
Args:
List data1: A list of tuples containing the (x, y) coordinate of points
List data2: A list of tuples containing the (x, y) coordinate of points
Returns:
A tuple containing (similarity value, standard deviation) between the two datasets
"""
data1 = np.array(data1)
data2 = np.array(data2)
# switch if the second is longer, so that data1 is always the longest.
if len(data2) > len(data1):
(data1, data2) = (data2, data1)
shortest = len(data2)
distance = data1[:shortest] - data2
# square all numbers and sum over the second axis (add row 2 to row 1),
# finally take the square root of each number to get all distances.
# [ x_1 x_2 ... x_n => [ x_1 ** 2 ... x_n ** 2
# y_1 y_2 ... y_n ] => y_1 ** 2 ... y_n ** 2 ]
# => [ x_1 ** 2 + y_1 ** 2 ... x_n ** 2 + y_n ** 2 ]
# => [ d_1 ... d_2 ]
distance = (distance ** 2).sum(axis=1) ** 0.5
mu, sigma = distance.mean(), distance.std()
return (mu, sigma)
| 0
| 0
| 0
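A numpy-only sketch of the statistic computed by Comparer._compute_data_similarity above: pointwise Euclidean distances between two cursor tracks, truncated to the shorter track, then summarized by mean and standard deviation. The toy coordinates are made up.
import numpy as np

track_a = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
track_b = np.array([[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]])
longer, shorter = (track_a, track_b) if len(track_a) >= len(track_b) else (track_b, track_a)
diff = longer[:len(shorter)] - shorter
dist = (diff ** 2).sum(axis=1) ** 0.5     # per-sample Euclidean distance
print(dist.mean(), dist.std())            # 1.0 0.0 for this toy data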
|
ff34b53715690a0f2b3c069cbb92b44d4175382f
| 1,004
|
py
|
Python
|
setup.py
|
kyleaj/ProxImaL
|
2986b1ed40b58057822922522145bfbbdd2cf9de
|
[
"MIT"
] | 101
|
2016-07-24T00:33:12.000Z
|
2022-03-23T23:51:58.000Z
|
setup.py
|
kyleaj/ProxImaL
|
2986b1ed40b58057822922522145bfbbdd2cf9de
|
[
"MIT"
] | 57
|
2016-07-26T18:12:37.000Z
|
2022-02-14T04:19:26.000Z
|
setup.py
|
kyleaj/ProxImaL
|
2986b1ed40b58057822922522145bfbbdd2cf9de
|
[
"MIT"
] | 30
|
2016-07-26T22:51:59.000Z
|
2021-01-15T14:45:42.000Z
|
from setuptools import setup
setup(
name='proximal',
version='0.1.7',
packages=['proximal',
'proximal.prox_fns',
'proximal.lin_ops',
'proximal.algorithms',
'proximal.utils',
'proximal.halide',
'proximal.tests',
'proximal.tests.data'],
package_dir={'proximal': 'proximal'},
package_data={'proximal.tests.data': ['angela.jpg'],
'proximal.halide': ['src/*.cpp', 'src/core/*', 'src/external/*', 'src/fft/*',
'subprojects/halide.wrap',
'subprojects/pybind11.wrap',
'subprojects/packagefiles/halide/meson.build',
'meson.build']},
url='http://github.com/comp-imaging/ProxImaL/',
install_requires=["numpy >= 1.9",
"scipy >= 0.15",
"numexpr",
"Pillow",
"meson >= 0.54"],
use_2to3=True,
)
| 34.62069
| 95
| 0.473108
|
from setuptools import setup
setup(
name='proximal',
version='0.1.7',
packages=['proximal',
'proximal.prox_fns',
'proximal.lin_ops',
'proximal.algorithms',
'proximal.utils',
'proximal.halide',
'proximal.tests',
'proximal.tests.data'],
package_dir={'proximal': 'proximal'},
package_data={'proximal.tests.data': ['angela.jpg'],
'proximal.halide': ['src/*.cpp', 'src/core/*', 'src/external/*', 'src/fft/*',
'subprojects/halide.wrap',
'subprojects/pybind11.wrap',
'subprojects/packagefiles/halide/meson.build',
'meson.build']},
url='http://github.com/comp-imaging/ProxImaL/',
install_requires=["numpy >= 1.9",
"scipy >= 0.15",
"numexpr",
"Pillow",
"meson >= 0.54"],
use_2to3=True,
)
| 0
| 0
| 0
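The setup.py record above passes use_2to3=True, which setuptools removed in version 58 (2021), so the script fails on current toolchains. A sketch of the same call without that flag, assuming the package is already Python-3-only; the package list is trimmed here for brevity.
from setuptools import setup

setup(
    name='proximal',
    version='0.1.7',
    packages=['proximal'],   # full sub-package list as in the record
    install_requires=["numpy >= 1.9", "scipy >= 0.15", "numexpr", "Pillow", "meson >= 0.54"],
    # use_2to3 removed: unsupported since setuptools 58
)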
|
b1aca679b19896f4133a9892eaf56574ec2802c4
| 7,262
|
py
|
Python
|
python/SNRstudy.py
|
titodalcanton/flare
|
4ffb02977d19786ab8c1a767cc495a799d9575ae
|
[
"Apache-2.0"
] | 3
|
2015-05-26T15:21:13.000Z
|
2020-07-20T02:56:25.000Z
|
python/SNRstudy.py
|
titodalcanton/flare
|
4ffb02977d19786ab8c1a767cc495a799d9575ae
|
[
"Apache-2.0"
] | null | null | null |
python/SNRstudy.py
|
titodalcanton/flare
|
4ffb02977d19786ab8c1a767cc495a799d9575ae
|
[
"Apache-2.0"
] | 2
|
2018-09-20T14:19:13.000Z
|
2020-07-20T02:56:30.000Z
|
import math
import numpy as np
import subprocess
import re
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import astropy.units as units
from astropy.cosmology import Planck15 as cosmo,z_at_value
from matplotlib.backends.backend_pdf import PdfPages
flare_dir="../flare"
Ms=1.5e4*10**(np.arange(16)/3.0)
#Ms=2.0e5*10**(np.arange(13)/3.0)
print "Ms=",Ms
SNRstudy(Ms,[1,2,4,10],[10,100,1000],300)
#logz = np.arange(10)/2.5
#print "logz=",logz
#print [10**x for x in logz]
#logD = [cosmo.luminosity_distance(1+10**lz)/units.Mpc for lz in logz]
#print logD
#plt.clf()
#plot=plt.plot(logz,logD)
#plt.show()
| 39.68306
| 172
| 0.593638
|
import math
import numpy as np
import subprocess
import re
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import astropy.units as units
from astropy.cosmology import Planck15 as cosmo,z_at_value
from matplotlib.backends.backend_pdf import PdfPages
def set_flare_flags(snr,params):
flags=""
#Waveform model
#flags += "--tagextpn 0" #Don't extend waveforms at low freq to allow lower masses
#flags+="--tagint 0 " #1 for Fresnel integration(default), vs gridded quadrature"
#flags+="--tagint 1 --nbptsoverlap 8192" #gridded quadrature
flags+="--deltatobs 5.0 " #duration in years of LISA observation
#flags+="--minf 1e-4 " #minimun frequency included in analysis
flags+="--minf 3e-6 " #minimun frequency included in analysis
#flags+="--nbmodeinj 1 --nbmodetemp 1 " #for no higher modes in injection and template
if(snr>0):
flags+="--snr "+str(snr)+" --rescale-distprior " #fixing SNR (rescales distance)
flags+="--comp-min 1e5 --comp-max 1e8 " #min/max for component mass prior ranges
flags+="--logflat-massprior " #assume prior uniform in log of masses, rather than uniform for mass."
#flags+="--mtot-min 8e5 --mtot-max 2e8 --q-max 11.98 " #additional prior limits on Mtot and q
flags+="--mtot-min 1e4 --mtot-max 1e9 --q-max 11.98 " #additional prior limits on Mtot and q
#flags+="-dist-min 5000. --dist-max 200e4 --distance 1e5" #prior range for distances should verify range based on distances (in Mpc).
flags+="--dist-min 1000. --dist-max 4e5 " #prior range for distances approx 0.2<z<33
#set parameter flags
m1 = params[0]
m2 = params[1]
tRef = params[2]
phiRef = params[3]
if(snr>0):
dist = 2e4
else:
dist = params[4]
lam = params[5]
beta = params[6]
inc = params[7]
pol = params[8]
flags += "--phiRef "+str(phiRef)+" "
flags += "--m1 "+str(m1)+" "
flags += "--m2 "+str(m2)+" "
flags += "--tRef "+str(tRef)+" "
flags += "--distance "+str(dist)+" "
flags += "--lambda "+str(lam)+" "
flags += "--beta "+str(beta)+" "
flags += "--inclination "+str(inc)+" "
flags += "--polarization "+str(pol)+" "
return flags
def set_mcmc_flags(outroot,ptN):
flags = ""
#MCMC basics
flags += "--rng_seed="+str(np.random.rand())+" "
flags += " --outroot "+str(outroot)+" "
flags += "--nskip=40 --info_every=10000 " #frequency of sampling/reporting
flags += "--prop=7 --de_ni=500 --gauss_1d_frac=0.5 --de_reduce_gamma=4 " #differential evolution proposal distribution with Gaussian draws 1/2 of the time
#Parallel Tempering setup
flags += "--pt --pt_Tmax=1e9 " #parallel tempering basics
if(ptN>0):
flags += "pt_n="+str(ptN)+" " #else default is 20
flags += "--pt_swap_rate=0.10 " #rate of temp swaps (or default 0.01)
flags += "--pt_evolve_rate=0.01 " #rate at which temps are allowed to evolve
flags += "--pt_reboot_rate=0.0001 --pt_reboot_every=10000 --pt_reboot_grace=50000 " #Somewhat hacky trick to avoid chains getting stuck. Not sure whether we need this.
#stopping criteria
flags += "--nsteps=1e7" #10 million steps may be about the most we can do
flags += "--pt_stop_evid_err=0.05" #may terminate earlier based on evidence criterion
return flags
def set_bambi_flags(outroot):
flags = "--nlive 1000 --tol 1.0 --mmodal --nclspar 2 --maxcls 10 --ztol -60 --seed "
flags += "--outroot "+outroot+" "
def draw_params(Mtot,q):
#we suppose fixed Mtot,q,SNR and draw the other params
m1 = Mtot*q/(1.0+q)
m2 = Mtot/(1.0+q)
tRef = np.random.randn()*1e5
phiRef = 2*math.pi*np.random.rand()
dist = 100*10**(np.random.rand()*math.log10(400))
lam = np.random.rand()*2.0*math.pi
beta = math.acos(np.random.rand()*2.0-1)-math.pi/2.0
inc = math.acos(np.random.rand()*2.0-1)
pol = np.random.rand()*math.pi
params = [m1,m2,tRef,phiRef,dist,lam,beta,inc,pol]
return params
def perform_run(name,Mtot,q,snr):
if(BAMBI):
cmd = flare_dir+"/LISAinference/LISAinference"
        flags = set_bambi_flags(name)
else:
cmd = flare_dir+"/LISAinference/LISAinference_ptmcmc"
        flags = set_mcmc_flags(name,60)
params=draw_params(Mtot,q)
flags+=set_flare_flags(snr,params)
subprocess.call(cmd+" "+flags)
def SNRrun(Mtot,q,snr):
cmd = flare_dir+"/LISAinference/LISAinference_ptmcmc"
flags = "--nsteps=0 --noFisher "
params=draw_params(Mtot,q)
flags+=set_flare_flags(snr,params)
name="dummy"
flags += "--rng_seed="+str(np.random.rand())+" "
flags += " --outroot "+str(name)+" "
cmd += " "+flags+">"+name+".out"
setenv = "export ROM_DATA_PATH=/Users/jgbaker/Projects/GWDA/LISA-type-response/flare/ROMdata/q1-12_Mfmin_0.0003940393857519091"
print "Executing '"+cmd+"'"
code=subprocess.call(setenv+";"+cmd,shell=True)
print "Run completed with code(",code,")"
with open(name+"params.txt",'r') as file:
lines=file.read()
#print lines
dist=re.search("dist_resc:(.*)", lines).group(1)
print "distance =",dist
return float(dist)
def SNRstudy(MtotList,qList,SNRList,Navg):
pp = PdfPages('SNRstudy.pdf')
for q in qList:
tags=[]
labels=[]
count=0
for snr in SNRList:
count+=1
y1=[]
y2=[]
x=[]
for Mtot in MtotList:
print "Running SNRrun(",Mtot,",",q,",",snr,")"
dists=np.zeros(Navg);
for i in range(Navg):
dist=SNRrun(Mtot,q,snr)
z=z_at_value(cosmo.luminosity_distance,dist*units.Mpc,zmax=10000,ztol=1e-6)
print "D=",dist," z=",z
dists[i]=math.log10(z)
#dists[i]=math.log10(dist)
mean=np.mean(dists);
std=np.std(dists);
print "M=",Mtot," q=",q,"dist=",mean,"+/-",std
x.append(math.log10(Mtot/(1+10**mean)))
#x.append(math.log10(Mtot))
y1.append(mean-std)
y2.append(mean+std)
print "x=",x
print "y1=",y1
print "y2=",y2
color=(0.2,0.8/math.sqrt(q),1.0/math.sqrt(count))
plot=plt.fill_between(x, y1, y2, facecolor=color,alpha=0.3, interpolate=True)
tags.append( Rectangle((0, 0), 1, 1, fc=color,alpha=0.3) )
labels.append("SNR="+str(snr))
plt.legend(tags,labels)
plt.ylim([-1,3])
plt.xlim([2,9])
plt.title("SNR contours for LISA q="+str(q)+" SMBH merger")
plt.ylabel("log(z)")
plt.xlabel("log(M/Msun)")
#plt.show()
pp.savefig()
plt.clf()
pp.close()
flare_dir="../flare"
Ms=1.5e4*10**(np.arange(16)/3.0)
#Ms=2.0e5*10**(np.arange(13)/3.0)
print "Ms=",Ms
SNRstudy(Ms,[1,2,4,10],[10,100,1000],300)
#logz = np.arange(10)/2.5
#print "logz=",logz
#print [10**x for x in logz]
#logD = [cosmo.luminosity_distance(1+10**lz)/units.Mpc for lz in logz]
#print logD
#plt.clf()
#plot=plt.plot(logz,logD)
#plt.show()
| 6,464
| 0
| 165
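SNRstudy.py above converts each rescaled luminosity distance to a redshift with astropy's z_at_value; a standalone sketch of just that step, where the 1e4 Mpc distance is an arbitrary example value.
import astropy.units as units
from astropy.cosmology import Planck15 as cosmo, z_at_value

dist = 1.0e4 * units.Mpc                         # example luminosity distance
z = z_at_value(cosmo.luminosity_distance, dist)  # redshift with D_L(z) = 1e4 Mpc
print(z)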
|
e2c28fc5e68159bb5f061e1ed6ea103347940c98
| 15,317
|
py
|
Python
|
pupa/scrape/schemas/event.py
|
paultag/pupa
|
137293925503496e15137540e049bf544e129971
|
[
"BSD-3-Clause"
] | null | null | null |
pupa/scrape/schemas/event.py
|
paultag/pupa
|
137293925503496e15137540e049bf544e129971
|
[
"BSD-3-Clause"
] | null | null | null |
pupa/scrape/schemas/event.py
|
paultag/pupa
|
137293925503496e15137540e049bf544e129971
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Schema for event objects.
"""
from .common import sources, extras
media_schema = {
"description": ("This \"special\" schema is used in two places in the Event"
" schema, on the top level and inside the agenda item. This is an"
" optional component that may be omited entirely from a document."),
"items": {
"properties": {
"name": {
"type": "string",
"description": ('name of the media link, such as "Recording of'
' the meeting" or "Discussion of construction'
' near the watershed"'),
},
"type": {
"type": "string",
"description": ('type of the set of recordings, such as'
' "recording" or "testimony".'),
},
"date": {
"pattern": "^[0-9]{4}(-[0-9]{2}){0,2}$",
"type": ["string", "null"],
"description": "date of the recording.",
},
"offset": {
"type": ["number", "null"],
"description": ("Offset where the related part starts. This is"
" optional and may be ommited entirely."),
},
"links": {
"description": ("List of links to the same media item, each"
" with a different MIME type."),
"items": {
"properties": {
"mimetype": {
"description": ("Mimetype of the media, such"
" as video/mp4 or audio/webm"),
"type": ["string", "null"]
},
"url": {
"type": "string",
"description": "URL where this media may be accessed",
},
},
"type": "object"
},
"type": "array"
},
},
"type": "object"
},
"type": "array"
}
schema = {
"description": "event data",
"_order": (
('Basics', ('_type', 'name', 'description', 'when', 'end', 'status', 'location')),
('Linked Entities', ('media', 'links', 'participants', 'agenda', 'documents',)),
('Common Fields', ['updated_at', 'created_at', 'sources']),
),
"properties": {
"_type": {
"enum": ["event"],
"type": "string",
"description": ("All events must have a _type field set to one of"
" the entries in the enum below."),
},
"name": {
"type": "string",
"description": ('A simple name of the event, such as "Fiscal'
' subcommittee hearing on pudding cups"')
},
"all_day": {
"type": ["boolean"],
"description": ("Indicates if the event is an all-day event"),
},
"type": {
"type": ["string"],
"description": ("type of event"),
},
# TODO: turn into enum
"updated_at": {
"type": ["string", "datetime"],
"required": False,
"description": "the time that this object was last updated.",
},
"created_at": {
"type": ["string", "datetime"],
"required": False,
"description": "the time that this object was first created.",
},
"description": {
"type": ["string", "null"],
"description": ('A longer description describing the event. As an'
' example, "Topics for discussion include this that'
' and the other thing. In addition, lunch will be'
' served".'),
},
"when": {
"type": ["datetime"],
"description": ("Starting date / time of the event. This should be"
" fully timezone qualified."),
},
"end": {
"type": ["datetime", "null"],
"description": ("Ending date / time of the event. This should"
" be fully timezone qualified."),
},
"status": {
"type": ["string", "null"],
"enum": ["cancelled", "tentative", "confirmed", "passed"],
"description": ("String that denotes the status of the meeting."
" This is useful for showing the meeting is cancelled"
" in a machine-readable way."),
},
"location": {
"description": "Where the event will take place.",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": ('name of the location, such as "City Hall,'
' Boston, MA, USA", or "Room E201, Dolan'
' Science Center, 20700 North Park Blvd'
' University Heights Ohio, 44118"'),
},
"note": {
"type": ["string", "null"],
"description": ('human readable notes regarding the location,'
' something like "The meeting will take place'
' at the Minority Whip\'s desk on the floor"')
},
"url": {
"required": False,
"type": "string",
"description": "URL of the location, if applicable.",
},
"coordinates": {
"description": ('coordinates where this event will take'
' place. If the location hasn\'t (or isn\'t)'
' geolocated or geocodable, than this should'
' be set to null.'),
"type": ["object", "null"],
"properties": {
"latitude": {
"type": "string",
"description": "latitude of the location, if any",
},
"longitude": {
"type": "string",
"description": "longitude of the location, if any",
}
}
},
},
},
"media": media_schema,
"documents": {
"description": ("Links to related documents for the event. Usually,"
" this includes things like pre-written testimony,"
" spreadsheets or a slide deck that a presenter will"
" use."),
"items": {
"properties": {
"name": {
"type": "string",
"description": ('name of the document. Something like'
' "Fiscal Report" or "John Smith\'s'
' Slides".'),
},
"url": {
"type": "string",
"description": "URL where the content may be found.",
},
"mimetype": {
"type": "string",
"description": "Mimetype of the document.",
},
},
"type": "object"
},
"type": "array"
},
"links": {
"description": ("Links related to the event that are not documents"
" or items in the Agenda. This is filled with helpful"
" links for the event, such as a committee's homepage,"
" reference material or links to learn more about subjects"
" related to the event."),
"items": {
"properties": {
"note": {
"description": ('Human-readable name of the link. Something'
' like "Historical precedent for popsicle procurement"'),
"type": "string",
"blank": True,
},
"url": {
"description": "A URL for a link about the event",
"format": "uri",
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"participants": {
"description": ("List of participants in the event. This includes"
" committees invited, legislators chairing the event"
" or people who are attending."),
"items": {
"properties": {
"chamber": {
"type": ["string", "null"],
"description": ("Optional field storing the chamber of"
" the related participant."),
},
"name": {
"type": "string",
"description": "Human readable name of the entitity.",
},
"id": {
"type": ["string", "null"],
"description": "ID of the participant",
},
"type": {
"enum": ["organization", "person"],
"type": "string",
"description": ("What type of entity is this? `person`"
" may be used if the person is not a Legislator,"
" butattending the event, such as an"
" invited speaker or one who is offering"
" testimony."),
},
"note": {
"type": "string",
"description": ("Note regarding the relationship, such"
" as `chair` for the chair of a meeting."),
},
},
"type": "object"
},
"type": "array"
},
"agenda": {
"description": ("Agenda of the event, if any. This contains information"
" about the meeting's agenda, such as bills to"
" discuss or people to present."),
"items": {
"properties": {
"description": {
"type": "string",
"description": ("Human-readable string that represents this"
" agenda item. A good example would be something like"
" The Committee will consider SB 2339, HB 100"),
},
"order": {
"type": ["string", "null"],
"description": ("order of this item, useful for re-creating"
" meeting minutes. This may be ommited entirely."
" It may also optionally contains \"dots\""
" to denote nested agenda items, such as \"1.1.2.1\""
" or \"2\", which may go on as needed."),
},
"subjects": {
"description": ("List of related topics of this agenda"
" item relates to."),
"items": {"type": "string"},
"type": "array"
},
"media": media_schema,
"notes": {
"description": ("List of notes taken during this agenda"
" item, may be used to construct meeting minutes."),
"items": {
"properties": {
"description": {
"type": "string",
"description": ("simple string containing the"
" content of the note."),
},
},
"type": "object"
},
"type": "array"
},
"related_entities": {
"description": ("Entities that relate to this agenda"
" item, such as presenters, legislative"
" instruments, or committees."),
"items": {
"properties": {
"type": {
"type": "string",
"description": ("type of the related object, like"
" `bill` or `organization`."),
},
"id": {
"type": ["string", "null"],
"description": "ID of the related entity",
},
"name": {
"type": "string",
"description": ("human readable string"
" representing the entity,"
" such as `John Q. Smith`."),
},
"note": {
"type": ["string", "null"],
"description": ("human readable string (if any) noting"
" the relationship between the entity and"
" the agenda item, such as \"Jeff"
" will be presenting on the effects"
" of too much cookie dough\""),
},
},
"type": "object",
},
"minItems": 0,
"type": "array",
},
},
"type": "object"
},
"minItems": 0,
"type": "array"
},
"sources": sources,
"extras": extras,
},
"type": "object"
}
| 39.681347
| 97
| 0.354182
|
"""
Schema for event objects.
"""
from .common import sources, extras
media_schema = {
"description": ("This \"special\" schema is used in two places in the Event"
" schema, on the top level and inside the agenda item. This is an"
" optional component that may be omited entirely from a document."),
"items": {
"properties": {
"name": {
"type": "string",
"description": ('name of the media link, such as "Recording of'
' the meeting" or "Discussion of construction'
' near the watershed"'),
},
"type": {
"type": "string",
"description": ('type of the set of recordings, such as'
' "recording" or "testimony".'),
},
"date": {
"pattern": "^[0-9]{4}(-[0-9]{2}){0,2}$",
"type": ["string", "null"],
"description": "date of the recording.",
},
"offset": {
"type": ["number", "null"],
"description": ("Offset where the related part starts. This is"
" optional and may be ommited entirely."),
},
"links": {
"description": ("List of links to the same media item, each"
" with a different MIME type."),
"items": {
"properties": {
"mimetype": {
"description": ("Mimetype of the media, such"
" as video/mp4 or audio/webm"),
"type": ["string", "null"]
},
"url": {
"type": "string",
"description": "URL where this media may be accessed",
},
},
"type": "object"
},
"type": "array"
},
},
"type": "object"
},
"type": "array"
}
schema = {
"description": "event data",
"_order": (
('Basics', ('_type', 'name', 'description', 'when', 'end', 'status', 'location')),
('Linked Entities', ('media', 'links', 'participants', 'agenda', 'documents',)),
('Common Fields', ['updated_at', 'created_at', 'sources']),
),
"properties": {
"_type": {
"enum": ["event"],
"type": "string",
"description": ("All events must have a _type field set to one of"
" the entries in the enum below."),
},
"name": {
"type": "string",
"description": ('A simple name of the event, such as "Fiscal'
' subcommittee hearing on pudding cups"')
},
"all_day": {
"type": ["boolean"],
"description": ("Indicates if the event is an all-day event"),
},
"type": {
"type": ["string"],
"description": ("type of event"),
},
# TODO: turn into enum
"updated_at": {
"type": ["string", "datetime"],
"required": False,
"description": "the time that this object was last updated.",
},
"created_at": {
"type": ["string", "datetime"],
"required": False,
"description": "the time that this object was first created.",
},
"description": {
"type": ["string", "null"],
"description": ('A longer description describing the event. As an'
' example, "Topics for discussion include this that'
' and the other thing. In addition, lunch will be'
' served".'),
},
"when": {
"type": ["datetime"],
"description": ("Starting date / time of the event. This should be"
" fully timezone qualified."),
},
"end": {
"type": ["datetime", "null"],
"description": ("Ending date / time of the event. This should"
" be fully timezone qualified."),
},
"status": {
"type": ["string", "null"],
"enum": ["cancelled", "tentative", "confirmed", "passed"],
"description": ("String that denotes the status of the meeting."
" This is useful for showing the meeting is cancelled"
" in a machine-readable way."),
},
"location": {
"description": "Where the event will take place.",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": ('name of the location, such as "City Hall,'
' Boston, MA, USA", or "Room E201, Dolan'
' Science Center, 20700 North Park Blvd'
' University Heights Ohio, 44118"'),
},
"note": {
"type": ["string", "null"],
"description": ('human readable notes regarding the location,'
' something like "The meeting will take place'
' at the Minority Whip\'s desk on the floor"')
},
"url": {
"required": False,
"type": "string",
"description": "URL of the location, if applicable.",
},
"coordinates": {
"description": ('coordinates where this event will take'
' place. If the location hasn\'t (or isn\'t)'
' geolocated or geocodable, than this should'
' be set to null.'),
"type": ["object", "null"],
"properties": {
"latitude": {
"type": "string",
"description": "latitude of the location, if any",
},
"longitude": {
"type": "string",
"description": "longitude of the location, if any",
}
}
},
},
},
"media": media_schema,
"documents": {
"description": ("Links to related documents for the event. Usually,"
" this includes things like pre-written testimony,"
" spreadsheets or a slide deck that a presenter will"
" use."),
"items": {
"properties": {
"name": {
"type": "string",
"description": ('name of the document. Something like'
' "Fiscal Report" or "John Smith\'s'
' Slides".'),
},
"url": {
"type": "string",
"description": "URL where the content may be found.",
},
"mimetype": {
"type": "string",
"description": "Mimetype of the document.",
},
},
"type": "object"
},
"type": "array"
},
"links": {
"description": ("Links related to the event that are not documents"
" or items in the Agenda. This is filled with helpful"
" links for the event, such as a committee's homepage,"
" reference material or links to learn more about subjects"
" related to the event."),
"items": {
"properties": {
"note": {
"description": ('Human-readable name of the link. Something'
' like "Historical precedent for popsicle procurement"'),
"type": "string",
"blank": True,
},
"url": {
"description": "A URL for a link about the event",
"format": "uri",
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"participants": {
"description": ("List of participants in the event. This includes"
" committees invited, legislators chairing the event"
" or people who are attending."),
"items": {
"properties": {
"chamber": {
"type": ["string", "null"],
"description": ("Optional field storing the chamber of"
" the related participant."),
},
"name": {
"type": "string",
"description": "Human readable name of the entitity.",
},
"id": {
"type": ["string", "null"],
"description": "ID of the participant",
},
"type": {
"enum": ["organization", "person"],
"type": "string",
"description": ("What type of entity is this? `person`"
" may be used if the person is not a Legislator,"
" butattending the event, such as an"
" invited speaker or one who is offering"
" testimony."),
},
"note": {
"type": "string",
"description": ("Note regarding the relationship, such"
" as `chair` for the chair of a meeting."),
},
},
"type": "object"
},
"type": "array"
},
"agenda": {
"description": ("Agenda of the event, if any. This contains information"
" about the meeting's agenda, such as bills to"
" discuss or people to present."),
"items": {
"properties": {
"description": {
"type": "string",
"description": ("Human-readable string that represents this"
" agenda item. A good example would be something like"
" The Committee will consider SB 2339, HB 100"),
},
"order": {
"type": ["string", "null"],
"description": ("order of this item, useful for re-creating"
" meeting minutes. This may be ommited entirely."
" It may also optionally contains \"dots\""
" to denote nested agenda items, such as \"1.1.2.1\""
" or \"2\", which may go on as needed."),
},
"subjects": {
"description": ("List of related topics of this agenda"
" item relates to."),
"items": {"type": "string"},
"type": "array"
},
"media": media_schema,
"notes": {
"description": ("List of notes taken during this agenda"
" item, may be used to construct meeting minutes."),
"items": {
"properties": {
"description": {
"type": "string",
"description": ("simple string containing the"
" content of the note."),
},
},
"type": "object"
},
"type": "array"
},
"related_entities": {
"description": ("Entities that relate to this agenda"
" item, such as presenters, legislative"
" instruments, or committees."),
"items": {
"properties": {
"type": {
"type": "string",
"description": ("type of the related object, like"
" `bill` or `organization`."),
},
"id": {
"type": ["string", "null"],
"description": "ID of the related entity",
},
"name": {
"type": "string",
"description": ("human readable string"
" representing the entity,"
" such as `John Q. Smith`."),
},
"note": {
"type": ["string", "null"],
"description": ("human readable string (if any) noting"
" the relationship between the entity and"
" the agenda item, such as \"Jeff"
" will be presenting on the effects"
" of too much cookie dough\""),
},
},
"type": "object",
},
"minItems": 0,
"type": "array",
},
},
"type": "object"
},
"minItems": 0,
"type": "array"
},
"sources": sources,
"extras": extras,
},
"type": "object"
}
| 0
| 0
| 0
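An illustrative payload matching the shape that the event schema above describes; the values are made up, only a few optional sections are filled in, the sources entry shape is assumed from the imported common schema, and no validation is run here.
event = {
    "_type": "event",
    "name": "Fiscal subcommittee hearing on pudding cups",
    "description": "Topics for discussion include budget line items.",
    "when": "2014-03-05T14:00:00-05:00",
    "end": None,
    "status": "confirmed",
    "location": {"name": "Room E201", "note": None, "coordinates": None},
    "participants": [
        {"name": "Finance Committee", "type": "organization",
         "id": None, "chamber": None, "note": "host"},
    ],
    "agenda": [],
    "sources": [{"url": "http://example.com/hearing-notice"}],  # shape assumed
}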
|
9c877e6deca4ca37bf5c51bf382eec0bc84d1116
| 1,717
|
py
|
Python
|
fileupload/urls.py
|
bgreenawald/CS-3240-Semester-Project
|
087c4bfd825793697c6657fe5c298bf2700a081a
|
[
"MIT"
] | null | null | null |
fileupload/urls.py
|
bgreenawald/CS-3240-Semester-Project
|
087c4bfd825793697c6657fe5c298bf2700a081a
|
[
"MIT"
] | null | null | null |
fileupload/urls.py
|
bgreenawald/CS-3240-Semester-Project
|
087c4bfd825793697c6657fe5c298bf2700a081a
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import RedirectView
from fileupload.views import *
app_name='fileupload'
urlpatterns = [
#~ url(r'^list/$', 'fileupload.views.list_files', name='list'),
url(r'^create_report/', 'fileupload.views.create_report', name='create_report'),
url(r'^(?P<report_id>[0-9]+)/', 'fileupload.views.view_report', name='view_report'),
url(r'^browse/$', 'fileupload.views.browse', name='browse'),
url(r'^user_reports/(?P<id>[0-9]+)/$', 'fileupload.views.user_reports', name='user_reports'),
url(r'^inbox/$', 'fileupload.views.inbox', name='inbox'),
url(r'^create_message/$', 'fileupload.views.create_message', name='create_message'),
url(r'^trash/$', 'fileupload.views.trash', name='trash'),
url(r'^delete_report/(?P<report_id>[0-9]+)/$', 'fileupload.views.delete_report', name='delete_report'),
url(r'^edit_report/(?P<report_id>[0-9]+)/$', 'fileupload.views.edit_report', name='edit_report'),
url(r'^view_message/(?P<message_id>[0-9]+)/', 'fileupload.views.view_message', name='view_message'),
url(r'^reply_message/(?P<message_id>[0-9]+)/', 'fileupload.views.reply_message', name='reply_message'),
url(r'^create_folder/$', 'fileupload.views.create_folder', name='create_folder'),
url(r'^edit_folder/(?P<folder_id>[0-9]+)/', 'fileupload.views.edit_folder', name='edit_folder'),
url(r'^delete_folder/(?P<folder_id>[0-9]+)/', 'fileupload.views.delete_folder', name='delete_folder'),
]
| 61.321429
| 107
| 0.70763
|
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import RedirectView
from fileupload.views import *
app_name='fileupload'
urlpatterns = [
#~ url(r'^list/$', 'fileupload.views.list_files', name='list'),
url(r'^create_report/', 'fileupload.views.create_report', name='create_report'),
url(r'^(?P<report_id>[0-9]+)/', 'fileupload.views.view_report', name='view_report'),
url(r'^browse/$', 'fileupload.views.browse', name='browse'),
url(r'^user_reports/(?P<id>[0-9]+)/$', 'fileupload.views.user_reports', name='user_reports'),
url(r'^inbox/$', 'fileupload.views.inbox', name='inbox'),
url(r'^create_message/$', 'fileupload.views.create_message', name='create_message'),
url(r'^trash/$', 'fileupload.views.trash', name='trash'),
url(r'^delete_report/(?P<report_id>[0-9]+)/$', 'fileupload.views.delete_report', name='delete_report'),
url(r'^edit_report/(?P<report_id>[0-9]+)/$', 'fileupload.views.edit_report', name='edit_report'),
url(r'^view_message/(?P<message_id>[0-9]+)/', 'fileupload.views.view_message', name='view_message'),
url(r'^reply_message/(?P<message_id>[0-9]+)/', 'fileupload.views.reply_message', name='reply_message'),
url(r'^create_folder/$', 'fileupload.views.create_folder', name='create_folder'),
url(r'^edit_folder/(?P<folder_id>[0-9]+)/', 'fileupload.views.edit_folder', name='edit_folder'),
url(r'^delete_folder/(?P<folder_id>[0-9]+)/', 'fileupload.views.delete_folder', name='delete_folder'),
]
| 0
| 0
| 0
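The urls.py record above passes view names as dotted strings and imports django.conf.urls.patterns, both of which were removed by Django 1.10. A sketch of two of the routes in the modern style, assuming the same view callables exist in fileupload.views:
from django.urls import path
from fileupload import views

app_name = 'fileupload'
urlpatterns = [
    path('create_report/', views.create_report, name='create_report'),
    path('<int:report_id>/', views.view_report, name='view_report'),
]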
|
0eec5b50d5b837337d0bdd12089a3040dc3427bf
| 5,530
|
py
|
Python
|
.vim/plugged/ultisnips/pythonx/UltiSnips/text_objects/choices.py
|
traitran44/vIDE
|
12ca056ce0223e24146f96d59da6aa60a67a376f
|
[
"MIT"
] | 1
|
2017-04-24T04:07:48.000Z
|
2017-04-24T04:07:48.000Z
|
sources_non_forked/ultisnips/pythonx/UltiSnips/text_objects/choices.py
|
RobotMa/vimrc
|
5beda397d3c6f88b8542d843107a64c42bf13c93
|
[
"MIT"
] | null | null | null |
sources_non_forked/ultisnips/pythonx/UltiSnips/text_objects/choices.py
|
RobotMa/vimrc
|
5beda397d3c6f88b8542d843107a64c42bf13c93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
"""Choices are enumeration values you can choose, by selecting index number.
It is a special TabStop, its content are taken literally, thus said, they will not be parsed recursively.
"""
from UltiSnips import vim_helper
from UltiSnips.position import Position
from UltiSnips.text_objects.tabstop import TabStop
from UltiSnips.snippet.parsing.lexer import ChoicesToken
class Choices(TabStop):
"""See module docstring."""
| 37.619048
| 105
| 0.608318
|
#!/usr/bin/env python3
# encoding: utf-8
"""Choices are enumeration values you can choose, by selecting index number.
It is a special TabStop, its content are taken literally, thus said, they will not be parsed recursively.
"""
from UltiSnips import vim_helper
from UltiSnips.position import Position
from UltiSnips.text_objects.tabstop import TabStop
from UltiSnips.snippet.parsing.lexer import ChoicesToken
class Choices(TabStop):
"""See module docstring."""
def __init__(self, parent, token: ChoicesToken):
self._number = token.number # for TabStop property 'number'
self._initial_text = token.initial_text
# empty choice will be discarded
self._choice_list = [s for s in token.choice_list if len(s) > 0]
self._done = False
self._input_chars = list(self._initial_text)
self._has_been_updated = False
TabStop.__init__(self, parent, token)
def _get_choices_placeholder(self) -> str:
# prefix choices with index number
# e.g. 'a,b,c' -> '1.a|2.b|3.c'
text_segs = []
index = 1
for choice in self._choice_list:
text_segs.append("%s.%s" % (index, choice))
index += 1
text = "|".join(text_segs)
return text
def _update(self, done, buf):
if self._done:
return True
# expand initial text with select prefix number, only once
if not self._has_been_updated:
# '${1:||}' is not valid choice, should be downgraded to plain tabstop
are_choices_valid = len(self._choice_list) > 0
if are_choices_valid:
text = self._get_choices_placeholder()
self.overwrite(buf, text)
else:
self._done = True
self._has_been_updated = True
return True
def _do_edit(self, cmd, ctab=None):
if self._done:
# do as what parent class do
TabStop._do_edit(self, cmd, ctab)
return
ctype, line, col, cmd_text = cmd
cursor = vim_helper.get_cursor_pos()
[buf_num, cursor_line] = map(int, cursor[0:2])
# trying to get what user inputted in current buffer
if ctype == "I":
self._input_chars.append(cmd_text)
elif ctype == "D":
line_text = vim_helper.buf[cursor_line - 1]
self._input_chars = list(line_text[self._start.col: col])
inputted_text = "".join(self._input_chars)
if not self._input_chars:
return
# if there are more than 9 selection candidates,
# may need to wait for 2 inputs to determine selection number
is_all_digits = True
has_selection_terminator = False
# input string sub string of pure digits
inputted_text_for_num = inputted_text
for [i, s] in enumerate(self._input_chars):
if s == " ": # treat space as a terminator for selection
has_selection_terminator = True
inputted_text_for_num = inputted_text[0:i]
elif not s.isdigit():
is_all_digits = False
should_continue_input = False
if is_all_digits or has_selection_terminator:
index_strs = [str(index) for index in list(range(1, len(self._choice_list) + 1))]
matched_index_strs = list(filter(lambda s: s.startswith(inputted_text_for_num), index_strs))
remained_choice_list = []
if len(matched_index_strs) == 0:
remained_choice_list = []
elif has_selection_terminator:
if inputted_text_for_num:
num = int(inputted_text_for_num)
remained_choice_list = list(self._choice_list)[num - 1: num]
elif len(matched_index_strs) == 1:
num = int(inputted_text_for_num)
remained_choice_list = list(self._choice_list)[num - 1: num]
else:
should_continue_input = True
else:
remained_choice_list = []
if should_continue_input:
# will wait for further input
return
buf = vim_helper.buf
if len(remained_choice_list) == 0:
# no matched choice, should quit selection and go on with inputted text
overwrite_text = inputted_text_for_num
self._done = True
elif len(remained_choice_list) == 1:
# only one match
matched_choice = remained_choice_list[0]
overwrite_text = matched_choice
self._done = True
if overwrite_text is not None:
old_end_col = self._end.col
# change _end.col, thus `overwrite` won't alter texts after this tabstop
displayed_text_end_col = self._start.col + len(inputted_text)
self._end.col = displayed_text_end_col
self.overwrite(buf, overwrite_text)
# notify all tabstops those in the same line and after this to adjust their positions
pivot = Position(line, old_end_col)
diff_col = displayed_text_end_col - old_end_col
self._parent._child_has_moved(
self._parent.children.index(self),
pivot,
Position(0, diff_col)
)
vim_helper.set_cursor_from_pos([buf_num, cursor_line, self._end.col + 1])
def __repr__(self):
return "Choices(%s,%r->%r,%r)" % (self._number, self._start, self._end, self._initial_text)
| 4,927
| 0
| 134
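A standalone illustration of the placeholder string Choices._get_choices_placeholder builds above: each non-empty choice is prefixed with its 1-based index and the pieces are joined with '|'.
def choices_placeholder(choice_list):
    # mirrors the record's formatting: ['a', 'b', 'c'] -> '1.a|2.b|3.c'
    return "|".join("%d.%s" % (i, c) for i, c in enumerate(choice_list, start=1))

print(choices_placeholder(["foo", "bar", "baz"]))   # 1.foo|2.bar|3.baz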
|
af5e57ce578d5e381101a8679f6e3120fd34f348
| 457
|
py
|
Python
|
mltk/models/tests/test_tflite_micro_models.py
|
SiliconLabs/mltk
|
56b19518187e9d1c8a0d275de137fc9058984a1f
|
[
"Zlib"
] | null | null | null |
mltk/models/tests/test_tflite_micro_models.py
|
SiliconLabs/mltk
|
56b19518187e9d1c8a0d275de137fc9058984a1f
|
[
"Zlib"
] | 1
|
2021-11-19T20:10:09.000Z
|
2021-11-19T20:10:09.000Z
|
mltk/models/tests/test_tflite_micro_models.py
|
sldriedler/mltk
|
d82a60359cf875f542a2257f1bc7d8eb4bdaa204
|
[
"Zlib"
] | null | null | null |
import pytest
from mltk.utils.test_helper import run_model_operation, generate_run_model_params
@pytest.mark.parametrize(*generate_run_model_params())
@pytest.mark.parametrize(*generate_run_model_params())
| 38.083333
| 81
| 0.822757
|
import pytest
from mltk.utils.test_helper import run_model_operation, generate_run_model_params
@pytest.mark.parametrize(*generate_run_model_params())
def test_tflite_micro_speech(op, tflite, build):
run_model_operation('tflite_micro_speech-test', op, tflite, build)
@pytest.mark.parametrize(*generate_run_model_params())
def test_tflite_micro_magic_wand(op, tflite, build):
run_model_operation('tflite_micro_magic_wand-test', op, tflite, build)
| 204
| 0
| 44
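The test record above leans on pytest parameter expansion; a generic sketch of the same pattern with a hand-written parameter list -- the op/tflite/build names and the body are illustrative stand-ins, not the mltk helper API.
import pytest

@pytest.mark.parametrize("op,tflite,build", [
    ("profile", True, False),
    ("evaluate", False, False),
])
def test_model_operation(op, tflite, build):
    # stand-in body; the real tests delegate to run_model_operation()
    assert op in ("profile", "evaluate")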
|
10bd1640a99feefa6d2eec90c19302930ffb5ea6
| 338
|
py
|
Python
|
Scripts-python/022 Analisador de textos.py
|
rromulofer/python
|
eea56018b7974911fc125202ce556ec2fa59bc7f
|
[
"MIT"
] | 2
|
2021-02-11T18:36:35.000Z
|
2021-09-16T18:00:28.000Z
|
Scripts-python/022 Analisador de textos.py
|
rromulofer/python
|
eea56018b7974911fc125202ce556ec2fa59bc7f
|
[
"MIT"
] | null | null | null |
Scripts-python/022 Analisador de textos.py
|
rromulofer/python
|
eea56018b7974911fc125202ce556ec2fa59bc7f
|
[
"MIT"
] | null | null | null |
name = str(input('Type your name: ')).strip()
print('Uppercase name: {}'.format(name.upper()))
print('Lowercase name: {}'.format(name.lower()))
print('Total letters: {}'.format(len(name) - name.count(' ')))
#print('First name has {} letters. '.format(name.find(' ')))
s = name.split()
print('First name has {} letters.'.format(len(s[0])))
| 48.285714
| 62
| 0.642012
|
name = str(input('Type your name: ')).strip()
print('Uppercase name: {}'.format(name.upper()))
print('Lowercase name: {}'.format(name.lower()))
print('Total letters: {}'.format(len(name) - name.count(' ')))
#print('First name has {} letters. '.format(name.find(' ')))
s = name.split()
print('First name has {} letters.'.format(len(s[0])))
| 0
| 0
| 0
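The record above comments out the name.find(' ') variant because find returns the index of the first space, which equals the first-name length only when a space exists; the split-based line handles single-word names too. A quick check:
name = "Ada Lovelace"
print(name.find(' '))         # 3, but would be -1 for a single-word name
print(len(name.split()[0]))   # 3, the first-name length used in the record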
|
ae49e9537cb9a4005738067ee22e85bd6dde74fa
| 8,659
|
py
|
Python
|
scripts/python/plots.py
|
mlliarm/higgs-decay-classification
|
c65e3c0d54527f9603ebf968b374fe16a2ea84e4
|
[
"MIT"
] | null | null | null |
scripts/python/plots.py
|
mlliarm/higgs-decay-classification
|
c65e3c0d54527f9603ebf968b374fe16a2ea84e4
|
[
"MIT"
] | null | null | null |
scripts/python/plots.py
|
mlliarm/higgs-decay-classification
|
c65e3c0d54527f9603ebf968b374fe16a2ea84e4
|
[
"MIT"
] | null | null | null |
#====================================================
# MODULES
#====================================================
import pandas as pd
import ROOT
import matplotlib.pyplot as plt
import numpy as np
#====================================================
# DATA PREPARATION
#====================================================
model_outputs = pd.read_csv('model_outputs.csv')
model_outputs['Label'] = pd.read_csv('dataset_higgs_challenge.csv')['Label']
model_outputs['KaggleWeight'] = pd.read_csv('dataset_higgs_challenge.csv')['KaggleWeight']
model_outputs['KaggleSet'] = pd.read_csv('dataset_higgs_challenge.csv')['KaggleSet']
predictions_train = model_outputs['Predictions'][model_outputs['KaggleSet'] == 't']
predictions_test = model_outputs['Predictions'][model_outputs['KaggleSet'] == 'v']
weights_train = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't']
weights_test = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v']
labels_train = model_outputs['Label'][model_outputs['KaggleSet'] == 't']
labels_test = model_outputs['Label'][model_outputs['KaggleSet'] == 'v']
predictions_train = (predictions_train - min(predictions_train)) / (max(predictions_train) - min(predictions_train))
predictions_test = (predictions_test - min(predictions_test)) / (max(predictions_test) - min(predictions_test))
train_signal = predictions_train[model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='s']
train_bkg = predictions_train[model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='b']
test_signal = predictions_test[model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='s']
test_bkg = predictions_test[model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='b']
weights_train_signal = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='s']
weights_train_bkg = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='b']
weights_test_signal = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='s']
weights_test_bkg = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='b']
#====================================================
# STYLE SETTINGS
#====================================================
ROOT.gROOT.SetStyle("ATLAS")
c = ROOT.TCanvas("c", "", 750, 700)
bins = 20
hist_train_s = ROOT.TH1D("hist_train_s", "train signal", bins, 0, 1)
hist_test_s = ROOT.TH1D("hist_test_s", "test signal", bins, 0, 1)
hist_train_b = ROOT.TH1D("hist_train_b", "train bkg", bins, 0, 1)
hist_test_b = ROOT.TH1D("hist_test_b", "test bkg", bins, 0, 1)
#====================================================
# FIRST UNWEIGHTED AND NORMALIZED TO UNITY
#====================================================
for i in range(len(train_signal)):
hist_train_s.Fill(train_signal.values[i])
for i in range(len(test_signal)):
hist_test_s.Fill(test_signal.values[i])
for i in range(len(train_bkg)):
hist_train_b.Fill(train_bkg.values[i])
for i in range(len(test_bkg)):
hist_test_b.Fill(test_bkg.values[i])
for hist in [hist_test_s, hist_test_b]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
for hist in [hist_train_s, hist_test_s, hist_train_b, hist_test_b]:
hist.Scale(1/hist.Integral(), 'nosw2')
#Plot settings:
hist_train_b.SetAxisRange(3e-3, 5, 'Y')
hist_train_b.GetYaxis().SetLabelSize(0.04)
hist_train_b.GetYaxis().SetTitleSize(0.04)
hist_train_b.GetYaxis().SetTitle('Event Fraction')
hist_train_b.GetXaxis().SetLabelSize(0.04)
hist_train_b.GetXaxis().SetTitleSize(0.04)
hist_train_b.GetXaxis().SetTitle('Model Output')
hist_train_b.SetLineColor(ROOT.kRed)
hist_train_b.SetLineWidth(3)
hist_train_b.Draw('HIST')
hist_test_b.SetMarkerSize(1.3)
hist_test_b.SetMarkerStyle(3)
hist_test_b.Draw('same')
hist_train_s.SetLineColor(ROOT.kBlue)
hist_train_s.SetLineWidth(3)
hist_train_s.Draw('hist same')
hist_test_s.SetMarkerSize(1.3)
hist_test_s.SetMarkerStyle(8)
hist_test_s.Draw('same')
c.SetLogy()
#Add legend:
legend = ROOT.TLegend(0.52, 0.75, 0.92, 0.9)
legend.SetTextFont(42)
legend.SetFillStyle(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.04)
legend.SetTextAlign(12)
legend.AddEntry(hist_train_s, "Signal (Training)", "lf")
legend.AddEntry(hist_test_s, "Signal (Test)", "pe")
legend.AddEntry(hist_train_b, "Background (Training)" ,"l")
legend.AddEntry(hist_test_b, "Background (Test)", "ep")
legend.Draw("SAME")
text = ROOT.TLatex()
text.SetNDC()
text.SetTextFont(42)
text.SetTextSize(0.04)
text.DrawLatex(0.23, 0.87, "Simulation")
text.DrawLatex(0.23, 0.83, "H #rightarrow #tau^{+}#tau^{-}")
text.DrawLatex(0.23, 0.79, "#sqrt{s} = 8 TeV")
c.Draw()
#Set marker:
marker_types = ROOT.TCanvas('marker_types', '', 0,0,500,200)
marker = ROOT.TMarker()
marker.DisplayMarkerTypes()
marker_types.Draw()
#====================================================
# NOW THE WEIGHTED DISTRIBUTION
#====================================================
c2 = ROOT.TCanvas("c2", "", 750, 700)
bins = 10
hist_train_sw = ROOT.TH1D("hist_train_sw", "train signal", bins, 0, 1)
hist_train_bw = ROOT.TH1D("hist_train_bw", "train bkg", bins, 0, 1)
hist_test_w = ROOT.TH1D("hist_test_w", "test bkg", bins, 0, 1)
for i in range(len(train_signal)):
hist_train_sw.Fill(train_signal.values[i], weights_train_signal.values[i])
for i in range(len(train_bkg)):
hist_train_bw.Fill(train_bkg.values[i], weights_train_bkg.values[i])
for i in range(len(predictions_test)):
hist_test_w.Fill(predictions_test.values[i], weights_test.values[i])
for hist in [hist_train_sw, hist_train_bw, hist_test_w]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
hist_train_sw.SetFillColorAlpha(ROOT.kAzure-1,.6)
hist_train_bw.SetFillColorAlpha(ROOT.kRed-4, .9)
hist_train_sw.SetLineWidth(1)
hist_train_bw.SetLineWidth(1)
#Axes
hist_train_bw.GetYaxis().SetLabelSize(0.04)
hist_train_bw.GetYaxis().SetTitleSize(0.04)
hist_train_bw.GetYaxis().SetTitle('Events')
hist_train_bw.GetXaxis().SetLabelSize(0.04)
hist_train_bw.GetXaxis().SetTitleSize(0.04)
hist_train_bw.GetXaxis().SetTitle('Model Output')
hist_train_bw.Draw()
#Stack
hs = ROOT.THStack("hs", "Weighted Distributions")
hs.Add(hist_train_sw)
hs.Add(hist_train_bw)
hs.SetMinimum(20)
hs.SetMaximum(1e7)
hs.Draw('hist')
hs.SetHistogram(hist_train_bw)
hist_test_w.Draw('same')
#Legend
legend = ROOT.TLegend(0.5, 0.75, 0.8, 0.9)
legend.SetTextFont(42)
legend.SetFillStyle(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.04)
legend.SetTextAlign(12)
legend.AddEntry(hist_train_sw, "Signal (Training)", "f")
legend.AddEntry(hist_train_bw, "Background (Training)", "f")
legend.AddEntry(hist_test_w, "Test", "pe")
legend.Draw("SAME")
#Text
text = ROOT.TLatex()
text.SetNDC()
text.SetTextFont(42)
text.SetTextSize(0.04)
text.DrawLatex(0.23, 0.87, "Simulation")
text.DrawLatex(0.23, 0.83, "H #rightarrow #tau^{+}#tau^{-}")
text.DrawLatex(0.23, 0.79, "#sqrt{s} = 8 TeV")
c2.SetLogy()
c2.Draw()
#====================================================
# SAVE CANVAS
#====================================================
c2.SaveAs('weighted.png')
c2.SaveAs('weighted.pdf')
w = ROOT.TColorWheel()
cw = ROOT.TCanvas("cw","cw",0,0,800,800)
w.SetCanvas(cw)
w.Draw()
cw.Draw()
#====================================================
# RATIO PLOT
#====================================================
bins = 10
hist_train_sw = ROOT.TH1D("hist_train_sw", "train signal", bins, 0, 1)
hist_train_bw = ROOT.TH1D("hist_train_bw", "train bkg", bins, 0, 1)
hist_test_w = ROOT.TH1D("hist_test_w", "test bkg", bins, 0, 1)
for i in range(len(train_signal)):
hist_train_sw.Fill(train_signal.values[i], weights_train_signal.values[i])
for i in range(len(train_bkg)):
hist_train_bw.Fill(train_bkg.values[i], weights_train_bkg.values[i])
for i in range(len(predictions_test)):
hist_test_w.Fill(predictions_test.values[i], weights_test.values[i])
for hist in [hist_train_sw, hist_train_bw, hist_test_w]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
c3 = ROOT.TCanvas("c3", "Ratio Plot", 700, 750)
upper_pad = ROOT.TPad("upper_pad", "", 0, 0.25, 1, 1)
lower_pad = ROOT.TPad("lower_pad", "", 0, 0, 1, 0.25)
for pad in [upper_pad, lower_pad]:
pad.SetLeftMargin(0.14)
pad.SetRightMargin(0.05)
pad.SetTickx(True)
pad.SetTicky(True)
upper_pad.SetBottomMargin(0)
lower_pad.SetTopMargin(0)
lower_pad.SetBottomMargin(0.3)
upper_pad.Draw()
lower_pad.Draw()
c3.Draw()
| 35.05668
| 116
| 0.667052
|
#====================================================
# MODULES
#====================================================
import pandas as pd
import ROOT
import matplotlib.pyplot as plt
import numpy as np
#====================================================
# DATA PREPARATION
#====================================================
model_outputs = pd.read_csv('model_outputs.csv')
model_outputs['Label'] = pd.read_csv('dataset_higgs_challenge.csv')['Label']
model_outputs['KaggleWeight'] = pd.read_csv('dataset_higgs_challenge.csv')['KaggleWeight']
model_outputs['KaggleSet'] = pd.read_csv('dataset_higgs_challenge.csv')['KaggleSet']
predictions_train = model_outputs['Predictions'][model_outputs['KaggleSet'] == 't']
predictions_test = model_outputs['Predictions'][model_outputs['KaggleSet'] == 'v']
weights_train = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't']
weights_test = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v']
labels_train = model_outputs['Label'][model_outputs['KaggleSet'] == 't']
labels_test = model_outputs['Label'][model_outputs['KaggleSet'] == 'v']
predictions_train = (predictions_train - min(predictions_train)) / (max(predictions_train) - min(predictions_train))
predictions_test = (predictions_test - min(predictions_test)) / (max(predictions_test) - min(predictions_test))
train_signal = predictions_train[model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='s']
train_bkg = predictions_train[model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='b']
test_signal = predictions_test[model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='s']
test_bkg = predictions_test[model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='b']
weights_train_signal = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='s']
weights_train_bkg = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 't'][model_outputs['Label']=='b']
weights_test_signal = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='s']
weights_test_bkg = model_outputs['KaggleWeight'][model_outputs['KaggleSet'] == 'v'][model_outputs['Label']=='b']
#====================================================
# STYLE SETTINGS
#====================================================
ROOT.gROOT.SetStyle("ATLAS")
c = ROOT.TCanvas("c", "", 750, 700)
bins = 20
hist_train_s = ROOT.TH1D("hist_train_s", "train signal", bins, 0, 1)
hist_test_s = ROOT.TH1D("hist_test_s", "test signal", bins, 0, 1)
hist_train_b = ROOT.TH1D("hist_train_b", "train bkg", bins, 0, 1)
hist_test_b = ROOT.TH1D("hist_test_b", "test bkg", bins, 0, 1)
#====================================================
# FIRST UNWEIGHTED AND NORMALIZED TO UNITY
#====================================================
for i in range(len(train_signal)):
hist_train_s.Fill(train_signal.values[i])
for i in range(len(test_signal)):
hist_test_s.Fill(test_signal.values[i])
for i in range(len(train_bkg)):
hist_train_b.Fill(train_bkg.values[i])
for i in range(len(test_bkg)):
hist_test_b.Fill(test_bkg.values[i])
for hist in [hist_test_s, hist_test_b]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
for hist in [hist_train_s, hist_test_s, hist_train_b, hist_test_b]:
hist.Scale(1/hist.Integral(), 'nosw2')
#Plot settings:
hist_train_b.SetAxisRange(3e-3, 5, 'Y')
hist_train_b.GetYaxis().SetLabelSize(0.04)
hist_train_b.GetYaxis().SetTitleSize(0.04)
hist_train_b.GetYaxis().SetTitle('Event Fraction')
hist_train_b.GetXaxis().SetLabelSize(0.04)
hist_train_b.GetXaxis().SetTitleSize(0.04)
hist_train_b.GetXaxis().SetTitle('Model Output')
hist_train_b.SetLineColor(ROOT.kRed)
hist_train_b.SetLineWidth(3)
hist_train_b.Draw('HIST')
hist_test_b.SetMarkerSize(1.3)
hist_test_b.SetMarkerStyle(3)
hist_test_b.Draw('same')
hist_train_s.SetLineColor(ROOT.kBlue)
hist_train_s.SetLineWidth(3)
hist_train_s.Draw('hist same')
hist_test_s.SetMarkerSize(1.3)
hist_test_s.SetMarkerStyle(8)
hist_test_s.Draw('same')
c.SetLogy()
#Add legend:
legend = ROOT.TLegend(0.52, 0.75, 0.92, 0.9)
legend.SetTextFont(42)
legend.SetFillStyle(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.04)
legend.SetTextAlign(12)
legend.AddEntry(hist_train_s, "Signal (Training)", "lf")
legend.AddEntry(hist_test_s, "Signal (Test)", "pe")
legend.AddEntry(hist_train_b, "Background (Training)" ,"l")
legend.AddEntry(hist_test_b, "Background (Test)", "ep")
legend.Draw("SAME")
text = ROOT.TLatex()
text.SetNDC()
text.SetTextFont(42)
text.SetTextSize(0.04)
text.DrawLatex(0.23, 0.87, "Simulation")
text.DrawLatex(0.23, 0.83, "H #rightarrow #tau^{+}#tau^{-}")
text.DrawLatex(0.23, 0.79, "#sqrt{s} = 8 TeV")
c.Draw()
#Set marker:
marker_types = ROOT.TCanvas('marker_types', '', 0,0,500,200)
marker = ROOT.TMarker()
marker.DisplayMarkerTypes()
marker_types.Draw()
#====================================================
# NOW THE WEIGHTED DISTRIBUTION
#====================================================
c2 = ROOT.TCanvas("c2", "", 750, 700)
bins = 10
hist_train_sw = ROOT.TH1D("hist_train_sw", "train signal", bins, 0, 1)
hist_train_bw = ROOT.TH1D("hist_train_bw", "train bkg", bins, 0, 1)
hist_test_w = ROOT.TH1D("hist_test_w", "test bkg", bins, 0, 1)
for i in range(len(train_signal)):
hist_train_sw.Fill(train_signal.values[i], weights_train_signal.values[i])
for i in range(len(train_bkg)):
hist_train_bw.Fill(train_bkg.values[i], weights_train_bkg.values[i])
for i in range(len(predictions_test)):
hist_test_w.Fill(predictions_test.values[i], weights_test.values[i])
for hist in [hist_train_sw, hist_train_bw, hist_test_w]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
hist_train_sw.SetFillColorAlpha(ROOT.kAzure-1,.6)
hist_train_bw.SetFillColorAlpha(ROOT.kRed-4, .9)
hist_train_sw.SetLineWidth(1)
hist_train_bw.SetLineWidth(1)
#Axes
hist_train_bw.GetYaxis().SetLabelSize(0.04)
hist_train_bw.GetYaxis().SetTitleSize(0.04)
hist_train_bw.GetYaxis().SetTitle('Events')
hist_train_bw.GetXaxis().SetLabelSize(0.04)
hist_train_bw.GetXaxis().SetTitleSize(0.04)
hist_train_bw.GetXaxis().SetTitle('Model Output')
hist_train_bw.Draw()
#Stack
hs = ROOT.THStack("hs", "Weighted Distributions")
hs.Add(hist_train_sw)
hs.Add(hist_train_bw)
hs.SetMinimum(20)
hs.SetMaximum(1e7)
hs.Draw('hist')
hs.SetHistogram(hist_train_bw)
hist_test_w.Draw('same')
#Legend
legend = ROOT.TLegend(0.5, 0.75, 0.8, 0.9)
legend.SetTextFont(42)
legend.SetFillStyle(0)
legend.SetBorderSize(0)
legend.SetTextSize(0.04)
legend.SetTextAlign(12)
legend.AddEntry(hist_train_sw, "Signal (Training)", "f")
legend.AddEntry(hist_train_bw, "Background (Training)", "f")
legend.AddEntry(hist_test_w, "Test", "pe")
legend.Draw("SAME")
#Text
text = ROOT.TLatex()
text.SetNDC()
text.SetTextFont(42)
text.SetTextSize(0.04)
text.DrawLatex(0.23, 0.87, "Simulation")
text.DrawLatex(0.23, 0.83, "H #rightarrow #tau^{+}#tau^{-}")
text.DrawLatex(0.23, 0.79, "#sqrt{s} = 8 TeV")
c2.SetLogy()
c2.Draw()
#====================================================
# SAVE CANVAS
#====================================================
c2.SaveAs('weighted.png')
c2.SaveAs('weighted.pdf')
w = ROOT.TColorWheel()
cw = ROOT.TCanvas("cw","cw",0,0,800,800)
w.SetCanvas(cw)
w.Draw()
cw.Draw()
#====================================================
# RATIO PLOT
#====================================================
bins = 10
hist_train_sw = ROOT.TH1D("hist_train_sw", "train signal", bins, 0, 1)
hist_train_bw = ROOT.TH1D("hist_train_bw", "train bkg", bins, 0, 1)
hist_test_w = ROOT.TH1D("hist_test_w", "test bkg", bins, 0, 1)
for i in range(len(train_signal)):
hist_train_sw.Fill(train_signal.values[i], weights_train_signal.values[i])
for i in range(len(train_bkg)):
hist_train_bw.Fill(train_bkg.values[i], weights_train_bkg.values[i])
for i in range(len(predictions_test)):
hist_test_w.Fill(predictions_test.values[i], weights_test.values[i])
for hist in [hist_train_sw, hist_train_bw, hist_test_w]:
for i in range(1, hist.GetNbinsX()+1):
hist.SetBinError(i, np.sqrt(hist.GetBinContent(i)))
c3 = ROOT.TCanvas("c3", "Ratio Plot", 700, 750)
upper_pad = ROOT.TPad("upper_pad", "", 0, 0.25, 1, 1)
lower_pad = ROOT.TPad("lower_pad", "", 0, 0, 1, 0.25)
for pad in [upper_pad, lower_pad]:
pad.SetLeftMargin(0.14)
pad.SetRightMargin(0.05)
pad.SetTickx(True)
pad.SetTicky(True)
upper_pad.SetBottomMargin(0)
lower_pad.SetTopMargin(0)
lower_pad.SetBottomMargin(0.3)
upper_pad.Draw()
lower_pad.Draw()
c3.Draw()
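# The RATIO PLOT section above only prepares the canvas and the two pads; a minimal,
# hypothetical continuation (not part of the original script) could draw the training
# histograms on top and a test / (signal + background) ratio in the lower pad.
upper_pad.cd()
hist_train_bw.Draw('hist')
hist_train_sw.Draw('hist same')
hist_test_w.Draw('same')
lower_pad.cd()
hist_total = hist_train_sw.Clone('hist_total')   # signal + background expectation
hist_total.Add(hist_train_bw)
hist_ratio = hist_test_w.Clone('hist_ratio')     # test data over expectation, bin by bin
hist_ratio.Divide(hist_total)
hist_ratio.GetYaxis().SetTitle('Test / Train')
hist_ratio.GetXaxis().SetTitle('Model Output')
hist_ratio.Draw('ep')
c3.SaveAs('ratio.png')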
| 0
| 0
| 0
|
e7ac312acc5098ff8df1adff5d7683c9356e8c8b
| 444
|
py
|
Python
|
tests/kyu_7_tests/test_filter_list.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_7_tests/test_filter_list.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_7_tests/test_filter_list.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from katas.kyu_7.filter_list import filter_list
| 27.75
| 71
| 0.596847
|
import unittest
from katas.kyu_7.filter_list import filter_list
class FilterListTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(filter_list([1, 2, 'a', 'b']), [1, 2])
def test_equals_2(self):
self.assertEqual(filter_list([1, 'a', 'b', 0, 15]), [1, 0, 15])
def test_equals_3(self):
self.assertEqual(filter_list([1, 2, 'aasf', '1', '123', 123]),
[1, 2, 123])
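# The module under test (katas.kyu_7.filter_list) is not included in this record.
# A minimal sketch of an implementation that would satisfy the three assertions above
# (the real kata solution may differ): keep the non-string elements, preserving order.
def filter_list(items):
    return [x for x in items if not isinstance(x, str)]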
| 252
| 23
| 103
|
75d5f54b3dff3715cd2acdc0ff97e60f3dc126bf
| 6,936
|
py
|
Python
|
program/demo/Decide-topic-color.py
|
shutokawabata0723/kenkyu
|
b613b4daddca9b8b16efe0802669611948daea18
|
[
"MIT"
] | 1
|
2021-05-06T03:35:16.000Z
|
2021-05-06T03:35:16.000Z
|
program/demo/Decide-topic-color.py
|
shutokawabata0723/kenkyu
|
b613b4daddca9b8b16efe0802669611948daea18
|
[
"MIT"
] | null | null | null |
program/demo/Decide-topic-color.py
|
shutokawabata0723/kenkyu
|
b613b4daddca9b8b16efe0802669611948daea18
|
[
"MIT"
] | null | null | null |
#coding:utf-8
PURPLE = '\033[35m'
RED = '\033[31m'
CYAN = '\033[36m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import csv
import sys
import codecs
from urllib.parse import urlparse #URL --> Domain
from time import sleep
dict_web_id = {}
dict_url = {}
dict_topic = {}
dict_suggest = {}
dict_sub = {}
dict_bin = {}
domains =set()
urls =set()
################### Read the subtopic list ###################
a = open('sub_list.csv', 'r')
set_subtopic_keys = set()
for line in a:
LINE = line.rstrip().split(',')
web_id = LINE[0]
url = LINE[1]
topic = LINE[2]
sub_id = LINE[3]
Domain = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url))
domains.add(Domain)
dict_web_id.setdefault(Domain, set()).add(web_id)
dict_sub.setdefault(Domain, set()).add(sub_id)
dict_topic.setdefault(Domain, set()).add(topic)
set_topic=dict_topic[Domain]
set_sub=dict_sub[Domain]
    set_subtopic_keys=dict_sub.keys() # the set of dict_sub keys
a.close()
################# Read the bin list ###########################
A = open('bin_list.csv','r')
set_bin_keys = set()
for line in A:
LINE = line.rstrip().split(',')
web_id = LINE[0]
url = LINE[1]
topic = LINE[2]
bin_id = LINE[3]
Domain = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url))
domains.add(Domain)
dict_web_id.setdefault(Domain, set()).add(web_id)
dict_topic.setdefault(Domain, set()).add(topic)
dict_bin.setdefault(Domain, set()).add(bin_id)
set_topic = dict_topic[Domain]
set_bin = dict_bin[Domain]
set_bin_keys = dict_bin.keys()
A.close()
################### Read the know-how sites ######################
b = open('know-how.csv','r')
count = 0
set_know_how = set()
dict_title = {}
dict_predict={}
dict_confidence={}
dict_truth={}
for line in b:
count = count + 1
print(line)
LINE = line.rstrip().split(',')
Domain = LINE[2]
Domain = Domain + '/'
Title = LINE[3]
predict= LINE[4]
confidence=LINE[5]
truth=LINE[1]
set_know_how.add(Domain)
dict_title[Domain] = Title
dict_predict[Domain]=predict
dict_confidence[Domain]=confidence
dict_truth[Domain]=truth
b.close()
#################### Create an HTML file for each domain #####################
p = open('result.csv','w')
p.write('domain_id\ttitle\tpredict\tconfidence\tsum_page\tsum_topic\ttopics\ttruth\n')
make_domain_dict()
#suggest_id()
p.close()
print (len(set_know_how))
print (RED + 'Program ended' + ENDC)
| 24.94964
| 204
| 0.60496
|
#coding:utf-8
PURPLE = '\033[35m'
RED = '\033[31m'
CYAN = '\033[36m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import csv
import sys
import codecs
from urllib.parse import urlparse #URL --> Domain
from time import sleep
dict_web_id = {}
dict_url = {}
dict_topic = {}
dict_suggest = {}
dict_sub = {}
dict_bin = {}
domains =set()
urls =set()
################### Read the subtopic list ###################
a = open('sub_list.csv', 'r')
set_subtopic_keys = set()
for line in a:
LINE = line.rstrip().split(',')
web_id = LINE[0]
url = LINE[1]
topic = LINE[2]
sub_id = LINE[3]
Domain = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url))
domains.add(Domain)
dict_web_id.setdefault(Domain, set()).add(web_id)
dict_sub.setdefault(Domain, set()).add(sub_id)
dict_topic.setdefault(Domain, set()).add(topic)
set_topic=dict_topic[Domain]
set_sub=dict_sub[Domain]
    set_subtopic_keys=dict_sub.keys() # the set of dict_sub keys
a.close()
################# Read the bin list ###########################
A = open('bin_list.csv','r')
set_bin_keys = set()
for line in A:
LINE = line.rstrip().split(',')
web_id = LINE[0]
url = LINE[1]
topic = LINE[2]
bin_id = LINE[3]
Domain = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url))
domains.add(Domain)
dict_web_id.setdefault(Domain, set()).add(web_id)
dict_topic.setdefault(Domain, set()).add(topic)
dict_bin.setdefault(Domain, set()).add(bin_id)
set_topic = dict_topic[Domain]
set_bin = dict_bin[Domain]
set_bin_keys = dict_bin.keys()
A.close()
################### Read the know-how sites ######################
b = open('know-how.csv','r')
count = 0
set_know_how = set()
dict_title = {}
dict_predict={}
dict_confidence={}
dict_truth={}
for line in b:
count = count + 1
print(line)
LINE = line.rstrip().split(',')
Domain = LINE[2]
Domain = Domain + '/'
Title = LINE[3]
predict= LINE[4]
confidence=LINE[5]
truth=LINE[1]
set_know_how.add(Domain)
dict_title[Domain] = Title
dict_predict[Domain]=predict
dict_confidence[Domain]=confidence
dict_truth[Domain]=truth
b.close()
#################### Create an HTML file for each domain #####################
p = open('result.csv','w')
p.write('domain_id\ttitle\tpredict\tconfidence\tsum_page\tsum_topic\ttopics\ttruth\n')
def make_domain_dict():
set_sugge = set()
domain_dict ={}
domain_id = 0
for domain in domains:
if domain in set_know_how:
domain_id = domain_id + 1
print (OKGREEN + 'domain_id=' + str(domain_id) + ENDC)
set_topic = dict_topic[domain]
set_web_id = dict_web_id[domain]
if domain in set_subtopic_keys:
                set_subtopic = dict_sub[domain] # case: the domain has subtopics
else:
                set_subtopic = 'N' # case: the domain has no subtopics
if domain in set_bin_keys:
set_bin = dict_bin[domain]
else:
set_bin = 'N'
count_topic = len(set_topic)
count_subtopic= len(set_subtopic)
count_bin = len(set_bin)
count_page = len(set_web_id)
domain_dict["DOMAIN_ID"] = domain_id
domain_dict["DOMAIN"] = domain
domain_dict["WEB_ID"] = set_web_id
#domain_dict["URL"] = set_url
domain_dict["TOPIC"] = set_topic
domain_dict["SUBTOPIC"] = set_subtopic
domain_dict["BIN"] = set_bin
domain_dict["CT"] = count_topic
domain_dict["TITLE"] = dict_title[domain]
print ( '[domain]--->' + domain)
print (OKGREEN + '[domain_id]--->' + str(domain_id) + ENDC)
print ( '[web_id]-->' + str(set_web_id))
print (OKGREEN + '[count page]--->' + str(count_page) + ENDC)
print ('[TOPIC]->' + str(list(set_topic)))
print (OKGREEN + '[count topic]--->' + str(count_topic) + ENDC)
print ('[SUBTOPIC]->' + str(list(set_subtopic)))
print (OKGREEN + '[count subtopic]->' + str(count_subtopic) + ENDC)
print ('')
print ('')
strings = (str(domain_id)+'\t'\
+str(dict_title[domain])+'\t'\
+str(dict_predict[domain])+'\t'\
+str(dict_confidence[domain])+'\t'\
+str(count_page)+'\t'\
+str(count_topic)+'\t'\
+str(list(set_topic))+'\t'\
+str(dict_truth[domain])+'\t'\
+str(domain)+'\n')
p.write(strings)
sleep(1)
make_html(domain_dict, domain_id)
def make_html(domain_dict, domain_id):
topic_ids = domain_dict["TOPIC"]
web_ids = domain_dict["WEB_ID"]
sub_ids = domain_dict["SUBTOPIC"]
bin_ids = domain_dict["BIN"]
title = domain_dict["TITLE"]
h = open('test1.html','r')
i = open('domain_'+str(domain_id)+'.html','w')
#exec("i=open('domain_%d.html','w')"%(domain_id))
flag = 3
for row in h:
#sleep(0.05)
        # Section dealing with topics
if 'class="suggest_list_small_class" value="' in row:
#value
V1 = row.split('class="suggest_list_small_class" value="')
V2 = V1[1]
V2 = V2.split('"')
value = V2[0]
#TOPIC
#L1 = row.split('(topic:')
#L2 = L1[1]
#L2 = L2.split(')')
#TOPIC_NUM = L2[0]
TOPIC_NUM = value
#TITLE
R1 = row.split('">')
R2 = R1[1]
R2 = R2.split('<')
title = R2[0]
if TOPIC_NUM in topic_ids:
string = '<div id="suggest_list_small_class_'+str(value)+'" class="suggest_list_small_class" value="'+str(value)+'" style="background-color: orange;"><font color="black">'+str(title)+'</font></div>\n'
i.write(string)
else:
i.write(row)
        # Section dealing with the subtopic and bin web pages
elif '<!--web_id_sub:' in row:
flag = 0
#web_id_sub
L1 = row.split('<!--web_id_sub:')
L2 = L1[1]
L2 = L2.split('-->')
WEB_ID_SUB = L2[0]
if WEB_ID_SUB in web_ids:
i.write(row)
flag = 1
elif flag == 0 and '<!-- for search_result -->' in row:
flag = 1
        # Subtopic title section
elif 'class="subtopic_list"' in row:
L1 = row.split('subtopic_list_')
L2 = L1[1]
L2 = L2.split('"')
sub_id = L2[0]
if sub_id in sub_ids:
flag = 4
i.write(row)
elif 'class ="expand_comma"' in row and flag == 4:
L1 = row.split('class ="expand_comma"')
front = L1[0]
back = L1[1]
string = str(front) + ' class ="expand_comma" style="background-color: orange;"' + str(back)
i.write(string)
flag = 1
elif 'class="summary_list"' in row:
flag = 0
elif flag == 0 and '<!-- for summary_list -->' in row:
flag = 1
        # Bin section
elif 'class="bin_list"' in row:
L1 = row.split('bin_list_')
L2 = L1[1]
L2 = L2.split('"')
bin_id = L2[0]
if bin_id in bin_ids:
flag = 4
i.write(row)
        # Site name
elif 'id="suggest_list_title"' in row:
string = '<div id="suggest_list_title"><font size="4">'+str(title)+'</font></div>'
i.write(string)
        # Output only the target URLs
elif flag == 1:
i.write(row)
        # No output for anything other than the target URLs
elif flag == 0:
#print 'No sentence'
command = 'Nothing'
        # Output all other parts unchanged
else:
i.write(row)
h.close()
i.close()
sleep(0.1)
make_domain_dict()
#suggest_id()
p.close()
print (len(set_know_how))
print (RED + 'Program ended' + ENDC)
| 4,609
| 0
| 45
|
05ecb7474295517cf171a8fb7afcbdc8d005f9dd
| 337
|
py
|
Python
|
stage_0_Wealth.py
|
ssiddhantsharma/team-greider
|
4b2725ff64614fd4e200606b06e8d9ea132b8ec8
|
[
"MIT"
] | 9
|
2021-08-01T20:26:55.000Z
|
2021-08-07T11:32:25.000Z
|
stage_0_Wealth.py
|
ssiddhantsharma/team-greider
|
4b2725ff64614fd4e200606b06e8d9ea132b8ec8
|
[
"MIT"
] | 2
|
2021-08-02T09:08:09.000Z
|
2021-08-03T21:10:24.000Z
|
stage_0_Wealth.py
|
ssiddhantsharma/team-greider
|
4b2725ff64614fd4e200606b06e8d9ea132b8ec8
|
[
"MIT"
] | 16
|
2021-08-01T19:41:45.000Z
|
2021-08-06T09:26:15.000Z
|
#personaldetails
print("NAME:Wealth Okete \nE-MAIL: wealth.okete@gmail.com \nSLACK USERNAME: @Wealth \nBIOSTACK: Genomics \nTwitter Handle: @Wealty")
print(hamming_distance('@Wealth','@Wealty'))
| 30.636364
| 133
| 0.637982
|
#personaldetails
print("NAME:Wealth Okete \nE-MAIL: wealth.okete@gmail.com \nSLACK USERNAME: @Wealth \nBIOSTACK: Genomics \nTwitter Handle: @Wealty")
def hamming_distance(a,b):
count=0
for i in range(len(a)):
if a[i] != b[i]:
count +=1
return count
print(hamming_distance('@Wealth','@Wealty'))
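# Quick sanity check of hamming_distance above (it assumes both strings have the
# same length): '@Wealth' and '@Wealty' differ only in the final character.
assert hamming_distance('@Wealth', '@Wealty') == 1
assert hamming_distance('abc', 'abc') == 0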
| 114
| 0
| 23
|
5681df8daee7cd3898d6815b687eb1b76c33923d
| 1,594
|
py
|
Python
|
conditional statements in python.py
|
K-P1/kp-learning-python
|
67e63a53b93f269ba25d45f6811727382edf3fff
|
[
"bzip2-1.0.6"
] | null | null | null |
conditional statements in python.py
|
K-P1/kp-learning-python
|
67e63a53b93f269ba25d45f6811727382edf3fff
|
[
"bzip2-1.0.6"
] | null | null | null |
conditional statements in python.py
|
K-P1/kp-learning-python
|
67e63a53b93f269ba25d45f6811727382edf3fff
|
[
"bzip2-1.0.6"
] | null | null | null |
conditional statements in python:
these perform different computations or actions depending on whether a specific boolean expression evaluates to true or false.
they are handled by if statements in python.
from maths:
equals: a==b
not equals: a != b
less than: a<b
greater than: a>b
greater than or equal to: a>=b
example of an if statement:
ade_height= 6.25
oyin_height= 5.75
if ade_height > oyin_height:
    print("ade is taller than oyin")
The elif keyword:
the elif keyword is python's way of saying "if the previous condition was not true, then try this condition"
example-
boys_score=24.77
girls_score=25.01
if boys_score>girls_score:
    print("boys win, girls lose")
elif girls_score>boys_score:
    print("girls win, boys lose")
the else keyword:
the else keyword catches anything which isn't caught by the preceding conditions.
example-
#program to calc the longer journey
#between lagos-ibadan and lagos-london
lb_max_time=2.5
ll_max_time=6
if lb_max_time>ll_max_time:
    print("lagos to ibadan takes more time")
elif lb_max_time<ll_max_time:
    print("lagos to london takes more time")
else:
    print("both take equal time")
using logical operators:
you can use the operators 'and', 'or' and 'not' in python conditional statements.
for example:
x=200
y=33
z=500
if x>y and z>x:
    print("both conditions are true")
the pass keyword:
if statements cannot be empty, but if you for some reason have an if statement with no content, put in the pass statement to avoid getting an error.
example
boys=17
if boys==17:
    pass
| 26.131148
| 149
| 0.732748
|
conditional statements in python:
these perform different computations or actions depending on whether a specific boolean expression evaluates to true or false.
they are handled by if statements in python.
from maths:
equals: a==b
not equals: a != b
less than: a<b
greater than: a>b
greater than or equal to: a>=b
example of an if statement:
ade_height= 6.25
oyin_height= 5.75
if ade_height > oyin_height:
    print("ade is taller than oyin")
The elif keyword:
the elif keyword is python's way of saying "if the previous condition was not true, then try this condition"
example-
boys_score=24.77
girls_score=25.01
if boys_score>girls_score:
    print("boys win, girls lose")
elif girls_score>boys_score:
    print("girls win, boys lose")
the else keyword:
the else keyword catches anything which isn't caught by the preceding conditions.
example-
#program to calc the longer journey
#between lagos-ibadan and lagos-london
lb_max_time=2.5
ll_max_time=6
if lb_max_time>ll_max_time:
    print("lagos to ibadan takes more time")
elif lb_max_time<ll_max_time:
    print("lagos to london takes more time")
else:
    print("both take equal time")
using logical operators:
you can use the operators 'and', 'or' and 'not' in python conditional statements.
for example:
x=200
y=33
z=500
if x>y and z>x:
    print("both conditions are true")
the pass keyword:
if statements cannot be empty, but if you for some reason have an if statement with no content, put in the pass statement to avoid getting an error.
example
boys=17
if boys==17:
    pass
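putting the pieces above together, a small runnable example combining if/elif/else, a logical operator and pass:
boys_score = 24.77
girls_score = 25.01
if boys_score > girls_score:
    print("boys win, girls lose")
elif girls_score > boys_score:
    print("girls win, boys lose")
else:
    print("it is a tie")
x, y, z = 200, 33, 500
if x > y and z > x:
    print("both conditions are true")
boys = 17
if boys == 17:
    pass  # an empty branch needs pass to avoid a syntax error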
| 0
| 0
| 0
|
8645205f23ad064961651704a81244a750672741
| 27,895
|
py
|
Python
|
src/sentry/event_manager.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/event_manager.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/event_manager.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
from datetime import datetime, timedelta
from collections import OrderedDict
from django.conf import settings
from django.db import connection, IntegrityError, router, transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.encoding import force_bytes
from hashlib import md5
from uuid import uuid4
from sentry import eventtypes
from sentry.app import buffer, tsdb
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH
)
from sentry.interfaces.base import get_interface, iter_interfaces
from sentry.models import (
Activity, Event, EventMapping, EventUser, Group, GroupHash, GroupResolution,
GroupStatus, Project, Release, TagKey, UserReport
)
from sentry.plugins import plugins
from sentry.signals import first_event_received, regression_signal
from sentry.utils.logging import suppress_exceptions
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import post_process_group
from sentry.utils.cache import default_cache
from sentry.utils.db import get_db_engine
from sentry.utils.safe import safe_execute, trim, trim_dict
from sentry.utils.strings import truncatechars
from sentry.utils.validators import validate_ip
if not settings.SENTRY_SAMPLE_DATA:
else:
| 33.447242
| 100
| 0.579745
|
"""
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
from datetime import datetime, timedelta
from collections import OrderedDict
from django.conf import settings
from django.db import connection, IntegrityError, router, transaction
from django.db.models import Q
from django.utils import timezone
from django.utils.encoding import force_bytes
from hashlib import md5
from uuid import uuid4
from sentry import eventtypes
from sentry.app import buffer, tsdb
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH
)
from sentry.interfaces.base import get_interface, iter_interfaces
from sentry.models import (
Activity, Event, EventMapping, EventUser, Group, GroupHash, GroupResolution,
GroupStatus, Project, Release, TagKey, UserReport
)
from sentry.plugins import plugins
from sentry.signals import first_event_received, regression_signal
from sentry.utils.logging import suppress_exceptions
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import post_process_group
from sentry.utils.cache import default_cache
from sentry.utils.db import get_db_engine
from sentry.utils.safe import safe_execute, trim, trim_dict
from sentry.utils.strings import truncatechars
from sentry.utils.validators import validate_ip
def count_limit(count):
# TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
# ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
for amount, sample_rate in settings.SENTRY_SAMPLE_RATES:
if count <= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence): # ~ 3600 per hour
for amount, sample_rate in settings.SENTRY_SAMPLE_TIMES:
if silence >= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_TIME
def md5_from_hash(hash_bits):
result = md5()
for bit in hash_bits:
result.update(force_bytes(bit, errors='replace'))
return result.hexdigest()
def get_fingerprint_for_event(event):
fingerprint = event.data.get('fingerprint')
if fingerprint is None:
return ['{{ default }}']
if isinstance(fingerprint, basestring):
return [fingerprint]
return fingerprint
def get_hashes_for_event(event):
return get_hashes_for_event_with_reason(event)[1]
def get_hashes_for_event_with_reason(event):
interfaces = event.get_interfaces()
for interface in interfaces.itervalues():
result = interface.compute_hashes(event.platform)
if not result:
continue
return (interface.get_path(), result)
return ('message', [event.message])
def get_grouping_behavior(event):
data = event.data
if 'checksum' in data:
return ('checksum', data['checksum'])
fingerprint = get_fingerprint_for_event(event)
return ('fingerprint', get_hashes_from_fingerprint_with_reason(event, fingerprint))
def get_hashes_from_fingerprint(event, fingerprint):
default_values = set(['{{ default }}', '{{default}}'])
if any(d in fingerprint for d in default_values):
default_hashes = get_hashes_for_event(event)
hash_count = len(default_hashes)
else:
hash_count = 1
hashes = []
for idx in xrange(hash_count):
result = []
for bit in fingerprint:
if bit in default_values:
result.extend(default_hashes[idx])
else:
result.append(bit)
hashes.append(result)
return hashes
def get_hashes_from_fingerprint_with_reason(event, fingerprint):
default_values = set(['{{ default }}', '{{default}}'])
if any(d in fingerprint for d in default_values):
default_hashes = get_hashes_for_event_with_reason(event)
hash_count = len(default_hashes[1])
else:
hash_count = 1
hashes = OrderedDict((bit, []) for bit in fingerprint)
for idx in xrange(hash_count):
for bit in fingerprint:
if bit in default_values:
hashes[bit].append(default_hashes)
else:
hashes[bit] = bit
return hashes.items()
if not settings.SENTRY_SAMPLE_DATA:
def should_sample(current_datetime, last_seen, times_seen):
return False
else:
def should_sample(current_datetime, last_seen, times_seen):
silence = current_datetime - last_seen
if times_seen % count_limit(times_seen) == 0:
return False
if times_seen % time_limit(silence) == 0:
return False
return True
def generate_culprit(data, platform=None):
culprit = ''
try:
stacktraces = [
e['stacktrace']
for e in data['sentry.interfaces.Exception']['values']
if e.get('stacktrace')
]
except KeyError:
if 'sentry.interfaces.Stacktrace' in data:
stacktraces = [data['sentry.interfaces.Stacktrace']]
else:
stacktraces = None
if not stacktraces:
if 'sentry.interfaces.Http' in data:
culprit = data['sentry.interfaces.Http'].get('url', '')
else:
from sentry.interfaces.stacktrace import Stacktrace
culprit = Stacktrace.to_python(stacktraces[-1]).get_culprit_string(
platform=platform,
)
return truncatechars(culprit, MAX_CULPRIT_LENGTH)
def plugin_is_regression(group, event):
project = event.project
for plugin in plugins.for_project(project):
result = safe_execute(plugin.is_regression, group, event,
version=1, _with_transaction=False)
if result is not None:
return result
return True
class ScoreClause(object):
def __init__(self, group):
self.group = group
def __int__(self):
# Calculate the score manually when coercing to an int.
# This is used within create_or_update and friends
return self.group.get_score()
def prepare_database_save(self, unused):
return self
def prepare(self, evaluator, query, allow_joins):
return
def evaluate(self, node, qn, connection):
engine = get_db_engine(getattr(connection, 'alias', 'default'))
if engine.startswith('postgresql'):
sql = 'log(times_seen) * 600 + last_seen::abstime::int'
elif engine.startswith('mysql'):
sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
else:
# XXX: if we cant do it atomically let's do it the best we can
sql = int(self)
return (sql, [])
@classmethod
def calculate(self, times_seen, last_seen):
return math.log(times_seen) * 600 + float(last_seen.strftime('%s'))
class EventManager(object):
logger = logging.getLogger('sentry.events')
def __init__(self, data, version='5'):
self.data = data
self.version = version
def normalize(self):
# TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
# First we pull out our top-level (non-data attr) kwargs
data = self.data
if not isinstance(data.get('level'), (six.string_types, int)):
data['level'] = logging.ERROR
elif data['level'] not in LOG_LEVELS:
data['level'] = logging.ERROR
if not data.get('logger'):
data['logger'] = DEFAULT_LOGGER_NAME
else:
logger = trim(data['logger'].strip(), 64)
if TagKey.is_valid_key(logger):
data['logger'] = logger
else:
data['logger'] = DEFAULT_LOGGER_NAME
if data.get('platform'):
data['platform'] = trim(data['platform'], 64)
current_timestamp = timezone.now()
timestamp = data.get('timestamp')
if not timestamp:
timestamp = current_timestamp
if isinstance(timestamp, datetime):
# We must convert date to local time so Django doesn't mess it up
# based on TIME_ZONE
if settings.TIME_ZONE:
if not timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=timezone.utc)
elif timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=None)
timestamp = float(timestamp.strftime('%s'))
data['timestamp'] = timestamp
data['received'] = float(timezone.now().strftime('%s'))
if not data.get('event_id'):
data['event_id'] = uuid4().hex
data.setdefault('message', '')
data.setdefault('culprit', None)
data.setdefault('server_name', None)
data.setdefault('site', None)
data.setdefault('checksum', None)
data.setdefault('fingerprint', None)
data.setdefault('platform', None)
data.setdefault('environment', None)
data.setdefault('extra', {})
data.setdefault('errors', [])
tags = data.get('tags')
if not tags:
tags = []
# full support for dict syntax
elif isinstance(tags, dict):
tags = tags.items()
# prevent [tag, tag, tag] (invalid) syntax
elif not all(len(t) == 2 for t in tags):
tags = []
else:
tags = list(tags)
data['tags'] = []
for key, value in tags:
key = six.text_type(key).strip()
value = six.text_type(value).strip()
if not (key and value):
continue
data['tags'].append((key, value))
if not isinstance(data['extra'], dict):
# throw it away
data['extra'] = {}
trim_dict(
data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
# TODO(dcramer): more of validate data needs stuffed into the manager
for key in data.keys():
if key in CLIENT_RESERVED_ATTRS:
continue
value = data.pop(key)
try:
interface = get_interface(key)()
except ValueError:
continue
try:
inst = interface.to_python(value)
data[inst.get_path()] = inst.to_json()
except Exception:
pass
# the SDKs currently do not describe event types, and we must infer
# them from available attributes
data['type'] = eventtypes.infer(data).key
data['version'] = self.version
# TODO(dcramer): find a better place for this logic
exception = data.get('sentry.interfaces.Exception')
stacktrace = data.get('sentry.interfaces.Stacktrace')
if exception and len(exception['values']) == 1 and stacktrace:
exception['values'][0]['stacktrace'] = stacktrace
del data['sentry.interfaces.Stacktrace']
if 'sentry.interfaces.Http' in data:
try:
ip_address = validate_ip(
data['sentry.interfaces.Http'].get(
'env', {}).get('REMOTE_ADDR'),
required=False,
)
except ValueError:
ip_address = None
if ip_address:
data.setdefault('sentry.interfaces.User', {})
data['sentry.interfaces.User'].setdefault(
'ip_address', ip_address)
if data['culprit']:
data['culprit'] = trim(data['culprit'], MAX_CULPRIT_LENGTH)
if data['message']:
data['message'] = trim(
data['message'], settings.SENTRY_MAX_MESSAGE_LENGTH)
return data
@suppress_exceptions
def save(self, project, raw=False):
from sentry.tasks.post_process import index_event_tags
project = Project.objects.get_from_cache(id=project)
data = self.data.copy()
# First we pull out our top-level (non-data attr) kwargs
event_id = data.pop('event_id')
message = data.pop('message')
level = data.pop('level')
culprit = data.pop('culprit', None)
logger_name = data.pop('logger', None)
server_name = data.pop('server_name', None)
site = data.pop('site', None)
checksum = data.pop('checksum', None)
fingerprint = data.pop('fingerprint', None)
platform = data.pop('platform', None)
release = data.pop('release', None)
environment = data.pop('environment', None)
# unused
time_spent = data.pop('time_spent', None)
if not culprit:
culprit = generate_culprit(data, platform=platform)
date = datetime.fromtimestamp(data.pop('timestamp'))
date = date.replace(tzinfo=timezone.utc)
kwargs = {
'message': message,
'platform': platform,
}
event = Event(
project_id=project.id,
event_id=event_id,
data=data,
time_spent=time_spent,
datetime=date,
**kwargs
)
tags = data.get('tags') or []
tags.append(('level', LOG_LEVELS[level]))
if logger_name:
tags.append(('logger', logger_name))
if server_name:
tags.append(('server_name', server_name))
if site:
tags.append(('site', site))
if release:
# TODO(dcramer): we should ensure we create Release objects
tags.append(('sentry:release', release))
if environment:
tags.append(('environment', environment))
for plugin in plugins.for_project(project, version=None):
added_tags = safe_execute(plugin.get_tags, event,
_with_transaction=False)
if added_tags:
tags.extend(added_tags)
event_user = self._get_event_user(project, data)
if event_user:
tags.append(('sentry:user', event_user.tag_value))
# XXX(dcramer): we're relying on mutation of the data object to ensure
# this propagates into Event
data['tags'] = tags
data['fingerprint'] = fingerprint or ['{{ default }}']
# Get rid of ephemeral interface data
for interface_class, _ in iter_interfaces():
interface = interface_class()
if interface.ephemeral:
data.pop(interface.get_path(), None)
# prioritize fingerprint over checksum as its likely the client defaulted
# a checksum whereas the fingerprint was explicit
if fingerprint:
hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
elif checksum:
hashes = [checksum]
else:
hashes = map(md5_from_hash, get_hashes_for_event(event))
# TODO(dcramer): temp workaround for complexity
data['message'] = message
event_type = eventtypes.get(data.get('type', 'default'))(data)
group_kwargs = kwargs.copy()
group_kwargs.update({
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'data': {
'last_received': event.data.get('received') or float(event.datetime.strftime('%s')),
'type': event_type.key,
# we cache the events metadata on the group to ensure its
# accessible in the stream
'metadata': event_type.get_metadata(),
},
})
# TODO(dcramer): temp workaround for complexity
del data['message']
if release:
release = Release.get_or_create(
project=project,
version=release,
date_added=date,
)
group_kwargs['first_release'] = release
group, is_new, is_regression, is_sample = self._save_aggregate(
event=event,
hashes=hashes,
release=release,
**group_kwargs
)
event.group = group
# store a reference to the group id to guarantee validation of isolation
event.data.bind_ref(event)
try:
with transaction.atomic(using=router.db_for_write(EventMapping)):
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info('Duplicate EventMapping found for event_id=%s', event_id,
exc_info=True)
return event
UserReport.objects.filter(
project=project, event_id=event_id,
).update(group=group)
# save the event unless its been sampled
if not is_sample:
try:
with transaction.atomic(using=router.db_for_write(Event)):
event.save()
except IntegrityError:
self.logger.info('Duplicate Event found for event_id=%s', event_id,
exc_info=True)
return event
index_event_tags.delay(
project_id=project.id,
event_id=event.id,
tags=tags,
)
if event_user:
tsdb.record_multi((
(tsdb.models.users_affected_by_group, group.id, (event_user.tag_value,)),
(tsdb.models.users_affected_by_project, project.id, (event_user.tag_value,)),
), timestamp=event.datetime)
if is_new and release:
buffer.incr(Release, {'new_groups': 1}, {
'id': release.id,
})
safe_execute(Group.objects.add_tags, group, tags,
_with_transaction=False)
if not raw:
if not project.first_event:
project.update(first_event=date)
first_event_received.send(project=project, group=group, sender=Project)
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
def _get_event_user(self, project, data):
user_data = data.get('sentry.interfaces.User')
if not user_data:
return
euser = EventUser(
project=project,
ident=user_data.get('id'),
email=user_data.get('email'),
username=user_data.get('username'),
ip_address=user_data.get('ip_address'),
)
if not euser.tag_value:
return
cache_key = 'euser:{}:{}'.format(
project.id,
md5(euser.tag_value.encode('utf-8')).hexdigest(),
)
cached = default_cache.get(cache_key)
if cached is None:
try:
with transaction.atomic(using=router.db_for_write(EventUser)):
euser.save()
except IntegrityError:
pass
default_cache.set(cache_key, '', 3600)
return euser
def _find_hashes(self, project, hash_list):
matches = []
for hash in hash_list:
ghash, _ = GroupHash.objects.get_or_create(
project=project,
hash=hash,
)
matches.append((ghash.group_id, ghash.hash))
return matches
def _ensure_hashes_merged(self, group, hash_list):
# TODO(dcramer): there is a race condition with selecting/updating
# in that another group could take ownership of the hash
bad_hashes = GroupHash.objects.filter(
project=group.project,
hash__in=hash_list,
).exclude(
group=group,
)
if not bad_hashes:
return
for hash in bad_hashes:
if hash.group_id:
merge_group.delay(
from_group_id=hash.group_id,
to_group_id=group.id,
)
return GroupHash.objects.filter(
project=group.project,
hash__in=[h.hash for h in bad_hashes],
).update(
group=group,
)
def _save_aggregate(self, event, hashes, release, **kwargs):
project = event.project
# attempt to find a matching hash
all_hashes = self._find_hashes(project, hashes)
try:
existing_group_id = (h[0] for h in all_hashes if h[0]).next()
except StopIteration:
existing_group_id = None
# XXX(dcramer): this has the opportunity to create duplicate groups
# it should be resolved by the hash merging function later but this
# should be better tested/reviewed
if existing_group_id is None:
kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
with transaction.atomic():
short_id = project.next_short_id()
group, group_is_new = Group.objects.create(
project=project,
short_id=short_id,
**kwargs
), True
else:
group = Group.objects.get(id=existing_group_id)
group_is_new = False
# If all hashes are brand new we treat this event as new
is_new = False
new_hashes = [h[1] for h in all_hashes if h[0] is None]
if new_hashes:
affected = GroupHash.objects.filter(
project=project,
hash__in=new_hashes,
group__isnull=True,
).update(
group=group,
)
if affected != len(new_hashes):
self._ensure_hashes_merged(group, new_hashes)
elif group_is_new and len(new_hashes) == len(all_hashes):
is_new = True
# XXX(dcramer): it's important this gets called **before** the aggregate
# is processed as otherwise values like last_seen will get mutated
can_sample = should_sample(
event.data.get('received') or float(event.datetime.strftime('%s')),
group.data.get('last_received') or float(group.last_seen.strftime('%s')),
group.times_seen,
)
if not is_new:
is_regression = self._process_existing_aggregate(
group=group,
event=event,
data=kwargs,
release=release,
)
else:
is_regression = False
# Determine if we've sampled enough data to store this event
if is_new or is_regression:
is_sample = False
else:
is_sample = can_sample
tsdb.incr_multi([
(tsdb.models.group, group.id),
(tsdb.models.project, project.id),
], timestamp=event.datetime)
tsdb.record_frequency_multi([
(tsdb.models.frequent_projects_by_organization, {
project.organization_id: {
project.id: 1,
},
}),
(tsdb.models.frequent_issues_by_project, {
project.id: {
group.id: 1,
},
}),
], timestamp=event.datetime)
return group, is_new, is_regression, is_sample
def _handle_regression(self, group, event, release):
if not group.is_resolved():
return
elif release:
# we only mark it as a regression if the event's release is newer than
# the release which we originally marked this as resolved
has_resolution = GroupResolution.objects.filter(
Q(release__date_added__gt=release.date_added) | Q(release=release),
group=group,
).exists()
if has_resolution:
return
else:
has_resolution = False
if not plugin_is_regression(group, event):
return
# we now think its a regression, rely on the database to validate that
# no one beat us to this
date = max(event.datetime, group.last_seen)
is_regression = bool(Group.objects.filter(
id=group.id,
# ensure we cant update things if the status has been set to
# muted
status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
).exclude(
# add to the regression window to account for races here
active_at__gte=date - timedelta(seconds=5),
).update(
active_at=date,
# explicitly set last_seen here as ``is_resolved()`` looks
# at the value
last_seen=date,
status=GroupStatus.UNRESOLVED
))
group.active_at = date
group.status = GroupStatus.UNRESOLVED
if is_regression and release:
# resolutions are only valid if the state of the group is still
# resolved -- if it were to change the resolution should get removed
try:
resolution = GroupResolution.objects.get(
group=group,
)
except GroupResolution.DoesNotExist:
affected = False
else:
cursor = connection.cursor()
# delete() API does not return affected rows
cursor.execute("DELETE FROM sentry_groupresolution WHERE id = %s", [resolution.id])
affected = cursor.rowcount > 0
if affected:
# if we had to remove the GroupResolution (i.e. we beat the
# the queue to handling this) then we need to also record
# the corresponding event
try:
activity = Activity.objects.filter(
group=group,
type=Activity.SET_RESOLVED_IN_RELEASE,
ident=resolution.id,
).order_by('-datetime')[0]
except IndexError:
# XXX: handle missing data, as its not overly important
pass
else:
activity.update(data={
'version': release.version,
})
if is_regression:
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_REGRESSION,
data={
'version': release.version if release else '',
}
)
return is_regression
def _process_existing_aggregate(self, group, event, data, release):
date = max(event.datetime, group.last_seen)
extra = {
'last_seen': date,
'score': ScoreClause(group),
'data': data['data'],
}
if event.message and event.message != group.message:
extra['message'] = event.message
if group.level != data['level']:
extra['level'] = data['level']
if group.culprit != data['culprit']:
extra['culprit'] = data['culprit']
is_regression = self._handle_regression(group, event, release)
group.last_seen = extra['last_seen']
update_kwargs = {
'times_seen': 1,
}
buffer.incr(Group, update_kwargs, {
'id': group.id,
}, extra)
return is_regression
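# Hypothetical call pattern for the manager above (the payload and project id are
# made-up, and this only runs inside a configured Sentry/Django environment):
# normalize the raw payload first, then persist it for a project.
#
# manager = EventManager({'message': 'something broke', 'level': logging.ERROR})
# manager.normalize()
# event = manager.save(project_id)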
| 25,470
| 505
| 351
|
841a190902c19e9b46726c2ff72f20fccefd58aa
| 1,925
|
py
|
Python
|
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/saga/__init__.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | 1
|
2019-05-24T02:19:29.000Z
|
2019-05-24T02:19:29.000Z
|
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/saga/__init__.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | null | null | null |
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/saga/__init__.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | null | null | null |
__author__ = "RADICAL-SAGA Development Team"
__copyright__ = "Copyright 2013, RADICAL"
__license__ = "MIT"
import os
import radical.utils as ru
# ------------------------------------------------------------------------------
#
import utils
# ------------------------------------------------------------------------------
#
from .constants import *
from .task import Task, Container
from .attributes import Attributes, Callback
from .session import Session, DefaultSession
from .context import Context
from .url import Url
from .exceptions import SagaException
from .exceptions import NotImplemented
from .exceptions import IncorrectURL
from .exceptions import BadParameter
from .exceptions import AlreadyExists
from .exceptions import DoesNotExist
from .exceptions import IncorrectState
from .exceptions import PermissionDenied
from .exceptions import AuthorizationFailed
from .exceptions import AuthenticationFailed
from .exceptions import Timeout
from .exceptions import NoSuccess
from . import job
from . import filesystem
from . import replica
from . import advert
from . import resource
# import radical.saga.messages
# ------------------------------------------------------------------------------
#
pwd = os.path.dirname (__file__)
version_short, version_detail, version_base, version_branch, \
sdist_name, sdist_path = ru.get_version ([pwd])
version = version_short
# FIXME: the logger init will require a 'classical' ini based config, which is
# different from the json based config we use now. May need updating once the
# radical configuration system has changed to json
_logger = ru.Logger('radical.saga')
_logger.info ('radical.saga version: %s' % version_detail)
# ------------------------------------------------------------------------------
| 30.078125
| 80
| 0.603636
|
__author__ = "RADICAL-SAGA Development Team"
__copyright__ = "Copyright 2013, RADICAL"
__license__ = "MIT"
import os
import radical.utils as ru
# ------------------------------------------------------------------------------
#
import utils
# ------------------------------------------------------------------------------
#
from .constants import *
from .task import Task, Container
from .attributes import Attributes, Callback
from .session import Session, DefaultSession
from .context import Context
from .url import Url
from .exceptions import SagaException
from .exceptions import NotImplemented
from .exceptions import IncorrectURL
from .exceptions import BadParameter
from .exceptions import AlreadyExists
from .exceptions import DoesNotExist
from .exceptions import IncorrectState
from .exceptions import PermissionDenied
from .exceptions import AuthorizationFailed
from .exceptions import AuthenticationFailed
from .exceptions import Timeout
from .exceptions import NoSuccess
from . import job
from . import filesystem
from . import replica
from . import advert
from . import resource
# import radical.saga.messages
# ------------------------------------------------------------------------------
#
pwd = os.path.dirname (__file__)
version_short, version_detail, version_base, version_branch, \
sdist_name, sdist_path = ru.get_version ([pwd])
version = version_short
# FIXME: the logger init will require a 'classical' ini based config, which is
# different from the json based config we use now. May need updating once the
# radical configuration system has changed to json
_logger = ru.Logger('radical.saga')
_logger.info ('radical.saga version: %s' % version_detail)
# ------------------------------------------------------------------------------
| 0
| 0
| 0
|
666d8967aa54a5ca6e2ff1fe991fa6dced6b3f03
| 781
|
py
|
Python
|
alexa_siterank/future/future.py
|
mytja/SiteRank-Alexa
|
4b8637fe622915953826b0c624b3a055710082da
|
[
"MIT"
] | 10
|
2020-11-20T10:10:31.000Z
|
2021-09-18T16:15:46.000Z
|
alexa_siterank/future/future.py
|
mytja/SiteRank-Alexa
|
4b8637fe622915953826b0c624b3a055710082da
|
[
"MIT"
] | null | null | null |
alexa_siterank/future/future.py
|
mytja/SiteRank-Alexa
|
4b8637fe622915953826b0c624b3a055710082da
|
[
"MIT"
] | 1
|
2021-12-17T20:37:37.000Z
|
2021-12-17T20:37:37.000Z
|
import json
from .utils import Utils
utils = Utils()
| 32.541667
| 94
| 0.758003
|
import json
from .utils import Utils
utils = Utils()
async def getRank(website):
r = await utils.getInfo(website)
return r["siteinfo"]
async def getTopKeywords(website):
return await utils.parse(website, '<script type="application/json" id="topKeywordsJSON">')
async def getCompetitors(website):
return await utils.parse(website, '<script type="application/json" id="competitorsJSON">')
async def getVisitors(website):
return await utils.parse(website, '<script type="application/json" id="visitorPercentage">')
async def getRankHistory(website):
return await utils.parse(website, '<script type="application/json" id="rankData">')
async def getFullRankHistory(website):
return await utils.parse(website, '<script type="application/json" id="rankDataWindow">')
| 589
| 0
| 138
|
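A minimal usage sketch for the coroutine API in the record above. It assumes the package installs as `alexa_siterank` and exposes this module as `alexa_siterank.future.future` (matching the repo path shown); the domain string is a placeholder.

import asyncio
from alexa_siterank.future import future  # assumed import path, based on the repo layout above

async def demo():
    rank = await future.getRank("example.com")            # placeholder domain
    keywords = await future.getTopKeywords("example.com")
    print(rank, keywords)

if __name__ == "__main__":
    asyncio.run(demo())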
35e4334fd81871fb480642c1d1359ca7d51365fb
| 3,725
|
py
|
Python
|
src/data/738.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/738.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/738.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
if __name__ == '__main__':
main()
| 31.837607
| 76
| 0.368591
|
# -*- coding: utf-8 -*-
def main():
N, Q = map(int, input().split())
class LowestCommonAncestor:
""" <O(nlog(n)), O(1)> """
def __init__(self, G: "adjacency list", root: "root node"):
self.n = len(G)
self.tour = [0] * (2 * self.n - 1)
self.depth_list = [0] * (2 * self.n - 1)
self.id = [-1] * self.n
self.dfs(G, root)
self._rmq_init(self.depth_list)
def _rmq_init(self, array):
n = len(array)
logn = n.bit_length()
self.sparse_table = st = [[0] * (n + 1 - (1 << i))
for i in range(logn)]
st[0] = list(range(n))
for i in range(logn - 1):
s = st[i]
t = st[i + 1]
width = 1 << i
for j in range(n + 1 - 2 * width):
first, second = s[j], s[j + width]
t[j] = first if array[first] < array[second] else second
self.log = log = [0] * (n + 1)
for i in range(2, n + 1):
log[i] = log[i >> 1] + 1
def _rmq_query(self, l: int, r: int) -> int:
"""min(array[l:r])を返す.O(1)"""
b = self.log[r - l]
s = self.sparse_table[b]
first, second = s[l], s[r - (1 << b)]
return first if self.depth_list[first] < self.depth_list[
second] else second
def dfs(self, G, root):
""" 非再帰で深さ優先探索を行う """
id = self.id
tour = self.tour
depth_list = self.depth_list
v = root
it = [0] * self.n
parents = [-1] * self.n
visit_id = 0
depth = 0
while v != -1:
if id[v] == -1:
id[v] = visit_id
tour[visit_id] = v
depth_list[visit_id] = depth
visit_id += 1
g = G[v]
if it[v] == len(g):
v = parents[v]
depth -= 1
continue
if g[it[v]] == parents[v]:
it[v] += 1
if it[v] == len(g):
v = parents[v]
depth -= 1
continue
else:
child = g[it[v]]
parents[child] = v
it[v] += 1
v = child
depth += 1
else:
child = g[it[v]]
parents[child] = v
it[v] += 1
v = child
depth += 1
def lca(self, u: int, v: int) -> int:
""" u と v の最小共通祖先を返す """
l, r = self.id[u], self.id[v]
if r < l:
l, r = r, l
q = self._rmq_query(l, r + 1)
return self.tour[q]
def dist(self, u: int, v: int) -> int:
""" u と v の距離を返す """
lca = self.lca(u, v)
depth_u = self.depth_list[self.id[u]]
depth_v = self.depth_list[self.id[v]]
depth_lca = self.depth_list[self.id[lca]]
return depth_u + depth_v - 2 * depth_lca
es = [[] for _ in range(N)]
for _ in range(N - 1):
a, b = map(int, input().split())
a -= 1
b -= 1
es[a].append(b)
es[b].append(a)
lca = LowestCommonAncestor(es, 0)
for i in range(Q):
c, d = map(int, input().split())
if lca.dist(c - 1, d - 1) % 2 == 0:
print("Town")
else:
print("Road")
if __name__ == '__main__':
main()
| 3,721
| 0
| 22
|
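A simplified, self-contained illustration of the distance-parity check performed by the solution above ("Town" for even distance, "Road" for odd). It uses a naive parent-pointer LCA on a made-up 4-node path instead of the Euler-tour/sparse-table structure in the record.

from collections import deque

def build(n, edges):
    # BFS from node 0 to record depth and parent for every vertex
    g = [[] for _ in range(n)]
    for a, b in edges:
        g[a].append(b)
        g[b].append(a)
    depth, parent = [0] * n, [-1] * n
    dq, seen = deque([0]), {0}
    while dq:
        v = dq.popleft()
        for w in g[v]:
            if w not in seen:
                seen.add(w)
                parent[w], depth[w] = v, depth[v] + 1
                dq.append(w)
    return depth, parent

def lca(u, v, depth, parent):
    # walk the deeper node up, then lift both until they meet
    while depth[u] > depth[v]:
        u = parent[u]
    while depth[v] > depth[u]:
        v = parent[v]
    while u != v:
        u, v = parent[u], parent[v]
    return u

def classify(u, v, depth, parent):
    d = depth[u] + depth[v] - 2 * depth[lca(u, v, depth, parent)]
    return "Town" if d % 2 == 0 else "Road"

depth, parent = build(4, [(0, 1), (1, 2), (2, 3)])   # path 0-1-2-3
print(classify(0, 3, depth, parent))  # distance 3 -> Road
print(classify(0, 2, depth, parent))  # distance 2 -> Town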
c10497e6c9cb9109d06fde46cc53d923263f33ab
| 5,467
|
py
|
Python
|
networkapi/api_peer_group/v4/tests/sanity/sync/test_put.py
|
vinicius-marinho/GloboNetworkAPI
|
94651d3b4dd180769bc40ec966814f3427ccfb5b
|
[
"Apache-2.0"
] | 73
|
2015-04-13T17:56:11.000Z
|
2022-03-24T06:13:07.000Z
|
networkapi/api_peer_group/v4/tests/sanity/sync/test_put.py
|
leopoldomauricio/GloboNetworkAPI
|
3b5b2e336d9eb53b2c113977bfe466b23a50aa29
|
[
"Apache-2.0"
] | 99
|
2015-04-03T01:04:46.000Z
|
2021-10-03T23:24:48.000Z
|
networkapi/api_peer_group/v4/tests/sanity/sync/test_put.py
|
shildenbrand/GloboNetworkAPI
|
515d5e961456cee657c08c275faa1b69b7452719
|
[
"Apache-2.0"
] | 64
|
2015-08-05T21:26:29.000Z
|
2022-03-22T01:06:28.000Z
|
# -*- coding: utf-8 -*-
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.util.geral import mount_url
| 36.939189
| 80
| 0.674044
|
# -*- coding: utf-8 -*-
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.util.geral import mount_url
class PeerGroupPutSuccessTestCase(NetworkApiTestCase):
peer_group_uri = '/api/v4/peer-group/'
fixtures_path = 'networkapi/api_peer_group/v4/fixtures/{}'
fixtures = [
'networkapi/config/fixtures/initial_config.json',
'networkapi/system/fixtures/initial_variables.json',
'networkapi/usuario/fixtures/initial_usuario.json',
'networkapi/grupo/fixtures/initial_ugrupo.json',
'networkapi/usuario/fixtures/initial_usuariogrupo.json',
'networkapi/api_ogp/fixtures/initial_objecttype.json',
'networkapi/api_ogp/fixtures/initial_objectgrouppermissiongeneral.json',
'networkapi/grupo/fixtures/initial_permissions.json',
'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
fixtures_path.format('initial_vrf.json'),
fixtures_path.format('initial_environment.json'),
fixtures_path.format('initial_route_map.json'),
fixtures_path.format('initial_peer_group.json'),
fixtures_path.format('initial_environment_peer_group.json'),
]
json_path = 'api_peer_group/v4/tests/sanity/json/put/{}'
def setUp(self):
self.client = Client()
self.authorization = self.get_http_authorization('test')
self.content_type = 'application/json'
self.fields = ['id', 'name', 'environments']
def tearDown(self):
pass
def test_put_peer_groups(self):
"""Test PUT PeerGroups."""
peer_groups_path = self.json_path.\
format('one_peer_group.json')
response = self.client.put(
self.peer_group_uri,
data=self.load_json(peer_groups_path),
content_type=self.content_type,
HTTP_AUTHORIZATION=self.authorization)
self.compare_status(200, response.status_code)
get_ids = [data['id'] for data in response.data]
uri = mount_url(self.peer_group_uri,
get_ids,
fields=self.fields)
response = self.client.get(
uri,
HTTP_AUTHORIZATION=self.authorization
)
self.compare_status(200, response.status_code)
self.compare_json(peer_groups_path,
response.data)
class PeerGroupPutErrorTestCase(NetworkApiTestCase):
peer_group_uri = '/api/v4/peer-group/'
fixtures_path = 'networkapi/api_peer_group/v4/fixtures/{}'
fixtures = [
'networkapi/config/fixtures/initial_config.json',
'networkapi/system/fixtures/initial_variables.json',
'networkapi/usuario/fixtures/initial_usuario.json',
'networkapi/grupo/fixtures/initial_ugrupo.json',
'networkapi/usuario/fixtures/initial_usuariogrupo.json',
'networkapi/api_ogp/fixtures/initial_objecttype.json',
'networkapi/api_ogp/fixtures/initial_objectgrouppermissiongeneral.json',
'networkapi/grupo/fixtures/initial_permissions.json',
'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
fixtures_path.format('initial_vrf.json'),
fixtures_path.format('initial_environment.json'),
fixtures_path.format('initial_route_map.json'),
fixtures_path.format('initial_peer_group.json'),
fixtures_path.format('initial_environment_peer_group.json'),
fixtures_path.format('initial_asn.json'),
fixtures_path.format('initial_ipv4.json'),
fixtures_path.format('initial_ipv6.json'),
fixtures_path.format('initial_networkipv4.json'),
fixtures_path.format('initial_networkipv6.json'),
fixtures_path.format('initial_vlan.json'),
fixtures_path.format('initial_neighbor_v4.json'),
fixtures_path.format('initial_neighbor_v6.json'),
]
json_path = 'api_peer_group/v4/tests/sanity/json/put/{}'
def setUp(self):
self.client = Client()
self.authorization = self.get_http_authorization('test')
self.content_type = 'application/json'
def tearDown(self):
pass
def test_put_inexistent_peer_group(self):
"""Test PUT inexistent PeerGroup."""
peer_group_path = self.json_path.\
format('inexistent_peer_group.json')
response = self.client.put(
self.peer_group_uri,
data=self.load_json(peer_group_path),
content_type=self.content_type,
HTTP_AUTHORIZATION=self.authorization)
self.compare_status(404, response.status_code)
self.compare_values(
u'PeerGroup id = 1000 do not exist',
response.data['detail']
)
def test_put_peer_group_associated_to_deployed_neighbors(self):
"""Test PUT PeerGroup associated to deployed Neighbors."""
peer_group_path = self.json_path.\
format('peer_group_assoc_to_deployed_neighbors.json')
response = self.client.put(
self.peer_group_uri,
data=self.load_json(peer_group_path),
content_type=self.content_type,
HTTP_AUTHORIZATION=self.authorization)
self.compare_status(400, response.status_code)
self.compare_values(
u'PeerGroup id = 1 is associated with deployed '
u'NeighborsV4 id = [1] and NeighborsV6 id = [1]',
response.data['detail']
)
| 351
| 4,904
| 46
|
3d3881828bc5cc9edd3b3036e7819e6964670267
| 434
|
py
|
Python
|
solthiruthi/stopwords.py
|
nv-d/open-tamil
|
0fcb1cece5ffd6263210db987bede09566353e80
|
[
"MIT"
] | 2
|
2021-07-17T02:52:38.000Z
|
2021-07-17T02:52:52.000Z
|
solthiruthi/stopwords.py
|
nv-d/open-tamil
|
0fcb1cece5ffd6263210db987bede09566353e80
|
[
"MIT"
] | null | null | null |
solthiruthi/stopwords.py
|
nv-d/open-tamil
|
0fcb1cece5ffd6263210db987bede09566353e80
|
[
"MIT"
] | null | null | null |
## -*- coding: utf-8 -*-
## (C) 2018 Muthiah Annamalai, <ezhillang@gmail.com>
import codecs
import os
from .resources import get_data_dir
| 24.111111
| 82
| 0.608295
|
## -*- coding: utf-8 -*-
## (C) 2018 Muthiah Annamalai, <ezhillang@gmail.com>
import codecs
import os
from .resources import get_data_dir
def get_stop_words():
stop_words = []
with codecs.open(
os.path.join(get_data_dir(), u"TamilStopWords.txt"), "r", "utf-8"
) as fp:
stop_words = list(
filter(lambda w: len(w) > 0, map(lambda w: w.strip(), fp.readlines()))
)
return stop_words
| 271
| 0
| 23
|
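A hypothetical illustration of how a stop-word list such as the one loaded by get_stop_words() above is normally applied; the English placeholder words stand in for the real Tamil entries of TamilStopWords.txt.

stop_words = {"and", "the"}                      # placeholder for get_stop_words()
tokens = ["the", "quick", "fox", "and", "dog"]
filtered = [t for t in tokens if t not in stop_words]
print(filtered)  # ['quick', 'fox', 'dog']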
496652ef3216fd70615f2b8ec8e89f2948ba3468
| 20,250
|
py
|
Python
|
advbench/attacks.py
|
constrainedlearning/advbench
|
68f9f6d77268aad45517ca84d383b996724cc976
|
[
"MIT"
] | null | null | null |
advbench/attacks.py
|
constrainedlearning/advbench
|
68f9f6d77268aad45517ca84d383b996724cc976
|
[
"MIT"
] | null | null | null |
advbench/attacks.py
|
constrainedlearning/advbench
|
68f9f6d77268aad45517ca84d383b996724cc976
|
[
"MIT"
] | null | null | null |
import os, sys
from math import sqrt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.laplace import Laplace
from torch.distributions.normal import Normal
from torch.optim import Adam
from einops import rearrange, reduce, repeat
from advbench import perturbations
from advbench.lib.manifool.functions.algorithms.manifool import manifool
from advbench.datasets import FFCV_AVAILABLE
torch.backends.cudnn.benchmark = True
| 48.795181
| 136
| 0.633284
|
import os, sys
from math import sqrt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.laplace import Laplace
from torch.distributions.normal import Normal
from torch.optim import Adam
from einops import rearrange, reduce, repeat
from advbench import perturbations
from advbench.lib.manifool.functions.algorithms.manifool import manifool
from advbench.datasets import FFCV_AVAILABLE
torch.backends.cudnn.benchmark = True
class Attack(nn.Module):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Attack, self).__init__()
self.classifier = classifier
self.hparams = hparams
self.device = device
eps = self.hparams['epsilon']
self.perturbation = vars(perturbations)[perturbation](eps)
def forward(self, imgs, labels):
raise NotImplementedError
class Attack_Linf(Attack):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Attack_Linf, self).__init__(classifier, hparams, device, perturbation=perturbation)
if isinstance(self.perturbation.eps, torch.Tensor):
self.perturbation.eps.to(device)
if isinstance(self.perturbation.eps, list):
eps = torch.tensor(self.perturbation.eps).to(device)
else:
eps = self.perturbation.eps
self.step = (eps*self.hparams['pgd_step_size'])
self.batched = hparams['batched']
class Fo(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Fo, self).__init__(classifier, hparams, device, perturbation=perturbation)
def forward(self, imgs, labels):
self.classifier.eval()
highest_loss = torch.zeros(imgs.shape[0], device = imgs.device)
delta = self.perturbation.delta_init(imgs).to(imgs.device)
worst_delta = torch.empty_like(delta)
for _ in range(self.hparams['fo_restarts']):
delta = self.perturbation.delta_init(imgs).to(imgs.device)
delta, adv_loss = self.optimize_delta(imgs, labels, delta)
worst_delta[adv_loss>highest_loss] = delta[adv_loss>highest_loss]
highest_loss[adv_loss>highest_loss] = adv_loss[adv_loss>highest_loss]
adv_imgs = self.perturbation.perturb_img(imgs, worst_delta)
self.classifier.train()
return adv_imgs.detach(), worst_delta.detach()
class Fo_PGD(Fo):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Fo_PGD, self).__init__(classifier, hparams, device, perturbation=perturbation)
def optimize_delta(self, imgs, labels, delta):
self.classifier.eval()
for _ in range(self.hparams['pgd_n_steps']):
delta.requires_grad_(True)
with torch.enable_grad():
adv_imgs = self.perturbation.perturb_img(imgs, delta)
adv_loss = self.classifier.loss(self.classifier(adv_imgs), labels, reduction='none')
mean = adv_loss.mean()
grad = torch.autograd.grad(mean, [delta])[0].detach()
delta.requires_grad_(False)
delta += self.step*torch.sign(grad)
delta = self.perturbation.clamp_delta(delta, imgs)
self.classifier.train()
return delta, adv_loss # this detach may not be necessary
class Fo_Adam(Fo):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Fo_Adam, self).__init__(classifier, hparams, device, perturbation=perturbation)
def optimize_delta(self, imgs, labels, delta):
self.classifier.eval()
delta = self.perturbation.delta_init(imgs).to(imgs.device)
opt = Adam([delta], lr = self.hparams['fo_adam_step_size'], betas = (0.9, 0.999))
for _ in range(self.hparams['fo_n_steps']):
with torch.enable_grad():
adv_imgs = self.perturbation.perturb_img(imgs, delta)
adv_loss = self.classifier.loss(self.classifier(adv_imgs), labels, reduction='none')
mean = adv_loss.mean()
mean.backward()
opt.step()
delta = self.perturbation.clamp_delta(delta, imgs)
self.classifier.train()
return delta, adv_loss
class Fo_SGD(Fo):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Fo_SGD, self).__init__(classifier, hparams, device, perturbation=perturbation)
if isinstance(self.perturbation.eps, torch.Tensor):
self.perturbation.eps.to(device)
if isinstance(self.perturbation.eps, list):
eps = torch.tensor(self.perturbation.eps).to(device)
else:
eps = self.perturbation.eps
self.step = (eps*self.hparams['fo_sgd_step_size'])
def optimize_delta(self, imgs, labels, delta):
batch_size = imgs.size(0)
velocity=0
for t in range(self.hparams['fo_n_steps']):
delta.requires_grad_(True)
with torch.enable_grad():
adv_imgs = self.perturbation.perturb_img(imgs, delta)
adv_loss = self.classifier.loss(self.classifier(adv_imgs), labels, reduction='none')
mean = - adv_loss.mean()
torch.nn.utils.clip_grad_norm_(delta, 1, norm_type=2.0)
grad = torch.autograd.grad(mean, [delta])[0].detach()
delta.requires_grad_(False)
grad = torch.clip(grad, min=-1, max=1)
velocity = self.hparams['fo_sgd_momentum']*velocity+grad
if t<self.hparams['fo_n_steps']-1:
delta += self.step*velocity
delta = self.perturbation.clamp_delta(delta, imgs)
return delta.detach(), adv_loss
class LMC_Laplacian_Linf(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(LMC_Laplacian_Linf, self).__init__(classifier, hparams, device, perturbation=perturbation)
if isinstance(self.perturbation.eps, torch.Tensor):
self.perturbation.eps.to(device)
if isinstance(self.perturbation.eps, list):
eps = torch.tensor(self.perturbation.eps).to(device)
else:
eps = self.perturbation.eps
self.step = (eps*self.hparams['l_dale_step_size'])
if isinstance(self.step, torch.Tensor):
self.step = self.step.to(device)
self.noise_coeff = (eps*self.hparams['l_dale_noise_coeff'])
def forward(self, imgs, labels):
self.classifier.eval()
batch_size = imgs.size(0)
noise_dist = Laplace(torch.tensor(0.), torch.tensor(1.))
delta = self.perturbation.delta_init(imgs).to(imgs.device)
for _ in range(self.hparams['l_dale_n_steps']):
delta.requires_grad_(True)
with torch.enable_grad():
adv_imgs = self.perturbation.perturb_img(imgs, delta)
adv_loss = self.classifier.loss(self.classifier(adv_imgs), labels)
grad = torch.autograd.grad(adv_loss, [delta])[0].detach()
delta.requires_grad_(False)
noise = noise_dist.sample(grad.shape).to(self.device)
delta += self.step * torch.sign(grad) + self.noise_coeff * noise
delta = self.perturbation.clamp_delta(delta, imgs)
adv_imgs = self.perturbation.perturb_img(imgs, delta)
self.classifier.train()
return adv_imgs.detach(), delta.detach()
class MH(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf', acceptance_meter=None):
super(MH, self).__init__(classifier, hparams, device, perturbation=perturbation)
if self.hparams['mh_proposal']=='Laplace':
if isinstance(self.perturbation.eps, list):
eps = torch.tensor(self.perturbation.eps).to(device)
else:
eps = self.perturbation.eps
if isinstance(eps, torch.Tensor):
eps = eps.to(device)
self.noise_dist = Laplace(torch.zeros(self.perturbation.dim, device=device), self.hparams['mh_dale_scale']*eps)
self.eps = eps
else:
raise NotImplementedError
self.get_proposal = lambda x: x + self.noise_dist.sample([x.shape[0]]).to(x.device)
if acceptance_meter is not None:
self.log_acceptance=True
self.acceptance_meter = acceptance_meter
else:
self.log_acceptance = False
def forward(self, imgs, labels):
self.classifier.eval()
with torch.no_grad():
delta = self.perturbation.delta_init(imgs).to(imgs.device)
delta = self.perturbation.clamp_delta(delta, imgs)
adv_imgs = self.perturbation.perturb_img(imgs, delta)
last_loss = self.classifier.loss(self.classifier(adv_imgs), labels)
ones = torch.ones_like(last_loss)
for _ in range(self.hparams['mh_dale_n_steps']):
proposal = self.get_proposal(delta)
if torch.allclose(proposal, self.perturbation.clamp_delta(proposal, adv_imgs)):
adv_imgs = self.perturbation.perturb_img(imgs, delta)
proposal_loss = self.classifier.loss(self.classifier(adv_imgs), labels)
acceptance_ratio = (
torch.minimum((proposal_loss / last_loss), ones)
)
if self.log_acceptance:
self.acceptance_meter.update(acceptance_ratio.mean().item(), n=1)
accepted = torch.bernoulli(acceptance_ratio).bool()
delta[accepted] = proposal[accepted].type(delta.dtype)
last_loss[accepted] = proposal_loss[accepted]
elif self.log_acceptance:
self.acceptance_meter.update(0, n=1)
delta = self.perturbation.clamp_delta(delta, imgs)
adv_imgs = self.perturbation.perturb_img(imgs, delta)
self.classifier.train()
return adv_imgs.detach(), delta.detach()
class Grid_Search(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf', grid_size=None):
super(Grid_Search, self).__init__(classifier, hparams, device, perturbation=perturbation)
self.perturbation_name = perturbation
self.dim = self.perturbation.dim
if grid_size is None:
self.grid_size = self.hparams['grid_size']
else:
self.grid_size = grid_size
self.grid_steps = int(self.grid_size**(1/self.dim))
self.grid_size = self.grid_steps**self.dim
self.grid_shape = [self.grid_size, self.dim]
self.epsilon = self.hparams['epsilon']
self.make_grid()
def make_grid(self):
grids = []
for idx in range(self.dim):
if isinstance(self.epsilon, float) or isinstance(self.epsilon, int):
eps = self.epsilon
else:
eps = self.epsilon[idx]
step = 2*eps/self.grid_steps
grids.append(torch.arange(-eps, eps, step=step, device=self.device))
self.grid = torch.cartesian_prod(*grids)
def forward(self, imgs, labels):
self.classifier.eval()
batch_size = imgs.size(0)
with torch.no_grad():
adv_imgs = self.perturbation.perturb_img(
repeat(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=self.grid_size),
repeat(self.grid, 'S D -> (B S) D', B=batch_size, D=self.dim, S=self.grid_size))
adv_loss = self.classifier.loss(self.classifier(adv_imgs), repeat(labels, 'B -> (B S)', S=self.grid_size), reduction="none")
adv_loss = rearrange(adv_loss, '(B S) -> B S', B=batch_size, S=self.grid_size)
max_idx = torch.argmax(adv_loss,dim=-1)
delta = self.grid[max_idx]
adv_imgs = self.perturbation.perturb_img(imgs, delta)
self.classifier.train()
return adv_imgs.detach(), delta.detach()
class Worst_Of_K(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Worst_Of_K, self).__init__(classifier, hparams, device, perturbation=perturbation)
def forward(self, imgs, labels):
self.classifier.eval()
batch_size = imgs.size(0)
delta = self.perturbation.delta_init(imgs)
steps = self.hparams['worst_of_k_steps']
if self.batched:
with torch.no_grad():
if len(imgs.shape) == 4:
repeated_images = repeat(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=steps)
elif len(imgs.shape) == 3:
repeated_images = repeat(imgs, 'B C P -> (B S) C P', B=batch_size, S=steps)
else:
raise NotImplementedError
delta = self.perturbation.delta_init(repeated_images).to(imgs.device)
delta = self.perturbation.clamp_delta(delta, repeated_images)
adv_imgs = self.perturbation.perturb_img(repeated_images, delta)
if len(labels.shape) == 1:
new_labels = repeat(labels, 'B -> (B S)', S=steps)
else:
new_labels = repeat(labels, 'B D -> (B S) D', S=steps)
adv_loss = self.classifier.loss(self.classifier(adv_imgs), new_labels, reduction="none")
adv_loss = rearrange(adv_loss, '(B S) -> B S', B=batch_size, S=steps)
max_idx = torch.argmax(adv_loss, dim=-1)
delta = delta[max_idx]
else:
worst_loss = -1
with torch.no_grad():
for _ in range(steps):
delta = self.perturbation.delta_init(imgs).to(imgs.device)
delta = self.perturbation.clamp_delta(delta, imgs)
adv_imgs = self.perturbation.perturb_img(imgs, delta)
adv_loss = self.classifier.loss(self.classifier(adv_imgs), labels)
if adv_loss>worst_loss:
worst_loss = adv_loss
worst_delta = delta
delta = worst_delta
adv_imgs = self.perturbation.perturb_img(imgs, delta)
self.classifier.train()
return adv_imgs.detach(), delta.detach()
class Rand_Aug(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Rand_Aug, self).__init__(classifier, hparams, device, perturbation=perturbation)
def forward(self, imgs, labels):
batch_size = imgs.size(0)
self.classifier.eval()
delta = self.perturbation.delta_init(imgs).to(imgs.device)
delta = self.sample(delta)
delta = self.perturbation.clamp_delta(delta, imgs)
adv_imgs = self.perturbation.perturb_img(imgs, delta)
self.classifier.train()
return adv_imgs.detach(), delta.detach()
def sample(self, delta):
return delta
class Gaussian_aug(Rand_Aug):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Gaussian_aug, self).__init__(classifier, hparams, device, perturbation=perturbation)
self.sigma = hparams["gaussian_attack_std"]*self.perturbation.eps
def sample(self, delta):
return torch.randn_like(delta)*self.sigma
class Laplace_aug(Rand_Aug):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Laplace_aug, self).__init__(classifier, hparams, device, perturbation=perturbation)
self.scale = hparams["laplacian_attack_std"]*self.perturbation.eps/sqrt(2)
def sample(self, delta):
return Laplace(torch.zeros_like(delta), self.scale).sample().to(device=delta.device, dtype=delta.dtype)
class Rand_Aug_Batch(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Rand_Aug_Batch, self).__init__(classifier, hparams, device, perturbation=perturbation)
def forward(self, imgs, labels):
batch_size = imgs.size(0)
if len(imgs.shape) == 4:
repeated_images = repeat(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=self.hparams['perturbation_batch_size'])
elif len(imgs.shape) == 3:
repeated_images = repeat(imgs, 'B C P -> (B S) C P', B=batch_size, S=self.hparams['perturbation_batch_size'])
else:
raise NotImplementedError
delta = self.perturbation.delta_init(repeated_images).to(imgs.device)
delta = self.perturbation.clamp_delta(delta, repeated_images)
adv_imgs = self.perturbation.perturb_img(repeated_images, delta)
if len(labels.shape) == 1:
new_labels = repeat(labels, 'B -> (B S)', S=self.hparams['perturbation_batch_size'])
else:
new_labels = repeat(labels, 'B D -> (B S) D', S=self.hparams['perturbation_batch_size'])
return adv_imgs.detach(), delta.detach(), new_labels.detach()
class Dist_Batch(Attack_Linf):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Dist_Batch, self).__init__(classifier, hparams, device, perturbation=perturbation)
def sample(self, delta):
raise NotImplementedError
def forward(self, imgs, labels):
batch_size = imgs.size(0)
if len(imgs.shape) == 4:
repeated_images = repeat(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=self.hparams['perturbation_batch_size'])
elif len(imgs.shape) == 3:
repeated_images = repeat(imgs, 'B C P -> (B S) C P', B=batch_size, S=self.hparams['perturbation_batch_size'])
else:
raise NotImplementedError
delta = self.perturbation.delta_init(repeated_images).to(imgs.device)
delta = self.sample(delta)
delta = self.perturbation.clamp_delta(delta, repeated_images)
adv_imgs = self.perturbation.perturb_img(repeated_images, delta)
if len(labels.shape) == 1:
new_labels = repeat(labels, 'B -> (B S)', S=self.hparams['perturbation_batch_size'])
else:
new_labels = repeat(labels, 'B D -> (B S) D', S=self.hparams['perturbation_batch_size'])
return adv_imgs.detach(), delta.detach(), new_labels.detach()
class Gaussian_Batch(Dist_Batch):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Gaussian_Batch, self).__init__(classifier, hparams, device, perturbation=perturbation)
self.sigma = hparams["gaussian_attack_std"]*self.perturbation.eps
def sample(self, delta):
return torch.randn_like(delta)*self.sigma
class Laplacian_Batch(Dist_Batch):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Laplacian_Batch, self).__init__(classifier, hparams, device, perturbation=perturbation)
self.scale = hparams["laplacian_attack_std"]*self.perturbation.eps/sqrt(2)
def sample(self, delta):
return Laplace(torch.zeros_like(delta), self.scale).sample().to(device=delta.device, dtype=delta.dtype)
class Grid_Batch(Grid_Search):
def __init__(self, classifier, hparams, device, perturbation='Linf'):
super(Grid_Batch, self).__init__(classifier, hparams, device, perturbation=perturbation)
def forward(self, imgs, labels):
batch_size = imgs.size(0)
rep_grid = repeat(self.grid, 'S D -> (B S) D', B=batch_size, D=self.dim, S=self.grid_size)
if len(imgs.shape) == 4:
rep_imgs = repeat(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=self.grid_size)
elif len(imgs.shape) == 3:
rep_imgs = repeat(imgs, 'B C P -> (B S) C P', B=batch_size, S=self.grid_size)
else:
raise NotImplementedError
delta = self.perturbation.clamp_delta(rep_grid, rep_imgs)
adv_imgs = self.perturbation.perturb_img(
rep_imgs,
rep_grid)
if len(labels.shape) == 1:
new_labels = repeat(labels, 'B -> (B S)', S=self.hparams['perturbation_batch_size'])
else:
new_labels = repeat(labels, 'B D -> (B S) D', S=self.hparams['perturbation_batch_size'])
return adv_imgs.detach(), delta, new_labels.detach()
| 18,200
| 113
| 1,447
|
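A standalone sketch of the projected sign-gradient step used by Fo_PGD above, written against a plain nn.Module classifier so it runs without the advbench perturbation/hparams machinery; epsilon, step size, and the toy model are made up for illustration.

import torch
import torch.nn as nn
import torch.nn.functional as F

def pgd_linf(model, imgs, labels, eps=8 / 255, step=2 / 255, n_steps=10):
    # random start inside the L-infinity ball, then iterate sign-gradient ascent
    delta = torch.zeros_like(imgs).uniform_(-eps, eps)
    for _ in range(n_steps):
        delta.requires_grad_(True)
        loss = F.cross_entropy(model(imgs + delta), labels)
        grad = torch.autograd.grad(loss, [delta])[0]
        delta = (delta.detach() + step * grad.sign()).clamp_(-eps, eps)
        # keep the perturbed image inside the valid pixel range
        delta = (imgs + delta).clamp(0, 1) - imgs
    return (imgs + delta).detach()

if __name__ == "__main__":
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 10))  # toy classifier
    x = torch.rand(4, 3, 8, 8)
    y = torch.randint(0, 10, (4,))
    adv = pgd_linf(model, x, y)
    print((adv - x).abs().max())  # bounded by eps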
aad78611eb26492e75d0af384e9d0c95a3e5e0f0
| 559
|
py
|
Python
|
bin/redmapper_make_zred_bkg.py
|
jacobic/redmapper
|
bda5bd6f486fd5f18d35aa9ae4b875628e905604
|
[
"Apache-2.0"
] | 17
|
2016-03-06T07:51:02.000Z
|
2022-02-03T15:17:26.000Z
|
bin/redmapper_make_zred_bkg.py
|
jacobic/redmapper
|
bda5bd6f486fd5f18d35aa9ae4b875628e905604
|
[
"Apache-2.0"
] | 42
|
2016-07-27T20:48:20.000Z
|
2022-01-31T20:47:51.000Z
|
bin/redmapper_make_zred_bkg.py
|
jacobic/redmapper
|
bda5bd6f486fd5f18d35aa9ae4b875628e905604
|
[
"Apache-2.0"
] | 8
|
2017-01-26T01:38:41.000Z
|
2020-11-14T07:41:53.000Z
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import os
import sys
import argparse
import redmapper
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute the zred background for all galaxies')
parser.add_argument('-c', '--configfile', action='store', type=str, required=True,
help='YAML config file')
args = parser.parse_args()
config = redmapper.Configuration(args.configfile)
zb = redmapper.ZredBackgroundGenerator(config)
zb.run()
| 23.291667
| 96
| 0.710197
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import os
import sys
import argparse
import redmapper
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute the zred background for all galaxies')
parser.add_argument('-c', '--configfile', action='store', type=str, required=True,
help='YAML config file')
args = parser.parse_args()
config = redmapper.Configuration(args.configfile)
zb = redmapper.ZredBackgroundGenerator(config)
zb.run()
| 0
| 0
| 0
|
e6235d7138bff4ef6e29135647b1f07992f5da72
| 409
|
py
|
Python
|
wakeonlan/search/arp.py
|
tuimac/wake_on_lan
|
150f3657796537a6ac61e391e41169c48b2375cb
|
[
"MIT"
] | null | null | null |
wakeonlan/search/arp.py
|
tuimac/wake_on_lan
|
150f3657796537a6ac61e391e41169c48b2375cb
|
[
"MIT"
] | 1
|
2020-05-12T05:04:02.000Z
|
2020-05-12T05:04:02.000Z
|
wakeonlan/search/arp.py
|
tuimac/wakeonlan
|
150f3657796537a6ac61e391e41169c48b2375cb
|
[
"MIT"
] | null | null | null |
import socket
import uuid
from struct import pack
| 20.45
| 43
| 0.569682
|
import socket
import uuid
from struct import pack
class Arp:
def __init__(self):
pass
def getMacAddress(self):
rawData = uuid.getnode()
data = bytes()
for i in range(6):
data += bytes([rawData & 0xff])
rawData = rawData >> 8
return data
def scan(self, iprange):
macaddress = self.getMacAddress()
print(macaddress)
| 267
| -11
| 103
|
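A complementary sketch: uuid.getnode() packs the local MAC address (or a random 48-bit fallback) into an integer; getMacAddress() above emits its bytes least-significant first, while the conventional colon-separated rendering reads most-significant first.

import uuid

node = uuid.getnode()
mac_bytes = node.to_bytes(6, byteorder="big")
print(":".join(f"{b:02x}" for b in mac_bytes))   # e.g. "a1:b2:c3:d4:e5:f6"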
0243ae709ebdd5569c0ba07be3af26232b3c03fe
| 750
|
py
|
Python
|
setup.py
|
linky00/pythonthegathering
|
5077b29ab3eb2351cd61204c9552bac889679b78
|
[
"MIT"
] | 2
|
2017-10-07T13:52:37.000Z
|
2020-04-17T15:23:28.000Z
|
setup.py
|
linky00/pythonthegathering
|
5077b29ab3eb2351cd61204c9552bac889679b78
|
[
"MIT"
] | null | null | null |
setup.py
|
linky00/pythonthegathering
|
5077b29ab3eb2351cd61204c9552bac889679b78
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='pythonthegathering',
packages=['pythonthegathering'],
version='1.2.1',
description='Replaces everything good and practical about Python with MTG!',
author='Theo Hamilton/linky00',
author_email='linky00@plotholestudios.com',
url='https://github.com/linky00/pythonthegathering',
download_url='https://github.com/linky00/pythonthegathering/archive/v1.2.1.tar.gz',
keywords='decorators mtg',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
license='MIT'
)
| 34.090909
| 87
| 0.66
|
from setuptools import setup
setup(
name='pythonthegathering',
packages=['pythonthegathering'],
version='1.2.1',
description='Replaces everything good and practical about Python with MTG!',
author='Theo Hamilton/linky00',
author_email='linky00@plotholestudios.com',
url='https://github.com/linky00/pythonthegathering',
download_url='https://github.com/linky00/pythonthegathering/archive/v1.2.1.tar.gz',
keywords='decorators mtg',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
license='MIT'
)
| 0
| 0
| 0
|
3499224506e26aaf6873fa7f3c65731a93700a0c
| 342
|
py
|
Python
|
jobs/file_remover.py
|
nSimonFR/spoken_language_dataset
|
07c018f28be72cec3ba5e9ec07608f79a6d32031
|
[
"MIT"
] | 23
|
2018-06-25T10:22:57.000Z
|
2021-07-09T09:53:47.000Z
|
jobs/file_remover.py
|
nSimonFR/spoken_language_dataset
|
07c018f28be72cec3ba5e9ec07608f79a6d32031
|
[
"MIT"
] | 3
|
2018-07-19T18:47:07.000Z
|
2021-06-01T22:11:53.000Z
|
jobs/file_remover.py
|
nSimonFR/spoken_language_dataset
|
07c018f28be72cec3ba5e9ec07608f79a6d32031
|
[
"MIT"
] | 6
|
2018-07-14T17:48:51.000Z
|
2020-12-24T01:31:41.000Z
|
from . import common
| 22.8
| 51
| 0.681287
|
from . import common
class FileRemover:
def __init__(self, input_files_key):
self.input_files_key = input_files_key
def execute(self, context):
input_files = context[self.input_files_key]
for input_file in input_files:
common.remove_file(input_file)
context[self.input_files_key] = []
| 247
| -3
| 76
|
81e0688249e61336036091af470d91b39d4b9458
| 1,155
|
py
|
Python
|
matsdp/default_params.py
|
dianwdw/matsdp
|
b5b822036d2ae1dab00f02a39fe7ba4a51384017
|
[
"BSD-3-Clause"
] | 2
|
2019-11-12T08:35:45.000Z
|
2022-02-20T14:26:54.000Z
|
matsdp/default_params.py
|
dianwdw/matsdp
|
b5b822036d2ae1dab00f02a39fe7ba4a51384017
|
[
"BSD-3-Clause"
] | null | null | null |
matsdp/default_params.py
|
dianwdw/matsdp
|
b5b822036d2ae1dab00f02a39fe7ba4a51384017
|
[
"BSD-3-Clause"
] | 1
|
2021-12-13T13:27:04.000Z
|
2021-12-13T13:27:04.000Z
|
# -*- coding: utf-8 -*-
def default_params():
'''
Description:
It defines the default parameters of the program.
Args:
None
Return:
defaults_dict
'''
defaults_dict = {}
defaults_dict['program_name'] = 'MATSDP'
defaults_dict['version'] = '0.2.4'
defaults_dict['logfile'] = 'matsdp.log'
defaults_dict['output_dir_name'] = 'outputs'
defaults_dict['projects_dir_name'] = 'projects'
defaults_dict['projects_summary_dir_name'] = 'projects_summary'
defaults_dict['task_summary_dir_name'] = 'task_summary'
defaults_dict['test_dir_name'] = 'test'
defaults_dict['greek_capital_letter_list'] = ['Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Zeta', 'Eta', 'Theta', 'Iota', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Xi', 'Omicron', 'Pi', 'Rho', 'Sigma', 'Tau', 'Upsilon', 'Phi', 'Chi', 'Psi', 'Omega']
defaults_dict['greek_small_letter_list'] = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu', 'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega']
return defaults_dict
| 50.217391
| 246
| 0.604329
|
# -*- coding: utf-8 -*-
def default_params():
'''
Description:
It defines the default parameters of the program.
Args:
None
Return:
defaults_dict
'''
defaults_dict = {}
defaults_dict['program_name'] = 'MATSDP'
defaults_dict['version'] = '0.2.4'
defaults_dict['logfile'] = 'matsdp.log'
defaults_dict['output_dir_name'] = 'outputs'
defaults_dict['projects_dir_name'] = 'projects'
defaults_dict['projects_summary_dir_name'] = 'projects_summary'
defaults_dict['task_summary_dir_name'] = 'task_summary'
defaults_dict['test_dir_name'] = 'test'
defaults_dict['greek_capital_letter_list'] = ['Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Zeta', 'Eta', 'Theta', 'Iota', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Xi', 'Omicron', 'Pi', 'Rho', 'Sigma', 'Tau', 'Upsilon', 'Phi', 'Chi', 'Psi', 'Omega']
defaults_dict['greek_small_letter_list'] = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu', 'xi', 'omicron', 'pi', 'rho', 'sigma', 'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega']
return defaults_dict
| 0
| 0
| 0
|
e5f323d36236662137792ccb26942032c399a889
| 92
|
py
|
Python
|
wordhit_crawler/items.py
|
InsightLab/wordhit-crawler
|
7daba16204387263d61644ef3381ac389ccd466a
|
[
"MIT"
] | 1
|
2019-04-13T18:01:58.000Z
|
2019-04-13T18:01:58.000Z
|
wordhit_crawler/items.py
|
InsightLab/wordhit-crawler
|
7daba16204387263d61644ef3381ac389ccd466a
|
[
"MIT"
] | null | null | null |
wordhit_crawler/items.py
|
InsightLab/wordhit-crawler
|
7daba16204387263d61644ef3381ac389ccd466a
|
[
"MIT"
] | null | null | null |
import scrapy
| 18.4
| 31
| 0.73913
|
import scrapy
class WordhitItem(scrapy.Item):
word = scrapy.Field()
hits = scrapy.Field()
| 0
| 56
| 23
|
a40b5456dfaeb74461d6a07aecdf92edba57ecbd
| 3,873
|
py
|
Python
|
utility_angles.py
|
spectralskylight/skydataviewer
|
ac45fde11fb2cd1daa3f09bc30c2fad9391438df
|
[
"BSD-3-Clause"
] | null | null | null |
utility_angles.py
|
spectralskylight/skydataviewer
|
ac45fde11fb2cd1daa3f09bc30c2fad9391438df
|
[
"BSD-3-Clause"
] | 1
|
2020-06-17T01:58:35.000Z
|
2021-04-28T01:57:07.000Z
|
utility_angles.py
|
spectralskylight/skydataviewer
|
ac45fde11fb2cd1daa3f09bc30c2fad9391438df
|
[
"BSD-3-Clause"
] | 4
|
2020-02-03T23:05:00.000Z
|
2021-04-28T02:28:43.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ====================================================================
# @author: Joe Del Rocco
# @since: 11/02/2017
# @summary: A module with angle and coordinate transformations.
# @note: Parts of this file came from angle_utilities.py written by Dan Knowlton of PCG at Cornell.
# Redistributed with permission.
# ====================================================================
# Provides functionality to convert between UV coordinates and angles as well
# as other useful angle utilities.
#
# Copyright 2014-2015 Program of Computer Graphics, Cornell University
# 580 Rhodes Hall
# Cornell University
# Ithaca NY 14853
# Web: http://www.graphics.cornell.edu/
#
# Not for commercial use. Do not redistribute without permission.
# ====================================================================
import math
import numpy as np
import common
'''
Convert a sky coordinate (azimuth, altitude) to fisheye UV coordinate (0-1, 0-1).
Note that images in this application were taken with North facing downward, so we must account for this in UV.
Note sampling pattern coordinates in this application were measured in altitude, but calculation below requires zenith.
Note altering of zenith to account for warp of lens used:
http://paulbourke.net/dome/fisheyecorrect/
'''
'''
Convert a fisheye UV coordinate (0-1, 0-1) to a sky coordinate (azimuth, altitude).
'''
'''
Convert an image pixel coordinate to a fisheye UV coordinate (0-1, 0-1).
'''
'''
Take in a pair of (azimuth, altitude) sky coordinates and return the corresponding central angle between them.
https://en.wikipedia.org/wiki/Great-circle_distance#Formulas
'''
| 34.580357
| 119
| 0.633876
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ====================================================================
# @author: Joe Del Rocco
# @since: 11/02/2017
# @summary: A module with angle and coordinate transformations.
# @note: Parts of this file came from angle_utilities.py written by Dan Knowlton of PCG at Cornell.
# Redistributed with permission.
# ====================================================================
# Provides functionality to convert between UV coordinates and angles as well
# as other useful angle utilities.
#
# Copyright 2014-2015 Program of Computer Graphics, Cornell University
# 580 Rhodes Hall
# Cornell University
# Ithaca NY 14853
# Web: http://www.graphics.cornell.edu/
#
# Not for commercial use. Do not redistribute without permission.
# ====================================================================
import math
import numpy as np
import common
'''
Convert a sky coordinate (azimuth, altitude) to fisheye UV coordinate (0-1, 0-1).
Note that images in this application were taken with North facing downward, so we must account for this in UV.
Note sampling pattern coordinates in this application were measured in altitude, but calculation below requires zenith.
Note altering of zenith to account for warp of lens used:
http://paulbourke.net/dome/fisheyecorrect/
'''
def SkyCoord2FisheyeUV(azimuth, altitude, lenswarp=True):
# 1) sky photos were saved as (North down, South up), so rotate "North" to polar coordinate system (0 deg East)
# 2) inverse azimuth because photos are taken from inside skydome, so east and west are flipped!
azimuth = 360 - ((azimuth + 270) % 360)
# convert altitude to zenith
zenith = (90 - altitude)
# convert from angles to radians
azimuth = azimuth * math.pi / 180.0
zenith = zenith * math.pi / 180.0
# compute radius
# account for non-linearity/warp of actual lens
if lenswarp and len(common.LensWarp) > 0:
radius = np.polyval(common.LensWarp, zenith)
# use ideal lens
else:
radius = np.polyval(common.LensIdeal, zenith)
# compute UVs
u = radius * math.cos(azimuth)
v = radius * math.sin(azimuth)
# adjust to [0, 1] range
u = 0.5 * u + 0.5
v = 0.5 * v + 0.5
return u, v
'''
Convert a fisheye UV coordinate (0-1, 0-1) to a sky coordinate (azimuth, altitude).
'''
def FisheyeUV2SkyCoord(u, v, lenswarp=True):
# adjust to [-1, 1] range
u = (u - 0.5) * 2
v = (v - 0.5) * 2
radius = math.sqrt((u * u) + (v * v))
# compute azimuth
azimuth = math.atan2(u, v)
# rotate azimuth so that position of North is pointing directly down
azimuth = (azimuth + 2*math.pi) % (2*math.pi)
# compute zenith
# account for non-linearity/warp of actual lens
if lenswarp and len(common.LensWarpInv) > 0:
zenith = np.polyval(common.LensWarpInv, radius)
# use ideal lens
else:
zenith = np.polyval(common.LensIdealInv, radius)
# convert zenith to altitude
altitude = (math.pi / 2) - zenith
# convert from radians to angles
azimuth = azimuth * 180.0 / math.pi
altitude = altitude * 180.0 / math.pi
return azimuth, altitude
'''
Convert an image pixel coordinate to a fisheye UV coordinate (0-1, 0-1).
'''
def Pixel2FisheyeUV(x, y, width, height):
u = (x - (int(width/2) - int(height/2))) / height
v = y / height
return u, v
'''
Take in a pair of (azimuth, altitude) sky coordinates and return the corresponding central angle between them.
https://en.wikipedia.org/wiki/Great-circle_distance#Formulas
'''
def CentralAngle(a, b, inRadians=False):
if not inRadians:
a = (math.radians(a[0]), math.radians(a[1]))
b = (math.radians(b[0]), math.radians(b[1]))
return math.acos( math.sin(a[1]) * math.sin(b[1]) + math.cos(a[1]) * math.cos(b[1]) * math.cos( abs(a[0]-b[0]) ) )
| 2,095
| 0
| 88
|
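A self-contained sketch of the ideal (equidistant) fisheye round trip that SkyCoord2FisheyeUV/FisheyeUV2SkyCoord above fall back to; the real code evaluates lens polynomials from `common` and rotates the azimuth for the North-down photos, both of which this simplified model omits.

import math

def sky_to_uv(azimuth_deg, altitude_deg):
    az = math.radians(azimuth_deg)
    zen = math.radians(90.0 - altitude_deg)
    r = zen / (math.pi / 2.0)          # equidistant ideal lens: radius proportional to zenith
    u = 0.5 * (r * math.cos(az)) + 0.5
    v = 0.5 * (r * math.sin(az)) + 0.5
    return u, v

def uv_to_sky(u, v):
    x, y = (u - 0.5) * 2.0, (v - 0.5) * 2.0
    r = math.hypot(x, y)
    az = math.degrees(math.atan2(y, x)) % 360.0
    alt = 90.0 - math.degrees(r * math.pi / 2.0)
    return az, alt

print(uv_to_sky(*sky_to_uv(45.0, 30.0)))  # ~ (45.0, 30.0)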
b941ee17f4e874ec5ff0869d0c49beae63d5cc83
| 2,343
|
py
|
Python
|
mla_game/settings/prod.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
mla_game/settings/prod.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
mla_game/settings/prod.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
from .base import *
import os
MINIMUM_SAMPLE_SIZE = 3
TRANSCRIPT_PHRASE_POSITIVE_CONFIDENCE_LIMIT = .51
TRANSCRIPT_PHRASE_NEGATIVE_CONFIDENCE_LIMIT = -.51
TRANSCRIPT_PHRASE_CORRECTION_LOWER_LIMIT = .51
TRANSCRIPT_PHRASE_CORRECTION_UPPER_LIMIT = .66
INSTALLED_APPS += ('storages',)
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
ADMINS = [(os.environ['ADMIN_NAME'], os.environ['ADMIN_EMAIL'])]
ALLOWED_HOSTS = ['fixit.americanarchive.org', 'fixit.wgbh-mla.org']
LOG_DIRECTORY = '/home/wgbh/logs'
GA_CODE = os.environ['GA_CODE']
AWS_HEADERS = {
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'Cache-Control': 'max-age=94608000',
}
AWS_STORAGE_BUCKET_NAME = os.environ['S3_BUCKET_NAME']
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_S3_CUSTOM_DOMAIN = 's3.amazonaws.com/{}'.format(
AWS_STORAGE_BUCKET_NAME
)
STATIC_URL = 'https://s3.amazonaws.com/{}/'.format(AWS_S3_CUSTOM_DOMAIN)
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (
'rest_framework.renderers.JSONRenderer',
)
NEWRELIC_CONFIG_PATH = os.environ['NEWRELIC_CONFIG_PATH']
NEWRELIC_ENV = os.environ['NEWRELIC_ENV']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ['PG_HOST'],
'NAME': 'mla',
'USER': 'mla',
'PASSWORD': os.environ['PG_PASS'],
},
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/django.log'.format(LOG_DIRECTORY),
},
'metadata_errors': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/metadata_error.log'.format(LOG_DIRECTORY),
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'metadata_errors': {
'handlers': ['metadata_errors'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| 25.467391
| 72
| 0.625694
|
from .base import *
import os
MINIMUM_SAMPLE_SIZE = 3
TRANSCRIPT_PHRASE_POSITIVE_CONFIDENCE_LIMIT = .51
TRANSCRIPT_PHRASE_NEGATIVE_CONFIDENCE_LIMIT = -.51
TRANSCRIPT_PHRASE_CORRECTION_LOWER_LIMIT = .51
TRANSCRIPT_PHRASE_CORRECTION_UPPER_LIMIT = .66
INSTALLED_APPS += ('storages',)
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
ADMINS = [(os.environ['ADMIN_NAME'], os.environ['ADMIN_EMAIL'])]
ALLOWED_HOSTS = ['fixit.americanarchive.org', 'fixit.wgbh-mla.org']
LOG_DIRECTORY = '/home/wgbh/logs'
GA_CODE = os.environ['GA_CODE']
AWS_HEADERS = {
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'Cache-Control': 'max-age=94608000',
}
AWS_STORAGE_BUCKET_NAME = os.environ['S3_BUCKET_NAME']
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_S3_CUSTOM_DOMAIN = 's3.amazonaws.com/{}'.format(
AWS_STORAGE_BUCKET_NAME
)
STATIC_URL = 'https://s3.amazonaws.com/{}/'.format(AWS_S3_CUSTOM_DOMAIN)
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (
'rest_framework.renderers.JSONRenderer',
)
NEWRELIC_CONFIG_PATH = os.environ['NEWRELIC_CONFIG_PATH']
NEWRELIC_ENV = os.environ['NEWRELIC_ENV']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ['PG_HOST'],
'NAME': 'mla',
'USER': 'mla',
'PASSWORD': os.environ['PG_PASS'],
},
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/django.log'.format(LOG_DIRECTORY),
},
'metadata_errors': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/metadata_error.log'.format(LOG_DIRECTORY),
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'metadata_errors': {
'handlers': ['metadata_errors'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| 0
| 0
| 0
|
41d226f582b97fed8f9d1b5282260ffbb37f8314
| 1,712
|
py
|
Python
|
src/Sentiment_Analysis_Model [LR].py
|
lhk1234/Text-Mining-Sentiment-Analysis
|
8190571886e8cfe9325d48587d115e870ebb077d
|
[
"MIT"
] | 1
|
2021-07-21T05:07:46.000Z
|
2021-07-21T05:07:46.000Z
|
src/Sentiment_Analysis_Model [LR].py
|
lhk1234/Text-Mining-Sentiment-Analysis
|
8190571886e8cfe9325d48587d115e870ebb077d
|
[
"MIT"
] | null | null | null |
src/Sentiment_Analysis_Model [LR].py
|
lhk1234/Text-Mining-Sentiment-Analysis
|
8190571886e8cfe9325d48587d115e870ebb077d
|
[
"MIT"
] | null | null | null |
import urllib.request
import os
import pandas as pd
import numpy as np
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
df = pd.read_csv('../data/raw/movie_data.csv',encoding='utf-8')
#print(df.head(3))
# init Objects
tokenizer=RegexpTokenizer(r'\w+')
en_stopwords=set(stopwords.words('english'))
ps=PorterStemmer()
df['review'].apply(getStemmedReview)
#df.to_csv(r'../data/processed/movie_data[clean].csv')
# X_train = df.loc[:35000, 'review'].values
# y_train = df.loc[:35000, 'sentiment'].values
# X_test = df.loc[35000:, 'review'].values
# y_test = df.loc[35000:, 'sentiment'].values
#
# from sklearn.feature_extraction.text import TfidfVectorizer
# vectorizer = TfidfVectorizer(sublinear_tf=True, encoding='utf-8',decode_error='ignore')
# vectorizer.fit(X_train)
# X_train=vectorizer.transform(X_train)
# X_test=vectorizer.transform(X_test)
#
# from sklearn.linear_model import LogisticRegression
# model=LogisticRegression(solver='liblinear')
# model.fit(X_train,y_train)
# print("Score on training data is: "+str(model.score(X_train,y_train)))
# print("Score on testing data is: "+str(model.score(X_test,y_test)))
#
# import sklearn.externals
# import joblib
# joblib.dump(en_stopwords,'stopwords.pkl')
# joblib.dump(model,'model.pkl')
# joblib.dump(vectorizer,'vectorizer.pkl')
| 34.24
| 89
| 0.751752
|
import urllib.request
import os
import pandas as pd
import numpy as np
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
df = pd.read_csv('../data/raw/movie_data.csv',encoding='utf-8')
#print(df.head(3))
# init Objects
tokenizer=RegexpTokenizer(r'\w+')
en_stopwords=set(stopwords.words('english'))
ps=PorterStemmer()
def getStemmedReview(review):
review=review.lower()
review=review.replace("<br /><br />"," ")
#Tokenize
tokens=tokenizer.tokenize(review)
new_tokens=[token for token in tokens if token not in en_stopwords]
stemmed_tokens=[ps.stem(token) for token in new_tokens]
clean_review=' '.join(stemmed_tokens)
return clean_review
df['review'].apply(getStemmedReview)
#df.to_csv(r'../data/processed/movie_data[clean].csv')
# X_train = df.loc[:35000, 'review'].values
# y_train = df.loc[:35000, 'sentiment'].values
# X_test = df.loc[35000:, 'review'].values
# y_test = df.loc[35000:, 'sentiment'].values
#
# from sklearn.feature_extraction.text import TfidfVectorizer
# vectorizer = TfidfVectorizer(sublinear_tf=True, encoding='utf-8',decode_error='ignore')
# vectorizer.fit(X_train)
# X_train=vectorizer.transform(X_train)
# X_test=vectorizer.transform(X_test)
#
# from sklearn.linear_model import LogisticRegression
# model=LogisticRegression(solver='liblinear')
# model.fit(X_train,y_train)
# print("Score on training data is: "+str(model.score(X_train,y_train)))
# print("Score on testing data is: "+str(model.score(X_test,y_test)))
#
# import sklearn.externals
# import joblib
# joblib.dump(en_stopwords,'stopwords.pkl')
# joblib.dump(model,'model.pkl')
# joblib.dump(vectorizer,'vectorizer.pkl')
| 331
| 0
| 22
|
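A runnable miniature of the TF-IDF + LogisticRegression pipeline that is commented out at the end of the script above, trained on a made-up four-review corpus instead of movie_data.csv.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

texts = ["great movie loved it", "terrible plot boring", "loved the acting", "boring and terrible"]
labels = [1, 0, 1, 0]

vectorizer = TfidfVectorizer(sublinear_tf=True)
X = vectorizer.fit_transform(texts)

model = LogisticRegression(solver="liblinear")
model.fit(X, labels)
print(model.predict(vectorizer.transform(["loved it"])))  # likely [1]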
9cefb62a5d6a34ab9204ef1ffdff5e3c2ccb9315
| 1,443
|
py
|
Python
|
cosmoboost/lib/jeong.py
|
maamari/CosmoBoost
|
c59a6d4edce2a981f6d3d8a775f656d62b348d47
|
[
"MIT"
] | null | null | null |
cosmoboost/lib/jeong.py
|
maamari/CosmoBoost
|
c59a6d4edce2a981f6d3d8a775f656d62b348d47
|
[
"MIT"
] | null | null | null |
cosmoboost/lib/jeong.py
|
maamari/CosmoBoost
|
c59a6d4edce2a981f6d3d8a775f656d62b348d47
|
[
"MIT"
] | null | null | null |
import numpy as np
########################################
# Jeong approximate functions
########################################
| 30.702128
| 95
| 0.566875
|
import numpy as np
########################################
# Jeong approximate functions
########################################
def jeong_boost_Cl_1storder(L, Cl, beta, cos_avg=None, only_dCl=True):
if cos_avg == None:
from scipy.integrate import dblquad
cos_avg = dblquad(lambda ph, th: np.cos(th) * np.sin(th), 0, np.pi, lambda th: 0,
lambda th: 2 * np.pi)[0]
print("using <cos> = {}".format(cos_avg))
dCl = np.gradient(Cl)
dCl_1st = - beta * cos_avg * L * dCl
if only_dCl:
return dCl_1st
else:
return Cl + dCl_1st
def jeong_boost_Cl_2ndorder(L, Cl, beta, cos2_avg=None, only_dCl=True):
if cos2_avg == None:
from scipy.integrate import dblquad
cos2_avg = dblquad(lambda ph, th: np.cos(th) ** 2 * np.sin(th), 0, np.pi, lambda th: 0,
lambda th: 2 * np.pi)[0]/4/np.pi
print("using <cos2> = {}".format(cos2_avg))
dCl = np.gradient(Cl, L)
d2Cl = np.gradient(dCl, L)
dCl_2nd = (beta ** 2 / 2) * (L * dCl + d2Cl * L ** 2 * cos2_avg)
if only_dCl:
return dCl_2nd
else:
return Cl + dCl_2nd
def jeong_boost_Cl(L, Cl, beta, cos_avg=None, cos2_avg=None):
dCl_1st = jeong_boost_Cl_1storder(L, Cl, beta, cos_avg=cos_avg, only_dCl=True)
dCl_2nd = jeong_boost_Cl_2ndorder(L, Cl, beta, cos2_avg=cos2_avg, only_dCl=True)
return Cl + dCl_1st + dCl_2nd
| 1,235
| 0
| 69
|
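A standalone numerical restatement of the first-order correction implemented above, dCl = -beta * <cos> * l * dCl/dl. The power-law spectrum, the patch-averaged <cos>, and the unit-spaced l grid (so np.gradient(Cl) and np.gradient(Cl, L) coincide) are illustrative assumptions; beta is set roughly to the observer's velocity over c.

import numpy as np

L = np.arange(2, 2000)
Cl = L.astype(float) ** -2.0      # toy power-law spectrum, not a real CMB Cl
beta = 1.23e-3                    # roughly v/c of the observer's motion
cos_avg = 0.5                     # made-up patch-averaged <cos(theta)>

dCl_1st = -beta * cos_avg * L * np.gradient(Cl, L)
print(dCl_1st[:3])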
28dc70287a20b11ef4d8b4817f104a521a87f8d7
| 11,431
|
py
|
Python
|
fastrunner/views/newrun.py
|
liuguanglei123/FasterRunnerNew
|
d37ca7350846296853d791b76571332cf12ad60c
|
[
"MIT"
] | null | null | null |
fastrunner/views/newrun.py
|
liuguanglei123/FasterRunnerNew
|
d37ca7350846296853d791b76571332cf12ad60c
|
[
"MIT"
] | 2
|
2020-02-11T23:38:06.000Z
|
2020-07-31T10:18:35.000Z
|
fastrunner/views/newrun.py
|
liuguanglei123/FasterRunnerNew
|
d37ca7350846296853d791b76571332cf12ad60c
|
[
"MIT"
] | null | null | null |
from rest_framework.decorators import api_view
from fastrunner.utils import loader,newloader
from rest_framework.response import Response
from fastrunner.utils.parser import Format
from fastrunner import models
from django.conf import settings
import os,time,sys
from httprunner.utils import create_scaffold
from fastrunner.utils import runner
import traceback
from fastrunner.utils.newrunner import RunSingleApi,RunTree,RunSingleApiInStep,RunSingleApiInCase
"""运行方式
"""
import logging
logger = logging.getLogger('httprunner')
@api_view(['GET'])
def run_api_pk(request, **kwargs):
"""run api by pk
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApi(projectPath=projectPath, config=request.query_params['config'],
apiId=kwargs['pk'], type="singleapi")
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.run()
return Response(debugApi.summary)
@api_view(["POST"])
def run_testsuite(request):
"""debug testsuite
{
name: str,
body: dict
}
"""
body = request.data["body"]
project = request.data["project"]
name = request.data["name"]
testcase_list = []
config = None
for test in body:
test = loader.load_test(test, project=project)
if "base_url" in test["request"].keys():
config = test
continue
testcase_list.append(test)
summary = loader.debug_api(testcase_list, project, name=name, config=config)
return Response(summary)
@api_view(["POST"])
def run_test(request):
"""debug single test
{
body: dict
}
"""
body = request.data["body"]
summary = loader.debug_api(loader.load_test(body), request.data["project"])
return Response(summary)
@api_view(["GET"])
def run_testsuite_pk(request, **kwargs):
"""run testsuite by pk
{
project: int,
name: str
}
"""
pk = kwargs["pk"]
test_list = models.CaseStep.objects. \
filter(case__id=pk).order_by("step").values("body")
project = request.query_params["project"]
name = request.query_params["name"]
testcase_list = []
config = None
for content in test_list:
body = eval(content["body"])
if "base_url" in body["request"].keys():
config = eval(models.Config.objects.get(name=body["name"], project__id=project).body)
continue
testcase_list.append(body)
summary = loader.debug_api(testcase_list, project, name=name, config=config)
return Response(summary)
@api_view(['POST'])
def run_api(request):
""" run api by body
"""
api = Format(request.data)
api.parse()
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
create_scaffold(projectPath)
debugApi = RunSingleApi(project=api.project,projectPath=projectPath,config=request.data['config'],
apiBody=api.testcase,type="debugapi")
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.run()
return Response(debugApi.summary)
@api_view(['POST'])
def run_casesinglestep(request):
"""run testsuite by tree
{
project: int
relation: list
name: str
async: bool
}
"""
# order by id default
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
singleStep = ''
if('apiId' in request.data.keys()):
singleStep = RunSingleApiInCase(config=request.data['config'], project=request.data['project'],
apiId=request.data['apiId'],
index=request.data['index'], projectPath=projectPath,relation = request.data['relation'][0])
elif('suiteId' in request.data.keys()):
# TODO: this is only a temporary implementation and still needs to be rewritten
singleStep = RunSingleApiInCase(config=request.data['config'], project=request.data['project'],
suiteId=request.data['suiteId'],
index=request.data['index'], projectPath=projectPath,
relation=request.data['relation'][0])
singleStep.serializeApi()
singleStep.serializeDebugtalk()
singleStep.generateMapping()
singleStep.serializeTestCase()
singleStep.serializeTestSuite()
singleStep.run()
return Response(singleStep.summary)
@api_view(['POST'])
def run_DebugSuiteStep(request):
""" run suitestep by body
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApiInStep(config=request.data['config'],project=request.data['project'],apiId=request.data['apiId'],
apiBody=request.data, projectPath=projectPath)
debugApi.serializeApi()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.run()
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
return Response(debugApi.summary)
@api_view(['POST'])
def run_DebugCaseStep(request):
""" run casestep by body
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApiInStep(config=request.data['config'],project=request.data['project'],apiId=request.data['apiId'],
apiBody=request.data, projectPath=projectPath)
debugApi.serializeApi()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.serializeTestCase()
debugApi.run()
return Response(debugApi.summary)
| 34.327327
| 128
| 0.652699
|
from rest_framework.decorators import api_view
from fastrunner.utils import loader,newloader
from rest_framework.response import Response
from fastrunner.utils.parser import Format
from fastrunner import models
from django.conf import settings
import os,time,sys
from httprunner.utils import create_scaffold
from fastrunner.utils import runner
import traceback
from fastrunner.utils.newrunner import RunSingleApi,RunTree,RunSingleApiInStep,RunSingleApiInCase
"""运行方式
"""
import logging
logger = logging.getLogger('httprunner')
@api_view(['GET'])
def run_api_pk(request, **kwargs):
"""run api by pk
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApi(projectPath=projectPath, config=request.query_params['config'],
apiId=kwargs['pk'], type="singleapi")
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.run()
return Response(debugApi.summary)
@api_view(["POST"])
def run_testsuite(request):
"""debug testsuite
{
name: str,
body: dict
}
"""
body = request.data["body"]
project = request.data["project"]
name = request.data["name"]
testcase_list = []
config = None
for test in body:
test = loader.load_test(test, project=project)
if "base_url" in test["request"].keys():
config = test
continue
testcase_list.append(test)
summary = loader.debug_api(testcase_list, project, name=name, config=config)
return Response(summary)
@api_view(["POST"])
def run_test(request):
"""debug single test
{
body: dict
}
"""
body = request.data["body"]
summary = loader.debug_api(loader.load_test(body), request.data["project"])
return Response(summary)
@api_view(["GET"])
def run_testsuite_pk(request, **kwargs):
"""run testsuite by pk
{
project: int,
name: str
}
"""
pk = kwargs["pk"]
test_list = models.CaseStep.objects. \
filter(case__id=pk).order_by("step").values("body")
project = request.query_params["project"]
name = request.query_params["name"]
testcase_list = []
config = None
for content in test_list:
body = eval(content["body"])
if "base_url" in body["request"].keys():
config = eval(models.Config.objects.get(name=body["name"], project__id=project).body)
continue
testcase_list.append(body)
summary = loader.debug_api(testcase_list, project, name=name, config=config)
return Response(summary)
@api_view(['POST'])
def run_suite_tree(request):
project = request.data['project']
relation = request.data["relation"]
back_async = request.data["async"]
report = request.data["name"]
config = None
testcase = []
for relation_id in relation:
suite = models.Case.objects.filter(project__id=project,
relation=relation_id).order_by('id').values('id', 'name')
for content in suite:
test_list = models.CaseStep.objects. \
filter(case__id=content["id"]).order_by("step").values("body")
# [{scripts}, {scripts}]
testcase_list = []
for content in test_list:
body = eval(content["body"])
if "base_url" in body["request"].keys():
config = eval(models.Config.objects.get(name=body["name"], project__id=project).body)
continue
testcase_list.append(body)
# [[{scripts}, {scripts}], [{scripts}, {scripts}]]
testcase.append(testcase_list)
if back_async:
loader.async_debug_suite(testcase, project, report, suite, config=config)
summary = loader.TEST_NOT_EXISTS
summary["msg"] = "用例运行中,请稍后查看报告"
else:
summary = loader.debug_suite(testcase, project, suite, config=config)
return Response(summary)
@api_view(['POST'])
def run_suitestep(request):
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
suitesTree = RunTree(type="suiteTree", relation=request.data['relation'], project=request.data['project'],
projectPath=projectPath, config=request.data['config'], isAsync=request.data['async'])
suitesTree.serializeApi()
suitesTree.serializeTestCase()
suitesTree.serializeTestSuite()
suitesTree.serializeDebugtalk()
suitesTree.generateMapping()
# TODO: async execution is not implemented here and needs to be revised
suitesTree.run()
return Response(suitesTree.summary)
@api_view(['POST'])
def run_suitesinglestep(request):
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApiInStep(config=request.data['config'], project=request.data['project'],
apiId=request.data['apiId'],
index=request.data['index'], projectPath=projectPath,relation = request.data['relation'][0])
debugApi.serializeApi()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.run()
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
return Response(debugApi.summary)
@api_view(['POST'])
def run_api_tree(request):
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
apiTree = RunTree(type="apiTree", relation=request.data['relation'],project=request.data['project'],
projectPath=projectPath,config=request.data['config'],isAsync=request.data['async'])
apiTree.serializeTestCase()
apiTree.serializeTestSuite()
apiTree.serializeDebugtalk()
apiTree.generateMapping()
apiTree.run()
return Response(apiTree.summary)
@api_view(['POST'])
def run_api(request):
""" run api by body
"""
api = Format(request.data)
api.parse()
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
create_scaffold(projectPath)
debugApi = RunSingleApi(project=api.project,projectPath=projectPath,config=request.data['config'],
apiBody=api.testcase,type="debugapi")
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.run()
return Response(debugApi.summary)
@api_view(['POST'])
def run_casestep(request):
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
suitesTree = RunTree(type="caseTree", relation=request.data['relation'], project=request.data['project'],
projectPath=projectPath, config=request.data['config'], isAsync=request.data['async'])
suitesTree.serializeApi()
suitesTree.serializeTestCase()
suitesTree.serializeTestSuite()
suitesTree.serializeDebugtalk()
suitesTree.generateMapping()
# TODO: async execution is not implemented here and needs to be revised
suitesTree.run()
return Response(suitesTree.summary)
@api_view(['POST'])
def run_casesinglestep(request):
"""run testsuite by tree
{
project: int
relation: list
name: str
async: bool
}
"""
# order by id default
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
singleStep = ''
if('apiId' in request.data.keys()):
singleStep = RunSingleApiInCase(config=request.data['config'], project=request.data['project'],
apiId=request.data['apiId'],
index=request.data['index'], projectPath=projectPath,relation = request.data['relation'][0])
elif('suiteId' in request.data.keys()):
# TODO: this implementation is only a temporary workaround and still needs to be rewritten
singleStep = RunSingleApiInCase(config=request.data['config'], project=request.data['project'],
suiteId=request.data['suiteId'],
index=request.data['index'], projectPath=projectPath,
relation=request.data['relation'][0])
singleStep.serializeApi()
singleStep.serializeDebugtalk()
singleStep.generateMapping()
singleStep.serializeTestCase()
singleStep.serializeTestSuite()
singleStep.run()
return Response(singleStep.summary)
@api_view(['POST'])
def run_DebugSuiteStep(request):
""" run suitestep by body
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApiInStep(config=request.data['config'],project=request.data['project'],apiId=request.data['apiId'],
apiBody=request.data, projectPath=projectPath)
debugApi.serializeApi()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.serializeTestCase()
debugApi.serializeTestSuite()
debugApi.run()
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
return Response(debugApi.summary)
@api_view(['POST'])
def run_DebugCaseStep(request):
""" run casestep by body
"""
run_test_path = settings.RUN_TEST_PATH
timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
projectPath = os.path.join(run_test_path, timedir)
create_scaffold(projectPath)
if ('debugtalk' in sys.modules.keys()):
del sys.modules['debugtalk']
debugApi = RunSingleApiInStep(config=request.data['config'],project=request.data['project'],apiId=request.data['apiId'],
apiBody=request.data, projectPath=projectPath)
debugApi.serializeApi()
debugApi.serializeDebugtalk()
debugApi.generateMapping()
debugApi.serializeTestCase()
debugApi.run()
return Response(debugApi.summary)
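Every view above repeats the same setup: build a timestamped scaffold directory under RUN_TEST_PATH and evict any cached debugtalk module so the next run re-imports a fresh one. A minimal sketch of that pattern follows, using only the standard library; RUN_TEST_PATH, create_scaffold and the Run* classes belong to the project and are not reproduced, and the temp directory used here is an assumption.

import os
import sys
import time
import tempfile

def prepare_run_dir(base_dir=None):
    """Create a per-run working directory named by timestamp and drop any
    previously imported 'debugtalk' module so it is re-imported next time.
    base_dir defaults to a temp directory here; the real views use settings.RUN_TEST_PATH."""
    base_dir = base_dir or tempfile.mkdtemp()
    run_dir = os.path.join(base_dir, time.strftime('%Y-%m-%d %H-%M-%S', time.localtime()))
    os.makedirs(run_dir, exist_ok=True)
    sys.modules.pop('debugtalk', None)  # same effect as the `if ... in sys.modules: del ...` idiom
    return run_dir

print(prepare_run_dir())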
| 4,522
| 0
| 110
|
0e2a2f45007c6483cc3877028f4b5fe4cc1ec1db
| 255
|
py
|
Python
|
code/python/30_seconds_of_py/count_by.py
|
AlexMapley/Scanner
|
d286fc969d540d9599dad487e6e6d9b3734ade0c
|
[
"Unlicense"
] | 2
|
2019-07-03T17:49:24.000Z
|
2019-10-24T02:18:59.000Z
|
code/python/30_seconds_of_py/count_by.py
|
AlexMapley/Scanner
|
d286fc969d540d9599dad487e6e6d9b3734ade0c
|
[
"Unlicense"
] | 7
|
2019-07-03T17:46:53.000Z
|
2019-11-14T23:37:30.000Z
|
code/python/30_seconds_of_py/count_by.py
|
AlexMapley/workstation
|
d286fc969d540d9599dad487e6e6d9b3734ade0c
|
[
"Unlicense"
] | null | null | null |
from math import floor
print(count_by([6.1,4.2,6.3], floor))
print(count_by(['one', 'two', 'three'], len))
| 21.25
| 53
| 0.584314
|
def count_by(arr, fn=lambda x: x):
key = {}
for el in map(fn, arr):
key[el] = 1 if el not in key else key[el] + 1
return key
from math import floor
print(count_by([6.1,4.2,6.3], floor))
print(count_by(['one', 'two', 'three'], len))
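The same counting behaviour is available from the standard library; a functionally equivalent sketch with collections.Counter (not part of the snippet above):

from collections import Counter
from math import floor

def count_by_counter(arr, fn=lambda x: x):
    # Count occurrences of fn(element) across the list.
    return dict(Counter(map(fn, arr)))

print(count_by_counter([6.1, 4.2, 6.3], floor))        # {6: 2, 4: 1}
print(count_by_counter(['one', 'two', 'three'], len))  # {3: 2, 5: 1}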
| 123
| 0
| 22
|
41febecd73e6f74324319424d041cc0b66eff5d8
| 270
|
py
|
Python
|
lifelineExample.py
|
gkovacs/habitlab-conservation-analysis-chi2019
|
3ac52c4b5ab65d54cf6da0441bca829765ed21ec
|
[
"MIT"
] | null | null | null |
lifelineExample.py
|
gkovacs/habitlab-conservation-analysis-chi2019
|
3ac52c4b5ab65d54cf6da0441bca829765ed21ec
|
[
"MIT"
] | null | null | null |
lifelineExample.py
|
gkovacs/habitlab-conservation-analysis-chi2019
|
3ac52c4b5ab65d54cf6da0441bca829765ed21ec
|
[
"MIT"
] | null | null | null |
from lifelines.datasets import load_rossi
from lifelines import CoxPHFitter
rossi_dataset = load_rossi()
cph = CoxPHFitter()
cph.fit(rossi_dataset, duration_col='week', event_col='arrest', show_progress=True)
cph.print_summary() # access the results using cph.summary
| 33.75
| 83
| 0.807407
|
from lifelines.datasets import load_rossi
from lifelines import CoxPHFitter
rossi_dataset = load_rossi()
cph = CoxPHFitter()
cph.fit(rossi_dataset, duration_col='week', event_col='arrest', show_progress=True)
cph.print_summary() # access the results using cph.summary
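A possible next step after fitting, sketched under the assumption of a reasonably recent lifelines release: pull per-subject survival curves and the coefficient table from the fitted model.

from lifelines.datasets import load_rossi
from lifelines import CoxPHFitter

df = load_rossi()
cph = CoxPHFitter()
cph.fit(df, duration_col='week', event_col='arrest')

# Survival probabilities over time for the first three subjects,
# and the fitted coefficients / hazard ratios as a DataFrame.
print(cph.predict_survival_function(df.iloc[:3]).head())
print(cph.summary[['coef', 'exp(coef)']])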
| 0
| 0
| 0
|
7de79c6601b2d07d6fa324109ff7200d849f78c6
| 5,772
|
py
|
Python
|
samples/LuceneInAction/lia/searching/QueryParserTest.py
|
fnp/pylucene
|
fb16ac375de5479dec3919a5559cda02c899e387
|
[
"Apache-2.0"
] | 15
|
2015-05-21T09:28:01.000Z
|
2022-03-18T23:41:49.000Z
|
samples/LuceneInAction/lia/searching/QueryParserTest.py
|
fnp/pylucene
|
fb16ac375de5479dec3919a5559cda02c899e387
|
[
"Apache-2.0"
] | null | null | null |
samples/LuceneInAction/lia/searching/QueryParserTest.py
|
fnp/pylucene
|
fb16ac375de5479dec3919a5559cda02c899e387
|
[
"Apache-2.0"
] | 13
|
2015-04-18T23:05:11.000Z
|
2021-11-29T21:23:26.000Z
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
from lia.common.LiaTestCase import LiaTestCase
from lucene import \
WhitespaceAnalyzer, StandardAnalyzer, Term, QueryParser, Locale, \
BooleanQuery, FuzzyQuery, IndexSearcher, TermRangeQuery, TermQuery, \
BooleanClause, Version
| 38.48
| 86
| 0.601351
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
from lia.common.LiaTestCase import LiaTestCase
from lucene import \
WhitespaceAnalyzer, StandardAnalyzer, Term, QueryParser, Locale, \
BooleanQuery, FuzzyQuery, IndexSearcher, TermRangeQuery, TermQuery, \
BooleanClause, Version
class QueryParserTest(LiaTestCase):
def setUp(self):
super(QueryParserTest, self).setUp()
self.analyzer = WhitespaceAnalyzer()
self.searcher = IndexSearcher(self.directory, True)
def testToString(self):
query = BooleanQuery()
query.add(FuzzyQuery(Term("field", "kountry")),
BooleanClause.Occur.MUST)
query.add(TermQuery(Term("title", "western")),
BooleanClause.Occur.SHOULD)
self.assertEqual("+kountry~0.5 title:western",
query.toString("field"), "both kinds")
def testPrefixQuery(self):
parser = QueryParser(Version.LUCENE_CURRENT, "category",
StandardAnalyzer(Version.LUCENE_CURRENT))
parser.setLowercaseExpandedTerms(False)
print parser.parse("/Computers/technology*").toString("category")
def testGrouping(self):
query = QueryParser(Version.LUCENE_CURRENT, "subject",
self.analyzer).parse("(agile OR extreme) AND methodology")
scoreDocs = self.searcher.search(query, 50).scoreDocs
self.assertHitsIncludeTitle(self.searcher, scoreDocs,
"Extreme Programming Explained")
self.assertHitsIncludeTitle(self.searcher, scoreDocs,
"The Pragmatic Programmer")
def testTermRangeQuery(self):
query = QueryParser(Version.LUCENE_CURRENT, "subject",
self.analyzer).parse("title2:[K TO N]")
self.assert_(TermRangeQuery.instance_(query))
scoreDocs = self.searcher.search(query, 10).scoreDocs
self.assertHitsIncludeTitle(self.searcher, scoreDocs, "Mindstorms")
query = QueryParser(Version.LUCENE_CURRENT, "subject",
self.analyzer).parse("title2:{K TO Mindstorms}")
scoreDocs = self.searcher.search(query, 10).scoreDocs
self.assertHitsIncludeTitle(self.searcher, scoreDocs, "Mindstorms",
True)
def testDateRangeQuery(self):
# locale diff between jre and gcj 1/1/04 -> 01/01/04
# expression = "modified:[1/1/04 TO 12/31/04]"
expression = "modified:[01/01/04 TO 12/31/04]"
parser = QueryParser(Version.LUCENE_CURRENT, "subject", self.analyzer)
parser.setLocale(Locale.US)
query = parser.parse(expression)
print expression, "parsed to", query
topDocs = self.searcher.search(query, 50)
self.assert_(topDocs.totalHits > 0)
def testSlop(self):
q = QueryParser(Version.LUCENE_CURRENT, "field",
self.analyzer).parse('"exact phrase"')
self.assertEqual("\"exact phrase\"", q.toString("field"),
"zero slop")
qp = QueryParser(Version.LUCENE_CURRENT, "field", self.analyzer)
qp.setPhraseSlop(5)
q = qp.parse('"sloppy phrase"')
self.assertEqual("\"sloppy phrase\"~5", q.toString("field"),
"sloppy, implicitly")
def testPhraseQuery(self):
analyzer = StandardAnalyzer(Version.LUCENE_24)
q = QueryParser(Version.LUCENE_24, "field",
analyzer).parse('"This is Some Phrase*"')
self.assertEqual("\"some phrase\"", q.toString("field"), "analyzed")
q = QueryParser(Version.LUCENE_CURRENT, "field",
self.analyzer).parse('"term"')
self.assert_(TermQuery.instance_(q), "reduced to TermQuery")
def testLowercasing(self):
q = QueryParser(Version.LUCENE_CURRENT, "field",
self.analyzer).parse("PrefixQuery*")
self.assertEqual("prefixquery*", q.toString("field"), "lowercased")
qp = QueryParser(Version.LUCENE_CURRENT, "field", self.analyzer)
qp.setLowercaseExpandedTerms(False)
q = qp.parse("PrefixQuery*")
self.assertEqual("PrefixQuery*", q.toString("field"), "not lowercased")
def testWildcard(self):
try:
QueryParser(Version.LUCENE_CURRENT, "field",
self.analyzer).parse("*xyz")
self.fail("Leading wildcard character should not be allowed")
except:
self.assert_(True)
def testBoost(self):
q = QueryParser(Version.LUCENE_CURRENT, "field",
self.analyzer).parse("term^2")
self.assertEqual("term^2.0", q.toString("field"))
def testParseException(self):
try:
QueryParser(Version.LUCENE_CURRENT, "contents",
self.analyzer).parse("^&#")
except:
# expression is invalid, as expected
self.assert_(True)
else:
self.fail("ParseException expected, but not thrown")
| 4,461
| 14
| 347
|
ec917be2412342c0cd49ea56f2b8aec6f6878936
| 1,274
|
py
|
Python
|
chord_recognition/cache.py
|
discort/chord-recognition
|
0527084d0616dcf4c8fa27faec878427543384fb
|
[
"MIT"
] | 5
|
2021-01-22T18:22:25.000Z
|
2021-11-30T18:33:39.000Z
|
chord_recognition/cache.py
|
discort/chord-recognition
|
0527084d0616dcf4c8fa27faec878427543384fb
|
[
"MIT"
] | null | null | null |
chord_recognition/cache.py
|
discort/chord-recognition
|
0527084d0616dcf4c8fa27faec878427543384fb
|
[
"MIT"
] | null | null | null |
import h5py
import numpy as np
# @staticmethod
# def _preprocess_group_value(group):
# data = group['data'][:]
# labels = group['labels'][:]
# result = [(data[i][np.newaxis], labels[i, 0]) for i in range(data.shape[0])]
# return result
# @staticmethod
# def _preprocess_set_value(value):
# data = np.vstack([v[0] for v in value])
# labels = np.vstack([v[1] for v in value])
# return data, labels
| 26.541667
| 86
| 0.545526
|
import h5py
import numpy as np
class Cache:
@classmethod
def get(cls, key):
raise NotImplementedError
@classmethod
def set(self, key, value):
raise NotImplementedError
class HDF5Cache(Cache):
def __init__(self, filename):
self.filename = filename
def get(self, key):
value = None
with h5py.File(self.filename, 'a') as f:
if key in f:
group = f[key]
data = group['data'][:]
labels = group['labels'][:]
value = data, labels
return value
def set(self, key, value):
data, labels = value
with h5py.File(self.filename, 'a') as f:
grp = f.create_group(key)
grp.create_dataset("data", data=data)
grp.create_dataset("labels", data=labels)
# @staticmethod
# def _preprocess_group_value(group):
# data = group['data'][:]
# labels = group['labels'][:]
# result = [(data[i][np.newaxis], labels[i, 0]) for i in range(data.shape[0])]
# return result
# @staticmethod
# def _preprocess_set_value(value):
# data = np.vstack([v[0] for v in value])
# labels = np.vstack([v[1] for v in value])
# return data, labels
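A hypothetical usage sketch for the HDF5Cache above; the file name, key and array shapes are made up, and it assumes h5py and numpy are installed and the class definition is in scope.

import numpy as np

cache = HDF5Cache('features_cache.h5')  # hypothetical file name

key = 'train_chunk_0'
if cache.get(key) is None:
    data = np.random.rand(4, 105, 15).astype(np.float32)  # illustrative shapes only
    labels = np.arange(4).reshape(-1, 1)
    cache.set(key, (data, labels))

data, labels = cache.get(key)
print(data.shape, labels.shape)  # (4, 105, 15) (4, 1)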
| 596
| 80
| 126
|
d864da6d5ec0e0d923fdeb84db63bf1604b209a3
| 16,919
|
py
|
Python
|
src/entity/clause.py
|
yakuza8/first-order-predicate-logic-theorem-prover
|
f0a2b2a8d13b21f668cc2977a37b63691acdb883
|
[
"MIT"
] | 7
|
2020-01-05T17:37:07.000Z
|
2022-03-16T11:31:38.000Z
|
src/entity/clause.py
|
yakuza8/first-order-predicate-logic-theorem-prover
|
f0a2b2a8d13b21f668cc2977a37b63691acdb883
|
[
"MIT"
] | null | null | null |
src/entity/clause.py
|
yakuza8/first-order-predicate-logic-theorem-prover
|
f0a2b2a8d13b21f668cc2977a37b63691acdb883
|
[
"MIT"
] | 4
|
2020-03-13T07:30:26.000Z
|
2021-06-25T19:37:08.000Z
|
import itertools
import unittest
from typing import List, Optional, Union, Tuple
from . import children_entity_parser
from .predicate import Predicate
from ..most_general_unifier import MostGeneralUnifier
class Clause(object):
"""
Class for keeping predicates together and some several multi-predicate supported functionality
"""
def has_tautology(self) -> bool:
"""
Tautology checking procedure in the list of predicates
:return: Boolean flag representing whether the list has tautology or not. In case of having tautology True will
be returned, otherwise False.
"""
# Group each predicate by their name
for key, group in itertools.groupby(self.predicates, lambda predicate: predicate.get_name()):
# Separate them by their negation and test all the unification results of permutations of paired predicates
non_negated_predicates, negated_predicates = Clause._predicate_separator_by_sign(group)
for non_negated_predicate in non_negated_predicates:
for negated_predicate in negated_predicates:
unification, _ = MostGeneralUnifier.unify(non_negated_predicate.get_child(),
negated_predicate.get_child())
# If any of them can be unified, it means we got tautology
if unification:
return True
# If not achieved any tautology, it means we have no tautology
return False
def does_subsume(self, other: 'Clause') -> bool:
"""
Subsumption controlling function where the function tries to find
whether the other clause is more specific than the current clause
:param other: Other clause to check subsumption
:return: Boolean flag representing that the current clause subsumes the other clause
"""
# If no meet naming and negation match as a subset then immediately return False since subsumption cannot occur
fast_check_result = Clause._fast_check_by_negation_and_name(self, other)
if fast_check_result:
# Group by both name and negation
first_group = {key: list(group) for key, group in
itertools.groupby(self.predicates, lambda p: (p.get_name(), p.is_negated))}
second_group = {key: list(group) for key, group in
itertools.groupby(other.predicates, lambda p: (p.get_name(), p.is_negated))}
# Take common keys of each dict so that we can check if there exists any substitution which unifies them
common_keys = first_group.keys() & second_group.keys()
# And filter common predicates
filtered_first_group = [first_group[key] for key in common_keys]
filtered_second_group = [second_group[key] for key in common_keys]
# Then take multiplication of them
for multiplication in itertools.product(itertools.product(*filtered_first_group),
itertools.product(*filtered_second_group)):
# Each of the predicates must be the same or be less specific than the other's predicates
result = all(child == other_child or child.is_less_specific(other_child)
for child, other_child in zip(multiplication[0], multiplication[1]))
if result:
return True
# If none of them holds the condition, then return False
return False
else:
# If fast check fails
return False
def resolve_with(self, other: 'Clause') -> Tuple[Union['Clause', None], Union['Clause', None]]:
"""
Function to resolve two clauses
:param other: Other clause
:return: Resolvent clause in case of resolution otherwise None
"""
for predicate1, predicate2 in itertools.product(self.predicates, other.predicates):
# Try to unify them if they represent the same predicate but they have different negation states
if predicate1.get_name() == predicate2.get_name() and predicate1.is_negated != predicate2.is_negated:
result, substitutions = MostGeneralUnifier.unify(predicate1.get_child(), predicate2.get_child())
# Compose new predicate with combined predicates of both clauses except for resolvent predicates
new_clause_children = [Predicate.build(str(predicate)) for predicate in self.predicates]
new_clause_children.extend([Predicate.build(str(predicate)) for predicate in other.predicates])
new_clause_children.remove(predicate1)
new_clause_children.remove(predicate2)
# Return composed clause
return Clause(MostGeneralUnifier.apply_substitution(new_clause_children, substitutions)), substitutions
# If none of them can be resolved, return none
return None, None
@staticmethod
def _predicate_separator_by_sign(predicates):
"""
Grouping functionality of predicates
"""
non_negated, negated = [], []
for predicate in predicates:
(non_negated, negated)[predicate.is_negated].append(predicate)
return non_negated, negated
@staticmethod
def _fast_check_by_negation_and_name(clause1: 'Clause', clause2: 'Clause') -> bool:
"""
Fast subsumption check procedure which try to check there is any different predicate exists in other clause
so that the first clause cannot subsume
:param clause1: Clause to check subsume onto other clause
:param clause2: Clause which assumed to be subsumed by the first clause
:return: Boolean flag representing all predicates in the first clause are subset of that for second clause
"""
clause1 = set(map(lambda predicate: (predicate.is_negated, predicate.get_name()), clause1.predicates))
clause2 = set(map(lambda predicate: (predicate.is_negated, predicate.get_name()), clause2.predicates))
return clause1.issubset(clause2)
| 49.761765
| 119
| 0.675395
|
import itertools
import unittest
from typing import List, Optional, Union, Tuple
from . import children_entity_parser
from .predicate import Predicate
from ..most_general_unifier import MostGeneralUnifier
class Clause(object):
"""
Class for keeping predicates together and some several multi-predicate supported functionality
"""
def __init__(self, predicates: List[Optional[Predicate]]):
self.predicates = predicates
self.predicates = sorted(self.predicates, key=lambda predicate: (predicate.get_name(), predicate.is_negated))
def __repr__(self):
return str(self)
def __str__(self):
return str(self.predicates)
def __eq__(self, other):
if not isinstance(other, Clause):
return False
return str(self.predicates) == str(other)
def __hash__(self):
return hash(str(self.predicates))
def get_clause_length(self):
return len(self.predicates)
def has_tautology(self) -> bool:
"""
Tautology checking procedure in the list of predicates
:return: Boolean flag representing whether the list has tautology or not. In case of having tautology True will
be returned, otherwise False.
"""
# Group each predicate by their name
for key, group in itertools.groupby(self.predicates, lambda predicate: predicate.get_name()):
# Separate them by their negation and test all the unification results of permutations of paired predicates
non_negated_predicates, negated_predicates = Clause._predicate_separator_by_sign(group)
for non_negated_predicate in non_negated_predicates:
for negated_predicate in negated_predicates:
unification, _ = MostGeneralUnifier.unify(non_negated_predicate.get_child(),
negated_predicate.get_child())
# If any of them can be unified, it means we got tautology
if unification:
return True
# If not achieved any tautology, it means we have no tautology
return False
def does_subsume(self, other: 'Clause') -> bool:
"""
Subsumption controlling function where the function tries to find
whether the other clause is more specific than the current clause
:param other: Other clause to check subsumption
:return: Boolean flag representing that the current clause subsumes the other clause
"""
# If no meet naming and negation match as a subset then immediately return False since subsumption cannot occur
fast_check_result = Clause._fast_check_by_negation_and_name(self, other)
if fast_check_result:
# Group by both name and negation
first_group = {key: list(group) for key, group in
itertools.groupby(self.predicates, lambda p: (p.get_name(), p.is_negated))}
second_group = {key: list(group) for key, group in
itertools.groupby(other.predicates, lambda p: (p.get_name(), p.is_negated))}
# Take common keys of each dict so that we can check if there exists any substitution which unifies them
common_keys = first_group.keys() & second_group.keys()
# And filter common predicates
filtered_first_group = [first_group[key] for key in common_keys]
filtered_second_group = [second_group[key] for key in common_keys]
# Then take multiplication of them
for multiplication in itertools.product(itertools.product(*filtered_first_group),
itertools.product(*filtered_second_group)):
# Each of the predicates must be the same or be less specific than the other's predicates
result = all(child == other_child or child.is_less_specific(other_child)
for child, other_child in zip(multiplication[0], multiplication[1]))
if result:
return True
# If none of them holds the condition, then return False
return False
else:
# If fast check fails
return False
def resolve_with(self, other: 'Clause') -> Tuple[Union['Clause', None], Union['Clause', None]]:
"""
Function to resolve two clauses
:param other: Other clause
:return: Resolvent clause in case of resolution otherwise None
"""
for predicate1, predicate2 in itertools.product(self.predicates, other.predicates):
# Try to unify them if they represent the same predicate but they have different negation states
if predicate1.get_name() == predicate2.get_name() and predicate1.is_negated != predicate2.is_negated:
result, substitutions = MostGeneralUnifier.unify(predicate1.get_child(), predicate2.get_child())
# Compose new predicate with combined predicates of both clauses except for resolvent predicates
new_clause_children = [Predicate.build(str(predicate)) for predicate in self.predicates]
new_clause_children.extend([Predicate.build(str(predicate)) for predicate in other.predicates])
new_clause_children.remove(predicate1)
new_clause_children.remove(predicate2)
# Return composed clause
return Clause(MostGeneralUnifier.apply_substitution(new_clause_children, substitutions)), substitutions
# If none of them can be resolved, return none
return None, None
@staticmethod
def _predicate_separator_by_sign(predicates):
"""
Grouping functionality of predicates
"""
non_negated, negated = [], []
for predicate in predicates:
(non_negated, negated)[predicate.is_negated].append(predicate)
return non_negated, negated
@staticmethod
def _fast_check_by_negation_and_name(clause1: 'Clause', clause2: 'Clause') -> bool:
"""
Fast subsumption check procedure which try to check there is any different predicate exists in other clause
so that the first clause cannot subsume
:param clause1: Clause to check subsume onto other clause
:param clause2: Clause which assumed to be subsumed by the first clause
:return: Boolean flag representing all predicates in the first clause are subset of that for second clause
"""
clause1 = set(map(lambda predicate: (predicate.is_negated, predicate.get_name()), clause1.predicates))
clause2 = set(map(lambda predicate: (predicate.is_negated, predicate.get_name()), clause2.predicates))
return clause1.issubset(clause2)
class ClauseUnitTest(unittest.TestCase):
@staticmethod
def _predicate_parser(predicates):
return [Predicate.build(predicate) for predicate in children_entity_parser(predicates)]
def test_basic_properties(self):
predicate_string = 'p(y), q(y,A), r(A)'
predicates = ClauseUnitTest._predicate_parser(predicate_string)
predicates2 = ClauseUnitTest._predicate_parser(predicate_string + ', p(x)')
clause = Clause(predicates)
clause2 = Clause(predicates)
clause3 = Clause(predicates2)
expected_string = '[' + ', '.join(str(p) for p in predicates) + ']'
self.assertEqual(expected_string, str(clause))
self.assertEqual(expected_string, repr(clause))
self.assertEqual(hash(clause), hash(clause2))
self.assertNotEqual(hash(clause), hash(clause3))
self.assertEqual(clause, clause2)
self.assertNotEqual(clause, clause3)
self.assertNotEqual(clause, 8)
def test_get_predicate_length(self):
clause = Clause([])
self.assertEqual(0, clause.get_clause_length())
clause = Clause(ClauseUnitTest._predicate_parser('p(y)'))
self.assertEqual(1, clause.get_clause_length())
clause = Clause(ClauseUnitTest._predicate_parser('p(y),q(y, A),r(A)'))
self.assertEqual(3, clause.get_clause_length())
def test_has_tautology_empty_list(self):
clause = Clause([])
self.assertFalse(clause.has_tautology())
def test_has_tautology_singleton_list(self):
clause = Clause(ClauseUnitTest._predicate_parser('p(y)'))
self.assertFalse(clause.has_tautology())
def test_has_tautology_with_different_predicates(self):
clause = Clause(ClauseUnitTest._predicate_parser('p(y),q(y, A),r(A)'))
self.assertFalse(clause.has_tautology())
def test_has_tautology_variables(self):
clause = Clause(ClauseUnitTest._predicate_parser('p(y),q(y, A),r(A),~p(y)'))
self.assertTrue(clause.has_tautology())
def test_has_tautology_variable_constant(self):
clause = Clause(ClauseUnitTest._predicate_parser('p(y),q(y, A),r(A),~p(H)'))
self.assertTrue(clause.has_tautology())
def test_has_tautology_variable_function(self):
clause = Clause(ClauseUnitTest._predicate_parser('p(y),q(y, A),r(A),~p(c(a, T))'))
self.assertTrue(clause.has_tautology())
def test_has_tautology_constant(self):
clause = Clause(ClauseUnitTest._predicate_parser('p(H),q(y, A),r(A),~p(H)'))
self.assertTrue(clause.has_tautology())
clause = Clause(ClauseUnitTest._predicate_parser('p(J),q(y, A),r(A),~p(H)'))
self.assertFalse(clause.has_tautology())
def test_has_tautology_function(self):
clause = Clause(ClauseUnitTest._predicate_parser('p(x, r(ABC, k)),q(y, A),r(A),~p(x, r(GTX, k))'))
self.assertFalse(clause.has_tautology())
clause = Clause(ClauseUnitTest._predicate_parser('p(x, r(ABC, k)),q(y, A),r(A),~p(x, r(b, k))'))
self.assertTrue(clause.has_tautology())
clause = Clause(ClauseUnitTest._predicate_parser('p(x, r(ABC, k)),q(y, A),r(A),~p(u, r(b, k))'))
self.assertTrue(clause.has_tautology())
def test_fast_check_valid(self):
# Should pass fast check since we have Predicate p
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(v)'))
self.assertTrue(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should not pass since we have negated Predicate p
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),~p(v)'))
self.assertFalse(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should pass fast check since we have Predicate p and more general than second clause
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(k(l, ABC))'))
self.assertTrue(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should not pass since we have negated Predicate p
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),~p(k(l, ABC))'))
self.assertFalse(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should pass fast check since we have Predicate p and more general than second clause
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(ABC, ACB, BAC, BCA, CAB, CBA)'))
self.assertTrue(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should not pass since we have negated Predicate p
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),~p(ABC, ACB, BAC, BCA, CAB, CBA)'))
self.assertFalse(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should not pass since we have negated Predicate q
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y),~q(x)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(ABC, ACB, BAC, BCA, CAB, CBA)'))
self.assertFalse(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should pass since we have negated Predicate p and q
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y),q(x)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(ABC, ACB, BAC, BCA, CAB, CBA)'))
self.assertTrue(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should not pass since we have additional Predicate z
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y),q(x),z(m)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(ABC, ACB, BAC, BCA, CAB, CBA)'))
self.assertFalse(Clause._fast_check_by_negation_and_name(clause1, clause2))
# Should pass we have subset of other clause
clause1 = Clause(ClauseUnitTest._predicate_parser('p(y),p(x),p(o)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(ABC, ACB, BAC, BCA, CAB, CBA)'))
self.assertTrue(Clause._fast_check_by_negation_and_name(clause1, clause2))
def test_subsumption_with_fast_check_holds(self):
clause1 = Clause(ClauseUnitTest._predicate_parser('~p(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(v)'))
self.assertFalse(clause1.does_subsume(clause2))
clause1 = Clause(ClauseUnitTest._predicate_parser('~p(y),p(u)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('q(z),p(v)'))
self.assertFalse(clause1.does_subsume(clause2))
clause1 = Clause(ClauseUnitTest._predicate_parser('~p(y),~p(u)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(z),p(v)'))
self.assertFalse(clause1.does_subsume(clause2))
def test_subsumption_with_fast_check_does_not_hold(self):
# Should subsume { A / x }
clause1 = Clause(ClauseUnitTest._predicate_parser('p(x)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(A)'))
self.assertTrue(clause1.does_subsume(clause2))
# Should subsume { H / z }
clause1 = Clause(ClauseUnitTest._predicate_parser('p(A),q(z)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(B),z(f),q(H),p(A)'))
self.assertTrue(clause1.does_subsume(clause2))
# Should not subsume { B / A } is not applicable both are constants
clause1 = Clause(ClauseUnitTest._predicate_parser('p(A),q(z)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(B),z(f),q(H)'))
self.assertFalse(clause1.does_subsume(clause2))
# Should subsume { A / x } also we have extra predicate
clause1 = Clause(ClauseUnitTest._predicate_parser('p(x)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(A),q(y)'))
self.assertTrue(clause1.does_subsume(clause2))
# Should not subsume { y / B } is not applicable constant is more specific than variable
clause1 = Clause(ClauseUnitTest._predicate_parser('p(B)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(x),q(y)'))
self.assertFalse(clause1.does_subsume(clause2))
# Should not subsume { B / A } is not applicable both are constants
clause1 = Clause(ClauseUnitTest._predicate_parser('p(B)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(A),q(y)'))
self.assertFalse(clause1.does_subsume(clause2))
clause1 = Clause(ClauseUnitTest._predicate_parser('p(x),q(x)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(y),q(y),r(y,B)'))
self.assertTrue(clause1.does_subsume(clause2))
# Should not subsume { y / A } is not applicable constant is more specific than variable
clause1 = Clause(ClauseUnitTest._predicate_parser('p(x),q(A)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(y),q(y),r(y,B)'))
self.assertFalse(clause1.does_subsume(clause2))
def test_resolve_with_with_match(self):
clause1 = Clause(ClauseUnitTest._predicate_parser('~q(y), r(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('~r(A)'))
resolvent, substitution = clause1.resolve_with(clause2)
self.assertIsNotNone(resolvent)
self.assertIsNotNone(substitution)
expected_resolvent = Clause(ClauseUnitTest._predicate_parser('~q(y)'))
expected_substitution_list = '[A / y]'
self.assertEqual(expected_resolvent, resolvent)
self.assertEqual(expected_substitution_list, str(substitution))
def test_resolve_with_with_no_match(self):
clause1 = Clause(ClauseUnitTest._predicate_parser('~q(y), r(y)'))
clause2 = Clause(ClauseUnitTest._predicate_parser('p(A,f(t))'))
resolvent, substitution = clause1.resolve_with(clause2)
self.assertIsNone(resolvent)
self.assertIsNone(substitution)
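The resolution step that resolve_with implements can be illustrated with a much-simplified propositional sketch; this is not the package's API (no unification or substitutions, just signed literal names).

def resolve(c1, c2):
    """Resolve two propositional clauses given as frozensets of signed literals
    ('p' / '~p'); return the resolvent, or None if no complementary pair exists."""
    for lit in c1:
        comp = lit[1:] if lit.startswith('~') else '~' + lit
        if comp in c2:
            return (c1 - {lit}) | (c2 - {comp})
    return None

# Mirrors test_resolve_with_with_match above, minus the substitution machinery.
print(resolve(frozenset({'~q', 'r'}), frozenset({'~r'})))  # frozenset({'~q'})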
| 10,061
| 469
| 185
|
cc692a03f5ec7b579bbaa20d4760c3c4612fdc95
| 3,133
|
py
|
Python
|
apps/arus_demo/session_panel.py
|
qutang/arus
|
ee422bbadc72635037944359d00475f698e8fc61
|
[
"MIT"
] | null | null | null |
apps/arus_demo/session_panel.py
|
qutang/arus
|
ee422bbadc72635037944359d00475f698e8fc61
|
[
"MIT"
] | 264
|
2019-09-25T14:15:39.000Z
|
2022-03-11T10:11:38.000Z
|
apps/arus_demo/session_panel.py
|
qutang/arus
|
ee422bbadc72635037944359d00475f698e8fc61
|
[
"MIT"
] | null | null | null |
import PySimpleGUI as sg
import app_state as app
import dashboard
import backend
import os
from loguru import logger
import traceback
import arus
| 38.679012
| 113
| 0.533993
|
import PySimpleGUI as sg
import app_state as app
import dashboard
import backend
import os
from loguru import logger
import traceback
import arus
def control_button(text, disabled, key=None):
return sg.Button(button_text=text,
font=('Helvetica', 15), auto_size_button=True, size=(25, None), key=key, disabled=disabled)
class SessionSelectionPanel:
def __init__(self):
self._new_button = None
self._continue_button = None
def init_panel(self):
self._new_button = control_button(
'New session', disabled=False, key='_NEW_')
self._continue_button = control_button(
'Continue last session', disabled=not os.path.exists(app.AppState._snapshot_path), key='_CONTINUE_'
)
return sg.Window('Select a session to start', layout=[
[self._new_button, self._continue_button]
], finalize=True)
def start(self):
window = self.init_panel()
ready = False
while True:
event, _ = window.read()
if event == self._continue_button.Key:
logger.info('Restoring application status..')
file_path = sg.PopupGetFile('Select the pkl file to restore session', title='Continue a session',
default_extension='.pkl', initial_folder=app.AppState._snapshot_path)
if file_path is None:
continue
if app.AppState.restore(file_path):
app_state = app.AppState.getInstance()
ready = True
else:
app_state = app.AppState.getInstance()
app_state.origin_dataset = backend.load_origin_dataset()
ready = True
elif event == self._new_button.Key:
app.AppState.reset()
app_state = app.AppState.getInstance()
new_pid = sg.PopupGetText(
'Set new participant ID',
title='Create a new session',
default_text=app_state.pid,
keep_on_top=True
)
if new_pid is None:
continue
app_state.pid = new_pid
app_state.origin_dataset = backend.load_origin_dataset()
ready = True
elif event is None:
break
if ready:
try:
log_file = os.path.join(
app.AppState._path, 'logs', app_state.pid + '.log')
os.makedirs(os.path.dirname(log_file), exist_ok=True)
logger.add(log_file)
demo = dashboard.Dashboard(
title='Arus Demo Session: ' + app_state.pid)
demo.start()
except Exception as e:
print(e)
print(traceback.format_exc())
finally:
logger.info('Saving application status..')
app.AppState.snapshot()
window.close()
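The restore/snapshot flow relies on app.AppState, which is not shown here; a generic sketch of that persistence pattern with pickle, purely illustrative and not the app's actual implementation:

import os
import pickle

SNAPSHOT_PATH = 'session_snapshot.pkl'  # hypothetical path

def snapshot(state, path=SNAPSHOT_PATH):
    # Persist the session state to disk.
    with open(path, 'wb') as f:
        pickle.dump(state, f)

def restore(path=SNAPSHOT_PATH):
    # Return the saved state, or None if there is nothing to continue from.
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as f:
        return pickle.load(f)

snapshot({'pid': 'P001', 'stage': 'training'})
print(restore())  # {'pid': 'P001', 'stage': 'training'}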
| 2,852
| 7
| 126
|
ec5aac30b24743db5cf9c8fd21fb7ab43ce419ce
| 1,667
|
py
|
Python
|
paths.py
|
cleysonsilvame/R2R-PDF
|
8f71b3130d23b2cd257fbf0e854c64f8d37679fc
|
[
"MIT"
] | null | null | null |
paths.py
|
cleysonsilvame/R2R-PDF
|
8f71b3130d23b2cd257fbf0e854c64f8d37679fc
|
[
"MIT"
] | null | null | null |
paths.py
|
cleysonsilvame/R2R-PDF
|
8f71b3130d23b2cd257fbf0e854c64f8d37679fc
|
[
"MIT"
] | null | null | null |
import time
import timeit
from handlerPdf import getPDFname, getLocalTime
from pathlib import Path, PurePath
| 24.514706
| 79
| 0.595681
|
import time
import timeit
from handlerPdf import getPDFname, getLocalTime
from pathlib import Path, PurePath
def getPDFByPath(selected_folder, window):
files = Path(selected_folder)
paths_filtered_by_PDF = map(lambda file: {
"name": file.name,
"path": file,
"root": file.parent
}, files.rglob("*.pdf"))
paths_filtered_by_PDF = list(paths_filtered_by_PDF)
window.write_event_value('-THREAD_GET_PDF_BY_PATH-', paths_filtered_by_PDF)
def setPDFName(oldPaths, window):
timer_start = timeit.default_timer()
totalPaths = len(oldPaths)
progress_value = 0
for file in oldPaths:
progress_value += 100 / totalPaths
oldNameFile = file["name"]
oldPathFile = Path(file["path"]).resolve()
oldRootFile = Path(file["root"]).resolve()
newNameFile = getPDFname(oldPathFile)
newPathFile = Path(oldRootFile, newNameFile + ".pdf").resolve()
if newPathFile.exists():
time.sleep(1)
localtime = getLocalTime()
newNameFile += ' - ' + localtime
newPathFile = Path(
oldRootFile,
newNameFile
+ ".pdf"
).resolve()
print(
'Nome: '
+ oldNameFile
+ ' ----> '
+ newNameFile
+ ".pdf"
+ '\n'
)
oldPathFile.rename(newPathFile)
window.write_event_value('-PROGRESS-', progress_value)
timer_stop = timeit.default_timer()
window.write_event_value('-THREAD_DONE-', (timer_stop - timer_start))
def getAbsolutePath(path):
return Path(path).resolve()
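The collision handling in setPDFName (append a local time string when the target already exists) can be factored into a small helper; a sketch assuming getLocalTime returns something like an HH-MM-SS string:

from pathlib import Path
import time

def unique_pdf_target(root: Path, stem: str) -> Path:
    """Return a non-colliding '<stem>.pdf' path under root, appending a
    timestamp suffix when a file with that name already exists."""
    target = (root / f'{stem}.pdf').resolve()
    if target.exists():
        target = (root / f'{stem} - {time.strftime("%H-%M-%S")}.pdf').resolve()
    return target

print(unique_pdf_target(Path('.'), 'renamed-report'))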
| 1,486
| 0
| 69
|
0a4b776b9a7b0e70bfcedbf13a543757169dce86
| 5,531
|
py
|
Python
|
axelrod/tests/integration/test_round_robin.py
|
lipingzhu/Zero-determinant
|
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
|
[
"MIT"
] | null | null | null |
axelrod/tests/integration/test_round_robin.py
|
lipingzhu/Zero-determinant
|
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
|
[
"MIT"
] | null | null | null |
axelrod/tests/integration/test_round_robin.py
|
lipingzhu/Zero-determinant
|
6e30aa72358d5dfc3975abe433d0d13cc3a750a1
|
[
"MIT"
] | null | null | null |
import unittest
import random
import axelrod
C, D = axelrod.Actions.C, axelrod.Actions.D
| 42.221374
| 92
| 0.633339
|
import unittest
import random
import axelrod
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestRoundRobin(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
@classmethod
def payoffs2scores(cls, payoffs):
return [sum([pp for ipp, pp in enumerate(p) if ipp != ip])
for ip, p in enumerate(payoffs)]
@classmethod
def get_test_outcome(cls, outcome, turns=10):
# Extract the name of players from the outcome tupples,
# and initiate the players by getting the classes from axelrod.
names = [out[0] for out in outcome]
players = [getattr(axelrod, n)() for n in names]
# Do the actual game and build the expected outcome tuples.
round_robin = axelrod.RoundRobin(
players=players, game=cls.game, turns=turns)
payoffs = round_robin.play()['payoff']
scores = cls.payoffs2scores(payoffs)
outcome = zip(names, scores)
# The outcome is expected to be sort by score.
return sorted(outcome, key=lambda k: k[1])
def test_deterministic_cache(self):
p1, p2, p3 = axelrod.Cooperator(), axelrod.Defector(), axelrod.Random()
rr = axelrod.RoundRobin(players=[p1, p2, p3], game=self.game, turns=20)
self.assertEqual(rr.deterministic_cache, {})
rr.play()
self.assertEqual(rr.deterministic_cache[
(axelrod.Defector, axelrod.Defector)]['scores'], (20, 20))
self.assertEqual(rr.deterministic_cache[
(axelrod.Defector, axelrod.Defector)]['cooperation_rates'], (0, 0))
self.assertEqual(rr.deterministic_cache[
(axelrod.Cooperator, axelrod.Cooperator)]['scores'], (60, 60))
self.assertEqual(rr.deterministic_cache[
(axelrod.Cooperator, axelrod.Cooperator)]['cooperation_rates'], (20, 20))
self.assertEqual(rr.deterministic_cache[
(axelrod.Cooperator, axelrod.Defector)]['scores'], (0, 100))
self.assertEqual(rr.deterministic_cache[
(axelrod.Cooperator, axelrod.Defector)]['cooperation_rates'], (20, 0))
self.assertFalse(
(axelrod.Random, axelrod.Random) in rr.deterministic_cache)
def test_noisy_cache(self):
p1, p2, p3 = axelrod.Cooperator(), axelrod.Defector(), axelrod.Random()
rr = axelrod.RoundRobin(
players=[p1, p2, p3], game=self.game, turns=20, noise=0.2)
rr.play()
self.assertEqual(rr.deterministic_cache, {})
def test_calculate_score_for_mix(self):
"""Test that scores are calculated correctly."""
P1 = axelrod.Defector()
P1.history = [C, C, D]
P2 = axelrod.Defector()
P2.history = [C, D, D]
round_robin = axelrod.RoundRobin(
players=[P1, P2], game=self.game, turns=200)
self.assertEqual(round_robin._calculate_scores(P1, P2), (4, 9))
def test_calculate_score_for_all_cooperate(self):
"""Test that scores are calculated correctly."""
P1 = axelrod.Player()
P1.history = [C, C, C]
P2 = axelrod.Player()
P2.history = [C, C, C]
round_robin = axelrod.RoundRobin(
players=[P1, P2], game=self.game, turns=200)
self.assertEqual(round_robin._calculate_scores(P1, P2), (9, 9))
def test_calculate_score_for_all_defect(self):
"""Test that scores are calculated correctly."""
P1 = axelrod.Player()
P1.history = [D, D, D]
P2 = axelrod.Player()
P2.history = [D, D, D]
round_robin = axelrod.RoundRobin(
players=[P1, P2], game=self.game, turns=200)
self.assertEqual(round_robin._calculate_scores(P1, P2), (3, 3))
def test_round_robin_defector_v_cooperator(self):
"""Test round robin: the defector viciously punishes the cooperator."""
outcome = [('Cooperator', 0), ('Defector', 50)]
self.assertEqual(self.get_test_outcome(outcome), outcome)
def test_round_robin_defector_v_titfortat(self):
"""Test round robin: the defector does well against tit for tat."""
outcome = [('TitForTat', 9), ('Defector', 14)]
self.assertEqual(self.get_test_outcome(outcome), outcome)
def test_round_robin_cooperator_v_titfortat(self):
"""Test round robin: the cooperator does very well WITH tit for tat."""
outcome = [('Cooperator', 30), ('TitForTat', 30)]
self.assertEqual(self.get_test_outcome(outcome), outcome)
def test_round_robin_cooperator_v_titfortat_v_defector(self):
"""Test round robin: the defector dominates in this population."""
outcome = [('Cooperator', 30), ('TitForTat', 39), ('Defector', 64)]
self.assertEqual(self.get_test_outcome(outcome), outcome)
def test_round_robin_cooperator_v_titfortat_v_defector_v_grudger(self):
"""Test round robin: tit for tat does better this time around."""
outcome = [
('Cooperator', 60),
('TitForTat', 69),
('Grudger', 69),
('Defector', 78)]
self.assertEqual(self.get_test_outcome(outcome), outcome)
def test_round_robin_cooperator_v_titfortat_v_defector_v_grudger_v_go_by_majority(self):
"""Test round robin: Tit for tat is doing a lot better."""
outcome = [
('Cooperator', 90),
('Defector', 92),
('Grudger', 99),
('GoByMajority', 99),
('TitForTat', 99)]
self.assertEqual(self.get_test_outcome(outcome), outcome)
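The expected (4, 9) in test_calculate_score_for_mix can be reproduced by hand with the conventional prisoner's dilemma payoffs R=3, S=0, T=5, P=1 (the axelrod library's default game); a standalone check that does not import the library:

# Payoffs from (my_move, opponent_move) to (my_score, opponent_score).
PAYOFF = {('C', 'C'): (3, 3), ('C', 'D'): (0, 5),
          ('D', 'C'): (5, 0), ('D', 'D'): (1, 1)}

def score(history1, history2):
    s1 = s2 = 0
    for a, b in zip(history1, history2):
        p1, p2 = PAYOFF[(a, b)]
        s1, s2 = s1 + p1, s2 + p2
    return s1, s2

print(score(['C', 'C', 'D'], ['C', 'D', 'D']))  # (4, 9), matching the test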
| 2,193
| 3,225
| 23
|
a3ba9515a19b08eb9d9f9e3d1bd37373a12af5c6
| 7,713
|
py
|
Python
|
supermariopy/ptutils/test/test_nn.py
|
theRealSuperMario/supermariopy
|
9fff8275278ff26caff50da86109c25d276bb30b
|
[
"MIT"
] | 36
|
2019-07-14T16:10:37.000Z
|
2022-03-29T10:11:03.000Z
|
supermariopy/ptutils/test/test_nn.py
|
theRealSuperMario/supermariopy
|
9fff8275278ff26caff50da86109c25d276bb30b
|
[
"MIT"
] | 3
|
2019-10-09T15:11:13.000Z
|
2021-07-31T02:17:43.000Z
|
supermariopy/ptutils/test/test_nn.py
|
theRealSuperMario/supermariopy
|
9fff8275278ff26caff50da86109c25d276bb30b
|
[
"MIT"
] | 14
|
2019-08-29T14:11:54.000Z
|
2022-03-06T13:41:56.000Z
|
import numpy as np
import pytest
import torch
from supermariopy.ptutils import nn
| 29.10566
| 86
| 0.595747
|
import numpy as np
import pytest
import torch
from supermariopy.ptutils import nn
def test_spatial_softmax():
t = torch.rand(1, 10, 128, 128)
probs = nn.spatial_softmax(t)
assert nn.shape_as_list(probs) == [1, 10, 128, 128]
assert np.allclose(torch.sum(probs, dim=[2, 3]).numpy(), np.ones((1, 10)))
def test_grad():
t = torch.rand(1, 10, 128, 128, dtype=torch.float32)
g = nn.grad(t)
assert np.allclose(nn.shape_as_list(g), [1, 20, 128, 128])
def test_mumford_shah():
t = torch.rand(1, 10, 128, 128, dtype=torch.float32)
alpha = 1.0
lambda_ = 1.0
r, s, c = nn.mumford_shah(t, alpha, lambda_)
assert True
def test_filltriangular():
params = torch.range(0, 5).view(1, 6)
dim = 3
L = nn.fill_triangular(params, dim)
# assert L.shape == ()
assert L.shape == (1, 3, 3)
assert np.allclose(L, np.array([[3, 0, 0], [5, 4, 0], [2, 1, 0]]))
def test_diag_part():
params = torch.range(1, 6).view(1, 6)
dim = 3
L = nn.fill_triangular(params, dim)
diag_L = torch.diagonal(L, dim1=-2, dim2=-1)
assert diag_L.shape == (1, 3)
assert np.allclose(diag_L.numpy().squeeze(), np.array([4, 5, 1]))
def test_set_diag():
with torch.enable_grad():
params = torch.range(1, 6, requires_grad=True).view(1, 6)
dim = 3
L = nn.fill_triangular(params, dim)
diag_L = torch.ones((1, 3), dtype=params.dtype, requires_grad=True) * 6
M = nn.set_diag(L, diag_L)
loss = M.sum()
loss.backward()
assert M.grad_fn # is not None
assert L.grad_fn # is not None
assert np.allclose(nn.diag_part(M).detach(), diag_L.detach())
class Test_FullLatentDistribution:
def test_n_parameters(self):
dim = 10
assert nn.FullLatentDistribution.n_parameters(dim) == 65
def test_sample(self):
dim = 10
n_parameters = nn.FullLatentDistribution.n_parameters(dim)
parameters = torch.rand((1, n_parameters), dtype=torch.float32)
distr = nn.FullLatentDistribution(parameters, dim)
assert distr.sample().shape == (1, dim)
n_parameters = nn.FullLatentDistribution.n_parameters(dim)
parameters = torch.rand(10, n_parameters, 1, 1)
latent = nn.FullLatentDistribution(parameters, dim, False)
sample = latent.sample()
assert sample.shape == (10, dim, 1, 1)
def test_kl(self):
dim = 10
n_parameters = nn.FullLatentDistribution.n_parameters(dim)
parameters = torch.rand((1, n_parameters), dtype=torch.float32)
distr = nn.FullLatentDistribution(parameters, dim)
distr.mean = torch.zeros((1, dim), dtype=parameters.dtype)
distr.L = nn.set_diag(
torch.zeros((1, dim, dim), dtype=parameters.dtype),
torch.ones((1, dim), dtype=parameters.dtype),
)
distr.log_diag_L = torch.zeros((1, dim), dtype=parameters.dtype)
assert np.allclose(distr.kl(), np.array([0]))
@pytest.mark.cuda
def test_cuda(self):
# TODO: test this
pass
def test_tf_implementation(self):
dim = 10
n_parameters = nn.FullLatentDistribution.n_parameters(dim)
parameters = torch.rand((1, n_parameters), dtype=torch.float32)
distr = nn.FullLatentDistribution(parameters, dim)
kl_pt = distr.kl()
import tensorflow as tf
tf.enable_eager_execution()
from supermariopy.tfutils import nn as tfnn
distr_tf = tfnn.FullLatentDistribution(
tf.convert_to_tensor(parameters.numpy()), dim
)
kl_tf = distr_tf.kl()
assert np.allclose(kl_tf, kl_pt)
class Test_MeanFieldDistribution:
def test_kl_improper_gmrf(self):
dim = (1, 128, 128)
parameters = torch.zeros((1,) + dim)
mfd = nn.MeanFieldDistribution(parameters, dim)
kl = mfd.kl_improper_gmrf()
assert np.allclose(kl, np.array([0]))
def test_sample(self):
dim = (1, 128, 128)
parameters = torch.zeros((1,) + dim)
mfd = nn.MeanFieldDistribution(parameters, dim)
s = mfd.sample()
assert s.shape == parameters.shape
@pytest.mark.cuda
def test_cuda(self):
class Foo(torch.nn.Module):
def __init__(self):
return super(Foo, self).__init__()
def forward(self, x):
gmrf = nn.MeanFieldDistribution(x, True)
return gmrf.sample()
assert torch.cuda.is_available()
device = torch.device("cuda")
model = Foo()
with torch.autograd.set_detect_anomaly(True):
with torch.cuda.device(device):
p = torch.rand(1, 10, 128, 128, requires_grad=True)
p = p.to(device)
model = model.to(device)
sample = model(p)
# device.type is {cuda, cpu}
assert sample.is_cuda
assert p.is_cuda
loss = sample.mean()
loss.backward()
class Test_to_one_hot:
@pytest.mark.cuda
def test_cuda(self):
assert torch.cuda.is_available()
device = torch.device("cuda")
with torch.cuda.device(device):
x = torch.zeros(1, 128, 128)
x[:, :50, :50] = 1
x = x.to(device)
y = nn.to_one_hot(x, 2)
assert x.is_cuda
assert y.is_cuda
assert y.shape == (1, 128, 128, 2)
def test_cpu(self):
x = torch.zeros(1, 128, 128)
x[:, :50, :50] = 1
y = nn.to_one_hot(x, 2)
assert not x.is_cuda
assert not y.is_cuda
assert y.shape == (1, 128, 128, 2)
def test_image_gradient():
import tensorflow as tf
tf.enable_eager_execution()
x_tf = tf.random.normal((1, 128, 128, 10))
x_np = np.array(x_tf)
x_pt = torch.from_numpy(x_np)
x_pt = x_pt.permute(0, 3, 1, 2)
g_tf = tf.image.image_gradients(x_tf)
g_pt = nn.image_gradient(x_pt)
g_pt = [g.permute(0, 2, 3, 1) for g in g_pt]
assert np.allclose(np.array(g_tf[0]), np.array(g_pt[0]))
assert np.allclose(np.array(g_tf[1]), np.array(g_pt[1]))
def test_hloss():
import tensorflow as tf
tf.enable_eager_execution()
logits = torch.randn(1, 2, 128, 128)
probs = torch.nn.functional.softmax(logits, dim=1)
l_tf = logits.permute(0, 2, 3, 1).numpy()
p_tf = probs.permute(0, 2, 3, 1).numpy()
h_pt = nn.HLoss()(logits)
h_tf = tf.nn.softmax_cross_entropy_with_logits_v2(p_tf, l_tf)
h_tf = tf.reduce_sum(h_tf, axis=(1, 2))
assert np.allclose(h_pt, h_tf)
def test_probs_to_mu_sigma():
from supermariopy.tfutils import nn as tfnn
import tensorflow as tf
tf.enable_eager_execution()
_means = [30, 50, 70]
means = tf.ones((3, 1, 2), dtype=tf.float32) * np.array(_means).reshape((3, 1, 1))
stds = tf.concat(
[
tf.ones((1, 1, 1), dtype=tf.float32) * 5,
tf.ones((1, 1, 1), dtype=tf.float32) * 10,
],
axis=-1,
)
blob = tfnn.tf_hm(means, 100, 100, stds)
mu, sigma = tfnn.probs_to_mu_sigma(blob)
pt_blob = tf.transpose(blob, (0, 3, 1, 2))
pt_blob = torch.from_numpy(np.array(pt_blob))
mupt, sigmapt = nn.probs_to_mu_sigma(pt_blob)
assert np.allclose(mupt, mu)
assert np.allclose(sigmapt, sigma, rtol=1.0e-2)
def test_flip():
c = torch.rand(1, 10)
c_inv = nn.flip(c, 1)
assert np.allclose(c.numpy()[:, ::-1], c_inv)
def test_init():
    assert False  # TODO: placeholder, this test is not implemented yet
def test_convbnrelu():
N = 1
H = 128
W = 128
C = 10
x = torch.ones((N, C, H, W))
c_bn_relu = nn.ConvBnRelu(C, 256)(x)
assert list(c_bn_relu.shape) == [1, 256, H, W]
| 6,912
| 359
| 345
|
6569b350c282db3c0db747befde1f58375f52655
| 1,322
|
py
|
Python
|
corehq/apps/consumption/models.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/consumption/models.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/consumption/models.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
from dimagi.ext.couchdbkit import DecimalProperty, Document, StringProperty
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
TYPE_DOMAIN = 'domain'
TYPE_PRODUCT = 'product'
TYPE_SUPPLY_POINT_TYPE = 'supply-point-type'
TYPE_SUPPLY_POINT = 'supply-point'
class DefaultConsumption(CachedCouchDocumentMixin, Document):
"""
Model for setting the default consumption value of an entity
"""
type = StringProperty() # 'domain', 'product', 'supply-point-type', 'supply-point'
domain = StringProperty()
product_id = StringProperty()
supply_point_type = StringProperty()
supply_point_id = StringProperty()
default_consumption = DecimalProperty()
@classmethod
@classmethod
@classmethod
@classmethod
| 32.243902
| 87
| 0.710287
|
from dimagi.ext.couchdbkit import DecimalProperty, Document, StringProperty
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
TYPE_DOMAIN = 'domain'
TYPE_PRODUCT = 'product'
TYPE_SUPPLY_POINT_TYPE = 'supply-point-type'
TYPE_SUPPLY_POINT = 'supply-point'
class DefaultConsumption(CachedCouchDocumentMixin, Document):
"""
Model for setting the default consumption value of an entity
"""
type = StringProperty() # 'domain', 'product', 'supply-point-type', 'supply-point'
domain = StringProperty()
product_id = StringProperty()
supply_point_type = StringProperty()
supply_point_id = StringProperty()
default_consumption = DecimalProperty()
@classmethod
def get_domain_default(cls, domain):
return cls._by_index_key([domain, None, None, None])
@classmethod
def get_product_default(cls, domain, product_id):
return cls._by_index_key([domain, product_id, None, None])
@classmethod
def get_supply_point_default(cls, domain, product_id, supply_point_id):
return cls._by_index_key([domain, product_id, {}, supply_point_id])
@classmethod
def _by_index_key(cls, key):
return cls.view('consumption/consumption_index',
key=key,
reduce=False,
include_docs=True,
).one()
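# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# The classmethods above look up a default consumption value at increasing
# specificity: domain-wide, per product, then per supply point. The helper below
# is a hedged example of how a caller might fall back through those levels; it
# assumes a configured CouchDB with the 'consumption/consumption_index' view and
# is not invoked anywhere in this module.
def _example_resolve_default(domain, product_id, supply_point_id):
    doc = (DefaultConsumption.get_supply_point_default(domain, product_id, supply_point_id)
           or DefaultConsumption.get_product_default(domain, product_id)
           or DefaultConsumption.get_domain_default(domain))
    return doc.default_consumption if doc else None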
| 455
| 0
| 104
|
9365747a58e4241bec7c4cf80ad605ed7efc1366
| 9,200
|
py
|
Python
|
11_calibration_NHMv11/plot_byHW_calibration_results.py
|
pnorton-usgs/notebooks
|
17a38ecd3f3c052b9bd785c2e53e16a9082d1e71
|
[
"MIT"
] | null | null | null |
11_calibration_NHMv11/plot_byHW_calibration_results.py
|
pnorton-usgs/notebooks
|
17a38ecd3f3c052b9bd785c2e53e16a9082d1e71
|
[
"MIT"
] | null | null | null |
11_calibration_NHMv11/plot_byHW_calibration_results.py
|
pnorton-usgs/notebooks
|
17a38ecd3f3c052b9bd785c2e53e16a9082d1e71
|
[
"MIT"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# %%
# %%
headwater = '0259'
hw_suffix = ''
workdir = f'/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHW_sample/HW{headwater}{hw_suffix}/RESULTS'
ofs_file = f'{workdir}/objfun_{headwater}'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True)
x_vars = df.columns.tolist()[3:]
ncols = 3
numrows = int(round(len(x_vars) / float(ncols) + 0.5))
cstep = 4
# of_var = 'of_som'
# Layout info at: https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
fig, axes = plt.subplots(nrows=numrows, ncols=ncols, figsize=(10, 10), constrained_layout=True)
fig.set_constrained_layout_pads(w_pad=4 / 72, h_pad=4 / 72, hspace=0.1, wspace=0.2)
ax = axes.flatten()
for ii,of in enumerate(x_vars):
ax[ii].set_title(f'of_prms vs {of}')
step_df = df[df.step == cstep]
step_df.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='red', alpha=0.2)
df_final = step_df.iloc[[-1]]
df_final.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='black')
# precal_ns_ref_df.plot(ax=ax[0], x='OF', y=precal_ns_ref_df.columns[1], ylim=(0.0, 1.0), color=calib_color,
# label='PRECAL-ref')
# ax = plt.gca()
# step_df = df[df.step == cstep]
# df_final = step_df.iloc[[-1]]
# step_df.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='red', alpha=0.2)
# df_final.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='black')
# step_two = df[df.step == 2]
# step_two.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='green', alpha=0.2)
# step_three = df[df.step == 3]
# step_three.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='blue', alpha=0.2)
# step_four = df[df.step == 4]
# step_four.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='yellow', alpha=0.2)
# df_final = step_one.iloc[[-1]]
# df_final.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='black')
# %%
len(df.columns.tolist()[2:])
# %%
colors = ['red', 'green', 'blue', 'yellow']
ncols = 3
numrows = int(round(len(x_vars) / float(ncols) + 0.5))
rnd = 3
# of_var = 'of_som'
df = df[df.loc[:, 'round'] == rnd]
# Layout info at: https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
fig, axes = plt.subplots(nrows=numrows, ncols=ncols, figsize=(15, 15), constrained_layout=True)
fig.set_constrained_layout_pads(w_pad=4 / 72, h_pad=4 / 72, hspace=0.1, wspace=0.2)
ax = axes.flatten()
for ii,of in enumerate(x_vars):
ax[ii].set_title(f'of_prms vs {of}')
for xx in range(1, 5):
p_df = df[df.step == xx]
p_df.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color=colors[xx-1], alpha=0.2)
df_final = p_df.iloc[[-1]]
df_final.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='black')
# %%
df[df.loc[:, 'round'] == 1]
# %%
df.head()
# %%
df.info()
# %%
# %%
# %%
# %%
# %%
x_vars
# %% [markdown]
# ### Plot OFS from the original byHRU calibration
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/OFS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
# df.plot(kind='scatter',x='num_children',y='num_pets',color='red')
ax = plt.gca()
df.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='black')
# %%
# %% [markdown]
# ### Plot params
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run2/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='black')
# %% [markdown]
# ### Plot params from original calibration
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='black')
# %%
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
# %%
df.columns
# %%
# %%
# %%
var = 'tmin_cbh_adj'
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run2/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='black')
# %%
# %%
var = 'tmin_cbh_adj'
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='black')
# %%
| 34.848485
| 126
| 0.65163
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# %%
# %%
headwater = '0259'
hw_suffix = ''
workdir = f'/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHW_sample/HW{headwater}{hw_suffix}/RESULTS'
ofs_file = f'{workdir}/objfun_{headwater}'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True)
x_vars = df.columns.tolist()[3:]
ncols = 3
numrows = int(round(len(x_vars) / float(ncols) + 0.5))
cstep = 4
# of_var = 'of_som'
# Layout info at: https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
fig, axes = plt.subplots(nrows=numrows, ncols=ncols, figsize=(10, 10), constrained_layout=True)
fig.set_constrained_layout_pads(w_pad=4 / 72, h_pad=4 / 72, hspace=0.1, wspace=0.2)
ax = axes.flatten()
for ii,of in enumerate(x_vars):
ax[ii].set_title(f'of_prms vs {of}')
step_df = df[df.step == cstep]
step_df.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='red', alpha=0.2)
df_final = step_df.iloc[[-1]]
df_final.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='black')
# precal_ns_ref_df.plot(ax=ax[0], x='OF', y=precal_ns_ref_df.columns[1], ylim=(0.0, 1.0), color=calib_color,
# label='PRECAL-ref')
# ax = plt.gca()
# step_df = df[df.step == cstep]
# df_final = step_df.iloc[[-1]]
# step_df.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='red', alpha=0.2)
# df_final.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='black')
# step_two = df[df.step == 2]
# step_two.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='green', alpha=0.2)
# step_three = df[df.step == 3]
# step_three.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='blue', alpha=0.2)
# step_four = df[df.step == 4]
# step_four.plot(kind='scatter', x=of_var, y='of_prms', ax=ax, color='yellow', alpha=0.2)
# df_final = step_one.iloc[[-1]]
# df_final.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='black')
# df_final.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='black')
# %%
len(df.columns.tolist()[2:])
# %%
colors = ['red', 'green', 'blue', 'yellow']
ncols = 3
numrows = int(round(len(x_vars) / float(ncols) + 0.5))
rnd = 3
# of_var = 'of_som'
df = df[df.loc[:, 'round'] == rnd]
# Layout info at: https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html
fig, axes = plt.subplots(nrows=numrows, ncols=ncols, figsize=(15, 15), constrained_layout=True)
fig.set_constrained_layout_pads(w_pad=4 / 72, h_pad=4 / 72, hspace=0.1, wspace=0.2)
ax = axes.flatten()
for ii,of in enumerate(x_vars):
ax[ii].set_title(f'of_prms vs {of}')
for xx in range(1, 5):
p_df = df[df.step == xx]
p_df.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color=colors[xx-1], alpha=0.2)
df_final = p_df.iloc[[-1]]
df_final.plot(ax=ax[ii], kind='scatter', x=of, y='of_prms', color='black')
# %%
df[df.loc[:, 'round'] == 1]
# %%
df.head()
# %%
df.info()
# %%
# %%
# %%
# %%
# %%
x_vars
# %% [markdown]
# ### Plot OFS from the original byHRU calibration
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/OFS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
# df.plot(kind='scatter',x='num_children',y='num_pets',color='red')
ax = plt.gca()
df.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='ofRUN', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofAET', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofSCA', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofRCH', y='prmsOF', ax=ax, color='black')
df_final.plot(kind='scatter', x='ofSOM', y='prmsOF', ax=ax, color='black')
# %%
# %% [markdown]
# ### Plot params
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run2/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='black')
# %% [markdown]
# ### Plot params from original calibration
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='yellow', alpha=0.2)
df.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='fastcoef_lin', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='freeh2o_cap', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='gwflow_coef', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x='jh_coef', y='RUN', ax=ax, color='black')
# %%
ax = plt.gca()
df.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='red', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x='carea_max', y='RUN', ax=ax, color='black')
# %%
df.columns
# %%
# %%
# %%
var = 'tmin_cbh_adj'
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run2/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='black')
# %%
# %%
var = 'tmin_cbh_adj'
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU3505_run1/RESULTS'
ofs_file = f'{workdir}/PARAMS_HRU3505'
df = pd.read_csv(ofs_file, sep=r'\s+', skipinitialspace=True, header=0)
ax = plt.gca()
df.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='red', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='green', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='blue', alpha=0.2)
df.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='purple', alpha=0.2)
df_final = df.iloc[[-1]]
df_final.plot(kind='scatter', x=f'{var}', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.1', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.2', y='RUN', ax=ax, color='black')
df_final.plot(kind='scatter', x=f'{var}.3', y='RUN', ax=ax, color='black')
# %%
| 0
| 0
| 0
|
b946a1266385ed0c2b650d8a7767efd4107ff908
| 13,287
|
py
|
Python
|
oelint_adv/__main__.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
oelint_adv/__main__.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
oelint_adv/__main__.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
import argparse
import json
import os
import re
import sys
from configparser import ConfigParser
from configparser import NoOptionError
from configparser import NoSectionError
from configparser import ParsingError
from typing import Union, Dict
from oelint_parser.cls_stash import Stash
from oelint_parser.constants import CONSTANTS
from oelint_adv.cls_rule import load_rules
from oelint_adv.color import set_colorize
from oelint_adv.rule_file import set_messageformat
from oelint_adv.rule_file import set_noinfo
from oelint_adv.rule_file import set_nowarn
from oelint_adv.rule_file import set_relpaths
from oelint_adv.rule_file import set_rulefile
from oelint_adv.rule_file import set_suppressions
sys.path.append(os.path.abspath(os.path.join(__file__, '..')))
def deserialize_boolean_options(options: Dict) -> Dict[str, Union[str, bool]]:
"""Converts strings in `options` that are either 'True' or 'False' to their boolean
representations.
"""
for k, v in options.items():
if isinstance(v, str):
if v.strip() == 'False':
options[k] = False
elif v.strip() == 'True':
options[k] = True
return options
if __name__ == '__main__':
main() # pragma: no cover
| 39.310651
| 137
| 0.57989
|
import argparse
import json
import os
import re
import sys
from configparser import ConfigParser
from configparser import NoOptionError
from configparser import NoSectionError
from configparser import ParsingError
from typing import Union, Dict
from oelint_parser.cls_stash import Stash
from oelint_parser.constants import CONSTANTS
from oelint_adv.cls_rule import load_rules
from oelint_adv.color import set_colorize
from oelint_adv.rule_file import set_messageformat
from oelint_adv.rule_file import set_noinfo
from oelint_adv.rule_file import set_nowarn
from oelint_adv.rule_file import set_relpaths
from oelint_adv.rule_file import set_rulefile
from oelint_adv.rule_file import set_suppressions
sys.path.append(os.path.abspath(os.path.join(__file__, '..')))
class TypeSafeAppendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest) or []
if isinstance(items, str):
items = re.split(r'\s+|\t+|\n+', items) # pragma: no cover
items.append(values) # pragma: no cover
setattr(namespace, self.dest, items) # pragma: no cover
def deserialize_boolean_options(options: Dict) -> Dict[str, Union[str, bool]]:
"""Converts strings in `options` that are either 'True' or 'False' to their boolean
representations.
"""
for k, v in options.items():
if isinstance(v, str):
if v.strip() == 'False':
options[k] = False
elif v.strip() == 'True':
options[k] = True
return options
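# --- Illustrative example (editorial addition) ---
# A hedged sketch of the behaviour documented above: only values that are exactly
# the strings 'True' or 'False' (ignoring surrounding whitespace) are converted,
# everything else is passed through unchanged. The sample dict is hypothetical and
# the helper is not called anywhere in this module.
def _example_deserialize_boolean_options():
    sample = {'quiet': 'True ', 'suppress': 'oelint.var', 'fix': False}
    result = deserialize_boolean_options(sample)
    assert result == {'quiet': True, 'suppress': 'oelint.var', 'fix': False}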
def parse_configfile():
config = ConfigParser()
for conffile in [os.environ.get('OELINT_CONFIG', '/does/not/exist'),
os.path.join(os.getcwd(), '.oelint.cfg'),
os.path.join(os.environ.get('HOME', '/does/not/exist'), '.oelint.cfg')]:
try:
if not os.path.exists(conffile):
continue
config.read(conffile)
items = {k.replace('-', '_'): v for k, v in config.items('oelint')}
items = deserialize_boolean_options(items)
return items
except (PermissionError, SystemError) as e: # pragma: no cover
print(f'Failed to load config file {conffile}. {e!r}') # noqa: T001 - it's fine here; # pragma: no cover
except (NoSectionError, NoOptionError, ParsingError) as e:
print(f'Failed parsing config file {conffile}. {e!r}') # noqa: T001 - it's here for a reason
return {}
def create_argparser():
parser = argparse.ArgumentParser(prog='oelint-adv',
description='Advanced OELint - Check bitbake recipes against OECore styleguide')
parser.register('action', 'tsappend', TypeSafeAppendAction)
parser.add_argument('--suppress', default=[],
action='tsappend', help='Rules to suppress')
parser.add_argument('--output', default=sys.stderr,
help='Where to flush the findings (default: stderr)')
parser.add_argument('--fix', action='store_true', default=False,
help='Automatically try to fix the issues')
parser.add_argument('--nobackup', action='store_true', default=False,
help='Don\'t create backup file when auto fixing')
parser.add_argument('--addrules', nargs='+', default=[],
help='Additional non-default rulessets to add')
parser.add_argument('--customrules', nargs='+', default=[],
help='Additional directories to parse for rulessets')
parser.add_argument('--rulefile', default=None,
help='Rulefile')
parser.add_argument('--constantfile', default=None, help='Constantfile')
parser.add_argument('--color', action='store_true', default=False,
help='Add color to the output based on the severity')
parser.add_argument('--quiet', action='store_true', default=False,
help='Print findings only')
parser.add_argument('--noinfo', action='store_true', default=False,
help='Don\'t print information level findings')
parser.add_argument('--nowarn', action='store_true', default=False,
help='Don\'t print warning level findings')
parser.add_argument('--relpaths', action='store_true', default=False,
help='Show relative paths instead of absolute paths in results')
parser.add_argument('--noid', action='store_true', default=False,
help='Don\'t show the error-ID in the output')
parser.add_argument('--messageformat', default='{path}:{line}:{severity}:{id}:{msg}',
type=str, help='Format of message output')
parser.add_argument('--constantmods', default=[], nargs='+',
help='''
Modifications to the constant db.
prefix with:
+ - to add to DB,
- - to remove from DB,
None - to override DB
''')
parser.add_argument('--print-rulefile', action='store_true', default=False,
help='Print loaded rules as a rulefile and exit')
parser.add_argument('--exit-zero', action='store_true', default=False,
help='Always return a 0 (non-error) status code, even if lint errors are found')
# Override the defaults with the values from the config file
parser.set_defaults(**parse_configfile())
parser.add_argument('files', nargs='*', help='File to parse')
return parser
def parse_arguments():
return create_argparser().parse_args() # pragma: no cover
def arguments_post(args): # noqa: C901 - complexity is still okay
# Convert boolean symbols
for _option in [
'color',
'exit_zero',
'fix',
'nobackup',
'noinfo',
'nowarn',
'print_rulefile',
'quiet',
'relpaths',
]:
try:
setattr(args, _option, bool(getattr(args, _option)))
except AttributeError: # pragma: no cover
pass # pragma: no cover
# Convert list symbols
for _option in [
'suppress',
'constantmods',
]:
try:
if not isinstance(getattr(args, _option), list):
setattr(args, _option, [x.strip() for x in (getattr(args, _option) or '').split('\n') if x])
except AttributeError: # pragma: no cover
pass # pragma: no cover
if args.files == [] and not args.print_rulefile:
raise argparse.ArgumentTypeError('no input files')
if args.rulefile:
try:
with open(args.rulefile) as i:
set_rulefile(json.load(i))
except (FileNotFoundError, json.JSONDecodeError):
raise argparse.ArgumentTypeError(
'\'rulefile\' is not a valid file')
if args.constantfile:
try:
with open(args.constantfile) as i:
CONSTANTS.AddFromConstantFile(json.load(i))
except (FileNotFoundError, json.JSONDecodeError):
raise argparse.ArgumentTypeError(
'\'constantfile\' is not a valid file')
for mod in args.constantmods:
try:
with open(mod.lstrip('+-')) as _in:
_cnt = json.load(_in)
if mod.startswith('+'):
CONSTANTS.AddConstants(_cnt)
elif mod.startswith('-'):
CONSTANTS.RemoveConstants(_cnt)
else:
CONSTANTS.OverrideConstants(_cnt)
except (FileNotFoundError, json.JSONDecodeError):
raise argparse.ArgumentTypeError(
'mod file \'{file}\' is not a valid file'.format(file=mod))
set_colorize(args.color)
set_nowarn(args.nowarn)
set_noinfo(args.noinfo)
set_relpaths(args.relpaths)
set_suppressions(args.suppress)
if args.noid:
# just strip id from message format if noid is requested
args.messageformat = args.messageformat.replace('{id}', '')
# strip any double : resulting from the previous operation
args.messageformat = args.messageformat.replace('::', ':')
set_messageformat(args.messageformat)
return args
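# --- Illustrative note (editorial addition) ---
# With --noid the default message format collapses from
#   '{path}:{line}:{severity}:{id}:{msg}'
# to
#   '{path}:{line}:{severity}:{msg}'
# because '{id}' is stripped first and the leftover '::' is then collapsed to ':'.
# The helper below simply restates those two string operations and is not called anywhere.
def _example_noid_messageformat(fmt='{path}:{line}:{severity}:{id}:{msg}'):
    fmt = fmt.replace('{id}', '').replace('::', ':')
    assert fmt == '{path}:{line}:{severity}:{msg}'
    return fmt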
def group_files(files):
# in case multiple bb files are passed at once we might need to group them to
# avoid having multiple, potentially wrong hits of include files shared across
# the bb files in the stash
res = {}
for f in files:
_filename, _ext = os.path.splitext(f)
if _ext not in ['.bb']:
continue
if '_' in os.path.basename(_filename):
_filename_key = _filename
else:
_filename_key = os.path.basename(_filename)
if _filename_key not in res: # pragma: no cover
res[_filename_key] = set()
res[_filename_key].add(f)
# second round now for the bbappend files
for f in files:
_filename, _ext = os.path.splitext(f)
if _ext not in ['.bbappend']:
continue
_match = False
for _, v in res.items():
_needle = '.*/' + os.path.basename(_filename).replace('%', '.*')
if any(re.match(_needle, x) for x in v):
v.add(f)
_match = True
if not _match:
_filename_key = '_'.join(os.path.basename(
_filename).split('_')[:-1]).replace('%', '')
if _filename_key not in res: # pragma: no cover
res[_filename_key] = set()
res[_filename_key].add(f)
# as sets are unordered, we convert them to sorted lists at this point
# order is like the files have been passed via CLI
for k, v in res.items():
res[k] = sorted(v, key=lambda index: files.index(index))
return res.values()
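# --- Illustrative example (editorial addition, file names are hypothetical) ---
# group_files() buckets .bb recipes (keyed by base name, or by the full path when
# the name carries a version suffix such as 'foo_1.0.bb') and then attaches any
# matching .bbappend files to the same bucket. A hedged sketch of the expected
# grouping; the helper is not called anywhere in this module.
def _example_group_files():
    files = ['meta/recipes/foo_1.0.bb',
             'meta-extra/appends/foo_%.bbappend',
             'meta/recipes/bar.bb']
    # Expected result: the foo recipe and its bbappend end up in one group,
    # 'meta/recipes/bar.bb' in another.
    return [list(group) for group in group_files(files)]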
def print_rulefile(args):
rules = load_rules(args, add_rules=args.addrules,
add_dirs=args.customrules)
ruleset = {}
for r in rules:
ruleset.update(r.get_rulefile_entries())
print(json.dumps(ruleset, indent=2)) # noqa: T001 - it's here for a reason
def run(args):
try:
rules = load_rules(args, add_rules=args.addrules,
add_dirs=args.customrules)
_loaded_ids = []
for r in rules:
_loaded_ids += r.get_ids()
if not args.quiet:
print('Loaded rules:\n\t{rules}'.format( # noqa: T001 - it's here for a reason
rules='\n\t'.join(sorted(_loaded_ids))))
issues = []
fixedfiles = []
groups = group_files(args.files)
for group in groups:
stash = Stash(args)
for f in group:
try:
stash.AddFile(f)
except FileNotFoundError as e: # pragma: no cover
if not args.quiet: # pragma: no cover
print('Can\'t open/read: {e}'.format(e=e)) # noqa: T001 - it's fine here; # pragma: no cover
stash.Finalize()
_files = list(set(stash.GetRecipes() + stash.GetLoneAppends()))
for _, f in enumerate(_files):
for r in rules:
if not r.OnAppend and f.endswith('.bbappend'):
continue
if r.OnlyAppend and not f.endswith('.bbappend'):
continue
if args.fix:
fixedfiles += r.fix(f, stash)
issues += r.check(f, stash)
fixedfiles = list(set(fixedfiles))
for f in fixedfiles:
_items = [f] + stash.GetLinksForFile(f)
for i in _items:
items = stash.GetItemsFor(filename=i, nolink=True)
if not args.nobackup:
os.rename(i, i + '.bak') # pragma: no cover
with open(i, 'w') as o:
o.write(''.join([x.RealRaw for x in items]))
if not args.quiet:
print('{path}:{lvl}:{msg}'.format(path=os.path.abspath(i), # noqa: T001 - it's fine here; # pragma: no cover
lvl='debug', msg='Applied automatic fixes'))
return sorted(set(issues), key=lambda x: x[0])
except Exception:
import traceback
# pragma: no cover
print('OOPS - That shouldn\'t happen - {files}'.format(files=args.files)) # noqa: T001 - it's here for a reason
# pragma: no cover
traceback.print_exc()
return []
def main(): # pragma: no cover
args = arguments_post(parse_arguments())
if args.print_rulefile:
print_rulefile(args)
sys.exit(0)
issues = run(args)
if args.output != sys.stderr:
args.output = open(args.output, 'w')
args.output.write('\n'.join([x[1] for x in issues]))
if issues:
args.output.write('\n')
if args.output != sys.stderr:
args.output.close()
exit_code = len(issues) if not args.exit_zero else 0
sys.exit(exit_code)
if __name__ == '__main__':
main() # pragma: no cover
| 11,770
| 23
| 234
|
d943948e5dc3be1e7b8dcba0fcb1cfc4e4c719e8
| 1,213
|
py
|
Python
|
pyclopedia/p01_beginner/p02_data_type/p03_str.py
|
MacHu-GWU/pyclopedia-project
|
c6ee156eb40bc5a4ac5f51aa735b6fd004cb68ee
|
[
"MIT"
] | null | null | null |
pyclopedia/p01_beginner/p02_data_type/p03_str.py
|
MacHu-GWU/pyclopedia-project
|
c6ee156eb40bc5a4ac5f51aa735b6fd004cb68ee
|
[
"MIT"
] | null | null | null |
pyclopedia/p01_beginner/p02_data_type/p03_str.py
|
MacHu-GWU/pyclopedia-project
|
c6ee156eb40bc5a4ac5f51aa735b6fd004cb68ee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
String manipulation.
"""
# left strip
assert " Hello ".lstrip() == "Hello "
# right strip
assert " Hello ".rstrip() == " Hello"
# strip
assert " Hello ".strip() == "Hello"
# upper case
assert "Hello".upper() == "HELLO"
# lower case
assert "Hello".lower() == "hello"
# swap case
assert "Hello".swapcase() == "hELLO"
# titlize
assert "this is so good".title() == "This Is So Good"
# center
assert "Hello".center(9, "-") == "--Hello--"
# index
assert "this is so good".index("is") == 2
# replace
assert "this is so good".replace("is", "are") == "thare are so good"
# find
assert "this is so good".find("is") == 2
# count
assert "this is so good".count("o") == 3
# split
assert "This is so good".split(" ") == ["This", "is", "so", "good"]
# join
assert ", ".join(["a", "b", "c"]) == "a, b, c"
# ascii code to string
assert chr(88) == "X"
# string to ascii code
assert ord("X") == 88
# partition
assert "this is so good".partition("is") == ("th", "is", " is so good")
# make translate table and translate
table = str.maketrans("abc", "xyz")
assert "abc".translate(table) == "xyz"
# concatenate
assert "hello" + " " + "world" == "hello world"
| 18.661538
| 71
| 0.591921
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
String manipulation.
"""
# left strip
assert " Hello ".lstrip() == "Hello "
# right strip
assert " Hello ".rstrip() == " Hello"
# strip
assert " Hello ".strip() == "Hello"
# upper case
assert "Hello".upper() == "HELLO"
# lower case
assert "Hello".lower() == "hello"
# swap case
assert "Hello".swapcase() == "hELLO"
# titlize
assert "this is so good".title() == "This Is So Good"
# center
assert "Hello".center(9, "-") == "--Hello--"
# index
assert "this is so good".index("is") == 2
# replace
assert "this is so good".replace("is", "are") == "thare are so good"
# find
assert "this is so good".find("is") == 2
# count
assert "this is so good".count("o") == 3
# split
assert "This is so good".split(" ") == ["This", "is", "so", "good"]
# join
assert ", ".join(["a", "b", "c"]) == "a, b, c"
# ascii code to string
assert chr(88) == "X"
# string to ascii code
assert ord("X") == 88
# partition
assert "this is so good".partition("is") == ("th", "is", " is so good")
# make translate table and translate
table = str.maketrans("abc", "xyz")
assert "abc".translate(table) == "xyz"
# concatenate
assert "hello" + " " + "world" == "hello world"
| 0
| 0
| 0
|
deea753df2648662a3991eee9178a066ec6dd686
| 13,111
|
py
|
Python
|
api/tests.py
|
MatteoNardi/dyanote-server
|
b7e61555da147f699962bd1ea3df5970175594d6
|
[
"MIT"
] | null | null | null |
api/tests.py
|
MatteoNardi/dyanote-server
|
b7e61555da147f699962bd1ea3df5970175594d6
|
[
"MIT"
] | null | null | null |
api/tests.py
|
MatteoNardi/dyanote-server
|
b7e61555da147f699962bd1ea3df5970175594d6
|
[
"MIT"
] | null | null | null |
"""
This file contains unittests for the api app.
Use test_settings when running this:
./manage.py test --settings=dyanote.test_settings api
This will use sqlite and other settings to make test execution faster.
Command used to create test database.
./manage.py dumpdata --indent=4
--natural
-e admin
-e sessions
-e contenttypes
-e auth.Permission
-e south.migrationhistory > api/fixtures/test-db.json
To see test coverage use:
coverage run ./manage.py test --settings=dyanote.test_settings api
coverage report -m --include=api/*
coverage html
"""
import unittest
import re
from urllib.parse import quote
from json import loads as load_json
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import ValidationError
from rest_framework.test import APITestCase, APIClient
from rest_framework import status
from django.core.urlresolvers import get_script_prefix, resolve
from api.models import Page, ActivationKey
from api import utils
# Constant values found in the test database fixture
USERNAME = 'test@dyanote.com'
PASSWORD = 'pwd'
CLIENT_ID = 'bb05c6ab017f50116084'
CLIENT_SECRET = '4063c2648cdd7f2e4dae563da80a516f2eb6ebb6'
ACCESS_TOKEN = '1b24279ad7d5986301583538804e5240c3e588af'
ADMIN_USERNAME = 'admin'
ADMIN_PASSWORD = 'admin'
# Model test
# Utils tests
# User testing
| 38.789941
| 89
| 0.636641
|
"""
This file contains unittests for the api app.
Use test_settings when running this:
./manage.py test --settings=dyanote.test_settings api
This will use sqlite and other settings to make test execution faster.
Command used to create test database.
./manage.py dumpdata --indent=4
--natural
-e admin
-e sessions
-e contenttypes
-e auth.Permission
-e south.migrationhistory > api/fixtures/test-db.json
To see test coverage use:
coverage run ./manage.py test --settings=dyanote.test_settings api
coverage report -m --include=api/*
coverage html
"""
import unittest
import re
from urllib.parse import quote
from json import loads as load_json
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth.models import User
from django.core import mail
from django.core.exceptions import ValidationError
from rest_framework.test import APITestCase, APIClient
from rest_framework import status
from django.core.urlresolvers import get_script_prefix, resolve
from api.models import Page, ActivationKey
from api import utils
# Constant values found in the test database fixture
USERNAME = 'test@dyanote.com'
PASSWORD = 'pwd'
CLIENT_ID = 'bb05c6ab017f50116084'
CLIENT_SECRET = '4063c2648cdd7f2e4dae563da80a516f2eb6ebb6'
ACCESS_TOKEN = '1b24279ad7d5986301583538804e5240c3e588af'
ADMIN_USERNAME = 'admin'
ADMIN_PASSWORD = 'admin'
# Model test
class PageTest(APITestCase):
fixtures = ['test-db.json'] # Load test db
@classmethod
def create_page(cls, author, title="Test note", parent=None,
body="Lorem ipsum dol...", flags=Page.NORMAL):
return Page.objects.create(
author=author,
title=title,
parent=parent,
body=body,
flags=flags)
def test_page_creation(self):
        print(resolve('/api/users/test%asdcom/'))
note = PageTest.create_page(
author=User.objects.get(username=USERNAME),
title="Root page",
flags=Page.ROOT)
self.assertTrue(isinstance(note, Page))
note.clean()
self.assertEqual(note.title, "Root page")
def test_normal_page_with_no_parent_throws_error(self):
note = PageTest.create_page(
author=User.objects.get(username=USERNAME),
flags=Page.NORMAL)
self.assertRaises(ValidationError, note.clean)
# Utils tests
class UtilsTest(APITestCase):
fixtures = ['test-db.json']
def test_get_server_url(self):
self.assertEqual(utils.get_server_url(), 'https://dyanote.herokuapp.com')
def test_get_client_url(self):
self.assertEqual(utils.get_client_url(), 'http://dyanote.com')
def test_user_exists(self):
self.assertTrue(utils.user_exists(USERNAME))
self.assertFalse(utils.user_exists('abracadabra@gmmail.com'))
def test_get_note_url(self):
note = PageTest.create_page(author=User.objects.get(username=USERNAME))
url = 'https://dyanote.herokuapp.com/api/users/test%40dyanote.com/pages/1/'
self.assertEqual(utils.get_note_url(note), url)
def test_setup_default_notes(self):
user = User.objects.create_user('test@test.com', 'test@test.com', 'pwd')
utils.setup_default_notes(user)
pages = Page.objects.filter(author=user.id)
self.assertEqual(pages.count(), 9)
root = Page.objects.get(author=user.id, flags=Page.ROOT)
todo = Page.objects.get(author=user.id, title='Todo')
url = utils.get_note_url(todo)
self.assertIn(url, root.body)
self.assertEqual(todo.parent, root)
# User testing
class UserAPITest(APITestCase):
fixtures = ['test-db.json']
def set_token(self, token):
self.client = APIClient()
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + token)
def login(self, username, password):
params = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'password',
'username': username,
'password': password
}
path = quote('/api/users/{}/login/'.format(username))
response = self.client.post(path, params)
self.assertEqual(response.status_code, status.HTTP_200_OK)
token = load_json(response.content.decode())['access_token']
self.set_token(token)
def test_login(self):
params = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'password',
'username': USERNAME,
'password': PASSWORD
}
path = quote('/api/users/{}/login/'.format(USERNAME))
response = self.client.post(path, params)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_login_with_wrong_password(self):
params = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'password',
'username': USERNAME,
'password': PASSWORD + '..ops!'
}
path = quote('/api/users/{}/login/'.format(USERNAME))
response = self.client.post(path, params)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_login_with_inactive_user(self):
u = User.objects.create_user('test@test.com', 'test@test.com', 'pwd')
u.is_active = False
u.save()
params = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'password',
'username': 'test@test.com',
'password': 'pwd'
}
path = quote('/api/users/{}/login/'.format('test@test.com'))
response = self.client.post(path, params)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.content, b'User is not active')
def test_login_with_wrong_path(self):
params = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'password',
'username': USERNAME,
'password': PASSWORD
}
path = quote('/api/users/{}/login/'.format('wrongEmail@test.com'))
response = self.client.post(path, params)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.content, b'Mismatching usernames')
def test_get_user_detail_as_unauthenticated(self):
path = quote('/api/users/{}/'.format(USERNAME))
response = self.client.get(path)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
ERROR = b'{"detail":"Authentication credentials were not provided."}'
self.assertEqual(response.content, ERROR)
def test_get_user_detail(self):
self.set_token(ACCESS_TOKEN)
path = quote('/api/users/{}/'.format(USERNAME))
response = self.client.get(path)
self.assertEqual(response.status_code, status.HTTP_200_OK)
RES = (b'{"url":"http://testserver/api/users/test%40dyanote.com/",'
b'"username":"test@dyanote.com","email":"test@dyanote.com",'
b'"pages":"http://testserver/api/users/test%40dyanote.com/pages/"}')
self.assertEqual(response.content, RES)
def test_get_user_detail_as_admin(self):
self.login('admin', 'admin')
path = quote('/api/users/{}/'.format(USERNAME))
response = self.client.get(path)
self.assertEqual(response.status_code, status.HTTP_200_OK)
RES = (b'{"url":"http://testserver/api/users/test%40dyanote.com/",'
b'"username":"test@dyanote.com","email":"test@dyanote.com",'
b'"pages":"http://testserver/api/users/test%40dyanote.com/pages/"}')
self.assertEqual(response.content, RES)
def test_get_user_list_as_unauthenticated(self):
response = self.client.get('/api/users/')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
ERROR = b'{"detail":"Authentication credentials were not provided."}'
self.assertEqual(response.content, ERROR)
def test_get_user_list(self):
self.set_token(ACCESS_TOKEN)
response = self.client.get('/api/users/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
MSG = (b'[{"url":"http://testserver/api/users/test%40dyanote.com/",'
b'"username":"test@dyanote.com",'
b'"email":"test@dyanote.com",'
b'"pages":"http://testserver/api/users/test%40dyanote.com/pages/"}]')
self.assertEqual(response.content, MSG)
def test_user_creation(self):
params = {
'email': 'new_user@dyanote.com',
'password': '123'
}
response = self.client.post('/api/users/', params, format='json')
# check mail
msg = ("Welcome to Dyanote, your personal hypertext\.\n"
"To activate your account, follow this link:\n"
"https://dyanote.herokuapp\.com/api/users/new_user@dyanote\.com/activate/"
"\?key=([0-9a-fA-F]+)\n\n")
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, 'Welcome to Dyanote')
self.assertEquals(mail.outbox[0].from_email, 'Dyanote')
self.assertEquals(mail.outbox[0].to, ['new_user@dyanote.com'])
self.assertRegexpMatches(mail.outbox[0].body, msg)
# check response
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
# check database
u = User.objects.get(email='new_user@dyanote.com')
self.assertFalse(u.is_active)
key = re.match(msg, mail.outbox[0].body).group(1)
k = ActivationKey.objects.get(key=key, user__email='new_user@dyanote.com')
self.assertIsNotNone(k)
self.assertTrue(u.check_password('123'))
def test_user_creation_with_invalid_data(self):
params = {
'email': 'new_user@dyanote.com',
}
response = self.client.post('/api/users/', params, format='json')
self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_recreation(self):
params = {
'email': USERNAME,
'password': '123'
}
response = self.client.post('/api/users/', params, format='json')
self.assertEquals(response.status_code, status.HTTP_409_CONFLICT)
def test_inactive_user_recreation(self):
# If someone tries to create a user which already exists, change password and
# send new activation mail.
u = User.objects.get(email=USERNAME)
u.is_active = False
u.save()
params = {
'email': USERNAME,
'password': 'new password 123'
}
response = self.client.post('/api/users/', params, format='json')
response = self.client.post('/api/users/', params, format='json')
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
u = User.objects.get(email=USERNAME)
self.assertTrue(u.check_password('new password 123'))
def test_user_activation(self):
user = User.objects.create_user('new_user@dyanote.com',
'new_user@dyanote.com', '123')
user.is_active = False
user.save()
key = ActivationKey.objects.create(key='0123456789abcdef', user=user)
data = {
'key': '0123456789abcdef'
}
path = quote('/api/users/{}/activate/'.format('new_user@dyanote.com'))
response = self.client.get(path, data)
self.assertEquals(response.status_code, status.HTTP_200_OK)
user = User.objects.get(pk=user.pk)
self.assertTrue(user.is_active)
def test_user_activation_wrong_user(self):
user = User.objects.create_user('new_user@dyanote.com',
'new_user@dyanote.com', '123')
user.is_active = False
user.save()
key = ActivationKey.objects.create(key='0123456789abcdef', user=user)
data = {
'key': '0123456789abcdef'
}
path = quote('/api/users/{}/activate/'.format(USERNAME))
response = self.client.get(path, data)
self.assertEquals(response.status_code, status.HTTP_302_FOUND)
user = User.objects.get(pk=user.pk)
self.assertFalse(user.is_active)
def test_user_activation_creates_default_notes(self):
user = User.objects.create_user('new_user@dyanote.com',
'new_user@dyanote.com', '123')
user.is_active = False
user.save()
key = ActivationKey.objects.create(key='0123456789abcdef', user=user)
data = {
'key': '0123456789abcdef'
}
path = quote('/api/users/{}/activate/'.format('new_user@dyanote.com'))
response = self.client.get(path, data)
self.assertEquals(response.status_code, status.HTTP_200_OK)
user = User.objects.get(pk=user.pk)
self.assertTrue(user.pages.count() > 5)
| 10,745
| 856
| 66
|
328b618a39d4ad5e9ab39c77cc3954ce5a7783ae
| 3,280
|
py
|
Python
|
tests/sparktests/test_sources.py
|
commonsearch/cosr-back
|
28ca0c1b938dde52bf4f59a835c98dd5ab22cad6
|
[
"Apache-2.0"
] | 141
|
2016-02-17T14:27:57.000Z
|
2021-12-27T02:56:48.000Z
|
tests/sparktests/test_sources.py
|
commonsearch/cosr-back
|
28ca0c1b938dde52bf4f59a835c98dd5ab22cad6
|
[
"Apache-2.0"
] | 69
|
2016-02-20T02:06:59.000Z
|
2017-01-29T22:23:46.000Z
|
tests/sparktests/test_sources.py
|
commonsearch/cosr-back
|
28ca0c1b938dde52bf4f59a835c98dd5ab22cad6
|
[
"Apache-2.0"
] | 38
|
2016-02-25T04:40:07.000Z
|
2020-06-11T07:22:44.000Z
|
import pytest
import shutil
import tempfile
import os
import pipes
import ujson as json
CORPUS = {
"docs": [
{
"url": "http://www.douglasadams.com/",
"content": """ <title>xxxxuniquecontent</title> """
},
{
"url": "http://www.example.com/page1",
"content": """ <title>xxxxuniquecontent2</title> """
}
],
"block": "1"
}
@pytest.mark.elasticsearch
| 30.654206
| 104
| 0.602439
|
import pytest
import shutil
import tempfile
import os
import pipes
import ujson as json
CORPUS = {
"docs": [
{
"url": "http://www.douglasadams.com/",
"content": """ <title>xxxxuniquecontent</title> """
},
{
"url": "http://www.example.com/page1",
"content": """ <title>xxxxuniquecontent2</title> """
}
],
"block": "1"
}
@pytest.mark.elasticsearch
def test_source_multiple(searcher, indexer, sparksubmit):
# Sources are done in order and overlapping documents are overwritten
# This is because they both use block=1
sparksubmit(
"""spark/jobs/pipeline.py \
--plugin plugins.filter.All:index_body=1 \
--source wikidata:block=1 \
--source corpus:%s """ % (
pipes.quote(json.dumps(CORPUS)),
)
)
# From wikidata only
search_res = searcher.client.search("sfgov", explain=False, lang=None, fetch_docs=True)
assert len(search_res["hits"]) == 1
assert search_res["hits"][0]["url"] == "http://sfgov.org"
# From corpus only
search_res = searcher.client.search("xxxxuniquecontent2", explain=False, lang=None, fetch_docs=True)
assert len(search_res["hits"]) == 1
assert search_res["hits"][0]["url"] == "http://www.example.com/page1"
# Overwritten from corpus
search_res = searcher.client.search("xxxxuniquecontent", explain=False, lang=None, fetch_docs=True)
assert len(search_res["hits"]) == 1
assert search_res["hits"][0]["url"] == "http://www.douglasadams.com/"
search_res = searcher.client.search("douglasadams", explain=False, lang=None, fetch_docs=True)
assert len(search_res["hits"]) == 1
assert search_res["hits"][0]["url"] == "http://www.douglasadams.com/"
def test_source_commoncrawl(sparksubmit):
tmp_dir = tempfile.mkdtemp()
try:
sparksubmit(
"""spark/jobs/pipeline.py \
--source commoncrawl:limit=2,maxdocs=3 \
--plugin plugins.dump.DocumentMetadata:format=json,output=%s/intermediate/ """ % (
tmp_dir
)
)
intermediate_dir = os.path.join(tmp_dir, "intermediate")
files = [f for f in os.listdir(intermediate_dir) if f.endswith(".json")]
assert len(files) == 1
items = []
with open(os.path.join(intermediate_dir, files[0]), "r") as jsonf:
for line in jsonf.readlines():
items.append(json.loads(line.strip()))
# print items
assert len(items) == 6
assert "id" in items[0]
assert "url" in items[0]
# This is a silly test but it should work: read the metadata and dump it again somewhere else,
# but this time as parquet!
sparksubmit(
"""spark/jobs/pipeline.py \
--source metadata:format=json,path=%s/intermediate/ \
--plugin plugins.dump.DocumentMetadata:format=parquet,output=%s/intermediate2/ """ % (
tmp_dir, tmp_dir
)
)
from .test_plugin_webgraph import _read_parquet
data = _read_parquet(os.path.join(tmp_dir, "intermediate2"))
assert len(data) == 6
assert "url" in data[0]
finally:
shutil.rmtree(tmp_dir)
| 2,793
| 0
| 45
|
8a88666d1dd6f499a3ed35078674e978ae79866d
| 1,366
|
py
|
Python
|
h2o-py/tests/testdir_algos/glm/pyunit_mean_residual_deviance_glm.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 6,098
|
2015-05-22T02:46:12.000Z
|
2022-03-31T16:54:51.000Z
|
h2o-py/tests/testdir_algos/glm/pyunit_mean_residual_deviance_glm.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,517
|
2015-05-23T02:10:54.000Z
|
2022-03-30T17:03:39.000Z
|
h2o-py/tests/testdir_algos/glm/pyunit_mean_residual_deviance_glm.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2,199
|
2015-05-22T04:09:55.000Z
|
2022-03-28T22:20:45.000Z
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_mean_residual_deviance)
else:
glm_mean_residual_deviance()
| 42.6875
| 120
| 0.669107
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def glm_mean_residual_deviance():
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
s = cars[0].runif()
train = cars[s > 0.2]
valid = cars[s <= 0.2]
predictors = ["displacement","power","weight","acceleration","year"]
response_col = "economy"
glm = H2OGeneralizedLinearEstimator(nfolds=3)
glm.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)
glm_mrd = glm.mean_residual_deviance(train=True,valid=True,xval=True)
assert isinstance(glm_mrd['train'],float), "Expected training mean residual deviance to be a float, but got " \
"{0}".format(type(glm_mrd['train']))
assert isinstance(glm_mrd['valid'],float), "Expected validation mean residual deviance to be a float, but got " \
"{0}".format(type(glm_mrd['valid']))
assert isinstance(glm_mrd['xval'],float), "Expected cross-validation mean residual deviance to be a float, but got " \
"{0}".format(type(glm_mrd['xval']))
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_mean_residual_deviance)
else:
glm_mean_residual_deviance()
| 1,071
| 0
| 23
|
7eaaf4d3a29ee1f4f1ae7d478ad437c6dff04d92
| 1,210
|
py
|
Python
|
hw/ni_dac_hw.py
|
ScopeFoundry/HW_ni_daq
|
aebe097df1fbd7abcfe93e08c93ba0be0a285216
|
[
"MIT"
] | null | null | null |
hw/ni_dac_hw.py
|
ScopeFoundry/HW_ni_daq
|
aebe097df1fbd7abcfe93e08c93ba0be0a285216
|
[
"MIT"
] | null | null | null |
hw/ni_dac_hw.py
|
ScopeFoundry/HW_ni_daq
|
aebe097df1fbd7abcfe93e08c93ba0be0a285216
|
[
"MIT"
] | null | null | null |
from ScopeFoundry import HardwareComponent
from ScopeFoundryHW.ni_daq.devices.NI_Daq import NI_DacTask
| 32.702703
| 70
| 0.599174
|
from ScopeFoundry import HardwareComponent
from ScopeFoundryHW.ni_daq.devices.NI_Daq import NI_DacTask
class NI_DAC_HW(HardwareComponent):
def __init__(self, app, name='ni_dac', debug=False):
self.name = name
HardwareComponent.__init__(self, app, debug=debug)
def setup(self):
self.settings.New('dac_val', dtype=float, ro=False, unit='V')
self.settings.New('channel', dtype=str, initial='/Dev1/ao0')
def connect(self):
S = self.settings
# Open connection to hardware
self.dac_task = NI_DacTask(channel=S['channel'],
name=self.name)
self.dac_task.set_single()
self.dac_task.start()
#TODO disable channel and terminal_config
#connect settings to hardware
self.settings.dac_val.connect_to_hardware(
write_func=self.dac_task.set)
def disconnect(self):
self.settings.disconnect_all_from_hardware()
#TODO reenable channel and terminal_config
if hasattr(self, 'dac_task'):
self.dac_task.close()
del self.dac_task
| 939
| 14
| 155
|
137fa1dfdd618ac27b052bdf4dc933141bdaf33c
| 5,328
|
py
|
Python
|
elastico/cli/alerter.py
|
klorenz/python-elastico
|
9a39e6cfe33d3081cc52424284c19e9698343006
|
[
"MIT"
] | null | null | null |
elastico/cli/alerter.py
|
klorenz/python-elastico
|
9a39e6cfe33d3081cc52424284c19e9698343006
|
[
"MIT"
] | null | null | null |
elastico/cli/alerter.py
|
klorenz/python-elastico
|
9a39e6cfe33d3081cc52424284c19e9698343006
|
[
"MIT"
] | null | null | null |
"""cli.alerter -- control alerter
With ``alerter`` command you can control the :py:mod:`~elastico.alerter`
module.
For more help on a command, run::
elastico alerter <command> -h
"""
from .cli import command, opt, arg
from ..alerter import Alerter
from ..connection import elasticsearch
from ..util import write_output
from ..server import Server
import pyaml, logging, time, yaml, sys
logger = logging.getLogger('elastico.cli.alerter')
alerter_command = command.add_subcommands('alerter', description=__doc__)
@alerter_command("expand-rules",
arg("--list", '-l', choices=['names', 'keys', 'types', 'alerts'], default=None),
arg("--format", '-f', default=None),
)
def alerter_expand_rules(config):
"""Expand rules, that you can check, if they are correct
This command expands the rules like in a regular alerter run and prints
them to stdout in YAML format. This way you can check, if all variables
and defaults are expanded as expected.
"""
expanded_rules = Alerter.expand_rules(config)
if config['alerter.expand-rules.list']:
expand = config['alerter.expand-rules.list']
if expand in ('names', 'keys', 'types'):
for name in set([ rule[expand[:-1]] for rule in expanded_rules ]):
print(name)
if expand == 'alerts':
for name in set([ "%s-%s" % (rule['type'], rule['key']) for rule in expanded_rules ]):
print(name)
elif config['alerter.expand-rules.format']:
for rule in expanded_rules:
print(config['alerter.expand-rules.format'].format(**rule))
else:
pyaml.p(expanded_rules)
@alerter_command('check',
arg('--status', "-s", choices=['ok', 'alert', 'error'], default='ok'),
arg('alert', nargs="*", default=[]),
)
# need a command, where I simulate the data input for the checks, such that
# you can check, if messages are created correctly
# need a command to display dependency tree of alert rules and alerts
@alerter_command('deps')
@alerter_command('status', opt('--all')) #, arg("rule"))
@alerter_command('show',
arg('item', choices=('rules', 'alerts'), help="choose what to display"),
opt('--details', '--all', '-a', help="display rule details")
)
@alerter_command("run")
def alerter_run(config):
"""run alerter"""
alerter = Alerter(elasticsearch(config), config)
alerter.check_alerts()
@alerter_command("serve",
arg('--sleep-seconds', '-s', type=float, default=60, config="serve.sleep_seconds"),
arg('--count', '-c', type=int, default=0, config="serve.count"),
)
def alerter_serve(config):
"""run alerter"""
server = Server(config, run=_run)
server.run()
@alerter_command("query")
def alerter_query(config):
    """query alerter"""
pass
| 31.714286
| 98
| 0.631569
|
"""cli.alerter -- control alerter
With ``alerter`` command you can control the :py:mod:`~elastico.alerter`
module.
For more help on a command, run::
elastico alerter <command> -h
"""
from .cli import command, opt, arg
from ..alerter import Alerter
from ..connection import elasticsearch
from ..util import write_output
from ..server import Server
import pyaml, logging, time, yaml, sys
logger = logging.getLogger('elastico.cli.alerter')
alerter_command = command.add_subcommands('alerter', description=__doc__)
@alerter_command("expand-rules",
arg("--list", '-l', choices=['names', 'keys', 'types', 'alerts'], default=None),
arg("--format", '-f', default=None),
)
def alerter_expand_rules(config):
"""Expand rules, that you can check, if they are correct
This command expands the rules like in a regular alerter run and prints
them to stdout in YAML format. This way you can check, if all variables
and defaults are expanded as expected.
"""
expanded_rules = Alerter.expand_rules(config)
if config['alerter.expand-rules.list']:
expand = config['alerter.expand-rules.list']
if expand in ('names', 'keys', 'types'):
for name in set([ rule[expand[:-1]] for rule in expanded_rules ]):
print(name)
if expand == 'alerts':
for name in set([ "%s-%s" % (rule['type'], rule['key']) for rule in expanded_rules ]):
print(name)
elif config['alerter.expand-rules.format']:
for rule in expanded_rules:
print(config['alerter.expand-rules.format'].format(**rule))
else:
pyaml.p(expanded_rules)
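# Illustrative invocations (a sketch only; the actual rule/config files are not shown here):
#   elastico alerter expand-rules --list names         # one expanded rule name per line
#   elastico alerter expand-rules -f "{type}-{key}"    # one custom-formatted line per rule
# Without --list or --format the fully expanded rules are dumped as YAML.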
@alerter_command('check',
arg('--status', "-s", choices=['ok', 'alert', 'error'], default='ok'),
arg('alert', nargs="*", default=[]),
)
def alerter_check(config):
    raise NotImplementedError("'check' command needs refactoring")
config['arguments.dry_run'] = True
result = []
alerter = Alerter(elasticsearch(config), config)
check_alerts = config.get('alerter.check.alert')
status = config['alerter.check.status']
def check(alert):
logger.debug("alert: %s", alert)
alert_id = "%s-%s" % (alert['type'], alert['key'])
if (check_alerts
and alert_id not in check_alerts
and alert['key'] not in check_alerts): return
result.append(alerter.check_alert(alert, status=status))
alerter.process_rules(action=check)
write_output(config, result)
# need a command, where I simulate the data input for the checks, such that
# you can check, if messages are created correctly
# need a command to display dependency tree of alert rules and alerts
@alerter_command('deps')
def alerter_deps(config):
alerter = Alerter(config=config)
x = pyaml.PrettyYAMLDumper.ignore_aliases
try:
pyaml.PrettyYAMLDumper.ignore_aliases = lambda *a: True
s = pyaml.dumps(alerter.dependency_tree()).decode('utf-8')
s = s.replace(": {}", '')
s = s.replace(":", '')
sys.stdout.write(s)
finally:
pyaml.PrettyYAMLDumper.ignore_aliases = x
@alerter_command('status', opt('--all')) #, arg("rule"))
def alerter_status(config):
alerter = Alerter(elasticsearch(config), config=config)
statuses = {}
for rule in alerter.iterate_rules():
key = rule.getval('key')
status = alerter.read_status(key=key)
if config['alerter.status.all']:
statuses[key] = status
else:
if status['alerts']:
statuses[key] = status
# result = alerter.read_status(key='heartbeat_tcp_cal2')
# from pprint import pprint
# pprint(result)
pyaml.p(statuses)
@alerter_command('show',
arg('item', choices=('rules', 'alerts'), help="choose what to display"),
opt('--details', '--all', '-a', help="display rule details")
)
def alerter_show(config):
alerter = Alerter(elasticsearch(config), config)
if config['alerter.show.item'] == 'rules':
data = dict((r['name'], r)
for r in [rule.format() for rule in alerter.iterate_rules(ordered=False)])
if config['alerter.show.details']:
pyaml.p(data)
else:
pyaml.p(sorted([data[k].get('key') for k in data.keys()]))
#pyaml.p(sorted([k for k in data.keys()]))
elif config['alerter.show.item'] == 'alerts':
data = dict(('{}.{}'.format(*alerter.get_alert_key_type(alert)), alert)
for rule,alerts in alerter.iterate_alerts()
for alert in alerts
)
if config['alerter.show.details']:
pyaml.p(data)
else:
pyaml.p(sorted([k for k in data.keys()]))
@alerter_command("run")
def alerter_run(config):
"""run alerter"""
alerter = Alerter(elasticsearch(config), config)
alerter.check_alerts()
@alerter_command("serve",
arg('--sleep-seconds', '-s', type=float, default=60, config="serve.sleep_seconds"),
arg('--count', '-c', type=int, default=0, config="serve.count"),
)
def alerter_serve(config):
"""run alerter"""
def _run():
alerter = Alerter(elasticsearch(config), config)
alerter.check_alerts()
server = Server(config, run=_run)
server.run()
@alerter_command("query")
def alerter_query(config):
    """query alerter"""
pass
| 2,428
| 0
| 115
|
34823fdc749a9ba46e904acdd2ec15c0818a7a24
| 4,902
|
py
|
Python
|
src/restfx/middleware/middlewares/session.py
|
hyjiacan/restfx
|
8ba70bc099e6ace0c9b3afe8909ea61a5ff82dec
|
[
"MIT",
"BSD-3-Clause"
] | 5
|
2021-01-25T11:09:41.000Z
|
2021-04-28T07:17:21.000Z
|
src/restfx/middleware/middlewares/session.py
|
hyjiacan/restfx
|
8ba70bc099e6ace0c9b3afe8909ea61a5ff82dec
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
src/restfx/middleware/middlewares/session.py
|
hyjiacan/restfx
|
8ba70bc099e6ace0c9b3afe8909ea61a5ff82dec
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-01-28T00:53:37.000Z
|
2021-01-28T00:53:37.000Z
|
import time
import uuid
from ...config import AppConfig
from ...middleware.interface import MiddlewareBase
from ...session.interfaces import ISessionProvider
from ...util import md5, b64
| 32.68
| 67
| 0.549572
|
import time
import uuid
from ...config import AppConfig
from ...middleware.interface import MiddlewareBase
from ...session.interfaces import ISessionProvider
from ...util import md5, b64
class SessionMiddleware(MiddlewareBase):
def __init__(self, provider: ISessionProvider,
secret=None,
maker=None,
cookie_name='sessionid',
cookie_max_age=None,
cookie_expires=None,
cookie_path="/",
cookie_domain=None,
cookie_secure=False,
cookie_samesite=None,
cookie_httponly=True
):
"""
:param provider:
        :param secret: key used to encrypt the session id; when None, the app_id is used
        :param maker: algorithm used to create session ids:
            None means the default algorithm is used
            a callable means a custom algorithm
:param cookie_name:
:param cookie_max_age:
:param cookie_expires:
:param cookie_path:
:param cookie_domain:
:param cookie_secure:
:param cookie_samesite:
:param cookie_httponly:
"""
assert isinstance(provider, ISessionProvider)
self.maker = maker
self.secret = secret
self.secret_bytes = None
self.provider = provider
self.cookie_name = cookie_name
self.cookie_max_age = cookie_max_age
self.cookie_expires = cookie_expires
self.cookie_path = cookie_path
self.cookie_domain = cookie_domain
self.cookie_secure = cookie_secure
self.cookie_samesite = cookie_samesite
self.cookie_httponly = cookie_httponly
@staticmethod
def default_maker():
return uuid.uuid4().hex
def new_sid(self):
sid = self.maker() if self.maker else self.default_maker()
return md5.hash_str(sid)
def decode(self, sid):
"""
        Decode the sid sent by the client and extract the information it carries
:param sid:
:return:
"""
# noinspection PyBroadException
try:
            # the sid sent by the client is base64 encoded
sid_bytes = b64.dec_bytes(sid)
            # decrypt it with the secret
result = bytearray()
for i in range(32):
result.append(sid_bytes[i] ^ self.secret_bytes[i])
            # decryption yields the original md5
return result.decode()
except Exception:
            # decoding failed, this id is invalid
return None
def on_startup(self, app):
if self.secret is None:
self.secret = app.id
self.secret_bytes = md5.hash_str(self.secret).encode()
def process_request(self, request, meta):
        # when session=False is specified, no session needs to be created for this route
if not meta.get('session', True):
return
config = AppConfig.get(request.app_id)
if config is None or self.provider is None:
return
client_session_id = request.cookies.get(self.cookie_name)
        # the client has no session_id
if not client_session_id:
request.session = self.provider.create(self.new_sid())
return
        # decode the session_id
session_id = self.decode(client_session_id)
if not session_id:
            # the session id is invalid, create a new one
request.session = self.provider.create(self.new_sid())
return
        # try to fetch the session using the client's session_id
request.session = self.provider.get(session_id)
        # the session has expired or has been cleared
if request.session is None:
request.session = self.provider.create(self.new_sid())
return
now = time.time()
        # the session has expired
if self.provider.is_expired(request.session):
request.session = self.provider.create(self.new_sid())
return
        # update the last access time of the existing session
request.session.last_access_time = now
def on_leaving(self, request, response):
        # only write at the end of the response, to reduce IO
if not request.session:
return
request.session.flush()
        # encrypt with the secret
sid_bytes = request.session.id.encode()
result = bytearray()
for i in range(32):
result.append(sid_bytes[i] ^ self.secret_bytes[i])
        # the encrypted sid
sid = b64.enc_str(result)
response.set_cookie(self.cookie_name,
sid,
max_age=self.cookie_max_age,
expires=self.cookie_expires,
path=self.cookie_path,
domain=self.cookie_domain,
secure=self.cookie_secure,
httponly=self.cookie_httponly,
samesite=self.cookie_samesite,
)
def __del__(self):
del self.provider
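# A minimal round-trip sketch of the XOR scheme used above (illustration only; it uses
# hashlib/base64 directly instead of restfx's md5/b64 helpers):
#
#   import base64, hashlib
#   secret_bytes = hashlib.md5(b'my-app-id').hexdigest().encode()    # 32 bytes
#   session_id = hashlib.md5(b'some-random-sid').hexdigest()         # 32 chars
#   encoded = base64.b64encode(bytes(a ^ b for a, b in zip(session_id.encode(), secret_bytes)))
#   decoded = bytes(a ^ b for a, b in zip(base64.b64decode(encoded), secret_bytes)).decode()
#   assert decoded == session_id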
| 2,613
| 2,440
| 24
|
c6228ba792993028a93c9f240e189a2996613f3b
| 940
|
py
|
Python
|
param.py
|
vieilfrance/TwitterBot
|
72307862d49f2c3b1488e1c89db94ace3e71aa29
|
[
"MIT"
] | 2
|
2015-01-20T17:14:54.000Z
|
2016-05-16T05:46:11.000Z
|
param.py
|
vieilfrance/TwitterBot
|
72307862d49f2c3b1488e1c89db94ace3e71aa29
|
[
"MIT"
] | null | null | null |
param.py
|
vieilfrance/TwitterBot
|
72307862d49f2c3b1488e1c89db94ace3e71aa29
|
[
"MIT"
] | null | null | null |
LATESTMFILE = 'last_id.txt'
LOGFILE = "twitterbot_log.txt"
verbose = False
twitterName = "ui_cer_bot"
# List of terms used to reply
answers = ['ahah :)' , 'YO' , 'O_O', 'stoi' , 'TG' , 'MER IL ET FOU']
# List of terms used to reply "stoi xxxx"
bad_words = {'boloss' : 'le boloss', 'boulette' : 'la boulette', 'accident' :"l'accident" , 'youtube':"le tube" , 'facebook':"le bouc" , 'dément': "qui ment"}
# List of annoying terms where the bot replies TG, with a special mention for @infredwetrust :)
boring_words = {'#old' , 'oscours', '#oscours', "twitpic", "selfie" }
# List of terms that trigger a reply
tg_list = ['tg','ta gueule', 'tg.', 'tg!', 'ta gueule.', 'ta gueule!']
# List of phrases the bot tweets on its own
talk = {"Sinon SAVA ?", "c'est l'amour à la plage, aoum tcha tcha tcha", "Je vous trouve très beau, surtout moi" , "y a quoi de beau à la télé ce soir ?", "sim est mort. #rip"}
| 47
| 176
| 0.66383
|
LATESTMFILE = 'last_id.txt'
LOGFILE = "twitterbot_log.txt"
verbose = False
twitterName = "ui_cer_bot"
# List of terms used to reply
answers = ['ahah :)' , 'YO' , 'O_O', 'stoi' , 'TG' , 'MER IL ET FOU']
# List of terms used to reply "stoi xxxx"
bad_words = {'boloss' : 'le boloss', 'boulette' : 'la boulette', 'accident' :"l'accident" , 'youtube':"le tube" , 'facebook':"le bouc" , 'dément': "qui ment"}
# List of annoying terms where the bot replies TG, with a special mention for @infredwetrust :)
boring_words = {'#old' , 'oscours', '#oscours', "twitpic", "selfie" }
# List of terms that trigger a reply
tg_list = ['tg','ta gueule', 'tg.', 'tg!', 'ta gueule.', 'ta gueule!']
# List of phrases the bot tweets on its own
talk = {"Sinon SAVA ?", "c'est l'amour à la plage, aoum tcha tcha tcha", "Je vous trouve très beau, surtout moi" , "y a quoi de beau à la télé ce soir ?", "sim est mort. #rip"}
| 0
| 0
| 0
|
4cfae502db39016626f313d1e7ed2997741ffab6
| 391
|
py
|
Python
|
tests/ctoi_chap2/ex_03.py
|
tvatter/dsa
|
e5ae217e38441d90914a55103e23d86f5821dc2f
|
[
"MIT"
] | null | null | null |
tests/ctoi_chap2/ex_03.py
|
tvatter/dsa
|
e5ae217e38441d90914a55103e23d86f5821dc2f
|
[
"MIT"
] | null | null | null |
tests/ctoi_chap2/ex_03.py
|
tvatter/dsa
|
e5ae217e38441d90914a55103e23d86f5821dc2f
|
[
"MIT"
] | null | null | null |
from dsa.data_structures import LinkedList, ListNode
l = [1, 2, 3]
ll = LinkedList(l, doubly=False)
mid_n = ll.head.next_node
delete_middle_node(mid_n)
str(ll)
| 23
| 52
| 0.774936
|
from dsa.data_structures import LinkedList, ListNode
def delete_middle_node(mid_node: ListNode):
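    # Classic linked-list trick: without access to the head, "delete" the middle node by
    # copying the next node's key/data into it and linking past that next node instead.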
next_node = mid_node.next_node
mid_node.key = next_node.key
mid_node.data = next_node.data
mid_node.next_node = next_node.next_node
mid_node.prev_node = next_node.prev_node
l = [1, 2, 3]
ll = LinkedList(l, doubly=False)
mid_n = ll.head.next_node
delete_middle_node(mid_n)
str(ll)
| 205
| 0
| 23
|
8f5fa4bb7c149bc76a139e4ca5d49be7e9640bc4
| 889
|
py
|
Python
|
music/models.py
|
rayyanshikoh/djangomusicsite
|
be1582b2e48c6c3edde15b69c1299682fbc1b12a
|
[
"MIT"
] | null | null | null |
music/models.py
|
rayyanshikoh/djangomusicsite
|
be1582b2e48c6c3edde15b69c1299682fbc1b12a
|
[
"MIT"
] | null | null | null |
music/models.py
|
rayyanshikoh/djangomusicsite
|
be1582b2e48c6c3edde15b69c1299682fbc1b12a
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.db.models.deletion import CASCADE
# Create your models here.
| 37.041667
| 64
| 0.75703
|
from django.db import models
from django.db.models.deletion import CASCADE
# Create your models here.
class Artist(models.Model):
artist_name = models.CharField(max_length=100)
artist_picture_name = models.CharField(max_length=100)
artist_description = models.CharField(max_length=100)
def __str__(self):
return self.artist_name
class Album(models.Model):
artist = models.ForeignKey(Artist, on_delete=models.CASCADE)
album_name = models.CharField(max_length=100)
album_filename_url = models.CharField(max_length=100)
release_date = models.IntegerField()
album_art = models.CharField(max_length=100)
def __str__(self):
return self.album_name
class Song(models.Model):
album = models.ForeignKey(Album, on_delete=models.CASCADE)
song_name = models.CharField(max_length=100)
song_artists = models.CharField(max_length=100)
| 57
| 662
| 68
|
899a8ac3943f0e2672d9868197a5e5bb885a511f
| 2,218
|
py
|
Python
|
scripts/csv_to_gv.py
|
mingkaic/callgraph-profiler
|
b3258dd67ee530ed5362ff1d0120b7cf100fa4cb
|
[
"MIT"
] | 9
|
2017-01-06T17:10:53.000Z
|
2022-01-21T12:09:09.000Z
|
scripts/csv_to_gv.py
|
mingkaic/callgraph-profiler
|
b3258dd67ee530ed5362ff1d0120b7cf100fa4cb
|
[
"MIT"
] | 1
|
2017-01-09T18:50:56.000Z
|
2017-01-09T18:50:56.000Z
|
scripts/csv_to_gv.py
|
mingkaic/callgraph-profiler
|
b3258dd67ee530ed5362ff1d0120b7cf100fa4cb
|
[
"MIT"
] | 14
|
2017-01-09T04:19:48.000Z
|
2022-03-14T03:27:21.000Z
|
#!/usr/bin/env python3
from collections import defaultdict
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('csv', nargs='?', default=None,
help='the CSV format callgraph to transform')
args = parser.parse_args()
import sys
with (open(args.csv) if args.csv else sys.stdin) as infile:
callgraph = read_callgraph(infile)
print_callgraph(callgraph)
| 32.617647
| 79
| 0.558161
|
#!/usr/bin/env python3
from collections import defaultdict
def get_row_tuples(instream):
return (tuple(col.strip() for col in line.split(',')) for line in instream)
def read_callgraph(instream):
nodes = set()
edges = defaultdict(lambda : defaultdict(list))
for (caller, filename, line, callee, count) in get_row_tuples(instream):
nodes.add(caller)
nodes.add(callee)
edges[caller][(filename,line)].append((callee, count))
return (nodes, edges)
def count_iterator(edges):
return (int(count) for site in edges.values()
for target in site.values()
for callee, count in target)
def print_callgraph(callgraph):
nodes, edges = callgraph
print('digraph {\n node [shape=record];')
max_count = float(max(count_iterator(edges)))
for node in nodes:
sitemap = edges[node]
callsites = ''.join('|<l{0}>{1}:{2}'.format(id, filename, line)
for id, (filename,line) in enumerate(sitemap.keys()))
node_format = ' "{0}"[label=\"{{{0}{1}}}\"];'
print(node_format.format(node, callsites))
for id, site in enumerate(sitemap.keys()):
for (target,count) in sitemap[site]:
count = int(count)
weight = round(max(1, min(count, 5 * (count / max_count))), 2)
color = hex(int(255 * (count / max_count)))[2:]
styles = [
'label="{0}"'.format(count),
'penwidth="{0}"'.format(weight),
'labelfontcolor=black',
'color="#{0}0000"'.format(color)
]
edge = (node, id, target, ','.join(styles))
print(' "{0}":l{1} -> "{2}" [{3}];'.format(*edge))
print('}')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('csv', nargs='?', default=None,
help='the CSV format callgraph to transform')
args = parser.parse_args()
import sys
with (open(args.csv) if args.csv else sys.stdin) as infile:
callgraph = read_callgraph(infile)
print_callgraph(callgraph)
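# Expected input: one call site per CSV row, in the column order unpacked by
# get_row_tuples() above:  caller,filename,line,callee,count
# A typical pipeline (assuming Graphviz's `dot` is available) might be:
#   ./csv_to_gv.py callgraph.csv | dot -Tpdf -o callgraph.pdf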
| 1,662
| 0
| 92
|
44ec7269f753565b6d256da9e84d6e1d7e9ed9cb
| 2,628
|
py
|
Python
|
tests/integration/test_passwd.py
|
mthaddon/operator-libs-linux
|
ef86a004f51ae7a506718c90c66d5464d58ac731
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_passwd.py
|
mthaddon/operator-libs-linux
|
ef86a004f51ae7a506718c90c66d5464d58ac731
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_passwd.py
|
mthaddon/operator-libs-linux
|
ef86a004f51ae7a506718c90c66d5464d58ac731
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
from charms.operator_libs_linux.v0 import passwd
from helpers import lines_in_file
logger = logging.getLogger(__name__)
| 29.52809
| 90
| 0.708904
|
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
from charms.operator_libs_linux.v0 import passwd
from helpers import lines_in_file
logger = logging.getLogger(__name__)
def test_add_user():
# First check the user we're creating doesn't exist
assert passwd.user_exists("test-user-0") is None
u = passwd.add_user(username="test-user-0")
expected_passwd_line = f"{u.pw_name}:x:{u.pw_uid}:{u.pw_gid}::{u.pw_dir}:{u.pw_shell}"
expected_group_line = f"{u.pw_name}:x:{u.pw_gid}:"
assert passwd.user_exists("test-user-0") is not None
assert expected_group_line in lines_in_file("/etc/group")
assert expected_passwd_line in lines_in_file("/etc/passwd")
# clean up
passwd.remove_user("test-user-0")
def test_remove_user():
u = passwd.add_user(username="test-user-0")
assert passwd.user_exists("test-user-0") is not None
passwd.remove_user("test-user-0")
expected_passwd_line = f"{u.pw_name}:x:{u.pw_uid}:{u.pw_gid}::{u.pw_dir}:{u.pw_shell}"
expected_group_line = f"{u.pw_name}:x:{u.pw_gid}:"
assert passwd.user_exists("test-user-0") is None
assert expected_group_line not in lines_in_file("/etc/group")
assert expected_passwd_line not in lines_in_file("/etc/passwd")
def test_add_user_with_params():
u = passwd.add_user(username="test-user-1", shell="/bin/bash", primary_group="admin")
expected = f"{u.pw_name}:x:{u.pw_uid}:{u.pw_gid}::{u.pw_dir}:{u.pw_shell}"
assert expected in lines_in_file("/etc/passwd")
passwd.remove_user("test-user-1")
def test_add_group():
assert passwd.group_exists("test-group") is None
g = passwd.add_group(group_name="test-group")
expected = f"{g.gr_name}:x:{g.gr_gid}:"
assert passwd.group_exists("test-group") is not None
assert expected in lines_in_file("/etc/group")
passwd.remove_group("test-group")
def test_remove_group():
g = passwd.add_group(group_name="test-group")
assert passwd.group_exists("test-group") is not None
expected = f"{g.gr_name}:x:{g.gr_gid}:"
assert expected in lines_in_file("/etc/group")
passwd.remove_group("test-group")
assert passwd.group_exists("test-group") is None
assert expected not in lines_in_file("/etc/group")
def test_add_group_with_gid():
assert passwd.group_exists("test-group") is None
passwd.add_group(group_name="test-group", gid=1099)
expected = "test-group:x:1099:"
assert passwd.group_exists("test-group") is not None
assert expected in lines_in_file("/etc/group")
passwd.remove_group("test-group")
| 2,248
| 0
| 138
|
b58e318ca4f093238eee18f1e7d2c3f073e021ad
| 11,006
|
py
|
Python
|
mqtt-relais.py
|
meberli/mqtt-relais
|
b0de69d6da6119e9c09601355ea4de1b525402b9
|
[
"Apache-2.0"
] | null | null | null |
mqtt-relais.py
|
meberli/mqtt-relais
|
b0de69d6da6119e9c09601355ea4de1b525402b9
|
[
"Apache-2.0"
] | null | null | null |
mqtt-relais.py
|
meberli/mqtt-relais
|
b0de69d6da6119e9c09601355ea4de1b525402b9
|
[
"Apache-2.0"
] | 1
|
2022-01-02T23:14:21.000Z
|
2022-01-02T23:14:21.000Z
|
#!/usr/bin/python3
import string
import time
import random
import json
import yaml
import ssl
import base64
import logging
from logging.config import fileConfig
import importlib
import argparse
import os
import re
from rich.logging import RichHandler
from datetime import datetime
import paho.mqtt.client as mqtt
from MessageConverters.MessageConverter import MessageConverter
LOGGING_CONFIG = 'logging.conf'
CONVERTERS_DIR = 'MessageConverters'
# list to store all mqtt connection infos
brokers = []
'''
def translate_to_tb_format(payload):
tb_payload = {}
measurements = []
measurement = {}
measurement['ts'] = payload.get('ts')
measurement['values'] = payload.get('fields')
deviceid = payload.get('tags').get('deviceid')
measurements.append(measurement)
tb_payload[deviceid] = measurements
return tb_payload
'''
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbose",
help="increase output verbosity",
action="store_true")
parser.add_argument(
"--conf_file",
help="configuration file",
type=str,
default="config.yaml")
args = parser.parse_args()
path_log_config_file = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'conf', LOGGING_CONFIG)
print(f'logging config file: {path_log_config_file}')
fileConfig(path_log_config_file)
logger = logging.getLogger(__name__)
logger.info("using logging conf from {}".format(path_log_config_file))
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
logger.info("verbosity turned on")
# load config
path_config_file = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'conf', args.conf_file)
with open(path_config_file) as yaml_conf_file:
configuration = yaml.full_load(yaml_conf_file)
logger.info("loaded config: {}".format(configuration))
# start all mqtt connections
logger.info('starting mqtt connections...')
    # list to store all active clients
active_clients = {}
# dictionary to store all dynamically loaded converters
converters = {}
for name, conf in configuration.get("brokers").items():
logger.info(
f'starting client for broker {name}, connecting to host {conf.get("host")}')
client = connect_mqtt(name, conf)
if client:
# Bind function to callback
client.on_publish = on_publish
client.on_log = on_log
client.on_message = on_message
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.loop_start()
client.enable_logger(logger)
# create converter and routing info
converter_and_routing_info = {}
converter_and_routing_info['name'] = name
subscribe_converter = conf.get('subscribe-converter')
converter_and_routing_info['subscribe-converter'] = subscribe_converter
if subscribe_converter:
_load_converter(subscribe_converter)
publish_converter = conf.get('publish-converter')
converter_and_routing_info['publish-converter'] = publish_converter
if publish_converter:
_load_converter(publish_converter)
converter_and_routing_info['routes'] = []
for route in configuration.get("routing"):
if route["subscribe-broker"] == name:
converter_and_routing_info['routes'].append(route)
payload_converter = route.get('payload-converter')
if payload_converter:
_load_converter(
payload_converter)
logger.debug(f"added route {route['name']}")
client.user_data_set(converter_and_routing_info)
active_clients[name] = client
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
logger.info('interrupted!')
for name, client in active_clients.items():
disconnect_mqtt(client)
| 36.203947
| 124
| 0.621116
|
#!/usr/bin/python3
import string
import time
import random
import json
import yaml
import ssl
import base64
import logging
from logging.config import fileConfig
import importlib
import argparse
import os
import re
from rich.logging import RichHandler
from datetime import datetime
import paho.mqtt.client as mqtt
from MessageConverters.MessageConverter import MessageConverter
LOGGING_CONFIG = 'logging.conf'
CONVERTERS_DIR = 'MessageConverters'
# list to store all mqtt connection infos
brokers = []
def _load_converter(converter_classname: string):
if converter_classname in converters:
logger.info(f'converter {converter_classname} is already loaded. skipping..')
else:
try:
m_name = f'{CONVERTERS_DIR}.{converter_classname}'
module = importlib.import_module(m_name)
device_class = getattr(module, converter_classname)
message_converter = device_class()
except ImportError as err:
logger.error(f'failed to load module: {converter_classname}. message: {err}')
return None
logger.info(f'successfully loaded converter {device_class.__name__}')
converters[converter_classname] = message_converter
def _convert_message(message: bytes, converter_classname: string) -> bytes:
# get corresponding decoder
message_converter = converters.get(converter_classname)
if message_converter:
return message_converter.convert(message)
else:
logger.error(f"can't find converter with name {converter_classname}. skipping..")
return message
'''
def translate_to_tb_format(payload):
tb_payload = {}
measurements = []
measurement = {}
measurement['ts'] = payload.get('ts')
measurement['values'] = payload.get('fields')
deviceid = payload.get('tags').get('deviceid')
measurements.append(measurement)
tb_payload[deviceid] = measurements
return tb_payload
'''
def on_connect(client: mqtt.Client, userdata, flags, rc):
if rc == 0:
client.connected_flag = True
client.disconnect_flag = False
logger.info(
f'Connect for Client {userdata.get("name")} successful.')
for route in userdata.get('routes'):
topic = route.get(
"subscribe-topic")
if topic:
logger.info(
f'Subscribing to topic {topic}')
client.subscribe(topic)
else:
logger.error(
f"Connect for Client {userdata.get('name')} failed with result code: {str(rc)}")
def on_disconnect(client, userdata, rc):
client.connected_flag = False
client.disconnect_flag = True
logger.info(
"Disconnected client {} . Reason: {}".format(client, str(rc)))
def on_publish(client, userdata, result):
# Todo after published data, remove from DB.
logger.info(
"data published to client {}. userdata: {} result: {}".format(
client._client_id, userdata, result))
def on_message(client: mqtt.Client, userdata, message: mqtt.MQTTMessage):
logger.info(
f"**** new message received from broker '{userdata.get('name')}' on topic '{message.topic}'")
message_payload = message.payload
logger.debug(
f"received message: {message_payload.decode('utf-8')}")
# find matching routing
routes_to_process = []
for route in userdata.get('routes'):
pattern = re.compile("^" + route.get('subscribe-topic').replace('+','.*') +"$")
if pattern.match(message.topic):
routes_to_process.append(route)
if routes_to_process:
for route in routes_to_process:
            # convert with subscribe-converter if configured
subscribe_converter = userdata.get('subscribe-converter')
if subscribe_converter:
logger.debug(
f'converting message with subscribe-converter {subscribe_converter}')
message_payload = _convert_message(
message_payload, subscribe_converter)
            # convert with payload-converter if configured
payload_converter = route.get('payload-converter')
if payload_converter:
logger.debug(
f'converting message with payload-converter {payload_converter}')
message_payload = _convert_message(
message_payload, payload_converter)
# convert with publish-converter if configured
publish_broker = route.get('publish-broker')
publish_client = active_clients.get(publish_broker)
publish_converter = configuration.get("brokers").get(
publish_broker).get('publish-converter')
if publish_converter:
logger.debug(
f'converting message with publish_converter {publish_converter}')
message_payload = _convert_message(
message_payload, publish_converter)
# publish message
try:
logger.info(
f"publishing message to broker '{route.get('publish-broker')}' on topic '{route.get('publish-topic')}'")
logger.debug(
f"message: {message_payload.decode('utf-8')}")
publish_client.publish(
route.get('publish-topic'),
payload=message_payload)
except Exception as error:
logger.exception(error)
else:
logger.info(
f'no route found for topic {message.topic}')
def on_log(client, userdata, level, buf):
logger.info('Loglevel: {}. message: {}, userdata: {}'.format(
level, buf, userdata))
def connect_mqtt(name, broker_info):
try:
ssl.match_hostname = lambda cert, hostname: True
auth_conf = broker_info.get("auth")
auth_type = auth_conf.get("auth_type")
# generate client id
client_id = "{}-{}".format(
name,
random.randint(150, 205))
# create client object
client = mqtt.Client(
client_id,
clean_session=False)
client.connected_flag = False
client.disconnect_flag = True
# configure authentication
if (auth_type == "password"):
client.username_pw_set(
auth_conf.get("user"),
auth_conf.get("pw"))
elif (auth_type == "cert"):
client.tls_set(
ca_certs=auth_conf.get("ca_certs_path"),
certfile=auth_conf.get("certfile_path"),
keyfile=auth_conf.get("keyfile_path"),
cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_SSLv23)
client.tls_insecure_set(True)
logger.info(
f'connecting to broker {name} on host {broker_info.get("host")}')
client.connect(
host=broker_info.get("host"),
port=broker_info.get("port"),
keepalive=60)
# workaround to make sure the session is clean on startup but remains on automatic reconnect
client.disconnect()
while client.connected_flag:
logger.warning("waiting for client to disconnect..")
time.sleep(1)
client.clean_session = True
client.connect(
host=broker_info.get("host"),
port=broker_info.get("port"),
keepalive=60)
return client
except Exception:
logger.error(f"connection for broker {name} failed. skipping this one..")
return None
def disconnect_mqtt(client):
client.loop_stop()
client.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbose",
help="increase output verbosity",
action="store_true")
parser.add_argument(
"--conf_file",
help="configuration file",
type=str,
default="config.yaml")
args = parser.parse_args()
path_log_config_file = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'conf', LOGGING_CONFIG)
print(f'logging config file: {path_log_config_file}')
fileConfig(path_log_config_file)
logger = logging.getLogger(__name__)
logger.info("using logging conf from {}".format(path_log_config_file))
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
logger.info("verbosity turned on")
# load config
path_config_file = os.path.join(os.path.dirname(
os.path.realpath(__file__)), 'conf', args.conf_file)
with open(path_config_file) as yaml_conf_file:
configuration = yaml.full_load(yaml_conf_file)
logger.info("loaded config: {}".format(configuration))
# start all mqtt connections
logger.info('starting mqtt connections...')
    # list to store all active clients
active_clients = {}
# dictionary to store all dynamically loaded converters
converters = {}
for name, conf in configuration.get("brokers").items():
logger.info(
f'starting client for broker {name}, connecting to host {conf.get("host")}')
client = connect_mqtt(name, conf)
if client:
# Bind function to callback
client.on_publish = on_publish
client.on_log = on_log
client.on_message = on_message
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.loop_start()
client.enable_logger(logger)
# create converter and routing info
converter_and_routing_info = {}
converter_and_routing_info['name'] = name
subscribe_converter = conf.get('subscribe-converter')
converter_and_routing_info['subscribe-converter'] = subscribe_converter
if subscribe_converter:
_load_converter(subscribe_converter)
publish_converter = conf.get('publish-converter')
converter_and_routing_info['publish-converter'] = publish_converter
if publish_converter:
_load_converter(publish_converter)
converter_and_routing_info['routes'] = []
for route in configuration.get("routing"):
if route["subscribe-broker"] == name:
converter_and_routing_info['routes'].append(route)
payload_converter = route.get('payload-converter')
if payload_converter:
_load_converter(
payload_converter)
logger.debug(f"added route {route['name']}")
client.user_data_set(converter_and_routing_info)
active_clients[name] = client
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
logger.info('interrupted!')
for name, client in active_clients.items():
disconnect_mqtt(client)
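# A minimal config.yaml sketch inferred from the keys read above (broker names and
# converter class names are placeholders, not part of the project):
#
#   brokers:
#     source:
#       host: mqtt.example.org
#       port: 8883
#       auth: {auth_type: password, user: bot, pw: secret}
#       subscribe-converter: SomeSubscribeConverter
#     sink:
#       host: localhost
#       port: 1883
#       auth: {auth_type: password, user: local, pw: secret}
#   routing:
#     - name: forward-sensors
#       subscribe-broker: source
#       subscribe-topic: sensors/+/data
#       publish-broker: sink
#       publish-topic: ingest/sensors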
| 6,598
| 0
| 207
|
118d6b457420d337af3414d3d155dda1d1ccd34c
| 1,458
|
py
|
Python
|
FusionSlicerLT.py
|
tapnair/FusionSlicerLT
|
11f17855bffab81371431e779d7b6e5edc242006
|
[
"MIT"
] | 3
|
2018-09-27T17:28:23.000Z
|
2021-07-31T05:07:54.000Z
|
FusionSlicerLT.py
|
tapnair/FusionSlicerLT
|
11f17855bffab81371431e779d7b6e5edc242006
|
[
"MIT"
] | null | null | null |
FusionSlicerLT.py
|
tapnair/FusionSlicerLT
|
11f17855bffab81371431e779d7b6e5edc242006
|
[
"MIT"
] | 2
|
2019-05-15T06:09:20.000Z
|
2021-07-31T05:07:55.000Z
|
# Author-Patrick Rainsberry
# Description-Simplified Slicer for Fusion 360
# Importing sample Fusion Command
# Could import multiple Command definitions here
from .FusionSlicerLTCommand import FusionSlicerLTCommand, FusionSlicerLTCommand2
commands = []
command_definitions = []
# Define parameters for 1st command
cmd = {
'cmd_name': 'Fusion Slicer LT',
'cmd_description': 'Simplified Fusion Slicing App',
'cmd_id': 'cmdID_slicer_lt',
'cmd_resources': './resources',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'SolidScriptsAddinsPanel',
'class': FusionSlicerLTCommand
}
command_definitions.append(cmd)
# Define parameters for 2nd command
cmd = {
'cmd_name': 'Fusion Slicer LT 2',
'cmd_description': 'Simplified Fusion Slicing App',
'cmd_id': 'cmdID_slicer_lt2',
'cmd_resources': './resources',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'SolidScriptsAddinsPanel',
'command_visible': False,
'class': FusionSlicerLTCommand2
}
command_definitions.append(cmd)
# Set to True to display various useful messages when debugging your app
debug = False
# Don't change anything below here:
for cmd_def in command_definitions:
command = cmd_def['class'](cmd_def, debug)
commands.append(command)
| 27
| 80
| 0.733882
|
# Author-Patrick Rainsberry
# Description-Simplified Slicer for Fusion 360
# Importing sample Fusion Command
# Could import multiple Command definitions here
from .FusionSlicerLTCommand import FusionSlicerLTCommand, FusionSlicerLTCommand2
commands = []
command_definitions = []
# Define parameters for 1st command
cmd = {
'cmd_name': 'Fusion Slicer LT',
'cmd_description': 'Simplified Fusion Slicing App',
'cmd_id': 'cmdID_slicer_lt',
'cmd_resources': './resources',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'SolidScriptsAddinsPanel',
'class': FusionSlicerLTCommand
}
command_definitions.append(cmd)
# Define parameters for 2nd command
cmd = {
'cmd_name': 'Fusion Slicer LT 2',
'cmd_description': 'Simplified Fusion Slicing App',
'cmd_id': 'cmdID_slicer_lt2',
'cmd_resources': './resources',
'workspace': 'FusionSolidEnvironment',
'toolbar_panel_id': 'SolidScriptsAddinsPanel',
'command_visible': False,
'class': FusionSlicerLTCommand2
}
command_definitions.append(cmd)
# Set to True to display various useful messages when debugging your app
debug = False
# Don't change anything below here:
for cmd_def in command_definitions:
command = cmd_def['class'](cmd_def, debug)
commands.append(command)
def run(context):
for run_command in commands:
run_command.on_run()
def stop(context):
for stop_command in commands:
stop_command.on_stop()
| 120
| 0
| 46
|
10e82da9ec01eac8d5b3c46539f095f880596fbb
| 2,871
|
py
|
Python
|
old_version/threads/downloader.py
|
DenisZhmakin/VK-Music-Downloader
|
217d54f462b2da74776eec47bf1c355c54b017ab
|
[
"Unlicense"
] | null | null | null |
old_version/threads/downloader.py
|
DenisZhmakin/VK-Music-Downloader
|
217d54f462b2da74776eec47bf1c355c54b017ab
|
[
"Unlicense"
] | 1
|
2021-12-20T03:42:21.000Z
|
2021-12-20T09:57:57.000Z
|
old_version/threads/downloader.py
|
DenisZhmakin/VK-Music-Downloader
|
217d54f462b2da74776eec47bf1c355c54b017ab
|
[
"Unlicense"
] | null | null | null |
import subprocess
import tempfile
from pathlib import Path
import requests
from mutagen.easyid3 import EasyID3
from mutagen.id3 import APIC, ID3
from mutagen.mp3 import MP3
from pathvalidate import sanitize_filename
from PyQt5.QtCore import QThread
from vk_api.audio import VkAudio
from entities.album import VkAlbum
from entities.session import VkSession
from entities.song import VkSong
from utils import get_tracklist_iter
| 34.590361
| 131
| 0.642285
|
import subprocess
import tempfile
from pathlib import Path
import requests
from mutagen.easyid3 import EasyID3
from mutagen.id3 import APIC, ID3
from mutagen.mp3 import MP3
from pathvalidate import sanitize_filename
from PyQt5.QtCore import QThread
from vk_api.audio import VkAudio
from entities.album import VkAlbum
from entities.session import VkSession
from entities.song import VkSong
from utils import get_tracklist_iter
class VkDownloader(QThread):
def __init__(self, album: VkAlbum):
QThread.__init__(self)
self.album = album
self.tmp_dir = Path(tempfile.gettempdir()) / sanitize_filename(album.artist, "_") / sanitize_filename(album.title, "_")
self.tmp_dir.mkdir(parents=True, exist_ok=True)
self.music_dir = Path.home() / "Музыка" / sanitize_filename(album.artist, "_") / sanitize_filename(album.title, "_")
self.music_dir.mkdir(parents=True, exist_ok=True)
responce = requests.get(album.cover_url)
self.cover_file = self.tmp_dir / "cover.jpeg"
self.cover_file.write_bytes(responce.content)
def run(self):
for vk_song in get_tracklist_iter(self.album):
self.download_track(vk_song)
self.set_mp3_tags(vk_song)
self.set_cover_image(vk_song)
self.rename_file(vk_song)
def download_track(self, track: VkSong):
mp3_file = self.music_dir / f"{track.track_code}.mp3"
ts_file = self.tmp_dir / f"{track.track_code}.ts"
if 'index.m3u8' in track.url:
subprocess.call(["streamlink", "--output", ts_file, track.url, "best"])
subprocess.call(["ffmpeg", "-i", ts_file, "-ab", "320k", mp3_file])
elif 'long_chunk=1' in track.url:
responce = requests.get(track.url)
mp3_file.write_bytes(responce.content)
def set_mp3_tags(self, track: VkSong):
audio = MP3(filename=self.music_dir / f"{track.track_code}.mp3", ID3=EasyID3)
audio['title'] = track.title
audio['artist'] = track.artist
audio['tracknumber'] = str(track.track_num)
audio['date'] = str(track.year)
audio['album'] = track.album
audio['genre'] = track.genre
audio.save()
def set_cover_image(self, track: VkSong):
audio = ID3(Path(self.music_dir / f"{track.track_code}.mp3"))
with open(self.cover_file, 'rb') as album_art:
audio['APIC'] = APIC(
encoding=3,
mime='image/jpeg',
type=3, desc='Cover',
data=album_art.read()
)
audio.save()
def rename_file(self, track: VkSong):
file = Path(self.music_dir / f"{track.track_code}.mp3")
new_name = f"{str(track.track_num).zfill(2)}. {sanitize_filename(track.title)}.mp3"
file.rename(self.music_dir / new_name)
| 2,246
| 7
| 195
|
eee40442b370235e54fc3c68ad3f9b6e578cb6a3
| 2,494
|
py
|
Python
|
train.py
|
konowrockis/ai2-comments-generator
|
62ca30aeeb332dbbe39a9120ca38d07e69ae76be
|
[
"MIT"
] | null | null | null |
train.py
|
konowrockis/ai2-comments-generator
|
62ca30aeeb332dbbe39a9120ca38d07e69ae76be
|
[
"MIT"
] | null | null | null |
train.py
|
konowrockis/ai2-comments-generator
|
62ca30aeeb332dbbe39a9120ca38d07e69ae76be
|
[
"MIT"
] | null | null | null |
import h5py
import math
import time
import numpy
import sys
from functools import reduce
from keras.models import Sequential
from keras.layers import GRU, LSTM, Dropout, Dense
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
with open('./data/fb_news_comments.txt', 'r', encoding='utf-8') as file:
comments = file.read()
chars = list(sorted(set(comments)))
# print(''.join(chars))
# print([ord(x) for x in chars])
# exit()
start = 0
seq_length = 100
items = 200000
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
n_vocab = len(chars)
n_patterns = items
model = Sequential()
model.add(GRU(512, input_shape=(seq_length, 1), return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(256))
model.add(Dropout(0.2))
model.add(Dense(n_vocab, activation='softmax'))
model.load_weights("./results/test_6/weights-improvement-60-1.7856.hdf5")
model.compile(loss='categorical_crossentropy', optimizer='adam')
filepath="./results/test_6/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, mode='min')
callbacks_list = [checkpoint]
for i in range(33, 100):
dataX = []
dataY = []
generate()
exit()
print()
for j in range(start + items * i, start + items * (i + 1)):
seq_in = comments[j:j + seq_length]
seq_out = comments[j + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
X = X / float(n_vocab)
y = np_utils.to_categorical(dataY)
model.fit(X, y, epochs=i * 2 + 2, initial_epoch=i * 2, batch_size=128, callbacks=callbacks_list)
| 30.790123
| 189
| 0.685646
|
import h5py
import math
import time
import numpy
import sys
from functools import reduce
from keras.models import Sequential
from keras.layers import GRU, LSTM, Dropout, Dense
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
with open('./data/fb_news_comments.txt', 'r', encoding='utf-8') as file:
comments = file.read()
chars = list(sorted(set(comments)))
# print(''.join(chars))
# print([ord(x) for x in chars])
# exit()
start = 0
seq_length = 100
items = 200000
char_to_int = dict((c, i) for i, c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
n_vocab = len(chars)
n_patterns = items
model = Sequential()
model.add(GRU(512, input_shape=(seq_length, 1), return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(256))
model.add(Dropout(0.2))
model.add(Dense(n_vocab, activation='softmax'))
model.load_weights("./results/test_6/weights-improvement-60-1.7856.hdf5")
model.compile(loss='categorical_crossentropy', optimizer='adam')
filepath="./results/test_6/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, mode='min')
callbacks_list = [checkpoint]
def generate():
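    # Seed the model with a fixed 100-character prompt, then sample 1000 characters one
    # at a time, drawing each from the predicted distribution and sliding the window forward.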
seed = list("To me, something just doesn't add up.... It helps that this article says he killed them separately and a second person of interest might be involved.".lower())[:seq_length]
pattern = [char_to_int[char] for char in seed]
# temp = 2
for i in range(1000):
x = numpy.reshape(pattern, (1, len(pattern), 1))
x = x / float(n_vocab)
prediction = model.predict(x, verbose=0)
index = numpy.random.choice(n_vocab, 1, p=numpy.reshape(prediction, n_vocab))[0]
result = int_to_char[index]
sys.stdout.write(result)
pattern.append(index)
pattern = pattern[1:len(pattern)]
for i in range(33, 100):
dataX = []
dataY = []
generate()
exit()
print()
for j in range(start + items * i, start + items * (i + 1)):
seq_in = comments[j:j + seq_length]
seq_out = comments[j + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
X = numpy.reshape(dataX, (n_patterns, seq_length, 1))
X = X / float(n_vocab)
y = np_utils.to_categorical(dataY)
model.fit(X, y, epochs=i * 2 + 2, initial_epoch=i * 2, batch_size=128, callbacks=callbacks_list)
| 653
| 0
| 23
|
3829eb9ad6055f6bc1ed519ee8456fa201a7dced
| 790
|
py
|
Python
|
var/spack/repos/builtin/packages/r-clue/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-clue/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-clue/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RClue(RPackage):
"""Cluster Ensembles."""
cran = "clue"
version('0.3-61', sha256='71311b16ce380fd9a8834be95b55b3d1b47e4ee2b8acb35b8d481138c314dc31')
version('0.3-60', sha256='6d21ddfd0d621ed3bac861890c600884b6ed5ff7d2a36c9778b892636dbbef2a')
version('0.3-58', sha256='2ab6662eaa1103a7b633477e8ebd266b262ed54fac6f9326b160067a2ded9ce7')
version('0.3-57', sha256='6e369d07b464a9624209a06b5078bf988f01f7963076e946649d76aea0622d17')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
| 37.619048
| 96
| 0.756962
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RClue(RPackage):
"""Cluster Ensembles."""
cran = "clue"
version('0.3-61', sha256='71311b16ce380fd9a8834be95b55b3d1b47e4ee2b8acb35b8d481138c314dc31')
version('0.3-60', sha256='6d21ddfd0d621ed3bac861890c600884b6ed5ff7d2a36c9778b892636dbbef2a')
version('0.3-58', sha256='2ab6662eaa1103a7b633477e8ebd266b262ed54fac6f9326b160067a2ded9ce7')
version('0.3-57', sha256='6e369d07b464a9624209a06b5078bf988f01f7963076e946649d76aea0622d17')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-cluster', type=('build', 'run'))
| 0
| 0
| 0
|
e6aa0f6654ae612b3d6e03152a37d40e81cf4608
| 18
|
py
|
Python
|
One Road.py
|
MrAnonymous5635/CSCircles
|
010ac82942c88da357e214ea5462ec378f3667b8
|
[
"MIT"
] | 17
|
2018-09-19T09:44:33.000Z
|
2022-01-17T15:17:11.000Z
|
One Road.py
|
MrAnonymous5635/CSCircles
|
010ac82942c88da357e214ea5462ec378f3667b8
|
[
"MIT"
] | 2
|
2020-02-24T15:28:33.000Z
|
2021-11-16T00:04:52.000Z
|
One Road.py
|
MrAnonymous5635/CSCircles
|
010ac82942c88da357e214ea5462ec378f3667b8
|
[
"MIT"
] | 8
|
2020-02-20T00:02:06.000Z
|
2022-01-06T17:25:51.000Z
|
print(min(a,b,c))
| 9
| 17
| 0.611111
|
print(min(a,b,c))
| 0
| 0
| 0
|
7fa4a0b55f76126d1cfae7e663e73c96dbb0b977
| 11,714
|
py
|
Python
|
Compute_all_features/resources/feature_functions.py
|
BenjaminDHorne/Language-Features-for-News
|
eaaaf81b8908a8c9d19f97800d566300286db72e
|
[
"Unlicense"
] | 12
|
2018-03-28T02:16:52.000Z
|
2020-09-23T07:38:01.000Z
|
Compute_all_features/resources/feature_functions.py
|
BenjaminDHorne/Language-Features-for-News
|
eaaaf81b8908a8c9d19f97800d566300286db72e
|
[
"Unlicense"
] | 4
|
2018-11-09T17:50:24.000Z
|
2019-12-08T05:02:09.000Z
|
Compute_all_features/resources/feature_functions.py
|
BenjaminDHorne/Language-Features-for-News
|
eaaaf81b8908a8c9d19f97800d566300286db72e
|
[
"Unlicense"
] | 14
|
2018-11-09T12:12:32.000Z
|
2021-11-29T17:53:25.000Z
|
import nltk
from nltk import tokenize
from nltk.util import ngrams
import os
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from readability import Readability
import collections
from nltk.stem.porter import *
from nltk import word_tokenize
import string
import pickle
### This File contains functions for each type of feature. Use Compute_All_Features.py to run.
DIRNAME = os.path.dirname(__file__)
| 37.787097
| 218
| 0.655967
|
import nltk
from nltk import tokenize
from nltk.util import ngrams
import os
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from readability import Readability
import collections
from nltk.stem.porter import *
from nltk import word_tokenize
import string
import pickle
import unicodedata
### This File contains functions for each type of feature. Use Compute_All_Features.py to run.
DIRNAME = os.path.dirname(__file__)
class Functions:
def fix(self, text):
try:
text = text.decode("ascii", "ignore")
except:
t=[unicodedata.normalize('NFKD', unicode(q)).encode('ascii','ignore') for q in text]
text=''.join(t).strip()
return text
def load_happiness_index_lexicon(self, filepath="./resources/"):
word_to_happiness = {}
with open(os.path.join(filepath, "happiness_index.txt")) as lex:
lex.readline()
for line in lex:
line = line.strip().split("\t")
word_to_happiness[line[0]] = line[2]
return word_to_happiness
def happiness_index_feats(self, text):
happiness_scores = []
happiness = self.load_happiness_index_lexicon()
tokens = word_tokenize(text)
tokens = [t.lower() for t in tokens]
with open("./resources/stopwords.txt") as stopdata:
stopwords = [w.strip() for w in stopdata]
stopwords = set(stopwords)
for token in tokens:
if token not in stopwords:
if token in happiness.keys():
happiness_scores.append(float(happiness[token]))
else:
happiness_scores.append(5)
if len(happiness_scores) == 0:
return 0
h = float(sum(happiness_scores)) / len(happiness_scores)
return h
def load_moral_foundations_lexicon(self, filepath="./resources/"):
code_to_foundation = {}
foundation_to_lex = {}
with open(os.path.join(filepath, "moral foundations dictionary.dic")) as lex:
header_token = self.fix(lex.readline())
for line in lex:
line = self.fix(line)
if line == header_token:
break
code_foundation = line.strip().split()
code_to_foundation[code_foundation[0]] = code_foundation[1]
foundation_to_lex[code_foundation[1]] = []
for line in lex:
try:
word_code = line.strip().split()
stem = word_code[0].replace("*", "")
codes = word_code[1:]
for x in xrange(len(codes)):
foundation_to_lex[code_to_foundation[codes[x]]].append(stem)
except:
continue
return foundation_to_lex
def moral_foundation_feats(self, text):
foundation_counts = {}
foundation_lex_dictionary = self.load_moral_foundations_lexicon()
tokens = word_tokenize(text)
stemmer = PorterStemmer()
stemed_tokens = [stemmer.stem(t) for t in tokens]
for key in foundation_lex_dictionary.keys():
foundation_counts[key] = float(sum([stemed_tokens.count(i) for i in foundation_lex_dictionary[key]])) / len(
stemed_tokens)
return foundation_counts["HarmVirtue"], foundation_counts["HarmVice"], foundation_counts["FairnessVirtue"], \
foundation_counts["FairnessVice"], foundation_counts["IngroupVirtue"], foundation_counts["IngroupVice"], \
foundation_counts["AuthorityVirtue"], foundation_counts["AuthorityVice"], foundation_counts["PurityVirtue"], \
foundation_counts["PurityVice"], foundation_counts["MoralityGeneral"]
def load_acl13_lexicons(self, filepath="./resources/"):
with open(os.path.join(filepath, "bias-lexicon.txt")) as lex:
bias = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "assertives.txt")) as lex:
assertives = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "factives.txt")) as lex:
factives = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "hedges.txt")) as lex:
hedges = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "implicatives.txt")) as lex:
implicatives = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "report_verbs.txt")) as lex:
report_verbs = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "negative-words.txt")) as lex:
negative = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "positive-words.txt")) as lex:
positive = set([self.fix(l.strip()) for l in lex])
with open(os.path.join(filepath, "subjclueslen.txt")) as lex:
wneg = set([])
wpos = set([])
wneu = set([])
sneg = set([])
spos = set([])
sneu = set([])
for line in lex:
line = self.fix(line).split()
if line[0] == "type=weaksubj":
if line[-1] == "priorpolarity=negative":
wneg.add(line[2].split("=")[1])
elif line[-1] == "priorpolarity=positive":
wpos.add(line[2].split("=")[1])
elif line[-1] == "priorpolarity=neutral":
wneu.add(line[2].split("=")[1])
elif line[-1] == "priorpolarity=both":
wneg.add(line[2].split("=")[1])
wpos.add(line[2].split("=")[1])
elif line[0] == "type=strongsubj":
if line[-1] == "priorpolarity=negative":
sneg.add(line[2].split("=")[1])
elif line[-1] == "priorpolarity=positive":
spos.add(line[2].split("=")[1])
elif line[-1] == "priorpolarity=neutral":
sneu.add(line[2].split("=")[1])
elif line[-1] == "priorpolarity=both":
spos.add(line[2].split("=")[1])
sneg.add(line[2].split("=")[1])
return bias, assertives, factives, hedges, implicatives, report_verbs, positive, negative, wneg, wpos, wneu, sneg, spos, sneu
def bias_lexicon_feats(self, text):
bias, assertives, factives, hedges, implicatives, report_verbs, positive_op, negative_op, wneg, wpos, wneu, sneg, spos, sneu = self.load_acl13_lexicons()
tokens = word_tokenize(text)
bigrams = [" ".join(bg) for bg in ngrams(tokens, 2)]
trigrams = [" ".join(tg) for tg in ngrams(tokens, 3)]
bias_count = float(sum([tokens.count(b) for b in bias])) / len(tokens)
assertives_count = float(sum([tokens.count(a) for a in assertives])) / len(tokens)
factives_count = float(sum([tokens.count(f) for f in factives])) / len(tokens)
hedges_count = sum([tokens.count(h) for h in hedges]) + sum([bigrams.count(h) for h in hedges]) + sum(
[trigrams.count(h) for h in hedges])
hedges_count = float(hedges_count) / len(tokens)
implicatives_count = float(sum([tokens.count(i) for i in implicatives])) / len(tokens)
report_verbs_count = float(sum([tokens.count(r) for r in report_verbs])) / len(tokens)
positive_op_count = float(sum([tokens.count(p) for p in positive_op])) / len(tokens)
negative_op_count = float(sum([tokens.count(n) for n in negative_op])) / len(tokens)
wneg_count = float(sum([tokens.count(n) for n in wneg])) / len(tokens)
wpos_count = float(sum([tokens.count(n) for n in wpos])) / len(tokens)
wneu_count = float(sum([tokens.count(n) for n in wneu])) / len(tokens)
sneg_count = float(sum([tokens.count(n) for n in sneg])) / len(tokens)
spos_count = float(sum([tokens.count(n) for n in spos])) / len(tokens)
sneu_count = float(sum([tokens.count(n) for n in sneu])) / len(tokens)
return bias_count, assertives_count, factives_count, hedges_count, implicatives_count, report_verbs_count, positive_op_count, negative_op_count, wneg_count, wpos_count, wneu_count, sneg_count, spos_count, sneu_count
def ttr(self, text):
words = text.split()
dif_words = len(set(words))
tot_words = len(words)
if tot_words == 0:
return 0
return (float(dif_words) / tot_words)
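        # worked example: ttr("the cat sat on the mat") -> 5 distinct words / 6 total = 0.8333...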
def POS_features(self, fn, text, outpath):
fname = os.path.join(outpath, fn.split(".")[0] + "_tagged.txt")
pos_tags = ["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD", "NN", "NNS", "NNP", "NNPS", "PDT",
"POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "SYM", "TO", "UH", "WP$", "WRB", "VB", "VBD", "VBG",
"VBN", "VBP", "VBZ", "WDT", "WP"]
        sents = tokenize.sent_tokenize(text)
        counts_norm = []
        allwords = []
with open(fname, "w") as out:
for sent in sents:
words = sent.strip(".").split()
tags = nltk.pos_tag(words)
strtags = ["/".join((wt[0], wt[1])) for wt in tags]
out.write(" ".join(strtags) + " ")
with open(fname, "r") as fl:
line = fl.readline() # each file is one line
wordandtag = line.strip().split()
try:
tags = [wt.split("/")[1] for wt in wordandtag]
        except IndexError:
            print(wordandtag)
            tags = []  # malformed word/tag pairs: fall back to an empty tag list
counts = collections.Counter(tags)
for pt in pos_tags:
try:
counts_norm.append(float(counts[pt]) / len(tags))
except:
counts_norm.append(0)
return counts_norm
def vadersent(self, text):
analyzer = SentimentIntensityAnalyzer()
vs = analyzer.polarity_scores(text)
return vs['neg'], vs['neu'], vs['pos']
def readability(self, text):
rd = Readability(text)
fkg_score = rd.FleschKincaidGradeLevel()
SMOG = rd.SMOGIndex()
return fkg_score, SMOG
def wordlen_and_stop(self, text):
with open("./resources/stopwords.txt") as data:
stopwords = [w.strip() for w in data]
        stopwords = set(stopwords)  # keep the stopword lookups below O(1)
words = word_tokenize(text)
WC = len(words)
stopwords_in_text = [s for s in words if s in stopwords]
percent_sws = float(len(stopwords_in_text)) / len(words)
lengths = [len(w) for w in words if w not in stopwords]
if len(lengths) == 0:
word_len_avg = 3
else:
word_len_avg = float(sum(lengths)) / len(lengths)
return percent_sws, word_len_avg, WC
def stuff_LIWC_leftout(self, pid, text):
puncs = set(string.punctuation)
tokens = word_tokenize(text)
quotes = tokens.count("\"") + tokens.count('``') + tokens.count("''")
Exclaim = tokens.count("!")
AllPunc = 0
for p in puncs:
AllPunc += tokens.count(p)
words_upper = 0
for w in tokens:
if w.isupper():
words_upper += 1
        if len(tokens) == 0:
            # empty document: report the id and return zeros rather than divide by zero
            print(pid)
            return 0, 0, 0, 0
        allcaps = float(words_upper) / len(tokens)
        return (float(quotes) / len(tokens)) * 100, (float(Exclaim) / len(tokens)) * 100, (
            float(AllPunc) / len(tokens)) * 100, allcaps
def subjectivity(self, text):
loaded_model = pickle.load(open(os.path.join(DIRNAME, '', 'NB_Subj_Model.sav'), 'rb'))
count_vect = pickle.load(open(os.path.join(DIRNAME, '', 'count_vect.sav'), 'rb'))
tfidf_transformer = pickle.load(open(os.path.join(DIRNAME, '', 'tfidf_transformer.sav'), 'rb'))
X_new_counts = count_vect.transform([text])
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
result = loaded_model.predict_proba(X_new_tfidf)
prob_obj = result[0][0]
prob_subj = result[0][1]
return prob_obj, prob_subj
def load_LIWC_dictionaries(self, filepath="./resources/"):
cat_dict = {}
stem_dict = {}
counts_dict = {}
with open(os.path.join(filepath, "LIWC2007_English100131.dic")) as raw:
raw.readline()
for line in raw:
if line.strip() == "%":
break
line = line.strip().split()
cat_dict[line[0]] = line[1]
counts_dict[line[0]] = 0
for line in raw:
line = line.strip().split()
stem_dict[line[0]] = [l.replace("*", "") for l in line[1:]]
return cat_dict, stem_dict, counts_dict
def LIWC(self, text, cat_dict, stem_dict, counts_dict):
for key in counts_dict:
counts_dict[key] = 0
tokens = word_tokenize(text)
WC = len(tokens)
stemmer = PorterStemmer()
stemed_tokens = [stemmer.stem(t) for t in tokens]
# count and percentage
for stem in stem_dict:
count = stemed_tokens.count(stem.replace("*", ""))
if count > 0:
for cat in stem_dict[stem]:
counts_dict[cat] += count
counts_norm = [float(counts_dict[cat]) / WC * 100 for cat in counts_dict]
cats = [cat_dict[cat] for cat in cat_dict]
return counts_norm, cats
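The feature methods above share a single pattern: count how often lexicon entries occur in the tokenized text and normalize by the token count. The sketch below restates that pattern in isolation; the lexicon names and the plain whitespace tokenizer are illustrative stand-ins, not the resource files or NLTK pipeline the class actually uses.
def lexicon_hit_rates(text, lexicons):
    """Return {lexicon_name: fraction of tokens found in that lexicon} (sketch only)."""
    tokens = text.lower().split()
    if not tokens:
        # guard against empty input instead of dividing by zero
        return {name: 0.0 for name in lexicons}
    return {
        name: sum(tokens.count(word) for word in words) / float(len(tokens))
        for name, words in lexicons.items()
    }
if __name__ == "__main__":
    demo_lexicons = {
        "hedges": {"may", "might", "possibly"},
        "assertives": {"claim", "insist", "state"},
    }
    print(lexicon_hit_rates("they claim it may possibly rain", demo_lexicons))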
| 10,813
| -5
| 440
|
07e053226e18266eef9402961a67ab146eb222b4
| 3,107
|
py
|
Python
|
sandbox/vtk/vtkqt.py
|
rboman/progs
|
c60b4e0487d01ccd007bcba79d1548ebe1685655
|
[
"Apache-2.0"
] | 2
|
2021-12-12T13:26:06.000Z
|
2022-03-03T16:14:53.000Z
|
sandbox/vtk/vtkqt.py
|
rboman/progs
|
c60b4e0487d01ccd007bcba79d1548ebe1685655
|
[
"Apache-2.0"
] | 5
|
2019-03-01T07:08:46.000Z
|
2019-04-28T07:32:42.000Z
|
sandbox/vtk/vtkqt.py
|
rboman/progs
|
c60b4e0487d01ccd007bcba79d1548ebe1685655
|
[
"Apache-2.0"
] | 2
|
2017-12-13T13:13:52.000Z
|
2019-03-13T20:08:15.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# example of PyQt (QMainWindow) + vtk (QVTKRenderWindowInteractor)
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
print("Qt %s loaded!" % QT_VERSION_STR)
import vtk
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
import sys
if __name__ == "__main__":
app = QApplication(sys.argv)
window = SimpleView()
window.show()
window.widget.Initialize() # This is the line we need
app.exec_()
| 35.306818
| 76
| 0.671709
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# example of PyQt (QMainWindow) + vtk (QVTKRenderWindowInteractor)
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
print("Qt %s loaded!" % QT_VERSION_STR)
import vtk
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
import sys
class Ui_MainWindow(QWidget):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(603, 553)
self.centralWidget = QWidget(MainWindow)
self.gridlayout = QGridLayout(self.centralWidget)
self.vtkWidget = QVTKRenderWindowInteractor(self.centralWidget)
self.gridlayout.addWidget(self.vtkWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralWidget)
class SimpleView(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.widget = self.ui.vtkWidget
self.ren = vtk.vtkRenderer()
renwin = self.widget.GetRenderWindow()
renwin.AddRenderer(self.ren)
iren = self.ui.vtkWidget.GetRenderWindow().GetInteractor()
cube = vtk.vtkCubeSource()
cube.SetXLength(200)
cube.SetYLength(200)
cube.SetZLength(200)
cube.Update()
cm = vtk.vtkPolyDataMapper()
cm.SetInputConnection(cube.GetOutputPort())
ca = vtk.vtkActor()
ca.SetMapper(cm)
self.ren.AddActor(ca)
if 1: # AnnotatedCubeActor
self.axesActor = vtk.vtkAnnotatedCubeActor()
self.axesActor.SetXPlusFaceText('R')
self.axesActor.SetXMinusFaceText('L')
self.axesActor.SetYMinusFaceText('H')
self.axesActor.SetYPlusFaceText('F')
self.axesActor.SetZMinusFaceText('P')
self.axesActor.SetZPlusFaceText('A')
self.axesActor.GetTextEdgesProperty().SetColor(1, 1, 0)
self.axesActor.GetTextEdgesProperty().SetLineWidth(2)
self.axesActor.GetCubeProperty().SetColor(0, 0, 1)
self.axes = vtk.vtkOrientationMarkerWidget()
self.axes.SetOrientationMarker(self.axesActor)
self.axes.SetInteractor(iren)
self.axes.EnabledOn()
self.axes.InteractiveOn()
self.ren.ResetCamera()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = SimpleView()
window.show()
window.widget.Initialize() # This is the line we need
app.exec_()
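The cube above follows VTK's standard source -> mapper -> actor pipeline, and any other geometry can be attached to the same renderer in the same way. The helper below is a hedged sketch, not part of the original script: it reuses the module's existing `import vtk`, and the sphere parameters are arbitrary.
def add_demo_sphere(renderer):
    """Add an orange demo sphere to an existing vtkRenderer (illustrative sketch)."""
    sphere = vtk.vtkSphereSource()
    sphere.SetRadius(80)
    sphere.SetCenter(250, 0, 0)  # offset so it does not overlap the 200x200x200 cube
    sphere.SetThetaResolution(32)
    sphere.SetPhiResolution(32)
    sphere.Update()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(sphere.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(1.0, 0.5, 0.0)
    renderer.AddActor(actor)
    return actor
Calling add_demo_sphere(self.ren) at the end of SimpleView.__init__, just before self.ren.ResetCamera(), would put both shapes in view when the window opens.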
| 1,901
| 17
| 98
|
ab5fb1a7d6f2ca56ccccbda5f8782171c842d4bd
| 1,796
|
py
|
Python
|
datasets/hdf5datasetwriter.py
|
mihsamusev/pytrl_demo
|
411a74cb5f3601f03438f608b4cf8e451a88345e
|
[
"MIT"
] | null | null | null |
datasets/hdf5datasetwriter.py
|
mihsamusev/pytrl_demo
|
411a74cb5f3601f03438f608b4cf8e451a88345e
|
[
"MIT"
] | null | null | null |
datasets/hdf5datasetwriter.py
|
mihsamusev/pytrl_demo
|
411a74cb5f3601f03438f608b4cf8e451a88345e
|
[
"MIT"
] | null | null | null |
import h5py
import os
| 34.538462
| 79
| 0.599109
|
import h5py
import os
class HDF5DatasetWriter:
    def __init__(self, dims, outputPath, dataKey="images", bufSize=1000):
        # verify that the output file doesn't already exist
if os.path.exists(outputPath):
raise ValueError("Specified path to HDF5 file already exists",
outputPath)
# initialize HDF5 database
self.db = h5py.File(outputPath, 'w')
self.data = self.db.create_dataset(dataKey, dims, dtype="float")
self.labels = self.db.create_dataset("labels", (dims[0],), dtype="int")
# create buffer
self.buffer = {"data": [], "labels": []}
self.bufSize = bufSize
self.startIdx = 0
def add(self, rows, labels):
# add data to buffer, if buffer is full flush it to
# the database
self.buffer["data"].extend(rows)
self.buffer["labels"].extend(labels)
if len(self.buffer["data"]) >= self.bufSize:
self.flush()
def flush(self):
# calculate the inserting range to hdf5 database for the buffer
endIdx = self.startIdx + len(self.buffer["data"])
self.data[self.startIdx:endIdx] = self.buffer["data"]
self.labels[self.startIdx:endIdx] = self.buffer["labels"]
# update start and reset buffer
self.startIdx = endIdx
self.buffer = {"data": [], "labels": []}
def storeClassLabels(self, classLabels):
# create a dataset to store the actual class label names,
# then store the class labels
dt = h5py.special_dtype(vlen=str)
labelSet = self.db.create_dataset("label_names",
(len(classLabels),), dtype=dt)
labelSet[:] = classLabels
def close(self):
if len(self.buffer["data"]) > 0:
self.flush()
self.db.close()
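A hedged usage sketch for HDF5DatasetWriter follows; the output path, array shapes, and class labels are invented for illustration, and numpy is used only to fabricate data. Note that the constructor refuses to overwrite an existing file.
import numpy as np
writer = HDF5DatasetWriter(dims=(10, 32, 32, 3),
                           outputPath="demo_dataset.hdf5",
                           bufSize=4)
for i in range(10):
    image = np.random.rand(32, 32, 3)  # stand-in for a real image
    writer.add([image], [i % 2])       # buffered; flushed every bufSize rows
writer.storeClassLabels(["cat", "dog"])
writer.close()                         # flushes whatever is still buffered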
| 1,613
| 3
| 157
|
cf4315f5984ce6db46cd3a2ae1530f8767e750cf
| 8,189
|
py
|
Python
|
autokeras/hypermodel/processor.py
|
dickronez/autokeras
|
b31f2cafe77bf3a2f738289a89438fb72936117c
|
[
"MIT"
] | 1
|
2019-09-06T07:47:40.000Z
|
2019-09-06T07:47:40.000Z
|
autokeras/hypermodel/processor.py
|
dickronez/autokeras
|
b31f2cafe77bf3a2f738289a89438fb72936117c
|
[
"MIT"
] | null | null | null |
autokeras/hypermodel/processor.py
|
dickronez/autokeras
|
b31f2cafe77bf3a2f738289a89438fb72936117c
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
from sklearn.feature_extraction import text
from sklearn import feature_selection
from tensorflow.python.util import nest
from autokeras import const
from autokeras.hypermodel import hyper_block as hb_module
class HyperPreprocessor(hb_module.HyperBlock):
"""Hyper preprocessing block base class."""
def build(self, hp, inputs=None):
"""Build into part of a Keras Model.
        Since preprocessors transform the data before it is fed into the Keras Model,
they are not part of the Keras Model. They only pass the inputs
directly to outputs.
"""
return inputs
def set_hp(self, hp):
"""Set Hyperparameters for the Preprocessor.
        Since the `update` and `transform` functions operate on single training
        instances rather than on the entire dataset, the hyperparameters need to
        be set before calling them.
Args:
hp: Hyperparameters. The hyperparameters for tuning the preprocessor.
"""
self._hp = hp
def update(self, x):
"""Incrementally fit the preprocessor with a single training instance.
Args:
x: EagerTensor. A single instance in the training dataset.
"""
raise NotImplementedError
def transform(self, x):
"""Incrementally fit the preprocessor with a single training instance.
Args:
x: EagerTensor. A single instance in the training dataset.
Returns:
            A transformed instance which can be converted to a tf.Tensor.
"""
raise NotImplementedError
def output_types(self):
"""The output types of the transformed data, e.g. tf.int64.
        The output types are required by tf.py_function, which is used to transform
the dataset into a new one with a map function.
Returns:
A tuple of data types.
"""
raise NotImplementedError
def output_shape(self):
"""The output shape of the transformed data.
The output shape is needed to build the Keras Model from the AutoModel.
The output shape of the preprocessor is the input shape of the Keras Model.
Returns:
A tuple of ints or a TensorShape.
"""
raise NotImplementedError
def finalize(self):
"""Training process of the preprocessor after update with all instances."""
pass
class OneHotEncoder(object):
"""A class that can format data.
    This class provides ways to transform classification labels into one-hot
    vectors and back.
Attributes:
data: The input data
num_classes: The number of classes in the classification problem.
        labels: The set of distinct labels.
label_to_vec: Mapping from label to vector.
int_to_label: Mapping from int to label.
"""
def __init__(self):
"""Initialize a OneHotEncoder"""
self.data = None
self.num_classes = 0
self.labels = None
self.label_to_vec = {}
self.int_to_label = {}
def fit(self, data):
"""Create mapping from label to vector, and vector to label."""
data = np.array(data).flatten()
self.labels = set(data)
self.num_classes = len(self.labels)
for index, label in enumerate(self.labels):
vec = np.array([0] * self.num_classes)
vec[index] = 1
self.label_to_vec[label] = vec
self.int_to_label[index] = label
def transform(self, data):
"""Get vector for every element in the data array."""
data = np.array(data)
if len(data.shape) > 1:
data = data.flatten()
return np.array(list(map(lambda x: self.label_to_vec[x], data)))
def inverse_transform(self, data):
"""Get label for every element in data."""
return np.array(list(map(lambda x: self.int_to_label[x],
np.argmax(np.array(data), axis=1))))
class Normalize(HyperPreprocessor):
""" Perform basic image transformation and augmentation.
# Attributes
mean: Tensor. The mean value. Shape: (data last dimension length,)
std: Tensor. The standard deviation. Shape is the same as mean.
"""
def transform(self, x):
""" Transform the test data, perform normalization.
# Arguments
data: Tensorflow Dataset. The data to be transformed.
# Returns
A DataLoader instance.
"""
x = nest.flatten(x)[0]
return (x - self.mean) / self.std
class TextToIntSequence(HyperPreprocessor):
"""Convert raw texts to sequences of word indices."""
class TextToNgramVector(HyperPreprocessor):
"""Convert raw texts to n-gram vectors."""
| 31.74031
| 84
| 0.620222
|
import tensorflow as tf
import numpy as np
from sklearn.feature_extraction import text
from sklearn import feature_selection
from tensorflow.python.util import nest
from autokeras import const
from autokeras.hypermodel import hyper_block as hb_module
class HyperPreprocessor(hb_module.HyperBlock):
"""Hyper preprocessing block base class."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._hp = None
def build(self, hp, inputs=None):
"""Build into part of a Keras Model.
        Since preprocessors transform the data before it is fed into the Keras Model,
they are not part of the Keras Model. They only pass the inputs
directly to outputs.
"""
return inputs
def set_hp(self, hp):
"""Set Hyperparameters for the Preprocessor.
        Since the `update` and `transform` functions operate on single training
        instances rather than on the entire dataset, the hyperparameters need to
        be set before calling them.
Args:
hp: Hyperparameters. The hyperparameters for tuning the preprocessor.
"""
self._hp = hp
def update(self, x):
"""Incrementally fit the preprocessor with a single training instance.
Args:
x: EagerTensor. A single instance in the training dataset.
"""
raise NotImplementedError
def transform(self, x):
"""Incrementally fit the preprocessor with a single training instance.
Args:
x: EagerTensor. A single instance in the training dataset.
Returns:
            A transformed instance which can be converted to a tf.Tensor.
"""
raise NotImplementedError
def output_types(self):
"""The output types of the transformed data, e.g. tf.int64.
        The output types are required by tf.py_function, which is used to transform
the dataset into a new one with a map function.
Returns:
A tuple of data types.
"""
raise NotImplementedError
def output_shape(self):
"""The output shape of the transformed data.
The output shape is needed to build the Keras Model from the AutoModel.
The output shape of the preprocessor is the input shape of the Keras Model.
Returns:
A tuple of ints or a TensorShape.
"""
raise NotImplementedError
def finalize(self):
"""Training process of the preprocessor after update with all instances."""
pass
class OneHotEncoder(object):
"""A class that can format data.
    This class provides ways to transform classification labels into one-hot
    vectors and back.
Attributes:
data: The input data
num_classes: The number of classes in the classification problem.
        labels: The set of distinct labels.
label_to_vec: Mapping from label to vector.
int_to_label: Mapping from int to label.
"""
def __init__(self):
"""Initialize a OneHotEncoder"""
self.data = None
self.num_classes = 0
self.labels = None
self.label_to_vec = {}
self.int_to_label = {}
def fit(self, data):
"""Create mapping from label to vector, and vector to label."""
data = np.array(data).flatten()
self.labels = set(data)
self.num_classes = len(self.labels)
for index, label in enumerate(self.labels):
vec = np.array([0] * self.num_classes)
vec[index] = 1
self.label_to_vec[label] = vec
self.int_to_label[index] = label
def transform(self, data):
"""Get vector for every element in the data array."""
data = np.array(data)
if len(data.shape) > 1:
data = data.flatten()
return np.array(list(map(lambda x: self.label_to_vec[x], data)))
def inverse_transform(self, data):
"""Get label for every element in data."""
return np.array(list(map(lambda x: self.int_to_label[x],
np.argmax(np.array(data), axis=1))))
class Normalize(HyperPreprocessor):
""" Perform basic image transformation and augmentation.
# Attributes
mean: Tensor. The mean value. Shape: (data last dimension length,)
std: Tensor. The standard deviation. Shape is the same as mean.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.sum = 0
self.square_sum = 0
self.count = 0
self.mean = None
self.std = None
self._shape = None
def update(self, x):
x = nest.flatten(x)[0].numpy()
self.sum += x
self.square_sum += np.square(x)
self.count += 1
self._shape = x.shape
def finalize(self):
axis = tuple(range(len(self._shape) - 1))
self.mean = np.mean(self.sum / self.count, axis=axis)
square_mean = np.mean(self.square_sum / self.count, axis=axis)
self.std = np.sqrt(square_mean - np.square(self.mean))
def transform(self, x):
""" Transform the test data, perform normalization.
# Arguments
data: Tensorflow Dataset. The data to be transformed.
# Returns
A DataLoader instance.
"""
x = nest.flatten(x)[0]
return (x - self.mean) / self.std
def output_types(self):
return tf.float64,
def output_shape(self):
return self.mean.shape
class TextToIntSequence(HyperPreprocessor):
"""Convert raw texts to sequences of word indices."""
def __init__(self, max_len=None, **kwargs):
super().__init__(**kwargs)
self.max_len = max_len
self._max_len = 0
self._tokenizer = tf.keras.preprocessing.text.Tokenizer(
num_words=const.Constant.VOCABULARY_SIZE)
def update(self, x):
sentence = nest.flatten(x)[0].numpy().decode('utf-8')
self._tokenizer.fit_on_texts([sentence])
sequence = self._tokenizer.texts_to_sequences([sentence])[0]
if self.max_len is None:
self._max_len = max(self._max_len, len(sequence))
def transform(self, x):
sentence = nest.flatten(x)[0].numpy().decode('utf-8')
sequence = self._tokenizer.texts_to_sequences(sentence)[0]
sequence = tf.keras.preprocessing.sequence.pad_sequences(
sequence,
self.max_len or self._max_len)
return sequence
def output_types(self):
return tf.int64,
def output_shape(self):
return self.max_len or self._max_len,
class TextToNgramVector(HyperPreprocessor):
"""Convert raw texts to n-gram vectors."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._vectorizer = text.TfidfVectorizer(
ngram_range=(1, 2),
strip_accents='unicode',
decode_error='replace',
analyzer='word',
min_df=2)
self.selector = None
self.labels = None
self._max_features = const.Constant.VOCABULARY_SIZE
self._vectorizer.max_features = self._max_features
self._texts = []
self._shape = None
def update(self, x):
# TODO: Implement a sequential version fit for both
# TfidfVectorizer and SelectKBest
self._texts.append(nest.flatten(x)[0].numpy().decode('utf-8'))
def finalize(self):
self._texts = np.array(self._texts)
self._vectorizer.fit(self._texts)
data = self._vectorizer.transform(self._texts)
self._shape = data.shape[1:]
if self.labels:
self.selector = feature_selection.SelectKBest(
feature_selection.f_classif,
k=min(self._max_features, data.shape[1]))
self.selector.fit(data, self.labels)
def transform(self, x):
sentence = nest.flatten(x)[0].numpy().decode('utf-8')
data = self._vectorizer.transform([sentence]).toarray()
if self.selector:
data = self.selector.transform(data).astype('float32')
return data[0]
def output_types(self):
return tf.float64,
def output_shape(self):
return self._shape
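A hedged usage sketch for the OneHotEncoder defined above; the labels are invented for illustration and numpy is already imported by the module.
encoder = OneHotEncoder()
labels = ["cat", "dog", "cat", "bird"]
encoder.fit(labels)                     # build label <-> vector mappings
one_hot = encoder.transform(labels)     # shape: (len(labels), num_classes)
recovered = encoder.inverse_transform(one_hot)
assert list(recovered) == labels        # the round trip restores the original labels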
| 2,947
| 0
| 459
|