hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0f002191d74bb50bea698bd5937724a5cff101 | 1,648 | py | Python | gamer_registration_system/con/tests/test_models.py | splummer/gamer_reg | 7cccbbf8e6e52e46594c8128a7e7a523b8202f03 | [
"MIT"
] | null | null | null | gamer_registration_system/con/tests/test_models.py | splummer/gamer_reg | 7cccbbf8e6e52e46594c8128a7e7a523b8202f03 | [
"MIT"
] | null | null | null | gamer_registration_system/con/tests/test_models.py | splummer/gamer_reg | 7cccbbf8e6e52e46594c8128a7e7a523b8202f03 | [
"MIT"
] | null | null | null | import pytest
import datetime
from django.test import TestCase
from django.utils import timezone
from gamer_registration_system.con.models import Convention, Event, EventSchedule
# Create your tests here.
class EventScheduleModelTests(TestCase):
    """Tests for EventSchedule.recent_event().

    recent_event() should be True only for schedules whose start_date
    falls within the last 24 hours, and False for future or older dates.
    """
    # Unsaved model instances shared by every test; created once when the
    # class body executes.
    new_con = Convention(convention_name='Test Future Con')
    new_event = Event(convention=new_con, title='Test Future Event')
    # NOTE(review): the extra keyword arguments below bind the class
    # attributes as defaults at definition time; the test runner passes
    # only `self`, so the defaults are always used.  `self.new_con` would
    # be the more conventional spelling.
    def test_recent_event_with_future_start(self, new_con=new_con, new_event=new_event):
        """
        recent_event() returns False for events whose start_date
        is in the future.
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_eventsched = EventSchedule(convention=new_con, event=new_event, start_date=time)
        self.assertIs(future_eventsched.recent_event(), False)
    def test_recent_event_with_old_event(self, new_con=new_con, new_event=new_event):
        """
        recent_event() returns False for events whose start_date is older than 1 day
        """
        # One second past the 1-day cutoff: the boundary case just outside.
        time = timezone.now() - datetime.timedelta(days=1, seconds=1)
        old_event = EventSchedule(convention=new_con, event=new_event, start_date=time)
        self.assertIs(old_event.recent_event(), False)
    def test_recent_event_with_recent_question(self, new_con=new_con, new_event=new_event):
        """
        recent_event() returns True for events whose start_date is within the last day
        """
        # One second short of a full day: the boundary case just inside.
        time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        recent_event = EventSchedule(convention=new_con, event=new_event, start_date=time)
        self.assertIs(recent_event.recent_event(), True)
| 43.368421 | 95 | 0.723908 | 1,439 | 0.87318 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.223908 |
4a0f14554b46d464f95e7a64d1a1063eea95e280 | 661 | py | Python | Codes/gracekoo/interview_33.py | ghoslation/algorithm | 5708bf89e59a80cd0f50f2e6138f069b4f9bc96e | [
"Apache-2.0"
] | 256 | 2017-10-25T13:02:15.000Z | 2022-02-25T13:47:59.000Z | Codes/gracekoo/interview_33.py | IYoreI/Algorithm | 0addf0cda0ec9e3f46c480eeda3a8ecb64c94121 | [
"Apache-2.0"
] | 56 | 2017-10-27T01:34:20.000Z | 2022-03-01T00:20:55.000Z | Codes/gracekoo/interview_33.py | IYoreI/Algorithm | 0addf0cda0ec9e3f46c480eeda3a8ecb64c94121 | [
"Apache-2.0"
] | 83 | 2017-10-25T12:51:53.000Z | 2022-02-15T08:27:03.000Z | # -*- coding: utf-8 -*-
# @Time: 2020/7/3 10:21
# @Author: GraceKoo
# @File: interview_33.py
# @Desc: https://leetcode-cn.com/problems/chou-shu-lcof/
class Solution:
    """Solver for LeetCode "chou-shu" (n-th ugly number)."""

    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (prime factors limited to 2, 3, 5).

        The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
        Non-positive n yields 0.
        """
        if n <= 0:
            return 0
        ugly = [1] * n
        i2 = i3 = i5 = 0  # read positions for the x2, x3 and x5 streams
        for pos in range(1, n):
            by2, by3, by5 = ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5
            smallest = min(by2, by3, by5)
            ugly[pos] = smallest
            # Advance every stream that produced the minimum, so duplicates
            # such as 6 = 2*3 = 3*2 are emitted only once.
            if smallest == by2:
                i2 += 1
            if smallest == by3:
                i3 += 1
            if smallest == by5:
                i5 += 1
        return ugly[-1]
# Quick smoke test: the 10th ugly number is 12.
so = Solution()
print(so.nthUglyNumber(10))
| 24.481481 | 59 | 0.444781 | 462 | 0.698941 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.219365 |
4a108377cf0f5dbdfd8a79087fc2255b8874224a | 2,599 | py | Python | SafeCracker.py | epollinger/python | 26b500ace6e9ec743c6046be3e191262fb5eb1e9 | [
"MIT"
] | null | null | null | SafeCracker.py | epollinger/python | 26b500ace6e9ec743c6046be3e191262fb5eb1e9 | [
"MIT"
] | null | null | null | SafeCracker.py | epollinger/python | 26b500ace6e9ec743c6046be3e191262fb5eb1e9 | [
"MIT"
] | null | null | null | # Solution for the SafeCracker 50 Puzzle from Creative Crafthouse
# By: Eric Pollinger
# 9/11/2016
#
# Function to handle the addition of a given slice
def add(slice):
    """Return the sum of the five numbers visible on one radial slice.

    Each movable ring either shows its own number at this slice or, where
    it has a cut-out (marked -1), exposes the inner number of the ring
    beneath it.  Ring rotations index1..index4 and the row arrays are
    module-level globals read at call time.
    """
    total = row0Outer[slice]
    # (outer ring, its rotation, ring underneath, underneath's rotation)
    layers = (
        (row1Outer, index1, row0Inner, 0),
        (row2Outer, index2, row1Inner, index1),
        (row3Outer, index3, row2Inner, index2),
        (row4, index4, row3Inner, index3),
    )
    for outer, turn, inner, inner_turn in layers:
        shown = outer[(turn + slice) % 16]
        if shown == -1:  # cut-out: the ring below shows through
            shown = inner[(inner_turn + slice) % 16]
        total += shown
    return total
if __name__ == "__main__":
    # Raw data (Row0 = base of puzzle)
    # In the movable rings a -1 marks a cut-out: the inner number of the
    # ring underneath shows through there (see add()).
    row0Outer = [10,1,10,4,5,3,15,16,4,7,0,16,8,4,15,7]
    row0Inner = [10,10,10,15,7,19,18,2,9,27,13,11,13,10,18,10]
    row1Outer = [-1,10,-1,8,-1,10,-1,9,-1,8,-1,8,-1,9,-1,6]
    row1Inner = [1,24,8,10,20,7,20,12,1,10,12,22,0,5,8,5]
    row2Outer = [0,-1,11,-1,8,-1,8,-1,8,-1,10,-1,11,-1,10,-1]
    row2Inner = [20,8,19,10,15,20,12,20,13,13,0,22,19,10,0,5]
    row3Outer = [10,-1,14,-1,11,-1,8,-1,12,-1,11,-1,3,-1,8,-1]
    row3Inner = [6,18,8,17,4,20,4,14,4,5,1,14,10,17,10,5]
    row4 = [8,-1,8,-1,16,-1,19,-1,8,-1,17,-1,6,-1,6,-1]
    count = 0
    # Brute force: try all 16^4 rotations of the four movable rings; a
    # solution requires every one of the 16 slices to sum to 50.
    for index1 in range(0,16):
        for index2 in range(0,16):
            for index3 in range(0,16):
                for index4 in range(0,16):
                    # Cheap pre-check on slice 0 before testing the rest.
                    if add(0) == 50:
                        solution = True
                        for sl in range(1,16):
                            if add(sl) != 50:
                                solution = False
                        if solution == True:
                            count = count + 1
                            # Print Solution
                            print('Solution with index values: ' + str(index1) + ' ' + str(index2) + ' ' + str(index3)
                                  + ' ' + str(index4) + ' for a total number of solutions: ' + str(count))
                            for i in range(0, 5):
                                print('Solution with Slice ' + str(i) + ' values:\t ' + str(row1Outer[(index1 + i) % 16]) + '\t\t' + str(
                                    row2Outer[(index2 + i) % 16]) + '\t\t' + str(row3Outer[(index3 + i) % 16]) + '\t\t' + str(row4[(index4 + i) % 16]))
    if count == 0:
        print("No Solution Found")
| 41.253968 | 147 | 0.485571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.139284 |
4a12505b82a3a30031591a8cfae55ddea0b1e877 | 978 | py | Python | mul_func.py | motokimura/shake_shake_chainer | 3b87193dbfcf58723586dfc34c9bc21a900da327 | [
"MIT"
] | 2 | 2018-11-26T13:51:56.000Z | 2019-08-12T00:22:20.000Z | mul_func.py | motokimura/shake_shake_chainer | 3b87193dbfcf58723586dfc34c9bc21a900da327 | [
"MIT"
] | null | null | null | mul_func.py | motokimura/shake_shake_chainer | 3b87193dbfcf58723586dfc34c9bc21a900da327 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import chainer
from chainer import cuda
from chainer import configuration
class Mul(chainer.function.Function):
    """Shake-shake style stochastic weighted sum of two input branches.

    Forward returns alpha*x1 + (1-alpha)*x2, where alpha is drawn
    uniformly per entry along axis 0 (per sample) during training and is
    a constant 0.5 otherwise.  Backward redistributes the gradient with
    an independent random weight beta, so the backward weights differ
    from the forward ones ("shake").
    """
    def __init__(self):
        # Stateless function; explicit no-op constructor.
        return
    def forward(self, inputs):
        x1, x2 = inputs
        xp = cuda.get_array_module(x1) # Get numpy(x=n) or cupy(x=c) array module
        # Evaluation default: an even 0.5/0.5 blend of both branches.
        alpha = xp.ones(x1.shape, dtype=x1.dtype) * 0.5
        if configuration.config.train:
            # One scalar draw per leading-axis entry; assignment broadcasts
            # that value across the remaining axes.
            for i in range(len(alpha)):
                alpha[i] = xp.random.rand()
        return x1 * alpha + x2 * (xp.ones(x1.shape, dtype=x1.dtype) - alpha),
    def backward(self, inputs, grad_outputs):
        gx, = grad_outputs
        xp = cuda.get_array_module(gx)
        # Fresh random weights, independent of forward's alpha.  NOTE: no
        # train-mode check here; backward is only reached during training.
        beta = xp.empty(gx.shape, dtype=gx.dtype)
        for i in range(len(beta)):
            beta[i] = xp.random.rand()
        return gx * beta, gx * (xp.ones(gx.shape, dtype=gx.dtype) - beta)
def mul(x1, x2):
    """Functional wrapper: apply the shake-shake Mul function to x1, x2."""
    return Mul()(x1, x2)
| 26.432432 | 81 | 0.578732 | 811 | 0.829243 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.087935 |
4a1271f036e10e95cf781747d5f3a517fd97dc81 | 1,673 | py | Python | client/osx/objc_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 6 | 2015-04-03T02:25:28.000Z | 2021-11-17T21:42:59.000Z | client/osx/objc_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 3 | 2020-02-11T22:29:15.000Z | 2021-06-10T17:44:31.000Z | client/osx/objc_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Tests for grr.client.lib.osx.objc.
These tests don't have OS X dependencies and will run on linux.
"""
import ctypes
import mox
from grr.client.osx import objc
from grr.lib import flags
from grr.lib import test_lib
class ObjcTest(test_lib.GRRBaseTest):
  """Tests for objc.SetCTypesForLibrary with ctypes lookups stubbed by mox."""
  def setUp(self):
    super(ObjcTest, self).setUp()
    self.mox = mox.Mox()
    # Stub the two ctypes entry points objc uses, so no real dylib is
    # needed and the tests can run on non-OS X hosts.
    self.mox.StubOutWithMock(objc.ctypes.util, 'find_library')
    self.mox.StubOutWithMock(objc.ctypes.cdll, 'LoadLibrary')
    self.dll = self.mox.CreateMockAnything()
    self.function = self.mox.CreateMockAnything()
    self.dll.CFMockFunc = self.function
    self.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
    self.restype = ctypes.c_void_p
    # Function table in the (name, argtypes, restype) format consumed by
    # objc.SetCTypesForLibrary.
    self.cftable = [
        ('CFMockFunc',
         self.argtypes,
         self.restype)
    ]
  def tearDown(self):
    self.mox.UnsetStubs()
  def testSetCTypesForLibraryLibNotFound(self):
    """A missing library raises ErrorLibNotFound."""
    objc.ctypes.util.find_library('mock').AndReturn(None)
    self.mox.ReplayAll()
    self.assertRaises(objc.ErrorLibNotFound, objc.SetCTypesForLibrary,
                      'mock', self.cftable)
    self.mox.VerifyAll()
  def testSetCTypesForLibrary(self):
    """A found library is loaded and its functions get ctypes signatures."""
    objc.ctypes.util.find_library('mock').AndReturn('/mock/path')
    objc.ctypes.cdll.LoadLibrary('/mock/path').AndReturn(self.dll)
    self.mox.ReplayAll()
    dll = objc.SetCTypesForLibrary('mock', self.cftable)
    self.assertEqual(dll.CFMockFunc.argtypes, self.argtypes)
    self.assertEqual(dll.CFMockFunc.restype, self.restype)
    self.mox.VerifyAll()
def main(argv):
  """Entry point for GRR's flags.StartMain wrapper."""
  test_lib.main(argv)
if __name__ == '__main__':
  flags.StartMain(main)
| 26.983871 | 70 | 0.706515 | 1,282 | 0.766288 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.16318 |
4a135294042232d55bf132afa1098b5dfc142ad5 | 731 | py | Python | tests/test_demo.py | bruziev/security_interface | 0758a88f3c6ce96502ad287ab1a743cd5040c0b8 | [
"MIT"
] | 5 | 2018-11-02T07:50:30.000Z | 2019-03-22T19:40:17.000Z | tests/test_demo.py | theruziev/security_interface | cacb85f0736c20f6cbe1b4d148ebb8b56b921642 | [
"MIT"
] | 146 | 2019-05-30T09:16:06.000Z | 2022-02-04T17:20:51.000Z | tests/test_demo.py | theruziev/security_interface | cacb85f0736c20f6cbe1b4d148ebb8b56b921642 | [
"MIT"
] | 1 | 2018-11-13T06:21:01.000Z | 2018-11-13T06:21:01.000Z | import pytest
from demo.jwt import IdentityMaker, JwtIdentityPolicy, JwtAuthPolicy
from security_interface.api import Security
SECRET = "SECRET"  # shared signing secret for both demo policies
# Module-level fixtures: a token factory plus the identity/authorization
# policies wired into a Security facade.
# NOTE(review): expired_after=1 — unit not visible here; check demo.jwt.
identity_maker = IdentityMaker(expired_after=1, secret=SECRET)
jwt_identity = JwtIdentityPolicy(secret=SECRET)
jwt_auth = JwtAuthPolicy()
security = Security(jwt_identity, jwt_auth)
@pytest.mark.asyncio
async def test_jwt_demo():
    """End-to-end JWT demo: a token carries identity and its scopes."""
    claims = {"login": "Bakhtiyor", "scope": ["read", "write"]}
    token = identity_maker.make(claims)
    # The decoded identity must round-trip the login claim.
    identity = await security.check_authorized(token)
    assert identity["login"] == "Bakhtiyor"
    # Granted scopes pass the permission check; anything else is denied.
    for scope in ("read", "write"):
        assert await security.can(token, scope)
    assert not await security.can(token, "private")
| 27.074074 | 68 | 0.74829 | 0 | 0 | 0 | 0 | 396 | 0.541724 | 375 | 0.512996 | 86 | 0.117647 |
4a1469b6007eb43c8ea911ee36cf0b4ea0c6bbef | 867 | py | Python | demos/grasp_fusion/ros/grasp_fusion/node_scripts/bounding_box_to_tf.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | null | null | null | demos/grasp_fusion/ros/grasp_fusion/node_scripts/bounding_box_to_tf.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | 2 | 2019-04-11T05:36:23.000Z | 2019-08-19T12:58:10.000Z | demos/grasp_fusion/ros/grasp_fusion/node_scripts/bounding_box_to_tf.py | pazeshun/jsk_apc | 0ff42000ad5992f8a31e719a5360a39cf4fa1fde | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
import tf
from jsk_recognition_msgs.msg import BoundingBox
class BoundingBoxToTf(object):
    """Rebroadcasts an incoming BoundingBox pose as a TF transform.

    The transform's parent frame is the message's header.frame_id and the
    child frame name comes from the ~tf_frame parameter.
    """
    def __init__(self):
        # Child frame name for the broadcast transform.
        self.tf_frame = rospy.get_param('~tf_frame', 'bounding_box')
        self.broadcaster = tf.TransformBroadcaster()
        self.sub = rospy.Subscriber('~input', BoundingBox, self._cb)
    def _cb(self, bbox):
        # Broadcast the box pose; stamped with now(), not the message stamp.
        pos = bbox.pose.position
        ornt = bbox.pose.orientation
        self.broadcaster.sendTransform((pos.x, pos.y, pos.z),
                                       (ornt.x, ornt.y, ornt.z, ornt.w),
                                       rospy.Time.now(),
                                       self.tf_frame,
                                       bbox.header.frame_id)
if __name__ == '__main__':
    # Fix: the original passed ' bounding_box_to_tf' with a stray leading
    # space; spaces are not legal in ROS graph resource names and make
    # rospy.init_node reject the name.
    rospy.init_node('bounding_box_to_tf')
    app = BoundingBoxToTf()
    rospy.spin()
| 27.09375 | 72 | 0.55594 | 651 | 0.750865 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.098039 |
4a188b12918e6b0e9f8e6f9cc1f7a056cc5daf48 | 42 | py | Python | pymkup/__init__.py | psolin/pymkup | 5f2e5787f3150e10b9c6391c805b02be7496cdc1 | [
"MIT"
] | 7 | 2021-04-17T11:35:00.000Z | 2021-12-30T04:05:27.000Z | pymkup/__init__.py | psolin/pymkup | 5f2e5787f3150e10b9c6391c805b02be7496cdc1 | [
"MIT"
] | 23 | 2021-04-15T18:01:44.000Z | 2021-05-21T15:58:59.000Z | pymkup/__init__.py | psolin/pymkup | 5f2e5787f3150e10b9c6391c805b02be7496cdc1 | [
"MIT"
] | 2 | 2021-04-20T18:09:46.000Z | 2021-04-24T13:04:04.000Z | from .pymkup import *
__version__ = '0.1'  # public package version string
| 14 | 21 | 0.690476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.119048 |
4a18f414dde63eff7e1a06931ef3b1725eecda3f | 539 | py | Python | tests/import/module_getattr.py | sebi5361/micropython | 6c054cd124bc6229bee127128264dc0829dea53c | [
"MIT"
] | 198 | 2017-03-24T23:23:54.000Z | 2022-01-07T07:14:00.000Z | tests/import/module_getattr.py | sebi5361/micropython | 6c054cd124bc6229bee127128264dc0829dea53c | [
"MIT"
] | 509 | 2017-03-28T19:37:18.000Z | 2022-03-31T20:31:43.000Z | tests/import/module_getattr.py | sebi5361/micropython | 6c054cd124bc6229bee127128264dc0829dea53c | [
"MIT"
] | 187 | 2017-03-24T23:23:58.000Z | 2022-02-25T01:48:45.000Z | # test __getattr__ on module
# Exercises the module-level __getattr__ hook (PEP 562 semantics).
# `this` is the module object itself, so attribute access below goes
# through the hook once it is defined.
# ensure that does_not_exist doesn't exist to start with
this = __import__(__name__)
try:
    this.does_not_exist
    assert False
except AttributeError:
    pass
# define __getattr__
def __getattr__(attr):
    # Only 'does_not_exist' is synthesised; everything else keeps raising.
    if attr == 'does_not_exist':
        return False
    raise AttributeError
# do feature test (will also test functionality if the feature exists)
if not hasattr(this, 'does_not_exist'):
    print('SKIP')
    raise SystemExit
# check that __getattr__ works as expected
print(this.does_not_exist)
| 22.458333 | 70 | 0.742115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.471243 |
4a1afa3059a4ff0d3f5701a8c02d7add860eb0bd | 1,094 | py | Python | transform_pdf2image/__init__.py | SilasPDJ/maeportifolios_desktop_etc | a341648c8161251d42055155f6fd99fd388d9f2d | [
"MIT"
] | null | null | null | transform_pdf2image/__init__.py | SilasPDJ/maeportifolios_desktop_etc | a341648c8161251d42055155f6fd99fd388d9f2d | [
"MIT"
] | null | null | null | transform_pdf2image/__init__.py | SilasPDJ/maeportifolios_desktop_etc | a341648c8161251d42055155f6fd99fd388d9f2d | [
"MIT"
] | null | null | null | from defs_utils import *
import pdf2image
import os
def transforma_pdf_em_img_por_materia(materia, pdf_path=None):
    """Convert every PDF of a subject (materia) into per-page JPEG images.

    Pages are saved as out-<N>.jpg under
    ../MATERIAS_CRIA_FILES/<materia>/<pdf-name-prefix>/, creating missing
    directories on the way.  Relies on list_dir/complete_name from
    defs_utils; Windows-style '\\' path separators are assumed throughout.
    """
    searched = materia
    if pdf_path:
        list_files = list_dir(complete_name(searched, pre=pdf_path), True)
    else:
        list_files = list_dir(complete_name(searched), True)
    volta = os.getcwd()  # remember the starting cwd; chdir is used heavily below
    for file in list_files:
        pages = pdf2image.convert_from_path(file)
        print(file)
        os.chdir(volta)
        for e, page in enumerate(pages):
            e_cont = e+1  # 1-based page number for the output file name
            dir_name = '../MATERIAS_CRIA_FILES'
            dir_name += '\\'+searched+'\\'
            dir_name += file.split('\\')[-1].split('-')[0]
            # Create the directory chain by chdir-ing into each component,
            # making any component that does not exist yet.
            for folder in dir_name.split('\\'):
                try:
                    os.chdir(folder)
                except (FileNotFoundError):
                    os.mkdir(folder)
                    os.chdir(folder)
            os.chdir(volta)
            # Directory of this module, used as base for the save path.
            real = '\\'.join(os.path.realpath(__file__).split('\\')[:-1])
            page.save(f'{real}\\{dir_name}\\out-{e_cont}.jpg', 'JPEG')
            print(dir_name)
| 30.388889 | 74 | 0.546618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.087751 |
4a1b274e1ae2fba791921111ffefa8e3eb237de0 | 826 | py | Python | Own/Python/Tutorials/Lists.py | cychitivav/programming_exercises | e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78 | [
"MIT"
] | null | null | null | Own/Python/Tutorials/Lists.py | cychitivav/programming_exercises | e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78 | [
"MIT"
] | null | null | null | Own/Python/Tutorials/Lists.py | cychitivav/programming_exercises | e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78 | [
"MIT"
] | null | null | null | #Cristian Chitiva
#cychitivav@unal.edu.co
#12/Sept/2018
# Basic list-operation demos.
myList = ['Hi', 5, 6 , 3.4, "i"] #Create the list
myList.append([4, 5]) #Add sublist [4, 5] to myList
myList.insert(2,"f") #Add "f" in the position 2
print(myList)
myList = [1, 3, 4, 5, 23, 4, 3, 222, 454, 6445, 6, 4654, 455]
myList.sort() #Sort the list from lowest to highest
print(myList)
myList.sort(reverse = True) #Sort the list from highest to lowest
print(myList)
myList.extend([5, 77]) #Add 5 and 77 to myList
print(myList)
#List comprehension
myList = []
for value in range(0, 50):
    myList.append(value)
print(myList)
myList = ["f" for value in range(0,20)] #twenty copies of "f"
print(myList)
myList = [value for value in range(0,20)]
print(myList)
myList = [value for value in range(0,60,3) if value % 2 == 0] #multiples of 6 below 60
print(myList) | 25.030303 | 68 | 0.641646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.318402 |
4a1b69377cbe114d0ff86a93ac41f2645662116e | 7,955 | py | Python | src/scripts/retrive_sfd_data.py | seattlepublicrecords/revealseattle | 727d6fcaed3abd3170f4941d249067c52e86bf96 | [
"Apache-2.0"
] | null | null | null | src/scripts/retrive_sfd_data.py | seattlepublicrecords/revealseattle | 727d6fcaed3abd3170f4941d249067c52e86bf96 | [
"Apache-2.0"
] | 5 | 2016-10-12T05:21:56.000Z | 2016-10-12T10:26:10.000Z | src/scripts/retrive_sfd_data.py | seattlepublicrecords/revealseattle | 727d6fcaed3abd3170f4941d249067c52e86bf96 | [
"Apache-2.0"
] | null | null | null | import sys, traceback
from bs4 import BeautifulSoup
import rethinkdb as r
import requests
import time
import geocoder
import datetime
from dateutil.parser import parse as dtparse
from pytz import timezone
r.connect( "localhost", 28015).repl()  # default RethinkDB connection for all queries
la = timezone('America/Los_Angeles')  # dispatch timestamps are Seattle local time
# NOTE(review): `table` and `dbtable` refer to the same table; both names
# are used later in this script.
table = r.db("revealseattle").table("dispatch_log")
dbtable = r.db("revealseattle").table("dispatch_log")
# WARNING: wipes the entire dispatch_log table every time this script starts.
dbtable.delete().run()
def get_todays_dispatches():
already_geocoded = []
existing_data = dict([(row['id'], row) for row in dbtable.run()])
addresses_and_coordinates = dbtable.pluck('address', 'coordinates').run()
addresses_to_coordinates = dict([(item['address'], item['coordinates']) for item in addresses_and_coordinates if item.get('coordinates')])
addresses_and_place_names = dbtable.pluck('address', 'place_name').run()
addresses_to_place_names = dict([(item['address'], item['place_name']) for item in addresses_and_place_names if item.get('place_name', '').strip()])
addresses_and_assessor_ids = dbtable.pluck('address', 'assessor_id').run()
addresses_to_assessor_ids = dict([(item['address'], item['assessor_id']) for item in addresses_and_place_names if item.get('assessor_id', '').strip()])
html = requests.get('http://www2.seattle.gov/fire/realtime911/getRecsForDatePub.asp?action=Today&incDate=&rad1=des').text
soup = BeautifulSoup(html, 'lxml')
data = []
table = soup.findAll('tr')[3].find('table').find('table')
rows = table.find_all('tr')
# http://www2.seattle.gov/fire/realtime911/getRecsForDatePub.asp?incDate=09%2F24%2F16&rad1=des
previous_day = datetime.date.today()-datetime.timedelta(1)
previous_day = previous_day.strftime('%m%%2F%d%%2F%y')
html = requests.get('http://www2.seattle.gov/fire/realtime911/getRecsForDatePub.asp?incDate=%s&rad1=des' % (previous_day)).text
soup = BeautifulSoup(html, 'lxml')
data = []
table = soup.findAll('tr')[3].find('table').find('table')
rows.extend(table.find_all('tr'))
for row in rows:
cols = list(row.findAll('td'))
incident_id = cols[1].getText()
db_id = 'SFD_'+incident_id
existing_data_for_row = existing_data.get(db_id, {})
is_active = 'class="active"' in str(cols[0])
if is_active:
org_address = cols[4].getText()
address = org_address + ', Seattle'
address = address.replace('/', '&')
incident = {'id': db_id, 'agency': 'SFD', 'incident_id': incident_id, 'address': address, 'is_active': is_active, 'unit_timestamps': get_unit_dispatches_for_incident(incident_id)}
incident["number_of_units_dispatched"] = len(set([row['unit'] for row in incident["unit_timestamps"]]))
incident["number_of_units_in_service"] = len([row['in_service'] for row in incident["unit_timestamps"] if row['in_service']])
incident["org_address"] = org_address
incident["datetime"] = la.localize(dtparse(cols[0].getText()))
incident["type"] = cols[5].getText()
incident["streetview_url"] = 'https://maps.googleapis.com/maps/api/streetview?size=100x100&key=AIzaSyB59q3rCxkjqo3K2utcIh0_ju_-URL-L6g&location='+incident['address']
coordinates = addresses_to_coordinates.get(address)
if coordinates:
incident["coordinates"] = coordinates
else:
coordinates = geocoder.google(address, key='AIzaSyBE-WvY5WPBccBxW-97ZSBCBYEF80NBe7U').latlng
print coordinates
incident["coordinates"] = coordinates
place_name = addresses_to_place_names.get(address)
if place_name:
incident["place_name"] = place_name
else:
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=%s,%s&radius=30.48&key=AIzaSyBE-WvY5WPBccBxW-97ZSBCBYEF80NBe7U' % (incident["coordinates"][0], incident["coordinates"][1])
print url
place_name = '; '.join([row.get('name', ' ') for row in requests.get(url).json()['results'][1:]])
incident["place_name"] = place_name
assessor_id = addresses_to_assessor_ids.get(address)
if assessor_id:
incident["assessor_id"] = assessor_id
incident["assessor_image_url"] = existing_data_for_row.get('assessor_image_url')
else:
url = 'http://gismaps.kingcounty.gov/parcelviewer2/addSearchHandler.ashx?add='+address
items = requests.get(url).json()['items']
incident["assessor_id"] = items[0].get('PIN', None) if items else None
url_beginning = 'http://blue.kingcounty.com/Assessor/eRealProperty/Dashboard.aspx?ParcelNbr='
if incident["assessor_id"]:
url = '%s%s' % (url_beginning, incident["assessor_id"])
print 'ASSESSOR url', url
assessor_html = requests.get(url).text
#print assessor_html
html_id = 'kingcounty_gov_cphContent_FormViewPictCurr_CurrentImage'
image_url_beginning = 'http://blue.kingcounty.com/Assessor/eRealProperty/'
assessor_soup = BeautifulSoup(assessor_html, 'lxml')
image_url_end = assessor_soup.find(id=html_id)['src']
image_url = '%s%s' % (image_url_beginning, image_url_end)
else:
image_url = ''
incident["assessor_image_url"] = image_url
address_history = existing_data_for_row.get('address_history')
if address_history:
incident["address_history"] = address_history
else:
url = 'https://data.seattle.gov/resource/grwu-wqtk.json?$order=datetime DESC&address='+org_address
print url
incident["address_history"] = requests.get(url, verify=False).json()
data.append(incident)
else:
# was it previously active in last loop?
try:
if dbtable.get('SFD_'+incident_id).run()['is_active']:
dbtable.get('SFD_'+incident_id).update({"is_active": False, "unit_timestamps": get_unit_dispatches_for_incident(incident_id)}).run()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_tb:"
traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
return data
def get_unit_dispatches_for_incident(incident_id):
    """Scrape per-unit dispatch/arrival/in-service times for one incident.

    Returns a list of dicts with keys 'unit', 'dispatched', 'arrived' and
    'in_service' (empty strings where a timestamp is not yet posted).
    """
    url = 'http://www2.seattle.gov/fire/IncidentSearch/incidentDetail.asp?ID='+incident_id
    incident_soup = BeautifulSoup(requests.get(url).text, 'lxml')
    # The unit table is nested two tables deep under the 4th <tr>.
    unit_table = incident_soup.findAll('tr')[3].find('table').find('table')
    records = []
    for unit_row in unit_table.find_all('tr')[1:]:  # skip the header row
        cells = list(unit_row.findAll('td'))
        records.append({
            'unit': cells[0].getText().strip().strip('*'),
            'dispatched': cells[1].getText().strip(),
            'arrived': cells[2].getText().strip(),
            'in_service': cells[3].getText().strip(),
        })
    return records
# Poll forever: scrape the live feed and upsert the rows into RethinkDB.
while True:
    print '*'
    try:
        todays_data = get_todays_dispatches()
        #print todays_data
        print table.insert(todays_data).run(conflict='update')
    except:
        # Log the failure and keep polling; a transient scrape/DB error
        # must not kill the collector.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print "*** print_tb:"
        traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
        print "*** print_exception:"
        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                  limit=2, file=sys.stdout)
time.sleep(5) | 56.021127 | 215 | 0.63193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,192 | 0.27555 |
4a1b9f76326f96b5e172977b7e2c29700d0ab015 | 2,654 | py | Python | tests/test_unasync.py | nsidnev/unasyncer | e48f9104b123707f621adce87721f1028912094d | [
"MIT"
] | null | null | null | tests/test_unasync.py | nsidnev/unasyncer | e48f9104b123707f621adce87721f1028912094d | [
"MIT"
] | null | null | null | tests/test_unasync.py | nsidnev/unasyncer | e48f9104b123707f621adce87721f1028912094d | [
"MIT"
] | null | null | null | import os
import pathlib
import pytest
from unasyncer.unasync import unasync_path
@pytest.fixture
def expected_unasynced_code(sources: pathlib.Path) -> str:
    """Reference output: the hand-written synchronous module."""
    return (sources / "right_sync.py").read_text()
def test_create_right_sync_structure_for_dir(sources: pathlib.Path) -> None:
    """Converting a package directory mirrors its layout under _sync."""
    source_pkg = sources / "_async"
    target_pkg = sources / "_sync"
    for _ in unasync_path(source_pkg, target_pkg):
        pass  # drain the generator so the conversion actually runs
    expected_entries = (
        target_pkg / "__init__.py",
        target_pkg / "inner_package",
        target_pkg / "inner_package" / "__init__.py",
        target_pkg / "inner_package" / "logic.py",
    )
    for entry in expected_entries:
        assert entry.exists()
def test_create_unasynced_single_file(sources: pathlib.Path) -> None:
    """Converting a single file writes its synchronous twin to disk."""
    original = sources / "_async" / "inner_package" / "logic.py"
    converted = sources / "_async" / "inner_package" / "sync.py"
    next(unasync_path(original, converted))
    assert converted.exists()
def test_unasynced_file_content(
    sources: pathlib.Path, expected_unasynced_code: str
) -> None:
    """The generated synchronous code matches the reference exactly."""
    original = sources / "_async" / "inner_package" / "logic.py"
    converted = sources / "_async" / "inner_package" / "sync.py"
    next(unasync_path(original, converted))
    with open(converted) as generated:
        assert generated.read() == expected_unasynced_code
def test_skiping_not_files_or_dirs(sources: pathlib.Path) -> None:
    """Special files (e.g. FIFOs) inside a package are silently skipped."""
    # (name typo "skiping" kept: pytest discovers tests by name)
    source_pkg = sources / "_async"
    target_pkg = sources / "_sync"
    os.mkfifo(source_pkg / "fifo")
    for _ in unasync_path(source_pkg, target_pkg):
        pass  # drain the generator
    assert not (target_pkg / "fifo").exists()
def test_raising_error_for_not_files_or_dirs_in_unasync_path(
    sources: pathlib.Path,
) -> None:
    """Passing a special file as the source itself raises ValueError."""
    source_pkg = sources / "_async"
    target_pkg = sources / "_sync"
    fifo = source_pkg / "fifo"
    os.mkfifo(fifo)
    with pytest.raises(ValueError):
        next(unasync_path(fifo, target_pkg / "fifo"))
def test_raising_error_if_dir_does_not_exist_and_creation_disabled(
    sources: pathlib.Path,
) -> None:
    """With create_missed_paths=False a missing target dir is an error."""
    source_pkg = sources / "_async"
    target_pkg = sources / "_sync"
    with pytest.raises(RuntimeError):
        next(unasync_path(source_pkg, target_pkg, create_missed_paths=False))
def test_raising_error_if_path_does_not_exist() -> None:
    """A nonexistent source path raises FileNotFoundError."""
    missing = pathlib.Path("error_path")
    with pytest.raises(FileNotFoundError):
        next(unasync_path(missing, pathlib.Path("_sync")))
| 28.234043 | 80 | 0.712509 | 0 | 0 | 0 | 0 | 173 | 0.065185 | 0 | 0 | 365 | 0.137528 |
4a1ca9697d104014d8cea3a47f68e33f8be8c14c | 4,281 | py | Python | baccarat.py | lnbalon/open-casino | 2358b12fa2c3c6f17a06c261cf763a2709e4b034 | [
"MIT"
] | null | null | null | baccarat.py | lnbalon/open-casino | 2358b12fa2c3c6f17a06c261cf763a2709e4b034 | [
"MIT"
] | null | null | null | baccarat.py | lnbalon/open-casino | 2358b12fa2c3c6f17a06c261cf763a2709e4b034 | [
"MIT"
] | null | null | null | import random
def shuffle_shoe(n_decks=8):
    """Return a freshly shuffled baccarat shoe of n_decks 52-card decks.

    Suits are irrelevant to baccarat, so a deck is just four copies of
    the thirteen ranks (face cards kept as 'J'/'Q'/'K').
    """
    ranks = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K']
    shoe = ranks * 4 * n_decks
    random.shuffle(shoe)
    return shoe
def deal_game(shoe):
    """Deal one baccarat coup, popping cards off the end of *shoe*.

    Implements the standard punto banco tableau: naturals (8/9) stand,
    player stands on 6-7 and otherwise draws a third card; the banker's
    action depends on his two-card total and the player's third card.

    Returns a dict with 'player'/'banker' scores (mod 10), the hands as
    'player_card'/'banker_card', and 'player_pair'/'banker_pair' flags.
    Fix vs. the original: the early "natural" and "both stand" returns
    omitted the card lists, so every exit now returns the same shape.
    """
    player = []
    banker = []

    def card_converter(card):
        # Face cards count 10 (i.e. zero modulo 10); aces are 1.
        return 10 if card in ['J', 'Q', 'K'] else card

    def make_result():
        return {'player': sum(player) % 10,
                'banker': sum(banker) % 10,
                'player_card': player,
                'banker_card': banker,
                'player_pair': player_pair,
                'banker_pair': banker_pair}

    # Deal the first four cards alternately: player, banker, player, banker.
    card1 = shoe.pop()
    card2 = shoe.pop()
    card3 = shoe.pop()
    card4 = shoe.pop()
    player.append(card_converter(card1))
    banker.append(card_converter(card2))
    player.append(card_converter(card3))
    banker.append(card_converter(card4))

    # Pair side bets compare the raw ranks ('J' == 'J' counts as a pair).
    player_pair = 1 if card1 == card3 else 0
    banker_pair = 1 if card2 == card4 else 0

    player_score = sum(player) % 10
    banker_score = sum(banker) % 10

    # A natural 8 or 9 on either side ends the coup immediately and
    # overrules every other rule.
    if player_score >= 8 or banker_score >= 8:
        return make_result()

    # Player stands on 6-7; banker with 6-7 also stands.
    if player_score >= 6 and banker_score >= 6:
        return make_result()

    if player_score >= 6 and banker_score <= 5:
        # Player stands; banker draws on any total of 0-5.
        banker.append(card_converter(shoe.pop()))
    elif player_score <= 5:
        # Player draws a third card; the banker's action then depends on
        # his own two-card total and the player's third card.
        player_draw = card_converter(shoe.pop())
        player.append(player_draw)
        if banker_score <= 2:
            banker.append(card_converter(shoe.pop()))
        elif banker_score == 3 and player_draw in [1, 2, 3, 4, 5, 6, 7, 9, 10]:
            # Banker with 3 draws unless the player's third card is an 8.
            banker.append(card_converter(shoe.pop()))
        elif banker_score == 4 and player_draw in [2, 3, 4, 5, 6, 7]:
            banker.append(card_converter(shoe.pop()))
        elif banker_score == 5 and player_draw in [4, 5, 6, 7]:
            banker.append(card_converter(shoe.pop()))
        elif banker_score == 6 and player_draw in [6, 7]:
            banker.append(card_converter(shoe.pop()))
        elif banker_score == 7:
            pass  # banker stands

    return make_result()
def simulator(number_shoe=10):
    """Play number_shoe full shoes and return outcome frequencies.

    Each shoe is dealt down to 10 remaining cards.  Returns the fractions
    (player_wins, banker_wins, ties) over all coups played.
    """
    tallies = {'player': 0, 'banker': 0, 'tie': 0}
    for _ in range(number_shoe):
        shoe = shuffle_shoe()
        # Stop before the shoe runs too low to complete a coup.
        while len(shoe) > 10:
            outcome = deal_game(shoe)
            if outcome['player'] > outcome['banker']:
                tallies['player'] += 1
            elif outcome['player'] < outcome['banker']:
                tallies['banker'] += 1
            else:
                tallies['tie'] += 1
    total = tallies['player'] + tallies['banker'] + tallies['tie']
    return tallies['player'] / total, tallies['banker'] / total, tallies['tie'] / total
if __name__ == '__main__':
    # Usage: python baccarat.py <number_of_shoes>
    import sys
    n_shoes = int(sys.argv[1])
    print(simulator(number_shoe=n_shoes))
| 28.731544 | 79 | 0.5758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.281476 |
4a1d8a246738fe7d87ca8aa627f3e6f93ad3cc07 | 3,561 | py | Python | tests/amqp/test_rpc_client.py | OpenMatchmaking/sage-utils-python | 348394bf9cd3adb96fe3915d1d4d99daa46ab437 | [
"BSD-3-Clause"
] | null | null | null | tests/amqp/test_rpc_client.py | OpenMatchmaking/sage-utils-python | 348394bf9cd3adb96fe3915d1d4d99daa46ab437 | [
"BSD-3-Clause"
] | 2 | 2018-04-07T23:24:19.000Z | 2018-05-25T08:31:31.000Z | tests/amqp/test_rpc_client.py | OpenMatchmaking/sage-utils-python | 348394bf9cd3adb96fe3915d1d4d99daa46ab437 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from sage_utils.amqp.clients import RpcAmqpClient
from sage_utils.amqp.extension import AmqpExtension
from sage_utils.constants import VALIDATION_ERROR
from sage_utils.wrappers import Response
from tests.fixtures import Application, FakeConfig, FakeRegisterMicroserviceWorker
# Queue/exchange names and the expected validation-error text, taken from the
# fake worker fixture so the tests stay in sync with it.
REQUEST_QUEUE = FakeRegisterMicroserviceWorker.QUEUE_NAME
REQUEST_EXCHANGE = FakeRegisterMicroserviceWorker.REQUEST_EXCHANGE_NAME
RESPONSE_EXCHANGE_NAME = FakeRegisterMicroserviceWorker.RESPONSE_EXCHANGE_NAME
VALIDATION_ERROR_DECR = FakeRegisterMicroserviceWorker.ERROR_DESCRIPTION
@pytest.mark.asyncio
async def test_rpc_amqp_client_returns_ok(event_loop):
    """A well-formed registration payload is answered with a plain 'OK'."""
    application = Application(config=FakeConfig(), loop=event_loop)
    worker = FakeRegisterMicroserviceWorker(application)
    amqp = AmqpExtension(application)
    amqp.register_worker(worker)
    await amqp.init(event_loop)

    rpc_client = RpcAmqpClient(
        app=application,
        routing_key=REQUEST_QUEUE,
        request_exchange=REQUEST_EXCHANGE,
        response_queue='',
        response_exchange=RESPONSE_EXCHANGE_NAME
    )
    response = await rpc_client.send(payload={'name': 'microservice', 'version': '1.0.0'})

    # The reply carries a bare 'OK' content and no event marker.
    assert Response.CONTENT_FIELD_NAME in response
    assert response[Response.CONTENT_FIELD_NAME] == 'OK'
    assert Response.EVENT_FIELD_NAME in response
    assert response[Response.EVENT_FIELD_NAME] is None

    await amqp.deinit(event_loop)
@pytest.mark.asyncio
async def test_rpc_amqp_client_returns_ok_with_custom_event_loop(event_loop):
    """Same happy path as above, but the client gets an explicit event loop."""
    application = Application(config=FakeConfig(), loop=event_loop)
    worker = FakeRegisterMicroserviceWorker(application)
    amqp = AmqpExtension(application)
    amqp.register_worker(worker)
    await amqp.init(event_loop)

    rpc_client = RpcAmqpClient(
        app=application,
        routing_key=REQUEST_QUEUE,
        request_exchange=REQUEST_EXCHANGE,
        response_queue='',
        response_exchange=RESPONSE_EXCHANGE_NAME,
        loop=event_loop
    )
    response = await rpc_client.send(payload={'name': 'microservice', 'version': '1.0.0'})

    # The custom loop must not change the response shape.
    assert Response.CONTENT_FIELD_NAME in response
    assert response[Response.CONTENT_FIELD_NAME] == 'OK'
    assert Response.EVENT_FIELD_NAME in response
    assert response[Response.EVENT_FIELD_NAME] is None

    await amqp.deinit(event_loop)
@pytest.mark.asyncio
async def test_rpc_amqp_client_returns_an_error(event_loop):
    """An empty payload fails validation and yields a structured error reply."""
    application = Application(config=FakeConfig(), loop=event_loop)
    worker = FakeRegisterMicroserviceWorker(application)
    amqp = AmqpExtension(application)
    amqp.register_worker(worker)
    await amqp.init(event_loop)

    rpc_client = RpcAmqpClient(
        app=application,
        routing_key=REQUEST_QUEUE,
        request_exchange=REQUEST_EXCHANGE,
        response_queue='',
        response_exchange=RESPONSE_EXCHANGE_NAME
    )
    response = await rpc_client.send(payload={})

    # The error block must name the validation failure and its description.
    assert Response.ERROR_FIELD_NAME in response
    error = response[Response.ERROR_FIELD_NAME]
    assert Response.ERROR_TYPE_FIELD_NAME in error
    assert error[Response.ERROR_TYPE_FIELD_NAME] == VALIDATION_ERROR  # NOQA
    assert Response.ERROR_DETAILS_FIELD_NAME in error
    assert error[Response.ERROR_DETAILS_FIELD_NAME] == VALIDATION_ERROR_DECR  # NOQA
    assert Response.EVENT_FIELD_NAME in response
    assert response[Response.EVENT_FIELD_NAME] is None

    await amqp.deinit(event_loop)
| 35.61 | 114 | 0.775344 | 0 | 0 | 0 | 0 | 2,977 | 0.836001 | 2,914 | 0.818309 | 98 | 0.02752 |
4a21f3279034131e287608aa7f238be08a6231f6 | 986 | py | Python | project4github/largest_digit.py | chinkaih319/SC101 | 25c179c96e0a2bbc4e47768c029ee4bf49e06245 | [
"MIT"
] | null | null | null | project4github/largest_digit.py | chinkaih319/SC101 | 25c179c96e0a2bbc4e47768c029ee4bf49e06245 | [
"MIT"
] | null | null | null | project4github/largest_digit.py | chinkaih319/SC101 | 25c179c96e0a2bbc4e47768c029ee4bf49e06245 | [
"MIT"
] | null | null | null | """
File: largest_digit.py
Name:
----------------------------------
This file recursively prints the biggest digit in
5 different integers, 12345, 281, 6, -111, -9453
If your implementation is correct, you should see
5, 8, 6, 1, 9 on Console.
"""
def main():
    """Print the largest digit of each demo integer (expected: 5, 8, 6, 1, 9)."""
    for sample in (12345, 281, 6, -111, -9453):
        print(find_largest_digit(sample))
def find_largest_digit(n):
    """Kick off the recursive digit scan of ``n``.

    Starts ``helper`` at digit index 0 with a best-so-far of 0 and returns
    whatever the recursion yields.
    """
    return helper(n, 0, 0)
def helper(n, time, bs):
    """Recursively find the largest decimal digit of ``n``.

    Parameters
    ----------
    n : int
        The number being scanned (sign is stripped on first recursion).
    time : int
        Index of the digit currently examined (the 10**time place).
    bs : int
        Best (largest) digit seen so far.

    Bug fix: the original base case ``0 <= n <= 10`` returned 10 for
    ``n == 10`` (and for -10 via the sign flip), although the largest digit
    of 10 is 1. The base case now covers single digits only.
    """
    if 0 <= n <= 9:
        # Single digit: it is its own largest digit.
        return n
    if n < 0:
        # Digits of a negative number are those of its absolute value.
        return helper(-n, time, bs)
    if n < 10 ** (time + 1):
        # Reached the most significant digit; compare it with the best so far.
        first = n // (10 ** time)
        return first if first > bs else bs
    # Extract the digit at the 10**time place and continue with the next one.
    digit = n // (10 ** time) - (n // (10 ** (time + 1))) * 10
    return helper(n, time + 1, max(bs, digit))
if __name__ == '__main__':
    # Run the digit demo when executed as a script.
    main()
| 18.603774 | 54 | 0.558824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.30426 |
4a2373574a8c20f2e658dd78ede53ea3d56378cc | 1,196 | py | Python | eclipse-mosquitto/test/broker/07-will-no-flag.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | 2 | 2021-04-20T14:28:59.000Z | 2021-05-06T07:46:53.000Z | eclipse-mosquitto/test/broker/07-will-no-flag.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | null | null | null | eclipse-mosquitto/test/broker/07-will-no-flag.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# Test whether a connection is disconnected if it sets the will flag but does
# not provide a will payload.
from mosq_test_helper import *
def do_test(proto_ver):
rc = 1
keepalive = 10
connect_packet = mosq_test.gen_connect("will-no-payload", keepalive=keepalive, will_topic="will/topic", will_qos=1, will_retain=True, proto_ver=proto_ver)
b = list(struct.unpack("B"*len(connect_packet), connect_packet))
bmod = b[0:len(b)-2]
bmod[1] = bmod[1] - 2 # Reduce remaining length by two to remove final two payload length values
connect_packet = struct.pack("B"*len(bmod), *bmod)
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, b"", port=port)
sock.close()
rc = 0
except mosq_test.TestError:
pass
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
print("proto_ver=%d" % (proto_ver))
exit(rc)
do_test(proto_ver=4)
do_test(proto_ver=5)
exit(0)
| 28.47619 | 158 | 0.655518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.218227 |
4a243a51f1c3704674b477f2c65030fc5b33c2c9 | 563 | py | Python | setup.py | gsgoncalves/K-NRM | b7bc8c44ddf6c8d0bc14a399beb05c9c1956fe2f | [
"BSD-3-Clause"
] | 198 | 2017-11-02T18:11:44.000Z | 2022-03-26T00:03:03.000Z | setup.py | gsgoncalves/K-NRM | b7bc8c44ddf6c8d0bc14a399beb05c9c1956fe2f | [
"BSD-3-Clause"
] | 18 | 2017-11-14T08:04:21.000Z | 2022-01-14T09:09:45.000Z | setup.py | gsgoncalves/K-NRM | b7bc8c44ddf6c8d0bc14a399beb05c9c1956fe2f | [
"BSD-3-Clause"
] | 43 | 2017-11-02T16:43:35.000Z | 2021-06-13T09:22:13.000Z | # Copyright (c) 2017, Carnegie Mellon University. All rights reserved.
#
# Use of the K-NRM package is subject to the terms of the software license set
# forth in the LICENSE file included with this software, and also available at
# https://github.com/AdeDZY/K-NRM/blob/master/LICENSE
from setuptools import setup
from setuptools import find_packages
setup(name='knrm',
version='0',
description='knrm',
author='Zhuyun Dai and Chenyan Xiong',
install_requires=['numpy', 'traitlets', 'tensorflow'],
packages=find_packages()
)
| 31.277778 | 78 | 0.719361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.630551 |
4a26cd7230b52b52759159c2aeac729e2261aebf | 2,674 | py | Python | tohu/v4/set_special_methods.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | 1 | 2019-03-07T19:58:45.000Z | 2019-03-07T19:58:45.000Z | tohu/v4/set_special_methods.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | 9 | 2017-10-04T15:08:53.000Z | 2021-02-02T21:51:41.000Z | tohu/v4/set_special_methods.py | maxalbert/tohu | 3adf0c58b13ef1e1d716d7d613484d2adc58fb60 | [
"MIT"
] | null | null | null | """
This module is not meant to be imported directly.
Its purpose is to patch the TohuBaseGenerator class
so that its special methods __add__, __mul__ etc.
support other generators as arguments.
"""
from operator import add, mul, gt, ge, lt, le, eq
from .base import TohuBaseGenerator
from .primitive_generators import GeoJSONGeolocation, SelectOnePrimitive, Timestamp, as_tohu_generator
from .derived_generators import Apply, GetAttribute, SelectOneDerived
from .utils import identity
__all__ = []
def add_generators(self, other):
return Apply(add, self, as_tohu_generator(other))
def radd_generators(self, other):
return Apply(add, as_tohu_generator(other), self)
def mul_generators(self, other):
return Apply(mul, self, as_tohu_generator(other))
def rmul_generators(self, other):
return Apply(mul, as_tohu_generator(other), self)
def eq_generators(self, other):
return Apply(eq, self, as_tohu_generator(other))
def lt_generators(self, other):
return Apply(lt, self, as_tohu_generator(other))
def le_generators(self, other):
return Apply(le, self, as_tohu_generator(other))
def gt_generators(self, other):
return Apply(gt, self, as_tohu_generator(other))
def ge_generators(self, other):
return Apply(ge, self, as_tohu_generator(other))
# Patch TohuBaseGenerator with the new methods
TohuBaseGenerator.__add__ = add_generators
TohuBaseGenerator.__radd__ = radd_generators
TohuBaseGenerator.__mul__ = mul_generators
TohuBaseGenerator.__rmul__ = rmul_generators
TohuBaseGenerator.__eq__ = eq_generators
TohuBaseGenerator.__lt__ = lt_generators
TohuBaseGenerator.__le__ = le_generators
TohuBaseGenerator.__gt__ = gt_generators
TohuBaseGenerator.__ge__ = ge_generators
def getattribute_generators(self, name):
if name == '__isabstractmethod__':
# Special case which is needed because TohuUltraBaseMeta is
# derived from ABCMeta and it uses '__isabstractmethod__'
# to check for abstract methods.
#
# TODO: This check should probably be moved to TohuUltraBaseGenerator somewhere.
return
if name == '_ipython_canary_method_should_not_exist_':
# Special case which is needed because IPython uses this attribute internally.
raise NotImplementedError("Special case needed for IPython")
return GetAttribute(self, name)
SelectOnePrimitive.__getattr__ = getattribute_generators
SelectOneDerived.__getattr__ = getattribute_generators
def split_geolocation(self):
attributes = ['lon', 'lat'] + self.include_attributes
return tuple(GetAttribute(self, attr_name) for attr_name in attributes)
GeoJSONGeolocation.split = split_geolocation
| 29.384615 | 102 | 0.775243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 658 | 0.246073 |
c57aaa710fcf9be54652ebcd007f595595a9e9c7 | 12,486 | py | Python | backend/bot/bot.py | makarbaderko/hackathon_itgenio_2020.2 | 35dc545a749af2a10d9e1584bbba97e0b5e48af9 | [
"MIT"
] | null | null | null | backend/bot/bot.py | makarbaderko/hackathon_itgenio_2020.2 | 35dc545a749af2a10d9e1584bbba97e0b5e48af9 | [
"MIT"
] | null | null | null | backend/bot/bot.py | makarbaderko/hackathon_itgenio_2020.2 | 35dc545a749af2a10d9e1584bbba97e0b5e48af9 | [
"MIT"
] | null | null | null | #Imports
import config
import telebot
from db_manager import SQL
import os
#Globals
# Known restaurant/courier credentials (id -> key) and per-session state.
restaurants = {"12345":"abc"}
current_restaurant_id = ""
current_restaurant_key = ""
couriers = {"12345":"abc"}
current_courier_id = ""
current_courier_key = ""
# NOTE(review): altitude/longitude are never updated anywhere in this file.
current_courier_altitude = 0
current_courier_longitude = 0
#DB + BOT CONNECTION
token = config.API_KEY
bot = telebot.TeleBot(token)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, "database.db")
db = SQL(db_path)
# Reply keyboards for each dialogue step; the handlers match these button
# texts verbatim, so the strings must stay byte-identical.
keyboard1 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard1.row('Клиент', 'Ресторан', "Курьер")
keyboard2 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard2.row("Сделать заказ", "Проверить статус заказа")
keyboard3 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard3.row("Да, досталяйте туда же", "Нет, я сейчас введу новые")
keyboard4 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard4.row("Москва", "Санкт-Петербург")
keyboard5 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard5.row("Да, сохраняйте", "Нет, спасибо.")
keyboard6 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard6.row("KFC", "McDonalds")
keyboard7 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard7.row("Получить список заказов", "Заказ отдан курьеру", "Что в заказе?")
keyboard8 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard8.row("Уже зарегистрированы", "Хотим подключиться")
keyboard9 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard9.row("Cэндвичи", "Бургеры")
keyboard10 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard10.row("Баскеты", "Твистеры")
keyboard11 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard11.row("Сандерс Баскет", "Баскет Дуэт")
keyboard11.row("Домашний Баскет", "Баскет L")
keyboard12 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard12.row("Да", "Нет")
keyboard13 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard13.row("Закончить оформление заказа", "Прлолжить оформление заказа")
keyboard14 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard14.row("Уже зарегистрирован", "Хочу подключиться")
keyboard15 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard15.row("Готов принять заказ", "Передал заказ клиенту")
# In-progress order state for the current client session.
current_client = {"username": None, "name": None, "surname": None, "phone": None, "adress": None, "city": None}
foods = ""
current_client_for_restaurant = {"order_id": None}
@bot.message_handler(commands=['start'])
def start_message(message):
    """Entry point for /start: ask the user which role they have."""
    prompt = bot.reply_to(message, 'Здравствуйте, скажите, пожалуйста, кто Вы?', reply_markup=keyboard1)
    bot.register_next_step_handler(prompt, process_client_1)
@bot.message_handler(content_types=['text'])
def process_client_1(message):
    """Route the user into the client / restaurant / courier flow.

    Bug fix: the original used independent ``if`` statements, so the final
    ``else`` fired for every text except "Курьер" — clients and restaurants
    also received the 'In development' message. Chained with ``elif``.
    """
    text = message.text
    if text == "Клиент":
        msg = bot.reply_to(message, 'Отлично, что бы Вы хотели сделать?', reply_markup=keyboard2)
        bot.register_next_step_handler(msg, process_client_2)
    elif text == 'Ресторан':
        msg = bot.reply_to(message, 'Отлично! Вы уже зарегистрированы в нашей системе?', reply_markup=keyboard8)
        bot.register_next_step_handler(msg, process_restaurant_1)
    elif text == "Курьер":
        msg = bot.reply_to(message, 'Отлично! Вы уже зарегистрированы в нашей системе?', reply_markup=keyboard14)
        bot.register_next_step_handler(msg, process_courier_1)
    else:
        bot.send_message(message.chat.id, 'In development')
def process_courier_1(message):
    """Ask an existing courier for credentials, or point newcomers to support."""
    choice = message.text
    if choice == "Уже зарегистрирован":
        reply = bot.reply_to(message, 'Введите, пожалуйста, Ваш ID курьера')
        bot.register_next_step_handler(reply, process_courier_1_1)
    elif choice == "Хочу подключиться":
        bot.send_message(message.chat.id, "Напишите мне в Telegram на t.me/makarbaderko для подключения к нашей сети доставки")
def process_courier_1_1(message):
    """Remember the courier ID and ask for the matching key."""
    global current_courier_id  # rebinds the module-level session state
    current_courier_id = message.text
    reply = bot.reply_to(message, 'Введите, пожалуйста, Ваш key курьера')
    bot.register_next_step_handler(reply, process_courier_1_2)
def process_courier_1_2(message):
    """Check the courier's key; on success show the courier action menu.

    NOTE(review): an unknown courier ID raises KeyError here, and a wrong key
    is silently ignored — kept as in the original.
    """
    global current_courier_id, current_courier_key
    current_courier_key = message.text
    if couriers[current_courier_id] == current_courier_key:
        reply = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard15)
        bot.register_next_step_handler(reply, process_courier_2)
@bot.message_handler(content_types=['location'])
def process_courier_2(message):
    """Courier menu: hand out a pending order, or record a completed delivery."""
    if message.text == "Готов принять заказ":
        order_info = db.get_random_order()
        bot.send_message(message.chat.id, order_info)
        reply = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard15)
        bot.register_next_step_handler(reply, process_courier_2)
    elif message.text == "Передал заказ клиенту":
        reply = bot.reply_to(message, 'Введите, пожалуйста, номер переданного клиенту заказа?')
        bot.register_next_step_handler(reply, process_courier_3)
def process_courier_3(message):
    """Mark the given order as handed over to the client, then reshow the menu."""
    # Update the order status in the database.
    db.update_status(message.text, "FINISHED")
    bot.send_message(message.chat.id, "Статус заказа изменен на передан клиенту. Заказ в скором времени будет удален. Деньги будут перечислены Вам в течении 24-х часов")
    reply = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard15)
    bot.register_next_step_handler(reply, process_courier_2)
def process_client_2(message):
    """Client menu: start a new order, or report that status is unavailable."""
    if message.text == "Сделать заказ":
        current_client["username"] = message.from_user.username
        # ``== True`` kept on purpose: db.user_exists' return type is not
        # visible here, so truthiness could differ.
        if db.user_exists(current_client["username"]) == True:
            reply = bot.reply_to(message, 'Вы уже заказывали у нас, можем ли мы использовать данные с прошлого заказа?', reply_markup=keyboard3)
            bot.register_next_step_handler(reply, process_client_3_yes)
        else:
            bot.send_message(message.chat.id, 'Сейчас мы попросим ввести Ваши данные, для нашей службы доставки.')
            process_client_4(message)
    else:
        bot.send_message(message.chat.id, 'Поскольку в гашей системе пока нет зарегистрированных курьеров, поэтому статус заказа узнать нельзя.')
def process_client_3_yes(message):
    """Reuse saved delivery details, or start collecting new ones."""
    if message.text == "Нет, я сейчас введу новые":
        reply = bot.reply_to(message, 'Выберите, пожалуйста, Ваш город', reply_markup=keyboard4)
        bot.register_next_step_handler(reply, process_client_4)
    else:
        process_client_7_1(message)
def process_client_4(message):
    """Store the client's city, then ask for the street address."""
    # Mutating the shared dict needs no ``global`` declaration.
    current_client["city"] = message.text
    reply = bot.reply_to(message, 'Введите, пожалуйста, Ваш адрес.')
    bot.register_next_step_handler(reply, process_client_5)
def process_client_5(message):
    """Store the client's address, then ask for the phone number."""
    current_client["adress"] = message.text
    reply = bot.reply_to(message, 'Введите, пожалуйста, Ваш номер телефона.')
    bot.register_next_step_handler(reply, process_client_6)
def process_client_6(message):
    """Store the phone number, then ask for consent to keep the data."""
    current_client["phone"] = message.text
    reply = bot.reply_to(message, 'Хотите ли Вы, чтобы мы сохранили Ваши данные у себя, для упрощения создания заказа Вами в будущем?', reply_markup=keyboard5)
    bot.register_next_step_handler(reply, process_client_7)
def process_client_7(message):
    """Handle the save-my-data answer, then ask where to order from.

    NOTE(review): persisting the client (db.add_user) was commented out in
    the original, so both answers led to exactly the same two statements;
    the branches are merged here with behaviour unchanged.
    """
    reply = bot.reply_to(message, 'Где бы Вы хотели заказать еду?', reply_markup=keyboard6)
    bot.register_next_step_handler(reply, process_client_8)
def process_client_7_1(message):
    """Shortcut for returning clients: jump straight to restaurant choice."""
    reply = bot.reply_to(message, 'Где бы Вы хотели заказать еду?', reply_markup=keyboard6)
    bot.register_next_step_handler(reply, process_client_8)
def process_client_8(message):
    """Show the menu categories for the chosen restaurant."""
    if message.text == "KFC":
        reply = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard10)
        bot.register_next_step_handler(reply, process_client_8_1)
    elif message.text == "McDonalds":
        reply = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard9)
        bot.register_next_step_handler(reply, process_client_8_2)
def process_client_8_1(message):
    """Show KFC items for the chosen category ("Твистеры" is an empty stub)."""
    if message.text == "Баскеты":
        reply = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard11)
        bot.register_next_step_handler(reply, process_client_9_1)
    elif message.text == "Твистеры":
        pass  # category not implemented yet
def process_client_8_2(message):
    """McDonalds categories — both branches are unimplemented stubs."""
    if message.text == "Cэндвичи":
        pass  # not implemented yet
    elif message.text == "Бургеры":
        pass  # not implemented yet
def process_client_9_1(message):
    """Append the chosen dish to the running order string."""
    global foods  # rebinding a module-level string requires the declaration
    foods += str(message.text) + " "
    reply = bot.reply_to(message, 'Что-нибудь еще?', reply_markup=keyboard12)
    bot.register_next_step_handler(reply, process_client_10)
def process_client_10(message):
    """Either loop back to the menu or move on to checkout confirmation."""
    if message.text == "Да":
        reply = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard10)
        bot.register_next_step_handler(reply, process_client_8_1)
    else:
        reply = bot.reply_to(message, 'Закончим?', reply_markup=keyboard13)
        bot.register_next_step_handler(reply, process_client_11)
def process_client_11(message):
    # Final step: confirm that the order went to the delivery service.
    bot.send_message(message.chat.id, "Заказ передан в службу доставки. Ожидайте. По всем вопросам звоните на +8 (800) 555-35-35")
def process_restaurant_1(message):
    """Ask an existing restaurant for credentials, or point newcomers to support."""
    if message.text == "Уже зарегистрированы":
        reply = bot.reply_to(message, 'Введите, пожалуйста, Ваш ID ресторана')
        bot.register_next_step_handler(reply, process_restaurant_1_1)
    elif message.text == "Хотим подключиться":
        bot.send_message(message.chat.id, "Напишите нам в Telegram на t.me/makarbaderko для подключения к нашей сети доставки")
def process_restaurant_1_1(message):
    """Remember the restaurant ID and ask for the matching key."""
    global current_restaurant_id  # rebinds the module-level session state
    current_restaurant_id = message.text
    reply = bot.reply_to(message, 'Введите, пожалуйста, Ваш key ресторана')
    bot.register_next_step_handler(reply, process_restaurant_1_2)
def process_restaurant_1_2(message):
    """Check the restaurant's key; on success show the restaurant menu.

    NOTE(review): an unknown restaurant ID raises KeyError, and a wrong key
    is silently ignored — kept as in the original.
    """
    global current_restaurant_id, current_restaurant_key
    current_restaurant_key = message.text
    if restaurants[current_restaurant_id] == current_restaurant_key:
        reply = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
        bot.register_next_step_handler(reply, process_restaurant_2)
def process_restaurant_2(message):
    """Restaurant menu: list orders, mark one as handed over, or show contents."""
    if message.text == "Получить список заказов":
        orders = db.get_all_orders()
        summary = "".join(
            f"Номер заказа: {order[0]} Состав заказа: {order[1]} Курьер: {order[2]}\n"
            for order in orders
        )
        bot.send_message(message.chat.id, summary)
        reply = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
        bot.register_next_step_handler(reply, process_restaurant_2)
    elif message.text == "Заказ отдан курьеру":
        reply = bot.reply_to(message, 'Введите, пожалуйста, номер переданного заказа.')
        bot.register_next_step_handler(reply, process_restaurant_2_1)
    elif message.text == "Что в заказе?":
        reply = bot.reply_to(message, 'Введите, пожалуйста, номер заказа, по которму идет поиск')
        bot.register_next_step_handler(reply, process_restaurant_2_2)
def process_restaurant_2_1(message):
    """Mark the given order as handed to a courier, then reshow the menu."""
    order_id = int(message.text)
    print(order_id)  # debug trace kept from the original
    db.update_status(order_id, "BEEN_DELIVERED")
    bot.send_message(message.chat.id, "Статус заказа изменен на: Передан Курьеру")
    reply = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
    bot.register_next_step_handler(reply, process_restaurant_2)
def process_restaurant_2_2(message):
    """Show the contents of one order, then reshow the menu."""
    rows = db.get_food(message.text)
    # First row, second column holds the order's food list.
    bot.send_message(message.chat.id, rows[0][1])
    reply = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
    bot.register_next_step_handler(reply, process_restaurant_2)
bot.polling() | 47.116981 | 213 | 0.744594 | 0 | 0 | 0 | 0 | 1,825 | 0.125421 | 0 | 0 | 5,492 | 0.377431 |
c57c19f3188cfdc20686ab1aee33afbfe3ee78b0 | 1,205 | py | Python | Project/HDLT/User/getCloseUsers.py | Opty-MSc/HDS | 5ffff7908d185bbbf57cef4b9516ecd4b4ac70c8 | [
"MIT"
] | null | null | null | Project/HDLT/User/getCloseUsers.py | Opty-MSc/HDS | 5ffff7908d185bbbf57cef4b9516ecd4b4ac70c8 | [
"MIT"
] | null | null | null | Project/HDLT/User/getCloseUsers.py | Opty-MSc/HDS | 5ffff7908d185bbbf57cef4b9516ecd4b4ac70c8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from math import hypot
from sys import argv
from typing import List, Dict, Tuple
def main(fn, maxD):
    """Read a position log and print, per epoch, each user's nearby users.

    Each line of ``fn`` is "user;epoch;x;y". Two users are "close" within an
    epoch when the Euclidean distance between their positions is strictly
    less than ``maxD``. Output format (one block per epoch, insertion order):
    "Epoch: <e>" followed by "<user>: [<close users>]" lines.
    """
    with open(fn) as handle:
        lines = handle.readlines()
    # epoch -> {username: (x, y)}
    per_epoch: Dict[int, Dict[str, Tuple[int, int]]] = {}
    for line in lines:
        fields = line.split(';')
        epoch = int(fields[1])
        per_epoch.setdefault(epoch, {})[fields[0]] = (int(fields[2]), int(fields[3]))
    for epoch, users in per_epoch.items():
        print(f"Epoch: {epoch}")
        for name, (x, y) in users.items():
            neighbours = [
                other
                for other, (ox, oy) in users.items()
                if other != name and hypot(ox - x, oy - y) < maxD
            ]
            print(f"{name}: {neighbours}")
if __name__ == '__main__':
    # CLI entry point: argv[1] is the grid file, argv[2] the distance threshold.
    if len(argv) < 3:
        print("Usage: [Grid Filename] [Max Distance]")
    else:
        main(argv[1], float(argv[2]))
| 33.472222 | 108 | 0.582573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.096266 |
c57cdef697b1ae7480e0770028e9b4a5e38b5778 | 2,155 | py | Python | stock/migrations/0020_stockproductcds_stockproductdis.py | unicefburundi/paludisme | 775af3c15349d4437e3780690eb6fa2ea8622ee7 | [
"MIT"
] | 1 | 2017-04-26T10:09:12.000Z | 2017-04-26T10:09:12.000Z | stock/migrations/0020_stockproductcds_stockproductdis.py | srugano/paludisme | 775af3c15349d4437e3780690eb6fa2ea8622ee7 | [
"MIT"
] | null | null | null | stock/migrations/0020_stockproductcds_stockproductdis.py | srugano/paludisme | 775af3c15349d4437e3780690eb6fa2ea8622ee7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-10 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the weekly ``StockProductCDS`` and
    ``StockProductDis`` stock tables (per-CDS and per-district quantities)."""

    # Requires the CDS/District models and the previous stock migration.
    dependencies = [
        ("bdiadmin", "0013_auto_20170319_1415"),
        ("stock", "0019_stockproductprov"),
    ]

    operations = [
        migrations.CreateModel(
            name="StockProductCDS",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("year", models.PositiveIntegerField(default=2017)),
                ("week", models.CharField(max_length=3)),
                ("product", models.CharField(max_length=50)),
                ("quantity", models.FloatField(default=0.0)),
                # Each stock row belongs to one CDS (health centre).
                (
                    "cds",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="bdiadmin.CDS"
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="StockProductDis",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("year", models.PositiveIntegerField(default=2017)),
                ("week", models.CharField(max_length=3)),
                ("product", models.CharField(max_length=50)),
                ("quantity", models.FloatField(default=0.0)),
                # Each stock row belongs to one district.
                (
                    "district",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="bdiadmin.District",
                    ),
                ),
            ],
        ),
    ]
| 32.164179 | 86 | 0.429234 | 1,964 | 0.911369 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.137355 |
c57e5f3b0b518154b490e3d006aecf49846e001b | 5,271 | py | Python | source_synphot/source.py | gnarayan/source_synphot | 3bc3d48217ad7ea5630131e68fd3c544d14d10f6 | [
"MIT"
] | null | null | null | source_synphot/source.py | gnarayan/source_synphot | 3bc3d48217ad7ea5630131e68fd3c544d14d10f6 | [
"MIT"
] | null | null | null | source_synphot/source.py | gnarayan/source_synphot | 3bc3d48217ad7ea5630131e68fd3c544d14d10f6 | [
"MIT"
] | 1 | 2020-05-05T04:38:41.000Z | 2020-05-05T04:38:41.000Z | # -*- coding: UTF-8 -*-
"""
Source processing routines
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import warnings
from collections import OrderedDict
from astropy.cosmology import default_cosmology
import numpy as np
import os
import pysynphot as S
import astropy.table as at
from . import io
from . import passband
def load_source(sourcenames):
    """
    Loads sources

    Parameters
    ----------
    sourcenames : array-like
        The source names. Passed to :py:func:`source_synphot.io.read_source`

    Returns
    -------
    sources : dict
        The dictionary of source spectra, keyed by source name

    Notes
    -----
    Sources that fail to load are skipped with a ``RuntimeWarning`` rather
    than aborting the whole batch.

    See Also
    --------
    :py:func:`source_synphot.io.read_source`
    """
    sources = OrderedDict()
    # Accept a scalar name or any array-like of names.
    if np.isscalar(sourcenames):
        sourcenames = np.array(sourcenames, ndmin=1)
    else:
        sourcenames = np.array(sourcenames).flatten()
    for source in sourcenames:
        try:
            thissource = io.read_source(source)
        except Exception:
            # Best-effort batch load: warn and continue with the rest.
            message = 'Source {} not loaded'.format(source)
            warnings.warn(message, RuntimeWarning)
            continue
        sources[source] = thissource
    return sources
def pre_process_source(source, sourcemag, sourcepb, sourcez, smooth=True):
    """
    Pre-process a source at some redshift ``sourcez`` back to the rest-frame
    and normalize it to have magnitude ``sourcemag`` in passband ``sourcepb``

    Parameters
    ----------
    source : str
        The source spectrum filename (looked up in ``sources/sourcetable.txt``
        first; table entries override ``sourcez``)
    sourcemag : float
        The magnitude of the source spectrum in passband ``sourcepb``
    sourcepb : :py:class:`pysynphot.spectrum.ArraySpectralElement`
        The passband in which `source` has magnitude ``sourcemag``
    sourcez : float
        The redshift of `source`
    smooth : bool, optional
        Smooth the spectrum (default: True)
        NOTE(review): this flag is never used in the body — confirm intent.

    Returns
    -------
    source : :py:class:`pysynphot.ArraySpectrum`
        The de-redshifted and normalized spectrum

    Raises
    ------
    ValueError
        If the spectrum file is missing/unreadable, or the magnitude or
        redshift cannot be coerced to float, or the redshift is negative.
    RuntimeError
        If the passband cannot be loaded or the spectrum cannot be
        renormalized.

    See Also
    --------
    :py:func:`astropy.table.Table.read`
    """
    # Defaults used when the source is not found in the lookup table.
    inspec = None
    inspecz = np.nan
    inspecmag = np.nan
    inspecpb = None
    # Look the source up in the packaged table of known spectra.
    source_table_file = os.path.join('sources', 'sourcetable.txt')
    source_table_file = io.get_pkgfile(source_table_file)
    source_table = at.Table.read(source_table_file, format='ascii')
    ind = (source_table['specname'] == source)
    nmatch = len(source_table['specname'][ind])
    if nmatch == 1:
        # load the file and the info
        inspec = source_table['specname'][ind][0]
        inspecz = source_table['redshift'][ind][0]
        inspecmag = source_table['g'][ind][0] # for now, just normalize the g-band mag
    elif nmatch == 0:
        message = 'Spectrum {} not listed in lookup table'.format(source)
        pass
    else:
        message = 'Spectrum {} not uniquely listed in lookup table'.format(source)
        pass
    # Fall back to treating ``source`` as a plain file path with the
    # caller-supplied magnitude/passband/redshift.
    if inspec is None:
        warnings.warn(message, RuntimeWarning)
        inspec = source
        inspecz = sourcez
        inspecmag = sourcemag
        inspecpb = sourcepb
        if not os.path.exists(inspec):
            message = 'Spectrum {} could not be found'.format(inspec)
            raise ValueError(message)
    try:
        spec = at.Table.read(inspec, names=('wave','flux'), format='ascii')
    except Exception as e:
        message = 'Could not read file {}'.format(source)
        raise ValueError(message)
    # If inspecpb does not already look like a passband object, resolve it
    # by name via the passband loader.
    if hasattr(inspecpb,'wave') and hasattr(inspecpb, 'throughput'):
        pass
    else:
        pbs = passband.load_pbs([inspecpb], 0.)
        try:
            inspecpb = pbs[inspecpb][0]
        except KeyError as e:
            message = 'Could not load passband {}'.format(inspecpb)
            raise RuntimeError(message)
    try:
        inspecmag = float(inspecmag)
    except (TypeError, ValueError) as e:
        message = 'Source magnitude {} could not be interpreted as a float'.format(inspecmag)
        raise ValueError(message)
    try:
        inspecz = float(inspecz)
    except (TypeError, ValueError) as e:
        message = 'Source redshift {} could not be interpreted as a float'.format(inspecz)
        raise ValueError(message)
    if inspecz < 0 :
        message = 'Source must have positive definite cosmological redshift'
        raise ValueError(message)
    inspec = S.ArraySpectrum(spec['wave'], spec['flux'], fluxunits='flam')
    try:
        # NOTE(review): renorm uses ``sourcemag`` although the table lookup
        # above computed ``inspecmag`` — confirm which magnitude is intended.
        inspec = inspec.renorm(sourcemag, 'ABmag', inspecpb)
        inspec.convert('flam')
    except Exception as e:
        message = 'Could not renormalize spectrum {}'.format(inspec)
        raise RuntimeError(message)
    if inspecz > 0:
        # Blueshift back to the rest frame and undo cosmological dimming
        # using the distance modulus at ``inspecz``.
        zblue = 1./(1+inspecz) - 1.
        inspec_rest = inspec.redshift(zblue)
        inspec_rest.convert('flam')
        c = default_cosmology.get()
        mu = c.distmod(inspecz)
        out = inspec_rest*(10.**(0.4*mu.value))
    else:
        out = inspec
    # TODO renorm is basic and just calculates dmag = RNval - what the original spectrum's mag is
    # and renormalizes - there's some sanity checking for overlaps
    # we can do this without using it and relying on the .passband routines
    return out
| 30.824561 | 97 | 0.634415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,064 | 0.391577 |
c57ebe95c16965a0958db44e83cf0116a400cb09 | 415 | py | Python | objects.py | umesh0689/Mandorian | bb5bd963686c020482082dbb3e52218520410276 | [
"MIT"
] | null | null | null | objects.py | umesh0689/Mandorian | bb5bd963686c020482082dbb3e52218520410276 | [
"MIT"
] | null | null | null | objects.py | umesh0689/Mandorian | bb5bd963686c020482082dbb3e52218520410276 | [
"MIT"
] | null | null | null | from random import randint
class Objects:
    """Holds randomly generated game-object records in ``self._arr``."""

    def __init__(self):
        # Placeholder type tag plus the list of generated records.
        self._type = ' '
        self._arr = []

    def creating_objects(self):
        """Append 10-20 random ``[kind, column, offset]`` records to _arr.

        kind codes: 1=slant 2=horizontal 3=vertical 4=powerup
        (column/offset ranges presumably map to the game grid -- TODO confirm)
        """
        how_many = randint(10, 20)
        for _ in range(how_many):
            # randint calls happen in the same order as before:
            # kind, then column, then offset.
            record = [randint(1, 4), randint(1, 27), randint(0, 330)]
            self._arr.append(record)
c58214974ffbd819063cc0bd44598acdfc483c89 | 2,264 | py | Python | Code/GMM Test Scripts/yellowTest.py | Praveen1098/Gaussian_Mixture_Modelling | 653fee21eaf777172696ea0440fef5c29dcb4893 | [
"BSD-2-Clause"
] | null | null | null | Code/GMM Test Scripts/yellowTest.py | Praveen1098/Gaussian_Mixture_Modelling | 653fee21eaf777172696ea0440fef5c29dcb4893 | [
"BSD-2-Clause"
] | null | null | null | Code/GMM Test Scripts/yellowTest.py | Praveen1098/Gaussian_Mixture_Modelling | 653fee21eaf777172696ea0440fef5c29dcb4893 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
import math
def gaussian(x, mu, sig):
    """Normal probability density of *x* for mean *mu* and std-dev *sig*.

    Works element-wise when the arguments are array-like, via numpy
    broadcasting (callers pass a 0..255 intensity list and 1-element arrays).
    """
    normalisation = 1 / (sig * math.sqrt(2 * math.pi))
    exponent = -np.power(x - mu, 2.) / (2 * np.power(sig, 2.))
    return normalisation * np.exp(exponent)
# Per-channel 1-D Gaussians over the 0..255 intensity range; the mean/sigma
# values are pre-fitted (GMM) parameters for the yellow buoy's colour.
x=list(range(0, 256))
g1=gaussian(x,np.array([239.82]), np.array([2.53]))
g2=gaussian(x,np.array([221.93]), np.array([23.83]))   # NOTE: g2/r2/b2 are computed but never used below
r2=gaussian(x, np.array([217.47]),np.array([29.43]))
r1=gaussian(x, np.array([230.01]),np.array([3.675]))
b1=gaussian(x, np.array([103.60]),np.array([18.64]))
b2=gaussian(x, np.array([164.96]),np.array([27.56]))
path=os.getcwd()+"/"+"detectbuoy.avi"
c=cv2.VideoCapture(path)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter('yellowDetection.mp4', fourcc, 15, (640, 480))
while (True):
    # Read frames until the capture is exhausted.
    ret,image=c.read()
    if ret==False:
        break
    b,g,r=cv2.split(image)
    if ret == True:  # always true here (checked above); kept as-is
        # Per-pixel likelihood test: mark a pixel when all three channel
        # densities exceed their thresholds.
        img_out3=np.zeros(g.shape, dtype = np.uint8)
        for index, v in np.ndenumerate(g):
            av=int((int(v)+int(r[index]))/2)  # NOTE: computed but unused
            if ( g1[v]>0.08) and (r1[r[index]] >0.04) and (b1[b[index]]>0.01 ):
                img_out3[index]=255
            else:
                img_out3[index]=0
        # Binarise, dilate to close holes, then find candidate blobs.
        ret, threshold3 = cv2.threshold(img_out3, 240, 255, cv2.THRESH_BINARY)
        kernel3 = np.ones((2,2),np.uint8)
        dilation3 = cv2.dilate(threshold3,kernel3,iterations =8)
        contours3, _= cv2.findContours(dilation3, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours3:
            if cv2.contourArea(contour) > 50:
                # NOTE: 'x' here shadows the intensity list defined above.
                (x,y),radius = cv2.minEnclosingCircle(contour)
                center = (int(x),int(y))
                radius = int(radius)
                # print(radius)
                if radius > 12:
                    # Draw a yellow circle around an accepted detection.
                    cv2.circle(image,center,radius,(0,255,255),2)
        cv2.imshow("Threshold",dilation3)
        cv2.imshow('Yellow Ball Segmentation', image)
        cv2.imwrite('yellow.jpg', image)
        out.write(image)
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break # wait for ESC key to exit
    else:
        break
c.release()
out.release()
cv2.destroyAllWindows()
| 31.444444 | 99 | 0.560512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.079947 |
c582597e359b4f6dc67ebe89e7ea7ad56c231461 | 264 | py | Python | Python/w3resource/Challenge5.py | TakaIzuki/school-work | c6f8c4a8f1f07b7e4f027e4d53c40e868780a71b | [
"Unlicense"
] | 1 | 2021-03-12T14:57:38.000Z | 2021-03-12T14:57:38.000Z | Python/w3resource/Challenge5.py | TakaIzuki/school-work | c6f8c4a8f1f07b7e4f027e4d53c40e868780a71b | [
"Unlicense"
] | null | null | null | Python/w3resource/Challenge5.py | TakaIzuki/school-work | c6f8c4a8f1f07b7e4f027e4d53c40e868780a71b | [
"Unlicense"
] | null | null | null | sample = ["abc", "xyz", "aba", "1221"]
def stringCounter(items):
    """Count the strings in *items* that are at least two characters long
    and whose first and last characters are equal.

    >>> stringCounter(["abc", "xyz", "aba", "1221"])
    2
    """
    # sum() over a generator replaces the manual counter loop.
    return sum(1 for text in items if len(text) >= 2 and text[0] == text[-1])
print("The amount of string that meet the criteria is:",stringCounter(sample)) | 26.4 | 78 | 0.579545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.265152 |
c58302aba3746df3f777c33afe351a9fca738f8e | 3,937 | py | Python | trayapp/tray_app.py | RoW171/trayPy | d5ba97d144d00e175184c87d3ad4ff553b01eec6 | [
"MIT"
] | 3 | 2020-06-02T19:33:31.000Z | 2022-01-21T23:59:47.000Z | trayapp/tray_app.py | robin-weiland/trayPy | d5ba97d144d00e175184c87d3ad4ff553b01eec6 | [
"MIT"
] | null | null | null | trayapp/tray_app.py | robin-weiland/trayPy | d5ba97d144d00e175184c87d3ad4ff553b01eec6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Robin 'r0w' Weiland"
__date__ = "2020-03-02"
__version__ = "0.1.2"
"""Library for creating system tray applications
based on Moses Palmér's 'pystray' library
See README for insntructions"""
__all__ = ('TrayApp',)
from typing import Union, Tuple
from pystray import Icon
from PIL import Image
from pathlib import Path
from trayapp.tray_menu import TrayMenu
class TrayApp:
    """System-tray application wrapper around a :class:`pystray.Icon`.

    Intended to be used as a context manager: the ``with`` block yields the
    :class:`TrayMenu` so the caller can populate it; on a clean exit the menu
    is attached to the icon and the (blocking) event loop is started.
    """

    __slots__ = ('app', 'menu',)

    app: Icon       # underlying pystray icon / event loop
    menu: TrayMenu  # menu builder, filled in by the caller

    def __init__(self, name: str, icon_or_path: Union[str, Path, type(Image)], icon_size: Tuple[int, int]):
        # 'name' doubles as the tooltip title shown on hover.
        self.app = Icon(name=name, title=name)
        self.set_icon(icon_or_path, icon_size)
        self.menu = TrayMenu()

    # Render the attached menu (or the pending builder) for debugging.
    def __repr__(self) -> str: return str(self.app.menu or self.menu())
    __str__ = __repr__

    def __enter__(self) -> TrayMenu: return self.menu

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Only attach the menu and start the loop when the 'with' body
        # finished without an exception; run() blocks until stop().
        if not exc_type:
            self.app.menu = self.menu()
            self.app.update_menu()
            self.run()

    def __bool__(self): return self.app.visible

    def set_icon(self, icon_or_path: Union[str, Path, type(Image)], icon_size: Tuple[int, int]):
        """Load/thumbnail the icon image and install it on the tray icon.

        Accepts a ready PIL Image or a filesystem path (str or Path);
        raises FileNotFoundError when the path does not exist.
        """
        if not isinstance(icon_or_path, (Path, str)):
            (image := icon_or_path).thumbnail(icon_size)
        elif not (icon_or_path := Path(icon_or_path)).exists():
            raise FileNotFoundError(f'Could not find image: "{icon_or_path}"')
        else:
            (image := Image.open(icon_or_path)).thumbnail(icon_size)
        self.app.icon = image
        # NOTE(review): this sets .visible on the PIL image, not on the
        # pystray Icon -- presumably self.app.visible was intended; confirm.
        self.app.icon.visible = True

    # lambda to ensure that the icon is visible on every platform
    def run(self) -> None: self.app.run(lambda icon: setattr(icon, 'visible', True))

    def stop(self) -> None: self.app.stop()
if __name__ == '__main__':
    # Demo: build a tray app and populate its menu inside the context
    # manager; leaving the 'with' block attaches the menu and starts the
    # blocking event loop.
    with TrayApp(name='Test', # the little tooltip, seen when hovering over the icon
                 icon_or_path=Path(r'C:\Users\robin\Pictures\Wallpaper\cyberpunk_bttf.jpg'), # anything that can be transformed into a PIL.Image
                 icon_size=(256, 256,) # size to create the thumbnail
                 ) as app:
        # create the menu shown when icon gets right-clicked here
        app.add_button(text='hello world',
                       action=print, # method to call when clicked
                       args=('hello world',), # arguments, optional, in a tuple
                       # determines whether the function gets called when the icon is left-clicked
                       # optional, defaults to False, can be obviously only used once per app
                       default=True
                       )
        app.add_separator() # well...
        with app.add_submenu(text='SubMenu') as submenu: # submenus can be created by using a context manager within
            with submenu.add_submenu(text='first subsub') as first_sub_sub: # and recursively as well
                first_sub_sub.add_button(text='1.1', action=print, args=('1.1',))
                first_sub_sub.add_button(text='1.2', action=print, args=('1.2',))
            with submenu.add_submenu(text='second susub') as second_sub_sub:
                second_sub_sub.add_button(text='2.1', action=print, args=('2.1',))
                second_sub_sub.add_button(text='2.2', action=print, args=('2.2',))
            # any add_button(), add_separator(), add_submenu(), add_radiobuttongroup() can be used here
            # just remember to add them to the right submenu
        app.add_separator()
        # a RadioButtonGroup is a group of buttons which can be used to select something
        # trying it out might be the best way to understand it
        with app.add_radiobuttongroup() as rbg: # used with a contextmanager as well
            rbg.add(text='hello')
            rbg.add(text='world', selected=True) # selected determines the item which is selected on creation
| 39.767677 | 145 | 0.631953 | 1,398 | 0.355003 | 0 | 0 | 0 | 0 | 0 | 0 | 1,381 | 0.350686 |
c583a57704c6fdfa2c477fd3c64065132094d1ee | 5,210 | py | Python | compiler/domain.py | cul-it/arxiv-compiler | b4aaca17a08a752d8b5c12224edabd011a8920f7 | [
"MIT"
] | 5 | 2019-05-26T22:47:35.000Z | 2021-11-05T12:30:07.000Z | compiler/domain.py | arXiv/arxiv-compiler | b4aaca17a08a752d8b5c12224edabd011a8920f7 | [
"MIT"
] | 16 | 2019-02-12T23:25:04.000Z | 2021-04-30T15:04:48.000Z | compiler/domain.py | cul-it/arxiv-compiler | b4aaca17a08a752d8b5c12224edabd011a8920f7 | [
"MIT"
] | 3 | 2019-01-10T22:01:50.000Z | 2020-12-06T16:29:51.000Z | """Domain class for the compiler service."""
from typing import NamedTuple, Optional, BinaryIO, Dict
import io
from datetime import datetime
from .util import ResponseStream
from enum import Enum
class Format(Enum):
    """Compilation formats supported by this service."""

    PDF = "pdf"
    DVI = "dvi"
    PS = "ps"

    @property
    def ext(self) -> str:
        """Filename extension for the compilation product."""
        # The enum value doubles as the extension.
        value: str = self.value
        return value

    @property
    def content_type(self) -> str:
        """The mime-type for this format."""
        _ctypes: Dict['Format', str] = {
            Format.PDF: 'application/pdf',
            Format.DVI: 'application/x-dvi',
            Format.PS: 'application/postscript'
        }
        return _ctypes[self]


class Status(Enum):
    """Represents the status of a requested compilation."""

    COMPLETED = "completed"
    IN_PROGRESS = "in_progress"
    FAILED = "failed"


class Reason(Enum):
    """Specific reasons for a (usually failure) outcome."""

    AUTHORIZATION = "auth_error"
    MISSING = "missing_source"
    SOURCE_TYPE = "invalid_source_type"
    CORRUPTED = "corrupted_source"
    STORAGE = "storage"
    CANCELLED = "cancelled"
    COMPILATION = "compilation_errors"
    NETWORK = "network_error"
    DOCKER = "docker"
    NONE = None


class Task(NamedTuple):
    """Represents the state of a compilation product in the store."""

    # Here are the actual slots/fields.
    status: Status
    """
    The status of the compilation.

    If :attr:`Status.COMPLETED`, the current file corresponding to the format
    of this compilation status is the product of this compilation.
    """

    source_id: Optional[str] = None

    output_format: Optional[Format] = None
    """
    The target format of the compilation.

    One of :attr:`PDF`, :attr:`DVI`, or :attr:`PS`.
    """

    checksum: Optional[str] = None
    """
    Checksum of the source tarball from the file management service.

    This is likely to be a checksum of some kind, but may be something else.
    """

    task_id: Optional[str] = None
    """If a task exists for this compilation, the unique task ID."""

    reason: Reason = Reason.NONE
    """An explanation of the current status. E.g. why did it fail."""

    description: str = ""
    """A description of the outcome."""

    size_bytes: int = 0
    """Size of the product."""

    owner: Optional[str] = None
    """The owner of this resource."""

    @property
    def is_completed(self) -> bool:
        """Indicate whether or not this task is completed."""
        return bool(self.status in [Status.COMPLETED, Status.FAILED])

    @property
    def is_failed(self) -> bool:
        """Indicate whether or not this task has failed."""
        return bool(self.status is Status.FAILED)

    @property
    def is_in_progress(self) -> bool:
        """Indicate whether or not this task is still in progress."""
        return bool(self.status is Status.IN_PROGRESS)

    @property
    def ext(self) -> str:
        """Filename extension for the compilation product."""
        if self.output_format is None:
            raise TypeError('Output format `None` has no extension')
        return self.output_format.ext

    @property
    def content_type(self) -> str:
        """Mime type for the output format of this compilation."""
        if self.output_format is None:
            raise TypeError('Output format `None` has no content type')
        return self.output_format.content_type

    def to_dict(self) -> dict:
        """Generate a dict representation of this object."""
        return {
            'source_id': self.source_id,
            'output_format':
                self.output_format.value if self.output_format else None,
            'checksum': self.checksum,
            'task_id': self.task_id,
            'status': self.status.value if self.status else None,
            'reason': self.reason.value if self.reason else None,
            'description': self.description,
            'size_bytes': self.size_bytes,
            'owner': self.owner
        }

    @classmethod
    def from_dict(cls, data: dict) -> 'Task':
        """Generate a :class:`.Task` instance from raw data.

        Works on a shallow copy, so the caller's mapping is left intact
        (the previous implementation rebound the enum fields in place).
        """
        data = dict(data)
        data['output_format'] = Format(data['output_format'])
        data['status'] = Status(data['status'])
        data['reason'] = Reason(data['reason'])
        return cls(**data)
class Product(NamedTuple):
    """Content of a compilation product itself."""

    stream: BinaryIO
    """Readable buffer with the product content."""

    checksum: Optional[str] = None
    """The B64-encoded MD5 hash of the compilation product."""


class SourcePackage(NamedTuple):
    """Source package content, retrieved from file management service."""

    source_id: str
    """The identifier of the source package (upload workspace)."""

    path: str
    """Path to the retrieved source package."""

    etag: str
    """Etag returned with the source package content."""


class SourcePackageInfo(NamedTuple):
    """Current state of the source package in the file management service."""

    source_id: str  # upload workspace identifier
    etag: str       # last-seen etag, used for change detection
| 28.944444 | 77 | 0.632438 | 4,992 | 0.958157 | 0 | 0 | 1,823 | 0.349904 | 0 | 0 | 2,418 | 0.464107 |
c58427c4a5c249ad637ef0452752594d7390392b | 40,912 | py | Python | zziplib-0.13.62/docs/make-doc.py | guangbin79/Lua_5.1.5-Android | d85e3f54169a36c9281f7da9ad2b4c3c34027e4b | [
"MIT"
] | 28 | 2017-04-20T06:21:26.000Z | 2021-12-10T15:22:51.000Z | zziplib-0.13.62/docs/make-doc.py | guangbin79/Lua_5.1.5-Android | d85e3f54169a36c9281f7da9ad2b4c3c34027e4b | [
"MIT"
] | 3 | 2017-04-05T00:41:45.000Z | 2020-04-04T00:44:24.000Z | zziplib-0.13.62/docs/make-doc.py | guangbin79/Lua_5.1.5-Android | d85e3f54169a36c9281f7da9ad2b4c3c34027e4b | [
"MIT"
] | 15 | 2015-02-23T16:35:28.000Z | 2022-03-25T13:40:33.000Z | #! /usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import re
import string
import commands
import warnings
errors = 0
def warn(msg, error=None):
    """Emit a RuntimeWarning and bump the module-wide 'errors' counter.

    The running count is embedded in the warning text; 'error' (an
    exception object), when given, is appended for context.  The final
    argument 2 is the stacklevel, so the warning points at the caller.
    """
    global errors
    errors += 1
    if error is None:
        warnings.warn("-- "+str(errors)+" --\n "+msg, RuntimeWarning, 2)
    else:
        warnings.warn("-- "+str(errors)+" --\n "+msg+
                      "\n error was "+str(error), RuntimeWarning, 2)
#fu
# beware, stupid python interprets backslashes in repl only partially!
def s(string, pattern, repl, count=0):
    """Shorthand for re.sub: replace 'pattern' with 'repl' in 'string'."""
    return re.sub(pattern, repl, string, count)
def m(string, pattern):
    """Shorthand for re.match (note the swapped argument order)."""
    return re.match(pattern, string)
def sorted_keys(dict):
    """Return the keys of 'dict' as a new, sorted list.

    (The parameter name shadows the builtin 'dict'; it is kept so the
    signature stays identical for existing callers.)
    """
    return sorted(dict.keys())
# we make up a few formatter routines to help in the processing:
def html2docbook(text):
    """ the C comment may contain html markup - simulate with docbook tags """
    # Successive substitutions: drop <br/>, map em->emphasis,
    # code->userinput, link->function, trim whitespace before </screen>,
    # and turn <ul>/<li> lists into a <programlisting> block.
    return (
        s(s(s(s(s(s(s(s(s(s(s(text,
        r"<br\s*/?>",""),
        r"(</?)em>",r"\1emphasis>"),
        r"<code>","<userinput>"),
        r"</code>","</userinput>"),
        r"<link>","<function>"),
        r"</link>","</function>"),
        r"(?s)\s*</screen>","</screen>"),
        # r"<ul>","</para><itemizedlist>"),
        # r"</ul>","</itemizedlist><para>"),
        # r"<li>","<listitem><para>"),
        # r"</li>","</para></listitem>\n"),
        r"<ul>","</para><programlisting>\n"),
        r"</ul>","</programlisting><para>"),
        r"<li>",""),
        r"</li>",""))
def paramdef2html(text):
    """Turn docbook <paramdef>/<parameters> markup into its html
    rendering: <nobr> per parameter, <code> around the whole list."""
    return s(s(s(s(s(text,
        r"\s+<paramdef>", r"\n<nobr>"),
        r"<paramdef>",r"<nobr>"),
        r"</paramdef>",r"</nobr>"),
        r"<parameters>",r"\n <code>"),
        r"</parameters>",r"</code>\n")
def section2html(text):
    """Map docbook block/inline tags back to their html counterparts."""
    mapping = { "<screen>" : "<pre>", "</screen>" : "</pre>",
                "<para>" : "<p>", "</para>" : "</p>" ,
                "<function>" : "<link>", "</function>" : "</link>" }
    # NOTE: the loop variable shadows the builtin 'str'.
    for str in mapping:
        text = string.replace(text, str, mapping[str])
    return text
def html(text):
    """Full docbook -> html conversion: parameter markup first, then sections."""
    return section2html(paramdef2html(text))
# Entity-escaping helpers.  The digit suffix says which characters get
# escaped: 1 = '&', 3 = '<' and '>', 4 = '"'.  Multi-digit names apply the
# named levels in sequence (e.g. cdata41 = quotes + angles, no ampersand).
def cdata1(text):
    return string.replace(text, "&", "&amp;")
def cdata31(text):
    return string.replace(string.replace(text, "<","&lt;"), ">","&gt;")
def cdata3(text):
    return cdata31(cdata1(text))
def cdata43(text):
    return string.replace(text,"\"", "&quot;")
def cdata41(text):
    return cdata43(cdata31(text))
def cdata4(text):
    return cdata43(cdata3(text))
def markup_as_screen41 (text):
    """ used for non-star lines in comment blocks """
    # Escape, indent every line by one space, and wrap in <screen> tags.
    return " <screen> " + s(cdata41(text), r"(?m)^", r" ") +" </screen> "
def file_comment2section(text):
    """ convert a C comment into a series of <para> and <screen> parts """
    # Passes, in order: preserve {<email>} addresses, turn non-star lines
    # into <screen> blocks, make empty '*' lines paragraph breaks, strip
    # the leading '*' of ordinary lines, merge adjacent screens, and wrap
    # e-mail addresses in <email> tags.
    return ("<para>\n"+
        s(s(s(s(s(s(s(text,
        r"(?s){<([\w\.\-]+\@[\w\.\-]+\w\w)>",
        r"<\1>"),
        r"(?mx) ^\s?\s?\s? ([^\*\s]+ .*) $",
        lambda x : markup_as_screen41 (x.group(1))),
        r"(?mx) ^\s*[*]\s* $", r" \n</para><para>\n"),
        r"(?mx) ^\s?\s?\s?\* (.*) $", r" \1 "),
        r"(?sx) </screen>(\s*)<screen> ", r"\1"),
        r"(?sx) <([^<>\;]+\@[^<>\;]+)> ", r"<email>\1</email>"),
        r"(?sx) \<\;([^<>\&\;]+\@[^<>\&\;]+)\>\; ",
        r"<email>\1</email>") + "\n</para>")
def func_comment2section(text):
    """ convert a C comment into a series of <para> and <screen> parts
        and sanitize a few markups already present in the comment text
    """
    # Passes, in order: <c> -> <code>, non-star lines -> <screen>, star
    # lines -> <br/>-joined text, blank lines -> paragraph breaks, unquote
    # << / >>, then collapse redundant <br/> and adjacent screen blocks.
    return ("<para>\n"+
        s(s(s(s(s(s(s(s(s(s(s(text,
        r"<c>",r"<code>"), r"</c>", r"</code>"),
        r"(?mx) ^\s?\s?\s? ([^\*\s]+.*)",
        lambda x: markup_as_screen41 (x.group(1))),
        r"(?mx) ^\s?\s?\s?\* (.*) $", r" <br /> \1"),
        r"(?mx) ^\s*<br\s*\/>\s* $", r"\n</para><para>\n"),
        r"<<",r"<"), r">>",r">"),
        r"(?sx) (</?para>\s*)<br\s*\/?>",r"\1"),
        r"(?sx) (</?para>\s*)<br\s*\/?>",r"\1"),
        r"(?sx) (<br\s*\/?>\s*)<br\s*\/?>",r"\1"),
        r"(?sx) <\/screen>(\s*)<screen>",r"\1") + "\n</para>")
def markup_link_syntax(text):
    """ markup the link-syntax ` => somewhere ` in the text block """
    # Four alternative spellings: =>"..." / =>'...' / => word / => token.
    return (
        s(s(s(s(text,
        r"(?mx) (^|\s)\=\>\"([^\"]*)\"", r"\1<link>\2</link>"),
        r"(?mx) (^|\s)\=\>\'([^\"]*)\'", r"\1<link>\2</link>"),
        r"(?mx) (^|\s)\=\>\s(\w[\w.]*\w)\b", r"\1<link>\2</link>"),
        r"(?mx) (^|\s)\=\>\s([^\s\,\.\!\?\:\;\<\>\&\'\=\-]+)",
        r"\1<link>\2</link>"))
def this_function_link(text, name):
    """Replace 'this function'/'this procedure' by a <function> reference
    that names the given function."""
    return s(text, r"(?sx) (T|t)his \s (function|procedure) ", lambda x
        : "<function>"+x.group(1)+"he "+name+" "+x.group(2)+"</function>")
# -----------------------------------------------------------------------
class Options:
    """Attribute-style access to an options dict; missing attributes read
    as None instead of raising AttributeError."""
    # NOTE: class-level dict, so all Options instances share the same vars.
    var = {}
    def __getattr__(self, name):
        if not self.var.has_key(name): return None
        return self.var[name]
    def __setattr__(self, name, value):
        self.var[name] = value
#end
o = Options()
o.verbose = 0
o.version = s( commands.getoutput(
""" grep -i "^version *:" *.spec 2>/dev/null |
sed -e "s/[Vv]ersion *: *//" """), r"\s*",r"")
o.package = s(commands.getoutput(
""" grep -i "^name *:" *.spec 2>/dev/null |
sed -e "s/[Nn]ame *: *//" """), r"\s*",r"")
if not len(o.version):
o.version = commands.getoutput(""" date +%Y.%m.%d """)
if not len(o.package):
o.package = "_project"
o.suffix = "-doc3"
o.mainheader = o.package+".h"
class File:
    """Per-input-file record: name, include line, comment block, plus the
    author/copyright credits collected later from the comment."""
    def __init__(self, filename):
        self.name = filename
        self.mainheader = o.mainheader
        self.authors = ""
        self.copyright = ""
    def __getattr__(self, name):
        """ defend against program to break on uninited members """
        if self.__dict__.has_key(name): return self.__dict__[name]
        warn("no such member: "+name); return None
    def set_author(self, text):
        # Multiple authors accumulate, newline-separated.
        if self.authors:
            self.authors += "\n"
        self.authors += text
        return text
    def set_copyright(self, text):
        self.copyright = text
        return text
class InputFiles:
    """ for each set of input files we can create an object
        it does correspond with a single html-output page and
        a single docbook <reference> master page to be output
    """
    def __init__(self):
        # the id will tell us in which order
        # we did meet each function definition
        self.id = 1000
        self.files = [] # file_list
        self.funcs = [] # func_list: of hidden class FuncDeclaration
        self.file = None # current file
    def new_File(self, name):
        """Start a new File record and make it the current file."""
        self.file = File(name)
        self.files.append(self.file)
        return self.file
    def next_id(self):
        """Return the current id and advance the counter."""
        id = self.id ; self.id += 1
        return id
    def add_function_declaration(self, comment, prototype):
        """Record one per-function comment/prototype pair for the
        current file; returns the prototype unchanged (used as the
        re.sub replacement)."""
        class FuncDeclaration: # note that both decl.comment and
            pass # decl.prototype are in cdata1 format
        func = FuncDeclaration()
        func.file = self.file
        func.comment = s(comment, # need to take out email-style markups
            r"<([\w\.\-]+\@[\w\.\-]+\w\w)>", r"<\1>")
        func.prototype = prototype
        # NOTE(review): uses the module-global 'all' rather than self --
        # works only because a single InputFiles instance exists; confirm.
        func.id = all.next_id()
        self.funcs.append(func)
        # print id
        return prototype
def scan_options (options, list):
    """Process command-line arguments: 'name=value' items set options on
    the global 'o'; everything else is read as a C source file whose
    comment blocks are recorded into 'list' (an InputFiles).  The file
    text itself is discarded afterwards."""
    # '*/' is transcoded to the otherwise-unused character to make the
    # end-of-comment detection regexes simpler ('&#AC;' guards existing
    # occurrences of that character).
    def encode(text):
        return s(s(text, r"¬", r"&#AC;"), r"\*/",r"¬")
    def decode(text):
        return s(text, r"¬", r"*/")
    for name in options:
        found = m(name, r"^(\w+)=(.*)")
        if found:
            o.var[found.group(1)] = found.group(2)
            continue
        #else
        try:
            input = open(name, "r")
        except IOError, error:
            warn(#...... (scan_options) ...............
                "can not open input file: "+name, error)
            continue
        text = input.read() ; input.close()
        text = encode (cdata1 (text))
        file = list.new_File(name)
        # cut per-function comment block
        text = s(text, r"(?x) [/][*][*](?=\s) ([^¬]+) ¬ ([^\{\}\;\#]+) [\{\;]",
            lambda x : list.add_function_declaration(
                decode(x.group(1)), decode(x.group(2))))
        # cut per-file comment block
        found = m(text, r"(?sx) [/][*]+(?=\s) ([^¬]+) ¬ "
            r"(?:\s*\#define\s*\S+)*"
            r"(\s*\#include\s*<[^<>]*>(?:\s*//[^\n]*)?)")
        if found:
            file.comment = decode(found.group(1))
            file.include = cdata31(found.group(2))
        else:
            file.comment = None
            file.include = None
        found = m(text, r"(?sx) ^ [/][*]+(?=\s) ([^¬]+) ¬ ")
        if found:
            file.comment = decode(found.group(1))
        #fi
    # throw away the rest - further processing on memorized strings only
    return None
all = InputFiles()
scan_options (sys.argv[1:], all)
if not o.docbookfile:
o.docbookfile = o.package+o.suffix+".docbook"
if not o.libhtmlfile:
o.libhtmlfile = o.package+o.suffix+".html"
if not o.dumpdocfile:
o.dumpdocfile = o.package+o.suffix+".dxml"
# ...........................................................................
# check out information in the file.comment section
def all_files_comment2section(list):
    """Render each file's comment into a docbook .section, pulling out
    'Author:' and 'Copyright:' lines into the File's credit fields."""
    for file in list:
        if file.comment is None: continue
        file.section = file_comment2section(file.comment)
        file.section = s(
            file.section, r"(?sx) \b[Aa]uthor\s*:(.*</email>) ", lambda x
            : "<author>" + file.set_author(x.group(1)) + "</author>")
        file.section = s(
            file.section, r"(?sx) \b[Cc]opyright\s*:([^<>]*)</para> ",lambda x
            : "<copyright>" + file.set_copyright(x.group(1)) + "</copyright>")
        # if "file" in file.name: print >> sys.stderr, file.comment # 2.3
    #od
all_files_comment2section(all.files)
# -----------------------------------------------------------------------
class Function:
    " <prespec>void* </><namespec>hello</><namespec> (int) const</callspec> "
    # Holds one C prototype split into return-type part (prespec), the
    # function name (namespec) and the argument part (callspec).
    def __init__(self):
        self.prespec = ""
        self.namespec = ""
        self.callspec = ""
        self.name = ""
    # def set(self, **defines):
    #     name = defines.keys()[0]
    #     self.__dict__[name] = defines[name]
    #     return defines[name]
    # def cut(self, **defines):
    #     name = defines.keys()[0]
    #     self.__dict__[name] += defines[name]
    #     return ""
    def __getattr__(self, name):
        """ defend against program exit on members being not inited """
        if self.__dict__.has_key(name): return self.__dict__[name]
        warn("no such member: "+name); return None
    def dict(self):
        return self.__dict__
    def dict_sorted_keys(self):
        keys = self.__dict__.keys()
        keys.sort()
        return keys
    def parse(self, prototype):
        """Split 'prototype' into prespec/namespec/callspec; returns the
        function name on success, None when it does not look like one."""
        found = m(prototype, r"(?sx) ^(.*[^.]) \b(\w[\w.]*\w)\b (\s*\(.*) $ ")
        if found:
            self.prespec = found.group(1).lstrip()
            self.namespec = found.group(2)
            self.callspec = found.group(3).lstrip()
            self.name = self.namespec.strip()
            return self.name
        return None
# pass 1 of per-func strings ...............................................
# (a) cut prototype into prespec/namespec/callspec
# (b) cut out first line of comment as headline information
# (c) sanitize rest of comment block into proper docbook formatted .body
#
# do this while copying strings from all.funcs to function_list
# and remember the original order in name_list
def markup_callspec(text):
    """Wrap a C argument list in <parameters>, with one <paramdef> per
    comma-separated argument; surrounding whitespace is moved outside
    the <paramdef> tags."""
    return (
        s(s(s(s(s(text,
        r"(?sx) ^([^\(\)]*)\(", r"\1<parameters>(<paramdef>",1),
        r"(?sx) \)([^\(\)]*)$", r"</paramdef>)</parameters>\1",1),
        r"(?sx) , ", r"</paramdef>,<paramdef>"),
        r"(?sx) <paramdef>(\s+) ", r"\1<paramdef>"),
        r"(?sx) (\s+)</paramdef>", r"</paramdef>\1"))
def parse_all_functions(func_list): # list of FunctionDeclarations
    """ parse all FunctionDeclarations and create a list of Functions """
    # NOTE(review): the func_list parameter is ignored; the loop walks the
    # module-global all.funcs instead -- confirm this is intentional.
    list = []
    for func in all.funcs:
        function = Function()
        if not function.parse (func.prototype): continue
        list.append(function)
        function.body = markup_link_syntax(func.comment)
        if "\n" not in function.body: # single-line comment is the head
            function.head = function.body
            function.body = ""
        else: # cut comment in first-line and only keep the rest as descr body
            function.head = s(function.body, r"(?sx) ^([^\n]*\n).*",r"\1",1)
            function.body = s(function.body, r"(?sx) ^[^\n]*\n", r"", 1)
        #fi
        if m(function.head, r"(?sx) ^\s*$ "): # empty head line, autofill here
            function.head = s("("+func.file.name+")", r"[.][.][/]", r"")
        function.body = func_comment2section(function.body)
        function.src = func # keep a back reference
        # add extra docbook markups to callspec in $fn-hash
        function.callspec = markup_callspec (function.callspec)
    #od
    return list
function_list = parse_all_functions(all.funcs)
def examine_head_anchors(func_list):
    """ .into tells later steps which func-name is the leader of a man
        page and that this func should add its descriptions over there. """
    for function in func_list:
        function.into = None
        function.seealso = None
        # A head starting with a <link> marks this entry as belonging to
        # that (leader) function's page.
        found = m(function.head, r"(?sx) ^ \s* <link>(\w[\w.]*\w)<\/link>")
        # if found and found.group(1) in func_list.names:
        if found and found.group(1):
            function.into = found.group(1)
        # Split a trailing 'also: ...' part off the head into .seealso.
        def set_seealso(f, value):
            f.seealso = value
            return value
        function.head = s(function.head, r"(.*)also:(.*)", lambda x
            : set_seealso(function, x.group(2)) and x.group(1))
        if function.seealso and None:  # debug output, disabled by 'and None'
            print "function[",function.name,"].seealso=",function.seealso
examine_head_anchors(function_list)
# =============================================================== HTML =====
def find_by_name(func_list, name):
    """Return the first entry of func_list carrying the given .name,
    or None when there is no match."""
    match = None
    for entry in func_list:
        if entry.name == name:
            match = entry
            break
    return match
#fu
class HtmlFunction:
    """One function's html rendering: a table-of-contents row, the
    synopsis markup, the <a name> anchor and the description section."""
    def __init__(self, func):
        self.src = func.src
        self.into = func.into
        self.name = func.name
        self.toc_line = paramdef2html(
            " <td valign=\"top\"><code>"+func.prespec+"</code></td>\n"+
            " <td valign=\"top\"> </td>\n"+
            " <td valign=\"top\"><a href=\"#"+func.name+"\">\n"+
            " <code>"+func.namespec+"</code>"+
            " </a></td>\n"+
            " <td valign=\"top\"> </td>\n"+
            " <td valign=\"top\">"+func.callspec+"</td>\n")
        self.synopsis = paramdef2html(
            " <code>"+func.prespec+"</code>\n"+
            " <br /><b><code>"+func.namespec+"</code></b>\n"+
            " <code>"+func.callspec+"</code>\n")
        self.anchor = "<a name=\""+func.name+"\" />"
        self.section = "<para><em> "+func.head+"\n"+ \
            "\n</em></para>"+section2html(func.body)
#class
class HtmlFunctionFamily(HtmlFunction):
    """A leader function's page: list-valued copies of the HtmlFunction
    parts so non-leader entries can be appended later.
    (NOTE: 'page' is used in place of the conventional 'self'.)"""
    def __init__(page, func):
        HtmlFunction.__init__(page, func)
        page.toc_line_list = [ page.toc_line ]
        # page.html_txt = page.synopsis
        page.synopsis_list = [ page.synopsis ]
        page.anchor_list = [ page.anchor ]
        page.section_list = [ this_function_link(page.section, func.name) ]
def ensure_name(text, name):
    """Make sure 'name' is mentioned in 'text': if absent, prefix it
    (inside the first paragraph when one exists)."""
    adds = "<small><code>"+name+"</code></small> -"
    match = r"(?sx) .*>[^<>]*\b" + name + r"\b[^<>]*<.*"
    found = m(text, match)
    if found: return text
    found = m(text, r".*<p(ara)?>.*")
    if found: return s(text, r"(<p(ara)?>)", r"\1"+adds, 1)
    return adds+text
def combined_html_pages(func_list):
    """ and now add descriptions of non-leader entries (html-mode) """
    # First pass collects the leader functions (no .into); second pass
    # folds each non-leader entry into its leader's lists, or promotes it
    # to a page of its own when the referenced leader does not exist.
    combined = {}
    for func in func_list: # assemble leader pages
        if func.into is not None: continue
        combined[func.name] = HtmlFunctionFamily(func)
    for func in func_list:
        if func.into is None: continue
        if func.into not in combined :
            warn(#......... (combine_html_pages) ..............
                "function '"+func.name+"'s into => '"+func.into+
                "\n: no such target function: "+func.into)
            combined[func.name] = HtmlFunctionFamily(func)
            continue
        #fi
        page = HtmlFunction(func)
        into = combined[func.into]
        into.toc_line_list.append( page.toc_line )
        into.anchor_list.append( page.anchor )
        into.synopsis_list.append( page.synopsis )
        into.section_list.append(
            s(ensure_name(this_function_link(section2html( func.body ),
                func.name), func.name),
                r"(?sx) (</?para>\s*) <br\s*\/>", r"\1"))
    return combined.values()
html_pages = combined_html_pages(function_list)
def html_resolve_links_on_page(text, list):
    """ link ref-names of a page with its endpoint on the same html page"""
    def html_link (name , extra):
        """ make <link>s to <href> of correct target or make it <code> """
        # Only functions known on this page become real hyperlinks.
        if find_by_name(list, name) is None:
            return "<code>"+name+extra+"</code>"
        else:
            return "<a href=\"#"+name+"\"><code>"+name+extra+"</code></a>"
    #fu html_link
    return s(s(text, r"(?sx) <link>(\w+)([^<>]*)<\/link> ",
        lambda x : html_link(x.group(1),x.group(2))),
        r"(?sx) \-\> ", r"<small>-></small>") # just sanitize..
#fu html_resolve_links
class HtmlPage:
    """Accumulates the table-of-contents (.toc) and description (.txt)
    markup for the single-page html output and renders the full page."""
    def __init__(self):
        self.toc = ""
        self.txt = ""
        self.package = o.package
        self.version = o.version
    def page_text(self):
        """ render .toc and .txt parts into proper <html> page """
        T = ""
        T += "<html><head>"
        T += "<title>"+self.package+"autodoc documentation </title>"
        T += "</head>\n<body>\n"
        T += "\n<h1>"+self.package+" <small><small><i>- "+self.version
        T += "</i></small></small></h1>"
        T += "\n<table border=0 cellspacing=2 cellpadding=0>"
        T += self.toc
        T += "\n</table>"
        T += "\n<h3>Documentation</h3>\n\n<dl>"
        T += html_resolve_links_on_page(self.txt, function_list)
        T += "\n</dl>\n</body></html>\n"
        return T
    def add_page_map(self, list):
        """ generate the index-block at the start of the onepage-html file """
        # 'list' is actually a dict of name -> HtmlFunctionFamily.
        keys = list.keys()
        keys.sort()
        for name in keys:
            self.toc += "<tr valign=\"top\">\n"+ \
                "\n</tr><tr valign=\"top\">\n".join(
                list[name].toc_line_list)+"</tr>\n"
            self.txt += "\n<dt>"+" ".join(list[name].anchor_list)
            self.txt += "\n"+"\n<br />".join(list[name].synopsis_list)+"<dt>"
            self.txt += "\n<dd>\n"+"\n".join(list[name].section_list)
            self.txt += ("\n<p align=\"right\">"+
                "<small>("+list[name].src.file.name+")</small>"+
                "</p></dd>")
    def add_page_list(self, functions):
        """ generate the index-block at the start of the onepage-html file """
        mapp = {}
        for func in functions:
            mapp[func.name] = func
        #od
        self.add_page_map(mapp)
#end
html = HtmlPage()
# html.add_function_dict(Fn)
# html.add_function_list(Fn.sort.values())
html.add_page_list(html_pages)
# and finally print the html-formatted output
try:
F = open(o.libhtmlfile, "w")
except IOError, error:
warn(# ............. open(o.libhtmlfile, "w") ..............
"can not open html output file: "+o.libhtmlfile, error)
else:
print >> F, html.page_text()
F.close()
#fi
# ========================================================== DOCBOOK =====
# let's go for the pure docbook, a reference type master for all man pages
class RefPage:
def __init__(self, func):
""" initialize the fields needed for a man page entry - the fields are
named after the docbook-markup that encloses (!!) the text we store
the entries like X.refhint = "hello" will be printed therefore as
<refhint>hello</refhint>. Names with underscores are only used as
temporaries but they are memorized, perhaps for later usage. """
self.refhint = "\n<!--========= "+func.name+" (3) ===========-->\n"
self.refentry = None
self.refentry_date = o.version.strip() # //refentryinfo/date
self.refentry_productname = o.package.strip() # //refentryinfo/prod*
self.refentry_title = None # //refentryinfo/title
self.refentryinfo = None # override
self.manvolnum = "3" # //refmeta/manvolnum
self.refentrytitle = None # //refmeta/refentrytitle
self.refmeta = None # override
self.refpurpose = None # //refnamediv/refpurpose
self.refname = None # //refnamediv/refname
self.refname_list = []
self.refnamediv = None # override
self.mainheader = func.src.file.mainheader
self.includes = func.src.file.include
self.funcsynopsisinfo = "" # //funcsynopsisdiv/funcsynopsisinfo
self.funcsynopsis = None # //funcsynopsisdiv/funcsynopsis
self.funcsynopsis_list = []
self.description = None
self.description_list = []
# optional sections
self.authors_list = [] # //sect1[authors]/listitem
self.authors = None # override
self.copyright = None
self.copyright_list = []
self.seealso = None
self.seealso_list = []
if func.seealso:
self.seealso_list.append(func.seealso)
# func.func references
self.func = func
self.file_authors = None
if func.src.file.authors:
self.file_authors = func.src.file.authors
self.file_copyright = None
if func.src.file.copyright:
self.file_copyright = func.src.file.copyright
#fu
def refentryinfo_text(page):
""" the manvol formatter wants to render a footer line and header line
on each manpage and such info is set in <refentryinfo> """
if page.refentryinfo:
return page.refentryinfo
if page.refentry_date and \
page.refentry_productname and \
page.refentry_title: return (
"\n <date>"+page.refentry_date+"</date>"+
"\n <productname>"+page.refentry_productname+"</productname>"+
"\n <title>"+page.refentry_title+"</title>")
if page.refentry_date and \
page.refentry_productname: return (
"\n <date>"+page.refentry_date+"</date>"+
"\n <productname>"+page.refentry_productname+"</productname>")
return ""
def refmeta_text(page):
    """Render the <refmeta> body.

    The manvol formatter derives the manpage filename from
    <refentrytitle> and <manvolnum>; the function name is the fallback
    title.  An explicit `page.refmeta` override wins.
    """
    if page.refmeta:
        return page.refmeta
    if not page.manvolnum:
        return ""
    title = page.refentrytitle or page.func.name
    if not title:
        return ""
    return ("\n <refentrytitle>" + title + "</refentrytitle>" +
            "\n <manvolnum>" + page.manvolnum + "</manvolnum>")
def refnamediv_text(page):
    """Render the <refnamediv> header block.

    Emits one <refname> per listed name plus a single shared
    <refpurpose>.  (The manvol formatter creates one manpage per
    refname and a symlink for every refname != refentrytitle.)
    An explicit `page.refnamediv` override wins.
    """
    if page.refnamediv:
        return page.refnamediv
    if not page.refpurpose:
        return ""
    names = [page.refname] if page.refname else page.refname_list
    if not names:
        return ""
    parts = ["\n <refname>" + name + '</refname>' for name in names]
    parts.append("\n <refpurpose>" + page.refpurpose + " </refpurpose>")
    return "".join(parts)
def funcsynopsisdiv_text(page):
    """Render the refsynopsisdiv content.

    A single `page.funcsynopsis` and the collected
    `page.funcsynopsis_list` each produce their own <funcsynopsis>
    block (both may be present); each block repeats the shared
    <funcsynopsisinfo>, if any.
    """
    parts = []

    def open_block():
        # common opening of a <funcsynopsis> block, with optional info
        parts.append("\n<funcsynopsis>")
        if page.funcsynopsisinfo:
            parts.append("\n<funcsynopsisinfo>" + page.funcsynopsisinfo +
                         "\n</funcsynopsisinfo>\n")

    if page.funcsynopsis:
        open_block()
        parts.append(page.funcsynopsis + "\n</funcsynopsis>\n")
    if page.funcsynopsis_list:
        open_block()
        parts.extend(page.funcsynopsis_list)
        parts.append("\n</funcsynopsis>\n")
    return "".join(parts)
def description_text(page):
    """Return the DESCRIPTION section body.

    An explicit `page.description` override wins; otherwise the
    non-empty entries of `page.description_list` are concatenated.
    """
    if page.description:
        return page.description
    if page.description_list:
        return "".join(part for part in page.description_list if part)
    return ""
def authors_text(page):
    """Return the AUTHOR section body.

    Prefers an explicit ``page.authors`` override; otherwise renders
    the collected authors as an <itemizedlist> so the manvol output
    gets a nice vertical alignment of the authors of this ref item.
    Consecutive duplicate entries are collapsed.
    """
    if page.authors:
        return page.authors
    if page.authors_list:
        T = "<itemizedlist>"
        previous = ""
        for authors in page.authors_list:
            if not authors: continue
            if previous == authors: continue
            T += "\n <listitem><para>"+authors+"</para></listitem>"
            previous = authors
        T += "</itemizedlist>"
        return T
    # NOTE: the original re-checked `page.authors` here, which is
    # unreachable (a truthy value already returned at the top).
    return ""
def copyright_text(page):
    """Return the COPYRIGHT section body (optional, near the end).

    An explicit ``page.copyright`` override wins; otherwise only the
    first non-empty entry of the per-file copyright list is returned
    instead of merging them all.  (The original also built an unused
    accumulator ``T`` and held this note in a stray string literal.)
    """
    if page.copyright:
        return page.copyright
    for copyright in page.copyright_list:
        if copyright:
            return copyright  # first valid entry only, no merging
    return ""
def seealso_text(page):
    """Return the SEE ALSO section body.

    A comma-separated list of references (many manpage viewers turn
    these into hyperlinks).  An explicit `page.seealso` override wins.
    """
    if page.seealso:
        return page.seealso
    if page.seealso_list:
        return ", ".join(ref for ref in page.seealso_list if ref)
    return ""
def refentry_text(page, id=None):
    """Combine the section renderers into a complete docbook <refentry>.

    ``id`` defaults to ``page.refentry``; a missing id still yields a
    bare <refentry> (formally an error) so the problem stays visible in
    the output instead of aborting the run.

    Each renderer is now called once and the result reused (the
    original called every ``*_text()`` twice per section).
    """
    if id is None:
        id = page.refentry
    if id:
        T = '<refentry id="'+id+'">'
    else:
        T = '<refentry>' # this is an error
    info = page.refentryinfo_text()
    if info:
        T += "\n<refentryinfo>" + info + "\n</refentryinfo>\n"
    meta = page.refmeta_text()
    if meta:
        T += "\n<refmeta>" + meta + "\n</refmeta>\n"
    namediv = page.refnamediv_text()
    if namediv:
        T += "\n<refnamediv>" + namediv + "\n</refnamediv>\n"
    synopsis = page.funcsynopsisdiv_text()
    if synopsis:
        T += "\n<refsynopsisdiv>\n" + synopsis + "\n</refsynopsisdiv>\n"
    description = page.description_text()
    if description:
        T += "\n<refsect1><title>Description</title> " + \
            description + "\n</refsect1>"
    authors = page.authors_text()
    if authors:
        T += "\n<refsect1><title>Author</title> " + \
            authors + "\n</refsect1>"
    copyright = page.copyright_text()
    if copyright:
        T += "\n<refsect1><title>Copyright</title> " + \
            copyright + "\n</refsect1>\n"
    seealso = page.seealso_text()
    if seealso:
        T += "\n<refsect1><title>See Also</title><para> " + \
            seealso + "\n</para></refsect1>\n"
    T += "\n</refentry>\n"
    return T
#end
# -----------------------------------------------------------------------
class FunctionRefPage(RefPage):
    """A RefPage populated from a single parsed Function object."""
    def reinit(page):
        """ here we parse the input function for its values """
        if page.func.into:
            # this page will later be folded into its `.into` target page;
            # leave only a hint comment behind
            page.refhint = "\n <!-- see "+page.func.into+" -->\n"
        #fi
        page.refentry = page.func.name # //refentry@id
        page.refentry_title = page.func.name.strip() # //refentryinfo/title
        page.refentrytitle = page.func.name # //refmeta/refentrytitle
        if page.includes:
            page.funcsynopsisinfo += "\n"+page.includes
        if not page.funcsynopsisinfo:
            # fall back to including the project's main header
            page.funcsynopsisinfo="\n"+' #include <'+page.mainheader+'>'
        page.refpurpose = page.func.head
        page.refname = page.func.name
        def funcsynopsis_of(func):
            # build a docbook <funcprototype> from the parsed prototype parts;
            # `s` is presumably a re.sub-style helper from the enclosing
            # module -- TODO confirm its signature
            return (
                "\n <funcprototype>\n <funcdef>"+func.prespec+
                " <function>"+func.name+"</function></funcdef>"+
                "\n"+s(s(s(func.callspec,
                    r"<parameters>\s*\(",r" "),
                    r"\)\s*</parameters>",r" "),
                    r"</paramdef>\s*,\s*",r"</paramdef>\n ")+
                " </funcprototype>")
        page.funcsynopsis = funcsynopsis_of(page.func)
        page.description = (
            html2docbook(this_function_link(page.func.body, page.func.name)))
        if page.file_authors:
            # split the per-file authors blob at every <email> tag and
            # collect "name <email>" entries
            def add_authors(page, ename, email):
                page.authors_list.append( ename+' '+email )
                return ename+email
            s(page.file_authors,
                r"(?sx) \s* ([^<>]*) (<email>[^<>]*</email>) ", lambda x
                : add_authors(page, x.group(1), x.group(2)))
        #fi
        if page.file_copyright:
            page.copyright = "<screen>\n"+page.file_copyright+"</screen>\n"
        #fi
        return page
    def __init__(page,func):
        RefPage.__init__(page, func)
        FunctionRefPage.reinit(page)
def refpage_list_from_function_list(funclist):
    """Build one FunctionRefPage per function, in input order.

    A function whose ``.into`` target does not exist in the list can
    not be folded anywhere, so its ``.into`` is reset (with a warning)
    before its page is created.  The original constructed a throwaway
    probe FunctionRefPage for every function just to format the
    warning; the page is now constructed exactly once per function.
    """
    known = {}
    for func in funclist:
        known[func.name] = func
    #od
    pages = []
    for func in funclist:
        if func.into and func.into not in known:
            warn (# ............ (refpage_list_from_function_list) .......
                "page '"+func.name+"' has no target => "+
                "'"+func.into+"'"
                "\n: going to reset .into of Function '"+func.name+"'")
            func.into = None
        #fi
        pages.append(FunctionRefPage(func))
    return pages
#fu
# ordered list of pages: one FunctionRefPage per parsed function
refpage_list = refpage_list_from_function_list(function_list)
class FunctionFamilyRefPage(RefPage):
    """A combined RefPage for a family of functions sharing one page.

    The per-function lists are filled in later (see
    docbook_pages_recombine below); refhint_list collects the
    "<!-- see ... -->" markers of the members so they can be emitted
    just before the combined refentry.
    """
    def __init__(self, page):
        RefPage.__init__(self, page.func)
        self.seealso_list = [] # reset
        self.refhint_list = []
    def refhint_list_text(page):
        # plain concatenation of the per-member hint comments
        T = ""
        for hint in page.refhint_list:
            T += hint
        return T
    def refentry_text(page):
        # hint comments go in front of the regular refentry rendering
        return page.refhint_list_text() + "\n" + \
            RefPage.refentry_text(page)
    pass
def docbook_pages_recombine(pagelist):
    """ take a list of RefPages and create a new list where sections are
    recombined in a way that their description is listed on the same
    page and the manvol formatter creates symlinks to the combined
    function description page - use the attribute 'into' to guide the
    processing here as each of these will be removed from the output
    list. If no into-pages are there then the returned list should
    render to the very same output text like the input list would do """
    def merge_member(page, orig):
        # Fold the per-function fields of `orig` into the combined `page`.
        # BUG FIX: the original guarded the funcsynopsis_list and
        # copyright_list extensions with `elif orig.refname_list:` (a
        # copy-paste slip); each extension now tests its own list.
        if orig.refname:
            page.refname_list.append( orig.refname )
        elif orig.refname_list:
            page.refname_list.extend( orig.refname_list )
        if orig.funcsynopsis:
            page.funcsynopsis_list.append( orig.funcsynopsis )
        elif orig.funcsynopsis_list:
            page.funcsynopsis_list.extend( orig.funcsynopsis_list )
        if orig.description:
            page.description_list.append( orig.description )
        elif orig.description_list:
            page.description_list.extend( orig.description_list )
        if orig.seealso:
            page.seealso_list.append( orig.seealso )
        elif orig.seealso_list:
            page.seealso_list.extend( orig.seealso_list )
        if orig.authors:
            page.authors_list.append( orig.authors )
        elif orig.authors_list:
            page.authors_list.extend( orig.authors_list )
        if orig.copyright:
            page.copyright_list.append( orig.copyright )
        elif orig.copyright_list:
            page.copyright_list.extend( orig.copyright_list )

    pages = []
    combined = {}
    # pass 1: every leader page (no .into) becomes a combined family page
    for orig in pagelist:
        if orig.func.into: continue
        page = FunctionFamilyRefPage(orig)
        combined[orig.func.name] = page ; pages.append(page)
        page.refentry = orig.refentry # //refentry@id
        page.refentry_title = orig.refentrytitle # //refentryinfo/title
        page.refentrytitle = orig.refentrytitle # //refmeta/refentrytitle
        page.includes = orig.includes
        page.funcsynopsisinfo = orig.funcsynopsisinfo
        page.refpurpose = orig.refpurpose
        if orig.refhint:
            page.refhint_list.append( orig.refhint )
        merge_member(page, orig)
    #od
    # pass 2: fold every .into page into its leader's combined page
    for orig in pagelist:
        if not orig.func.into: continue
        if orig.func.into not in combined:
            warn("page for '"+orig.func.name+
                 "' has no target => '"+orig.func.into+"'")
            # merge into a detached page so the data is not mixed into
            # an unrelated leader (result is discarded, as before)
            page = FunctionFamilyRefPage(orig)
        else:
            page = combined[orig.func.into]
        merge_member(page, orig)
    #od
    return pages
#fu
combined_pages = docbook_pages_recombine(pagelist = refpage_list)
# -----------------------------------------------------------------------
class HeaderRefPage(RefPage):
    # per-header (one per include file) page; RefPage behavior unchanged
    pass
def docbook_refpages_perheader(page_list): # headerlist
    """Build one HeaderRefPage per mainheader from the leader pages.

    Returns a dict mapping header filename -> HeaderRefPage.  Relies on
    module globals s()/m() (regex helpers -- TODO confirm) and o
    (options); uses the Python-2-only `commands` module to read the
    %description section of the package .spec file.
    """
    header = {}
    for page in page_list:
        assert not page.func.into  # callers must pass leader pages only
        file = page.func.src.file.mainheader # short for the mainheader index
        if file not in header:
            # first page seen for this header: initialize metadata fields
            header[file] = HeaderRefPage(page.func)
            header[file].id = s(file, r"[^\w\.]","-")
            header[file].refentry = header[file].id
            header[file].refentryinfo = None
            header[file].refentry_date = page.refentry_date
            header[file].refentry_productname = (
                "the library "+page.refentry_productname)
            header[file].manvolnum = page.manvolnum
            header[file].refentrytitle = file
            header[file].funcsynopsis = ""
        if 1: # or += or if not header[file].refnamediv:
            header[file].refpurpose = " library "
            header[file].refname = header[file].id
        if not header[file].funcsynopsisinfo and page.funcsynopsisinfo:
            header[file].funcsynopsisinfo = page.funcsynopsisinfo
        if page.funcsynopsis:
            # collect every member function's prototype on the header page
            header[file].funcsynopsis += "\n"+page.funcsynopsis
        if not header[file].copyright and page.copyright:
            header[file].copyright = page.copyright
        if not header[file].authors and page.authors:
            header[file].authors = page.authors
        if not header[file].authors and page.authors_list:
            header[file].authors_list = page.authors_list
        if not header[file].description:
            # prefer the %description text of the package .spec file
            found = m(commands.getoutput("cat "+o.package+".spec"),
                r"(?s)\%description\b([^\%]*)\%")
            if found:
                header[file].description = found.group(1)
            elif not header[file].description:
                header[file].description = "<para>" + (
                    page.refentry_productname + " library") + "</para>";
            #fi
        #fi
    #od
    return header#list
#fu
def leaders(pagelist):
    """Return only the leader pages: those not folded into another page.

    (The original accumulated into a variable named ``list``, shadowing
    the builtin.)
    """
    return [page for page in pagelist if not page.func.into]
header_refpages = docbook_refpages_perheader(leaders(refpage_list))
# -----------------------------------------------------------------------
# printing the docbook file is a two-phase process - we spit out the
# leader pages first - later we add more pages with _refstart pointing
# to the leader page, so that xmlto will add the functions there. Only the
# leader page contains some extra info needed for troff page processing.
doctype = '<!DOCTYPE reference PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"'
doctype += "\n "
doctype += '"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd">'+"\n"
try:
    F = open(o.docbookfile,"w")
except IOError, error: # Python-2-only except syntax
    warn("can not open docbook output file: "+o.docbookfile, error)
else:
    # leader pages first, then the per-header pages, so that xmlto can
    # attach the member functions to their leader page
    print >> F, doctype, '<reference><title>Manual Pages</title>'
    for page in combined_pages:
        print >> F, page.refentry_text()
    #od
    for page in header_refpages.values():
        if not page.refentry: continue
        print >> F, "\n<!-- _______ "+page.id+" _______ -->",
        print >> F, page.refentry_text()
    #od
    print >> F, "\n",'</reference>',"\n"
    F.close()
#fi
# _____________________________________________________________________
try:
    F = open( o.dumpdocfile, "w")
except IOError, error: # Python-2-only except syntax
    warn ("can not open"+o.dumpdocfile,error)
else:
    # dump every parsed function as a pseudo-XML <fn> record (debug aid)
    for func in function_list:
        name = func.name
        print >> F, "<fn id=\""+name+"\">"+"<!-- FOR \""+name+"\" -->\n"
        for H in sorted_keys(func.dict()):
            print >> F, "<"+H+" name=\""+name+"\">",
            print >> F, str(func.dict()[H]),
            print >> F, "</"+H+">"
        #od
        print >> F, "</fn><!-- END \""+name+"\" -->\n\n";
    #od
    F.close();
#fi
if errors: sys.exit(errors)
| 39.758989 | 79 | 0.535784 | 18,156 | 0.443684 | 0 | 0 | 0 | 0 | 0 | 0 | 13,897 | 0.339606 |
c5844e0aad5d3be4e5309b96d1cf831066661e3d | 2,618 | py | Python | cpm/cli.py | tzabal/cpm | b8736ec5a72e2e9f22e73b237a814516545db3e0 | [
"Apache-2.0"
] | 14 | 2017-03-09T20:55:06.000Z | 2021-11-12T11:43:51.000Z | cpm/cli.py | tzabal/cpm | b8736ec5a72e2e9f22e73b237a814516545db3e0 | [
"Apache-2.0"
] | null | null | null | cpm/cli.py | tzabal/cpm | b8736ec5a72e2e9f22e73b237a814516545db3e0 | [
"Apache-2.0"
] | 6 | 2017-03-28T01:54:46.000Z | 2021-02-23T20:42:49.000Z | # Copyright 2015 Tzanetos Balitsaris
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os.path
import sys
import prettytable
import cpm
def process_arguments():
    """Parse and validate the CLI arguments; exit with status 1 on bad paths."""
    description = ('A program that implements the Critical Path Method '
                   'algorithm in order to schedule a set of project activities '
                   'at the minimum total cost with the optimum duration.')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        'project_file',
        help='a file that describes the project in JSON format')
    parser.add_argument(
        '-o', '--images-dir',
        help='a directory that the generated images will be placed in')
    arguments = parser.parse_args()
    if not os.path.isfile(arguments.project_file):
        sys.stderr.write('The project_file is not an existing regular file.\n')
        sys.exit(1)
    if arguments.images_dir and not os.path.isdir(arguments.images_dir):
        sys.stderr.write('The images_dir is not an existing directory.\n')
        sys.exit(1)
    return arguments
def main():
    # Entry point: validate the project file, run CPM and print results.
    # NOTE: the bare `print` statements below are Python-2-only syntax.
    arguments = process_arguments()
    try:
        project = cpm.validate(arguments.project_file)
    except cpm.ProjectValidationException as exc:
        sys.stderr.write(str(exc) + '\n')
        sys.exit(1)
    # normalize to a trailing-slash prefix ('' disables image output paths)
    images_dir = arguments.images_dir + '/' if arguments.images_dir else ''
    cpmnet = cpm.CriticalPathMethod(project)
    cpmnet.run_cpm()
    results, images, optimum_solution = cpmnet.get_results(images_dir)
    results_table = prettytable.PrettyTable([
        "Project Duration", "Critical Path(s)", "Direct Cost", "Indirect Cost", "Total Cost"
    ])
    for result in results:
        results_table.add_row([
            result['project_duration'], result['critical_paths'],
            result['direct_cost'], result['indirect_cost'], result['total_cost']
        ])
    print results_table
    print 'The optimum solution is {} for total cost and {} for project duration.'\
        .format(optimum_solution[0], optimum_solution[1])
if __name__ == '__main__':
main()
| 35.378378 | 92 | 0.699771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,217 | 0.464859 |
c584ba722dcfc049b1e65f8d0be9570ef5cdc8bf | 1,566 | py | Python | KivyApp/login.py | yeltayzhastay/jadenapp | 41f593fb897cb6b4e17aeeb1dff4287a9e89f4d9 | [
"MIT"
] | null | null | null | KivyApp/login.py | yeltayzhastay/jadenapp | 41f593fb897cb6b4e17aeeb1dff4287a9e89f4d9 | [
"MIT"
] | null | null | null | KivyApp/login.py | yeltayzhastay/jadenapp | 41f593fb897cb6b4e17aeeb1dff4287a9e89f4d9 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import pickle
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
class Jaden:
    """TF-IDF question matcher over the Q&A rows of dataset/tarih.csv."""

    # class-level defaults; real values are loaded in __init__
    _model = None
    _vector = None
    _vocabulary = None

    def __init__(self):
        """Load the pickled vectorizer, the TF-IDF matrix and the Q&A CSV."""
        self._model = pickle.load(open('_model.sav', 'rb'))
        self._vector = pickle.load(open('_vectorized.sav', 'rb'))
        with open('dataset/tarih.csv', newline='', encoding='utf8') as f:
            reader = csv.reader(f)
            self._vocabulary = list(reader)

    def find_answer(self, question):
        """Return the answer column of the 5 rows most similar to *question*.

        BUG FIX: the original referenced the undefined module globals
        ``_model`` and ``_vector`` (a NameError at runtime); it now uses
        the instance attributes loaded in __init__.
        """
        cos_sim = linear_kernel(self._model.transform([question]),
                                self._vector).flatten()
        top = np.ndarray.argsort(-cos_sim)[:5]
        result = []
        for i in top:
            # i+1 skips the CSV header row; column 1 holds the answer text
            result.append(self._vocabulary[i + 1][1])
        return result
class LoginScreen(GridLayout):
    """Two-column Kivy grid with a username field and a masked password field."""
    def __init__(self, **kwargs):
        super(LoginScreen, self).__init__(**kwargs)
        self.cols = 2
        self.add_widget(Label(text='User Name'))
        self.username = TextInput(multiline=False)
        self.add_widget(self.username)
        self.add_widget(Label(text='password'))
        # password=True masks the typed characters
        self.password = TextInput(password=True, multiline=False)
        self.add_widget(self.password)
class MyApp(App):
    """Kivy application whose root widget is the login form."""
    def build(self):
        return LoginScreen()
MyApp().run() | 27.473684 | 81 | 0.659642 | 1,218 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.054278 |
c584ef51737ff6abecfeaa9cb9bcdc82e84361a4 | 369 | py | Python | yoyoman01_traject/scripts/set_state_model.py | Gepetto/yoyoman01_robot | cd2fccb9378a1208605de88c88ec7b1482eaa271 | [
"BSD-2-Clause"
] | null | null | null | yoyoman01_traject/scripts/set_state_model.py | Gepetto/yoyoman01_robot | cd2fccb9378a1208605de88c88ec7b1482eaa271 | [
"BSD-2-Clause"
] | 1 | 2018-09-28T14:13:45.000Z | 2018-10-01T12:42:07.000Z | yoyoman01_traject/scripts/set_state_model.py | Gepetto/yoyoman01_robot | cd2fccb9378a1208605de88c88ec7b1482eaa271 | [
"BSD-2-Clause"
] | 2 | 2018-04-13T07:29:15.000Z | 2018-05-24T14:19:07.000Z | #!/usr/bin/env python
import rospy
import roslaunch
if __name__ == "__main__":
rospy.sleep(6.5)
uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
roslaunch.configure_logging(uuid)
launch = roslaunch.parent.ROSLaunchParent(uuid,["/home/ntestar/catkin_ws/src/yoyoman01_robot/yoyoman01_traject/launch/set_state.launch"])
launch.start()
rospy.spin()
| 30.75 | 139 | 0.772358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.319783 |
c585553f44b153f6ed94aa2150ec4058763335d6 | 744 | py | Python | quiz/templatetags/quiz_tags.py | Gagan-Shenoy/sushiksha-website | a41991df1a1d46336cbf019e83add5df56dde363 | [
"Apache-2.0"
] | 31 | 2020-11-07T03:23:55.000Z | 2022-03-16T18:21:45.000Z | quiz/templatetags/quiz_tags.py | Gagan-Shenoy/sushiksha-website | a41991df1a1d46336cbf019e83add5df56dde363 | [
"Apache-2.0"
] | 124 | 2020-11-07T03:27:49.000Z | 2022-03-20T05:28:06.000Z | quiz/templatetags/quiz_tags.py | Gagan-Shenoy/sushiksha-website | a41991df1a1d46336cbf019e83add5df56dde363 | [
"Apache-2.0"
] | 44 | 2020-11-09T04:39:39.000Z | 2022-03-12T09:48:19.000Z | from django import template
register = template.Library()
@register.inclusion_tag('quiz/correct_answer.html', takes_context=True)
def correct_answer_for_all(context, question):
"""
processes the correct answer based on a given question object
if the answer is incorrect, informs the user
"""
answers = question.get_answers()
incorrect_list = context.get('incorrect_questions', [])
if question.id in incorrect_list:
user_was_incorrect = True
else:
user_was_incorrect = False
return {'previous': {'answers': answers},
'user_was_incorrect': user_was_incorrect}
@register.filter
def answer_choice_to_string(question, answer):
    """Template filter: delegate formatting of *answer* to the question model."""
    return question.answer_choice_to_string(answer)
c5858aecb37322f35c3a4e926766151d51d2a7ee | 12,867 | py | Python | pixel_gps.py | DerekGloudemans/tensorflow-yolov4-tflite | 1faf48015f7587ce417d3623566926a5c8d30b42 | [
"MIT"
] | 1 | 2021-01-29T15:09:35.000Z | 2021-01-29T15:09:35.000Z | pixel_gps.py | DerekGloudemans/tensorflow-yolov4-tflite | 1faf48015f7587ce417d3623566926a5c8d30b42 | [
"MIT"
] | null | null | null | pixel_gps.py | DerekGloudemans/tensorflow-yolov4-tflite | 1faf48015f7587ce417d3623566926a5c8d30b42 | [
"MIT"
] | 1 | 2021-01-29T15:09:58.000Z | 2021-01-29T15:09:58.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 14:14:45 2020
@author: Nikki
"""
import numpy as np
import cv2
import transform as tform
import sys
import math
import scipy.spatial
import markers
###---------------------------------------------------------------------------
# Allows video to be initialized using a string
#
# returns - video_path - path to video to be used
# returns - GPS_pix - matrix to convert from GPS to pixel
# - pix_GPS - matrix to convert from pixel to GPS
# - origin - approximate camera location in GPS
###
def sample_select(name):
    """Resolve a camera name to its video path and coordinate transforms.

    Returns (video_path, GPS_pix, pix_GPS, origin).  Raises ValueError
    for an unknown name (the original fell through and crashed with an
    UnboundLocalError on ``video_path``).
    """
    paths = {
        'aot3': 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample3.mp4',
        'mrb3': 'C:/Users/Nikki/Documents/work/inputs-outputs/video/20190422_153844_DA4A.mkv',
        'aot1': 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample1_1.mp4',
        'aot2': 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample2_1.mp4',
    }
    if name not in paths:
        raise ValueError("Camera name invalid: " + name)
    video_path = paths[name]
    GPS_pix, pix_GPS, origin = get_transform(name)
    return video_path, GPS_pix, pix_GPS, origin
###---------------------------------------------------------------------------
# Used to find transformation matrices between GPS and pixel space and vice versa.
#
# returns - GPS_pix - matrix to convert from GPS to pixel
# - pix_GPS - matrix to convert from pixel to GPS
###
def get_transform(name):
    """Build the GPS<->pixel transforms from the camera's marker set.

    Returns (GPS_pix, pix_GPS, origin) where origin is the approximate
    camera location in GPS.  Raises ValueError for an unknown camera
    name (the original only printed a message and then crashed with a
    NameError on the unbound marker arrays).
    """
    marker_sets = {
        'mrb3': markers.mrb3_markers,
        'aot1': markers.aot_1_markers,
        'aot2': markers.aot_2_markers,
        'aot3': markers.aot_3_markers,
    }
    if name not in marker_sets:
        raise ValueError("Camera name invalid: " + name)
    x, y, origin = marker_sets[name]()
    GPS_pix = tform.get_best_transform(x, y)
    pix_GPS = tform.get_best_transform(y, x)
    return(GPS_pix, pix_GPS, origin)
###---------------------------------------------------------------------------
# Given photo points at people's feet, draws '6 foot' ellipse around them.
# Most useful of these functions for implementing with yolo bounding box points.
#
# returns - img - input frame with ellipses drawn at specified points
###
def draw_radius(frame, pts, GPS_pix, pix_GPS, origin):
    """Draw a '6 foot' ellipse around each foot point.

    Returns the annotated frame and the count of too-close detections.
    This is the main entry point for use with yolo bounding-box points.
    """
    axis_pts = four_pts(pts, pix_GPS, GPS_pix, origin)
    tree = load_tree(pts, pix_GPS)
    return draw_ellipse(frame, axis_pts, pts, tree, pix_GPS)
###---------------------------------------------------------------------------
# Given an array of photo pts and conversion matrices, converts to GPS, finds
# defining points of 6 ft circle at camera angle, and converts back to pixel coords.
#
# returns - final - array of arrays of 4 pixel coordinates to be used to define each ellipse's axes
###
def four_pts(pts, pix_GPS, GPS_pix, origin):
    """For each pixel point, compute the four pixel coordinates lying six
    feet away along the camera-relative bearings; these define each
    ellipse's axes in the image plane.
    """
    # pixel -> GPS
    gps = tform.transform_pt_array(pts, pix_GPS)
    # four points six feet out per detection, at 90-degree increments
    # starting from the bearing to the camera
    ring = [six_ft(pt, angle)
            for pt in gps
            for angle in calc_bearing(pt, origin)]
    final = np.squeeze(np.asarray(np.array([ring])))
    # GPS -> pixel
    return tform.transform_pt_array(final, GPS_pix)
###---------------------------------------------------------------------------
# Given a point, calculates it's bearing in relation to the approximate camera location.
# This enables GPS circle points to be found such that they define an ellipse within pixel
# plane that appears properly scaled. Uses haversine formula.
# Formula from: https://www.movable-type.co.uk/scripts/latlong.html
#
# returns - array of 4 bearings in degrees, clockwise from north. First is bearing
# between camera and given pt)
###
def calc_bearing(pt, origin):
    """Bearings (degrees clockwise from north) used as ellipse axis directions.

    The first element is the initial bearing from *origin* (the
    approximate camera location) to *pt*; the rest are that bearing
    plus 90/180/270 degrees, all wrapped into [0, 360).
    Formula from: https://www.movable-type.co.uk/scripts/latlong.html
    """
    lat1, lon1 = math.radians(origin[0]), math.radians(origin[1])
    lat2, lon2 = math.radians(pt[0]), math.radians(pt[1])
    y = math.sin(lon2 - lon1) * math.cos(lat2)
    x = (math.cos(lat1) * math.sin(lat2) -
         math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1))
    base = math.degrees(math.atan2(y, x))
    return [(base + quarter * 90) % 360 for quarter in range(4)]
###---------------------------------------------------------------------------
# Loads array of pts into a ckd tree for to enable easy finding of nearest pt
#
# returns - ckd tree
###
def load_tree(pts, pix_GPS):
    """Convert pixel points to GPS and index them in a cKDTree so each
    detection's nearest neighbour can be queried quickly."""
    gps = tform.transform_pt_array(pts, pix_GPS)
    mytree = scipy.spatial.cKDTree(gps)
    return mytree
###---------------------------------------------------------------------------
# Given array of defining points of several ellipses (endpoints of axes) and
# corresponding center points, draws ellipses on given image
#
# returns - all_img - given image with ellipses drawn onto it
###
def draw_ellipse(frame, pts, centers, mytree, pix_GPS):
    """Draw one semi-transparent ellipse per detection.

    `pts` holds 4 axis-endpoint pixel coordinates per detection (in
    groups of four); `centers` the corresponding center points.  The
    fill color encodes the distance to the nearest other detection
    (BGR: <6 ft blue + counted, <8 ft orange, <10 ft yellow, else
    green).  Returns the blended image and the <6 ft count.
    """
    #define qualities of the ellipse
    thickness = -1
    line_type = 8
    #set transparency
    alpha = 0.25
    #create separate image for ellipses to be drawn into
    ellipses = frame.copy()
    #iterate through list of ellipse points and centers, drawing each into ellipse image
    i = 0
    count = 0
    gps_centers = tform.transform_pt_array(centers, pix_GPS)
    while i < pts.shape[0]:
        # a/c and b/d are opposite endpoints of the two ellipse axes
        a = pts[i]
        b = pts[i + 1]
        c = pts[i + 2]
        d = pts[i + 3]
        minor = int((math.sqrt(math.pow((c[0]-a[0]), 2) + math.pow((c[1]-a[1]), 2)))/2)
        major = int((math.sqrt(math.pow((d[0]-b[0]), 2) + math.pow((d[1]-b[1]), 2)))/2)
        if centers.size <= 2:
            # single (x, y) center: promote to 2-D so indexing below works
            centers = np.array([centers])
        center = centers[i//4]
        x = int(center[0])
        y = int(center[1])
        if centers.size > 2:
            # distance to nearest *other* detection (k=2: first hit is self)
            # NOTE(review): assumes the tree holds at least 2 points -- confirm
            gps_center = gps_centers[i//4]
            dist, ind = mytree.query(gps_center, k=2)
            closest = mytree.data[ind[1]]
            dist = GPS_to_ft(gps_center, closest)
            if dist < 6:
                cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 0, 0), thickness, line_type)
                count = count + 1
            elif dist < 8:
                cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 140, 0), thickness, line_type)
            elif dist < 10:
                cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 255, 0), thickness, line_type)
            else:
                cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (0,255,0), thickness, line_type)
        else:
            # lone detection: nothing to compare against, draw green
            cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (0,255,0), thickness, line_type)
        i = i + 4
    #combine original image and ellipse image into one
    all_img = cv2.addWeighted(ellipses, alpha, frame, 1-alpha, 0)
    return all_img, count
###---------------------------------------------------------------------------
# Given a GPS point and a bearing, finds point six feet away in that direction,
# using haversine formula.
# Formula from: https://www.movable-type.co.uk/scripts/latlong.html
#
# returns - GPS coord 6 ft away
###
def six_ft(pt1, b):
    """Destination point six feet from *pt1* along bearing *b* (degrees),
    via the haversine destination formula.
    Formula from: https://www.movable-type.co.uk/scripts/latlong.html
    """
    lat1 = math.radians(pt1[0])
    lon1 = math.radians(pt1[1])
    bearing = math.radians(b)
    earth_radius_ft = 20902231
    angular_dist = 6.0 / earth_radius_ft
    lat2 = math.asin(math.sin(lat1) * math.cos(angular_dist) +
                     math.cos(lat1) * math.sin(angular_dist) * math.cos(bearing))
    lon2 = lon1 + math.atan2(
        math.sin(bearing) * math.sin(angular_dist) * math.cos(lat1),
        math.cos(angular_dist) - math.sin(lat1) * math.sin(lat2))
    # back to GPS-standard degrees
    return (math.degrees(lat2), math.degrees(lon2))
###---------------------------------------------------------------------------
# Given two GPS points, finds distance in ft between them, calulated using
# haversine formula.
#
# returns - distance in ft between given points
###
def GPS_to_ft(pt1, pt2):
    """Great-circle distance in feet between two (lat, lon) points.

    Uses the haversine formula.  BUG FIX: the original computed
    ``sin(((dlat)/2)**2)`` -- the sine OF the squared half-angle --
    instead of ``sin((dlat)/2)**2``.  That only coincidentally
    approximates the correct value for very small separations (where
    sin(x) ~ x) and diverges for larger ones.
    """
    radius = 20902231  # earth's radius in ft
    la1 = math.radians(pt1[0])
    la2 = math.radians(pt2[0])
    lo1 = math.radians(pt1[1])
    lo2 = math.radians(pt2[1])
    h = (math.sin((la2 - la1) / 2) ** 2 +
         math.cos(la1) * math.cos(la2) * math.sin((lo2 - lo1) / 2) ** 2)
    return 2 * radius * math.asin(math.sqrt(h))
###---------------------------------------------------------------------------
# Following functions are not utilized in video processing code, but were helpful
# during development
###---------------------------------------------------------------------------
###---------------------------------------------------------------------------
# Returns pixel coordinate value of location left-clicked on screen
# Based on:
# https://stackoverflow.com/questions/60066334/get-pixel-coordinates-using-mouse-in-cv2-video-frame-with-python
def get_pixel_coord(video_path):
    """Interactive helper: print the pixel coordinate of every left-click
    on the playing video; press 'q' to quit."""
    try:
        video_capture = cv2.VideoCapture(video_path)
        def mouseHandler(event, x, y, flags, params):
            # left button down -> dump the click position to stdout
            if event == cv2.EVENT_LBUTTONDOWN:
                print(x, y)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("result", mouseHandler)
        while(True):
            # Capture frame-by-frame
            _, frame = video_capture.read()
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            cv2.imshow("result", frame)
        video_capture.release()
        cv2.destroyAllWindows()
    except:
        # NOTE(review): bare except silently swallows every error
        # (including KeyboardInterrupt) -- intended only as cleanup here
        video_capture.release()
        cv2.destroyAllWindows()
###---------------------------------------------------------------------------
# Given points, draws circles around them
###
def make_circles(frame, centers, size):
    """Draw a filled red circle at each center point, with a radius
    scaled from the first component of *size*."""
    radius = size[0] // 128
    fill = -1        # negative thickness -> filled circle
    connectivity = 8
    for center in centers:
        spot = (int(center[0]), int(center[1]))
        cv2.circle(frame, spot, radius, (0, 0, 255), fill, connectivity)
###---------------------------------------------------------------------------
# Draws 4 ellipses on video, utilizing most functions in this doc.
###
def test():
    """Dev smoke test: draws 4 ellipses on a sample video, exercising
    most functions in this module."""
    # define where video comes from
    # video_path = './data/AOTsample3.mp4'
    video_path = './data/20190422_153844_DA4A.mkv'
    # get transfer function from known GPS and pixel locations
    # NOTE(review): stale call -- get_transform() now requires a camera
    # name, and draw_radius() below is missing its `origin` argument;
    # this helper needs updating before it can run
    GPS_pix, pix_GPS = get_transform()
    # load in sample pts
    # a = np.array([36.148342, -86.799332]) #closest lamp
    # b = np.array([36.148139, -86.799375]) #lamp across street, right
    # c = np.array([36.148349, -86.799135]) #closest left corner of furthest crosswalk dash to right
    # d = np.array([36.147740, -86.799218]) #sixth tree down the street
    a = np.array([36.144187, -86.799707]) #far left street pole
    b = np.array([36.143990, -86.799594]) #pole by bike sign
    c = np.array([36.143997, -86.800180]) #corner of sidewalk
    d = np.array([36.144203, -86.800149]) #right of sidewalk stripe closest to camera
    x = np.array([a,b,c,d])
    pts = tform.transform_pt_array(x, GPS_pix)
    print(pts)
    # start video
    print("Video from: ", video_path )
    vid = cv2.VideoCapture(video_path)
    try:
        while True:
            # skip desired number of frames to speed up processing
            for i in range (10):
                vid.grab()
            # read frame
            return_value, frame = vid.read()
            # if frame doesn't exist, exit
            if not return_value:
                cv2.destroyWindow('result')
                print('Video has ended')
                break
            # draw ellipse
            img, count = draw_radius(frame, pts, GPS_pix, pix_GPS)
            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", img)
            if cv2.waitKey(1) & 0xFF == ord('q'): break
        # end video, close viewer, stop writing to file
        vid.release()
        cv2.destroyAllWindows()
    # if interrupted, end video, close viewer, stop writing to file
    except:
        # NOTE(review): bare except -- swallows all errors during playback
        print("Unexpected error:", sys.exc_info()[0])
        vid.release()
        cv2.destroyAllWindows()
#test() | 32.087282 | 120 | 0.550245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,666 | 0.440351 |
c5864362375a81a891800b2885457459c5fea69d | 85 | py | Python | src/dal_select2/__init__.py | pandabuilder/django-autocomplete-light | 41f699aadaa6214acd5d947b717394b1237a7223 | [
"MIT"
] | null | null | null | src/dal_select2/__init__.py | pandabuilder/django-autocomplete-light | 41f699aadaa6214acd5d947b717394b1237a7223 | [
"MIT"
] | null | null | null | src/dal_select2/__init__.py | pandabuilder/django-autocomplete-light | 41f699aadaa6214acd5d947b717394b1237a7223 | [
"MIT"
] | null | null | null | """Select2 support for DAL."""
# default_app_config = 'dal_select2.apps.DefaultApp'
| 21.25 | 52 | 0.741176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.964706 |
c5873bfe530e5cfee8ae3643ecee6214da8d25dd | 714 | py | Python | AssetsApp/urls.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | AssetsApp/urls.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | AssetsApp/urls.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
#Assets Section
path('all', views.AssetsView.all_assets, name='AssetsList'),
path('lookup', views.AssetsView.lookup, name='Lookup_Asset'),
path('add', views.AssetsView.add_asset, name='Add_Asset'),
path('<str:asset_id>/statement', views.AssetsView.statement, name='Asset_Statement'),
#Categories
path('categories/add_category', views.AssetsView.add_category, name='Add_Assets_Category'),
path('categories/add_subcategory', views.AssetsView.add_sub_category, name='Add_Assets_SubCategory'),
#Transactions
path('transactions', views.TransactionView.all_transactions, name='Assets_Transactions')
] | 42 | 105 | 0.736695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.378151 |
c5874d6ddb540e4365637af3b4bd8fde3429502b | 1,493 | py | Python | migrations/versions/597e346723ee_.py | uk-gov-mirror/alphagov.digitalmarketplace-api | 5a1db63691d0c4a435714837196ab6914badaf62 | [
"MIT"
] | 25 | 2015-01-14T10:45:13.000Z | 2021-05-26T17:21:41.000Z | migrations/versions/597e346723ee_.py | uk-gov-mirror/alphagov.digitalmarketplace-api | 5a1db63691d0c4a435714837196ab6914badaf62 | [
"MIT"
] | 641 | 2015-01-15T11:10:50.000Z | 2021-06-15T22:18:42.000Z | migrations/versions/597e346723ee_.py | uk-gov-mirror/alphagov.digitalmarketplace-api | 5a1db63691d0c4a435714837196ab6914badaf62 | [
"MIT"
] | 22 | 2015-06-13T15:37:45.000Z | 2021-08-19T23:40:49.000Z | """empty message
Revision ID: 597e346723ee
Revises: 56b57f01c4b4
Create Date: 2015-03-25 16:36:11.552342
"""
# revision identifiers, used by Alembic.
revision = '597e346723ee'
down_revision = '56b57f01c4b4'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import column, table
from sqlalchemy import String
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('archived_services', sa.Column('status', sa.String(), nullable=True))
op.add_column('services', sa.Column('status', sa.String(), nullable=True))
op.create_check_constraint(
"ck_services_status",
"services",
"status in ('disabled', 'enabled', 'published')"
)
op.create_check_constraint(
"ck_archived_services_status",
"archived_services",
"status in ('disabled', 'enabled', 'published')"
)
services = table('services', column('status', String))
archived_services = table('archived_services', column('status', String))
op.execute(
services.update(). \
values({'status': op.inline_literal('enabled')})
)
op.execute(
archived_services.update(). \
values({'status': op.inline_literal('enabled')})
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'status')
op.drop_column('archived_services', 'status')
### end Alembic commands ###
| 25.741379 | 87 | 0.660415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.465506 |
c58a05ca2bcf72d1989a9ba98a67911ef7b9e543 | 4,813 | py | Python | cloudify_gcp/dns/dns.py | cloudify-cosmo/cloudify-gcp-plugin | c70faee0555070f7fc67f0001395eaafb681b23c | [
"Apache-2.0"
] | 4 | 2016-10-24T17:42:07.000Z | 2020-05-31T00:34:07.000Z | cloudify_gcp/dns/dns.py | cloudify-cosmo/cloudify-gcp-plugin | c70faee0555070f7fc67f0001395eaafb681b23c | [
"Apache-2.0"
] | 35 | 2015-04-30T20:14:01.000Z | 2022-02-03T21:35:54.000Z | cloudify_gcp/dns/dns.py | cloudify-cosmo/cloudify-gcp-plugin | c70faee0555070f7fc67f0001395eaafb681b23c | [
"Apache-2.0"
] | 13 | 2015-04-17T16:42:03.000Z | 2021-06-24T04:12:14.000Z | # #######
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
from .. import constants
from .. import utils
from ..gcp import (
check_response,
GoogleCloudPlatform,
)
class DNSZone(GoogleCloudPlatform):
def __init__(self,
config,
logger,
name,
dns_name=None,
additional_settings=None,
):
"""
Create DNSZone object
:param config: gcp auth file
:param logger: logger object
:param name: internal name for the dns zone
:param dns_name: FQDN for the zone
"""
super(DNSZone, self).__init__(
config,
logger,
utils.get_gcp_resource_name(name),
discovery='dns',
scope='https://www.googleapis.com/auth/ndev.clouddns.readwrite',
additional_settings=additional_settings,
)
self.name = name
self.dns_name = dns_name
@check_response
def create(self):
"""
Create GCP DNS Zone.
Global operation.
:return: REST response with operation responsible for the zone
creation process and its status
"""
self.logger.info("Create DNS Zone '{0}'".format(self.name))
return self.discovery.managedZones().create(
project=self.project,
body=self.to_dict()).execute()
@check_response
def delete(self):
"""
Delete GCP DNS Zone.
Global operation
:return: REST response with operation responsible for the zone
deletion process and its status
"""
self.logger.info("Delete DNS Zone '{0}'".format(self.name))
return self.discovery.managedZones().delete(
project=self.project,
managedZone=self.name).execute()
def to_dict(self):
body = {
'description': 'Cloudify generated DNS Zone',
constants.NAME: self.name,
'dnsName': self.dns_name,
}
self.body.update(body)
return self.body
def list_records(self, name=None, type=None):
rrsets = []
resources = self.discovery.resourceRecordSets()
request = resources.list(
project=self.project,
managedZone=self.name,
type=type,
name='.'.join([name, self.dns_name]),
)
while request is not None:
response = request.execute()
rrsets.extend(response['rrsets'])
request = resources.list_next(
previous_request=request,
previous_response=response)
return rrsets
def get(self):
return self.discovery.managedZones().get(
project=self.project,
managedZone=self.name).execute()
@operation(resumable=True)
@utils.throw_cloudify_exceptions
def create(name, dns_name, additional_settings=None, **kwargs):
if utils.resource_created(ctx, constants.NAME):
return
gcp_config = utils.get_gcp_config()
if not name:
name = ctx.node.id
if not dns_name:
dns_name = name
name = utils.get_final_resource_name(name)
dns_zone = DNSZone(
gcp_config,
ctx.logger,
name,
dns_name,
additional_settings=additional_settings,
)
resource = utils.create(dns_zone)
ctx.instance.runtime_properties.update(resource)
@operation(resumable=True)
@utils.retry_on_failure('Retrying deleting dns zone')
@utils.throw_cloudify_exceptions
def delete(**kwargs):
gcp_config = utils.get_gcp_config()
name = ctx.instance.runtime_properties.get(constants.NAME)
if name:
dns_zone = DNSZone(
gcp_config,
ctx.logger,
name)
utils.delete_if_not_external(dns_zone)
if not utils.is_object_deleted(dns_zone):
ctx.operation.retry('Zone is not yet deleted. Retrying:',
constants.RETRY_DEFAULT_DELAY)
ctx.instance.runtime_properties.pop(constants.NAME, None)
| 29.347561 | 76 | 0.606482 | 2,696 | 0.56015 | 0 | 0 | 2,155 | 0.447746 | 0 | 0 | 1,427 | 0.296489 |
c58b105434ef0739e6e7fc842f2dba276d5bf04c | 8,974 | py | Python | SaIL/network/supervised_regression_network.py | yonetaniryo/SaIL | c7404024c7787184c3638e9730bd185373ed0bf6 | [
"BSD-3-Clause"
] | 12 | 2018-05-18T19:29:09.000Z | 2020-05-15T13:47:12.000Z | SaIL/network/supervised_regression_network.py | yonetaniryo/SaIL | c7404024c7787184c3638e9730bd185373ed0bf6 | [
"BSD-3-Clause"
] | 1 | 2018-05-18T19:36:42.000Z | 2018-07-20T03:03:13.000Z | SaIL/network/supervised_regression_network.py | yonetaniryo/SaIL | c7404024c7787184c3638e9730bd185373ed0bf6 | [
"BSD-3-Clause"
] | 10 | 2018-01-11T21:23:40.000Z | 2021-11-10T04:38:07.000Z | #!/usr/bin/env python
"""Generic network class for supervised regression
Created on: March 25, 2017
Author: Mohak Bhardwaj"""
from collections import defaultdict
from planning_python.heuristic_functions.heuristic_function import EuclideanHeuristicNoAng, ManhattanHeuristicNoAng, DubinsHeuristic
import numpy as np
import random
class SupervisedRegressionNetwork():
def __init__(self ,\
output_size ,\
input_size ,\
learning_rate = 0.001 ,\
batch_size = 32 ,\
training_epochs = 15 ,\
summary_file = "learner_1" ,\
mode = "cpu" ,\
seed_val = 1234 ,\
graph_type = "XY"):
self.output_size = output_size
self.input_size = input_size
self.learning_rate = learning_rate
self.batch_size = batch_size
self.training_epochs = training_epochs
self.display_step = 1
self.summary_dir_train = os.path.join(os.path.abspath('saved_data/summaries'), summary_file+'_train')
self.summary_dir_test = os.path.join(os.path.abspath('saved_data/summaries'), summary_file+'_test')
print self.summary_dir_test
print self.summary_dir_train
self.seed_val = seed_val
self.input_shape = [self.input_size]
if mode == "gpu":
self.device = '/gpu:0'
else:
self.device = '/cpu:0'
global tf
global tflearn
import tensorflow as tf
import tflearn
# config = tf.ConfigProto()
# # config.device_count = {'GPU': 0}
# # config.log_device_placement=True
# config.allow_soft_placement=True
# config.gpu_options.allow_growth = True
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))#, log_device_placement=True))
#Heuristic Functions that will be helpful in calculating features
self.hs = [EuclideanHeuristicNoAng(), ManhattanHeuristicNoAng()]
#Some values that will help with normalization of features
if self.graph_type == "XY":
self.dist_norm = (lattice_params['x_lims'][1]-lattice_params['x_lims'][0])*(lattice_params['y_lims'][1]-lattice_params['y_lims'][0])#Max possible distance between two nodes
self.max_children = self.lattice.children.shape[0]
self.coord_norm = np.array([self.lattice.num_cells[0], self.lattice.num_cells[1]], dtype=np.float)
elif self.graph_type == "XYH":
self.dist_norm = None #Max possible dubin's distance between two nodes
self.max_children = len(self.lattice.children[0])
self.coord_norm = np.array([self.lattice.num_cells[0], self.lattice.num_cells[1], self.lattice.num_headings], dtype=np.float)
self.hs.append(DubinsHeuristic(lattice_params['radius']))
self.norm_start_n = np.array(self.start_n,dtype=np.float)/self.coord_norm
self.norm_goal_n = np.array(self.goal_n,dtype=np.float)/self.coord_norm
#Dictionaries that keep track of important values for feature calculation
self.cost_so_far = defaultdict(lambda: np.inf) #For each node, this is the cost of the shortest path to the start
self.num_invalid_predecessors = defaultdict(lambda: -1) #For each node, this is the number of invalid predecessor edges (including siblings of parent)
self.num_invalid_children = defaultdict(lambda: -1) #For each node, this is the number of invalid children edges
self.num_invalid_siblings = defaultdict(lambda: -1) #For each node, this is the number of invalid siblings edges (from best parent so far)
self.num_invalid_grand_children = defaultdict(lambda: -1) #For each node, this is the number of invalid grandchildren edges (seen so far)
self.depth_so_far = defaultdict(lambda: -1) #For each node, this is the depth of the node along the tree(along shortest path)
with tf.device(self.device):
self.graph_ops = self.init_graph()
self.init_op = tf.global_variables_initializer()
# _, self.episode_stats_vars = self.build_summaries()
# self.summary_ops = tf.summary.merge_all()
# print('Here')
# self.test_writer = tf.summary.FileWriter(self.summary_dir_test, self.sess.graph)
# print('Here2')
# self.train_writer = tf.summary.FileWriter(self.summary_dir_train, self.sess.graph)
# print('Her3')
self.sess.run(self.init_op)
print('network created and initialized')
def create_network(self):
"""Constructs and initializes core network architecture"""
state_input = tf.placeholder(tf.float32, [None] + self.input_shape)
net = tf.py_func(self.get_feature_vec, state_input, tf.float32, stateful=True, name="feature_calc")
net = tflearn.fully_connected(state_input, 100, activation='relu')
net = tflearn.fully_connected(net, 50, activation ='relu')
output = tflearn.fully_connected(net, self.output_size, activation = 'linear')
return state_input, output
def init_graph(self):
"""Overall architecture including target network,
gradient ops etc"""
state_input, output = self.create_network()
network_params = tf.trainable_variables()
target = tf.placeholder(tf.float32, [None] + [self.output_size])
cost = tf.reduce_sum(tf.pow(output - target, 2))/(2*self.batch_size)
optimizer = tf.train.RMSPropOptimizer(learning_rate = self.learning_rate)
train_net = optimizer.minimize(cost, var_list = network_params)
saver = tf.train.Saver()
graph_operations = {"s": state_input,\
"output": output,\
"target": target,\
"cost": cost,\
"train_net": train_net,\
"network_params": network_params,\
"saver": saver}
return graph_operations
def get_feature_vec(self, node):
"""Given a node, calculate the features for that node"""
feature_vec = [self.norm_start_n, self.norm_goal_n]
feature_vec.append(node,/self.coord_norm)
for h in self.heuristic_functions: feature_vec.append(h(node, self.goal_n)/self.dist_norm) #Normalize the distances
feature_vec.append(self.num_invalid_predecessors[node]/self.depth_so_far[node]*self.max_children) #Normalized invalid predecessors
feature_vec.append(self.num_invalid_siblings/self.max_children)
feature_vec.append(self.num_invalid_children/self.max_children)
feature_vec.append(self.num_invalid_grand_children/2*self.max_children)
return feature_vec
def train(self, database):
#Shuffle the database
random.shuffle(database)
for epoch in xrange(self.training_epochs):
# random.shuffle(database)
avg_cost = 0.
total_batch = int(len(database)/self.batch_size)
for i in xrange(total_batch):
batch_x, batch_y = self.get_next_batch(database, i)
#Run optimization op(backprop) and cost op(to get loss value)
_, c = self.sess.run([self.graph_ops['train_net'], self.graph_ops['cost']],\
feed_dict = {self.graph_ops['s']:batch_x,\
self.graph_ops['target']:batch_y})
#Compute Average Loss
avg_cost+= c/total_batch
#Display logs per epoch
if epoch%self.display_step == 0:
print "epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(np.sqrt(avg_cost))
print('optimization finished!')
def get_best_node(self, obs):
"""takes as input an open list and returns the best node to be expanded"""
return None
def get_q_value(self, obs):
obs = obs.reshape(self.input_shape)
output = self.graph_ops['output'].eval(session=self.sess, feed_dict={self.graph_ops['s']:[obs]})
return output
def save_params(self, file_name):
#file_path = os.path.join(os.path.abspath('saved_data/saved_models'), file_name +'.ckpt')
print(file_name)
save_path = self.graph_ops['saver'].save(self.sess, file_name)
print("Model saved in file: %s" % file_name)
return
def load_params(self, file_name):
#file_path = os.path.join(os.path.abspath('saved_data/saved_models'), file_name +'.ckpt')
self.graph_ops['saver'].restore(self.sess, file_name)
print('Weights loaded from file %s'%file_name)
def get_params(self):
return self.graph_ops['network_params']
def set_params(self, input_params):
[self.graph_ops['network_params'].assign(input_params[i]) for i in range(len(input_params))]
def get_next_batch(self, database, i):
batch = database[i*self.batch_size: (i+1)*self.batch_size]
batch_x = np.array([_[0] for _ in batch])
batch_y = np.array([_[1] for _ in batch])
new_shape_ip = [self.batch_size] + self.input_shape
new_shape_op = [self.batch_size] + [self.output_size]
batch_x = batch_x.reshape(new_shape_ip)
batch_y = batch_y.reshape(new_shape_op)
return batch_x, batch_y
def reset(self):
self.sess.run(self.init_op)
| 45.323232 | 178 | 0.676621 | 8,642 | 0.963004 | 0 | 0 | 0 | 0 | 0 | 0 | 2,506 | 0.279251 |
c58b2acf5e308231e4fc666a15cea8491b5c0053 | 187 | py | Python | netsuite/constants.py | cart-com/netsuite | 5a4cbbea26c6584348ebea2b4d6de0b9607cea0c | [
"MIT"
] | null | null | null | netsuite/constants.py | cart-com/netsuite | 5a4cbbea26c6584348ebea2b4d6de0b9607cea0c | [
"MIT"
] | null | null | null | netsuite/constants.py | cart-com/netsuite | 5a4cbbea26c6584348ebea2b4d6de0b9607cea0c | [
"MIT"
] | null | null | null | import os
NOT_SET: object = object()
DEFAULT_INI_PATH: str = os.environ.get(
"NETSUITE_CONFIG", os.path.expanduser("~/.config/netsuite.ini"),
)
DEFAULT_INI_SECTION: str = "netsuite"
| 23.375 | 68 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.272727 |
c58da775f7281671275019ef175a55447db04132 | 7,996 | py | Python | delivery_merge/merge.py | astroconda/delivery_merge | ae16564097d28c9c1ce8ac19f266dc15429b20b8 | [
"BSD-3-Clause"
] | null | null | null | delivery_merge/merge.py | astroconda/delivery_merge | ae16564097d28c9c1ce8ac19f266dc15429b20b8 | [
"BSD-3-Clause"
] | 2 | 2019-05-16T19:28:10.000Z | 2019-05-17T02:15:45.000Z | delivery_merge/merge.py | astroconda/delivery_merge | ae16564097d28c9c1ce8ac19f266dc15429b20b8 | [
"BSD-3-Clause"
] | null | null | null | import os
import re
import sys
from .conda import conda, conda_env_load, conda_cmd_channels, ei_touch
from .utils import comment_find, git, pushd, sh
from configparser import ConfigParser
from glob import glob
from ruamel.yaml import YAML
DMFILE_RE = re.compile(r'^(?P<name>[A-z\-_l]+)(?:[=<>\!]+)?(?P<version>[A-z0-9. ]+)?') # noqa
DMFILE_INVALID_VERSION_RE = re.compile(r'[\ \!\@\#\$\%\^\&\*\(\)\-_]+')
DELIVERY_NAME_RE = re.compile(r'(?P<name>.*)[-_](?P<version>.*)[-_]py(?P<python_version>\d+)[-_.](?P<iteration>\d+)[-_.](?P<ext>.*)') # noqa
class EmptyPackageSpec(Exception):
pass
class InvalidPackageSpec(Exception):
pass
def dmfile(filename):
""" Return the contents of a file without comments
:param filename: string: path to file
:returns: list: of dicts, one per package
"""
result = []
with open(filename, 'r') as fp:
for line in fp:
line = line.strip()
comment_pos = comment_find(line)
if comment_pos >= 0:
line = line[:comment_pos].strip()
if not line:
continue
match = DMFILE_RE.match(line)
if match is None:
raise InvalidPackageSpec(f"'{line}'")
pkg = match.groupdict()
if pkg['version']:
invalid = DMFILE_INVALID_VERSION_RE.match(pkg['version'])
if invalid:
raise InvalidPackageSpec(f"'{line}'")
pkg['fullspec'] = line
result.append(pkg)
if not result:
raise EmptyPackageSpec("Nothing to do")
return result
def env_combine(filename, conda_env, conda_channels=[]):
""" Install packages listed in `filename` inside `conda_env`.
Packages are quote-escaped to prevent spurious file redirection.
:param filename: str: path to file
:param conda_env: str: conda environment name
:param conda_channels: list: channel URLs
:returns: None
:raises subprocess.CalledProcessError: via check_returncode method
"""
packages = []
for record in dmfile(filename):
packages.append(f"'{record['fullspec']}'")
packages_result = ' '.join([x for x in packages])
with conda_env_load(conda_env):
ei_touch()
# Perform package installation
proc = conda('install', '-q', '-y',
'-n', conda_env,
conda_cmd_channels(conda_channels),
packages_result)
if proc.stderr:
print(proc.stderr.decode())
proc.check_returncode()
def testable_packages(filename, prefix):
""" Scan a mini/anaconda prefix for unpacked packages matching versions
requested by dmfile.
:param filename: str: path to file
:param prefix: str: path to conda root directory (aka prefix)
:returns: dict: git commit hash and repository URL information
"""
pkgdir = os.path.join(prefix, 'pkgs')
paths = []
yaml = YAML(typ='safe')
for record in dmfile(filename):
# Reconstruct ${package}-${version} format (when possible)
pattern = f"{record['name']}-"
if record['version']:
pattern += record['version']
pattern += '*'
# Record path to extracted package
path = ''.join([x for x in glob(os.path.join(pkgdir, pattern))
if os.path.isdir(x)])
paths.append(path)
for root in paths:
info_d = os.path.join(root, 'info')
recipe_d = os.path.join(info_d, 'recipe')
git_log = os.path.join(info_d, 'git')
if not os.path.exists(git_log):
continue
git_log_data = open(git_log).readlines()
if not git_log_data:
continue
with open(os.path.join(recipe_d, 'meta.yaml')) as yaml_data:
source = yaml.load(yaml_data)['source']
if not isinstance(source, dict):
continue
repository = source['git_url']
head = git_log_data[1].split()[1]
yield dict(repo=repository, commit=head)
def integration_test(pkg_data, conda_env, results_root='.'):
"""
:param pkg_data: dict: data returned by `testable_packages` method
:param conda_env: str: conda environment name
:param results_root: str: path to store XML reports
:returns: str: path to XML report
:raises subprocess.CalledProcessError: via check_returncode method
"""
results = ''
results_root = os.path.abspath(os.path.join(results_root, 'results'))
src_root = os.path.abspath('src')
if not os.path.exists(src_root):
os.mkdir(src_root, 0o755)
with pushd(src_root) as _:
repo_root = os.path.basename(pkg_data['repo']).replace('.git', '')
if not os.path.exists(repo_root):
git(f"clone --recursive {pkg_data['repo']} {repo_root}")
with pushd(repo_root) as _:
git(f"checkout {pkg_data['commit']}")
force_xunit2()
with conda_env_load(conda_env):
ei_touch()
results = os.path.abspath(os.path.join(results_root,
repo_root,
'result.xml'))
conda("uninstall", "-y", repo_root)
proc_pip_install = sh("python", "-m pip install --upgrade pip pytest ci-watson")
if proc_pip_install.returncode:
print(proc_pip_install.stdout.decode())
print(proc_pip_install.stderr.decode())
proc_pip = sh("python", "-m pip install -v .[test]")
proc_pip_stderr = proc_pip.stderr.decode()
if proc_pip.returncode:
print(proc_pip.stdout.decode())
print(proc_pip.stderr.decode())
if 'consider upgrading' not in proc_pip_stderr:
proc_pip.check_returncode()
proc_egg = sh("python", "setup.py egg_info")
if proc_egg.returncode:
print(proc_egg.stdout.decode())
print(proc_egg.stderr.decode())
proc_pytest = sh("python", "-m pytest", f"-v --basetemp=.tmp --junitxml={results}")
print(proc_pytest.stdout.decode())
if proc_pytest.returncode:
print(proc_pytest.stderr.decode())
return results
def force_xunit2(project='.'):
""" Set project configuration to emit xunit2 regardless of orignal settings
:param project: str: path project (i.e. source directory)
"""
configs = [os.path.abspath(os.path.join(project, x))
for x in ['pytest.ini', 'setup.cfg']]
if any([os.path.exists(x) for x in configs]):
for filename in configs:
if not os.path.exists(filename):
continue
cfg = ConfigParser()
cfg.read(filename)
cfg['tool:pytest'] = {'junit_family': 'xunit2'}
with open(filename, 'w') as data:
cfg.write(data)
break
else:
data = """[pytest]\njunit_family = xunit2\n"""
with open('pytest.ini', 'w+') as cfg:
cfg.write(data)
return
def force_yaml_channels(yamlfile, channels):
""" Replace the `channels:` block with `channels`
:param yamlfile: str: path to yaml file
:param channels: list: channel URLs
"""
if not isinstance(channels, list):
raise TypeError("Expecting a list of URLs")
yaml = YAML()
yaml.default_flow_style = False
yaml.indent(offset=2)
with open(yamlfile) as yaml_data:
result = yaml.load(yaml_data)
if not result.get('channels'):
print(f"{yamlfile} has no channels", file=sys.stderr)
return
# Assuming there's a reason to change the file...
if result['channels'] != channels:
result['channels'] = channels
with open(yamlfile, 'w') as fp:
yaml.dump(result, fp)
| 31.984 | 141 | 0.580665 | 88 | 0.011006 | 1,475 | 0.184467 | 0 | 0 | 0 | 0 | 2,516 | 0.314657 |
c58f7f6e36957f182ebdaa734a818234840d4203 | 5,762 | py | Python | HMM/utils.py | rushill2/CS440SP21 | 62fb36db6639a93f9d31866b0e77559abd6f53ff | [
"AFL-1.1"
] | null | null | null | HMM/utils.py | rushill2/CS440SP21 | 62fb36db6639a93f9d31866b0e77559abd6f53ff | [
"AFL-1.1"
] | null | null | null | HMM/utils.py | rushill2/CS440SP21 | 62fb36db6639a93f9d31866b0e77559abd6f53ff | [
"AFL-1.1"
] | null | null | null | # mp4.py
# ---------------
# Licensing Information: You are free to use or extend this projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the University of Illinois at Urbana-Champaign
#
# Created Fall 2018: Margaret Fleck, Renxuan Wang, Tiantian Fang, Edward Huang (adapted from a U. Penn assignment)
# Modified Spring 2020: Jialu Li, Guannan Guo, and Kiran Ramnath
# Modified Fall 2020: Amnon Attali, Jatin Arora
# Modified Spring 2021 by Kiran Ramnath (kiranr2@illinois.edu)
import collections
START_TAG = "START"
END_TAG = "END"
def evaluate_accuracies(predicted_sentences, tag_sentences):
"""
:param predicted_sentences:
:param tag_sentences:
:return: (Accuracy, correct word-tag counter, wrong word-tag counter)
"""
assert len(predicted_sentences) == len(tag_sentences), "The number of predicted sentence {} does not match the true number {}".format(len(predicted_sentences), len(tag_sentences))
correct_wordtagcounter = {}
wrong_wordtagcounter = {}
correct = 0
wrong = 0
for pred_sentence, tag_sentence in zip(predicted_sentences, tag_sentences):
assert len(pred_sentence) == len(tag_sentence), "The predicted sentence length {} does not match the true length {}".format(len(pred_sentence), len(tag_sentence))
for pred_wordtag, real_wordtag in zip(pred_sentence, tag_sentence):
assert pred_wordtag[0] == real_wordtag[0], "The predicted sentence WORDS do not match with the original sentence, you should only be predicting the tags"
word = pred_wordtag[0]
if real_wordtag[1] in [START_TAG, END_TAG]:
continue
if pred_wordtag[1] == real_wordtag[1]:
if word not in correct_wordtagcounter.keys():
correct_wordtagcounter[word] = collections.Counter()
correct_wordtagcounter[word][real_wordtag[1]] += 1
correct += 1
else:
if word not in wrong_wordtagcounter.keys():
wrong_wordtagcounter[word] = collections.Counter()
wrong_wordtagcounter[word][real_wordtag[1]] += 1
wrong += 1
accuracy = correct / (correct + wrong)
return accuracy, correct_wordtagcounter, wrong_wordtagcounter
def specialword_accuracies(train_sentences, predicted_sentences, tag_sentences):
"""
:param train_sentences:
:param predicted_sentences:
:param tag_sentences:
:return: Accuracy on words with multiple tags, and accuracy on words that do not occur in the training sentences
"""
seen_words, words_with_multitags_set = get_word_tag_statistics(train_sentences)
multitags_correct = 0
multitags_wrong = 0
unseen_correct = 0
unseen_wrong = 0
for i in range(len(predicted_sentences)):
for j in range(len(predicted_sentences[i])):
word = tag_sentences[i][j][0]
tag = tag_sentences[i][j][1]
if tag in [START_TAG, END_TAG]:
continue
if predicted_sentences[i][j][1] == tag:
if word in words_with_multitags_set:
multitags_correct += 1
if word not in seen_words:
unseen_correct += 1
else:
if word in words_with_multitags_set:
multitags_wrong += 1
if word not in seen_words:
unseen_wrong += 1
multitag_accuracy = multitags_correct / (multitags_correct + multitags_wrong)
total_unseen = unseen_correct + unseen_wrong
unseen_accuracy = unseen_correct / total_unseen if total_unseen > 0 else 0
return multitag_accuracy, unseen_accuracy
def topk_wordtagcounter(wordtagcounter, k):
top_items = sorted(wordtagcounter.items(), key=lambda item: sum(item[1].values()), reverse=True)[:k]
top_items = list(map(lambda item: (item[0], dict(item[1])), top_items))
return top_items
def load_dataset(data_file):
sentences = []
with open(data_file, 'r', encoding='UTF-8') as f:
for line in f:
sentence = [(START_TAG, START_TAG)]
raw = line.split()
for pair in raw:
splitted = pair.split('=')
if (len(splitted) < 2):
continue
else:
tag = splitted[-1]
# find word
word = splitted[0]
for element in splitted[1:-1]:
word += '/' + element
sentence.append((word.lower(), tag))
sentence.append((END_TAG, END_TAG))
sentences.append(sentence)
return sentences
def strip_tags(sentences):
'''
Strip tags
input: list of sentences
each sentence is a list of (word,tag) pairs
output: list of sentences
each sentence is a list of words (no tags)
'''
sentences_without_tags = []
for sentence in sentences:
sentence_without_tags = []
for i in range(len(sentence)):
pair = sentence[i]
sentence_without_tags.append(pair[0])
sentences_without_tags.append(sentence_without_tags)
return sentences_without_tags
def get_word_tag_statistics(data_set):
# get set of all seen words and set of words with multitags
word_tags = collections.defaultdict(lambda: set())
word_set = set()
for sentence in data_set:
for word, tag in sentence:
word_tags[word].add(tag)
word_set.add(word)
return word_set, set(map(lambda elem: elem[0], filter(lambda elem: len(elem[1]) > 1, word_tags.items())))
| 38.413333 | 183 | 0.634155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,489 | 0.258417 |
c5924e0b85cfa3c5247ec98d6988bcc8eef21a43 | 251 | py | Python | 1 - Beginner/1079.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | [
"MIT"
] | 1 | 2020-09-09T12:48:09.000Z | 2020-09-09T12:48:09.000Z | 1 - Beginner/1079.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | [
"MIT"
] | null | null | null | 1 - Beginner/1079.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | [
"MIT"
] | null | null | null | # Uri Online Judge 1079
N = int(input())
for i in range(0,N):
Numbers = input()
num1 = float(Numbers.split()[0])
num2 = float(Numbers.split()[1])
num3 = float(Numbers.split()[2])
print(((2*num1+3*num2+5*num3)/10).__round__(1)) | 19.307692 | 51 | 0.59761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.091633 |
c5940979564969266e00ad7e811e6b04f162aae1 | 633 | py | Python | ex4/sigmoidGradient.py | junwon1994/Coursera-ML | 91e96c3c14c058cd6d745a4fada1baf40d91458f | [
"MIT"
] | 3 | 2018-03-16T01:48:14.000Z | 2020-08-14T09:52:58.000Z | ex4/sigmoidGradient.py | junwon1994/Coursera-ML | 91e96c3c14c058cd6d745a4fada1baf40d91458f | [
"MIT"
] | null | null | null | ex4/sigmoidGradient.py | junwon1994/Coursera-ML | 91e96c3c14c058cd6d745a4fada1baf40d91458f | [
"MIT"
] | null | null | null | from ex2.sigmoid import sigmoid
def sigmoidGradient(z):
"""computes the gradient of the sigmoid function
evaluated at z. This should work regardless if z is a matrix or a
vector. In particular, if z is a vector or matrix, you should return
the gradient for each element."""
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the gradient of the sigmoid function evaluated at
# each value of z (z can be a matrix, vector or scalar).
g = sigmoid(z) * (1 - sigmoid(z))
# =============================================================
return g
| 37.235294 | 77 | 0.554502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.78515 |
c594a1e017366c6929f4c36ec117193b359c4b5f | 2,363 | py | Python | test.py | sebdah/python-inspector | 32415acb6447115cbca5d799a4ce63e9ccc37030 | [
"Apache-2.0"
] | 1 | 2019-06-13T04:43:05.000Z | 2019-06-13T04:43:05.000Z | test.py | sebdah/python-inspector | 32415acb6447115cbca5d799a4ce63e9ccc37030 | [
"Apache-2.0"
] | null | null | null | test.py | sebdah/python-inspector | 32415acb6447115cbca5d799a4ce63e9ccc37030 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Test the Inspector
"""
import os.path, sys
sys.path.append(os.path.dirname(__file__))
import unittest
import inspector
########################################################
#
# Inspector test
#
########################################################
def func2(*args, **kwargs):
    """
    Innermost example frame: forwards all arguments to inspector.trace().
    """
    return inspector.trace(*args, **kwargs)
def func1(*args, **kwargs):
    """
    Middle example frame: adds one stack level by delegating to func2().
    """
    return func2(*args, **kwargs)
def do_inspect(*args, **kwargs):
    """
    Test entry point; yields the do_inspect -> func1 -> func2 call chain.
    """
    return func1(*args, **kwargs)
########################################################
#
# TEST CASES
#
########################################################
class InspectorTest(unittest.TestCase):
    """
    Unit tests for the Inspector. NOTE(review): the expected traces below hard-code source line numbers (28, 34, 57) of this file — do not shift lines.
    """
    def test_full_trace(self):
        """
        Full (unlimited-depth) trace: expects the func1, do_inspect and calling-test frames.
        """
        expected = """\
test.py:28 in method func1
\treturn func2(*args, **kwargs)test.py:34 in method do_inspect
\treturn func1(*args, **kwargs)test.py:57 in method test_full_trace
\toutput = do_inspect(basename_only=True)[:3]
"""
        output = do_inspect(basename_only=True)[:3]
        self.assertEqual(
            (''.join(output)).strip(),
            expected.strip())
    def test_trace_with_depth(self):
        """
        depth=1 limits the trace to the innermost calling frame only.
        """
        expected = """\
test.py:28 in method func1
\treturn func2(*args, **kwargs)
"""
        output = do_inspect(depth=1, basename_only=True)[:3]
        self.assertEqual(
            (''.join(output)).strip(),
            expected.strip())
    def test_trace_with_one_line_log(self):
        """
        one_line_response collapses location and source onto a single line.
        """
        expected = """\
test.py:28 in method func1: \treturn func2(*args, **kwargs)
"""
        output = do_inspect(
            depth=1, one_line_response=True, basename_only=True)[:3]
        self.assertEqual(
            (''.join(output)).strip(),
            expected.strip())
if __name__ == '__main__':
    # Assemble the suite explicitly so the tests run in a fixed order.
    TEST_NAMES = (
        'test_full_trace',
        'test_trace_with_depth',
        'test_trace_with_one_line_log',
    )
    SUITE = unittest.TestSuite()
    for test_name in TEST_NAMES:
        SUITE.addTest(InspectorTest(test_name))
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| 24.614583 | 70 | 0.55311 | 1,332 | 0.56369 | 0 | 0 | 0 | 0 | 0 | 0 | 1,084 | 0.458739 |
c596936b4b1e69d9d5a5ea7c1e0eafe831e8c36a | 4,418 | py | Python | dynamodb.py | oliveroneill/sir | 12573e191c6a740dd06f98500070e443e63452a8 | [
"MIT"
] | null | null | null | dynamodb.py | oliveroneill/sir | 12573e191c6a740dd06f98500070e443e63452a8 | [
"MIT"
] | null | null | null | dynamodb.py | oliveroneill/sir | 12573e191c6a740dd06f98500070e443e63452a8 | [
"MIT"
] | null | null | null | """Util functions for accessing data via DynamoDB."""
import boto3
from boto3.dynamodb.conditions import Key
class UnknownInviteCodeError(Exception):
    """An error when an unknown invite code is entered."""
    # Raised when the supplied code has no row in the DynamoDB
    # 'Invitations' table.
    pass
def get_invitee(invite_code: str):
    """Get a dictionary of the stored information for this invite code."""
    # Convenience wrapper: opens the default DynamoDB resource, resolves
    # the 'Invitations' table and delegates the actual lookup.
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('Invitations')
    return get_invitee_from_table(invite_code, table)
def get_invitee_from_table(invite_code: str, table):
    """
    Get a dictionary of the stored information for this invite code.

    Args:
        invite_code: The invitation code to search for
        table: A DynamoDB table for querying

    Returns:
        A dictionary of information stored under the invite code

    Throws:
        UnknownInviteCodeError: If the invite code is not in the database
    """
    matches = table.query(
        KeyConditionExpression=Key('invite_code').eq(invite_code)
    )['Items']
    if not matches:
        # Nothing stored under this code.
        raise UnknownInviteCodeError()
    # Invite codes should be unique, so only the first match matters.
    record = matches[0]
    # DynamoDB cannot store empty strings, so null is stored instead and
    # converted back to "" on the way out. Null has no other meaning here.
    return {key: convert_null_to_empty_string(value)
            for key, value in record.items()}
def update_rsvp(data: dict):
    """
    Update the RSVP info for a invitee.

    The invite code will be retrieved from within the input dictionary.

    TODO: add data validation. This could possibly be done in API Gateway

    Args:
        data: A dictionary of data to be inserted into the database. The
              values here are expected to match the database schema.

    Throws:
        UnknownInviteCodeError: If the invite code is not in the database
    """
    code = data["invite_code"]
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('Invitations')
    # Check that the code is known. get_invitee_from_table() already raises
    # UnknownInviteCodeError, so the previous try/except that merely
    # re-raised the same exception was redundant and has been removed.
    get_invitee_from_table(invite_code=code, table=table)
    # Convert the data since DynamoDB can't handle empty strings
    data = {k: convert_empty_string_to_none(v) for k, v in data.items()}
    expression = """
    set going = :g, food=:f, plus_one=:p, music=:m, notes=:n,
    plus_one_name=:pn, plus_one_food=:pf, song_id=:s, sent_rsvp=:sr
    """
    table.update_item(
        Key={
            'invite_code': code
        },
        UpdateExpression=expression,
        ExpressionAttributeValues={
            # Booleans arrive as the strings 'true'/'false' from the form.
            ':g': data.get("going") == 'true',
            ':f': data.get("food"),
            ':p': data.get("plus_one") == 'true',
            ':m': data.get("music"),
            ':n': data.get("notes"),
            ':pn': data.get("plus_one_name"),
            ':pf': data.get("plus_one_food"),
            ':s': data.get("song_id"),
            # Set sent_rsvp to true since this is the response
            ':sr': True,
        }
    )
def convert_null_to_empty_string(value):
    """
    Convert `None` type to empty strings.

    Works around DynamoDB's limitation of not allowing empty strings.
    See https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html#limits-attributes

    Args:
        value: The value to convert to an empty string if None

    Returns:
        Empty string if value is None. If not, it will return the unchanged
        value
    """
    return "" if value is None else value
def convert_empty_string_to_none(value):
    """
    Convert empty strings to `None` and leave others unchanged.

    Works around DynamoDB's limitation of not allowing empty strings.
    See https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html#limits-attributes
    Empty strings are entered into Dynamo as null values and converted
    back using `convert_null_to_empty_string`.

    Args:
        value: The value to convert to None if an empty string

    Returns:
        None if value is an empty string. Returns unchanged value otherwise
    """
    if isinstance(value, str) and not value:
        return None
    return value
| 30.468966 | 102 | 0.658443 | 109 | 0.024672 | 0 | 0 | 0 | 0 | 0 | 0 | 2,781 | 0.62947 |
c596d357a3ee4b4fab32730d15cc0655102ec33a | 10,199 | py | Python | psets/ZHH_HHToTauTauGG_GenSim_pset_cfg.py | bsathian/Hgg-MC-Generation | 5f44503b6d5c57aef862299cbcd5a9910a4f8ab8 | [
"MIT"
] | null | null | null | psets/ZHH_HHToTauTauGG_GenSim_pset_cfg.py | bsathian/Hgg-MC-Generation | 5f44503b6d5c57aef862299cbcd5a9910a4f8ab8 | [
"MIT"
] | null | null | null | psets/ZHH_HHToTauTauGG_GenSim_pset_cfg.py | bsathian/Hgg-MC-Generation | 5f44503b6d5c57aef862299cbcd5a9910a4f8ab8 | [
"MIT"
] | 1 | 2021-05-17T22:24:09.000Z | 2021-05-17T22:24:09.000Z | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/HIG-RunIIFall17wmLHEGS-05498-fragment.py --python_filename HIG-RunIIFall17wmLHEGS-05498_1_cfg.py --eventcontent RAWSIM,LHE --customise Configuration/DataProcessing/Utils.addMonitoring --datatier GEN-SIM,LHE --fileout file:HIG-RunIIFall17wmLHEGS-05498.root --conditions 93X_mc2017_realistic_v3 --beamspot Realistic25ns13TeVEarly2017Collision --customise_commands process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(99) --step LHE,GEN,SIM --geometry DB:Extended --era Run2_2017 --no_exec --mc -n 643 --nThreads 1
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# cmsDriver-generated LHE+GEN+SIM configuration for ZHH, HH -> tautau+gg.
process = cms.Process('SIM',eras.Run2_2017)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeVEarly2017Collision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Number of events to generate per job.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(643)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('Configuration/GenProduction/python/HIG-RunIIFall17wmLHEGS-05498-fragment.py nevts:643'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
# GEN-SIM output; only events that pass the generation step are kept.
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('generation_step')
    ),
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('GEN-SIM'),
        filterName = cms.untracked.string('')
    ),
    eventAutoFlushCompressedSize = cms.untracked.int32(20971520),
    fileName = cms.untracked.string('file:GENSIM.root'),
    outputCommands = process.RAWSIMEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)
# Separate output stream keeping the raw LHE records.
process.LHEoutput = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('LHE'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:GENSIM_inLHE.root'),
    outputCommands = process.LHEEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.XMLFromDBSource.label = cms.string("Extended")
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '93X_mc2017_realistic_v3', '')
# Pythia8 hadronizer: both Higgs bosons (pid 25) are forced to decay,
# and the ResonanceDecayFilter keeps exactly the tautau + gamma gamma
# final state (daughters 15,15,22,22).
process.generator = cms.EDFilter("Pythia8HadronizerFilter",
    PythiaParameters = cms.PSet(
        parameterSets = cms.vstring('pythia8CommonSettings',
            'pythia8CP5Settings',
            'pythia8PSweightsSettings',
            'processParameters'),
        processParameters = cms.vstring('25:m0 = 125.0',
            '25:onMode = off',
            '25:onIfMatch = 15 -15',
            '25:onIfMatch = 22 22',
            'ResonanceDecayFilter:filter = on',
            'ResonanceDecayFilter:exclusive = on', #off: require at least the specified number of daughters, on: require exactly the specified number of daughters
            'ResonanceDecayFilter:mothers = 25', #list of mothers not specified => count all particles in hard process+resonance decays (better to avoid specifying mothers when including leptons from the lhe in counting, since intermediate resonances are not gauranteed to appear in general
            'ResonanceDecayFilter:daughters = 15,15,22,22' #
            ),
        pythia8CP5Settings = cms.vstring('Tune:pp 14',
            'Tune:ee 7',
            'MultipartonInteractions:ecmPow=0.03344',
            'PDF:pSet=20',
            'MultipartonInteractions:bProfile=2',
            'MultipartonInteractions:pT0Ref=1.41',
            'MultipartonInteractions:coreRadius=0.7634',
            'MultipartonInteractions:coreFraction=0.63',
            'ColourReconnection:range=5.176',
            'SigmaTotal:zeroAXB=off',
            'SpaceShower:alphaSorder=2',
            'SpaceShower:alphaSvalue=0.118',
            'SigmaProcess:alphaSvalue=0.118',
            'SigmaProcess:alphaSorder=2',
            'MultipartonInteractions:alphaSvalue=0.118',
            'MultipartonInteractions:alphaSorder=2',
            'TimeShower:alphaSorder=2',
            'TimeShower:alphaSvalue=0.118'),
        pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
            'Main:timesAllowErrors = 10000',
            'Check:epTolErr = 0.01',
            'Beams:setProductionScalesFromLHEF = off',
            'SLHA:keepSM = on',
            'SLHA:minMassSM = 1000.',
            'ParticleDecays:limitTau0 = on',
            'ParticleDecays:tau0Max = 10',
            'ParticleDecays:allowPhotonRadiation = on'),
        pythia8PSweightsSettings = cms.vstring('UncertaintyBands:doVariations = on',
            'UncertaintyBands:List = {isrRedHi isr:muRfac=0.707,fsrRedHi fsr:muRfac=0.707,isrRedLo isr:muRfac=1.414,fsrRedLo fsr:muRfac=1.414,isrDefHi isr:muRfac=0.5,fsrDefHi fsr:muRfac=0.5,isrDefLo isr:muRfac=2.0,fsrDefLo fsr:muRfac=2.0,isrConHi isr:muRfac=0.25,fsrConHi fsr:muRfac=0.25,isrConLo isr:muRfac=4.0,fsrConLo fsr:muRfac=4.0,fsr_G2GG_muR_dn fsr:G2GG:muRfac=0.5,fsr_G2GG_muR_up fsr:G2GG:muRfac=2.0,fsr_G2QQ_muR_dn fsr:G2QQ:muRfac=0.5,fsr_G2QQ_muR_up fsr:G2QQ:muRfac=2.0,fsr_Q2QG_muR_dn fsr:Q2QG:muRfac=0.5,fsr_Q2QG_muR_up fsr:Q2QG:muRfac=2.0,fsr_X2XG_muR_dn fsr:X2XG:muRfac=0.5,fsr_X2XG_muR_up fsr:X2XG:muRfac=2.0,fsr_G2GG_cNS_dn fsr:G2GG:cNS=-2.0,fsr_G2GG_cNS_up fsr:G2GG:cNS=2.0,fsr_G2QQ_cNS_dn fsr:G2QQ:cNS=-2.0,fsr_G2QQ_cNS_up fsr:G2QQ:cNS=2.0,fsr_Q2QG_cNS_dn fsr:Q2QG:cNS=-2.0,fsr_Q2QG_cNS_up fsr:Q2QG:cNS=2.0,fsr_X2XG_cNS_dn fsr:X2XG:cNS=-2.0,fsr_X2XG_cNS_up fsr:X2XG:cNS=2.0,isr_G2GG_muR_dn isr:G2GG:muRfac=0.5,isr_G2GG_muR_up isr:G2GG:muRfac=2.0,isr_G2QQ_muR_dn isr:G2QQ:muRfac=0.5,isr_G2QQ_muR_up isr:G2QQ:muRfac=2.0,isr_Q2QG_muR_dn isr:Q2QG:muRfac=0.5,isr_Q2QG_muR_up isr:Q2QG:muRfac=2.0,isr_X2XG_muR_dn isr:X2XG:muRfac=0.5,isr_X2XG_muR_up isr:X2XG:muRfac=2.0,isr_G2GG_cNS_dn isr:G2GG:cNS=-2.0,isr_G2GG_cNS_up isr:G2GG:cNS=2.0,isr_G2QQ_cNS_dn isr:G2QQ:cNS=-2.0,isr_G2QQ_cNS_up isr:G2QQ:cNS=2.0,isr_Q2QG_cNS_dn isr:Q2QG:cNS=-2.0,isr_Q2QG_cNS_up isr:Q2QG:cNS=2.0,isr_X2XG_cNS_dn isr:X2XG:cNS=-2.0,isr_X2XG_cNS_up isr:X2XG:cNS=2.0}',
            'UncertaintyBands:nFlavQ = 4',
            'UncertaintyBands:MPIshowers = on',
            'UncertaintyBands:overSampleFSR = 10.0',
            'UncertaintyBands:overSampleISR = 10.0',
            'UncertaintyBands:FSRpTmin2Fac = 20',
            'UncertaintyBands:ISRpTmin2Fac = 1')
    ),
    comEnergy = cms.double(13000.0),
    filterEfficiency = cms.untracked.double(1.0),
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1)
)
# External LHE production from the madgraph gridpack on CVMFS.
process.externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/pre2017/13TeV/madgraph/V5_2.6.0/ZHH_CV_1_0_C2V_1_0_C3_1_0_13TeV-madgraph/v1/ZHH_CV_1_0_C2V_1_0_C3_1_0_13TeV-madgraph_slc6_amd64_gcc630_CMSSW_9_3_8_tarball.tar.xz'),
    nEvents = cms.untracked.uint32(643),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
process.LHEoutput_step = cms.EndPath(process.LHEoutput)
# Schedule definition
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.RAWSIMoutput_step,process.LHEoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
	if path in ['lhe_step']: continue
	getattr(process,path)._seq = process.generator * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(99)
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| 55.12973 | 1,451 | 0.755956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,884 | 0.576919 |
c597a65becfbb1c61ad0f698d1e6335d34dce5df | 4,251 | py | Python | viewcount.py | Peace1-zhwiki/MOSIW | 13a97842ef53fd500296d3569e548a83e12698d1 | [
"MIT"
] | null | null | null | viewcount.py | Peace1-zhwiki/MOSIW | 13a97842ef53fd500296d3569e548a83e12698d1 | [
"MIT"
] | null | null | null | viewcount.py | Peace1-zhwiki/MOSIW | 13a97842ef53fd500296d3569e548a83e12698d1 | [
"MIT"
] | null | null | null | import pywikibot
from pywikibot import pagegenerators
from urllib.request import urlopen
import urllib.parse
import regex as re #use this rather than "re" to avoid the "look-behind requires fixed-width pattern" error
site = pywikibot.Site('zh','wikipedia')
cat = pywikibot.Category(site,'Category:連結格式不正確的條目')
page_to_write = pywikibot.Page(site, u"User:和平奮鬥救地球/MOSIW")
gen = pagegenerators.CategorizedPageGenerator(cat, recurse=True)
ilh='(?<!\{\{(Advtranslation|Plant\-translation|Translate|Translating|Translation[ _]+WIP|Translation|Trans|Tran|Voltranslation|Wptranslation|正在翻(譯|译)|(翻)?(譯|译)(中)?)[^\}]*)\[\[\:(aa|ab|ace|ady|af|ak|als|am|an|ang|ar|arc|arz|as|ast|av|ay|az|azb|ba|bar|bat-smg|bcl|be|be-tarask|be-x-old|bg|bh|bi|bjn|bm|bn|bo|bpy|br|bs|bug|bxr|ca|cbk-zam|cdo|ce|ceb|ch|cho|chr|chy|ckb|co|cr|crh|cs|csb|cu|cv|cy|da|de|diq|dsb|dv|dz|ee|egl|eml|el|en|eo|es|et|eu|ext|fa|ff|fi|fiu-vro|fj|fo|fr|frp|frr|fur|fy|ga|gag|gan|gd|gl|glk|gn|gom|got|gsw|als|gu|gv|ha|hak|haw|he|hi|hif|ho|hr|hsb|ht|hu|hy|hz|ia|id|ie|ig|ii|ik|ilo|io|is|it|iu|ja|jp|jam|jbo|jv|ka|kaa|kab|kbd|kg|ki|kj|kk|kl|km|kn|ko|koi|kr|krc|ks|ksh|ku|kv|kw|ky|la|lad|lb|lbe|lez|lg|li|lij|lmo|ln|lo|lrc|lt|ltg|lv|lzh|zh-classical|mai|map-bms|mdf|mg|mh|mhr|mi|min|mk|ml|mn|mo|mr|mrj|ms|mt|mus|mwl|my|myv|mzn|na|nah|nan|zh-min-nan|nap|nb|no|nds|nds-nl|ne|ne|new|ng|nl|nn|no|nov|nrm|nso|nv|ny|oc|olo|om|or|os|pa|pag|pam|pap|pcd|pdc|pfl|pi|pih|pl|pms|pnb|pnt|ps|pt|qu|rm|rmy|rn|ro|roa-rup|roa-tara|ru|rue|rup|rw|sa|sah|sc|scn|sco|sd|se|sg|sgs|sh|si|simple|sk|sl|sm|sn|so|sq|sr|srn|ss|st|stq|su|sv|sw|szl|ta|tcy|te|tet|tg|th|ti|tk|tl|tn|to|tpi|tr|ts|tt|tum|tw|ty|tyv|udm|ug|uk|ur|uz|ve|vec|vep|vi|vls|vo|vro|wa|war|wo|wuu|xal|xh|xmf|yi|yo|yue|zh-yue|za|zea|zu)\:(?!(wiktionary|wikt|wikinews|n|wikibooks|b|wikiquote|q|wikisource|s|oldwikisource|species|wikispecies|wikiversity|v|betawikiversity|wikimedia|foundation|wmf|wikivoyage|voy|commons|c|meta|metawikipedia|m|strategy|incubator|mediawikiwiki|mw|mediawiki|quality|otrswiki|otrs|ticket|phabricator|bugzilla|mediazilla|phab|nost|testwiki|wikidata|d|outreach|outreachwiki|toollabs|wikitech|dbdump|download|gerrit|mail|mailarchive|rev|spcom|sulutil|svn|tools|tswiki|wm2016|wm2017|wmania|User|Wikipedia|MediaWiki|File|Image|WP|Project|Template|Help|Special|U|利用者)\:)|\[\[(JP|JA|EN)\:\:'
viewcount = 0
arts = []
views = []
ilh_count = []
edit_num = []
page_size = []
count = 0
html_start = "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/zh.wikipedia/all-access/user/"
html_end = "/monthly/2020100100/2020110100"
tot_num = len(list(cat.articles(namespaces=0,recurse=True)))
print(tot_num)
# Walk every article in the category, fetch its October 2020 view count and
# collect bookkeeping stats; articles under 1000 views are skipped.
for page in gen:
    count += 1
    percentage = 100 * count / tot_num
    art_name = page.title()
    # Titles may contain '/', which must be percent-encoded as well.
    html_url = html_start + urllib.parse.quote(art_name).replace('/', '%2F') + html_end
    try:
        # Fetch once. The original code opened the URL twice (a throwaway
        # probe followed by an unguarded read), doubling the API traffic and
        # crashing if the second request failed; the bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit.
        html = urlopen(html_url).read()
    except Exception:
        # No pageview data available for this article (e.g. HTTP 404).
        continue
    strhtml = str(html)
    # Crude extraction of the "views" field from the JSON payload.
    viewcount = strhtml[strhtml.find('views')+7:-4]
    if int(viewcount) < 1000:
        continue
    art_txt = page.text
    # Count ill-formed interlanguage links in the wikitext (case-insensitive).
    ilh_num = len(re.findall(ilh, art_txt, re.I))
    print(format(percentage, '0.3f'),'%:',art_name,viewcount,ilh_num,page.revision_count(),len(page.text.encode("utf8")))
    arts.append(art_name)
    views.append(int(viewcount))
    ilh_count.append(ilh_num)
    edit_num.append(page.revision_count())
    page_size.append(len(page.text.encode("utf8")))
# Sort all five parallel lists in descending order of view count. A single
# index sort replaces the original O(n^2) five-list bubble sort; sorted() is
# stable (ties keep their original relative order even with reverse=True),
# exactly like the strict-comparison bubble sort it replaces.
order = sorted(range(len(views)), key=views.__getitem__, reverse=True)
views = [views[i] for i in order]
arts = [arts[i] for i in order]
ilh_count = [ilh_count[i] for i in order]
edit_num = [edit_num[i] for i in order]
page_size = [page_size[i] for i in order]
# Build the wikitable report (header + one row per article, capped at the
# top 1000 entries) and save it to the report page.
writestr = '[[:Category:連結格式不正確的條目]]當中前1,000高瀏覽量(2020年10月份數據)之條目\n\n'
writestr += '最後更新時間:~~~~~\n\n'
writestr += '{| class="wikitable sortable"\n! 條目名 !! 瀏覽量 !! 不合規跨語言連結總數(粗估) !! 頁面編輯次數 !! 頁面長度(位元組)\n'
for i in range(len(views)):
    if i>=1000: break
    print(arts[i],views[i])
    writestr += '|-\n|[[' + arts[i] + ']]||' + str(views[i]) + '||' + str(ilh_count[i]) + '||' + str(edit_num[i]) + '||' + str(page_size[i]) + '\n'
writestr += '|}'
page_to_write.text = writestr
page_to_write.save(u"使用[[mw:Manual:Pywikibot/zh|Pywikibot]]更新數據")
print('Done')
c59b4e98e7d29ef1358075e68917d6c46dec6776 | 5,306 | py | Python | pyvarnam/varnam.py | sebinthomas/pyvarnam | 87ad99c6d399c1bcaa55fddc2dab36d0e48a3c90 | [
"MIT"
] | 1 | 2022-03-01T18:46:27.000Z | 2022-03-01T18:46:27.000Z | pyvarnam/varnam.py | sebinthomas/pyvarnam | 87ad99c6d399c1bcaa55fddc2dab36d0e48a3c90 | [
"MIT"
] | null | null | null | pyvarnam/varnam.py | sebinthomas/pyvarnam | 87ad99c6d399c1bcaa55fddc2dab36d0e48a3c90 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
The main varnam module.
"""
from .library import InternalVarnamLibrary
from .utils import *
from .varnam_defs import *
from warnings import warn
import ctypes as C
class Varnam:
    """ Varnam class which encapsulates all
    InternalVarnamLibrary calls.

    Each wrapper converts its text arguments to UTF-8 bytes and returns
    the integer result code from the underlying C library.
    """
    def __init__(self):
        # Load the libvarnam shared library and allocate the ctypes
        # objects that every API call below needs.
        self.lib = InternalVarnamLibrary()
        self.__handle_obj = VarnamHandle()
        self.handle = C.pointer(self.__handle_obj)
        self.message = STRING()
        self.__vlearn_status_obj = VlearnStatus()
        self.learn_status = C.pointer(self.__vlearn_status_obj)
        # C callback prototype invoked per word while learning from a
        # file: void (*)(varnam*, const char* word, int status, void*).
        self.learn_callback = self.lib.callback(VOID, VARNAM_PTR, STRING,
                                                INT, VOID)

    def varnam_init(self, scheme_file=""):
        """
        function to initialize varnam handle

        scheme_file: valid scheme file(*.vst) path
        """
        scheme_file = bytes(scheme_file, 'utf-8')
        return self.lib.varnam_init(
            scheme_file, C.byref(self.handle),
            C.byref(self.message))

    def varnam_init_from_id(self, lang_code):
        """
        Initializes the varnam library from language code

        lang_code: language code in ISO 639-1 format
        """
        lang_code = bytes(lang_code, 'utf-8')
        return self.lib.varnam_init_from_id(
            lang_code, C.byref(self.handle),
            C.byref(self.message))

    def varnam_version(self):
        """
        Returns the version of libvarnam
        """
        return self.lib.varnam_version()

    def varnam_transliterate(self, input):
        """
        Performs transliteration on a given input

        Returns: list of (word, confidence) tuples; words are decoded
                 from UTF-8.

        input: input word to transliterate.

        Raises VarnamResultNotSuccess if the library reports an error.
        """
        input = bytes(input, 'utf-8')
        varray_object = Varray()
        varray_ptr = C.pointer(varray_object)
        res_code = self.lib.varnam_transliterate(self.handle,
                                                 input,
                                                 C.byref(varray_ptr))
        # Fix: compare result codes with != rather than `is not`
        # (identity comparison on ints is an implementation detail).
        if res_code != VARNAM_SUCCESS:
            raise VarnamResultNotSuccess("varnam_transliterate", res_code)
        result = []
        length = self.lib.varray_length(varray_ptr)
        for i in range(length):
            # Each varray element is a Word*; cast before dereferencing.
            word_ptr = C.cast(self.lib.varray_get(varray_ptr, i),
                              C.POINTER(Word))
            word = word_ptr.contents.text.decode('utf-8')
            result.append((word, word_ptr.contents.confidence))
        return result

    def varnam_create_token(self, pattern, value1, value2, value3,
                            tag, token_type, match_type, priority,
                            accept_condition, buffered):
        """creates a token

        for more info regarding parameters look into api.h
        """
        pattern = bytes(pattern, 'utf-8')
        value1 = bytes(value1, 'utf-8')
        value2 = bytes(value2, 'utf-8')
        value3 = bytes(value3, 'utf-8')
        tag = bytes(tag, 'utf-8')
        res_code = self.lib.varnam_create_token(self.handle,
                                                pattern, value1,
                                                value2, value3,
                                                tag, token_type,
                                                match_type, priority,
                                                accept_condition,
                                                buffered)
        return res_code

    def varnam_learn(self, word):
        """
        Varnam will learn the supplied word and possible
        ways to write it.

        word: const char* string to learn
        """
        word = bytes(word, 'utf-8')
        res_code = self.lib.varnam_learn(self.handle, word)
        return res_code

    def varnam_train(self, pattern, word):
        """
        Trains varnam to associate a pattern with the word

        pattern: const char* string pattern
        word: const char* string word
        """
        pattern = bytes(pattern, 'utf-8')
        word = bytes(word, 'utf-8')
        res_code = self.lib.varnam_train(self.handle, pattern, word)
        return res_code

    def varnam_config(self, conf_type, *args):
        """
        Varnam configuration.

        Does not persist. Resets to default when varnam_init()
        is called again
        """
        conf_type = bytes(conf_type, 'utf-8')
        res_code = self.lib.varnam_config(self.handle, conf_type, *args)
        return res_code

    def varnam_learn_from_file(self, filepath, callback):
        """
        Varnam learns from the file specified.

        filepath: File to learn
        callback: callback function invoked on each word

        consult api.h for more information
        """
        filepath = bytes(filepath, 'utf-8')
        l_callback = self.learn_callback(callback)
        res_code = self.lib.varnam_learn_from_file(self.handle, filepath,
                                                   self.learn_status, l_callback,
                                                   None)
        # Bug fix: the result code was previously computed but never
        # returned, unlike every other wrapper in this class.
        return res_code

    def varnam_destroy(self):
        """Releases the underlying libvarnam handle."""
        self.lib.varnam_destroy(self.handle)
| 35.139073 | 81 | 0.55032 | 5,083 | 0.957972 | 0 | 0 | 0 | 0 | 0 | 0 | 1,551 | 0.292311 |
c59b618649e99ca9999f774d8427182098a18b35 | 3,755 | py | Python | classifier/bic.py | NYPL/Simplified-server-core | d924b0cfb687f8c1eaaf731e510dd9196a77b0b2 | [
"Apache-2.0"
] | 8 | 2017-05-15T13:58:57.000Z | 2020-09-01T19:28:44.000Z | classifier/bic.py | NYPL/Simplified-server-core | d924b0cfb687f8c1eaaf731e510dd9196a77b0b2 | [
"Apache-2.0"
] | 621 | 2016-01-26T17:10:56.000Z | 2022-03-29T20:51:40.000Z | classifier/bic.py | NYPL/Simplified-server-core | d924b0cfb687f8c1eaaf731e510dd9196a77b0b2 | [
"Apache-2.0"
] | 16 | 2016-09-02T14:59:44.000Z | 2021-02-26T15:30:03.000Z | from . import *
class BICClassifier(Classifier):
    """Classifier for BIC (Book Industry Communication) subject codes.

    Code prefixes are grouped by specificity level; lookups scan the most
    specific level first. Prefix tables came from
    http://editeur.dyndns.org/bic_categories
    """

    LEVEL_1_PREFIXES = {
        Art_Design: 'A',
        Biography_Memoir: 'B',
        Foreign_Language_Study: 'C',
        Literary_Criticism: 'D',
        Reference_Study_Aids: 'G',
        Social_Sciences: 'J',
        Personal_Finance_Business: 'K',
        Law: 'L',
        Medical: 'M',
        Science_Technology: 'P',
        Technology: 'T',
        Computers: 'U',
    }

    LEVEL_2_PREFIXES = {
        Art_History: 'AC',
        Photography: 'AJ',
        Design: 'AK',
        Architecture: 'AM',
        Film_TV: 'AP',
        Performing_Arts: 'AS',
        Music: 'AV',
        Poetry: 'DC',
        Drama: 'DD',
        Classics: 'FC',
        Mystery: 'FF',
        Suspense_Thriller: 'FH',
        Adventure: 'FJ',
        Horror: 'FK',
        Science_Fiction: 'FL',
        Fantasy: 'FM',
        Erotica: 'FP',
        Romance: 'FR',
        Historical_Fiction: 'FV',
        Religious_Fiction: 'FW',
        Comics_Graphic_Novels: 'FX',
        History: 'HB',
        Philosophy: 'HP',
        Religion_Spirituality: 'HR',
        Psychology: 'JM',
        Education: 'JN',
        Political_Science: 'JP',
        Economics: 'KC',
        Business: 'KJ',
        Mathematics: 'PB',
        Science: 'PD',
        Self_Help: 'VS',
        Body_Mind_Spirit: 'VX',
        Food_Health: 'WB',
        Antiques_Collectibles: 'WC',
        Crafts_Hobbies: 'WF',
        Humorous_Nonfiction: 'WH',
        House_Home: 'WK',
        Gardening: 'WM',
        Nature: 'WN',
        Sports: 'WS',
        Travel: 'WT',
    }

    LEVEL_3_PREFIXES = {
        Historical_Mystery: 'FFH',
        Espionage: 'FHD',
        Westerns: 'FJW',
        Space_Opera: 'FLS',
        Historical_Romance: 'FRH',
        Short_Stories: 'FYB',
        World_History: 'HBG',
        Military_History: 'HBW',
        Christianity: 'HRC',
        Buddhism: 'HRE',
        Hinduism: 'HRG',
        Islam: 'HRH',
        Judaism: 'HRJ',
        Fashion: 'WJF',
        Poetry: 'YDP',
        Adventure: 'YFC',
        Horror: 'YFD',
        Science_Fiction: 'YFG',
        Fantasy: 'YFH',
        Romance: 'YFM',
        Humorous_Fiction: 'YFQ',
        Historical_Fiction: 'YFT',
        Comics_Graphic_Novels: 'YFW',
        Art: 'YNA',
        Music: 'YNC',
        Performing_Arts: 'YND',
        Film_TV: 'YNF',
        History: 'YNH',
        Nature: 'YNN',
        Religion_Spirituality: 'YNR',
        Science_Technology: 'YNT',
        Humorous_Nonfiction: 'YNU',
        Sports: 'YNW',
    }

    LEVEL_4_PREFIXES = {
        European_History: 'HBJD',
        Asian_History: 'HBJF',
        African_History: 'HBJH',
        Ancient_History: 'HBLA',
        Modern_History: 'HBLL',
        Drama: 'YNDS',
        Comics_Graphic_Novels: 'YNUC',
    }

    # Most specific first, so genre() prefers longer prefixes.
    PREFIX_LISTS = [LEVEL_4_PREFIXES, LEVEL_3_PREFIXES, LEVEL_2_PREFIXES, LEVEL_1_PREFIXES]

    @classmethod
    def is_fiction(cls, identifier, name):
        # Fiction codes begin with 'f'; children's/YA fiction with 'yf'.
        return identifier.startswith(('f', 'yf'))

    @classmethod
    def audience(cls, identifier, name):
        # BIC doesn't distinguish children's and YA.
        # Classify it as YA to be safe.
        if identifier.startswith('y'):
            return cls.AUDIENCE_YOUNG_ADULT
        return cls.AUDIENCE_ADULT

    @classmethod
    def genre(cls, identifier, name, fiction=None, audience=None):
        # Scan level 4 down to level 1; the first matching prefix wins.
        for prefix_map in cls.PREFIX_LISTS:
            for matched_genre, prefix in prefix_map.items():
                if identifier.startswith(prefix.lower()):
                    return matched_genre
        return None

Classifier.classifiers[Classifier.BIC] = BICClassifier
| 27.408759 | 91 | 0.541145 | 3,681 | 0.980293 | 0 | 0 | 704 | 0.187483 | 0 | 0 | 568 | 0.151265 |
c59ba40f823d1d5bb7c0c5cd08444d56a4e529a0 | 4,490 | py | Python | pusher/pusher.py | tkhieu/pusher-rest-python | 95a91c92b66612c7fa3ad71190deb65a91a1901f | [
"MIT"
] | null | null | null | pusher/pusher.py | tkhieu/pusher-rest-python | 95a91c92b66612c7fa3ad71190deb65a91a1901f | [
"MIT"
] | null | null | null | pusher/pusher.py | tkhieu/pusher-rest-python | 95a91c92b66612c7fa3ad71190deb65a91a1901f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import (print_function, unicode_literals, absolute_import,
division)
from pusher.config import Config
from pusher.request import Request
from pusher.sync import SynchronousBackend
from pusher.util import GET, POST, text, validate_channel
import collections
import json
import six
class RequestMethod(object):
    """Callable bound to a Pusher client.

    Builds a request via the wrapped function and, when called, hands
    that request to the client's backend for sending.
    """
    def __init__(self, pusher, f):
        self.pusher = pusher
        self.f = f

    def __call__(self, *args, **kwargs):
        request = self.make_request(*args, **kwargs)
        return self.pusher.backend.send_request(request)

    def make_request(self, *args, **kwargs):
        # The wrapped function expects the client as its first argument.
        return self.f(self.pusher, *args, **kwargs)
def doc_string(doc):
    """Decorator factory: overwrite the decorated function's __doc__
    with *doc* and return the function unchanged."""
    def decorator(func):
        func.__doc__ = doc
        return func
    return decorator
def request_method(f):
    """Turn *f* into a property that yields a RequestMethod.

    Accessing the attribute on a Pusher instance returns a RequestMethod
    bound to that instance; calling it builds and sends the request. The
    RequestMethod property inherits *f*'s docstring via doc_string.
    """
    @property
    @doc_string(f.__doc__)
    def wrapped(self):
        return RequestMethod(self, f)
    return wrapped
def join_attributes(attributes):
    """Validate that every attribute is a text string and join them into
    a single comma-separated query value."""
    if any(not isinstance(attribute, six.text_type) for attribute in attributes):
        raise TypeError('Each attr should be %s' % text)
    return six.text_type(',').join(attributes)
class Pusher(object):
    """Client for the Pusher HTTP API.

    This client supports various backend adapters to support various http
    libraries available in the python ecosystem.

    :param config: a pusher.Config instance
    :param backend: an object that responds to the send_request(request)
                    method. If none is provided, a
                    python.sync.SynchronousBackend instance is created.
    """
    def __init__(self, config, backend=None):
        if not isinstance(config, Config):
            raise TypeError("config should be a pusher.Config object")

        self.backend = backend or SynchronousBackend(config)
        self.config = config

    @request_method
    def trigger(self, channels, event_name, data, socket_id=None):
        '''
        Trigger an event on one or more channels, see:

        http://pusher.com/docs/rest_api#method-post-event
        '''
        # Channels must be a real collection (list/tuple/set), not a string.
        # NOTE(review): collections.Sized/Iterable moved to collections.abc
        # in Python 3.3 and were removed from collections in 3.10 — update
        # when older Pythons are dropped.
        if isinstance(channels, six.string_types) or not isinstance(channels, (collections.Sized, collections.Iterable)):
            raise TypeError("Expected a collection of channels (each channel should be %s)" % text)

        if len(channels) > 10:
            raise ValueError("Too many channels")

        for channel in channels:
            validate_channel(channel)

        if not isinstance(event_name, six.text_type):
            raise TypeError("event_name should be %s" % text)

        if len(event_name) > 200:
            raise ValueError("event_name too long")

        # Non-text payloads are serialized to JSON before sending.
        if not isinstance(data, six.text_type):
            data = json.dumps(data)

        if len(data) > 10240:
            raise ValueError("Too much data")

        params = {
            'name': event_name,
            'channels': channels,
            'data': data
        }
        if socket_id:
            if not isinstance(socket_id, six.text_type):
                raise TypeError("Socket ID should be %s" % text)

            params['socket_id'] = socket_id
        return Request(self.config, POST, "/apps/%s/events" % self.config.app_id, params)

    @request_method
    def channels_info(self, prefix_filter=None, attributes=None):
        '''
        Get information on multiple channels, see:

        http://pusher.com/docs/rest_api#method-get-channels
        '''
        # Fix: the default used to be a mutable list ([]); None avoids the
        # shared-mutable-default pitfall and behaves identically here
        # (both are falsy and the list is never mutated).
        params = {}
        if attributes:
            params['info'] = join_attributes(attributes)
        if prefix_filter:
            params['filter_by_prefix'] = prefix_filter
        return Request(self.config, GET, "/apps/%s/channels" % self.config.app_id, params)

    @request_method
    def channel_info(self, channel, attributes=None):
        '''
        Get information on a specific channel, see:

        http://pusher.com/docs/rest_api#method-get-channel
        '''
        validate_channel(channel)

        # Same mutable-default fix as channels_info above.
        params = {}
        if attributes:
            params['info'] = join_attributes(attributes)
        return Request(self.config, GET, "/apps/%s/channels/%s" % (self.config.app_id, channel), params)

    @request_method
    def users_info(self, channel):
        '''
        Fetch user ids currently subscribed to a presence channel

        http://pusher.com/docs/rest_api#method-get-users
        '''
        validate_channel(channel)

        return Request(self.config, GET, "/apps/%s/channels/%s/users" % (self.config.app_id, channel))
| 32.302158 | 121 | 0.632294 | 3,670 | 0.817372 | 0 | 0 | 2,724 | 0.606682 | 0 | 0 | 1,337 | 0.297773 |
c59c27c031d685506df2be78e4e37cfcf8d5d558 | 1,986 | py | Python | src/bigfix_prefetch/__main__.py | jgstew/generate-prefetch | e92497b3ec9216fe46ec14c96ba21c51141e0b24 | [
"MIT"
] | null | null | null | src/bigfix_prefetch/__main__.py | jgstew/generate-prefetch | e92497b3ec9216fe46ec14c96ba21c51141e0b24 | [
"MIT"
] | null | null | null | src/bigfix_prefetch/__main__.py | jgstew/generate-prefetch | e92497b3ec9216fe46ec14c96ba21c51141e0b24 | [
"MIT"
] | null | null | null | """
To run this module directly
"""
# pylint: disable=no-else-return
import argparse
import os
try:
from . import prefetch_from_file
except ImportError:
import prefetch_from_file
try:
from . import prefetch_from_url
except ImportError:
import prefetch_from_url
def validate_filepath_or_url(filepath_or_url=""):
    """Return the argument unchanged if it looks like a URL or is a
    readable file path; raise ValueError otherwise (argparse ``type=``)."""
    if "://" in filepath_or_url:
        return filepath_or_url
    if os.path.isfile(filepath_or_url) and os.access(filepath_or_url, os.R_OK):
        return filepath_or_url
    raise ValueError(filepath_or_url)
def build_argument_parser():
    """Assemble and return the argparse parser for this tool."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    # Positional target; validated/normalised by validate_filepath_or_url.
    arg_parser.add_argument(
        "filepath_or_url",
        nargs="?",
        type=validate_filepath_or_url,
        default="bigfix_prefetch/__init__.py",
        help="Path to file or URL to create prefetch for.",
    )
    arg_parser.add_argument(
        "--prefetch-block",
        action="store_true",
        default=False,
        help="generate a prefetch block instead of prefetch statement",
    )
    arg_parser.add_argument(
        "--override-url",
        default="http://localhost/unknown",
        help="URL to use in prefetch statement if providing file path",
    )
    return arg_parser
def main(argv=None):
    """Entry point: print and return the prefetch for the parsed target.
    The argument is treated as a local file first; if that raises
    FileNotFoundError it is retried as a URL.
    """
    args = build_argument_parser().parse_args(argv)
    try:
        prefetch_result = prefetch_from_file.file_to_prefetch(
            args.filepath_or_url, args.override_url
        )
    except FileNotFoundError:
        prefetch_result = prefetch_from_url.url_to_prefetch(args.filepath_or_url)
    print(prefetch_result)
    return prefetch_result
# __main__.py runs on module execution; no __name__ guard by design.
main()
| 25.461538 | 81 | 0.676737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 537 | 0.270393 |
c59c737c48fd15ed97be12bba02f43112a04741e | 2,577 | py | Python | workshop/sampledecoder/src/rfi_power_switch.py | arnd/aws-iot-core-lorawan | 945b7ceea07a17525cfdf15420a573a250fe1149 | [
"MIT-0"
] | 54 | 2020-12-15T21:57:58.000Z | 2022-03-27T14:05:14.000Z | workshop/sampledecoder/src/rfi_power_switch.py | arnd/aws-iot-core-lorawan | 945b7ceea07a17525cfdf15420a573a250fe1149 | [
"MIT-0"
] | 20 | 2020-12-16T19:09:02.000Z | 2022-03-05T13:28:51.000Z | workshop/sampledecoder/src/rfi_power_switch.py | arnd/aws-iot-core-lorawan | 945b7ceea07a17525cfdf15420a573a250fe1149 | [
"MIT-0"
] | 25 | 2020-12-16T01:18:22.000Z | 2022-03-04T12:05:24.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import base64
def dict_from_payload(base64_input: str):
    """Decode a base64-encoded power-switch payload into a dict.
    Byte 0 selects the message type:
      0x00 -> switch status "off"
      0x01 -> switch status "on"
      0xF0 -> reporting interval, big-endian 16-bit value in bytes 1-2
    Raises ValueError for any other type byte.
    """
    payload_bytes = base64.b64decode(base64_input)
    message_type = payload_bytes[0]
    if message_type == 0x00:
        return {
            "messagetype": "switchstatus",
            "switch_status": "off"
        }
    if message_type == 0x01:
        # Fixed: this branch previously reported "off" as well (copy/paste
        # bug); the in-file test vector for input "01" expects "on".
        return {
            "messagetype": "switchstatus",
            "switch_status": "on"
        }
    if message_type == 0xF0:
        return {
            "messagetype": "interval",
            "interval": payload_bytes[2] | payload_bytes[1] << 8
        }
    # Fixed: an unknown type byte previously fell through and raised
    # UnboundLocalError on `result`; fail explicitly instead.
    raise ValueError("Unknown message type byte: 0x%02X" % message_type)
# Tests
if __name__ == "__main__":
    # Each case: raw hex payload in, expected decoded fields out; only the
    # keys listed under "output" are compared.
    test_definition = [
        {
            "input": "00",
            "output": {
                "switch_status": "off"
            }
        },
        {
            "input": "01",
            "output": {
                "switch_status": "on"
            }
        },
        {
            # Fixed vector: interval messages are 3 bytes (type + 16-bit
            # value). The old single-byte "F0" input crashed the decoder
            # with IndexError and asserted a key ("switch_status") that
            # interval messages never contain.
            "input": "F0012C",
            "output": {
                "interval": 300
            }
        }
    ]
    for test in test_definition:
        base64_input = base64.b64encode(
            bytearray.fromhex(test.get("input"))).decode("utf-8")
        output = dict_from_payload(base64_input)
        for key in test.get("output"):
            if(test.get("output").get(key) != output.get(key)):
                raise Exception(
                    f'Assertion failed for input {test.get("input")}, key {key}, expected {test.get("output").get(key)}, got {output.get(key)} ')
            else:
                print("OK")
| 33.038462 | 145 | 0.578968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,387 | 0.538223 |
c59dc3c8ab8e831eb7caf7fcbfb73b6aca3f9f0b | 1,219 | py | Python | src/tests/aguirregabiria_simple_tests.py | cdagnino/LearningModels | b31d4e1dd5381ba06fc5b1d2b0e2eb1515f2d15f | [
"Apache-2.0"
] | null | null | null | src/tests/aguirregabiria_simple_tests.py | cdagnino/LearningModels | b31d4e1dd5381ba06fc5b1d2b0e2eb1515f2d15f | [
"Apache-2.0"
] | null | null | null | src/tests/aguirregabiria_simple_tests.py | cdagnino/LearningModels | b31d4e1dd5381ba06fc5b1d2b0e2eb1515f2d15f | [
"Apache-2.0"
] | null | null | null | import numpy as np
from src import const
#TODO: should be imported from aguirregabiria_simple.py
def period_profit(p: np.ndarray, lambdas: np.ndarray, betas_transition=const.betas_transition):
    """Expected per-period profit at prices *p* under belief weights *lambdas*.
    See ReadMe for the derivation.
    """
    # Margin term, common to every belief component.
    margin = (p - const.c) * np.e ** const.α * np.e ** ((const.σ_ɛ ** 2) / 2)
    # Lambda-weighted mixture of p**beta over the belief grid.
    powers = np.e ** (betas_transition * np.log(p[:, np.newaxis]))
    return margin * powers.dot(lambdas)
def test_period_profit():
    """Check period_profit against hand-computed values for a 2-price,
    3-lambda case.
    Fixed: stray dataset-extraction residue fused onto the final assert
    line made the statement invalid; the assert is restored verbatim.
    """
    p = np.array([1.4, 1.2])
    lambdas = np.array([0.5, 0.4, 0.1])
    # Kept for reference: the intermediate p**beta terms of the derivation
    # (not used directly by the assertion below).
    beta_p_part = np.array([[np.e ** (-3. * 0.33647224), np.e ** (-2.5 * 0.33647224), np.e ** (-2 * 0.33647224)],
                            [np.e ** (-3. * 0.18232156), np.e ** (-2.5 * 0.18232156), np.e ** (-2 * 0.18232156)]])
    summation_part = np.array([0.36443148 * lambdas[0] + 0.43120115 * lambdas[1] + 0.51020408 * lambdas[2],
                               0.5787037 * lambdas[0] + 0.63393814 * lambdas[1] + 0.69444444 * lambdas[2]])
    expected = (p - const.c) * np.e ** const.α * np.e ** ((const.σ_ɛ ** 2) / 2) * summation_part
    computed = period_profit(p, lambdas)
    assert np.allclose(expected, computed, rtol=0.05)
c59e1c319be909d4355e8aed4203a86618692226 | 6,187 | py | Python | rdhyee_utils/aws/__init__.py | rdhyee/rdhyee_utils | 3504ee3dd6230f576dfc091129cc38efdc296af2 | [
"Apache-2.0"
] | null | null | null | rdhyee_utils/aws/__init__.py | rdhyee/rdhyee_utils | 3504ee3dd6230f576dfc091129cc38efdc296af2 | [
"Apache-2.0"
] | null | null | null | rdhyee_utils/aws/__init__.py | rdhyee/rdhyee_utils | 3504ee3dd6230f576dfc091129cc38efdc296af2 | [
"Apache-2.0"
] | null | null | null | # code adapted from http://my.safaribooksonline.com/book/-/9781449308100/2dot-ec2-recipes/id2529379
def launch_instance(aws_access_key_id=None,
                    aws_secret_access_key=None,
                    ami='ami-a29943cb',
                    instance_type='t1.micro',
                    key_name='rdhyee_public_key',
                    key_extension='.pem',
                    key_dir='~/.ssh',
                    group_name='default',
                    ssh_port=22,
                    cidr='0.0.0.0/0',
                    tag='paws',
                    user_data=None,
                    cmd_shell=True,
                    login_user='ubuntu',
                    ssh_pwd=None):
    """
    Launch an instance and wait for it to start running.
    Returns a tuple consisting of the Instance object and the CmdShell
    object, if request, or None.
    aws_access_key_id: Amazon AWS key
    aws_secret_access_key: Amazon secret
    ami        The ID of the Amazon Machine Image that this instance will
               be based on.  Default is a 64-bit Amazon Linux EBS image.
    instance_type The type of the instance.
    key_name   The name of the SSH Key used for logging into the instance.
               It will be created if it does not exist.
    key_extension The file extension for SSH private key files.
    key_dir    The path to the directory containing SSH private keys.
               This is usually ~/.ssh.
    group_name The name of the security group used to control access
               to the instance.  It will be created if it does not exist.
    ssh_port   The port number you want to use for SSH access (default 22).
    cidr       The CIDR block used to limit access to your instance.
    tag        A name that will be used to tag the instance so we can
               easily find it later.
    user_data  Data that will be passed to the newly started
               instance at launch and will be accessible via
               the metadata service running at http://169.254.169.254.
    cmd_shell  If true, a boto CmdShell object will be created and returned.
               This allows programmatic SSH access to the new instance.
    login_user The user name used when SSH'ing into new instance.  The
               default is 'ubuntu'
    ssh_pwd    The password for your SSH key if it is encrypted with a
               passphrase.
    """
    cmd = None
    import boto
    # BUG FIX: boto.manage.cmdshell is used below but `import boto` alone
    # does not load the submodule, which made the cmd_shell branch raise
    # AttributeError.  Import it explicitly.
    import boto.manage.cmdshell
    import time
    import os
    # Create a connection to EC2 service.
    # You can pass credentials in to the connect_ec2 method explicitly
    # or you can use the default credentials in your ~/.boto config file
    # as we are doing here.
    ec2 = boto.connect_ec2(aws_access_key_id, aws_secret_access_key)
    # Check to see if specified keypair already exists.
    # If we get an InvalidKeyPair.NotFound error back from EC2,
    # it means that it doesn't exist and we need to create it.
    try:
        key = ec2.get_all_key_pairs(keynames=[key_name])[0]
    except ec2.ResponseError as e:
        if e.code == 'InvalidKeyPair.NotFound':
            print('Creating keypair: %s' % key_name)
            # Create an SSH key to use when logging into instances.
            key = ec2.create_key_pair(key_name)
            # AWS will store the public key but the private key is
            # generated and returned and needs to be stored locally.
            # The save method will also chmod the file to protect
            # your private key.
            key.save(key_dir)
        else:
            raise
    # Check to see if specified security group already exists.
    # If we get an InvalidGroup.NotFound error back from EC2,
    # it means that it doesn't exist and we need to create it.
    try:
        group = ec2.get_all_security_groups(groupnames=[group_name])[0]
    except ec2.ResponseError as e:
        if e.code == 'InvalidGroup.NotFound':
            print('Creating Security Group: %s' % group_name)
            # Create a security group to control access to instance via SSH.
            group = ec2.create_security_group(group_name,
                                              'A group that allows SSH access')
        else:
            raise
    # Add a rule to the security group to authorize SSH traffic
    # on the specified port.
    try:
        group.authorize('tcp', ssh_port, ssh_port, cidr)
    except ec2.ResponseError as e:
        if e.code == 'InvalidPermission.Duplicate':
            print('Security Group: %s already authorized' % group_name)
        else:
            raise
    # Now start up the instance.  The run_instances method
    # has many, many parameters but these are all we need
    # for now.
    reservation = ec2.run_instances(ami,
                                    key_name=key_name,
                                    security_groups=[group_name],
                                    instance_type=instance_type,
                                    user_data=user_data)
    # Find the actual Instance object inside the Reservation object
    # returned by EC2.
    instance = reservation.instances[0]
    # The instance has been launched but it's not yet up and
    # running.  Let's wait for its state to change to 'running'.
    print('waiting for instance')
    while instance.state != 'running':
        print('.')
        time.sleep(5)
        instance.update()
    print('done')
    # Let's tag the instance with the specified label so we can
    # identify it later.
    instance.add_tag(tag)
    # The instance is now running, let's try to programmatically
    # SSH to the instance using Paramiko via boto CmdShell.
    if cmd_shell:
        key_path = os.path.join(os.path.expanduser(key_dir),
                                key_name+key_extension)
        cmd = boto.manage.cmdshell.sshclient_from_instance(instance,
                                                           key_path,
                                                           user_name=login_user,
                                                           ssh_pwd=ssh_pwd)
    return (instance, cmd)
c59e4dc0f8a313005e4c9d3eaf2d4fcbd2711fd5 | 10,998 | py | Python | kinto/tests/core/test_authorization.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | null | null | null | kinto/tests/core/test_authorization.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | null | null | null | kinto/tests/core/test_authorization.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | 1 | 2020-07-15T04:27:08.000Z | 2020-07-15T04:27:08.000Z | import mock
from pyramid.request import Request
from .support import DummyRequest, unittest
from kinto.core import authentication
from kinto.core.authorization import RouteFactory, AuthorizationPolicy
from kinto.core.storage import exceptions as storage_exceptions
class RouteFactoryTest(unittest.TestCase):
    """Tests for RouteFactory: it must derive the required permission
    (read/write/create) from the HTTP method and the state of the
    targeted record, and expose None attributes for non-resource routes."""
    def setUp(self):
        self.record_uri = "/foo/bar"
    def assert_request_resolves_to(self, method, permission, uri=None,
                                   record_not_found=False):
        # Helper: fake the matched service/resource, build a RouteFactory
        # for `method` on `uri`, and assert the derived permission.
        if uri is None:
            uri = self.record_uri
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            resource.record_id = 1
            if record_not_found:
                resource.model.get_record.side_effect = \
                    storage_exceptions.RecordNotFoundError
            else:
                resource.model.get_record.return_value = 1
            current_service().resource.return_value = resource
            # Do the actual call.
            request = DummyRequest(method=method)
            request.upath_info = uri
            context = RouteFactory(request)
            self.assertEquals(context.required_permission, permission)
    def test_http_unknown_does_not_raise_a_500(self):
        self.assert_request_resolves_to("unknown", None)
    def test_http_get_resolves_in_a_read_permission(self):
        self.assert_request_resolves_to("get", "read")
    def test_http_post_resolves_in_a_create_permission(self):
        self.assert_request_resolves_to("post", "create")
    def test_http_delete_resolves_in_a_write_permission(self):
        self.assert_request_resolves_to("delete", "write")
    def test_http_put_unexisting_record_resolves_in_a_create_permission(self):
        # PUT on a missing record is a create, checked against the parent
        # collection path.
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            resource.record_id = 1
            resource.model.get_record.side_effect = \
                storage_exceptions.RecordNotFoundError
            current_service().resource.return_value = resource
            current_service().collection_path = '/buckets/{bucket_id}'
            # Do the actual call.
            request = DummyRequest(method='put')
            request.upath_info = '/buckets/abc/collections/1'
            request.matchdict = {'bucket_id': 'abc'}
            context = RouteFactory(request)
            self.assertEquals(context.required_permission, 'create')
    def test_http_put_existing_record_resolves_in_a_write_permission(self):
        self.assert_request_resolves_to("put", "write")
    def test_http_put_sets_current_record_attribute(self):
        # The fetched record must be kept on the context for later use.
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            resource.record_id = 1
            resource.model.get_record.return_value = mock.sentinel.record
            current_service().resource.return_value = resource
            # Do the actual call.
            request = DummyRequest(method='put')
            context = RouteFactory(request)
            self.assertEquals(context.current_record, mock.sentinel.record)
    def test_http_patch_resolves_in_a_write_permission(self):
        self.assert_request_resolves_to("patch", "write")
    def test_attributes_are_none_with_blank_requests(self):
        request = Request.blank(path='/')
        request.registry = mock.Mock(settings={})
        request.authn_type = 'fxa'
        request.prefixed_userid = property(authentication.prefixed_userid)
        context = RouteFactory(request)
        self.assertIsNone(context.required_permission)
        self.assertIsNone(context.current_record)
        self.assertIsNone(context.resource_name)
        self.assertIsNone(context.get_shared_ids)
    def test_attributes_are_none_with_non_resource_requests(self):
        # A matched route whose service is not a kinto resource yields a
        # context with no permission information.
        basic_service = object()
        request = Request.blank(path='/')
        request.prefixed_userid = property(authentication.prefixed_userid)
        request.matched_route = mock.Mock(pattern='foo')
        request.registry = mock.Mock(cornice_services={'foo': basic_service})
        request.registry.settings = {}
        context = RouteFactory(request)
        self.assertIsNone(context.current_record)
        self.assertIsNone(context.required_permission)
        self.assertIsNone(context.resource_name)
        self.assertIsNone(context.get_shared_ids)
    def test_route_factory_adds_allowed_principals_from_settings(self):
        # `<resource>_create_principals` settings feed allowed_principals.
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            current_service().resource.return_value = resource
            current_service().collection_path = '/buckets'
            # Do the actual call.
            request = DummyRequest(method='post')
            request.current_resource_name = 'bucket'
            request.upath_info = '/buckets'
            request.matchdict = {}
            request.registry = mock.Mock()
            request.registry.settings = {
                'bucket_create_principals': 'fxa:user'
            }
            context = RouteFactory(request)
            self.assertEquals(context.allowed_principals, ['fxa:user'])
class AuthorizationPolicyTest(unittest.TestCase):
    """Tests for AuthorizationPolicy.permits(): private vs dynamic
    permissions, delegation to the context's check_permission(), and
    principal list handling (prefixed userid added, bare one removed)."""
    def setUp(self):
        # A policy wired to a fully mocked route-factory context.
        self.authz = AuthorizationPolicy()
        self.authz.get_bound_permissions = mock.sentinel.get_bound_perms
        self.context = mock.MagicMock()
        self.context.get_prefixed_userid.return_value = None
        self.context.allowed_principals = []
        self.context.object_id = mock.sentinel.object_id
        self.context.required_permission = 'read'
        self.principals = []
        self.permission = 'dynamic'
    def test_permits_does_not_refer_to_context_if_permission_is_private(self):
        # 'private' only requires authentication, never consults context.
        self.assertFalse(self.authz.permits(None, [], 'private'))
    def test_permits_return_if_authenticated_when_permission_is_private(self):
        self.assertTrue(self.authz.permits(None,
                                           ['system.Authenticated'],
                                           'private'))
    def test_permits_refers_to_context_to_check_permissions(self):
        self.context.check_permission.return_value = True
        allowed = self.authz.permits(self.context, self.principals, 'dynamic')
        self.assertTrue(allowed)
    def test_permits_refers_to_context_to_check_permission_principals(self):
        # Even when check_permission denies, a principal listed in
        # allowed_principals grants access.
        self.context.check_permission.return_value = False
        self.context.allowed_principals = ['fxa:user']
        allowed = self.authz.permits(
            self.context, ['fxa:user', 'system.Authenticated'], 'dynamic')
        self.assertTrue(allowed)
    def test_permits_reads_the_context_when_permission_is_dynamic(self):
        # 'dynamic' resolves to the context's required_permission ('read').
        self.authz.permits(self.context, self.principals, 'dynamic')
        self.context.check_permission.assert_called_with(
            'read',
            self.principals,
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_permits_consider_permission_when_not_dynamic(self):
        self.authz.permits(self.context, self.principals, 'foobar')
        self.context.check_permission.assert_called_with(
            'foobar',
            self.principals,
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_permits_prepend_obj_type_to_permission_on_create(self):
        # Creation checks use the '<resource>:create' form.
        self.context.required_permission = 'create'
        self.context.resource_name = 'record'
        self.authz.permits(self.context, self.principals, 'dynamic')
        self.context.check_permission.assert_called_with(
            'record:create',
            self.principals,
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_permits_takes_route_factory_allowed_principals_into_account(self):
        self.context.resource_name = 'record'
        self.context.required_permission = 'create'
        self.context.allowed_principals = ['fxa:user']
        has_permission = self.authz.permits(
            self.context, ['fxa:user'], 'dynamic')
        self.context.check_permission.assert_not_called()
        self.assertTrue(has_permission)
    def test_prefixed_userid_is_added_to_principals(self):
        # Both 'fxa:userid' and its underscore variant are appended.
        self.context.get_prefixed_userid.return_value = 'fxa:userid'
        self.authz.permits(self.context, self.principals, 'foobar')
        self.context.check_permission.assert_called_with(
            'foobar',
            self.principals + ['fxa:userid', 'fxa_userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_unprefixed_userid_is_removed_from_principals(self):
        self.context.get_prefixed_userid.return_value = 'fxa:userid'
        self.authz.permits(self.context, ['userid'], 'foobar')
        self.context.check_permission.assert_called_with(
            'foobar',
            ['fxa:userid', 'fxa_userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
class GuestAuthorizationPolicyTest(unittest.TestCase):
    """Guest (shared-records) access: a collection GET is allowed when the
    principal can read at least some shared records, even though the
    direct permission check fails."""
    def setUp(self):
        self.authz = AuthorizationPolicy()
        self.authz.get_bound_permissions = mock.sentinel.get_bound_perms
        self.request = DummyRequest(method='GET')
        self.context = RouteFactory(self.request)
        self.context.on_collection = True
        # Direct permission check always denies; only the shared-records
        # fallback can grant access in these tests.
        self.context.check_permission = mock.Mock(return_value=False)
    def test_permits_returns_true_if_collection_and_shared_records(self):
        self.context.fetch_shared_records = mock.MagicMock(return_value=[
            'record1', 'record2'])
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.context.fetch_shared_records.assert_called_with(
            'read',
            ['userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
        self.assertTrue(allowed)
    def test_permits_does_not_return_true_if_not_collection(self):
        self.context.on_collection = False
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.assertFalse(allowed)
    def test_permits_does_not_return_true_if_not_list_operation(self):
        # The shared-records fallback only applies to read/list operations.
        self.context.required_permission = 'create'
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.assertFalse(allowed)
        allowed = self.authz.permits(self.context, ['userid'], 'create')
        self.assertFalse(allowed)
    def test_permits_returns_false_if_collection_is_unknown(self):
        self.context.fetch_shared_records = mock.MagicMock(return_value=None)
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.context.fetch_shared_records.assert_called_with(
            'read',
            ['userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
        self.assertFalse(allowed)
| 43.816733 | 79 | 0.68267 | 10,722 | 0.974905 | 0 | 0 | 0 | 0 | 0 | 0 | 1,072 | 0.097472 |
c59e92e67a171f7d5604e58b73bfb9fa142c32b1 | 6,663 | py | Python | src/main.py | tomaszbartoszewski/sense-hat-sokoban | a6784d648103f8dc29e99113e7ed4ca502618ff0 | [
"MIT"
] | null | null | null | src/main.py | tomaszbartoszewski/sense-hat-sokoban | a6784d648103f8dc29e99113e7ed4ca502618ff0 | [
"MIT"
] | null | null | null | src/main.py | tomaszbartoszewski/sense-hat-sokoban | a6784d648103f8dc29e99113e7ed4ca502618ff0 | [
"MIT"
] | null | null | null | import copy
#from enum import IntFlag
from time import sleep
# I tried to use enum here, but I was having a problem with packages in the image, so I gave up as I just want to get it done
class FieldValue:
    """Bit flags describing the contents of one board cell.
    Values are powers of two so they can be OR-ed together (e.g. a
    player standing on a goal cell is Player | Goal).
    """
    Empty = 0   # nothing on the cell
    Wall = 1    # impassable cell
    Player = 2  # the player token
    Box = 4     # a pushable crate
    Goal = 8    # target square a box must end up on
class SenseHATColour:
    """RGB tuples used when rendering cells on the Sense HAT LED matrix
    (see the mapping in print_to_senseHAT)."""
    Red = (204, 4, 4)        # walls
    White = (255, 255, 255)  # player
    Yellow = (234, 231, 51)  # box not on a goal
    Green = (1, 158, 1)      # box sitting on a goal
    Blue = (13, 0, 198)      # empty goal square
    Black = (0, 0, 0)        # empty cell (LED off)
def mapStringToBoardRow(line):
    """Translate one line of the textual level format into FieldValue cells
    (standard Sokoban notation: # wall, @ player, $ box, . goal, etc.)."""
    symbol_to_field = {
        '#': FieldValue.Wall,
        '@': FieldValue.Player,
        '+': (FieldValue.Player | FieldValue.Goal),
        '$': FieldValue.Box,
        '*': (FieldValue.Box | FieldValue.Goal),
        '.': FieldValue.Goal,
        ' ': FieldValue.Empty
    }
    return [symbol_to_field[symbol] for symbol in line]
def get_levels():
    """Parse BoardDefinition.txt into a list of levels.
    The file format is: a line with the row count of the next level,
    followed by that many board lines; repeated for each level.
    Returns a list of boards, each a list of FieldValue rows.
    Fixed: the file handle was never closed; it is now managed by a
    `with` block.  (Swap in 'TestDefinition.txt' here when testing.)
    """
    levels = []
    board = []
    number_of_rows = 0
    with open('BoardDefinition.txt', 'r') as board_definition:
        for line in board_definition.read().splitlines():
            if number_of_rows == 0:
                # Header line: flush the previous board, start a new one.
                if len(board) > 0:
                    levels.append(board)
                board = []
                number_of_rows = int(line)
            else:
                board.append(mapStringToBoardRow(line))
                number_of_rows -= 1
    levels.append(board)
    return levels
def print_to_console(level):
    """Render a level as ASCII art on stdout, one character per cell."""
    field_to_symbol = {
        FieldValue.Wall: '#',
        FieldValue.Player: '@',
        FieldValue.Player | FieldValue.Goal: '+',
        FieldValue.Box: '$',
        FieldValue.Box | FieldValue.Goal: '*',
        FieldValue.Goal: '.',
        FieldValue.Empty: ' '
    }
    for row in level:
        print(''.join(field_to_symbol[cell] for cell in row))
# Hardware handle shared by all display helpers below.
# NOTE(review): this mid-file import/instantiation means merely importing
# this module requires a physical Sense HAT to be present.
from sense_hat import SenseHat
sense = SenseHat()
def print_to_senseHAT(level):
    """Render a level on the 8x8 LED matrix, one pixel per cell."""
    colour_of = {
        FieldValue.Wall: SenseHATColour.Red,
        FieldValue.Player: SenseHATColour.White,
        FieldValue.Player | FieldValue.Goal: SenseHATColour.White,
        FieldValue.Box: SenseHATColour.Yellow,
        FieldValue.Box | FieldValue.Goal: SenseHATColour.Green,
        FieldValue.Goal: SenseHATColour.Blue,
        FieldValue.Empty: SenseHATColour.Black
    }
    sense.clear()
    # NOTE(review): looks like a leftover debug trace; kept for behavior
    # parity -- confirm before removing.
    print(level)
    for y, row in enumerate(level):
        for x, cell in enumerate(row):
            sense.set_pixel(x, y, colour_of[cell])
def can_move(level, destination, behind):
    """Whether the player may step onto *destination*, given that any box
    there would be pushed into the *behind* cell."""
    dest_x, dest_y = destination
    dest_cell = level[dest_y][dest_x]
    # Walls can never be entered.
    if dest_cell == FieldValue.Wall:
        return False
    behind_x, behind_y = behind
    behind_cell = level[behind_y][behind_x]
    pushing_box = dest_cell & FieldValue.Box == FieldValue.Box
    push_blocked = (behind_cell & FieldValue.Box == FieldValue.Box
                    or behind_cell & FieldValue.Wall == FieldValue.Wall)
    # A box can only be pushed into a cell holding neither box nor wall.
    return not (pushing_box and push_blocked)
def get_player_position(level):
    """Return (x, y) of the cell holding the player; None if absent."""
    for y, row in enumerate(level):
        for x, cell in enumerate(row):
            if cell & FieldValue.Player == FieldValue.Player:
                return x, y
def try_move(level, player_position, destination, behind):
    """Attempt one move; mutates *level* in place and returns True on success.
    Clears the Player bit on the old cell, sets it on *destination*; if a
    box occupied *destination*, its Box bit is moved to *behind*.  The
    order of the bit updates matters: the box check reads the destination
    cell after the Player bit has been OR-ed in.
    """
    if can_move(level, destination, behind):
        # Remove the player from the current cell, keeping any Goal bit.
        level[player_position[1]][player_position[0]] = level[player_position[1]][player_position[0]] & ~FieldValue.Player
        level[destination[1]][destination[0]] = level[destination[1]][destination[0]] | FieldValue.Player
        # If a box was on the destination, push it one cell further.
        if level[destination[1]][destination[0]] & FieldValue.Box == FieldValue.Box:
            level[destination[1]][destination[0]] = level[destination[1]][destination[0]] & ~FieldValue.Box
            level[behind[1]][behind[0]] = level[behind[1]][behind[0]] | FieldValue.Box
        # NOTE(review): looks like a leftover debug trace -- confirm before
        # removing.
        print(level)
        return True
    return False
def won(level):
    """True when every goal cell also holds a box."""
    return all(
        cell & FieldValue.Box == FieldValue.Box
        for row in level
        for cell in row
        if cell & FieldValue.Goal == FieldValue.Goal
    )
def play_level(level):
    """Run the interactive loop for one level until it is solved.
    Joystick directions move/push; pressing the middle button restarts
    the level from its initial layout.
    """
    deltas = {"up": (0, -1), "down": (0, 1), "left": (-1, 0), "right": (1, 0)}
    current_state = copy.deepcopy(level)
    (position_x, position_y) = get_player_position(current_state)
    board_changed = True
    while True:
        if board_changed:
            # Redraw only after a successful move or a reset.
            board_changed = False
            (position_x, position_y) = get_player_position(current_state)
            print_to_senseHAT(current_state)
            if won(current_state):
                sleep(0.5)
                return
        for event in sense.stick.get_events():
            if event.action != "pressed":
                continue
            if event.direction == "middle":
                current_state = copy.deepcopy(level)
                board_changed = True
            elif event.direction in deltas:
                dx, dy = deltas[event.direction]
                board_changed = try_move(
                    current_state,
                    (position_x, position_y),
                    (position_x + dx, position_y + dy),
                    (position_x + 2 * dx, position_y + 2 * dy))
def show_victory_sequence():
    """Animate concentric square outlines growing outwards on the LED matrix."""
    frames = [(SenseHATColour.Red, 3, 4), (SenseHATColour.Blue, 2, 5), (SenseHATColour.Green, 1, 6), (SenseHATColour.Yellow, 0, 7)]
    sense.clear()
    for colour, lo, hi in frames:
        # Left and right edges of the square outline.
        for y in range(lo, hi + 1):
            sense.set_pixel(lo, y, colour)
            sense.set_pixel(hi, y, colour)
        # Top and bottom edges (corners already drawn above).
        for x in range(lo + 1, hi):
            sense.set_pixel(x, lo, colour)
            sense.set_pixel(x, hi, colour)
        sleep(0.25)
def main():
    """Cycle through all levels forever, announcing each level number and
    the overall win between passes."""
    levels = get_levels()
    while True:
        for number, level in enumerate(levels, start=1):
            sense.show_message(str(number), text_colour=list(SenseHATColour.Red))
            play_level(copy.deepcopy(level))
            show_victory_sequence()
        sense.show_message("You won!", text_colour=list(SenseHATColour.Green))
if __name__ == '__main__':
    main()
| 33.994898 | 141 | 0.600931 | 253 | 0.037971 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.049377 |
c59ff391ff6f3b1025b7ee03eaa7698322a5189f | 4,082 | py | Python | src/similary_reviews/repeated_review_detection.py | maxuepo/x-review-processor | 0ea78eb4b88eaf4212f5145dad9dff2c4fdd7fc3 | [
"MIT"
] | null | null | null | src/similary_reviews/repeated_review_detection.py | maxuepo/x-review-processor | 0ea78eb4b88eaf4212f5145dad9dff2c4fdd7fc3 | [
"MIT"
] | null | null | null | src/similary_reviews/repeated_review_detection.py | maxuepo/x-review-processor | 0ea78eb4b88eaf4212f5145dad9dff2c4fdd7fc3 | [
"MIT"
] | null | null | null | from __future__ import print_function
from sklearn.feature_extraction.text import TfidfVectorizer
from common.util import ReviewUtil
import numpy as np
import ntpath
import pandas as pd
import os
from common.base_task import BaseTask
class ReviewDedupTask(BaseTask):
    """Detect near-duplicate reviews per product file via TF-IDF similarity.
    For each input file, reviews whose pairwise cosine similarity exceeds
    ``threshold`` are grouped into a ``repeated`` output file; the rest go
    to ``nonrepeated`` and known boilerplate ("system default") reviews
    go to ``dft``.
    """
    def __init__(self, inputdir, threshold=0.9, site='jd'):
        # threshold: similarity cutoff above which two reviews are
        # considered duplicates.
        BaseTask.__init__(self)
        self.input_path = inputdir
        self.threshold = threshold
        self.sys_default_reviews = self.default_review_set()
        self.site = site
    def __str__(self):
        return "ReviewDedupTask"
    def default_review_set(self):
        """Load the set of site-provided boilerplate review strings."""
        df = pd.read_csv(self.str_params['sys_default_path'], sep='\t', header=None)
        return set(df.loc[:, 0].values)
    def find_similar_reviews(self, df, threshold):
        """Group near-duplicate reviews.
        Returns ``(repeated_id_groups, nonrepeated_df, default_df)``.  On
        empty or unvectorizable input it returns a bare empty DataFrame;
        callers detect that via the ValueError raised when unpacking it.
        """
        visited = set()
        output_df_repeated = list()
        all_temp = set()
        if df.empty:
            self.logger.warning('Input dataframe is empty! Returning returning empty dataframe!')
            return pd.DataFrame()
        df_dft, df_input = self.pre_process(df)
        vect = TfidfVectorizer(min_df=1)
        doc_word_split = df_input['content'].apply(lambda t: ReviewUtil.spaced_words(t))
        chn_only_doc = doc_word_split.apply(lambda x: ReviewUtil.chn_char_only(x))
        try:
            tfidf = vect.fit_transform(chn_only_doc)
        except ValueError:
            # Empty vocabulary (e.g. no Chinese characters at all).
            return pd.DataFrame()
        # Sparse pairwise similarity matrix (tf-idf rows are normalised).
        sim_mat = tfidf * tfidf.T
        ind = 0
        for row in sim_mat:
            index_set = set()
            arry = row.toarray()
            if ind in visited:
                ind += 1
                continue
            (x, y) = np.where(arry > threshold)
            for yy in y:
                if ind >= yy:
                    # Only inspect the upper triangle; skip self/earlier pairs.
                    continue
                visited.add(ind)
                visited.add(yy)
                index_set.add(ind)
                index_set.add(yy)
            # BUG FIX: set.union() returns a new set and mutates nothing, so
            # all_temp previously stayed empty and every duplicate was also
            # reported as non-repeated; update() mutates in place.
            all_temp.update(index_set)
            # df_input has a reset 0..n-1 index, so label-based .loc matches
            # the positional indices collected above (replaces removed .ix).
            temp = df_input.loc[list(index_set)]
            if len(temp) != 0:
                output_df_repeated.append(temp['id'].values.tolist())
            ind += 1
        output_df_nonrepeated = df_input.loc[~df_input.index.isin(all_temp)]
        return output_df_repeated, output_df_nonrepeated, df_dft
    def run(self):
        """Process every valid file under ``input_path``, writing the three outputs."""
        file_paths = ReviewUtil.get_all_valid_path(self.input_path)
        for f in file_paths:
            self.logger.info("Review dedup task on: {}".format(str(f)))
            prodid = os.path.splitext(ntpath.basename(str(f)))[0]
            df = ReviewUtil.read_as_pandas(f, self.site, delimiter=',')
            if df.empty:
                continue
            output_path_repeated = ReviewUtil.get_output_path(str(f), prodid, "repeated")
            output_path_nonrepeated = ReviewUtil.get_output_path(str(f), prodid, "nonrepeated")
            output_path_dft = ReviewUtil.get_output_path(str(f), prodid, "dft")
            file_repeated = open(output_path_repeated, "w+")
            file_nonrepeated = open(output_path_nonrepeated, "w+")
            file_default = open(output_path_dft, "w+")
            try:
                try:
                    output_repeated, output_nonrepeated, df_dft = self.find_similar_reviews(df, self.threshold)
                except ValueError:
                    # find_similar_reviews returned a bare DataFrame (empty or
                    # unvectorizable input): leave the (empty) output files and
                    # move on to the next product file.
                    continue
                for lst in output_repeated:
                    str_temp = "\t".join(str(x) for x in lst)
                    file_repeated.write(str_temp)
                    file_repeated.write("\n")
                output_nonrepeated['id'] = output_nonrepeated['id'].astype(int)
                output_nonrepeated.to_csv(file_nonrepeated, index=False)
                df_dft.to_csv(file_default, index=False)
            finally:
                # BUG FIX: the handles previously leaked whenever
                # find_similar_reviews failed (the bare `continue` skipped
                # the close() calls).
                file_default.close()
                file_repeated.close()
                file_nonrepeated.close()
    def pre_process(self, df: pd.DataFrame) -> tuple:
        """Split ``df`` into (boilerplate rows, genuine rows); strips
        'content' in place on the caller's frame."""
        df.loc[:, 'content'] = df['content'].apply(lambda content: content.strip())
        df_dft = df.loc[df['content'].isin(self.sys_default_reviews)]
        df_input = df.loc[~df['content'].isin(self.sys_default_reviews)].reset_index(drop=True)
        return df_dft, df_input
| 41.232323 | 106 | 0.609015 | 3,845 | 0.94194 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.05904 |
c59ff6269abc6e4442e009258e5c25cd74a0c2dd | 59,325 | py | Python | nipyapi/nifi/apis/parameter_contexts_api.py | oneextrafact/nipyapi | 4c184d69002a8ee3ac528fda63b2ffcc6cedbae5 | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/apis/parameter_contexts_api.py | oneextrafact/nipyapi | 4c184d69002a8ee3ac528fda63b2ffcc6cedbae5 | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/apis/parameter_contexts_api.py | oneextrafact/nipyapi | 4c184d69002a8ee3ac528fda63b2ffcc6cedbae5 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ParameterContextsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_parameter_context(self, body, **kwargs):
"""
Create a Parameter Context
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_parameter_context(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ParameterContextEntity body: The Parameter Context. (required)
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_parameter_context_with_http_info(body, **kwargs)
else:
(data) = self.create_parameter_context_with_http_info(body, **kwargs)
return data
    def create_parameter_context_with_http_info(self, body, **kwargs):
        """
        Create a Parameter Context
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_parameter_context_with_http_info(body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param ParameterContextEntity body: The Parameter Context. (required)
        :return: ParameterContextEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: the declared API parameters plus the
        # generic client-plumbing options.
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge named arguments and **kwargs into one dict, rejecting any
        # keyword that is not a known parameter.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_parameter_context" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_parameter_context`")

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The parameter context entity is serialized as the JSON request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        # Delegate the HTTP exchange (and optional async dispatch via
        # `callback`) to the shared ApiClient.
        return self.api_client.call_api('/parameter-contexts', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_parameter_context(self, id, **kwargs):
"""
Deletes the Parameter Context with the given ID
Deletes the Parameter Context with the given ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_parameter_context(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The Parameter Context ID. (required)
:param str version: The version is used to verify the client is working with the latest version of the flow.
:param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_parameter_context_with_http_info(id, **kwargs)
else:
(data) = self.delete_parameter_context_with_http_info(id, **kwargs)
return data
    def delete_parameter_context_with_http_info(self, id, **kwargs):
        """
        Deletes the Parameter Context with the given ID
        Deletes the Parameter Context with the given ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_parameter_context_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: The Parameter Context ID. (required)
        :param str version: The version is used to verify the client is working with the latest version of the flow.
        :param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
        :param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
        :return: ParameterContextEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: declared API parameters plus the
        # generic client-plumbing options.
        all_params = ['id', 'version', 'client_id', 'disconnected_node_acknowledged']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge named arguments and **kwargs, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_parameter_context" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_parameter_context`")

        collection_formats = {}

        # `id` is substituted into the URL path; the revision/client options
        # travel as query parameters (note the camelCase wire names).
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']

        query_params = []
        if 'version' in params:
            query_params.append(('version', params['version']))
        if 'client_id' in params:
            query_params.append(('clientId', params['client_id']))
        if 'disconnected_node_acknowledged' in params:
            query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/parameter-contexts/{id}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_update_request(self, context_id, request_id, **kwargs):
"""
Deletes the Update Request with the given ID
Deletes the Update Request with the given ID. After a request is created via a POST to /nifi-api/parameter-contexts/update-requests, it is expected that the client will properly clean up the request by DELETE'ing it, once the Update process has completed. If the request is deleted before the request completes, then the Update request will finish the step that it is currently performing and then will cancel any subsequent steps.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_update_request(context_id, request_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the ParameterContext (required)
:param str request_id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_update_request_with_http_info(context_id, request_id, **kwargs)
else:
(data) = self.delete_update_request_with_http_info(context_id, request_id, **kwargs)
return data
    def delete_update_request_with_http_info(self, context_id, request_id, **kwargs):
        """
        Deletes the Update Request with the given ID
        Deletes the Update Request with the given ID. After a request is created via a POST to /nifi-api/parameter-contexts/update-requests, it is expected that the client will properly clean up the request by DELETE'ing it, once the Update process has completed. If the request is deleted before the request completes, then the Update request will finish the step that it is currently performing and then will cancel any subsequent steps.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_update_request_with_http_info(context_id, request_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str context_id: The ID of the ParameterContext (required)
        :param str request_id: The ID of the Update Request (required)
        :param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
        :return: ParameterContextUpdateRequestEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: declared API parameters plus the
        # generic client-plumbing options.
        all_params = ['context_id', 'request_id', 'disconnected_node_acknowledged']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge named arguments and **kwargs, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_update_request" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'context_id' is set
        if ('context_id' not in params) or (params['context_id'] is None):
            raise ValueError("Missing the required parameter `context_id` when calling `delete_update_request`")
        # verify the required parameter 'request_id' is set
        if ('request_id' not in params) or (params['request_id'] is None):
            raise ValueError("Missing the required parameter `request_id` when calling `delete_update_request`")

        collection_formats = {}

        # Both ids are substituted into the URL path (camelCase wire names).
        path_params = {}
        if 'context_id' in params:
            path_params['contextId'] = params['context_id']
        if 'request_id' in params:
            path_params['requestId'] = params['request_id']

        query_params = []
        if 'disconnected_node_acknowledged' in params:
            query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/parameter-contexts/{contextId}/update-requests/{requestId}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextUpdateRequestEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_validation_request(self, context_id, id, **kwargs):
"""
Deletes the Validation Request with the given ID
Deletes the Validation Request with the given ID. After a request is created via a POST to /nifi-api/validation-contexts, it is expected that the client will properly clean up the request by DELETE'ing it, once the validation process has completed. If the request is deleted before the request completes, then the Validation request will finish the step that it is currently performing and then will cancel any subsequent steps.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_validation_request(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_validation_request_with_http_info(context_id, id, **kwargs)
else:
(data) = self.delete_validation_request_with_http_info(context_id, id, **kwargs)
return data
    def delete_validation_request_with_http_info(self, context_id, id, **kwargs):
        """
        Deletes the Validation Request with the given ID
        Deletes the Validation Request with the given ID. After a request is created via a POST to /nifi-api/validation-contexts, it is expected that the client will properly clean up the request by DELETE'ing it, once the validation process has completed. If the request is deleted before the request completes, then the Validation request will finish the step that it is currently performing and then will cancel any subsequent steps.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_validation_request_with_http_info(context_id, id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str context_id: The ID of the Parameter Context (required)
        :param str id: The ID of the Update Request (required)
        :param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
        :return: ParameterContextValidationRequestEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: declared API parameters plus the
        # generic client-plumbing options.
        all_params = ['context_id', 'id', 'disconnected_node_acknowledged']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge named arguments and **kwargs, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_validation_request" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'context_id' is set
        if ('context_id' not in params) or (params['context_id'] is None):
            raise ValueError("Missing the required parameter `context_id` when calling `delete_validation_request`")
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_validation_request`")

        collection_formats = {}

        # Both ids are substituted into the URL path (camelCase wire names).
        path_params = {}
        if 'context_id' in params:
            path_params['contextId'] = params['context_id']
        if 'id' in params:
            path_params['id'] = params['id']

        query_params = []
        if 'disconnected_node_acknowledged' in params:
            query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/parameter-contexts/{contextId}/validation-requests/{id}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextValidationRequestEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_parameter_context(self, id, **kwargs):
"""
Returns the Parameter Context with the given ID
Returns the Parameter Context with the given ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_parameter_context(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Parameter Context (required)
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_parameter_context_with_http_info(id, **kwargs)
else:
(data) = self.get_parameter_context_with_http_info(id, **kwargs)
return data
    def get_parameter_context_with_http_info(self, id, **kwargs):
        """
        Returns the Parameter Context with the given ID
        Returns the Parameter Context with the given ID.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_parameter_context_with_http_info(id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str id: The ID of the Parameter Context (required)
        :return: ParameterContextEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: the declared API parameter plus the
        # generic client-plumbing options.
        all_params = ['id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge named arguments and **kwargs, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_parameter_context" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_parameter_context`")

        collection_formats = {}

        # `id` is substituted into the URL path.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/parameter-contexts/{id}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_parameter_context_update(self, context_id, request_id, **kwargs):
"""
Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /nifi-api/parameter-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_parameter_context_update(context_id, request_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str request_id: The ID of the Update Request (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_parameter_context_update_with_http_info(context_id, request_id, **kwargs)
else:
(data) = self.get_parameter_context_update_with_http_info(context_id, request_id, **kwargs)
return data
    def get_parameter_context_update_with_http_info(self, context_id, request_id, **kwargs):
        """
        Returns the Update Request with the given ID
        Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /nifi-api/parameter-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_parameter_context_update_with_http_info(context_id, request_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str context_id: The ID of the Parameter Context (required)
        :param str request_id: The ID of the Update Request (required)
        :return: ParameterContextUpdateRequestEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: declared API parameters plus the
        # generic client-plumbing options.
        all_params = ['context_id', 'request_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge named arguments and **kwargs, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_parameter_context_update" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'context_id' is set
        if ('context_id' not in params) or (params['context_id'] is None):
            raise ValueError("Missing the required parameter `context_id` when calling `get_parameter_context_update`")
        # verify the required parameter 'request_id' is set
        if ('request_id' not in params) or (params['request_id'] is None):
            raise ValueError("Missing the required parameter `request_id` when calling `get_parameter_context_update`")

        collection_formats = {}

        # Both ids are substituted into the URL path (camelCase wire names).
        path_params = {}
        if 'context_id' in params:
            path_params['contextId'] = params['context_id']
        if 'request_id' in params:
            path_params['requestId'] = params['request_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/parameter-contexts/{contextId}/update-requests/{requestId}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextUpdateRequestEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_validation_request(self, context_id, id, **kwargs):
"""
Returns the Validation Request with the given ID
Returns the Validation Request with the given ID. Once a Validation Request has been created by performing a POST to /nifi-api/validation-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_validation_request(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Validation Request (required)
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_validation_request_with_http_info(context_id, id, **kwargs)
else:
(data) = self.get_validation_request_with_http_info(context_id, id, **kwargs)
return data
    def get_validation_request_with_http_info(self, context_id, id, **kwargs):
        """
        Returns the Validation Request with the given ID
        Returns the Validation Request with the given ID. Once a Validation Request has been created by performing a POST to /nifi-api/validation-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_validation_request_with_http_info(context_id, id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str context_id: The ID of the Parameter Context (required)
        :param str id: The ID of the Validation Request (required)
        :return: ParameterContextValidationRequestEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: declared API parameters plus the
        # generic client-plumbing options.
        all_params = ['context_id', 'id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge named arguments and **kwargs, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_validation_request" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'context_id' is set
        if ('context_id' not in params) or (params['context_id'] is None):
            raise ValueError("Missing the required parameter `context_id` when calling `get_validation_request`")
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_validation_request`")

        collection_formats = {}

        # Both ids are substituted into the URL path (camelCase wire names).
        path_params = {}
        if 'context_id' in params:
            path_params['contextId'] = params['context_id']
        if 'id' in params:
            path_params['id'] = params['id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['tokenAuth']

        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/parameter-contexts/{contextId}/validation-requests/{id}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextValidationRequestEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def submit_parameter_context_update(self, context_id, body, **kwargs):
    """
    Initiate the Update Request of a Parameter Context

    Kicks off the asynchronous process of updating a Parameter Context.
    Because changing a Parameter's value may require stopping and
    restarting components, this endpoint returns a
    ParameterContextUpdateRequestEntity immediately; the client should
    poll GET /parameter-contexts/update-requests/{requestId} for progress
    and issue a DELETE against the same URI once the request completes.

    The call is synchronous by default. Pass a `callback` keyword argument
    to perform the HTTP request on a background thread instead; the thread
    is returned and the callback is invoked with the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.submit_parameter_context_update(context_id, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str context_id: (required)
    :param ParameterContextEntity body: The updated version of the parameter context. (required)
    :return: ParameterContextUpdateRequestEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always strips the (data, status, headers)
    # tuple down to just the deserialized data.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread when
    # a callback is supplied and the plain data otherwise, so one call
    # covers both the sync and async paths.
    return self.submit_parameter_context_update_with_http_info(context_id, body, **kwargs)
def submit_parameter_context_update_with_http_info(self, context_id, body, **kwargs):
    """
    Initiate the Update Request of a Parameter Context
    This will initiate the process of updating a Parameter Context. Changing the value of a Parameter may require that one or more components be stopped and restarted, so this acttion may take significantly more time than many other REST API actions. As a result, this endpoint will immediately return a ParameterContextUpdateRequestEntity, and the process of updating the necessary components will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /parameter-contexts/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /parameter-contexts/update-requests/{requestId}.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.submit_parameter_context_update_with_http_info(context_id, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str context_id: (required)
    :param ParameterContextEntity body: The updated version of the parameter context. (required)
    :return: ParameterContextUpdateRequestEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Whitelist of keyword arguments this endpoint accepts; anything else
    # passed via **kwargs is rejected with a TypeError below.
    all_params = ['context_id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so the named parameters and **kwargs can
    # be validated and looked up uniformly by name. Must run before any
    # other locals are introduced.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method submit_parameter_context_update" % key
            )
        # Promote each accepted kwarg to a first-class entry in params.
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'context_id' is set
    if ('context_id' not in params) or (params['context_id'] is None):
        raise ValueError("Missing the required parameter `context_id` when calling `submit_parameter_context_update`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `submit_parameter_context_update`")

    # No list/array parameters on this endpoint, so no collection formats.
    collection_formats = {}

    # Bind the python-style argument to the {contextId} template in the path.
    path_params = {}
    if 'context_id' in params:
        path_params['contextId'] = params['context_id']

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The request body is the ParameterContextEntity supplied by the caller.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['tokenAuth']

    # Delegate the actual HTTP exchange (and async dispatch, if a callback
    # was given) to the shared API client.
    return self.api_client.call_api('/parameter-contexts/{contextId}/update-requests', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterContextUpdateRequestEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def submit_validation_request(self, context_id, body, **kwargs):
    """
    Initiate a Validation Request to determine how the validity of components will change if a Parameter Context were to be updated

    Starts the asynchronous validation of every component whose Process
    Group is bound to the given Parameter Context. A
    ParameterContextValidationRequestEntity is returned immediately; the
    client should poll GET
    /parameter-contexts/validation-requests/{requestId} for status and
    issue a DELETE against that URI once the request completes.

    The call is synchronous by default. Pass a `callback` keyword argument
    to perform the HTTP request on a background thread instead; the thread
    is returned and the callback is invoked with the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.submit_validation_request(context_id, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str context_id: (required)
    :param ParameterContextValidationRequestEntity body: The validation request (required)
    :return: ParameterContextValidationRequestEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always strips the (data, status, headers)
    # tuple down to just the deserialized data.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread when
    # a callback is supplied and the plain data otherwise, so one call
    # covers both the sync and async paths.
    return self.submit_validation_request_with_http_info(context_id, body, **kwargs)
def submit_validation_request_with_http_info(self, context_id, body, **kwargs):
    """
    Initiate a Validation Request to determine how the validity of components will change if a Parameter Context were to be updated
    This will initiate the process of validating all components whose Process Group is bound to the specified Parameter Context. Performing validation against an arbitrary number of components may be expect and take significantly more time than many other REST API actions. As a result, this endpoint will immediately return a ParameterContextValidationRequestEntity, and the process of validating the necessary components will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /parameter-contexts/validation-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /parameter-contexts/validation-requests/{requestId}.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.submit_validation_request_with_http_info(context_id, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str context_id: (required)
    :param ParameterContextValidationRequestEntity body: The validation request (required)
    :return: ParameterContextValidationRequestEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Whitelist of keyword arguments this endpoint accepts; anything else
    # passed via **kwargs is rejected with a TypeError below.
    all_params = ['context_id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so the named parameters and **kwargs can
    # be validated and looked up uniformly by name. Must run before any
    # other locals are introduced.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method submit_validation_request" % key
            )
        # Promote each accepted kwarg to a first-class entry in params.
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'context_id' is set
    if ('context_id' not in params) or (params['context_id'] is None):
        raise ValueError("Missing the required parameter `context_id` when calling `submit_validation_request`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `submit_validation_request`")

    # No list/array parameters on this endpoint, so no collection formats.
    collection_formats = {}

    # Bind the python-style argument to the {contextId} template in the path.
    path_params = {}
    if 'context_id' in params:
        path_params['contextId'] = params['context_id']

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The request body is the validation request entity supplied by the caller.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['tokenAuth']

    # Delegate the actual HTTP exchange (and async dispatch, if a callback
    # was given) to the shared API client.
    return self.api_client.call_api('/parameter-contexts/{contextId}/validation-requests', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterContextValidationRequestEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def update_parameter_context(self, id, body, **kwargs):
    """
    Modifies a Parameter Context

    Updates a Parameter Context to match the provided entity. The request
    fails if any running component references a Parameter in the context.
    This endpoint is normally not called directly; instead an update
    request is submitted via POST to
    /parameter-contexts/update-requests, which in turn invokes it.

    The call is synchronous by default. Pass a `callback` keyword argument
    to perform the HTTP request on a background thread instead; the thread
    is returned and the callback is invoked with the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_parameter_context(id, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: (required)
    :param ParameterContextEntity body: The updated Parameter Context (required)
    :return: ParameterContextEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # This convenience wrapper always strips the (data, status, headers)
    # tuple down to just the deserialized data.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info variant already returns the request thread when
    # a callback is supplied and the plain data otherwise, so one call
    # covers both the sync and async paths.
    return self.update_parameter_context_with_http_info(id, body, **kwargs)
def update_parameter_context_with_http_info(self, id, body, **kwargs):
    """
    Modifies a Parameter Context
    This endpoint will update a Parameter Context to match the provided entity. However, this request will fail if any component is running and is referencing a Parameter in the Parameter Context. Generally, this endpoint is not called directly. Instead, an update request should be submitted by making a POST to the /parameter-contexts/update-requests endpoint. That endpoint will, in turn, call this endpoint.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_parameter_context_with_http_info(id, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: (required)
    :param ParameterContextEntity body: The updated Parameter Context (required)
    :return: ParameterContextEntity
        If the method is called asynchronously,
        returns the request thread.
    """
    # Whitelist of keyword arguments this endpoint accepts; anything else
    # passed via **kwargs is rejected with a TypeError below.
    all_params = ['id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the local namespace so the named parameters and **kwargs can
    # be validated and looked up uniformly by name. Must run before any
    # other locals are introduced.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_parameter_context" % key
            )
        # Promote each accepted kwarg to a first-class entry in params.
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `update_parameter_context`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `update_parameter_context`")

    # No list/array parameters on this endpoint, so no collection formats.
    collection_formats = {}

    # Bind the python-style argument to the {id} template in the path.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The request body is the updated ParameterContextEntity supplied by the caller.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['tokenAuth']

    # Delegate the actual HTTP exchange (and async dispatch, if a callback
    # was given) to the shared API client.
    return self.api_client.call_api('/parameter-contexts/{id}', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterContextEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 50.661827 | 752 | 0.615306 | 58,438 | 0.985048 | 0 | 0 | 0 | 0 | 0 | 0 | 34,545 | 0.582301 |
c5a01d4fbbbb1fc700ce617c6691f4e30e690d80 | 16,811 | py | Python | sw/3rd_party/VTK-7.1.0/Utilities/Maintenance/vtk_reindent_code.py | esean/stl_voro_fill | c569a4019ff80afbf85482c7193711ea85a7cafb | [
"MIT"
] | 4 | 2019-05-30T01:52:12.000Z | 2021-09-29T21:12:13.000Z | sw/3rd_party/VTK-7.1.0/Utilities/Maintenance/vtk_reindent_code.py | esean/stl_voro_fill | c569a4019ff80afbf85482c7193711ea85a7cafb | [
"MIT"
] | null | null | null | sw/3rd_party/VTK-7.1.0/Utilities/Maintenance/vtk_reindent_code.py | esean/stl_voro_fill | c569a4019ff80afbf85482c7193711ea85a7cafb | [
"MIT"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | #!/usr/bin/env python
"""
Usage: python vtk_reindent_code.py [--test] <file1> [<file2> ...]
This script takes old-style "Whitesmiths" indented VTK source files as
input, and re-indents the braces according to the new VTK style.
Only the brace indentation is modified.
If called with the --test option, then it will print an error message
for each file that it would modify, but it will not actually modify the
files.
Written by David Gobbi on Sep 30, 2015.
"""
import sys
import os
import re
def reindent(filename, dry_run=False):
"""Reindent a file from Whitesmiths style to Allman style"""
# The first part of this function clears all strings and comments
# where non-grammatical braces might be hiding. These changes will
# not be saved back to the file, they just simplify the parsing.
# look for ', ", /*, and //
keychar = re.compile(r"""[/"']""")
# comments of the form /* */
c_comment = re.compile(r"\/\*(\*(?!\/)|[^*])*\*\/")
c_comment_start = re.compile(r"\/\*(\*(?!\/)|[^*])*$")
c_comment_end = re.compile(r"^(\*(?!\/)|[^*])*\*\/")
# comments of the form //
cpp_comment = re.compile(r"\/\/.*")
# string literals ""
string_literal = re.compile(r'"([^\\"]|\\.)*"')
string_literal_start = re.compile(r'"([^\\"]|\\.)*\\$')
string_literal_end = re.compile(r'^([^\\"]|\\.)*"')
# character literals ''
char_literal = re.compile(r"'([^\\']|\\.)*'")
char_literal_start = re.compile(r"'([^\\']|\\.)*\\$")
char_literal_end = re.compile(r"^([^\\']|\\.)*'")
# read the file
try:
f = open(filename)
lines = f.readlines()
f.close()
except:
sys.stderr.write(filename + ": ")
sys.stderr.write(str(sys.exc_info()[1]) + "\n")
sys.exit(1)
# convert strings to "", char constants to '', and remove comments
n = len(lines) # 'lines' is the input
newlines = [] # 'newlines' is the output
cont = None # set if e.g. we found /* and we are looking for */
for i in range(n):
line = lines[i].rstrip()
if cont is not None:
# look for closing ' or " or */
match = cont.match(line)
if match:
# found closing ' or " or */
line = line[match.end():]
cont = None
else:
# this whole line is in the middle of a string or comment
if cont is c_comment_end:
# still looking for */, clear the whole line
newlines.append("")
continue
else:
# still looking for ' or ", set line to backslash
newlines.append('\\')
continue
# start at column 0 and search for ', ", /*, or //
pos = 0
while True:
match = keychar.search(line, pos)
if match is None:
break
pos = match.start()
end = match.end()
# was the match /* ... */ ?
match = c_comment.match(line, pos)
if match:
line = line[0:pos] + " " + line[match.end():]
pos += 1
continue
# does the line have /* ... without the */ ?
match = c_comment_start.match(line, pos)
if match:
if line[-1] == '\\':
line = line[0:pos] + ' \\'
else:
line = line[0:pos]
cont = c_comment_end
break
# does the line have // ?
match = cpp_comment.match(line, pos)
if match:
if line[-1] == '\\':
line = line[0:pos] + ' \\'
else:
line = line[0:pos]
break
# did we find "..." ?
match = string_literal.match(line, pos)
if match:
line = line[0:pos] + "\"\"" + line[match.end():]
pos += 2
continue
# did we find "... without the final " ?
match = string_literal_start.match(line, pos)
if match:
line = line[0:pos] + "\"\"\\"
cont = string_literal_end
break
# did we find '...' ?
match = char_literal.match(line, pos)
if match:
line = line[0:pos] + "\' \'" + line[match.end():]
pos += 3
continue
# did we find '... without the final ' ?
match = char_literal_start.match(line, pos)
if match:
line = line[0:pos] + "\' \'\\"
cont = char_literal_end
break
# if we got to here, we found / that wasn't /* or //
pos += 1
# strip any trailing whitespace!
newlines.append(line.rstrip())
# The second part of this function looks for braces in the simplified
# code that we wrote to "newlines" after removing the contents of all
# string literals, character literals, and comments.
# Whenever we encounter an opening brace, we push its position onto a
# stack. Whenever we encounter the matching closing brace, we indent
# the braces as a pair.
# For #if directives, we check whether there are mismatched braces
# within the conditional block, and if so, we print a warning and reset
# the stack to the depth that it had at the start of the block.
# For #define directives, we save the stack and then restart counting
# braces until the end of the #define. Then we restore the stack.
# all changes go through this function
lines_changed = {} # keeps track of each line that was changed
def changeline(i, newtext, lines_changed=lines_changed):
if newtext != lines[i]:
lines[i] = newtext
lines_changed[i] = newtext
# we push a tuple (delim, row, col, newcol) onto this stack whenever
# we find a {, (, or [ delimiter, this keeps track of where we found
# the delimeter and what column we want to move it to
stack = []
lastdepth = 0
# this is a superstack that allows us to save the entire stack when we
# enter into an #if conditional block
dstack = []
# these are syntactic elements we need to look for
directive = re.compile(r"\s*#\s*(..)")
label = re.compile(r"""(case(?!\w)([^:]|::)+|\w+\s*(::\s*)*\s*:(?!:))""")
cflow = re.compile(r"(if|else|for|do|while|switch)(?!\w)")
delims = re.compile(r"[{}()\[\];]")
spaces = re.compile(r"\s*")
other = re.compile(r"(\w+|[^{}()\[\];\w\s]+)\s*")
cplusplus = re.compile(r"\s*#\s*ifdef\s+__cplusplus")
indentation = 0 # current indentation column
continuation = False # true if line continues an unfinished statement
new_context = True # also set when we enter a #define statement
in_else = False # set if in an #else
in_define = False # set if in #define
in_assign = False # set to deal with "= {" or #define x {"
leaving_define = False # set if at the end of a #define
save_stack = None # save stack when entering a #define
for i in range(n):
line = newlines[i]
# restore stack when leaving #define
if leaving_define:
stack, indentation, continuation = save_stack
save_stack = None
in_define = False
leaving_define = False
# handle #if conditionals
is_directive = False
in_else = False
match = directive.match(line)
if match:
is_directive = True
if match.groups()[0] == 'if':
dstack.append((list(stack), indentation, continuation,
line))
elif match.groups()[0] in ('en', 'el'):
oldstack, oldindent, oldcont, dline = dstack.pop()
if len(stack) > len(oldstack) and not cplusplus.match(dline):
sys.stderr.write(filename + ":" + str(i) + ": ")
sys.stderr.write("mismatched delimiter in \"" +
dline + "\" block\n")
if match.groups()[0] == 'el':
in_else = True
indentation = oldindent
continuation = oldcont
stack = oldstack
dstack.append((list(stack), indentation, continuation,
line))
elif match.groups()[0] == 'de':
in_define = True
leaving_define = False
save_stack = (stack, indentation, continuation)
stack = []
new_context = True
# remove backslash at end of line, if present
if len(line) > 0 and line[-1] == '\\':
line = line[0:-1].rstrip()
elif in_define:
leaving_define = True
if not is_directive and len(line) > 0 and not continuation:
# what is the indentation of the current line?
match = spaces.match(line)
if not line[match.end()] == '{':
indentation = match.end()
continuation = True
# new_context marks beginning of a file or a macro
if new_context:
continuation = False
indentation = 0
new_context = False
# skip initial whitespace
if is_directive:
pos = directive.match(line).end()
else:
pos = spaces.match(line).end()
# check for a label e.g. case
match = label.match(line, pos)
if match:
base = True
for item in stack:
if item[0] != '{':
base = False
if base:
word = re.match(r"\w*", match.group())
if word in ("case", "default"):
indentation = pos
continuation = False
# check for multiple labels on the same line
while match:
pos = spaces.match(line, match.end()).end()
match = label.match(line, pos)
# parse the line
while pos != len(line):
# check for if, else, for, while, do, switch
match = cflow.match(line, pos)
if match:
# if we are at the beginning of the line
if spaces.match(line).end() == pos:
indentation = pos
pos = spaces.match(line, match.end()).end()
continue
# check for a delimiter {} () [] or ;
match = delims.match(line, pos)
if not match:
# check for any other identifiers, operators
match = other.match(line, pos)
if match:
pos = match.end()
continue
else:
break
# found a delimiter
delim = line[pos]
if delim in ('(', '['):
# save delim, row, col, and current indentation
stack.append((delim, i, pos, indentation))
elif delim == '{':
if in_assign or line[0:pos-1].rstrip()[-1:] == "=":
# do not adjust braces for initializer lists
stack.append((delim, i, -1, indentation))
elif ((in_else or in_define) and spaces.sub("", line) == "{"):
# for opening braces that might have no match
indent = " "*indentation
changeline(i, spaces.sub(indent, lines[i], count=1))
stack.append((delim, i, pos, indentation))
else:
# save delim, row, col, and previous indentation
stack.append((delim, i, pos, indentation))
if spaces.sub("", newlines[i][0:pos]) == "":
indentation += 2
continuation = False
elif delim == ';':
# ';' marks end of statement unless inside for (;;)
if len(stack) == 0 or stack[-1][0] == '{':
continuation = False
else:
# found a ')', ']', or '}' delimiter, so pop its partner
try:
ldelim, j, k, indentation = stack.pop()
in_assign = (k < 0)
except IndexError:
ldelim = ""
if ldelim != {'}':'{', ')':'(', ']':'['}[delim]:
sys.stderr.write(filename + ":" + str(i) + ": ")
sys.stderr.write("mismatched \'" + delim + "\'\n")
# adjust the indentation of matching '{', '}'
if (ldelim == '{' and delim == '}' and not in_assign and
spaces.sub("", line[0:pos]) == ""):
if spaces.sub("", newlines[j][0:k]) == "":
indent = " "*indentation
changeline(j, spaces.sub(indent, lines[j], count=1))
changeline(i, spaces.sub(indent, lines[i], count=1))
elif i != j:
indent = " "*indentation
changeline(i, spaces.sub(indent, lines[i], count=1))
if delim == '}':
continuation = False
# eat whitespace and continue
pos = spaces.match(line, match.end()).end()
# check for " = " and #define assignments for the sake of
# the { inializer list } that might be on the following line
if len(line) > 0:
if (line[-1] == '=' or
(is_directive and in_define and not leaving_define)):
in_assign = True
elif not is_directive:
in_assign = False
if len(dstack) != 0:
sys.stderr.write(filename + ": ")
sys.stderr.write("mismatched #if conditional.\n")
if len(stack) != 0:
sys.stderr.write(filename + ":" + str(stack[0][1]) + ": ")
sys.stderr.write("no match for " + stack[0][0] +
" before end of file.\n")
if lines_changed:
# remove any trailing whitespace
trailing = re.compile(r" *$")
for i in range(n):
lines[i] = trailing.sub("", lines[i])
while n > 0 and lines[n-1].rstrip() == "":
n -= 1
if dry_run:
errcount = len(lines_changed)
line_numbers = list(lines_changed.keys())
line_numbers.sort()
line_numbers = [str(l + 1) for l in line_numbers[0:10] ]
if errcount > len(line_numbers):
line_numbers.append("...")
sys.stderr.write("Warning: " + filename +
": incorrect brace indentation on " +
str(errcount) +
(" lines: ", "line: ")[errcount == 1] +
", ".join(line_numbers) + "\n")
else:
# rewrite the file
ofile = open(filename, 'w')
ofile.writelines(lines)
ofile.close()
return True
return False
if __name__ == "__main__":
# ignore generated files
ignorefiles = ["lex.yy.c", "vtkParse.tab.c"]
files = []
opt_ignore = False # ignore all further options
opt_test = False # the --test option
for arg in sys.argv[1:]:
if arg[0:1] == '-' and not opt_ignore:
if arg == '--':
opt_ignore = True
elif arg == '--test':
opt_test = True
else:
sys.stderr.write("%s: unrecognized option %s\n" %
(os.path.split(sys.argv[0])[-1], arg))
sys.exit(1)
elif os.path.split(arg)[-1] not in ignorefiles:
files.append(arg)
# if --test was set, whenever a file needs modification, we set
# "failed" and continue checking the rest of the files
failed = False
for filename in files:
# repeat until no further changes occur
while reindent(filename, dry_run=opt_test):
if opt_test:
failed = True
break
if failed:
sys.exit(1)
| 39.278037 | 79 | 0.479924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,231 | 0.311165 |
c5a14c70626d442e04bd03fd58414f48d6a57094 | 119 | py | Python | communication_modules/azure_iot_hub/__init__.py | dbenge/SimpleSensor_contrib | f48c31d3a0e0e29531ac5b0b445dccafd4f1e1d9 | [
"Apache-2.0"
] | null | null | null | communication_modules/azure_iot_hub/__init__.py | dbenge/SimpleSensor_contrib | f48c31d3a0e0e29531ac5b0b445dccafd4f1e1d9 | [
"Apache-2.0"
] | 5 | 2018-07-22T03:06:33.000Z | 2018-11-08T22:42:53.000Z | communication_modules/azure_iot_hub/__init__.py | dbenge/SimpleSensor_contrib | f48c31d3a0e0e29531ac5b0b445dccafd4f1e1d9 | [
"Apache-2.0"
] | 3 | 2018-07-11T14:49:06.000Z | 2022-03-24T18:31:26.000Z | from simplesensor.communication_modules.azure_iot_hub.azureIotHubModule import AzureIotHubModule as CommunicationModule | 119 | 119 | 0.932773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c5a2b2cc9bb64eb2dd4546a33e1ab663ecfa9c59 | 591 | py | Python | example/example_list.py | vtheno/Vtools | 5a03891d0c2365602aaae68541f31ba89a6476e5 | [
"MIT"
] | 2 | 2018-04-25T08:59:55.000Z | 2018-04-25T09:53:35.000Z | example/example_list.py | vtheno/Vtools | 5a03891d0c2365602aaae68541f31ba89a6476e5 | [
"MIT"
] | null | null | null | example/example_list.py | vtheno/Vtools | 5a03891d0c2365602aaae68541f31ba89a6476e5 | [
"MIT"
] | null | null | null | from util.List import *
print( dir() )
lst = list(range(9999))
hd,*tl = lst
print( hd )
print( tl )
del hd,tl
Lst = toList(lst)
try:
print( hd,tl )
except NameError as e:
with Lst as (hd,tl):
print( hd )
print( tl )
print( type(hd),type(tl) )
print( Lst.hd is hd )
print( Lst.tl is tl )
del hd,tl
del lst,Lst
lst = Cons(1,Cons(2,Cons(3,Cons(4,nil))))
print( lst,type(lst) )
Lst = toPylist(lst)
print( Lst,type(Lst) )
with lst as (a,(b,c)):
print (a,b,c)
print (a is lst.hd)
print (b is lst.tl.hd)
print (c is lst.tl.tl)
| 18.46875 | 41 | 0.563452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c5a673641a1ea480ef65b68e6729bcb7a7f14c22 | 1,809 | py | Python | scripts_for_public/05_crf_solver.py | NeLy-EPFL/Ascending_neuron_screen_analysis_pipeline | 438b9db15765bf26581ecd4b8a1f93e8a844ebbd | [
"Apache-2.0"
] | null | null | null | scripts_for_public/05_crf_solver.py | NeLy-EPFL/Ascending_neuron_screen_analysis_pipeline | 438b9db15765bf26581ecd4b8a1f93e8a844ebbd | [
"Apache-2.0"
] | null | null | null | scripts_for_public/05_crf_solver.py | NeLy-EPFL/Ascending_neuron_screen_analysis_pipeline | 438b9db15765bf26581ecd4b8a1f93e8a844ebbd | [
"Apache-2.0"
] | null | null | null | import os.path
from scipy.optimize import fsolve
import math
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import utils_Florian as utils
def equations(p, t_peak, t_half):
x, y = p
return (0.5 * (math.exp(-x * t_peak) - math.exp(-y * t_peak)) - (math.exp(-x * t_half) - math.exp(-y * t_half)), -x * math.exp(-x * t_peak) + y * math.exp(-y * t_peak))
results = pd.DataFrame()
t_peaks = []
t_halfs = []
xs = []
ys = []
initial_conditions = ((12, 5),
(14, 4),
(14, 4),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1))
for alpha in range(1, 16):
t_peak = 0.1415
t_half = t_peak + 0.2 + alpha * 0.05
print("Target: ", t_half)
x, y = fsolve(equations, initial_conditions[alpha], args=(t_peak, t_half))
t_peaks.append(t_peak)
t_halfs.append(t_half - t_peak)
xs.append(x)
ys.append(y)
t = np.linspace(0, 2.0, 10000)
crf = -np.exp(-x * t) + np.exp(-y * t)
crf = crf / sum(crf)
print("t peak", t[np.argmax(crf)])
diff = crf - 0.5 * max(crf)
diff[:np.argmax(crf)] = np.inf
diff = np.abs(diff)
half_idx = np.argmin(diff)
print("t half", t[half_idx] - t[np.argmax(crf)])
plt.plot(t, crf, label=str(t_half - t_peak))
results = results.append(pd.DataFrame({"t_peak": [t_peak], "t_half": [t_half - t_peak], "a": [x], "b": [y]}))
results.to_csv(os.path.join(utils.output_dir, "crf_parameters.csv"))
| 28.265625 | 172 | 0.480376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.03759 |
c5aa6db1461a81d1e86603163e1dd494f722650c | 1,334 | py | Python | multi_prisoner/tests.py | Charlotte-exp/Multichannel-Games | 83ebb452454ed5d1a8535b59dac49099a9509be4 | [
"MIT"
] | null | null | null | multi_prisoner/tests.py | Charlotte-exp/Multichannel-Games | 83ebb452454ed5d1a8535b59dac49099a9509be4 | [
"MIT"
] | 1 | 2021-01-20T11:48:18.000Z | 2021-01-20T11:48:18.000Z | crosstalk/tests.py | Charlotte-exp/Multichannel-Games | 83ebb452454ed5d1a8535b59dac49099a9509be4 | [
"MIT"
] | 2 | 2021-01-21T15:29:19.000Z | 2022-03-29T09:26:36.000Z | from otree.api import Currency as c, currency_range
from . import pages
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
    """Automated oTree bot: cooperates on both channels every round and
    fills in the end-of-study questionnaire on the last round."""

    def play_round(self):
        # 'last_round' is read from participant.vars (set elsewhere in the
        # session code — not visible in this file).
        if self.round_number <= self.participant.vars['last_round']:
            # Cooperate (value 1) on both the high and the low channel.
            yield pages.Decision, {"decision_high": 1, "decision_low": 1}
            # assert 'Both of you chose to Cooperate' in self.html # no clue what this is
            # assert self.player.payoff == Constants.both_cooperate_payoff # no clue what this is
            # if self.round_number % 2 == 0:
            #     if self.player.id_in_group == 1:
            #         yield pages.Decision, dict(decision_high="Cooperate", decision_low="Cooperate")
            #     else:
            #         if self.player.id_in_group == 1:
            #             yield pages.Decision, dict(decision_high="Defect", decision_low="Defect")
            yield pages.Results
            # On the final round, additionally submit the closing pages with
            # fixed questionnaire answers.
            if self.round_number == self.participant.vars['last_round']:
                yield pages.End
                yield pages.Demographics, {"age": '22', "gender": 'Female', "income": '£10.000 - £29.999',
                                           "education": 'Postgraduate degree', "ethnicity": 'White'}
                yield pages.CommentBox, {"comment_box": 'n/a'}
                yield pages.Payment
                yield pages.ProlificLink
| 47.642857 | 102 | 0.596702 | 1,205 | 0.901946 | 1,179 | 0.882485 | 0 | 0 | 0 | 0 | 626 | 0.468563 |
c5ab0b3466c00112abbf1baa6d0c33332509984d | 511 | py | Python | plugins/zhihu/handler.py | KuangjuX/QBot | 20533f55f58e5dfeef533b338accd1ca2f3dc405 | [
"MIT"
] | null | null | null | plugins/zhihu/handler.py | KuangjuX/QBot | 20533f55f58e5dfeef533b338accd1ca2f3dc405 | [
"MIT"
] | null | null | null | plugins/zhihu/handler.py | KuangjuX/QBot | 20533f55f58e5dfeef533b338accd1ca2f3dc405 | [
"MIT"
] | null | null | null | import requests
async def req_top_topic():
    """Fetch the current trending search terms from Zhihu.

    Returns:
        The list of "words" entries from the top-search API on success,
        or None when the request fails or returns a non-200 status.

    NOTE(review): requests.get is a blocking call inside an async
    function; consider an async HTTP client if this runs on an event loop.
    """
    url = "https://www.zhihu.com/api/v4/search/top_search"
    user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6"
    headers = {
        "User-Agent": user_agent
    }
    try:
        resp = requests.get(url=url, headers=headers)
        if resp.status_code == 200:
            return resp.json()["top_search"]["words"]
    except (ConnectionError, requests.exceptions.RequestException) as e:
        # The builtin ConnectionError alone does not catch requests'
        # own exceptions (they derive from RequestException); also,
        # the old '"[Error] " + e' raised TypeError (str + exception).
        print("[Error] %s" % e)
    return None
c5ab8747666ccf31bc2f89c48e1288edc70dac32 | 11,219 | py | Python | docqa/triviaqa/answer_detection.py | Willyoung2017/doc-qa | 7ee02218952b0b9db63bc82b3895f743cdbd8f22 | [
"Apache-2.0"
] | 422 | 2017-10-31T12:20:29.000Z | 2022-03-14T11:25:16.000Z | docqa/triviaqa/answer_detection.py | Willyoung2017/doc-qa | 7ee02218952b0b9db63bc82b3895f743cdbd8f22 | [
"Apache-2.0"
] | 54 | 2017-11-02T10:34:45.000Z | 2021-02-04T05:05:20.000Z | docqa/triviaqa/answer_detection.py | Willyoung2017/doc-qa | 7ee02218952b0b9db63bc82b3895f743cdbd8f22 | [
"Apache-2.0"
] | 138 | 2017-11-02T10:49:09.000Z | 2021-11-26T15:34:01.000Z | import re
import string
import numpy as np
from tqdm import tqdm
from typing import List
from docqa.triviaqa.read_data import TriviaQaQuestion
from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score
from docqa.utils import flatten_iterable, split
"""
Tools for turning the aliases and answer strings from TriviaQA into labelled spans
"""
class ExactMatchDetector(object):
    """Finds token spans that equal an answer alias exactly.

    Tokens are compared case-insensitively; no other normalization is
    applied.
    """

    def __init__(self):
        self.answer_tokens = None

    def set_question(self, normalized_aliases):
        """Store the tokenized answer aliases to search for."""
        self.answer_tokens = normalized_aliases

    def any_found(self, para):
        """Return unique (start, end) token spans of alias matches in `para`."""
        tokens = [t.lower() for t in flatten_iterable(para)]
        spans = set()
        for alias in self.answer_tokens:
            n_alias = len(alias)
            for start, tok in enumerate(tokens):
                if tok != alias[0]:
                    continue
                # Greedily extend the match one token at a time.
                pos = start + 1
                matched = 1
                while matched < n_alias and pos < len(tokens):
                    if tokens[pos] == alias[matched]:
                        matched += 1
                        pos += 1
                    else:
                        break
                if matched == n_alias:
                    spans.add((start, pos))
        return list(spans)
class NormalizedAnswerDetector(object):
    """Finds token spans that the official trivia-qa evaluation script
    would score as exactly correct.

    Every paragraph token is run through the official `normalize_answer`;
    tokens that normalize to the empty string may be skipped while a
    match is being extended.
    """

    def __init__(self):
        self.answer_tokens = None

    def set_question(self, normalized_aliases):
        """Store the (already normalized) tokenized aliases to search for."""
        self.answer_tokens = normalized_aliases

    def any_found(self, para):
        """Return unique (start, end) token spans of alias matches in `para`."""
        tokens = [normalize_answer(t) for t in flatten_iterable(para)]
        spans = set()
        for alias in self.answer_tokens:
            n_alias = len(alias)
            for start, tok in enumerate(tokens):
                if tok != alias[0]:
                    continue
                pos = start + 1
                matched = 1
                while matched < n_alias and pos < len(tokens):
                    current = tokens[pos]
                    if current == alias[matched]:
                        matched += 1
                        pos += 1
                    elif current == "":
                        # Tokens erased by normalization don't break a match.
                        pos += 1
                    else:
                        break
                if matched == n_alias:
                    spans.add((start, pos))
        return list(spans)
class FastNormalizedAnswerDetector(object):
    """A roughly twice-as-fast near-equivalent of NormalizedAnswerDetector.

    Instead of calling the official `normalize_answer` on every token it
    lower-cases and strips punctuation inline, and skips articles/empty
    tokens while extending a match.
    """

    def __init__(self):
        # These come from the TriviaQA official evaluation script.
        self.skip = {"a", "an", "the", ""}
        self.strip = string.punctuation + "".join([u"‘", u"’", u"´", u"`", "_"])
        self.answer_tokens = None

    def set_question(self, normalized_aliases):
        """Store the tokenized aliases to search for."""
        self.answer_tokens = normalized_aliases

    def any_found(self, para):
        """Return unique (start, end) token spans of alias matches in `para`."""
        # Normalize the paragraph: lower-case + strip punctuation.
        tokens = [t.lower().strip(self.strip) for t in flatten_iterable(para)]
        spans = set()
        for alias in self.answer_tokens:
            n_alias = len(alias)
            # Try every position where the first alias token occurs.
            for start, tok in enumerate(tokens):
                if tok != alias[0]:
                    continue
                pos = start + 1
                matched = 1
                while matched < n_alias and pos < len(tokens):
                    current = tokens[pos]
                    if current == alias[matched]:
                        matched += 1
                        pos += 1
                    elif current in self.skip:
                        # Articles and empty tokens don't break a match.
                        pos += 1
                    else:
                        break
                if matched == n_alias:
                    spans.add((start, pos))
        return list(spans)
class CarefulAnswerDetector(object):
    """
    There are some common false negatives in the above answer detection, in particular plurals of answers are
    often not found (nor are counted correct by the official script). This detector makes a stronger effort to
    find them, although its unclear if training with these additional answers would hurt/help our overall score
    since I never got around to trying it.
    """
    def __init__(self):
        # Filler tokens that may be skipped while a match is being extended.
        self.skip = {"a", "an", "the", "&", "and", "-", "\u2019", "\u2018", "\"", ";", "'",
                     "(", ")", "'s'", "s", ":", ",", "."}
        self.answer_regex = None
        self.aliases = None

    def set_question(self, normalized_aliases):
        # Compile one case-insensitive regex per alias token, allowing an
        # optional plural "s" on every multi-character token.
        answer_regex = []
        self.aliases = normalized_aliases
        for answer in normalized_aliases:
            tokens = []
            for token in answer:
                if len(token) > 1:
                    tokens.append(token + "s?")
                else:
                    tokens.append(token)
            # A trailing bare "s" token also becomes optional.
            if tokens[-1] == "s":
                tokens[-1] = "s?"
            answer_regex.append([re.compile(x, re.IGNORECASE) for x in tokens])
        self.answer_regex = answer_regex

    def any_found(self, para):
        """Return unique (start, end) token spans of alias matches in `para`."""
        words = flatten_iterable(para)
        occurances = []
        for answer_ix, answer in enumerate(self.answer_regex):
            # NOTE(review): the first token uses fullmatch but later tokens
            # use match (prefix match), so e.g. "cats?" would also accept
            # "category" as a continuation — confirm this looseness is
            # intended.
            word_starts = [i for i, w in enumerate(words) if answer[0].fullmatch(w)]
            n_tokens = len(answer)
            for start in word_starts:
                end = start + 1
                ans_token = 1
                while ans_token < n_tokens and end < len(words):
                    next = words[end]
                    if answer[ans_token].match(next):
                        ans_token += 1
                        end += 1
                    elif next in self.skip:
                        end += 1
                    else:
                        break
                if n_tokens == ans_token:
                    occurances.append((start, end))
        return list(set(occurances))
def evaluate_question_detector(questions, corpus, word_tokenize, detector, reference_detector=None, compute_f1s=False):
    """ Just for debugging: run `detector` over the documents of `questions`
    and print match statistics.

    If `reference_detector` is given, documents where `detector` found
    nothing are re-scanned with it and any difference is printed.  If
    `compute_f1s` is set, each found span is scored with the official
    f1 against the normalized aliases.
    """
    n_no_docs = 0
    answer_per_doc = []
    answer_f1s = []
    for question_ix, q in enumerate(tqdm(questions)):
        tokenized_aliases = [word_tokenize(x) for x in q.answer.normalized_aliases]
        detector.set_question(tokenized_aliases)
        for doc in q.all_docs:
            doc = corpus.get_document(doc.doc_id)
            if doc is None:
                n_no_docs += 1
                continue
            # Collect (paragraph, start, end) spans found by the detector.
            output = []
            for i, para in enumerate(doc):
                for s,e in detector.any_found(para):
                    output.append((i, s, e))
            if len(output) == 0 and reference_detector is not None:
                # (The inner check is redundant with the condition above.)
                if reference_detector is not None:
                    reference_detector.set_question(tokenized_aliases)
                    detected = []
                    for i, para in enumerate(doc):
                        for s, e in reference_detector.any_found(para):
                            detected.append((i, s, e))
                    if len(detected) > 0:
                        print("Found a difference")
                        print(q.answer.normalized_aliases)
                        print(tokenized_aliases)
                        for p, s, e in detected:
                            token = flatten_iterable(doc[p])[s:e]
                            print(token)
            answer_per_doc.append(output)
            if compute_f1s:
                f1s = []
                for p, s, e in output:
                    token = flatten_iterable(doc[p])[s:e]
                    answer = normalize_answer(" ".join(token))
                    f1 = 0
                    # Best f1 over all aliases counts for this span.
                    for gt in q.answer.normalized_aliases:
                        f1 = max(f1, f1_score(answer, gt))
                    f1s.append(f1)
                answer_f1s.append(f1s)
    n_answers = sum(len(x) for x in answer_per_doc)
    print("Found %d answers (av %.4f)" % (n_answers, n_answers/len(answer_per_doc)))
    print("%.4f docs have answers" % np.mean([len(x) > 0 for x in answer_per_doc]))
    if len(answer_f1s) > 0:
        print("Average f1 is %.4f" % np.mean(flatten_iterable(answer_f1s)))
def compute_answer_spans(questions: List[TriviaQaQuestion], corpus, word_tokenize,
                         detector):
    """Tokenize each question and attach answer spans to each of its docs.

    For every document, runs `detector` over each paragraph and stores the
    found spans in document-level token coordinates as an (n, 2) int32
    array of *inclusive* [start, end] pairs on `doc.answer_spans`.
    Mutates `questions` in place.
    """
    for i, q in enumerate(questions):
        if i % 500 == 0:
            print("Completed question %d of %d (%.3f)" % (i, len(questions), i/len(questions)))
        q.question = word_tokenize(q.question)
        if q.answer is None:
            continue
        tokenized_aliases = [word_tokenize(x) for x in q.answer.all_answers]
        if len(tokenized_aliases) == 0:
            raise ValueError()
        detector.set_question(tokenized_aliases)
        for doc in q.all_docs:
            text = corpus.get_document(doc.doc_id)
            if text is None:
                raise ValueError()
            spans = []
            # `offset` tracks the document-level index of the current
            # paragraph's first token, so paragraph-local spans can be
            # shifted into document coordinates.
            offset = 0
            for para_ix, para in enumerate(text):
                for s, e in detector.any_found(para):
                    spans.append((s+offset, e+offset-1))  # turn into inclusive span
                offset += sum(len(s) for s in para)
            if len(spans) == 0:
                # Keep a well-typed empty array so downstream code can
                # rely on a consistent (n, 2) shape.
                spans = np.zeros((0, 2), dtype=np.int32)
            else:
                spans = np.array(spans, dtype=np.int32)
            doc.answer_spans = spans
def _compute_answer_spans_chunk(questions, corpus, tokenizer, detector):
    """Worker for `compute_answer_spans_par`: annotate one chunk in place
    and return it so `Pool.starmap` hands the mutated questions back.

    `tokenize_paragraph_flat` is used because a question can consist of
    several sentences, while the result must still be one flat token list.
    """
    compute_answer_spans(questions, corpus,
                         tokenizer.tokenize_paragraph_flat, detector)
    return questions
def compute_answer_spans_par(questions: List[TriviaQaQuestion], corpus,
                             tokenizer, detector, n_processes: int):
    """Parallel version of `compute_answer_spans`.

    With one process the work runs inline; otherwise the questions are
    split into `n_processes` chunks processed by a multiprocessing pool,
    and the annotated chunks are flattened back into a single list.
    """
    if n_processes == 1:
        compute_answer_spans(questions, corpus,
                             tokenizer.tokenize_paragraph_flat, detector)
        return questions
    from multiprocessing import Pool
    with Pool(n_processes) as pool:
        work = [[chunk, corpus, tokenizer, detector]
                for chunk in split(questions, n_processes)]
        return flatten_iterable(pool.starmap(_compute_answer_spans_chunk, work))
def main():
    """Spot-check FastNormalizedAnswerDetector on 1000 random train questions."""
    # NOTE(review): these import paths ("trivia_qa", "data_processing") do
    # not match the package names used at the top of this file
    # ("docqa.triviaqa", "docqa.utils") — confirm they resolve before use.
    from trivia_qa.build_span_corpus import TriviaQaWebDataset
    from data_processing.text_utils import NltkAndPunctTokenizer
    dataset = TriviaQaWebDataset()
    qs = dataset.get_train()
    # Fixed seed so the sampled subset is reproducible.
    qs = np.random.RandomState(0).choice(qs, 1000, replace=False)
    evaluate_question_detector(qs, dataset.evidence, NltkAndPunctTokenizer().tokenize_paragraph_flat,
                               FastNormalizedAnswerDetector())
if __name__ == "__main__":
main() | 38.553265 | 119 | 0.557269 | 5,869 | 0.522897 | 0 | 0 | 0 | 0 | 0 | 0 | 1,358 | 0.120991 |
c5ad2bebf11ef489fbbd6dfa3f637f4206f67729 | 2,171 | py | Python | custom_components/grocy/schema.py | smhgit/grocery_list | dd9a5fc753c35b21167d09d6a3bf8f412a081199 | [
"Apache-2.0"
] | 1 | 2020-07-20T14:38:49.000Z | 2020-07-20T14:38:49.000Z | custom_components/grocy/schema.py | smhgit/grocery_list | dd9a5fc753c35b21167d09d6a3bf8f412a081199 | [
"Apache-2.0"
] | 1 | 2020-04-04T23:26:45.000Z | 2020-04-04T23:26:45.000Z | custom_components/grocy/schema.py | smhgit/grocery_list | dd9a5fc753c35b21167d09d6a3bf8f412a081199 | [
"Apache-2.0"
] | null | null | null | """Schemas for grocy."""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (CONF_HOST, CONF_ENTITY_ID, CONF_USERNAME, CONF_PASSWORD)
from .const import (DOMAIN,
CONF_APIKEY, CONF_AMOUNT, CONF_SHOPPING_LIST_ID,
CONF_BARCODE, CONF_STORE, CONF_PRODUCT_GROUP_ID,
CONF_NAME, CONF_VALUE,
CONF_PRODUCT_LOCATION_ID, CONF_PRODUCT_DESCRIPTION,
DEFAULT_AMOUNT, DEFAULT_SHOPPING_LIST_ID, DEFAULT_STORE,
DEFAULT_PRODUCT_DESCRIPTION)
# Integration configuration: Grocy server host + API key, plus an optional
# store account section (name/username/password) — presumably for a store
# integration; confirm against the component code.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_APIKEY): cv.string,
        vol.Optional(CONF_STORE): vol.Schema({
            vol.Required(CONF_NAME): cv.string,
            vol.Required(CONF_USERNAME): cv.string,
            vol.Required(CONF_PASSWORD): cv.string,
        })
    })
}, extra=vol.ALLOW_EXTRA)
# Service payload: add an amount of a product (by entity) to a shopping list.
ADD_TO_LIST_SERVICE_SCHEMA = vol.Schema({
    vol.Required(CONF_ENTITY_ID): cv.entity_ids,
    vol.Optional(CONF_AMOUNT, default=DEFAULT_AMOUNT): cv.positive_int,
    vol.Optional(CONF_SHOPPING_LIST_ID, default=DEFAULT_SHOPPING_LIST_ID): cv.positive_int
})
# Service payload: subtract an amount of a product from a shopping list.
SUBTRACT_FROM_LIST_SERVICE_SCHEMA = vol.Schema({
    vol.Required(CONF_ENTITY_ID): cv.entity_ids,
    vol.Optional(CONF_AMOUNT, default=DEFAULT_AMOUNT): cv.positive_int,
    vol.Optional(CONF_SHOPPING_LIST_ID, default=DEFAULT_SHOPPING_LIST_ID): cv.positive_int
})
# Service payload: register a new product by barcode.
ADD_PRODUCT_SERVICE_SCHEMA = vol.Schema({
    vol.Required(CONF_BARCODE): cv.string,
    vol.Required(CONF_PRODUCT_GROUP_ID): cv.positive_int,
    vol.Required(CONF_PRODUCT_LOCATION_ID): cv.positive_int,
    vol.Required(CONF_STORE, default=DEFAULT_STORE): cv.string,
    vol.Optional(CONF_PRODUCT_DESCRIPTION, default=DEFAULT_PRODUCT_DESCRIPTION): cv.string
})
# Service payloads: mark / unmark a product entity as a favorite.
ADD_FAVORITE_SERVICE_SCHEMA = vol.Schema({
    vol.Required(CONF_ENTITY_ID): cv.entity_ids
})
REMOVE_FAVORITE_SERVICE_SCHEMA = vol.Schema({
    vol.Required(CONF_ENTITY_ID): cv.entity_ids
})
REMOVE_PRODUCT_SERVICE_SCHEMA = vol.Schema({
vol.Required(CONF_ENTITY_ID): cv.entity_ids
}) | 36.79661 | 90 | 0.726854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.011055 |
c5addbf22c0a3577e07532c7bdb1e500725ff953 | 548 | py | Python | examples/vacuum_send_command.py | giuseppeg88/lovelace-xiaomi-vacuum-map-card | ab6f395a7cd8af0b55aa59b46e949845d27acea6 | [
"MIT"
] | 798 | 2019-07-30T09:50:47.000Z | 2022-03-26T13:29:00.000Z | examples/vacuum_send_command.py | giuseppeg88/lovelace-xiaomi-vacuum-map-card | ab6f395a7cd8af0b55aa59b46e949845d27acea6 | [
"MIT"
] | 214 | 2019-08-12T20:20:22.000Z | 2022-03-28T16:29:39.000Z | examples/vacuum_send_command.py | giuseppeg88/lovelace-xiaomi-vacuum-map-card | ab6f395a7cd8af0b55aa59b46e949845d27acea6 | [
"MIT"
] | 157 | 2019-08-02T16:37:18.000Z | 2022-03-28T18:39:30.000Z | entity_id = data.get('entity_id')
# Home Assistant python_script: forward a raw command to a Xiaomi vacuum.
# `data` and `hass` are injected by the python_script integration;
# `entity_id` is read from `data` earlier in this script.
command = data.get('command')
params = str(data.get('params'))
# Parse a string like "[[x1,y1,x2,y2],[...]]" into a list of int lists:
# spaces and brackets are stripped, "],[" becomes a "|" separator.
parsedParams = []
for z in params.replace(' ', '').replace('],[', '|').replace('[', '').replace(']', '').split('|'):
    rect = []
    for c in z.split(','):
        rect.append(int(c))
    parsedParams.append(rect)
# These commands expect one flat list instead of a list of lists.
if command in ["app_goto_target", "app_segment_clean"]:
    parsedParams = parsedParams[0]
hass.services.call('vacuum', 'send_command',
                   {'entity_id': entity_id, 'command': command, 'params': parsedParams}, True)
| 32.235294 | 98 | 0.607664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.260949 |
c5ae3a56e64e4529136d5912d32600637f06223a | 417 | py | Python | base/migrations/0006_profile_history.py | polarity-cf/arugo | 530ea6092702916d63f36308d5a615d118b73850 | [
"MIT"
] | 34 | 2021-11-11T14:00:15.000Z | 2022-03-16T12:30:04.000Z | base/migrations/0006_profile_history.py | polarity-cf/arugo | 530ea6092702916d63f36308d5a615d118b73850 | [
"MIT"
] | 22 | 2021-11-11T23:18:14.000Z | 2022-03-31T15:07:02.000Z | base/migrations/0006_profile_history.py | polarity-cf/arugo | 530ea6092702916d63f36308d5a615d118b73850 | [
"MIT"
] | 1 | 2022-03-14T07:35:09.000Z | 2022-03-14T07:35:09.000Z | # Generated by Django 3.2.9 on 2021-11-13 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a ``history`` field to ``Profile``."""

    dependencies = [
        ('base', '0005_authquery_password'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='history',
            # Default '[]' suggests a serialized list stored as text —
            # TODO confirm the intended format.
            field=models.CharField(default='[]', max_length=1000),
        ),
    ]
| 21.947368 | 67 | 0.568345 | 318 | 0.76259 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.242206 |
c5aeb41622feaa3f7b1564f4b8743b3dd401d728 | 199 | py | Python | nanodet/util/rank_filter.py | zjiao19/nanodet | 17af4a81fa93e0405f3a9f8c8feb75ad7b9adc50 | [
"Apache-2.0"
] | 8 | 2021-05-01T14:11:19.000Z | 2022-01-11T01:08:35.000Z | nanodet/util/rank_filter.py | zjiao19/nanodet | 17af4a81fa93e0405f3a9f8c8feb75ad7b9adc50 | [
"Apache-2.0"
] | 1 | 2021-12-20T08:01:20.000Z | 2021-12-20T08:01:20.000Z | nanodet/util/rank_filter.py | zjiao19/nanodet | 17af4a81fa93e0405f3a9f8c8feb75ad7b9adc50 | [
"Apache-2.0"
] | null | null | null |
def rank_filter(func):
    """Decorator that restricts *func* to the main process in distributed runs.

    The wrapper inspects a ``local_rank`` argument (first positional or
    keyword, default ``-1``): ranks ``< 1`` — the main process, or a
    non-distributed run — execute *func* and return its result; every
    other rank is a no-op returning ``None``.  Note that ``local_rank``
    itself is consumed and NOT forwarded to *func*.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def func_filter(local_rank=-1, *args, **kwargs):
        if local_rank < 1:
            return func(*args, **kwargs)
        return None

    return func_filter
| 22.111111 | 52 | 0.557789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c5b0116a9713ffc1692da22ed0c63bbffd60bb86 | 897 | py | Python | ThreePointsCircle.py | formeo/checkio_tasks | 54ab77821e797238379afe483f6f6358cfba39a6 | [
"MIT"
] | null | null | null | ThreePointsCircle.py | formeo/checkio_tasks | 54ab77821e797238379afe483f6f6358cfba39a6 | [
"MIT"
] | null | null | null | ThreePointsCircle.py | formeo/checkio_tasks | 54ab77821e797238379afe483f6f6358cfba39a6 | [
"MIT"
] | null | null | null | import math
def checkio(data):
    """Return the circumscribed-circle equation for three points.

    *data* is a string of the form ``"(x1,y1),(x2,y2),(x3,y3)"``.  The
    result is ``"(x-h)^2+(y-k)^2=r^2"`` where integral values print
    without a decimal part and non-integral values are rounded to two
    decimals with trailing zeros dropped (``2.50`` -> ``2.5``).

    The previous implementation read coordinates from hard-coded string
    indices, so it only handled single-digit non-negative coordinates;
    regex parsing generalizes it to arbitrary integers while producing
    identical output for the old inputs.
    """
    import math
    import re

    x1, y1, x2, y2, x3, y3 = (int(v) for v in re.findall(r'-?\d+', data))

    # Circumcenter (h, k) via the perpendicular-bisector linear system.
    a = x2 - x1
    b = y2 - y1
    c = x3 - x1
    d = y3 - y1
    e = a * (x1 + x2) + b * (y1 + y2)
    f = c * (x1 + x3) + d * (y1 + y3)
    g = 2 * (a * (y3 - y2) - b * (x3 - x2))  # zero iff points are collinear
    h = (d * e - b * f) / g
    k = (a * f - c * e) / g
    radius = math.sqrt((x1 - h) ** 2 + (y1 - k) ** 2)

    def fmt(value):
        # Integral -> "5"; otherwise round to 2 decimals and let float()
        # drop trailing zeros ("2.50" -> "2.5"), matching the original.
        if value.is_integer():
            return str(int(value))
        return str(float("{0:.2f}".format(value)))

    # The radius was rounded *before* the integer test in the original,
    # so e.g. r = 5.001 prints as "5"; keep that order.
    radius = float("{0:.2f}".format(radius))
    return "(x-" + fmt(h) + ")^2+(y-" + fmt(k) + ")^2=" + fmt(radius) + "^2"
| 19.085106 | 68 | 0.402453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.06689 |
c5b2728e8814b93e58ee5c076b847c4197704dea | 903 | py | Python | schafkopf/utils/settings_utils.py | PanchoVarallo/Schafkopf-Application | 98e58f69cadfeb63c13068320df1e8ea96cd91fe | [
"MIT"
] | 1 | 2022-01-19T23:58:03.000Z | 2022-01-19T23:58:03.000Z | schafkopf/utils/settings_utils.py | PanchoVarallo/Schafkopf-Application | 98e58f69cadfeb63c13068320df1e8ea96cd91fe | [
"MIT"
] | 7 | 2020-12-02T19:02:51.000Z | 2020-12-04T11:01:29.000Z | schafkopf/utils/settings_utils.py | PanchoVarallo/Schafkopf-Application | 98e58f69cadfeb63c13068320df1e8ea96cd91fe | [
"MIT"
] | null | null | null | import configparser
import enum
# Name of the INI file read by _get_settings_ini_config, resolved relative
# to the current working directory.
ini = 'settings.ini'
class Database(enum.Enum):
    """Database backends supported by the application; get_db looks up a
    member by its name from the settings file."""
    SQLITE = 1
    POSTGRES = 2
def get_db() -> Database:
    """Return the configured backend: the [Database] section's ``db`` entry
    must equal a Database member name (raises KeyError otherwise)."""
    return Database[get_entry('Database', 'db')]
def get_database_url() -> str:
    """Return the ``database_url`` entry of the [Database] section."""
    return get_entry('Database', 'database_url')
def get_init_username() -> str:
    """Return the ``username`` entry of the [Auth] section."""
    return get_entry('Auth', 'username')
def get_init_password() -> str:
    """Return the ``password`` entry of the [Auth] section."""
    return get_entry('Auth', 'password')
def get_entry(section: str, entry: str) -> str:
    """Read ``entry`` from ``section`` of the settings INI file.

    Raises:
        ValueError: when the section or the entry is missing.
    """
    config = _get_settings_ini_config()
    if section not in config:
        raise ValueError(f'Ini is not valid. "{section}" section is missing.')
    section_values = config[section]
    if entry not in section_values:
        raise ValueError(f'Ini is not valid. {entry} entry for section {section} is missing.')
    return section_values[entry]
def _get_settings_ini_config():
    """Parse the module-level ``ini`` file into a ConfigParser.

    A missing file is not an error — ConfigParser.read simply yields an
    empty configuration in that case.
    """
    parser = configparser.ConfigParser()
    parser.read(ini)
    return parser
| 22.02439 | 94 | 0.683278 | 58 | 0.06423 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.225914 |
c5b46f4e695997da12b52c911a37e6a3483e1e67 | 3,070 | py | Python | tests/parser/test_lambda.py | csun-comp430-s22/lispy | 03820211bd3c5a83e0de4b1ac26a864080e06aca | [
"MIT"
] | null | null | null | tests/parser/test_lambda.py | csun-comp430-s22/lispy | 03820211bd3c5a83e0de4b1ac26a864080e06aca | [
"MIT"
] | 3 | 2022-02-23T08:12:52.000Z | 2022-03-09T00:13:33.000Z | tests/parser/test_lambda.py | csun-comp430-s22/lispy | 03820211bd3c5a83e0de4b1ac26a864080e06aca | [
"MIT"
] | null | null | null | import pytest
from lispyc import nodes
from lispyc.exceptions import SpecialFormSyntaxError, TypeSyntaxError
from lispyc.nodes import ComposedForm, Constant
from lispyc.nodes import FunctionParameter as Param
from lispyc.nodes import Program, Variable, types
from lispyc.parser import parse
# (source, expected parameters, expected lambda body) triples that must
# parse successfully.
VALID = [
    ("(lambda () ())", (), nodes.List(())),
    (
        "(lambda ((int int) (bool bool) (float float)) (Y b 123))",
        (
            Param(Variable("int"), types.IntType()),
            Param(Variable("bool"), types.BoolType()),
            Param(Variable("float"), types.FloatType()),
        ),
        ComposedForm(Variable("Y"), (Variable("b"), Constant(123))),
    ),
    (
        "(lambda ((a int)) (1.2 (a -44) false))",
        (Param(Variable("a"), types.IntType()),),
        ComposedForm(
            Constant(1.2), (ComposedForm(Variable("a"), (Constant(-44),)), Constant(False))
        ),
    ),
    (
        "(lambda ((func (func ((list int) float) bool)) (list (list float))) (j__9e (true d)))",
        (
            Param(
                Variable("func"),
                types.FunctionType(
                    (types.ListType(types.IntType()), types.FloatType()), types.BoolType()
                ),
            ),
            Param(Variable("list"), types.ListType(types.FloatType())),
        ),
        ComposedForm(Variable("j__9e"), (ComposedForm(Constant(True), (Variable("d"),)),)),
    ),
    (
        "(lambda ((f (func () (func (int) float)))) 1)",
        (
            Param(
                Variable("f"),
                types.FunctionType((), types.FunctionType((types.IntType(),), types.FloatType())),
            ),
        ),
        Constant(1),
    ),
]
# Structurally malformed lambda forms (must raise SpecialFormSyntaxError).
INVALID = [
    "(lambda)",  # Missing params and body.
    "(lambda ())",  # Missing params or body.
    "(lambda ((x int) (y str)))",  # Missing body.
    "(lambda (x 1 2))",  # Missing params.
    "(lambda (x int) ())",  # Param not nested.
    "(lambda (()) ())",  # Empty param.
]
# Lambdas whose parameter *types* are malformed (must raise TypeSyntaxError).
INVALID_PARAM_TYPES = [
    "(lambda ((list 1)) ())",
    "(lambda ((list list)) ())",
    "(lambda ((func false)) ())",
    "(lambda ((func list)) ())",
    "(lambda ((name (list 123))) ())",
    "(lambda ((name (list list))) ())",
    "(lambda ((name (list))) ())",
    "(lambda ((name (func list))) ())",
    "(lambda ((name (func () ()))) ())",
    "(lambda ((name (func (a b) 1.4))) ())",
    "(lambda ((name (func ((x int) (b float)) bool))) ())",
]
@pytest.mark.parametrize(["program", "params", "body"], VALID)
def test_lambda_parses(program: str, params: tuple[Param, ...], body: nodes.Form):
    """A valid lambda parses into a Program holding a single Lambda node."""
    result = parse(program)
    assert result == Program((nodes.Lambda(params, body),))
@pytest.mark.parametrize("program", INVALID)
def test_invalid_lambda_fails(program: str):
    """Malformed lambda forms raise SpecialFormSyntaxError.

    ``match`` is a regex: the message must mention either "lambda" or
    "function parameter" followed by a colon.
    """
    with pytest.raises(SpecialFormSyntaxError, match="(lambda|function parameter):"):
        parse(program)
@pytest.mark.parametrize("program", INVALID_PARAM_TYPES)
def test_invalid_lambda_param_types_fails(program: str):
    """Lambdas with malformed parameter types raise TypeSyntaxError."""
    with pytest.raises(TypeSyntaxError):
        parse(program)
| 32.659574 | 98 | 0.551466 | 0 | 0 | 0 | 0 | 609 | 0.198371 | 0 | 0 | 962 | 0.313355 |
c5b6c0de5cd1879d83d673da44359c98a06dd5a1 | 306 | py | Python | auth/api/urls.py | gabrielangelo/revelo-wallet | 3e91117b673e5aaf50773aa180af4117235965c9 | [
"BSD-3-Clause"
] | null | null | null | auth/api/urls.py | gabrielangelo/revelo-wallet | 3e91117b673e5aaf50773aa180af4117235965c9 | [
"BSD-3-Clause"
] | 8 | 2020-02-11T23:50:12.000Z | 2022-03-14T22:51:54.000Z | auth/api/urls.py | gabrielangelo/revelo-wallet | 3e91117b673e5aaf50773aa180af4117235965c9 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from rest_framework_jwt.views import (
obtain_jwt_token,
refresh_jwt_token,
verify_jwt_token
)
# JWT auth endpoints (handlers provided by rest_framework_jwt):
# issue, refresh, and verify tokens.
# NOTE(review): trailing slashes are inconsistent between the routes —
# confirm whether '^obtain-token' should also end with '/'.
urlpatterns = [
    url(r'^obtain-token', obtain_jwt_token),
    url(r'^token-refresh/', refresh_jwt_token),
    url(r'^api-token-verify/', verify_jwt_token)
]
| 20.4 | 48 | 0.712418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.179739 |
c5b9de0176823bf46c536854ba42d25cf47a7c38 | 5,289 | py | Python | test/check_multiset.py | constantinpape/paintera_tools | d37f27f559e0d716b797483d94ce9eb886d807d0 | [
"MIT"
] | 1 | 2019-06-23T21:32:15.000Z | 2019-06-23T21:32:15.000Z | test/check_multiset.py | constantinpape/paintera_tools | d37f27f559e0d716b797483d94ce9eb886d807d0 | [
"MIT"
] | null | null | null | test/check_multiset.py | constantinpape/paintera_tools | d37f27f559e0d716b797483d94ce9eb886d807d0 | [
"MIT"
] | 1 | 2019-08-08T11:36:47.000Z | 2019-08-08T11:36:47.000Z | import nifty.tools as nt
import numpy as np
import z5py
from elf.label_multiset import deserialize_multiset
from tqdm import trange
def check_serialization(mset1, mset2):
    """Compare two serialized multisets element-wise.

    Prints a diagnostic and returns False on the first disagreement
    (size or content); returns True when they match exactly.
    """
    if len(mset1) != len(mset2):
        print("Serialization sizes disagree:", len(mset1), len(mset2))
        return False
    mismatch = mset1 != mset2
    if mismatch.any():
        print("Serializations disagree for entries", mismatch.sum(), "/", mismatch.size)
        return False
    print("Check serialization passed")
    return True
def check_multiset_members(mset1, mset2):
    """Compare the member arrays of two deserialized multisets.

    Checks n_elements, then argmax, offsets, ids and counts in that
    order; prints a diagnostic and returns False on the first mismatch,
    True when everything agrees.
    """
    assert mset1.shape == mset2.shape
    if mset1.n_elements != mset2.n_elements:
        print("N-elements disagree:", mset1.n_elements, mset2.n_elements)
        return False
    # Same comparison for each array-valued member, in the original order.
    for label, attr in (("Argmax", "argmax"), ("Offsets", "offsets"),
                        ("Ids", "ids"), ("Counts", "counts")):
        first, second = getattr(mset1, attr), getattr(mset2, attr)
        if not np.array_equal(first, second):
            bad = first != second
            print(label, "disagree for entries", bad.sum(), "/", bad.size)
            return False
    print("Check members passed")
    return True
def check_pixels(mset1, mset2, seg, scale, offset):
    """Compare two deserialized multisets voxel by voxel.

    On the first mismatch, prints both entries plus the corresponding
    window of the full-resolution segmentation `seg` (located via `scale`
    and `offset`) and returns False; returns True when all voxels agree.
    """
    roi_end = mset1.shape
    # Blocks of shape [1, 1, 1]: iterate over every single voxel.
    blocking = nt.blocking([0, 0, 0], roi_end, [1, 1, 1])
    for block_id in trange(blocking.numberOfBlocks):
        block = blocking.getBlock(block_id)
        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
        # Indexing a multiset yields its (ids, counts) for that region.
        i1, c1 = mset1[bb]
        i2, c2 = mset2[bb]
        if not np.array_equal(i1, i2) or not np.array_equal(c1, c2):
            print("Entries disagree for block", block_id, ":", bb)
            print("Ids")
            print("Res:", i1)
            print("Exp:", i2)
            print("Counts")
            print("Res:", c1)
            print("Exp:", c2)
            print("From segmentation")
            # Map the voxel back into full-resolution coordinates.
            effective_bb = tuple(slice(b.start * sc + off, b.stop * sc + off) for b, sc, off in zip(bb, scale, offset))
            print(effective_bb)
            sub_seg = seg[effective_bb]
            print(sub_seg)
            sids, scounts = np.unique(sub_seg, return_counts=True)
            print("Ids")
            print(sids)
            print("Counts")
            print(scounts)
            return False
    print("Check pixels passed")
    return True
def check_chunk(blocking, chunk_id, ds_mset1, ds_mset2, ds_seg, scale):
    """Compare one chunk of two multiset datasets, escalating from cheap
    to expensive checks until a verdict is printed."""
    # A tuple chunk_id is a block coordinate; flatten it to the linear
    # block index used by nt.blocking.
    if isinstance(chunk_id, tuple):
        bpd = blocking.blocksPerAxis
        strides = [bpd[2] * bpd[1], bpd[2], 1]
        chunk_id = sum([stride * cid for stride, cid in zip(strides, chunk_id)])
        print(chunk_id)
    block = blocking.getBlock(chunk_id)
    chunk = tuple(beg // ch for beg, ch in zip(block.begin, blocking.blockShape))
    mset1 = ds_mset1.read_chunk(chunk)
    mset2 = ds_mset2.read_chunk(chunk)
    # Check 1: raw serializations byte-identical?
    if(check_serialization(mset1, mset2)):
        print("Multisets agree")
        return
    # Check 2: deserialized members (argmax, offsets, ids, counts) equal?
    mset1 = deserialize_multiset(mset1, block.shape)
    mset2 = deserialize_multiset(mset2, block.shape)
    if(check_multiset_members(mset1, mset2)):
        print("Multisets agree")
        return
    # Check 3: voxel-wise comparison, with the full-resolution
    # segmentation loaded for diagnostics.
    ds_seg.n_threads = 8
    seg = ds_seg[:]
    offset = tuple(beg * sc for beg, sc in zip(block.begin, scale))
    if(check_pixels(mset1, mset2, seg, scale, offset)):
        print("Multisets agree")
    else:
        print("Multisets disagree")
def check_multiset(level, chunk_id=0,
                   path='/home/pape/Work/data/cremi/example/sampleA.n5',
                   exp_path='/home/pape/Work/data/cremi/example/sampleA_paintera.n5'):
    """Compare one chunk of the computed label multiset against the
    paintera export at the given scale level.

    Args:
        level: scale level; selects the 's<level>' datasets.
        chunk_id: flat chunk index or a block-coordinate tuple.
        path: n5 container with the segmentation and computed multisets.
            (Previously hard-coded; kept as the default for compatibility.)
        exp_path: n5 container with the expected paintera data.
    """
    seg_key = 'volumes/segmentation/multicut'
    mset_key = 'paintera/data/s%i' % level
    f = z5py.File(path)
    ds_seg = f[seg_key]
    ds_mset = f[mset_key]
    mset_key1 = 'volumes/segmentation/multicut/data/s%i' % level
    f1 = z5py.File(exp_path)
    ds_mset1 = f1[mset_key1]
    assert ds_mset.shape == ds_mset1.shape
    assert ds_mset.chunks == ds_mset1.chunks, "%s, %s" % (str(ds_mset.chunks),
                                                          str(ds_mset1.chunks))
    shape, chunks = ds_mset.shape, ds_mset.chunks
    # Downsampling factors of both datasets must agree; they are reversed
    # here ([::-1]) — presumably to convert axis order; confirm.
    ds_factor = ds_mset.attrs.get('downsamplingFactors', None)
    ds_factor_exp = ds_mset1.attrs.get('downsamplingFactors', None)
    assert ds_factor == ds_factor_exp
    scale = [int(df) for df in ds_factor[::-1]]
    print("Have scale", scale)
    blocking = nt.blocking([0, 0, 0], shape, chunks)
    check_chunk(blocking, chunk_id, ds_mset, ds_mset1, ds_seg, scale)
# Manual entry point: check a single hand-picked chunk at scale level 1.
if __name__ == '__main__':
    level = 1
    # chunk_id = 0
    chunk_id = (0, 2, 0)
    check_multiset(level, chunk_id)
    # print("Checking mult-sets for chunk 0 of scales:")
    # for scale in range(5):
    #     print("Check scale", scale)
    #     check_multiset(scale)
| 32.850932 | 119 | 0.622613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.165627 |
c5bb6e0941f5920f796692212ebb5a1963fb2579 | 729 | py | Python | jp.atcoder/abc119/abc119_c/11972856.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc119/abc119_c/11972856.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc119/abc119_c/11972856.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
from itertools import product
# Input: first line holds N and the three target lengths; all remaining
# tokens are the N bamboo lengths.
n, *abc = map(int, sys.stdin.readline().split())
*l, = map(int, sys.stdin.read().split())
def main():
    """Brute force over all assignments of the n items (module globals
    n, abc, l read from stdin above): each item goes to group 0 (unused)
    or to one of the three target groups 1-3; print the minimum cost."""
    cand = []
    for p in product([0, 1, 2, 3], repeat=n):
        group = [[] for _ in range(4)]
        for i in range(n):
            group[p[i]].append(l[i])
        cost = 0
        for i in range(1, 4):
            # An empty target group makes this assignment invalid; the
            # break skips the for-else below so no cost is recorded.
            if not group[i]:
                break
            # Combining k items costs 10 per merge, i.e. 10 * (k - 1).
            cost += 10 * (len(group[i]) - 1)
            group[i] = sum(group[i])
        else:
            # All three groups non-empty: add |target - total| per group.
            group = group[1:]
            for i in range(3):
                cost += abs(abc[i] - group[i])
            cand.append(cost)
    print(min(cand))
# Script entry point.
if __name__ == '__main__':
    main()
| 25.137931 | 49 | 0.447188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.013717 |
c5bc4fd8ba6c477f146381cee819b78adb80847e | 2,882 | py | Python | projects/Gan/gan.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | projects/Gan/gan.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | projects/Gan/gan.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from torchvision.datasets import MNIST
from vis_util import visual_mnist
##### settings
x_dim = 28 * 28  # size of one flattened mnist digit
z_dim = 100  # dimensionality of the random noise fed to G
h_dim = 128  # hidden layer width for both G and D
batch_size = 60000 // 1000  # 60 per batch; divides MNIST's 60000 exactly
lr = 1e-3  # Adam learning rate for both networks
epochs = 120
##### load data and generate targets
# NOTE: we only need train data
trainset = MNIST('mnist', train=True, transform=ToTensor(), download=True)
dataloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
# targets for D, who only output 1 for real data, 0 for fake data
real_target = torch.ones(batch_size, 1)
fake_target = torch.zeros(batch_size, 1)
##### network arch
class Generator(nn.Module):
    """Generator: maps a noise vector to a fake sample meant to fool the
    discriminator.

    Args:
        z_dim: size of the input noise vector.
        h_dim: hidden layer width.
        x_dim: size of the generated sample (default 28*28, one flattened
            MNIST digit).  Previously this was read from a module-level
            global; taking it as a defaulted parameter removes the hidden
            dependency without changing existing callers.
    """
    def __init__(self, z_dim, h_dim, x_dim=28 * 28):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(z_dim, h_dim),
            nn.ReLU(inplace=True),
            nn.Linear(h_dim, x_dim),
            nn.Sigmoid(),  # outputs in [0, 1], like ToTensor() images
        )

    def forward(self, x):
        # x: (batch, z_dim) noise -> (batch, x_dim) fake sample
        return self.net(x)
class Discriminator(nn.Module):
    """Discriminator: scores an input with the probability that it is
    real data rather than output of the generator."""

    def __init__(self, x_dim, h_dim):
        super().__init__()
        layers = [
            nn.Linear(x_dim, h_dim),
            nn.ReLU(inplace=True),
            nn.Linear(h_dim, 1),
            nn.Sigmoid(),  # single probability in [0, 1]
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # x: (batch, x_dim) -> (batch, 1) realness score
        return self.net(x)
##### init model
G = Generator(z_dim, h_dim)
D = Discriminator(x_dim, h_dim)
##### optimizer and loss
G_optim = optim.Adam(G.parameters(), lr=lr)
D_optim = optim.Adam(D.parameters(), lr=lr)
# Binary cross-entropy for both sides of the GAN objective.
D_loss_real = nn.BCELoss()
D_loss_fake = nn.BCELoss()
G_loss_f2r = nn.BCELoss()
##### training loop
G.train()
D.train()
for epoch_i in range(epochs):
    for x, _ in dataloader:  # class labels are unused for GAN training
        # step 1: G generate fake data fx using noise z
        z = torch.randn(batch_size, z_dim)
        fx = G(z)
        # step 2: D judge on (fx, x), then update itself
        # NOTE(review): fx is not detached, so D_loss.backward() also
        # accumulates gradients into G; they are discarded by
        # G_optim.zero_grad() below, but detaching here would save work.
        fake = D(fx)
        x = x.view(-1, x_dim)
        real = D(x)
        D_loss = D_loss_real(real, real_target) + D_loss_fake(fake, fake_target)
        D_optim.zero_grad()
        D_loss.backward()
        D_optim.step()
        # step 3: G update — push D's score on fresh fakes toward "real"
        z = torch.randn(batch_size, z_dim)
        fx = G(z)
        fake = D(fx)
        G_loss = G_loss_f2r(fake, real_target)
        G_optim.zero_grad()
        G_loss.backward()
        G_optim.step()
    # for each epoch, log the last batch's losses and plot 16 samples
    print(f"Epoch-{epoch_i}: D_loss: {D_loss:.5f}, G_loss: {G_loss:.5f}")
    visual_mnist(epoch_i, fx.detach().numpy()[:16], (4, 4))
c5bc81bb35d2ed62c01fad8ae72988d4d2b92a04 | 6,296 | py | Python | apps/orgs/views.py | hzde0128/edu_online | c514493d98e1a3a1033c0471f47307c9bc5ae3ec | [
"MIT"
] | 11 | 2020-04-11T14:41:07.000Z | 2022-01-30T06:02:21.000Z | apps/orgs/views.py | hzde0128/edu_online | c514493d98e1a3a1033c0471f47307c9bc5ae3ec | [
"MIT"
] | 1 | 2020-06-20T13:37:28.000Z | 2020-06-20T13:37:28.000Z | apps/orgs/views.py | hzde0128/edu_online | c514493d98e1a3a1033c0471f47307c9bc5ae3ec | [
"MIT"
] | 4 | 2020-10-12T07:01:37.000Z | 2021-08-08T11:29:33.000Z | from django.shortcuts import render, redirect, reverse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from django.views.generic import View
from .models import OrgInfo, CityInfo, TeacherInfo
from operations.models import UserLove
# Create your views here.
class OrgList(View):
    """Organization list: keyword search, category/city filters, sorting, pagination."""

    @staticmethod
    def get(request):
        orgs = OrgInfo.objects.all()
        cities = CityInfo.objects.all()
        # Top-3 most-loved organizations, computed before any filtering.
        top_orgs = orgs.order_by('-love_num')[:3]
        # Fuzzy global search over name / short description / detail.
        keyword = request.GET.get('keyword', '')
        if keyword:
            orgs = orgs.filter(
                Q(name__icontains=keyword)
                | Q(desc__icontains=keyword)
                | Q(detail__icontains=keyword)
            )
        # Filter by organization category.
        category = request.GET.get('cat', '')
        if category:
            orgs = orgs.filter(org_category=category)
        # Filter by city.
        city_id = request.GET.get('city', '')
        if city_id:
            orgs = orgs.filter(city_id=int(city_id))
        # Descending sort; sorting by course count is not implemented yet.
        sort = request.GET.get('sort', '')
        if sort and sort != 'course_num':
            orgs = orgs.order_by('-' + sort)
        # Two organizations per page; fall back to first/last page on bad input.
        paginator = Paginator(orgs, 2)
        try:
            pages = paginator.page(request.GET.get('page'))
        except PageNotAnInteger:
            pages = paginator.page(1)
        except EmptyPage:
            pages = paginator.page(paginator.num_pages)
        return render(request, 'orgs/org-list.html', {
            'all_orgs': orgs,
            'all_citys': cities,
            'sort_orgs': top_orgs,
            'pages': pages,
            'category': category,
            'city_id': city_id,
            'sort': sort,
            'keyword': keyword,
        })
class OrgDetail(View):
    """Organization detail page, home tab.

    Every visit increments the organization's click counter; when the
    visitor is authenticated, their favourite ("love") state for this
    organization is looked up for the template.
    """

    @staticmethod
    def get(request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            # Count this page view.
            org.click_num += 1
            org.save()
            faved = False
            if request.user.is_authenticated:
                faved = bool(UserLove.objects.filter(
                    love_man=request.user,
                    love_id=int(org_id),
                    love_type=1,
                    love_status=True,
                ))
            return render(request, 'orgs/org-detail-homepage.html', {
                'org': org,
                'detail_type': 'home',
                'love_status': faved,
            })
class OrgDetailCourse(View):
    """Organization detail page, courses tab."""

    def get(self, request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            faved = False
            if request.user.is_authenticated:
                faved = bool(UserLove.objects.filter(
                    love_man=request.user,
                    love_id=int(org_id),
                    love_type=1,
                    love_status=True,
                ))
            return render(request, 'orgs/org-detail-course.html', {
                'org': org,
                'detail_type': 'course',
                'love_status': faved,
            })
class OrgDetailDesc(View):
    """Organization detail page, description tab."""

    def get(self, request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            faved = False
            if request.user.is_authenticated:
                faved = bool(UserLove.objects.filter(
                    love_man=request.user,
                    love_id=int(org_id),
                    love_type=1,
                    love_status=True,
                ))
            return render(request, 'orgs/org-detail-desc.html', {
                'org': org,
                'detail_type': 'desc',
                'love_status': faved,
            })
class OrgDetailTeacher(View):
    """Organization detail page, teachers tab."""

    def get(self, request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            faved = False
            if request.user.is_authenticated:
                faved = bool(UserLove.objects.filter(
                    love_man=request.user,
                    love_id=int(org_id),
                    love_type=1,
                    love_status=True,
                ))
            return render(request, 'orgs/org-detail-teachers.html', {
                'org': org,
                'detail_type': 'teacher',
                'love_status': faved,
            })
class TeacherList(View):
    """Teacher list: keyword search, sorting, pagination, top-2 recommendations."""

    @staticmethod
    def get(request):
        teachers = TeacherInfo.objects.all()
        # Two most-loved teachers, computed before filtering.
        recommend = teachers.order_by('-love_num')[:2]
        # Fuzzy search on the teacher name.
        keyword = request.GET.get('keyword', '')
        if keyword:
            teachers = teachers.filter(Q(name__icontains=keyword))
        # Optional descending sort on a caller-supplied field.
        sort = request.GET.get('sort', '')
        if sort:
            teachers = teachers.order_by('-' + sort)
        # Two teachers per page; tolerate bad page numbers.
        paginator = Paginator(teachers, 2)
        try:
            pages = paginator.page(request.GET.get('page'))
        except PageNotAnInteger:
            pages = paginator.page(1)
        except EmptyPage:
            pages = paginator.page(paginator.num_pages)
        return render(request, 'orgs/teachers-list.html', {
            'all_teachers': teachers,
            'pages': pages,
            'recommend': recommend,
            'sort': sort,
            'keyword': keyword,
        })
class TeacherDetail(View):
    """Teacher detail page.

    Increments the teacher's click counter and shows the three
    most-clicked teachers as recommendations.
    """

    def get(self, request, teacher_id):
        # Local import keeps this file's module-level dependencies unchanged.
        from django.http import Http404

        if teacher_id:
            teacher_qs = TeacherInfo.objects.filter(id=teacher_id)
            if not teacher_qs:
                # Bug fix: an unknown id previously fell through and crashed
                # with UnboundLocalError on `teacher`; report a proper 404.
                raise Http404('Teacher not found')
            teacher = teacher_qs[0]
            # Count this page view.
            teacher.click_num += 1
            teacher.save()
            # Teacher ranking: top three by click count.
            recommend = TeacherInfo.objects.all().order_by('-click_num')[:3]
            return render(request, 'orgs/teacher-detail.html', {
                'teacher': teacher,
                'recommend': recommend,
            })
| 29.420561 | 122 | 0.54749 | 6,321 | 0.949669 | 0 | 0 | 3,304 | 0.496394 | 0 | 0 | 1,440 | 0.216346 |
c5bd4d3b64eb6bd036b9f5e78b463c58d2f38085 | 1,021 | py | Python | PictureColoring/MyUtils.py | chiihero/DeepLearning | 029d547ada401a00a1cc0b314cdb2fb8937062c9 | [
"Apache-2.0"
] | null | null | null | PictureColoring/MyUtils.py | chiihero/DeepLearning | 029d547ada401a00a1cc0b314cdb2fb8937062c9 | [
"Apache-2.0"
] | null | null | null | PictureColoring/MyUtils.py | chiihero/DeepLearning | 029d547ada401a00a1cc0b314cdb2fb8937062c9 | [
"Apache-2.0"
] | null | null | null | from keras.callbacks import TensorBoard,EarlyStopping,TerminateOnNaN,ReduceLROnPlateau,ModelCheckpoint
import os
import sys
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
file_abspath = os.path.abspath(sys.argv[0]) # absolute path of the running script
location = os.path.dirname(file_abspath) # directory containing the running script
# Shared Keras training callbacks, collected in `callbacklist` below.
tbCallBack = TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
# Stop training once validation accuracy has not improved for 10 epochs.
esCallBack=EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
tnonCallBack = TerminateOnNaN()
# Multiply the learning rate by 0.2 after 3 stagnant epochs, floor at 1e-4.
rpCallBack = ReduceLROnPlateau(monitor='val_acc', factor=0.2,patience=3, min_lr=0.0001)
# Save the best model next to the script; assumes the script name ends in ".py" -- TODO confirm
mcCallBack = ModelCheckpoint(filepath=file_abspath[:-3]+'.model', monitor='val_acc', mode='auto', period=1,save_best_only=True)
callbacklist=[tbCallBack,esCallBack,tnonCallBack,rpCallBack,mcCallBack]
| 51.05 | 248 | 0.813908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.111323 |
c5be6868849918d06af4471aca57e790b36777ca | 8,545 | py | Python | tests/analysis/test_executor.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 1 | 2019-07-09T16:42:15.000Z | 2019-07-09T16:42:15.000Z | tests/analysis/test_executor.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 103 | 2016-11-14T15:58:53.000Z | 2022-03-07T21:01:03.000Z | tests/analysis/test_executor.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 2 | 2017-03-17T20:43:22.000Z | 2018-01-04T19:15:18.000Z | from copy import deepcopy
from bmds.bmds3.constants import ContinuousModelIds, DichotomousModelIds
from bmds.bmds3.types.priors import PriorClass
from bmds_server.analysis.executor import AnalysisSession
class TestAnalysisSession:
    """Tests for AnalysisSession.create: default model selection, prior
    classes, and Exponential/Multistage/Polynomial expansion rules, plus
    distribution-type based model filtering.
    """
    def test_default_dichotomous(self, bmds3_complete_dichotomous):
        # assure a default dataset can be created
        data = deepcopy(bmds3_complete_dichotomous)
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 1
        assert len(session.bayesian.models) == 1
    def test_default_continuous(self, bmds3_complete_continuous):
        # assure a default dataset can be created
        data = deepcopy(bmds3_complete_continuous)
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 1
        assert len(session.bayesian.models) == 1
    def test_default_continuous_individual(self, bmds3_complete_continuous_individual):
        # assure a default dataset can be created
        data = deepcopy(bmds3_complete_continuous_individual)
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 1
        assert len(session.bayesian.models) == 1
    def test_prior_classes(self, bmds3_complete_dichotomous):
        # the same model requested in all three buckets gets the matching prior class
        data = deepcopy(bmds3_complete_dichotomous)
        data["models"] = {
            "frequentist_restricted": ["Gamma"],
            "frequentist_unrestricted": ["Gamma"],
            "bayesian": [{"model": "Gamma", "prior_weight": 1}],
        }
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 2
        assert len(session.bayesian.models) == 1
        assert (
            session.frequentist.models[0].settings.priors.prior_class
            is PriorClass.frequentist_restricted
        )
        assert (
            session.frequentist.models[1].settings.priors.prior_class
            is PriorClass.frequentist_unrestricted
        )
        assert session.bayesian.models[0].settings.priors.prior_class is PriorClass.bayesian
    def test_exponential_unpacking(self, bmds3_complete_continuous):
        # "Exponential" expands into the M3 and M5 exponential sub-models
        data = deepcopy(bmds3_complete_continuous)
        data["models"] = {
            "frequentist_restricted": ["Exponential"],
            "bayesian": [{"model": "Exponential", "prior_weight": 1}],
        }
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 2
        assert session.frequentist.models[0].bmd_model_class.id == ContinuousModelIds.c_exp_m3
        assert session.frequentist.models[1].bmd_model_class.id == ContinuousModelIds.c_exp_m5
        assert len(session.bayesian.models) == 2
        assert session.bayesian.models[0].bmd_model_class.id == ContinuousModelIds.c_exp_m3
        assert session.bayesian.models[1].bmd_model_class.id == ContinuousModelIds.c_exp_m5
    def test_multistage_permutations(self, bmds3_complete_dichotomous):
        # helper: session must contain exactly the multistage models of degree 1..n
        def _expected_degree(session, n: int):
            assert session.bayesian is None
            assert len(session.frequentist.models) == n
            model_classes = set([model.bmd_model_class.id for model in session.frequentist.models])
            assert model_classes == {DichotomousModelIds.d_multistage}
            degrees = set([model.settings.degree for model in session.frequentist.models])
            assert degrees == set(list(range(1, n + 1)))
        # degree = 1
        data = deepcopy(bmds3_complete_dichotomous)
        data["models"] = {"frequentist_restricted": ["Multistage"]}
        data["dataset_options"][0]["degree"] = 1
        session = AnalysisSession.create(data, 0, 0)
        _expected_degree(session, 1)
        # degree = 2
        data = deepcopy(bmds3_complete_dichotomous)
        data["models"] = {"frequentist_restricted": ["Multistage"]}
        data["dataset_options"][0]["degree"] = 2
        session = AnalysisSession.create(data, 0, 0)
        _expected_degree(session, 2)
        # 3 dose-groups; degree = N-1; expected 2
        # degree 0 means "auto" -> num_doses - 1, clamped to the range [2, 4]
        for num_doses in range(3, 8):
            expected_degree = min(max(num_doses - 1, 2), 4)
            data = deepcopy(bmds3_complete_dichotomous)
            data["datasets"] = [
                {
                    "dtype": "D",
                    "metadata": {"id": 123},
                    "doses": list(range(num_doses)),
                    "ns": list(range(num_doses)),
                    "incidences": list(range(num_doses)),
                }
            ]
            assert len(data["datasets"][0]["doses"]) == num_doses
            data["models"] = {"frequentist_restricted": ["Multistage"]}
            data["dataset_options"][0]["degree"] = 0  # n-1
            session = AnalysisSession.create(data, 0, 0)
            print(f"{num_doses=} {expected_degree=}")
            _expected_degree(session, expected_degree)
        # degree = N -1, bayesian, fixed at degree == 2
        data = deepcopy(bmds3_complete_dichotomous)
        data["models"] = {"bayesian": [{"model": "Multistage", "prior_weight": 1}]}
        data["dataset_options"][0]["degree"] = 0
        session = AnalysisSession.create(data, 0, 0)
        assert session.frequentist is None
        assert len(session.bayesian.models) == 1
        model = session.bayesian.models[0]
        assert model.bmd_model_class.id == DichotomousModelIds.d_multistage
        assert model.settings.degree == 2
    def test_polynomial_unpacking(self, bmds3_complete_continuous):
        # test linear; degree 0
        data = deepcopy(bmds3_complete_continuous)
        data["models"] = {"frequentist_unrestricted": ["Linear"]}
        data["dataset_options"][0]["degree"] = 0
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 1
        assert session.frequentist.models[0].settings.degree == 1
        assert session.bayesian is None
        # test polynomial; degree 2
        data = deepcopy(bmds3_complete_continuous)
        data["models"] = {"frequentist_unrestricted": ["Polynomial"]}
        data["dataset_options"][0]["degree"] = 2
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 1
        assert session.frequentist.models[0].settings.degree == 2
        assert session.bayesian is None
        # test polynomial; degree 3 (expands to degrees 2 and 3)
        data = deepcopy(bmds3_complete_continuous)
        data["models"] = {"frequentist_unrestricted": ["Polynomial"]}
        data["dataset_options"][0]["degree"] = 3
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 2
        assert session.frequentist.models[0].settings.degree == 2
        assert session.frequentist.models[1].settings.degree == 3
        assert session.bayesian is None
        # test linear + polynomial; degree 3
        data = deepcopy(bmds3_complete_continuous)
        data["models"] = {"frequentist_unrestricted": ["Linear", "Polynomial"]}
        data["dataset_options"][0]["degree"] = 3
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 3
        assert session.frequentist.models[0].settings.degree == 1
        assert session.frequentist.models[1].settings.degree == 2
        assert session.frequentist.models[2].settings.degree == 3
        assert session.bayesian is None
    # disttype 3 Linear and power are not added
    def test_disttype(self, bmds3_complete_continuous):
        data = deepcopy(bmds3_complete_continuous)
        data["models"] = {
            "frequentist_restricted": ["Hill", "Linear", "Power"],
        }
        # normal
        data["options"][0]["dist_type"] = 1
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 3
        names = [model.name() for model in session.frequentist.models]
        assert names == ["Hill", "Linear", "Power"]
        data["options"][0]["dist_type"] = 2
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 3
        names = [model.name() for model in session.frequentist.models]
        assert names == ["Hill", "Linear", "Power"]
        # lognormal
        data["options"][0]["dist_type"] = 3
        session = AnalysisSession.create(data, 0, 0)
        assert len(session.frequentist.models) == 1
        names = [model.name() for model in session.frequentist.models]
        assert names == ["Hill"]
| 45.452128 | 99 | 0.640609 | 8,336 | 0.975541 | 0 | 0 | 0 | 0 | 0 | 0 | 1,495 | 0.174956 |
c5bedaf2756a09941c7750735b35a4cb10278fa5 | 483 | py | Python | 1094.py | gabzin/beecrowd | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | 3 | 2021-12-15T20:27:14.000Z | 2022-03-01T12:30:08.000Z | 1094.py | gabzin/uri | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | null | null | null | 1094.py | gabzin/uri | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | null | null | null | tot=coe=rat=sap=0
# beecrowd 1094: tally lab animals per species (C=coelhos, R=ratos, S=sapos),
# then print totals and per-species percentages of the grand total.
for _ in range(int(input())):
    amount, species = input().split()
    amount = int(amount)
    tot += amount
    if species == 'C':
        coe += amount
    elif species == 'R':
        rat += amount
    elif species == 'S':
        sap += amount
print(f"Total: {tot} cobaias")
print(f"Total de coelhos: {coe}")
print(f"Total de ratos: {rat}")
print(f"Total de sapos: {sap}")
print(f"Percentual de coelhos: {coe / tot * 100:.2f} %")
print(f"Percentual de ratos: {rat / tot * 100:.2f} %")
print(f"Percentual de sapos: {sap / tot * 100:.2f} %")
| 25.421053 | 101 | 0.585921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.42236 |
c5c1ceac12c3d4acf6d93b005cde760e9701d344 | 1,722 | py | Python | extras/createTestBlocksForReadBlkUpdate.py | Manny27nyc/BitcoinArmory | 1d02a6640d6257ab0c37013e5cd4b99681a5cfc3 | [
"MIT"
] | 505 | 2016-02-04T15:54:46.000Z | 2022-03-27T18:43:01.000Z | extras/createTestBlocksForReadBlkUpdate.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 528 | 2016-02-06T19:50:12.000Z | 2022-01-15T10:21:16.000Z | extras/createTestBlocksForReadBlkUpdate.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 208 | 2015-01-02T10:31:40.000Z | 2021-12-14T07:37:36.000Z | from sys import path
path.append('..')
from armoryengine import *
# NOTE(review): Python 2 script (print statements below); run under py2.
TheBDM.setBlocking(True)
TheBDM.setOnlineMode(True)
if not os.path.exists('testmultiblock'):
   os.mkdir('testmultiblock')
fout = []
# (first block height, last block height, output file) triples; the *_testN
# files extend or split the base blk files to exercise blk-file updating.
fout.append([0, 101, 'testmultiblock/blk00000.dat'])
fout.append([0, 102, 'testmultiblock/blk00000_test1.dat']) # Add 1 block
fout.append([0, 105, 'testmultiblock/blk00000_test2.dat']) # Add 3 blocks
fout.append([106, 106, 'testmultiblock/blk00001_test3.dat']) # Just block split
fout.append([107, 109, 'testmultiblock/blk00002_test4.dat']) # Another block split 3 blks
fout.append([107, 110, 'testmultiblock/blk00002_test5.dat']) # Add block
fout.append([110, 113, 'testmultiblock/blk00003_test5.dat']) # and split
# Start from a clean slate: remove any output left by a previous run.
for start,end,theFile in fout:
   if os.path.exists(theFile):
      os.remove(theFile)
lastLocation = [0]*len(fout)
openfiles = [[trip[0], trip[1], open(trip[2],'wb')] for trip in fout]
# Assume we are only reading into blk000000.dat, no split
for h in range(120):
   head = TheBDM.getHeaderByHeight(h)
   blk = head.serializeWholeBlock(MAGIC_BYTES, True)
   # Write the serialized block into every file whose height range covers h.
   for i,trip in enumerate(openfiles):
      start,end,theFile = trip
      if (start <= h <= end):
         theFile.write(blk)
         lastLocation[i] += len(blk)
for start,end,opnfil in openfiles:
   opnfil.close()
# Pad each file with zero bytes to a fixed size (22000 for the first three,
# 1000 for the rest) so file sizes are deterministic for the tests.
for i,trip in enumerate(fout):
   start,end,theFile = trip
   sz = os.path.getsize(theFile)
   f = open(theFile,'ab')
   if i<3:
      f.write('\x00'*(22000-sz))
   else:
      f.write('\x00'*(1000-sz))
   f.close()
print 'Blocks written out:'
for start,end,fn in fout:
   if end-start==0:
      print '\t%d in file: %s' % (end,fn)
   else:
      print '\t%d-%d in file: %s' % (start,end,fn)
| 28.229508 | 89 | 0.662602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 516 | 0.299652 |
c5c1e4e3885d653cd8bccb778237b0517bd6b7f7 | 369 | py | Python | dashboard/migrations/0034_auto_20201226_2150.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | dashboard/migrations/0034_auto_20201226_2150.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | dashboard/migrations/0034_auto_20201226_2150.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-12-26 20:50
from django.db import migrations
class Migration(migrations.Migration):
    # Renames SleepNight.data to SleepNight.diary_day (schema-only change).
    dependencies = [
        ('dashboard', '0033_auto_20201226_2148'),
    ]
    operations = [
        migrations.RenameField(
            model_name='sleepnight',
            old_name='data',
            new_name='diary_day',
        ),
    ]
| 20.5 | 49 | 0.593496 | 284 | 0.769648 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.303523 |
c5c266d68c81372fdaece6c752fe6c6bf1fa42c1 | 7,050 | py | Python | fear_greed_index/CNNFearAndGreedIndex.py | mcharipar/multi_tool_bot | 6f9ec31a6542d6bb6782fff5cd8e43ce4e9b3670 | [
"MIT"
] | null | null | null | fear_greed_index/CNNFearAndGreedIndex.py | mcharipar/multi_tool_bot | 6f9ec31a6542d6bb6782fff5cd8e43ce4e9b3670 | [
"MIT"
] | null | null | null | fear_greed_index/CNNFearAndGreedIndex.py | mcharipar/multi_tool_bot | 6f9ec31a6542d6bb6782fff5cd8e43ce4e9b3670 | [
"MIT"
] | null | null | null | """Fear and Greed Index Class"""
__docformat__ = "numpy"
from matplotlib import pyplot as plt
from fear_greed_index import scrape_cnn
from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator
class CNNFearAndGreedIndex:
"""CNN Fear and Greed Index
Attributes
----------
junk_bond_demand : FearAndGreedIndicator
Junk Bond Demand indicator
market_volatility : FearAndGreedIndicator
Market Volatility indicator
put_and_call_options : FearAndGreedIndicator
Put and Call Options indicator
market_momentum : FearAndGreedIndicator
Market Momentum indicator
stock_price_strength : FearAndGreedIndicator
Stock Price Strength indicator
stock_price_breadth : FearAndGreedIndicator
Stock Price Breadth indicator
safe_heaven_demand : FearAndGreedIndicator
Safe Heaven Demand indicator
index_summary : str
Summary of the current sentiment index
index_chat : "N/A"
Chart of the historical sentiment index
"""
indicator_chart_type = {
"Junk Bond Demand": "IGHYPtile",
"Market Volatility": "VIXPtile",
"Put and Call Options": "PutCallPtile",
"Market Momentum": "SPXPtile",
"Stock Price Strength": "NHNLPtile",
"Stock Price Breadth": "McOscPtile",
"Safe Heaven Demand": "StkBdPtile",
}
    def __init__(self):
        """Create the seven indicators, then immediately scrape CNN to
        populate them and the overall index (performs network I/O)."""
        self.junk_bond_demand = FearAndGreedIndicator("Junk Bond Demand")
        self.market_volatility = FearAndGreedIndicator("Market Volatility")
        self.put_and_call_options = FearAndGreedIndicator("Put and Call Options")
        self.market_momentum = FearAndGreedIndicator("Market Momentum")
        self.stock_price_strength = FearAndGreedIndicator("Stock Price Strength")
        self.stock_price_breadth = FearAndGreedIndicator("Stock Price Breadth")
        self.safe_heaven_demand = FearAndGreedIndicator("Safe Heaven Demand")
        self.index_summary = "N/A"
        self.index_chart = None
        # Order matters: _load_fear_and_greed fills these in scrape order.
        self.all_indicators = [
            self.junk_bond_demand,
            self.market_volatility,
            self.put_and_call_options,
            self.market_momentum,
            self.stock_price_strength,
            self.stock_price_breadth,
            self.safe_heaven_demand,
        ]
        self._load_fear_and_greed()
    def _load_fear_and_greed(self):
        """Load Fear and Greed Index by scraping CNN data.

        Walks the "modContent feargreed" markup and fills each indicator's
        summary/sentiment/chart, then the overall index summary and chart.
        Tightly coupled to CNN's page layout.
        """
        text_soup_cnn = scrape_cnn._get_fear_greed_index()
        # Fill in indicators summary, last_sentiment, last_changed, update_on
        # (one "wsod_fLeft smarttext" div per indicator, in all_indicators order).
        indicator_idx = 0
        for text in text_soup_cnn.findAll("div", {"class": "modContent feargreed"}):
            for content in text.contents:
                for txt in content.find_all("div", {"class": "wsod_fLeft smarttext"}):
                    self.all_indicators[indicator_idx]._set_summary(
                        txt.contents[0].text
                    )
                    if len(txt.contents) > 1:
                        self.all_indicators[indicator_idx]._set_last_changed(
                            txt.contents[1].text
                        )
                        self.all_indicators[indicator_idx]._set_last_sentiment(
                            txt.contents[1].span.text
                        )
                    if len(txt.contents) > 2:
                        self.all_indicators[indicator_idx]._set_update_on(
                            txt.contents[2].text
                        )
                    indicator_idx += 1
        # Fill in indicator sentiment (skip the chart divs).
        indicator_idx = 0
        for text in text_soup_cnn.findAll("div", {"class": "modContent feargreed"}):
            for content in text.contents:
                for txt in content.find_all("div", {"class": "wsod_fRight"}):
                    if "wsod_fgIndicatorCht" not in txt["class"]:
                        self.all_indicators[indicator_idx]._set_sentiment(
                            txt.contents[0]
                        )
                        indicator_idx += 1
        # Fill in indicators charts
        for indicator in self.all_indicators:
            indicator._set_chart(
                scrape_cnn._get_chart(
                    self.indicator_chart_type[indicator.get_type_indicator()]
                )
            )
        # Fill in fear and greed index: split the header text on ")" and
        # rebuild the fragments into a multi-line summary.
        index_data = (
            text_soup_cnn.findAll("div", {"class": "modContent feargreed"})[0]
            .contents[0]
            .text
        )
        fg_index = [fg + ")" for fg in index_data.split(")")[:-1]]
        self.index_summary = fg_index[0] + "\n "
        # NOTE(review): str.strip removes any of the characters in
        # "Fear & Greed " from both ends, not the literal prefix — confirm
        # this is the intended behavior.
        self.index_summary += "\n ".join(
            [fg.strip("Fear & Greed ") for fg in fg_index[1:]]
        )
        # Fill in index chart
        self.index_chart = scrape_cnn._get_chart("AvgPtileModel")
    def get_junk_bond_demand(self):
        """Return the Junk Bond Demand indicator."""
        return self.junk_bond_demand
    def get_market_volatility(self):
        """Return the Market Volatility indicator."""
        return self.market_volatility
    def get_put_and_call_options(self):
        """Return the Put and Call Options indicator."""
        return self.put_and_call_options
    def get_market_momentum(self):
        """Return the Market Momentum indicator."""
        return self.market_momentum
    def get_stock_price_strength(self):
        """Return the Stock Price Strength indicator."""
        return self.stock_price_strength
    def get_stock_price_breadth(self):
        """Return the Stock Price Breadth indicator."""
        return self.stock_price_breadth
    def get_safe_heaven_demand(self):
        """Return the Safe Heaven Demand indicator."""
        return self.safe_heaven_demand
def get_indicators_report(self):
"""Get Indicators Report"""
indicators_report = ""
for indicator in self.all_indicators:
indicators_report += indicator.get_report() + "\n"
return indicators_report
    def get_index(self):
        """Return the textual summary of the overall index."""
        return self.index_summary
    def get_index_chart(self):
        """Return the overall index chart (as produced by scrape_cnn._get_chart)."""
        return self.index_chart
def get_complete_report(self):
"""Plot Complete report"""
complete_report = self.get_index() + "\n\n"
complete_report += self.get_indicators_report()
return complete_report
def plot_all_charts(self, fig: plt.figure):
"""Plot all indicators and index charts
Parameters
----------
plt.figure
matplotlib figure to plot all charts
Returns
-------
plt.figure
matplotlib figure ready to be plot
"""
for i, indicator in enumerate(self.all_indicators):
ax = fig.add_subplot(3, 3, i + 1)
ax.set_axis_off()
plt.imshow(indicator.chart)
ax = fig.add_subplot(3, 3, 8)
ax.set_axis_off()
plt.imshow(self.index_chart)
fig.subplots_adjust(wspace=0, hspace=-1)
plt.tight_layout()
return fig | 34.90099 | 86 | 0.601135 | 6,840 | 0.970213 | 0 | 0 | 0 | 0 | 0 | 0 | 2,241 | 0.317872 |
c5c3d96b3d93a31801e5a47a67db36d7b2787b3f | 1,632 | py | Python | pyDEV/imagespic.py | wangzhihong911/py37 | 68e39791689bd35dcdc1f08c05a8d6e7c7bacf11 | [
"BSD-3-Clause"
] | null | null | null | pyDEV/imagespic.py | wangzhihong911/py37 | 68e39791689bd35dcdc1f08c05a8d6e7c7bacf11 | [
"BSD-3-Clause"
] | null | null | null | pyDEV/imagespic.py | wangzhihong911/py37 | 68e39791689bd35dcdc1f08c05a8d6e7c7bacf11 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/dev python
# coding=utf-8
#让爬虫等待几秒
from pyDEV import BsDI, ToolDI
from bs4 import BeautifulSoup;
import os
import urllib;
#创建入口url
# Entry URL: page 1 is the base listing, page N >= 2 lives at p_N.html.
base_url = "http://48et.com/pic/12/"
site_root = 'http://48et.com'
save_file = 'E:\\av_images\\'
for i in range(1, 1001):
    # Bug fix: the original did `url_web = url_web + html_pag`, appending each
    # page suffix onto the previously built URL, so from page 3 onward the URL
    # was garbage (".../p_2.htmlp_3.html..."). Build each page URL from the base.
    if i > 1:
        page_url = base_url + 'p_' + str(i) + '.html'
    else:
        page_url = base_url
    html = BsDI.getURL_CODE(page_url, 'GBK')
    soup = BsDI.get_htmlInfo(html)
    html_body = soup.select('body')
    html_inWrap = html_body[0].find_all('div')
    # The 9th div of the body holds the album list — site-layout dependent.
    html_mydiv = html_inWrap[8]
    html_ul = html_mydiv.find_all('ul')
    html_li = html_ul[0].select('li')
    for tag in html_li:
        html_a = tag.select('a')[0]
        # Relative album URL and album title.
        html_ohref = html_a.attrs['href'].strip().replace(' ', '')
        html_title = html_a.text
        # Create one folder per album.
        path = ToolDI.create_file(html_title, save_file)
        if not os.path.exists(path):
            os.mkdir(path)
            os.chdir(path)
        # Fetch the album page and download every image in the post body.
        htm_pic = BsDI.getURL_CODE(site_root + html_ohref, 'GBK')
        htm_soup = BsDI.get_htmlInfo(htm_pic)
        htm_body = htm_soup.select('div.post')
        htm_img = htm_body[0].select('img')
        # Bug fix: the original used htm_img.index(htm_tag) for the file index,
        # which returns the FIRST equal tag, so duplicate <img> tags clobbered
        # each other's files; enumerate gives a unique index per image.
        for inner, htm_tag in enumerate(htm_img):
            link = htm_tag.attrs['src']
            ToolDI.copy_img(link, path, inner)
        print('%s%s%s' % ('文件夹:', html_title, '执行完毕'))
| 32 | 115 | 0.604167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.307182 |
c5c4c83c0fcae56bd39628ba7e24ff40a480e5b9 | 3,553 | py | Python | models/van_der_waals.py | HARSHAL-IITB/spa-design-tool | 84d250a02cc3f4af56770550c9f559feb524cb07 | [
"MIT"
] | null | null | null | models/van_der_waals.py | HARSHAL-IITB/spa-design-tool | 84d250a02cc3f4af56770550c9f559feb524cb07 | [
"MIT"
] | null | null | null | models/van_der_waals.py | HARSHAL-IITB/spa-design-tool | 84d250a02cc3f4af56770550c9f559feb524cb07 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015, EPFL Reconfigurable Robotics Laboratory,
# Philip Moseley, philip.moseley@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
#--------------------------------------------------------------------------------
# Material model name.
#--------------------------------------------------------------------------------
def name():
    """Short identifier used to select this material model."""
    return 'vdw'


def pname():
    """Human-readable model name."""
    return 'Van der Waals'


def params():
    """Space-separated parameter names expected by the model."""
    return 'mu lambda_m alpha beta'


def descr():
    """One-line description of the model."""
    return 'Van der Waals Model.'
#--------------------------------------------------------------------------------
# Function defining the uniaxial stress given strain.
#--------------------------------------------------------------------------------
def stressU(x, u, Lm, a, B):
    """Nominal uniaxial stress for the Van der Waals hyperelastic model.

    x  -- engineering strain (stretch L = 1 + x); scalar or ndarray
    u  -- shear modulus mu
    Lm -- locking stretch lambda_m
    a  -- global interaction parameter alpha
    B  -- invariant mixing parameter beta (0 -> I1 only, 1 -> I2 only)
    """
    L = 1.0 + x
    # Generalized strain invariant: blend of I1 and I2 weighted by beta.
    inv1 = L**2.0 + 2.0 * L**-1.0
    inv2 = L**-2.0 + 2.0 * L
    inv = (1.0 - B) * inv1 + B * inv2
    # Fractional approach to the locking stretch.
    eta = np.sqrt((inv - 3.0) / (Lm**2.0 - 3.0))
    factor = 1.0 / (1.0 - eta) - a * np.sqrt(0.5 * (inv - 3.0))
    weight = L * (1.0 - B) + B
    return u * (1.0 - L**-3.0) * factor * weight
#--------------------------------------------------------------------------------
# Function defining the biaxial stress given strain.
#--------------------------------------------------------------------------------
def stressB(x, u, Lm, a, B):
    """Nominal equibiaxial stress for the Van der Waals hyperelastic model.

    x  -- engineering strain (stretch L = 1 + x); scalar or ndarray
    u  -- shear modulus mu
    Lm -- locking stretch lambda_m
    a  -- global interaction parameter alpha
    B  -- invariant mixing parameter beta
    """
    L = 1.0 + x
    # Equibiaxial invariants, blended by beta.
    inv1 = 2.0 * L**2.0 + L**-4.0
    inv2 = 2.0 * L**-2.0 + L**4.0
    inv = (1.0 - B) * inv1 + B * inv2
    # Fractional approach to the locking stretch.
    eta = np.sqrt((inv - 3.0) / (Lm**2.0 - 3.0))
    factor = 1.0 / (1.0 - eta) - a * np.sqrt(0.5 * (inv - 3.0))
    weight = 1.0 - B + B * L**2.0
    return u * (L - L**-5.0) * factor * weight
#--------------------------------------------------------------------------------
# Function defining the planar stress given strain.
#--------------------------------------------------------------------------------
def stressP(x, u, Lm, a, B):
    """Nominal planar (pure shear) stress for the Van der Waals model.

    x  -- engineering strain (stretch L = 1 + x); scalar or ndarray
    u  -- shear modulus mu
    Lm -- locking stretch lambda_m
    a  -- global interaction parameter alpha
    B  -- invariant mixing parameter beta (I1 == I2 in planar loading)
    """
    L = 1.0 + x
    inv1 = L**2.0 + L**-2.0 + 1.0
    inv2 = inv1
    inv = (1.0 - B) * inv1 + B * inv2
    # Fractional approach to the locking stretch.
    eta = np.sqrt((inv - 3.0) / (Lm**2.0 - 3.0))
    factor = 1.0 / (1.0 - eta) - a * np.sqrt(0.5 * (inv - 3.0))
    return u * (L - L**-3.0) * factor
#--------------------------------------------------------------------------------
# Calculate the Ds
#--------------------------------------------------------------------------------
def compressibility(v, u, Lm, a, B):
    """Return [D1], the compressibility coefficient from Poisson's ratio.

    Only v (Poisson's ratio) and u (shear modulus) are used; the remaining
    model parameters are accepted for interface uniformity with the other
    material models.
    """
    return [3.0 * (1.0 - 2.0 * v) / (u * (1.0 + v))]
| 41.8 | 82 | 0.474529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,319 | 0.652688 |
c5c5651c307431ee7d13f20d6102f8ee5f585f1c | 6,331 | py | Python | party/migrations/0002_auto__chg_field_party_primaries_date__chg_field_party_qualifying_date_.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | party/migrations/0002_auto__chg_field_party_primaries_date__chg_field_party_qualifying_date_.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | party/migrations/0002_auto__chg_field_party_primaries_date__chg_field_party_qualifying_date_.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Relax four Party columns so they accept NULL values."""
        # Changing field 'Party.primaries_date'
        db.alter_column('party_party', 'primaries_date', self.gf('django.db.models.fields.DateField')(null=True))
        # Changing field 'Party.qualifying_date'
        db.alter_column('party_party', 'qualifying_date', self.gf('django.db.models.fields.DateField')(null=True))
        # Changing field 'Party.open_knesset_id'
        db.alter_column('party_party', 'open_knesset_id', self.gf('django.db.models.fields.IntegerField')(null=True))
        # Changing field 'Party.number_of_members'
        db.alter_column('party_party', 'number_of_members', self.gf('django.db.models.fields.IntegerField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Party.primaries_date'
raise RuntimeError("Cannot reverse this migration. 'Party.primaries_date' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Party.qualifying_date'
raise RuntimeError("Cannot reverse this migration. 'Party.qualifying_date' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Party.open_knesset_id'
raise RuntimeError("Cannot reverse this migration. 'Party.open_knesset_id' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Party.number_of_members'
raise RuntimeError("Cannot reverse this migration. 'Party.number_of_members' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'party.party': {
'Meta': {'object_name': 'Party'},
'first_in_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'open_knesset_id': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'primaries_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True'}),
'qualifying_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['party']
| 65.947917 | 182 | 0.599905 | 6,198 | 0.978992 | 0 | 0 | 0 | 0 | 0 | 0 | 4,330 | 0.683936 |
c5c61d26183ce293ac95d3678eb8c57a71b32702 | 2,767 | py | Python | modules/boost/simd/arithmetic/script/average.py | timblechmann/nt2 | 6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce | [
"BSL-1.0"
] | 2 | 2016-09-14T00:23:53.000Z | 2018-01-14T12:51:18.000Z | modules/boost/simd/arithmetic/script/average.py | timblechmann/nt2 | 6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce | [
"BSL-1.0"
] | null | null | null | modules/boost/simd/arithmetic/script/average.py | timblechmann/nt2 | 6c71f7063ca4e5975c9c019877e6b2fe07c9e4ce | [
"BSL-1.0"
] | null | null | null | [ ## this file was manually modified by jt
{
'functor' : {
'description' : [ "The function always returns a value of the same type than the entry.",
"Take care that for integers the value returned can differ by one unit",
"from \c ceil((a+b)/2.0) or \c floor((a+b)/2.0), but is always one of",
"the two"
],
'module' : 'boost',
'arity' : '2',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'T',
},
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_', 'signed_int_', 'unsigned_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'modified by jt the 28/11/2010',
'included' : [],
'notes' : ['for integer values average does not,coincide with (a0+a1)/2 by at most one unit.'],
'stamp' : 'modified by jt the 13/12/2010',
},
'ranges' : {
'real_' : [['T(-100)', 'T(100)'], ['T(-100)', 'T(100)']],
'signed_int_' : [['T(-100)', 'T(100)'], ['T(-100)', 'T(100)']],
'unsigned_int_' : [['T(0)', 'T(100)'], ['T(0)', 'T(100)']],
},
'specific_values' : {
'default' : {
},
'real_' : {
'boost::simd::Inf<T>()' : 'boost::simd::Inf<T>()',
'boost::simd::Minf<T>()' : 'boost::simd::Minf<T>()',
'boost::simd::Mone<T>()' : 'boost::simd::Mone<T>()',
'boost::simd::Nan<T>()' : 'boost::simd::Nan<T>()',
'boost::simd::One<T>()' : 'boost::simd::One<T>()',
'boost::simd::Zero<T>()' : 'boost::simd::Zero<T>()',
},
'signed_int_' : {
'boost::simd::Mone<T>()' : 'boost::simd::Mone<T>()',
'boost::simd::One<T>()' : 'boost::simd::One<T>()',
'boost::simd::Zero<T>()' : 'boost::simd::Zero<T>()',
},
'unsigned_int_' : {
'boost::simd::One<T>()' : 'boost::simd::One<T>()',
'boost::simd::Zero<T>()' : 'boost::simd::Zero<T>()',
},
},
'verif_test' : {
'property_call' : {
'default' : ['boost::simd::average(a0,a1)'],
},
'property_value' : {
'default' : ['(a0+a1)/2'],
},
'ulp_thresh' : {
'default' : ['1'],
'real_' : ['0'],
},
},
},
'version' : '0.1',
},
]
| 39.528571 | 108 | 0.37116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,518 | 0.548609 |
c5c6c50d1f1d2f7cfb0c7a23c1b784e0443a1dce | 6,165 | py | Python | conductor_helpers/workflow.py | metamorph-inc/conductor-mdao | 293744893aba5c44f025414d83f53cb7cedd5299 | [
"MIT"
] | null | null | null | conductor_helpers/workflow.py | metamorph-inc/conductor-mdao | 293744893aba5c44f025414d83f53cb7cedd5299 | [
"MIT"
] | null | null | null | conductor_helpers/workflow.py | metamorph-inc/conductor-mdao | 293744893aba5c44f025414d83f53cb7cedd5299 | [
"MIT"
] | null | null | null | from __future__ import print_function
from conductor.conductor import MetadataClient, WorkflowClient
class Workflow(object):
    """Builder for a Conductor workflow definition.

    Collects tasks, workflow inputs/outputs and connections, then renders,
    registers and starts the corresponding Conductor workflow via the
    ``MetadataClient`` / ``WorkflowClient`` imported at module level.
    """

    def __init__(self, name, description=None):
        """Create an empty workflow named *name* (description defaults to the name)."""
        self.tasks = {}        # task reference name -> task object (.name, .register(), .start())
        self.inputs = {}       # workflow input name -> default value
        self.outputs = {}      # workflow output name -> Conductor ${...} expression
        self.connections = {}  # destination ('task.input') -> source ('task.output' or input name)
        self.name = name
        self.description = description if description else name

    @staticmethod
    def _resolve_source(src):
        """Translate *src* into a Conductor placeholder expression.

        ``'task.field'`` refers to another task's output; a bare name refers
        to a workflow input.  (Shared by add_output and _definition, which
        previously duplicated this logic.)
        """
        if '.' in src:
            task_ref, field = src.split('.')[:2]
            return '${{{}.output.{}}}'.format(task_ref, field)
        return '${{workflow.input.{}}}'.format(src)

    def add_task(self, name, task):
        """Register *task* under reference name *name*; duplicates are an error.

        BUG FIX: the original tested ``'name' in self.tasks`` (the literal
        string), so duplicate task names were silently overwritten.
        """
        if name in self.tasks:
            raise ValueError('A task with this name already exists')
        self.tasks[name] = task

    def add_input(self, name, default):
        """Declare a workflow input together with its default value."""
        self.inputs[name] = default

    def add_output(self, name, src):
        """Expose *src* (task output 'task.field' or a workflow input) as output *name*."""
        self.outputs[name] = self._resolve_source(src)

    def connect(self, src, dst):
        """Wire source *src* into destination *dst* (``'task.input'``)."""
        self.connections[dst] = src

    def _definition(self):
        """Build and return the Conductor workflow-definition dict."""
        tasks = []
        for task_name in self.tasks:
            task = self._task_definition(task_name)
            task['inputParameters'] = {}
            for dst, src in self.connections.items():
                # Only connections whose destination lives inside this task.
                if dst.startswith(task_name + '.'):
                    input_name = dst.split('.')[1]
                    task['inputParameters'][input_name] = self._resolve_source(src)
            tasks.append(task)
        return {
            'name': self.name,
            'description': self.description,
            'version': 1,
            'tasks': tasks,
            'outputParameters': self.outputs,
            'inputParameters': list(self.inputs.keys()),
            'failureWorkflow': 'cleanup_encode_resources',
            'restarteable': True,
            'workflowStatusListenerEnabled': True,
            'schemaVersion': 2,
        }

    def _task_definition(self, task_name):
        """Minimal SIMPLE-task stanza referenced from the workflow definition."""
        return {
            'name': self.tasks[task_name].name,
            'taskReferenceName': task_name,
            'type': 'SIMPLE',
        }

    def register(self, endpoint='http://localhost:8080/api'):
        """Create/update this workflow definition on the Conductor server."""
        mc = MetadataClient(endpoint)
        mc.updateWorkflowDefs([self._definition()])

    def start(self, start_tasks=False, wait=True):
        """Start one execution.

        When *start_tasks* is true, each task's poller is launched as well.
        When *wait* is true, polls the server until status is COMPLETED and
        returns the workflow output; otherwise returns the workflow id.
        """
        import json
        wc = WorkflowClient('http://localhost:8080/api')
        workflow_id = wc.startWorkflow(wfName=self.name,
                                       inputjson=self.inputs)
        print(json.dumps(workflow_id, indent=2))
        if start_tasks:
            for idx, key in enumerate(self.tasks.keys(), start=1):
                if wait:
                    # We poll the workflow ourselves, so no task needs to keep running.
                    self.tasks[key].start(wait=False)
                else:
                    # Nobody waits on the workflow, so keep the last task's poller alive.
                    self.tasks[key].start(wait=idx == len(self.tasks.keys()))
        if not wait:
            return workflow_id
        import time
        res = wc.getWorkflow(workflow_id)
        # NOTE(review): a FAILED/TERMINATED workflow never reaches COMPLETED,
        # so this loop would spin forever in that case -- mirrors the original.
        while res['status'] != 'COMPLETED':
            time.sleep(0.1)
            res = wc.getWorkflow(workflow_id)
        print(json.dumps(res['output'], indent=2))
        return res['output']

    def register_tasks(self):
        """Register every task's own definition with Conductor."""
        for ref_name, task in self.tasks.items():
            task.register()
# Demo: build and run a Hohmann-transfer (LEO -> GEO) workflow when executed directly.
if __name__ == '__main__':
    from openmdao.examples.hohmann_transfer import VCircComp, TransferOrbitComp, DeltaVComp
    from openmdao_wrapper import OpenMdaoWrapper
    from sum_task import SumTask
    from json import dumps

    # Wrap the OpenMDAO components and summation helpers as Conductor tasks.
    leo = OpenMdaoWrapper(VCircComp())
    geo = OpenMdaoWrapper(VCircComp())
    transfer = OpenMdaoWrapper(TransferOrbitComp())
    dv1 = OpenMdaoWrapper(DeltaVComp())
    dv2 = OpenMdaoWrapper(DeltaVComp())
    dv_total = SumTask('dv_total', num_inputs=2)
    dinc_total = SumTask('dinc_total', num_inputs=2)
    workflow = Workflow('Hohmann Transfer', 'A test for the Workflow class.')
    workflow.add_task('leo', leo)
    workflow.add_task('geo', geo)
    workflow.add_task('transfer', transfer)
    workflow.add_task('dv1', dv1)
    workflow.add_task('dv2', dv2)
    workflow.add_task('dv_total', dv_total)
    workflow.add_task('dinc_total', dinc_total)
    # Workflow inputs: inclination change split across the two burns, the two
    # orbit radii and the gravitational parameter.
    workflow.add_input('dinc1', 28.5 / 2)
    workflow.add_input('dinc2', 28.5 / 2)
    workflow.add_input('r1', 6778.137)
    workflow.add_input('r2', 42164.0)
    workflow.add_input('mu', 398600.4418)
    # Wire workflow inputs and task outputs into each task's inputs.
    workflow.connect('r1', 'leo.r')
    workflow.connect('r1', 'transfer.rp')
    workflow.connect('r2', 'geo.r')
    workflow.connect('r2', 'transfer.ra')
    workflow.connect('mu', 'leo.mu')
    workflow.connect('mu', 'geo.mu')
    workflow.connect('mu', 'transfer.mu')
    workflow.connect('leo.vcirc', 'dv1.v1')
    workflow.connect('transfer.vp', 'dv1.v2')
    workflow.connect('dinc1', 'dv1.dinc')
    workflow.connect('transfer.va', 'dv2.v1')
    workflow.connect('geo.vcirc', 'dv2.v2')
    workflow.connect('dinc2', 'dv2.dinc')
    workflow.connect('dv1.delta_v', 'dv_total.i1')
    workflow.connect('dv2.delta_v', 'dv_total.i2')
    workflow.connect('dinc1', 'dinc_total.i1')
    workflow.connect('dinc2', 'dinc_total.i2')
    # Expose the two burn delta-v values as workflow outputs.
    workflow.add_output('dv1_deltav', 'dv1.delta_v')
    workflow.add_output('dv2_deltav', 'dv2.delta_v')
    # print(dumps(workflow._definition(), indent=2))
    # Register task and workflow definitions, then run to completion.
    workflow.register_tasks()
    workflow.register()
    workflow.start(start_tasks=True)
| 32.447368 | 91 | 0.577129 | 3,971 | 0.64412 | 0 | 0 | 0 | 0 | 0 | 0 | 1,440 | 0.233577 |
c5c799a24a8cdb582f6cb85ceb027f2c8d2d114f | 4,049 | py | Python | vswitch/server.py | jvy1106/vswitch | c5a248889dc4458c5beb4848c864d28692365758 | [
"MIT"
] | null | null | null | vswitch/server.py | jvy1106/vswitch | c5a248889dc4458c5beb4848c864d28692365758 | [
"MIT"
] | null | null | null | vswitch/server.py | jvy1106/vswitch | c5a248889dc4458c5beb4848c864d28692365758 | [
"MIT"
] | null | null | null | '''super basic web server to start/stop and monitor virtual environments'''
import sys
import os
import logging
import web
import time
import argparse
from webpyutils import api
from webpyutils import APIServer
from vswitch import VirtualSwitch
# Resolve the project root: parent directory of this file, or the directory
# containing the active virtualenv when one is set.
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)) + '/../'
if 'VIRTUAL_ENV' in os.environ: PROJECT_PATH = os.path.dirname(os.environ['VIRTUAL_ENV']) + '/'
os.chdir(os.path.join(PROJECT_PATH, 'vswitch'))
# Default configuration lives in the user's home directory (~/.vswitch.yml).
CONFIG_PATH = os.path.join(os.path.expanduser('~'), '.vswitch.yml')
def css_state(state):
    '''Map an environment state to the CSS context class used by templates.

    'running' -> 'success', 'stopped' -> 'danger', anything else -> 'info'.
    '''
    return {'running': 'success', 'stopped': 'danger'}.get(state, 'info')
# Template renderer (exposes css_state to templates as 'state') and the
# single shared VirtualSwitch instance used by every request handler.
render = web.template.render(os.path.join(PROJECT_PATH, 'vswitch/templates'), base='layout', globals={'state': css_state})
vswitch = VirtualSwitch(CONFIG_PATH)
class Index(object):
    '''server static page'''

    def GET(self):
        '''Collect every environment's status and render the index template.'''
        statuses = {name: vswitch.get_status(name)
                    for name in vswitch.get_environments()}
        return render.index(statuses)
class VirtualSwitchAPI(object):
    '''get status of all environments'''

    @api
    def GET(self):
        '''Return {'code': 200, 'data': {env: status, ...}} for every environment.'''
        ret = {'code': 200, 'data': {}}
        for env in vswitch.get_environments():
            ret['data'][env] = vswitch.get_status(env)
        return ret

    @api
    def POST(self):
        '''Toggle an environment on or off.

        Expects two request parameters: ``environment`` and ``toggle``
        ('true'/'false').  Returns a dict with 'code' plus either 'data'
        (the new status) or 'message' (the validation error).
        '''
        ret = {'code': 200}
        # Parse the request body once; the original called web.input() twice,
        # re-parsing the request for each parameter.
        params = web.input()
        environment = params.get('environment')
        toggle = params.get('toggle')
        if toggle in ['true', 'True', 'TRUE']:
            toggle = True
        elif toggle in ['false', 'False', 'FALSE']:
            toggle = False
        if toggle is None:
            ret['code'] = 400
            ret['message'] = 'toggle param is required'
            return ret
        if environment is None:
            ret['code'] = 400
            ret['message'] = 'environment param is required'
            return ret
        if environment not in vswitch.get_environments():
            ret['code'] = 404
            ret['message'] = 'unknown environment: %s' % environment
            return ret
        if toggle is True:
            vswitch.turn_on(environment)
            vswitch.register_elb_instances(environment)
            # Re-register after a short delay, mirroring the original behavior
            # (presumably to catch instances that were still booting -- TODO confirm).
            time.sleep(3)
            vswitch.register_elb_instances(environment)
            ret['data'] = vswitch.get_status(environment)
        elif toggle is False:
            vswitch.turn_off(environment)
            vswitch.deregister_elb_instances(environment)
            ret['data'] = vswitch.get_status(environment)
        else:
            # toggle was supplied but is not a recognised boolean string.
            ret['code'] = 400
            ret['message'] = 'toggle param must be bool value'
        return ret
# web.py routing table: alternating (url-pattern, handler-class) pairs.
URLS = (
    '/', Index,
    '/v1/vswitch', VirtualSwitchAPI,
)
# WSGI application object consumed by APIServer in main().
application = web.application(URLS, globals())
def main():
    '''webserver entry point'''
    # Command-line interface.
    ap = argparse.ArgumentParser(description='virtual environment switch - webserver')
    ap.add_argument('--port', type=int, default=8888, help='Change the port the service listens on. Default is 8888')
    ap.add_argument('--ip', type=str, default='0.0.0.0', help='Set IP address to listen on. Default to 0.0.0.0')
    ap.add_argument('--threads', type=int, default=10, help='Number of threads in webserver threadpool. Defaults is 10 threads')
    ap.add_argument('--debug', action='store_true', help='Puts the service into a debug mode to aid development')
    args = ap.parse_args()
    # Default settings: production mode with INFO logging; --debug flips both.
    web.config.debug = False
    log_level = logging.INFO
    if args.debug:
        web.config.debug = True
        log_level = logging.DEBUG
    # Blocks serving requests until the process is interrupted.
    server = APIServer(application, server_name='vswitch', log_path=os.path.join(PROJECT_PATH, 'logs/vswitch.log'), log_level=log_level)
    server.run(ip=args.ip, port=args.port, threads=args.threads)
# Run the web server when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 32.918699 | 136 | 0.630526 | 1,926 | 0.475673 | 0 | 0 | 1,628 | 0.402075 | 0 | 0 | 1,152 | 0.284515 |
c5c89aef54358898a0e1bcf5928f0cc81d126d81 | 2,441 | py | Python | vmware.py | Sbaljepa/get_esxi_host_info | cb12bcc3712135e22fc456178349c51cbd480c03 | [
"MIT"
] | null | null | null | vmware.py | Sbaljepa/get_esxi_host_info | cb12bcc3712135e22fc456178349c51cbd480c03 | [
"MIT"
] | null | null | null | vmware.py | Sbaljepa/get_esxi_host_info | cb12bcc3712135e22fc456178349c51cbd480c03 | [
"MIT"
] | null | null | null | from con_esxi_host import *
from math import pow, ceil
class vmware:
    """Collects summary information for every VM on a connected ESXi host."""

    def get_vm_info(self):
        """Return a list of dicts describing each VM in the first datacenter.

        Connects through ``con_esxi_host.connect_to_host()`` and reads each
        VM's ``summary``.  Memory and storage figures are rounded up to GB.

        BUG FIXES vs. the original: ``vm_nw`` was missing a comma between the
        'IP' and 'EthernetCards' entries (SyntaxError), and ``vm_info`` read
        an undefined lowercase ``hostname`` instead of ``hostName``
        (NameError).  The unused ``cpuUtilization`` computation (which could
        divide by zero for powered-off VMs) and the unused ``annotation``
        local were dropped.
        """
        si = connect_to_host()
        inv = si.RetrieveContent()
        # Assumes at least one datacenter exists -- TODO confirm for multi-DC hosts.
        dc1 = inv.rootFolder.childEntity[0]
        vmList = dc1.vmFolder.childEntity
        virtual = []
        for vm in vmList:
            summary = vm.summary
            name = summary.config.name
            guestFullName = summary.config.guestFullName
            bootTime = str(summary.runtime.bootTime)
            hostName = summary.guest.hostName
            ipAddress = summary.guest.ipAddress
            memorySizeMB = ceil(summary.config.memorySizeMB / 1024)  # -> GB
            numEthernetCards = summary.config.numEthernetCards
            numVirtualDisks = summary.config.numVirtualDisks
            unshared = ceil(summary.storage.unshared / pow(1024, 3))        # -> GB
            uncommitted = ceil(summary.storage.uncommitted / pow(1024, 3))  # -> GB
            committed = uncommitted + unshared
            numCpu = summary.config.numCpu
            vm_info = {'Type': "Virtual Machine", 'Name': name, 'HostName': hostName}
            vm_kernal = {'Type': "kernal", 'OS': guestFullName, 'Boot Time': bootTime}
            vm_nw = {'Type': "NetWork", 'IP': ipAddress, 'EthernetCards': numEthernetCards}
            vm_strg = {'Type': "Storage", 'Storage': committed, 'Disks': numVirtualDisks}
            vm_mem = {'Type': "RAM", 'Name': "Memory", 'Size': memorySizeMB}
            vm_cpu = {'Type': "CPU", 'Name': "Cores", 'capacity': numCpu}
            vm_info['configuration'] = (vm_kernal, vm_cpu, vm_mem, vm_strg, vm_nw)
            virtual.append(vm_info)
        return virtual
| 51.93617 | 107 | 0.603032 | 2,382 | 0.97583 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.258501 |
c5c995d4fb4af0698845544bbd38a70bcaadb45a | 2,155 | py | Python | src/code.py | aniketdashpute/Watermark-python | 10060af56f6fc67bc66248dd23dbdbb1fbfc40b9 | [
"Apache-2.0"
] | null | null | null | src/code.py | aniketdashpute/Watermark-python | 10060af56f6fc67bc66248dd23dbdbb1fbfc40b9 | [
"Apache-2.0"
] | null | null | null | src/code.py | aniketdashpute/Watermark-python | 10060af56f6fc67bc66248dd23dbdbb1fbfc40b9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import cv2
import matplotlib.pyplot as plt
from pathlib import Path
import glob2 as glob
import os
import sys
savedir = "./output/"
def AddWatermarkFolder(str_foldername, str_watermarkname, alpha1=1.0, alpha2=0.2):
    """Watermark every PNG in *str_foldername* with *str_watermarkname*.

    Each match is delegated to AddWatermark, which writes the blended image
    into the module-level ``savedir``.  (The original bound an unused loop
    index to ``iter``, shadowing the builtin; that was removed.)
    """
    pattern = str_foldername + '/*.png*'
    for path_name in glob.glob(pattern):
        print(path_name)  # progress log
        AddWatermark(path_name, str_watermarkname, alpha1, alpha2)
def AddWatermark(str_imgname, str_watermarkname, alpha1=1.0, alpha2=0.2):
    """Blend a watermark over one image and write the result into savedir.

    alpha1/alpha2 are the blend weights for the image and the watermark.
    Returns the blended RGB image array.
    """
    # Load the image to be watermarked (OpenCV reads BGR; convert to RGB).
    img = cv2.imread(str_imgname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Load the watermark.
    waterimg = cv2.imread(str_watermarkname)
    waterimg = cv2.cvtColor(waterimg, cv2.COLOR_BGR2RGB)
    waterimgRes = __MirrorResize(img, waterimg)
    # Blend: img*alpha1 + watermark*alpha2.
    img_blend = cv2.addWeighted(img, alpha1, waterimgRes, alpha2, 0.0)
    img_blend_write = cv2.cvtColor(img_blend, cv2.COLOR_RGB2BGR)
    # os.path handles both '/' and '\\' separators, unlike the original split('/').
    str_out = os.path.join(savedir, os.path.basename(str_imgname))
    cv2.imwrite(str_out, img_blend_write)
    return img_blend
def __MirrorResize(img, waterimg):
    """Return *waterimg* cropped or edge-replicate padded to img's height/width.

    BUG FIX: both copyMakeBorder calls referenced ``cv.BORDER_REPLICATE``,
    but the module is imported as ``cv2`` -- padding raised NameError.
    The dead ``np.zeros`` pre-allocation (always overwritten) was removed.
    """
    # Height (axis 0): pad with the replicated bottom edge, or crop.
    if img.shape[0] > waterimg.shape[0]:
        bottom_pad = img.shape[0] - waterimg.shape[0]
        waterimgRes = cv2.copyMakeBorder(waterimg, 0, bottom_pad, 0, 0, cv2.BORDER_REPLICATE)
    else:
        waterimgRes = waterimg[:img.shape[0], :, :]
    # Width (axis 1): same treatment on the right edge.
    if img.shape[1] > waterimgRes.shape[1]:
        right_pad = img.shape[1] - waterimgRes.shape[1]
        waterimgRes = cv2.copyMakeBorder(waterimgRes, 0, 0, 0, right_pad, cv2.BORDER_REPLICATE)
    else:
        waterimgRes = waterimgRes[:, :img.shape[1], :]
    return waterimgRes
# Add main support to run file from terminal directly
if __name__ == '__main__':
args = sys.argv
# args[0] = current file
# args[1] = function name
# args[2:] = function args : (*unpacked)
globals()[args[1]](*args[2:]) | 31.691176 | 94 | 0.673782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.185151 |
c5ca707dce77fad1c990e8395c01664a2f944aa1 | 4,980 | py | Python | idfy_rest_client/models/person_person_information.py | dealflowteam/Idfy | fa3918a6c54ea0eedb9146578645b7eb1755b642 | [
"MIT"
] | null | null | null | idfy_rest_client/models/person_person_information.py | dealflowteam/Idfy | fa3918a6c54ea0eedb9146578645b7eb1755b642 | [
"MIT"
] | null | null | null | idfy_rest_client/models/person_person_information.py | dealflowteam/Idfy | fa3918a6c54ea0eedb9146578645b7eb1755b642 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
idfy_rest_client.models.person_person_information
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
from idfy_rest_client.api_helper import APIHelper
class PersonPersonInformation(object):
    """Implementation of the 'Person.PersonInformation' model.

    Auto-generated APIMATIC data model: a plain attribute bag plus
    dictionary deserialization.  API fields not listed in ``_names`` are
    collected into ``additional_properties``.

    Attributes:
        firstname (string): TODO: type description here.
        middlename (string): TODO: type description here.
        lastname (string): TODO: type description here.
        date_of_birth (string): TODO: type description here.
        address (string): TODO: type description here.
        zip_code (string): TODO: type description here.
        city (string): TODO: type description here.
        mobile (string): TODO: type description here.
        phone (string): TODO: type description here.
        gender (string): TODO: type description here.
        raw_json (string): TODO: type description here.
        request_id (string): TODO: type description here.
        dead (datetime): TODO: type description here.
        source (string): TODO: type description here.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "firstname":'Firstname',
        "middlename":'Middlename',
        "lastname":'Lastname',
        "date_of_birth":'DateOfBirth',
        "address":'Address',
        "zip_code":'ZipCode',
        "city":'City',
        "mobile":'Mobile',
        "phone":'Phone',
        "gender":'Gender',
        "raw_json":'RawJson',
        "request_id":'RequestId',
        "dead":'Dead',
        "source":'Source'
    }

    def __init__(self,
                 firstname=None,
                 middlename=None,
                 lastname=None,
                 date_of_birth=None,
                 address=None,
                 zip_code=None,
                 city=None,
                 mobile=None,
                 phone=None,
                 gender=None,
                 raw_json=None,
                 request_id=None,
                 dead=None,
                 source=None,
                 additional_properties=None):
        """Constructor for the PersonPersonInformation class.

        BUG FIX: ``additional_properties`` previously defaulted to a shared
        mutable ``{}``, so every instance built with the default aliased (and
        leaked mutations into) the same dict.  It now defaults to ``None``
        and a fresh dict is created per instance.
        """

        # Initialize members of the class
        self.firstname = firstname
        self.middlename = middlename
        self.lastname = lastname
        self.date_of_birth = date_of_birth
        self.address = address
        self.zip_code = zip_code
        self.city = city
        self.mobile = mobile
        self.phone = phone
        self.gender = gender
        self.raw_json = raw_json
        self.request_id = request_id
        # Falsy dead (None/'') stays None; otherwise wrap in the RFC3339 helper.
        self.dead = APIHelper.RFC3339DateTime(dead) if dead else None
        self.source = source

        # Add additional model properties to the instance (one dict per instance).
        self.additional_properties = {} if additional_properties is None else additional_properties

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary.

        Note: the known keys listed in ``_names`` are *removed* from the
        input ``dictionary``; whatever remains becomes the new instance's
        ``additional_properties`` (the same dict object, mutated in place).

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        firstname = dictionary.get('Firstname')
        middlename = dictionary.get('Middlename')
        lastname = dictionary.get('Lastname')
        date_of_birth = dictionary.get('DateOfBirth')
        address = dictionary.get('Address')
        zip_code = dictionary.get('ZipCode')
        city = dictionary.get('City')
        mobile = dictionary.get('Mobile')
        phone = dictionary.get('Phone')
        gender = dictionary.get('Gender')
        raw_json = dictionary.get('RawJson')
        request_id = dictionary.get('RequestId')
        dead = APIHelper.RFC3339DateTime.from_value(dictionary.get("Dead")).datetime if dictionary.get("Dead") else None
        source = dictionary.get('Source')

        # Clean out expected properties from dictionary
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model
        return cls(firstname,
                   middlename,
                   lastname,
                   date_of_birth,
                   address,
                   zip_code,
                   city,
                   mobile,
                   phone,
                   gender,
                   raw_json,
                   request_id,
                   dead,
                   source,
                   dictionary)
| 34.109589 | 121 | 0.555823 | 4,734 | 0.950602 | 0 | 0 | 1,970 | 0.395582 | 0 | 0 | 2,215 | 0.444779 |
c5cb2fe4e70cd612e8f9a3f0b90ce50441f4e7bc | 1,018 | py | Python | deployer/logger.py | bwood/deployer | d7dfbcd87497e2a1f803201880bbe9d885e9ed1d | [
"MIT"
] | 1 | 2021-03-01T23:58:53.000Z | 2021-03-01T23:58:53.000Z | deployer/logger.py | bwood/deployer | d7dfbcd87497e2a1f803201880bbe9d885e9ed1d | [
"MIT"
] | null | null | null | deployer/logger.py | bwood/deployer | d7dfbcd87497e2a1f803201880bbe9d885e9ed1d | [
"MIT"
] | 2 | 2020-12-04T22:43:10.000Z | 2021-06-09T17:25:25.000Z | import logging
import subprocess
# Module-level logger.  NOTE(review): the name 'simple_example' looks copied
# from the logging cookbook -- consider logging.getLogger(__name__).
logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)
# Console handler: only INFO and above reach the terminal even though the
# logger itself accepts DEBUG.
console_logger = logging.StreamHandler()
console_logger.setLevel(logging.INFO)
# Timestamped "time - level - message" format for console output.
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Attach the formatter to the handler, then the handler to the logger.
console_logger.setFormatter(formatter)
logger.addHandler(console_logger)
def update_colors(color_dictionary):
subprocess.call('', shell=True) # Called to enable ANSI encoding on Windows
escape_code = u'\033'
color_dictionary['error'] = escape_code + u'[91m'
color_dictionary['debug'] = escape_code + u'[3;35m'
color_dictionary['info'] = escape_code + u'[3m'
color_dictionary['warning'] = escape_code + u'[1;33m'
color_dictionary['stack'] = escape_code + u'[1;93m'
color_dictionary['underline'] = escape_code + u'[4m'
color_dictionary['reset'] = escape_code + u'[0m' | 32.83871 | 79 | 0.739686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.352652 |
c5cbbb0cba0072bbd3a2426dbea64e76c05e64a7 | 2,214 | py | Python | home/views.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | home/views.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | home/views.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.cache import cache_page
from django.views.decorators.gzip import gzip_page
from django.views.generic import DetailView
from about.models import AboutItem
from blog.models import Article
from home.models import Home
from case.models import Case, Category
from product.models import Product
from service.models import Service
from website.utils import cache
import logging
logger = logging.getLogger(__name__)
class HomeView(DetailView):
    # Home page view.
    model = Home

    def get_context_data(self, **kwargs):
        """Populate the template context; pick the mobile or desktop template.

        The original duplicated the whole context block in both branches;
        only the product context actually differs, so the shared part is
        built once here.
        """
        # Context shared by both templates.
        kwargs['carousel_list'] = self.object.carousel_set.all().order_by('sequence')
        kwargs['adware_list'] = self.object.adware_set.all().order_by('sequence')
        kwargs['superiority_list'] = self.object.superiority_set.all().order_by('sequence')
        kwargs['category_list'] = Category.objects.all().order_by('sequence')
        kwargs['case_list'] = Case.objects.all()[:6]
        kwargs['service_list'] = Service.objects.all()
        kwargs['aboutitem_list'] = AboutItem.objects.all().order_by('sequence')
        # Detect mobile clients and switch to the mobile template.
        if self.request.is_mobile:
            self.template_name = 'mobile/index.html'
            kwargs['product_list'] = Product.objects.all()
        else:
            self.template_name = 'index.html'
            # Desktop splits the first product out from the next four.
            # NOTE(review): raises IndexError when no products exist -- mirrors the original.
            product_list = Product.objects.all()
            kwargs['first_product'] = product_list[0]
            kwargs['other_products'] = product_list[1:5]
        return super().get_context_data(**kwargs)
def index(request):
    """Trivial health-check style view: always responds with plain 'ok'."""
    return HttpResponse('ok')
| 41 | 95 | 0.677958 | 1,676 | 0.746881 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.192959 |
c5cc1d9938221b8bf194d0222219ed92f3839aa4 | 4,366 | py | Python | lib/scaler/preprocessing_data/data_preprocessor.py | thangbk2209/mfea_autoscaling | 5b9425331734a93a38b4f73cafd11456c5e1fcf7 | [
"MIT"
] | null | null | null | lib/scaler/preprocessing_data/data_preprocessor.py | thangbk2209/mfea_autoscaling | 5b9425331734a93a38b4f73cafd11456c5e1fcf7 | [
"MIT"
] | null | null | null | lib/scaler/preprocessing_data/data_preprocessor.py | thangbk2209/mfea_autoscaling | 5b9425331734a93a38b4f73cafd11456c5e1fcf7 | [
"MIT"
] | 2 | 2020-11-11T13:30:05.000Z | 2021-01-02T10:09:27.000Z | import numpy as np
from pandas import read_csv
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from config import *
from lib.preprocess.read_data import DataReader
from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer
class DataPreprocessor:
    """Loads experiment data and builds sliding-window train/test splits.

    Reads the configured metric series on construction, then produces either
    3-D (LSTM) or flattened 2-D (ANN) input arrays via the ``init_data_*``
    methods.  ``valid_size`` is stored but currently unused here.
    """

    def __init__(self, metrics):
        """*metrics* must carry 'train_data_type' and 'predict_data' keys."""
        self.train_size = Config.TRAIN_SIZE
        self.valid_size = Config.VALID_SIZE
        self.train_data_type = metrics['train_data_type']
        self.predict_data = metrics['predict_data']
        self.google_trace_config = Config.GOOGLE_TRACE_DATA_CONFIG
        self.read_data()

    def read_data(self):
        """Read the raw series and split it into x (inputs) and y (target)."""
        self.data = None
        official_data = DataReader().read()
        self.x_data, self.y_data = self.create_x_y_data(official_data)

    def create_x_y_data(self, official_data):
        """Select input series list x_data and target series y_data by config.

        BUG FIX: unsupported configurations previously printed an error and
        then crashed with UnboundLocalError on the return; they now raise a
        clear ValueError instead.
        """
        if Config.DATA_EXPERIMENT != 'google_trace':
            raise ValueError('Not supported data experiment: %s' % Config.DATA_EXPERIMENT)
        # DEFINE X DATA
        if self.train_data_type == 'cpu_mem':
            x_data = [official_data['cpu'], official_data['mem']]
        elif self.train_data_type == 'cpu':
            x_data = [official_data['cpu']]
        elif self.train_data_type == 'mem':
            x_data = [official_data['mem']]
        else:
            raise ValueError('Unknown train_data_type: %s' % self.train_data_type)
        # DEFINE Y DATA
        if self.predict_data == 'cpu':
            y_data = official_data['cpu']
        elif self.predict_data == 'mem':
            y_data = official_data['mem']
        else:
            raise ValueError('Unknown predict_data: %s' % self.predict_data)
        return x_data, y_data

    def create_timeseries(self, X):
        """Stack the per-metric column vectors in X into one 2-D array."""
        if len(X) > 1:
            data = np.concatenate((X[0], X[1]), axis=1)
            for i in range(2, len(X)):
                data = np.column_stack((data, X[i]))
        else:
            # Single metric: just materialise its rows as an array.
            data = np.array([row for row in X[0]])
        return data

    def create_x(self, timeseries, sliding):
        """Return overlapping windows: sample i is timeseries[i:i+sliding]."""
        return [[timeseries[i + j] for j in range(sliding)]
                for i in range(len(timeseries) - sliding)]

    def _train_test_split(self, sliding, scaler_method):
        """Shared normalize + window + split logic for both model types.

        (Previously duplicated in init_data_lstm and init_data_ann.)
        """
        data_normalizer = DataNormalizer(scaler_method)
        x_timeseries, y_time_series, self.y_scaler = data_normalizer.normalize(
            self.x_data, self.y_data)
        train_point = int(self.train_size * x_timeseries.shape[0])
        x_sample = self.create_x(x_timeseries, sliding)
        x_train = np.array(x_sample[0:train_point - sliding])
        x_test = np.array(x_sample[train_point - sliding:])
        y_train = np.array(y_time_series[sliding:train_point])
        # NOTE: y_test deliberately comes from the raw (unscaled) series,
        # exactly as in the original implementation.
        y_test = np.array(self.y_data[train_point:])
        return x_train, y_train, x_test, y_test, data_normalizer

    def init_data_lstm(self, sliding, scaler_method):
        """Build (samples, sliding, features) arrays for an LSTM model."""
        return self._train_test_split(sliding, scaler_method)

    def init_data_ann(self, sliding, scaler_method):
        """Build flattened (samples, sliding*features) arrays for an ANN/MLP."""
        print('>>> start init data for training ANN model <<<')
        x_train, y_train, x_test, y_test, data_normalizer = self._train_test_split(
            sliding, scaler_method)
        x_train = np.reshape(
            x_train, (x_train.shape[0], sliding * int(x_train.shape[2])))
        x_test = np.reshape(
            x_test, (x_test.shape[0], sliding * int(x_test.shape[2])))
        return x_train, y_train, x_test, y_test, data_normalizer
| 33.584615 | 79 | 0.610399 | 4,068 | 0.931745 | 0 | 0 | 0 | 0 | 0 | 0 | 402 | 0.092075 |
c5cc748812d8a678d614f011ee8f5b8566bed123 | 16,292 | py | Python | devel/.private/px_comm/lib/python2.7/dist-packages/px_comm/msg/_CameraInfo.py | akshastry/Neo_WS | 6c646227b1fedf4fb8cf700533ca8fc47f381b46 | [
"MIT"
] | 1 | 2021-08-31T03:07:52.000Z | 2021-08-31T03:07:52.000Z | devel/.private/px_comm/lib/python2.7/dist-packages/px_comm/msg/_CameraInfo.py | akshastry/Neo_WS | 6c646227b1fedf4fb8cf700533ca8fc47f381b46 | [
"MIT"
] | null | null | null | devel/.private/px_comm/lib/python2.7/dist-packages/px_comm/msg/_CameraInfo.py | akshastry/Neo_WS | 6c646227b1fedf4fb8cf700533ca8fc47f381b46 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from px_comm/CameraInfo.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class CameraInfo(genpy.Message):
  """Autogenerated genpy message class for px_comm/CameraInfo; do not edit by hand."""
  # Metadata derived from the .msg definition at generation time.
  _md5sum = "014513fdee9cefabe3cec97bca5e5c57"
  _type = "px_comm/CameraInfo"
  _has_header = True # flag to mark the presence of a Header object
  _full_text = """#######################################################################
# Image acquisition info #
#######################################################################
# Time of image acquisition, camera coordinate frame ID
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of camera
# +x should point to the right in the image
# +y should point down in the image
# +z should point into the plane of the image
#######################################################################
# Calibration Parameters #
#######################################################################
# These are fixed during camera calibration. Their values will be the #
# same in all messages until the camera is recalibrated. Note that #
# self-calibrating systems may "recalibrate" frequently. #
#######################################################################
# The camera model used.
string camera_model
# The name of the camera.
string camera_name
# The type of the camera.
string camera_type
# The image dimensions with which the camera was calibrated. Normally
# this will be the full camera resolution in pixels.
uint32 image_width
uint32 image_height
# The distortion parameters, size depending on the distortion model.
float64[] D
# The projection parameters, size depending on the projection model.
float64[] P
# Other parameters which are not defined by either the distortion or
# projection model.
float64[] M
# Pose of camera with respect to a specific reference frame.
geometry_msgs/Pose pose
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
  __slots__ = ['header','camera_model','camera_name','camera_type','image_width','image_height','D','P','M','pose']
  _slot_types = ['std_msgs/Header','string','string','string','uint32','uint32','float64[]','float64[]','float64[]','geometry_msgs/Pose']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       header,camera_model,camera_name,camera_type,image_width,image_height,D,P,M,pose

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(CameraInfo, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.camera_model is None:
        self.camera_model = ''
      if self.camera_name is None:
        self.camera_name = ''
      if self.camera_type is None:
        self.camera_type = ''
      if self.image_width is None:
        self.image_width = 0
      if self.image_height is None:
        self.image_height = 0
      if self.D is None:
        self.D = []
      if self.P is None:
        self.P = []
      if self.M is None:
        self.M = []
      if self.pose is None:
        self.pose = geometry_msgs.msg.Pose()
    else:
      # No arguments: initialize every field to its type's default value.
      self.header = std_msgs.msg.Header()
      self.camera_model = ''
      self.camera_name = ''
      self.camera_type = ''
      self.image_width = 0
      self.image_height = 0
      self.D = []
      self.P = []
      self.M = []
      self.pose = geometry_msgs.msg.Pose()
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      # Strings are serialized as a uint32 byte count followed by utf-8 data.
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.camera_model
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.camera_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.camera_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.image_width, _x.image_height))
      # Variable-length float64 arrays: uint32 element count, then packed values.
      length = len(self.D)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(struct.Struct(pattern).pack(*self.D))
      length = len(self.P)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(struct.Struct(pattern).pack(*self.P))
      length = len(self.M)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(struct.Struct(pattern).pack(*self.M))
      _x = self
      buff.write(_get_struct_7d().pack(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.pose is None:
        self.pose = geometry_msgs.msg.Pose()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.camera_model = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.camera_model = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.camera_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.camera_name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.camera_type = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.camera_type = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.image_width, _x.image_height,) = _get_struct_2I().unpack(str[start:end])
      # Variable-length float64 arrays: read uint32 count, then that many doubles.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      s = struct.Struct(pattern)
      end += s.size
      self.D = s.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      s = struct.Struct(pattern)
      end += s.size
      self.P = s.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      s = struct.Struct(pattern)
      end += s.size
      self.M = s.unpack(str[start:end])
      _x = self
      start = end
      end += 56
      (_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.camera_model
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.camera_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self.camera_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.image_width, _x.image_height))
      # numpy variant: arrays are written directly from their raw buffers.
      length = len(self.D)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(self.D.tostring())
      length = len(self.P)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(self.P.tostring())
      length = len(self.M)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(self.M.tostring())
      _x = self
      buff.write(_get_struct_7d().pack(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.pose is None:
        self.pose = geometry_msgs.msg.Pose()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.camera_model = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.camera_model = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.camera_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.camera_name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.camera_type = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.camera_type = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.image_width, _x.image_height,) = _get_struct_2I().unpack(str[start:end])
      # numpy variant: arrays are reconstructed zero-copy with frombuffer.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      s = struct.Struct(pattern)
      end += s.size
      self.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      s = struct.Struct(pattern)
      end += s.size
      self.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      s = struct.Struct(pattern)
      end += s.size
      self.M = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
      _x = self
      start = end
      end += 56
      (_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
# Lazily-created, module-cached struct.Struct objects so repeated
# (de)serialization does not re-parse the same format strings.
def _get_struct_I():
  global _struct_I
  return _struct_I
_struct_2I = None
def _get_struct_2I():
  # "<2I": two little-endian uint32 values (image_width, image_height).
  global _struct_2I
  if _struct_2I is None:
    _struct_2I = struct.Struct("<2I")
  return _struct_2I
_struct_3I = None
def _get_struct_3I():
  # "<3I": Header seq + stamp.secs + stamp.nsecs.
  global _struct_3I
  if _struct_3I is None:
    _struct_3I = struct.Struct("<3I")
  return _struct_3I
_struct_7d = None
def _get_struct_7d():
  # "<7d": Pose position (x, y, z) + orientation quaternion (x, y, z, w).
  global _struct_7d
  if _struct_7d is None:
    _struct_7d = struct.Struct("<7d")
  return _struct_7d
| 35.112069 | 201 | 0.60097 | 15,460 | 0.948932 | 0 | 0 | 0 | 0 | 0 | 0 | 5,339 | 0.327707 |
c5cca9d03c0d9fcd4b29b96bb68a691816ff964e | 3,567 | py | Python | search/models.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 4 | 2019-03-28T06:42:17.000Z | 2021-06-06T13:10:51.000Z | search/models.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 177 | 2018-09-28T14:21:56.000Z | 2022-03-30T21:45:26.000Z | search/models.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 8 | 2018-10-25T20:43:10.000Z | 2022-03-17T14:19:27.000Z | from itertools import chain
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from wagtail.core.models import Page
from wagtail.search.models import Query
from about.models import AboutPage, AboutSubPage, CaseStudyPage, HistoryPage, PeoplePage
from contact.models import ContactPage
from events.models import EventPage
from guidance_and_support.models import GuidanceAndSupportPage, GuidanceGroupPage, GuidancePage
# from guidance_and_support.models import KnowledgebasePage
from news.models import NewsPage
from home.models import AbstractBasePage, StandardPage
from iati_standard.models import IATIStandardPage
class SearchPage(AbstractBasePage):
    """Site search page: runs a query across the content models and serves
    paginated, relevance-ordered results with editor's picks promoted first."""

    class Meta:
        verbose_name = 'Search'

    parent_page_types = ['home.HomePage']
    subpage_types = []
    max_count = 1

    def get_paginated(self, collection, page: int, per_page: int = 10):
        """Return ``(results_page, paginator)`` for *collection*.

        Invalid page numbers are coerced: non-integers fall back to page 1
        and out-of-range values to the last page.

        Note: the previous implementation wrapped the ``Paginator``
        construction in a blanket ``except: pass``, which could leave
        ``paginator`` as ``None`` and crash below with an unrelated
        ``AttributeError`` — real construction errors now surface directly.
        """
        paginator = Paginator(collection, per_page)
        try:
            results = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            results = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results.
            results = paginator.page(paginator.num_pages)
        return results, paginator

    def serve(self, request, page=None):
        """Serve the search page with query info and paginated results."""
        template = self.get_template(request)
        context = self.get_context(request)
        per_page = 10
        searchable_models = [
            AboutPage, AboutSubPage, CaseStudyPage, HistoryPage,
            PeoplePage, ContactPage, EventPage, GuidanceAndSupportPage,
            GuidanceGroupPage, GuidancePage, NewsPage,
            StandardPage, IATIStandardPage,
        ]
        # TODO: add KnowledgebasePage back if activated
        search_query = request.GET.get('query', '')
        page = request.GET.get('page', 1)
        if search_query:
            # Search each model separately, then order the combined hits by
            # relevance score (descending).
            search_results = [r for m in searchable_models
                              for r in m.objects.live().public().search(search_query).annotate_score('_score')]
            search_results = sorted(search_results, key=lambda x: x._score, reverse=True)
            # Editor's picks for this query are promoted ahead of organic hits.
            promoted = [x.page.specific for x in Query.get(search_query).editors_picks.all() if x.page.live]
            query = Query.get(search_query)
            query.add_hit()  # record the query for search-promotion reporting
            results = list(chain(promoted, search_results))
        else:
            results = Page.objects.none()
        search_results, paginator = self.get_paginated(results, page, per_page)
        # Build a sliding window of up to 10 page links around the current page.
        total_pages = search_results.paginator.num_pages if search_results else 0
        range_start = max(search_results.number - 5, 1)
        range_end = min(search_results.number + 4, total_pages)
        context['search_query'] = search_query
        context['search_results'] = search_results
        context['paginator_range'] = list(range(range_start, range_end + 1))
        context['paginator'] = paginator
        return render(request, template, context)
| 37.547368 | 111 | 0.670311 | 2,892 | 0.810765 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.147743 |
c5ce058c2bdc9823f2137dda32497aab8ba53bf6 | 24,607 | py | Python | gluoncv/model_zoo/action_recognition/r2plus1d.py | AND2797/gluon-cv | 7d2d9c2502f18cbdc19c0921982baf511272acb1 | [
"Apache-2.0"
] | 1 | 2020-03-12T14:43:42.000Z | 2020-03-12T14:43:42.000Z | gluoncv/model_zoo/action_recognition/r2plus1d.py | AND2797/gluon-cv | 7d2d9c2502f18cbdc19c0921982baf511272acb1 | [
"Apache-2.0"
] | null | null | null | gluoncv/model_zoo/action_recognition/r2plus1d.py | AND2797/gluon-cv | 7d2d9c2502f18cbdc19c0921982baf511272acb1 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=arguments-differ,unused-argument,line-too-long
"""R2Plus1D, implemented in Gluon. https://arxiv.org/abs/1711.11248.
Code partially borrowed from https://github.com/pytorch/vision/blob/master/torchvision/models/video/resnet.py."""
__all__ = ['R2Plus1D', 'r2plus1d_resnet18_kinetics400',
'r2plus1d_resnet34_kinetics400', 'r2plus1d_resnet50_kinetics400',
'r2plus1d_resnet101_kinetics400', 'r2plus1d_resnet152_kinetics400']
from mxnet import init
from mxnet.context import cpu
from mxnet.gluon.block import HybridBlock
from mxnet.gluon import nn
from mxnet.gluon.nn import BatchNorm
def conv3x1x1(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):
    """3x1x1 (time, height, width) convolution with temporal padding."""
    stride_3d = (temporal_stride, spatial_stride, spatial_stride)
    return nn.Conv3D(in_channels=in_planes, channels=out_planes,
                     kernel_size=(3, 1, 1), strides=stride_3d,
                     padding=(dilation, 0, 0), dilation=dilation,
                     use_bias=False)
class Conv2Plus1D(HybridBlock):
    r"""Factorised (2+1)D convolution: a 1x3x3 spatial convolution followed by
    a 3x1x1 temporal convolution, with batch norm + ReLU in between.

    Parameters
    ----------
    inplanes : int.
        Number of input channels.
    planes : int.
        Number of output channels.
    midplanes : int.
        Number of channels between the spatial and temporal convolutions.
    stride : int, default is 1.
        Stride applied by both convolutions.
    padding : int, default is 1.
        Padding applied spatially (first conv) and temporally (second conv).
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """

    def __init__(self,
                 inplanes,
                 planes,
                 midplanes,
                 stride=1,
                 padding=1,
                 norm_layer=BatchNorm,
                 norm_kwargs=None,
                 **kwargs):
        super(Conv2Plus1D, self).__init__()
        bn_kwargs = {} if norm_kwargs is None else norm_kwargs
        with self.name_scope():
            # Spatial (1x3x3) half of the factorised 3-D convolution.
            self.conv1 = nn.Conv3D(in_channels=inplanes, channels=midplanes,
                                   kernel_size=(1, 3, 3),
                                   strides=(1, stride, stride),
                                   padding=(0, padding, padding),
                                   use_bias=False)
            self.bn1 = norm_layer(in_channels=midplanes, **bn_kwargs)
            self.relu = nn.Activation('relu')
            # Temporal (3x1x1) half.
            self.conv2 = nn.Conv3D(in_channels=midplanes, channels=planes,
                                   kernel_size=(3, 1, 1),
                                   strides=(stride, 1, 1),
                                   padding=(padding, 0, 0),
                                   use_bias=False)

    def hybrid_forward(self, F, x):
        """Apply spatial conv -> BN -> ReLU -> temporal conv."""
        return self.conv2(self.relu(self.bn1(self.conv1(x))))
class BasicBlock(HybridBlock):
    r"""R2+1D residual basic block: two Conv2Plus1D units with an identity
    (or supplied downsampling) shortcut.

    Parameters
    ----------
    inplanes : int.
        Input channels of each block.
    planes : int.
        Output channels of each block.
    stride : int, default is 1.
        Stride of the first factorised convolution.
    downsample : bool.
        Whether to contain a downsampling layer in the block.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    layer_name : str, default is ''.
        Give a name to current block.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 norm_layer=BatchNorm, norm_kwargs=None, layer_name='',
                 **kwargs):
        super(BasicBlock, self).__init__()
        self.downsample = downsample
        bn_kwargs = {} if norm_kwargs is None else norm_kwargs
        # Intermediate width chosen so the (2+1)D factorisation keeps the
        # parameter count of a full 3x3x3 convolution.
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
        with self.name_scope():
            self.conv1 = Conv2Plus1D(inplanes, planes, midplanes, stride)
            self.bn1 = norm_layer(in_channels=planes, **bn_kwargs)
            self.relu = nn.Activation('relu')
            self.conv2 = Conv2Plus1D(planes, planes, midplanes)
            self.bn2 = norm_layer(in_channels=planes, **bn_kwargs)

    def hybrid_forward(self, F, x):
        """Residual forward: two factorised convs, add the shortcut, ReLU."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.Activation(out + shortcut, act_type='relu')
class Bottleneck(HybridBlock):
    r"""R2+1D residual bottleneck block: a 1x1x1 reduction, a factorised
    Conv2Plus1D, and a 1x1x1 expansion, with an identity (or supplied
    downsampling) shortcut.

    Parameters
    ----------
    inplanes : int.
        Input channels of each block.
    planes : int.
        Output channels of each block.
    stride : int, default is 1.
        Stride of the factorised convolution.
    downsample : bool.
        Whether to contain a downsampling layer in the block.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    layer_name : str, default is ''.
        Give a name to current block.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 norm_layer=BatchNorm, norm_kwargs=None, layer_name='',
                 **kwargs):
        super(Bottleneck, self).__init__()
        self.downsample = downsample
        bn_kwargs = {} if norm_kwargs is None else norm_kwargs
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
        with self.name_scope():
            # 1x1x1 channel reduction.
            self.conv1 = nn.Conv3D(in_channels=inplanes, channels=planes, kernel_size=1, use_bias=False)
            self.bn1 = norm_layer(in_channels=planes, **bn_kwargs)
            self.relu = nn.Activation('relu')
            # Factorised (2+1)D convolution in the middle.
            self.conv2 = Conv2Plus1D(planes, planes, midplanes, stride)
            self.bn2 = norm_layer(in_channels=planes, **bn_kwargs)
            # 1x1x1 channel expansion.
            self.conv3 = nn.Conv3D(in_channels=planes, channels=planes * self.expansion,
                                   kernel_size=1, use_bias=False)
            self.bn3 = norm_layer(in_channels=planes * self.expansion, **bn_kwargs)

    def hybrid_forward(self, F, x):
        """Residual forward: reduce -> factorised conv -> expand, add shortcut."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.Activation(out + shortcut, act_type='relu')
class R2Plus1D(HybridBlock):
    r"""The R2+1D network.
    A Closer Look at Spatiotemporal Convolutions for Action Recognition.
    CVPR, 2018. https://arxiv.org/abs/1711.11248

    Parameters
    ----------
    nclass : int
        Number of classes in the training dataset.
    block : Block, default is `Bottleneck`.
        Class for the residual block.
    layers : list of int
        Numbers of layers in each block
    dropout_ratio : float, default is 0.5.
        The dropout rate of a dropout layer.
        The larger the value, the more strength to prevent overfitting.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    init_std : float, default is 0.001.
        Standard deviation value when initialize the dense layers.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    partial_bn : bool, default False.
        Freeze all batch normalization layers during training except the first layer.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    def __init__(self, nclass, block, layers, dropout_ratio=0.5,
                 num_segments=1, num_crop=1, feat_ext=False,
                 init_std=0.001, ctx=None, partial_bn=False,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(R2Plus1D, self).__init__()
        self.partial_bn = partial_bn
        self.dropout_ratio = dropout_ratio
        self.init_std = init_std
        self.num_segments = num_segments
        self.num_crop = num_crop
        self.feat_ext = feat_ext
        self.inplanes = 64
        # Feature dimension fed to the final dense layer.
        self.feat_dim = 512 * block.expansion
        with self.name_scope():
            # Stem: 1x7x7 spatial conv then 3x1x1 temporal conv (factorised).
            self.conv1 = nn.Conv3D(in_channels=3, channels=45, kernel_size=(1, 7, 7),
                                   strides=(1, 2, 2), padding=(0, 3, 3), use_bias=False)
            self.bn1 = norm_layer(in_channels=45, **({} if norm_kwargs is None else norm_kwargs))
            self.relu = nn.Activation('relu')
            self.conv2 = conv3x1x1(in_planes=45, out_planes=64)
            self.bn2 = norm_layer(in_channels=64, **({} if norm_kwargs is None else norm_kwargs))
            # partial_bn: set use_global_stats after the stem BNs are built so
            # only later-created BN layers would be frozen.
            if self.partial_bn:
                if norm_kwargs is not None:
                    norm_kwargs['use_global_stats'] = True
                else:
                    norm_kwargs = {}
                    norm_kwargs['use_global_stats'] = True
            # NOTE(review): the mutated norm_kwargs above is not forwarded to
            # _make_res_layer (which defaults norm_kwargs=None), so the frozen-BN
            # flag may never reach the residual stages — confirm intent.
            # NOTE(review): planes is pre-multiplied by block.expansion here and
            # the block multiplies by expansion again internally; for Bottleneck
            # this looks inconsistent with feat_dim = 512 * expansion — verify
            # against the resnet50+ variants.
            self.layer1 = self._make_res_layer(block=block,
                                               planes=64 * block.expansion,
                                               blocks=layers[0],
                                               layer_name='layer1_')
            self.layer2 = self._make_res_layer(block=block,
                                               planes=128 * block.expansion,
                                               blocks=layers[1],
                                               stride=2,
                                               layer_name='layer2_')
            self.layer3 = self._make_res_layer(block=block,
                                               planes=256 * block.expansion,
                                               blocks=layers[2],
                                               stride=2,
                                               layer_name='layer3_')
            self.layer4 = self._make_res_layer(block=block,
                                               planes=512 * block.expansion,
                                               blocks=layers[3],
                                               stride=2,
                                               layer_name='layer4_')
            self.avgpool = nn.GlobalAvgPool3D()
            self.dropout = nn.Dropout(rate=self.dropout_ratio)
            self.fc = nn.Dense(in_units=self.feat_dim, units=nclass,
                               weight_initializer=init.Normal(sigma=self.init_std))
    def hybrid_forward(self, F, x):
        """Hybrid forward of R2+1D net"""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Collapse the singleton temporal/spatial axes left by global pooling.
        x = F.squeeze(x, axis=(2, 3, 4))
        # segmental consensus
        x = F.reshape(x, shape=(-1, self.num_segments * self.num_crop, self.feat_dim))
        x = F.mean(x, axis=1)
        if self.feat_ext:
            # Return penultimate-layer features instead of class logits.
            return x
        x = self.fc(self.dropout(x))
        return x
    def _make_res_layer(self,
                        block,
                        planes,
                        blocks,
                        stride=1,
                        norm_layer=BatchNorm,
                        norm_kwargs=None,
                        layer_name=''):
        """Build each stage of a ResNet"""
        downsample = None
        # Shortcut needs a projection when the stage changes stride or width.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.HybridSequential(prefix=layer_name + 'downsample_')
            with downsample.name_scope():
                downsample.add(nn.Conv3D(in_channels=self.inplanes,
                                         channels=planes * block.expansion,
                                         kernel_size=1,
                                         strides=(stride, stride, stride),
                                         use_bias=False))
                downsample.add(norm_layer(in_channels=planes * block.expansion,
                                          **({} if norm_kwargs is None else norm_kwargs)))
        layers = nn.HybridSequential(prefix=layer_name)
        with layers.name_scope():
            # First block of the stage carries the stride and the projection.
            layers.add(block(inplanes=self.inplanes,
                             planes=planes,
                             stride=stride,
                             downsample=downsample))
            self.inplanes = planes * block.expansion
            for _ in range(1, blocks):
                layers.add(block(inplanes=self.inplanes, planes=planes))
        return layers
def r2plus1d_resnet18_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                  root='~/.mxnet/models', num_segments=1, num_crop=1,
                                  feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet18 backbone trained on Kinetics400 dataset.

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.

    Returns
    -------
    HybridBlock
        The constructed (and optionally pretrained) R2Plus1D-ResNet18 network.
    """
    # Forward num_segments / num_crop / feat_ext explicitly; previously these
    # documented arguments were accepted but silently ignored by the builder.
    model = R2Plus1D(nclass=nclass,
                     block=BasicBlock,
                     layers=[2, 2, 2, 2],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)

    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet18_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        # Attach the human-readable Kinetics400 class names to the model.
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)

    return model
def r2plus1d_resnet34_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                  root='~/.mxnet/models', num_segments=1, num_crop=1,
                                  feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet34 backbone trained on Kinetics400 dataset.

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.

    Returns
    -------
    HybridBlock
        The constructed (and optionally pretrained) R2Plus1D-ResNet34 network.
    """
    # Forward num_segments / num_crop / feat_ext explicitly; previously these
    # documented arguments were accepted but silently ignored by the builder.
    model = R2Plus1D(nclass=nclass,
                     block=BasicBlock,
                     layers=[3, 4, 6, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)

    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet34_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        # Attach the human-readable Kinetics400 class names to the model.
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)

    return model
def r2plus1d_resnet50_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                  root='~/.mxnet/models', num_segments=1, num_crop=1,
                                  feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet50 backbone trained on Kinetics400 dataset.

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.

    Returns
    -------
    HybridBlock
        The constructed (and optionally pretrained) R2Plus1D-ResNet50 network.
    """
    # Forward num_segments / num_crop / feat_ext explicitly; previously these
    # documented arguments were accepted but silently ignored by the builder.
    model = R2Plus1D(nclass=nclass,
                     block=Bottleneck,
                     layers=[3, 4, 6, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)

    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet50_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        # Attach the human-readable Kinetics400 class names to the model.
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)

    return model
def r2plus1d_resnet101_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                   root='~/.mxnet/models', num_segments=1, num_crop=1,
                                   feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet101 backbone trained on Kinetics400 dataset.

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.

    Returns
    -------
    HybridBlock
        The constructed (and optionally pretrained) R2Plus1D-ResNet101 network.
    """
    # Forward num_segments / num_crop / feat_ext explicitly; previously these
    # documented arguments were accepted but silently ignored by the builder.
    model = R2Plus1D(nclass=nclass,
                     block=Bottleneck,
                     layers=[3, 4, 23, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)

    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet101_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        # Attach the human-readable Kinetics400 class names to the model.
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)

    return model
def r2plus1d_resnet152_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                   root='~/.mxnet/models', num_segments=1, num_crop=1,
                                   feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet152 backbone trained on Kinetics400 dataset.

    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.

    Returns
    -------
    HybridBlock
        The constructed (and optionally pretrained) R2Plus1D-ResNet152 network.
    """
    # Forward num_segments / num_crop / feat_ext explicitly; previously these
    # documented arguments were accepted but silently ignored by the builder.
    model = R2Plus1D(nclass=nclass,
                     block=Bottleneck,
                     layers=[3, 8, 36, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)

    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet152_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        # Attach the human-readable Kinetics400 class names to the model.
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)

    return model
| 41.70678 | 113 | 0.580038 | 13,622 | 0.553582 | 0 | 0 | 0 | 0 | 0 | 0 | 10,507 | 0.426992 |
c5cf1d1707b39d0f011396e7faf49933a0c0daf1 | 1,079 | py | Python | run.py | jsicot/idref2zotero | 0838c10cd3236296cd685c84daa77bd8e48567f1 | [
"MIT"
] | 1 | 2020-01-27T09:14:23.000Z | 2020-01-27T09:14:23.000Z | run.py | jsicot/idref2zotero | 0838c10cd3236296cd685c84daa77bd8e48567f1 | [
"MIT"
] | null | null | null | run.py | jsicot/idref2zotero | 0838c10cd3236296cd685c84daa77bd8e48567f1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import retrieve_author_ppn as autppn
import retrieve_references as refs
import zot_helpers as pyzot
from itertools import islice
# Zotero's write API caps batch writes; push at most 50 items per request.
BATCH_SIZE = 50

researchers = autppn.constructOutput('test.csv')
autppn.writeCsv('out.csv', researchers)

for person in researchers:
    ppn = person['ppn']
    if ppn == "":
        # No IdRef identifier for this researcher; nothing to fetch.
        continue

    author = person['firstname'] + " " + person['lastname']
    collection = person['lastname'].lower() + "_" + person['ppn']

    # Fetch this author's references and keep only those with role 'aut'.
    raw_refs = refs.getReferences(ppn)
    biblio = refs.getRefsByRole(raw_refs, 'aut', author)
    count = len(biblio)
    print(f"Pushing {count} items in Zotero bibliography : {collection}")

    coll_id = pyzot.create_collection(collection)
    # Upload the bibliography in BATCH_SIZE-sized slices.
    for offset in range(0, count, BATCH_SIZE):
        upper = min(offset + BATCH_SIZE, count)
        pyzot.create_items(coll_id, list(islice(biblio, offset, upper)))