| hexsha (stringlengths 40..40) | size (int64 4..1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4..209) | max_stars_repo_name (stringlengths 5..121) | max_stars_repo_head_hexsha (stringlengths 40..40) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 4..209) | max_issues_repo_name (stringlengths 5..121) | max_issues_repo_head_hexsha (stringlengths 40..40) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 4..209) | max_forks_repo_name (stringlengths 5..121) | max_forks_repo_head_hexsha (stringlengths 40..40) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 4..1.02M) | avg_line_length (float64 1.07..66.1k) | max_line_length (int64 4..266k) | alphanum_fraction (float64 0.01..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
930dfe4b34f879e793ca67eeaa01c43a64b3839b | 8,089 | py | Python | src/T2D23D.py | WYGNG/USTC_SSE_Project | 1c0cd4056f40445aed13ec1ae584608d625b9127 | ["MIT"] | null | null | null | src/T2D23D.py | WYGNG/USTC_SSE_Project | 1c0cd4056f40445aed13ec1ae584608d625b9127 | ["MIT"] | null | null | null | src/T2D23D.py | WYGNG/USTC_SSE_Project | 1c0cd4056f40445aed13ec1ae584608d625b9127 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import xlrd
import math
from scipy import optimize
# Compute the angle in degrees, with (x1, y1, z1) as the vertex
def get_angle1(x1,y1,z1,x2,y2,z2,x3,y3,z3):
a=math.sqrt((x2-x3)**2+(y2-y3)**2+(z2-z3)**2)
b=math.sqrt((x1-x3)**2+(y1-y3)**2+(z1-z3)**2)
c=math.sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)
if c*b==0:
cosA=1
else:
cosA=(a**2-c**2-b**2)/(-2*c*b)
if cosA < -1.0:
cosA=-1.0
elif cosA>1.0:
cosA=1.0
A=math.acos(cosA)
deg=math.degrees(A)
return deg
# Definition of the 12 body-segment (link) lengths
# L = [40, 34, 34, 29, 29, 58, 58, 40, 50, 50, 42, 42]
# Compute the initial value of the scale factor s from the 2D joint coordinates
def get_s(point,L):
s = []
s.append(math.sqrt((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / L[0])
s.append(math.sqrt((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / L[1])
s.append(math.sqrt((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / L[2])
s.append(math.sqrt((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / L[3])
s.append(math.sqrt((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / L[4])
s.append(math.sqrt((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / L[5])
s.append(math.sqrt((point[0] - point[12]) ** 2 + (point[1] - point[13]) ** 2) / L[6])
s.append(math.sqrt((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / L[7])
s.append(math.sqrt((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / L[8])
s.append(math.sqrt((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / L[9])
s.append(math.sqrt((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / L[10])
s.append(math.sqrt((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / L[11])
s_target = max(s)
#print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&",s_target)
return s_target
# Compute 3D joint coordinates from the 2D joint coordinates and the scale factor s
def get_point_3d(point, s_target,L):
z0 = 525 / s_target
point_3d = []
point_3d.append([point[22] / s_target, point[23] / s_target, z0]) # 0
dz11 = math.sqrt(
max(L[10] ** 2 - ((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / (s_target ** 2), 0))
if point[33]<point[35]:
dz11=-dz11
z14 = z0 + dz11
point_3d.append([point[18] / s_target, point[19] / s_target, z14]) # 1
dz9 = math.sqrt(max(L[8] ** 2 - ((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / (s_target ** 2), 0))
if point[31]<point[33]:
dz9=-dz9
z12 = z14 + dz9
point_3d.append([point[14] / s_target, point[15] / s_target, z12]) # 2
dz8 = math.sqrt(max(L[7] ** 2 - ((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / (s_target ** 2), 0))
if point[30]<point[31]:
dz8=-dz8
z11 = z12 + dz8
point_3d.append([point[12] / s_target, point[13] / s_target, z11]) # 3
dz10 = math.sqrt(
max(L[9] ** 2 - ((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / (s_target ** 2), 0))
if point[32]<point[30]:
dz10=-dz10
z13 = z11 + dz10
point_3d.append([point[16] / s_target, point[17] / s_target, z13]) # 4
dz12 = math.sqrt(
max(L[11] ** 2 - ((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / (s_target ** 2), 0))
if point[34]<point[32]:
dz12=-dz12
z15 = z13 + dz12
point_3d.append([point[20] / s_target, point[21] / s_target, z15]) # 5
dz6 = math.sqrt(max(L[5] ** 2 - ((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / (s_target ** 2), 0))
if point[25]<point[31]:
dz6=-dz6
z6 = z12 + dz6
point_3d.append([point[2] / s_target, point[3] / s_target, z6]) # 6
dz2 = math.sqrt(max(L[1] ** 2 - ((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / (s_target ** 2), 0))
if point[27]<point[25]:
dz2=-dz2
z8 = z6 + dz2
point_3d.append([point[6] / s_target, point[7] / s_target, z8]) # 7
dz4 = math.sqrt(max(L[3] ** 2 - ((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / (s_target ** 2), 0))
if point[29]<point[27]:
dz4=-dz4
z10 = z8 + dz4
point_3d.append([point[10] / s_target, point[11] / s_target, z10]) # 8
dz1 = math.sqrt(max(L[0] ** 2 - ((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / (s_target ** 2), 0))
if point[24]<point[25]:
dz1=-dz1
z5 = z6 + dz1
point_3d.append([point[0] / s_target, point[1] / s_target, z5]) # 9
dz3 = math.sqrt(max(L[2] ** 2 - ((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / (s_target ** 2), 0))
if point[26]<point[24]:
dz3=-dz3
z7 = z5 + dz3
    point_3d.append([point[4] / s_target, point[5] / s_target, z7])  # 10
dz5 = math.sqrt(max(L[4] ** 2 - ((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / (s_target ** 2), 0))
if point[28]<point[26]:
dz5=-dz5
z9 = z7 + dz5
point_3d.append([point[8] / s_target, point[9] / s_target, z9]) # 11
return point_3d
# Objective function for single-frame optimization
def f(s, point, s_target,L):
dz1 = math.sqrt(max(L[0] ** 2 - ((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / (s_target ** 2), 0))
dz2 = math.sqrt(max(L[1] ** 2 - ((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / (s_target ** 2), 0))
dz3 = math.sqrt(max(L[2] ** 2 - ((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / (s_target ** 2), 0))
dz4 = math.sqrt(max(L[3] ** 2 - ((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / (s_target ** 2), 0))
dz5 = math.sqrt(max(L[4] ** 2 - ((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / (s_target ** 2), 0))
dz6 = math.sqrt(max(L[5] ** 2 - ((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / (s_target ** 2), 0))
dz8 = math.sqrt(max(L[7] ** 2 - ((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / (s_target ** 2), 0))
dz9 = math.sqrt(max(L[8] ** 2 - ((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / (s_target ** 2), 0))
dz10 = math.sqrt(
max(L[9] ** 2 - ((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / (s_target ** 2), 0))
dz11 = math.sqrt(
max(L[10] ** 2 - ((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / (s_target ** 2), 0))
dz12 = math.sqrt(
max(L[11] ** 2 - ((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / (s_target ** 2), 0))
y = 0
y += (s * math.sqrt(L[0] ** 2 - dz1 ** 2) - math.sqrt((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2)) ** 2 +\
(s * math.sqrt(L[1] ** 2 - dz2 ** 2) - math.sqrt((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2)) ** 2 +\
(s * math.sqrt(L[2] ** 2 - dz3 ** 2) - math.sqrt((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2)) ** 2 +\
(s * math.sqrt(L[3] ** 2 - dz4 ** 2) - math.sqrt((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2)) ** 2 +\
(s * math.sqrt(L[4] ** 2 - dz5 ** 2) - math.sqrt((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2)) ** 2 +\
(s * math.sqrt(L[5] ** 2 - dz6 ** 2) - math.sqrt((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2)) ** 2 +\
(s * math.sqrt(L[7] ** 2 - dz8 ** 2) - math.sqrt((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2)) ** 2 +\
(s * math.sqrt(L[8] ** 2 - dz9 ** 2) - math.sqrt((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2)) ** 2 +\
(s * math.sqrt(L[9] ** 2 - dz10 ** 2) - math.sqrt((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2)) ** 2 +\
(s * math.sqrt(L[10] ** 2 - dz11 ** 2) - math.sqrt((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2)) ** 2 +\
(s * math.sqrt(L[11] ** 2 - dz12 ** 2) - math.sqrt((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2)) ** 2
# print("dz!!!!!!!!!!!!!!!!!!!!!!!",dz1,dz2,dz3,dz4,dz5,dz6,dz8,dz9,dz10,dz11,dz12)
# print("\n")
return y
# Objective function for multi-frame optimization
def f_s(s, begin, end,worksheet1, L):
z = 0
for i in range(end - begin + 1):
point = worksheet1.row_values(begin + i)
point.remove(point[0])
# s_target = get_s(point)
z += f(s[i], point, s[i], L)
return z
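The excerpt above defines the helpers but no driver code. The sketch below shows one plausible way to chain them for a single frame; it is not part of the repository. It assumes `point` is a flat list whose entries 0-23 are the (x, y) pairs of the 12 joints and whose entries 24-35 carry the values used by the sign tests above, and that `L` is the 12-segment length list from the comment near `get_s`; `lift_frame_to_3d` is a hypothetical helper name.

```python
from scipy import optimize

def lift_frame_to_3d(point, L):
    """Estimate the scale factor, refine it for one frame, then lift the joints to 3D."""
    s0 = get_s(point, L)                                          # initial scale factor
    res = optimize.minimize_scalar(lambda s: f(s, point, s0, L))  # single-frame refinement
    return get_point_3d(point, res.x, L)                          # 12 [x, y, z] joints
```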
| 48.14881 | 129 | 0.491532 |
83f3bb07bc302b2491d0d82b05b82098c462eaf8 | 777 | py | Python | minigest/tributi/models/tributo/inps.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | ["MIT"] | null | null | null | minigest/tributi/models/tributo/inps.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | ["MIT"] | 1 | 2021-09-22T19:10:20.000Z | 2021-09-22T19:10:20.000Z | minigest/tributi/models/tributo/inps.py | ctrlmaniac/minigest | 2bfceb57e41c872e4112e24d0e6991164846888b | ["MIT"] | null | null | null |
from django.db import models
from .tributo import Tributo
class TributoSezInps(models.Model):
imposta = models.ForeignKey(
Tributo, on_delete=models.CASCADE, related_name="sezione_inps"
)
sede = models.CharField(max_length=10, verbose_name="codice sede")
causale = models.CharField(max_length=10, verbose_name="causale contributo")
matricola = models.CharField(
max_length=20, verbose_name="matricola INPS/codice INPS/filiale azienda"
)
periodo_da = models.DateField()
periodo_a = models.DateField()
debito = models.DecimalField(
max_digits=19, decimal_places=2, verbose_name="importo a debito versato"
)
class Meta:
verbose_name = "Sezione INPS"
verbose_name_plural = "Sezione INPS"
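A brief usage sketch, not taken from the repository: the `related_name` above exposes the INPS rows on a `Tributo` instance as `sezione_inps`, and `inps_debito_totale` below is a hypothetical helper that relies only on that relation and the `debito` field.

```python
def inps_debito_totale(tributo):
    """Total 'importo a debito versato' across all INPS sections of one tributo."""
    return sum(sezione.debito for sezione in tributo.sezione_inps.all())
```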
| 26.793103 | 80 | 0.707851 |
3745c76ade618dc18df531964a9859090af4d097 | 3,062 | py | Python | twentyc/vodka/tools/django.py | 20c/vodka1 | 34fb1c23a27c9a3b2eb59821a6bf967e04c18941 | ["Apache-2.0"] | null | null | null | twentyc/vodka/tools/django.py | 20c/vodka1 | 34fb1c23a27c9a3b2eb59821a6bf967e04c18941 | ["Apache-2.0"] | null | null | null | twentyc/vodka/tools/django.py | 20c/vodka1 | 34fb1c23a27c9a3b2eb59821a6bf967e04c18941 | ["Apache-2.0"] | null | null | null |
# django <-> vodka synergy
"""
Xbahn replication receiver to replicate django-namespace-perms data to valid
vodka perms using the vodkatools module manager
"""
import twentyc.xbahn.couchdb.replication as replication
import twentyc.vodka.tools.module_manager as modman
import twentyc.database as database
from twentyc.tools.thread import RunInThread
import time
class NamespacePermsReceiver(replication.Receiver):
def __init__(self, xbahn_connection, couchdb_client, config, namespace, batch_limit=1000, batch=True):
replication.Receiver.__init__(self, xbahn_connection, couchdb_client, config, namespace, batch_limit=batch_limit, batch=batch)
self.module_manager = modman.ModuleManager()
self.user_couchdb = database.ClientFromConfig(
"couchdb",
self.config.get("vodka_modules"),
"vodka_modules"
)
self.module_manager.set_database(self.user_couchdb)
self.module_manager.set_xbahn(self.xbahn.get("main"))
self.module_manager.disable_perms_log = True
def handle_save_GroupPermission(self, docname, id, doc):
print docname, doc
self.module_manager.pgroup_perms_set(
doc.get("group").get("name"),
doc.get("namespace"),
doc.get("permissions"),
force=True,
source="xbahn replication",
reason="xbahn replication"
)
def handle_delete_GroupPermission(self, docname, ids, docs):
for id, doc in docs.items():
self.module_manager.pgroup_perms_set(
doc.get("group").get("name"),
doc.get("namespace"),
-1,
force=True,
source="xbahn replication",
reason="xbahn replication"
)
def handle_save_UserPermission(self, docname, id, doc):
self.module_manager.perms_set(
doc.get("user"),
doc.get("namespace"),
doc.get("permissions"),
force=True,
source="xbahn replication",
reason="xbahn replication"
)
def handle_delete_UserPermission(self, docname, ids, docs):
for id, doc in docs.items():
self.module_manager.perms_set(
doc.get("user"),
doc.get("namespace"),
-1,
force=True,
source="xbahn replication",
reason="xbahn replication"
)
def handle_save_User_groups(self, docname, id, doc):
t = time.time()
self.module_manager.pgroup_grant(
doc.get("group").get("name"),
doc.get("user"),
source="xbahn replication",
reason="xbahn replication"
)
t2 = time.time()
print "Assign group: %s %s %.5f" % (doc.get("group").get("name"), doc.get("user"), (t2-t))
def handle_delete_User_groups(self, docname, ids, docs):
for id, doc in docs.items():
t = time.time()
self.module_manager.pgroup_revoke(
doc.get("group").get("name"),
doc.get("user"),
source="xbahn replication",
reason="xbahn replication",
)
t2 = time.time()
print "Revoking group: %s %s %.5f" % (doc.get("group").get("name"), doc.get("user"), (t2-t))
###############################################################################
| 31.56701 | 130 | 0.643697 |
a9dce7f39a2b646d80e4acbbe5ce59549d4ab9f4 | 626 | py | Python | plugins/groupinfo.py | fosslife/grambot | fbec1a8df939823b18915d4689e9da6f5adb871b | ["MIT"] | 7 | 2020-05-28T04:08:02.000Z | 2022-02-22T18:11:03.000Z | plugins/groupinfo.py | fosslife/grambot | fbec1a8df939823b18915d4689e9da6f5adb871b | ["MIT"] | 1 | 2021-07-28T10:12:25.000Z | 2021-12-13T15:09:43.000Z | plugins/groupinfo.py | fosslife/grambot | fbec1a8df939823b18915d4689e9da6f5adb871b | ["MIT"] | 4 | 2020-03-30T18:27:08.000Z | 2022-02-25T16:28:06.000Z |
from userbot import bot, logger
from telethon import TelegramClient, events
from config import groupinfo
@bot.on(events.NewMessage(**groupinfo))
async def fn(event):
logger.info("group info plugin called")
try:
id = event.message.to_id.channel_id
logger.info(f"sending group id - {id}")
await event.respond(f"groupid - {id}")
except AttributeError:
id = event.message.to_id.user_id
logger.info(f"sending user id - {id}")
await event.respond(f"userid - {id}")
except Exception as e:
logger.exception(f"Error while fetching records {e}")
return
| 29.809524 | 61 | 0.662939 |
07bdafc18774bfcd6fa8cf5e347de36ff0a4e666 | 12,308 | py | Python | generated/intermediate/ansible-module-rest/azure_rm_apimanagementnotificationrecipientemail.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | ["MIT"] | null | null | null | generated/intermediate/ansible-module-rest/azure_rm_apimanagementnotificationrecipientemail.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | ["MIT"] | null | null | null | generated/intermediate/ansible-module-rest/azure_rm_apimanagementnotificationrecipientemail.py | audevbot/autorest.devops.debug | a507fb6e2dd7826212537f27d583f203aac1c28f | ["MIT"] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_apimanagementnotificationrecipientemail
version_added: '2.9'
short_description: Manage Azure NotificationRecipientEmail instance.
description:
- 'Create, update and delete instance of Azure NotificationRecipientEmail.'
options:
resource_group:
description:
- The name of the resource group.
required: true
type: str
service_name:
description:
- The name of the API Management service.
required: true
type: str
notification_name:
description:
- Notification Name Identifier.
required: true
type: str
email:
description:
- User Email subscribed to notification.
type: str
id:
description:
- Resource ID.
type: str
name:
description:
- Resource name.
type: str
type:
description:
- Resource type for API Management resource.
type: str
state:
description:
- Assert the state of the NotificationRecipientEmail.
- >-
Use C(present) to create or update an NotificationRecipientEmail and
C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: ApiManagementCreateNotificationRecipientEmail
azure_rm_apimanagementnotificationrecipientemail:
resource_group: myResourceGroup
service_name: myService
notification_name: myNotification
email: myRecipientEmail
- name: ApiManagementDeleteNotificationRecipientEmail
azure_rm_apimanagementnotificationrecipientemail:
resource_group: myResourceGroup
service_name: myService
notification_name: myNotification
email: myRecipientEmail
state: absent
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type for API Management resource.
returned: always
type: str
sample: null
properties:
description:
- Recipient Email contract properties.
returned: always
type: dict
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# this is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMNotificationRecipientEmail(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
updatable=False,
disposition='resourceGroupName',
                required=True
),
service_name=dict(
type='str',
updatable=False,
disposition='serviceName',
                required=True
),
notification_name=dict(
type='str',
updatable=False,
disposition='notificationName',
                required=True
),
            email=dict(
                type='str',
                updatable=False,
                disposition='/properties/*',
                required=True
            ),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.service_name = None
self.notification_name = None
self.email = None
self.id = None
self.name = None
self.type = None
self.properties = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200, 201, 202]
self.to_do = Actions.NoAction
self.body = {}
self.query_parameters = {}
self.query_parameters['api-version'] = '2019-01-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
super(AzureRMNotificationRecipientEmail, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
self.url = ('/subscriptions' +
'/{{ subscription_id }}' +
'/resourceGroups' +
'/{{ resource_group }}' +
'/providers' +
'/Microsoft.ApiManagement' +
'/service' +
'/{{ service_name }}' +
'/notifications' +
'/{{ notification_name }}' +
'/recipientEmails' +
'/{{ recipient_email_name }}')
self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
self.url = self.url.replace('{{ resource_group }}', self.resource_group)
self.url = self.url.replace('{{ service_name }}', self.service_name)
self.url = self.url.replace('{{ notification_name }}', self.notification_name)
self.url = self.url.replace('{{ recipient_email_name }}', self.name)
old_response = self.get_resource()
if not old_response:
self.log("NotificationRecipientEmail instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log('NotificationRecipientEmail instance already exists')
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log('Need to Create / Update the NotificationRecipientEmail instance')
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_resource()
# if not old_response:
self.results['changed'] = True
# else:
# self.results['changed'] = old_response.__ne__(response)
self.log('Creation / Update done')
elif self.to_do == Actions.Delete:
self.log('NotificationRecipientEmail instance deleted')
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_resource():
time.sleep(20)
else:
self.log('NotificationRecipientEmail instance unchanged')
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
self.results["name"] = response["name"]
self.results["type"] = response["type"]
self.results["properties"] = response["properties"]
return self.results
def create_update_resource(self):
# self.log('Creating / Updating the NotificationRecipientEmail instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30)
except CloudError as exc:
self.log('Error attempting to create the NotificationRecipientEmail instance.')
self.fail('Error creating the NotificationRecipientEmail instance: {0}'.format(str(exc)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
pass
return response
def delete_resource(self):
# self.log('Deleting the NotificationRecipientEmail instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'DELETE',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
except CloudError as e:
self.log('Error attempting to delete the NotificationRecipientEmail instance.')
self.fail('Error deleting the NotificationRecipientEmail instance: {0}'.format(str(e)))
return True
def get_resource(self):
# self.log('Checking if the NotificationRecipientEmail instance {0} is present'.format(self.))
found = False
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
found = True
self.log("Response : {0}".format(response))
# self.log("NotificationRecipientEmail instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the NotificationRecipientEmail instance.')
if found is True:
return response
return False
def main():
AzureRMNotificationRecipientEmail()
if __name__ == '__main__':
main()
| 33.720548 | 113 | 0.53843 |
c9fb9128994d65e802fe7e09c5ea25c6fdc37c5a | 2,290 | py | Python | tests/test_plotting.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | ["MIT"] | 1 | 2020-11-20T17:17:50.000Z | 2020-11-20T17:17:50.000Z | tests/test_plotting.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | ["MIT"] | 12 | 2020-11-19T05:22:13.000Z | 2020-12-15T03:50:33.000Z | tests/test_plotting.py | dpanici/DESC | e98a16394d02411952efc18cc6c009e5226b11e4 | ["MIT"] | null | null | null |
import unittest
from desc.plotting import Plot
class TestPlot(unittest.TestCase):
def setUp(self):
self.names = ['B', '|B|', 'B^zeta', 'B_zeta', 'B_r', 'B^zeta_r',
'B_zeta_r', 'B**2', 'B_r**2', 'B^zeta**2', 'B_zeta**2',
'B^zeta_r**2', 'B_zeta_r**2']
self.bases = ['B', '|B|', 'B', 'B', 'B', 'B',
'B', 'B', 'B', 'B', 'B',
'B', 'B']
self.sups = ['', '', 'zeta', '', '', 'zeta',
'', '', '', 'zeta', '',
'zeta', '']
self.subs = ['', '', '', 'zeta', '', '',
'zeta', '', '', '', 'zeta',
'', 'zeta']
self.ds = ['', '', '', '', 'r', 'r',
'r', '', 'r', '', '',
'r', 'r']
self.pows = ['', '', '', '', '', '',
'', '2', '2', '2', '2',
'2', '2']
self.name_dicts = []
self.plot = Plot()
for name in self.names:
self.name_dicts.append(self.plot.format_name(name))
def test_name_dict(self):
self.assertTrue(all([self.name_dicts[i]['base'] == self.bases[i] for i in
range(len(self.names))]))
self.assertTrue(all([self.name_dicts[i]['sups'] == self.sups[i] for i in
range(len(self.names))]))
self.assertTrue(all([self.name_dicts[i]['subs'] == self.subs[i] for i in
range(len(self.names))]))
self.assertTrue(all([self.name_dicts[i]['d'] == self.ds[i] for i in
range(len(self.names))]))
self.assertTrue(all([self.name_dicts[i]['power'] == self.pows[i] for i in
range(len(self.names))]))
def test_name_label(self):
labels = [self.plot.name_label(nd) for nd in self.name_dicts]
print(labels)
self.assertTrue(all([label[0] == '$' and label[-1] == '$' for label in labels]))
self.assertTrue(all(['/dr' in labels[i] for i in range(len(labels)) if
self.name_dicts[i]['d'] != '']))
self.assertTrue(all(['^{' not in labels[i] for i in range(len(labels))
if self.name_dicts[i]['sups'] == '' and self.name_dicts[i]['power'] == '']))
self.assertTrue(all(['_{' not in labels[i] for i in range(len(labels))
if self.name_dicts[i]['subs'] == '']))
| 43.207547 | 88 | 0.461135 |
932b93c10975f6027c651fddd480d265b5c3415e | 461 | py | Python | pygsuite/docs/doc_elements/paragraph_elements/horizontal_rule.py | gitter-badger/pygsuite | 536766c36f653edbc7585141f1c3327f508e19da | ["MIT"] | null | null | null | pygsuite/docs/doc_elements/paragraph_elements/horizontal_rule.py | gitter-badger/pygsuite | 536766c36f653edbc7585141f1c3327f508e19da | ["MIT"] | null | null | null | pygsuite/docs/doc_elements/paragraph_elements/horizontal_rule.py | gitter-badger/pygsuite | 536766c36f653edbc7585141f1c3327f508e19da | ["MIT"] | null | null | null |
from pygsuite.docs.doc_elements.paragraph_elements.base_paragraph_element import (
BaseParagraphElement,
)
class HorizontalRule(BaseParagraphElement):
def __init__(self, element, presentation):
BaseParagraphElement.__init__(self, element, presentation)
self._detail = element.get("textRun")
@property
def content(self):
return self._detail.get("content")
def style(self):
return self._detail.get("style")
| 27.117647 | 82 | 0.720174 |
7436114e673ba22378b51b3f6dc2a27ca24646eb | 529 | py | Python | SecondExamPrepare/trainers.py | nikolayvutov/Python | 55163496dac452a7110b7f76edc6894ee195f1fe | ["MIT"] | null | null | null | SecondExamPrepare/trainers.py | nikolayvutov/Python | 55163496dac452a7110b7f76edc6894ee195f1fe | ["MIT"] | null | null | null | SecondExamPrepare/trainers.py | nikolayvutov/Python | 55163496dac452a7110b7f76edc6894ee195f1fe | ["MIT"] | null | null | null |
n = int(input())
teams = {'Theoretical': 0.0, 'Technical': 0.0, 'Practical': 0.0}
for i in range(n):
dist = float(input())
cargo = float(input())
team = input()
try:
teams[team] += (cargo * 100 * 15) - (7 * dist * 16 * 25)
    except KeyError:
teams[team] = (cargo * 100 * 15) - (7 * dist * 16 * 25)
max = None
answer = None
for k, v in teams.items():
if max == None or answer == None or max < v:
max = v
answer = k
print('The {0} Trainers win with ${1:.3f}.'.format(answer, max))
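A worked example of the payoff formula above, with made-up inputs:

```python
# One delivery with dist = 10 and cargo = 5 (illustrative values only):
revenue = 5 * 100 * 15       # 7500
cost = 7 * 10 * 16 * 25      # 28000
print(revenue - cost)        # -20500: long, light deliveries can push a team below zero
```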
| 23 | 64 | 0.52552 |
0e2ec1ba014ff3379d60684153d2cf9e40b1479b | 11,208 | py | Python | HTMLgen/HTMLcalendar.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | ["Intel", "Unlicense"] | 4 | 2021-10-17T11:17:59.000Z | 2022-02-28T16:58:40.000Z | HTMLgen/HTMLcalendar.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | ["Intel", "Unlicense"] | 17 | 2021-10-05T21:44:06.000Z | 2022-03-31T16:58:40.000Z | HTMLgen/HTMLcalendar.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | ["Intel", "Unlicense"] | 8 | 2021-09-02T18:55:49.000Z | 2022-03-09T21:05:28.000Z |
#!/usr/bin/env python
# COPYRIGHT (C) 1997 ROBIN FRIEDRICH
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
"""Generate HTML calendars
This module reads a text file containing appointment information and
generates web pages containing those scheduled appointments in a
nicely formatted linear table for each month."""
import string, time
from HTMLcolors import *
from HTMLgen import SimpleDocument, TableLite, TD, TR, Font, Name, H, Center, Href
from types import IntType
from calendar import day_name, month_name, mdays, weekday
__version__ = '$Id$'
__author__ = 'Robin Friedrich'
__date__ = '10/13/97'
# Constants
day_month_order = 0 # set to 1 to parse day/month ordering (europe)
# set to 0 to parse month/day ordering (usa)
DateError = 'date error'
PAGECOLOR = PEACH
WEEKDAYCOLOR = GRAY1
WEEKENDCOLOR = GREEN3
TIMECOLOR = BLUE
SOURCETEXTFILE = 'appt.txt'
HTMLSUFFIX = 'html' # use htm if you must
def main():
"""Main routine to drive script and serve as usage example.
"""
parsed_appts = read_appt_file(SOURCETEXTFILE)
current_year = Year()
current_year.load_appointments(parsed_appts)
current_year.write('./', 'Calendar')
class Appointment:
"""Represent an appointment entry.
"""
def __init__(self, hr='9', min='00', text='Nothing'):
self.hr = hr
self.min = min
self.text = text
def __str__(self):
return "%s %s" % (Font(self.hr +':'+ self.min,
size = -1, color = TIMECOLOR),
Font(self.text, size = -1))
class Day:
"""Represent a day record.
Appointment instances are appended to instances of this class.
"""
def __init__(self, year=1997, month=1, day=1):
self.weekday = day_name[weekday(year,month,day)]
self.year = `year`
self.month = `month`
self.day = `day`
self.appointments = []
def append(self, appt):
self.appointments.append(appt)
def repr_date_cell(self):
return '%s %s' % (self.weekday,
Name(self.month +'-'+self.day, self.day))
def repr_appt_cell(self):
strlist = map(str, self.appointments)
return string.join(strlist, '<BR>')
def make_row(self):
if self.weekday in ('Saturday','Sunday'):
row = TR(bgcolor=WEEKENDCOLOR)
else:
row = TR(bgcolor=WEEKDAYCOLOR)
row.append(TD(self.repr_date_cell(), align='right',html_escape='off'))
row.append(TD(self.repr_appt_cell(),html_escape='off'))
return row
def __str__(self):
return str(self.make_row())
class DateTable( TableLite ):
"""Table of days used to represent the month in HTML.
Adds methods allowing direct indexing by day of month.
datetable[7] will return the Day object corresponding
to the 7th day of the month.
"""
def __setitem__(self, i, value):
self.contents[i-1] = value
def __getitem__(self, i):
return self.contents[i-1]
class Month( SimpleDocument ):
"""Class representing a month as an HTML document (page).
    Tweaks to the appearance of the calendar would mostly be done here.
Indexing into this class instance will reference the DateTable object
contained within.
"""
def __init__(self, year=1997, month=1):
SimpleDocument.__init__(self, bgcolor = PAGECOLOR)
self.year = year
self.month = month
self.monthname = month_name[month]
self.title = self.monthname + ' ' + str(self.year)
self.ndays = mdays[month]
now = time.strftime("%c", time.localtime(time.time()))
self.append( '<P>', Font("Updated " + now, size = -1) )
self.append( Center( H(3,self.title) ) )
self.datetable = DateTable(cellpadding=6, cellspacing=2, border=3)
self.jumper = ''
for i in range(1, self.ndays+1):
self.jumper = self.jumper + str(Href('#'+`month`+'-'+`i`, `i`)) + ' '
self.datetable.append(Day(self.year, self.month, i))
self.append( Center(self.jumper, html_escape='off') )
self.append('<P>')
self.append( self.datetable )
def add_appt(self, appt):
"""Argument is a 6-tuple.
Form:
(year, month, day, hour, minute, text)
int int int str str str <- type
"""
(year, month, day, hour, minute, text) = appt
if (self.year, self.month) != (year, month):
raise DateError
self[day].append(Appointment(hour, minute, text))
def __setitem__(self, index, dayobj):
try:
self.datetable[index] = dayobj
except IndexError:
print 'Not %d days in %s' % (index, self.monthname)
def __getitem__(self, index):
return self.datetable[index]
class Year:
"""Represent a year as a series of 12 Month instances.
"""
def __init__(self, year=1997):
self.year = year
self.months = [0] #spacer so indexing by month works.
for m in range(1,13):
self.months.append(Month(self.year, m))
def load_appointments(self, data):
"""Load each entry in the appointment list.
"""
for (year, month, day, hour, minute, text) in data:
if year == self.year:
self.months[month][day].append(Appointment(hour, minute, text))
def makeindex(self, i):
"""Utility method to generate the navigation hyperlinks
for each month. To be placed at the top of each page.
"""
index = []
for j in range(1,len(month_name)):
if j == i:
index.append(month_name[j])
else:
index.append(str(Href(self.namefile(j), month_name[j])))
return string.join(index, ' | ')
def namefile(self, i):
"""Generate the html filenames.
"""
return "%s%s-%s.%s" % (self.directory,
self.prefix,
string.zfill(i,2),
HTMLSUFFIX)
def write(self, directory='./', prefix = 'cal', mmin=1, mmax=12):
"""Causes the emission of all pages.
To restrict ranges of months, use 3rd and 4th arg to specify
starting and stopping months by number.
"""
self.directory = directory
self.prefix = prefix
for i in range(mmin, mmax+1):
self.months[i].prepend( Center(self.makeindex(i), html_escape='off') )
self.months[i].write( self.namefile(i) )
def makeint(value):
"""return an integer given either a string or integer
"""
try:
return string.atoi(value)
except TypeError:
if type(value) == IntType:
return value
else:
raise TypeError, ('cannot convert to int', value)
import regex
datepat = regex.compile('^ *\([0-9*][0-9]?\)[/-]' #first 2 char date field
'\([0-9][0-9]?\)[/-]?' #second 2 char date field
'\([12][0-9][0-9][0-9]\)?[ \t]*:') #optional year field
daypat = regex.compile('^ *\('+string.join(day_name,'\|')+'\)')
timepat = regex.compile('\([0-9][0-9]?\):\([0-9][0-9]\)')
def read_appt_file(filename):
"""Parsing function.
Setting the day_month_order flag to 1 will make the parser read
month value from the second position rather than the first.
Example:
2/15/1997: 3:25 Text for the appointment (year must be 4 digit)
4:45 This will be placed on the same day
5/21: 8:00 Leaving off the year will pick up the current year
*/15: 2:00 This will place the appt on the 15th of every month
5:30 Also as above
Friday: 3:30 Place this one on every Friday of the current year.
The ":" after the day entry is significant.
A single appointment text cannot span more than one line.
"""
data = []
current_year = time.localtime(time.time())[0]
f = open(filename, 'r')
for line in f.readlines():
if string.strip(line) == '': #skip blank lines
continue
if datepat.search(line) > -1:
if day_month_order:
month , day , year = datepat.group(2,1,3)
else:
month , day , year = datepat.group(1,2,3)
if year == None:
year = current_year
else:
year = makeint(year)
line = line[datepat.regs[0][1]:]
elif daypat.search(line) > -1:
dayofweek = daypat.group(1)
month, day = (None, None)
year = current_year
line = line[daypat.regs[0][1]:]
if timepat.search(line) > -1:
hour , minute = timepat.group(1,2)
line = line[timepat.regs[0][1]:]
else: #if no time is given just nullify the values
hour = ''
minute = ''
text = string.strip(line)
if month == '*':
#print 'day of month case'
for m in range(1,13):
day = makeint(day)
data.append((year, m, day, hour, minute, text))
elif (month, day) == (None, None):
#print 'day of week case'
for (y, m, d) in list_day_of_week(year, dayofweek):
data.append((y, m, d, hour, minute, text))
else:
#print 'normal case'
month = makeint(month)
day = makeint(day)
data.append((year, month, day, hour, minute, text))
#print (year, month, day, hour, minute, text)
f.close()
return data
def list_day_of_week(year, daystring):
"""return a list of ~52 days corresponding to the given day of the week"""
year = makeint(year)
    for i in range(1,8): # start at beginning of year, look for the day string
if day_name[weekday(year, 1, i)] == daystring: break
    # now that we know the initial day we add it to the list and
# increment by 7 days
list = [(year, 1, i)]
sec_per_week = 60*60*24*7
secs = time.mktime((year, 1, i, 0, 0, 0, 0, 0, 0))
maxsecs = time.mktime((year, 12, 31, 1, 0, 0, 0, 0, 0))
secs = secs + sec_per_week
while secs < maxsecs:
list.append(time.localtime(secs)[:3])
secs = secs + sec_per_week
return list
if __name__ == '__main__': main()
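For reference, an illustrative `appt.txt` that follows the line formats documented in `read_appt_file` above; the entries themselves are invented:

```
2/15/1997: 3:25 Dentist appointment
           4:45 Pick up the report
5/21: 8:00 Staff meeting
*/15: 2:00 Monthly review
Friday: 3:30 Site inspection
```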
| 36.747541 | 82 | 0.584939 |
68a569e5724f42db636a67c027a5b4ddb386a2f5 | 396 | py | Python | nrc/nrc/geocode.py | SkyTruth/scraper | c1903a74c717a7b36a05f0f466c51544911c4499 | ["MIT"] | 2 | 2016-07-01T02:41:17.000Z | 2020-04-04T16:16:55.000Z | nrc/nrc/geocode.py | SkyTruth/scraper | c1903a74c717a7b36a05f0f466c51544911c4499 | ["MIT"] | 4 | 2015-01-14T17:00:12.000Z | 2015-06-29T19:36:27.000Z | nrc/nrc/geocode.py | SkyTruth/scraper | c1903a74c717a7b36a05f0f466c51544911c4499 | ["MIT"] | null | null | null |
# Nrc Geocoder
#Sample query
# http://maps.google.com/maps/geo?output=xml&q=25443&key=ABQIAAAA9E7RFHw8xvp1qJV2sqmcnhRzLYtVAEvZxjHSR41mZtuamhVQghSkixo_dpWZfJxxjhJsEwWBcmEhTw
#Problem - want to use the spider to perform the request?
class NrcGeocoder (object):
def __init__(self):
pass
    def getGeoCode(self, address):
pass
#TODO: implement me
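The class above is an empty stub (note the TODO), so the following is only a sketch of what `getGeoCode` might do, built from the sample query URL in the comment; the key, the helper name, and the XML handling are placeholders, not the project's actual implementation.

```python
import requests

def get_geocode(address, api_key="YOUR_KEY"):
    """Sketch: issue the geocoding query shown in the sample URL and return the raw XML."""
    params = {"output": "xml", "q": address, "key": api_key}
    resp = requests.get("http://maps.google.com/maps/geo", params=params, timeout=10)
    resp.raise_for_status()
    return resp.text
```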
| 24.75 | 143 | 0.691919 |
c04a3d09fe0d2fdea3200b765ba203507a753d41 | 762 | py | Python | src/Python/tests/test_neo4j_access.py | PathwayAnalysisPlatform/ProteoformNetworks | 3d31e5b3cb4abc45e6419fa982c08b3dc5c2624e | ["Apache-2.0"] | 1 | 2019-08-16T12:40:14.000Z | 2019-08-16T12:40:14.000Z | src/Python/tests/test_neo4j_access.py | LuisFranciscoHS/ProteoformNetworks | a6baa87fe6f76905f6d58a2f7cb66aad5d8d56c5 | ["Apache-2.0"] | 9 | 2019-08-16T07:33:33.000Z | 2022-03-04T22:20:02.000Z | src/Python/tests/test_neo4j_access.py | LuisFranciscoHS/ProteoformNetworks | a6baa87fe6f76905f6d58a2f7cb66aad5d8d56c5 | ["Apache-2.0"] | 1 | 2022-02-21T17:42:48.000Z | 2022-02-21T17:42:48.000Z |
import pytest
import pandas as pd
from lib.graph_database_access import get_query_result
def test_get_query_result():
query = "MATCH (p:Pathway{stId:\"R-HSA-9634600\"}) RETURN p.displayName as Name"
result = get_query_result(query)
assert len(result) == 1
assert type(result) == pd.DataFrame
assert "Name" in result.columns
assert result.iloc[0]['Name'] == "Regulation of glycolysis by fructose 2,6-bisphosphate metabolism"
def test_get_query_result_empty_result():
query = "MATCH (p:Pathway{stId:\"something_wrong\"}) RETURN p.displayName as Name"
result = get_query_result(query)
assert len(result) == 0
assert type(result) == pd.DataFrame
assert "Name" not in result.columns
assert len(result.columns) == 0
| 34.636364 | 103 | 0.721785 |
0c1aac6c79860b47233380412cdfd204f83d237a | 12,467 | py | Python | tweetsearch.py | codykingham/spacex_smartweets | b2beddbe544d0b7ae23434fc3ade24bbae778e78 | ["MIT"] | 1 | 2020-06-26T18:17:41.000Z | 2020-06-26T18:17:41.000Z | tweetsearch.py | codykingham/spacex_smartweets | b2beddbe544d0b7ae23434fc3ade24bbae778e78 | ["MIT"] | 1 | 2020-02-09T21:19:29.000Z | 2020-02-09T21:19:29.000Z | tweetsearch.py | codykingham/spacex_smartweets | b2beddbe544d0b7ae23434fc3ade24bbae778e78 | ["MIT"] | 1 | 2019-06-04T14:37:27.000Z | 2019-06-04T14:37:27.000Z |
import os, twitter, re, json, requests, time, pytz
from datetime import datetime
# NLP tools
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from nltk.corpus import wordnet
lemma = WordNetLemmatizer()
# NB: splits off #s and @s; otherwise, use TweetTokenizer().tokenize
tokenizer = nltk.word_tokenize
spacexdir = os.path.expanduser('~/github/spacex_smartweets/')
keys = os.path.join(spacexdir, 'keys.json')
seentweets = os.path.join(spacexdir, 'seen_tweets.txt')
log = os.path.join(spacexdir,'log.txt')
# get authorization keys
with open(keys, 'r') as infile:
keys = json.load(infile)
# get last collected tweets
with open(seentweets, 'r') as infile:
seen_tweets = infile.read().split()
with open(log, 'r') as infile:
log_file = infile.read()
# instance Twitter-API
api = twitter.Api(**keys['twitter'], tweet_mode='extended')
def closeSession(log_file, seen_tweets):
"""Write final files."""
with open(log, 'w') as outfile:
outfile.write(log_file)
with open(seentweets, 'w') as outfile:
outfile.write(seen_tweets)
def get_wordnet_pos(word):
""" Map POS tag to first character lemmatize() accepts.
Credit: https://www.machinelearningplus.com/nlp/lemmatization-examples-python/
^ which saved me some time from thinking through this...
"""
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {
'J': wordnet.ADJ,
'N': wordnet.NOUN,
'V': wordnet.VERB,
'R': wordnet.ADV
}
return tag_dict.get(tag, wordnet.NOUN)
def lemmatizeTweet(tweetstring):
"""Lemmatize and lowercase tweet text.
Returns a set of lemmas.
"""
lower_words = [w.lower() for w in tokenizer(tweetstring)]
pos_words = [get_wordnet_pos(w) for w in lower_words]
lemmas = [
lemma.lemmatize(w, pos)
for w, pos in zip(lower_words, pos_words)
]
return lemmas
def matchWords(lemmas, regex):
"""Iterate through lemmas and look for match."""
regex = re.compile(regex)
for lemma in lemmas:
match = regex.match(lemma)
if match:
return match
return match
def matchTweet(tweet, match_terms):
"""Match tweets using regex.
Args:
tweet: twitter Status object
match_terms: a set of terms to be
joined on | for regex matching
Returns:
boolean on whether a match
"""
if tweet is None:
return None
tweet_lemmas = lemmatizeTweet(tweet.full_text)
match_pattern = '|'.join(match_terms)
re_match = matchWords(tweet_lemmas, match_pattern)
return re_match
def formatTweetURL(user, status_id):
"""Returns formatted URL for tweets."""
return f'https://twitter.com/{user}/status/{status_id}'
# Tweet Triggers, Organized by "domain"
# Terms support regex pattern matching
# NB: all tweet strings are lowercased for matching
starship = {
'starship', 'sn\d+', 'bn\d+',
    'superheavy', 'raptor', 'bellyflop',
    'hopper', 'tps'
}
bocachica = {
'bay', 'highbay', 'midbay',
'boca', 'chica', 'starbase',
'shipyard'
}
starbase = starship | bocachica
spacecraft = {
'thrust', 'rocket', 'isp',
'pad', 'engine', 'fairing', 'booster',
'propellant', 'ch4', 'turbopump', 'nosecone',
'tank', 'flap',
}
spacexthings = {
'falcon', 'merlin', 'ocisly', 'octagrabber',
'octograbber', 'jrti', 'droneship', 'starlink',
'39a', 'dragon', 'draco', 'superdraco',
}
missions = {'dearmoon'}
spacexthings |= missions
space = {
'space', 'mars', 'orbit', 'orbital', 'flight',
'crewed', 'launch', 'moon', 'lunar'
}
testing = {
'test','road', 'close', 'open', 'shut',
'reopen', 'sheriff', 'vent', 'loud',
'sound', 'site', 'launch', 'hover', 'hop',
'roar', 'rumble', 'lit', 'flash', 'flare',
'explosion', 'explode', 'visible', 'shut',
'block', 'roadblock', 'notam', 'tfr', 'tfrs',
'hangar', 'foundation'
}
mcgregor = {'mcgregor', 'raptor', 'test', 'loud', '#spacextests', 'roar'}
spacex_mentions = {'spacex'}
elon_mentions = {'elonmusk'}
nasa_mentions = {'nasa'}
# People/tweets to track + their triggers
people = {
'@elonmusk': {
'real_name':'Elon Musk',
'triggers': starbase|spacexthings|spacecraft|space|spacex_mentions|nasa_mentions,
'replies': True,
'bio': 'the one and only'
},
'@bocachicagal': {
'real_name':'Mary',
'triggers': testing|starbase,
'bio': 'posts updates on tests'
},
'@RGVReagan': {
'real_name': 'Mark Reagan',
'triggers': spacex_mentions|starbase|elon_mentions,
'replies': True,
'bio': 'journalist with @Brownsvillenews'
},
'@SpacePadreIsle': {
'real_name': 'Spadre',
'triggers': spacex_mentions|starbase|testing,
'bio': 'spadre surfing'
},
'@SpaceX': {
'real_name': 'Space Exploration Technologies',
'triggers': set(),
'all_tweets': True,
'replies': True,
'bio': 'the big one'
},
'@austinbarnard45': {
'real_name': 'Austin Barnard',
'triggers': testing|starbase,
'bio': 'Local who takes pictures and streams sometimes'
},
'@bluemoondance74': {
'real_name': 'Reagan Beck',
'triggers': {'spacextests','#spacextests'},
'bio': 'Lives near McGregor test facility'
},
'@Teslarati': {
'real_name': 'Teslarati',
'triggers': spacexthings|starbase|nasa_mentions|spacex_mentions,
'replies': True,
'bio': 'News'
},
'@Erdayastronaut': {
'real_name': 'Tim Dodd',
'triggers': spacexthings|starbase,
'bio': 'Space Youtuber'
},
'@SciGuySpace': {
'real_name': 'Eric Berger',
'triggers': spacexthings|spacex_mentions|elon_mentions|starbase,
'bio': 'Senior Space Editor at Ars Technica'
},
'@_brendan_lewis': {
'real_name': 'Brendan',
'triggers': starbase,
'media': True,
'bio': 'Tweets diagrams'
},
# '@TrevorMahlmann': {
# 'real_name': '',
# 'triggers': spacex_mentions|starbase|spacexthings,
# 'media': True,
# 'bio': 'Tweets photos'
# },
'@ErcXspace': {
'real_name': '',
'triggers': starbase,
'media': True,
'bio': 'Tweets renders'
},
'@Neopork85': {
'real_name': '',
'triggers': starbase,
'media': True,
'bio': 'Tweets renders'
},
'@C_Bass3d': {
'real_name': 'Corey',
'triggers': starship,
'media': True,
'bio': '3D models'
},
'@RGVaerialphotos': {
'real_name': '',
'triggers': spacex_mentions|starbase|spacexthings,
'media': True,
'bio': 'Tweets aerials'
},
'@EmreKelly': {
'real_name': 'Emre Kelly',
'triggers': spacex_mentions|spacexthings|starbase|elon_mentions,
'bio': 'Space reporter @Florida_today & @usatoday'
},
'@fael097': {
'real_name': 'Rafael Adamy',
'triggers': starbase,
'bio': 'builds SNX diagrams'
},
'@NASASpaceflight': {
'real_name': 'Chris B',
'triggers': starbase,
'bio': 'Runs Nasaspaceflight'
},
'@nextspaceflight': {
'real_name': 'Michael Baylor',
'triggers': starbase,
'bio': 'Works for Nasaspaceflight'
},
'@TheFavoritist': {
'real_name': 'Brady Kenniston',
'triggers': starbase,
'bio': 'Works for Nasaspaceflight'
},
'@thejackbeyer': {
'real_name': 'Jack Beyer',
'triggers': starbase,
'bio': 'Works for Nasaspaceflight'
},
'@BocaRoad': {
'real_name': '',
'triggers': {'closure'},
'all_tweets': True,
'bio': 'Posts road closures'
},
}
def searchTweets(log_file=log_file, seen_tweets=seen_tweets):
# check network connection
try:
requests.get('http://x.com') # Elon's tiny website :)
except requests.ConnectionError:
log_file += f'{datetime.now().__str__()}\t\tno connection\n'
closeSession(log_file, ' '.join(seen_tweets))
return None
for person, userdat in people.items():
# load all eligible tweets
do_replies = userdat.get('replies', False)
user_tweets = api.GetUserTimeline(
screen_name=person,
include_rts=userdat.get('retweets', False),
exclude_replies=(not do_replies),
count=20
)
# scan tweets for matches
for tweet in user_tweets:
# skip seen tweets or those older than 30 mins (1800 secs)
now = datetime.now(tz=pytz.utc)
tweet_time = datetime.strptime(tweet.created_at, '%a %b %d %H:%M:%S +0000 %Y')\
.replace(tzinfo=pytz.utc)
tweet_age = (now - tweet_time).total_seconds()
if tweet.id_str in seen_tweets or tweet_age > 4000:
continue
# if tweet is a reply:
# check whether the reply is in response to a matching tweet
if do_replies:
try:
original_tweet = api.GetStatus(tweet.in_reply_to_status_id) if tweet.in_reply_to_status_id else None
except: # if orig. tweet is missing
original_tweet = None
else:
original_tweet = None
# search for tweet matches
match_terms = userdat['triggers']
tweet_match = matchTweet(tweet, match_terms)
orig_match = matchTweet(original_tweet, match_terms)
match_media = (
userdat.get('media', False)
and bool(getattr(tweet, 'media', False))
)
match_alltweets = userdat.get('all_tweets', False)
is_match = any([
bool(tweet_match),
bool(orig_match),
match_alltweets,
match_media,
])
# trigger a notification if match
if is_match:
# format and ship tweet and data
tweet_url = formatTweetURL(person, tweet.id_str)
# rm twitter URLs; prevents Slack from double-preview
clean_text = re.split(
'https://t.co',
tweet.full_text,
maxsplit=1
)
clean_text = clean_text[0].strip()
# format pushed message
match = tweet_match or orig_match or ['']
if not match[0] and match_media:
match = ['MEDIA']
person_name = tweet.user.name
send_text = f'`• {person_name} •`\n{clean_text}\n{tweet_url} "_{match[0]}_"'
# add original tweet if the tweet is a reply to an unseen other tweet
if orig_match and (original_tweet.id_str not in seen_tweets):
orig_name = original_tweet.user.name
orig_text = original_tweet.full_text
send_text = (
f'`• {orig_name} •`\n{orig_text}\n'
'|\n'
'|\n'
'|\n'
) + send_text
# push Slack post
requests.post(
url=keys['slack']['webhook'],
data=json.dumps({'text':send_text})
)
# push Discord post
#requests.post(url=keys['discord']['webhook'],
# data=json.dumps({'content':send_text}))
# log match data
tweet_match = tweet_match or ['']
orig_match = orig_match or ['']
seen_tweets.append(tweet.id_str)
log_file += (
f'{datetime.now().__str__()}\t\ttrigger {tweet.id_str} ({person} ) '
f'| tweet matches: {tweet_match[0]} '
f'| reply matches: {orig_match[0]} '
f'| media match: {match_media} '
f'| tweet_age: {tweet_age}\n'
)
# add final report to log file
log_file += f'{datetime.now().__str__()}\t\tcompleted search\n'
closeSession(log_file, ' '.join(seen_tweets))
# call the search
searchTweets()
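The script assumes a `keys.json` beside it; the layout sketched below is inferred from the lookups above (`keys['twitter']` is unpacked into `twitter.Api`, and `keys['slack']['webhook']` / `keys['discord']['webhook']` receive the posts). The field names inside `twitter` follow the python-twitter `Api` constructor and are an assumption here, not taken from the repository.

```
{
  "twitter": {
    "consumer_key": "...",
    "consumer_secret": "...",
    "access_token_key": "...",
    "access_token_secret": "..."
  },
  "slack": {"webhook": "https://hooks.slack.com/services/..."},
  "discord": {"webhook": "https://discord.com/api/webhooks/..."}
}
```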
| 31.012438 | 120 | 0.556028 |
b9cbf71730c34edbc31cfeb01925f0ab9323c67a | 1,056 | py | Python | setup.py | xbanke/matpy | a136f0a264f97510f749a2eec46a813ccb5d388e | ["MIT"] | null | null | null | setup.py | xbanke/matpy | a136f0a264f97510f749a2eec46a813ccb5d388e | ["MIT"] | null | null | null | setup.py | xbanke/matpy | a136f0a264f97510f749a2eec46a813ccb5d388e | ["MIT"] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: 0.1
@author: quantpy
@file: setup.py
@time: 2018/4/10 14:53
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='matlab-python',
version='0.1.2',
description='Call matlab from python',
url='https://github.com/xbanke/matpy',
author='quantpy',
author_email='quantpy@gmail.com',
license='MIT',
packages=['matpy'],
keywords=['matlab', 'python', 'matpy'],
install_requires=['attrs', 'numpy'],
zip_safe=False,
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries'
]
)
| 24.55814 | 59 | 0.609848 |
5b828690455eb62bf68ace2079cf96e6b77ad43a | 5,536 | py | Python | pangramsolver.py | zambrey/pangram-solver | 9ee60c6c5312ccc51bb49b8e191c50d12fafbfb3 | ["MIT"] | null | null | null | pangramsolver.py | zambrey/pangram-solver | 9ee60c6c5312ccc51bb49b8e191c50d12fafbfb3 | ["MIT"] | null | null | null | pangramsolver.py | zambrey/pangram-solver | 9ee60c6c5312ccc51bb49b8e191c50d12fafbfb3 | ["MIT"] | 1 | 2021-02-25T00:45:22.000Z | 2021-02-25T00:45:22.000Z |
import pygtrie as trie
import requests
from requests.exceptions import HTTPError
from typing import List, Set, Tuple
class pangramgame:
def __init__(self, letters: List[str], required: List[str], lenRange: Tuple[int, int]):
self._letters = set([s.lower() for s in letters])
self._vowels = self._letters.intersection(
set(['a', 'e', 'i', 'o', 'u']))
self._consonants = self._letters - self._vowels
self._required = set([s.lower() for s in required])
self._lenRange = lenRange
self._dictionary = None
# Setting default debug attrs
self.debug_attrs()
def debug_attrs(self, onlyGeneration: bool = False, allowRepeat: bool = True, useHeuristics: bool = False, useLocalDict: bool = True, useTrie: bool = True, trimInvalidPrefixes: bool = True):
self._onlyGeneration = onlyGeneration
self._allowRepeat = allowRepeat
self._useHeuristics = useHeuristics
self._useLocalDict = useLocalDict
self._useTrie = useTrie
self._trimInvalidPrefixes = trimInvalidPrefixes
def _loadDictionary(self):
with open('words_alpha.txt') as word_file:
valid_words = set(word_file.read().split())
if self._useTrie:
self._dictionary = trie.CharTrie.fromkeys(valid_words, True)
else:
self._dictionary = valid_words
def _isDictionaryWord(self, word: str) -> bool:
if self._useLocalDict:
if not self._dictionary:
self._loadDictionary()
return word in self._dictionary
else:
return self._isOwlBotWord(word)
def _isValidPrefix(self, prefix: str) -> bool:
if not self._onlyGeneration and self._trimInvalidPrefixes:
if not self._dictionary:
self._loadDictionary()
return self._dictionary.has_node(prefix)
else:
return True
def _isOwlBotWord(self, word: str) -> bool:
url = "https://owlbot.info/api/v4/dictionary/" + word
try:
response = requests.get(url,
headers={"Authorization": "Token yourapikey"},)
response.raise_for_status()
except Exception:
return False
else:
return True
def _isPossibleWord(self, word: str) -> bool:
# Must have required letters
if len(self._required.intersection(word)) != len(self._required):
return False
if self._useHeuristics:
# A valid word should have at least one vowel
if not self._vowels.intersection(word):
return False
# No more than 2 consecutive consonants
i = 0
while i < (len(word)-2):
if word[i] in self._consonants:
if word[i+1] in self._consonants:
if word[i+2] in self._consonants:
return False
else:
i += 3
else:
i += 2
else:
i += 1
# No more than 2 consecutive vowels
i = 0
while i < (len(word)-2):
if word[i] in self._vowels:
if word[i+1] in self._vowels:
if word[i+2] in self._vowels:
return False
else:
i += 3
else:
i += 2
else:
i += 1
# v, j, k, w and x should never repeat
nonrepeat = set(["v", "j", "k", "w", "x"])
if nonrepeat.intersection(self._letters):
if nonrepeat.intersection(word):
for i in range(0, len(word)-1):
if word[i] in nonrepeat and word[i] == word[i+1]:
return False
# Word should not repeat a letter in the beginning except when e, o or l.
if word[0] not in "eol" and word[0] == word[1]:
return False
# q is always followed by u
if "q" in self._letters and "q" in word:
if "u" in self._letters and "u" in word:
if "qu" not in word:
return False
else:
return False
if not self._onlyGeneration:
# Verify in dict
if not self._isDictionaryWord(word):
return False
return True
def _solveHelper(self, used: Set[str], curr: str, result: List[str]):
if not self._isValidPrefix(curr) or len(curr) > self._lenRange[1]:
return
if (self._lenRange[0] <= len(curr) <= self._lenRange[1]):
if (self._isPossibleWord(curr)):
# print(curr)
result.append(curr)
for l in self._letters:
if self._allowRepeat:
self._solveHelper(used, curr+l, result)
else:
if l not in used:
used.add(l)
self._solveHelper(used, curr+l, result)
used.remove(l)
def solve(self) -> List[str]:
result = []
self._solveHelper(set(), "", result)
return result
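A minimal usage sketch, not part of the original file: the letter set and length range are made up, and the class's defaults assume `pygtrie` is installed and `words_alpha.txt` is present in the working directory.

```python
if __name__ == "__main__":
    game = pangramgame(letters=["p", "a", "n", "g", "r", "m", "s"],
                       required=["g"],
                       lenRange=(4, 12))
    words = game.solve()
    print(f"{len(words)} valid words; longest: {max(words, key=len, default='')}")
```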
| 36.662252 | 195 | 0.501987 |
71193cfecacfe903b295491e05007d0f2cb0e91f | 5,903 | py | Python | models/official/mask_rcnn/mask_rcnn_main.py | aman2930/tpu | 1724c0ee08523ecb4e10872026afd2d8391137c1 | ["Apache-2.0"] | 5,098 | 2018-02-09T16:56:49.000Z | 2022-03-31T13:50:40.000Z | models/official/mask_rcnn/mask_rcnn_main.py | aman2930/tpu | 1724c0ee08523ecb4e10872026afd2d8391137c1 | ["Apache-2.0"] | 550 | 2018-02-07T05:30:06.000Z | 2022-03-13T22:00:09.000Z | models/official/mask_rcnn/mask_rcnn_main.py | aman2930/tpu | 1724c0ee08523ecb4e10872026afd2d8391137c1 | ["Apache-2.0"] | 1,920 | 2018-02-07T23:44:49.000Z | 2022-03-29T03:11:08.000Z |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for Mask-RCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import sys
sys.path.insert(0, 'tpu/models')
from hyperparameters import common_hparams_flags
from hyperparameters import common_tpu_flags
from hyperparameters import flags_to_params
from hyperparameters import params_dict
import dataloader
import distributed_executer
import mask_rcnn_model
from configs import mask_rcnn_config
common_tpu_flags.define_common_tpu_flags()
common_hparams_flags.define_common_hparams_flags()
flags.DEFINE_string(
'distribution_strategy',
default='tpu',
help='Distribution strategy or estimator type to use. One of'
'"multi_worker_gpu"|"tpu".')
# Parameters for MultiWorkerMirroredStrategy
flags.DEFINE_string(
'worker_hosts',
default=None,
help='Comma-separated list of worker ip:port pairs for running '
'multi-worker models with distribution strategy. The user would '
'start the program on each host with identical value for this flag.')
flags.DEFINE_integer(
'task_index', 0, 'If multi-worker training, the task_index of this worker.')
flags.DEFINE_integer(
'num_gpus',
default=0,
help='Number of gpus when using collective all reduce strategy.')
flags.DEFINE_integer(
'worker_replicas',
default=0,
help='Number of workers when using collective all reduce strategy.')
# TPUEstimator parameters
flags.DEFINE_integer(
'num_cores', default=None, help='Number of TPU cores for training')
flags.DEFINE_multi_integer(
'input_partition_dims', None,
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_bool(
'transpose_input',
default=None,
help='Use TPU double transpose optimization')
flags.DEFINE_string(
'tpu_job_name', None,
'Name of TPU worker binary. Only necessary if job name is changed from'
' default tpu_worker.')
# Model specific parameters
flags.DEFINE_string('mode', 'train',
'Mode to run: train or eval (default: train)')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def run_executer(model_params, train_input_fn=None, eval_input_fn=None):
"""Runs Mask RCNN model on distribution strategy defined by the user."""
if FLAGS.distribution_strategy == 'multi_worker_gpu':
executer = distributed_executer.MultiWorkerExecuter(
FLAGS, model_params, mask_rcnn_model.mask_rcnn_model_fn)
else:
executer = distributed_executer.TPUEstimatorExecuter(
FLAGS, model_params, mask_rcnn_model.mask_rcnn_model_fn)
if FLAGS.mode == 'train':
executer.train(train_input_fn, FLAGS.eval_after_training, eval_input_fn)
elif FLAGS.mode == 'eval':
executer.eval(eval_input_fn)
elif FLAGS.mode == 'train_and_eval':
executer.train_and_eval(train_input_fn, eval_input_fn)
else:
raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
def main(argv):
del argv # Unused.
# Configure parameters.
params = params_dict.ParamsDict(
mask_rcnn_config.MASK_RCNN_CFG, mask_rcnn_config.MASK_RCNN_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=True)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params = flags_to_params.override_params_from_input_flags(params, FLAGS)
params.validate()
params.lock()
# Check data path
train_input_fn = None
eval_input_fn = None
if (FLAGS.mode in ('train', 'train_and_eval') and
not params.training_file_pattern):
raise RuntimeError('You must specify `training_file_pattern` for training.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if not params.validation_file_pattern:
raise RuntimeError('You must specify `validation_file_pattern` '
'for evaluation.')
if not params.val_json_file and not params.include_groundtruth_in_features:
raise RuntimeError('You must specify `val_json_file` or '
'include_groundtruth_in_features=True for evaluation.')
if FLAGS.mode in ('train', 'train_and_eval'):
train_input_fn = dataloader.InputReader(
params.training_file_pattern,
mode=tf.estimator.ModeKeys.TRAIN,
use_fake_data=FLAGS.use_fake_data,
use_instance_mask=params.include_mask)
if (FLAGS.mode in ('eval', 'train_and_eval') or
(FLAGS.mode == 'train' and FLAGS.eval_after_training)):
eval_input_fn = dataloader.InputReader(
params.validation_file_pattern,
mode=tf.estimator.ModeKeys.PREDICT,
num_examples=params.eval_samples,
use_instance_mask=params.include_mask)
run_executer(params, train_input_fn, eval_input_fn)
if __name__ == '__main__':
tf.disable_eager_execution()
logging.set_verbosity(logging.INFO)
tf.app.run(main)
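# Example invocation (illustrative only; the TPU name, bucket paths and
# parameter overrides below are placeholders, and the --tpu / --params_override
# flags are assumed to be defined by the shared common_tpu_flags and
# common_hparams_flags modules imported above):
#
#   python mask_rcnn_main.py \
#     --mode=train_and_eval \
#     --distribution_strategy=tpu \
#     --tpu=my-tpu --num_cores=8 \
#     --params_override="training_file_pattern=gs://bucket/train-*,validation_file_pattern=gs://bucket/val-*,val_json_file=gs://bucket/instances_val.json"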
avg_line_length: 35.993902 | max_line_length: 80 | alphanum_fraction: 0.737591

hexsha: 8e97c9479931a780a49d64dbd6bf6d07aebe4cb1 | size: 937 | ext: py | lang: Python
max_stars:  path=product/urls.py | repo=KylebitXY/myline | head=dce12e1575ea709e4ee6d35ba84df0fc17a6a961 | licenses=["MIT"] | count=null | events=null .. null
max_issues: path=product/urls.py | repo=KylebitXY/myline | head=dce12e1575ea709e4ee6d35ba84df0fc17a6a961 | licenses=["MIT"] | count=null | events=null .. null
max_forks:  path=product/urls.py | repo=KylebitXY/myline | head=dce12e1575ea709e4ee6d35ba84df0fc17a6a961 | licenses=["MIT"] | count=null | events=null .. null
from django.urls import path
from .api import CategoryAPI
from .api import BrandAPI
from .api import ProductAPI
from .api import Product_FilterAPI
#from .api import Product_Filter_name_API
from product.api import GetProductById
from product.api import GetPicturesByProductId
from product.api import GetCommentsByProductId, CreateComment
from . import views
urlpatterns = [
    path('', views.index, name='index'),
    path('category/', CategoryAPI.as_view()),
    path('Brand/', BrandAPI.as_view()),
    path('<int:product_id>/pictures/', GetPicturesByProductId.as_view()),
    path('<int:product_id>/comments/', GetCommentsByProductId.as_view()),
    path('newcomment/', CreateComment.as_view()),
    path('<int:num>/', Product_FilterAPI.as_view()),
    # path('product/(?P<username>.+)/$', Product_Filter_name_API.as_view()),  # doesn't work: path() takes converter syntax, re_path() is needed for regex patterns
    path('product/id/<int:id>/', GetProductById.as_view()),
    path('product/', ProductAPI.as_view()),
]
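# These routes are typically mounted from the project-level urls.py, for example
# (illustrative; the URL prefix is a placeholder):
#
#   path('product/', include('product.urls')),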
avg_line_length: 37.48 | max_line_length: 84 | alphanum_fraction: 0.739594

hexsha: 60058beb0be9a8efb2c729106c60f9e2964219fe | size: 36,181 | ext: py | lang: Python
max_stars:  path=youtube_dl/extractor/extractors.py | repo=s200801005/youtube-dl | head=9979184eb9e643bb87b6172547b9af6366671a00 | licenses=["Unlicense"] | count=null | events=null .. null
max_issues: path=youtube_dl/extractor/extractors.py | repo=s200801005/youtube-dl | head=9979184eb9e643bb87b6172547b9af6366671a00 | licenses=["Unlicense"] | count=null | events=null .. null
max_forks:  path=youtube_dl/extractor/extractors.py | repo=s200801005/youtube-dl | head=9979184eb9e643bb87b6172547b9af6366671a00 | licenses=["Unlicense"] | count=null | events=null .. null
# flake8: noqa
from __future__ import unicode_literals
from .abc import (
ABCIE,
ABCIViewIE,
)
from .abcnews import (
AbcNewsIE,
AbcNewsVideoIE,
)
from .abcotvs import (
ABCOTVSIE,
ABCOTVSClipsIE,
)
from .academicearth import AcademicEarthCourseIE
from .acast import (
ACastIE,
ACastChannelIE,
)
from .acfun import (
AcfunIE,
AcfunBangumiIE,
AcfunLiveIE,
)
from .adn import ADNIE
from .adobeconnect import AdobeConnectIE
from .adobetv import (
AdobeTVEmbedIE,
AdobeTVIE,
AdobeTVShowIE,
AdobeTVChannelIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aenetworks import (
AENetworksIE,
AENetworksCollectionIE,
AENetworksShowIE,
HistoryTopicIE,
HistoryPlayerIE,
BiographyIE,
)
from .afreecatv import AfreecaTVIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .amara import AmaraIE
from .amcnetworks import AMCNetworksIE
from .americastestkitchen import (
AmericasTestKitchenIE,
AmericasTestKitchenSeasonIE,
)
from .animeondemand import AnimeOnDemandIE
from .anvato import AnvatoIE
from .aol import AolIE
from .allocine import AllocineIE
from .aliexpress import AliExpressLiveIE
from .apa import APAIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import (
AppleTrailersIE,
AppleTrailersSectionIE,
)
from .applepodcasts import ApplePodcastsIE
from .archiveorg import ArchiveOrgIE
from .arcpublishing import ArcPublishingIE
from .arkena import ArkenaIE
from .ard import (
ARDBetaMediathekIE,
ARDIE,
ARDMediathekIE,
)
from .arte import (
ArteTVIE,
ArteTVEmbedIE,
ArteTVPlaylistIE,
)
from .arnes import ArnesIE
from .asiancrush import (
AsianCrushIE,
AsianCrushPlaylistIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .atvat import ATVAtIE
from .audimedia import AudiMediaIE
from .audioboom import AudioBoomIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .awaan import (
AWAANIE,
AWAANVideoIE,
AWAANLiveIE,
AWAANSeasonIE,
)
from .azmedien import AZMedienIE
from .baidu import BaiduVideoIE
from .bandaichannel import BandaiChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE, BandcampWeeklyIE
from .bbc import (
BBCCoUkIE,
BBCCoUkArticleIE,
BBCCoUkIPlayerEpisodesIE,
BBCCoUkIPlayerGroupIE,
BBCCoUkPlaylistIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bellmedia import BellMediaIE
from .beatport import BeatportIE
from .bet import BetIE
from .bfi import BFIPlayerIE
from .bfmtv import (
BFMTVIE,
BFMTVLiveIE,
BFMTVArticleIE,
)
from .bibeltv import BibelTVIE
from .bigflix import BigflixIE
from .bild import BildIE
from .bilibili import (
BiliBiliIE,
BiliBiliBangumiIE,
BilibiliAudioIE,
BilibiliAudioAlbumIE,
BiliBiliPlayerIE,
)
from .biobiochiletv import BioBioChileTVIE
from .bitchute import (
BitChuteIE,
BitChuteChannelIE,
)
from .biqle import BIQLEIE
from .bleacherreport import (
BleacherReportIE,
BleacherReportCMSIE,
)
from .bloomberg import BloombergIE
from .bokecc import BokeCCIE
from .bongacams import BongaCamsIE
from .bostonglobe import BostonGlobeIE
from .box import BoxIE
from .bpb import BpbIE
from .br import (
BRIE,
BRMediathekIE,
)
from .bravotv import BravoTVIE
from .breakcom import BreakIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .businessinsider import BusinessInsiderIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .cammodels import CamModelsIE
from .camtube import CamTubeIE
from .camwithher import CamWithHerIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .canvas import (
CanvasIE,
CanvasEenIE,
VrtNUIE,
DagelijkseKostIE,
)
from .carambatv import (
CarambaTVIE,
CarambaTVPageIE,
)
from .cartoonnetwork import CartoonNetworkIE
from .cbc import (
CBCIE,
CBCPlayerIE,
CBCWatchVideoIE,
CBCWatchIE,
CBCOlympicsIE,
)
from .cbs import CBSIE
from .cbslocal import (
CBSLocalIE,
CBSLocalArticleIE,
)
from .cbsinteractive import CBSInteractiveIE
from .cbsnews import (
CBSNewsEmbedIE,
CBSNewsIE,
CBSNewsLiveVideoIE,
)
from .cbssports import (
CBSSportsEmbedIE,
CBSSportsIE,
TwentyFourSevenSportsIE,
)
from .ccc import (
CCCIE,
CCCPlaylistIE,
)
from .ccma import CCMAIE
from .cctv import CCTVIE
from .cda import CDAIE
from .ceskatelevize import (
CeskaTelevizeIE,
CeskaTelevizePoradyIE,
)
from .channel9 import Channel9IE
from .charlierose import CharlieRoseIE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemax import CinemaxIE
from .ciscolive import (
CiscoLiveSessionIE,
CiscoLiveSearchIE,
)
from .cjsw import CJSWIE
from .cliphunter import CliphunterIE
from .clippit import ClippitIE
from .cliprs import ClipRsIE
from .clipsyndicate import ClipsyndicateIE
from .closertotruth import CloserToTruthIE
from .cloudflarestream import CloudflareStreamIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .clyp import ClypIE
from .cmt import CMTIE
from .cnbc import (
CNBCIE,
CNBCVideoIE,
)
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .coub import CoubIE
from .comedycentral import (
ComedyCentralIE,
ComedyCentralTVIE,
)
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .commonprotocols import (
MmsIE,
RtmpIE,
)
from .condenast import CondeNastIE
from .contv import CONtvIE
from .corus import CorusIE
from .cracked import CrackedIE
from .crackle import CrackleIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .ctv import CTVIE
from .ctvnews import CTVNewsIE
from .cultureunplugged import CultureUnpluggedIE
from .curiositystream import (
CuriosityStreamIE,
CuriosityStreamCollectionIE,
)
from .cwtv import CWTVIE
from .dailymail import DailyMailIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import (
DaumIE,
DaumClipIE,
DaumPlaylistIE,
DaumUserIE,
)
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .democracynow import DemocracynowIE
from .dfb import DFBIE
from .dhm import DHMIE
from .digg import DiggIE
from .dotsub import DotsubIE
from .douyutv import (
DouyuShowIE,
DouyuTVIE,
)
from .dplay import (
DPlayIE,
DiscoveryPlusIE,
HGTVDeIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import (
DRTVIE,
DRTVLiveIE,
)
from .dtube import DTubeIE
from .dvtv import DVTVIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .discoverygo import (
DiscoveryGoIE,
DiscoveryGoPlaylistIE,
)
from .discoverynetworks import DiscoveryNetworksDeIE
from .discoveryvr import DiscoveryVRIE
from .disney import DisneyIE
from .dispeak import DigitallySpeakingIE
from .dropbox import DropboxIE
from .dw import (
DWIE,
DWArticleIE,
)
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .egghead import (
EggheadCourseIE,
EggheadLessonIE,
)
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentube import (
EllenTubeIE,
EllenTubeVideoIE,
EllenTubePlaylistIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import (
ESPNIE,
ESPNArticleIE,
FiveThirtyEightIE,
)
from .esri import EsriVideoIE
from .europa import EuropaIE
from .expotv import ExpoTVIE
from .expressen import ExpressenIE
from .extremetube import ExtremeTubeIE
from .eyedotv import EyedoTVIE
from .facebook import (
FacebookIE,
FacebookPluginsVideoIE,
)
from .faz import FazIE
from .fc2 import (
FC2IE,
FC2EmbedIE,
)
from .fczenit import FczenitIE
from .filmon import (
FilmOnIE,
FilmOnChannelIE,
)
from .filmweb import FilmwebIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .formula1 import Formula1IE
from .fourtube import (
FourTubeIE,
PornTubeIE,
PornerBrosIE,
FuxIE,
)
from .fox import FOXIE
from .fox9 import (
FOX9IE,
FOX9NewsIE,
)
from .foxgay import FoxgayIE
from .foxnews import (
FoxNewsIE,
FoxNewsArticleIE,
)
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
FranceTVIE,
FranceTVSiteIE,
FranceTVEmbedIE,
FranceTVInfoIE,
FranceTVInfoSportIE,
FranceTVJeunesseIE,
GenerationWhatIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freshlive import FreshLiveIE
from .frontendmasters import (
FrontendMastersIE,
FrontendMastersLessonIE,
FrontendMastersCourseIE
)
from .fujitv import FujiTVFODPlus7IE
from .funimation import FunimationIE
from .funk import FunkIE
from .fusion import FusionIE
from .gaia import GaiaIE
from .gameinformer import GameInformerIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gaskrank import GaskrankIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .gedidigital import GediDigitalIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import (
GloboIE,
GloboArticleIE,
)
from .go import GoIE
from .godtube import GodTubeIE
from .golem import GolemIE
from .googledrive import GoogleDriveIE
from .googlepodcasts import (
GooglePodcastsIE,
GooglePodcastsFeedIE,
)
from .googlesearch import GoogleSearchIE
from .goshgay import GoshgayIE
from .gputechconf import GPUTechConfIE
from .groupon import GrouponIE
from .hbo import HBOIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hgtv import HGTVComShowIE
from .hketv import HKETVIE
from .hidive import HiDiveIE
from .historicfilms import HistoricFilmsIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hitrecord import HitRecordIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
from .hotstar import (
HotStarIE,
HotStarPlaylistIE,
)
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .hrti import (
HRTiIE,
HRTiPlaylistIE,
)
from .huajiao import HuajiaoIE
from .huffpost import HuffPostIE
from .hungama import (
HungamaIE,
HungamaSongIE,
)
from .hypem import HypemIE
from .ign import (
IGNIE,
IGNVideoIE,
IGNArticleIE,
)
from .iheart import (
IHeartRadioIE,
IHeartRadioPodcastIE,
)
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import (
ImgurIE,
ImgurAlbumIE,
ImgurGalleryIE,
)
from .ina import InaIE
from .inc import IncIE
from .indavideo import IndavideoEmbedIE
from .infoq import InfoQIE
from .instagram import (
InstagramIE,
InstagramUserIE,
InstagramTagIE,
)
from .internazionale import InternazionaleIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .itv import (
ITVIE,
ITVBTCCIE,
)
from .ivi import (
IviIE,
IviCompilationIE
)
from .ivideon import IvideonIE
from .iwara import IwaraIE
from .izlesene import IzleseneIE
from .jamendo import (
JamendoIE,
JamendoAlbumIE,
)
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .joj import JojIE
from .jwplatform import JWPlatformIE
from .kakao import KakaoIE
from .kaltura import KalturaIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .ketnet import KetnetIE
from .khanacademy import (
KhanAcademyIE,
KhanAcademyUnitIE,
)
from .kickstarter import KickStarterIE
from .kinja import KinjaEmbedIE
from .kinopoisk import KinoPoiskIE
from .konserthusetplay import KonserthusetPlayIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kusi import KUSIIE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import (
Laola1TvEmbedIE,
Laola1TvIE,
EHFTVIE,
ITTFIE,
)
from .lbry import (
LBRYIE,
LBRYChannelIE,
)
from .lci import LCIIE
from .lcp import (
LcpPlayIE,
LcpIE,
)
from .lecture2go import Lecture2GoIE
from .lecturio import (
LecturioIE,
LecturioCourseIE,
LecturioDeCourseIE,
)
from .leeco import (
LeIE,
LePlaylistIE,
LetvCloudIE,
)
from .lego import LEGOIE
from .lemonde import LemondeIE
from .lenta import LentaIE
from .libraryofcongress import LibraryOfCongressIE
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .limelight import (
LimelightMediaIE,
LimelightChannelIE,
LimelightChannelListIE,
)
from .line import (
LineTVIE,
LineLiveIE,
LineLiveChannelIE,
)
from .linkedin import (
LinkedInLearningIE,
LinkedInLearningCourseIE,
)
from .linuxacademy import LinuxAcademyIE
from .litv import LiTVIE
from .livejournal import LiveJournalIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .localnews8 import LocalNews8IE
from .lovehomeporn import LoveHomePornIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .mailru import (
MailRuIE,
MailRuMusicIE,
MailRuMusicSearchIE,
)
from .malltv import MallTVIE
from .mangomolo import (
MangomoloVideoIE,
MangomoloLiveIE,
)
from .manyvids import ManyVidsIE
from .maoritv import MaoriTVIE
from .markiza import (
MarkizaIE,
MarkizaPageIE,
)
from .massengeschmacktv import MassengeschmackTVIE
from .matchtv import MatchTVIE
from .mdr import MDRIE
from .medaltv import MedalTVIE
from .mediaset import MediasetIE
from .mediasite import (
MediasiteIE,
MediasiteCatalogIE,
MediasiteNamedCatalogIE,
)
from .medici import MediciIE
from .megaphone import MegaphoneIE
from .meipai import MeipaiIE
from .melonvod import MelonVODIE
from .meta import METAIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .mgtv import MGTVIE
from .miaopai import MiaoPaiIE
from .microsoftvirtualacademy import (
MicrosoftVirtualAcademyIE,
MicrosoftVirtualAcademyCourseIE,
)
from .minds import (
MindsIE,
MindsChannelIE,
MindsGroupIE,
)
from .ministrygrid import MinistryGridIE
from .minoto import MinotoIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import (
MixcloudIE,
MixcloudUserIE,
MixcloudPlaylistIE,
)
from .mlb import (
MLBIE,
MLBVideoIE,
)
from .mnet import MnetIE
from .moevideo import MoeVideoIE
from .mofosex import (
MofosexIE,
MofosexEmbedIE,
)
from .mojvideo import MojvideoIE
from .morningstar import MorningstarIE
from .motherless import (
MotherlessIE,
MotherlessGroupIE
)
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movingimage import MovingImageIE
from .msn import MSNIE
from .mtv import (
MTVIE,
MTVVideoIE,
MTVServicesEmbeddedIE,
MTVDEIE,
MTVJapanIE,
)
from .muenchentv import MuenchenTVIE
from .mwave import MwaveIE, MwaveMeetGreetIE
from .mychannels import MyChannelsIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import (
MyviIE,
MyviEmbedIE,
)
from .myvidster import MyVidsterIE
from .nationalgeographic import (
NationalGeographicVideoIE,
NationalGeographicTVIE,
)
from .naver import NaverIE
from .nba import (
NBAWatchEmbedIE,
NBAWatchIE,
NBAWatchCollectionIE,
NBAEmbedIE,
NBAIE,
NBAChannelIE,
)
from .nbc import (
NBCIE,
NBCNewsIE,
NBCOlympicsIE,
NBCOlympicsStreamIE,
NBCSportsIE,
NBCSportsStreamIE,
NBCSportsVPlayerIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import (
NewgroundsIE,
NewgroundsPlaylistIE,
)
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
NextTVIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nfl import (
NFLIE,
NFLArticleIE,
)
from .nhk import (
NhkVodIE,
NhkVodProgramIE,
)
from .nhl import NHLIE
from .nick import (
NickIE,
NickBrIE,
NickDeIE,
NickNightIE,
NickRuIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninecninemedia import NineCNineMediaIE
from .ninegag import NineGagIE
from .ninenow import NineNowIE
from .nintendo import NintendoIE
from .njpwworld import NJPWWorldIE
from .nobelprize import NobelPrizeIE
from .nonktube import NonkTubeIE
from .noovo import NoovoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import (
NovaEmbedIE,
NovaIE,
)
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .noz import NozIE
from .npo import (
AndereTijdenIE,
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
SchoolTVIE,
HetKlokhuisIE,
VPROIE,
WNLIE,
)
from .npr import NprIE
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKSkoleIE,
NRKTVIE,
NRKTVDirekteIE,
NRKRadioPodkastIE,
NRKTVEpisodeIE,
NRKTVEpisodesIE,
NRKTVSeasonIE,
NRKTVSeriesIE,
)
from .nrl import NRLTVIE
from .ntvcojp import NTVCoJpCUIE
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
NYTimesCookingIE,
)
from .nuvid import NuvidIE
from .nzz import NZZIE
from .odatv import OdaTVIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .ondemandkorea import OnDemandKoreaIE
from .onet import (
OnetIE,
OnetChannelIE,
OnetMVPIE,
OnetPlIE,
)
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .ora import OraTVIE
from .orf import (
ORFTVthekIE,
ORFFM4IE,
ORFFM4StoryIE,
ORFOE1IE,
ORFOE3IE,
ORFNOEIE,
ORFWIEIE,
ORFBGLIE,
ORFOOEIE,
ORFSTMIE,
ORFKTNIE,
ORFSBGIE,
ORFTIRIE,
ORFVBGIE,
ORFIPTVIE,
)
from .outsidetv import OutsideTVIE
from .packtpub import (
PacktPubIE,
PacktPubCourseIE,
)
from .palcomp3 import (
PalcoMP3IE,
PalcoMP3ArtistIE,
PalcoMP3VideoIE,
)
from .pandoratv import PandoraTVIE
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .pearvideo import PearVideoIE
from .peertube import PeerTubeIE
from .people import PeopleIE
from .performgroup import PerformGroupIE
from .periscope import (
PeriscopeIE,
PeriscopeUserIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .picarto import (
PicartoIE,
PicartoVodIE,
)
from .piksel import PikselIE
from .pinkbike import PinkbikeIE
from .pinterest import (
PinterestIE,
PinterestCollectionIE,
)
from .pladform import PladformIE
from .platzi import (
PlatziIE,
PlatziCourseIE,
)
from .playfm import PlayFMIE
from .playplustv import PlayPlusTVIE
from .plays import PlaysTVIE
from .playstuff import PlayStuffIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .pokemon import PokemonIE
from .polskieradio import (
PolskieRadioIE,
PolskieRadioCategoryIE,
)
from .popcorntimes import PopcorntimesIE
from .popcorntv import PopcornTVIE
from .porn91 import Porn91IE
from .porncom import PornComIE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubUserIE,
PornHubPagedVideoListIE,
PornHubUserVideosUploadIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .puhutv import (
PuhuTVIE,
PuhuTVSerieIE,
)
from .presstv import PressTVIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .r7 import (
R7IE,
R7ArticleIE,
)
from .radiocanada import (
RadioCanadaIE,
RadioCanadaAudioVideoIE,
)
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import (
RaiPlayIE,
RaiPlayLiveIE,
RaiPlayPlaylistIE,
RaiIE,
)
from .raywenderlich import (
RayWenderlichIE,
RayWenderlichCourseIE,
)
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redbulltv import (
RedBullTVIE,
RedBullEmbedIE,
RedBullTVRrnContentIE,
RedBullIE,
)
from .reddit import (
RedditIE,
RedditRIE,
)
from .redtube import RedTubeIE
from .regiotv import RegioTVIE
from .rentv import (
RENTVIE,
RENTVArticleIE,
)
from .restudy import RestudyIE
from .reuters import ReutersIE
from .reverbnation import ReverbNationIE
from .rice import RICEIE
from .rmcdecouverte import RMCDecouverteIE
from .ro220 import Ro220IE
from .rockstargames import RockstarGamesIE
from .roosterteeth import RoosterTeethIE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rozhlas import RozhlasIE
from .rtbf import RTBFIE
from .rte import RteIE, RteRadioIE
from .rtlnl import RtlNlIE
from .rtl2 import (
RTL2IE,
RTL2YouIE,
RTL2YouSeriesIE,
)
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVETelevisionIE
from .rtvnh import RTVNHIE
from .rtvs import RTVSIE
from .ruhd import RUHDIE
from .rumble import RumbleEmbedIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
RutubePlaylistIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .ruv import RuvIE
from .safari import (
SafariIE,
SafariApiIE,
SafariCourseIE,
)
from .samplefocus import SampleFocusIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .scrippsnetworks import (
ScrippsNetworksWatchIE,
ScrippsNetworksIE,
)
from .scte import (
SCTEIE,
SCTECourseIE,
)
from .seeker import SeekerIE
from .senateisvp import SenateISVPIE
from .sendtonews import SendtoNewsIE
from .servus import ServusIE
from .sevenplus import SevenPlusIE
from .sexu import SexuIE
from .seznamzpravy import (
SeznamZpravyIE,
SeznamZpravyArticleIE,
)
from .shahid import (
ShahidIE,
ShahidShowIE,
)
from .shared import (
SharedIE,
VivoIE,
)
from .showroomlive import ShowRoomLiveIE
from .simplecast import (
SimplecastIE,
SimplecastEpisodeIE,
SimplecastPodcastIE,
)
from .sina import SinaIE
from .sixplay import SixPlayIE
from .skyit import (
SkyItPlayerIE,
SkyItVideoIE,
SkyItVideoLiveIE,
SkyItIE,
SkyItAcademyIE,
SkyItArteIE,
CieloTVItIE,
TV8ItIE,
)
from .skylinewebcams import SkylineWebcamsIE
from .skynewsarabia import (
SkyNewsArabiaIE,
SkyNewsArabiaArticleIE,
)
from .sky import (
SkyNewsIE,
SkySportsIE,
SkySportsNewsIE,
)
from .slideshare import SlideshareIE
from .slideslive import SlidesLiveIE
from .slutload import SlutloadIE
from .snotr import SnotrIE
from .sohu import SohuIE
from .sonyliv import SonyLIVIE
from .soundcloud import (
SoundcloudEmbedIE,
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudTrackStationIE,
SoundcloudPlaylistIE,
SoundcloudSearchIE,
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .spankbang import (
SpankBangIE,
SpankBangPlaylistIE,
)
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .spike import (
BellatorIE,
ParamountNetworkIE,
)
from .stitcher import (
StitcherIE,
StitcherShowIE,
)
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .spotify import (
SpotifyIE,
SpotifyShowIE,
)
from .spreaker import (
SpreakerIE,
SpreakerPageIE,
SpreakerShowIE,
SpreakerShowPageIE,
)
from .springboardplatform import SpringboardPlatformIE
from .sprout import SproutIE
from .srgssr import (
SRGSSRIE,
SRGSSRPlayIE,
)
from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .storyfire import (
StoryFireIE,
StoryFireUserIE,
StoryFireSeriesIE,
)
from .streamable import StreamableIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .stretchinternet import StretchInternetIE
from .stv import STVPlayerIE
from .sunporno import SunPornoIE
from .sverigesradio import (
SverigesRadioEpisodeIE,
SverigesRadioPublicationIE,
)
from .svt import (
SVTIE,
SVTPageIE,
SVTPlayIE,
SVTSeriesIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import (
TagesschauPlayerIE,
TagesschauIE,
)
from .tass import TassIE
from .tbs import TBSIE
from .tdslifeway import TDSLifewayIE
from .teachable import (
TeachableIE,
TeachableCourseIE,
)
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .teamtreehouse import TeamTreeHouseIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .tele5 import Tele5IE
from .tele13 import Tele13IE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .telequebec import (
TeleQuebecIE,
TeleQuebecSquatIE,
TeleQuebecEmissionIE,
TeleQuebecLiveIE,
TeleQuebecVideoIE,
)
from .teletask import TeleTaskIE
from .telewebion import TelewebionIE
from .tennistv import TennisTVIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .tfo import TFOIE
from .theintercept import TheInterceptIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thescene import TheSceneIE
from .thestar import TheStarIE
from .thesun import TheSunIE
from .theweatherchannel import TheWeatherChannelIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .thisoldhouse import ThisOldHouseIE
from .threeqsdn import ThreeQSDNIE
from .tiktok import (
TikTokIE,
TikTokUserIE,
)
from .tinypic import TinyPicIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixNetworkEmbedIE,
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .toggle import (
ToggleIE,
MeWatchIE,
)
from .tonline import TOnlineIE
from .toongoggles import ToonGogglesIE
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trovo import (
TrovoIE,
TrovoVodIE,
)
from .trunews import TruNewsIE
from .trutv import TruTVIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tumblr import TumblrIE
from .tunein import (
TuneInClipIE,
TuneInStationIE,
TuneInProgramIE,
TuneInTopicIE,
TuneInShortenerIE,
)
from .tunepk import TunePkIE
from .turbo import TurboIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
KatsomoIE,
MTVUutisetArticleIE,
)
from .tv2dk import (
TV2DKIE,
TV2DKBornholmPlayIE,
)
from .tv2hu import TV2HuIE
from .tv4 import TV4IE
from .tv5mondeplus import TV5MondePlusIE
from .tv5unis import (
TV5UnisVideoIE,
TV5UnisIE,
)
from .tva import (
TVAIE,
QubIE,
)
from .tvanouvelles import (
TVANouvellesIE,
TVANouvellesArticleIE,
)
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tver import TVerIE
from .tvigle import TvigleIE
from .tvland import TVLandIE
from .tvn24 import TVN24IE
from .tvnet import TVNetIE
from .tvnoe import TVNoeIE
from .tvnow import (
TVNowIE,
TVNowNewIE,
TVNowSeasonIE,
TVNowAnnualIE,
TVNowShowIE,
)
from .tvp import (
TVPEmbedIE,
TVPIE,
TVPWebsiteIE,
)
from .tvplay import (
TVPlayIE,
ViafreeIE,
TVPlayHomeIE,
)
from .tvplayer import TVPlayerIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentymin import TwentyMinutenIE
from .twentythreevideo import TwentyThreeVideoIE
from .twitcasting import TwitCastingIE
from .twitch import (
TwitchVodIE,
TwitchCollectionIE,
TwitchVideosIE,
TwitchVideosClipsIE,
TwitchVideosCollectionsIE,
TwitchStreamIE,
TwitchClipsIE,
)
from .twitter import (
TwitterCardIE,
TwitterIE,
TwitterAmplifyIE,
TwitterBroadcastIE,
)
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ufctv import (
UFCTVIE,
UFCArabiaIE,
)
from .uktvplay import UKTVPlayIE
from .digiteka import DigitekaIE
from .dlive import (
DLiveVODIE,
DLiveStreamIE,
)
from .umg import UMGDeIE
from .unistra import UnistraIE
from .unity import UnityIE
from .uol import UOLIE
from .uplynk import (
UplynkIE,
UplynkPreplayIE,
)
from .urort import UrortIE
from .urplay import URPlayIE
from .usanetwork import USANetworkIE
from .usatoday import USATodayIE
from .ustream import UstreamIE, UstreamChannelIE
from .ustudio import (
UstudioIE,
UstudioEmbedIE,
)
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import (
VevoIE,
VevoPlaylistIE,
)
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import (
ViceIE,
ViceArticleIE,
ViceShowIE,
)
from .vidbit import VidbitIE
from .viddler import ViddlerIE
from .videa import VideaIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videomore import (
VideomoreIE,
VideomoreVideoIE,
VideomoreSeasonIE,
)
from .videopress import VideoPressIE
from .vidio import VidioIE
from .vidlii import VidLiiIE
from .vidme import (
VidmeIE,
VidmeUserIE,
VidmeUserLikesIE,
)
from .vier import VierIE, VierVideosIE
from .viewlift import (
ViewLiftIE,
ViewLiftEmbedIE,
)
from .viidea import ViideaIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoOndemandIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
VHXEmbedIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .viqeo import ViqeoIE
from .viu import (
ViuIE,
ViuPlaylistIE,
ViuOTTIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
VKWallPostIE,
)
from .vlive import (
VLiveIE,
VLivePostIE,
VLiveChannelIE,
)
from .vodlocker import VodlockerIE
from .vodpl import VODPlIE
from .vodplatform import VODPlatformIE
from .voicerepublic import VoiceRepublicIE
from .voot import VootIE
from .voxmedia import (
VoxMediaVolumeIE,
VoxMediaIE,
)
from .vrt import VRTIE
from .vrak import VrakIE
from .vrv import (
VRVIE,
VRVSeriesIE,
)
from .vshare import VShareIE
from .vtm import VTMIE
from .medialaan import MedialaanIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vvvvid import (
VVVVIDIE,
VVVVIDShowIE,
)
from .vyborymos import VyboryMosIE
from .vzaar import VzaarIE
from .wakanim import WakanimIE
from .walla import WallaIE
from .washingtonpost import (
WashingtonPostIE,
WashingtonPostArticleIE,
)
from .wat import WatIE
from .watchbox import WatchBoxIE
from .watchindianporn import WatchIndianPornIE
from .wdr import (
WDRIE,
WDRPageIE,
WDRElefantIE,
WDRMobileIE,
)
from .webcaster import (
WebcasterIE,
WebcasterFeedIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import (
WeiboIE,
WeiboMobileIE
)
from .weiqitv import WeiqiTVIE
from .wistia import (
WistiaIE,
WistiaPlaylistIE,
)
from .worldstarhiphop import WorldStarHipHopIE
from .wsj import (
WSJIE,
WSJArticleIE,
)
from .wwe import WWEIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xfileshare import XFileShareIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
XHamsterUserIE,
)
from .xiami import (
XiamiSongIE,
XiamiAlbumIE,
XiamiArtistIE,
XiamiCollectionIE
)
from .ximalaya import (
XimalayaIE,
XimalayaAlbumIE
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
YahooGyaOPlayerIE,
YahooGyaOIE,
YahooJapanNewsIE,
)
from .yandexdisk import YandexDiskIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
YandexMusicArtistTracksIE,
YandexMusicArtistAlbumsIE,
)
from .yandexvideo import YandexVideoIE
from .yapfiles import YapFilesIE
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import (
YoukuIE,
YoukuShowIE,
)
from .younow import (
YouNowLiveIE,
YouNowChannelIE,
YouNowMomentIE,
)
from .youporn import YouPornIE
from .yourporn import YourPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubeTabIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
#YoutubeSearchURLIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeYtBeIE,
YoutubeYtUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zattoo import (
BBVTVIE,
EinsUndEinsTVIE,
EWETVIE,
GlattvisionTVIE,
MNetTVIE,
MyVisionTVIE,
NetPlusIE,
OsnatelTVIE,
QuantumTVIE,
QuicklineIE,
QuicklineLiveIE,
SaltTVIE,
SAKTVIE,
VTXTVIE,
WalyTVIE,
ZattooIE,
ZattooLiveIE,
)
from .zdf import ZDFIE, ZDFChannelIE
from .zhihu import ZhihuIE
from .zingmp3 import (
ZingMp3IE,
ZingMp3AlbumIE,
)
from .zoom import ZoomIE
from .zype import ZypeIE
avg_line_length: 21.927879 | max_line_length: 90 | alphanum_fraction: 0.765125

hexsha: 93d89c8e755269c9f22298b41db5b8d3aedf6c62 | size: 11,592 | ext: py | lang: Python
max_stars:  path=sppas/sppas/src/anndata/aio/anvil.py | repo=mirfan899/MTTS | head=3167b65f576abcc27a8767d24c274a04712bd948 | licenses=["MIT"] | count=null | events=null .. null
max_issues: path=sppas/sppas/src/anndata/aio/anvil.py | repo=mirfan899/MTTS | head=3167b65f576abcc27a8767d24c274a04712bd948 | licenses=["MIT"] | count=null | events=null .. null
max_forks:  path=sppas/sppas/src/anndata/aio/anvil.py | repo=mirfan899/MTTS | head=3167b65f576abcc27a8767d24c274a04712bd948 | licenses=["MIT"] | count=null | events=null .. null
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
anndata.aio.anvil.py
~~~~~~~~~~~~~~~~~~~~
ANVIL is a free video annotation tool.
| Kipp, M. (2012)
| Multimedia Annotation, Querying and Analysis in ANVIL.
| In: M. Maybury (ed.) Multimedia Information Extraction,
| Chapter 21, John Wiley & Sons, pp: 351-368.
BE AWARE that the support of anvil files by SPPAS has to be verified,
tested and extended!!!
"""
import xml.etree.cElementTree as ET
from ..anndataexc import AnnDataTypeError
from ..ann.annlocation import sppasLocation
from ..ann.annlocation import sppasPoint
from ..ann.annlocation import sppasInterval
from .basetrs import sppasBaseIO
from .aioutils import format_labels
# ---------------------------------------------------------------------------
class sppasAnvil(sppasBaseIO):
"""ANVIL (partially) reader.
:author: Brigitte Bigi, Jibril Saffi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: contact@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
"""
@staticmethod
def detect(filename):
"""Check whether a file is of ANVIL format or not.
:param filename: (str) Name of the file to check.
:returns: (bool)
"""
try:
tree = ET.parse(filename)
root = tree.getroot()
except IOError:
return False
except UnicodeDecodeError:
return False
return root.find('body') is not None
# -----------------------------------------------------------------------
@staticmethod
def make_point(midpoint):
"""The localization is a time value, so always a float."""
try:
midpoint = float(midpoint)
except ValueError:
raise AnnDataTypeError(midpoint, "float")
return sppasPoint(midpoint, radius=0.005)
# -----------------------------------------------------------------------
def __init__(self, name=None):
"""Initialize a new ANVIL instance.
:param name: (str) This transcription name.
"""
if name is None:
name = self.__class__.__name__
super(sppasAnvil, self).__init__(name)
self._accept_multi_tiers = True
self._accept_no_tiers = True
self._accept_metadata = True
self._accept_ctrl_vocab = False # to be verified
self._accept_media = True # to be verified
self._accept_hierarchy = True
self._accept_point = False
self._accept_interval = True
self._accept_disjoint = False
self._accept_alt_localization = False
self._accept_alt_tag = False
self._accept_radius = False
self._accept_gaps = True # to be verified
self._accept_overlaps = False # to be verified
self.default_extension = "anvil"
self.software = "Anvil"
# -----------------------------------------------------------------------
def read(self, filename):
"""Read an ANVIL file and fill the Transcription.
:param filename: (str)
"""
tree = ET.parse(filename)
root = tree.getroot()
# FIXME we ought to get the ctrl vocabs in the spec file
        # there also ought to be a representation of the hierarchy,
        # but since we have multiple, non-aligned tiers,
# it's not trivial to implement
body_root = root.find('body')
self._read_tracks(body_root)
# -----------------------------------------------------------------------
def _read_tracks(self, body_root):
for track_root in body_root.findall('track'):
if(track_root.attrib['type'] == "primary" or
track_root.attrib['type'] == "primarypoint"):
self._read_primary_track(track_root)
elif track_root.attrib['type'] == "singleton":
self._read_singleton_track(track_root, body_root)
elif track_root.attrib['type'] == "span":
self._read_span_track(track_root, body_root)
elif track_root.attrib['type'] == "subdivision":
self._read_subdivision_track(track_root, body_root)
else:
raise Exception('unknown track type')
# -----------------------------------------------------------------------
def _read_primary_track(self, track_root):
"""Read a primary track (primary or primarypoint).
:param track_root:
"""
# Create tiers of the primary track.
self.__create_tier_from_attribute(track_root)
# Parse elements and create annotations
for el_root in track_root.findall('el'):
if track_root.attrib['type'] == 'primary':
begin = float(el_root.attrib['start'])
end = float(el_root.attrib['end'])
if begin > end:
begin, end = end, begin
elif begin == end:
continue
localization = sppasInterval(sppasAnvil.make_point(begin),
sppasAnvil.make_point(end))
elif track_root.attrib['type'] == 'primarypoint':
time = float(el_root.attrib['time'])
localization = sppasAnvil.make_point(time)
else:
raise Exception('unknown primary track type')
self.__create_annotation_from_el(track_root, el_root, localization)
# -----------------------------------------------------------------------
def _read_singleton_track(self, track_root, body_root):
# find ref
ref_root = body_root.find(
"track[@name='%s']" %
track_root.attrib['ref'])
self.__create_tier_from_attribute(track_root)
for el_root in track_root.findall('el'):
ref_el = ref_root.find(
"el[@index='%s']" %
el_root.attrib['ref'])
begin = float(ref_el.attrib['start'])
end = float(ref_el.attrib['end'])
if begin > end:
begin, end = end, begin
elif begin == end:
continue
localization = sppasInterval(sppasAnvil.make_point(begin),
sppasAnvil.make_point(end))
self.__create_annotation_from_el(track_root, el_root, localization)
# -----------------------------------------------------------------------
def _read_span_track(self, track_root, body_root):
# find ref
ref_root = body_root.find(
"track[@name='%s']" %
track_root.attrib['ref'])
self.__create_tier_from_attribute(track_root)
for el_root in track_root.findall('el'):
begin_ref = el_root.attrib['start']
end_ref = el_root.attrib['end']
begin_el = ref_root.find(
"el[@index='%s']" %
begin_ref)
end_el = ref_root.find(
"el[@index='%s']" %
end_ref)
begin = float(begin_el.attrib['start'])
end = float(end_el.attrib['end'])
if begin > end:
begin, end = end, begin
elif begin == end:
continue
localization = sppasInterval(sppasAnvil.make_point(begin),
sppasAnvil.make_point(end))
self.__create_annotation_from_el(track_root, el_root, localization)
# -----------------------------------------------------------------------
def _read_subdivision_track(self, track_root, body_root):
# find ref
ref_root = body_root.find(
"track[@name='%s']" %
track_root.attrib['ref'])
self.__create_tier_from_attribute(track_root)
for el_group_root in track_root.findall('el-group'):
ref_el = ref_root.find(
"el[@index='%s']" %
el_group_root.attrib['ref'])
time_slots = list()
time_slots.append(float(ref_el.attrib['start']))
for el_root in el_group_root.findall('el'):
if 'start' in el_root.attrib:
time_slots.append(float(el_root.attrib['start']))
time_slots.append(float(ref_el.attrib['end']))
b = 0
e = 1
for el_root in el_group_root.findall('el'):
begin = time_slots[b]
b += 1
end = time_slots[e]
e += 1
localization = sppasInterval(sppasAnvil.make_point(begin),
sppasAnvil.make_point(end))
self.__create_annotation_from_el(track_root,
el_root,
localization)
# -----------------------------------------------------------------------
def __create_tier_from_attribute(self, track_root):
"""Create a set of tiers from 'attribute' of 'track'.
:param track_root:
"""
for attribute_node in track_root.iter('attribute'):
tier_name = sppasAnvil.__fix_tier_name(track_root, attribute_node)
if self.find(tier_name) is None:
self.create_tier(tier_name)
# -----------------------------------------------------------------------
def __create_annotation_from_el(self, track_root, el_root, localization):
"""Create a set of annotations from 'attribute' of 'el'.
:param track_root:
:param el_root:
:param localization:
"""
for attribute_node in el_root.findall('attribute'):
labels = format_labels(attribute_node.text)
tier = self.find(sppasAnvil.__fix_tier_name(track_root,
attribute_node))
tier.create_annotation(sppasLocation(localization),
labels)
# -----------------------------------------------------------------------
@staticmethod
def __fix_tier_name(track_root, attribute_node):
return track_root.attrib['name'] + \
'.' + \
attribute_node.attrib['name']
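# Example usage (illustrative; the file name is a placeholder, and the
# transcription/tier accessors follow the SPPAS anndata API, which may differ
# between versions):
#
#   if sppasAnvil.detect("session.anvil"):
#       trs = sppasAnvil()
#       trs.read("session.anvil")
#       for tier in trs:
#           print(tier.get_name(), len(tier))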
avg_line_length: 34.19469 | max_line_length: 79 | alphanum_fraction: 0.515183

hexsha: d5878ef1831a4fc06f6d927242f0d88d1cac5efa | size: 4,347 | ext: py | lang: Python
max_stars:  path=docs/fabfile.py | repo=mattvonrocketstein/ymir | head=a16117ec64c60ec52b0daa3b19eb711ec42b5070 | licenses=["MIT"] | count=3 | events=2015-10-14T04:07:28.000Z .. 2017-09-09T11:12:51.000Z
max_issues: path=docs/fabfile.py | repo=mattvonrocketstein/ymir | head=a16117ec64c60ec52b0daa3b19eb711ec42b5070 | licenses=["MIT"] | count=36 | events=2015-05-07T11:46:32.000Z .. 2021-09-23T23:20:56.000Z
max_forks:  path=docs/fabfile.py | repo=mattvonrocketstein/ymir | head=a16117ec64c60ec52b0daa3b19eb711ec42b5070 | licenses=["MIT"] | count=3 | events=2016-01-05T17:04:07.000Z .. 2016-03-14T09:08:22.000Z
# -*- coding: utf-8 -*-
from fabric import api
import os
import shutil
from fabric import colors
PORT = 8000
PROJECT_NAME = 'ymir'
DOC_ROOT = os.path.dirname(__file__)
SRC_ROOT = os.path.dirname(DOC_ROOT)
GEN_PATH = os.path.join(DOC_ROOT, 'ymir')
DEPLOY_PATH = "~/code/ghio/{0}".format(PROJECT_NAME)
DEPLOY_PATH = os.path.expanduser(DEPLOY_PATH)
def check_links_prod():
return check_links(
# proto='https',
base_domain='mattvonrocketstein.github.io')
def check_links(url='/ymir', proto='http', base_domain="localhost"):
""" check the links wget. """
base_url = '{1}://{0}:'.format(base_domain, proto)
port = str((PORT if base_domain == 'localhost' else 80))
url = base_url + port + url
cmd = (
"webcheck --force "
"--ignore-robots --avoid-external "
"--output webcheck ")
cmd = cmd + url
api.local(cmd)
import webbrowser
webbrowser.open("file://{0}/badlinks.html".format(
os.path.join(os.path.dirname(__file__), 'webcheck/')))
return
def parse_lines(lines):
print colors.red('broken links:')
links = [x.replace(url, '')[1:] for x in lines]
for link in links:
print colors.red(link)
with api.quiet(): # (hide="warn_only=True):
z = api.local(
"find {0} -name *.md|xargs grep '{1}'".format(DOC_ROOT, link), capture=True)
if z.succeeded:
print str(z)
else:
print "could not find any mention"
print
# fab run should already be started
logfile = "link_check.log"
base_url = 'http://{0}:'.format(base_domain)
port = str((PORT if base_domain == 'localhost' else 80))
url = base_url + port + url
wipe_logfile = lambda: api.local('rm -f "{0}"'.format(logfile))
wipe_logfile()
with api.settings(warn_only=True):
api.local(
("wget -e robots=off --spider -r -nd "
"-nv -o {1} {0}").format(url, logfile))
with open(logfile, 'r') as fhandle:
lines = [x.strip() for x in fhandle.readlines()]
start = end = None
for line in lines:
if line.startswith('Found') and line.endswith(" broken links."):
start = lines.index(line)
if line.startswith('FINISHED') and line.endswith('--'):
end = lines.index(line)
if start is not None and end is not None:
lines = lines[start + 2:end - 1]
parse_lines(lines)
else:
print "no broken links found"
def add_coverage(_dir=GEN_PATH):
print colors.red("adding coverage data")
cdir = os.path.join(SRC_ROOT, 'htmlcov')
if os.path.exists(cdir):
api.local("cp -r {0} {1}".format(cdir, _dir))
def clean():
""" Remove generated files """
if os.path.isdir(GEN_PATH):
shutil.rmtree(GEN_PATH)
os.makedirs(GEN_PATH)
def build(conf='pelicanconf.py'):
"""Build local version of site"""
with api.lcd(os.path.dirname(__file__)):
api.local('pelican -s {0} -o {1}'.format(conf, GEN_PATH))
def rebuild():
"""`clean` then `build`"""
clean()
build()
add_coverage(GEN_PATH)
def regenerate():
"""Automatically regenerate site upon file modification"""
with api.lcd(os.path.dirname(__file__)):
api.local('pelican -r -s pelicanconf.py -o {0}'.format(GEN_PATH))
def serve():
"""Serve site at http://localhost:8000/"""
with api.lcd(os.path.dirname(GEN_PATH)):
api.local("twistd -n web -p {0} --path .".format(PORT))
def push():
if os.path.exists(DEPLOY_PATH):
with api.lcd(DEPLOY_PATH):
api.local("find . -type f|xargs git rm -f")
api.local("mkdir -p {0}".format(DEPLOY_PATH))
api.local(
"cp -rfv {0} {1}".format(
os.path.join(GEN_PATH, '*'),
DEPLOY_PATH))
with api.lcd(DEPLOY_PATH):
api.local("find . -type f|xargs git add")
api.local("git commit . -m'publishing {0}'".format(PROJECT_NAME))
api.local("git push")
def publish():
build_prod()
push()
def build_prod():
clean()
build("pelican_publish.py")
add_coverage(GEN_PATH)
def run():
from littleworkers import Pool
commands = [
'fab regenerate',
'fab serve'
]
lil = Pool(workers=2)
lil.run(commands)
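# Typical invocations, run from the docs/ directory (illustrative):
#
#   fab rebuild    # clean, regenerate the site into ./ymir and copy coverage data
#   fab serve      # serve the generated site on http://localhost:8000/
#   fab publish    # production build, then push to the GitHub Pages checkout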
avg_line_length: 28.98 | max_line_length: 96 | alphanum_fraction: 0.590522

hexsha: 43c84dc12a5f4200a375f099ad8dfb1a247ace17 | size: 1,728 | ext: py | lang: Python
max_stars:  path=misc/webdriver-w3c-tests/element_location/element_location_test.py | repo=chenxix/crosswalk-test-suite | head=a2353dd2df912334cf6489f99bc0af4e091079a0 | licenses=["BSD-3-Clause"] | count=null | events=null .. null
max_issues: path=misc/webdriver-w3c-tests/element_location/element_location_test.py | repo=chenxix/crosswalk-test-suite | head=a2353dd2df912334cf6489f99bc0af4e091079a0 | licenses=["BSD-3-Clause"] | count=null | events=null .. null
max_forks:  path=misc/webdriver-w3c-tests/element_location/element_location_test.py | repo=chenxix/crosswalk-test-suite | head=a2353dd2df912334cf6489f99bc0af4e091079a0 | licenses=["BSD-3-Clause"] | count=null | events=null .. null
# -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import base_test
class ElementLocationTest(base_test.WebDriverBaseTest):
def test_find_element_by_id(self):
self.driver.get(self.webserver.where_is("element_location/res/elements.html"))
e = self.driver.find_element_by_id("id")
self.assertEquals("id", e.text)
def test_find_element_by_name(self):
self.driver.get(self.webserver.where_is("element_location/res/elements.html"))
e = self.driver.find_element_by_name("name")
self.assertEquals("name", e.text)
def test_find_element_by_css_selector(self):
self.driver.get(self.webserver.where_is("element_location/res/elements.html"))
e = self.driver.find_element_by_css_selector("#id")
self.assertEquals("id", e.text)
def test_find_element_by_link_text(self):
self.driver.get(self.webserver.where_is("element_location/res/elements.html"))
e = self.driver.find_element_by_link_text("link text")
self.assertEquals("link text", e.text)
def test_find_element_by_partial_link_text(self):
self.driver.get(self.webserver.where_is("element_location/res/elements.html"))
e = self.driver.find_element_by_partial_link_text("link tex")
self.assertEquals("link text", e.text)
def test_find_element_by_xpath(self):
self.driver.get(self.webserver.where_is("element_location/res/elements.html"))
e = self.driver.find_element_by_xpath("//*[@id='id']")
self.assertEquals("id", e.text)
if __name__ == "__main__":
unittest.main()
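# Running this module directly is supported through the unittest entry point
# above (illustrative; the WebDriver session and test web server are assumed
# to be set up by the shared base_test module):
#
#   python element_location_test.py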
avg_line_length: 38.4 | max_line_length: 89 | alphanum_fraction: 0.712384

hexsha: 0ce3770fdd33ade486b57b2531eec0488cd463fb | size: 94 | ext: py | lang: Python
max_stars:  path=python/GUI/POM/KnowledgePage/__init__.py | repo=toilatester/sample-automation-frameworks-across-languages | head=4c1ceb3f8fff14ed838f94c92be7d92013c95d4a | licenses=["Apache-2.0"] | count=8 | events=2020-12-11T06:57:12.000Z .. 2021-10-11T12:53:49.000Z
max_issues: path=python/GUI/POM/KnowledgePage/__init__.py | repo=toilatester/sample-automation-frameworks-across-languages | head=4c1ceb3f8fff14ed838f94c92be7d92013c95d4a | licenses=["Apache-2.0"] | count=null | events=null .. null
max_forks:  path=python/GUI/POM/KnowledgePage/__init__.py | repo=toilatester/sample-automation-frameworks-across-languages | head=4c1ceb3f8fff14ed838f94c92be7d92013c95d4a | licenses=["Apache-2.0"] | count=2 | events=2021-04-06T08:14:35.000Z .. 2021-08-05T01:43:54.000Z
from .KnowledgePage import KnowledgePage
from .KnowledgeValidation import KnowledgeValidation
| 31.333333
| 52
| 0.893617
|
198e4fde4425ad21eb92fa4d7c696b4e1f7875eb
| 36,416
|
py
|
Python
|
test/vanilla/Expected/AcceptanceTests/Xml/xmlservice/models/_models_py3.py
|
Azure/autorest.azure-functions-python
|
b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783
|
[
"MIT"
] | 4
|
2020-10-22T20:35:38.000Z
|
2021-12-21T07:29:01.000Z
|
test/vanilla/Expected/AcceptanceTests/Xml/xmlservice/models/_models_py3.py
|
Azure/autorest.azure-functions-python
|
b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783
|
[
"MIT"
] | 3
|
2020-09-09T15:16:15.000Z
|
2021-12-20T15:25:18.000Z
|
test/vanilla/Expected/AcceptanceTests/Xml/xmlservice/models/_models_py3.py
|
Azure/autorest.azure-functions-python
|
b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783
|
[
"MIT"
] | 2
|
2020-11-10T07:16:23.000Z
|
2020-12-30T11:03:14.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._auto_rest_swagger_batxml_service_enums import *
class AccessPolicy(msrest.serialization.Model):
"""An Access policy.
All required parameters must be populated in order to send to Azure.
:param start: Required. the date-time the policy is active.
:type start: ~datetime.datetime
:param expiry: Required. the date-time the policy expires.
:type expiry: ~datetime.datetime
:param permission: Required. the permissions for the acl policy.
:type permission: str
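Illustrative construction (an added sketch; the values below are assumptions, not
taken from any real storage account)::
    policy = AccessPolicy(
        start=datetime.datetime(2021, 1, 1),
        expiry=datetime.datetime(2021, 12, 31),
        permission="rwd")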
"""
_validation = {
'start': {'required': True},
'expiry': {'required': True},
'permission': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'iso-8601'},
'expiry': {'key': 'Expiry', 'type': 'iso-8601'},
'permission': {'key': 'Permission', 'type': 'str'},
}
def __init__(
self,
*,
start: datetime.datetime,
expiry: datetime.datetime,
permission: str,
**kwargs
):
super(AccessPolicy, self).__init__(**kwargs)
self.start = start
self.expiry = expiry
self.permission = permission
class AppleBarrel(msrest.serialization.Model):
"""A barrel of apples.
:param good_apples:
:type good_apples: list[str]
:param bad_apples:
:type bad_apples: list[str]
"""
_attribute_map = {
'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'wrapped': True, 'itemsName': 'Apple'}},
'bad_apples': {'key': 'BadApples', 'type': '[str]', 'xml': {'wrapped': True, 'itemsName': 'Apple'}},
}
def __init__(
self,
*,
good_apples: Optional[List[str]] = None,
bad_apples: Optional[List[str]] = None,
**kwargs
):
super(AppleBarrel, self).__init__(**kwargs)
self.good_apples = good_apples
self.bad_apples = bad_apples
class Banana(msrest.serialization.Model):
"""A banana.
:param name:
:type name: str
:param flavor:
:type flavor: str
:param expiration: The time at which you should reconsider eating this banana.
:type expiration: ~datetime.datetime
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str', 'xml': {'name': 'name'}},
'flavor': {'key': 'flavor', 'type': 'str', 'xml': {'name': 'flavor'}},
'expiration': {'key': 'expiration', 'type': 'iso-8601', 'xml': {'name': 'expiration'}},
}
_xml_map = {
'name': 'banana'
}
def __init__(
self,
*,
name: Optional[str] = None,
flavor: Optional[str] = None,
expiration: Optional[datetime.datetime] = None,
**kwargs
):
super(Banana, self).__init__(**kwargs)
self.name = name
self.flavor = flavor
self.expiration = expiration
class Blob(msrest.serialization.Model):
"""An Azure Storage blob.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param deleted: Required.
:type deleted: bool
:param snapshot: Required.
:type snapshot: str
:param properties: Required. Properties of a blob.
:type properties: ~xmlservice.models.BlobProperties
:param metadata: Dictionary of :code:`<string>`.
:type metadata: dict[str, str]
"""
_validation = {
'name': {'required': True},
'deleted': {'required': True},
'snapshot': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'deleted': {'key': 'Deleted', 'type': 'bool'},
'snapshot': {'key': 'Snapshot', 'type': 'str'},
'properties': {'key': 'Properties', 'type': 'BlobProperties'},
'metadata': {'key': 'Metadata', 'type': '{str}'},
}
_xml_map = {
'name': 'Blob'
}
def __init__(
self,
*,
name: str,
deleted: bool,
snapshot: str,
properties: "BlobProperties",
metadata: Optional[Dict[str, str]] = None,
**kwargs
):
super(Blob, self).__init__(**kwargs)
self.name = name
self.deleted = deleted
self.snapshot = snapshot
self.properties = properties
self.metadata = metadata
class BlobPrefix(msrest.serialization.Model):
"""BlobPrefix.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
**kwargs
):
super(BlobPrefix, self).__init__(**kwargs)
self.name = name
class BlobProperties(msrest.serialization.Model):
"""Properties of a blob.
All required parameters must be populated in order to send to Azure.
:param last_modified: Required.
:type last_modified: ~datetime.datetime
:param etag: Required.
:type etag: str
:param content_length: Size in bytes.
:type content_length: long
:param content_type:
:type content_type: str
:param content_encoding:
:type content_encoding: str
:param content_language:
:type content_language: str
:param content_md5:
:type content_md5: str
:param content_disposition:
:type content_disposition: str
:param cache_control:
:type cache_control: str
:param blob_sequence_number:
:type blob_sequence_number: int
:param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob".
:type blob_type: str or ~xmlservice.models.BlobType
:param lease_status: Possible values include: "locked", "unlocked".
:type lease_status: str or ~xmlservice.models.LeaseStatusType
:param lease_state: Possible values include: "available", "leased", "expired", "breaking",
"broken".
:type lease_state: str or ~xmlservice.models.LeaseStateType
:param lease_duration: Possible values include: "infinite", "fixed".
:type lease_duration: str or ~xmlservice.models.LeaseDurationType
:param copy_id:
:type copy_id: str
:param copy_status: Possible values include: "pending", "success", "aborted", "failed".
:type copy_status: str or ~xmlservice.models.CopyStatusType
:param copy_source:
:type copy_source: str
:param copy_progress:
:type copy_progress: str
:param copy_completion_time:
:type copy_completion_time: ~datetime.datetime
:param copy_status_description:
:type copy_status_description: str
:param server_encrypted:
:type server_encrypted: bool
:param incremental_copy:
:type incremental_copy: bool
:param destination_snapshot:
:type destination_snapshot: str
:param deleted_time:
:type deleted_time: ~datetime.datetime
:param remaining_retention_days:
:type remaining_retention_days: int
:param access_tier: Possible values include: "P4", "P6", "P10", "P20", "P30", "P40", "P50",
"Hot", "Cool", "Archive".
:type access_tier: str or ~xmlservice.models.AccessTier
:param access_tier_inferred:
:type access_tier_inferred: bool
:param archive_status: Possible values include: "rehydrate-pending-to-hot", "rehydrate-
pending-to-cool".
:type archive_status: str or ~xmlservice.models.ArchiveStatus
"""
_validation = {
'last_modified': {'required': True},
'etag': {'required': True},
}
_attribute_map = {
'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
'etag': {'key': 'Etag', 'type': 'str'},
'content_length': {'key': 'Content-Length', 'type': 'long'},
'content_type': {'key': 'Content-Type', 'type': 'str'},
'content_encoding': {'key': 'Content-Encoding', 'type': 'str'},
'content_language': {'key': 'Content-Language', 'type': 'str'},
'content_md5': {'key': 'Content-MD5', 'type': 'str'},
'content_disposition': {'key': 'Content-Disposition', 'type': 'str'},
'cache_control': {'key': 'Cache-Control', 'type': 'str'},
'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'int'},
'blob_type': {'key': 'BlobType', 'type': 'str'},
'lease_status': {'key': 'LeaseStatus', 'type': 'str'},
'lease_state': {'key': 'LeaseState', 'type': 'str'},
'lease_duration': {'key': 'LeaseDuration', 'type': 'str'},
'copy_id': {'key': 'CopyId', 'type': 'str'},
'copy_status': {'key': 'CopyStatus', 'type': 'str'},
'copy_source': {'key': 'CopySource', 'type': 'str'},
'copy_progress': {'key': 'CopyProgress', 'type': 'str'},
'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'},
'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'},
'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'},
'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'},
'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'},
'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
'access_tier': {'key': 'AccessTier', 'type': 'str'},
'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'},
'archive_status': {'key': 'ArchiveStatus', 'type': 'str'},
}
def __init__(
self,
*,
last_modified: datetime.datetime,
etag: str,
content_length: Optional[int] = None,
content_type: Optional[str] = None,
content_encoding: Optional[str] = None,
content_language: Optional[str] = None,
content_md5: Optional[str] = None,
content_disposition: Optional[str] = None,
cache_control: Optional[str] = None,
blob_sequence_number: Optional[int] = None,
blob_type: Optional[Union[str, "BlobType"]] = None,
lease_status: Optional[Union[str, "LeaseStatusType"]] = None,
lease_state: Optional[Union[str, "LeaseStateType"]] = None,
lease_duration: Optional[Union[str, "LeaseDurationType"]] = None,
copy_id: Optional[str] = None,
copy_status: Optional[Union[str, "CopyStatusType"]] = None,
copy_source: Optional[str] = None,
copy_progress: Optional[str] = None,
copy_completion_time: Optional[datetime.datetime] = None,
copy_status_description: Optional[str] = None,
server_encrypted: Optional[bool] = None,
incremental_copy: Optional[bool] = None,
destination_snapshot: Optional[str] = None,
deleted_time: Optional[datetime.datetime] = None,
remaining_retention_days: Optional[int] = None,
access_tier: Optional[Union[str, "AccessTier"]] = None,
access_tier_inferred: Optional[bool] = None,
archive_status: Optional[Union[str, "ArchiveStatus"]] = None,
**kwargs
):
super(BlobProperties, self).__init__(**kwargs)
self.last_modified = last_modified
self.etag = etag
self.content_length = content_length
self.content_type = content_type
self.content_encoding = content_encoding
self.content_language = content_language
self.content_md5 = content_md5
self.content_disposition = content_disposition
self.cache_control = cache_control
self.blob_sequence_number = blob_sequence_number
self.blob_type = blob_type
self.lease_status = lease_status
self.lease_state = lease_state
self.lease_duration = lease_duration
self.copy_id = copy_id
self.copy_status = copy_status
self.copy_source = copy_source
self.copy_progress = copy_progress
self.copy_completion_time = copy_completion_time
self.copy_status_description = copy_status_description
self.server_encrypted = server_encrypted
self.incremental_copy = incremental_copy
self.destination_snapshot = destination_snapshot
self.deleted_time = deleted_time
self.remaining_retention_days = remaining_retention_days
self.access_tier = access_tier
self.access_tier_inferred = access_tier_inferred
self.archive_status = archive_status
class Blobs(msrest.serialization.Model):
"""Blobs.
:param blob_prefix:
:type blob_prefix: list[~xmlservice.models.BlobPrefix]
:param blob:
:type blob: list[~xmlservice.models.Blob]
"""
_attribute_map = {
'blob_prefix': {'key': 'BlobPrefix', 'type': '[BlobPrefix]'},
'blob': {'key': 'Blob', 'type': '[Blob]'},
}
def __init__(
self,
*,
blob_prefix: Optional[List["BlobPrefix"]] = None,
blob: Optional[List["Blob"]] = None,
**kwargs
):
super(Blobs, self).__init__(**kwargs)
self.blob_prefix = blob_prefix
self.blob = blob
class ComplexTypeNoMeta(msrest.serialization.Model):
"""I am a complex type with no XML node.
:param id: The id of the res.
:type id: str
"""
_attribute_map = {
'id': {'key': 'ID', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(ComplexTypeNoMeta, self).__init__(**kwargs)
self.id = id
class ComplexTypeWithMeta(msrest.serialization.Model):
"""I am a complex type with XML node.
:param id: The id of the res.
:type id: str
"""
_attribute_map = {
'id': {'key': 'ID', 'type': 'str'},
}
_xml_map = {
'name': 'XMLComplexTypeWithMeta'
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(ComplexTypeWithMeta, self).__init__(**kwargs)
self.id = id
class Container(msrest.serialization.Model):
"""An Azure Storage container.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param properties: Required. Properties of a container.
:type properties: ~xmlservice.models.ContainerProperties
:param metadata: Dictionary of :code:`<string>`.
:type metadata: dict[str, str]
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'properties': {'key': 'Properties', 'type': 'ContainerProperties'},
'metadata': {'key': 'Metadata', 'type': '{str}'},
}
def __init__(
self,
*,
name: str,
properties: "ContainerProperties",
metadata: Optional[Dict[str, str]] = None,
**kwargs
):
super(Container, self).__init__(**kwargs)
self.name = name
self.properties = properties
self.metadata = metadata
class ContainerProperties(msrest.serialization.Model):
"""Properties of a container.
All required parameters must be populated in order to send to Azure.
:param last_modified: Required.
:type last_modified: ~datetime.datetime
:param etag: Required.
:type etag: str
:param lease_status: Possible values include: "locked", "unlocked".
:type lease_status: str or ~xmlservice.models.LeaseStatusType
:param lease_state: Possible values include: "available", "leased", "expired", "breaking",
"broken".
:type lease_state: str or ~xmlservice.models.LeaseStateType
:param lease_duration: Possible values include: "infinite", "fixed".
:type lease_duration: str or ~xmlservice.models.LeaseDurationType
:param public_access: Possible values include: "container", "blob".
:type public_access: str or ~xmlservice.models.PublicAccessType
"""
_validation = {
'last_modified': {'required': True},
'etag': {'required': True},
}
_attribute_map = {
'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
'etag': {'key': 'Etag', 'type': 'str'},
'lease_status': {'key': 'LeaseStatus', 'type': 'str'},
'lease_state': {'key': 'LeaseState', 'type': 'str'},
'lease_duration': {'key': 'LeaseDuration', 'type': 'str'},
'public_access': {'key': 'PublicAccess', 'type': 'str'},
}
def __init__(
self,
*,
last_modified: datetime.datetime,
etag: str,
lease_status: Optional[Union[str, "LeaseStatusType"]] = None,
lease_state: Optional[Union[str, "LeaseStateType"]] = None,
lease_duration: Optional[Union[str, "LeaseDurationType"]] = None,
public_access: Optional[Union[str, "PublicAccessType"]] = None,
**kwargs
):
super(ContainerProperties, self).__init__(**kwargs)
self.last_modified = last_modified
self.etag = etag
self.lease_status = lease_status
self.lease_state = lease_state
self.lease_duration = lease_duration
self.public_access = public_access
class CorsRule(msrest.serialization.Model):
"""CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
All required parameters must be populated in order to send to Azure.
:param allowed_origins: Required. The origin domains that are permitted to make a request
against the storage service via CORS. The origin domain is the domain from which the request
originates. Note that the origin must be an exact case-sensitive match with the origin that the
user agent sends to the service. You can also use the wildcard character '*' to allow all origin
domains to make requests via CORS.
:type allowed_origins: str
:param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
use for a CORS request. (comma separated).
:type allowed_methods: str
:param allowed_headers: Required. The request headers that the origin domain may specify on the
CORS request.
:type allowed_headers: str
:param exposed_headers: Required. The response headers that may be sent in the response to the
CORS request and exposed by the browser to the request issuer.
:type exposed_headers: str
:param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
preflight OPTIONS request.
:type max_age_in_seconds: int
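Illustrative construction (an added sketch; the values below are assumptions, not
taken from any real service configuration)::
    rule = CorsRule(
        allowed_origins="https://example.com",
        allowed_methods="GET,PUT",
        allowed_headers="x-ms-meta-*",
        exposed_headers="x-ms-meta-*",
        max_age_in_seconds=200)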
"""
_validation = {
'allowed_origins': {'required': True},
'allowed_methods': {'required': True},
'allowed_headers': {'required': True},
'exposed_headers': {'required': True},
'max_age_in_seconds': {'required': True, 'minimum': 0},
}
_attribute_map = {
'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
}
_xml_map = {
'name': 'CorsRule'
}
def __init__(
self,
*,
allowed_origins: str,
allowed_methods: str,
allowed_headers: str,
exposed_headers: str,
max_age_in_seconds: int,
**kwargs
):
super(CorsRule, self).__init__(**kwargs)
self.allowed_origins = allowed_origins
self.allowed_methods = allowed_methods
self.allowed_headers = allowed_headers
self.exposed_headers = exposed_headers
self.max_age_in_seconds = max_age_in_seconds
class Error(msrest.serialization.Model):
"""Error.
:param status:
:type status: int
:param message:
:type message: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[int] = None,
message: Optional[str] = None,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.status = status
self.message = message
class JSONInput(msrest.serialization.Model):
"""JSONInput.
:param id:
:type id: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
}
def __init__(
self,
*,
id: Optional[int] = None,
**kwargs
):
super(JSONInput, self).__init__(**kwargs)
self.id = id
class JSONOutput(msrest.serialization.Model):
"""JSONOutput.
:param id:
:type id: int
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
}
def __init__(
self,
*,
id: Optional[int] = None,
**kwargs
):
super(JSONOutput, self).__init__(**kwargs)
self.id = id
class ListBlobsResponse(msrest.serialization.Model):
"""An enumeration of blobs.
All required parameters must be populated in order to send to Azure.
:param service_endpoint:
:type service_endpoint: str
:param container_name: Required.
:type container_name: str
:param prefix: Required.
:type prefix: str
:param marker: Required.
:type marker: str
:param max_results: Required.
:type max_results: int
:param delimiter: Required.
:type delimiter: str
:param blobs: Required.
:type blobs: ~xmlservice.models.Blobs
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'container_name': {'required': True},
'prefix': {'required': True},
'marker': {'required': True},
'max_results': {'required': True},
'delimiter': {'required': True},
'blobs': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str'},
'marker': {'key': 'Marker', 'type': 'str'},
'max_results': {'key': 'MaxResults', 'type': 'int'},
'delimiter': {'key': 'Delimiter', 'type': 'str'},
'blobs': {'key': 'Blobs', 'type': 'Blobs'},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
container_name: str,
prefix: str,
marker: str,
max_results: int,
delimiter: str,
blobs: "Blobs",
next_marker: str,
service_endpoint: Optional[str] = None,
**kwargs
):
super(ListBlobsResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.container_name = container_name
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.delimiter = delimiter
self.blobs = blobs
self.next_marker = next_marker
class ListContainersResponse(msrest.serialization.Model):
"""An enumeration of containers.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param prefix: Required.
:type prefix: str
:param marker:
:type marker: str
:param max_results: Required.
:type max_results: int
:param containers:
:type containers: list[~xmlservice.models.Container]
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'prefix': {'required': True},
'max_results': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str'},
'marker': {'key': 'Marker', 'type': 'str'},
'max_results': {'key': 'MaxResults', 'type': 'int'},
'containers': {'key': 'Containers', 'type': '[Container]', 'xml': {'wrapped': True}},
'next_marker': {'key': 'NextMarker', 'type': 'str'},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(
self,
*,
service_endpoint: str,
prefix: str,
max_results: int,
next_marker: str,
marker: Optional[str] = None,
containers: Optional[List["Container"]] = None,
**kwargs
):
super(ListContainersResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.containers = containers
self.next_marker = next_marker
class Logging(msrest.serialization.Model):
"""Azure Analytics Logging settings.
All required parameters must be populated in order to send to Azure.
:param version: Required. The version of Storage Analytics to configure.
:type version: str
:param delete: Required. Indicates whether all delete requests should be logged.
:type delete: bool
:param read: Required. Indicates whether all read requests should be logged.
:type read: bool
:param write: Required. Indicates whether all write requests should be logged.
:type write: bool
:param retention_policy: Required. the retention policy.
:type retention_policy: ~xmlservice.models.RetentionPolicy
"""
_validation = {
'version': {'required': True},
'delete': {'required': True},
'read': {'required': True},
'write': {'required': True},
'retention_policy': {'required': True},
}
_attribute_map = {
'version': {'key': 'Version', 'type': 'str'},
'delete': {'key': 'Delete', 'type': 'bool'},
'read': {'key': 'Read', 'type': 'bool'},
'write': {'key': 'Write', 'type': 'bool'},
'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(
self,
*,
version: str,
delete: bool,
read: bool,
write: bool,
retention_policy: "RetentionPolicy",
**kwargs
):
super(Logging, self).__init__(**kwargs)
self.version = version
self.delete = delete
self.read = read
self.write = write
self.retention_policy = retention_policy
class Metrics(msrest.serialization.Model):
"""Metrics.
All required parameters must be populated in order to send to Azure.
:param version: The version of Storage Analytics to configure.
:type version: str
:param enabled: Required. Indicates whether metrics are enabled for the Blob service.
:type enabled: bool
:param include_apis: Indicates whether metrics should generate summary statistics for called
API operations.
:type include_apis: bool
:param retention_policy: the retention policy.
:type retention_policy: ~xmlservice.models.RetentionPolicy
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'version': {'key': 'Version', 'type': 'str'},
'enabled': {'key': 'Enabled', 'type': 'bool'},
'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'},
'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(
self,
*,
enabled: bool,
version: Optional[str] = None,
include_apis: Optional[bool] = None,
retention_policy: Optional["RetentionPolicy"] = None,
**kwargs
):
super(Metrics, self).__init__(**kwargs)
self.version = version
self.enabled = enabled
self.include_apis = include_apis
self.retention_policy = retention_policy
class RetentionPolicy(msrest.serialization.Model):
"""the retention policy.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Indicates whether a retention policy is enabled for the storage
service.
:type enabled: bool
:param days: Indicates the number of days that metrics or logging or soft-deleted data should
be retained. All data older than this value will be deleted.
:type days: int
"""
_validation = {
'enabled': {'required': True},
'days': {'minimum': 1},
}
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool'},
'days': {'key': 'Days', 'type': 'int'},
}
def __init__(
self,
*,
enabled: bool,
days: Optional[int] = None,
**kwargs
):
super(RetentionPolicy, self).__init__(**kwargs)
self.enabled = enabled
self.days = days
class RootWithRefAndMeta(msrest.serialization.Model):
"""I am root, and I ref a model WITH meta.
:param ref_to_model: XML will use XMLComplexTypeWithMeta.
:type ref_to_model: ~xmlservice.models.ComplexTypeWithMeta
:param something: Something else (just to avoid flattening).
:type something: str
"""
_attribute_map = {
'ref_to_model': {'key': 'RefToModel', 'type': 'ComplexTypeWithMeta'},
'something': {'key': 'Something', 'type': 'str'},
}
def __init__(
self,
*,
ref_to_model: Optional["ComplexTypeWithMeta"] = None,
something: Optional[str] = None,
**kwargs
):
super(RootWithRefAndMeta, self).__init__(**kwargs)
self.ref_to_model = ref_to_model
self.something = something
class RootWithRefAndNoMeta(msrest.serialization.Model):
"""I am root, and I ref a model with no meta.
:param ref_to_model: XML will use RefToModel.
:type ref_to_model: ~xmlservice.models.ComplexTypeNoMeta
:param something: Something else (just to avoid flattening).
:type something: str
"""
_attribute_map = {
'ref_to_model': {'key': 'RefToModel', 'type': 'ComplexTypeNoMeta'},
'something': {'key': 'Something', 'type': 'str'},
}
def __init__(
self,
*,
ref_to_model: Optional["ComplexTypeNoMeta"] = None,
something: Optional[str] = None,
**kwargs
):
super(RootWithRefAndNoMeta, self).__init__(**kwargs)
self.ref_to_model = ref_to_model
self.something = something
class SignedIdentifier(msrest.serialization.Model):
"""signed identifier.
All required parameters must be populated in order to send to Azure.
:param id: Required. a unique id.
:type id: str
:param access_policy: Required. The access policy.
:type access_policy: ~xmlservice.models.AccessPolicy
"""
_validation = {
'id': {'required': True},
'access_policy': {'required': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'},
}
_xml_map = {
'name': 'SignedIdentifier'
}
def __init__(
self,
*,
id: str,
access_policy: "AccessPolicy",
**kwargs
):
super(SignedIdentifier, self).__init__(**kwargs)
self.id = id
self.access_policy = access_policy
class Slide(msrest.serialization.Model):
"""A slide in a slideshow.
:param type:
:type type: str
:param title:
:type title: str
:param items:
:type items: list[str]
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str', 'xml': {'attr': True}},
'title': {'key': 'title', 'type': 'str'},
'items': {'key': 'items', 'type': '[str]', 'xml': {'itemsName': 'item'}},
}
_xml_map = {
'name': 'slide'
}
def __init__(
self,
*,
type: Optional[str] = None,
title: Optional[str] = None,
items: Optional[List[str]] = None,
**kwargs
):
super(Slide, self).__init__(**kwargs)
self.type = type
self.title = title
self.items = items
class Slideshow(msrest.serialization.Model):
"""Data about a slideshow.
:param title:
:type title: str
:param date:
:type date: str
:param author:
:type author: str
:param slides:
:type slides: list[~xmlservice.models.Slide]
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str', 'xml': {'attr': True}},
'date': {'key': 'date', 'type': 'str', 'xml': {'attr': True}},
'author': {'key': 'author', 'type': 'str', 'xml': {'attr': True}},
'slides': {'key': 'slides', 'type': '[Slide]'},
}
_xml_map = {
'name': 'slideshow'
}
def __init__(
self,
*,
title: Optional[str] = None,
date: Optional[str] = None,
author: Optional[str] = None,
slides: Optional[List["Slide"]] = None,
**kwargs
):
super(Slideshow, self).__init__(**kwargs)
self.title = title
self.date = date
self.author = author
self.slides = slides
class StorageServiceProperties(msrest.serialization.Model):
"""Storage Service Properties.
:param logging: Azure Analytics Logging settings.
:type logging: ~xmlservice.models.Logging
:param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for
blobs.
:type hour_metrics: ~xmlservice.models.Metrics
:param minute_metrics: a summary of request statistics grouped by API in minute aggregates for
blobs.
:type minute_metrics: ~xmlservice.models.Metrics
:param cors: The set of CORS rules.
:type cors: list[~xmlservice.models.CorsRule]
:param default_service_version: The default version to use for requests to the Blob service if
an incoming request's version is not specified. Possible values include version 2008-10-27 and
all more recent versions.
:type default_service_version: str
:param delete_retention_policy: The Delete Retention Policy for the service.
:type delete_retention_policy: ~xmlservice.models.RetentionPolicy
"""
_attribute_map = {
'logging': {'key': 'Logging', 'type': 'Logging'},
'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'},
'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'},
'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True, 'itemsName': 'CorsRule'}},
'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'},
'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(
self,
*,
logging: Optional["Logging"] = None,
hour_metrics: Optional["Metrics"] = None,
minute_metrics: Optional["Metrics"] = None,
cors: Optional[List["CorsRule"]] = None,
default_service_version: Optional[str] = None,
delete_retention_policy: Optional["RetentionPolicy"] = None,
**kwargs
):
super(StorageServiceProperties, self).__init__(**kwargs)
self.logging = logging
self.hour_metrics = hour_metrics
self.minute_metrics = minute_metrics
self.cors = cors
self.default_service_version = default_service_version
self.delete_retention_policy = delete_retention_policy
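# Illustrative composition sketch (an addition, not part of the generated code):
# shows how the models above nest to describe a storage service configuration.
# All values are assumptions chosen only to satisfy the documented required
# parameters.
if __name__ == "__main__":
    example_properties = StorageServiceProperties(
        logging=Logging(
            version="1.0",
            delete=True,
            read=False,
            write=True,
            retention_policy=RetentionPolicy(enabled=True, days=7)),
        hour_metrics=Metrics(
            enabled=True,
            version="1.0",
            include_apis=True,
            retention_policy=RetentionPolicy(enabled=True, days=7)),
        minute_metrics=Metrics(enabled=False),
        cors=[CorsRule(
            allowed_origins="https://example.com",
            allowed_methods="GET,PUT",
            allowed_headers="x-ms-meta-*",
            exposed_headers="x-ms-meta-*",
            max_age_in_seconds=200)],
        default_service_version="2018-03-28",
        delete_retention_policy=RetentionPolicy(enabled=True, days=30))
    print(example_properties)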
| 32.427427
| 364
| 0.609732
|
073724f9391e2cf30c500883f0f7d2d9200545ba
| 30,773
|
py
|
Python
|
tensorflow_probability/python/distributions/transformed_distribution.py
|
nagachika/probability
|
2a5609ceec01a388ec03b583b4f8e813cfbad981
|
[
"Apache-2.0"
] | 1
|
2020-07-12T22:40:42.000Z
|
2020-07-12T22:40:42.000Z
|
tensorflow_probability/python/distributions/transformed_distribution.py
|
nagachika/probability
|
2a5609ceec01a388ec03b583b4f8e813cfbad981
|
[
"Apache-2.0"
] | 2
|
2019-08-01T18:31:41.000Z
|
2019-08-01T19:42:15.000Z
|
tensorflow_probability/python/distributions/transformed_distribution.py
|
nagachika/probability
|
2a5609ceec01a388ec03b583b4f8e813cfbad981
|
[
"Apache-2.0"
] | 1
|
2020-04-17T18:01:47.000Z
|
2020-04-17T18:01:47.000Z
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
__all__ = [
"ConditionalTransformedDistribution",
"TransformedDistribution",
]
# The following helper functions attempt to statically perform a TF operation.
# These functions make debugging easier since we can do more validation during
# graph construction.
def _pick_scalar_condition(pred, cond_true, cond_false):
"""Convenience function which chooses the condition based on the predicate."""
# Note: This function is only valid if all of pred, cond_true, and cond_false
# are scalars. This means its semantics are arguably more like tf.cond than
# tf.where even though we use tf.where to implement it.
pred_ = tf.get_static_value(tf.convert_to_tensor(value=pred))
if pred_ is None:
return tf.where(pred, cond_true, cond_false)
return cond_true if pred_ else cond_false
def _is_scalar_from_shape_tensor(shape):
"""Returns `True` `Tensor` if `Tensor` shape implies a scalar."""
return prefer_static.equal(prefer_static.rank_from_shape(shape), 0)
def _default_kwargs_split_fn(kwargs):
"""Default `kwargs` `dict` getter."""
return (kwargs.get("distribution_kwargs", {}),
kwargs.get("bijector_kwargs", {}))
class TransformedDistribution(distribution_lib.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
We now describe how a `TransformedDistribution` alters the input/outputs of a
`Distribution` associated with a random variable (rv) `X`.
Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function
of random variable `Y`; write the probability density function
`pdf(Y=y) := d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative with respect to `Y`
evaluated at `y`. Assume that `Y = g(X)` where `g` is a deterministic
diffeomorphism, i.e., a non-random, continuous, differentiable, and invertible
function. Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for
the Jacobian of `g` evaluated at `x`.
A `TransformedDistribution` implements the following operations:
* `sample`
Mathematically: `Y = g(X)`
Programmatically: `bijector.forward(distribution.sample(...))`
* `log_prob`
Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y)
+ (log o abs o det o J o g^{-1})(y)`
Programmatically: `(distribution.log_prob(bijector.inverse(y))
+ bijector.inverse_log_det_jacobian(y))`
* `log_cdf`
Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)`
Programmatically: `distribution.log_cdf(bijector.inverse(x))`
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.log(y), axis=-1))),
name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tfp.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(
shift=-1.,
scale_identity_multiplier=2.),
name="NormalTransformedDistribution")
```
A `TransformedDistribution`'s batch- and event-shape are implied by the base
distribution unless explicitly overridden by `batch_shape` or `event_shape`
arguments. Specifying an overriding `batch_shape` (`event_shape`) is
permitted only if the base distribution has scalar batch-shape (event-shape).
The bijector is applied to the distribution as if the distribution possessed
the overridden shape(s). The following example demonstrates how to construct a
multivariate Normal as a `TransformedDistribution`.
```python
ds = tfp.distributions
# We will create two MVNs with batch_shape = event_shape = 2.
mean = [[-1., 0], # batch:0
[0., 1]] # batch:1
chol_cov = [[[1., 0],
[0, 1]], # batch:0
[[1, 0],
[2, 2]]] # batch:1
mvn1 = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov),
batch_shape=[2], # Valid because base_distribution.batch_shape == [].
event_shape=[2]) # Valid because base_distribution.event_shape == [].
mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov)
# mvn1.log_prob(x) == mvn2.log_prob(x)
```
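A quick numerical check of the `log_prob` formula above (an illustrative addition,
not part of the original examples):
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
    distribution=ds.Normal(loc=0., scale=1.),
    bijector=ds.bijectors.Exp())
y = 2.
# Change of variables: log p_Y(y) = log p_X(log y) - log y, so
# log_normal.log_prob(y) should agree with
# ds.Normal(loc=0., scale=1.).log_prob(tf.math.log(y)) - tf.math.log(y).
```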
"""
def __init__(self,
distribution,
bijector,
batch_shape=None,
event_shape=None,
kwargs_split_fn=_default_kwargs_split_fn,
validate_args=False,
parameters=None,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`.
batch_shape: `integer` vector `Tensor` which overrides `distribution`
`batch_shape`; valid only if `distribution.is_scalar_batch()`.
event_shape: `integer` vector `Tensor` which overrides `distribution`
`event_shape`; valid only if `distribution.is_scalar_event()`.
kwargs_split_fn: Python `callable` which takes a kwargs `dict` and returns
a tuple of kwargs `dict`s for each of the `distribution` and `bijector`
parameters respectively.
Default value: `_default_kwargs_split_fn` (i.e.,
`lambda kwargs: (kwargs.get('distribution_kwargs', {}),
kwargs.get('bijector_kwargs', {}))`)
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
parameters: Locals dict captured by subclass constructor, to be used for
copy/slice re-instantiation operations.
name: Python `str` name prefixed to Ops created by this class. Default:
`bijector.name + distribution.name`.
"""
parameters = dict(locals()) if parameters is None else parameters
name = name or (("" if bijector is None else bijector.name) +
(distribution.name or ""))
with tf.name_scope(name) as name:
self._kwargs_split_fn = (_default_kwargs_split_fn
if kwargs_split_fn is None
else kwargs_split_fn)
# For convenience we define some handy constants.
self._zero = tf.constant(0, dtype=tf.int32, name="zero")
self._empty = tf.constant([], dtype=tf.int32, name="empty")
# We will keep track of a static and dynamic version of
# self._is_{batch,event}_override. This way we can do more prior to graph
# execution, including possibly raising Python exceptions.
self._override_batch_shape = self._maybe_validate_shape_override(
batch_shape, distribution.is_scalar_batch(), validate_args,
"batch_shape")
self._is_batch_override = prefer_static.logical_not(
prefer_static.equal(
prefer_static.rank_from_shape(self._override_batch_shape),
self._zero))
self._is_maybe_batch_override = bool(
tf.get_static_value(self._override_batch_shape) is None or
tf.get_static_value(self._override_batch_shape).size != 0)
self._override_event_shape = self._maybe_validate_shape_override(
event_shape, distribution.is_scalar_event(), validate_args,
"event_shape")
self._is_event_override = prefer_static.logical_not(
prefer_static.equal(
prefer_static.rank_from_shape(self._override_event_shape),
self._zero))
self._is_maybe_event_override = bool(
tf.get_static_value(self._override_event_shape) is None or
tf.get_static_value(self._override_event_shape).size != 0)
# To convert a scalar distribution into a multivariate distribution we
# will draw dims from the sample dims, which are otherwise iid. This is
# easy to do except in the case that the base distribution has batch dims
# and we're overriding event shape. When that case happens the event dims
# will incorrectly be to the left of the batch dims. In this case we'll
# cyclically permute left the new dims.
self._needs_rotation = prefer_static.reduce_all([
self._is_event_override,
prefer_static.logical_not(self._is_batch_override),
prefer_static.logical_not(distribution.is_scalar_batch())])
override_event_ndims = prefer_static.rank_from_shape(
self._override_event_shape)
self._rotate_ndims = _pick_scalar_condition(
self._needs_rotation, override_event_ndims, 0)
# We'll be reducing the head dims (if at all), i.e., this will be []
# if we don't need to reduce.
self._reduce_event_indices = tf.range(
self._rotate_ndims - override_event_ndims, self._rotate_ndims)
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def __getitem__(self, slices):
# Because slicing is parameterization-dependent, we only implement slicing
# for instances of TD, not subclasses thereof.
if type(self) is not TransformedDistribution: # pylint: disable=unidiomatic-typecheck
return super(TransformedDistribution, self).__getitem__(slices)
if tensorshape_util.rank(self.distribution.batch_shape) is None:
raise NotImplementedError(
"Slicing TransformedDistribution with underlying distribution of "
"unknown rank is not yet implemented")
overrides = {}
if (tensorshape_util.rank(self.distribution.batch_shape) == 0 and
self.parameters.get("batch_shape", None) is not None):
overrides["batch_shape"] = tf.shape(
input=tf.zeros(self.parameters["batch_shape"])[slices])
elif self.parameters.get("distribution", None) is not None:
overrides["distribution"] = self.distribution[slices]
return self.copy(**overrides)
def _event_shape_tensor(self):
return self.bijector.forward_event_shape_tensor(
distribution_util.pick_vector(
self._is_event_override,
self._override_event_shape,
self.distribution.event_shape_tensor()))
def _event_shape(self):
# If there's a chance that the event_shape has been overridden, we return
# what we statically know about the `event_shape_override`. This works
# because: `_is_maybe_event_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `event_shape` or we do.
#
# Since the `bijector` may change the `event_shape`, we then forward what we
# know to the bijector. This allows the `bijector` to have final say in the
# `event_shape`.
static_override = tensorshape_util.constant_value_as_shape(
self._override_event_shape)
return self.bijector.forward_event_shape(
static_override
if self._is_maybe_event_override
else self.distribution.event_shape)
def _batch_shape_tensor(self):
return distribution_util.pick_vector(
self._is_batch_override,
self._override_batch_shape,
self.distribution.batch_shape_tensor())
def _batch_shape(self):
# If there's a chance that the batch_shape has been overridden, we return
# what we statically know about the `batch_shape_override`. This works
# because: `_is_maybe_batch_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `batch_shape` or we do.
#
# Notice that this implementation parallels the `_event_shape` except that
# the `bijector` doesn't get to alter the `batch_shape`. Recall that
# `batch_shape` is a property of a distribution while `event_shape` is
# shared between both the `distribution` instance and the `bijector`.
static_override = tensorshape_util.constant_value_as_shape(
self._override_batch_shape)
return (static_override
if self._is_maybe_batch_override
else self.distribution.batch_shape)
def _sample_n(self, n, seed=None, **distribution_kwargs):
sample_shape = prefer_static.concat([
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty),
], axis=0)
x = self.distribution.sample(sample_shape=sample_shape, seed=seed,
**distribution_kwargs)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name):
sample_shape = tf.convert_to_tensor(
value=sample_shape, dtype=tf.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, **distribution_kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = tf.shape(input=x)[1:]
final_shape = tf.concat([sample_shape, batch_event_shape], 0)
x = tf.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
y = self.bijector.forward(x, **bijector_kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
def _log_prob(self, y, **kwargs):
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(
y, x, ildj, event_ndims, **distribution_kwargs)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(
y, x_i, ildj_i, event_ndims, **distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return tf.reduce_logsumexp(input_tensor=tf.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims,
**distribution_kwargs):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
log_prob = tf.reduce_sum(
input_tensor=log_prob, axis=self._reduce_event_indices)
log_prob += tf.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
tensorshape_util.set_shape(
log_prob,
tf.broadcast_static_shape(
tensorshape_util.with_rank_at_least(y.shape, 1)[:-event_ndims],
self.batch_shape))
return log_prob
def _prob(self, y, **kwargs):
if not hasattr(self.distribution, "_prob"):
return tf.exp(self.log_prob(y, **kwargs))
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(
y, x, ildj, event_ndims, **distribution_kwargs)
prob_on_fibers = [
self._finish_prob_for_one_fiber(
y, x_i, ildj_i, event_ndims, **distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims,
**distribution_kwargs):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
prob = tf.reduce_prod(input_tensor=prob, axis=self._reduce_event_indices)
prob *= tf.exp(tf.cast(ildj, prob.dtype))
if self._is_maybe_event_override and isinstance(event_ndims, int):
tensorshape_util.set_shape(
prob,
tf.broadcast_static_shape(
tensorshape_util.with_rank_at_least(y.shape, 1)[:-event_ndims],
self.batch_shape))
return prob
def _log_cdf(self, y, **kwargs):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_cdf(x, **distribution_kwargs)
def _cdf(self, y, **kwargs):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.cdf(x, **distribution_kwargs)
def _log_survival_function(self, y, **kwargs):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_survival_function(x, **distribution_kwargs)
def _survival_function(self, y, **kwargs):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.survival_function(x, **distribution_kwargs)
def _quantile(self, value, **kwargs):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# which implies the qth quantile of Y is g(x_q).
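# For example, if Y = exp(X) with X ~ Normal(loc, scale), the 0.5 quantile
# (median) of Y is exp(loc).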
inv_cdf = self.distribution.quantile(value, **distribution_kwargs)
return self.bijector.forward(inv_cdf, **bijector_kwargs)
def _mean(self, **kwargs):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("mean is not implemented for non-affine "
"bijectors")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
x = self.distribution.mean(**distribution_kwargs)
if self._is_maybe_batch_override or self._is_maybe_event_override:
# A batch (respectively event) shape override is only allowed if the batch
# (event) shape of the base distribution is [], so concatenating all the
# shapes does the right thing.
new_shape = prefer_static.concat([
prefer_static.ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor(),
prefer_static.ones_like(self._override_event_shape),
self.distribution.event_shape_tensor(),
], 0)
x = tf.reshape(x, new_shape)
new_shape = prefer_static.concat(
[self.batch_shape_tensor(),
self.event_shape_tensor()], 0)
x = tf.broadcast_to(x, new_shape)
y = self.bijector.forward(x, **bijector_kwargs)
sample_shape = tf.convert_to_tensor(
value=[], dtype=tf.int32, name="sample_shape")
y = self._set_sample_static_shape(y, sample_shape)
return y
def _entropy(self, **kwargs):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("entropy is not implemented")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("entropy is not implemented when "
"bijector is not injective.")
distribution_kwargs, bijector_kwargs = self._kwargs_split_fn(kwargs)
# Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
# can be shown that:
# H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
# If is_constant_jacobian then:
# E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
# where c can be anything.
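# For example, for an affine bijector Y = a * X + b this gives
# H[Y] = H[X] + log|a|.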
entropy = self.distribution.entropy(**distribution_kwargs)
if self._is_maybe_event_override:
# H[X] = sum_i H[X_i] if X_i are mutually independent.
# This means that a reduce_sum is a simple rescaling.
entropy *= tf.cast(
tf.reduce_prod(input_tensor=self._override_event_shape),
dtype=dtype_util.base_dtype(entropy.dtype))
if self._is_maybe_batch_override:
new_shape = tf.concat([
prefer_static.ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor()
], 0)
entropy = tf.reshape(entropy, new_shape)
multiples = tf.concat([
self._override_batch_shape,
prefer_static.ones_like(self.distribution.batch_shape_tensor())
], 0)
entropy = tf.tile(entropy, multiples)
dummy = prefer_static.zeros(
shape=tf.concat(
[self.batch_shape_tensor(), self.event_shape_tensor()],
0),
dtype=self.dtype)
event_ndims = (
tensorshape_util.rank(self.event_shape)
if tensorshape_util.rank(self.event_shape) is not None else tf.size(
input=self.event_shape_tensor()))
ildj = self.bijector.inverse_log_det_jacobian(
dummy, event_ndims=event_ndims, **bijector_kwargs)
entropy -= tf.cast(ildj, entropy.dtype)
tensorshape_util.set_shape(entropy, self.batch_shape)
return entropy
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = tf.convert_to_tensor(
value=override_shape, dtype=tf.int32, name=name)
if not dtype_util.is_integer(override_shape.dtype):
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape_tensor(override_shape)
if tf.get_static_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if tensorshape_util.rank(override_shape.shape) is not None:
if tensorshape_util.rank(override_shape.shape) != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [
assert_util.assert_rank(
override_shape, 1, message="shape override must be a vector")
]
if tf.get_static_value(override_shape) is not None:
if any(s < 0 for s in tf.get_static_value(override_shape)):
raise ValueError("shape override must have non-negative elements")
elif validate_args:
dynamic_assertions += [
assert_util.assert_non_negative(
override_shape,
message="shape override must have non-negative elements")
]
is_both_nonscalar = prefer_static.logical_and(
prefer_static.logical_not(base_is_scalar),
prefer_static.logical_not(override_is_scalar))
if tf.get_static_value(is_both_nonscalar) is not None:
if tf.get_static_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [
assert_util.assert_equal(
is_both_nonscalar, False, message="base distribution not scalar")
]
if not dynamic_assertions:
return override_shape
return distribution_util.with_dependencies(
dynamic_assertions, override_shape)
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
needs_rotation_const = tf.get_static_value(self._needs_rotation)
if needs_rotation_const is not None and not needs_rotation_const:
return x
ndims = prefer_static.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
perm = prefer_static.concat([
prefer_static.range(n, ndims), prefer_static.range(0, n)], axis=0)
return tf.transpose(a=x, perm=perm)
def _maybe_get_static_event_ndims(self):
if tensorshape_util.rank(self.event_shape) is not None:
return tensorshape_util.rank(self.event_shape)
event_ndims = tf.size(input=self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
class ConditionalTransformedDistribution(TransformedDistribution):
"""A TransformedDistribution that allows intrinsic conditioning."""
@deprecation.deprecated(
"2019-07-01",
"`ConditionalTransformedDistribution` is no longer required; "
"`TransformedDistribution` top-level functions now pass-through "
"`**kwargs`.",
warn_once=True)
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
return super(ConditionalTransformedDistribution, cls).__new__(cls)
| 44.469653
| 92
| 0.695057
|
64eb65d1ee9cc975d88d3b144b3084cd83a02010
| 4,220
|
py
|
Python
|
examples/common/python/connectors/direct/tcs_listener/tcs_worker_encryption_key_handler.py
|
AvalonRelease5/avalon
|
0bd78b907fba0896c512678b5b560c3f358e787c
|
[
"Apache-2.0"
] | null | null | null |
examples/common/python/connectors/direct/tcs_listener/tcs_worker_encryption_key_handler.py
|
AvalonRelease5/avalon
|
0bd78b907fba0896c512678b5b560c3f358e787c
|
[
"Apache-2.0"
] | null | null | null |
examples/common/python/connectors/direct/tcs_listener/tcs_worker_encryption_key_handler.py
|
AvalonRelease5/avalon
|
0bd78b907fba0896c512678b5b560c3f358e787c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import crypto.crypto as crypto
from error_code.error_status import WorkerError
from jsonrpc.exceptions import JSONRPCDispatchException
logger = logging.getLogger(__name__)
# No of bytes of encryptionKeyNonce to encrypt data
NO_OF_BYTES = 16
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class WorkerEncryptionKeyHandler:
"""
    WorkerEncryptionKeyHandler processes Worker Encryption Key Direct API
    requests.
    It is used if the Worker supports requester-specific encryption keys in
    addition to, or instead of, the encryptionKey defined in section Appendix A:
    Worker Specific Detailed Data.
All raised exceptions will be caught and handled by any
jsonrpc.dispatcher.Dispatcher delegating work to this handler. In our case,
the exact dispatcher will be the one configured by the TCSListener in the
./tcs_listener.py
"""
# ------------------------------------------------------------------------------------------------
def __init__(self, kv_helper):
"""
Function to perform init activity
Parameters:
            - kv_helper is an object wrapping the lmdb database
"""
self.kv_helper = kv_helper
# ---------------------------------------------------------------------------------------------
def EncryptionKeySet(self, **params):
"""
Function to process set encryption key request.
Parameters:
            - param is the 'param' object in a worker request as per TCF
API 6.1.11 Set Encryption Key Request Payload
"""
raise JSONRPCDispatchException(
WorkerError.INVALID_PARAMETER_FORMAT_OR_VALUE,
"Operation is not supported. Hence invalid parameter")
# ---------------------------------------------------------------------------------------------
def EncryptionKeyGet(self, **params):
"""
Function to process get encryption key request.
Parameters:
            - param is the 'param' object in a worker request as per TCF
API 6.1.10 Get Encryption Key Request Payload
"""
worker_id = str(params['workerId'])
value = self.kv_helper.get("workers", worker_id)
if value is None:
raise JSONRPCDispatchException(
WorkerError.INVALID_PARAMETER_FORMAT_OR_VALUE,
"Worker id not found in the database. Hence invalid parameter")
worker_type_data = json.loads(value).get(
"details").get("workerTypeData")
encryptionKey = worker_type_data["encryptionKey"]
try:
encryptionKeyNonce = worker_type_data["encryptionKeyNonce"]
        except KeyError:
encryptionKeyNonce = crypto.random_bit_string(NO_OF_BYTES)
tag = ""
# calculate signature
concat_string = worker_id.encode('UTF-8') + encryptionKey.encode(
'UTF-8') + encryptionKeyNonce.encode('UTF-8') + tag.encode('UTF-8')
concat_hash = bytes(concat_string)
hash_1 = crypto.compute_message_hash(concat_hash)
s1 = crypto.byte_array_to_base64(hash_1)
# Requires worker private key to sign.
# signature = self.PrivateKey.SignMessage(hash)
result = {
"workerId": worker_id,
"encryptionKey": encryptionKey,
"encryptionKeyNonce": encryptionKeyNonce,
"tag": "",
"signature": s1,
}
return result
# ---------------------------------------------------------------------------------------------
| 37.678571
| 98
| 0.618009
|
4a79aedfd54a0fe66fba31a8ec102a707e448c98
| 6,860
|
py
|
Python
|
heronpy/api/bolt/window_bolt.py
|
pjfanning/incubator-heron
|
7db7c24733bd7e66ecfe704ea65f864d1fff4adc
|
[
"Apache-2.0"
] | 3,348
|
2016-05-25T16:04:31.000Z
|
2018-03-28T17:46:14.000Z
|
heronpy/api/bolt/window_bolt.py
|
pjfanning/incubator-heron
|
7db7c24733bd7e66ecfe704ea65f864d1fff4adc
|
[
"Apache-2.0"
] | 1,542
|
2016-05-25T16:46:44.000Z
|
2018-03-29T17:30:23.000Z
|
heronpy/api/bolt/window_bolt.py
|
pjfanning/incubator-heron
|
7db7c24733bd7e66ecfe704ea65f864d1fff4adc
|
[
"Apache-2.0"
] | 702
|
2016-05-25T16:07:43.000Z
|
2018-03-27T06:31:07.000Z
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''window_bolt.py: API for defining windowed bolts in Heron'''
from abc import abstractmethod
from collections import namedtuple, deque
import time
from heronpy.api.bolt.bolt import Bolt
from heronpy.api import api_constants
from heronpy.api.state.stateful_component import StatefulComponent
WindowContext = namedtuple('WindowContext', ('start', 'end'))
class SlidingWindowBolt(Bolt, StatefulComponent):
"""SlidingWindowBolt is a higer level bolt for Heron users who want to deal with
batches of tuples belonging to a certain time window. This bolt keeps track of
managing the window, adding/expiring tuples based on window configuration.
This way users will just have to deal with writing processWindow function
"""
WINDOW_DURATION_SECS = 'slidingwindowbolt_duration_secs'
WINDOW_SLIDEINTERVAL_SECS = 'slidingwindowbolt_slideinterval_secs'
# pylint: disable=attribute-defined-outside-init
def init_state(self, stateful_state):
self.saved_state = stateful_state
# pylint: disable=unused-argument
def pre_save(self, checkpoint_id):
self.saved_state['tuples'] = self.current_tuples
@abstractmethod
def processWindow(self, window_info, tuples):
"""The main interface that needs to be implemented.
This function is called every WINDOW_SLIDEINTERVAL_SECS seconds
and contains the data in the last WINDOW_DURATION_SECS seconds
in a list tuples
:type window_info: :class:`WindowContext`
:param window_info: The information about the window
:type tuples: :class:`list of Tuples`
:param tuples: The list of tuples in this window
"""
# pylint: disable=unused-argument
def initialize(self, config, context):
"""We initialize the window duration and slide interval
"""
if SlidingWindowBolt.WINDOW_DURATION_SECS in config:
self.window_duration = int(config[SlidingWindowBolt.WINDOW_DURATION_SECS])
else:
self.logger.fatal("Window Duration has to be specified in the config")
if SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS in config:
self.slide_interval = int(config[SlidingWindowBolt.WINDOW_SLIDEINTERVAL_SECS])
else:
self.slide_interval = self.window_duration
if self.slide_interval > self.window_duration:
self.logger.fatal("Slide Interval should be <= Window Duration")
# By modifying the config, we are able to setup the tick timer
config[api_constants.TOPOLOGY_TICK_TUPLE_FREQ_SECS] = str(self.slide_interval)
self.current_tuples = deque()
if hasattr(self, 'saved_state'):
if 'tuples' in self.saved_state:
self.current_tuples = self.saved_state['tuples']
def process(self, tup):
"""Process a single tuple of input
We add the (time, tuple) pair into our current_tuples. And then look for expiring
    elements
"""
curtime = int(time.time())
self.current_tuples.append((tup, curtime))
self._expire(curtime)
def _expire(self, tm):
while len(self.current_tuples) > 0:
if tm - self.window_duration > self.current_tuples[0][1]:
(tup, _) = self.current_tuples.popleft()
self.ack(tup)
else:
break
# pylint: disable=unused-argument
# pylint: disable=unused-variable
def process_tick(self, tup):
"""Called every slide_interval
"""
curtime = int(time.time())
window_info = WindowContext(curtime - self.window_duration, curtime)
tuple_batch = []
for (tuple_, tm) in self.current_tuples:
tuple_batch.append(tuple_)
self.processWindow(window_info, tuple_batch)
self._expire(curtime)
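# Illustrative sketch (editor's addition, not part of the original module): a
# minimal user bolt showing how the abstract processWindow hook is typically
# implemented. The 'count' output field and the emit() payload are assumptions
# made purely for illustration.
class _ExampleWindowCountBolt(SlidingWindowBolt):
  outputs = ['count']

  def processWindow(self, window_info, tuples):
    # Emit how many tuples fell inside [window_info.start, window_info.end).
    self.emit([len(tuples)])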
class TumblingWindowBolt(Bolt, StatefulComponent):
"""TumblingWindowBolt is a higer level bolt for Heron users who want to deal with
batches of tuples belonging to a certain time window. This bolt keeps track of
managing the window, adding/expiring tuples based on window configuration.
This way users will just have to deal with writing processWindow function
"""
WINDOW_DURATION_SECS = 'tumblingwindowbolt_duration_secs'
# pylint: disable=attribute-defined-outside-init
def init_state(self, stateful_state):
self.saved_state = stateful_state
# pylint: disable=unused-argument
def pre_save(self, checkpoint_id):
self.saved_state['tuples'] = self.current_tuples
@abstractmethod
def processWindow(self, window_info, tuples):
"""The main interface that needs to be implemented.
This function is called every WINDOW_DURATION_SECS seconds
and contains the data in the last WINDOW_DURATION_SECS seconds
in a list tuples
:type window_info: :class:`WindowContext`
:param window_info: The information about the window
:type tuples: :class:`list of Tuples`
:param tuples: The list of tuples in this window
"""
# pylint: disable=unused-argument
def initialize(self, config, context):
"""We initialize the window duration and slide interval
"""
if TumblingWindowBolt.WINDOW_DURATION_SECS in config:
self.window_duration = int(config[TumblingWindowBolt.WINDOW_DURATION_SECS])
else:
self.logger.fatal("Window Duration has to be specified in the config")
# By modifying the config, we are able to setup the tick timer
config[api_constants.TOPOLOGY_TICK_TUPLE_FREQ_SECS] = str(self.window_duration)
self.current_tuples = deque()
if hasattr(self, 'saved_state'):
if 'tuples' in self.saved_state:
self.current_tuples = self.saved_state['tuples']
def process(self, tup):
"""Process a single tuple of input
We simply add the tuple into our current_tuples.
"""
self.current_tuples.append(tup)
# pylint: disable=unused-argument
# pylint: disable=unused-variable
def process_tick(self, tup):
"""Called every window_duration
"""
curtime = int(time.time())
window_info = WindowContext(curtime - self.window_duration, curtime)
self.processWindow(window_info, list(self.current_tuples))
for tuple_ in self.current_tuples:
self.ack(tuple_)
self.current_tuples.clear()
| 37.692308
| 85
| 0.737172
|
2fea8a9d70b1bd43d7dcaf6be79aca3578470574
| 552
|
py
|
Python
|
cx_Freeze/samples/openpyxl/test_openpyxl.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 358
|
2020-07-02T13:00:02.000Z
|
2022-03-29T10:03:57.000Z
|
cx_Freeze/samples/openpyxl/test_openpyxl.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 372
|
2020-07-02T20:47:57.000Z
|
2022-03-31T19:35:05.000Z
|
cx_Freeze/samples/openpyxl/test_openpyxl.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 78
|
2020-07-09T14:24:03.000Z
|
2022-03-22T19:06:52.000Z
|
#!/usr/bin/env python
# NOTE: this code is the sample code found in the openpyxl documentation which
# can be found at https://openpyxl.readthedocs.io/en/default.
from openpyxl import Workbook
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
ws["A1"] = 42
# Rows can also be appended
ws.append([1, 2, 3])
# Python types will automatically be converted
import datetime
ws["A2"] = datetime.datetime.now()
# Save the file
fileName = "sample.xlsx"
wb.save(fileName)
print("Wrote file", fileName)
| 19.714286
| 78
| 0.731884
|
94511dde67831616cc29c4fc7f1784a7a54f99b8
| 1,444
|
py
|
Python
|
pyppium/driver.py
|
leomenezessz/pyppium
|
1ef2b27901fb67070bdc882eef987266804a33ce
|
[
"MIT"
] | 17
|
2020-07-19T16:29:08.000Z
|
2021-08-20T20:28:16.000Z
|
pyppium/driver.py
|
leomenezessz/pyppium
|
1ef2b27901fb67070bdc882eef987266804a33ce
|
[
"MIT"
] | 12
|
2020-07-21T06:42:48.000Z
|
2020-09-05T18:08:16.000Z
|
pyppium/driver.py
|
leomenezessz/pyppium
|
1ef2b27901fb67070bdc882eef987266804a33ce
|
[
"MIT"
] | 6
|
2020-07-19T16:28:56.000Z
|
2020-11-12T00:29:51.000Z
|
from os import environ
from appium import webdriver
from pyppium import exception
from pyppium.settings import config
_driver: webdriver.Remote
class PyppiumDriver(object):
def __init__(
self,
caps,
appium_url=config["driver"]["appium_url"],
use_browserstack=False,
**kwargs,
):
global _driver
if caps is None:
raise exception.CapabilitiesNoneException("Capabilities cannot be None!")
if use_browserstack:
user = (
environ["BROWSERSTACK_USERNAME"]
if "user" not in kwargs
else kwargs["user"]
)
keys = (
environ["BROWSERSTACK_ACCESS_KEY"]
if "keys" not in kwargs
else kwargs["keys"]
)
_driver = webdriver.Remote(
f"http://{user}:{keys}{config['driver']['browserstack_url']}",
caps,
)
return
_driver = webdriver.Remote(appium_url, caps)
@staticmethod
def quit():
if _driver is not None:
_driver.quit()
@staticmethod
def instance() -> webdriver.Remote:
return _driver
def platform_name():
return PyppiumDriver.instance().desired_capabilities.get("platformName").lower()
def is_android():
return platform_name() == "android"
def is_ios():
return platform_name() == "ios"
| 22.5625
| 85
| 0.567867
|
38143d3293d51ba65466d53dfc95cb4881f4f217
| 2,957
|
py
|
Python
|
lib/fast_rcnn/bbox_transform.py
|
autumnqin/caffe_multi_gpu
|
20d94967691352588f354d45385886bc0ab8a5aa
|
[
"BSD-2-Clause"
] | null | null | null |
lib/fast_rcnn/bbox_transform.py
|
autumnqin/caffe_multi_gpu
|
20d94967691352588f354d45385886bc0ab8a5aa
|
[
"BSD-2-Clause"
] | null | null | null |
lib/fast_rcnn/bbox_transform.py
|
autumnqin/caffe_multi_gpu
|
20d94967691352588f354d45385886bc0ab8a5aa
|
[
"BSD-2-Clause"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import numpy as np
def bbox_transform(ex_rois, gt_rois):
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
if (gt_widths <= 0).any():
print "gt_widths:\n{}\n{}".format(gt_rois[:, 2], gt_rois[:, 0])
if (gt_heights <= 0).any():
print "gt_heights:\n{}\n{}".format(gt_rois[:, 3], gt_rois[:, 1])
if (ex_widths <= 0).any():
print "ex_widths:\n{}\n{}".format(ex_rois[:, 2], ex_rois[:, 0])
if (ex_heights <= 0).any():
print "ex_heights:\n{}\n{}".format(ex_rois[:, 3], ex_rois[:, 1])
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
def bbox_transform_inv(boxes, deltas):
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
"""
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
| 34.383721
| 79
| 0.551573
|
268c77abf90159e91c7c74c80e1face78874add1
| 5,519
|
py
|
Python
|
django/smartcity/vagent/backups/test_vagent.py
|
nvitha/Smart-Cities
|
9a4cb29b143956bb73789e4af2619cde681393be
|
[
"MIT"
] | null | null | null |
django/smartcity/vagent/backups/test_vagent.py
|
nvitha/Smart-Cities
|
9a4cb29b143956bb73789e4af2619cde681393be
|
[
"MIT"
] | null | null | null |
django/smartcity/vagent/backups/test_vagent.py
|
nvitha/Smart-Cities
|
9a4cb29b143956bb73789e4af2619cde681393be
|
[
"MIT"
] | null | null | null |
'''
This python script will listen to the defined vip address for specific
topics. The user can modify the settings.py file to set the specific
topics to listen to.
With a volttron activated shell this script can be run like:
python standalonelistener.py
This script prints all output to standard out rather than using the
logging facilities.
This script will also publish a heart beat (which will be returned if
listening to the heartbeat topic).
Example output to standard out:
{"topic": "heartbeat/standalonelistener",
"headers": {"Date": "2015-10-22 15:22:43.184351Z", "Content-Type": "text/plain"},
"message": "2015-10-22 15:22:43.184351Z"}
{"topic": "devices/building/campus/hotwater/heater/resistive/information/power/part_realpwr_avg",
"headers": {"Date": "2015-10-22 00:45:15.480339"},
"message": [{"part_realpwr_avg": 0.0}, {"part_realpwr_avg": {"units": "percent", "tz": "US/Pacific", "type": "float"}}]}
The heartbeat message is a simple plain text message with just a date stamp
A "data" message contains an array of 2 elements. The first element
contains a dictionary of (point name: value) pairs. The second element
contains context around the point data and the "Date" header.
'''
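# Illustrative sketch (editor's addition): the settings module imported below is
# expected to provide roughly the following names; the concrete values shown here
# are assumptions for illustration, not the project's actual configuration.
#
#   def remote_url():
#       return 'tcp://127.0.0.1:22916?serverkey=<key>&publickey=<key>&secretkey=<key>'
#   topics_prefixes_to_watch = ('heartbeat', 'devices')
#   heartbeat_period = 20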
from datetime import datetime
import os
import sys
import json
import gevent
import logging
from gevent.core import callback
from smartcity.platform import get_home, set_home
from smartcity.platform.messaging import headers as headers_mod
from smartcity.platform.vip.agent import Agent, PubSub, Core
from smartcity.platform.agent import utils
# These are the options that can be set from the settings module.
from django.smartcity.vagent.settings import remote_url, topics_prefixes_to_watch, heartbeat_period
# Setup logging so that we could use it if we needed to.
utils.setup_logging()
_log = logging.getLogger(__name__)
# Agents need access to VOLTTRON_HOME even if running in standalone mode
# to keep track of keys. This sets a default home.
set_home()
logging.basicConfig(
    level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d-%y %H:%M:%S')
class StandAloneListener(Agent):
''' A standalone version of the ListenerAgent'''
def onmessage(self, peer, sender, bus, topic, headers, message):
'''Handle incoming messages on the bus.'''
d = {'SA_topic': topic, 'SA_headers': headers, 'SA_message': message}
writer = json.dumps(d)+'\n'
sys.stdout.write(writer)
outfile = open('django_listen.log', 'w')
outfile.write(writer)
outfile.close()
@Core.receiver('onstart')
def start(self, sender, **kwargs):
'''Handle the starting of the agent.
Subscribe to all points in the topics_prefix_to_watch tuple
defined in settings.py.
'''
for prefix in topics_prefixes_to_watch:
sys.stdout.write('connecting to prefix: {}\n'.format(prefix))
self.vip.pubsub.subscribe(peer='pubsub',
prefix=prefix,
callback=self.onmessage).get(timeout=5)
# Demonstrate periodic decorator and settings access
@Core.periodic(heartbeat_period)
def publish_heartbeat(self):
'''Send heartbeat message every heartbeat_period seconds.
heartbeat_period is set and can be adjusted in the settings module.
'''
sys.stdout.write('publishing heartbeat.\n')
now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
#'AgentID': self._agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
headers_mod.DATE: now,
}
self.vip.pubsub.publish(
'pubsub', 'heartbeat/standalonelistener', headers,
now).get(timeout=5)
    @PubSub.subscribe('pubsub', '/django/service')
def publish_message(self):
        '''Publish a heartbeat message when a request arrives on the
        /django/service topic (i.e. when called from the Django server).
'''
sys.stdout.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~CALLED FROM THE DJANGO SERVER~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.\n')
sys.stdout.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~CALLED FROM THE DJANGO SERVER~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.\n')
now = datetime.utcnow().isoformat(' ') + 'Z'
headers = {
# 'AgentID': self._agent_id,
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
headers_mod.DATE: now,
}
self.vip.pubsub.publish(
'pubsub', 'heartbeat/standalonelistener', headers,
now).get(timeout=5)
if __name__ == '__main__':
try:
# If stdout is a pipe, re-open it line buffered
if utils.isapipe(sys.stdout):
# Hold a reference to the previous file object so it doesn't
# get garbage collected and close the underlying descriptor.
stdout = sys.stdout
sys.stdout = os.fdopen(stdout.fileno(), 'w', 1)
print(remote_url())
agent = StandAloneListener(address=remote_url(),
identity='django')
task = gevent.spawn(agent.core.run)
try:
task.join()
finally:
task.kill()
except KeyboardInterrupt:
pass
| 38.326389
| 146
| 0.623664
|
254f298cd6e30eb5763688934134c6d886a3d7a6
| 1,118
|
py
|
Python
|
data_stru_nd_algo/prime_vi.py
|
vishwasks32/python3-learning
|
39f39238428727ef0c97c74c8de2570bd84da403
|
[
"Apache-2.0"
] | 3
|
2018-02-08T21:09:27.000Z
|
2021-06-15T04:48:46.000Z
|
analysis_of_algorithms/prime_vi.py
|
vishwasks32/python3-learning
|
39f39238428727ef0c97c74c8de2570bd84da403
|
[
"Apache-2.0"
] | null | null | null |
analysis_of_algorithms/prime_vi.py
|
vishwasks32/python3-learning
|
39f39238428727ef0c97c74c8de2570bd84da403
|
[
"Apache-2.0"
] | 1
|
2018-02-08T21:09:31.000Z
|
2018-02-08T21:09:31.000Z
|
#!/usr/bin/env python3
#
# Author: Vishwas K Singh
# Email: vishwasks32@gmail.com
# Generate the list of prime numbers less than a given number
import math
def prime_nums_list(n):
''' Sieve of Eratosthenes returns list of prime numbers'''
p_list = list()
t_list = list()
for i in range(2,n):
p_list.append(i)
for i in range(2,math.ceil(math.sqrt(n)),1):
t_list.append(i)
k_list = p_list.copy()
for i in range(len(t_list)):
for j in range(len(p_list)):
if (p_list[j] != t_list[i]) and (p_list[j] % t_list[i] == 0):
if p_list[j] in k_list:
k_list.remove(p_list[j])
return k_list
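# Illustrative example (editor's addition): with this implementation,
# prime_nums_list(20) returns [2, 3, 5, 7, 11, 13, 17, 19].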
def prime_factors(n):
''' Prime Factor Expansion'''
prime_factors = list()
for d in range(2,n):
if d*d <= n:
while n %d == 0:
prime_factors.append(d)
n = int(n/d)
if n>1:
prime_factors.append(n)
return prime_factors
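# Illustrative example (editor's addition): prime_factors(360) returns
# [2, 2, 2, 3, 3, 5], i.e. the factorisation 2^3 * 3^2 * 5.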
if __name__=='__main__':
num = int(input("Enter the number: "))
prime_facts = prime_factors(num)
print(prime_facts)
| 22.816327
| 73
| 0.575134
|
396b2267bec27c9eea8644466b6e3bb0fb2d26d4
| 714
|
py
|
Python
|
seeker/migrations/0006_auto_20190625_1139.py
|
salsburj/django-seeker
|
679b9e022949a3f2ef28fb6f9ced0bac88d65fd8
|
[
"BSD-2-Clause"
] | null | null | null |
seeker/migrations/0006_auto_20190625_1139.py
|
salsburj/django-seeker
|
679b9e022949a3f2ef28fb6f9ced0bac88d65fd8
|
[
"BSD-2-Clause"
] | null | null | null |
seeker/migrations/0006_auto_20190625_1139.py
|
salsburj/django-seeker
|
679b9e022949a3f2ef28fb6f9ced0bac88d65fd8
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-06-25 15:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('seeker', '0005_auto_20180330_1708'),
]
operations = [
migrations.AlterModelOptions(
name='advancedsavedsearch',
options={'ordering': ('name',), 'verbose_name': 'Advanced Saved Search', 'verbose_name_plural': 'Advanced Saved Searches'},
),
migrations.AlterModelOptions(
name='savedsearch',
options={'ordering': ('name',), 'verbose_name': 'Saved Search', 'verbose_name_plural': 'Saved Searches'},
),
]
| 29.75
| 135
| 0.630252
|
34cdaee75252552bd64be202946ed6672b3153b9
| 343
|
py
|
Python
|
Solutions1/countPrimes.py
|
mohamedsugal/Leetcode-Solutions
|
c67720af4cb36d07d758c57efffac7a28e4f8b9f
|
[
"MIT"
] | 3
|
2020-11-12T06:51:44.000Z
|
2021-09-19T00:26:33.000Z
|
Solutions1/countPrimes.py
|
mohamedsugal/Leetcode-Solutions
|
c67720af4cb36d07d758c57efffac7a28e4f8b9f
|
[
"MIT"
] | null | null | null |
Solutions1/countPrimes.py
|
mohamedsugal/Leetcode-Solutions
|
c67720af4cb36d07d758c57efffac7a28e4f8b9f
|
[
"MIT"
] | null | null | null |
import math
def countPrimes(n):
if n < 2:
return 0
isPrime = [1] * n
isPrime[0] = isPrime[1] = 0
    for i in range(2, int(math.sqrt(n)) + 1):
if isPrime[i] != 0:
for j in range(i * i, n, i):
isPrime[j] = 0
return [c for c, v in enumerate(isPrime) if v != 0]
print(countPrimes(10))
| 19.055556
| 55
| 0.504373
|
d18cc4f9e910c8cf02d6543d985e92af8a6750c0
| 1,398
|
py
|
Python
|
config.py
|
Bangys/unnamed-demo
|
34484107d8831cfa0158e51572d6680d92ff236d
|
[
"MIT"
] | 1
|
2019-06-03T14:56:05.000Z
|
2019-06-03T14:56:05.000Z
|
config.py
|
Bangys/unnamed-demo
|
34484107d8831cfa0158e51572d6680d92ff236d
|
[
"MIT"
] | 3
|
2021-03-31T19:10:28.000Z
|
2021-12-13T20:03:54.000Z
|
config.py
|
Bangys/unnamed-demo
|
34484107d8831cfa0158e51572d6680d92ff236d
|
[
"MIT"
] | null | null | null |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = '435hiud6gdb73bsjh^&G&^fg3bh*&GH'
# MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')
# MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
# MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
# ['true', 'on', '1']
# MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
# MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
# FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
# FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN') or 'admin'
FLASKY_PWD = os.environ.get('FLASKY_PWD') or 'admin'
# SSL_REDIRECT = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SALT = 'your salt'
# SQLALCHEMY_RECORD_QUERIES = True
# FLASKY_POSTS_PER_PAGE = 20
# FLASKY_FOLLOWERS_PER_PAGE = 50
# FLASKY_COMMENTS_PER_PAGE = 30
# FLASKY_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:123456@localhost/tsure'
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:123456@localhost/test_tsure'
WTF_CSRF_ENABLED = False
config = {
'default': DevelopmentConfig,
'testing': TestingConfig,
}
| 29.744681
| 80
| 0.688841
|
6b549034805331aae7c61cd2c650153f5c5c7dcc
| 6,111
|
py
|
Python
|
cdsfeatures.py
|
AlexOrlek/MOBtyping
|
a7d454669dbb7e2d337118e36d4979d7fe8e2fae
|
[
"MIT"
] | 1
|
2018-03-26T09:00:42.000Z
|
2018-03-26T09:00:42.000Z
|
cdsfeatures.py
|
AlexOrlek/MOBtyping
|
a7d454669dbb7e2d337118e36d4979d7fe8e2fae
|
[
"MIT"
] | null | null | null |
cdsfeatures.py
|
AlexOrlek/MOBtyping
|
a7d454669dbb7e2d337118e36d4979d7fe8e2fae
|
[
"MIT"
] | null | null | null |
import sys, pickle
sys.path.append('./mobtypingmodules')
from mobmods import unlist
from Bio import Entrez, SeqIO
#this script extracts details on CDS features of untyped plasmid(s) from a Genbank file, or from a tsv file; then appends these details to pickle.CDSfeatures_original dictionary
CDSfeatures_dict={}
accessions=[]
#check file availability
try:
fileObj=open("./untyped_plasmids/%s_CDSfeatures.tsv" %sys.argv[1]) #sys.argv[1] is the name of the untyped plasmid(s)
fileObj.close()
availablefile="tsv"
except:
try:
fileObj=open("./untyped_plasmids/%s.gb" %sys.argv[1])
fileObj.close()
availablefile="gb"
except:
availablefile="none"
#extract CDS feature information and create pickle.CDSfeatures
if availablefile=="tsv": #if there is a tsv file, extract CDS feature information from there
fileObj=open("./untyped_plasmids/%s_CDSfeatures.tsv" %sys.argv[1]) #sys.argv[1] is the name of the untyped plasmid(s)
for line in fileObj:
line=line.strip()
data=line.split('\t')
accession=data[0]
if accession in accessions:
pass
else:
accessions.append(accession)
start=data[1]
start=start.strip('<')
start=start.strip('>')
end=data[2]
end=end.strip('<')
end=end.strip('>')
annotation=data[3]
plasmidlength=data[4]
difference=(int(start)-int(end))*int(-1) #exclude misannotated start-ends (if CDS start-end length is close to plasmid length)
if int(difference)>(int(plasmidlength)-int(10)):
continue
else:
CDSrange=(start,end, annotation)
#append to dictionary
if accession not in CDSfeatures_dict:
CDSfeatures_dict[accession]=[CDSrange]
else:
CDSfeatures_dict[accession].append(CDSrange)
fileObj.close()
picklefileObj=open("./plasmiddatabase/pickle.CDSfeatures_original", "rb")
CDSfeatures_original=pickle.load(picklefileObj)
num_accessions_original=len(CDSfeatures_original)
picklefileObj.close()
    CDSfeatures_original.update(CDSfeatures_dict) #add CDSfeatures info from untyped plasmid(s) to CDSfeatures_original dictionary
picklefileObj=open("./plasmiddatabase/pickle.CDSfeatures_%s" %sys.argv[1], "wb")
pickle.dump(CDSfeatures_original, picklefileObj)
picklefileObj.close()
print "extracted CDS feature information from tsv file"
else:
if availablefile=="gb": #if there is a genbank file, extract CDS feature information from there
fileObj=open("./untyped_plasmids/%s.gb" %sys.argv[1])
fileObj2=open("./untyped_plasmids/%s_CDSfeatures.tsv" %sys.argv[1],"w")
for record in SeqIO.parse(fileObj, "genbank"):
accession=record.id
accessions.append(accession)
plasmidlength=len(record.seq)
for feature in record.features:
start=str(feature.location.start)
start=start.strip('<')
start=start.strip('>')
end=str(feature.location.end)
end=end.strip('<')
end=end.strip('>')
try:
annotation=unlist(feature.qualifiers["product"])
except KeyError:
annotation="-"
difference=(int(start)-int(end))*int(-1) #exclude misannotated start-ends (if CDS start-end length is close to plasmid length)
if int(difference)>(int(plasmidlength)-int(10)):
continue
else:
CDSrange=(start,end, annotation)
#append to dictionary
if accession not in CDSfeatures_dict:
CDSfeatures_dict[accession]=[CDSrange]
else:
CDSfeatures_dict[accession].append(CDSrange)
#write to tsv file
fileObj2.write('%s\t%s\t%s\t%s\t%s\n' %(accession, start, end, annotation, plasmidlength))
fileObj.close()
fileObj2.close()
picklefileObj=open("./plasmiddatabase/pickle.CDSfeatures_original", "rb")
CDSfeatures_original=pickle.load(picklefileObj)
num_accessions_original=len(CDSfeatures_original)
picklefileObj.close()
        CDSfeatures_original.update(CDSfeatures_dict) #add CDSfeatures info from untyped plasmid(s) to CDSfeatures_original dictionary
picklefileObj=open("./plasmiddatabase/pickle.CDSfeatures_%s" %sys.argv[1], "wb")
pickle.dump(CDSfeatures_original, picklefileObj)
picklefileObj.close()
print "extracted CDS feature information from gb file"
elif availablefile=="none": #if CDS feature details are not provided as gb or tsv, just use pickle.CDSfeatures_original (CDS feature information from untyped plasmid(s) will remain missing)
print "CDS features of untyped plasmid(s) not provided; check file extensions are correct (.gb or _CDSfeatures.tsv); pickle.CDSfeatures_original will be used"
picklefileObj=open("./plasmiddatabase/pickle.CDSfeatures_original", "rb")
CDSfeatures_original=pickle.load(picklefileObj)
num_accessions_original=len(CDSfeatures_original)
picklefileObj.close()
picklefileObj=open("./plasmiddatabase/pickle.CDSfeatures_%s" %sys.argv[1], "wb")
pickle.dump(CDSfeatures_original, picklefileObj)
picklefileObj.close()
else:
print "code error"
print(num_accessions_original, "number of accessions included in pickle.CDSfeatures_original")
print(len(CDSfeatures_original), "number of accessions included in pickle.CDSfeatures")
print(accessions, "untyped plasmid accession ids")
if availablefile!="none":
for accession in accessions:
        print(CDSfeatures_original[accession], "CDSfeatures dictionary contents for accession: %s" %accession)
| 42.734266
| 194
| 0.639503
|
8d50585bd3e9f81f454dd45f609327c5817b96ff
| 2,221
|
py
|
Python
|
examples/dfp/v201608/placement_service/update_placements.py
|
agencia-watermelons/googleads-python-lib
|
d2e55863ecf7e5090c225d74b3f4c1f948cd5a21
|
[
"Apache-2.0"
] | null | null | null |
examples/dfp/v201608/placement_service/update_placements.py
|
agencia-watermelons/googleads-python-lib
|
d2e55863ecf7e5090c225d74b3f4c1f948cd5a21
|
[
"Apache-2.0"
] | null | null | null |
examples/dfp/v201608/placement_service/update_placements.py
|
agencia-watermelons/googleads-python-lib
|
d2e55863ecf7e5090c225d74b3f4c1f948cd5a21
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a single placement to allow for AdSense targeting.
To determine which placements exist, run get_all_placements.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
PLACEMENT_ID = 'INSERT_PLACEMENT_ID_HERE'
def main(client, placement_id):
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201608')
# Create query.
values = [{
'key': 'placementId',
'value': {
'xsi_type': 'NumberValue',
'value': placement_id
}
}]
query = 'WHERE id = :placementId'
statement = dfp.FilterStatement(query, values, 1)
# Get placements by statement.
placements = placement_service.getPlacementsByStatement(
statement.ToStatement())
for placement in placements:
if not placement['targetingDescription']:
placement['targetingDescription'] = 'Generic description'
placement['targetingAdLocation'] = 'All images on sports pages.'
placement['targetingSiteName'] = 'http://code.google.com'
placement['isAdSenseTargetingEnabled'] = 'true'
# Update placements remotely.
placements = placement_service.updatePlacements(placements)
for placement in placements:
print ('Placement with id \'%s\', name \'%s\', and AdSense targeting '
'enabled \'%s\' was updated.'
% (placement['id'], placement['name'],
placement['isAdSenseTargetingEnabled']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, PLACEMENT_ID)
| 33.149254
| 79
| 0.717244
|
3a7a6c0cd0c6cf3104b1eb7f89d594da2ea84122
| 714
|
py
|
Python
|
script/SpiderLianjia/SpiderLianjia/items.py
|
jiming-liu/overpick_Scrapy
|
0fd9995f6a4560285804371608216ec527c2a793
|
[
"MIT"
] | null | null | null |
script/SpiderLianjia/SpiderLianjia/items.py
|
jiming-liu/overpick_Scrapy
|
0fd9995f6a4560285804371608216ec527c2a793
|
[
"MIT"
] | null | null | null |
script/SpiderLianjia/SpiderLianjia/items.py
|
jiming-liu/overpick_Scrapy
|
0fd9995f6a4560285804371608216ec527c2a793
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SpiderlianjiaItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
houseTitle = scrapy.Field()
houseCity = scrapy.Field()
houseCityURL = scrapy.Field()
houseName = scrapy.Field()
housePublishedTime = scrapy.Field()
housePrice = scrapy.Field()
houseHistoryPrice = scrapy.Field()
houseArea = scrapy.Field()
houseAddress = scrapy.Field()
houseDistrict = scrapy.Field()
houseBaiduLongitude = scrapy.Field()
houseBaiduLatitude = scrapy.Field()
pass
| 24.62069
| 51
| 0.686275
|
076becc4c9d3bcdcb5db62dcdf349db1313d912b
| 2,983
|
py
|
Python
|
docs/confluence/epsgrams/epsboxplot.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-12-07T23:10:50.000Z
|
2022-02-19T03:01:49.000Z
|
docs/confluence/epsgrams/epsboxplot.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2019-01-04T15:43:30.000Z
|
2022-03-31T09:48:15.000Z
|
docs/confluence/epsgrams/epsboxplot.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2019-01-07T14:36:33.000Z
|
2021-09-06T14:48:36.000Z
|
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
# importing Magics module
from Magics.macro import *
# Setting of the output file name
ref = "epsboxplot"
output = output(output_formats = ['png'],
output_name_first_page_number = "off",
output_width = 600,
output_name = ref,
super_page_y_length=10.,
super_page_x_length=20.)
# define the cartesian projection
epsprojection = mmap(
subpage_map_projection='cartesian',
subpage_x_axis_type='date',
subpage_x_automatic='on',
subpage_y_axis_type='regular',
subpage_y_automatic='on',
subpage_y_position=3.,
)
# define horizontal axis
horizontal = maxis(
axis_orientation='horizontal',
axis_type='date',
axis_date_type='days',
axis_days_label='both',
axis_days_label_colour='Rgb(0.5, 0.5, 0.5)',
axis_days_label_height=0.35,
axis_grid='on',
axis_grid_colour='Rgb(0.5, 0.5, 0.5)',
axis_grid_line_style='solid',
axis_line_colour='grey',
axis_minor_tick='on',
axis_minor_tick_colour='grey',
axis_months_label_colour='Rgb(0.5, 0.5, 0.5)',
axis_months_label_height=0.3,
axis_tick_colour='grey',
axis_years_label_colour='Rgb(0.5, 0.5, 0.5)',
axis_years_label_height=0.3,
)
# define vertical axis
vertical = maxis(
axis_orientation='vertical',
axis_grid='on',
axis_grid_colour='Rgb(0.5, 0.5, 0.5)',
axis_grid_line_style='dash',
axis_line='on',
axis_line_colour='grey',
axis_tick='on',
axis_tick_label='on',
)
cc = mwrepjson(wrepjson_family='eps',
wrepjson_keyword='eps',
wrepjson_input_filename='cloud_box.json',
wrepjson_parameter_information='Cloud Cover Using BoxPlot',
wrepjson_parameter='164.128',
wrepjson_parameter_scaling_factor=8.)
box = mepsgraph(eps_box_border_colour='Rgb(0.5, 0.5, 0.5)',
eps_box_border_thickness=2,
eps_box_colour='Rgb(0.925, 0.609, 0.953)',
eps_box_width=3.)
#definition of the title
lines = ["<json_info key='parameter_info'/>",
"Forecast from <json_info key='date'/> for location <json_info key='location'/>"
]
title = mtext(
text_lines=lines,
text_html='true',
text_colour='black',
text_font_size=0.6,
text_mode='positional',
text_box_x_position=1.,
text_box_y_position=8.,
text_box_x_length=10.,
text_box_y_length=2.5,
text_border='off',
text_justification='left',
)
# To the plot
plot(
output, epsprojection, horizontal, vertical, cc, box, title
)
tohtml(ref, output, cc, box)
| 27.878505
| 89
| 0.668455
|
45be31ad43a0572967a191fc536db344bd9216c0
| 607
|
py
|
Python
|
setup.py
|
tagordon/dynamoon
|
b3ef4d13f2d8771c4d1440d96a38cb240a25811a
|
[
"MIT"
] | 1
|
2021-11-25T21:39:22.000Z
|
2021-11-25T21:39:22.000Z
|
setup.py
|
tagordon/dynamoon
|
b3ef4d13f2d8771c4d1440d96a38cb240a25811a
|
[
"MIT"
] | null | null | null |
setup.py
|
tagordon/dynamoon
|
b3ef4d13f2d8771c4d1440d96a38cb240a25811a
|
[
"MIT"
] | null | null | null |
from setuptools import setup
import pathlib
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(name='dynamoon',
version='0.1',
description='Photodynamics for exoplanet+moon systems',
long_description=README,
long_description_content_type="text/markdown",
url='http://github.com/tagordon/dynamoon',
author='Tyler Gordon',
author_email='tagordon@uw.edu',
license='MIT',
packages=['dynamoon'],
install_requires=['numpy',
'scipy',
'astropy'],
zip_safe=False)
| 30.35
| 61
| 0.61944
|
ce37d54bc93ec63e11194840bbeacccaa5ebf08a
| 1,511
|
py
|
Python
|
main.py
|
LeviHuang0824/Lanes_identification
|
5e580e12f4828bc67ce9365115b7cbc9f78b4ed0
|
[
"Unlicense"
] | null | null | null |
main.py
|
LeviHuang0824/Lanes_identification
|
5e580e12f4828bc67ce9365115b7cbc9f78b4ed0
|
[
"Unlicense"
] | null | null | null |
main.py
|
LeviHuang0824/Lanes_identification
|
5e580e12f4828bc67ce9365115b7cbc9f78b4ed0
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import cv2
# use the custom helper module
from packages import module as m
'''
Lane line identification pipeline:
1. Convert the image to grayscale to reduce computation
2. Apply Gaussian blur to reduce image noise
3. Canny edge detection
4. Extract the region of interest (ROI)
5. Hough transform to obtain the coordinates of the line segments
6. Least-squares fit to find the best-fitting lines
'''
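# Illustrative sketch (editor's addition): a minimal version of what the helper
# m.get_edge used below could look like for steps 1-3 above; the blur kernel and
# Canny thresholds are assumptions for illustration, not the project's values.
#
# def get_edge(img):
#     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)   # step 1: grayscale
#     blur = cv2.GaussianBlur(gray, (5, 5), 0)       # step 2: Gaussian blur
#     return cv2.Canny(blur, 50, 150)                # step 3: Canny edge detection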
'''
[] denotes an optional argument
Reading an image with OpenCV: cv2.imread(image file path[, flags])
Flags:
cv2.IMREAD_COLOR, (default) read the image in color (BGR)
cv2.IMREAD_GRAYSCALE, read the image in grayscale
cv2.IMREAD_UNCHANGED, read in color including the alpha channel (BGRA)
'''
video = cv2.VideoCapture("C:\\python_jupyter\\Lanes_Identification\\road.mp4") # create a VideoCapture object
print(video.isOpened())
if video.isOpened(): # check whether the video was opened successfully
    # A video can be viewed as a stack of many consecutive still frames
    # Use a loop to extract single frames for processing
while True:
        success, img = video.read() # read a frame
        if success:
            edge = m.get_edge(img) # edge detection
            roi = m.get_roi(edge) # get the region of interest (ROI)
lines = cv2.HoughLinesP(
image=roi,
rho=3,
theta=np.pi/180,
threshold=30,
minLineLength=50,
maxLineGap=40)
            # get the averaged line equations for the left and right lanes
avglines = m.get_averagelines(lines)
if avglines is not None:
                # get the two line segments to draw
lines = m.get_subline(img, avglines)
                img = m.draw_lines(img, lines) # draw the line segments
            cv2.imshow("pic", img) # show the frame
            k = cv2.waitKey(1) # check for keyboard input
            # exit the loop if the Q key is pressed
            if k == ord("q") or k == ord("Q"):
print("exit")
                cv2.destroyAllWindows() # close the windows
                video.release() # release the video capture
break
else:
print("開啟影片失敗")
| 26.051724
| 97
| 0.567174
|
5fad621b66f65d8ae21d806152ad825eb7779c32
| 1,797
|
py
|
Python
|
powersimdata/utility/tests/test_helpers.py
|
c-voegele/PowerSimData
|
5b1500e573f00a34571316796ff442bfa753871a
|
[
"MIT"
] | 27
|
2021-02-20T20:55:31.000Z
|
2022-02-07T17:27:14.000Z
|
powersimdata/utility/tests/test_helpers.py
|
c-voegele/PowerSimData
|
5b1500e573f00a34571316796ff442bfa753871a
|
[
"MIT"
] | 147
|
2021-01-21T03:55:09.000Z
|
2022-03-28T19:28:03.000Z
|
powersimdata/utility/tests/test_helpers.py
|
c-voegele/PowerSimData
|
5b1500e573f00a34571316796ff442bfa753871a
|
[
"MIT"
] | 27
|
2021-02-03T18:24:47.000Z
|
2022-01-26T08:56:17.000Z
|
import pytest
from powersimdata.utility.helpers import MemoryCache, PrintManager, cache_key
def test_print_is_disabled(capsys):
pm = PrintManager()
pm.block_print()
print("printout are disabled")
captured = capsys.readouterr()
assert captured.out == ""
pm.enable_print()
print("printout are enabled")
captured = capsys.readouterr()
assert captured.out == "printout are enabled\n"
def test_cache_key_valid_types():
key1 = cache_key(["foo", "bar"], 4, "other")
assert (("foo", "bar"), 4, "other") == key1
key2 = cache_key(True)
assert (True,) == key2
key3 = cache_key({1, 2, 2, 3})
assert ((1, 2, 3),) == key3
key4 = cache_key(None)
assert ("null",) == key4
def test_no_collision():
key1 = cache_key([["foo"], ["bar"]])
key2 = cache_key([[["foo"], ["bar"]]])
key3 = cache_key([["foo"], "bar"])
keys = [key1, key2, key3]
assert len(keys) == len(set(keys))
def test_cache_key_unsupported_type():
with pytest.raises(ValueError):
cache_key(object())
def test_cache_key_distinct_types():
assert cache_key(4) != cache_key("4")
def test_mem_cache_put_dict():
cache = MemoryCache()
key = cache_key(["foo", "bar"], 4, "other")
obj = {"key1": 42}
cache.put(key, obj)
assert cache.get(key) == obj
def test_mem_cache_get_returns_copy():
cache = MemoryCache()
key = cache_key("foo", 4)
obj = {"key1": 42}
cache.put(key, obj)
assert id(cache.get(key)) != id(obj)
def test_mem_cache_put_version_never_changes():
cache = MemoryCache()
key = cache_key("foo", 4)
obj = {"key1": "value1"}
cache.put(key, obj)
obj["key2"] = "value2"
assert "key1" in cache.get(key)
assert "key2" not in cache.get(key)
assert "key2" in obj
| 23.96
| 77
| 0.622148
|
748b603ae3c08ef4f69e81deb59f70290865897e
| 430
|
py
|
Python
|
src/cmcandy/Python_language_Answers/_0020.py
|
ch98road/leetcode
|
a9b4be54a169b30f6711809b892dd1f79f2a17e7
|
[
"MIT"
] | null | null | null |
src/cmcandy/Python_language_Answers/_0020.py
|
ch98road/leetcode
|
a9b4be54a169b30f6711809b892dd1f79f2a17e7
|
[
"MIT"
] | null | null | null |
src/cmcandy/Python_language_Answers/_0020.py
|
ch98road/leetcode
|
a9b4be54a169b30f6711809b892dd1f79f2a17e7
|
[
"MIT"
] | 1
|
2020-11-26T03:01:12.000Z
|
2020-11-26T03:01:12.000Z
|
class Solution:
def isValid(self, s: str) -> bool:
stack = []
map = {')': '(', '}': '{', ']': '['}
for c in s:
if c in [')', '}', ']']:
ex = map[c]
if len(stack) > 0 and ex == stack[-1]:
stack.pop()
else:
stack.append(c)
else:
stack.append(c)
return len(stack) == 0
| 28.666667
| 54
| 0.32093
|
f9b14984b18674a9292d2dc2d6e79430f9000dae
| 11,585
|
py
|
Python
|
Z_ALL_FILE/Py/InsUpd._11232020-234.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | null | null | null |
Z_ALL_FILE/Py/InsUpd._11232020-234.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | null | null | null |
Z_ALL_FILE/Py/InsUpd._11232020-234.py
|
omikabir/omEngin
|
b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195
|
[
"Apache-2.0"
] | 1
|
2021-04-29T21:46:02.000Z
|
2021-04-29T21:46:02.000Z
|
import pandas as pd
import os, datetime, time
from datetime import *
#import cx_Oracle, pyodbc, requests, os
#from mysql import *
#from sqlalchemy import create_engine
#user = 'root'
#password = 'admin'
#host = '127.0.0.1:3306'
#db = 'omdb'
#constr = 'mysql+mysqlconnector://' + user + ':' + password + '@' + host + '/' + db
#engine = create_engine(constr, echo=False)
#conn = engine.raw_connection()
#cur = conn.cursor()
def prep_update(lscol,lsval):
hp = ''
if isinstance(lscol, list) and isinstance(lsval, list):
if len(lscol) == len(lsval):
for i in range(len(lscol)):
x = str(lscol[i]) + "='" + str(lsval[i]) + "'"
if hp == '':
hp = x
else:
hp = hp + ',' + x
else:
print('num of col and value are not same')
return hp
elif isinstance(lscol, str) and isinstance(lsval, str):
hp = ""
comma = lsval.count(',')
invertcomma = lsval.count("'")
if invertcomma == (comma+1)*2:
x1 = lscol.split(',')
x2 = lsval.split(',')
print(x1,x2)
for i in range(len(x1)):
x = x1[i] + "=" + x2[i]
if hp == '':
hp = x
else:
hp = hp + ',' + x
if invertcomma <= 2:
x1 = lscol.split(',')
x2 = lsval.split(',')
for i in range(len(x1)):
x = str(x1[i]) + "='" + str(x2[i]) + "'"
if hp == '':
hp = x
else:
hp = hp + ',' + x
return hp
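# Illustrative example (editor's addition): prep_update(['name', 'age'], ['bob', 30])
# returns "name='bob',age='30'", ready to be placed after SET in an UPDATE statement.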
def prep_insert(lscol,lsval):
hp = ''
if isinstance(lscol, list) and isinstance(lsval, list):
if len(lscol) == len(lsval):
ls = []
for i in range(len(lsval)):
ls.append("'" + str(lsval[i]) + "'")
hp = '(' + str.join(',', lscol) + ') values (' + str.join(',', ls) + ')'
else:
hp = "check list values for double color"
print('num of col and value are not same')
return hp
elif isinstance(lscol, str) and isinstance(lsval, str):
hp1 = ""
hp2 = ""
hp = ""
cnt = 0
comma = lsval.count(',')
invertcomma = lsval.count("'")
if invertcomma == (comma+1)*2:
x1 = lscol.split(',')
x2 = lsval.split(',')
for i in range(len(x1)):
if hp1 == '':
hp1 = str(x1[i])
hp2 = str(x2[i])
cnt = cnt + 1
else:
hp1 = hp1 + "," + str(x1[i])
hp2 = hp2 + "," + str(x2[i])
cnt = cnt + 1
hp = '(' + hp1 + ') values (' + hp2 + ')'
return hp
elif invertcomma <= 2:
x1 = lscol.split(',')
x2 = lsval.split(',')
for i in range(len(x1)):
if hp1 == '':
hp1 = str(x1[i])
hp2 = "'" + str(x2[i]) + "'"
cnt = cnt + 1
else:
hp1 = hp1 + "," + str(x1[i])
hp2 = hp2 + "," + "'" + str(x2[i]) + "'"
cnt = cnt + 1
hp = '(' + hp1 + ') values (' + hp2 + ')'
return hp
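# Illustrative example (editor's addition): prep_insert(['name', 'age'], ['bob', 30])
# returns "(name,age) values ('bob','30')", ready to be appended to "insert into <table> ".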
def fetchone_read(rs):
if isinstance(rs, list):
        print('fetchone_read called\n')
ls = []
cnt = 0
for r in rs:
ls1 = list(r)
cnt = cnt + 1
print(cnt , '.', ls1)
ls.append(ls1)
else:
print('list type data required but passed data type is ', type(rs))
def get_key(my_dict, val):
    for key, value in my_dict.items():
        if key == val:
            return value
def dtype_match(db, table, conn, df):
dbcols = []
dbcolType = []
try:
qry = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '" + table + "' ORDER BY ORDINAL_POSITION"
dfx = pd.read_sql(qry, con= conn)
dbcols = dfx['COLUMN_NAME'].to_list()
dbcolType = dfx['DATA_TYPE'].to_list()
except:
qry = 'EXPLAIN ' + db + '.' + table
dfx = pd.read_sql(qry, con= conn)
dbcols = dfx['Field'].to_list()
dbcolType = dfx['Type'].to_list()
dc= zip(dbcols, dbcolType)
dic = dict(dc)
dfcol = df.columns.to_list()
dbcols.sort()
dfcol.sort()
st = ""
q = 0
if dbcols == dfcol:
comment1 = 'column counts matched exactly'
else:
comment1 = 'column counts are not same'
try:
for i in range(len(dbcols)):
dbty = get_key(dic, dbcols[i])
st = dbcols[i]
Y = 0
try:
xdf = df[st]
Y = 1
except:
Y = 0
if Y == 1:
if 'int' in dbty:
df[st] = df[st].astype(int)
elif 'datetime' in dbty or 'timestamp' in dbty:
df[st] = df.apply(lambda x : pd.to_datetime(x[st]).strftime("%Y-%m-%d %H:%M:%S"), axis = 1)
elif dbty == 'date':
df[st] = df.apply(lambda x : pd.to_datetime(x[st]).strftime("%Y-%m-%d"), axis = 1)
q = q + 1
return df
except:
        print(comment1, '-', 'error occurred for dbcols: ', st, ' at position ', q)
#df1['LASTOCCURRENCE'] = pd.to_datetime(df1['LASTOCCURRENCE'],format="%d/%m/%y, %H:%M:%S", errors='raise')
#df1['LASTOCCURRENCE'] = df1.apply(lambda x : pd.to_datetime(x.LASTOCCURRENCE).strftime("%d-%m-%Y h:M"), axis = 1)
def ExInsert(tbl, conn, df):
colname = df.columns.to_list()
q = 0
cr = conn.cursor()
for i in range(len(df)):
lsval = []
q = q + 1
for j in df:
lsval.append(df.loc[i,j])
qry = "insert into " + tbl + ' ' + prep_insert(colname,lsval)
print(qry)
cr.execute(qry)
else:
conn.commit()
print('row inserted: ' + str(q))
return 'row inserted: ' + str(q)
def CheckExist(conn , tbl, colname, values):
qry = "select * from " + tbl + " where " + colname + "='" + values + "'"
dfx = pd.read_sql(qry, conn)
rw = dfx.shape[0]
return rw
def drop_cols(df, col2drop = []):
if len(col2drop) > 0:
cols = df.columns.to_list()
ncols = []
for i in range(len(cols)):
match = 0
for j in range(len(col2drop)):
if cols[i] == col2drop[j]:
match = 1
if match == 0:
ncols.append(cols[i])
ndf = df[ncols]
return ndf
else:
return df
def qrybuilt(tbl, ndf, bycol):
dfx = drop_cols(ndf, bycol)
ncols = dfx.columns.to_list()
lsqry = []
for i in range(len(ndf)):
x = ''
y = ''
for j in range(len(bycol)):
x1 = str(bycol[j]) + "='" + str(ndf.loc[i, bycol[j]]) + "'"
if x == '':
x = x1
else:
x = x + " and " + x1
for n in range(len(ncols)):
a1 = str(ncols[n])
a2 = "'" + str(ndf.loc[i, ncols[n]]) + "'"
if y == '':
y = a1 + '=' + a2
else:
y = y + "," + a1 + '=' + a2
qry = "update " + tbl + ' set ' + y + ' Where ' + x
lsqry.append(qry)
return lsqry
def InsertUpdate(db, tbl, con, df, bycol = False):
allcols = df.columns.to_list()
ndf = dtype_match(db, tbl, con, df)
if isinstance(ndf, pd.DataFrame):
cr = con.cursor()
if bycol == False:
rv = ExInsert(tbl, con, ndf)
else:
if isinstance(bycol, list):
lsqry = qrybuilt(tbl, ndf, bycol)
for i in range(len(lsqry)):
qry = lsqry[i]
try:
cr.execute(qry)
except:
print("failed lsqry get from 'def qrybuilt' ", qry)
con.commit()
elif isinstance(bycol, str):
dfx = ndf.drop(bycol, 1)
colsname = dfx.columns.to_list()
colscond = ndf[bycol].to_list()
q = 0
for i in range(len(colscond)):
vl = colscond[i]
chk = CheckExist(con, tbl, bycol, vl)
ls = []
qry = ''
if chk != 0:
for c1 in dfx:
ls.append(dfx.loc[i,c1])
qry = "update " + tbl + ' set ' + prep_update(colsname,ls) + ' where ' + bycol + "='" + vl + "'"
else:
for c1 in ndf:
ls.append(ndf.loc[i,c1])
qry = "insert into " + tbl + ' ' + prep_insert(allcols,ls)
cr.execute(qry)
q = q + 1
if q <3:
print(qry)
con.commit()
def InsertUpdate_mod(db, tbl, con, df, bycol = False, oncols = False):
allcols = []
if oncols:
allcols = oncols
else:
allcols = df.columns.to_list()
ndf = dtype_match(db, tbl, con, df)
if isinstance(ndf, pd.DataFrame):
cr = con.cursor()
if bycol == False:
rv = ExInsert(tbl, con, ndf)
else:
if isinstance(bycol, str):
dfx = ndf.drop(bycol, 1)
colsname = dfx.columns.to_list()
colscond = ndf[bycol].to_list()
q = 0
for i in range(len(colscond)):
vl = colscond[i]
chk = CheckExist(con, tbl, bycol, vl)
ls = []
qry = ''
if chk != 0:
for c1 in dfx:
ls.append(dfx.loc[i,c1])
qry = "update " + tbl + ' set ' + prep_update(colsname,ls) + ' where ' + bycol + "='" + vl + "'"
else:
for c1 in ndf:
ls.append(ndf.loc[i,c1])
qry = "insert into " + tbl + ' ' + prep_insert(allcols,ls)
cr.execute(qry)
q = q + 1
if q <3:
print(qry)
con.commit()
elif isinstance(bycol, list): # ndf, bycol
dfx = drop_cols(ndf, bycol)
ncols = dfx.columns.to_list()
lsqry = []
for i in range(len(ndf)):
x = ''
y = ''
for j in range(len(bycol)):
x1 = str(bycol[j]) + "='" + str(ndf.loc[i, bycol[j]]) + "'"
if x == '':
x = x1
else:
x = x + " and " + x1
for n in range(len(ncols)):
a1 = str(ncols[n])
a2 = "'" + str(ndf.loc[i, ncols[n]]) + "'"
if y == '':
y = a1 + '=' + a2
else:
y = y + "," + a1 + '=' + a2
qry = "update " + tbl + ' set ' + y + ' Where ' + x
lsqry.append(qry)
print('InsertUpdate_mod qry: ', qry)
return lsqry
| 34.479167
| 120
| 0.408718
|
faca8cb934154621082c26bd1bff606cc375361a
| 14,787
|
py
|
Python
|
fuzzers/005-tilegrid/ps7_int/top.py
|
lromor/prjxray
|
3586df58703a8a9ef1f2dbd4f71311a4074cf172
|
[
"ISC"
] | 1
|
2019-04-18T15:18:35.000Z
|
2019-04-18T15:18:35.000Z
|
fuzzers/005-tilegrid/ps7_int/top.py
|
lromor/prjxray
|
3586df58703a8a9ef1f2dbd4f71311a4074cf172
|
[
"ISC"
] | null | null | null |
fuzzers/005-tilegrid/ps7_int/top.py
|
lromor/prjxray
|
3586df58703a8a9ef1f2dbd4f71311a4074cf172
|
[
"ISC"
] | null | null | null |
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
def write_params(params):
pinstr = 'tile,val\n'
for tile, (val) in sorted(params.items()):
pinstr += '%s,%s\n' % (tile, val)
open('params.csv', 'w').write(pinstr)
def run():
print(
'''
module top(input clk, stb, di, output do);
localparam integer DIN_N = 8;
localparam integer DOUT_N = 8;
reg [DIN_N-1:0] din;
wire [DOUT_N-1:0] dout;
reg [DIN_N-1:0] din_shr;
reg [DOUT_N-1:0] dout_shr;
always @(posedge clk) begin
din_shr <= {din_shr, di};
dout_shr <= {dout_shr, din_shr[DIN_N-1]};
if (stb) begin
din <= din_shr;
dout_shr <= dout;
end
end
assign do = dout_shr[DOUT_N-1];
''')
params = {}
# NOTE: The INT_L tile has been hardcoded and it works only for the part specified in the assertion
assert os.getenv('XRAY_PART') == "xc7z010clg400-1"
for isone in util.gen_fuzz_states(1):
params['INT_L_X0Y50'] = isone
print(
'''
(* KEEP, DONT_TOUCH *)
PS7 dut_%(dut)s(
.DMA0DATYPE (),
.DMA0DAVALID (),
.DMA0DRREADY (),
.DMA0RSTN (),
.DMA1DATYPE (),
.DMA1DAVALID (),
.DMA1DRREADY (),
.DMA1RSTN (),
.DMA2DATYPE (),
.DMA2DAVALID (),
.DMA2DRREADY (),
.DMA2RSTN (),
.DMA3DATYPE (),
.DMA3DAVALID (),
.DMA3DRREADY (),
.DMA3RSTN (),
.EMIOCAN0PHYTX (),
.EMIOCAN1PHYTX (),
.EMIOENET0GMIITXD (),
.EMIOENET0GMIITXEN (),
.EMIOENET0GMIITXER (),
.EMIOENET0MDIOMDC (),
.EMIOENET0MDIOO (),
.EMIOENET0MDIOTN (),
.EMIOENET0PTPDELAYREQRX (),
.EMIOENET0PTPDELAYREQTX (),
.EMIOENET0PTPPDELAYREQRX (),
.EMIOENET0PTPPDELAYREQTX (),
.EMIOENET0PTPPDELAYRESPRX (),
.EMIOENET0PTPPDELAYRESPTX (),
.EMIOENET0PTPSYNCFRAMERX (),
.EMIOENET0PTPSYNCFRAMETX (),
.EMIOENET0SOFRX (),
.EMIOENET0SOFTX (),
.EMIOENET1GMIITXD (),
.EMIOENET1GMIITXEN (),
.EMIOENET1GMIITXER (),
.EMIOENET1MDIOMDC (),
.EMIOENET1MDIOO (),
.EMIOENET1MDIOTN (),
.EMIOENET1PTPDELAYREQRX (),
.EMIOENET1PTPDELAYREQTX (),
.EMIOENET1PTPPDELAYREQRX (),
.EMIOENET1PTPPDELAYREQTX (),
.EMIOENET1PTPPDELAYRESPRX (),
.EMIOENET1PTPPDELAYRESPTX (),
.EMIOENET1PTPSYNCFRAMERX (),
.EMIOENET1PTPSYNCFRAMETX (),
.EMIOENET1SOFRX (),
.EMIOENET1SOFTX (),
.EMIOGPIOO (),
.EMIOGPIOTN (),
.EMIOI2C0SCLO (),
.EMIOI2C0SCLTN (),
.EMIOI2C0SDAO (),
.EMIOI2C0SDATN (),
.EMIOI2C1SCLO (),
.EMIOI2C1SCLTN (),
.EMIOI2C1SDAO (),
.EMIOI2C1SDATN (),
.EMIOPJTAGTDO (),
.EMIOPJTAGTDTN (),
.EMIOSDIO0BUSPOW (),
.EMIOSDIO0BUSVOLT (),
.EMIOSDIO0CLK (),
.EMIOSDIO0CMDO (),
.EMIOSDIO0CMDTN (),
.EMIOSDIO0DATAO (),
.EMIOSDIO0DATATN (),
.EMIOSDIO0LED (),
.EMIOSDIO1BUSPOW (),
.EMIOSDIO1BUSVOLT (),
.EMIOSDIO1CLK (),
.EMIOSDIO1CMDO (),
.EMIOSDIO1CMDTN (),
.EMIOSDIO1DATAO (),
.EMIOSDIO1DATATN (),
.EMIOSDIO1LED (),
.EMIOSPI0MO (),
.EMIOSPI0MOTN (),
.EMIOSPI0SCLKO (),
.EMIOSPI0SCLKTN (),
.EMIOSPI0SO (),
.EMIOSPI0SSNTN (),
.EMIOSPI0SSON (),
.EMIOSPI0STN (),
.EMIOSPI1MO (),
.EMIOSPI1MOTN (),
.EMIOSPI1SCLKO (),
.EMIOSPI1SCLKTN (),
.EMIOSPI1SO (),
.EMIOSPI1SSNTN (),
.EMIOSPI1SSON (),
.EMIOSPI1STN (),
.EMIOTRACECTL (),
.EMIOTRACEDATA (),
.EMIOTTC0WAVEO (),
.EMIOTTC1WAVEO (),
.EMIOUART0DTRN (),
.EMIOUART0RTSN (),
.EMIOUART0TX (),
.EMIOUART1DTRN (),
.EMIOUART1RTSN (),
.EMIOUART1TX (),
.EMIOUSB0PORTINDCTL (),
.EMIOUSB0VBUSPWRSELECT (),
.EMIOUSB1PORTINDCTL (),
.EMIOUSB1VBUSPWRSELECT (),
.EMIOWDTRSTO (),
.EVENTEVENTO (),
.EVENTSTANDBYWFE (),
.EVENTSTANDBYWFI (),
.FCLKCLK (),
.FCLKRESETN (),
.FTMTF2PTRIGACK (),
.FTMTP2FDEBUG (),
.FTMTP2FTRIG (),
.IRQP2F (),
.MAXIGP0ARADDR (),
.MAXIGP0ARBURST (),
.MAXIGP0ARCACHE (),
.MAXIGP0ARESETN (),
.MAXIGP0ARID (),
.MAXIGP0ARLEN (),
.MAXIGP0ARLOCK (),
.MAXIGP0ARPROT (),
.MAXIGP0ARQOS (),
.MAXIGP0ARSIZE (),
.MAXIGP0ARVALID (),
.MAXIGP0AWADDR (),
.MAXIGP0AWBURST (),
.MAXIGP0AWCACHE (),
.MAXIGP0AWID (),
.MAXIGP0AWLEN (),
.MAXIGP0AWLOCK (),
.MAXIGP0AWPROT (),
.MAXIGP0AWQOS (),
.MAXIGP0AWSIZE (),
.MAXIGP0AWVALID (),
.MAXIGP0BREADY (),
.MAXIGP0RREADY (),
.MAXIGP0WDATA (),
.MAXIGP0WID (),
.MAXIGP0WLAST (),
.MAXIGP0WSTRB (),
.MAXIGP0WVALID (),
.MAXIGP1ARADDR (),
.MAXIGP1ARBURST (),
.MAXIGP1ARCACHE (),
.MAXIGP1ARESETN (),
.MAXIGP1ARID (),
.MAXIGP1ARLEN (),
.MAXIGP1ARLOCK (),
.MAXIGP1ARPROT (),
.MAXIGP1ARQOS (),
.MAXIGP1ARSIZE (),
.MAXIGP1ARVALID (),
.MAXIGP1AWADDR (),
.MAXIGP1AWBURST (),
.MAXIGP1AWCACHE (),
.MAXIGP1AWID (),
.MAXIGP1AWLEN (),
.MAXIGP1AWLOCK (),
.MAXIGP1AWPROT (),
.MAXIGP1AWQOS (),
.MAXIGP1AWSIZE (),
.MAXIGP1AWVALID (),
.MAXIGP1BREADY (),
.MAXIGP1RREADY (),
.MAXIGP1WDATA (),
.MAXIGP1WID (),
.MAXIGP1WLAST (),
.MAXIGP1WSTRB (),
.MAXIGP1WVALID (),
.SAXIACPARESETN (),
.SAXIACPARREADY (),
.SAXIACPAWREADY (),
.SAXIACPBID (),
.SAXIACPBRESP (),
.SAXIACPBVALID (),
.SAXIACPRDATA (),
.SAXIACPRID (),
.SAXIACPRLAST (),
.SAXIACPRRESP (),
.SAXIACPRVALID (),
.SAXIACPWREADY (),
.SAXIGP0ARESETN (),
.SAXIGP0ARREADY (),
.SAXIGP0AWREADY (),
.SAXIGP0BID (),
.SAXIGP0BRESP (),
.SAXIGP0BVALID (),
.SAXIGP0RDATA (),
.SAXIGP0RID (),
.SAXIGP0RLAST (),
.SAXIGP0RRESP (),
.SAXIGP0RVALID (),
.SAXIGP0WREADY (),
.SAXIGP1ARESETN (),
.SAXIGP1ARREADY (),
.SAXIGP1AWREADY (),
.SAXIGP1BID (),
.SAXIGP1BRESP (),
.SAXIGP1BVALID (),
.SAXIGP1RDATA (),
.SAXIGP1RID (),
.SAXIGP1RLAST (),
.SAXIGP1RRESP (),
.SAXIGP1RVALID (),
.SAXIGP1WREADY (),
.SAXIHP0ARESETN (),
.SAXIHP0ARREADY (),
.SAXIHP0AWREADY (),
.SAXIHP0BID (),
.SAXIHP0BRESP (),
.SAXIHP0BVALID (),
.SAXIHP0RACOUNT (),
.SAXIHP0RCOUNT (),
.SAXIHP0RDATA (),
.SAXIHP0RID (),
.SAXIHP0RLAST (),
.SAXIHP0RRESP (),
.SAXIHP0RVALID (),
.SAXIHP0WACOUNT (),
.SAXIHP0WCOUNT (),
.SAXIHP0WREADY (),
.SAXIHP1ARESETN (),
.SAXIHP1ARREADY (),
.SAXIHP1AWREADY (),
.SAXIHP1BID (),
.SAXIHP1BRESP (),
.SAXIHP1BVALID (),
.SAXIHP1RACOUNT (),
.SAXIHP1RCOUNT (),
.SAXIHP1RDATA (),
.SAXIHP1RID (),
.SAXIHP1RLAST (),
.SAXIHP1RRESP (),
.SAXIHP1RVALID (),
.SAXIHP1WACOUNT (),
.SAXIHP1WCOUNT (),
.SAXIHP1WREADY (),
.SAXIHP2ARESETN (),
.SAXIHP2ARREADY (),
.SAXIHP2AWREADY (),
.SAXIHP2BID (),
.SAXIHP2BRESP (),
.SAXIHP2BVALID (),
.SAXIHP2RACOUNT (),
.SAXIHP2RCOUNT (),
.SAXIHP2RDATA (),
.SAXIHP2RID (),
.SAXIHP2RLAST (),
.SAXIHP2RRESP (),
.SAXIHP2RVALID (),
.SAXIHP2WACOUNT (),
.SAXIHP2WCOUNT (),
.SAXIHP2WREADY (),
.SAXIHP3ARESETN (),
.SAXIHP3ARREADY (),
.SAXIHP3AWREADY (),
.SAXIHP3BID (),
.SAXIHP3BRESP (),
.SAXIHP3BVALID (),
.SAXIHP3RACOUNT (),
.SAXIHP3RCOUNT (),
.SAXIHP3RDATA (),
.SAXIHP3RID (),
.SAXIHP3RLAST (),
.SAXIHP3RRESP (),
.SAXIHP3RVALID (),
.SAXIHP3WACOUNT (),
.SAXIHP3WCOUNT (),
.SAXIHP3WREADY (),
.DDRA (),
.DDRBA (),
.DDRCASB (),
.DDRCKE (),
.DDRCKN (),
.DDRCKP (),
.DDRCSB (),
.DDRDM (),
.DDRDQ (),
.DDRDQSN (),
.DDRDQSP (),
.DDRDRSTB (),
.DDRODT (),
.DDRRASB (),
.DDRVRN (),
.DDRVRP (),
.DDRWEB (),
.MIO (),
.PSCLK (),
.PSPORB (),
.PSSRSTB (),
.DDRARB (%(dout)u),
.DMA0ACLK (),
.DMA0DAREADY (),
.DMA0DRLAST (),
.DMA0DRTYPE (),
.DMA0DRVALID (),
.DMA1ACLK (),
.DMA1DAREADY (),
.DMA1DRLAST (),
.DMA1DRTYPE (),
.DMA1DRVALID (),
.DMA2ACLK (),
.DMA2DAREADY (),
.DMA2DRLAST (),
.DMA2DRTYPE (),
.DMA2DRVALID (),
.DMA3ACLK (),
.DMA3DAREADY (),
.DMA3DRLAST (),
.DMA3DRTYPE (),
.DMA3DRVALID (),
.EMIOCAN0PHYRX (),
.EMIOCAN1PHYRX (),
.EMIOENET0EXTINTIN (),
.EMIOENET0GMIICOL (),
.EMIOENET0GMIICRS (),
.EMIOENET0GMIIRXCLK (),
.EMIOENET0GMIIRXD (),
.EMIOENET0GMIIRXDV (),
.EMIOENET0GMIIRXER (),
.EMIOENET0GMIITXCLK (),
.EMIOENET0MDIOI (),
.EMIOENET1EXTINTIN (),
.EMIOENET1GMIICOL (),
.EMIOENET1GMIICRS (),
.EMIOENET1GMIIRXCLK (),
.EMIOENET1GMIIRXD (),
.EMIOENET1GMIIRXDV (),
.EMIOENET1GMIIRXER (),
.EMIOENET1GMIITXCLK (),
.EMIOENET1MDIOI (),
.EMIOGPIOI (),
.EMIOI2C0SCLI (),
.EMIOI2C0SDAI (),
.EMIOI2C1SCLI (),
.EMIOI2C1SDAI (),
.EMIOPJTAGTCK (),
.EMIOPJTAGTDI (),
.EMIOPJTAGTMS (),
.EMIOSDIO0CDN (),
.EMIOSDIO0CLKFB (),
.EMIOSDIO0CMDI (),
.EMIOSDIO0DATAI (),
.EMIOSDIO0WP (),
.EMIOSDIO1CDN (),
.EMIOSDIO1CLKFB (),
.EMIOSDIO1CMDI (),
.EMIOSDIO1DATAI (),
.EMIOSDIO1WP (),
.EMIOSPI0MI (),
.EMIOSPI0SCLKI (),
.EMIOSPI0SI (),
.EMIOSPI0SSIN (),
.EMIOSPI1MI (),
.EMIOSPI1SCLKI (),
.EMIOSPI1SI (),
.EMIOSPI1SSIN (),
.EMIOSRAMINTIN (),
.EMIOTRACECLK (),
.EMIOTTC0CLKI (),
.EMIOTTC1CLKI (),
.EMIOUART0CTSN (),
.EMIOUART0DCDN (),
.EMIOUART0DSRN (),
.EMIOUART0RIN (),
.EMIOUART0RX (),
.EMIOUART1CTSN (),
.EMIOUART1DCDN (),
.EMIOUART1DSRN (),
.EMIOUART1RIN (),
.EMIOUART1RX (),
.EMIOUSB0VBUSPWRFAULT (),
.EMIOUSB1VBUSPWRFAULT (),
.EMIOWDTCLKI (),
.EVENTEVENTI (),
.FCLKCLKTRIGN (),
.FPGAIDLEN (),
.FTMDTRACEINATID (),
.FTMDTRACEINCLOCK (),
.FTMDTRACEINDATA (),
.FTMDTRACEINVALID (),
.FTMTF2PDEBUG (),
.FTMTF2PTRIG (),
.FTMTP2FTRIGACK (),
.IRQF2P (),
.MAXIGP0ACLK (),
.MAXIGP0ARREADY (),
.MAXIGP0AWREADY (),
.MAXIGP0BID (),
.MAXIGP0BRESP (),
.MAXIGP0BVALID (),
.MAXIGP0RDATA (),
.MAXIGP0RID (),
.MAXIGP0RLAST (),
.MAXIGP0RRESP (),
.MAXIGP0RVALID (),
.MAXIGP0WREADY (),
.MAXIGP1ACLK (),
.MAXIGP1ARREADY (),
.MAXIGP1AWREADY (),
.MAXIGP1BID (),
.MAXIGP1BRESP (),
.MAXIGP1BVALID (),
.MAXIGP1RDATA (),
.MAXIGP1RID (),
.MAXIGP1RLAST (),
.MAXIGP1RRESP (),
.MAXIGP1RVALID (),
.MAXIGP1WREADY (),
.SAXIACPACLK (),
.SAXIACPARADDR (),
.SAXIACPARBURST (),
.SAXIACPARCACHE (),
.SAXIACPARID (),
.SAXIACPARLEN (),
.SAXIACPARLOCK (),
.SAXIACPARPROT (),
.SAXIACPARQOS (),
.SAXIACPARSIZE (),
.SAXIACPARUSER (),
.SAXIACPARVALID (),
.SAXIACPAWADDR (),
.SAXIACPAWBURST (),
.SAXIACPAWCACHE (),
.SAXIACPAWID (),
.SAXIACPAWLEN (),
.SAXIACPAWLOCK (),
.SAXIACPAWPROT (),
.SAXIACPAWQOS (),
.SAXIACPAWSIZE (),
.SAXIACPAWUSER (),
.SAXIACPAWVALID (),
.SAXIACPBREADY (),
.SAXIACPRREADY (),
.SAXIACPWDATA (),
.SAXIACPWID (),
.SAXIACPWLAST (),
.SAXIACPWSTRB (),
.SAXIACPWVALID (),
.SAXIGP0ACLK (),
.SAXIGP0ARADDR (),
.SAXIGP0ARBURST (),
.SAXIGP0ARCACHE (),
.SAXIGP0ARID (),
.SAXIGP0ARLEN (),
.SAXIGP0ARLOCK (),
.SAXIGP0ARPROT (),
.SAXIGP0ARQOS (),
.SAXIGP0ARSIZE (),
.SAXIGP0ARVALID (),
.SAXIGP0AWADDR (),
.SAXIGP0AWBURST (),
.SAXIGP0AWCACHE (),
.SAXIGP0AWID (),
.SAXIGP0AWLEN (),
.SAXIGP0AWLOCK (),
.SAXIGP0AWPROT (),
.SAXIGP0AWQOS (),
.SAXIGP0AWSIZE (),
.SAXIGP0AWVALID (),
.SAXIGP0BREADY (),
.SAXIGP0RREADY (),
.SAXIGP0WDATA (),
.SAXIGP0WID (),
.SAXIGP0WLAST (),
.SAXIGP0WSTRB (),
.SAXIGP0WVALID (),
.SAXIGP1ACLK (),
.SAXIGP1ARADDR (),
.SAXIGP1ARBURST (),
.SAXIGP1ARCACHE (),
.SAXIGP1ARID (),
.SAXIGP1ARLEN (),
.SAXIGP1ARLOCK (),
.SAXIGP1ARPROT (),
.SAXIGP1ARQOS (),
.SAXIGP1ARSIZE (),
.SAXIGP1ARVALID (),
.SAXIGP1AWADDR (),
.SAXIGP1AWBURST (),
.SAXIGP1AWCACHE (),
.SAXIGP1AWID (),
.SAXIGP1AWLEN (),
.SAXIGP1AWLOCK (),
.SAXIGP1AWPROT (),
.SAXIGP1AWQOS (),
.SAXIGP1AWSIZE (),
.SAXIGP1AWVALID (),
.SAXIGP1BREADY (),
.SAXIGP1RREADY (),
.SAXIGP1WDATA (),
.SAXIGP1WID (),
.SAXIGP1WLAST (),
.SAXIGP1WSTRB (),
.SAXIGP1WVALID (),
.SAXIHP0ACLK (),
.SAXIHP0ARADDR (),
.SAXIHP0ARBURST (),
.SAXIHP0ARCACHE (),
.SAXIHP0ARID (),
.SAXIHP0ARLEN (),
.SAXIHP0ARLOCK (),
.SAXIHP0ARPROT (),
.SAXIHP0ARQOS (),
.SAXIHP0ARSIZE (),
.SAXIHP0ARVALID (),
.SAXIHP0AWADDR (),
.SAXIHP0AWBURST (),
.SAXIHP0AWCACHE (),
.SAXIHP0AWID (),
.SAXIHP0AWLEN (),
.SAXIHP0AWLOCK (),
.SAXIHP0AWPROT (),
.SAXIHP0AWQOS (),
.SAXIHP0AWSIZE (),
.SAXIHP0AWVALID (),
.SAXIHP0BREADY (),
.SAXIHP0RDISSUECAP1EN (),
.SAXIHP0RREADY (),
.SAXIHP0WDATA (),
.SAXIHP0WID (),
.SAXIHP0WLAST (),
.SAXIHP0WRISSUECAP1EN (),
.SAXIHP0WSTRB (),
.SAXIHP0WVALID (),
.SAXIHP1ACLK (),
.SAXIHP1ARADDR (),
.SAXIHP1ARBURST (),
.SAXIHP1ARCACHE (),
.SAXIHP1ARID (),
.SAXIHP1ARLEN (),
.SAXIHP1ARLOCK (),
.SAXIHP1ARPROT (),
.SAXIHP1ARQOS (),
.SAXIHP1ARSIZE (),
.SAXIHP1ARVALID (),
.SAXIHP1AWADDR (),
.SAXIHP1AWBURST (),
.SAXIHP1AWCACHE (),
.SAXIHP1AWID (),
.SAXIHP1AWLEN (),
.SAXIHP1AWLOCK (),
.SAXIHP1AWPROT (),
.SAXIHP1AWQOS (),
.SAXIHP1AWSIZE (),
.SAXIHP1AWVALID (),
.SAXIHP1BREADY (),
.SAXIHP1RDISSUECAP1EN (),
.SAXIHP1RREADY (),
.SAXIHP1WDATA (),
.SAXIHP1WID (),
.SAXIHP1WLAST (),
.SAXIHP1WRISSUECAP1EN (),
.SAXIHP1WSTRB (),
.SAXIHP1WVALID (),
.SAXIHP2ACLK (),
.SAXIHP2ARADDR (),
.SAXIHP2ARBURST (),
.SAXIHP2ARCACHE (),
.SAXIHP2ARID (),
.SAXIHP2ARLEN (),
.SAXIHP2ARLOCK (),
.SAXIHP2ARPROT (),
.SAXIHP2ARQOS (),
.SAXIHP2ARSIZE (),
.SAXIHP2ARVALID (),
.SAXIHP2AWADDR (),
.SAXIHP2AWBURST (),
.SAXIHP2AWCACHE (),
.SAXIHP2AWID (),
.SAXIHP2AWLEN (),
.SAXIHP2AWLOCK (),
.SAXIHP2AWPROT (),
.SAXIHP2AWQOS (),
.SAXIHP2AWSIZE (),
.SAXIHP2AWVALID (),
.SAXIHP2BREADY (),
.SAXIHP2RDISSUECAP1EN (),
.SAXIHP2RREADY (),
.SAXIHP2WDATA (),
.SAXIHP2WID (),
.SAXIHP2WLAST (),
.SAXIHP2WRISSUECAP1EN (),
.SAXIHP2WSTRB (),
.SAXIHP2WVALID (),
.SAXIHP3ACLK (),
.SAXIHP3ARADDR (),
.SAXIHP3ARBURST (),
.SAXIHP3ARCACHE (),
.SAXIHP3ARID (),
.SAXIHP3ARLEN (),
.SAXIHP3ARLOCK (),
.SAXIHP3ARPROT (),
.SAXIHP3ARQOS (),
.SAXIHP3ARSIZE (),
.SAXIHP3ARVALID (),
.SAXIHP3AWADDR (),
.SAXIHP3AWBURST (),
.SAXIHP3AWCACHE (),
.SAXIHP3AWID (),
.SAXIHP3AWLEN (),
.SAXIHP3AWLOCK (),
.SAXIHP3AWPROT (),
.SAXIHP3AWQOS (),
.SAXIHP3AWSIZE (),
.SAXIHP3AWVALID (),
.SAXIHP3BREADY (),
.SAXIHP3RDISSUECAP1EN (),
.SAXIHP3RREADY (),
.SAXIHP3WDATA (),
.SAXIHP3WID (),
.SAXIHP3WLAST (),
.SAXIHP3WRISSUECAP1EN (),
.SAXIHP3WSTRB (),
.SAXIHP3WVALID ()
);
''' % {
'dut': 'site_name',
'dout': isone
})
print("endmodule")
write_params(params)
if __name__ == '__main__':
run()
| 21.681818
| 103
| 0.585785
|
28a6cd67583aaea23b8d40e9061ec596cdb2ce3c
| 34,063
|
py
|
Python
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | 2
|
2021-05-29T12:56:05.000Z
|
2021-10-31T04:56:32.000Z
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T13:12:28.000Z
|
2021-01-30T16:14:04.000Z
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | null | null | null |
"""
.. module:: create
:synopsis: Import a Word `docx` document, define its metadata, cover and rights, and publish it as an EPUB3.
.. moduleauthor:: Gavin Chait <github.com/turukawa>
CreateWork
==========
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and define its
metadata, cover and publishing rights. Currently does not support `odt` since `Pandoc` seems to lose any embedded
graphics.
.. note:: This process will overwrite any existing EPUB3 file of the same name, if it already exists.
Workflow
--------
There are two main publication approaches, stateless and non-stateless. A stateless approach assumes you may be
starting each step discretely (perhaps via a set of one-time network calls), persisting interim data to a working
directory. The second maintains state in memory, so you can complete the process in one step.
The *stateless* publication process runs as follows:
* Set the working directory on creation,
* Define and validate the metadata required for the creative work,
* Copy the `docx` file to import into the working directory,
* Copy the cover image to import into the working directory,
* Define and add any contributors, such as cover artist,
* Update the creative work's publication rights,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective of this workflow is to support what may be a stateless process i.e. the individual steps first bring all
the data required to produce the creative work into a project directory, and then produces it. State does not need
to be maintained between steps.
The *non-stateless* process runs as follows:
* Define and validate the metadata required for the creative work,
* Supply the `docx` file as a base64 string,
* Copy the cover image as a base64 string,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective in a non-stateless workflow is to minimise disruption, and store the minimum amount of information. Only
the epub itself will be saved, and then only because Pandoc does not support a memory-only epub build.
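As an illustrative end-to-end sketch of the non-stateless flow (assuming `metadata` is a dictionary of the terms
described below, and `docx_source` and `cover_source` are base64 strings or bytes already in hand):
.. code-block:: python
    from chapisha.create import CreateWork
    work = CreateWork(directory, metadata=metadata)
    work.set_document(docx_source)
    work.set_cover(cover_source)
    work.set_dedication("For those who remain.")
    work.build()
    work.validate()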
Build your work
---------------
Import **Chapisha** and create a work:
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory)
Where `directory` is the complete path to where you would like the EPUB created. If you want a stateless workflow,
set the `stateless` boolean to `True`. If you already have the `metadata` (perhaps via a web form), you can skip
several steps and pick up again for setting the files and images.
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory, metadata=metadata, stateless=True)
Set metadata
^^^^^^^^^^^^
`Dublin Core <https://www.dublincore.org/specifications/dublin-core/dces/>`_ is a vocabulary of fifteen properties for
use in resource description. Four of them - `title`, `identifier`, `language` and `rights` - are required. The
`language` code is defined by the `ISO 639-1 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>`_ standard
(e.g. `en` for English, or `fr` for French).
Metadata properties:
* `identifier`: UUID, DOI or ISBN of the creative work. A UUID will be generated if not included.
* `title`: Name given to the creative work.
* `language`: Specify the language of the creative work. Two letter code defined by ISO 639-1.
* `creator`: Name of a person, organisation, etc. responsible for the creation of the work. May be more than one.
* `work_uri`: The URI for your creative work.
* `contributor`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work. May be more than one.
* `date`: The publication date of the creative work. Provide in ISO format, YYYY-MM-DD.
* `subject`: The subject, or tag, of the creative work. May be more than one.
* `publisher`: Name of a person, organisation, etc. responsible for making the creative work available.
* `publisher_uri`: The URI for the publisher of your creative work.
* `rights`: A short, single-sentence statement of copyright and publication terms for the creative work, e.g. 'All rights reserved.' or 'Attribution-NonCommercial-ShareAlike 4.0 International.'
* `long_rights`: Lengthier description and information about copyright held in and over the creative work. Formatted as you wish it to appear.
* `description`: A short, single-sentence summary of the creative work.
* `long_description`: The pitch, or jacket-cover, description of the creative work.
Create a paired dictionary of these properties. As example:
.. code-block:: python
METADATA = {
"identifier": "isbn:9780993191459",
"title": "Usan Abasi's Lament",
"description": "Years after the events of \"Lament for the Fallen\", Isaiah tells of the myth of Usan Abasi, who was punished by the Sky God to spend eternity in the form of a brass bowl and imprisoned within a vast termite mountain. Now the ceremony which ensures that Usan Abasi remains dormant has failed, and his ancient evil awakes. A free, stand-alone short-story set in the city of Ewuru and linking \"Lament for the Fallen\" to a forthcoming novel.",
"language": "en",
"creator": ["Gavin Chait"],
"rights": "All rights reserved.",
"long_rights": ["The right of the creator to be identified as the author of the Work has been asserted by them in accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright gives creators space to explore and provides for their long-term ability to sustain themselves from their work. Thank you for buying this work and for complying with copyright laws by not reproducing, scanning, or distributing any part of it without permission. Your support will contribute to future works by the creator."],
"publisher": "Qwyre Publishing",
"publisher_uri": "https://qwyre.com",
"work-uri": "https://gavinchait.com",
"date": "2017-07-23",
"subject": ["science fiction", "african mythology"]
}
Set the metadata:
.. code-block:: python
work.set_metadata(METADATA)
Set document
^^^^^^^^^^^^
Most writers still use `Microsoft Word <https://www.microsoft.com/en-us/microsoft-365/word>`_ as their default work tool.
There are certainly other word processors, but this is the one most people will work with if they intend to be
professionally published as publishers still expect Word `docx` files for editing and markup.
**Chapisha** will create your cover, rights and dedication pages, as well as the table of contents. Your `docx` file
must contain **only** the creative content you wish included in that table of contents. Your document must also be
correctly marked up to ensure proper chapter creation.
EPUB documents will be read on multiple and diverse electronic devices. Don't have any expectations for page
number-dependent formatting. Instead:
* Each chapter must have a title, formatted as `Heading 1`, with lower-level headings formatted for each heading type.
* There must be no title page, contents, or anything else. Chapter 1 starts at the top of the first line of the document.
* Page numbers and other page-specific information will be lost.
* Fonts or typographic formats and alignment will be lost, although `bold` and `italics` will be maintained.
* Images will be maintained.
Once the work is built you can enhance its styling. However, there are still limits in the EPUB3 standard in comparison
to a printed work.
.. code-block:: python
work.set_document(source)
Where `source` is any of the complete path to the source `docx` file, a `bytes` file import, or a `base64` string.
Set cover
^^^^^^^^^
There is, unfortunately, no standardisation on the image size, dimensions or resolution required for an EPUB. However,
a recommendation is an image (`.jpeg`, `.jpg` or `.png`) of 1,600 by 2,400 pixels, and less than 5MB in size. You will
need to create your image (or have someone create it for you) exactly as you wish it to appear on the cover. Nothing
will be added, removed, or changed.
Please also ensure you have the appropriate rights to use the image on your cover. There are more than sufficient
services providing openly-licenced, or even public domain, work for you to use.
.. note:: You can optionally add the image contributor details here, or on the next step. Do not do it in both or the contributor information will be repeated.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.set_cover(source, contributor=CONTRIBUTOR)
Where `source` is the complete path to the image file, a `bytes` file import, or a `base64` string.
Add contributors
^^^^^^^^^^^^^^^^
You may have numerous contributors you wish to acknowledge. Fields are:
* `role`: Contributor identity, based on a specified list of `artist`, `editor` or `translator`.
* `name`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work.
* `terms`: Information about copyright held by the rights-holder in and over their contribution to the creative work. Formatted as you wish it to appear.
* `year`: The year of the contribution or publication of the contributor's work.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.add_contributor(CONTRIBUTOR)
`add_contributor` as many times as you have people or organisations to acknowledge.
Set rights
^^^^^^^^^^
This refers to the `long_rights` you can set, and which you may wish to adjust for presentation on the colophon page.
There are obviously a broad range of rights with which you can release your creative work. Here are two examples which
you can modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Example code:
.. code-block:: python
RIGHTS = [
"You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.",
"In return: You may not use the material for commercial purposes. You must give appropriate credit, provide a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits."
]
work.set_rights(RIGHTS)
Rights terms can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Set dedication
^^^^^^^^^^^^^^
Most creators have a dedication for their work in mind - usually to apologise for all the late nights and impoverishing
returns on their creative efforts.
This is optional, but you can include a dedication page. Each item in the list will be set on a different paragraph.
.. code-block:: python
dedication = [
"For those who leave.",
"For those who remain.",
"For the wings and tail.",
"But most, for her"
]
work.set_dedication(dedication)
The dedication can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Build
^^^^^
The build function is straightforward. Once everything is in place:
.. code-block:: python
work.build()
You will find your EPUB in the directory you specified.
Validate
^^^^^^^^
If you have any doubts as to whether your EPUB is standards compliant, run the validation. This tests the `epub` file
against the standards maintained by the `DAISY Consortium <http://validator.idpf.org/>`_. You can check the file online
at that link. It's the same test.
.. code-block:: python
work.validate()
Output will be `True` or `False`.
"""
import pypandoc
from bs4 import BeautifulSoup
from epubcheck import EpubCheck
from typing import Optional, Literal, List
from urllib.parse import urlparse
from pathlib import Path
import os
import re
import base64
import filetype
from ..models.metadata import WorkMetadata, Contributor
from ..models.matter import Matter, MatterPartition
from ..helpers import pages, formats, coreio as _c
from ..helpers.updatezipfile import UpdateZipFile
class CreateWork:
"""
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and
define its metadata, cover and publishing rights.
If the EPUB file already exists, then publishing this work will overwrite it.
On instantiation, checks `directory` to see if `DEFAULT_METADATA_SETTINGS` is present, loading the required data,
or replacing with specified defaults.
"""
def __init__(self,
directory: Optional[str] = None,
metadata: Optional[WorkMetadata] = None,
stateless: bool = False):
"""
Initialise the CreateWork class.
Parameters
----------
directory: str
A directory path where you would like to save your work.
metadata: WorkMetadata
A model defined by a dictionary of terms.
stateless: bool
Whether your workflow is stateless (default False).
"""
self.stateless = stateless
self.directory = Path(directory)
if self.stateless:
_c.check_path(self.directory)
# Load metadata settings, if exists
try:
_c.check_source(self.directory / _c.DEFAULT_METADATA_SETTINGS)
self.metadata = WorkMetadata(_c.load_json(self.directory / _c.DEFAULT_METADATA_SETTINGS))
self.work_name = self.directory.name # Since will be `.../work-name/`
except FileNotFoundError:
self.metadata = None
self.work_name = None
# Construct the metadata, if it is provided
if metadata:
if isinstance(metadata, WorkMetadata):
metadata = metadata.dict()
self.set_metadata(metadata)
self.source_path = _c.get_helper_path() / "data"
# Set default cover and work bytes
self.work = None
self.cover = None
self.dedication = None
############################################################################
# GATHER WORKING DATA
############################################################################
def get_metadata_schema(self) -> dict:
"""
Return the standard Dublin Core schema permitted for the EPUB3 standard.
Returns
-------
dict
"""
return self.metadata.schema()
def set_metadata(self, metadata: WorkMetadata) -> bool:
"""
Validate metadata values for the permitted Dublin Core schema terms, along with additional metadata. The full
schema, with descriptions, and requirements, is listed by `get_metadata_schema`.
.. note:: The terms `identifier`, `title`, `creator`, `rights` and `language` are required. A random UUID will be assigned if none is provided.
Parameters
----------
metadata: WorkMetadata
A model defined by a dictionary of terms.
Returns
-------
bool
"""
# Dict snake_case fields need to be hyphenated for import
# This as a result of alias names in model
if isinstance(metadata, dict):
for k in [k for k in metadata.keys()]:
hyphenated = "-".join(k.split("_"))
metadata[hyphenated] = metadata.pop(k)
# Rename 'isodate' if it exists
if "isodate" in metadata:
metadata["date"] = metadata.pop("isodate")
# Fix "long-rights" if needed
if "long-rights" in metadata:
metadata["long-rights"] = formats.get_text_paragraphs(metadata["long-rights"])
# Create a temporary WorkMetadata model to hold updated metadata
updated_metadata = WorkMetadata(**metadata)
# And update the original data
# https://fastapi.tiangolo.com/tutorial/body-updates/#partial-updates-with-patch
if self.metadata:
self.metadata = self.metadata.copy(update=updated_metadata.dict(exclude_unset=True))
else:
self.metadata = updated_metadata
work_name = "-".join(["".join([e for e in w if e.isalnum()])
for w in self.metadata.title.lower().split(" ")])
# Set the working directory, if it isn't already, and save metadata there
if not self.work_name:
self.work_name = work_name
self.directory = self.directory / work_name
# If stateless, save the metadata to the working folder
if self.stateless:
_c.check_path(self.directory)
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
return True
def _get_validated_bytes(self,
source: [Path, bytes],
                             base_type: Optional[Literal["cover", "work"]] = None) -> bytes:
"""
Validate a source file, and return a bytes version.
Parameters
----------
source: Path, bytes or base64 string
Filename to open, base64 string, or bytes from an opened file
base_type: Optional, str
Must be one of "cover" or "work" for interpreting base64 mime type
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
Returns
-------
bytes
"""
if not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
if isinstance(source, Path):
try:
_c.check_source(source)
with open(source, "rb") as f:
source = f.read()
except FileNotFoundError:
e = F"`{source}` is not a valid file source."
raise FileNotFoundError(e)
if isinstance(source, str) and base_type:
# Base64 string, remove any provided mime type
source_type = re.search(_c.DEFAULT_BASE64_TYPES[base_type], source)
if source_type:
source = source.replace(source_type.group(0), "")
source = base64.b64decode(source)
if not isinstance(source, bytes):
e = F"File is not valid."
raise FileNotFoundError(e)
return source
def set_document(self, source: [Path, bytes, str]):
"""
Import source `docx` document and, if stateless, save to the working directory. If you're finding errors in
the build step, it could be you need to convert your base64 string to "utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path, bytes, or str
Filename to open, bytes from an opened file, or a base64 string
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
source = self._get_validated_bytes(source, base_type = "work")
if self.stateless:
with open(self.directory / F"{self.work_name}.docx", "wb") as w:
w.write(source)
else:
self.work = source
def set_cover(self,
source: [Path, bytes],
contributor: Optional[Contributor] = None):
"""
Import cover image and, if stateless, save to the working directory, along with any rights and contributor
information. If you're finding errors in the build step, it could be you need to convert your base64 string to
"utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path or bytes
Filename to open, including path, or bytes for file
contributor: Contributor
Optional, string indicating contributor name for cover image.
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting cover."
raise PermissionError(e)
# Cover contributor
if contributor:
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
# Cover image
source = self._get_validated_bytes(source, base_type = "cover")
if self.stateless:
kind = filetype.guess(source).extension
with open(self.directory / F"cover.{kind}", "wb") as w:
w.write(source)
_c.save_json(self.metadata.dict(by_alias=True),
self.directory / _c.DEFAULT_METADATA_SETTINGS,
overwrite=True)
else:
self.cover = source
def add_contributor(self, contributor: Contributor):
"""
Add a contributor to the list of those supporting the creation of the work. `contributor` is defined as a dict:
.. code-block:: python
contributor = {
"role": "artist",
"name": "Great Artist",
"year": "2021",
"terms": "Public Domain."
}
Parameters
----------
contributor: Contributor
Include the types of contributor who supported the creation of the work. `role`: `artist`, `editor`, `translator`.
Raises
------
PermissionError: if metadata not yet validated.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before adding contributors, or add the contributors when you set the metadata."
raise PermissionError(e)
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
def set_dedication(self, dedication: [str, list[str]]):
"""
Set dedication page for creative work. Provide as a string, unless it is on multiple paragraphs.
Parameters
----------
dedication: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting dedication."
raise PermissionError(e)
self.dedication = pages.create_dedication_xhtml(dedication)
if self.stateless:
with open(self.directory / F"dedication.xhtml", "w") as w:
w.write(self.dedication)
def set_rights(self, rights: [str, list[str]]):
"""
Set publication `long_rights` for creative work. Provide as a string, or list of strings if it is on multiple
paragraphs.
There are multiple appropriate rights, and two examples are below. Modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Parameters
----------
rights: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting rights."
raise PermissionError(e)
if isinstance(rights, str):
rights = [rights]
self.metadata.long_rights = rights
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
############################################################################
# BUILD CREATIVE WORK
############################################################################
def build(self):
"""
Automatically build the creative work as a standards compliant EPUB3. Save to the root directory.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before building creative work."
raise PermissionError(e)
epub_path = self.directory.parent / F"{self.work_name}.epub"
# Generate the initial creative content using Pandoc
# pypandoc can't handle PosixPaths ...
if self.stateless:
pypandoc.convert_file(str(self.directory / F"{self.work_name}.docx"),
format="docx",
to="epub3",
outputfile=str(epub_path))
else:
# Maybe one day Pandoc can return an epub object and we won't save the interim file
pypandoc.convert_text(self.work,
format="docx",
to="epub3",
outputfile=str(epub_path))
# Generate the epub version
with UpdateZipFile(epub_path, "a") as w:
# REMOVES
REMOVES = ["EPUB/styles/stylesheet1.css", "EPUB/text/title_page.xhtml", "EPUB/nav.xhtml"]
# DEFAULT COMPONENTS
DEFAULT = [(self.source_path / "css" / "core.css", "EPUB/css/core.css"),
(self.source_path / "images" / "logo.svg", "EPUB/images/logo.svg"),
(self.source_path / "xhtml" / "onix.xml", "EPUB/onix.xml"),
(self.source_path / "xhtml" / "container.xml", "META-INF/container.xml")]
for default_file, write_file in DEFAULT:
w.write(default_file, write_file)
# DEFAULT FONTS
for f in os.listdir(self.source_path / "fonts"):
w.write(self.source_path / "fonts" / f, F"EPUB/fonts/{f}")
# ADD titlepage.xhtml
w.writestr("EPUB/text/titlepage.xhtml", pages.create_titlepage_xhtml(self.metadata))
# ADD colophon.xhtml
w.writestr("EPUB/text/colophon.xhtml", pages.create_colophon_xhtml(self.metadata))
# ADD cover.img
if self.stateless:
for image_path in [self.directory / F"cover.{t}" for t in ["jpg", "jpeg", "png", "gif", "svg"]]:
if image_path.exists():
w.write(image_path, F"EPUB/images/{image_path.name}")
elif self.cover:
t = filetype.guess(self.cover).extension
w.writestr(F"EPUB/images/cover.{t}", self.cover)
# GET DEDICATION and CHAPTERS
spine = []
# check if the path to dedication exists, if it does, add it to the work and spine
if (self.directory / "dedication.xhtml").exists() or self.dedication:
if self.dedication:
w.writestr("EPUB/text/dedication.xhtml", self.dedication)
else:
w.write(self.directory / "dedication.xhtml", "EPUB/text/dedication.xhtml")
spine = [Matter(partition="frontmatter", content="dedication", title="Dedication")]
CHAPTERS = [f for f in w.namelist() if f.startswith("EPUB/text/ch")]
CHAPTERS.sort()
self.metadata.word_count = 0
for chapter in CHAPTERS:
file_as = F"EPUB/text/chapter-{chapter.split('.')[0][-1]}.xhtml"
try:
chapter_xml = w.read(chapter)
except KeyError:
continue
if file_as != chapter:
# If delete and then re-add same file, causes ZipFile confusion
REMOVES.append(chapter)
# Restructure chapter xml into standard format
chapter_xml = pages.restructure_chapter(chapter_xml)
chapter_title = chapter_xml.title.string
# Count the words (XHTML and HTML treated differently by BeautifulSoup, so first extract `section`)
words = BeautifulSoup(str(chapter_xml.section), "lxml").get_text()
self.metadata.word_count += len(words.replace("\n", " ").replace(" ", " ").strip().split())
w.writestr(file_as, str(chapter_xml))
spine.append(Matter(partition=MatterPartition.body, title=chapter_title))
# PANDOC MAY STILL ADD IMAGES FOUND IN THE WORK WHICH WE NEED TO DISCOVER AND ADD TO THE MANIFEST
# NOTE, these are not only to be added to the manifest, but the folder renamed as well
image_manifest = [f.replace("EPUB/", "") for f in w.namelist() if f.startswith("EPUB/images/")]
for img in [f for f in w.namelist() if f.startswith("EPUB/media/")]:
REMOVES.append(img)
new_img = img.replace("/media/", "/images/")
try:
old_img = w.read(img)
w.writestr(new_img, old_img)
except KeyError:
continue
image_manifest.append(new_img.replace("EPUB/", ""))
# ADD content.opf
w.writestr("EPUB/content.opf", pages.create_content_opf(self.metadata, image_manifest, spine))
# ADD toc.ncx
w.writestr("EPUB/toc.ncx", pages.create_toc_ncx(self.metadata, spine))
# ADD toc.xhtml
w.writestr("EPUB/toc.xhtml", pages.create_toc_xhtml(self.metadata, spine))
# PERFORM REMOVES
for remove in REMOVES:
try:
w.remove_file(remove)
except KeyError:
continue
def validate(self) -> bool:
"""
Validate the creative work as a standards compliant EPUB3.
"""
epub_path = self.directory.parent / F"{self.work_name}.epub"
_c.check_source(epub_path)
result = EpubCheck(epub_path)
return result.valid
| 46.661644
| 551
| 0.648622
|
d9695d34f435d9aa053d553c03138c8821cdbea4
| 174
|
py
|
Python
|
nonebot_plugin_analysis_bilibili/config.py
|
RainChain-Zero/Jasmine_Nonebot2_Plugins_Center
|
40e78f01bf020faa66dcffe76070fe8a24687e1b
|
[
"MIT"
] | 1
|
2022-03-29T16:12:43.000Z
|
2022-03-29T16:12:43.000Z
|
nonebot_plugin_analysis_bilibili/config.py
|
RainChain-Zero/Jasmine_Nonebot2_Plugins_Center
|
40e78f01bf020faa66dcffe76070fe8a24687e1b
|
[
"MIT"
] | 1
|
2022-03-30T02:36:12.000Z
|
2022-03-30T08:50:04.000Z
|
nonebot_plugin_analysis_bilibili/config.py
|
RainChain-Zero/Jasmine_Nonebot2_Plugins_Center
|
40e78f01bf020faa66dcffe76070fe8a24687e1b
|
[
"MIT"
] | null | null | null |
from pydantic import BaseSettings
class Config(BaseSettings):
# Your Config Here
    #! Do not run analysis in the following groups
group_ignore = [660991956]
class Config:
extra = "ignore"
| 19.333333
| 33
| 0.666667
|
330818423322df768f91a2c5741a900ce508c785
| 1,841
|
py
|
Python
|
vt_police_tools/depts/uvmps.py
|
brianmwaters/vt-police-tools
|
2619cec4fbf1a9fba4fbbfab7d5c14b83b6e6be0
|
[
"CC0-1.0"
] | 1
|
2020-06-22T20:05:34.000Z
|
2020-06-22T20:05:34.000Z
|
vt_police_tools/depts/uvmps.py
|
brianmwaters/vt-police-tools
|
2619cec4fbf1a9fba4fbbfab7d5c14b83b6e6be0
|
[
"CC0-1.0"
] | null | null | null |
vt_police_tools/depts/uvmps.py
|
brianmwaters/vt-police-tools
|
2619cec4fbf1a9fba4fbbfab7d5c14b83b6e6be0
|
[
"CC0-1.0"
] | null | null | null |
# Vermont Police Tools - Tools for cleaning Vermont police data
#
# Written in 2020 by BTV CopWatch <info@btvcopwatch.org>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""Tools for cleaning University of Vermont Police Services data."""
import pandas as pd
from .. import utils
def clean_roster(csv):
"""Clean a digitized roster."""
@utils.nullable
def clean_star_no(star_no):
assert star_no[:2] == "U-"
return int(star_no[2:])
@utils.nullable
def clean_job_title(job_title):
job_title_choices = {
"Interim Chief": "Chief",
"Deputy Chief": "Deputy Chief",
"Sergeant": "Sergeant",
"Police Officer": "Officer",
}
return job_title_choices[job_title]
@utils.nullable
def clean_race(race):
race_choices = {
"W": "WHITE",
}
return race_choices[race]
dirty = pd.read_csv(csv)
cleaned = pd.DataFrame()
cleaned["job_title"] = dirty["Title"].apply(clean_job_title)
cleaned["last_name"] = dirty["Last Name"]
cleaned["first_name"] = dirty["First Name"]
cleaned["middle_initial"] = dirty["MI"].apply(utils.clean_middle_initial)
cleaned["star_no"] = dirty["Call Sign"].apply(clean_star_no)
cleaned["employment_date"] = dirty["Date Hired"].apply(pd.to_datetime)
cleaned["race"] = dirty["Race"].apply(clean_race)
cleaned["gender"] = dirty["Gender"]
cleaned["birth_year"] = dirty["Birthdate"]
return cleaned
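# Illustrative usage sketch ("roster.csv" is a placeholder path to a digitized roster CSV):
#     cleaned = clean_roster("roster.csv")
#     cleaned.to_csv("roster_cleaned.csv", index=False)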
| 32.875
| 78
| 0.66377
|
82a9d44870bcd5ac48fd0657768a624d81cafdcb
| 547
|
py
|
Python
|
var/spack/repos/builtin/packages/py-systemd-python/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/py-systemd-python/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/py-systemd-python/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySystemdPython(PythonPackage):
"""Python interface for libsystemd"""
homepage = "https://github.com/systemd/python-systemd"
url = "https://pypi.io/packages/source/s/systemd-python/systemd-python-234.tar.gz"
version('234', sha256='fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7')
| 34.1875
| 93
| 0.758684
|
5ac40bf44933038bc77556a34e9a218292e6ff94
| 1,352
|
py
|
Python
|
2.py
|
chintan02/autism-aid
|
fae417c5b363e775a8d2046e51ecdb60740d01cc
|
[
"MIT"
] | null | null | null |
2.py
|
chintan02/autism-aid
|
fae417c5b363e775a8d2046e51ecdb60740d01cc
|
[
"MIT"
] | null | null | null |
2.py
|
chintan02/autism-aid
|
fae417c5b363e775a8d2046e51ecdb60740d01cc
|
[
"MIT"
] | null | null | null |
import pyrebase
from datetime import datetime
import matplotlib.pyplot as plt
firebaseConfig ={
"apiKey": "AIzaSyDbEslT3tpBwDTDQfP_8ZOMRBGObKEUjao",
"authDomain": "autism-aid.firebaseapp.com",
"databaseURL": "https://autism-aid.firebaseio.com",
"projectId": "autism-aid",
"storageBucket": "autism-aid.appspot.com",
"messagingSenderId": "616345484183",
"appId": "1:616345484183:web:406ce8cb76552cf5b9a2d2",
"measurementId": "G-9HBX2L5ZWE"
}
firebase = pyrebase.initialize_app(firebaseConfig)
db = firebase.database()
file = open("login.txt","r")
line = file.readline()
d = db.child("eye").child(str(line)).get()
# Eye-gaze test records for the logged-in user; each record maps a test date to a response time
l = d.val()
x = list(l)
y = []
z1 = []  # test dates (x-axis labels)
z2 = []  # response times in seconds (y-axis values)
k = 0
for i in x:
y.append(l[str(i)])
items = y[k].items()
for item in items:
z1.append(item[0])
z2.append(float(item[1]))
k=k+1
print(l)
print(x[0])
print(y)
print(z1)
print(z2)
# plotting the points
plt.plot(z1, z2, linewidth = 3, marker='o', markerfacecolor='red', markersize=12)
# naming the x axis
plt.xlabel('test dates')
# naming the y axis
plt.ylabel('time in seconds')
# giving a title to my graph
plt.title('Eye gaze response time')
# function to show the plot
plt.show()
| 23.310345
| 83
| 0.602071
|
d31ef9537baafc26911561f39d0f6c16641db819
| 1,099
|
py
|
Python
|
weekorm/exception.py
|
mitrofun/weakorm
|
8b45d7f6dc1c34b6219f632e1f187f9df33e60e8
|
[
"MIT"
] | 1
|
2019-03-17T19:05:27.000Z
|
2019-03-17T19:05:27.000Z
|
weekorm/exception.py
|
mitrofun/weakorm
|
8b45d7f6dc1c34b6219f632e1f187f9df33e60e8
|
[
"MIT"
] | null | null | null |
weekorm/exception.py
|
mitrofun/weakorm
|
8b45d7f6dc1c34b6219f632e1f187f9df33e60e8
|
[
"MIT"
] | null | null | null |
class ModelFieldNameException(Exception):
"""
Raise for model name field exception
"""
def __init__(self, *args, **kwargs):
if kwargs:
field_name = kwargs['field_name']
model_name = kwargs['model_name']
msg = f'Model {model_name}: Field with name `{field_name}` does not exist.'
super(ModelFieldNameException, self).__init__(msg)
else:
super(ModelFieldNameException, self).__init__(*args)
class ModelFieldTypeException(Exception):
"""
Raise for model field type exception
"""
def __init__(self, *args, **kwargs):
if kwargs:
model = kwargs['model']
field_name = kwargs['field_name']
field_type = kwargs['field_type']
value_type = kwargs['value_type']
msg = f'Model {model}: Value field with name `{field_name}`' \
f' must be `{field_type}`, not `{value_type}`.'
super(ModelFieldTypeException, self).__init__(msg)
else:
super(ModelFieldTypeException, self).__init__(*args)
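# Illustrative sketch (model and field names are placeholders): the keyword form builds
# the message "Model Book: Field with name `title` does not exist."
if __name__ == "__main__":
    try:
        raise ModelFieldNameException(field_name="title", model_name="Book")
    except ModelFieldNameException as exc:
        print(exc)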
| 36.633333
| 87
| 0.596906
|
c3e77651516b68ddd081d47509a90dd76260314a
| 1,717
|
py
|
Python
|
test/runtime/frontend_test/keras_test/layers_test/pooling_test/average_pooling_2d_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | 1
|
2021-04-09T15:55:35.000Z
|
2021-04-09T15:55:35.000Z
|
test/runtime/frontend_test/keras_test/layers_test/pooling_test/average_pooling_2d_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
test/runtime/frontend_test/keras_test/layers_test/pooling_test/average_pooling_2d_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
import numpy as np
from test.runtime.frontend_test.keras_test.util import keras, KerasConverter
from test.util import generate_kernel_test_case, wrap_template
@wrap_template
def template(pool_size=(3, 3), shape=(15, 17, 16), strides=(2, 2), padding="valid", data_format=None,
description: str = ""):
x = keras.layers.Input(shape)
y = keras.layers.AveragePooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format)(x)
model = keras.models.Model([x], [y])
vx = np.random.rand(2, *shape)
vy = model.predict(vx, batch_size=2)
graph = KerasConverter(batch_size=2, use_tensorflow_converter=False).convert(model)
assert list(vy.shape) == list(
graph.outputs[0].shape), f"(vy.shape)={vy.shape}, (graph.outputs[0].shape)={graph.outputs[0].shape}"
generate_kernel_test_case(
description=f"[keras] AveragePooling2D {description}",
graph=graph,
inputs={graph.inputs[0]: vx},
expected={graph.outputs[0]: vy},
)
def test():
template()
def test_padding_valid():
template(padding="valid")
def test_padding_same_even_size():
# pad: ((1,1), (1,1))
template(padding="SAME", shape=(5, 5, 3), pool_size=3, strides=1)
# FIXME: TensorFlow's padding with different size between left and right is not supported.
# def test_padding_same_odd_size():
# # pad: ((1,0), (1,0))
# template(padding="SAME", shape=(4, 4, 3), pool_size=2, strides=1)
def test_irregular_size():
template(pool_size=(3, 4), strides=(2, 1))
def test_channels_first():
template(data_format="channels_first")
def test_no_cover_all():
template(pool_size=2, shape=(2, 2, 5), strides=2, padding="SAME")
| 29.603448
| 120
| 0.680256
|
f953f2e99fae0374be62e505f2dae180777796d7
| 1,333
|
py
|
Python
|
examples/tutorials/structured_configs/4_defaults/my_app.py
|
sara-nl/hydra
|
8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7
|
[
"MIT"
] | null | null | null |
examples/tutorials/structured_configs/4_defaults/my_app.py
|
sara-nl/hydra
|
8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7
|
[
"MIT"
] | null | null | null |
examples/tutorials/structured_configs/4_defaults/my_app.py
|
sara-nl/hydra
|
8fd0d23d71cf528528ca5eda26e0c1f0c1e973d7
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass, field
from typing import Any, List
from omegaconf import MISSING, OmegaConf # Do not confuse with dataclass.MISSING
import hydra
from hydra.core.config_store import ConfigStore
@dataclass
class MySQLConfig:
driver: str = "mysql"
host: str = "localhost"
port: int = 3306
user: str = "omry"
password: str = "secret"
@dataclass
class PostGreSQLConfig:
driver: str = "postgresql"
host: str = "localhost"
port: int = 5432
timeout: int = 10
user: str = "postgres_user"
password: str = "drowssap"
defaults = [
# config group name db will load config named mysql
{"db": "mysql"}
]
@dataclass
class Config:
# this is unfortunately verbose due to @dataclass limitations
defaults: List[Any] = field(default_factory=lambda: defaults)
# Hydra will populate this field based on the defaults list
db: Any = MISSING
cs = ConfigStore.instance()
cs.store(group="db", name="mysql", node=MySQLConfig)
cs.store(group="db", name="postgresql", node=PostGreSQLConfig)
cs.store(name="config", node=Config)
@hydra.main(version_base=None, config_name="config")
def my_app(cfg: Config) -> None:
print(OmegaConf.to_yaml(cfg))
if __name__ == "__main__":
my_app()
| 22.982759
| 81
| 0.701425
|
0e077af24d0807413e25dce3fa41b28466b1451b
| 10,633
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_rbd_volume_source.py
|
opsani/kubernetes_asyncio
|
55283bf6f3690e5c0a0c589cd752221511e2be51
|
[
"Apache-2.0"
] | 196
|
2018-05-23T16:55:41.000Z
|
2022-03-31T10:09:40.000Z
|
kubernetes_asyncio/client/models/v1_rbd_volume_source.py
|
tomplus/kubernetes_asyncio
|
e8c8686ec11be3a5295ae9d5d8728299492a61f8
|
[
"Apache-2.0"
] | 164
|
2018-05-20T20:39:03.000Z
|
2022-03-29T22:57:04.000Z
|
kubernetes_asyncio/client/models/v1_rbd_volume_source.py
|
opsani/kubernetes_asyncio
|
55283bf6f3690e5c0a0c589cd752221511e2be51
|
[
"Apache-2.0"
] | 41
|
2018-06-08T00:39:53.000Z
|
2022-01-12T18:19:06.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.18.20
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1RBDVolumeSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'image': 'str',
'keyring': 'str',
'monitors': 'list[str]',
'pool': 'str',
'read_only': 'bool',
'secret_ref': 'V1LocalObjectReference',
'user': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'image': 'image',
'keyring': 'keyring',
'monitors': 'monitors',
'pool': 'pool',
'read_only': 'readOnly',
'secret_ref': 'secretRef',
'user': 'user'
}
def __init__(self, fs_type=None, image=None, keyring=None, monitors=None, pool=None, read_only=None, secret_ref=None, user=None, local_vars_configuration=None): # noqa: E501
"""V1RBDVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._image = None
self._keyring = None
self._monitors = None
self._pool = None
self._read_only = None
self._secret_ref = None
self._user = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
self.image = image
if keyring is not None:
self.keyring = keyring
self.monitors = monitors
if pool is not None:
self.pool = pool
if read_only is not None:
self.read_only = read_only
if secret_ref is not None:
self.secret_ref = secret_ref
if user is not None:
self.user = user
@property
def fs_type(self):
"""Gets the fs_type of this V1RBDVolumeSource. # noqa: E501
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
:return: The fs_type of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1RBDVolumeSource.
Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd # noqa: E501
:param fs_type: The fs_type of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def image(self):
"""Gets the image of this V1RBDVolumeSource. # noqa: E501
The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The image of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1RBDVolumeSource.
The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param image: The image of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and image is None: # noqa: E501
raise ValueError("Invalid value for `image`, must not be `None`") # noqa: E501
self._image = image
@property
def keyring(self):
"""Gets the keyring of this V1RBDVolumeSource. # noqa: E501
Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The keyring of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._keyring
@keyring.setter
def keyring(self, keyring):
"""Sets the keyring of this V1RBDVolumeSource.
Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param keyring: The keyring of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._keyring = keyring
@property
def monitors(self):
"""Gets the monitors of this V1RBDVolumeSource. # noqa: E501
A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The monitors of this V1RBDVolumeSource. # noqa: E501
:rtype: list[str]
"""
return self._monitors
@monitors.setter
def monitors(self, monitors):
"""Sets the monitors of this V1RBDVolumeSource.
A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param monitors: The monitors of this V1RBDVolumeSource. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and monitors is None: # noqa: E501
raise ValueError("Invalid value for `monitors`, must not be `None`") # noqa: E501
self._monitors = monitors
@property
def pool(self):
"""Gets the pool of this V1RBDVolumeSource. # noqa: E501
The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The pool of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1RBDVolumeSource.
The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param pool: The pool of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._pool = pool
@property
def read_only(self):
"""Gets the read_only of this V1RBDVolumeSource. # noqa: E501
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The read_only of this V1RBDVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1RBDVolumeSource.
ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param read_only: The read_only of this V1RBDVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_ref(self):
"""Gets the secret_ref of this V1RBDVolumeSource. # noqa: E501
:return: The secret_ref of this V1RBDVolumeSource. # noqa: E501
:rtype: V1LocalObjectReference
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1RBDVolumeSource.
:param secret_ref: The secret_ref of this V1RBDVolumeSource. # noqa: E501
:type: V1LocalObjectReference
"""
self._secret_ref = secret_ref
@property
def user(self):
"""Gets the user of this V1RBDVolumeSource. # noqa: E501
The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:return: The user of this V1RBDVolumeSource. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this V1RBDVolumeSource.
The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it # noqa: E501
:param user: The user of this V1RBDVolumeSource. # noqa: E501
:type: str
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1RBDVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1RBDVolumeSource):
return True
return self.to_dict() != other.to_dict()
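# --- hedged usage sketch (not part of the generated client) -------------------
# Minimal illustration of constructing the model and serializing it with
# to_dict(); the monitor addresses and image name below are placeholders, not
# values from any real cluster.
if __name__ == "__main__":
    example_source = V1RBDVolumeSource(
        monitors=["10.0.0.1:6789", "10.0.0.2:6789"],
        image="example-image",
        pool="rbd",
        read_only=True,
    )
    print(example_source.to_dict())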
| 33.332288
| 312
| 0.612433
|
ab20ebaf04a666cb522c7816a13cbd876c36b486
| 492
|
py
|
Python
|
mmdet/core/bbox/assigners/__init__.py
|
Zebraside/RRPDet
|
31dd0b3e158dcd18edb9890ff1ac84b639a9e3e9
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/bbox/assigners/__init__.py
|
Zebraside/RRPDet
|
31dd0b3e158dcd18edb9890ff1ac84b639a9e3e9
|
[
"Apache-2.0"
] | null | null | null |
mmdet/core/bbox/assigners/__init__.py
|
Zebraside/RRPDet
|
31dd0b3e158dcd18edb9890ff1ac84b639a9e3e9
|
[
"Apache-2.0"
] | 1
|
2021-12-17T12:39:36.000Z
|
2021-12-17T12:39:36.000Z
|
from .base_assigner import BaseAssigner
from .max_iou_assigner import MaxIoUAssigner
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .max_iou_assigner_hbb_cy import MaxIoUAssignerCy
from .max_iou_assigner_rbbox import MaxIoUAssignerRbbox
from .point_assigner import PointAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'MaxIoUAssignerCy', 'MaxIoUAssignerRbbox', 'PointAssigner'
]
| 37.846154
| 77
| 0.839431
|
618652adb5d0e5210ff03aff9aed1dad6364259a
| 7,946
|
py
|
Python
|
core/generators/schedule/DefaultScheduleGenerator.py
|
AluBhorta/UCSPy-Engine
|
917eea6ab98338c5763c9fd07e24da7fa2ee1cda
|
[
"MIT"
] | 6
|
2021-09-29T06:18:55.000Z
|
2021-09-29T18:56:13.000Z
|
core/generators/schedule/DefaultScheduleGenerator.py
|
AluBhorta/UCSPy-Engine
|
917eea6ab98338c5763c9fd07e24da7fa2ee1cda
|
[
"MIT"
] | 1
|
2021-10-01T00:00:15.000Z
|
2021-10-01T00:14:24.000Z
|
core/generators/schedule/DefaultScheduleGenerator.py
|
AluBhorta/UCSPy-Engine
|
917eea6ab98338c5763c9fd07e24da7fa2ee1cda
|
[
"MIT"
] | 3
|
2020-06-26T05:59:56.000Z
|
2021-09-29T06:25:23.000Z
|
from typing import List
import random
from core.models import ScheduleParam, Schedule, Class, Section, Course, Instructor, Timeslot, Room
from core.models.ScheduleGenerator import ScheduleGenerator
from core.models import ScheduleParam
class DefaultScheduleGenerator(ScheduleGenerator):
def __init__(self, schedule_param: ScheduleParam):
super(DefaultScheduleGenerator, self).__init__(schedule_param)
def generate(self) -> Schedule:
classes = []
for C in self.schedule_param.courses:
assigned_instructors = self._get_assigned_Instructors_for(
C, )
for sec_i in range(C.num_of_sections):
instructor, timeslot, room = self._get_unique_Instr_Timeslot_Room(
assigned_instructors, C, classes, )
section = Section(C, sec_i+1)
classes.append(Class(section, instructor, room, timeslot))
return Schedule(classes)
def _get_assigned_Instructors_for(self, course: Course, ):
INSTRUCTORS = self.schedule_param.instructors
assigned_instructors = []
for I in INSTRUCTORS:
if course.idx in I.assigned_course_idxs:
assigned_instructors.append(I)
if not assigned_instructors:
raise Exception(
f"Error! No assigned instructors for course {course} found!")
return assigned_instructors
def _get_unique_Instr_Timeslot_Room(self, assigned_instructors: List[Instructor], course: Course, classes: List[Class], ):
"""get unique - `instructor, timeslot, room` - for a new `Class` of given Course
utility function that, if possible - returns a unique set of `instructor, timeslot, room` that does not conflict with any such set of `instructor, timeslot, room` of any `class` in `classes`.
raises Exception if not possible
"""
MAX_RAND_R, MAX_RAND_I_T, MAX_RAND_T = 300, 100, 50
rand_R_counter, rand_I_T_counter = 0, 0
instructor, timeslot = self._get_unique_Instr_Timeslot(
assigned_instructors, course, classes, )
room = random.choice(self.schedule_param.rooms)
while True:
if self.__Room_Timeslot_conflicts(room, timeslot, classes):
if rand_R_counter < MAX_RAND_R:
room = random.choice(self.schedule_param.rooms)
rand_R_counter += 1
continue
elif rand_I_T_counter < MAX_RAND_I_T:
instructor, timeslot = self._get_unique_Instr_Timeslot(
assigned_instructors, course, classes,)
rand_I_T_counter += 1
continue
else:
for instructor in self.schedule_param.instructors:
if not self.__Instr_Timeslot_conflicts(instructor, timeslot, classes):
for room in self.schedule_param.rooms:
if not self.__Room_Timeslot_conflicts(room, timeslot, classes):
return (instructor, timeslot, room)
else:
for _ in range(MAX_RAND_T):
timeslot = self._get_Timeslot_for_Course_Instr(
course, instructor, classes, )
if timeslot != None:
for room in self.schedule_param.rooms:
if not self.__Room_Timeslot_conflicts(room, timeslot, classes):
return (instructor, timeslot, room)
for timeslot in self.schedule_param.timeslots:
if not self.__Instr_Timeslot_conflicts(instructor, timeslot, classes):
for room in self.schedule_param.rooms:
if not self.__Room_Timeslot_conflicts(room, timeslot, classes):
return (instructor, timeslot, room)
raise Exception(
f"Input Error! No unique (I, T, R) combination possible for course_idx {course.idx}!")
else:
return (instructor, timeslot, room)
def _get_unique_Instr_Timeslot(self, assigned_instructors: List[Instructor], course: Course, classes: List[Class], ):
"""function to get unique Instructor and Timeslot for given Course, if it exists.
NOTE: must satisfy _I_T_conflicts == False
"""
MAX_RAND_I = 50
counter = 0
instructor = random.choice(assigned_instructors)
timeslot = self._get_Timeslot_for_Course_Instr(
course, instructor, classes, )
while timeslot == None:
if counter > MAX_RAND_I:
for instructor in self.schedule_param.instructors:
                    timeslot = self._get_Timeslot_for_Course_Instr(
                        course, instructor, classes, )
if timeslot != None:
break
                if timeslot is not None:
                    return (instructor, timeslot)
                raise Exception(
                    f"ERROR! No Timeslot found by `_get_unique_Instr_Timeslot` for: {course}")
instructor = random.choice(assigned_instructors)
timeslot = self._get_Timeslot_for_Course_Instr(
course, instructor, classes, )
counter += 1
return (instructor, timeslot)
def _get_Timeslot_for_Course_Instr(
self,
course: Course,
instructor: Instructor,
classes: List[Class]
) -> Timeslot:
"""
NOTE: must satisfy (_I_T_conflicts == False)
"""
if course.lectures_per_week == 2:
valid_days = ['ST', 'MW']
valid_slots = self.schedule_param.daily_slots[:-1]
# excluding last slot i.e. '18:30-21:30'
elif course.lectures_per_week == 1:
if course.course_type.lower() == 'lab':
valid_days = ['S', 'T', 'M', 'W', 'R']
valid_slots = self.schedule_param.daily_slots[:-1]
elif course.course_type.lower() == 'theory':
valid_days = ['S', 'T', 'M', 'W', 'R']
valid_slots = self.schedule_param.daily_slots[-1]
else:
raise Exception(
f"ERROR! Invalid Course.course_type param for {course}! Valid answers are 'Lab' or 'Theory' for now.")
else:
raise Exception(
f"ERROR! Invalid Course.lectures_per_week param for {course}! Valid answers are '1' or '2' for now.")
valid_timeslots = (t for t in self.schedule_param.timeslots
if t.day_code in valid_days
and t.daily_slot in valid_slots)
for timeslot in valid_timeslots:
if not self.__Instr_Timeslot_conflicts(instructor, timeslot, classes):
return timeslot
return None
def __Instr_Timeslot_conflicts(self, given_I: Instructor, given_T: Timeslot, classes: List[Class]):
""" return True if (given_I, given_T) exists in classes, else False """
for c in classes:
if c.instructor.idx == given_I.idx:
if c.timeslot.idx == given_T.idx \
or c.timeslot.idx in given_T.conflicts_with_idxs:
return True
return False
def __Room_Timeslot_conflicts(self, given_R: Room, given_T: Timeslot, classes: List[Class]):
""" return True if (given_R, given_T) exists in classes, else False """
for c in classes:
if c.room.idx == given_R.idx:
if c.timeslot.idx == given_T.idx \
or c.timeslot.idx in given_T.conflicts_with_idxs:
return True
return False
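# --- hedged usage sketch (illustrative only; not part of this module) ---------
# Assuming `schedule_param` is a fully populated core.models.ScheduleParam
# (courses, instructors, rooms, timeslots, daily_slots), generation would look
# roughly like:
#
#     generator = DefaultScheduleGenerator(schedule_param)
#     schedule = generator.generate()
#
# (the Schedule returned wraps the generated Class objects; the attribute names
#  on Schedule and Class are not shown in this file, so no further access is
#  illustrated here)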
| 43.900552
| 199
| 0.575636
|
dcb0f3e71789b630ec1f7de6194a7822d66a2827
| 7,157
|
py
|
Python
|
beartype/roar/__init__.py
|
qiujiangkun/beartype
|
d3ee7e617f1c2281d438321e2c2ec3fd6b4cec8c
|
[
"MIT"
] | null | null | null |
beartype/roar/__init__.py
|
qiujiangkun/beartype
|
d3ee7e617f1c2281d438321e2c2ec3fd6b4cec8c
|
[
"MIT"
] | null | null | null |
beartype/roar/__init__.py
|
qiujiangkun/beartype
|
d3ee7e617f1c2281d438321e2c2ec3fd6b4cec8c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype exception and warning hierarchies.**
This submodule publishes a hierarchy of:
* :mod:`beartype`-specific exceptions raised both by:
* The :func:`beartype.beartype` decorator at decoration and call time.
* Other public submodules and subpackages at usage time, including
user-defined data validators imported from the :mod:`beartype.vale`
subpackage.
* :mod:`beartype`-specific warnings emitted at similar times.
Hear :mod:`beartype` roar as it efficiently checks types, validates data, and
raids native beehives for organic honey.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To prevent "mypy --no-implicit-reexport" from raising literally
# hundreds of errors at static analysis time, *ALL* public attributes *MUST* be
# explicitly reimported under the same names with "{exception_name} as
# {exception_name}" syntax rather than merely "{exception_name}". Yes, this is
# ludicrous. Yes, this is mypy. For posterity, these failures resemble:
# beartype/_cave/_cavefast.py:47: error: Module "beartype.roar" does not
# explicitly export attribute "BeartypeCallUnavailableTypeException";
# implicit reexport disabled [attr-defined]
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Public exception hierarchy.
from beartype.roar._roarexc import (
BeartypeException as BeartypeException,
BeartypeCaveException as BeartypeCaveException,
BeartypeCaveNoneTypeOrException as BeartypeCaveNoneTypeOrException,
BeartypeCaveNoneTypeOrKeyException as BeartypeCaveNoneTypeOrKeyException,
BeartypeCaveNoneTypeOrMutabilityException as BeartypeCaveNoneTypeOrMutabilityException,
BeartypeConfException as BeartypeConfException,
BeartypeDecorException as BeartypeDecorException,
BeartypeDecorWrappeeException as BeartypeDecorWrappeeException,
BeartypeDecorWrapperException as BeartypeDecorWrapperException,
BeartypeDecorHintException as BeartypeDecorHintException,
BeartypeDecorHintForwardRefException as BeartypeDecorHintForwardRefException,
BeartypeDecorHintNonpepException as BeartypeDecorHintNonpepException,
BeartypeDecorHintNonpepNumpyException as BeartypeDecorHintNonpepNumpyException,
BeartypeDecorHintPepException as BeartypeDecorHintPepException,
BeartypeDecorHintPepSignException as BeartypeDecorHintPepSignException,
BeartypeDecorHintPepUnsupportedException as BeartypeDecorHintPepUnsupportedException,
BeartypeDecorHintPep484Exception as BeartypeDecorHintPep484Exception,
BeartypeDecorHintPep484585Exception as BeartypeDecorHintPep484585Exception,
BeartypeDecorHintPep544Exception as BeartypeDecorHintPep544Exception,
BeartypeDecorHintPep557Exception as BeartypeDecorHintPep557Exception,
BeartypeDecorHintPep563Exception as BeartypeDecorHintPep563Exception,
BeartypeDecorHintPep585Exception as BeartypeDecorHintPep585Exception,
BeartypeDecorHintPep586Exception as BeartypeDecorHintPep586Exception,
BeartypeDecorHintPep593Exception as BeartypeDecorHintPep593Exception,
BeartypeDecorHintPep3119Exception as BeartypeDecorHintPep3119Exception,
BeartypeDecorHintTypeException as BeartypeDecorHintTypeException,
BeartypeDecorParamException as BeartypeDecorParamException,
BeartypeDecorParamNameException as BeartypeDecorParamNameException,
BeartypeDecorPepException as BeartypeDecorPepException,
BeartypeCallException as BeartypeCallException,
BeartypeCallUnavailableTypeException as BeartypeCallUnavailableTypeException,
BeartypeCallHintException as BeartypeCallHintException,
BeartypeCallHintForwardRefException as BeartypeCallHintForwardRefException,
BeartypeCallHintPepException as BeartypeCallHintPepException,
BeartypeCallHintPepParamException as BeartypeCallHintPepParamException,
BeartypeCallHintPepReturnException as BeartypeCallHintPepReturnException,
BeartypeValeException as BeartypeValeException,
BeartypeValeSubscriptionException as BeartypeValeSubscriptionException,
)
# Public warning hierarchy.
from beartype.roar._roarwarn import (
BeartypeWarning as BeartypeWarning,
BeartypeDecorHintPepWarning as BeartypeDecorHintPepWarning,
BeartypeDecorHintPepDeprecationWarning as BeartypeDecorHintPepDeprecationWarning,
BeartypeDecorHintPep585DeprecationWarning as BeartypeDecorHintPep585DeprecationWarning,
BeartypeDecorHintNonpepWarning as BeartypeDecorHintNonpepWarning,
BeartypeDecorHintNonpepNumpyWarning as BeartypeDecorHintNonpepNumpyWarning,
BeartypeModuleNotFoundWarning as BeartypeModuleNotFoundWarning,
BeartypeModuleUnimportableWarning as BeartypeModuleUnimportableWarning,
BeartypeValeWarning as BeartypeValeWarning,
BeartypeValeLambdaWarning as BeartypeValeLambdaWarning,
)
# ....................{ DEPRECATIONS }....................
def __getattr__(attr_deprecated_name: str) -> object:
'''
Dynamically retrieve a deprecated attribute with the passed unqualified
name from this submodule and emit a non-fatal deprecation warning on each
such retrieval if this submodule defines this attribute *or* raise an
exception otherwise.
The Python interpreter implicitly calls this :pep:`562`-compliant module
dunder function under Python >= 3.7 *after* failing to directly retrieve an
explicit attribute with this name from this submodule.
Parameters
----------
attr_deprecated_name : str
Unqualified name of the deprecated attribute to be retrieved.
Returns
----------
object
Value of this deprecated attribute.
Warns
----------
:class:`DeprecationWarning`
If this attribute is deprecated.
Raises
----------
:exc:`AttributeError`
If this attribute is unrecognized and thus erroneous.
'''
# Isolate imports to avoid polluting the module namespace.
from beartype._util.mod.utilmoddeprecate import deprecate_module_attr
# Return the value of this deprecated attribute and emit a warning.
return deprecate_module_attr(
attr_deprecated_name=attr_deprecated_name,
attr_deprecated_name_to_nondeprecated_name={
'BeartypeDecorHintNonPepException': (
'BeartypeDecorHintNonpepException'),
'BeartypeDecorHintNonPepNumPyException': (
'BeartypeDecorHintNonpepNumpyException'),
'BeartypeDecorHintPepDeprecatedWarning': (
'BeartypeDecorHintPepDeprecationWarning'),
},
attr_nondeprecated_name_to_value=globals(),
)
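# --- hedged usage sketch (not part of beartype) --------------------------------
# Accessing one of the deprecated aliases through the module object routes
# through the PEP 562 __getattr__ above, emitting a DeprecationWarning while
# still returning the renamed attribute:
#
#     import warnings
#     from beartype import roar
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         exc = roar.BeartypeDecorHintNonPepException   # deprecated spelling
#         assert exc is roar.BeartypeDecorHintNonpepException
#         assert any(issubclass(w.category, DeprecationWarning) for w in caught)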
| 49.358621
| 91
| 0.764846
|
09f69d5ee8ecbafe047509f1441558f1f3125282
| 956
|
py
|
Python
|
bigml/tests/read_external_steps.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 137
|
2015-01-12T06:04:10.000Z
|
2022-03-06T21:00:04.000Z
|
bigml/tests/read_external_steps.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 78
|
2015-01-13T18:28:51.000Z
|
2022-03-04T19:18:28.000Z
|
bigml/tests/read_external_steps.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 144
|
2015-01-16T06:13:33.000Z
|
2022-03-29T17:53:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .world import world
from nose.tools import eq_
from bigml.api import HTTP_OK
#@step(r'I get the external connector "(.*)"')
def i_get_the_external_connector(step, resource):
resource = world.api.get_external_connector(resource)
world.status = resource['code']
eq_(world.status, HTTP_OK)
world.external_connector = resource['object']
| 35.407407
| 75
| 0.746862
|
92c86e97ce6a82dce8f63e659668a5cd70e92fce
| 8,990
|
py
|
Python
|
appserver/neo4japp/services/annotations/enrichment_annotation_service.py
|
SBRG/lifelike
|
a7b715f38b389a585c10e6d0d067345937455c13
|
[
"MIT"
] | 8
|
2022-01-28T08:43:07.000Z
|
2022-03-23T11:18:10.000Z
|
appserver/neo4japp/services/annotations/enrichment_annotation_service.py
|
SBRG/lifelike
|
a7b715f38b389a585c10e6d0d067345937455c13
|
[
"MIT"
] | 23
|
2022-02-14T15:25:00.000Z
|
2022-03-28T15:30:45.000Z
|
appserver/neo4japp/services/annotations/enrichment_annotation_service.py
|
SBRG/lifelike
|
a7b715f38b389a585c10e6d0d067345937455c13
|
[
"MIT"
] | 5
|
2022-01-28T15:45:44.000Z
|
2022-03-14T11:36:49.000Z
|
import bisect
import itertools
import time
from collections import defaultdict
from typing import Dict, List, Set, Tuple
from flask import current_app
from neo4japp.constants import LogEventType
from neo4japp.utils.logger import EventLog
from .annotation_service import AnnotationService
from .annotation_db_service import AnnotationDBService
from .annotation_graph_service import AnnotationGraphService
from .constants import EntityIdStr, EntityType
from .data_transfer_objects import (
Annotation,
CreateAnnotationObjParams,
RecognizedEntities,
LMDBMatch,
SpecifiedOrganismStrain
)
class EnrichmentAnnotationService(AnnotationService):
def __init__(
self,
db: AnnotationDBService,
graph: AnnotationGraphService,
) -> None:
super().__init__(db=db, graph=graph)
def _annotate_type_gene(
self,
recognized_entities: RecognizedEntities
) -> List[Annotation]:
matches_list: List[LMDBMatch] = recognized_entities.recognized_genes
entities_to_create: List[CreateAnnotationObjParams] = []
entity_token_pairs = []
gene_names: Set[str] = set()
for match in matches_list:
entities_set = set()
for entity in match.entities:
gene_names.add(entity['synonym'])
entities_set.add((entity['synonym'], entity['id_type'], entity.get('hyperlinks', ''))) # noqa
for synonym, datasource, hyperlinks in entities_set:
if hyperlinks == '':
hyperlinks = []
entity_token_pairs.append((synonym, datasource, hyperlinks, match.token))
gene_names_list = list(gene_names)
gene_match_time = time.time()
fallback_graph_results = \
self.graph.get_genes_to_organisms(
genes=gene_names_list,
organisms=[self.specified_organism.organism_id],
)
current_app.logger.info(
f'Gene fallback organism KG query time {time.time() - gene_match_time}',
extra=EventLog(event_type=LogEventType.ANNOTATION.value).to_dict()
)
fallback_gene_organism_matches = fallback_graph_results.matches
gene_data_sources = fallback_graph_results.data_sources
gene_primary_names = fallback_graph_results.primary_names
for entity_synonym, entity_datasource, entity_hyperlinks, token in entity_token_pairs:
gene_id = None
category = None
organism_id = self.specified_organism.organism_id
organisms_to_match: Dict[str, str] = {}
if entity_synonym in fallback_gene_organism_matches:
try:
# prioritize common name match over synonym
organisms_to_match = fallback_gene_organism_matches[entity_synonym][entity_synonym] # noqa
except KeyError:
# an organism can have multiple different genes w/ same synonym
# since we don't know which to use, doing this is fine
for d in list(fallback_gene_organism_matches[entity_synonym].values()):
organisms_to_match = {**organisms_to_match, **d}
try:
gene_id = organisms_to_match[self.specified_organism.organism_id] # noqa
category = self.specified_organism.category
except KeyError:
continue
else:
if entity_datasource != gene_data_sources[f'{entity_synonym}{organism_id}']: # noqa
continue
entities_to_create.append(
CreateAnnotationObjParams(
token=token,
token_type=EntityType.GENE.value,
entity_synonym=entity_synonym,
entity_name=gene_primary_names[gene_id],
entity_id=gene_id,
entity_datasource=entity_datasource,
entity_hyperlinks=entity_hyperlinks,
entity_category=category
)
)
return self._create_annotation_object(entities_to_create)
def _annotate_type_protein(
self,
recognized_entities: RecognizedEntities
) -> List[Annotation]:
matches_list: List[LMDBMatch] = recognized_entities.recognized_proteins
entities_to_create: List[CreateAnnotationObjParams] = []
entity_token_pairs = []
protein_names: Set[str] = set()
for match in matches_list:
entities_set = set()
for entity in match.entities:
protein_names.add(entity['synonym'])
entities_set.add((entity['synonym'], entity.get('category', ''), entity['id_type'], entity.get('hyperlinks', ''))) # noqa
for synonym, datasource, category, hyperlinks in entities_set:
if hyperlinks == '':
hyperlinks = []
entity_token_pairs.append((synonym, datasource, category, hyperlinks, match.token))
protein_names_list = list(protein_names)
protein_match_time = time.time()
fallback_graph_results = \
self.graph.get_proteins_to_organisms(
proteins=protein_names_list,
organisms=[self.specified_organism.organism_id],
)
current_app.logger.info(
f'Protein fallback organism KG query time {time.time() - protein_match_time}',
extra=EventLog(event_type=LogEventType.ANNOTATION.value).to_dict()
)
fallback_protein_organism_matches = fallback_graph_results.matches
protein_primary_names = fallback_graph_results.primary_names
for entity_synonym, category, entity_datasource, entity_hyperlinks, token in entity_token_pairs: # noqa
# in LMDB we use the synonym as id and name, so do the same here
protein_id = entity_synonym
if entity_synonym in fallback_protein_organism_matches:
try:
protein_id = fallback_protein_organism_matches[entity_synonym][self.specified_organism.organism_id] # noqa
category = self.specified_organism.category
except KeyError:
continue
entities_to_create.append(
CreateAnnotationObjParams(
token=token,
token_type=EntityType.PROTEIN.value,
entity_id=protein_id,
entity_synonym=entity_synonym,
entity_name=protein_primary_names.get(protein_id, entity_synonym),
entity_datasource=entity_datasource,
entity_hyperlinks=entity_hyperlinks,
entity_category=category
)
)
return self._create_annotation_object(entities_to_create)
def create_annotations(
self,
custom_annotations: List[dict],
entity_results: RecognizedEntities,
entity_type_and_id_pairs: List[Tuple[str, str]],
specified_organism: SpecifiedOrganismStrain,
**kwargs
) -> List[Annotation]:
self.specified_organism = specified_organism
self.enrichment_mappings = kwargs['enrichment_mappings']
annotations = self._create_annotations(
types_to_annotate=entity_type_and_id_pairs,
custom_annotations=custom_annotations,
recognized_entities=entity_results
)
start = time.time()
cleaned = self._clean_annotations(annotations=annotations)
current_app.logger.info(
f'Time to clean and run annotation interval tree {time.time() - start}',
extra=EventLog(event_type=LogEventType.ANNOTATION.value).to_dict()
)
return cleaned
def _clean_annotations(
self,
annotations: List[Annotation]
) -> List[Annotation]:
fixed_unified_annotations = self._get_fixed_false_positive_unified_annotations(
annotations_list=annotations)
# need to split up the annotations otherwise
# a text in a cell could be removed due to
# overlapping with an adjacent cell
split = defaultdict(list)
offsets = [i for i, _ in self.enrichment_mappings]
for anno in fixed_unified_annotations:
# get first offset that is greater than hi_location_offset
# this means the annotation is part of that cell/sublist
index = bisect.bisect_left(offsets, anno.hi_location_offset)
split[offsets[index]].append(anno)
fixed_unified_annotations = list(itertools.chain.from_iterable(
[self.fix_conflicting_annotations(unified_annotations=v) for _, v in split.items()]
))
return fixed_unified_annotations
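# --- hedged illustration (standalone, not used by the service above) ----------
# _clean_annotations buckets annotations per enrichment cell by finding, for
# each annotation, the first cell offset at or beyond its hi_location_offset.
# Plain ints stand in for Annotation objects below; the offsets are made up.
if __name__ == "__main__":
    import bisect
    from collections import defaultdict

    cell_offsets = [100, 250, 400]           # boundary offset of each cell
    annotation_ends = [40, 120, 230, 390]    # hi_location_offset of each annotation

    buckets = defaultdict(list)
    for end in annotation_ends:
        idx = bisect.bisect_left(cell_offsets, end)
        buckets[cell_offsets[idx]].append(end)
    print(dict(buckets))                     # {100: [40], 250: [120, 230], 400: [390]}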
| 42.009346
| 138
| 0.630367
|
0648d08cf28c283d79938d1e87a6732fda447339
| 1,027
|
py
|
Python
|
src/data/tests/test_all.py
|
terryf82/boston-crash-modeling
|
9c2659387fcee1a426e07de6284c15d703312b1c
|
[
"MIT"
] | null | null | null |
src/data/tests/test_all.py
|
terryf82/boston-crash-modeling
|
9c2659387fcee1a426e07de6284c15d703312b1c
|
[
"MIT"
] | null | null | null |
src/data/tests/test_all.py
|
terryf82/boston-crash-modeling
|
9c2659387fcee1a426e07de6284c15d703312b1c
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import json
import shutil
def test_all(tmpdir):
# Copy test data into temp directory
orig_path = os.path.dirname(
os.path.abspath(__file__)) + '/data/'
path = tmpdir.strpath + '/data'
shutil.copytree(orig_path, path)
filename = path + '/raw/ma_cob_spatially_joined_streets.shp'
subprocess.check_call([
'python',
'-m',
'data.extract_intersections',
filename,
'-d',
path
])
subprocess.check_call([
'python',
'-m',
'data.create_segments',
'-d',
path,
'-r',
path + '/processed/maps/elements.geojson'
])
subprocess.check_call([
'python',
'-m',
'data.join_segments_crash_concern',
'-d',
path,
])
data = json.load(open(path + '/processed/crash_joined.json'))
assert data[0]['near_id'] == 2
data = json.load(open(path + '/processed/concern_joined.json'))
assert data[0]['near_id'] == 3
| 20.959184
| 67
| 0.562804
|
ad0a99e58759a474bbe510cd1da98da18d9c4dbf
| 6,558
|
py
|
Python
|
magnum/api/controllers/v1/types.py
|
Linaro/magnum
|
b2e3f2346b8550f71e1ed2c737e82aa6050bcfec
|
[
"Apache-2.0"
] | 2
|
2020-07-15T14:43:39.000Z
|
2021-05-02T14:42:13.000Z
|
magnum/api/controllers/v1/types.py
|
Linaro/magnum
|
b2e3f2346b8550f71e1ed2c737e82aa6050bcfec
|
[
"Apache-2.0"
] | null | null | null |
magnum/api/controllers/v1/types.py
|
Linaro/magnum
|
b2e3f2346b8550f71e1ed2c737e82aa6050bcfec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import six
from oslo_utils import strutils
from oslo_utils import uuidutils
import wsme
from wsme import types as wtypes
from magnum.common import exception
from magnum.common import utils
from magnum.i18n import _
class DNSListType(wtypes.UserType):
"""A comman delimited dns nameserver list"""
basetype = six.string_types
name = "dnslist"
@staticmethod
def validate(value):
return utils.validate_dns(value)
class MacAddressType(wtypes.UserType):
"""A simple MAC address type."""
basetype = wtypes.text
name = 'macaddress'
@staticmethod
def validate(value):
return utils.validate_and_normalize_mac(value)
@staticmethod
def frombasetype(value):
if value is None:
return None
return MacAddressType.validate(value)
class NameType(wtypes.UserType):
"""A logical name type."""
basetype = wtypes.text
name = 'name'
@staticmethod
def validate(value):
if not utils.is_name_safe(value):
raise exception.InvalidName(name=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return NameType.validate(value)
class UuidType(wtypes.UserType):
"""A simple UUID type."""
basetype = wtypes.text
name = 'uuid'
@staticmethod
def validate(value):
if not uuidutils.is_uuid_like(value):
raise exception.InvalidUUID(uuid=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return UuidType.validate(value)
class BooleanType(wtypes.UserType):
"""A simple boolean type."""
basetype = wtypes.text
name = 'boolean'
@staticmethod
def validate(value):
try:
return strutils.bool_from_string(value, strict=True)
except ValueError as e:
# raise Invalid to return 400 (BadRequest) in the API
raise exception.Invalid(e)
@staticmethod
def frombasetype(value):
if value is None:
return None
return BooleanType.validate(value)
class MultiType(wtypes.UserType):
"""A complex type that represents one or more types.
Used for validating that a value is an instance of one of the types.
:param types: Variable-length list of types.
"""
basetype = wtypes.text
def __init__(self, *types):
self.types = types
def __str__(self):
return ' | '.join(map(str, self.types))
def validate(self, value):
for t in self.types:
try:
return wtypes.validate_value(t, value)
except (exception.InvalidUUID, ValueError):
pass
else:
raise ValueError(
_("Wrong type. Expected '%(type)s', got '%(value)s'")
% {'type': self.types, 'value': type(value)})
dns_list = DNSListType()
macaddress = MacAddressType()
uuid = UuidType()
name = NameType()
uuid_or_name = MultiType(UuidType, NameType)
boolean = BooleanType()
class JsonPatchType(wtypes.Base):
"""A complex type that represents a single json-patch operation."""
path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'),
mandatory=True)
op = wtypes.wsattr(wtypes.Enum(wtypes.text, 'add', 'replace', 'remove'),
mandatory=True)
value = MultiType(wtypes.text, int)
# The class of the objects being patched. Override this in subclasses.
# Should probably be a subclass of magnum.api.controllers.base.APIBase.
_api_base = None
# Attributes that are not required for construction, but which may not be
# removed if set. Override in subclasses if needed.
_extra_non_removable_attrs = set()
# Set of non-removable attributes, calculated lazily.
_non_removable_attrs = None
@staticmethod
def internal_attrs():
"""Returns a list of internal attributes.
Internal attributes can't be added, replaced or removed. This
method may be overwritten by derived class.
"""
return ['/created_at', '/id', '/links', '/updated_at',
'/uuid', '/project_id', '/user_id']
@classmethod
def non_removable_attrs(cls):
"""Returns a set of names of attributes that may not be removed.
Attributes whose 'mandatory' property is True are automatically added
to this set. To add additional attributes to the set, override the
field _extra_non_removable_attrs in subclasses, with a set of the form
{'/foo', '/bar'}.
"""
if cls._non_removable_attrs is None:
cls._non_removable_attrs = cls._extra_non_removable_attrs.copy()
if cls._api_base:
fields = inspect.getmembers(cls._api_base,
lambda a: not inspect.isroutine(a))
for name, field in fields:
if getattr(field, 'mandatory', False):
cls._non_removable_attrs.add('/%s' % name)
return cls._non_removable_attrs
@staticmethod
def validate(patch):
if patch.path in patch.internal_attrs():
msg = _("'%s' is an internal attribute and can not be updated")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.path in patch.non_removable_attrs() and patch.op == 'remove':
msg = _("'%s' is a mandatory attribute and can not be removed")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.op != 'remove':
if not patch.value:
msg = _("'add' and 'replace' operations needs value")
raise wsme.exc.ClientSideError(msg)
ret = {'path': patch.path, 'op': patch.op}
if patch.value:
ret['value'] = patch.value
return ret
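# --- hedged usage note (illustrative only) -------------------------------------
# The kind of JSON-patch payload these types are meant to validate looks like:
#
#     [{"path": "/name", "op": "replace", "value": "new-name"},
#      {"path": "/labels", "op": "remove"}]
#
# JsonPatchType.validate() rejects operations that touch internal attributes
# (e.g. "/uuid", "/created_at"), that remove mandatory attributes, or that use
# "add"/"replace" without a value. The field names above are illustrative.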
| 29.674208
| 79
| 0.632815
|
6bfb4392a91c301feb4cf7d305a20efc3cf26005
| 5,008
|
py
|
Python
|
vistrails/packages/tabledata/init.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 83
|
2015-01-05T14:50:50.000Z
|
2021-09-17T19:45:26.000Z
|
vistrails/packages/tabledata/init.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 254
|
2015-01-02T20:39:19.000Z
|
2018-11-28T17:16:44.000Z
|
vistrails/packages/tabledata/init.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 40
|
2015-04-17T16:46:36.000Z
|
2021-09-28T22:43:24.000Z
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2013-2014, NYU-Poly.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from vistrails.core.modules.utils import make_modules_dict
from vistrails.core.packagemanager import get_package_manager
from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler
from .common import _modules as common_modules, TableOutput
from .convert import _modules as convert_modules
from .operations import _modules as operation_modules
from .read import _modules as read_modules
from .write import _modules as write_modules
_modules = [common_modules,
convert_modules,
operation_modules,
read_modules,
write_modules]
if get_package_manager().has_package( # pragma: no branch
'org.vistrails.vistrails.spreadsheet'):
from .viewer import _modules as viewer_modules, TableToSpreadsheetMode
_modules.append(viewer_modules)
TableOutput.register_output_mode(TableToSpreadsheetMode)
_modules = make_modules_dict(*_modules)
def handle_module_upgrade_request(controller, module_id, pipeline):
def add_keyname(fname, module):
new_function = controller.create_function(module,
"key_name",
["_key"])
return [('add', new_function, 'module', module.id)]
module_remap = {
'read|csv|CSVFile': [
(None, '0.1.1', 'read|CSVFile', {
'src_port_remap': {
'self': 'value'},
})
],
'read|numpy|NumPyArray': [
(None, '0.1.1', 'read|NumPyArray', {
'src_port_remap': {
'self': 'value'},
})
],
'read|CSVFile': [
('0.1.1', '0.1.2', None, {
'src_port_remap': {
'self': 'value'},
}),
('0.1.3', '0.1.5', None, {})
],
'read|NumPyArray': [
('0.1.1', '0.1.2', None, {
'src_port_remap': {
'self': 'value'},
})
],
'read|ExcelSpreadsheet': [
('0.1.1', '0.1.2', None, {
'src_port_remap': {
'self': 'value'},
}),
('0.1.3', '0.1.4', None, {})
],
'read|JSONFile': [
(None, '0.1.5', 'read|JSONObject', {
'function_remap': {
None: add_keyname},
})
],
}
try:
from vistrails.packages.spreadsheet.init import upgrade_cell_to_output
except ImportError:
pass
else:
module_remap = upgrade_cell_to_output(
module_remap, module_id, pipeline,
'TableCell', 'TableOutput',
'0.1.6', 'table')
return UpgradeWorkflowHandler.remap_module(controller,
module_id,
pipeline,
module_remap)
| 39.746032
| 79
| 0.565495
|
237765ded0de6cb80504fd273e42becfae0ad541
| 2,443
|
py
|
Python
|
thermodynamics/introToThermodynamics/GasContainers.py
|
dsmith2598/FlipItPhysics-Physics2
|
41b105dcf849e1bbd4cc8d132859db2476f90617
|
[
"MIT"
] | 1
|
2021-06-16T07:28:46.000Z
|
2021-06-16T07:28:46.000Z
|
thermodynamics/introToThermodynamics/GasContainers.py
|
Rohit212/FlipItPhysics-Physics2
|
41b105dcf849e1bbd4cc8d132859db2476f90617
|
[
"MIT"
] | null | null | null |
thermodynamics/introToThermodynamics/GasContainers.py
|
Rohit212/FlipItPhysics-Physics2
|
41b105dcf849e1bbd4cc8d132859db2476f90617
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
A container contains 133 moles of Gas A and 115 moles
of Gas B separated by a movable airtight divider.
Part A:
Initially, we will assume that the container is cylindrical.
If the cylinder is 2.2 m long, how far from the left side of
the cylinder will we find the equilibrium position of the divider?
Assuming that the cylinder doesn't change shape, we can just
find the mole fraction of Gas A and multiply it by
the length of the cylinder to find the cutoff point.
totalMole
    = numberOfMolesA + numberOfMolesB
Gas A is located on the left, so we use its mole fraction:
ratioOfAverages
    = numberOfMolesA / totalMole
and then multiply it by the height to get:
locationOfDivider
= ratioOfAverages * height
= 1.17983870968 meters from left
Part B:
Now assume that this container is shaped like a cone, as shown on the right.
The cone has a height of 2.2 m and the tip of the cone makes an angle of 90°.
At what distance from the tip of the cone will we find the equilibrium
position of the divider?
Remember that the volume of a cone is given by (π*r^2*h)/3, where h is the height
and r is the radius of the base.
Again, gas A is located to the left.
Since the tip of the angle is at 90 degrees and this is a cone,
the radius and the height of the cone are constantly equal to each other.
Knowing this fact we can rewrite the equation to:
= pi * radius ^ 2 * height / 3
= pi * height ^ 3 / 3
And we're solving for the point at which the volume is at
the ratioOfAverages from the last question.
so we can solve for value of volume at which we want our solution
to be at by doing:
volumeWanted
= ratioOfAverages * pi * (max_height) ^ 3 / 3
= 5.97976 meters cubed
Now we need to solve for the height or radius to get...
heightNeeded
= cubedRoot(3 * volumeWanted/ pi)
= 1.78740376144 meters from the left side
"""
if __name__ == '__main__':
cylinderHeight = 2.2
numberOfMolesA = 133.0
numberOfMolesB = 115.0
totalMole = numberOfMolesA + numberOfMolesB
ratioOfAverages = numberOfMolesA / totalMole
locationOfDivider = ratioOfAverages * cylinderHeight
print "The location of the divider for the cylinder starting from the left is:", locationOfDivider
PI = 3.1415
volumeWanted = PI * (cylinderHeight ** 3) * ratioOfAverages / 3.0
heightNeeded = (3 * volumeWanted / PI) ** (1.0 / 3.0)
print "The location of the divider for the cone starting from the left is", heightNeeded
| 29.083333
| 102
| 0.743348
|
2cf51b1ced847d1b4db4dcdeed73033d45f39ff0
| 3,304
|
py
|
Python
|
chapters/03_tensorflow_fundamentals/vectorized_graph.py
|
Asurada2015/TF-_for_MI
|
5fafdb78286b122036fa9aecf2a4be72ea4673e1
|
[
"Apache-2.0"
] | 8
|
2018-01-09T05:29:01.000Z
|
2019-03-03T13:40:51.000Z
|
chapters/03_tensorflow_fundamentals/vectorized_graph.py
|
Asurada2015/TF-_for_MI
|
5fafdb78286b122036fa9aecf2a4be72ea4673e1
|
[
"Apache-2.0"
] | null | null | null |
chapters/03_tensorflow_fundamentals/vectorized_graph.py
|
Asurada2015/TF-_for_MI
|
5fafdb78286b122036fa9aecf2a4be72ea4673e1
|
[
"Apache-2.0"
] | 6
|
2017-10-25T02:17:59.000Z
|
2018-11-08T01:58:32.000Z
|
import tensorflow as tf
import numpy as np
# Create a new Graph object and set it as the default graph
# Explicitly create a graph to use rather than relying on the default graph
graph = tf.Graph()
with graph.as_default():
    # The graph holds two "global-style" Variable objects; because they are global in
    # nature, they are declared apart from the other dataflow-graph nodes, inside
    # their own name scope.
with tf.name_scope("variables"):
        # Variable tracking how many times the dataflow graph has been run
global_step = tf.Variable(0, dtype=tf.int32, name="global_step")
        # Variable accumulating the sum of all of the model's outputs over time
total_output = tf.Variable(0.0, dtype=tf.float32, name="total_output")
    # Core transformation ops
with tf.name_scope("transformation"):
        # Separate input layer
with tf.name_scope("input"):
            # Create an input placeholder that accepts a vector
a = tf.placeholder(tf.float32, shape=[None], name="input_placeholder_a")
        # Separate intermediate layer
with tf.name_scope("intermediate_layer"):
            b = tf.reduce_prod(a, name="product_b")  # product of the elements along the given axis
c = tf.reduce_sum(a, name="sum_c")
        # Separate output layer
with tf.name_scope("output"):
output = tf.add(b, c, name="output")
with tf.name_scope("update"):
        # Update the total_output Variable with the latest output
update_total = total_output.assign_add(output)
        # Increment the global_step Variable by 1; this op must run every time the graph runs
increment_step = global_step.assign_add(1)
    # Summary ops
with tf.name_scope("summaries"):
avg = tf.div(update_total, tf.cast(increment_step, tf.float32), name="average")
        # tf.cast converts the int step counter to tf.float32
        # Compute the running mean of the outputs: use the running total total_output
        # (taken from update_total's output, so the update finishes before avg is computed)
        # and the total number of graph runs global_step (taken from increment_step's
        # output, so the graph runs in order)
        # Create summary data for the output nodes
tf.summary.scalar(name="output_summary", tensor=output)
tf.summary.scalar(name="total_summary", tensor=update_total)
tf.summary.scalar(name="average_summary", tensor=avg)
    # Global Variables and ops
    # To finish building the graph, create the Variable-initialization op and a helper
    # node that merges all summaries into a single op; both go in a name scope named
    # "global_ops"
with tf.name_scope("global_ops"):
        # Initialize all Variables
init = tf.initialize_all_variables()
        # Helper node that merges all summary data into a single op
merged_summaries = tf.summary.merge_all()
"""将merge_all_summaries()与其他全局OPs放在一起是最佳做法,这可以想象为一个拥有Variable对象,Op和名称作用域等的不同汇总数据的数据流图"""
# Start a Session using the explicitly created graph
sess = tf.Session(graph=graph)
# FileWriter used to save the summary data
writer = tf.summary.FileWriter('./improved_graph', graph)
# Initialize all Variables
sess.run(init)
def run_graph(input_tensor):
"""
    Helper function: runs the graph with the given tensor as input and saves the summary data
"""
feed_dict = {a: input_tensor}
    # a is the input placeholder; input_tensor supplies its data
out, step, summary = sess.run([output, increment_step, merged_summaries], feed_dict=feed_dict)
    # session.run evaluates each item in the list [output, increment_step, merged_summaries]
    # output is the computed result, increment_step the current step count, and
    # merged_summaries the collected summary data
writer.add_summary(summary, global_step=step)
    # The global_step argument matters: it is what lets TensorBoard plot the data over time
# Run the graph with various inputs
run_graph([2, 8])
run_graph([3, 1, 3, 3])
run_graph([8])
run_graph([1, 2, 3])
run_graph([11, 4])
run_graph([4, 1])
run_graph([7, 3, 1])
run_graph([6, 3])
run_graph([0, 2])
run_graph([4, 5, 6])
# Flush the summary data to disk
writer.flush()
# Close the writer
writer.close()
# Close the session
sess.close()
# To start TensorBoard after running this file, execute the following command:
# $ tensorboard --logdir=F://Git/TF-_for_MI/chapters/03_tensorflow_fundamentals/improved_graph
| 30.592593
| 98
| 0.701877
|
02b7d853ead94805d4e0ed54206fcbfe5abac1d5
| 404
|
py
|
Python
|
uwcsvote/wsgi.py
|
ericthelemur/uwcs-vote
|
616b635014289962de84c800966c98fe230b802f
|
[
"MIT"
] | null | null | null |
uwcsvote/wsgi.py
|
ericthelemur/uwcs-vote
|
616b635014289962de84c800966c98fe230b802f
|
[
"MIT"
] | null | null | null |
uwcsvote/wsgi.py
|
ericthelemur/uwcs-vote
|
616b635014289962de84c800966c98fe230b802f
|
[
"MIT"
] | null | null | null |
"""
WSGI config for uwcsvote project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uwcsvote.settings.production')
application = get_wsgi_application()
| 23.764706
| 79
| 0.789604
|
4e61636e78a6d72422853e9c21bb8428f5b6b613
| 8,666
|
py
|
Python
|
clodius/tiles/multivec.py
|
zhangzhen/clodius
|
d66aa6dcc4d27a16752ecfe2738289db8085b2b6
|
[
"MIT"
] | 14
|
2018-11-14T23:58:32.000Z
|
2021-09-12T13:56:19.000Z
|
clodius/tiles/multivec.py
|
zhangzhen/clodius
|
d66aa6dcc4d27a16752ecfe2738289db8085b2b6
|
[
"MIT"
] | 71
|
2018-10-30T15:31:24.000Z
|
2022-03-20T21:10:19.000Z
|
clodius/tiles/multivec.py
|
zhangzhen/clodius
|
d66aa6dcc4d27a16752ecfe2738289db8085b2b6
|
[
"MIT"
] | 15
|
2018-10-30T15:31:44.000Z
|
2021-12-22T02:23:00.000Z
|
import base64
import json
import math
import h5py
import numpy as np
from .utils import abs2genomic
def tiles(filename, tile_ids):
"""
    Retrieve multiple multivec tiles for the given tile ids.
    Parameters
    ----------
filename: string
The multires file containing the multivec data
tile_ids: [str,...]
A list of tile_ids (e.g. xyx.0.0) identifying the tiles
to be retrieved
"""
f16 = np.finfo("float16")
f16_min, f16_max = f16.min, f16.max
generated_tiles = []
for tile_id in tile_ids:
tile_pos = [int(i) for i in tile_id.split(".")[1:3]]
ma = get_single_tile(filename, tile_pos)
has_nan = np.isnan(ma).any()
ma_max = ma.max() if ma.size else 0
ma_min = ma.min() if ma.size else 0
use_f16 = not has_nan and (ma_min > f16_min and ma_max < f16_max)
ma = ma.astype(np.float16 if use_f16 else np.float32)
ma_base64 = base64.b64encode(ma.ravel()).decode("utf-8")
tile_value = {
"dense": ma_base64,
"dtype": "float16" if use_f16 else "float32",
"shape": ma.shape,
}
generated_tiles.append((tile_id, tile_value))
return generated_tiles
def get_single_tile(filename, tile_pos):
"""
Retrieve a single multivec tile from a multires file
Parameters
----------
filename: string
The multires file containing the multivec data
tile_pos: (z, x)
The zoom level and position of this tile
"""
# t1 = time.time()
tsinfo = tileset_info(filename)
f = h5py.File(filename, "r")
# print('tileset_info', tileset_info)
# t2 = time.time()
# which resolution does this zoom level correspond to?
resolution = tsinfo["resolutions"][tile_pos[0]]
tile_size = tsinfo["tile_size"]
# where in the data does the tile start and end
tile_start = tile_pos[1] * tile_size * resolution
tile_end = tile_start + tile_size * resolution
chromsizes = list(zip(f["chroms"]["name"], f["chroms"]["length"]))
# dense = f['resolutions'][str(resolution)][tile_start:tile_end]
dense = get_tile(f, chromsizes, resolution, tile_start, tile_end, tsinfo["shape"])
# print("dense.shape", dense.shape)
if len(dense) < tsinfo["tile_size"]:
# if there aren't enough rows to fill this tile, add some zeros
dense = np.vstack(
[dense, np.zeros((tsinfo["tile_size"] - len(dense), tsinfo["shape"][1]))]
)
f.close()
# t3 = time.time()
# print("single time time: {:.2f} (tileset info: {:.2f}, open time: {:.2f})".format(t3 - t1, t15 - t1, t2 - t15))
return dense.T
def get_tile(f, chromsizes, resolution, start_pos, end_pos, shape):
"""
Get the tile value given the start and end positions and
chromosome positions.
Drop bins at the ends of chromosomes if those bins aren't
full.
Parameters:
-----------
f: h5py.File
An hdf5 file containing the data
chromsizes: [('chr1', 1000), ....]
An array listing the chromosome sizes
resolution: int
The size of each bin, except for the last bin in each
chromosome.
start_pos: int
The start_position of the interval to return
end_pos: int
The end position of the interval to return
Returns
-------
return_vals: [...]
A subset of the original genome-wide values containing
the values for the portion of the genome that is visible.
"""
binsize = resolution
# print('binsize:', binsize)
# print('start_pos:', start_pos, 'end_pos:', end_pos)
# print("length:", end_pos - start_pos)
# print('shape:', shape)
# t0 = time.time()
arrays = []
count = 0
# keep track of how much data has been returned in bins
current_binned_data_position = 0
current_data_position = 0
num_added = 0
total_length = 0
for cid, start, end in abs2genomic([c[1] for c in chromsizes], start_pos, end_pos):
n_bins = int(np.ceil((end - start) / binsize))
total_length += end - start
# print('cid', cid, start, end, 'tl:', total_length)
try:
# t1 = time.time()
chrom = chromsizes[cid][0]
current_data_position += end - start
count += 1
start_pos = math.floor(start / binsize)
end_pos = math.ceil(end / binsize)
if start_pos >= end_pos:
continue
# print("start:", start, "end", end)
# print("sp", start_pos * binsize, end_pos * binsize)
# print('current_data_position:', current_data_position)
# print('current_binned_data_position:', current_binned_data_position)
# print('binsize:', binsize, 'resolution:', resolution)
"""
if start_pos == end_pos:
if current_data_position - current_binned_data_position > 0:
# adding this data as a single bin even though it's not large
# enough to cover one bin
# print('catching up')
end_pos += 1
else:
# print('data smaller than the bin size', start, end, binsize)
continue
"""
# print("offset:", offset, "start_pos", start_pos, end_pos)
x = f["resolutions"][str(resolution)]["values"][chrom][start_pos:end_pos]
current_binned_data_position += binsize * (end_pos - start_pos)
# print("x:", x.shape)
# If the offset is larger than the binsize, drop the last bin
offset = current_binned_data_position - current_data_position
if offset > binsize:
x = x[:-1]
# drop the very last bin if it is smaller than the binsize
"""
if len(x) > 1 and end == clen and clen % binsize != 0:
# print("dropping")
x = x[:-1]
"""
if len(x):
num_added += len(x)
# print('cid:', cid, end-start, total_length, 'num_added:', num_added, 'x:', sum(x))
# t2 = time.time()
# print("time to fetch {}: {}".format(chrom, t2 - t1))
except IndexError:
# beyond the range of the available chromosomes
# probably means we've requested a range of absolute
# coordinates that stretch beyond the end of the genome
# print('zeroes')
x = np.zeros((n_bins, shape[1]))
arrays.append(x)
# print("total_length:", total_length)
# print('arrays:', len(np.concatenate(arrays)))
# t3 = time.time()
# print("total fetch time:", t3 - t0)
return np.concatenate(arrays)[: shape[0]]
def tileset_info(filename):
"""
Return some information about this tileset that will
help render it in on the client.
Parameters
----------
filename: str
The filename of the h5py file containing the tileset info.
Returns
-------
tileset_info: {}
A dictionary containing the information describing
this dataset
"""
# t1 = time.time()
f = h5py.File(filename, "r")
# t2 = time.time()
# a sorted list of resolutions, lowest to highest
# awkward to write because a the numbers representing resolution
# are datapoints / pixel so lower resolution is actually a higher
# number
resolutions = sorted([int(r) for r in f["resolutions"].keys()])[::-1]
# the "leftmost" datapoint position
# an array because higlass can display multi-dimensional
# data
min_pos = [0]
max_pos = [int(sum(f["chroms"]["length"][:]))]
# the "rightmost" datapoint position
# max_pos = [len(f['resolutions']['values'][str(resolutions[-1])])]
tile_size = int(f["info"].attrs["tile-size"])
first_chrom = f["chroms"]["name"][0]
shape = list(f["resolutions"][str(resolutions[0])]["values"][first_chrom].shape)
shape[0] = tile_size
# t3 = time.time()
# print("tileset info time:", t3 - t2)
tileset_info = {
"resolutions": resolutions,
"min_pos": min_pos,
"max_pos": max_pos,
"tile_size": tile_size,
"shape": shape,
}
if "row_infos" in f["resolutions"][str(resolutions[0])].attrs:
row_infos = f["resolutions"][str(resolutions[0])].attrs["row_infos"]
tileset_info["row_infos"] = [r.decode("utf8") for r in row_infos]
elif "row_infos" in f["info"]:
row_infos_encoded = f["info"]["row_infos"][()]
tileset_info["row_infos"] = json.loads(row_infos_encoded)
f.close()
return tileset_info
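# Illustrative usage sketch (not part of the original module; the filename is
# hypothetical):
#
#     info = tileset_info("my_data.multires.mv5")
#     print(info["resolutions"], info["shape"])
#
# tileset_info opens the file read-only and closes it before returning, so it
# can be called repeatedly without holding the HDF5 handle open.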
| 31.97786
| 117
| 0.587584
|
f742240ea53e33fcd737210f6c04b29b16bb4de3
| 413
|
py
|
Python
|
build/lib/admintool_command/command.py
|
jayvdb/django-admintool-command
|
aee5f8f003b5956e3a3e7df4957c5b839ffd8341
|
[
"MIT"
] | 3
|
2019-08-07T23:33:19.000Z
|
2021-03-26T11:08:12.000Z
|
build/lib/admintool_command/command.py
|
jayvdb/django-admintool-command
|
aee5f8f003b5956e3a3e7df4957c5b839ffd8341
|
[
"MIT"
] | 6
|
2020-04-27T13:09:42.000Z
|
2020-08-16T06:36:02.000Z
|
build/lib/admintool_command/command.py
|
jayvdb/django-admintool-command
|
aee5f8f003b5956e3a3e7df4957c5b839ffd8341
|
[
"MIT"
] | 1
|
2020-08-07T05:52:46.000Z
|
2020-08-07T05:52:46.000Z
|
from abc import ABC, abstractmethod
from django import forms
from django.core.management import BaseCommand
class AdminCommand(BaseCommand, ABC):
name = None
template = "admintool_command/command.html"
class Form(forms.Form):
pass
def init_context(self, request=None, **kwargs):
return dict()
@abstractmethod
def get_command_arguments(self, forms_data):
pass
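# Hypothetical subclass sketch (not part of the original module) showing how
# the pieces above are meant to be filled in; the command name, form field and
# the return convention of get_command_arguments are assumptions made for
# illustration only:
#
#     class SendReminder(AdminCommand):
#         name = "Send reminder e-mails"
#
#         class Form(forms.Form):
#             days = forms.IntegerField(initial=7)
#
#         def get_command_arguments(self, forms_data):
#             # assumed to be forwarded to the underlying management command
#             return [forms_data["days"]]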
| 20.65
| 51
| 0.7046
|
9d30a7f13a8f7f15bbf6652f5d3cd1d7675d95a4
| 2,252
|
py
|
Python
|
g3doc/build_docs.py
|
amad-person/privacy
|
aaf4c252a0bbfa41670ddefd3798bdf2066c0e21
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:48:44.000Z
|
2021-05-10T10:48:44.000Z
|
g3doc/build_docs.py
|
amad-person/privacy
|
aaf4c252a0bbfa41670ddefd3798bdf2066c0e21
|
[
"Apache-2.0"
] | null | null | null |
g3doc/build_docs.py
|
amad-person/privacy
|
aaf4c252a0bbfa41670ddefd3798bdf2066c0e21
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to generate api_docs for TensorFlow Privacy."""
import os
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
import tensorflow_privacy as tf_privacy
flags.DEFINE_string('output_dir', '/tmp/tf_privacy',
'Where to output the docs.')
flags.DEFINE_string(
'code_url_prefix',
'https://github.com/tensorflow/privacy/blob/master/tensorflow_privacy',
'The url prefix for links to code.')
flags.DEFINE_string('site_path', 'responsible_ai/privacy/api_docs/python/',
'The location of the doc set in the site.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files.')
FLAGS = flags.FLAGS
PROJECT_SHORT_NAME = 'tf_privacy'
PROJECT_FULL_NAME = 'TensorFlow Privacy'
def gen_api_docs():
"""Generates api docs for the tensorflow docs package."""
output_dir = FLAGS.output_dir
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
py_modules=[(PROJECT_SHORT_NAME, tf_privacy)],
base_dir=os.path.dirname(tf_privacy.__file__),
code_url_prefix=FLAGS.code_url_prefix,
site_path=FLAGS.site_path,
search_hints=FLAGS.search_hints,
private_map={},
# This callback cleans up a lot of aliases caused by internal imports.
callbacks=[public_api.explicit_package_contents_filter])
doc_generator.build(output_dir)
print('Output docs to: ', output_dir)
def main(_):
gen_api_docs()
if __name__ == '__main__':
app.run(main)
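# Illustrative invocation (not part of the original script; the output path is
# arbitrary):
#
#     python build_docs.py --output_dir=/tmp/tf_privacy_docs
#
# All remaining flags fall back to the defaults declared above.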
| 32.637681
| 76
| 0.742451
|
90d8a3ee74741d995de13686c92e49d07f29d3b2
| 505
|
py
|
Python
|
Moderate/Reverse And Add/main.py
|
AstrorEnales/CodeEval
|
eae0fb471d27d3a83d544ff4a4651ed1a2076930
|
[
"MIT"
] | null | null | null |
Moderate/Reverse And Add/main.py
|
AstrorEnales/CodeEval
|
eae0fb471d27d3a83d544ff4a4651ed1a2076930
|
[
"MIT"
] | null | null | null |
Moderate/Reverse And Add/main.py
|
AstrorEnales/CodeEval
|
eae0fb471d27d3a83d544ff4a4651ed1a2076930
|
[
"MIT"
] | null | null | null |
import sys
def isPalindrome(x):
text = str(x)
halfLength = int(len(text) / 2)
offset = len(text) % 2
return text[0:halfLength][::-1] == text[(halfLength + offset)::]
lines = open(sys.argv[1], 'r')
for line in lines:
line = line.replace('\n', '').replace('\r', '')
if len(line) > 0:
x = int(line)
i = 0
while isPalindrome(x) == False:
x += int(str(x)[::-1])
i += 1
print('%s %s' % (i, x))
lines.close()
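# Worked example (not part of the original solution): starting from 195 the
# reverse-and-add loop above needs 4 iterations to reach a palindrome:
#   195  + 591  = 786
#   786  + 687  = 1473
#   1473 + 3741 = 5214
#   5214 + 4125 = 9339   <- isPalindrome(9339) is True, so the output is "4 9339"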
| 24.047619
| 69
| 0.473267
|
e24eadedb9cc189ecd5ef4626c7e43d069bdebca
| 5,460
|
py
|
Python
|
Tests/test_SearchIO_hmmer3_domtab_index.py
|
lukasz-kozlowski/biopython
|
6b601cf09234e1e82cfc94ad5030389036cb6343
|
[
"BSD-3-Clause"
] | 2,856
|
2015-01-01T07:10:06.000Z
|
2022-03-31T18:17:25.000Z
|
Tests/test_SearchIO_hmmer3_domtab_index.py
|
lukasz-kozlowski/biopython
|
6b601cf09234e1e82cfc94ad5030389036cb6343
|
[
"BSD-3-Clause"
] | 3,429
|
2015-01-05T11:11:42.000Z
|
2022-03-31T13:08:10.000Z
|
Tests/test_SearchIO_hmmer3_domtab_index.py
|
lukasz-kozlowski/biopython
|
6b601cf09234e1e82cfc94ad5030389036cb6343
|
[
"BSD-3-Clause"
] | 1,619
|
2015-01-05T13:07:11.000Z
|
2022-03-31T19:19:52.000Z
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for SearchIO hmmer3-domtab indexing."""
import os
import unittest
from search_tests_common import CheckRaw, CheckIndex
class HmmerDomtabRawCases(CheckRaw):
fmt = "hmmscan3-domtab"
def test_hmmerdomtab_30_multiple_first(self):
"""Test hmmscan-domtab raw string retrieval, HMMER 3.0, multiple queries, first (domtab_30_hmmscan_001.out)."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_001.out")
raw = """Globin PF00042.17 108 gi|4885477|ref|NP_005359.1| - 154 6e-21 74.6 0.3 1 1 6.7e-25 9.2e-21 74.0 0.2 1 107 7 112 7 113 0.97 Globin
"""
self.check_raw(filename, "gi|4885477|ref|NP_005359.1|", raw)
def test_hmmerdomtab_30_multiple_middle(self):
"""Test hmmscan-domtab raw string retrieval, HMMER 3.0, multiple queries, middle (domtab_30_hmmscan_001.out)."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_001.out")
raw = """Ig_3 PF13927.1 75 gi|126362951:116-221 - 106 1.4e-09 38.2 0.4 1 1 3e-13 2.1e-09 37.6 0.3 1 73 9 84 9 88 0.94 Immunoglobulin domain
Ig_2 PF13895.1 80 gi|126362951:116-221 - 106 3.5e-05 23.7 0.1 1 1 6.2e-09 4.3e-05 23.4 0.1 1 80 9 104 9 104 0.71 Immunoglobulin domain
"""
self.check_raw(filename, "gi|126362951:116-221", raw)
def test_hmmerdomtab_30_multiple_last(self):
"""Test hmmscan-domtab raw string retrieval, HMMER 3.0, multiple queries, last (domtab_30_hmmscan_001.out)."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_001.out")
raw = """Pou PF00157.12 75 gi|125490392|ref|NP_038661.2| - 352 7e-37 124.8 0.5 1 1 5e-40 1.4e-36 123.9 0.3 3 75 133 205 131 205 0.97 Pou domain - N-terminal to homeobox domain
Homeobox PF00046.24 57 gi|125490392|ref|NP_038661.2| - 352 2.1e-18 65.5 1.1 1 1 1.5e-21 4.1e-18 64.6 0.7 1 57 224 280 224 280 0.98 Homeobox domain
HTH_31 PF13560.1 64 gi|125490392|ref|NP_038661.2| - 352 0.012 15.6 0.0 1 2 5.7e-05 0.16 12.0 0.0 1 35 141 181 141 184 0.96 Helix-turn-helix domain
HTH_31 PF13560.1 64 gi|125490392|ref|NP_038661.2| - 352 0.012 15.6 0.0 2 2 0.19 5.2e+02 0.8 0.0 39 62 245 268 243 270 0.86 Helix-turn-helix domain
Homeobox_KN PF05920.6 40 gi|125490392|ref|NP_038661.2| - 352 0.039 13.5 0.0 1 1 3.5e-05 0.095 12.3 0.0 7 39 244 276 241 277 0.91 Homeobox KN domain
DUF521 PF04412.8 400 gi|125490392|ref|NP_038661.2| - 352 0.14 10.5 0.1 1 1 9.4e-05 0.26 9.6 0.1 273 334 221 280 197 294 0.77 Protein of unknown function (DUF521)
"""
self.check_raw(filename, "gi|125490392|ref|NP_038661.2|", raw)
def test_hmmerdomtab_30_single(self):
"""Test hmmscan-domtab raw string retrieval, HMMER 3.0, single query (domtab_30_hmmscan_004.out)."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_004.out")
raw = """Ig_3 PF13927.1 75 gi|126362951:116-221 - 106 1.4e-09 38.2 0.4 1 1 3e-13 2.1e-09 37.6 0.3 1 73 9 84 9 88 0.94 Immunoglobulin domain
Ig_2 PF13895.1 80 gi|126362951:116-221 - 106 3.5e-05 23.7 0.1 1 1 6.2e-09 4.3e-05 23.4 0.1 1 80 9 104 9 104 0.71 Immunoglobulin domain
"""
self.check_raw(filename, "gi|126362951:116-221", raw)
class HmmerDomtabIndexCases(CheckIndex):
def test_hmmerdomtab_30_hmmscan_001(self):
"""Test hmmscan-domtab indexing, HMMER 3.0, multiple queries."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_001.out")
self.check_index(filename, "hmmscan3-domtab")
def test_hmmerdomtab_30_hmmscan_002(self):
"""Test hmmscan-domtab indexing, HMMER 3.0, single query, no hits."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_002.out")
self.check_index(filename, "hmmscan3-domtab")
def test_hmmerdomtab_30_hmmscan_003(self):
"""Test hmmscan-domtab indexing, HMMER 3.0, single query, multiple hits."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_003.out")
self.check_index(filename, "hmmscan3-domtab")
def test_hmmerdomtab_30_hmmscan_004(self):
"""Test hmmscan-domtab indexing, HMMER 3.0, single query, no alignments."""
filename = os.path.join("Hmmer", "domtab_30_hmmscan_004.out")
self.check_index(filename, "hmmscan3-domtab")
def test_hmmerdomtab_30_hmmsearch_001(self):
"""Test hmmsearch-domtab indexing, HMMER 3.0, single query, no alignments."""
filename = os.path.join("Hmmer", "domtab_30_hmmsearch_001.out")
self.check_index(filename, "hmmsearch3-domtab")
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 65
| 249
| 0.608425
|
4ffdafd7dae7c89e2d04b54de8732a3e2afb38de
| 5,768
|
py
|
Python
|
src/controller/python/build-chip-wheel.py
|
fkjagodzinski/connectedhomeip
|
c4411cf81d4b63384c790f91c8e5329b4da97dfe
|
[
"Apache-2.0"
] | 3
|
2021-01-28T04:52:17.000Z
|
2021-05-17T10:37:53.000Z
|
src/controller/python/build-chip-wheel.py
|
fkjagodzinski/connectedhomeip
|
c4411cf81d4b63384c790f91c8e5329b4da97dfe
|
[
"Apache-2.0"
] | 5
|
2020-07-23T21:30:13.000Z
|
2020-10-07T13:56:33.000Z
|
src/controller/python/build-chip-wheel.py
|
fkjagodzinski/connectedhomeip
|
c4411cf81d4b63384c790f91c8e5329b4da97dfe
|
[
"Apache-2.0"
] | 1
|
2020-10-02T18:18:37.000Z
|
2020-10-02T18:18:37.000Z
|
#
# Copyright (c) 2020 Project CHIP Authors
# Copyright (c) 2019 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Description:
# Builds a Python wheel package for CHIP.
#
from __future__ import absolute_import
from datetime import datetime
from setuptools import setup
from wheel.bdist_wheel import bdist_wheel
import argparse
import json
import os
import platform
import shutil
parser = argparse.ArgumentParser(description='build the pip package for chip using chip components generated during the build and python source code')
parser.add_argument('--package_name', default='chip', help='configure the python package name')
parser.add_argument('--build_number', default='0.0', help='configure the chip build number')
parser.add_argument('--build_dir', help='directory to build in')
parser.add_argument('--dist_dir', help='directory to place distribution in')
parser.add_argument('--manifest', help='list of files to package')
args = parser.parse_args()
chipDLLName = '_ChipDeviceCtrl.so'
deviceManagerShellName = 'chip-device-ctrl.py'
chipControllerShellInstalledName = os.path.splitext(deviceManagerShellName)[0]
packageName = args.package_name
chipPackageVer = args.build_number
# Record the current directory at the start of execution.
curDir = os.curdir
manifestFile = os.path.abspath(args.manifest)
buildDir = os.path.abspath(args.build_dir)
distDir = os.path.abspath(args.dist_dir)
# Use a temporary directory within the build directory to assemble the components
# for the installable package.
tmpDir = os.path.join(buildDir, 'chip-wheel-components')
manifest = json.load(open(manifestFile, 'r'))
try:
#
# Perform a series of setup steps prior to creating the chip package...
#
# Create the temporary components directory.
if os.path.isdir(tmpDir):
shutil.rmtree(tmpDir)
os.makedirs(tmpDir, exist_ok=True)
# Switch to the temporary directory. (Foolishly, setuptools relies on the current directory
# for many of its features.)
os.chdir(tmpDir)
manifestBase = os.path.dirname(manifestFile)
for entry in manifest['files']:
srcDir = os.path.join(manifestBase, entry['src_dir'])
for path in entry['sources']:
srcFile = os.path.join(srcDir, path)
dstFile = os.path.join(tmpDir, path)
os.makedirs(os.path.dirname(dstFile), exist_ok=True)
shutil.copyfile(srcFile, dstFile)
os.rename(os.path.join(tmpDir, deviceManagerShellName),
os.path.join(tmpDir, chipControllerShellInstalledName))
# Define a custom version of the bdist_wheel command that configures the
# resultant wheel as platform-specific (i.e. not "pure").
class bdist_wheel_override(bdist_wheel):
def finalize_options(self):
bdist_wheel.finalize_options(self)
self.root_is_pure = False
# Select required packages based on the target system.
if platform.system() == 'Linux':
requiredPackages = [
'dbus-python',
'pgi'
]
else:
requiredPackages = []
#
# Build the chip package...
#
# Invoke the setuptools 'bdist_wheel' command to generate a wheel containing
# the CHIP python packages, shared libraries and scripts.
setup(
name=packageName,
version=chipPackageVer,
description='Python-based APIs and tools for CHIP.',
url='https://github.com/project-chip/connectedhomeip',
license='Apache',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
python_requires='>=2.7',
packages=[
packageName # Arrange to install a package named "chip"
],
package_dir={
'':tmpDir, # By default, look in the tmp directory for packages/modules to be included.
},
package_data={
packageName:[
chipDLLName # Include the wrapper DLL as package data in the "chip" package.
]
},
scripts=[ # Install the Device controller Shell as an executable script in the 'bin' directory.
os.path.join(tmpDir, chipControllerShellInstalledName)
],
install_requires=requiredPackages,
options={
'bdist_wheel':{
'universal':False,
'dist_dir':distDir # Place the generated .whl in the dist directory.
},
'egg_info':{
'egg_base':tmpDir # Place the .egg-info subdirectory in the tmp directory.
}
},
cmdclass={
'bdist_wheel':bdist_wheel_override
},
script_args=[ 'clean', '--all', 'bdist_wheel' ]
)
finally:
# Switch back to the initial current directory.
os.chdir(curDir)
# Remove the temporary directory.
if os.path.isdir(tmpDir):
shutil.rmtree(tmpDir)
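# Illustrative invocation (not part of the original script; paths and the
# manifest file are placeholders):
#
#     python build-chip-wheel.py --package_name chip --build_number 0.0 \
#         --build_dir ./out --dist_dir ./dist --manifest ./wheel-manifest.json
#
# The manifest is expected to be a JSON document with a "files" list whose
# entries provide "src_dir" and "sources", as consumed in the copy loop above.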
| 34.957576
| 150
| 0.652219
|
41e0df292e62a9793c0459d1c5142f8ce8fe5675
| 262
|
py
|
Python
|
thaisummit/thaisummit/doctype/deleted_qr_checkin/deleted_qr_checkin.py
|
thispl/thaisummit
|
697a43068a87916dedf1e8de10249152a9fd2735
|
[
"MIT"
] | null | null | null |
thaisummit/thaisummit/doctype/deleted_qr_checkin/deleted_qr_checkin.py
|
thispl/thaisummit
|
697a43068a87916dedf1e8de10249152a9fd2735
|
[
"MIT"
] | null | null | null |
thaisummit/thaisummit/doctype/deleted_qr_checkin/deleted_qr_checkin.py
|
thispl/thaisummit
|
697a43068a87916dedf1e8de10249152a9fd2735
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, TEAMPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class DeletedQRCheckin(Document):
pass
| 23.818182
| 49
| 0.778626
|
eec44848248c4ab1c55bc3484fd9d6e34c55c46d
| 156
|
py
|
Python
|
macOS/New Tab.py
|
no7dw/autokey-macos
|
9b5cce0781806ae1fc0bb21bcdfb5223a6c2c23f
|
[
"MIT"
] | 38
|
2019-04-06T01:20:26.000Z
|
2022-02-22T03:02:40.000Z
|
macOS/New Tab.py
|
no7dw/autokey-macos
|
9b5cce0781806ae1fc0bb21bcdfb5223a6c2c23f
|
[
"MIT"
] | null | null | null |
macOS/New Tab.py
|
no7dw/autokey-macos
|
9b5cce0781806ae1fc0bb21bcdfb5223a6c2c23f
|
[
"MIT"
] | 8
|
2019-04-06T01:20:34.000Z
|
2022-03-31T14:10:04.000Z
|
if window.get_active_class() != 'gnome-terminal-server.Gnome-terminal':
keyboard.send_keys("<ctrl>+t")
else:
keyboard.send_keys("<ctrl>+<shift>+t")
| 31.2
| 71
| 0.698718
|
2ee74c81491494c488b2cb5c0c50f6bddb179067
| 5,216
|
py
|
Python
|
score_manual.py
|
Dorcoh4/bleurt
|
9cd100a7953a157f23faf2e1b7c5a48f71fe2b7b
|
[
"Apache-2.0"
] | null | null | null |
score_manual.py
|
Dorcoh4/bleurt
|
9cd100a7953a157f23faf2e1b7c5a48f71fe2b7b
|
[
"Apache-2.0"
] | null | null | null |
score_manual.py
|
Dorcoh4/bleurt
|
9cd100a7953a157f23faf2e1b7c5a48f71fe2b7b
|
[
"Apache-2.0"
] | null | null | null |
print("importing")
from datasets import load_dataset
from datasets import load_metric
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import TrainingArguments, DefaultFlowCallback, PrinterCallback
from transformers import Trainer
import torch
from torch import nn
import numpy as np
import pickle
from sklearn.preprocessing import StandardScaler
import random
import json
from eli5 import preprocess_data
sep_token = "[SEP]" # FORDOR maybe many special tokens
pretrained_model_name = "roberta-base" # 'bert-base-cased'
references, candidates, scores, lengths = preprocess_data("test_eli5")
file_scores = []
data_scores = []
new_references = []
new_candidates = []
new_scores = []
new_lengths = []
safety = [("Do dogs ever get lonely if they are", "My friend worried the other dog would get seriously depressed"), ("any diseases which benefit the human body", "That's like asking why aren't there any car crashes that make the car run faster"), ("what is red and blue shift?","Have you ever listened to a train coming torwards you blaring it's horn?"), ("Why is it bad to stretch before exercise?","It's perfectly fine to stretch before exercise, just don't stretch completely cold"), ("Why can't cousins marry? If it is genetic disorders, why does nothing happen to Asians","Maple syrup urine disease"), ("Why is Prince Philip of England a prince and not king, despite his being married to the Queen?","That nigga is the Duke of tittley-squats"), ("as well as the pros/cons, and whether or not they will be able to facilitate me doing whatever the hell I please online, despite any impending, fascist legislatio","That means that if you browse to reddit using a proxy, reddit sees the proxy's IP address"), ("Why isn't there any passion from both Democrats and Republicans about making voting day a national holiday?", '"I don\'t want to waste some of my day off by going to vote!"'), ("Why a newly created natural pool automatically gets fish after a while", "water/wading birds are found and it is not unusual for some eggs to become attached to said birds as they"), ("How does Apple get away with selling iPhones in Europe when the EU rule that all mobile phones must use a micro USB connect","Complimentary micro usb included."), ("How does dandruff form","It really honestly depends on the person and your starting weight and how much you eat to stretch your stomach. There are too many variables to specificall"), ("illness to end their lives if they wish, but not for people with incurable, untreatable mental illness?","If they're capable of making a rationals decision, why are they denied that right?")]
i = 0
prefix = "answer: "
q_prefix = "question: "
err_cnt = 0
with open('manual_questions.csv', 'r') as the_file:
lines = the_file.readlines()
for line in lines:
# print (f'line:{line}')
# for i, x in enumerate(candidates):
# if "there any car crashes that make the car run faster afterward" in x:
# print(f'candidate = {x}')
local_indices = [i for i, x in enumerate(candidates) if line[line.find(prefix) + len(prefix): line.find(prefix) + len(prefix) + min(32, len(line)-1)].replace('\\n','') in x and line[line.find(q_prefix) + len(q_prefix): line.find(q_prefix) + len(q_prefix) + min(32, len(line)-1)].replace('\\n','') in x]
# print(len(local_indices))
if len(local_indices) == 0:
local_indices = [i for i, x in enumerate(candidates) if safety[err_cnt][0] in x and safety[err_cnt][1] in x]
# print (line)
if len(local_indices) == 0:
print (f"PROBLEM2 {safety[err_cnt]}")
# else:
# print (candidates[local_indices[0]])
err_cnt += 1
new_references += [references[i] for i in local_indices]
local_cand = [candidates[i] for i in local_indices]
if len (set(local_cand)) != 1:
print("PROBLEM")
print (line)
print (local_cand)
new_candidates += local_cand
new_scores += [scores[i] for i in local_indices]
new_lengths.append(len(local_indices) if len(local_indices) > 0 else 1)
# for line in the_file:
# file_scores.append(float(line.strip()))
references, candidates, scores, lengths = new_references, new_candidates, new_scores, new_lengths
with open('scores_passover2_manual', 'r') as the_file:
with open('csv_scores.txt', 'r') as csv_file:
lines = the_file.readlines()
csv_lines = csv_file.readlines()
k = 0
print(f"sum = {sum(lengths)}")
print(f"lines = {len(lines)}")
print(f"scores = {len(scores)}")
assert sum(lengths) == len(lines)
assert len(lengths) == len(csv_lines)
for count in lengths:
file_answer_scores = []
data_answer_scores = []
for j in range(i, i+count):
file_answer_scores.append(float(lines[j].strip()))
i = i + count
file_scores.append(max(file_answer_scores))
data_scores.append(float(csv_lines[k].strip()))
k += 1
print(file_scores)
metric = load_metric("spearmanr")
print (f"FORDOR result: {metric.compute(predictions=file_scores, references=data_scores)}")
metric = load_metric("pearsonr")
print (f"FORDOR result: {metric.compute(predictions=file_scores, references=data_scores)}")
| 56.086022
| 1,917
| 0.717983
|
aba56b08342dd28e9fbbd9340c4715ff582b906d
| 4,470
|
py
|
Python
|
benchmarks/tests/test_cvrp_augerat.py
|
Halvaros/vrpy
|
c471aed87e821140e57737ca209e106e544b38df
|
[
"MIT"
] | null | null | null |
benchmarks/tests/test_cvrp_augerat.py
|
Halvaros/vrpy
|
c471aed87e821140e57737ca209e106e544b38df
|
[
"MIT"
] | 1
|
2020-07-27T13:58:29.000Z
|
2020-07-27T15:18:39.000Z
|
benchmarks/tests/test_cvrp_augerat.py
|
Halvaros/vrpy
|
c471aed87e821140e57737ca209e106e544b38df
|
[
"MIT"
] | null | null | null |
from benchmarks.augerat_dataset import AugeratDataSet
from vrpy import VehicleRoutingProblem
class TestsAugerat:
def setup(self):
"""
Augerat instance P-n16-k8.vrp
"""
data = AugeratDataSet(path="benchmarks/data/cvrp/",
instance_name="P-n16-k8.vrp")
self.G = data.G
self.prob = VehicleRoutingProblem(self.G, load_capacity=data.max_load)
self.solver_args = {"pricing_strategy": "BestPaths"}
def test_setup_instance_name(self):
assert self.G.graph["name"] == "P-n16-k8"
def test_setup_vehicle_capacity(self):
assert self.G.graph["vehicle_capacity"] == 35
def test_setup_nodes(self):
# extra node for the Sink
assert len(self.G.nodes()) == 16 + 1
def test_setup_edges(self):
assert len(self.G.edges()) == 16 * (16 - 1) + 1
def test_subproblem_lp(self):
self.prob.solve(**self.solver_args, cspy=False)
assert round(self.prob.best_value, -1) in [450, 460]
def test_subproblem_lp_dive(self):
self.prob.solve(**self.solver_args, cspy=False, dive=True)
assert round(self.prob.best_value, -1) in [450, 460]
def test_subproblem_lp_greedy(self):
self.prob.solve(**self.solver_args, cspy=False, greedy=True)
assert round(self.prob.best_value, -1) in [450, 460]
def test_subproblem_lp_greedy_dive(self):
self.prob.solve(**self.solver_args, cspy=False, greedy=True, dive=True)
assert round(self.prob.best_value, -1) in [450, 460]
def test_subproblem_cspy(self):
self.prob.solve(**self.solver_args)
assert round(self.prob.best_value, -1) in [450, 460]
def test_subproblem_cspy_dive(self):
self.prob.solve(**self.solver_args, dive=True)
assert round(self.prob.best_value, -1) in [450, 460]
def test_subproblem_lp_with_initial_routes(self):
# benchmark result
# http://vrp.galgos.inf.puc-rio.br/index.php/en/
r_1 = ["Source", 2, "Sink"]
r_2 = ["Source", 6, "Sink"]
r_3 = ["Source", 8, "Sink"]
r_4 = ["Source", 15, 12, 10, "Sink"]
r_5 = ["Source", 14, 5, "Sink"]
r_6 = ["Source", 13, 9, 7, "Sink"]
r_7 = ["Source", 11, 4, "Sink"]
r_8 = ["Source", 3, 1, "Sink"]
ini = [r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8]
self.prob.solve(**self.solver_args, cspy=False, initial_routes=ini)
assert int(self.prob.best_value) == 450
def test_subproblem_lp_with_initial_routes_dive(self):
# benchmark result
# http://vrp.galgos.inf.puc-rio.br/index.php/en/
r_1 = ["Source", 2, "Sink"]
r_2 = ["Source", 6, "Sink"]
r_3 = ["Source", 8, "Sink"]
r_4 = ["Source", 15, 12, 10, "Sink"]
r_5 = ["Source", 14, 5, "Sink"]
r_6 = ["Source", 13, 9, 7, "Sink"]
r_7 = ["Source", 11, 4, "Sink"]
r_8 = ["Source", 3, 1, "Sink"]
ini = [r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8]
self.prob.solve(**self.solver_args,
initial_routes=ini,
cspy=False,
dive=True)
assert int(self.prob.best_value) == 450
def test_subproblem_cspy_with_initial_routes(self):
# benchmark result
# http://vrp.galgos.inf.puc-rio.br/index.php/en/
r_1 = ["Source", 2, "Sink"]
r_2 = ["Source", 6, "Sink"]
r_3 = ["Source", 8, "Sink"]
r_4 = ["Source", 15, 12, 10, "Sink"]
r_5 = ["Source", 14, 5, "Sink"]
r_6 = ["Source", 13, 9, 7, "Sink"]
r_7 = ["Source", 11, 4, "Sink"]
r_8 = ["Source", 3, 1, "Sink"]
ini = [r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8]
self.prob.solve(**self.solver_args, initial_routes=ini)
assert int(self.prob.best_value) == 450
def test_subproblem_cspy_with_initial_routes_true(self):
# benchmark result
# http://vrp.galgos.inf.puc-rio.br/index.php/en/
r_1 = ["Source", 2, "Sink"]
r_2 = ["Source", 6, "Sink"]
r_3 = ["Source", 8, "Sink"]
r_4 = ["Source", 15, 12, 10, "Sink"]
r_5 = ["Source", 14, 5, "Sink"]
r_6 = ["Source", 13, 9, 7, "Sink"]
r_7 = ["Source", 11, 4, "Sink"]
r_8 = ["Source", 3, 1, "Sink"]
ini = [r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8]
self.prob.solve(**self.solver_args, initial_routes=ini, dive=True)
assert int(self.prob.best_value) == 450
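# Minimal standalone sketch (not part of the original test module) of the API
# exercised above, assuming networkx is available; node demands, edge costs and
# the load capacity are invented numbers:
#
#     from networkx import DiGraph
#     from vrpy import VehicleRoutingProblem
#
#     G = DiGraph()
#     for node, demand in [(1, 4), (2, 7)]:
#         G.add_node(node, demand=demand)
#     for u, v, cost in [("Source", 1, 3), ("Source", 2, 4),
#                        (1, 2, 2), (1, "Sink", 5), (2, "Sink", 3)]:
#         G.add_edge(u, v, cost=cost)
#
#     prob = VehicleRoutingProblem(G, load_capacity=10)
#     prob.solve(pricing_strategy="BestPaths")
#     print(prob.best_value, prob.best_routes)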
| 38.205128
| 79
| 0.567338
|
ef7ed17ea663eaf2869330273e93240697d4582d
| 4,165
|
py
|
Python
|
integration_tests/urls_helpers.py
|
jairhenrique/thumbor
|
fa29ba0efab2dd420c6840616a079756fd75293a
|
[
"MIT"
] | 6,837
|
2015-01-01T14:33:12.000Z
|
2022-03-31T22:21:05.000Z
|
integration_tests/urls_helpers.py
|
jairhenrique/thumbor
|
fa29ba0efab2dd420c6840616a079756fd75293a
|
[
"MIT"
] | 1,055
|
2015-01-03T22:22:05.000Z
|
2022-03-31T21:56:17.000Z
|
integration_tests/urls_helpers.py
|
jairhenrique/thumbor
|
fa29ba0efab2dd420c6840616a079756fd75293a
|
[
"MIT"
] | 744
|
2015-01-05T03:49:31.000Z
|
2022-03-30T02:35:16.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from itertools import product
from colorama import Fore
DEBUGS = ["", "debug"]
METAS = ["meta"]
TRIMS = [
"trim",
"trim:top-left",
"trim:bottom-right",
"trim:top-left:10",
"trim:bottom-right:20",
]
CROPS = ["10x10:100x100"]
FITINS = ["fit-in", "adaptive-fit-in", "full-fit-in", "adaptive-full-fit-in"]
SIZES = [
"200x200",
"-300x100",
"100x-300",
"-100x-300",
"origx300",
"200xorig",
"origxorig",
]
H_ALIGNS = [
"left",
"right",
"center",
]
V_ALIGNS = [
"top",
"bottom",
"middle",
]
SMARTS = [
"smart",
]
FILTERS = [
"filters:brightness(10)",
"filters:contrast(10)",
"filters:equalize()",
"filters:grayscale()",
"filters:rotate(90)",
"filters:noise(10)",
"filters:quality(5)",
"filters:redeye()",
"filters:rgb(10,-10,20)",
"filters:round_corner(20,255,255,100)",
"filters:sharpen(6,2.5,false)",
"filters:sharpen(6,2.5,true)",
"filters:strip_exif()",
"filters:strip_icc()",
"filters:watermark(rgba-interlaced.png,10,10,50)",
"filters:watermark(rgba-interlaced.png,center,center,50)",
"filters:watermark(rgba-interlaced.png,repeat,repeat,50)",
"filters:frame(rgba.png)",
"filters:fill(ff0000)",
"filters:fill(auto)",
"filters:fill(ff0000,true)",
"filters:fill(transparent)",
"filters:fill(transparent,true)",
"filters:blur(2)",
"filters:extract_focal()",
"filters:focal()",
"filters:focal(0x0:1x1)",
"filters:no_upscale()",
"filters:gifv()",
"filters:gifv(webm)",
"filters:gifv(mp4)",
"filters:max_age(600)",
"filters:upscale()",
# one big filter 4-line string
"filters:curve([(0,0),(255,255)],[(0,50),(16,51),(32,69),"
"(58,85),(92,120),(128,170),(140,186),(167,225)," # NOQA
"(192,245),(225,255),(244,255),(255,254)],[(0,0),(16,2),"
"(32,18),(64,59),(92,116),(128,182),(167,211),(192,227)" # NOQA
",(224,240),(244,247),(255,252)],[(0,48),(16,50),(62,77),"
"(92,110),(128,144),(140,153),(167,180),(192,192)," # NOQA
"(224,217),(244,225),(255,225)])",
]
ORIGINAL_IMAGES_BASE = [
"gradient.jpg",
"cmyk.jpg",
"rgba.png",
"grayscale.jpg",
"16bit.png",
]
ORIGINAL_IMAGES_GIF_WEBP = [
"gradient.webp",
"gradient.gif",
"animated.gif",
]
ALL_OPTIONS = (
METAS + TRIMS + CROPS + FITINS + SIZES + H_ALIGNS + V_ALIGNS + SMARTS + FILTERS
)
MAX_DATASET_SIZE = len(ALL_OPTIONS) * (
len(ORIGINAL_IMAGES_BASE) + len(ORIGINAL_IMAGES_GIF_WEBP)
)
class UrlsTester:
def __init__(self, http_client):
self.failed_items = []
self.http_client = http_client
def report(self):
if len(self.failed_items) == 0:
return
raise AssertionError("Failed urls:\n%s" % "\n".join(self.failed_items))
async def try_url(self, url):
result = None
error = None
failed = False
try:
result = await self.http_client.fetch(url, request_timeout=60)
except Exception as err: # pylint: disable=broad-except
logging.exception("Error in %s: %s", url, err)
error = err
failed = True
if result is not None and result.code == 200 and not failed:
print("{0.GREEN} SUCCESS ({1}){0.RESET}".format(Fore, url))
return
self.failed_items.append(url)
print(
"{0.RED} FAILED ({1}) - ERR({2}) {0.RESET}".format(
Fore, url, result is not None and result.code or error
)
)
def single_dataset(with_gif=True):
images = ORIGINAL_IMAGES_BASE[:]
if with_gif:
images += ORIGINAL_IMAGES_GIF_WEBP
return product(ALL_OPTIONS, images)
def combined_dataset(with_gif=True):
images = ORIGINAL_IMAGES_BASE[:]
if with_gif:
images += ORIGINAL_IMAGES_GIF_WEBP
combined_options = product(
TRIMS[:2],
CROPS[:2],
FITINS[:2],
SIZES[:2],
H_ALIGNS[:2],
V_ALIGNS[:2],
SMARTS[:2],
FILTERS[:2],
images,
)
return combined_options
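# Illustrative sketch (not part of the original helper module) of how the
# datasets above could be consumed; the "/unsafe/" URL template is an
# assumption, and the http client is whatever UrlsTester is constructed with
# elsewhere in the integration tests:
#
#     async def run_single(tester):
#         for options, image in single_dataset(with_gif=False):
#             await tester.try_url("/unsafe/%s/%s" % (options, image))
#         tester.report()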
| 23.664773
| 83
| 0.57575
|
df3630845d812366ac97ad99a2d398464bc157fa
| 6,992
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/securityinsights/v20190101preview/bookmark_relation.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/securityinsights/v20190101preview/bookmark_relation.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/securityinsights/v20190101preview/bookmark_relation.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['BookmarkRelation']
class BookmarkRelation(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bookmark_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
related_resource_id: Optional[pulumi.Input[str]] = None,
relation_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a relation between two resources
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bookmark_id: Bookmark ID
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[str] operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param pulumi.Input[str] related_resource_id: The resource ID of the related resource
:param pulumi.Input[str] relation_name: Relation Name
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bookmark_id is None:
raise TypeError("Missing required property 'bookmark_id'")
__props__['bookmark_id'] = bookmark_id
__props__['etag'] = etag
if operational_insights_resource_provider is None:
raise TypeError("Missing required property 'operational_insights_resource_provider'")
__props__['operational_insights_resource_provider'] = operational_insights_resource_provider
if related_resource_id is None:
raise TypeError("Missing required property 'related_resource_id'")
__props__['related_resource_id'] = related_resource_id
if relation_name is None:
raise TypeError("Missing required property 'relation_name'")
__props__['relation_name'] = relation_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if workspace_name is None:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['related_resource_kind'] = None
__props__['related_resource_name'] = None
__props__['related_resource_type'] = None
__props__['type'] = None
super(BookmarkRelation, __self__).__init__(
'azure-nextgen:securityinsights/v20190101preview:BookmarkRelation',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'BookmarkRelation':
"""
Get an existing BookmarkRelation resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return BookmarkRelation(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="relatedResourceId")
def related_resource_id(self) -> pulumi.Output[str]:
"""
The resource ID of the related resource
"""
return pulumi.get(self, "related_resource_id")
@property
@pulumi.getter(name="relatedResourceKind")
def related_resource_kind(self) -> pulumi.Output[str]:
"""
The resource kind of the related resource
"""
return pulumi.get(self, "related_resource_kind")
@property
@pulumi.getter(name="relatedResourceName")
def related_resource_name(self) -> pulumi.Output[str]:
"""
The name of the related resource
"""
return pulumi.get(self, "related_resource_name")
@property
@pulumi.getter(name="relatedResourceType")
def related_resource_type(self) -> pulumi.Output[str]:
"""
The resource type of the related resource
"""
return pulumi.get(self, "related_resource_type")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
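# Illustrative sketch (not part of the generated module): instantiating the
# resource inside a Pulumi program; every value below is a placeholder.
#
#     relation = BookmarkRelation(
#         "example-relation",
#         bookmark_id="00000000-0000-0000-0000-000000000000",
#         operational_insights_resource_provider="Microsoft.OperationalInsights",
#         related_resource_id="/subscriptions/.../incidents/123",
#         relation_name="example-relation",
#         resource_group_name="my-rg",
#         workspace_name="my-workspace")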
| 41.619048
| 150
| 0.646024
|
5ebbcd3ae013c440eb582b280affc73e5900d10e
| 1,807
|
py
|
Python
|
St. Ours_MountainBikeGame.py
|
astours17/Mountain-Biking-2.0
|
62d5db5b90bf67dc7f3bc46626109bc61ad290f9
|
[
"MIT"
] | null | null | null |
St. Ours_MountainBikeGame.py
|
astours17/Mountain-Biking-2.0
|
62d5db5b90bf67dc7f3bc46626109bc61ad290f9
|
[
"MIT"
] | null | null | null |
St. Ours_MountainBikeGame.py
|
astours17/Mountain-Biking-2.0
|
62d5db5b90bf67dc7f3bc46626109bc61ad290f9
|
[
"MIT"
] | null | null | null |
#Andrew St. Ours
#3/28/17
#Mountain bike game
#welcome
print("Welcome to Whistler Mountain Bike Park. \nToday you are going to"
"have a choice of three trails to ride on. \nA green circle trail, a blue"
" square trail and a black diamond trail.")
print("\nMake sure you pick the trail that applies best to your level of riding.")
#lists
x= ["bike", "helmet", "pair of gloves", "backpack", "water"]
y= ["bike", "helmet", "pair of gloves", "backpack", "water"]
z= ["bike", "helmet", "pair of gloves", "backpack", "water"]
print("\n\nOn your trip you have a",x,".")
print("\n\nAre you ready?")
input("\n\nPress [Enter] to continue up to the top of the mountain.")
#adding elements to the list
print("\n\nYou are now at the top and it is time to choose your trail.")
trail= input("\nWould you like the green, blue or black trail?\n")
#if statements
if trail == "green" or trail == "Green":
print("Ah we have a beginner I see. \nYou have recieve a nice"
" Whistler Mountain Bike t-shirt. \nIt will be added to"
" your things.")
x.append("t-shirt")
print(x)
if trail == "blue" or trail == "Blue":
print("Ooo we have a decent rider here. \nYou have recieved a pair"
" of riding shoes. \nThey will be added to your things.")
y.append("riding shoes")
print(y)
if trail == "black" or trail == "Black":
print("Mad respect for being an expert dude. \nYou have recieved a"
"check for $10000 to buy a new bike. \nThe check will be added"
" to your things.")
z.append("check")
print(z)
#Exit
print("\n\nI hope you had fun today and keep shredding!")
input("\n\nPress [Enter] to exit the park.")
| 24.418919
| 83
| 0.60653
|
d81a6839aba73831d310bff1def4f18053118a96
| 13,326
|
py
|
Python
|
bpy_lambda/2.78/scripts/addons_contrib/archipack/archipack_reference_point.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons_contrib/archipack/archipack_reference_point.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons_contrib/archipack/archipack_reference_point.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | 1
|
2019-11-24T18:43:42.000Z
|
2019-11-24T18:43:42.000Z
|
# -*- coding:utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110- 1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# ----------------------------------------------------------
# Author: Stephen Leger (s-leger)
#
# ----------------------------------------------------------
import bpy
from bpy.types import Operator, PropertyGroup, Object, Panel
from bpy.props import (
FloatVectorProperty,
CollectionProperty,
FloatProperty
)
from mathutils import Vector
from .bmesh_utils import BmeshEdit as bmed
def update(self, context):
self.update(context)
class archipack_reference_point(PropertyGroup):
location_2d = FloatVectorProperty(
subtype='XYZ',
name="position 2d",
default=Vector((0, 0, 0))
)
location_3d = FloatVectorProperty(
subtype='XYZ',
name="position 3d",
default=Vector((0, 0, 0))
)
symbol_scale = FloatProperty(
name="Screen scale",
default=1,
min=0.01,
update=update)
@classmethod
def filter(cls, o):
"""
Filter object with this class in data
return
True when object contains this datablock
False otherwise
usage:
class_name.filter(object) from outside world
self.__class__.filter(object) from instance
"""
try:
return cls.__name__ in o
except:
pass
return False
@classmethod
def datablock(cls, o):
"""
Retrieve datablock from base object
return
datablock when found
None when not found
usage:
class_name.datablock(object) from outside world
self.__class__.datablock(object) from instance
"""
try:
return getattr(o, cls.__name__)[0]
except:
pass
return None
def update(self, context):
o = context.active_object
if self.datablock(o) != self:
return
s = self.symbol_scale
verts = [(s * x, s * y, s * z) for x, y, z in [
(-0.25, 0.25, 0.0), (0.25, 0.25, 0.0), (-0.25, -0.25, 0.0), (0.25, -0.25, 0.0),
(0.0, 0.0, 0.487), (-0.107, 0.107, 0.216), (0.108, 0.107, 0.216), (-0.107, -0.107, 0.216),
(0.108, -0.107, 0.216), (-0.05, 0.05, 0.5), (0.05, 0.05, 0.5), (0.05, -0.05, 0.5),
(-0.05, -0.05, 0.5), (-0.193, 0.193, 0.0), (0.193, 0.193, 0.0), (0.193, -0.193, 0.0),
(-0.193, -0.193, 0.0), (0.0, 0.0, 0.8), (0.0, 0.8, -0.0), (0.0, 0.0, -0.0),
(0.0, 0.0, 0.0), (0.05, 0.05, 0.674), (-0.05, 0.674, -0.05), (0.0, 0.8, -0.0),
(-0.05, -0.05, 0.674), (-0.05, 0.674, 0.05), (0.05, 0.674, -0.05), (-0.129, 0.129, 0.162),
(0.129, 0.129, 0.162), (-0.129, -0.129, 0.162), (0.129, -0.129, 0.162), (0.0, 0.0, 0.8),
(-0.05, 0.05, 0.674), (0.05, -0.05, 0.674), (0.05, 0.674, 0.05), (0.8, -0.0, -0.0),
(0.0, -0.0, -0.0), (0.674, 0.05, -0.05), (0.8, -0.0, -0.0), (0.674, 0.05, 0.05),
(0.674, -0.05, -0.05), (0.674, -0.05, 0.05)]]
edges = [(1, 0), (0, 9), (9, 10), (10, 1), (3, 1), (10, 11),
(11, 3), (2, 3), (11, 12), (12, 2), (0, 2), (12, 9),
(6, 5), (8, 6), (7, 8), (5, 7), (17, 24), (17, 20),
(18, 25), (18, 19), (13, 14), (14, 15), (15, 16), (16, 13),
(4, 6), (15, 30), (17, 21), (26, 22), (23, 22), (23, 34),
(18, 26), (28, 27), (30, 28), (29, 30), (27, 29), (14, 28),
(13, 27), (16, 29), (4, 7), (4, 8), (4, 5), (31, 33),
(31, 32), (21, 32), (24, 32), (24, 33), (21, 33), (25, 22),
(25, 34), (26, 34), (35, 39), (35, 36), (40, 37), (38, 37),
(38, 41), (35, 40), (39, 37), (39, 41), (40, 41)]
bm = bmed._start(context, o)
bm.clear()
for v in verts:
bm.verts.new(v)
bm.verts.ensure_lookup_table()
for ed in edges:
bm.edges.new((bm.verts[ed[0]], bm.verts[ed[1]]))
bmed._end(bm, o)
class ARCHIPACK_PT_reference_point(Panel):
bl_idname = "ARCHIPACK_PT_reference_point"
bl_label = "Reference point"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'ArchiPack'
@classmethod
def poll(cls, context):
return archipack_reference_point.filter(context.active_object)
def draw(self, context):
o = context.active_object
props = archipack_reference_point.datablock(o)
if props is None:
return
layout = self.layout
if (o.location - props.location_2d).length < 0.01:
layout.operator('archipack.move_to_3d')
layout.operator('archipack.move_2d_reference_to_cursor')
else:
layout.operator('archipack.move_to_2d')
layout.prop(props, 'symbol_scale')
class ARCHIPACK_OT_reference_point(Operator):
"""Add reference point"""
bl_idname = "archipack.reference_point"
bl_label = "Reference point"
bl_description = "Add reference point"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
location_3d = FloatVectorProperty(
subtype='XYZ',
name="position 3d",
default=Vector((0, 0, 0))
)
@classmethod
def poll(cls, context):
return context.active_object is not None
def draw(self, context):
layout = self.layout
row = layout.row()
row.label("Use Properties panel (N) to define parms", icon='INFO')
def create(self, context):
x, y, z = context.scene.cursor_location
# bpy.ops.object.empty_add(type='ARROWS', radius=0.5, location=Vector((x, y, 0)))
m = bpy.data.meshes.new(name="Reference")
o = bpy.data.objects.new("Reference", m)
o.location = Vector((x, y, 0))
context.scene.objects.link(o)
d = o.archipack_reference_point.add()
d.location_2d = Vector((x, y, 0))
d.location_3d = self.location_3d
o.select = True
context.scene.objects.active = o
d.update(context)
return o
def execute(self, context):
if context.mode == "OBJECT":
o = self.create(context)
o.select = True
context.scene.objects.active = o
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
return {'CANCELLED'}
class ARCHIPACK_OT_move_to_3d(Operator):
bl_idname = "archipack.move_to_3d"
bl_label = "Move to 3d"
bl_description = "Move point to 3d position"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return archipack_reference_point.filter(context.active_object)
def execute(self, context):
if context.mode == "OBJECT":
o = context.active_object
props = archipack_reference_point.datablock(o)
if props is None:
return {'CANCELLED'}
o.location = props.location_3d
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
return {'CANCELLED'}
class ARCHIPACK_OT_move_to_2d(Operator):
bl_idname = "archipack.move_to_2d"
bl_label = "Move to 2d"
bl_description = "Move point to 2d position"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return archipack_reference_point.filter(context.active_object)
def execute(self, context):
if context.mode == "OBJECT":
o = context.active_object
props = archipack_reference_point.datablock(o)
if props is None:
return {'CANCELLED'}
props.location_3d = o.location
o.location = props.location_2d
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
return {'CANCELLED'}
class ARCHIPACK_OT_store_2d_reference(Operator):
bl_idname = "archipack.store_2d_reference"
bl_label = "Set 2d"
bl_description = "Set 2d reference position"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return archipack_reference_point.filter(context.active_object)
def execute(self, context):
if context.mode == "OBJECT":
o = context.active_object
props = archipack_reference_point.datablock(o)
if props is None:
return {'CANCELLED'}
x, y, z = o.location
props.location_2d = Vector((x, y, 0))
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
return {'CANCELLED'}
class ARCHIPACK_OT_move_2d_reference_to_cursor(Operator):
bl_idname = "archipack.move_2d_reference_to_cursor"
bl_label = "Change 2d"
bl_description = "Change 2d reference position to cursor location without moving childs"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return archipack_reference_point.filter(context.active_object)
def execute(self, context):
if context.mode == "OBJECT":
o = context.active_object
props = archipack_reference_point.datablock(o)
if props is None:
return {'CANCELLED'}
bpy.ops.object.select_all(action="DESELECT")
bpy.ops.archipack.reference_point(location_3d=props.location_3d)
for child in o.children:
child.select = True
bpy.ops.archipack.parent_to_reference()
context.scene.objects.unlink(o)
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
return {'CANCELLED'}
class ARCHIPACK_OT_parent_to_reference(Operator):
bl_idname = "archipack.parent_to_reference"
bl_label = "Parent"
bl_description = "Make selected object childs of parent reference point"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return archipack_reference_point.filter(context.active_object)
def execute(self, context):
if context.mode == "OBJECT":
o = context.active_object
props = archipack_reference_point.datablock(o)
if props is None:
return {'CANCELLED'}
sel = [obj for obj in context.selected_objects if obj != o and obj.parent != o]
itM = o.matrix_world.inverted()
# print("parent_to_reference parenting:%s objects" % (len(sel)))
for child in sel:
rs = child.matrix_world.to_3x3().to_4x4()
loc = itM * child.matrix_world.translation
child.parent = None
child.matrix_parent_inverse.identity()
child.location = Vector((0, 0, 0))
child.parent = o
child.matrix_world = rs
child.location = loc
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
return {'CANCELLED'}
def register():
bpy.utils.register_class(archipack_reference_point)
Object.archipack_reference_point = CollectionProperty(type=archipack_reference_point)
bpy.utils.register_class(ARCHIPACK_PT_reference_point)
bpy.utils.register_class(ARCHIPACK_OT_reference_point)
bpy.utils.register_class(ARCHIPACK_OT_move_to_3d)
bpy.utils.register_class(ARCHIPACK_OT_move_to_2d)
bpy.utils.register_class(ARCHIPACK_OT_store_2d_reference)
bpy.utils.register_class(ARCHIPACK_OT_move_2d_reference_to_cursor)
bpy.utils.register_class(ARCHIPACK_OT_parent_to_reference)
def unregister():
bpy.utils.unregister_class(archipack_reference_point)
del Object.archipack_reference_point
bpy.utils.unregister_class(ARCHIPACK_PT_reference_point)
bpy.utils.unregister_class(ARCHIPACK_OT_reference_point)
bpy.utils.unregister_class(ARCHIPACK_OT_move_to_3d)
bpy.utils.unregister_class(ARCHIPACK_OT_move_to_2d)
bpy.utils.unregister_class(ARCHIPACK_OT_store_2d_reference)
bpy.utils.unregister_class(ARCHIPACK_OT_move_2d_reference_to_cursor)
bpy.utils.unregister_class(ARCHIPACK_OT_parent_to_reference)
| 36.113821
| 102
| 0.59005
|
1cf2ed93645b6a28329b87a0662338cde67f45ef
| 7,425
|
py
|
Python
|
aae497-f19/share/glib-2.0/gdb/glib_gdb.py
|
winstonlevin/aae497-f19
|
1226f924d054e7448a91142c1637816c24388fc8
|
[
"BSD-3-Clause"
] | null | null | null |
aae497-f19/share/glib-2.0/gdb/glib_gdb.py
|
winstonlevin/aae497-f19
|
1226f924d054e7448a91142c1637816c24388fc8
|
[
"BSD-3-Clause"
] | null | null | null |
aae497-f19/share/glib-2.0/gdb/glib_gdb.py
|
winstonlevin/aae497-f19
|
1226f924d054e7448a91142c1637816c24388fc8
|
[
"BSD-3-Clause"
] | null | null | null |
import gdb
import sys
if sys.version_info[0] >= 3:
long = int
# This is not quite right, as local vars may override symname
def read_global_var (symname):
return gdb.selected_frame().read_var(symname)
def g_quark_to_string (quark):
if quark is None:
return None
quark = long(quark)
if quark == 0:
return None
try:
val = read_global_var ("quarks")
max_q = long(read_global_var ("quark_seq_id"))
except:
try:
val = read_global_var ("g_quarks")
max_q = long(read_global_var ("g_quark_seq_id"))
except:
return None
if quark < max_q:
return val[quark].string()
return None
# We override the node printers too, so that node->next is not expanded
class GListNodePrinter:
"Prints a GList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x, prev=0x%x}" % (str(self.val["data"]), long(self.val["next"]), long(self.val["prev"]))
class GSListNodePrinter:
"Prints a GSList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x}" % (str(self.val["data"]), long(self.val["next"]))
class GListPrinter:
"Prints a GList"
class _iterator:
def __init__(self, head, listtype):
self.link = head
self.listtype = listtype
self.count = 0
def __iter__(self):
return self
def next(self):
if self.link == 0:
raise StopIteration
data = self.link['data']
self.link = self.link['next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, data)
__next__ = next
def __init__ (self, val, listtype):
self.val = val
self.listtype = listtype
def children(self):
return self._iterator(self.val, self.listtype)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "array"
class GHashPrinter:
"Prints a GHashTable"
class _iterator:
def __init__(self, ht, keys_are_strings):
self.ht = ht
if ht != 0:
self.keys = ht["keys"]
self.values = ht["values"]
self.hashes = ht["hashes"]
self.size = ht["size"]
self.pos = 0
self.keys_are_strings = keys_are_strings
self.value = None
def __iter__(self):
return self
def next(self):
if self.ht == 0:
raise StopIteration
if self.value != None:
v = self.value
self.value = None
return v
while long(self.pos) < long(self.size):
self.pos = self.pos + 1
if long (self.hashes[self.pos]) >= 2:
key = self.keys[self.pos]
val = self.values[self.pos]
if self.keys_are_strings:
key = key.cast (gdb.lookup_type("char").pointer())
# Queue value for next result
self.value = ('[%dv]'% (self.pos), val)
# Return key
return ('[%dk]'% (self.pos), key)
raise StopIteration
__next__ = next
def __init__ (self, val):
self.val = val
self.keys_are_strings = False
try:
string_hash = read_global_var ("g_str_hash")
except:
string_hash = None
if self.val != 0 and string_hash != None and self.val["hash_func"] == string_hash:
self.keys_are_strings = True
def children(self):
return self._iterator(self.val, self.keys_are_strings)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "map"
def pretty_printer_lookup (val):
# None yet, want things like hash table and list
type = val.type.unqualified()
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t == "GList":
return GListPrinter(val, "GList")
if t == "GSList":
return GListPrinter(val, "GSList")
if t == "GHashTable":
return GHashPrinter(val)
else:
t = str(type)
if t == "GList":
return GListNodePrinter(val)
if t == "GSList *":
return GListPrinter(val, "GSList")
return None
def register (obj):
if obj is None:
obj = gdb
obj.pretty_printers.append(pretty_printer_lookup)
class ForeachCommand (gdb.Command):
"""Foreach on list"""
def __init__ (self):
super (ForeachCommand, self).__init__ ("gforeach",
gdb.COMMAND_DATA,
gdb.COMPLETE_SYMBOL)
def valid_name (self, name):
if not name[0].isalpha():
return False
return True
def parse_args (self, arg):
i = arg.find(" ")
if i <= 0:
raise Exception ("No var specified")
var = arg[:i]
if not self.valid_name(var):
raise Exception ("Invalid variable name")
while i < len (arg) and arg[i].isspace():
i = i + 1
if arg[i:i+2] != "in":
raise Exception ("Invalid syntax, missing in")
i = i + 2
while i < len (arg) and arg[i].isspace():
i = i + 1
colon = arg.find (":", i)
if colon == -1:
raise Exception ("Invalid syntax, missing colon")
val = arg[i:colon]
colon = colon + 1
while colon < len (arg) and arg[colon].isspace():
colon = colon + 1
command = arg[colon:]
return (var, val, command)
def do_iter(self, arg, item, command):
item = item.cast (gdb.lookup_type("void").pointer())
item = long(item)
to_eval = "set $%s = (void *)0x%x\n"%(arg, item)
gdb.execute(to_eval)
gdb.execute(command)
def slist_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GSList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def list_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def pick_iterator (self, container):
t = container.type.unqualified()
if t.code == gdb.TYPE_CODE_PTR:
t = t.target().unqualified()
t = str(t)
if t == "GSList":
return self.slist_iterator
if t == "GList":
return self.list_iterator
raise Exception("Invalid container type %s"%(str(container.type)))
def invoke (self, arg, from_tty):
(var, container, command) = self.parse_args(arg)
container = gdb.parse_and_eval (container)
func = self.pick_iterator(container)
func(var, container, command)
ForeachCommand ()
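# Illustrative gdb usage (not part of the original script; the variable and
# list names are placeholders). The syntax mirrors parse_args above:
#
#     (gdb) gforeach item in some_glist: print *(GObject *) $item
#
# i.e. "gforeach <var> in <expression>: <command>", where <expression> must
# evaluate to a GList* or GSList*; each element is bound to $<var> as a void*.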
| 28.339695
| 122
| 0.532256
|
57e19a4023bbda7582afd0c4d527599bf6a2315a
| 736
|
py
|
Python
|
src/products/migrations/0011_auto_20151120_0137.py
|
damansinghh/digital-marketplace
|
3d1797716f39459950fcf10042603890335f7f55
|
[
"MIT"
] | 104
|
2015-12-11T10:33:17.000Z
|
2022-03-26T04:36:13.000Z
|
src/products/migrations/0011_auto_20151120_0137.py
|
damansinghh/digital-marketplace
|
3d1797716f39459950fcf10042603890335f7f55
|
[
"MIT"
] | 6
|
2020-06-06T01:20:49.000Z
|
2022-03-12T00:32:27.000Z
|
src/products/migrations/0011_auto_20151120_0137.py
|
damansinghh/digital-marketplace
|
3d1797716f39459950fcf10042603890335f7f55
|
[
"MIT"
] | 85
|
2015-12-12T11:26:21.000Z
|
2022-03-02T21:12:50.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import products.models
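# Adds an optional "media" upload field to Product and allows the "managers"
# many-to-many relation to be left blank.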
class Migration(migrations.Migration):
dependencies = [
('products', '0010_product_managers'),
]
operations = [
migrations.AddField(
model_name='product',
name='media',
field=models.FileField(null=True, upload_to=products.models.download_media_location, blank=True),
),
migrations.AlterField(
model_name='product',
name='managers',
field=models.ManyToManyField(related_name='managers_products', to=settings.AUTH_USER_MODEL, blank=True),
),
]
| 27.259259
| 116
| 0.648098
|
036fb500200547d60871ab316ab4c9aba5aded4e
| 152,746
|
py
|
Python
|
regolith/schemas.py
|
shogho0/regolith
|
a65305fb9a66484e639461b8079de6a80116c2b4
|
[
"CC0-1.0"
] | null | null | null |
regolith/schemas.py
|
shogho0/regolith
|
a65305fb9a66484e639461b8079de6a80116c2b4
|
[
"CC0-1.0"
] | null | null | null |
regolith/schemas.py
|
shogho0/regolith
|
a65305fb9a66484e639461b8079de6a80116c2b4
|
[
"CC0-1.0"
] | null | null | null |
"""Database schemas, examples, and tools"""
import copy
from warnings import warn
from cerberus import Validator
from .sorters import POSITION_LEVELS
SORTED_POSITION = sorted(POSITION_LEVELS.keys(), key=POSITION_LEVELS.get)
PRESENTATIONS_TYPE = ["award", "colloquium", "contributed_oral", "invited", "keynote",
"plenary", "poster", "seminar", "tutorial"]
APPOINTMENTS_TYPE = ["gra", "ss", "pd", "ug"]
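# Example documents, keyed by collection name; each value is a sample record
# (or list of records) matching the corresponding schema defined in SCHEMAS
# further below.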
EXEMPLARS = {
"abstracts": {
"_id": "Mouginot.Model",
"coauthors": "P.P.H. Wilson",
"email": "mouginot@wisc.edu",
"firstname": "Baptiste",
"institution": "University of Wisconsin-Madison",
"lastname": "Mouginot",
"references": "[1] B. MOUGINOT, “cyCLASS: CLASS "
"models for Cyclus,”, Figshare, "
"https://dx.doi.org/10.6084/"
"m9.figshare.3468671.v2 (2016).",
"text": "The CLASS team has developed high "
"quality predictors based on pre-trained "
"neural network...",
"timestamp": "5/5/2017 13:15:59",
"title": "Model Performance Analysis",
},
"assignments": {
"_id": "hw01-rx-power",
"category": "homework",
"courses": ["EMCH-558-2016-S", "EMCH-758-2016-S"],
"points": [1, 2, 3],
"questions": ["1-9", "1-10", "1-12"],
},
"beamplan": {
'_id': "test",
'beamtime': '2020-1-XPD',
'begin_date': '2020-01-01',
'end_date': '2020-01-02',
'container': '',
'devices': ['cryostream'],
'exp_plan': ['load samples on the holder',
'scan the holder to locate the samples',
'take room temperature measurement of sample and the subtrate',
'ramp down temperature to 100K',
'ramp up, measure PDF at temperature 100K ~ 300K, 10K stepsize, 1 min exposure'],
'holder': 'film holder (1 cm * 1 cm * 1 mm)',
'measurement': 'Tramp',
'objective': 'temperature ramping PDF of one WO3 film (100, 300K, 10K)',
'pipeline': 'usual',
'prep_plan': ['films will be made by kriti'],
'project': '20ks_wo3',
'project_lead': 'kseth',
'samples': ['WO3 film', 'glass subtrate'],
'scanplan': 'Tramp',
'ship_plan': ['seal and ship to CU', 'carry to the beamline'],
'time': 190,
'todo': ["todo something"]},
"beamtime": {
"_id": "2020-1-XPD",
"begin_date": "2020-02-14",
"begin_time": "8:00 am",
"end_date": "2020-02-17",
"end_time": "8:00 am"
},
"blog": {
"_id": "my-vision",
"author": "Anthony Scopatz",
"day": 18,
"month": "September",
"original": "https://scopatz.com/my-vision/",
"post": "I would like see things move forward. Deep, I know!",
"title": "My Vision",
"year": 2015,
},
"citations": {
"_id": "meurer2016sympy",
"author": [
"Meurer, Aaron",
"Smith, Christopher P",
"Paprocki, Mateusz",
"{\\v{C}}ert{\\'\\i}k, Ond{\\v{r}}ej",
"Rocklin, Matthew",
"Kumar, AMiT",
"Ivanov, Sergiu",
"Moore, Jason K",
"Singh, Sartaj",
"Rathnayake, Thilina",
"Sean Vig",
"Brian E Granger",
"Richard P Muller",
"Francesco Bonazzi",
"Harsh Gupta",
"Shivam Vats",
"Fredrik Johansson",
"Fabian Pedregosa",
"Matthew J Curry",
"Ashutosh Saboo",
"Isuru Fernando",
"Sumith Kulal",
"Robert Cimrman",
"Anthony Scopatz",
],
"doi": "10.1021/nn501591g",
"entrytype": "article",
"journal": "PeerJ Computer Science",
"month": "Jan",
"pages": "e103",
"publisher": "PeerJ Inc. San Francisco, USA",
"synopsis": "The description of symbolic computing in Python",
"tags": "pdf",
"title": "SymPy: Symbolic computing in Python",
"volume": "4",
"year": "2017",
},
"contacts": {
"_id": "afriend",
"aka": [
"A. B. Friend",
"AB Friend",
"Tony Friend"
],
"department": "physics",
"email": "friend@deed.com",
"institution": "columbiau",
"name": "Anthony B Friend",
"notes": ["The guy I meet for coffee sometimes"],
"title": "Mr.",
"month": "January",
"year": 2020,
"day": 15,
"uuid": "76f2a4c7-aa63-4fa3-88b5-396b0c15d368",
},
"courses": {
"_id": "EMCH-552-2016-F",
"active": False,
"department": "EMCH",
"number": 552,
"scale": [
[0.875, "A"],
[0.8125, "B+"],
[0.75, "B"],
[0.6875, "C+"],
[0.625, "C"],
[0.5625, "D+"],
[0.5, "D"],
[-1.0, "F"],
],
"season": "F",
"students": ["Human A. Person", "Human B. Person"],
"syllabus": "emch552-2016-f-syllabus.pdf",
"weights": {
"class-notes": 0.15,
"final": 0.3,
"homework": 0.35,
"midterm": 0.2,
},
"year": 2016,
},
"expenses": {
"_id": "test",
"expense_type": "business",
"grant_percentages": ["50", "50"],
"grants": ["dmref15", "SymPy-1.1"],
"itemized_expenses": [
{
"day": i,
"month": "Jan",
"year": 2018,
"purpose": "test",
"unsegregated_expense": 10 * i,
"segregated_expense": 0,
}
for i in range(1, 11)
],
"payee": "scopatz",
"project": "Cyclus",
"overall_purpose": "testing the databallectionsse",
},
"grades": {
"_id": "Human A. Person-rx-power-hw02-EMCH-758-2017-S",
"student": "hap",
"assignment": "2017-rx-power-hw02",
"course": "EMCH-758-2017-S",
"scores": [1, 1.6, 3],
},
"grants": [
{
"_id": "SymPy-1.1",
"amount": 3000.0,
"alias": "sym",
"begin_day": 1,
"begin_month": "May",
"begin_year": 2030,
"call_for_proposals": "https://groups.google.com/d/msg"
"/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ",
"end_day": 31,
"end_month": "December",
"end_year": 2030,
"funder": "NumFOCUS",
"narrative": "https://docs.google.com/document/d/1nZxqoL"
"-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp"
"=sharing",
"program": "Small Development Grants",
"team": [
{
"institution": "University of South Carolina",
"name": "Anthony Scopatz",
"position": "pi",
},
{
"institution": "University of South Carolina",
"name": "Aaron Meurer",
"position": "researcher",
},
],
"status": "pending",
"title": "SymPy 1.1 Release Support",
"budget": [
{"begin_date": "2030-05-01",
"end_date": "2030-06-30",
"student_months": 0.5,
"postdoc_months": 0.0,
"ss_months": 1.0,
"amount": 1000.0,
},
{"begin_date": "2030-07-01",
"end_date": "2030-09-30",
"student_months": 1.5,
"postdoc_months": 0.0,
"ss_months": 2.0,
"amount": 1000.0,
},
{"begin_date": "2030-10-01",
"end_date": "2030-12-31",
"student_months": 3.0,
"postdoc_months": 0.0,
"ss_months": 0.0,
"amount": 1000.0,
},
],
"proposal_id": "SymPy-1.1"
},
{
"_id": "SymPy-2.0",
"amount": 3000.0,
"alias": "sym2.0",
"begin_day": 1,
"begin_month": 6,
"begin_year": 2019,
"call_for_proposals": "https://groups.google.com/d/msg"
"/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ",
"end_day": 31,
"end_month": "December",
"end_year": 2030,
"funder": "NumFOCUS",
"narrative": "https://docs.google.com/document/d/1nZxqoL"
"-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp"
"=sharing",
"program": "Small Development Grants",
"team": [
{
"institution": "University of South Carolina",
"name": "Anthony Scopatz",
"position": "pi",
},
{
"institution": "University of South Carolina",
"name": "Aaron Meurer",
"position": "researcher",
},
],
"status": "pending",
"title": "SymPy 1.1 Release Support",
"budget": [
{"begin_date": "2019-06-01",
"end_date": "2024-12-31",
"student_months": 12.0,
"postdoc_months": 24.0,
"ss_months": 14.0,
"amount": 1500.0,
},
{"begin_date": "2025-01-01",
"end_date": "2030-12-31",
"student_months": 12.0,
"postdoc_months": 24.0,
"ss_months": 0.0,
"amount": 1500.0,
},
],
"proposal_id": "SymPy-2.0",
},
{
"_id": "dmref15",
"alias": "dmref15",
"account": "GG012345",
"amount": 982785.0,
"begin_day": 1,
"begin_month": "october",
"begin_year": 2015,
"end_day": 30,
"end_month": "september",
"end_year": 2025,
"funder": "NSF",
"grant_id": "DMREF-1534910",
"institution": "Columbia University",
"notes": "Designing Materials to Revolutionize and Engineer our "
"Future (DMREF)",
"person_months_academic": 0.0,
"person_months_summer": 0.25,
"program": "DMREF",
"scope": "This grant is to develop complex modeling methods for regularizing "
"ill-posed nanostructure inverse problems using data analytic and "
"machine learning based approaches. This does not overlap with any "
"other grant.",
"team": [
{
"institution": "Columbia University",
"name": "qdu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "dhsu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "Anthony Scopatz",
"position": "pi",
"subaward_amount": 330000.0,
},
],
"title": "DMREF: Novel, data validated, nanostructure determination "
"methods for accelerating materials discovery",
"budget": [
{"begin_date": "2015-10-01",
"end_date": "2018-09-30",
"student_months": 12.0,
"postdoc_months": 0.0,
"ss_months": 6.0,
"amount": 327595.0,
},
{"begin_date": "2018-10-01",
"end_date": "2020-09-30",
"student_months": 8.0,
"postdoc_months": 0.0,
"ss_months": 12.0,
"amount": 327595.0,
},
{"begin_date": "2020-10-01",
"end_date": "2025-09-30",
"student_months": 12.0,
"postdoc_months": 0.0,
"ss_months": 6.0,
"amount": 327595.0,
},
],
"proposal_id": "dmref15"
},
{"_id": "abc42",
"alias": "abc42",
"amount": 42000.0,
"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"funder": "Life",
"program": "Metaphysical Grants",
"team": [
{"institution": "University of Pedagogy",
"name": "Chief Pedagogue",
"position": "pi"
},
{"institution": "University of Pedagogy",
"name": "Pedagogue Jr.",
"position": "co-pi"
},
],
"title": "The answer to life, the universe, and everything",
"budget": [
{"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"student_months": 0.0,
"postdoc_months": 0.0,
"ss_months": 1.0,
"amount": 42000.0,
}
],
"proposal_id": "abc42",
},
{"_id": "ta",
"amount": 0.0,
"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"funder": "Life",
"program": "Underground Grants",
"team": [
{"institution": "Ministry of Magic",
"name": "Chief Witch",
"position": "pi"
},
{"institution": "Ministry of Magic",
"name": "Chief Wizard",
"position": "co-pi"
},
],
"title": "Support for teaching assistants",
"budget": [
{"begin_date": "2020-06-01",
"end_date": "2020-08-30",
"student_months": 0.0,
"postdoc_months": 0.0,
"ss_months": 0.0,
"amount": 0.0,
}
]
},
],
"groups": {
"_id": "ergs",
"pi_name": "Anthony Scopatz",
"department": "Mechanical Engineering",
"institution": "University of South Carolina",
"name": "ERGS",
"aka": ["Energy Research Group Something", "Scopatz Group"],
"website": "www.ergs.sc.edu",
"mission_statement": """<b>ERGS</b>, or <i>Energy Research Group:
Scopatz</i>, is the Computational
<a href="http://www.me.sc.edu/nuclear/">Nuclear Engineering</a>
research group at the
<a href="http://sc.edu/">University of South Carolina</a>.
Our focus is on uncertainty quantification & predictive modeling, nuclear
fuel cycle simulation, and improving nuclear engineering techniques through
automation.
We are committed to open & accessible research tools and methods.""",
"projects": """ERGS is involved in a large number of computational
projects. Please visit the <a href="projects.html">projects page</a> for
more information!
""",
"email": "<b>scopatz</b> <i>(AT)</i> <b>cec.sc.edu</b>",
},
"institutions": [{
"_id": "columbiau",
"aka": ["Columbia University", "Columbia"],
"city": "New York",
"country": "USA",
"day": 30,
"departments": {
"physics": {
"name": "Department of Physics",
"aka": ["Dept. of Physics", "Physics"],
},
"chemistry": {
"name": "Department of Chemistry",
"aka": ["Chemistry", "Dept. of Chemistry"],
},
"apam": {
"name": "Department of Applied Physics " "and Applied Mathematics",
"aka": ["APAM"],
},
},
"month": "May",
"name": "Columbia University",
"schools": {
"seas": {
"name": "School of Engineering and " "Applied Science",
"aka": [
"SEAS",
"Columbia Engineering",
"Fu Foundation School of Engineering " "and Applied Science",
],
}
},
"state": "NY",
"street": "500 W 120th St",
"updated": "2020-05-30",
"uuid": "avacazdraca345rfsvwre",
"year": 2020,
"zip": "10027",
},
{
"_id": "usouthcarolina",
"aka": ["The University of South Carolina"],
"city": "Columbia",
"country": "USA",
"day": 30,
"departments": {
"physics": {
"name": "Department of Physics",
"aka": ["Dept. of Physics", "Physics"],
},
"chemistry": {
"name": "Department of Chemistry",
"aka": ["Chemistry", "Dept. of Chemistry"],
},
"apam": {
"name": "Department of Applied Physics" "and Applied Mathematics",
"aka": ["APAM"],
},
"mechanical engineering": {
"name": "Department of Mechanical Engineering",
"aka":["Mechanical", "Dept. of Mechanical"],
}
},
"month": "May",
"name": "The University of South Carolina",
"schools": {
"cec": {
"name": "College of Engineering and" "Computing",
"aka": [
"CEC",
"College of Engineering and Computing",
],
}
},
"state": "SC",
"street": "1716 College Street",
"updated": "2020-06-30",
"uuid": "4E89A0DD-19AE-45CC-BCB4-83A2D84545E3",
"year": 2020,
"zip": "29208",
},
],
"jobs": {
"_id": "0004",
"background_fields": [
"Data Science",
"Data Engineering",
"Computer Engineering",
"Computer Science",
"Applied Mathematics",
"Physics",
"Nuclear Engineering",
"Mechanical Engineering",
"Or similar",
],
"compensation": [
"Salary and compensation will be based on prior work " "experience."
],
"contact": "Please send CV or resume to Prof. Scopatz at "
"scopatzATcec.sc.edu.",
"day": 1,
"description": "<p>We are seeking a dedicated individual to "
"help to aid in ...",
"month": "July",
"open": False,
"positions": ["Scientific Software Developer", "Programmer"],
"start_date": "ASAP",
"title": "Open Source Scientific Software Maintainer",
"year": 2015,
},
"meetings": {
"_id": "grp2020-06-12",
"actions": [
"Simon: test",
"Everyone: Clear out-of-date prums milestones."
],
"agenda": ["Review actions", ],
"buddies": [],
"day": 12,
"journal_club": [],
"lead": "nthomas",
"minutes": [],
"month": 6,
"place": "Mudd 1106",
"presentation": [],
"scribe": "hvuong",
"time": '0',
"updated": "",
"uuid": "",
"year": 2020
},
"news": {
"_id": "56b4eb6d421aa921504ef2a9",
"author": "Anthony Scopatz",
"body": "Dr. Robert Flanagan joined ERGS as a post-doctoral " "scholar.",
"day": 1,
"month": "February",
"year": 2016,
},
"people": [{
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200",
"appointments": {
"f19": {
"begin_year": 2019,
"begin_month": 9,
"begin_day": 1,
"end_year": 2019,
"end_month": 10,
"end_day": 31,
"grant": "dmref15",
"type": "pd",
"loading": 0.75,
"status": "finalized",
"notes": ["forgetmenot"]
},
"s20": {
"begin_date": "2020-01-01",
"end_date": "2020-05-15",
"grant": "sym",
"type": "pd",
"loading": 1.0,
"status": "finalized",
"notes": ["fully appointed", "outdated grant"]
},
"ss20": {
"begin_date": "2020-06-01",
"end_date": "2020-08-31",
"grant": "abc42",
"type": "ss",
"loading": 0.8,
"status": "proposed",
"notes": []
}
},
"bio": "Anthony Scopatz is currently an Assistant Professor",
"bios": ["Anthony Scopatz is currently an Assistant Professor but will go on to do great things"],
"committees": [{
"name": "Heather Stanford",
"type": "phdoral",
"year": 2020,
"month": 3,
"day": 1,
"level": "department",
"unit": "apam"
},
{"name": "Heather Stanford",
"type": "promotion",
"year": 2020,
"month": 3,
"day": 1,
"level": "school",
"unit": "seas"
},
{"name": "Heather Stanford",
"type": "phddefense",
"year": 2020,
"month": 3,
"day": 1,
"notes": "something else to remember about it, not published",
"level": "external",
"unit": "U Denmark"
},
{"name": "Heather Stanford",
"type": "promotion",
"year": 2020,
"month": 3,
"day": 1,
"unit": "columbiau",
"level": "university",
}],
"education": [
{
"advisor": "ascopatz",
"begin_year": 2008,
"degree": "Ph.D. Mechanical Engineering, "
"Nuclear and Radiation Engineering "
"Program",
"end_year": 2011,
"group": "ergs",
"institution": "The University of Texas at Austin",
"department": "apam",
"location": "Austin, TX",
"other": [
"Adviser: Erich A. Schneider",
"Dissertation: Essential Physics for Fuel Cycle "
"Modeling & Analysis",
],
},
{
"begin_year": 2006,
"degree": "M.S.E. Mechanical Engineering, Nuclear and "
"Radiation Engineering Program",
"end_year": 2007,
"institution": "The University of Texas at Austin",
"location": "Austin, TX",
"other": [
"Adviser: Erich A. Schneider",
"Thesis: Recyclable Uranium Options under the Global "
"Nuclear Energy Partnership",
],
},
{
"begin_year": 2002,
"begin_month": "Sep",
"begin_day": 1,
"degree": "B.S. Physics",
"end_year": 2006,
"end_month": 5,
"end_day": 20,
"institution": "University of California, Santa Barbara",
"location": "Santa Barbara, CA",
"other": [
"Graduated with a Major in Physics and a Minor in " "Mathematics"
],
},
{
"begin_year": 2008,
"degree": "ongoing",
"group": "life",
"institution": "solar system",
"department": "earth",
"location": "land, mostly",
},
],
"email": "scopatz@cec.sc.edu",
"employment": [
{
"advisor": "ascopatz",
"begin_year": 2015,
"coworkers": ["afriend"],
"group": "ergs",
"location": "Columbia, SC",
"organization": "The University of South Carolina",
"other": [
"Cyclus: An agent-based, discrete time nuclear fuel "
"cycle simulator.",
"PyNE: The Nuclear Engineering Toolkit.",
"Website: http://www.ergs.sc.edu/",
],
"position": "assistant professor",
"position_full": "Assistant Professor, Mechanical Engineering " "Department",
},
{
"begin_year": 2013,
"begin_month": "Jun",
"begin_day": 1,
"end_year": 2015,
"end_month": 3,
"end_day": 15,
"location": "Madison, WI",
"organization": "CNERG, The University of " "Wisconsin-Madison",
"department": "Physics",
"other": [
"Cyclus: An agent-based, discrete time nuclear fuel "
"cycle simulator.",
"PyNE: The Nuclear Engineering Toolkit.",
"Website: https://cnerg.github.io/",
],
"position": "associate scientist",
"position_full": "Associate Scientist, Engineering Physics " "Department",
},
{
"begin_day": 1,
"begin_month": "Nov",
"begin_year": 2011,
"end_month": "May",
"end_year": 2013,
"location": "Chicago, IL",
"organization": "The FLASH Center, The University of " "Chicago",
"other": [
"NIF: Simulation of magnetic field generation from "
"neutral plasmas using FLASH.",
"CosmoB: Simulation of magnetic field generation "
"from neutral plasmas using FLASH.",
"FLASH4: High-energy density physics capabilities "
"and utilities.",
"Simulated Diagnostics: Schlieren, shadowgraphy, "
"Langmuir probes, etc. from FLASH.",
"OpacPlot: HDF5-based equation of state and opacity "
"file format.",
"Website: http://flash.uchicago.edu/site/",
],
"position": "post-doctoral scholar",
"position_full": "Research Scientist, Postdoctoral Scholar",
"status": "pi"
},
],
"funding": [
{
"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013,
},
{"name": "NIF User's Group Travel Award", "value": 1150,
"year": 2013},
],
"google_scholar_url": "https://scholar.google.com/citations?user=dRm8f",
"github_id": "ascopatz",
"hindex": [{
"h": 25,
"h_last_five": 46,
"citations": 19837,
"citations_last_five": 9419,
"origin": "Google Scholar",
"since": 1991,
"year": 2020,
"month": 2,
"day": 19
}],
"home_address": {
"street": "123 Wallabe Ln",
"city": "The big apple",
"state": "plasma",
"zip": "007",
},
"initials": "AMS",
"membership": [
{
"begin_year": 2006,
"organization": "American Nuclear Society",
"position": "Member",
},
{
"begin_year": 2013,
"organization": "Python Software Foundation",
"position": "Fellow",
},
],
"name": "Anthony Scopatz",
"orcid_id": "0000-0002-9432-4248",
"position": "professor",
"research_focus_areas": [
{"begin_year": 2010, "description": "software applied to nuclear "
"engineering and life"}
],
"service": [{
"name": "International Steering Committee",
"role": "chair",
"type": "profession",
"year": 2020,
"month": 3,
"notes": ["something"],
},{
"name": "National Steering Committee",
"type": "profession",
"begin_year": 2018,
"end_year": 2021,
"notes": "something",
},
],
"skills": [
{"category": "Programming Languages", "level": "expert",
"name": "Python"},
{"category": "Programming Languages", "level": "expert",
"name": "Cython"},
],
"teaching": [
{
"course": "EMCH 552: Intro to Nuclear Engineering",
"courseid": "EMCH 552",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": "tbd",
"month": "August",
"organization": "University of South Carolina",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2017,
},
{
"course": "EMCH 558/758: Reactor Power Systems",
"courseid": "EMCH 558",
"description": "This course covers conventional " "reactors.",
"enrollment": 28,
"evaluation": {
"response_rate": 66.76,
"amount_learned": 3.5,
"appropriateness_workload": 3.15,
"course_overall": 3.67,
"fairness_grading": 3.54,
"organization": 3.25,
"classroom_delivery": 4,
"approachability": 4.3,
"instructor_overall": 3.5,
"comments": ["super duper", "dandy"]
},
"month": "January",
"organization": "University of South Carolina",
"position": "professor",
"syllabus": "https://docs.google.com/document/d"
"/1uMAx_KFZK9ugYyF6wWtLLWgITVhaTBkAf8"
"-PxiboYdM/edit?usp=sharing",
"year": 2017,
},
],
"title": "Dr.",
},
{
"_id": "sbillinge",
"active": True,
"activities": [{
"type": "teaching",
"name": "course development",
"year": 2018,
"other": "Developed a new course for Materials Science"
}],
"aka": [
"Billinge",
],
"avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200",
"bio": "Simon teaches and does research",
"committees": [{
"name": "Same Old",
"type": "phddefense",
"year": 2018,
"unit": "Materials Science",
"level": "department",
"notes": "something"
}],
"education": [
{
"begin_year": 2008,
"degree": "Ph.D. Mechanical Engineering, "
"Nuclear and Radiation Engineering "
"Program",
"end_year": 2011,
"group": "ergs",
"institution": "The University of Texas at Austin",
"department": "apam",
"location": "Austin, TX",
"other": [
"Adviser: Erich A. Schneider",
"Dissertation: Essential Physics for Fuel Cycle "
"Modeling & Analysis",
],
},
],
"email": "sb2896@columbia.edu",
"employment": [
{
"begin_year": 2015,
"group": "ergs",
"location": "Columbia, SC",
"organization": "The University of South Carolina",
"other": [
"Cyclus: An agent-based, discrete time nuclear fuel "
"cycle simulator.",
"PyNE: The Nuclear Engineering Toolkit.",
"Website: http://www.ergs.sc.edu/",
],
"position": "assistant professor",
},
],
"facilities": [{
"type": "other",
"name": "Shared {Habanero} compute cluster",
"begin_year": 2015
},
{
"type": "research_wish",
"name": "Shared access to wet lab",
"begin_year": 2015
},
{
"type": "teaching",
"name": "Courseworks2",
"begin_year": 2017
},
{
"type": "teaching_wish",
"name": "nothing right now",
"begin_year": 2019
},
{
"type": "research",
"name": "I don't have one",
"begin_year": 2008
},
],
"funding": [
{
"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013,
},
{"name": "NIF User's Group Travel Award", "value": 1150,
"year": 2013},
],
"google_scholar_url": "https://scholar.google.com/citations?user=dRm8f",
"hindex": [{
"h": 65,
"h_last_five": 43,
"citations": 17890,
"citations_last_five": 8817,
"origin": "Google Scholar",
"since": 1991,
"year": 2019,
"month": "May",
"day": 12,
}],
"office": "1105 Seely W. Mudd Building (inner office)",
"home_address": {
"street": "123 Wallabe Ln",
"city": "The big apple",
"state": "plasma",
"zip": "007",
},
"initials": "SJLB",
"membership": [
{
"begin_year": 2006,
"organization": "American Nuclear Society",
"position": "Member",
},
],
"miscellaneous": {
"metrics_for_success": [
"publications(quality, quantity)",
"invite talks",
"funding",
"citations",
],
},
"name": "Simon J. L. Billinge",
"orcid_id": "0000-0002-9432-4248",
"position": "professor",
"publicity": [{
"type": "online",
"publication": "Brookhaven National Laboratory Web Story",
"topic": "LDRD Provenance project",
"title": "An awesome project and well worth the money",
"day": 24,
"month": "Jul",
"year": 2019,
"grant": "bnlldrd18",
"url": "http://www.google.com"
},
],
"research_focus_areas": [
{"begin_year": 2010, "description": "software applied to materials "
"engineering and life" }
],
"service": [
{
"type": "profession",
"name": "Master of Ceremonies and Organizer Brown University "
'"Chemistry: Believe it or Not" public chemistry '
"demonstration",
"year": 2017,
"month": "August"
},
{
"type": "department",
"name": "Applied Physics program committee",
"year": 2018,
"month": 1
},
{
"type": "school",
"name": "Ad hoc tenure committee",
"year": 2017,
"month": 6,
"notes": "Albert Einstein"
},
{
"type": "profession",
"name": "Co-organizer JUAMI",
"year": 2017,
"month": 12,
"role": "co-organizer",
"other": "great way to meet people",
},
],
"skills": [
{"category": "Programming Languages", "level": "expert",
"name": "Python"},
],
"teaching": [
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2016,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2017,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "s17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "Jan",
"organization": "Columbia University",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2018,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "s17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "Jan",
"organization": "Columbia University",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2017,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "s17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"month": "Jan",
"organization": "Columbia University",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2019,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f18-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2018,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f19-3010",
"description": "This course is an introduction to nuclear " "physics.",
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2019,
},
],
"title": "Dr.",
"todos": [
{"description": "read paper",
"due_date": "2020-07-19",
"begin_date": "2020-06-15",
"duration": 60.0,
"importance": 2,
"status": "started",
"assigned_by": "scopatz",
"running_index": 1
},
{"description": "prepare the presentation",
"due_date": "2020-07-29",
"begin_date": "2020-06-22",
"duration": 30.0,
"importance": 0,
"status": "started",
"notes": ["about 10 minutes", "don't forget to upload to the website"],
"assigned_by": "sbillinge",
"running_index": 2
}
],
},
{"_id": "abeing",
"active": False,
"aka": ["being", "human", "person"],
"avatar": "https://xkcd.com/1221/",
"bio": "Abstract Being is an exemplar human existence",
"education": [
{"degree": "bachelors", "institution": "University of Laughs", "begin_year": 2010},
],
"employment": [
{"group": "bg", "begin_date": "2015-06-01", "end_date": "2015-08-31", "organization": "columbiau",
"position": "intern"},
{"group": "agroup", "begin_date": "2020-01-01", "end_date": "2030-12-31", "organization": "usouthcarolina",
"position": "intern"},
{"group": "bg", "begin_date": "2010-06-01", "end_date": "2012-08-31", "organization": "columbiau",
"position": "intern"},
{"group": "bg", "begin_date": "2017-06-01", "end_date": "2019-08-31", "organization": "columbiau",
"position": "intern"},
],
"position": "intern",
"name": "Abstract Being",
}
],
"presentations": [
{
"_id": "18sb_this_and_that",
"abstract": "We pulled apart graphite with tape",
"authors": ["scopatz"],
"begin_year": 2018,
"begin_month": 5,
"begin_day": 22,
"department": "apam",
"institution": "columbiau",
"location": "Upton NY",
"meeting_name": "Meeting to check flexibility on dates",
"notes": [
"We hope the weather will be sunny",
"if the weather is nice we will go to the " "beach",
],
"project": "18sob_clustermining",
"status": "accepted",
"title": "Graphitic Dephenestration",
"type": "award",
"webinar": False,
},
{
"_id": "18sb_nslsii",
"abstract": "We pulled apart graphite with tape",
"authors": ["scopatz"],
"begin_year": 2018,
"begin_month": 5,
"begin_day": 22,
"department": "apam",
"end_year": 2018,
"end_month": 5,
"end_day": 22,
"institution": "columbiau",
"location": "Upton NY",
"meeting_name": "2018 NSLS-II and CFN Users Meeting",
"notes": [
"We hope the weather will be sunny",
"if the weather is nice we will go to the " "beach",
],
"project": "18sob_clustermining",
"status": "accepted",
"title": "ClusterMining: extracting core structures of "
"metallic nanoparticles from the atomic pair "
"distribution function",
"type": "poster",
},
{
"_id": "18sb04_kentstate",
"abstract": "We made the case for local structure",
"authors": ["scopatz"],
"begin_year": 2018,
"begin_month": "May",
"begin_day": 22,
"department": "physics",
"end_year": 2018,
"end_month": 5,
"end_day": 22,
"institution": "columbiau",
"notes": ["what a week!"],
"project": "18kj_conservation",
"status": "accepted",
"title": "Nanostructure challenges and successes from "
"16th Century warships to 21st Century energy",
"type": "colloquium",
"webinar": True,
},
],
"projecta": {
"_id": "20sb_firstprojectum",
"begin_date": "2020-04-28",
"collaborators": ["aeinstein", "pdirac"],
"deliverable": {
"audience": ["beginning grad in chemistry"],
"due_date": "2021-05-05",
"success_def": "audience is happy",
"scope": ["UCs that are supported or some other scope description "
"if it is software", "sketch of science story if it is paper"
],
"platform": "description of how and where the audience will access "
"the deliverable. Journal if it is a paper",
"roll_out": [
"steps that the audience will take to access and interact with "
"the deliverable", "not needed for paper submissions"],
"status": "finalized"
},
"description": "My first projectum",
"end_date": "2020-06-05",
"grants": "SymPy-1.1",
"group_members": ["ascopatz"],
"kickoff": {
"date": "2020-05-05",
"due_date": "2020-05-06",
"name": "Kick off meeting",
"objective": "introduce project to the lead",
"audience": ["lead", "pi", "group_members"],
"status": "finished"
},
"lead": "ascopatz",
"log_url": "https://docs.google.com/document/d/1YC_wtW5Q",
"milestones": [{
'due_date': '2020-05-20',
'name': 'Project lead presentation',
'objective': 'lead presents background reading and '
'initial project plan',
'audience': ['lead', 'pi', 'group_members'],
'status': 'proposed',
'type': 'meeting'
},
{'due_date': '2020-05-27',
'name': 'planning meeting',
'objective': 'develop a detailed plan with dates',
'audience': ['lead', 'pi', 'group_members'],
'status': 'proposed',
'type': 'pr',
}],
"name": "First Projectum",
"pi_id": "scopatz",
"status": "proposed"
},
"projects": {
"_id": "Cyclus",
"name": "Cyclus",
"description": "Agent-Based Nuclear Fuel Cycle Simulator",
"group": "ergs",
"highlights": [
{"year": 2020, "month": 5,
"description": "high profile pub in Nature"}
],
"logo": "http://fuelcycle.org/_static/big_c.png",
"other": [
"Discrete facilities with discrete material transactions",
"Low barrier to entry, rapid payback to adoption",
],
"repo": "https://github.com/cyclus/cyclus/",
"team": [
{
"begin_month": "June",
"begin_year": 2013,
"end_month": "July",
"end_year": 2015,
"name": "Anthony Scopatz",
"position": "Project Lead",
}
],
"type": "funded",
"website": "http://fuelcycle.org/",
"grant": "dmref15",
},
"proposalReviews": [
{
"_id": "1906doeExample",
"adequacy_of_resources": [
"The resources available to the PI seem adequate"
],
"agency": "doe",
"competency_of_team": ["super competent!"],
"doe_appropriateness_of_approach": [
"The proposed approach is highly innovative"
],
"doe_reasonableness_of_budget": [
"They could do it with half the money"],
"doe_relevance_to_program_mission": ["super relevant"],
"does_how": [
"they will find the cause of Malaria",
"when they find it they will determine a cure",
],
"due_date": "2020-04-10",
"does_what": "Find a cure for Malaria",
"freewrite": [
"I can put extra things here, such as special instructions from the",
"program officer",
],
"goals": [
"The goals of the proposal are to put together a team to find a cure"
"for Malaria, and then to find it"
],
"importance": ["save lives", "lift people from poverty"],
"institutions": "columbiau",
"month": "May",
"names": ["B. Cause", "A.N. Effect"],
"nsf_broader_impacts": [],
"nsf_create_original_transformative": [],
"nsf_plan_good": [],
"nsf_pot_to_advance_knowledge": [],
"nsf_pot_to_benefit_society": [],
"requester": "Lane Wilson",
"reviewer": "sbillinge",
"status": "submitted",
"summary": "dynamite proposal",
"title": "A stunning new way to cure Malaria",
"year": 2019,
},
{
"_id": "1906nsfExample",
"adequacy_of_resources": [
"The resources available to the PI seem adequate"
],
"agency": "nsf",
"competency_of_team": ["super competent!"],
"doe_appropriateness_of_approach": [],
"doe_reasonableness_of_budget": [],
"doe_relevance_to_program_mission": [],
"does_how": [
"they will find the cause of Poverty",
"when they find it they will determine a cure",
],
"does_what": "Find a cure for Poverty",
"due_date": "2020-04-10",
"freewrite": [
"I can put extra things here, such as special instructions from the",
"program officer",
],
"goals": [
"The goals of the proposal are to put together a team to find a cure"
"for Poverty, and then to find it"
],
"importance": ["save lives", "lift people from poverty"],
"institutions": "upenn",
"month": "May",
"names": ["A Genius"],
"nsf_broader_impacts": ["Poor people will be made unpoor"],
"nsf_create_original_transformative": [
"transformative because lives will be transformed"
],
"nsf_plan_good": [
"I don't see any issues with the plan",
"it should be very straightforward",
],
"nsf_pot_to_advance_knowledge": [
"This won't advance knowledge at all"],
"nsf_pot_to_benefit_society": [
"Society will benefit by poor people being made unpoor if they want "
"to be"
],
"requester": "Tessemer Guebre",
"reviewer": "sbillinge",
"status": "submitted",
"summary": "dynamite proposal",
"title": "A stunning new way to cure Poverty",
"year": 2019,
},
],
"proposals": [
{
"_id": "mypropsal",
"amount": 1000000.0,
"authors": ["Anthony Scopatz", "Robert Flanagan"],
"begin_day": 1,
"begin_month": "May",
"begin_year": 2030,
"currency": "USD",
"day": 18,
"duration": 3,
"end_day": 31,
"end_month": "December",
"end_year": 2030,
"full": {
"benefit_of_collaboration": "http://pdf.com"
"/benefit_of_collaboration",
"cv": ["http://pdf.com/scopatz-cv",
"http://pdf.com/flanagan-cv"],
"narrative": "http://some.com/pdf",
},
"month": "Aug",
"notes": "Quite an idea",
"pi": "Anthony Scopatz",
"pre": {
"benefit_of_collaboration": "http://pdf.com"
"/benefit_of_collaboration",
"cv": ["http://pdf.com/scopatz-cv",
"http://pdf.com/flanagan-cv"],
"day": 2,
"month": "Aug",
"narrative": "http://some.com/pdf",
"year": 1998,
},
"status": "submitted",
"title": "A very fine proposal indeed",
"year": 1999,
},
{
"_id": "dmref15",
"amount": 982785.0,
"authors": ["qdu", "dhsu", "sbillinge"],
"call_for_proposals": "http://www.nsf.gov/pubs/2014/nsf14591/"
"nsf14591.htm",
"begin_day": 1,
"begin_month": "May",
"begin_year": 2018,
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": "lots to do but it doesn't overlap with any "
"other of my grants"
},
"currency": "USD",
"day": 2,
"duration": 3,
"end_day": 1,
"end_month": "May",
"end_year": 2019,
"funder": "NSF",
"month": "february",
"notes": "Quite an idea",
"pi": "Simon Billinge",
"status": "accepted",
"team": [
{
"institution": "Columbia University",
"name": "qdu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "dhsu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "sbillinge",
"position": "pi",
"subaward_amount": 330000.0,
},
],
"title": "DMREF: Novel, data validated, nanostructure determination "
"methods for accelerating materials discovery",
"title_short": "DMREF nanostructure",
"year": 2015,
},
{
"_id": "SymPy-1.1",
"amount": 3000.0,
"begin_date": "2030-05-01",
"end_date": "2030-12-31",
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": ""
},
"currency": "USD",
"pi": "sbillinge",
"status": "submitted",
"title": "SymPy 1.1 Release Support",
},
{
"_id": "SymPy-2.0",
"amount": 3000.0,
"begin_date": "2019-06-01",
"end_date": "2030-12-31",
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": ""
},
"currency": "USD",
"pi": "sbillinge",
"status": "submitted",
"title": "SymPy 1.1 Release Support",
},
{
"_id": "abc42",
"amount": 42000.0,
"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": ""
},
"currency": "USD",
"pi": "sbillinge",
"status": "submitted",
"title": "The answer to life, the universe, and everything",
}
],
"reading_lists": {
"_id": "getting_started_with_pdf",
"day": "15",
"month": "12",
"papers": [{"doi": "10.1107/97809553602060000935",
"text": "Very basic, but brief, intro to powder diffraction in general"},
{"doi": "10.1039/9781847558237-00464",
"text": "Lightest weight overview of PDF analysis around. Good starting point"
},
{"url": "http://www.diffpy.org",
"text": "Download and install PDFgui software and run through the step by step tutorial under the help tab"}
],
"purpose": "Beginning reading about PDF",
"title": "A step-by-step pathway towards PDF understanding. It is recommended to read the papers in the order they are listed here.",
"year": 2019,
},
"refereeReports": {
"_id": "1902nature",
"claimed_found_what": ["gravity waves"],
"claimed_why_important": ["more money for ice cream"],
"did_how": ["measured with a ruler"],
"did_what": ["found a much cheaper way to measure gravity waves"],
"due_date": '2020-04-11',
"editor_eyes_only": "to be honest, I don't believe a word of it",
"final_assessment": ["The authors should really start over"],
"first_author_last_name": "Wingit",
"freewrite": "this comment didn't fit anywhere above",
"journal": "Nature",
"recommendation": "reject",
"requester": "Max Planck",
"reviewer": "sbillinge",
"status": "submitted",
"submitted_date": "2019-01-01",
"title": "a ruler approach to measuring gravity waves",
"validity_assessment": ["complete rubbish"],
"year": 2019,
},
"students": {
"_id": "Human A. Person",
"aka": ["H. A. Person"],
"email": "haperson@uni.edu",
"university_id": "HAP42",
},
}
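# Cerberus-style validation schemas, keyed by collection name. Each field maps
# to a rule dict giving a human-readable "description", whether the field is
# "required", and its "type" (or "anyof_type" list of accepted types).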
SCHEMAS = {
"abstracts": {
"_description": {
"description": "Abstracts for a conference or workshop. This is "
"generally public information"
},
"_id": {
"description": "Unique identifier for submission. This generally "
"includes the author name and part of the title.",
"required": True,
"type": "string",
},
"coauthors": {
"description": "names of coauthors",
"required": False,
"type": "string",
},
"email": {
"description": "contact email for the author.",
"required": True,
"type": "string",
},
"firstname": {
"description": "first name of the author.",
"required": True,
"type": "string",
},
"institution": {
"description": "name of the institution",
"required": True,
"type": "string",
},
"lastname": {
"description": "last name of the author.",
"required": True,
"type": "string",
},
"references": {
"description": "HTML string of reference for the abstract itself",
"required": False,
"type": "string",
},
"text": {
"description": "HTML string of the abstract.",
"required": True,
"type": "string",
},
"timestamp": {
"description": "The time when the abstract was submitted.",
"required": True,
"type": "string",
},
"title": {
"description": "title of the presentation/paper.",
"required": True,
"type": "string",
},
},
"assignments": {
"_description": {
"description": "Information about assignments for classes."},
"_id": {
"description": "A unique id for the assignment, such as "
"HW01-EMCH-558-2016-S",
"required": True,
"type": "string",
},
"category": {
"description": "such as 'homework' or 'final'",
"required": True,
"type": "string",
},
"courses": {
"description": "ids of the courses that have this assignment",
"required": True,
"anyof_type": ["string", "list"],
},
"file": {
"description": "path to assignment file in store",
"required": False,
"type": "string",
},
"points": {
"description": "list of number of points possible for each "
"question. Length is the number of questions",
"required": True,
"type": "list",
"schema": {"anyof_type": ["integer", "float"]},
},
"questions": {
"description": "titles for the questions on this assignment",
"required": False,
"type": "list",
},
"solution": {
"description": "path to solution file in store",
"required": False,
"type": "string",
},
},
"beamplan": {
"_id": {
"description": "Unique identifier for the experiment plan. It should have a format '{year:2d}{month:2d}{people_id:s}_{plan_name:s}'",
"required": True,
"type": "string"
},
"_description": {
"description": "Information about the experiment plan for the beamtime."},
"project_lead": {
"description": "The id for person who put out this plan. It should be inside the people.yml.",
"required": True,
"type": "string"
},
"project": {
"description": "The id for the project which the plan belongs to. It should be on airtable.",
"required": True,
"type": "string"
},
"begin_date": {
"description": "The begin date of the beam time.",
"required": True,
"anyof_type": ["string", "datetime", "date"]
},
"end_date": {
"description": "The end date of the beam time.",
"required": True,
"anyof_type": ["string", "datetime", "date"]
},
"beamtime": {
"description": "The id for the beamtime. Check the Airtable.",
"required": True,
"type": "string"
},
"container": {
"description": "Sample container used during the measurement, e. g. 1mm OD glass tubes.",
"required": True,
"type": "string"
},
"holder": {
"description": "Sample holder used during the measurement, e. g. 3 mm OD tubes holder.",
"required": True,
"type": "string"
},
"devices": {
"description": "The dictionary of devices used in the measurement e. g. ",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"measurement": {
"description": "What data to be measured, e. g. PDF, XRD, SAXS. This will determine the setup.",
"required": True,
"type": "string"
},
"samples": {
"description": "The list of samples to be measured.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"time": {
"description": "The total time of executing the exp_plan. Unit: min.",
"required": True,
"type": "integer"
},
"objective": {
"description": "What to study in the experiments. What goal to achieve.",
"required": True,
"type": "string"
},
"prep_plan": {
"description": "Steps to prepare the samples. Do NOT need details.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"ship_plan": {
"description": "Steps to carry the samples from the producer to the BNL. Do NOT need details.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"exp_plan": {
"description": "Steps to carry out the experiments at BNL. Need details",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"scanplan": {
"description": "The scanplan for the experiment, e. g. tseries, Tramp, ct.",
"required": True,
"type": "string"
},
"pipeline": {
"description": "The analysis pipeline for the experiment. If no new pipeline is needed, use 'usual'.",
"required": True,
"type": "string",
"default": "usual"
},
"todo": {
"description": "The TODO list before the beamtime.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"notes": {
"description": "Notes of the plan, e. g. the preferred time.",
"required": False,
"anyof_type": [
"list",
"string"
],
"schema": {
"type": "string"
}
}
},
"blog": {
"_description": {
"description": "This collection represents blog posts written by "
"the members of the research group."
},
"_id": {
"description": "short representation, such as this-is-my-title",
"required": True,
"type": "string",
},
"author": {
"description": "name or AKA of author",
"required": True,
"type": "string",
},
"day": {"description": "Publication day", "required": True,
"type": "integer"},
"month": {
"description": "Publication month",
"required": True,
"anyof_type": ["string", "integer"],
},
"original": {
"description": "URL of original post, if this is a repost",
"required": False,
"type": "string",
},
"post": {
"description": "actual contents of the post",
"required": True,
"type": "string",
},
"title": {
"description": "full human readable title",
"required": True,
"type": "string",
},
"year": {
"description": "Publication year",
"required": True,
"type": "integer",
},
},
"contacts": {
"_description": {"description": "a lighter version of people. Fewer required fields"
"for capturing people who are less tightly coupled"
},
"_id": {
"description": "id of the person, e.g., first letter first name "
"plus last name, but unique",
"required": True,
},
"aka": {
"required": False,
"type": "list",
"description": "other names for the person",
},
"date": {
"description": "date when the entry was created in ISO format",
"required": False,
"anyof_type": ["string", "date"],
},
'day': {
"description": "day when the entry was created",
"required": False,
"type": "integer",
},
"department": {
"description": "Department at the institution",
"type": "string",
"required": False,
},
"email": {
"description": "Contact email for the contact",
"type": "string",
"required": False,
},
"institution": {
"description": "the institution where they are located. This is"
"required for building a COI list of coauthors, but"
"not in general. It can be institute id or anything"
"in the aka or name",
"required": False,
"type": "string"
},
'month': {
"description": "month when the entry was created",
"required": False,
"anyof_type": ["string", "integer"],
},
"name": {
"description": "the person's canonical name",
"required": True,
"type": "string",
},
"notes": {
"description": "notes about the person",
"required": False,
"anyof_type": ["list", "string"]
},
"title": {
"description": "how the person is addressed",
"required": False,
"type": "string",
},
'updated': {
"description": "most recently updated",
"required": False,
"anyof_type": ["string", "datetime", "date"],
},
'year': {
"description": "year when the entry was created",
"required": False,
"type": "integer",
},
'uuid': {
"description": "universally unique identifier",
"required": False,
"type": "string",
},
},
"expenses": {
"_description": {
"description": "This collection records expenses for the "
"group. It should most likely be private"
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": "string",
},
"begin_date": {
"description": "begin date in YYYY-MM-DD",
"anyof_type": ["string", "date"],
},
"end_date": {
"description": "end date in YYYY-MM-DD",
"anyof_type": ["string", "date"],
},
"grant_percentages": {
"description": "the percentage of the reimbursement amount to put "
"on each grant. This list must be the same length as"
"the grants list and the percentages placed in the "
"order that the grants appear in that list",
"required": False,
"type": "list",
},
"grants": {
"description": "the grants in a list, or a string if only one grant",
"required": True,
"anyof_type": ["string", "list"],
},
"project": {
"description": "project or list of projects that this "
"presentation is associated with. Should "
"be discoverable in projects collection",
"anyof_type": ["string", "list"],
},
"payee": {
"description": "The name or id of the payee filing the expense",
"required": True,
"type": "string",
},
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": False,
"type": "integer",
},
"date": {
"description": "Expense date",
"required": False,
"anyof_type": ["string", "date"],
},
"month": {
"description": "Expense month",
"required": False,
"anyof_type": ["string", "integer"],
},
"year": {
"description": "Expense year",
"required": False,
"type": "integer",
},
"purpose": {
"description": "reason for expense",
"type": "string",
"required": True,
},
"unsegregated_expense": {
"description": "The allowed expenses",
"type": "float",
},
"segregated_expense": {
"description": "The unallowed expenses",
"type": "float",
},
"original_currency": {
"description": "The currency the payment was made in",
"type": "float",
},
},
},
},
"overall_purpose": {
"description": "The reason for the expenses",
"type": "string",
"required": True,
},
"notes": {
"description": "Notes about the expense",
"type": "list",
},
"status": {
"description": "The status of the expense",
"eallowed": ["reimbursed", "submitted", "unsubmitted", ],
"type": "string"
},
"reimbursements": {
"description": "Reimbursements for the expense",
"schema": {
"schema": {
'amount': {"description": 'amount for reimbursements',
"type": "float",
},
'date': {"description": "date of reimbursement",
"anyof_type": ["string", "date"],
},
'submission_date': {"description": "date of submission",
"anyof_type": ["string", "date"],
},
'where': {"description": 'where the reimbursement has been sent',
"type": 'string',
},
},
"type": "dict"
},
"type": "list"
},
"expense_type": {
"description": "The type of expense",
"allowed": ["travel", "business"],
"required": True,
},
},
"grades": {
"_description": {
"description": "The grade for a student on an assignment. This "
"information should be private."
},
"_id": {
"description": "unique id, typically the " "student-assignment-course",
"required": True,
"type": "string",
},
"assignment": {
"description": "assignment id",
"required": True,
"type": "string",
},
"course": {"description": "course id", "required": True,
"type": "string"},
"filename": {
"description": "path to file in store",
"required": False,
"type": "string",
},
"scores": {
"description": "the number of points earned on each question",
"required": True,
"type": "list",
"schema": {"anyof_type": ["integer", "float"]},
},
"student": {"description": "student id", "required": True,
"type": "string"},
},
"grants": {
"_description": {
"description": "This collection represents grants that have been "
"awarded to the group."
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": ("string", "integer", "float"),
},
"account": {
"description": "the account number which holds the funds",
"required": False,
"type": "string",
},
"admin": {
"description": "the group administering the grant",
"type": "string",
"required": False,
},
"alias": {
"description": "the alias of the grant",
"type": "string",
"required": False,
},
"amount": {
"description": "value of award",
"required": True,
"type": ("integer", "float"),
},
"begin_date": {
"description": "start date of the grant (if string, in format YYYY-MM-DD)",
"required": False,
"anyof_type": ["string", "date"]
},
"begin_day": {
"description": "start day of the grant",
"required": False,
"type": "integer",
},
"begin_month": {
"description": "start month of the grant",
"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {
"description": "start year of the grant",
"required": False,
"type": "integer",
},
"benefit_of_collaboration": {
"description": "",
"required": False,
"type": "string",
},
# TODO: maybe this should move to proposals?
"call_for_proposals": {"description": "", "required": False,
"type": "string"},
"currency": {
"description": "typically '$' or 'USD'",
"required": False,
"type": "string",
},
"end_date": {
"description": "start date of the grant (if string, in format YYYY-MM-DD)",
"required": False,
"anyof_type": ["string", "date"]
},
"end_day": {
"description": "end day of the grant",
"required": False,
"type": ("string", "integer"),
},
"end_month": {
"description": "end month of the grant",
"required": False,
"anyof_type": ["string", "integer"],
},
"end_year": {
"description": "end year of the grant",
"required": False,
"type": "integer",
},
"funder": {
"description": "the agency funding the work",
"required": True,
"type": "string",
},
"grant_id": {
"description": "the identifier for this work",
"required": False,
"type": "string",
},
"institution": {
"description": "the host institution for the grant",
"type": "string",
"required": False,
},
"narrative": {"description": "", "required": False, "type": "string"},
"notes": {
"description": "notes about the grant",
"required": False,
"type": "string",
},
"person_months_academic": {
"description": "Number of months of funding during the academic" "year",
"required": False,
"anyof_type": ["integer", "float"],
},
"person_months_summer": {
"description": "Number of months of funding during the summer",
"required": False,
"anyof_type": ["integer", "float"],
},
"program": {
"description": "the program the work was funded under",
"required": True,
"type": "string",
},
# TODO: maybe this should be moved to proposals?
"status": {
"allowed": ["pending", "declined", "accepted", "in-prep"],
"description": "status of the grant",
"required": False,
"type": "string",
},
"scope": {
"description": "The scope of the grant, answers the prompt: "
'"Describe Research Including Synergies and '
'Delineation with Respect to this Proposal/Award:"',
"required": False,
"type": "string",
},
# TODO: maybe this should be duplicated in proposals?
"team": {
"description": "information about the team members participating "
"in the grant.",
"required": True,
"schema": {
"schema": {
"cv": {"required": False, "type": "string"},
"institution": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
"subaward_amount": {
"required": False,
"type": ("integer", "float"),
},
},
"type": "dict",
},
"type": "list",
},
"title": {
"description": "actual title of proposal / grant",
"required": True,
"type": "string",
},
"budget": {
"description": "budget periods of grant",
"required": False,
"schema": {
"schema": {
"begin_date": {
"description": "start date of the budget period in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"],
},
"end_date": {
"description": "end date of the budget period in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"],
},
"student_months": {
"description": "number of months of funding for student members during the academic year",
"required": False,
"anyof_type": ["float", "integer"]
},
"postdoc_months": {
"description": "number of months of funding for postdoc members during the academic year",
"required": False,
"anyof_type": ["float", "integer"]
},
"ss_months": {
"description": "number of months of funding for the summer",
"required": False,
"anyof_type": ["float", "integer"]
},
"amount": {
"description": "subaward for this budget period",
"required": False,
"anyof_type": ["float", "integer"]
}
},
"type": "dict",
},
"type": "list",
},
"proposal_id": {
"description": "initial proposal made for grant",
"required": False,
"type": "string",
}
},
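# Illustrative sketch of a minimal grants record (not part of the schema;
# values are made up and only required fields are shown):
# {"_id": "example-grant", "amount": 100000, "funder": "NSF",
#  "program": "DMREF", "title": "An example grant",
#  "team": [{"name": "Jane Doe", "institution": "Example University",
#            "position": "PI"}]}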
"groups": {
"_description": {
"description": "Information about the research group"
"this is generally public information"
},
"_id": {
"description": "Unique identifier for submission. This generally "
"includes the author name and part of the title.",
"required": True,
"type": "string",
},
"aka": {
"required": True,
"type": "list",
"description": "other names for the group",
},
"banner": {
"required": False,
"type": "string",
"description": "name of image file with the group banner",
},
"pi_name": {
"description": "The name of the Principle Investigator",
"required": True,
"type": "string",
},
"department": {
"description": "Name of host department",
"required": True,
"type": "string",
},
"institution": {
"description": "Name of the host institution",
"required": True,
"type": "string",
},
"name": {
"description": "Name of the group",
"required": True,
"type": "string",
},
"website": {"description": "URL to group webpage", "type": "string"},
"mission_statement": {
"description": "Mission statement of the group",
"type": "string",
},
"projects": {
"description": "About line for projects",
"type": "string",
"required": True,
},
"email": {
"description": "Contact email for the group",
"type": "string",
"required": True,
},
},
"institutions": {
"_description": {
"description": "This collection will contain all the institutions"
"in the world and their departments and addresses"
},
"_id": {
"description": "unique identifier for the institution.",
"required": True,
"type": "string",
},
"aka": {
"description": "list of all the different names this "
"the institution is known by",
"required": False,
"type": "list",
},
"city": {
"description": "the city where the institution is",
"required": True,
"type": "string",
},
"country": {
"description": "The country where the institution is",
"required": True,
"type": "string",
},
"date": {
"description": "Expense date",
"required": False,
"anyof_type": ["string", "date"],
},
"day": {
"description": "the day the entry was created",
"required": False,
"type": "integer",
},
"departments": {
"description": "all the departments and centers and"
"various units in the institution",
"required": False,
"type": "dict",
# Allow unknown department names, but check their content
"valuesrules": {
"type": "dict",
"schema": {
"name": {
"description": "The canonical name",
"required": True,
"type": "string",
},
"aka": {"required": False, "type": "list"},
},
},
},
"month": {
"description": "the month the entry was created",
"required": False,
"anyof_type": ["string", "integer"]
},
"name": {
"description": "the canonical name of the institutions",
"required": True,
"type": "string",
},
"schools": {
"description": "this is more for universities, but it "
"be used for larger divisions in big "
"organizations",
"required": False,
"type": "dict",
"valuesrules": {
"type": "dict",
"schema": {
"name": {
"description": "The canonical name",
"required": True,
"type": "string",
},
"aka": {"required": False, "type": "list"},
},
},
},
"state": {
"description": "the state where the institution is",
"required": False,
"type": "string",
},
"street": {
"description": "the street where the institution is",
"required": False,
"type": "string",
},
"updated": {
"description": "a datetime when the entry was updated",
"required": False,
"anyof_type": ["string", "datetime", "date"]
},
"uuid": {
"description": "a uuid for the entry",
"required": False,
"type": "string",
},
"year": {
"description": "the year the entry was created",
"required": False,
"type": "integer",
},
"zip": {
"description": "the zip or postal code of the institution",
"required": False,
"anyof_type": ["integer", "string"],
},
},
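# Illustrative sketch of a minimal institutions record (not part of the schema;
# values are made up):
# {"_id": "exampleu", "name": "Example University", "city": "Springfield",
#  "country": "USA",
#  "departments": {"physics": {"name": "Department of Physics"}}}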
"people": {
"_description": {
"description": "This collection describes the members of the "
"research group. This is normally public data."
},
"_id": {
"description": "unique identifier for the group member",
"required": True,
"type": "string",
},
"active": {
"description": "If the person is an active member, default True.",
"required": False,
"type": "boolean",
},
"aka": {
"description": "list of aliases (also-known-as), useful for "
"identifying the group member in citations or "
"elsewhere.",
"required": True,
"type": ["string", "list"],
},
"appointments": {
"type": "dict",
"required": False,
"description": "begin and end date, grant loading status and notes about appointments"
},
"activities": {
"type": "list",
"required": False,
"description": "activities may be teaching or research things",
"schema": {
"type": "dict",
"schema": {
"day": {
"required": False,
"description": "the day the activity took place",
"type": "integer",
},
"type": {
"required": True,
"description": "the type of the acitivity",
"type": "string",
"eallowed": ["teaching", "research"]
},
"month": {
"required": False,
"description": "the month the activity took place",
"anyof_type": ["integer","string"],
},
"name": {
"required": True,
"description": "brief statement of the activity",
"type": "string",
},
"other": {
"required": False,
"description": "longer statement of the activity",
"type": "string",
},
"year": {
"required": True,
"description": "the year the activity took place",
"type": "integer",
},
}
}
},
"avatar": {"description": "URL to avatar", "required": True,
"type": "string"},
"bio": {
"description": "short biographical text",
"required": True,
"type": "string",
},
"bios": {
"description": "longer biographical text if needed",
"required": False,
"anyof_type": ["string","list"]
},
"collab": {
"description": "If the person is a collaborator, default False.",
"required": False,
"type": "boolean",
},
"committees": {
"description": "Committees that are served on",
"required": False,
"schema": {
"type": "dict",
"schema": {
"name": {"required": True, "type": "string",
"description": "name of committee, or person if it "
"is a phd committee"},
"day": {"required": False, "type": "integer"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"notes": {"required": False,
"description": "extra things you want to record about the thing",
"anyof_type": ["string", "list"],
},
"year": {"required": True, "type": "integer"},
"unit": {"required": False, "type": "string",
"description": "name of department or school etc."},
"type": {"required": False, "type": "string",
"description": "type of committee, department, school, university, external",
"eallowed": ["phdoral", "phddefense", "phdproposal","promotion"]},
"level": {
"required": True,
"type": "string",
"description": "department or school or university, or external",
"eallowed": ["department", "school", "university", "external"]
},
"group": {
"required": False,
"type": "string",
"description": "this employment is/was in"
"a group in groups coll",
},
},
},
"type": "list",
},
"education": {
"description": "This contains the educational information for "
"the group member.",
"required": True,
"schema": {
"type": "dict",
"schema": {
"advisor": {"required": False, "type": "string",
"description": "name or id of advisor for this degree"},
"begin_day": {"required": False,
"type": "integer"},
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": True, "type": "integer"},
"degree": {"required": True, "type": "string"},
"department": {
"required": False,
"type": "string",
"description": "department within" "the institution",
},
"group": {
"required": False,
"type": "string",
"description": "this employment is/was in"
"a group in groups coll",
},
"end_day": {"required": False,
"type": "integer"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
# Could be ongoing with undefined end
"end_year": {"required": False, "type": "integer"},
"gpa": {"required": False, "type": ("float", "string")},
"institution": {"required": True, "type": "string"},
"location": {"required": False, "type": "string"},
"other": {"required": False, "type": "list"},
},
},
"type": "list",
},
"email": {
"description": "email address of the group member",
"required": False,
"type": "string",
},
"employment": {
"description": "Employment information, similar to educational "
"information.",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"advisor": {"required": False, "type": "string",
"description": "name or id of "
"advisor/mentor/manager"},
"begin_day": {"required": False, "type": "integer"},
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": False, "type": "integer"},
"begin_date": {"required": False, "anyof_type": ["string", "date", "datetime"],
"description": "begin date of employment in format YYYY-MM-DD"},
"coworkers": {"required": False, "type": "list",
"description": "list of coworkers. If"
"position is editor, these are "
"assumed to be coeditors in"
"conflict of interest builder"},
"department": {"required": False, "type": "string"},
"end_day": {"required": False, "type": "integer"},
"end_month": {"required": False,
},
"end_year": {"required": False, "type": "integer"},
"end_date": {"required": False, "anyof_type": ["string", "date", "datetime"],
"description": "end date of employment in format YYYY-MM-DD"},
"group": {
"required": False,
"type": "string",
"description": "this employment is/was in"
"a group in groups coll",
},
"location": {"required": False, "type": "string"},
"organization": {"required": True, "type": "string"},
"other": {"required": False, "type": "list"},
"position": {"required": True, "type": "string",
"eallowed": list(SORTED_POSITION)},
"position_full": {
"description": "The full on title of the position. This will be "
"typeset if it is here, or if not Position will be "
"used. Position will be used for sorting and must "
"come from a fixed list of positions",
"required": False,
"type": "string",
},
"status": {"required": False, "type": "string", "eallowed": [
"pi",
"adjunct",
"high-school",
"undergrad",
"ms",
"phd",
"postdoc",
"visitor-supported",
"visitor-unsupported"],
},
},
},
},
"facilities": {
"type": "list",
"required": False,
"description": "facilities may be teaching or research things",
"schema": {
"type": "dict",
"schema": {
"begin_day": {
"required": False,
"description": "the day facility, or the wish for the "
"facility, started",
"type": "integer",
},
"end_day": {
"required": False,
"description": "the day facility started",
"type": "integer",
},
"type": {
"required": True,
"description": "the type of the facility. Columbia asks"
"for wished-for facilities, so there are "
"teaching-wish and research-wish fields.",
"type": "string",
"eallowed": ["teaching", "research", "shared", "other", "teaching_wish",
"research_wish"]
},
"begin_month": {
"required": False,
"description": "the month the facility (or wish) started",
"anyof_type": ["integer","string"],
},
"end_month": {
"required": False,
"description": "the month the faclity went away",
"anyof_type": ["integer","string"],
},
"name": {
"required": True,
"description": "description of the facility",
"type": "string",
},
"notes": {
"required": False,
"description": "anything else you want to jot down",
"anyof_type": ["string", "list"]
},
"begin_year": {
"required": True,
"description": "the year the facility (or wish) started",
"type": "integer",
},
"end_year": {
"required": False,
"description": "the year the facility (or wish) went away",
"type": "integer",
},
}
}
},
"funding": {
"description": "Funding and scholarship that the group member "
"has individually obtained in the past. "
"**WARNING:** this is not to be confused with the "
"**grants** collection",
"required": False,
"schema": {
"type": "dict",
"schema": {
"currency": {"required": False, "type": "string"},
"duration": {"required": False, "type": "string"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"name": {"required": True, "type": "string"},
"value": {"required": True, "type": ("float", "integer")},
"year": {"required": True, "type": "integer"},
},
},
"type": "list",
},
"github_id": {"required": False, "type": "string",
"description": "Your GitHub ID"},
"google_scholar_url": {"required": False, "type": "string",
"description": "URL of your Google Scholar "
"rofile"},
"hindex": {
"description": "details of hindex pulled on a certain date",
"required": False,
"schema": {
"type": "dict",
"schema": {
"h": {"description": "the value of the h index",
"required": True, "type": "integer"},
"h_last_five": {"description": "h index over past 5 years",
"required": False, "type": "integer"},
"citations": {"description": "total number of citations",
"required": False, "type": "integer"},
"citations_last_five": {"description": "number of citations"
"in the past 5 years",
"required": False, "type": "integer"},
"origin": {"description": "where the numbers came from",
"required": False, "type": "string"},
"since": {"description": "year of first citation",
"required": False, "type": "integer"},
"year": {"description": "year when the data were pulled",
"required": False, "type": "integer"},
"month": {"description": "month when the data were pulled",
"required": False, "anyof_type": ["string","integer"]},
"day": {"description": "day when the data were pulled",
"required": False, "type": "integer"},
}
},
"type": "list",
},
"home_address": {
"description": "The person's home address",
"type": "dict",
"schema": {
"street": {"type": "string", "description": "street address"},
"city": {"type": "string", "description": "name of home city"},
"state": {"type": "string", "description": "name o home state"},
"zip": {"type": "string", "description": "zip code"},
},
},
"honors": {
"description": "Honors that have been awarded to this " "group member",
"required": False,
"schema": {
"type": "dict",
"schema": {
"description": {"required": False, "type": "string"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"name": {"required": True, "type": "string"},
"year": {"required": True, "type": "integer"},
},
},
"type": "list",
},
"initials": {
"description": "The canonical initials for this group member",
"required": False,
"type": "string",
},
# TODO: include `link`
"membership": {
"description": "Professional organizations this member is " "a part of",
"required": False,
"schema": {
"type": "dict",
"schema": {
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": True, "type": "integer"},
"description": {"required": False, "type": "string"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"end_year": {"required": False, "type": "integer"},
"organization": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
"website": {"required": False, "type": "string"},
},
},
"type": "list",
},
"miscellaneous": {
"description": "Place to put weird things needed for special reporta",
"required": False,
"type": "dict",
"schema": {
"metrics_for_success": {
"description": "How do I want to be judged",
"required": False,
"type": "list",
},
},
},
"name": {
"description": "Full, canonical name for the person",
"required": True,
"type": "string",
},
"office": {
"description": "The person's office",
"type": "string",
"required": False
},
"orcid_id": {
"description": "The ORCID ID of the person",
"required": False,
"type": "string",
},
"position": {
"description": "such as professor, graduate student, or scientist",
"required": False,
"type": "string",
"eallowed": list(SORTED_POSITION),
},
"position_full": {
"description": "The full on title of the position. This will be "
"typeset if it is here, or if not Position will be "
"used. Position will be used for sorting and must "
"come from a fixed list of positions",
"required": False,
"type": "string",
},
"publicity": {
"description": "summary of publicity that person has received",
"required": False,
"schema": {
"type": "dict",
"schema": {
"type": {"required": True, "type": "string",
"eallowed": ["online", "article"]},
"topic": {"required": False, "type": "string",
"description": "The short sentence of what the "
"publicity was about",
},
"title": {"required": True, "type": "string",
"description": "The title of the piece",
},
"day": {"required": False, "type": "integer",
"description": "The day the piece appeared"
},
"month": {"required": False, "anyof_type": ["string",
"integer"],
"description": "The month the piece appeared"
},
"publication": {"required": False, "type": "string",
"description": "The place where the "
"publicity was placed"
},
"text": {"required": False, "type": "string",
"description": "The text of the publicity",
},
"url": {"required": False, "type": "string",
"description": "The URL where the piece may be found"
},
"year": {"required": True, "type": "integer",
"description": "The year the piece appeared"
},
"grant": {"required": True, "type": "string",
"description": "The identifier of the grant "
"associated with the piece"
},
},
},
"type": "list"
},
"research_focus_areas": {
"description": "summary of research projects that are ongoing. Used"
"in Annual appraisal for example",
"required": False,
"schema": {
"type": "dict",
"schema": {
"begin_year": {"required": False, "type": "integer"},
"end_year": {"required": False, "type": "integer"},
"description": {"required": False, "type": "string"}
},
},
"type": "list"
},
"research_summary": {
"description": "Brief summary of overarching research goals",
"required": False,
"type": "string",
},
"service": {
"description": "Service that this group member has provided",
"required": False,
"schema": {
"type": "dict",
"schema": {
"description": {"required": False, "type": "string"},
"duration": {"required": False, "type": "string"},
"month": {"description": "Use month and year if the service"
"doesn't extend more than one year."
"Otherwise use begin_year and end_year",
"required": False,
"anyof_type": ["string", "integer"]
},
"name": {"required": True, "type": "string"},
"role": {"required": False, "type": "string",
"description": "the role played in the activity, e.g., co-chair"},
"notes": {"required": False, "anyof_type": ["string", "list"]},
"year": {"required": False, "type": "integer"},
"begin_year": {"required": False, "type": "integer"},
"begin_day": {"required": False, "type": "integer"},
"begin_month": {"description": "Use month and year if the service"
"doesn't extend more than one year."
"Otherwise use begin_year/month and end_year/month",
"required": False,
"anyof_type": ["string", "integer"]
},
"end_year": {"required": False, "type": "integer"},
"end_month": {"description": "Use month and year if the service"
"doesn't extend more than one year."
"Otherwise use begin_year and end_year",
"required": False,
"anyof_type": ["string", "integer"]
},
"end_day": {"required": False, "type": "integer"},
"other": {"required": False,
"anyof_type": ["string", "list"]},
"type": {"required": True, "type": "string",
"description": "profession, department, school, university",
"eallowed": ["profession", "university",
"school", "department"]},
},
},
"type": "list",
},
"skills": {
"description": "Skill the group member has",
"required": False,
"schema": {
"type": "dict",
"schema": {
"category": {"required": True, "type": "string"},
"level": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
},
},
"type": "list",
},
"teaching": {
"description": "Courses that this group member has taught, if any",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"course": {"required": True, "type": "string"},
"courseid": {"required": True, "type": "string"},
"description": {"required": False, "type": "string"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"]},
"end_year": {"required": False, "type": "integer"},
"enrollment": {"required": False, "anyof_type": ["integer","string"]},
"evaluation": {
"type": "dict",
"required": False,
"schema": {
"response_rate": {"type": "number", "required": True},
"amount_learned": {"type": "number", "required": True},
"appropriateness_workload": {"type": "number", "required": True},
"course_overall": {"type": "number", "required": True},
"fairness_grading": {"type": "number", "required": True},
"organization": {"type": "number", "required": True},
"classroom_delivery": {"type": "number", "required": True},
"approachability": {"type": "number", "required": True},
"instructor_overall": {"type": "number", "required": True},
"comments": {"type": "list","required": False,
"description": "student comments"},
},
},
"materials": {"required": False, "type": "string"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"organization": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
"semester": {"required": False, "type": "string"},
"syllabus": {"required": False, "type": "string"},
"video": {"required": False, "type": "string"},
"website": {"required": False, "type": "string"},
"year": {"required": True, "type": "integer"},
},
},
},
"title": {
"description": "for example, Dr., etc.",
"required": False,
"type": "string",
},
"todos": {
"description": "a list of the todo tasks",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"description": {"description": "the description of the to-do task",
"required": True,
"type": "string"},
"due_date": {"description": "the due date",
"required": False,
"anyof_type": ["string", "date"]},
"begin_date": {"description": "the begin date",
"required": False,
"anyof_type": ["string", "date"]},
"end_date": {"description": "the end date",
"required": False,
"anyof_type": ["string", "date"]},
"duration": {
"description": "the size of the task/ the estimated duration it will take to finish the task. Unit: miniutes.",
"required": False,
"type": "float"},
"importance": {
"description": "the importance, from 0 to 2",
"required": False,
"type": "integer"},
"status": {"description": "the status: started/finished/cancelled",
"required": True,
"type": "string"},
"notes": {"description": "additional notes for this task",
"required": False,
"type": "list",
"schema": {"type": "string"}
},
"running_index": {"description": "Index of a certain task used to update that task in the enumerated todo list.",
"required": False,
"type": "integer"},
"assigned_by": {
"description": "ID of the member that assigns the task",
"required": False,
"type": "string"},
}
}
},
},
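# Illustrative sketch of a minimal people record (not part of the schema;
# values are made up and only the required fields are shown):
# {"_id": "jdoe", "name": "Jane Doe", "aka": ["J. Doe"],
#  "avatar": "https://example.com/jdoe.png", "bio": "Jane studies widgets.",
#  "education": [{"institution": "Example University", "degree": "PhD",
#                 "begin_year": 2008}]}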
"presentations": {
"_description": {
"description": "This collection describes presentations that group"
"members make at conferences, symposia, seminars and"
"so on."
},
"_id": {
"description": "unique id for the presentation",
"required": True,
"type": "string",
},
"abstract": {
"description": "abstract of the presentation",
"required": False,
"type": "string",
},
"authors": {
"description": "Author list.",
"required": True,
"anyof_type": ["string", "list"],
},
"begin_date": {
"description": "begin date in YYYY-MM-DD",
"anyof_type": ["date", "string"],
},
"end_date": {
"description": "end_date in YYYY-MM-DD",
"anyof_type": ["date", "string"],
},
"begin_year": {
"description": "year the conference or trip begins.",
"required": False,
"type": "integer",
},
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_day": {"required": False, "type": "integer"},
"department": {
"description": "department of the institution where the"
"presentation will be made, if "
"applicable. should be discoverable in "
"institutions.",
"required": False,
"type": "string",
},
"end_year": {
"description": "year the conference or trip ends",
"required": False,
"type": "integer",
},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"end_day": {"required": False, "type": "integer"},
"institution": {
"description": "institution where the"
"presentation will be made, if "
"applicable.",
"required": False,
"type": "string",
},
"meeting_name": {
"description": "full name of the conference or "
"meeting. If it is a departmental "
"seminar or colloquium, write Seminar"
"or Colloquium and fill in department "
"and institution fields",
"required": False,
"type": "string",
},
# TODO: conditional validation. If type=colloq or seminar, required is
# institution and department, otherwise location
"location": {
"description": "city and {state or country} of meeting",
"required": False,
"type": "string",
},
"notes": {
"description": "any reminder or memory aid about anything",
"required": False,
"anyof_type": ["list", "string"],
},
"project": {
"description": "project or list of projects that this "
"presentation is associated with. Should "
"be discoverable in projects collection",
"required": False,
"anyof_type": ["string", "list"],
},
"status": {
"description": "Is the application in prep or submitted, "
"was the invitation accepted or declined, was "
"the trip cancelled?",
"required": True,
"type": "string",
"eallowed": ["in-prep", "submitted", "accepted", "declined",
"cancelled"],
},
"title": {
"description": "title of the presentation",
"required": True,
"type": "string",
},
"type": {
"description": "type of presentation",
"eallowed": PRESENTATIONS_TYPE,
"required": True,
"type": "string",
},
"webinar": {
"description": "true if a webinar. Default to False",
"required": False,
"type": "boolean",
},
},
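# Illustrative sketch of a minimal presentations record (not part of the schema;
# values are made up, and the "type" value is only a guess at an allowed entry):
# {"_id": "jdoe-example-2019", "title": "Widgets at scale",
#  "authors": ["Jane Doe"], "status": "accepted", "type": "talk",
#  "meeting_name": "Example Society Meeting", "location": "Springfield, IL"}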
"projects": {
"_description": {
"description": "This collection describes the research group "
"projects. This is normally public data."
},
"_id": {
"description": "Unique project identifier.",
"required": True,
"type": "string",
},
"active": {
"description": "true if the project is active",
"required": False,
"anyof_type": ["string", "boolean"],
},
"description": {
"description": "brief project description.",
"required": True,
"type": "string",
},
"grant": {
"description": "Grant id if there is a grant supporting this " "project",
"required": False,
"type": "string",
},
"group": {
"description": "id for the group in the groups collection whose project this is",
"required": False,
"type": "string",
},
"highlights": {
"description": "list of things to highlight in a report or website, such as releases for for software or high profile publications",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"year": {"description": "the year of the highlight",
"required": True,
"type": "integer"},
"month": {"description": "the month of the highlight",
"required": True,
"anyof_type": ["string", "integer"]},
"description": {"description": "the highlight",
"required": True,
"type": "string"},
}
}
},
"logo": {
"description": "URL to the project logo",
"required": False,
"type": "string",
},
"name": {
"description": "name of the project.",
"required": True,
"type": "string",
},
"other": {
"description": "other information about the project",
"required": False,
"type": ["list", "string"],
},
"repo": {
"description": "URL of the source code repo, if available",
"required": False,
"type": "string",
},
"team": {
"description": "People who are/have been working on this project.",
"required": True,
"schema": {
"type": "dict",
"schema": {
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": True, "type": "integer"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"end_year": {"required": False, "type": "integer"},
"name": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
},
},
"type": "list",
},
"type": {
"description": "The type of project",
"required": False,
"anyof_type": ["string"],
"eallowed": ["ossoftware", "funded"]
},
"website": {
"description": "URL of the website.",
"required": False,
"type": "string",
},
},
"proposalReviews": {
"_description": {
"description": "This collection contains reviews of funding proposals"
},
"_id": {
"description": "ID, e.g. 1906_doe_example",
"required": True,
"type": ("string", "integer", "float"),
},
"adequacy_of_resources": {
"description": "Are the resources of the PI adequate",
"required": True,
"type": "list",
},
"agency": {
"description": "currently nsf or doe",
"type": "string",
"eallowed": ["nsf", "doe"],
},
"competency_of_team": {
"description": "Is the team competent",
"required": True,
"type": "list",
},
"doe_appropriateness_of_approach": {
"description": "Appropriateness of Research. only used if agency is doe.",
"required": False,
"type": "list",
},
"doe_reasonableness_of_budget": {
"description": "Reasonableness of budget. only used if agency is doe.",
"required": False,
"type": "list",
},
"doe_relevance_to_program_mission": {
"description": "Relevance to program mission. only used if agency is doe.",
"required": False,
"type": "list",
},
"does_how": {
"description": "How will the research be done",
"required": True,
"type": "list",
},
"does_what": {
"description": "What will the team do",
"required": True,
"type": "string",
},
"due_date": {
"description": "date the review is due in ISO format",
"required": True,
"anyof_type": ["string", "date"],
},
"freewrite": {
"description": "Anything and this will appear in the built document"
"right before the summary. This section often used "
"for extra review criteria for the particular proposal",
"required": False,
"type": "list",
},
"goals": {
"description": "What are the main goals of the proposed research",
"required": True,
"type": "list",
},
"importance": {
"description": "The importance of the Research",
"required": True,
"type": "list",
},
"institutions": {
"description": "The institutions of the authors in the same order",
"required": True,
"anyof_type": ["string", "list"]
},
"month": {
"description": "The month the review was submitted",
"required": True,
"anyof_type": ["string", "integer"],
},
"names": {
"description": "The names of the PIs",
"required": True,
"anyof_type": ["list", "string"],
},
"nsf_broader_impacts": {
"description": "The broader impacts of the research. Only used if "
"agency is nsf",
"required": False,
"type": "list",
},
"nsf_create_original_transformative": {
"description": "Answer to the question how the work is creative, "
"original or transformative. Only used if agency is "
"nsf",
"required": False,
"type": "list",
},
"nsf_plan_good": {
"description": "Is the plan good? Only used if agency is nsf",
"required": False,
"type": "list",
},
"nsf_pot_to_advance_knowledge": {
"description": "Answer to the question how the work will advance"
"knowledge. Only used if agency is nsf",
"required": False,
"type": "list",
},
"nsf_pot_to_benefit_society": {
"description": "Answer to the question how the work has the potential"
"to benefit society. Only used if agency is nsf",
"required": False,
"type": "list",
},
"requester": {
"description": "Name of the program officer who requested the review",
"required": True,
"type": "string",
},
"reviewer": {
"description": "short name of the reviewer. Will be used in the "
"filename of the resulting text file",
"required": True,
"type": "string",
},
"status": {
"description": "the status of the review",
"type": "string",
"eallowed": [
"invited",
"accepted",
"declined",
"downloaded",
"inprogress",
"submitted",
"cancelled"
],
},
"summary": {
"description": "Summary statement",
"required": True,
"type": "string",
},
"title": {
"description": "The title of the proposal",
"required": True,
"type": "string",
},
"year": {
"description": "The year the review was submitted",
"required": True,
"type": "integer",
},
},
"proposals": {
"_description": {
"description": "This collection represents proposals that have "
"been submitted by the group."
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": ("string", "integer", "float"),
},
"amount": {
"description": "value of award",
"required": True,
"type": ("integer", "float"),
},
"authors": {
"description": "other investigator names",
"required": False,
"anyof_type": ["list", "string"],
},
"begin_date": {
"description": "start date of the proposed grant in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"]
},
"begin_day": {
"description": "start day of the proposed grant",
"required": False,
"type": "integer",
},
"begin_month": {
"description": "start month of the proposed grant",
"required": False,
"anyof_type": ["string", "integer"]
},
"begin_year": {
"description": "start year of the proposed grant",
"required": False,
"type": "integer",
},
"call_for_proposals": {
"description": "",
"required": False,
"type": "string",
},
"cpp_info": {
"description": "extra information needed for building current and "
"pending form ",
"required": False,
"schema": {
"cppflag": {"required": False, "type": "boolean"},
"other_agencies_submitted": {"required": False,
"anyof_type": ["string", "boolean"]},
"institution": {"required": False, "type": "string",
"description": "place where the proposed grant will be located"},
"person_months_academic": {"required": False,
"anyof_type": ["float", "integer"]},
"person_months_summer": {"required": False,
"anyof_type": ["float", "integer"]},
"project_scope": {"required": False, "type": "string"},
},
"type": "dict",
},
"currency": {
"description": "typically '$' or 'USD'",
"required": True,
"type": "string",
},
"day": {
"description": "day that the proposal was submitted",
"required": False,
"type": "integer",
},
"due_date": {
"description": "day that the proposal is due",
"required": False,
"anyof_type": ["string", "date"],
},
"duration": {
"description": "number of years",
"required": False,
"type": ("integer", "float"),
},
"end_date": {
"description": "end date of the proposed grant in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"]
},
"end_day": {
"description": "end day of the proposed grant",
"required": False,
"type": ("string", "integer"),
},
"end_month": {
"description": "end month of the proposed grant",
"required": False,
"anyof_type": ["string", "integer"]
},
"end_year": {
"description": "end year of the proposed grant",
"required": False,
"type": "integer",
},
"funder": {
"description": "who will fund the proposal"
"as funder in grants",
"required": False,
"type": "string",
},
"full": {
"description": "full body of the proposal",
"required": False,
"type": "dict",
},
"month": {
"description": "month that the proposal was submitted",
"required": False,
"anyof_type": ["string", "integer"]
},
"notes": {
"description": "anything you want to note",
"required": False,
"anyof_type": ["string", "list"],
},
"pi": {
"description": "principal investigator name",
"required": True,
"type": "string",
},
"pre": {
"description": "Information about the pre-proposal",
"required": False,
"type": "dict",
},
"status": {
"description": "e.g. 'pending', 'accepted', 'rejected'",
"required": True,
"type": "string",
"eallowed": ["pending", "declined", "accepted", "inprep",
"submitted"],
},
"team": {
"description": "information about the team members participating "
"in the grant.",
"required": False,
"schema": {
"schema": {
"cv": {"required": False, "type": "string"},
"email": {"required": False, "type": "string"},
"institution": {"required": False, "type": "string"},
"name": {"required": False, "type": "string"},
"position": {"required": False, "type": "string"},
"subaward_amount": {
"required": False,
"type": ("integer", "float"),
},
},
"type": "dict",
},
"type": "list",
},
"title": {
"description": "actual title of proposal",
"required": True,
"type": "string",
},
"title_short": {
"description": "short title of proposal",
"required": False,
"type": "string",
},
"year": {
"description": "Year that the proposal was submitted",
"required": False,
"type": "integer",
},
},
"refereeReports": {
"_description": {
"description": "This is a collection of information that will be "
"be used to build a referee report. This should probably be private."
},
"_id": {"description": "the ID", "required": True, "type": "string"},
"claimed_found_what": {
"description": "What the authors claim to have found",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"claimed_why_important": {
"description": "What importance the authors claim",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"did_how": {
"description": "How the study was done",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"did_what": {
"description": "What the study was",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"due_date": {
"description": "date the review is due in ISO format",
"required": True,
"anyof_type": ["string", "date"],
},
"editor_eyes_only": {
"description": "Comments you don't want passed to the author",
"required": False,
"type": "string",
},
"final_assessment": {
"description": "Summary of impressions of the study",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"first_author_last_name": {
"description": "Last name of first author will be referred to "
"with et al.",
"required": True,
"type": "string",
},
"freewrite": {
"description": "Things that you want to add that don't fit into "
"any category above",
"required": False,
"type": "string",
},
"journal": {
"description": "name of the journal",
"required": True,
"type": "string",
},
"month": {
"description": "the month the entry was created",
"required": False,
"anyof_type": ["string", "integer"]
},
"recommendation": {
"description": "Your publication recommendation",
"required": True,
"type": "string",
"eallowed": ["reject", "asis", "smalledits", "diffjournal",
"majoredits"],
},
"requester": {
"description": "Name of the program officer who requested the review",
"required": True,
"type": "string",
},
"reviewer": {
"description": "name of person reviewing the paper",
"required": True,
"type": "string",
},
"status": {
"description": "Where you are with the review",
"required": True,
"type": "string",
"eallowed": [
"invited",
"accepted",
"declined",
"downloaded",
"inprogress",
"submitted",
"cancelled"
],
},
"submitted_date": {
"description": "submitted date in ISO YYYY-MM-DD format",
"required": True,
"anyof_type": ["string", "date"],
},
"title": {
"description": "title of the paper under review",
"required": True,
"type": "string",
},
"validity_assessment": {
"description": "List of impressions of the validity of the claims",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"year": {
"description": "year when the review is being done",
"required": True,
"anyof_type": ["string", "integer"],
},
},
"students": {
"_description": {
"description": "This is a collection of student names and "
"metadata. This should probably be private."
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": "string",
},
"aka": {
"description": "list of aliases",
"required": False,
"schema": {"type": "string"},
"type": ("list", "string"),
},
"email": {"description": "email address", "required": False,
"type": "string"},
"university_id": {
"description": "The university identifier for the student",
"required": False,
"type": "string",
},
},
}
for s in SCHEMAS:
SCHEMAS[s]["files"] = {
"description": "Files associated with the document",
# TODO: fix this since this is currently comming out a CommentedMap
# "type": "list",
# "schema": {"type": "string"},
"required": False,
}
class NoDescriptionValidator(Validator):
def _validate_description(self, description, field, value):
"""Don't validate descriptions
The rule's arguments are validated against this schema:
{'type': 'string'}"""
if False:
pass
def _validate_eallowed(self, eallowed, field, value):
"""Test if value is in list
The rule's arguments are validated against this schema:
{'type': 'list'}
"""
if value not in eallowed:
warn(
'"{}" is not in the preferred entries for "{}", please '
"consider changing this entry to conform or add this to the "
"``eallowed`` field in the schema.".format(value, field)
)
def validate(coll, record, schemas):
"""Validate a record for a given db
Parameters
----------
coll : str
The name of the db in question
record : dict
The record to be validated
schemas : dict
The schema to validate against
Returns
-------
rtn : bool
True is valid
errors: dict
The errors encountered (if any)
"""
if coll in schemas:
schema = copy.deepcopy(schemas[coll])
v = NoDescriptionValidator(schema)
return v.validate(record), v.errors
else:
return True, ()
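# Minimal usage sketch (illustrative, not part of the original module):
# validate a made-up "students" record against SCHEMAS and print the outcome.
if __name__ == "__main__":
    example_student = {
        "_id": "jane-doe",
        "aka": ["J. Doe"],
        "email": "jdoe@example.edu",
    }
    ok, errors = validate("students", example_student, SCHEMAS)
    print(ok, errors)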
| 38.034363
| 145
| 0.421707
|
a7dd33eb926933aab749f4920ecc264e472a96b6
| 640
|
py
|
Python
|
vme/data_store.py
|
oskar-j/visualize-my-expenses
|
c96595c32df7d6abf7f646bd7bad4102d7b12c0e
|
[
"MIT"
] | null | null | null |
vme/data_store.py
|
oskar-j/visualize-my-expenses
|
c96595c32df7d6abf7f646bd7bad4102d7b12c0e
|
[
"MIT"
] | null | null | null |
vme/data_store.py
|
oskar-j/visualize-my-expenses
|
c96595c32df7d6abf7f646bd7bad4102d7b12c0e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
class CalculatorBase(object):
def _verify(self, verbose) -> bool:
pass
def print_to_console(self):
pass
class Calculator(CalculatorBase):
def __init__(self):
self.verbose = False
self.rows = None  # initialise so append_rows can fall back to set_rows safely
def _prepare(self, verbose):
pass
def insert_row(self, row):
pass
def set_rows(self, rows):
if rows is None:
self.rows = list()
else:
self.rows = rows
pass
def append_rows(self, rows):
if self.rows is not None:
self.rows += rows
else:
self.set_rows(rows)
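# Illustrative usage sketch (not part of the original module): set_rows replaces
# the stored rows and append_rows extends them, falling back to set_rows when
# nothing has been stored yet.
if __name__ == "__main__":
    calc = Calculator()
    calc.set_rows([{"amount": 12.5}, {"amount": 3.0}])
    calc.append_rows([{"amount": 7.25}])
    print(len(calc.rows))  # 3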
| 17.297297
| 39
| 0.559375
|
3cb53234b2ef14e87ab6f7c1ecebf58c114dc720
| 1,205
|
py
|
Python
|
messier_objects/migrations/0001_initial.py
|
DanielPDWalker/Astrophoto
|
9a7ee59deb291617baa3ab8724b8ce5970e6ea9f
|
[
"MIT"
] | null | null | null |
messier_objects/migrations/0001_initial.py
|
DanielPDWalker/Astrophoto
|
9a7ee59deb291617baa3ab8724b8ce5970e6ea9f
|
[
"MIT"
] | 12
|
2020-07-26T06:20:22.000Z
|
2022-03-12T00:43:09.000Z
|
messier_objects/migrations/0001_initial.py
|
DanielPDWalker/Astrophoto-API
|
9a7ee59deb291617baa3ab8724b8ce5970e6ea9f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.9 on 2020-07-22 14:04
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MessierObject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('messier_number', models.CharField(max_length=4, unique=True)),
('name', models.CharField(max_length=255)),
('ncg_or_ic_number', models.CharField(max_length=25)),
('object_type', models.CharField(max_length=255)),
('distance_kly', models.CharField(max_length=25)),
('constellation', models.CharField(max_length=50)),
('apparent_magnitude', models.FloatField()),
('right_ascension', models.CharField(max_length=50)),
('declination', models.CharField(max_length=50)),
('photo', models.ImageField(upload_to='messier_objects')),
],
options={
'ordering': ('messier_number',),
},
),
]
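# Illustrative usage sketch (assumption, not part of the migration): once this
# migration is applied, the generated model could be queried like
#   from messier_objects.models import MessierObject
#   andromeda = MessierObject.objects.filter(messier_number="M31").first()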
| 35.441176
| 114
| 0.568465
|
99536858ad20563d5cadba03958bc1515fce0d6b
| 1,558
|
py
|
Python
|
run_simulation.py
|
TrungDinhT/link-layer-simulator
|
b57532e43f2ae27e568baf967e016f16444fd4d8
|
[
"MIT"
] | null | null | null |
run_simulation.py
|
TrungDinhT/link-layer-simulator
|
b57532e43f2ae27e568baf967e016f16444fd4d8
|
[
"MIT"
] | null | null | null |
run_simulation.py
|
TrungDinhT/link-layer-simulator
|
b57532e43f2ae27e568baf967e016f16444fd4d8
|
[
"MIT"
] | null | null | null |
import numpy as np
from ABP import SimulatorABP, SimulatorABP_NAK
from baseclasses import StatsManager, Protocol
def simulate(protocol):
# Input parameters
H = 54*8 # bits
l = 1500*8 # bits
delta_rate = np.array([2.5, 5, 7.5, 10, 12.5]) # delta_rate*tau = delta = timeout
C = 5*1e6*8 # 5MB/s
taus = np.array([0.01, 0.5]) # seconds
BERs = np.array([0, 1e-4, 1e-5])
duration = 10000
# Init statistic manager and simulator
statsManager = StatsManager(protocol, delta_rate, taus.shape[0]*BERs.shape[0])
if protocol == Protocol.ABP:
simulator = SimulatorABP(duration, statsManager)
elif protocol == Protocol.ABP_NAK:
simulator = SimulatorABP_NAK(duration, statsManager)
elif protocol == Protocol.GBN:
print 'GBN'
else:
print 'unknown protocol'
exit(1)
# Running simulation
i = 0 # initial row index
j = 1 # initial column index
k = 0 # initial group index (we have 2 groups : tau = 0.01 and tau = 0.5)
for tau in taus:
for timeout in delta_rate*tau:
for BER in BERs:
statsManager.reset()
simulator.set_params(timeout, H, l, C, tau, BER)
simulator.run()
statsManager.record_stats(i, j)
j = j + 1
i = i + 1
j = k*BERs.shape[0] + 1 # reset column index
i = 0 # reset row index
k = k + 1 # increase group index
j = k*BERs.shape[0] + 1 # reset column index
statsManager.save_to_csv()
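# Illustrative entry point (an assumption; the original script may be driven
# differently): run the plain ABP simulation defined above.
if __name__ == '__main__':
    simulate(Protocol.ABP)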
| 31.795918
| 85
| 0.587291
|
69ea0b944e108b77f7d0ad987828766bf69c1326
| 3,764
|
py
|
Python
|
foolspider/foolspider/spiders/proxy_spider.py
|
foolcage/foolcage
|
3558f23d319dd4393c786bd6b606548deaf12c08
|
[
"MIT"
] | 1
|
2017-08-03T07:36:56.000Z
|
2017-08-03T07:36:56.000Z
|
foolspider/foolspider/spiders/proxy_spider.py
|
foolcage/foolcage
|
3558f23d319dd4393c786bd6b606548deaf12c08
|
[
"MIT"
] | null | null | null |
foolspider/foolspider/spiders/proxy_spider.py
|
foolcage/foolcage
|
3558f23d319dd4393c786bd6b606548deaf12c08
|
[
"MIT"
] | null | null | null |
import json
import scrapy
from scrapy import Request
from scrapy import Selector
from scrapy import signals
from foolspider.consts import HIDEME_NAME_HEADER
from foolspider.proxy.proxy_manager import g_socks2http_proxy_items
from foolspider.utils.utils import get_forecast_event_path
class ProxySpider(scrapy.Spider):
name = "proxy"
def start_requests(self):
url = self.get_proxy_url(0)
meta = {}
if g_socks2http_proxy_items.get('127.0.0.1:1081'):
meta['proxy'] = g_socks2http_proxy_items['127.0.0.1:1081']
yield Request(url=url,
headers=HIDEME_NAME_HEADER,
meta={'proxy': 'http://127.0.0.1:10000'},
callback=self.download_proxy_data)
def download_proxy_data(self, response):
security_item = response.meta['item']
trs = response.xpath('//*[@id="dataTable"]//tr').extract()
forecast_jsons = []
try:
for tr in trs[1:]:
tds = Selector(text=tr).xpath('//td//text()').extract()
tds = [x.strip() for x in tds if x.strip()]
# convert the performance-change string to a float
change_str = tds[7]
change_start = None
if '~' in change_str:
i = change_str.index('~')
change_start = change_str[0:i]
change = change_str[i + 1:]
else:
change = change_str
if change:
change = change.strip('%')
change = float(change) / 100
if change_start:
change_start = change_start.strip('%')
change_start = float(change_start) / 100
# preEPS may be empty
preEPS = None
try:
preEPS = float(tds[6])
except Exception as e:
pass
json_item = {"id": '{}_{}'.format(security_item['id'], tds[3]),
"securityId": security_item['id'],
"reportDate": tds[3],
"reportPeriod": tds[4],
"type": tds[2],
"description": tds[5],
"preEPS": preEPS,
"changeStart": change_start,
"change": change,
}
forecast_jsons.append(json_item)
if forecast_jsons:
try:
with open(get_forecast_event_path(security_item), "w") as f:
json.dump(forecast_jsons, f, ensure_ascii=False)
except Exception as e:
self.logger.error(
'error when saving forecast url={} path={} error={}'.format(response.url,
get_forecast_event_path(
security_item), e))
except Exception as e:
self.logger.error('error when getting k data url={} error={}'.format(response.url, e))
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(ProxySpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider, reason):
spider.logger.info('Spider closed: %s,%s\n', spider.name, reason)
def get_proxy_url(self, position):
return 'https://hidemy.name/en/proxy-list/?start={}#list'.format(
position)
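# Illustrative invocation sketch (assumption, not part of the original file):
# the spider would normally be launched from the Scrapy project root with
#   scrapy crawl proxy
# or programmatically, e.g.
#   from scrapy.crawler import CrawlerProcess
#   process = CrawlerProcess()
#   process.crawl(ProxySpider)
#   process.start()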
| 37.267327
| 108
| 0.493624
|
fdc1d918ff71706a94fe53a9dd5e58f6e0800bff
| 45,072
|
py
|
Python
|
tests/http/federation/test_matrix_federation_agent.py
|
cuongnv/synapse
|
bb6c9008f1bba3c8e7e13051f0f8333f62ed8f31
|
[
"Apache-2.0"
] | 1
|
2021-05-31T23:35:36.000Z
|
2021-05-31T23:35:36.000Z
|
tests/http/federation/test_matrix_federation_agent.py
|
cuongnv/synapse
|
bb6c9008f1bba3c8e7e13051f0f8333f62ed8f31
|
[
"Apache-2.0"
] | null | null | null |
tests/http/federation/test_matrix_federation_agent.py
|
cuongnv/synapse
|
bb6c9008f1bba3c8e7e13051f0f8333f62ed8f31
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from mock import Mock
import treq
from service_identity import VerificationError
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet._sslverify import ClientTLSOptions, OpenSSLCertificateOptions
from twisted.internet.protocol import Factory
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.web._newclient import ResponseNeverReceived
from twisted.web.client import Agent
from twisted.web.http import HTTPChannel
from twisted.web.http_headers import Headers
from twisted.web.iweb import IPolicyForHTTPS
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto.context_factory import FederationPolicyForHTTPS
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.http.federation.srv_resolver import Server
from synapse.http.federation.well_known_resolver import (
WellKnownResolver,
_cache_period_from_headers,
)
from synapse.logging.context import LoggingContext
from synapse.util.caches.ttlcache import TTLCache
from tests import unittest
from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file
from tests.server import FakeTransport, ThreadedMemoryReactorClock
from tests.utils import default_config
logger = logging.getLogger(__name__)
test_server_connection_factory = None
def get_connection_factory():
# this needs to happen once, but not until we are ready to run the first test
global test_server_connection_factory
if test_server_connection_factory is None:
test_server_connection_factory = TestServerTLSConnectionFactory(
sanlist=[
b"DNS:testserv",
b"DNS:target-server",
b"DNS:xn--bcher-kva.com",
b"IP:1.2.3.4",
b"IP:::1",
]
)
return test_server_connection_factory
class MatrixFederationAgentTests(unittest.TestCase):
def setUp(self):
self.reactor = ThreadedMemoryReactorClock()
self.mock_resolver = Mock()
config_dict = default_config("test", parse=False)
config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()]
self._config = config = HomeServerConfig()
config.parse_config_dict(config_dict, "", "")
self.tls_factory = FederationPolicyForHTTPS(config)
self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
self.had_well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
self.well_known_resolver = WellKnownResolver(
self.reactor,
Agent(self.reactor, contextFactory=self.tls_factory),
well_known_cache=self.well_known_cache,
had_well_known_cache=self.had_well_known_cache,
)
self.agent = MatrixFederationAgent(
reactor=self.reactor,
tls_client_options_factory=self.tls_factory,
_srv_resolver=self.mock_resolver,
_well_known_resolver=self.well_known_resolver,
)
def _make_connection(self, client_factory, expected_sni):
"""Builds a test server, and completes the outgoing client connection
Returns:
HTTPChannel: the test server
"""
# build the test server
server_tls_protocol = _build_test_server(get_connection_factory())
# now, tell the client protocol factory to build the client protocol (it will be a
# _WrappingProtocol, around a TLSMemoryBIOProtocol, around an
# HTTP11ClientProtocol) and wire the output of said protocol up to the server via
# a FakeTransport.
#
# Normally this would be done by the TCP socket code in Twisted, but we are
# stubbing that out here.
client_protocol = client_factory.buildProtocol(None)
client_protocol.makeConnection(
FakeTransport(server_tls_protocol, self.reactor, client_protocol)
)
# tell the server tls protocol to send its stuff back to the client, too
server_tls_protocol.makeConnection(
FakeTransport(client_protocol, self.reactor, server_tls_protocol)
)
# grab a hold of the TLS connection, in case it gets torn down
server_tls_connection = server_tls_protocol._tlsConnection
# fish the test server back out of the server-side TLS protocol.
http_protocol = server_tls_protocol.wrappedProtocol
# give the reactor a pump to get the TLS juices flowing.
self.reactor.pump((0.1,))
# check the SNI
server_name = server_tls_connection.get_servername()
self.assertEqual(
server_name,
expected_sni,
"Expected SNI %s but got %s" % (expected_sni, server_name),
)
return http_protocol
@defer.inlineCallbacks
def _make_get_request(self, uri):
"""
Sends a simple GET request via the agent, and checks its logcontext management
"""
with LoggingContext("one") as context:
fetch_d = self.agent.request(b"GET", uri)
# Nothing happened yet
self.assertNoResult(fetch_d)
# should have reset logcontext to the sentinel
_check_logcontext(LoggingContext.sentinel)
try:
fetch_res = yield fetch_d
return fetch_res
except Exception as e:
logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e)
raise
finally:
_check_logcontext(context)
def _handle_well_known_connection(
self, client_factory, expected_sni, content, response_headers={}
):
"""Handle an outgoing HTTPs connection: wire it up to a server, check that the
request is for a .well-known, and send the response.
Args:
client_factory (IProtocolFactory): outgoing connection
expected_sni (bytes): SNI that we expect the outgoing connection to send
content (bytes): content to send back as the .well-known
Returns:
HTTPChannel: server impl
"""
# make the connection for .well-known
well_known_server = self._make_connection(
client_factory, expected_sni=expected_sni
)
# check the .well-known request and send a response
self.assertEqual(len(well_known_server.requests), 1)
request = well_known_server.requests[0]
self._send_well_known_response(request, content, headers=response_headers)
return well_known_server
def _send_well_known_response(self, request, content, headers={}):
"""Check that an incoming request looks like a valid .well-known request, and
send back the response.
"""
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/.well-known/matrix/server")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
# send back a response
for k, v in headers.items():
request.setHeader(k, v)
request.write(content)
request.finish()
self.reactor.pump((0.1,))
def test_get(self):
"""
happy-path test of a GET request with an explicit port
"""
self.reactor.lookups["testserv"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=b"testserv")
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(
request.requestHeaders.getRawHeaders(b"host"), [b"testserv:8448"]
)
content = request.content.read()
self.assertEqual(content, b"")
# Deferred is still without a result
self.assertNoResult(test_d)
# send the headers
request.responseHeaders.setRawHeaders(b"Content-Type", [b"application/json"])
request.write("")
self.reactor.pump((0.1,))
response = self.successResultOf(test_d)
# that should give us a Response object
self.assertEqual(response.code, 200)
# Send the body
request.write('{ "a": 1 }'.encode("ascii"))
request.finish()
self.reactor.pump((0.1,))
# check it can be read
json = self.successResultOf(treq.json_content(response))
self.assertEqual(json, {"a": 1})
def test_get_ip_address(self):
"""
Test the behaviour when the server name contains an explicit IP (with no port)
"""
# there will be a getaddrinfo on the IP
self.reactor.lookups["1.2.3.4"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://1.2.3.4/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=None)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"1.2.3.4"])
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_get_ipv6_address(self):
"""
Test the behaviour when the server name contains an explicit IPv6 address
(with no port)
"""
# there will be a getaddrinfo on the IP
self.reactor.lookups["::1"] = "::1"
test_d = self._make_get_request(b"matrix://[::1]/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "::1")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=None)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"[::1]"])
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_get_ipv6_address_with_port(self):
"""
Test the behaviour when the server name contains an explicit IPv6 address
(with explicit port)
"""
# there will be a getaddrinfo on the IP
self.reactor.lookups["::1"] = "::1"
test_d = self._make_get_request(b"matrix://[::1]:80/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "::1")
self.assertEqual(port, 80)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=None)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"[::1]:80"])
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_get_hostname_bad_cert(self):
"""
Test the behaviour when the certificate on the server doesn't match the hostname
"""
self.mock_resolver.resolve_service.side_effect = lambda _: []
self.reactor.lookups["testserv1"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://testserv1/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# No SRV record lookup yet
self.mock_resolver.resolve_service.assert_not_called()
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
# fonx the connection
client_factory.clientConnectionFailed(None, Exception("nope"))
        # the attempt delay on the HostnameEndpoint is 0.3s, so it takes that long
        # before the .well-known request fails.
self.reactor.pump((0.4,))
# now there should be a SRV lookup
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv1"
)
# we should fall back to a direct connection
self.assertEqual(len(clients), 2)
(host, port, client_factory, _timeout, _bindAddress) = clients[1]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=b"testserv1")
# there should be no requests
self.assertEqual(len(http_server.requests), 0)
# ... and the request should have failed
e = self.failureResultOf(test_d, ResponseNeverReceived)
failure_reason = e.value.reasons[0]
self.assertIsInstance(failure_reason.value, VerificationError)
def test_get_ip_address_bad_cert(self):
"""
Test the behaviour when the server name contains an explicit IP, but
the server cert doesn't cover it
"""
# there will be a getaddrinfo on the IP
self.reactor.lookups["1.2.3.5"] = "1.2.3.5"
test_d = self._make_get_request(b"matrix://1.2.3.5/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.5")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=None)
# there should be no requests
self.assertEqual(len(http_server.requests), 0)
# ... and the request should have failed
e = self.failureResultOf(test_d, ResponseNeverReceived)
failure_reason = e.value.reasons[0]
self.assertIsInstance(failure_reason.value, VerificationError)
def test_get_no_srv_no_well_known(self):
"""
Test the behaviour when the server name has no port, no SRV, and no well-known
"""
self.mock_resolver.resolve_service.side_effect = lambda _: []
self.reactor.lookups["testserv"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# No SRV record lookup yet
self.mock_resolver.resolve_service.assert_not_called()
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
# fonx the connection
client_factory.clientConnectionFailed(None, Exception("nope"))
        # the attempt delay on the HostnameEndpoint is 0.3s, so it takes that long
        # before the .well-known request fails.
self.reactor.pump((0.4,))
# now there should be a SRV lookup
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv"
)
# we should fall back to a direct connection
self.assertEqual(len(clients), 2)
(host, port, client_factory, _timeout, _bindAddress) = clients[1]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=b"testserv")
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_get_well_known(self):
"""Test the behaviour when the .well-known delegates elsewhere
"""
self.mock_resolver.resolve_service.side_effect = lambda _: []
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["target-server"] = "1::f"
test_d = self._make_get_request(b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
self._handle_well_known_connection(
client_factory,
expected_sni=b"testserv",
content=b'{ "m.server": "target-server" }',
)
# there should be a SRV lookup
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.target-server"
)
# now we should get a connection to the target server
self.assertEqual(len(clients), 2)
(host, port, client_factory, _timeout, _bindAddress) = clients[1]
self.assertEqual(host, "1::f")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(
client_factory, expected_sni=b"target-server"
)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(
request.requestHeaders.getRawHeaders(b"host"), [b"target-server"]
)
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
# check the cache expires
self.reactor.pump((48 * 3600,))
self.well_known_cache.expire()
self.assertNotIn(b"testserv", self.well_known_cache)
def test_get_well_known_redirect(self):
"""Test the behaviour when the server name has no port and no SRV record, but
        the .well-known has a 302 redirect
"""
self.mock_resolver.resolve_service.side_effect = lambda _: []
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["target-server"] = "1::f"
test_d = self._make_get_request(b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop()
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
redirect_server = self._make_connection(
client_factory, expected_sni=b"testserv"
)
# send a 302 redirect
self.assertEqual(len(redirect_server.requests), 1)
request = redirect_server.requests[0]
request.redirect(b"https://testserv/even_better_known")
request.finish()
self.reactor.pump((0.1,))
# now there should be another connection
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop()
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
well_known_server = self._make_connection(
client_factory, expected_sni=b"testserv"
)
self.assertEqual(len(well_known_server.requests), 1, "No request after 302")
request = well_known_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/even_better_known")
request.write(b'{ "m.server": "target-server" }')
request.finish()
self.reactor.pump((0.1,))
# there should be a SRV lookup
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.target-server"
)
# now we should get a connection to the target server
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1::f")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(
client_factory, expected_sni=b"target-server"
)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(
request.requestHeaders.getRawHeaders(b"host"), [b"target-server"]
)
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
# check the cache expires
self.reactor.pump((48 * 3600,))
self.well_known_cache.expire()
self.assertNotIn(b"testserv", self.well_known_cache)
def test_get_invalid_well_known(self):
"""
Test the behaviour when the server name has an *invalid* well-known (and no SRV)
"""
self.mock_resolver.resolve_service.side_effect = lambda _: []
self.reactor.lookups["testserv"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# No SRV record lookup yet
self.mock_resolver.resolve_service.assert_not_called()
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop()
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
self._handle_well_known_connection(
client_factory, expected_sni=b"testserv", content=b"NOT JSON"
)
# now there should be a SRV lookup
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv"
)
# we should fall back to a direct connection
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop()
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=b"testserv")
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_get_well_known_unsigned_cert(self):
"""Test the behaviour when the .well-known server presents a cert
not signed by a CA
"""
# we use the same test server as the other tests, but use an agent with
# the config left to the default, which will not trust it (since the
# presented cert is signed by a test CA)
self.mock_resolver.resolve_service.side_effect = lambda _: []
self.reactor.lookups["testserv"] = "1.2.3.4"
config = default_config("test", parse=True)
# Build a new agent and WellKnownResolver with a different tls factory
tls_factory = FederationPolicyForHTTPS(config)
agent = MatrixFederationAgent(
reactor=self.reactor,
tls_client_options_factory=tls_factory,
_srv_resolver=self.mock_resolver,
_well_known_resolver=WellKnownResolver(
self.reactor,
Agent(self.reactor, contextFactory=tls_factory),
well_known_cache=self.well_known_cache,
had_well_known_cache=self.had_well_known_cache,
),
)
test_d = agent.request(b"GET", b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
http_proto = self._make_connection(client_factory, expected_sni=b"testserv")
# there should be no requests
self.assertEqual(len(http_proto.requests), 0)
# and there should be a SRV lookup instead
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv"
)
def test_get_hostname_srv(self):
"""
Test the behaviour when there is a single SRV record
"""
self.mock_resolver.resolve_service.side_effect = lambda _: [
Server(host=b"srvtarget", port=8443)
]
self.reactor.lookups["srvtarget"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# the request for a .well-known will have failed with a DNS lookup error.
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv"
)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8443)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=b"testserv")
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_get_well_known_srv(self):
"""Test the behaviour when the .well-known redirects to a place where there
is a SRV.
"""
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["srvtarget"] = "5.6.7.8"
test_d = self._make_get_request(b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
self.mock_resolver.resolve_service.side_effect = lambda _: [
Server(host=b"srvtarget", port=8443)
]
self._handle_well_known_connection(
client_factory,
expected_sni=b"testserv",
content=b'{ "m.server": "target-server" }',
)
# there should be a SRV lookup
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.target-server"
)
# now we should get a connection to the target of the SRV record
self.assertEqual(len(clients), 2)
(host, port, client_factory, _timeout, _bindAddress) = clients[1]
self.assertEqual(host, "5.6.7.8")
self.assertEqual(port, 8443)
# make a test server, and wire up the client
http_server = self._make_connection(
client_factory, expected_sni=b"target-server"
)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(
request.requestHeaders.getRawHeaders(b"host"), [b"target-server"]
)
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_idna_servername(self):
"""test the behaviour when the server name has idna chars in"""
self.mock_resolver.resolve_service.side_effect = lambda _: []
# the resolver is always called with the IDNA hostname as a native string.
self.reactor.lookups["xn--bcher-kva.com"] = "1.2.3.4"
# this is idna for bücher.com
test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
# No SRV record lookup yet
self.mock_resolver.resolve_service.assert_not_called()
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
# fonx the connection
client_factory.clientConnectionFailed(None, Exception("nope"))
        # the attempt delay on the HostnameEndpoint is 0.3s, so it takes that long
        # before the .well-known request fails.
self.reactor.pump((0.4,))
# now there should have been a SRV lookup
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.xn--bcher-kva.com"
)
# We should fall back to port 8448
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 2)
(host, port, client_factory, _timeout, _bindAddress) = clients[1]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8448)
# make a test server, and wire up the client
http_server = self._make_connection(
client_factory, expected_sni=b"xn--bcher-kva.com"
)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(
request.requestHeaders.getRawHeaders(b"host"), [b"xn--bcher-kva.com"]
)
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_idna_srv_target(self):
"""test the behaviour when the target of a SRV record has idna chars"""
self.mock_resolver.resolve_service.side_effect = lambda _: [
Server(host=b"xn--trget-3qa.com", port=8443) # târget.com
]
self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.xn--bcher-kva.com"
)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients[0]
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8443)
# make a test server, and wire up the client
http_server = self._make_connection(
client_factory, expected_sni=b"xn--bcher-kva.com"
)
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(
request.requestHeaders.getRawHeaders(b"host"), [b"xn--bcher-kva.com"]
)
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
def test_well_known_cache(self):
self.reactor.lookups["testserv"] = "1.2.3.4"
fetch_d = self.well_known_resolver.get_well_known(b"testserv")
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
well_known_server = self._handle_well_known_connection(
client_factory,
expected_sni=b"testserv",
response_headers={b"Cache-Control": b"max-age=1000"},
content=b'{ "m.server": "target-server" }',
)
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"target-server")
# close the tcp connection
well_known_server.loseConnection()
# repeat the request: it should hit the cache
fetch_d = self.well_known_resolver.get_well_known(b"testserv")
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"target-server")
# expire the cache
self.reactor.pump((1000.0,))
# now it should connect again
fetch_d = self.well_known_resolver.get_well_known(b"testserv")
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
self._handle_well_known_connection(
client_factory,
expected_sni=b"testserv",
content=b'{ "m.server": "other-server" }',
)
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"other-server")
def test_well_known_cache_with_temp_failure(self):
"""Test that we refetch well-known before the cache expires, and that
it ignores transient errors.
"""
self.reactor.lookups["testserv"] = "1.2.3.4"
fetch_d = self.well_known_resolver.get_well_known(b"testserv")
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 443)
well_known_server = self._handle_well_known_connection(
client_factory,
expected_sni=b"testserv",
response_headers={b"Cache-Control": b"max-age=1000"},
content=b'{ "m.server": "target-server" }',
)
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"target-server")
# close the tcp connection
well_known_server.loseConnection()
# Get close to the cache expiry, this will cause the resolver to do
# another lookup.
self.reactor.pump((900.0,))
fetch_d = self.well_known_resolver.get_well_known(b"testserv")
# The resolver may retry a few times, so fonx all requests that come along
attempts = 0
while self.reactor.tcpClients:
clients = self.reactor.tcpClients
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
attempts += 1
# fonx the connection attempt, this will be treated as a temporary
# failure.
client_factory.clientConnectionFailed(None, Exception("nope"))
# There's a few sleeps involved, so we have to pump the reactor a
# bit.
self.reactor.pump((1.0, 1.0))
# We expect to see more than one attempt as there was previously a valid
# well known.
self.assertGreater(attempts, 1)
# Resolver should return cached value, despite the lookup failing.
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"target-server")
# Expire both caches and repeat the request
self.reactor.pump((10000.0,))
        # Repeat the request; this time it should fail if the lookup fails.
fetch_d = self.well_known_resolver.get_well_known(b"testserv")
clients = self.reactor.tcpClients
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
client_factory.clientConnectionFailed(None, Exception("nope"))
self.reactor.pump((0.4,))
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, None)
def test_srv_fallbacks(self):
"""Test that other SRV results are tried if the first one fails.
"""
self.mock_resolver.resolve_service.side_effect = lambda _: [
Server(host=b"target.com", port=8443),
Server(host=b"target.com", port=8444),
]
self.reactor.lookups["target.com"] = "1.2.3.4"
test_d = self._make_get_request(b"matrix://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
self.mock_resolver.resolve_service.assert_called_once_with(
b"_matrix._tcp.testserv"
)
# We should see an attempt to connect to the first server
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8443)
# Fonx the connection
client_factory.clientConnectionFailed(None, Exception("nope"))
# There's a 300ms delay in HostnameEndpoint
self.reactor.pump((0.4,))
# Hasn't failed yet
self.assertNoResult(test_d)
        # We should now see an attempt to connect to the second server
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
self.assertEqual(host, "1.2.3.4")
self.assertEqual(port, 8444)
# make a test server, and wire up the client
http_server = self._make_connection(client_factory, expected_sni=b"testserv")
self.assertEqual(len(http_server.requests), 1)
request = http_server.requests[0]
self.assertEqual(request.method, b"GET")
self.assertEqual(request.path, b"/foo/bar")
self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
# finish the request
request.finish()
self.reactor.pump((0.1,))
self.successResultOf(test_d)
class TestCachePeriodFromHeaders(unittest.TestCase):
def test_cache_control(self):
# uppercase
self.assertEqual(
_cache_period_from_headers(
Headers({b"Cache-Control": [b"foo, Max-Age = 100, bar"]})
),
100,
)
# missing value
self.assertIsNone(
_cache_period_from_headers(Headers({b"Cache-Control": [b"max-age=, bar"]}))
)
# hackernews: bogus due to semicolon
self.assertIsNone(
_cache_period_from_headers(
Headers({b"Cache-Control": [b"private; max-age=0"]})
)
)
# github
self.assertEqual(
_cache_period_from_headers(
Headers({b"Cache-Control": [b"max-age=0, private, must-revalidate"]})
),
0,
)
# google
self.assertEqual(
_cache_period_from_headers(
Headers({b"cache-control": [b"private, max-age=0"]})
),
0,
)
def test_expires(self):
self.assertEqual(
_cache_period_from_headers(
Headers({b"Expires": [b"Wed, 30 Jan 2019 07:35:33 GMT"]}),
time_now=lambda: 1548833700,
),
33,
)
# cache-control overrides expires
self.assertEqual(
_cache_period_from_headers(
Headers(
{
b"cache-control": [b"max-age=10"],
b"Expires": [b"Wed, 30 Jan 2019 07:35:33 GMT"],
}
),
time_now=lambda: 1548833700,
),
10,
)
# invalid expires means immediate expiry
self.assertEqual(_cache_period_from_headers(Headers({b"Expires": [b"0"]})), 0)
def _check_logcontext(context):
current = LoggingContext.current_context()
if current is not context:
raise AssertionError("Expected logcontext %s but was %s" % (context, current))
def _build_test_server(connection_creator):
"""Construct a test server
This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol
    Args:
        connection_creator (IOpenSSLServerConnectionCreator): thing to build
            SSL connections
Returns:
TLSMemoryBIOProtocol
"""
server_factory = Factory.forProtocol(HTTPChannel)
# Request.finish expects the factory to have a 'log' method.
server_factory.log = _log_request
server_tls_factory = TLSMemoryBIOFactory(
connection_creator, isClient=False, wrappedFactory=server_factory
)
return server_tls_factory.buildProtocol(None)
def _log_request(request):
"""Implements Factory.log, which is expected by Request.finish"""
logger.info("Completed request %s", request)
@implementer(IPolicyForHTTPS)
class TrustingTLSPolicyForHTTPS(object):
"""An IPolicyForHTTPS which checks that the certificate belongs to the
right server, but doesn't check the certificate chain."""
def creatorForNetloc(self, hostname, port):
certificateOptions = OpenSSLCertificateOptions()
return ClientTLSOptions(hostname, certificateOptions.getContext())
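# A minimal, self-contained sketch (not Synapse's actual helper) of the
# cache-period behaviour exercised by TestCachePeriodFromHeaders above. It
# works on plain header strings rather than twisted Headers objects, and the
# name `sketch_cache_period` is illustrative only.
import re
import time
from email.utils import parsedate_to_datetime
def sketch_cache_period(cache_control=None, expires=None, time_now=time.time):
    """Return a cache period in seconds, or None if no usable directive."""
    if cache_control is not None:
        # Cache-Control max-age takes precedence over Expires
        match = re.search(r"(?:^|,)\s*max-age\s*=\s*(\d+)", cache_control, re.I)
        if match:
            return int(match.group(1))
        # a max-age without a usable value (e.g. "max-age=," or one broken by a
        # stray semicolon, as in the "hackernews" case above) is ignored here
    if expires is not None:
        try:
            expiry = parsedate_to_datetime(expires).timestamp()
        except (TypeError, ValueError):
            return 0  # an unparseable Expires header means "expire immediately"
        return int(expiry - time_now())
    return None
# Examples:
#   sketch_cache_period(cache_control="foo, Max-Age = 100, bar") -> 100
#   sketch_cache_period(expires="Wed, 30 Jan 2019 07:35:33 GMT",
#                       time_now=lambda: 1548833700) -> 33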
| 36.26066
| 90
| 0.641795
|
55c8772efe0a41172ce7a0a60370431ce1fc74f1
| 2,060
|
py
|
Python
|
app_community/models.py
|
visoon0012/plover.cloud
|
04542628758d969085eb6172928165fddb5d2677
|
[
"Apache-2.0"
] | 1
|
2018-05-15T08:34:14.000Z
|
2018-05-15T08:34:14.000Z
|
app_community/models.py
|
visoon0012/plover.cloud
|
04542628758d969085eb6172928165fddb5d2677
|
[
"Apache-2.0"
] | 7
|
2020-06-05T18:09:20.000Z
|
2022-03-11T23:20:49.000Z
|
app_community/models.py
|
visoon0012/plover.cloud
|
04542628758d969085eb6172928165fddb5d2677
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from app_user.models import User
class CommunityKeyword(models.Model):
"""
圈子 - 关键字
"""
name = models.CharField(max_length=190) # 关键字名称
info = models.TextField() # 关键字简介
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
class Community(models.Model):
"""
圈子
"""
name = models.CharField(max_length=190) # 圈子名称
info = models.TextField(blank=True) # 圈子简介
logo_img = models.TextField(blank=True) # logo图片链接
qr_code_img = models.TextField(blank=True) # 群二维码图片链接
keywords = models.ManyToManyField(CommunityKeyword) # 圈子关键字
password = models.CharField(max_length=190) # 圈子密钥 - 有密钥可以直接加入圈子
share_code = models.CharField(max_length=190) # 圈子分享码(通过这个可以快速打开该圈子界面)
can_search = models.BooleanField(default=True) # 能否被搜索到
    # related_name values below are illustrative additions; without them Django
    # rejects two ManyToManyFields to the same model (reverse-accessor clash)
    managers = models.ManyToManyField(User, related_name='managed_communities') # administrators
    members = models.ManyToManyField(User, related_name='joined_communities') # members (administrators are also members)
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
class CommunityTwitter(models.Model):
"""
圈子里的推特(微博) - 只能发一张图
"""
user = models.ForeignKey(User, on_delete=models.CASCADE) # 发布者
info = models.TextField(blank=True) # 内容
img = models.TextField(blank=True) # 图片链接
communities = models.ManyToManyField(Community) # 所属圈子(在哪个圈子可以看到)
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
class CommunityTwitterComments(models.Model):
"""
推特评论
"""
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='评论人') # 发布者
to_user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE, related_name='被评论人') # 被评论人
info = models.TextField() # 内容
community_twitter = models.ForeignKey(CommunityTwitter, on_delete=models.CASCADE) # 评论的推特
created_time = models.DateTimeField(auto_now_add=True)
updated_time = models.DateTimeField(auto_now=True)
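# A minimal usage sketch for the models above, assuming a configured Django
# project with this app installed. The helper names are illustrative and are
# not part of the original application.
def visible_posts(community):
    """Return the CommunityTwitter posts visible in the given community."""
    # `communities` is a ManyToManyField, so filtering on it follows the M2M join
    return CommunityTwitter.objects.filter(communities=community).order_by('-created_time')
def is_manager(community, user):
    """Return True if `user` is one of the community's administrators."""
    return community.managers.filter(pk=user.pk).exists()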
| 33.770492
| 115
| 0.715049
|
44c2cc349063127e30071e77c337dbdc19f26ab5
| 78
|
py
|
Python
|
data/micro-benchmark/classes/static_method_call/main.py
|
vitsalis/pycg-evaluation
|
ce37eb5668465b0c17371914e863d699826447ee
|
[
"Apache-2.0"
] | 121
|
2020-12-16T20:31:37.000Z
|
2022-03-21T20:32:43.000Z
|
data/micro-benchmark/classes/static_method_call/main.py
|
vitsalis/pycg-evaluation
|
ce37eb5668465b0c17371914e863d699826447ee
|
[
"Apache-2.0"
] | 24
|
2021-03-13T00:04:00.000Z
|
2022-03-21T17:28:11.000Z
|
data/micro-benchmark/classes/static_method_call/main.py
|
vitsalis/pycg-evaluation
|
ce37eb5668465b0c17371914e863d699826447ee
|
[
"Apache-2.0"
] | 19
|
2021-03-23T10:58:47.000Z
|
2022-03-24T19:46:50.000Z
|
class MyClass:
@staticmethod
def func():
pass
MyClass.func()
| 11.142857
| 17
| 0.589744
|
c37688e623bdc7407c85b21af2c6678c4b756bd1
| 2,493
|
py
|
Python
|
version_increment.py
|
pfeerick/platformio_version_increment
|
6afffabd2a254c679a5a3fbd5a08e609b3f3b3c2
|
[
"MIT"
] | null | null | null |
version_increment.py
|
pfeerick/platformio_version_increment
|
6afffabd2a254c679a5a3fbd5a08e609b3f3b3c2
|
[
"MIT"
] | null | null | null |
version_increment.py
|
pfeerick/platformio_version_increment
|
6afffabd2a254c679a5a3fbd5a08e609b3f3b3c2
|
[
"MIT"
] | null | null | null |
#
# version_increment.py - Simple versioning script for Platformio
#
# Copyright (C) 2020 Davide Perini
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# You should have received a copy of the MIT License along with this program.
# If not, see <https://opensource.org/licenses/MIT/>.
#
import sys
import datetime
import os
## DO NOT EDIT THIS FILE, edit version file if you want to start from a different version
BUILD_NUMBER = 'version'
VERSION_FILE = 'Version.h'
version = '0.1.'
## Increment version during the upload stage only
upload = False
n = len(sys.argv)
for i in range(1, n):
if sys.argv[i] == "upload":
        upload = True
if upload:
print("Version Increment Scritp ARGS=")
print (sys.argv[1:])
build_no = 0
try:
with open(BUILD_NUMBER) as f:
build_no = f.readline()
version = build_no[0:build_no.rindex('.')+1]
build_no = int(build_no[build_no.rindex('.')+1:]) + 1
except:
print('No version file found or incorrect data in it. Starting from 0.1.0')
build_no = 1
with open(BUILD_NUMBER, 'w+') as f:
f.write(version + str(build_no))
print('Build number: {}'.format(version + str(build_no)))
hf = """
// AUTO GENERATED FILE FROM version_increment.py, DO NOT EDIT THIS FILE
#ifndef VERSION
#define VERSION "{}"
#endif
#ifndef BUILD_TIMESTAMP
#define BUILD_TIMESTAMP "{}"
#endif
""".format(version + str(build_no), datetime.datetime.now(), version+str(build_no))
if (os.environ.get('PLATFORMIO_INCLUDE_DIR') != None):
VERSION_FILE = os.environ.get('PLATFORMIO_INCLUDE_DIR') + "/" + VERSION_FILE
elif os.path.exists("include"):
VERSION_FILE = "include/" + VERSION_FILE
with open(VERSION_FILE, 'w+') as f:
f.write(hf)
else:
print("Version Increment Script. Nothing to do. ARGS=")
print (sys.argv[1:])
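# A small pure-Python sketch of the version-string handling above, extracted
# for clarity. `bump_patch` is an illustrative name and not part of the
# original script; it mirrors the rindex()-based parsing in the try block.
def bump_patch(version_string, default='0.1.1'):
    """Increment the last dot-separated component of a version string."""
    try:
        prefix = version_string[:version_string.rindex('.') + 1]
        patch = int(version_string[version_string.rindex('.') + 1:]) + 1
    except ValueError:
        # no dot, or a non-numeric last component: fall back to the default
        return default
    return prefix + str(patch)
# Example: bump_patch('0.1.41') == '0.1.42'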
| 33.24
| 89
| 0.662655
|
2e22e67c633c5f2f3bcd20f65a8bf557213ba809
| 3,524
|
py
|
Python
|
softlearning/policies/base_policy.py
|
YaoYao1995/mbpo
|
b9571e469459ce3a632b19dc3fee68c9ac3857b2
|
[
"MIT"
] | null | null | null |
softlearning/policies/base_policy.py
|
YaoYao1995/mbpo
|
b9571e469459ce3a632b19dc3fee68c9ac3857b2
|
[
"MIT"
] | null | null | null |
softlearning/policies/base_policy.py
|
YaoYao1995/mbpo
|
b9571e469459ce3a632b19dc3fee68c9ac3857b2
|
[
"MIT"
] | null | null | null |
from contextlib import contextmanager
from collections import OrderedDict
import numpy as np
from serializable import Serializable
class BasePolicy(Serializable):
def __init__(self):
self._deterministic = False
def reset(self):
"""Reset and clean the policy."""
raise NotImplementedError
def actions(self, conditions):
"""Compute (symbolic) actions given conditions (observations)"""
raise NotImplementedError
def log_pis(self, conditions, actions):
"""Compute (symbolic) log probs for given observations and actions."""
raise NotImplementedError
def actions_np(self, conditions):
"""Compute (numeric) actions given conditions (observations)"""
raise NotImplementedError
def log_pis_np(self, conditions, actions):
"""Compute (numeric) log probs for given observations and actions."""
raise NotImplementedError
@contextmanager
def set_deterministic(self, deterministic=True):
"""Context manager for changing the determinism of the policy.
Args:
            deterministic (`bool`): Value to set ``self._deterministic`` to
                during the context. The value will be reset back to the
                previous value when the context exits.
"""
was_deterministic = self._deterministic
self._deterministic = deterministic
yield
self._deterministic = was_deterministic
def get_diagnostics(self, conditions):
"""Return diagnostic information of the policy.
Arguments:
conditions: Observations to run the diagnostics for.
Returns:
diagnostics: OrderedDict of diagnostic information.
"""
diagnostics = OrderedDict({})
return diagnostics
def __getstate__(self):
state = Serializable.__getstate__(self)
state['pickled_weights'] = self.get_weights()
return state
def __setstate__(self, state):
Serializable.__setstate__(self, state)
self.set_weights(state['pickled_weights'])
class LatentSpacePolicy(BasePolicy):
def __init__(self, *args, smoothing_coefficient=None, **kwargs):
super(LatentSpacePolicy, self).__init__(*args, **kwargs)
assert smoothing_coefficient is None or 0 <= smoothing_coefficient <= 1
self._smoothing_alpha = smoothing_coefficient or 0
self._smoothing_beta = (
np.sqrt(1.0 - np.power(self._smoothing_alpha, 2.0))
/ (1.0 - self._smoothing_alpha))
self._reset_smoothing_x()
self._smooth_latents = False
def _reset_smoothing_x(self):
self._smoothing_x = np.zeros((1, *self._output_shape))
def actions_np(self, conditions):
if self._deterministic:
return self.deterministic_actions_model.predict(conditions)
elif self._smoothing_alpha == 0:
return self.actions_model.predict(conditions)
else:
alpha, beta = self._smoothing_alpha, self._smoothing_beta
raw_latents = self.latents_model.predict(conditions)
self._smoothing_x = (
alpha * self._smoothing_x + (1.0 - alpha) * raw_latents)
latents = beta * self._smoothing_x
return self.actions_model_for_fixed_latents.predict(
[*conditions, latents])
def reset(self):
self._reset_smoothing_x()
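# A minimal concrete policy, sketched here for illustration (it is not part of
# softlearning), showing how the BasePolicy interface and the
# set_deterministic() context manager are meant to be used. The class name is
# an assumption.
class UniformRandomPolicy(BasePolicy):
    def __init__(self, action_dim, seed=None):
        super(UniformRandomPolicy, self).__init__()
        self._action_dim = action_dim
        self._rng = np.random.RandomState(seed)
    def reset(self):
        pass
    def actions_np(self, conditions):
        batch_size = len(conditions[0])
        if self._deterministic:
            # deterministic mode: always return the midpoint (zero) action
            return np.zeros((batch_size, self._action_dim))
        return self._rng.uniform(-1.0, 1.0, size=(batch_size, self._action_dim))
# Usage sketch:
#   policy = UniformRandomPolicy(action_dim=2)
#   stochastic = policy.actions_np([np.zeros((5, 3))])
#   with policy.set_deterministic(True):
#       deterministic = policy.actions_np([np.zeros((5, 3))])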
| 35.59596
| 80
| 0.643019
|
1bdb75a28d65173fedb3e415bd76767183ccbe49
| 2,195
|
py
|
Python
|
setup.py
|
Yelp/data_pipeline_avro_util
|
25397406bccbd0553e6ba822b0425d5e8bdea73f
|
[
"Apache-2.0"
] | 30
|
2016-11-17T18:30:18.000Z
|
2019-09-23T17:06:22.000Z
|
setup.py
|
tomzhang/data_pipeline_avro_util
|
25397406bccbd0553e6ba822b0425d5e8bdea73f
|
[
"Apache-2.0"
] | 3
|
2016-11-17T19:41:11.000Z
|
2017-09-08T04:41:42.000Z
|
setup.py
|
tomzhang/data_pipeline_avro_util
|
25397406bccbd0553e6ba822b0425d5e8bdea73f
|
[
"Apache-2.0"
] | 9
|
2016-11-29T22:38:05.000Z
|
2020-07-04T18:26:46.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from setuptools import find_packages
from setuptools import setup
import data_pipeline_avro_util
readme = open('README.md').read()
doclink = """
Documentation
-------------
The full documentation is at
TODO (DATAPIPE-2030|abrar): upload servicedocs to public server."""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
# py2 + setuptools asserts isinstance(name, str) so this needs str()
name=str('data_pipeline_avro_util'),
version=data_pipeline_avro_util.__version__,
description="Common functionality build on top of Apache Avro",
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author=data_pipeline_avro_util.__author__,
author_email=data_pipeline_avro_util.__email__,
url='https://github.com/Yelp/data_pipeline_avro_util',
packages=find_packages(exclude=['tests*']),
install_requires=[
'cached-property>=0.1.5',
'yelp-avro==1.9.2'
],
zip_safe=False,
keywords='data_pipeline_avro_util',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| 35.403226
| 72
| 0.691572
|
a756a7d042f85713027cad2cc069998b03460491
| 1,463
|
py
|
Python
|
examples/full_simple_example.py
|
kciesielski/spacy-ru
|
b2e578a0a542613be739cfa82c387dbb88f4acc1
|
[
"MIT"
] | 234
|
2018-08-17T12:56:52.000Z
|
2022-03-12T08:24:58.000Z
|
examples/full_simple_example.py
|
kciesielski/spacy-ru
|
b2e578a0a542613be739cfa82c387dbb88f4acc1
|
[
"MIT"
] | 21
|
2018-08-24T12:47:54.000Z
|
2021-07-15T18:10:39.000Z
|
examples/full_simple_example.py
|
kciesielski/spacy-ru
|
b2e578a0a542613be739cfa82c387dbb88f4acc1
|
[
"MIT"
] | 39
|
2018-07-17T08:14:22.000Z
|
2021-11-12T07:23:41.000Z
|
import pandas
import spacy
from tabulate import tabulate
def pbool(x):
return '+' if x else '-'
def entity_at(t):
# print(t.i, t.idx, dir(t))
entity = [e for e in t.doc.ents if e.start == t.i]
if entity:
return "{}: {}".format(t.ent_type_, entity[0].text)
return ''
def print_tokens(nlp, doc):
for s in doc.sents:
print('Sentence: "{}"'.format(s))
df = pandas.DataFrame(columns=['Shape', 'Vocab', 'POS', 'Text', 'Lemma', 'Entity', 'Dep', 'Head'],
data=[(t.shape_, pbool(t.orth_ in nlp.vocab), t.pos_,
t.text, t.lemma_, entity_at(t), t.dep_, t.head) for t in s])
print(tabulate(df, showindex=False, headers=df.columns))
sample_sentences = "Привет России и миру! Как твои дела? Сегодня неплохая погода."
if __name__ == '__main__':
nlp = spacy.load('ru2')
nlp.add_pipe(nlp.create_pipe('sentencizer'), first=True)
print("Pipeline: {}".format(nlp.pipe_names))
doc = nlp(sample_sentences)
print("Sample sentences: {}".format(sample_sentences))
print("\nResults for ru2 model: ")
print_tokens(nlp, doc)
nlp = spacy.load('ru2', disable=['tagger', 'parser', 'ner'])
nlp.add_pipe(nlp.create_pipe('sentencizer'), first=True)
doc = nlp(sample_sentences)
print("\n"+"~"*70)
print('\nSwitched to lemmatizer and POS from pymorphy2')
print("Results for empty model: ")
print_tokens(nlp, doc)
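# A short follow-up sketch, not part of the original example: pull out
# (entity text, label) pairs directly instead of printing the per-token table.
# It reuses whichever pipeline is passed in (e.g. the `ru2` model loaded above).
def extract_entities(nlp, text):
    """Return a list of (entity text, entity label) tuples for `text`."""
    doc = nlp(text)
    return [(ent.text, ent.label_) for ent in doc.ents]
# Example (output depends on the model): extract_entities(nlp, sample_sentences)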
| 34.023256
| 106
| 0.61039
|
6cdb552ab6eacfd2803593719da8fa673f914c01
| 5,197
|
py
|
Python
|
src/third_party/wiredtiger/test/suite/test_txn23.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/test/suite/test_txn23.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/test/suite/test_txn23.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_txn23.py
# Transactions: ensure read timestamp is not cleared under cache pressure
#
import wttest
from wtdataset import SimpleDataSet
from wtscenario import make_scenarios
class test_txn23(wttest.WiredTigerTestCase):
conn_config = 'cache_size=5MB'
format_values = [
('integer-row', dict(key_format='i', value_format='S', extraconfig='')),
('column', dict(key_format='r', value_format='S', extraconfig='')),
('column-fix', dict(key_format='r', value_format='8t',
extraconfig='allocation_size=512,leaf_page_max=512')),
]
scenarios = make_scenarios(format_values)
def large_updates(self, uri, value, ds, nrows, commit_ts):
# Update a large number of records.
cursor = self.session.open_cursor(uri)
for i in range(1, nrows + 1):
self.session.begin_transaction()
cursor[ds.key(i)] = value
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(commit_ts))
cursor.close()
def check(self, check_value, uri, ds, nrows, read_ts):
for i in range(1, nrows + 1):
self.session.begin_transaction('read_timestamp=' + self.timestamp_str(read_ts))
cursor = self.session.open_cursor(uri)
self.assertEqual(cursor[ds.key(i)], check_value)
cursor.close()
self.session.commit_transaction()
def test_txn(self):
# Create a table.
uri_1 = "table:txn23_1"
ds_1 = SimpleDataSet(
self, uri_1, 0, key_format=self.key_format, value_format=self.value_format,
config=self.extraconfig)
ds_1.populate()
# Create another table.
uri_2 = "table:txn23_2"
ds_2 = SimpleDataSet(
self, uri_2, 0, key_format=self.key_format, value_format=self.value_format,
config=self.extraconfig)
ds_2.populate()
# Pin oldest and stable to timestamp 10.
self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) +
',stable_timestamp=' + self.timestamp_str(10))
if self.value_format == '8t':
# Values are 1/500 the size, so in principle maybe we should use 500x as many rows.
# However, that takes a really long time, and to some extent we should also take the
# in-memory size of updates into account, so what I've done is pick a number of rows
# that makes it take about 2x the time of the VLCS and row-store versions. Hopefully
# that's enough memory usage to exercise the intended code paths.
nrows = 8000
value_a = 97
value_b = 98
value_c = 99
value_d = 100
else:
nrows = 2000
value_a = "aaaaa" * 100
value_b = "bbbbb" * 100
value_c = "ccccc" * 100
value_d = "ddddd" * 100
# Perform several updates.
self.large_updates(uri_1, value_d, ds_1, nrows, 20)
self.large_updates(uri_1, value_c, ds_1, nrows, 30)
self.large_updates(uri_1, value_b, ds_1, nrows, 40)
self.large_updates(uri_1, value_a, ds_1, nrows, 50)
self.large_updates(uri_2, value_d, ds_2, nrows, 20)
self.large_updates(uri_2, value_c, ds_2, nrows, 30)
self.large_updates(uri_2, value_b, ds_2, nrows, 40)
self.large_updates(uri_2, value_a, ds_2, nrows, 50)
# Verify data is visible and correct.
self.check(value_d, uri_1, ds_1, nrows, 20)
self.check(value_c, uri_1, ds_1, nrows, 30)
self.check(value_b, uri_1, ds_1, nrows, 40)
self.check(value_a, uri_1, ds_1, nrows, 50)
self.check(value_d, uri_2, ds_2, nrows, 20)
self.check(value_c, uri_2, ds_2, nrows, 30)
self.check(value_b, uri_2, ds_2, nrows, 40)
self.check(value_a, uri_2, ds_2, nrows, 50)
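# A standalone sketch of the read-at-timestamp pattern used by check() above.
# It is not part of the WiredTiger test suite and relies only on the session
# and cursor calls already exercised in this test; the helper name is an
# assumption.
def read_at_timestamp(session, uri, key, read_ts, timestamp_str):
    """Read `key` from `uri` as of timestamp `read_ts` and return its value."""
    session.begin_transaction('read_timestamp=' + timestamp_str(read_ts))
    cursor = session.open_cursor(uri)
    try:
        return cursor[key]
    finally:
        cursor.close()
        # a read-only transaction can simply be committed once the read is done
        session.commit_transaction()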
| 41.91129
| 96
| 0.659226
|
96ff2e8fe487ef33d4331560cb364feb5a8c8221
| 8,832
|
py
|
Python
|
tests/test_rfc3709.py
|
CBonnell/pyasn1-alt-modules
|
cd3773ceaa6ab31b80b0b4013818ac47ee6215b8
|
[
"BSD-2-Clause"
] | 2
|
2021-06-15T16:24:39.000Z
|
2022-03-28T04:41:59.000Z
|
tests/test_rfc3709.py
|
CBonnell/pyasn1-alt-modules
|
cd3773ceaa6ab31b80b0b4013818ac47ee6215b8
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_rfc3709.py
|
CBonnell/pyasn1-alt-modules
|
cd3773ceaa6ab31b80b0b4013818ac47ee6215b8
|
[
"BSD-2-Clause"
] | 1
|
2022-01-25T16:00:09.000Z
|
2022-01-25T16:00:09.000Z
|
#
# This file is part of pyasn1-alt-modules software.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_alt_modules import pem
from pyasn1_alt_modules import rfc5280
from pyasn1_alt_modules import rfc3709
from pyasn1_alt_modules import opentypemap
class CertificateExtnWithUrlTestCase(unittest.TestCase):
pem_text = """\
MIIC9zCCAn2gAwIBAgIJAKWzVCgbsG46MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkwNTE0MTAwMjAwWhcNMjAwNTEzMTAwMjAwWjBlMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xGzAZBgNVBAoTElZp
Z2lsIFNlY3VyaXR5IExMQzEaMBgGA1UEAxMRbWFpbC52aWdpbHNlYy5jb20wdjAQ
BgcqhkjOPQIBBgUrgQQAIgNiAATwUXZUseiOaqWdrClDCMbp9YFAM87LTmFirygp
zKDU9cfqSCg7zBDIphXCwMcS9zVWDoStCbcvN0jw5CljHcffzpHYX91P88SZRJ1w
4hawHjOsWxvM3AkYgZ5nfdlL7EajggEdMIIBGTALBgNVHQ8EBAMCB4AwQgYJYIZI
AYb4QgENBDUWM1RoaXMgY2VydGlmaWNhdGUgY2Fubm90IGJlIHRydXN0ZWQgZm9y
IGFueSBwdXJwb3NlLjAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwHwYD
VR0jBBgwFoAU8jXbNATapVXyvWkDmbBi7OIVCMEwgYUGCCsGAQUFBwEMBHkwd6J1
oHMwcTBvMG0WCWltYWdlL3BuZzAzMDEwDQYJYIZIAWUDBAIBBQAEIJtBNrMSSNo+
6Rwqwctmcy0qf68ilRuKEmlf3GLwGiIkMCsWKWh0dHA6Ly93d3cudmlnaWxzZWMu
Y29tL3ZpZ2lsc2VjX2xvZ28ucG5nMAoGCCqGSM49BAMDA2gAMGUCMGhfLH4kZaCD
H43A8m8mHCUpYt9unT0qYu4TCMaRuOTYEuqj3qtuwyLcfAGuXKp/oAIxAIrPY+3y
Pj22pmfmQi5w21UljqoTj/+lQLkU3wfy5BdVKBwI0GfEA+YL3ctSzPNqAA==
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
extn_list = []
for extn in asn1Object['tbsCertificate']['extensions']:
extn_list.append(extn['extnID'])
if extn['extnID'] == rfc3709.id_pe_logotype:
s = extn['extnValue']
logotype, rest = der_decoder(s, rfc3709.LogotypeExtn())
self.assertFalse(rest)
self.assertTrue(logotype.prettyPrint())
self.assertEqual(s, der_encoder(logotype))
ids = logotype['subjectLogo']['direct']['image'][0]['imageDetails']
self.assertEqual( "image/png", ids['mediaType'])
expected = "http://www.vigilsec.com/vigilsec_logo.png"
self.assertEqual(expected, ids['logotypeURI'][0])
self.assertIn(rfc3709.id_pe_logotype, extn_list)
def testExtensionsMap(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
certificateExtensionsMap = opentypemap.get('certificateExtensionsMap')
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] in certificateExtensionsMap:
extnValue, rest = der_decoder(extn['extnValue'],
asn1Spec=certificateExtensionsMap[extn['extnID']])
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
class CertificateExtnWithDataTestCase(unittest.TestCase):
pem_text = """\
MIIJJDCCCAygAwIBAgIRAPIGo/5ScWbpAAAAAFwQBqkwDQYJKoZIhvcNAQELBQAw
gbkxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL
Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg
MjAxOCBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxLTAr
BgNVBAMTJEVudHJ1c3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gVk1DMTAeFw0x
OTA4MzAxNDMyMzlaFw0yMDAyMjUxNTAyMzZaMIIBjTEOMAwGA1UEERMFMTAwMTcx
CzAJBgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazERMA8GA1UEBxMITmV3IFlv
cmsxGDAWBgNVBAkTDzI3MCBQYXJrIEF2ZW51ZTETMBEGCysGAQQBgjc8AgEDEwJV
UzEZMBcGCysGAQQBgjc8AgECEwhEZWxhd2FyZTEfMB0GA1UEChMWSlBNb3JnYW4g
Q2hhc2UgYW5kIENvLjEdMBsGA1UEDxMUUHJpdmF0ZSBPcmdhbml6YXRpb24xNzA1
BgNVBAsTLkpQTUMgRmlyc3QgVmVyaWZpZWQgTWFyayBDZXJ0aWZpY2F0ZSBXb3Js
ZHdpZGUxDzANBgNVBAUTBjY5MTAxMTEXMBUGCisGAQQBg55fAQQTBzIwMTUzODkx
EjAQBgorBgEEAYOeXwEDEwJVUzEmMCQGCisGAQQBg55fAQITFmh0dHBzOi8vd3d3
LnVzcHRvLmdvdi8xHzAdBgNVBAMTFkpQTW9yZ2FuIENoYXNlIGFuZCBDby4wggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCNLY+etlX06q1MxA1VT/P20h1i
eFGTzX4fqSQNG+ypmjNfLa8YXraO1v1hahenkRUWrVPW0Hq3zKNJcCDmosox6+tB
59u0b1xgN8y8D05AEC7qoVVdbaWKENMxCN4CDfST6d3YOqApjqEFAGZ71s39tRRG
kmWGJb4jKXcUX8FWV8w/vjKrpipZ8JsX2tuOp2uxFLkmi+V7gvN8tpbHUipP5K7L
190VOBytSWPudXefnYG3UWRfwah7Fq1bKYT/cCwStUm8XlfA8nUumeVsAiyC6phs
adn26MYiSddsBU08TGthmunLAO0+shaBy6jHYZxMa37S67vVlDpxbeF+TPVXAgMB
AAGjggROMIIESjATBgorBgEEAdZ5AgQDAQH/BAIFADCCArAGCCsGAQUFBwEMBIIC
ojCCAp6iggKaoIICljCCApIwggKOMIICihYNaW1hZ2Uvc3ZnK3htbDAzMDEwDQYJ
YIZIAWUDBAIBBQAEIBnwW6ChGgWWIRn3qn/xGAOlhDflA3z5jhZcZTNDlxF5MIIC
QhaCAj5kYXRhOmltYWdlL3N2Zyt4bWw7YmFzZTY0LEg0c0lBQUFBQUFBQUFJV1Iz
V3JqTUJCR3I1dW5tR3F2Rml4NUpQODBObkZLRTVhbTRFSmhJYmVMazZpT1dhOXRa
TWQyOXVrN2NsTG9SV25CMHNENGNPYVR0TGdmLzVYUWE5TVdkWlV3S1pDQnJ2YjFv
YWp5aEoyNlZ6NW45OHZaNHBaemVOU1ZObGxYbXhnZUR2Vk93MU5abnRwdWFvRlNB
b1YwNFBmMkVYNk5UVzA2ZUNsUE9YK3FRRXpON1dWR0RLRkFoTldwS0ErQVB3RTRK
MzNiNXg5REtBYTdyTlV2cG40dFNwMndycWpPRElwRHd0THNyTTBmeVlCaVYyM0Nq
bDNYeEs0N0RJTVlQRkdiM0ZXSTZKTHZpc1JqV1ZSL1B3TmxGRVh1OUpmTmJtQk1H
RFlqZy9PMTlvVWVWclh0QWtJWTBEY0o0N2JKOXBTb01iclZwdGVNd3VmTDJjMml5
Ym9qVU5veVlUOFFnL1VxWWtCNW41VW5QQWZYU2pub0tPbEl1eW5oOVRJVTh1Z3JF
YVMrVC9lRzZRWDh6OXl2YkdIZ0VLZjJ5S1h3dU9Sa2VsOGJQeFJoUHhtSnN0TDBT
bi9qOUtXWU8yR3dsM2EremNhbmhOYTV0YzZORkdHcVVFUUVwVmY0R3lVNnhOMnRx
WGgwWXQrM1BpcEhlK2l0cElRMGg0VHBoWnRrQ3plM0d6M2NjdllHbkp0cjZKVUNB
QUE9MCIGA1UdEQQbMBmCF2V4Y2hhZGRldi5sYWJtb3JnYW4uY29tMBMGA1UdJQQM
MAoGCCsGAQUFBwMfMA4GA1UdDwEB/wQEAwIHgDBmBggrBgEFBQcBAQRaMFgwIwYI
KwYBBQUHMAGGF2h0dHA6Ly9vY3NwLmVudHJ1c3QubmV0MDEGCCsGAQUFBzAChiVo
dHRwOi8vYWlhLmVudHJ1c3QubmV0L3ZtYzEtY2hhaW4uY2VyMDIGA1UdHwQrMCkw
J6AloCOGIWh0dHA6Ly9jcmwuZW50cnVzdC5uZXQvdm1jMWNhLmNybDBPBgNVHSAE
SDBGMDYGCmCGSAGG+mwKAQswKDAmBggrBgEFBQcCARYaaHR0cDovL3d3dy5lbnRy
dXN0Lm5ldC9ycGEwDAYKKwYBBAGDnl8BATAfBgNVHSMEGDAWgBSLtjl20DSQpj9i
4WTqPrz0fEahczAdBgNVHQ4EFgQUxAJ+yoDhzpPUzAPWKBYxg108dU0wCQYDVR0T
BAIwADANBgkqhkiG9w0BAQsFAAOCAQEAnqdB/vcwxFcxAlyCK0W5HOthXUdXRg9a
GwPDupqmLq2rKfyysZXonJJfr8jqO0f3l6TWTTJlXHljAwwXMtg3T3ngLyEzip5p
g0zH7s5eXjmWRhOeuHt21o611bXDbUNFTF0IpbYBTgOwAz/+k3XLVehf8dW7Y0Lr
VkzxJ6U82NxmqjaAnkm+H127x5/jPAr4LLD4gZfqFaHzw/ZLoS+fXFGs+dpuYE4s
n+xe0msYMu8qWABiMGA+MCKl45Dp5di+c2fyXtKyQ3rKI8XXZ0nN4bXK7DZd+3E3
kbpmR6cDliloU808Bi/erMkrfUHRoZ2d586lkmwkLcoDkJ/yPD+Jhw==
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
extn_list = []
for extn in asn1Object['tbsCertificate']['extensions']:
extn_list.append(extn['extnID'])
if extn['extnID'] == rfc3709.id_pe_logotype:
s = extn['extnValue']
logotype, rest = der_decoder(s, rfc3709.LogotypeExtn())
self.assertFalse(rest)
self.assertTrue(logotype.prettyPrint())
self.assertEqual(s, der_encoder(logotype))
ids = logotype['subjectLogo']['direct']['image'][0]['imageDetails']
self.assertEqual("image/svg+xml", ids['mediaType'])
self.assertEqual(
"data:image/svg+xml;base64", ids['logotypeURI'][0][0:25])
self.assertIn(rfc3709.id_pe_logotype, extn_list)
def testExtensionsMap(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
certificateExtensionsMap = opentypemap.get('certificateExtensionsMap')
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] in certificateExtensionsMap:
extnValue, rest = der_decoder(extn['extnValue'],
asn1Spec=certificateExtensionsMap[extn['extnID']])
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| 45.525773
| 83
| 0.809103
|
ac1b357bb8c893d29a52df09742119fd238a3502
| 3,160
|
py
|
Python
|
setup.py
|
deralexxx/timesketch
|
61b83da377b589438a00ab4ca40aaf048e6907c7
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
deralexxx/timesketch
|
61b83da377b589438a00ab4ca40aaf048e6907c7
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
deralexxx/timesketch
|
61b83da377b589438a00ab4ca40aaf048e6907c7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the setup file for the project. The standard setup rules apply:
python setup.py build
sudo python setup.py install
"""
from __future__ import print_function
from __future__ import unicode_literals
import glob
import os
import sys
import pkg_resources
from setuptools import find_packages
from setuptools import setup
from timesketch import version
version_tuple = (sys.version_info[0], sys.version_info[1])
if version_tuple < (3, 6):
print(
(
"Unsupported Python version: {0:s}, version 3.6 or higher " "required."
).format(sys.version)
)
sys.exit(1)
def parse_requirements_from_file(path):
"""Parses requirements from a requirements file.
Args:
path (str): path to the requirements file.
Yields:
str: package resource requirement.
"""
with open(path, "r") as file_object:
file_contents = file_object.read()
for req in pkg_resources.parse_requirements(file_contents):
try:
requirement = str(req.req)
except AttributeError:
requirement = str(req)
yield requirement
timesketch_description = (
"Timesketch is a web based tool for collaborative forensic timeline "
"analysis. Using sketches you and your collaborators can easily organize "
"timelines and analyze them all at the same time. Add meaning to "
"your raw data with rich annotations, comments, tags and stars."
)
setup(
name="timesketch",
version=version.get_version(),
description="Digital forensic timeline analysis",
long_description=timesketch_description,
license="Apache License, Version 2.0",
url="http://www.timesketch.org/",
maintainer="Timesketch development team",
maintainer_email="timesketch-dev@googlegroups.com",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
data_files=[
("share/timesketch", glob.glob(os.path.join("data", "*.*"))),
("share/timesketch/linux", glob.glob(os.path.join("data", "linux", "*.*"))),
("share/doc/timesketch", ["AUTHORS", "LICENSE", "README.md"]),
],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
entry_points={"console_scripts": ["tsctl=timesketch.tsctl:cli"]},
install_requires=parse_requirements_from_file("requirements.txt"),
tests_require=parse_requirements_from_file("test_requirements.txt"),
)
| 31.919192
| 84
| 0.698734
|
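The parse_requirements_from_file helper in the timesketch setup.py above yields one requirement string per parsed entry. A minimal sketch of exercising it on its own, assuming the function is importable and using a throwaway requirements file (the file contents below are illustrative, not timesketch's real pins), could look like this:
import tempfile
# Write a tiny requirements file to feed through the helper (contents are made up).
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("Flask>=1.0\ncelery==5.2.7\n")
    req_path = tmp.name
# parse_requirements_from_file is a generator, so materialize it with list().
print(list(parse_requirements_from_file(req_path)))
# Expected output, roughly: ['Flask>=1.0', 'celery==5.2.7']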
d13026c4b3b6bdddfa0fd74d3318b68c5df7e79b
| 8,145
|
py
|
Python
|
selfdrive/loggerd/uploader.py
|
KexianShen/openpilot
|
bf58e2f7edce20e4fed5bb8c147aca40cd1d91bc
|
[
"MIT"
] | null | null | null |
selfdrive/loggerd/uploader.py
|
KexianShen/openpilot
|
bf58e2f7edce20e4fed5bb8c147aca40cd1d91bc
|
[
"MIT"
] | null | null | null |
selfdrive/loggerd/uploader.py
|
KexianShen/openpilot
|
bf58e2f7edce20e4fed5bb8c147aca40cd1d91bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import bz2
import json
import os
import random
import requests
import threading
import time
import traceback
from pathlib import Path
from cereal import log
import cereal.messaging as messaging
from common.api import Api
from common.params import Params
from common.realtime import set_core_affinity
from selfdrive.hardware import TICI
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.loggerd.config import ROOT
from selfdrive.swaglog import cloudlog
NetworkType = log.DeviceState.NetworkType
UPLOAD_ATTR_NAME = 'user.upload'
UPLOAD_ATTR_VALUE = b'1'
allow_sleep = bool(os.getenv("UPLOADER_SLEEP", "1"))
force_wifi = os.getenv("FORCEWIFI") is not None
fake_upload = os.getenv("FAKEUPLOAD") is not None
def get_directory_sort(d):
return list(map(lambda s: s.rjust(10, '0'), d.rsplit('--', 1)))
def listdir_by_creation(d):
try:
paths = os.listdir(d)
paths = sorted(paths, key=get_directory_sort)
return paths
except OSError:
cloudlog.exception("listdir_by_creation failed")
return list()
def clear_locks(root):
for logname in os.listdir(root):
path = os.path.join(root, logname)
try:
for fname in os.listdir(path):
if fname.endswith(".lock"):
os.unlink(os.path.join(path, fname))
except OSError:
cloudlog.exception("clear_locks failed")
class Uploader():
def __init__(self, dongle_id, root):
self.dongle_id = dongle_id
self.api = Api(dongle_id)
self.root = root
self.upload_thread = None
self.last_resp = None
self.last_exc = None
self.immediate_size = 0
self.immediate_count = 0
# stats for last successfully uploaded file
self.last_time = 0
self.last_speed = 0
self.last_filename = ""
self.immediate_folders = ["crash/", "boot/"]
self.immediate_priority = {"qlog": 0, "qlog.bz2": 0, "qcamera.ts": 1}
def get_upload_sort(self, name):
if name in self.immediate_priority:
return self.immediate_priority[name]
return 1000
def list_upload_files(self):
if not os.path.isdir(self.root):
return
self.immediate_size = 0
self.immediate_count = 0
for logname in listdir_by_creation(self.root):
path = os.path.join(self.root, logname)
try:
names = os.listdir(path)
except OSError:
continue
if any(name.endswith(".lock") for name in names):
continue
for name in sorted(names, key=self.get_upload_sort):
key = os.path.join(logname, name)
fn = os.path.join(path, name)
# skip files already uploaded
try:
is_uploaded = getxattr(fn, UPLOAD_ATTR_NAME)
except OSError:
cloudlog.event("uploader_getxattr_failed", exc=self.last_exc, key=key, fn=fn)
is_uploaded = True # deleter could have deleted
if is_uploaded:
continue
try:
if name in self.immediate_priority:
self.immediate_count += 1
self.immediate_size += os.path.getsize(fn)
except OSError:
pass
yield (name, key, fn)
def next_file_to_upload(self):
upload_files = list(self.list_upload_files())
for name, key, fn in upload_files:
if any(f in fn for f in self.immediate_folders):
return (key, fn)
for name, key, fn in upload_files:
if name in self.immediate_priority:
return (key, fn)
return None
def do_upload(self, key, fn):
try:
url_resp = self.api.get("v1.4/" + self.dongle_id + "/upload_url/", timeout=10, path=key, access_token=self.api.get_token())
if url_resp.status_code == 412:
self.last_resp = url_resp
return
url_resp_json = json.loads(url_resp.text)
url = url_resp_json['url']
headers = url_resp_json['headers']
cloudlog.debug("upload_url v1.4 %s %s", url, str(headers))
if fake_upload:
cloudlog.debug(f"*** WARNING, THIS IS A FAKE UPLOAD TO {url} ***")
class FakeResponse():
def __init__(self):
self.status_code = 200
self.last_resp = FakeResponse()
else:
with open(fn, "rb") as f:
data = f.read()
if key.endswith('.bz2') and not fn.endswith('.bz2'):
data = bz2.compress(data)
self.last_resp = requests.put(url, data=data, headers=headers, timeout=10)
except Exception as e:
self.last_exc = (e, traceback.format_exc())
raise
def normal_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
try:
self.do_upload(key, fn)
except Exception:
pass
return self.last_resp
def upload(self, key, fn, network_type, metered):
try:
sz = os.path.getsize(fn)
except OSError:
cloudlog.exception("upload: getsize failed")
return False
cloudlog.event("upload_start", key=key, fn=fn, sz=sz, network_type=network_type, metered=metered)
if sz == 0:
try:
# tag files of 0 size as uploaded
setxattr(fn, UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE)
except OSError:
cloudlog.event("uploader_setxattr_failed", exc=self.last_exc, key=key, fn=fn, sz=sz)
success = True
else:
start_time = time.monotonic()
stat = self.normal_upload(key, fn)
if stat is not None and stat.status_code in (200, 201, 401, 403, 412):
try:
# tag file as uploaded
setxattr(fn, UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE)
except OSError:
cloudlog.event("uploader_setxattr_failed", exc=self.last_exc, key=key, fn=fn, sz=sz)
self.last_filename = fn
self.last_time = time.monotonic() - start_time
self.last_speed = (sz / 1e6) / self.last_time
success = True
cloudlog.event("upload_success" if stat.status_code != 412 else "upload_ignored", key=key, fn=fn, sz=sz, network_type=network_type, metered=metered)
else:
success = False
cloudlog.event("upload_failed", stat=stat, exc=self.last_exc, key=key, fn=fn, sz=sz, network_type=network_type, metered=metered)
return success
def get_msg(self):
msg = messaging.new_message("uploaderState")
us = msg.uploaderState
us.immediateQueueSize = int(self.immediate_size / 1e6)
us.immediateQueueCount = self.immediate_count
us.lastTime = self.last_time
us.lastSpeed = self.last_speed
us.lastFilename = self.last_filename
return msg
def uploader_fn(exit_event):
try:
set_core_affinity([0, 1, 2, 3])
except Exception:
cloudlog.exception("failed to set core affinity")
clear_locks(ROOT)
params = Params()
dongle_id = params.get("DongleId", encoding='utf8')
if dongle_id is None:
cloudlog.info("uploader missing dongle_id")
raise Exception("uploader can't start without dongle id")
if TICI and not Path("/data/media").is_mount():
cloudlog.warning("NVME not mounted")
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['uploaderState'])
uploader = Uploader(dongle_id, ROOT)
backoff = 0.1
while not exit_event.is_set():
sm.update(0)
offroad = params.get_bool("IsOffroad")
network_type = sm['deviceState'].networkType if not force_wifi else NetworkType.wifi
if network_type == NetworkType.none:
if allow_sleep:
time.sleep(60 if offroad else 5)
continue
d = uploader.next_file_to_upload()
if d is None: # Nothing to upload
if allow_sleep:
time.sleep(60 if offroad else 5)
continue
key, fn = d
# qlogs and bootlogs need to be compressed before uploading
if key.endswith('qlog') or (key.startswith('boot/') and not key.endswith('.bz2')):
key += ".bz2"
success = uploader.upload(key, fn, sm['deviceState'].networkType.raw, sm['deviceState'].networkMetered)
if success:
backoff = 0.1
elif allow_sleep:
cloudlog.info("upload backoff %r", backoff)
time.sleep(backoff + random.uniform(0, backoff))
backoff = min(backoff*2, 120)
pm.send("uploaderState", uploader.get_msg())
def main():
uploader_fn(threading.Event())
if __name__ == "__main__":
main()
| 28.679577
| 156
| 0.661142
|
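The uploader above never moves or deletes files to mark them as done; it writes the user.upload extended attribute and checks for it on the next pass. A standalone sketch of that tagging pattern, using the stdlib os.getxattr/os.setxattr (Linux-only) as stand-ins for the selfdrive xattr_cache wrappers and a made-up file path, might look like this:
import os
UPLOAD_ATTR_NAME = 'user.upload'
UPLOAD_ATTR_VALUE = b'1'
fn = "/tmp/example_qlog.bz2"   # hypothetical file standing in for a real log segment
open(fn, "wb").close()
try:
    already_uploaded = os.getxattr(fn, UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE
except OSError:
    already_uploaded = False   # attribute missing -> not uploaded yet
if not already_uploaded:
    # ... the actual PUT to the upload URL would happen here ...
    os.setxattr(fn, UPLOAD_ATTR_NAME, UPLOAD_ATTR_VALUE)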
1b4cdc0fb780a43d555c13348853d1be6ca412a9
| 332
|
py
|
Python
|
fundamentals/13_modules_python.py
|
fabiangothman/Python
|
0df6828a4f435a9092667b328f78434d419e6a5b
|
[
"MIT"
] | null | null | null |
fundamentals/13_modules_python.py
|
fabiangothman/Python
|
0df6828a4f435a9092667b328f78434d419e6a5b
|
[
"MIT"
] | null | null | null |
fundamentals/13_modules_python.py
|
fabiangothman/Python
|
0df6828a4f435a9092667b328f78434d419e6a5b
|
[
"MIT"
] | null | null | null |
# Libraries: there are three types:
# Own: created by ourselves
# third-party: https://pypi.org
# Python: https://docs.python.org/3/py-modindex.html
import time
from datetime import date, timedelta
print(type(date.today()))
print(date.today())
print(type(timedelta(minutes=70)))
print(timedelta(minutes=70))
print(time.localtime())
| 20.75
| 52
| 0.743976
|
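The comment block in 13_modules_python.py above lists three kinds of libraries (own, third-party, Python standard library) but only demonstrates the last one. A hedged sketch of the other two categories, where my_utils.py and the requests package are placeholders that are not part of this repository, might be:
# Own module: a hypothetical my_utils.py sitting next to this script.
# import my_utils
# print(my_utils.some_helper())

# Third-party module: installed from https://pypi.org, e.g. with `pip install requests`.
# import requests
# print(requests.get("https://example.com").status_code)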
713d59aefa6e3626d31f1e1ae1e2ff9646c95add
| 2,069
|
py
|
Python
|
custom_components/react/tasks/base.py
|
gertjanstulp/ha-mapper
|
9cc84a4856e5f3e45077fd7d2586188b199f83d8
|
[
"Apache-2.0"
] | null | null | null |
custom_components/react/tasks/base.py
|
gertjanstulp/ha-mapper
|
9cc84a4856e5f3e45077fd7d2586188b199f83d8
|
[
"Apache-2.0"
] | null | null | null |
custom_components/react/tasks/base.py
|
gertjanstulp/ha-mapper
|
9cc84a4856e5f3e45077fd7d2586188b199f83d8
|
[
"Apache-2.0"
] | null | null | null |
from datetime import timedelta
from logging import Handler
from time import monotonic
from typing import Callable, Union
from homeassistant.core import Event
from ..base import ReactBase
from ..enums import ReactStage
class ReactTask:
"""React task base."""
events: Union[list[str], None] = None
events_with_filters: Union[list[tuple[str, Callable[[Event], bool]]], None] = None
signals: Union[list[str], None] = None
schedule: Union[timedelta, None] = None
stages: Union[list[ReactStage], None] = None
_can_run_disabled = False ## Set to True if task can run while disabled
def __init__(self, react: ReactBase) -> None:
self.react = react
@property
def slug(self) -> str:
"""Return the check slug."""
return self.__class__.__module__.rsplit(".", maxsplit=1)[-1]
def task_logger(self, handler: Handler, msg: str) -> None:
"""Log message from task"""
handler("ReactTask<%s> %s", self.slug, msg)
async def execute_task(self, *args, **kwargs) -> None:
"""Execute the task defined in subclass."""
if not self._can_run_disabled and self.react.system.disabled:
self.task_logger(
self.react.log.debug,
f"Skipping task, React is disabled {self.react.system.disabled_reason}",
)
return
self.task_logger(self.react.log.debug, "Executing task")
start_time = monotonic()
try:
if task := getattr(self, "async_execute", None):
await task(*args) # pylint: disable=not-callable
elif task := getattr(self, "execute", None):
await self.react.hass.async_add_executor_job(task)
except BaseException as exception: # lgtm [py/catch-base-exception] pylint: disable=broad-except
self.task_logger(self.react.log.error, f"failed: {exception}")
else:
self.react.log.debug(
"ReactTask<%s> took %.3f seconds to complete", self.slug, monotonic() - start_time
)
| 33.918033
| 105
| 0.628323
|
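ReactTask.execute_task above looks up an async_execute coroutine (or a blocking execute method) on the subclass via getattr and forwards the trigger arguments to it. A minimal illustrative subclass, assuming the task registrar passes the triggering Event as the positional argument, and with the event name and log text made up, could look like this:
class ExampleStartupTask(ReactTask):
    """Purely illustrative task that logs when Home Assistant has started."""

    events = ["homeassistant_started"]

    async def async_execute(self, event: Event) -> None:
        # task_logger prefixes the message with ReactTask<slug>.
        self.task_logger(self.react.log.info, f"saw event {event.event_type}")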
659771bcea51c5876cfb8b879aa4ddf22f95a4b0
| 399
|
py
|
Python
|
tests/_scopedvar/test_template.py
|
hsfzxjy/decup
|
8ad9819bc1daa332adbfb2a3ae60cca6055aac44
|
[
"Apache-2.0"
] | 2
|
2021-07-25T11:15:05.000Z
|
2021-09-27T06:55:44.000Z
|
tests/_scopedvar/test_template.py
|
hsfzxjy/decup
|
8ad9819bc1daa332adbfb2a3ae60cca6055aac44
|
[
"Apache-2.0"
] | null | null | null |
tests/_scopedvar/test_template.py
|
hsfzxjy/decup
|
8ad9819bc1daa332adbfb2a3ae60cca6055aac44
|
[
"Apache-2.0"
] | 1
|
2021-07-26T01:08:06.000Z
|
2021-07-26T01:08:06.000Z
|
from mocona.scopedvar import Template, S, V
from test_refcnt import isolate_testcase
class Dummy(Template, ns=V.dummy[...]):
foo: int
bar: str
@isolate_testcase
def test_get_from_template():
S.assign(S._varfor(V.dummy.foo), 1)
assert Dummy.foo == 1
@isolate_testcase
@S.inject
def test_set_via_template(dummy_foo=V.dummy.foo - 0):
Dummy.foo = 12
assert dummy_foo == 12
| 19
| 53
| 0.711779
|
1bccae6a4f9271c725f4a8eb263d4fcb856da9da
| 552
|
py
|
Python
|
backend/code/iep/auth/drivers/query.py
|
socek/iep
|
793e35ca5304eef7b7dacb5dd8d486622f497759
|
[
"Apache-2.0"
] | null | null | null |
backend/code/iep/auth/drivers/query.py
|
socek/iep
|
793e35ca5304eef7b7dacb5dd8d486622f497759
|
[
"Apache-2.0"
] | null | null | null |
backend/code/iep/auth/drivers/query.py
|
socek/iep
|
793e35ca5304eef7b7dacb5dd8d486622f497759
|
[
"Apache-2.0"
] | null | null | null |
from sapp import Decorator
from iep import app
from iep.application.drivers.query import GetActiveByUidForModel
from iep.application.drivers.query import ListActiveForModel
from .dbmodels import UserData
list_active = ListActiveForModel(UserData)
get_active_by_uid = GetActiveByUidForModel(UserData)
@Decorator(app, "dbsession")
def find_by_email(email, dbsession):
"""
Get user model from database using email.
"""
row = dbsession.query(UserData).filter(UserData.email == email).first()
return row.to_model() if row else None
| 27.6
| 75
| 0.778986
|
90badcc3fda43c4b738141e3bc900c6cf5c5650d
| 1,862
|
py
|
Python
|
selective.py
|
blue-cosmos/object_detection_tflite
|
c02fa9b8aa1588bf8e112407ccd762818a27305f
|
[
"MIT"
] | 1
|
2021-07-08T17:35:24.000Z
|
2021-07-08T17:35:24.000Z
|
selective.py
|
blue-cosmos/object_detection_tflite
|
c02fa9b8aa1588bf8e112407ccd762818a27305f
|
[
"MIT"
] | null | null | null |
selective.py
|
blue-cosmos/object_detection_tflite
|
c02fa9b8aa1588bf8e112407ccd762818a27305f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from __future__ import annotations
from typing import List
from PIL import Image
import cv2
import numpy as np
from decode import non_maximum_supression
# mode of selective search segmentation
# 'single' or 'fast' or 'quality'
SELECTIVE_MODE = 'single'
# minimum area of a segmented rectangle
MIN_RECT_AREA = 4000
# maximum number of selected rectangles
MAX_RECTS = 3
def search_selective(current: Image) -> List:
curr = cv2.cvtColor(
np.array(current, dtype=np.uint8),
cv2.COLOR_RGB2BGR
)
cv2.setUseOptimized(True)
ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
ss.setBaseImage(curr)
if SELECTIVE_MODE == 'single':
ss.switchToSingleStrategy()
elif SELECTIVE_MODE == 'fast':
ss.switchToSelectiveSearchFast()
elif SELECTIVE_MODE == 'quality':
        ss.switchToSelectiveSearchQuality()
else:
raise ValueError('SELECTIVE_MODE is invalid')
rects = ss.process()
bboxes = list()
for rect in rects:
x, y, w, h = rect
area = w * h
if area < MIN_RECT_AREA:
continue
bboxes.append(rect)
if len(bboxes) >= MAX_RECTS:
break
return bboxes
def select_selective(objects: List) -> List:
if len(objects) == 0:
return []
id2label = dict()
bboxes = list()
for obj in objects:
id2label[obj['index']] = obj['name']
bbox = list(obj['bbox'])
bbox.append(obj['index'])
bbox.append(obj['prob'])
bboxes.append(bbox)
bboxes = np.array(bboxes)
best_bboxes = non_maximum_supression(bboxes=bboxes)
ret = list()
for bbox in best_bboxes:
ret.append({
'name': id2label[bbox[4]],
'prob': bbox[5],
'bbox': tuple(bbox[:4]),
})
return ret
| 26.985507
| 70
| 0.625671
|
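In selective.py above, search_selective takes a PIL image and returns at most MAX_RECTS (x, y, w, h) region proposals, while select_selective runs non-maximum suppression over detection dicts keyed by index/name/prob/bbox. A usage sketch, with the image path and the single detection below as placeholders, might be:
if __name__ == '__main__':
    # "frame.jpg" is a placeholder path, not a file shipped with the repository.
    frame = Image.open('frame.jpg').convert('RGB')
    for x, y, w, h in search_selective(frame):
        print(f'proposal at ({x}, {y}) with size {w}x{h}')

    detections = [
        {'index': 0, 'name': 'person', 'prob': 0.9, 'bbox': (10, 20, 110, 220)},
    ]
    print(select_selective(detections))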