hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c3f17a70386052db02c11c15fab00823b5a363bc | 1,245 | py | Python | Missing Numbers.py | Shaikh-Nabeel/HackerRankAlgorithms | 3d11fd2c45bd045fa320691c8ac1c89acadc6d8a | [
"Apache-2.0"
] | null | null | null | Missing Numbers.py | Shaikh-Nabeel/HackerRankAlgorithms | 3d11fd2c45bd045fa320691c8ac1c89acadc6d8a | [
"Apache-2.0"
] | null | null | null | Missing Numbers.py | Shaikh-Nabeel/HackerRankAlgorithms | 3d11fd2c45bd045fa320691c8ac1c89acadc6d8a | [
"Apache-2.0"
] | null | null | null | """
Problem Statement
Numeros, The Artist, had two lists A and B, such that, B was a permutation of A. Numeros was very proud of these lists.
Unfortunately, while transporting them from one exhibition to another, some numbers from List A got left out. Can you
find out the numbers missing from A?
"""
__author__ = 'Danyang'
class Solution(object):
    def solve(self, cipher):
        """Return the numbers missing from A, space-separated in ascending order.

        B is a multiset superset of A (B was a permutation of A before some
        elements of A were lost in transport).  A value is "missing" when it
        occurs more times in B than in A; each missing value is reported once.

        :param cipher: tuple (m, A, n, B) where m and n are the lengths of
                       the integer lists A and B respectively.
        :return: str of the distinct missing numbers, sorted ascending.
        """
        from collections import Counter
        m, A, n, B = cipher
        # Counter subtraction keeps only keys whose count in B exceeds their
        # count in A (negative/zero counts are dropped automatically), which
        # is exactly the hand-rolled hash-map bookkeeping it replaces.
        missing = Counter(B) - Counter(A)
        return " ".join(map(str, sorted(missing)))
if __name__ == "__main__":
    import sys
    # Read the fixture file; swap in sys.stdin to read from standard input.
    f = open("1.in", "r")
    # f = sys.stdin
    solution = Solution()
    # Input format: m, then the m values of A, then n, then the n values of B.
    # NOTE: Python 2 code -- map() returns a list and `print s,` below is the
    # Python 2 print statement (trailing comma suppresses the newline).
    m = int(f.readline().strip())
    A = map(int, f.readline().strip().split(' '))
    n = int(f.readline().strip())
    B = map(int, f.readline().strip().split(' '))
    cipher = m, A, n, B
    # solve
    s = "%s\n" % (solution.solve(cipher))
    print s,
| 24.411765 | 119 | 0.519679 | 540 | 0.433735 | 0 | 0 | 0 | 0 | 0 | 0 | 438 | 0.351807 |
c3f1f74b466b17b50d5baf80f81ac4960271ecd0 | 3,187 | py | Python | jmap/account/imap/mailbox.py | filiphanes/jmap-proxy-python | 86d7aba07c5faad652dd46f418f2b8dd3fe03bc3 | [
"MIT"
] | 14 | 2020-05-12T14:21:23.000Z | 2022-03-17T07:20:25.000Z | jmap/account/imap/mailbox.py | filiphanes/jmap-proxy-python | 86d7aba07c5faad652dd46f418f2b8dd3fe03bc3 | [
"MIT"
] | 1 | 2020-11-28T12:52:09.000Z | 2020-11-28T12:52:09.000Z | jmap/account/imap/mailbox.py | filiphanes/jmap-proxy-python | 86d7aba07c5faad652dd46f418f2b8dd3fe03bc3 | [
"MIT"
] | 4 | 2020-06-03T09:21:01.000Z | 2022-03-11T20:43:58.000Z | from jmap.account.imap.imap_utf7 import imap_utf7_decode, imap_utf7_encode
KNOWN_SPECIALS = set('\\HasChildren \\HasNoChildren \\NoSelect \\NoInferiors \\UnMarked \\Subscribed'.lower().split())
# special use or name magic
ROLE_MAP = {
'inbox': 'inbox',
'drafts': 'drafts',
'draft': 'drafts',
'draft messages': 'drafts',
'bulk': 'junk',
'bulk mail': 'junk',
'junk': 'junk',
'junk mail': 'junk',
'spam mail': 'junk',
'spam messages': 'junk',
'archive': 'archive',
'sent': 'sent',
'sent items': 'sent',
'sent messages': 'sent',
'deleted messages': 'trash',
'trash': 'trash',
'\\inbox': 'inbox',
'\\trash': 'trash',
'\\sent': 'sent',
'\\junk': 'junk',
'\\spam': 'junk',
'\\archive': 'archive',
'\\drafts': 'drafts',
'\\all': 'all',
}
class ImapMailbox(dict):
    """Lazily-computed JMAP Mailbox view over one IMAP mailbox.

    Behaves like a dict: any missing key is computed on demand by the
    same-named method (via ``__missing__``) and cached back into the dict.
    ``db`` is the owning account object; it is assumed to provide
    ``byimapname`` (imapname -> mailbox) and ``mailboxes`` (id -> mailbox).
    """
    __slots__ = ('db',)

    def __missing__(self, key):
        # Lazy-attribute protocol: self['name'] -> self.name() on first access.
        return getattr(self, key)()

    def name(self):
        """Decode the display name (last path segment) from modified UTF-7."""
        try:
            parentname, name = self['imapname'].rsplit(self['sep'], maxsplit=1)
        except ValueError:
            # No separator: this is a top-level mailbox.
            name = self['imapname']
        self['name'] = imap_utf7_decode(name.encode())
        return self['name']

    def parentId(self):
        """Id of the parent mailbox, or None for a top-level mailbox."""
        try:
            parentname, name = self['imapname'].rsplit(self['sep'], maxsplit=1)
            # NOTE(review): assumes the parent is always present in
            # db.byimapname; a missing parent raises KeyError (not caught here).
            self['parentId'] = self.db.byimapname[parentname]['id']
        except ValueError:
            self['parentId'] = None
        return self['parentId']

    def role(self):
        """JMAP role derived from the first non-generic flag, else the name."""
        for f in self['flags']:
            if f not in KNOWN_SPECIALS:
                self['role'] = ROLE_MAP.get(f, None)
                break
        else:
            # Only generic flags present: fall back to name-based magic.
            self['role'] = ROLE_MAP.get(self['imapname'].lower(), None)
        return self['role']

    def sortOrder(self):
        """Sort key: inbox first (1), other special roles (2), the rest (3).

        Fixes the original one-liner
        ``2 if role else (1 if role == 'inbox' else 3)``
        whose inbox branch was unreachable: any truthy role (including
        'inbox') returned 2, so the inbox never sorted first.
        """
        role = self['role']
        if role == 'inbox':
            return 1
        return 2 if role else 3

    def isSubscribed(self):
        return '\\subscribed' in self['flags']

    def totalEmails(self):
        # Placeholder: message counts are not tracked yet.
        return 0

    def unreadEmails(self):
        # Placeholder: message counts are not tracked yet.
        return 0

    def totalThreads(self):
        # Threads approximated as one message per thread.
        return self['totalEmails']

    def unreadThreads(self):
        return self['unreadEmails']

    def myRights(self):
        """JMAP myRights object; most rights follow IMAP selectability."""
        can_select = '\\noselect' not in self['flags']
        self['myRights'] = {
            'mayReadItems': can_select,
            'mayAddItems': can_select,
            'mayRemoveItems': can_select,
            'maySetSeen': can_select,
            'maySetKeywords': can_select,
            'mayCreateChild': True,
            # Special-role mailboxes must not be renamed or deleted.
            'mayRename': False if self['role'] else True,
            'mayDelete': False if self['role'] else True,
            'maySubmit': can_select,
        }
        return self['myRights']

    def imapname(self):
        """Encode the full IMAP path (modified UTF-7) from name + parent."""
        encname = imap_utf7_encode(self['name']).decode()
        if self['parentId']:
            parent = self.db.mailboxes[self['parentId']]
            self['imapname'] = parent['imapname'] + parent['sep'] + encname
        else:
            self['imapname'] = encname
        return self['imapname']

    def created(self):
        # State tokens derived from IMAP UIDVALIDITY / UIDNEXT.
        return self['uidvalidity']

    def updated(self):
        return self['uidvalidity'] * self['uidnext']

    def deleted(self):
        return None
| 26.338843 | 118 | 0.557891 | 2,398 | 0.752432 | 0 | 0 | 0 | 0 | 0 | 0 | 973 | 0.305303 |
c3f30ba18a8ccc9b5eb6bda010feb4e01b778707 | 2,987 | py | Python | pyieee1905/multiap_msg.py | evanslai/pyieee1905 | 1007f964c1ebcaf3825a809fb98bc20430e26b64 | [
"MIT"
] | 3 | 2020-07-17T15:58:49.000Z | 2022-01-03T11:40:04.000Z | pyieee1905/multiap_msg.py | jayakumar-ananthakrishnan/pyieee1905 | 8aab72a1bccb9c6c753ca7d3d8913b3cba4c74a0 | [
"MIT"
] | 3 | 2019-03-29T19:31:23.000Z | 2021-09-16T11:51:55.000Z | pyieee1905/multiap_msg.py | jayakumar-ananthakrishnan/pyieee1905 | 8aab72a1bccb9c6c753ca7d3d8913b3cba4c74a0 | [
"MIT"
] | 1 | 2021-09-14T13:58:43.000Z | 2021-09-14T13:58:43.000Z | from pyieee1905.ieee1905_tlv import IEEE1905_TLV
from scapy.packet import Packet, bind_layers
from scapy.fields import BitField, XByteField, XShortField, XShortEnumField
from scapy.layers.l2 import Ether
IEEE1905_MCAST = "01:80:c2:00:00:13"
# CMDU message-type registry: IEEE 1905.1 base types (0x0000-0x0012) and
# Wi-Fi Alliance Multi-AP extension types (0x8000+), keyed by the 16-bit
# messageType field of the CMDU header.
ieee1905_msg_type = {
    0x0000:"TOPOLOGY_DISCOVERY_MESSAGE",
    0x0001:"TOPOLOGY_NOTIFICATION_MESSAGE",
    0x0002:"TOPOLOGY_QUERY_MESSAGE",
    0x0003:"TOPOLOGY_RESPONSE_MESSAGE",
    0x0004:"VENDOR_SPECIFIC_MESSAGE",
    0x0005:"LINK_METRIC_QUERY_MESSAGE",
    0x0006:"LINK_METRIC_RESPONSE_MESSAGE",
    0x0007:"AP_AUTOCONFIGURATION_SEARCH_MESSAGE",
    0x0008:"AP_AUTOCONFIGURATION_RESPONSE_MESSAGE",
    0x0009:"AP_AUTOCONFIGURATION_WSC_MESSAGE",
    0x000A:"AP_AUTOCONFIGURATION_RENEW_MESSAGE",
    0x000B:"IEEE1905_PUSH_BUTTON_EVENT_NOTIFICATION_MESSAGE",
    0x000C:"IEEE1905_PUSH_BUTTON_JOIN_NOTIFICATION_MESSAGE",
    0x000D:"HIGHER_LAYER_QUERY_MESSAGE",
    0x000E:"HIGHER_LAYER_RESPONSE_MESSAGE",
    0x000F:"INTERFACE_POWER_CHANGE_REQUEST_MESSAGE",
    0x0010:"INTERFACE_POWER_CHANGE_RESPONSE_MESSAGE",
    0x0011:"GENERIC_PHY_QUERY_MESSAGE",
    0x0012:"GENERIC_PHY_RESPONSE_MESSAGE",
    0x8000:"IEEE1905_ACK_MESSAGE",
    0x8001:"AP_CAPABILITY_QUERY_MESSAGE",
    0x8002:"AP_CAPABILITY_REPORT_MESSAGE",
    0x8003:"MULTI_AP_POLICY_CONFIG_REQUEST_MESSAGE",
    0x8004:"CHANNEL_PREFERENCE_QUERY_MESSAGE",
    0x8005:"CHANNEL_PREFERENCE_REPORT_MESSAGE",
    0x8006:"CHANNEL_SELECTION_REQUEST_MESSAGE",
    0x8007:"CHANNEL_SELECTION_RESPONSE_MESSAGE",
    0x8008:"OPERATING_CHANNEL_REPORT_MESSAGE",
    0x8009:"CLIENT_CAPABILITIES_QUERY_MESSAGE",
    0x800A:"CLIENT_CAPABILITIES_REPORT_MESSAGE",
    0x800B:"AP_METRICS_QUERY_MESSAGE",
    0x800C:"AP_METRICS_RESPONSE_MESSAGE",
    0x800D:"ASSOCIATED_STA_LINK_METRICS_QUERY_MESSAGE",
    0x800E:"ASSOCIATED_STA_LINK_METRICS_RESPONSE_MESSAGE",
    0x800F:"UNASSOCIATED_STA_LINK_METRICS_QUERY_MESSAGE",
    0x8010:"UNASSOCIATED_STA_LINK_METRICS_RESPONSE_MESSAGE",
    0x8011:"BEACON_METRICS_QUERY_MESSAGE",
    # Fixed typo: was "BEACON_METRICS_REPONSE_METRICS".
    0x8012:"BEACON_METRICS_RESPONSE_MESSAGE",
    0x8013:"COMBINED_INFRASTRUCTURE_METRICS_MESSAGE",
    0x8014:"CLIENT_STEERING_REQUEST_MESSAGE",
    0x8015:"CLIENT_STEERING_BTM_REPORT_MESSAGE",
    0x8016:"CLIENT_ASSOCIATION_CONTROL_REQUEST_MESSAGE",
    0x8017:"STEERING_COMPLETED_MESSAGE",
    0x8018:"HIGHER_LAYER_DATA_MESSAGE",
    0x8019:"BACKHAUL_STEERING_REQUEST_MESSAGE",
    0x801A:"BACKHAUL_STEERING_RESPONSE_MESSAGE"
}
class MultiAP_Message(Packet):
    """Scapy dissector for the 8-byte IEEE 1905.1 CMDU header.

    Layout: messageVersion, reserved octet, 16-bit messageType (decoded
    through ``ieee1905_msg_type``), 16-bit message id, fragment id, then
    one byte of flags (lastFragment, relay, 6 reserved bits).
    """
    name = "IEEE 1905 MultiAP Message"
    fields_desc = [
        XByteField("msg_version", None),                       # messageVersion
        XByteField("msg_reserved", None),                      # reserved octet
        XShortEnumField("msg_type", None, ieee1905_msg_type),  # messageType
        XShortField("msg_id", None),                           # message id (MID)
        XByteField("frag_id", None),                           # fragment id
        BitField("flag_last_frag_ind", 0, 1),                  # last-fragment indicator
        BitField("flag_relay_ind", 0, 1),                      # relay indicator
        BitField("flag_reserved", 0, 6)                        # reserved flag bits
    ]
# Ethertype 0x893a identifies IEEE 1905.1 frames; the CMDU payload is then
# parsed as a sequence of IEEE 1905 TLVs.
bind_layers(Ether, MultiAP_Message, type=0x893a)
bind_layers(MultiAP_Message, IEEE1905_TLV, )
| 37.810127 | 75 | 0.781051 | 442 | 0.147975 | 0 | 0 | 0 | 0 | 0 | 0 | 1,728 | 0.578507 |
c3f439b48efe360b61fbb39a47da1c6f455643ce | 2,719 | py | Python | daily_practice/tools/GetMem.py | joakimzhang/qa_study | ff8930e674d45c49bea4e130d14d73d17b090e48 | [
"Apache-2.0"
] | null | null | null | daily_practice/tools/GetMem.py | joakimzhang/qa_study | ff8930e674d45c49bea4e130d14d73d17b090e48 | [
"Apache-2.0"
] | null | null | null | daily_practice/tools/GetMem.py | joakimzhang/qa_study | ff8930e674d45c49bea4e130d14d73d17b090e48 | [
"Apache-2.0"
] | null | null | null | # coding:utf8
'''
Created on 2016-08-30
@author: zhangq
'''
from serial import Serial
import re
from threading import Thread
import time
import datetime
import pygal
import os
class FilterMem(object):
    """Poll a device over a serial port and chart reported memory usage.

    NOTE: Python 2 code (``print`` statements, ``except Exception, e``
    syntax); it will not run unmodified on Python 3.
    """
    def __init__(self, port, baudrate):
        # `port` is 1-based (as shown by the OS); pyserial expects 0-based.
        self.serial_obj = Serial()
        self.serial_obj.port = port-1
        self.serial_obj.baudrate = baudrate
        self.connect_uart()
    def connect_uart(self):
        # Open the serial port; on failure close a half-open port,
        # print the error and return 0 (success path returns None).
        try:
            self.serial_obj.open()
        except Exception, e:
            if self.serial_obj.isOpen():
                self.serial_obj.close()
            print e
            return 0
    def send_thread(self, _command, _period):
        # Start a daemon thread that re-sends `_command` every `_period` s.
        self.sent_thread = Thread(target=self.sendfunc,args=(_command, _period))
        self.sent_thread.setDaemon(True)
        self.sent_thread.start()
        #self.getmem()
    def getmem(self, keyword, file_name):
        # Blocking loop: log every serial line to <file_name>_<date>.log and
        # collect values matched by "<keyword> ... <number> ... bytes";
        # the chart is re-rendered once every 10 collected samples.
        today = datetime.date.today()
        self.file_name = r"%s_%s" % (file_name, today)
        x_list = []
        y_list = []
        with open("%s.log"%self.file_name, "w") as f:
            while 1:
                self.info = self.serial_obj.readline()
                print self.info
                current = datetime.datetime.now()
                f_time = "%s-%s-%s %s:%s:%s" % (current.year, current.month, current.day, current.hour, current.minute, current.second)
                f.write("%s:%s" % (f_time, self.info))
                match_info = re.search("%s.+?(\d+).+bytes" % keyword, self.info)
                if match_info:
                    mem_val = match_info.group(1)
                    y_list.append(int(mem_val))
                    x_list.append(current)
                    print mem_val
                    if len(y_list)%10 == 0:
                        self.make_pic(x_list, y_list)
                #print match_info.group(0)
                #print "bbb"
                #time.sleep(1)
    def sendfunc(self, _char, _period):
        # Daemon-thread body: enter monitor mode once, then poll forever.
        self.serial_obj.write("mon\n")
        while 1:
            self.serial_obj.write("%s\n" % _char)
            time.sleep(_period)
            #print _char
    # Render the collected (time, value) samples to an HTML line chart.
    def make_pic(self, x_list, y_list):
        line_chart = pygal.Line()
        line_chart.title = 'Mem usage evolution (in %)'
        line_chart.x_labels = x_list
        line_chart.add('Mem', y_list)
        line_chart.render()
        f = open('%s.html' % self.file_name, 'w')
        f.write(line_chart.render())
        f.close()
if __name__ == "__main__":
    my_obj = FilterMem(9, 115200)  # COM port 9 (1-based), 115200 baud
    my_obj.send_thread("mid", 10)  # poll "mid" every 10 s on a daemon thread
    #my_obj.connect_uart()
    # NOTE(review): getmem() takes (keyword, file_name); this call is missing
    # the file_name argument and raises TypeError -- confirm intended log name.
    my_obj.getmem("Used")
    # my_obj.sent_thread.join()
| 29.554348 | 135 | 0.544318 | 2,355 | 0.862321 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.137679 |
c3f60d6a60312b81c0e1bdb097c0cb726e1268b3 | 4,582 | py | Python | worky/server.py | asuol/worky | 362257e77486af05941cc977055c01e49b09a2dd | [
"MIT"
] | null | null | null | worky/server.py | asuol/worky | 362257e77486af05941cc977055c01e49b09a2dd | [
"MIT"
] | null | null | null | worky/server.py | asuol/worky | 362257e77486af05941cc977055c01e49b09a2dd | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020 André Lousa Marques <andre.lousa.marques at gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from flask import Flask, request, redirect
from flask.templating import render_template
from datetime import datetime, timedelta
from worky.models.index_model import IndexModel
from worky.models.completed_model import CompletedModel
from worky.models.create_task_model import CreateTaskModel
from worky.models.update_task_model import UpdateTaskModel
from worky.models.confirm_form_model import ConfirmFormModel
from worky.storage import Storage
from waitress import serve
app = Flask(__name__)
def run(db, host, port):
    """Wire the Storage backend into the app config and serve with waitress.

    :param db: database path/handle forwarded to Storage
    :param host: interface to bind
    :param port: TCP port to listen on
    """
    app.config['STORAGE'] = Storage(db)
    serve(app, host=host, port=port)
@app.route('/')
def index():
    """Render the landing page with the active and overdue task lists."""
    store = app.config['STORAGE']
    overdue = store.get_overdue_tasks()
    active = store.get_active_tasks()
    return render_template("index.html", model=IndexModel(active, overdue))
@app.route('/createForm')
def create_form():
    """Show the task-creation form, defaulting the due date to two weeks out."""
    due = (datetime.utcnow() + timedelta(weeks=2)).strftime('%Y-%m-%d')
    return render_template("createTask.html",
                           model=CreateTaskModel("/createTask", due))
@app.route('/createTask')
def create_task():
    """Persist a new task from the query parameters, then return to the index."""
    store = app.config['STORAGE']
    args = request.args
    store.create_task(args.get('description'), args.get('dueDate'))
    return redirect("/", code=302)
@app.route('/updateForm')
def update_form():
    """Render the edit form pre-filled with an existing task."""
    task = app.config['STORAGE'].get_task(request.args.get('id'))
    model = UpdateTaskModel("/updateTask", task.due_date, task)
    return render_template("updateTask.html", model=model)
@app.route('/updateTask')
def update_task():
    """Apply edits to an existing task, then return to the index."""
    store = app.config['STORAGE']
    args = request.args
    store.update_task(args.get('id'), args.get('description'),
                      args.get('dueDate'))
    return redirect("/", code=302)
@app.route('/deleteForm')
def delete_form():
    """Ask the user to confirm deletion of a task."""
    task = app.config['STORAGE'].get_task(request.args.get('id'))
    model = ConfirmFormModel("/deleteTask", task.due_date, task, "Delete")
    return render_template("confirmForm.html", model=model)
@app.route('/deleteTask')
def delete_task():
    """Delete the identified task, then return to the index."""
    app.config['STORAGE'].delete_task(request.args.get('id'))
    return redirect("/", code=302)
@app.route('/completeForm')
def complete_form():
    """Ask the user to confirm completion of a task."""
    task = app.config['STORAGE'].get_task(request.args.get('id'))
    model = ConfirmFormModel("/completeTask", task.due_date, task, "Complete")
    return render_template("confirmForm.html", model=model)
@app.route('/completeTask')
def complete_task():
    """Mark the identified task complete, then return to the index."""
    app.config['STORAGE'].complete_task(request.args.get('id'))
    return redirect("/", code=302)
@app.route('/completed')
def completed():
    """List every task that has been marked complete."""
    done = app.config['STORAGE'].get_completed_tasks()
    return render_template("completed.html", model=CompletedModel(done))
| 27.60241 | 78 | 0.742034 | 0 | 0 | 0 | 0 | 2,832 | 0.617936 | 0 | 0 | 1,592 | 0.347371 |
c3f743cc4af3a71c5d1f33fa1729e7222543c974 | 561 | py | Python | main.py | tyleralgigi/NCAA-NN | e2482846d6a588a80f2ae7641fc8109eab62523e | [
"MIT"
] | 3 | 2020-02-11T19:40:30.000Z | 2020-12-02T13:15:48.000Z | main.py | tyleralgigi/ALBURT_AI | e2482846d6a588a80f2ae7641fc8109eab62523e | [
"MIT"
] | 4 | 2020-02-07T19:42:08.000Z | 2021-06-02T01:00:59.000Z | main.py | tyleralgigi/NCAA-NN | e2482846d6a588a80f2ae7641fc8109eab62523e | [
"MIT"
] | null | null | null | #files
import schedule
import test
import ATS
import editGameList
import getGames
import getData
#libs
import pathlib
import time
if __name__ == '__main__':
    path = pathlib.Path(__file__).parent.absolute()
    # Ordered data-collection pipeline; each entry is a no-arg callable.
    # The short pause between steps reproduces the original pacing.
    steps = [
        getData.getData,
        lambda: getGames.start(path),
        editGameList.editList,
        ATS.ATS,
        test.func,
    ]
    for done, step in enumerate(steps, start=1):
        step()
        print('%d/6' % done)
        time.sleep(2)
    schedule.scheduleStart(path)
    print('Data Collection Complete')
| 18.096774 | 51 | 0.654189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.128342 |
c3f80a30cddcf300830226429e92b2147122720d | 12,689 | py | Python | waffle/tests/test_management.py | theunraveler/django-waffle | 4b27b27c5ce3f044eb8b2c94f40df841bc982884 | [
"BSD-3-Clause"
] | 1 | 2021-09-12T00:44:29.000Z | 2021-09-12T00:44:29.000Z | waffle/tests/test_management.py | theunraveler/django-waffle | 4b27b27c5ce3f044eb8b2c94f40df841bc982884 | [
"BSD-3-Clause"
] | 4 | 2016-06-01T16:23:26.000Z | 2017-01-04T13:04:06.000Z | waffle/tests/test_management.py | theunraveler/django-waffle | 4b27b27c5ce3f044eb8b2c94f40df841bc982884 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import six
from django.core.management import call_command, CommandError
from django.contrib.auth.models import Group, User
from waffle import get_waffle_flag_model
from waffle.models import Sample, Switch
from waffle.tests.base import TestCase
class WaffleFlagManagementCommandTests(TestCase):
    """Tests for the ``waffle_flag`` management command.

    Covers creating, updating and listing flags, the ``everyone`` tri-state
    (None / True / False), and replacing vs appending (``--append``) the
    group and user relations.
    """
    def test_create(self):
        """ The command should create a new flag. """
        name = 'test'
        percent = 20
        Group.objects.create(name='waffle_group')
        call_command('waffle_flag', name, percent=percent,
                     superusers=True, staff=True, authenticated=True,
                     rollout=True, create=True, group=['waffle_group'])
        flag = get_waffle_flag_model().objects.get(name=name)
        self.assertEqual(flag.percent, percent)
        self.assertIsNone(flag.everyone)
        self.assertTrue(flag.superusers)
        self.assertTrue(flag.staff)
        self.assertTrue(flag.authenticated)
        self.assertTrue(flag.rollout)
        self.assertEqual(list(flag.groups.values_list('name', flat=True)),
                         ['waffle_group'])
    def test_not_create(self):
        """ The command shouldn't create a new flag if the create flag is
        not set.
        """
        name = 'test'
        with self.assertRaisesRegexp(CommandError, 'This flag does not exist.'):
            call_command('waffle_flag', name, everyone=True, percent=20,
                         superusers=True, staff=True, authenticated=True,
                         rollout=True)
        self.assertFalse(get_waffle_flag_model().objects.filter(name=name).exists())
    def test_update(self):
        """ The command should update an existing flag. """
        name = 'test'
        flag = get_waffle_flag_model().objects.create(name=name)
        # Model defaults before the update.
        self.assertIsNone(flag.percent)
        self.assertIsNone(flag.everyone)
        self.assertTrue(flag.superusers)
        self.assertFalse(flag.staff)
        self.assertFalse(flag.authenticated)
        self.assertFalse(flag.rollout)
        percent = 30
        call_command('waffle_flag', name, percent=percent,
                     superusers=False, staff=True, authenticated=True,
                     rollout=True)
        flag.refresh_from_db()
        self.assertEqual(flag.percent, percent)
        self.assertIsNone(flag.everyone)
        self.assertFalse(flag.superusers)
        self.assertTrue(flag.staff)
        self.assertTrue(flag.authenticated)
        self.assertTrue(flag.rollout)
    def test_update_activate_everyone(self):
        """ The command should update everyone field to True """
        name = 'test'
        flag = get_waffle_flag_model().objects.create(name=name)
        self.assertIsNone(flag.percent)
        self.assertIsNone(flag.everyone)
        self.assertTrue(flag.superusers)
        self.assertFalse(flag.staff)
        self.assertFalse(flag.authenticated)
        self.assertFalse(flag.rollout)
        percent = 30
        call_command('waffle_flag', name, everyone=True, percent=percent,
                     superusers=False, staff=True, authenticated=True,
                     rollout=True)
        flag.refresh_from_db()
        self.assertEqual(flag.percent, percent)
        self.assertTrue(flag.everyone)
        self.assertFalse(flag.superusers)
        self.assertTrue(flag.staff)
        self.assertTrue(flag.authenticated)
        self.assertTrue(flag.rollout)
    def test_update_deactivate_everyone(self):
        """ The command should update everyone field to False"""
        name = 'test'
        flag = get_waffle_flag_model().objects.create(name=name)
        self.assertIsNone(flag.percent)
        self.assertIsNone(flag.everyone)
        self.assertTrue(flag.superusers)
        self.assertFalse(flag.staff)
        self.assertFalse(flag.authenticated)
        self.assertFalse(flag.rollout)
        percent = 30
        call_command('waffle_flag', name, everyone=False, percent=percent,
                     superusers=False, staff=True, authenticated=True,
                     rollout=True)
        flag.refresh_from_db()
        self.assertEqual(flag.percent, percent)
        self.assertFalse(flag.everyone)
        self.assertFalse(flag.superusers)
        self.assertTrue(flag.staff)
        self.assertTrue(flag.authenticated)
        self.assertTrue(flag.rollout)
    def test_list(self):
        """ The command should list all flags."""
        stdout = six.StringIO()
        get_waffle_flag_model().objects.create(name='test')
        call_command('waffle_flag', list_flags=True, stdout=stdout)
        expected = 'Flags:\nNAME: test\nSUPERUSERS: True\nEVERYONE: None\n' \
                   'AUTHENTICATED: False\nPERCENT: None\nTESTING: False\n' \
                   'ROLLOUT: False\nSTAFF: False\nGROUPS: []\nUSERS: []'
        actual = stdout.getvalue().strip()
        self.assertEqual(actual, expected)
    def test_group_append(self):
        """ The command should append a group to a flag."""
        original_group = Group.objects.create(name='waffle_group')
        Group.objects.create(name='append_group')
        flag = get_waffle_flag_model().objects.create(name='test')
        flag.groups.add(original_group)
        flag.refresh_from_db()
        self.assertEqual(list(flag.groups.values_list('name', flat=True)),
                         ['waffle_group'])
        call_command('waffle_flag', 'test', group=['append_group'],
                     append=True)
        flag.refresh_from_db()
        self.assertEqual(list(flag.groups.values_list('name', flat=True)),
                         ['waffle_group', 'append_group'])
        self.assertIsNone(flag.everyone)
    def test_user(self):
        """ The command should replace a user to a flag."""
        original_user = User.objects.create_user('waffle_test')
        User.objects.create_user('add_user')
        flag = get_waffle_flag_model().objects.create(name='test')
        flag.users.add(original_user)
        flag.refresh_from_db()
        self.assertEqual(list(flag.users.values_list('username', flat=True)),
                         ['waffle_test'])
        call_command('waffle_flag', 'test', user=['add_user'])
        flag.refresh_from_db()
        # Without --append the user list is replaced wholesale.
        self.assertEqual(list(flag.users.values_list('username', flat=True)),
                         ['add_user'])
        self.assertIsNone(flag.everyone)
    def test_user_append(self):
        """ The command should append a user to a flag."""
        original_user = User.objects.create_user('waffle_test')
        User.objects.create_user('append_user')
        User.objects.create_user('append_user_email', email='test@example.com')
        flag = get_waffle_flag_model().objects.create(name='test')
        flag.users.add(original_user)
        flag.refresh_from_db()
        self.assertEqual(list(flag.users.values_list('username', flat=True)),
                         ['waffle_test'])
        call_command('waffle_flag', 'test', user=['append_user'],
                     append=True)
        flag.refresh_from_db()
        self.assertEqual(list(flag.users.values_list('username', flat=True)),
                         ['waffle_test', 'append_user'])
        self.assertIsNone(flag.everyone)
        # Users may also be addressed by email address.
        call_command('waffle_flag', 'test', user=['test@example.com'],
                     append=True)
        flag.refresh_from_db()
        self.assertEqual(list(flag.users.values_list('username', flat=True)),
                         ['waffle_test', 'append_user', 'append_user_email'])
        self.assertIsNone(flag.everyone)
class WaffleSampleManagementCommandTests(TestCase):
    """Tests for the ``waffle_sample`` management command (create, update,
    guard against implicit creation, and listing)."""
    def test_create(self):
        """ The command should create a new sample. """
        name = 'test'
        percent = 20
        call_command('waffle_sample', name, str(percent), create=True)
        sample = Sample.objects.get(name=name)
        self.assertEqual(sample.percent, percent)
    def test_not_create(self):
        """ The command shouldn't create a new sample if the create flag is
        not set.
        """
        name = 'test'
        with self.assertRaisesRegexp(CommandError, 'This sample does not exist'):
            call_command('waffle_sample', name, '20')
        self.assertFalse(Sample.objects.filter(name=name).exists())
    def test_update(self):
        """ The command should update an existing sample. """
        name = 'test'
        sample = Sample.objects.create(name=name, percent=0)
        self.assertEqual(sample.percent, 0)
        percent = 50
        call_command('waffle_sample', name, str(percent))
        sample.refresh_from_db()
        self.assertEqual(sample.percent, percent)
    def test_list(self):
        """ The command should list all samples."""
        stdout = six.StringIO()
        Sample.objects.create(name='test', percent=34)
        call_command('waffle_sample', list_samples=True, stdout=stdout)
        expected = 'Samples:\ntest: 34.0%'
        actual = stdout.getvalue().strip()
        self.assertEqual(actual, expected)
class WaffleSwitchManagementCommandTests(TestCase):
    """Tests for the ``waffle_switch`` management command (on/off toggling,
    guard against implicit creation, and listing)."""
    def test_create(self):
        """ The command should create a new switch. """
        name = 'test'
        call_command('waffle_switch', name, 'on', create=True)
        switch = Switch.objects.get(name=name, active=True)
        switch.delete()
        call_command('waffle_switch', name, 'off', create=True)
        Switch.objects.get(name=name, active=False)
    def test_not_create(self):
        """ The command shouldn't create a new switch if the create flag is
        not set.
        """
        name = 'test'
        with self.assertRaisesRegexp(CommandError, 'This switch does not exist.'):
            call_command('waffle_switch', name, 'on')
        self.assertFalse(Switch.objects.filter(name=name).exists())
    def test_update(self):
        """ The command should update an existing switch. """
        name = 'test'
        switch = Switch.objects.create(name=name, active=True)
        call_command('waffle_switch', name, 'off')
        switch.refresh_from_db()
        self.assertFalse(switch.active)
        call_command('waffle_switch', name, 'on')
        switch.refresh_from_db()
        self.assertTrue(switch.active)
    def test_list(self):
        """ The command should list all switches."""
        stdout = six.StringIO()
        Switch.objects.create(name='switch1', active=True)
        Switch.objects.create(name='switch2', active=False)
        call_command('waffle_switch', list_switches=True, stdout=stdout)
        expected = 'Switches:\nswitch1: on\nswitch2: off'
        actual = stdout.getvalue().strip()
        self.assertEqual(actual, expected)
class WaffleDeleteManagementCommandTests(TestCase):
    """Tests for the ``waffle_delete`` management command across flags,
    switches and samples, alone and in combination."""
    def test_delete_flag(self):
        """ The command should delete a flag. """
        name = 'test_flag'
        get_waffle_flag_model().objects.create(name=name)
        call_command('waffle_delete', flag_names=[name])
        self.assertEqual(get_waffle_flag_model().objects.count(), 0)
    # (sic: "swtich" typo kept -- renaming would change the test id.)
    def test_delete_swtich(self):
        """ The command should delete a switch. """
        name = 'test_switch'
        Switch.objects.create(name=name)
        call_command('waffle_delete', switch_names=[name])
        self.assertEqual(Switch.objects.count(), 0)
    def test_delete_sample(self):
        """ The command should delete a sample. """
        name = 'test_sample'
        Sample.objects.create(name=name, percent=0)
        call_command('waffle_delete', sample_names=[name])
        self.assertEqual(Sample.objects.count(), 0)
    def test_delete_mix_of_types(self):
        """ The command should delete different types of records. """
        name = 'test'
        get_waffle_flag_model().objects.create(name=name)
        Switch.objects.create(name=name)
        Sample.objects.create(name=name, percent=0)
        call_command('waffle_delete', switch_names=[name], flag_names=[name],
                     sample_names=[name])
        self.assertEqual(get_waffle_flag_model().objects.count(), 0)
        self.assertEqual(Switch.objects.count(), 0)
        self.assertEqual(Sample.objects.count(), 0)
    def test_delete_some_but_not_all_records(self):
        """ The command should delete specified records, but leave records
        not specified alone. """
        flag_1 = 'test_flag_1'
        flag_2 = 'test_flag_2'
        get_waffle_flag_model().objects.create(name=flag_1)
        get_waffle_flag_model().objects.create(name=flag_2)
        call_command('waffle_delete', flag_names=[flag_1])
        self.assertTrue(get_waffle_flag_model().objects.filter(name=flag_2).exists())
| 38.568389 | 85 | 0.638033 | 12,390 | 0.976436 | 0 | 0 | 0 | 0 | 0 | 0 | 2,586 | 0.203799 |
c3fa1a868dbcea9b71565113431a5f4f8d72d153 | 8,733 | py | Python | cdma.py | SouppuoS/CDMA | 8377fa2e238f0802c306869018746305cc5122c9 | [
"MIT"
] | 7 | 2021-11-02T12:18:02.000Z | 2022-02-14T12:30:46.000Z | cdma.py | Pashasah/CDMA | 8377fa2e238f0802c306869018746305cc5122c9 | [
"MIT"
] | 1 | 2021-12-14T20:56:48.000Z | 2021-12-18T04:30:01.000Z | cdma.py | Pashasah/CDMA | 8377fa2e238f0802c306869018746305cc5122c9 | [
"MIT"
] | 3 | 2021-12-12T07:34:16.000Z | 2022-03-30T07:21:02.000Z | """
Implement of Circular Differential Microphone Arrays (CDMA)
Reference:
[1] Benesty, Jacob, Jingdong Chen, and Israel Cohen. Design of circular differential microphone arrays. Vol. 12. Berlin, Germany:: Springer, 2015.
"""
import numpy as np
c = 340
class circular_microphone_arrays():
def __init__(self,
r : float = 1.0, # r : radius of array (cm)
M : int = 4, # M : number of microphones
fs : int = 8000, # fs : sample rate
f_bin : int = 256, # f_bin : number of freq bins
sa_bin : int = 360, # sa_bin : number of steer angle
) -> None:
self.r = r / 100. # cm -> m
self.M = M
self.fs = fs
self.f_bin = f_bin
self.sa_bin = sa_bin
# steer
self.phi = np.arange(0, self.M, 1) * 2 * np.pi / self.M
self.rad = np.arange(0, 360, 360. / self.sa_bin) * np.pi / 180.
self.freq = np.linspace(0, self.fs / 2, self.f_bin) # [f_bin]
self.freq[0] = 1 # set the first freq to 1Hz to avoid singular matrix
_cos_degdif = np.cos(self.rad[:,None] - self.phi[None]) # [sa_bin x M]
self.steer = np.exp(2j * np.pi * self.r / c \
* _cos_degdif[...,None] * self.freq[None, None]) # [sa_bin x M x f_bin]
# gamma_dn
_dij = np.arange(0, self.M, 1)[None] - np.arange(0, self.M, 1)[:, None] # delta_{ij}, [M x M]
_delta = 2 * self.r * np.sin(np.pi * _dij / self.M)
self.g_dn_sphere = np.sinc(2 * np.pi * self.freq[:, None, None] * _delta[None] / c)
self.g_dn_cylinder = np.i0(2 * np.pi * self.freq[:, None, None] * _delta[None] / c)
@classmethod
def theta_validation(
self,
theta: float,
) -> float :
while theta < 0:
theta += 360.
while theta >= 360.:
theta -= 360.
return theta
@classmethod
def build_sym(
self,
M : int, # M : number of microphones
sa : float = 0, # steer angle (distortless directgion)
) -> np.ndarray:
sa = sa + 360 if sa < 0 else sa
deg_cand = np.array([v * 360 / M for v in range(M)])
tgt_dir = np.argmin(np.abs(deg_cand - sa))
_sy = np.zeros((M // 2 - 1, M + 1), dtype=np.float)
for i in range(1, M // 2):
_sy[i - 1, i], _sy[i - 1, M - i] = 1., -1.
_sy_r = np.concatenate((np.roll(_sy[:, :-1], shift=tgt_dir, axis=1), _sy[:, [-1]]), axis=1)
return _sy_r
def get_steer(
self,
theta: float = 0 # theta: steer direction (degree)
) -> np.ndarray:
theta_val = self.theta_validation(theta)
rad = theta_val / 180. * np.pi
tgt_theta = np.argmin(np.abs(self.rad - rad))
print(f'LOG:: get steer from theta = {theta_val} - tgt = {tgt_theta}')
return self.steer[tgt_theta] # [M x f_bin]
class FixedBeamformor():
    """Base class for fixed (data-independent) beamformers.

    Subclasses implement ``calc_weight`` to fill ``self.weight`` with a
    complex tensor of shape [f_bin x M x 1]; this base class caches the
    weights and applies their conjugate to multichannel spectra.
    """
    def __init__(
            self,
            ) -> None:
        # Weights are computed lazily on the first get_weight() call.
        self.flg_calc_weight = True

    def calc_weight(
            self
            ) -> None:
        """Hook for subclasses; fills ``self.weight``."""
        pass

    def get_weight(
            self,
            recalc : bool = False
            ) -> np.ndarray:
        """Return cached weights [f_bin x M x 1], recomputing when needed."""
        if recalc or self.flg_calc_weight:
            self.calc_weight()
            self.flg_calc_weight = False
        return self.weight

    def apply(
            self,
            spec_x : np.ndarray, # input signal, [M x f_bin x N]
            ) -> np.ndarray:
        """Apply the conjugated weights to ``spec_x``; returns [f_bin x N]."""
        weighted = np.einsum('mfn,fmi->fni', spec_x, self.get_weight().conjugate())
        return weighted[..., 0]
class CDMA(FixedBeamformor):
    """Circular Differential Microphone Arrays (CDMA)

    The order of the CDMA depends on the number of null points in
    ``null_list``.  Per frequency bin, weights solve a constraint system:
    distortionless response at ``sa``, nulls at ``null_list``, plus
    symmetry constraints.
    """
    def __init__(
            self,
            cma : circular_microphone_arrays,
            sa : float, # steer angle (distortionless direction)
            sym : np.ndarray = None, # symmetric constraint for the weight, [C, M + 1]
            null_list : list = None, # list of null points (degree); default: no nulls
            b : np.ndarray = None, # b for each point, [len(null_list) + 1]
            mic_mask : list = None, # list of microphone masks; default: none
            ) -> None:
        super(CDMA, self).__init__()
        self.cma = cma
        # Fix: the original used mutable default arguments ([]), which are
        # shared between instances.  ``None`` defaults keep the interface
        # backward compatible while avoiding cross-instance state leaks.
        self.null_list = [] if null_list is None else null_list
        self.mic_mask = [] if mic_mask is None else mic_mask
        self.sa = sa + 360 if sa < 0 else sa
        self.b = b
        self.sym = self.cma.build_sym(self.cma.M, self.sa) if sym is None else sym

    def build_eq(
            self,
            ) -> tuple:
        """Assemble the constraint matrix ``_eq`` and right-hand side ``_b``.

        Returns ([N x M x f_bin], [N x 1 x f_bin]) with N constraints.
        """
        _eq = np.array([self.cma.get_steer(d).conjugate() for d in [self.sa,] + self.null_list])
        _b = np.zeros((_eq.shape[0], 1, _eq.shape[-1])) # [N x 1 x f_bin]
        _b[0] = 1.
        if self.b is not None:
            # Fix: this previously read ``self.null_b``, an attribute that is
            # never defined (guaranteed AttributeError); the custom RHS is
            # stored in ``self.b``.
            _b = self.b[:, None, None].repeat(_eq.shape[-1], axis=-1)
        # Fix: ``np.float`` was removed in NumPy 1.24; builtin float is equivalent.
        _sy = self.sym[..., None].repeat(self.cma.f_bin, axis=-1).astype(float)
        _eq = np.concatenate((_eq, _sy[..., :-1, :]), axis=0) # [N x M x f_bin]
        _b = np.concatenate((_b, _sy[..., [-1], :]), axis=0) # [N x 1 x f_bin]
        # DC bin: replace constraints with identity rows and a uniform 1/M RHS.
        _eq[...,0] = np.eye(self.cma.M)[:_eq.shape[0]]
        _b[...,0] = 1. / self.cma.M
        return _eq, _b

    def calc_weight(
            self,
            ) -> None:
        """Solve the constraint system per frequency bin into ``self.weight``."""
        _eq, _b = self.build_eq()
        if _eq.shape[0] < _eq.shape[1]:
            # Under-determined system: minimum-norm solution (NMS).
            print('LOG:: NMS mode')
            _mAA = np.einsum('ijk,ljk->kil', _eq, _eq.conjugate()) # [f_bin x N x N]
            self.weight = np.einsum('ijk,kil,lnk->kjn', _eq.conjugate(), np.linalg.inv(_mAA), _b)
        elif _eq.shape[0] == _eq.shape[1]:
            self.weight = np.linalg.solve(_eq.transpose(2, 0, 1), _b.transpose(2, 0, 1))
        else:
            # Fix: ``raise NotImplemented`` raises a TypeError (NotImplemented
            # is a value, not an exception); NotImplementedError is correct.
            raise NotImplementedError("over-determined constraint systems are not supported")
class DS(FixedBeamformor):
    """Delay and Sum beamformer.

    Weights are the steering vectors for the look direction divided by the
    microphone count, so channels are phase-aligned and averaged.
    """
    def __init__(
            self,
            cma : circular_microphone_arrays,
            sa : float, # steer angle (distortionless direction)
            ) -> None:
        # Fix: the base-class constructor was never invoked, so
        # ``flg_calc_weight`` was missing and ``get_weight()`` raised
        # AttributeError on first use.
        super(DS, self).__init__()
        self.cma = cma
        self.sa = sa + 360 if sa < 0 else sa

    def calc_weight(
            self,
            ) -> None:
        # [M x f_bin] steering vector -> [f_bin x M x 1] weights, 1/M averaged.
        self.weight = self.cma.get_steer(self.sa).T[...,None] / self.cma.M
class RSD(CDMA):
    """Robust Superdirective beamforming

    Maximizes directivity against a diffuse (spherical or cylindrical)
    noise field, with optional diagonal loading (``eps``) for robustness.
    """
    def __init__(
            self,
            cma : circular_microphone_arrays,
            sa : float, # steer angle (distortionless direction)
            mode : str = 'sphere', # diffuse noise mode, `sphere` or `cylinder`
            sym_flag : bool = False, # add symmetry constraint or not
            eps = 0., # eps on Gamma_dn, could be `float` or `np.ndarray` for [f_bin x 1 x 1]
            **kwargs,
            ) -> None:
        super(RSD, self).__init__(cma, sa, **kwargs)
        self.g_dn_mode = mode
        self.eps = eps
        self.flg_sym = sym_flag

    def calc_weight(
            self,
            ) -> None:
        """Compute MVDR-style superdirective weights into ``self.weight``."""
        # Diagonally-loaded diffuse-noise coherence matrix, [f_bin x M x M].
        _g_v = self.eps * np.eye(self.cma.M)[None] + (self.cma.g_dn_sphere \
            if self.g_dn_mode == 'sphere' else self.cma.g_dn_cylinder) # [f_bin x M x M]
        if not self.flg_sym:
            # without symmetry constraint: closed-form Gamma^{-1}d / (d^H Gamma^{-1}d)
            sv = self.cma.get_steer(self.sa) # [M x f_bin]
            _num = np.einsum('fmn,imf->fni', np.linalg.inv(_g_v), sv[None]) # [f_bin x M x 1]
            _den = np.einsum('fmi,lmf->fil', _num, sv[None].conjugate()) # [f_bin x 1 x 1]
            # DC frequency bin: replace the numerator with a uniform 1/M
            # vector (note: still divided by _den below, so the final DC
            # weight is not exactly 1/M — the original comment was wrong).
            _num[0] = 1 / self.cma.M
            self.weight = _num / _den
        else:
            # with symmetry constraint (the original comment here mistakenly
            # said "without"): constrained LCMV-style solve.
            _eq, _b = self.build_eq()
            _gd_inv = np.linalg.solve(_g_v, _eq.conjugate().transpose(2, 1, 0)) # \Gamma^{-1}D^*, [f_bin x M x N]
            _d_gd_inv = np.einsum('nmf,fml->fnl', _eq, _gd_inv) # D\Gamma^{-1}D^*, [f_bin x N x N]
            self.weight = np.einsum('fmn,fnl,lif->fmi', _gd_inv, np.linalg.inv(_d_gd_inv), _b)
# No demo/self-test; the beamformer classes above are meant to be imported.
if __name__ == "__main__":
    pass
c3fc3ab5d99683c3c1fb3c45a1d21f95f0d04d07 | 2,083 | py | Python | iteebot/manage.py | enkwolf/ITEE-discord-bot | 9b39852f99316c4fd376a5bf85e2500eadd6ec7e | [
"MIT"
] | null | null | null | iteebot/manage.py | enkwolf/ITEE-discord-bot | 9b39852f99316c4fd376a5bf85e2500eadd6ec7e | [
"MIT"
] | null | null | null | iteebot/manage.py | enkwolf/ITEE-discord-bot | 9b39852f99316c4fd376a5bf85e2500eadd6ec7e | [
"MIT"
] | null | null | null | """
Command line management utilities for ITEEBot. This module's command line
interface will act as the bot's entry point when installed.
"""
import click
from . import configurator as conf
from . import database as db
from .bot import ITEEBot
@click.group()
def cli():
    # Root command group; subcommands are attached via cli.add_command below.
    # NOTE: a docstring here would become the CLI help text, so a comment is
    # used instead to keep --help output unchanged.
    pass
@click.command("init-config")
@click.argument(
    "config_path",
    default="instance/config.json",
    type=click.Path()
)
def create_config(config_path):
    """
    Command for writing or updating a configuration file. Configuration file
    path will default to instance/config.json.

    * config_path (str) - Path to the configuration file

    Example:
    iteebot init-config /home/donkey/.iteebot/config.json
    """
    # Prompting and serialization are delegated to the configurator module.
    conf.write_config_file(config_path)
@click.command("init-db")
@click.argument(
    "config_path",
    default="instance/config.json",
    # exists=True: the config must have been created (init-config) first.
    type=click.Path(exists=True)
)
def init_db(config_path):
    """
    Initializes a database to the location defined in the configuration's DB
    option.

    * config_path (str) - Path to the configuration file

    Example:
    iteebot init-db /home/donkey/.iteebot/config.json
    """
    # The database location is read from the "DB" key of the loaded config.
    config = conf.load_config(config_path)
    db.init_db(config["DB"])
@click.command("run")
# NOTE(review): without is_flag=True this option expects a value
# (e.g. --debug 1) rather than acting as a bare switch — confirm intended.
@click.option("--debug", default=False, help="Run in debug mode")
@click.argument(
    "config_path",
    default="instance/config.json",
    type=click.Path(exists=True)
)
def run(debug, config_path):
    """
    Runs the bot using configuration frome the specific location (or default
    of instance/config.json). Optional debug flag can be set to run in debug
    mode, which will print logs to stdout instead of using log files.

    * config_path (str) - Path to the configuration file
    * debug (bool) - Run in debug mode

    Example:
    iteebot run --debug /home/donkey/.iteebot/config.json
    """
    # Construct the bot from the loaded config and block on its event loop.
    config = conf.load_config(config_path)
    bot = ITEEBot(config, debug)
    bot.run()
# Register the subcommands on the root group (see the cli group above).
cli.add_command(create_config)
cli.add_command(init_db)
cli.add_command(run)

# Allows running the module directly; when installed, the same `cli` callable
# serves as the console entry point (see the module docstring).
if __name__ == "__main__":
    cli()
| 24.797619 | 77 | 0.69035 | 0 | 0 | 0 | 0 | 1,710 | 0.820931 | 0 | 0 | 1,229 | 0.590014 |
c3fc56ed683eeb1327d133f78576e74865f4e678 | 439 | py | Python | accounts/models.py | sulembutproton/morphicsys | 17bdeb501b1225fc7cb7c384f5c70ad0fabfdf23 | [
"MIT"
] | null | null | null | accounts/models.py | sulembutproton/morphicsys | 17bdeb501b1225fc7cb7c384f5c70ad0fabfdf23 | [
"MIT"
] | null | null | null | accounts/models.py | sulembutproton/morphicsys | 17bdeb501b1225fc7cb7c384f5c70ad0fabfdf23 | [
"MIT"
] | null | null | null | import random
from django.core.exceptions import ValidationError
from django.db import models
from django.conf import settings
class AuthToggle(models.Model):
    """Switch that enables or disables protection for the site."""
    # Boolean flag; presumably a single row acts as a global on/off toggle —
    # TODO(review): confirm only one AuthToggle row is ever created.
    enable_protection = models.BooleanField(default=False)

    def __str__(self):
        return "Options"
class PassPhrase(models.Model):
    """Stores a passphrase string used by the protection mechanism."""
    # Ships with a placeholder default; expected to be replaced in the admin.
    passphrase = models.CharField(max_length=100, default="YourMagicPassphrase")

    def __str__(self):
        return self.passphrase
c3fff026f9609601d831faed00227194945d7bec | 7,685 | py | Python | corehq/apps/translations/integrations/transifex/project_migrator.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/translations/integrations/transifex/project_migrator.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/translations/integrations/transifex/project_migrator.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | import copy
import datetime
import tempfile
from collections import OrderedDict
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
import polib
from memoized import memoized
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.translations.integrations.transifex.client import (
TransifexApiClient,
)
from corehq.apps.translations.integrations.transifex.const import (
SOURCE_LANGUAGE_MAPPING,
TRANSIFEX_SLUG_PREFIX_MAPPING,
)
from corehq.apps.translations.integrations.transifex.exceptions import (
InvalidProjectMigration,
ResourceMissing,
)
from corehq.apps.translations.models import TransifexProject
class ProjectMigrator(object):
    def __init__(self, domain, project_slug, source_app_id, target_app_id, resource_ids_mapping):
        """
        Migrate a transifex project from one app to another by
        1. updating slugs of resources to use new module/form ids
        2. updating context of translations in "Menus_and_forms" sheet to use new module/form ids
        :param resource_ids_mapping: tuple of type, old_id, new_id
        """
        self.domain = domain
        self.project_slug = project_slug
        self.project = TransifexProject.objects.get(slug=project_slug)
        self.client = TransifexApiClient(self.project.organization.get_api_token, self.project.organization,
                                         project_slug)
        self.source_app_id = source_app_id
        self.target_app_id = target_app_id
        self.resource_ids_mapping = resource_ids_mapping
        # Flat old_id -> new_id lookup derived from (type, old, new) tuples.
        self.id_mapping = {old_id: new_id for _, old_id, new_id in self.resource_ids_mapping}

    def validate(self):
        """Raise InvalidProjectMigration if the migration is unsafe."""
        ProjectMigrationValidator(self).validate()

    def migrate(self):
        """Run both migration steps; returns their per-item API responses."""
        slug_update_responses = self._update_slugs()
        menus_and_forms_sheet_update_responses = self._update_menus_and_forms_sheet()
        return slug_update_responses, menus_and_forms_sheet_update_responses

    def _update_slugs(self):
        # Rename each Transifex resource slug from prefix_oldid to prefix_newid.
        responses = {}
        for resource_type, old_id, new_id in self.resource_ids_mapping:
            slug_prefix = self._get_slug_prefix(resource_type)
            if not slug_prefix:
                # Unknown resource types have no slug convention; skip them.
                continue
            resource_slug = "%s_%s" % (slug_prefix, old_id)
            new_resource_slug = "%s_%s" % (slug_prefix, new_id)
            responses[old_id] = self.client.update_resource_slug(resource_slug, new_resource_slug)
        return responses

    @memoized
    def _get_slug_prefix(self, resource_type):
        return TRANSIFEX_SLUG_PREFIX_MAPPING.get(resource_type)

    def _update_menus_and_forms_sheet(self):
        # Pull the "Menus_and_forms" translation for every source-app language,
        # rewrite resource ids in the msgctxt entries, then re-upload.
        langs = copy.copy(self.source_app_langs)
        translations = OrderedDict()
        for lang in langs:
            try:
                translations[lang] = self.client.get_translation("Menus_and_forms", lang, lock_resource=False)
            except ResourceMissing:
                # Probably a lang in app not present on Transifex, so skip
                pass
        self._update_context(translations)
        return self._upload_new_translations(translations)

    @cached_property
    def source_app_langs(self):
        return self._source_app.langs

    @cached_property
    def _source_app(self):
        return get_app(self.domain, self.source_app_id)

    def _update_context(self, translations):
        """
        update msgctxt for all POEntry objects replacing ids
        :param translations: dict of lang code mapped to it list of POEntries
        """
        for po_entries in translations.values():
            for po_entry in po_entries:
                # make sure the format is as expected, if not skip
                context_entries = po_entry.msgctxt.split(":")
                if len(context_entries) == 3:
                    resource_id = context_entries[-1]
                    # replace if we have been asked to replace it
                    if resource_id in self.id_mapping:
                        po_entry.msgctxt = po_entry.msgctxt.replace(resource_id, self.id_mapping[resource_id])

    def _upload_new_translations(self, translations):
        responses = {}
        # the project source lang, which is the app default language should be the first to update.
        # HQ keeps the default lang on top and hence it should be the first one here
        assert list(translations.keys())[0] == self.target_app_default_lang
        for lang_code in translations:
            responses[lang_code] = self._upload_translation(translations[lang_code], lang_code)
        return responses

    def _upload_translation(self, translations, lang_code):
        # Serialize the PO entries to a temp file and push them to Transifex.
        # The default language updates the resource itself; other languages
        # upload as translations of that resource.
        po = polib.POFile()
        po.check_for_duplicates = False
        po.metadata = self.get_metadata()
        po.extend(translations)
        with tempfile.NamedTemporaryFile() as temp_file:
            po.save(temp_file.name)
            temp_file.seek(0)
            if lang_code == self.target_app_default_lang:
                return self.client.upload_resource(temp_file.name, "Menus_and_forms", "Menus_and_forms",
                                                   update_resource=True)
            else:
                return self.client.upload_translation(temp_file.name, "Menus_and_forms", "Menus_and_forms",
                                                      lang_code)

    def get_metadata(self):
        # PO-file header metadata stamped onto every uploaded file.
        now = str(datetime.datetime.now())
        return {
            'App-Id': self.target_app_id,
            'PO-Creation-Date': now,
            'MIME-Version': '1.0',
            'Content-Type': 'text/plain; charset=utf-8',
            'Language': self.target_app_default_lang
        }

    @cached_property
    def target_app_default_lang(self):
        return self._target_app.default_language

    @cached_property
    def _target_app(self):
        return get_app(self.domain, self.target_app_id)

    @cached_property
    def get_project_source_lang(self):
        # NOTE: despite the get_ prefix this is a cached property and is
        # accessed without parentheses (see ProjectMigrationValidator).
        return self.client.project_details().json()['source_language_code']

    @cached_property
    def source_app_default_lang(self):
        return self._source_app.default_language
class ProjectMigrationValidator(object):
    """Pre-flight checks for migrating a Transifex project between apps.

    The migration is only safe when the source app, the target app and the
    Transifex project all agree on the default/source language.
    """
    def __init__(self, migrator):
        self.migrator = migrator
        self.source_app_default_lang = migrator.source_app_default_lang
        self.target_app_default_lang = migrator.target_app_default_lang

    def validate(self):
        self._ensure_same_source_lang()

    def _ensure_same_source_lang(self):
        """
        ensure same source lang for source app, target app and on transifex project
        """
        source_lang = self.source_app_default_lang
        target_lang = self.target_app_default_lang
        if not source_lang or source_lang != target_lang:
            raise InvalidProjectMigration(
                _("Target app default language and the source app default language don't match"))
        # Cached property on the migrator (no call parentheses).
        project_source_lang = self.migrator.get_project_source_lang
        checks = (
            (source_lang,
             _("Transifex project source lang and the source app default language don't match")),
            (target_lang,
             _("Transifex project source lang and the target app default language don't match")),
        )
        for app_lang, message in checks:
            # HQ lang codes may need mapping to Transifex codes; unmapped
            # codes fall through unchanged.
            if SOURCE_LANGUAGE_MAPPING.get(app_lang, app_lang) != project_source_lang:
                raise InvalidProjectMigration(message)
| 41.766304 | 110 | 0.670787 | 6,979 | 0.908133 | 0 | 0 | 743 | 0.096682 | 0 | 0 | 1,337 | 0.173975 |
7f0027a4173acf95c325d3b417da922342f80c70 | 515 | py | Python | project3-mlops/Includes/Common-Notebooks/Common-Test.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | null | null | null | project3-mlops/Includes/Common-Notebooks/Common-Test.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | null | null | null | project3-mlops/Includes/Common-Notebooks/Common-Test.py | LiuxyEric/dscc202-402-spring2022 | f3877c2dde64656f9d84e3f913340f3fcefdc11b | [
"MIT"
] | 53 | 2022-01-11T19:06:06.000Z | 2022-03-25T19:27:48.000Z | # Databricks notebook source
# MAGIC
# MAGIC %md
# MAGIC # Integration Tests
# MAGIC The purpose of this notebook is to facilitate testing of our systems.
# COMMAND ----------
import os

# ``spark`` is injected into the notebook's global scope by the Databricks
# runtime; it is not imported in this file.
spark.conf.set("com.databricks.training.module-name", "common-notebooks")

currentVersion = os.environ["DATABRICKS_RUNTIME_VERSION"]
print(currentVersion)

spark.conf.set("com.databricks.training.expected-dbr", currentVersion)

# COMMAND ----------

# MAGIC %run ./Common

# COMMAND ----------

# ``allDone`` and ``courseAdvertisements`` are defined by the Common notebook
# included above via %run.
allDone(courseAdvertisements)
| 19.807692 | 76 | 0.72233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.683495 |
7f0183081841c770b57330f1874dfa4da1f1b5b1 | 3,865 | py | Python | py/test_runner.py | radanalyticsio/tensorflow-k8s | cb1e05389bfb25f21da84ca489eb8c8630f36b58 | [
"Apache-2.0"
] | 4 | 2018-03-02T12:27:34.000Z | 2020-06-04T17:24:39.000Z | py/test_runner.py | radanalyticsio/tensorflow-k8s | cb1e05389bfb25f21da84ca489eb8c8630f36b58 | [
"Apache-2.0"
] | null | null | null | py/test_runner.py | radanalyticsio/tensorflow-k8s | cb1e05389bfb25f21da84ca489eb8c8630f36b58 | [
"Apache-2.0"
] | 2 | 2018-09-05T18:47:31.000Z | 2019-03-05T17:21:43.000Z | """Test runner runs a TfJob test."""
import argparse
import logging
import os
import time
import uuid
import jinja2
from kubernetes import client as k8s_client
from py import test_util
from py import util
from py import tf_job_client
from google.cloud import storage # pylint: disable=no-name-in-module
import yaml
def run_test(args):
  """Run a single TfJob test described by a YAML/Jinja2 spec.

  Renders the spec template with the supplied image tag, submits the TfJob
  to the configured GKE cluster, waits for completion, and optionally
  writes a junit XML report (to GCS or locally).

  Raises:
    ValueError: if --image_tag was not supplied.
  """
  gcs_client = storage.Client(project=args.project)
  project = args.project
  cluster_name = args.cluster
  zone = args.zone
  util.configure_kubectl(project, zone, cluster_name)
  util.load_kube_config()

  api_client = k8s_client.ApiClient()

  t = test_util.TestCase()
  t.class_name = "tfjob_test"
  t.name = os.path.basename(args.spec)

  loader = jinja2.FileSystemLoader(os.path.dirname(args.spec))

  if not args.image_tag:
    raise ValueError("--image_tag must be provided.")

  logging.info("Loading spec from %s with image_tag=%s", args.spec, args.image_tag)
  spec_contents = jinja2.Environment(loader=loader).get_template(
    os.path.basename(args.spec)).render(image_tag=args.image_tag)
  # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
  # input; the spec is developer-authored here, but yaml.safe_load is the
  # safer choice if that ever changes.
  spec = yaml.load(spec_contents)

  # Make the job name unique.
  spec["metadata"]["name"] += "-" + uuid.uuid4().hex[0:4]
  # Fix: initialize these before the try block so the except handler cannot
  # hit an UnboundLocalError when create_tf_job itself fails/times out.
  name = "unknown"
  namespace = "unknown"
  start = time.time()
  try:
    api_response = tf_job_client.create_tf_job(api_client, spec)
    namespace = api_response["metadata"]["namespace"]
    name = api_response["metadata"]["name"]
    logging.info("Created job %s in namespaces %s", name, namespace)
    results = tf_job_client.wait_for_job(api_client, namespace, name,
                                         status_callback=tf_job_client.log_status)

    if results["status"]["state"] != "succeeded":
      t.failure = "Job {0} in namespace {1} in state {2}".format(
        name, namespace, results["status"]["state"])

    # TODO(jlewi):
    #  Here are some validation checks to run:
    #  1. Check tensorboard is created if its part of the job spec.
    #  2. Check that all resources are garbage collected.
    # TODO(jlewi): Add an option to add chaos and randomly kill various resources?
    # TODO(jlewi): Are there other generic validation checks we should
    # run.
  except util.TimeoutError:
    t.failure = "Timeout waiting for {0} in namespace {1} to finish.".format(
      name, namespace)
  finally:
    t.time = time.time() - start
    if args.junit_path:
      test_util.create_junit_xml_file([t], args.junit_path, gcs_client)
def add_common_args(parser):
  """Add a set of common parser arguments."""
  # (flag, options) pairs; registration order matches the original definition.
  common_arguments = (
    ("--spec", dict(
      default=None, type=str, required=True,
      help="Path to the YAML file specifying the test to run.")),
    ("--project", dict(
      default=None, type=str,
      help="The project to use.")),
    ("--cluster", dict(
      default=None, type=str,
      help="The name of the cluster.")),
    ("--image_tag", dict(
      default=None, type=str,
      help="The tag for the docker image to use.")),
    ("--zone", dict(
      default="us-east1-d", type=str,
      help="The zone for the cluster.")),
    ("--junit_path", dict(
      default="", type=str,
      help="Where to write the junit xml file with the results.")),
  )
  for flag, options in common_arguments:
    parser.add_argument(flag, **options)
def build_parser():
  """Create the top-level argument parser with its subcommands."""
  parser = argparse.ArgumentParser(description="Run a TfJob test.")
  subparsers = parser.add_subparsers()

  # "test" subcommand: shares the common flags and dispatches to run_test.
  test_parser = subparsers.add_parser("test", help="Run a tfjob test.")
  add_common_args(test_parser)
  test_parser.set_defaults(func=run_test)
  return parser
def main():  # pylint: disable=too-many-locals
  """CLI entry point: parse arguments and dispatch to the chosen command."""
  logging.getLogger().setLevel(logging.INFO)  # pylint: disable=too-many-locals
  # Each subparser stores its handler in the `func` default.
  parsed = build_parser().parse_args()
  parsed.func(parsed)


if __name__ == "__main__":
  main()
| 27.411348 | 83 | 0.684605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,285 | 0.332471 |
7f019039c93471670011b52c66c98041328e2ea4 | 2,848 | py | Python | deepspeech/io/collator.py | iclementine/DeepSpeech | d0635c6592a2e787ca296e15241e7371a83ca55f | [
"Apache-2.0"
] | 1 | 2021-05-14T23:27:13.000Z | 2021-05-14T23:27:13.000Z | deepspeech/io/collator.py | xihuanafeng/DeepSpeech | 2bdf4c946af66cc173d638c072ba6435cd18a286 | [
"Apache-2.0"
] | null | null | null | deepspeech/io/collator.py | xihuanafeng/DeepSpeech | 2bdf4c946af66cc173d638c072ba6435cd18a286 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from deepspeech.frontend.utility import IGNORE_ID
from deepspeech.io.utility import pad_sequence
from deepspeech.utils.log import Log
# Public API of this module.
__all__ = ["SpeechCollator"]

# Module-level logger from the project's logging wrapper.
logger = Log(__name__).getlog()
class SpeechCollator():
    """Batch collator that zero-pads audio features and token sequences."""

    def __init__(self, keep_transcription_text=True):
        """
        Padding audio features with zeros to make them have the same shape (or
        a user-defined shape) within one batch.

        If ``keep_transcription_text`` is True, text is treated as a raw
        string and encoded as unicode ordinals; otherwise it is token ids.
        """
        self._keep_transcription_text = keep_transcription_text

    def __call__(self, batch):
        """Collate ``(audio, text)`` examples into padded batch arrays.

        Args:
            batch (List[Tuple]): each item is ``(audio, text)``; audio is an
                ``np.ndarray`` of shape (D, T), text is ``List[int]`` or ``str``.

        Returns:
            tuple(audio, audio_lens, text, text_lens): batched data with
                shapes (B, Tmax, D), (B,), (B, Umax), (B,).
        """
        feature_list, feature_lens = [], []
        token_list, token_lens = [], []
        for audio, text in batch:
            # Features are stored feature-major: (D, T) -> (T, D).
            feature_list.append(audio.T)
            feature_lens.append(audio.shape[1])
            if self._keep_transcription_text:
                # Raw-text mode: encode each character as its unicode ordinal.
                assert isinstance(text, str), (type(text), text)
                token_ids = np.array([ord(ch) for ch in text], dtype=np.int64)
            else:
                # Token-id mode: accept either an ndarray or a list of ids.
                token_ids = text if isinstance(text, np.ndarray) else np.array(
                    text, dtype=np.int64)
            token_list.append(token_ids)
            token_lens.append(token_ids.shape[0])

        padded_audios = pad_sequence(
            feature_list, padding_value=0.0).astype(np.float32)  # [B, T, D]
        audio_lens = np.array(feature_lens).astype(np.int64)
        padded_texts = pad_sequence(
            token_list, padding_value=IGNORE_ID).astype(np.int64)
        text_lens = np.array(token_lens).astype(np.int64)
        return padded_audios, audio_lens, padded_texts, text_lens
| 36.050633 | 86 | 0.61236 | 2,018 | 0.708567 | 0 | 0 | 0 | 0 | 0 | 0 | 1,377 | 0.483497 |
7f01e3a718f45996d405a272db21ce74f4e2b2d9 | 553 | py | Python | launch/test.py | a-tharva/LaunchEnv | 3f138d863e961c541513539c499574122fbd8b9b | [
"MIT"
] | 1 | 2022-01-25T04:44:22.000Z | 2022-01-25T04:44:22.000Z | launch/test.py | a-tharva/LaunchEnv | 3f138d863e961c541513539c499574122fbd8b9b | [
"MIT"
] | null | null | null | launch/test.py | a-tharva/LaunchEnv | 3f138d863e961c541513539c499574122fbd8b9b | [
"MIT"
] | null | null | null | #import subprocess
#
#subprocess.Popen(f'C:\Windows\system32\calc.exe')
#
#
#
#
#
#
#
#\ is a escape character. You have three options.
#
#1) use /. This, as a bonus works for linux as well:
# 'D:/xxx/xxxx/asd/asd.exe'
#
#2) escape the backslash
# 'D:\\xxx\\xxxx\\asd\\asd.exe'
#
#3) use raw strings:
#r'D:\xxx\xxxx\asd\asd.exe'
#import json
#
#with open('/src/environment/data.json', 'r+') as f:
# lst = json.load(f)
# print(lst.index(bee))
#import pyfiglet
#
#result = pyfiglet.figlet_format("fort", font = "small" )
#print(result)
| 14.945946 | 58 | 0.631103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.934901 |
7f02e9703a47b60d920bf5b4d7b9b607bc976036 | 96 | py | Python | struct2seq/__init__.py | amorehead/neurips19-graph-protein-design | 92ecd0b667069c3207afa668865eb23eea4631d4 | [
"MIT"
] | null | null | null | struct2seq/__init__.py | amorehead/neurips19-graph-protein-design | 92ecd0b667069c3207afa668865eb23eea4631d4 | [
"MIT"
] | null | null | null | struct2seq/__init__.py | amorehead/neurips19-graph-protein-design | 92ecd0b667069c3207afa668865eb23eea4631d4 | [
"MIT"
] | null | null | null | __all__ = ['data', 'noam_opt', 'protein_features', 'self_attention', 'struct2seq', 'seq_model']
| 48 | 95 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.760417 |
7f02f1eeb46d28ccafbcdaa3e9a08ba2c3640961 | 20,258 | py | Python | p1bsl_formatter.py | WonderMr/Linter1 | f7ab00505902e5622d24ac3a48d074850cd67193 | [
"Artistic-2.0"
] | null | null | null | p1bsl_formatter.py | WonderMr/Linter1 | f7ab00505902e5622d24ac3a48d074850cd67193 | [
"Artistic-2.0"
] | null | null | null | p1bsl_formatter.py | WonderMr/Linter1 | f7ab00505902e5622d24ac3a48d074850cd67193 | [
"Artistic-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import re
from collections import namedtuple
import subprocess
from typing import NewType
class p1parser:
    def __init__(self):
        """Set up regexes, substitution rules and canonical-spelling tables
        for formatting 1C:Enterprise (BSL) source text."""
        # Record types: a text-substitution rule (regex -> replacement) and a
        # canonical-spelling directive (UName = upper-cased, NName = normalized).
        self.rule = namedtuple("Rule" , ["Text" , "SubExp"])
        self.directive = namedtuple("Directive" , ["UName" , "NName"])
        # Right-most '=' column seen so far; used to align assignments.
        self.eq_pos = 0
        # Any run of characters that are not Latin/Cyrillic letters.
        self.re_words2plus = re.compile(r'([^a-zа-яёА-ЯЁA-Z]+)')
        # Control-flow constructs of upper-cased BSL:
        # IF/ELSIF...THEN, TRY, EXCEPT, RAISE, FOR...TO...DO, FOR EACH...IN...DO.
        self.re_if = re.compile(r'(\s*)(ИНАЧЕ)*(ЕСЛИ)\s([^ꡏ]*?)(?:ТОГДА)\s+([^ꡏ|\n]*)')
        self.re_exps = re.compile(r'(\s*)(ПОПЫТКА)\s([^ꡏ]*)')
        self.re_exps2 = re.compile(r'(\s*)(ИСКЛЮЧЕНИЕ)\s([^ꡏ]*)')
        self.re_exps3 = re.compile(r'(\s*)(ВЫЗВАТЬИСКЛЮЧЕНИЕ\s[^ꡏ]*)')
        self.re_for = re.compile(r'(\s*)(ДЛЯ)\s([^ꡏ]*?)(?:ПО)\s+([^ꡏ|\n]*)ЦИКЛ\s+([^ꡏ|\n]*)')
        self.re_for_each = re.compile(r'(\s*)(ДЛЯ КАЖДОГО)\s([^ꡏ]*?)(?:ИЗ)\s+([^ꡏ|\n]*)ЦИКЛ\s+([^ꡏ|\n]*)')
        self.bsl_txt = ""
        self.file_in_name = ""
        self.file_out_name = ""
        self.words_in = ""
        self.bsl_no_comments = ""
        self.rules = []
        self.directives = []
        self.c1_words = []
        self.rules.append(self.rule(r"\t" ," ")) # tabs into spaces
        self.rules.append(self.rule(r"[\t ]{2,}" ," ")) # collapse runs of whitespace into one space
        self.rules.append(self.rule(r"^[\t ]" ,"\n")) # whitespace on the very first line
        self.rules.append(self.rule(r"\n[\t ]" ,"\n")) # whitespace at the start of lines
        self.rules.append(self.rule(r"[\t ]*;[\t ]*" ,";")) # strip spaces to the left and right of ';'
        self.rules.append(self.rule(r"[\t ]*\)[\t ]*" ,")")) # strip spaces to the left and right of ')'
        self.rules.append(self.rule(r"[\t ]*\([\t ]*" ,"(")) # strip spaces to the left and right of '('
        self.rules.append(self.rule(r"[\t ]*\.[\t ]*" ,".")) # strip spaces to the left and right of '.'
        self.rules.append(self.rule(r"[\t ]*\.[\t ]*" ,".")) # (duplicate of the previous rule)
        self.rules.append(self.rule(r"\r*\n\r*\n" ,"\n")) # collapse blank lines
        self.rules.append(self.rule(r"(\s*\/\/.*)" ,"")) # line comments
        # Compilation directives (upper-cased -> canonical spelling).
        self.directives.append(self.directive("&НАКЛИЕНТЕ" ,"&НаКлиенте"))
        self.directives.append(self.directive("&НАСЕРВЕРЕ" ,"&НаСервере"))
        self.directives.append(self.directive("&НАСЕРВЕРЕБЕЗКОНТЕКСТА" ,"&НаСервереБезКонтекста"))
        self.directives.append(self.directive("&НАКЛИЕНТЕНАСЕРВЕРЕБЕЗКОНТЕКСТА" ,"&НаКлиентеНаСервереБезКонтекста"))
        # BSL keywords (upper-cased -> canonical spelling).
        self.c1_words.append(self.directive("ФУНКЦИЯ" ,"Функция"))
        self.c1_words.append(self.directive("ПРОЦЕДУРА" ,"Процедура"))
        self.c1_words.append(self.directive("КОНЕЦПРОЦЕДУРЫ" ,"КонецПроцедуры"))
        self.c1_words.append(self.directive("КОНЕЦФУНКЦИИ" ,"КонецФункции"))
        self.c1_words.append(self.directive("ЭКСПОРТ" ,"Экспорт"))
        self.c1_words.append(self.directive("ВОЗВРАТ" ,"Возврат"))
        self.c1_words.append(self.directive("ЕСЛИ" ,"Если"))
        self.c1_words.append(self.directive("ПЕРЕМ" ,"Перем")) #
        self.c1_words.append(self.directive("ВОЗВРАТ" ,"Возврат")) #
def read_bsl(self, file_path):
self.file_in_name = file_path
self.file_out_name = file_path+".out.txt"
fpp = open(file_path, encoding="UTF-8")
self.bsl_txt = fpp.read()
self.bsl_no_comments = re.sub(r"(\s*\/\/.*)", "", self.bsl_txt) # нужно для подготовки следующего пункта
self.bsl_no_comments = self.re_words2plus.sub("\n", self.bsl_no_comments)
self.words_in = self.bsl_no_comments.split("\n") # Сохраню написание всех слов
self.bsl_txt = self.bsl_txt.upper()
fpp.close()
return
def write_bsl(self):
fpp = open(self.file_out_name , mode="w", encoding="UTF-8")
fpp.write(self.bsl_txt)
fpp.close()
return
def give_spaces(self, count):
ret = ""
for z in range(count):
ret += " "
return ret
def preformat(self):
# предподготовка - выполняем правила замен из Rules
for rule in self.rules:
self.bsl_txt = re.sub(rule.Text, rule.SubExp, self.bsl_txt)
    def format_eqs(self):
        # Align '=' signs — this must run at the very end of formatting.
        eq_s = re.findall("\n([^\(\|][^\n\(\|]*?)(?=\=)(=)(\s+)(.*)", self.bsl_txt)
        for eq in eq_s:
            if len(eq[0]) > self.eq_pos:
                self.eq_pos = len(eq[0]) # track the right-most '=' column
        for eq in eq_s: # align every assignment to that column
            spaces = self.give_spaces(self.eq_pos - len(eq[0]))
            # NOTE(review): str.replace substitutes ALL occurrences of the
            # matched fragment, not just this line — duplicates may be
            # rewritten more than once.
            self.bsl_txt = self.bsl_txt.replace("".join(eq), eq[0] + spaces + "= "+eq[3])
        # Align Function/Procedure headers and indented Return statements too.
        for eq in re.findall("(Функция|Процедура)(\s+)(.*)" , self.bsl_txt):
            spaces = self.give_spaces(self.eq_pos - len(eq[0]))
            self.bsl_txt = self.bsl_txt.replace("".join(eq), eq[0] + spaces + eq[2])
        for eq in re.findall(r'([\t ]{2,}Возврат)(\s+)(.*)' , self.bsl_txt):
            spaces = self.give_spaces(self.eq_pos - len(eq[0]))
            self.bsl_txt = self.bsl_txt.replace("".join(eq), eq[0] + spaces + eq[2])
        return
def trim(self, txt_in):
if txt_in == "":
return ""
ret = re.sub('\r*\n' ,' ' ,txt_in) # whitespaces
ret = re.sub('\s+' ,' ' ,ret) # whitespaces
ret = re.sub('(;\s)+' ,';' ,ret) #
ret = re.sub(r'^[\s*;\s*]+' ,'' ,ret) # точки с запятыми в начале
ret = re.sub(r'[\s*;\s*]+$' ,'' ,ret) # точки с запятыми в конце
ret = re.sub(r'^\n*' ,'' ,ret) #
ret = re.sub(r'\n*$' ,'' ,ret) #
return ret
def replace_by_array(self, rba_name, rba_arr, crlf_before = False, space_before = False, space_after = False):
space_b = " " if space_before else ""
space_a = " " if space_after else ""
if rba_name in ["", "\n"]:
return ""
rba_name = self.trim(rba_name)
for rba_elem in rba_arr:
if rba_name.find(rba_elem.UName) == 0:
if crlf_before:
return ("\n" ) + space_b + rba_elem.NName + space_a
else:
return space_b + rba_elem.NName + space_a
return rba_name
    def show_file(self, log):
        """Debug helper: dump ``log`` to the output file and open it in an
        external editor, then restore the working buffer."""
        save_bsl = self.bsl_txt
        self.bsl_txt = log
        self.write_bsl()
        self.bsl_txt = save_bsl
        # NOTE(review): hard-coded Notepad++ path — Windows-only; consider
        # making the editor configurable.
        subprocess.Popen(["c:\\Program Files\\Notepad++\\notepad++.exe", self.file_out_name], shell=True,
                         stdin=None, stdout=None, stderr=None, close_fds=True)
def give_str(self,gs_lvl):
ret_s = "@crlf"+self.give_spaces((gs_lvl)*4)
return ret_s
    def format_body(self, body_in):
        """Split a procedure/function body into statements, re-indent them,
        and expand control-flow constructs via ``find_all``."""
        lvl = 1
        ret = self.trim(body_in)+";"
        # Start carving up the body.
        lines = ret.split(";") # split into statements on ';'
        prev_line = ""
        new_lines = [] # collected, re-indented statements
        for z in range(len(lines)-1): # walk every split part
            line_in = prev_line + lines[z] # prepend any "hanging" (incomplete) line
            q_count = line_in.count('"') # number of string quotes
            obr_count = line_in.count("(") # number of opening parentheses
            obr_first = line_in.find("(") if line_in.find("(") else 0 # position of first '('
            cbr_count = line_in.count(")") # number of closing parentheses
            cbr_first = line_in.find(")") if line_in.find(")") else 0 # position of first ')'
            # A statement is complete when either:
            # 1. paren counts match and are > 0, the opener comes before the
            #    closer, and the quote count is even; or
            # 2. there are no parens at all and the quote count is even.
            if (cbr_count > 0 and obr_count == cbr_count and obr_first<cbr_first and q_count % 2 == 0)\
            or (cbr_count == 0 and obr_count == 0 and q_count % 2 == 0):
                line = self.trim(line_in)
                line = "@crlf"+ self.give_spaces(lvl*4) + line + ";"
                new_lines.append(line)
                prev_line = ""
            else:
                # Incomplete statement: carry it over to the next fragment.
                prev_line = line_in + ";"
        # Now extract If / While / For-Each constructs.
        prev_line = ""
        lines = []
        # Walk all the collected statements.
        that_level_n = 1
        in_if = False
        #========================================================
        for z in range(len(new_lines)):
            line_in = new_lines[z].replace("@crlf","") # temporarily drop the separator
            that_level_n = self.find_all(that_level_n, line_in, lines)
        ret = "".join(lines)
        ret = ret.replace("@crlf","\n")
        #self.show_file(ret)
        #print(ret)
        return ret
    def find_all(self, c_level, line, lines):
        """Recursively expand control-flow constructs found in *line*.

        Matches the instance's pre-compiled patterns (if/else, try-like
        expressions, for, for-each) against the statement, appends the
        re-indented pieces to *lines*, and returns the updated indent level.

        :param c_level: current indentation level (units of 4 spaces).
        :param line: single statement to analyse.
        :param lines: output accumulator, mutated in place.
        :return: indentation level after processing the statement.
        """
        ifs = self.re_if.findall(line)
        ifs_found = len(ifs) >0
        #ifs_found = False
        exps = self.re_exps.findall(line)
        exps_found = len(exps) >0
        #exps_found = False
        exps2 = self.re_exps2.findall(line)
        exps2_found = len(exps2) >0
        #exps2_found = False
        exps3 = self.re_exps3.findall(line)
        exps3_found = len(exps3) >0
        #exps3_found = False
        fors = self.re_for.findall(line)
        for_found = len(fors) >0
        #for_found = False
        fors_each = self.re_for_each.findall(line)
        fors_each_found = len(fors_each) >0
        # NOTE(review): the next line unconditionally disables the for-each
        # branch below -- confirm whether that is still intended.
        fors_each_found = False
        if ifs_found:
            for e_ifs in ifs:
                # "ИНАЧЕ" (else) dedents before being emitted
                if(e_ifs[1] == "ИНАЧЕ"):
                    c_level -= 1
                lines.append(self.give_str(c_level) + e_ifs[1]+e_ifs[2])
                exp3 = e_ifs[3]
                lines.append(self.give_str(c_level+1) + e_ifs[3])
                lines.append(self.give_str(c_level) + "ТОГДА")
                this_line = e_ifs[4]
                if(e_ifs[1] == "ИНАЧЕ"):
                    c_level += 1
                    c_level = self.find_all(c_level , this_line, lines)
                else:
                    c_level = self.find_all(c_level + 1, this_line, lines)
        elif for_found:
            for e_for in fors:
                exp1 = e_for[1]
                exp2 = e_for[2]
                lines.append(self.give_str(c_level) + exp1 + " " + exp2)
                exp3 = e_for[3]
                exp4 = e_for[4]
                lines.append(self.give_str(c_level) + "ПО " + exp3)
                lines.append(self.give_str(c_level) + "ЦИКЛ")
                c_level = self.find_all(c_level + 1, exp4, lines)
        elif fors_each_found:
            for e_for_e in fors_each:
                exp1 = e_for_e[1]
                exp2 = e_for_e[2]
                lines.append(self.give_str(c_level) + exp1 + " " + exp2)
                exp3 = e_for_e[3]
                exp4 = e_for_e[4]
                lines.append(self.give_str(c_level) + "ИЗ " + exp3)
                lines.append(self.give_str(c_level) + "ЦИКЛ")
                c_level = self.find_all(c_level + 1, exp4, lines)
        elif exps_found:
            for exp in exps:
                exp1 = exp[1]
                exp2 = exp[2]
                lines.append(self.give_str(c_level) + exp1)
                c_level += 1
                lines.append(self.give_str(c_level) + exp2)
        elif exps2_found and not exps3_found:
            for exp in exps2:
                exp1 = exp[1]
                exp2 = exp[2]
                lines.append(self.give_str(c_level-1) + exp1)
                lines.append(self.give_str(c_level) + exp2)
        elif exps3_found:
            for exp in exps3:
                exp1 = exp[1]
                lines.append(self.give_str(c_level) + exp1)
        else:
            # Plain statement: check for block terminators, which dedent.
            test_line = line.replace(" ","").replace(";","")
            if test_line == "КОНЕЦЕСЛИ":
                c_level -= 1
                lines.append(self.give_str(c_level) + test_line + ";")
            elif test_line=="КОНЕЦПОПЫТКИ":
                c_level -= 1
                lines.append(self.give_str(c_level) + test_line + ";")
            elif test_line=="КОНЕЦЦИКЛА":
                c_level -= 1
                lines.append(self.give_str(c_level) + test_line + ";")
            else:
                lines.append(self.give_str(c_level) +line.lstrip())
        return c_level
def num_diffs(self, s):
count = 0
prev = None
for ch in s:
if prev is not None and prev!=ch:
count += 1
prev = ch
return count
def decapitalize(self):
local_bsl = self.bsl_txt
words_txt = self.re_words2plus.sub("\n",local_bsl)
words = words_txt.split("\n")
word_in_count = len(self.words_in)
word_count = len(words)
if not (word_count == word_in_count):
print("Количество входящих слов = " + str(word_in_count) + "\nКоличество исходящих слов = " + str(word_count) + "\nЭто нудопустимо")
for z in range(word_count):
try:
old_w_u = self.words_in[z-1].upper()
new_w_u = words[z-1].upper()
except(Exception):
pass#print(str(.message()))
for i in range(len(words)):
new_w = words[i]
new_w_c = new_w.lower().capitalize()
old_w = self.words_in[i] if i< len(self.words_in) else ""
if not new_w == old_w:
if not old_w[:1].isupper():
old_w = old_w.capitalize()
#print("Различается написание "+ old_w + " и " + new_w)
local_bsl = local_bsl.replace(new_w, old_w, 1)
self.bsl_txt = local_bsl
    def format_func(self):
        """Locate every function/procedure in the buffer and reformat it in place."""
        # NOTE(review): assigning True to re.IGNORECASE / re.UNICODE /
        # re.MULTILINE does NOT enable those flags for re.findall below -- it
        # merely rebinds the module-level constants.  Flags must be passed to
        # the call itself; confirm whether flagged matching was intended.
        re.IGNORECASE = True
        re.UNICODE = True
        re.MULTILINE = True
        funcs = re.findall(r'((\&НА\w+\s*\n|\n)(?=ФУНКЦИЯ|ПРОЦЕДУРА)(ФУНКЦИЯ|ПРОЦЕДУРА)\s+([\w\dА-Яа-я\_]+)(\(.*\));*(ЭКСПОРТ)*([^ꡏ]*?)(?=КОНЕЦФУНКЦИИ|КОНЕЦПРОЦЕДУРЫ)(КОНЕЦФУНКЦИИ|КОНЕЦПРОЦЕДУРЫ))'
        ,self.bsl_txt)
        #print(funcs[0][0])
        #sys.exit
        for e_func in funcs:
            full_text = e_func[0] # the whole match
            run_at = self.replace_by_array(e_func[1], self.directives, crlf_before = False) # compiler-directive handling
            def_func = self.replace_by_array(e_func[2], self.c1_words, crlf_before = True, space_after= True) # "Function" or "Procedure" keyword
            name_offset = self.eq_pos - len(def_func)
            def_name = e_func[3]
            func_param = e_func[4]
            func_exp = self.replace_by_array(e_func[5], self.c1_words, space_before = True) # "Export" keyword, if present
            func_body = self.format_body(e_func[6])
            end_func = self.replace_by_array(e_func[7], self.c1_words, crlf_before = True) # closing "EndFunction"/"EndProcedure"
            new_iter = "\n"\
                + run_at\
                + def_func\
                + self.give_spaces(name_offset+5)\
                + def_name\
                + func_param\
                + func_exp\
                + func_body\
                + end_func
            self.show_file(new_iter)
            self.bsl_txt = self.bsl_txt.replace(full_text , new_iter)
    def process_bsl(self, file_path):
        """Run the full formatting pipeline on one .bsl source file."""
        self.read_bsl(file_path)
        self.preformat() # pre-pass: apply the substitution rules from Rules
        self.format_func()
        self.format_eqs() # align '=' signs; this has to run last
        #self.show_file(self.bsl_txt)
        self.decapitalize()
        #self.show_file(self.bsl_no_comments)
        self.show_file(self.bsl_txt)
# Process every .bsl file in the current working directory.
p1 = p1parser()
cwd1 = os.getcwd()
for file in os.listdir(cwd1):
    # Idiom fix: case-insensitive suffix check via lower() instead of the
    # misleading capitalize() (which only lower-cases from the 2nd char on).
    if file.lower().endswith(".bsl"):
        p1.process_bsl(os.path.join(cwd1, file))
7f0316dc67c441582810e7ff6fb783c0825f64ba | 2,010 | py | Python | server/__init__.py | jbcurtin/datsu-panic | aa3706cf2c9c7b74d15e94b3ef767a67853720f9 | [
"MIT"
] | null | null | null | server/__init__.py | jbcurtin/datsu-panic | aa3706cf2c9c7b74d15e94b3ef767a67853720f9 | [
"MIT"
] | null | null | null | server/__init__.py | jbcurtin/datsu-panic | aa3706cf2c9c7b74d15e94b3ef767a67853720f9 | [
"MIT"
] | null | null | null | import asyncio
import logging
import time
from functools import partial
from signal import SIGINT, SIGTERM
from panic import \
datatypes as panic_datatypes
logger = logging.getLogger(__name__)
current_time = None
def update_current_time(loop):
    """Refresh the module-level cached wall-clock time, then re-arm itself.

    The cached value is read at the end of every keep-alive request to update
    the request timeout, so it only needs ~1 second resolution.

    :param loop: event loop used to schedule the next refresh.
    :return: None
    """
    global current_time
    current_time = time.time()
    # Re-schedule ourselves so the cache stays roughly one second fresh.
    refresh = partial(update_current_time, loop)
    loop.call_later(1, refresh)
def serve(params: panic_datatypes.ServerParams) -> None:
    """Run a single-process HTTP server on ``params.loop`` until stopped.

    Starts the server coroutine, kicks off the one-second
    ``update_current_time`` ticker, installs SIGINT/SIGTERM handlers that stop
    the loop, and on shutdown drains open connections before closing the loop.

    :param params: bundle of host/port, protocol factory, loop, connection
        set, stop signal and handlers (see ``panic.datatypes.ServerParams``).
    """
    logger.info(f'Goin\' Fast @ http://{params.host}:{params.port}')
    # The protocol factory receives the whole params bundle on each connection.
    server = partial(params.protocol, params)
    # params.protocol,
    # para
    # loop=params.loop,
    # connections=params.connections,
    # signal=params.signal,
    # request_handler=params.request_handler,
    # error_handler=params.error_handler,
    # request_timeout=params.request_timeout
    #)
    server_coroutine = params.loop.create_server(
        server,
        host=params.host,
        port=params.port,
        reuse_port=params.reuse_port)
        #sock=params.sock)
    params.loop.call_soon(partial(update_current_time, params.loop))
    try:
        http_server = params.loop.run_until_complete(server_coroutine)
    except Exception:
        logger.exception("Unable to start server")
        return
    # Register signals for graceful termination
    for _signal in (SIGINT, SIGTERM):
        params.loop.add_signal_handler(_signal, params.loop.stop)
    try:
        params.loop.run_forever()
    finally:
        logger.info("Stop requested, draining connections...")
        # Wait for event loop to finish and all connections to drain
        http_server.close()
        params.loop.run_until_complete(http_server.wait_closed())
        # Complete all tasks on the loop
        params.signal.stopped = True
        for connection in params.connections:
            connection.close_if_idle()
        while params.connections:
            params.loop.run_until_complete(asyncio.sleep(0.1))
        params.loop.close()
| 25.443038 | 77 | 0.726866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 664 | 0.330348 |
7f03b804ac44c2d020cee4637a88e0c658251515 | 47,593 | py | Python | analysis/scripts/gmm.py | sbooeshaghi/azucar | 0ced041aa9cfa52593109f79794ac6009adf909a | [
"BSD-2-Clause"
] | null | null | null | analysis/scripts/gmm.py | sbooeshaghi/azucar | 0ced041aa9cfa52593109f79794ac6009adf909a | [
"BSD-2-Clause"
] | null | null | null | analysis/scripts/gmm.py | sbooeshaghi/azucar | 0ced041aa9cfa52593109f79794ac6009adf909a | [
"BSD-2-Clause"
] | null | null | null | # this file contains _base.py and _gaussian_mixture.py
# @title sklearn/mixture/_base.py
"""Base class for mixture models."""
# sklearn/mixture/_base.py
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import numpy as np
from scipy.special import logsumexp
from sklearn import cluster
from sklearn.base import BaseEstimator
from sklearn.base import DensityMixin
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_is_fitted
def _check_shape(param, param_shape, name):
    """Raise ``ValueError`` unless *param* has exactly *param_shape*.

    Parameters
    ----------
    param : array
        Value to validate (converted with ``np.array`` first).
    param_shape : tuple
        Required shape.
    name : str
        Parameter name used in the error message.
    """
    arr = np.array(param)
    if arr.shape == param_shape:
        return
    raise ValueError(
        "The parameter '%s' should have the shape of %s, but got %s"
        % (name, param_shape, arr.shape)
    )
class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for mixture models.
    This abstract class specifies an interface for all mixture classes and
    provides basic common methods for mixture models.

    NOTE: this is a vendored copy of scikit-learn's ``BaseMixture`` extended
    with an optional ``B`` argument (presumably per-component marker-gene
    indices -- TODO confirm with callers) and an optional caller-supplied
    ``resp``; both are threaded through ``fit_predict`` ->
    ``_initialize_parameters`` -> ``_initialize`` / ``_m_step``.
    """
    def __init__(
        self,
        n_components,
        tol,
        reg_covar,
        max_iter,
        n_init,
        init_params,
        random_state,
        warm_start,
        verbose,
        verbose_interval,
    ):
        self.n_components = n_components
        self.tol = tol
        self.reg_covar = reg_covar
        self.max_iter = max_iter
        self.n_init = n_init
        self.init_params = init_params
        self.random_state = random_state
        self.warm_start = warm_start
        self.verbose = verbose
        self.verbose_interval = verbose_interval
    def _check_initial_parameters(self, X):
        """Check values of the basic parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        """
        if self.n_components < 1:
            raise ValueError(
                "Invalid value for 'n_components': %d "
                "Estimation requires at least one component" % self.n_components
            )
        if self.tol < 0.0:
            raise ValueError(
                "Invalid value for 'tol': %.5f "
                "Tolerance used by the EM must be non-negative" % self.tol
            )
        if self.n_init < 1:
            raise ValueError(
                "Invalid value for 'n_init': %d Estimation requires at least one run"
                % self.n_init
            )
        if self.max_iter < 1:
            raise ValueError(
                "Invalid value for 'max_iter': %d "
                "Estimation requires at least one iteration" % self.max_iter
            )
        if self.reg_covar < 0.0:
            raise ValueError(
                "Invalid value for 'reg_covar': %.5f "
                "regularization on covariance must be "
                "non-negative" % self.reg_covar
            )
        # Check all the parameters values of the derived class
        self._check_parameters(X)
    @abstractmethod
    def _check_parameters(self, X):
        """Check initial parameters of the derived class.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        """
        pass
    def _initialize_parameters(self, X, random_state, B=None, resp=None):
        """Initialize the model parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        random_state : RandomState
            A random number generator instance that controls the random seed
            used for the method chosen to initialize the parameters.
        B : optional, vendored extension
            Forwarded unchanged to ``_initialize``.
        resp : array-like of shape (n_samples, n_components), optional
            Vendored extension: pre-computed responsibilities used instead of
            random ones when ``init_params == "random"``.
        """
        n_samples, _ = X.shape
        if self.init_params == "kmeans":
            resp = np.zeros((n_samples, self.n_components))
            label = (
                cluster.KMeans(
                    n_clusters=self.n_components, n_init=1, random_state=random_state
                )
                .fit(X)
                .labels_
            )
            resp[np.arange(n_samples), label] = 1
        elif self.init_params == "random":
            # Vendored change: a caller-supplied resp short-circuits the
            # random draw.
            if resp is None:
                resp = random_state.rand(n_samples, self.n_components)
                resp /= resp.sum(axis=1)[:, np.newaxis]
        else:
            raise ValueError(
                "Unimplemented initialization method '%s'" % self.init_params
            )
        self._initialize(X, resp, B)
    @abstractmethod
    def _initialize(self, X, resp, B=None):
        """Initialize the model parameters of the derived class.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        resp : array-like of shape (n_samples, n_components)
        B : optional, vendored extension passed through from
            ``_initialize_parameters``.
        """
        pass
    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.
        The method fits the model ``n_init`` times and sets the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for ``max_iter``
        times until the change of likelihood or lower bound is less than
        ``tol``, otherwise, a ``ConvergenceWarning`` is raised.
        If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single
        initialization is performed upon the first call. Upon consecutive
        calls, training starts where it left off.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            The fitted mixture.
        """
        self.fit_predict(X, y)
        return self
    def fit_predict(self, X, y=None, B=None, resp=None):
        """Estimate model parameters using X and predict the labels for X.
        The method fits the model n_init times and sets the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for `max_iter`
        times until the change of likelihood or lower bound is less than
        `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is
        raised. After fitting, it predicts the most probable label for the
        input data points.
        .. versionadded:: 0.20
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        y : Ignored
            Not used, present for API consistency by convention.
        B : optional, vendored extension
            Forwarded to ``_initialize_parameters`` and ``_m_step``.
        resp : array-like of shape (n_samples, n_components), optional
            Vendored extension: initial responsibilities (see
            ``_initialize_parameters``).
        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_min_samples=2)
        if X.shape[0] < self.n_components:
            raise ValueError(
                "Expected n_samples >= n_components "
                f"but got n_components = {self.n_components}, "
                f"n_samples = {X.shape[0]}"
            )
        self._check_initial_parameters(X)
        # if we enable warm_start, we will have a unique initialisation
        do_init = not (self.warm_start and hasattr(self, "converged_"))
        n_init = self.n_init if do_init else 1
        max_lower_bound = -np.inf
        self.converged_ = False
        random_state = check_random_state(self.random_state)
        n_samples, _ = X.shape
        for init in range(n_init):
            self._print_verbose_msg_init_beg(init)
            if do_init:
                self._initialize_parameters(X, random_state, B=B, resp=resp)
            lower_bound = -np.inf if do_init else self.lower_bound_
            for n_iter in range(1, self.max_iter + 1):
                prev_lower_bound = lower_bound
                log_prob_norm, log_resp = self._e_step(X)
                # Vendored change: B is threaded into the M-step.
                self._m_step(X, log_resp, B)
                lower_bound = self._compute_lower_bound(log_resp, log_prob_norm)
                change = lower_bound - prev_lower_bound
                self._print_verbose_msg_iter_end(n_iter, change)
                if abs(change) < self.tol:
                    self.converged_ = True
                    break
            self._print_verbose_msg_init_end(lower_bound)
            if lower_bound > max_lower_bound or max_lower_bound == -np.inf:
                max_lower_bound = lower_bound
                best_params = self._get_parameters()
                best_n_iter = n_iter
        if not self.converged_:
            warnings.warn(
                "Initialization %d did not converge. "
                "Try different init parameters, "
                "or increase max_iter, tol "
                "or check for degenerate data." % (init + 1),
                ConvergenceWarning,
            )
        self._set_parameters(best_params)
        self.n_iter_ = best_n_iter
        self.lower_bound_ = max_lower_bound
        # Always do a final e-step to guarantee that the labels returned by
        # fit_predict(X) are always consistent with fit(X).predict(X)
        # for any value of max_iter and tol (and any random_state).
        _, log_resp = self._e_step(X)
        return log_resp.argmax(axis=1)
    def _e_step(self, X):
        """E step.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        Returns
        -------
        log_prob_norm : float
            Mean of the logarithms of the probabilities of each sample in X
        log_responsibility : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
        return np.mean(log_prob_norm), log_resp
    @abstractmethod
    def _m_step(self, X, log_resp, B=None):
        """M step.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        log_resp : array-like of shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        B : optional, vendored extension passed through from ``fit_predict``.
        """
        pass
    @abstractmethod
    def _get_parameters(self):
        pass
    @abstractmethod
    def _set_parameters(self, params):
        pass
    def score_samples(self, X):
        """Compute the log-likelihood of each sample.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        log_prob : array, shape (n_samples,)
            Log-likelihood of each sample in `X` under the current model.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
    def score(self, X, y=None):
        """Compute the per-sample average log-likelihood of the given data X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_dimensions)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        log_likelihood : float
            Log-likelihood of `X` under the Gaussian mixture model.
        """
        return self.score_samples(X).mean()
    def predict(self, X):
        """Predict the labels for the data samples in X using trained model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        return self._estimate_weighted_log_prob(X).argmax(axis=1)
    def predict_proba(self, X):
        """Evaluate the components' density for each sample.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        resp : array, shape (n_samples, n_components)
            Density of each Gaussian component for each sample in X.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False)
        _, log_resp = self._estimate_log_prob_resp(X)
        return np.exp(log_resp)
    def sample(self, n_samples=1):
        """Generate random samples from the fitted Gaussian distribution.
        Parameters
        ----------
        n_samples : int, default=1
            Number of samples to generate.
        Returns
        -------
        X : array, shape (n_samples, n_features)
            Randomly generated sample.
        y : array, shape (nsamples,)
            Component labels.
        """
        check_is_fitted(self)
        if n_samples < 1:
            raise ValueError(
                "Invalid value for 'n_samples': %d . The sampling requires at "
                "least one sample." % (self.n_components)
            )
        _, n_features = self.means_.shape
        rng = check_random_state(self.random_state)
        n_samples_comp = rng.multinomial(n_samples, self.weights_)
        if self.covariance_type == "full":
            X = np.vstack(
                [
                    rng.multivariate_normal(mean, covariance, int(sample))
                    for (mean, covariance, sample) in zip(
                        self.means_, self.covariances_, n_samples_comp
                    )
                ]
            )
        elif self.covariance_type == "tied":
            X = np.vstack(
                [
                    rng.multivariate_normal(mean, self.covariances_, int(sample))
                    for (mean, sample) in zip(self.means_, n_samples_comp)
                ]
            )
        else:
            X = np.vstack(
                [
                    mean + rng.randn(sample, n_features) * np.sqrt(covariance)
                    for (mean, covariance, sample) in zip(
                        self.means_, self.covariances_, n_samples_comp
                    )
                ]
            )
        y = np.concatenate(
            [np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)]
        )
        return (X, y)
    def _estimate_weighted_log_prob(self, X):
        """Estimate the weighted log-probabilities, log P(X | Z) + log weights.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        Returns
        -------
        weighted_log_prob : array, shape (n_samples, n_component)
        """
        return self._estimate_log_prob(X) + self._estimate_log_weights()
    @abstractmethod
    def _estimate_log_weights(self):
        """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
        Returns
        -------
        log_weight : array, shape (n_components, )
        """
        pass
    @abstractmethod
    def _estimate_log_prob(self, X):
        """Estimate the log-probabilities log P(X | Z).
        Compute the log-probabilities per each component for each sample.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        Returns
        -------
        log_prob : array, shape (n_samples, n_component)
        """
        pass
    def _estimate_log_prob_resp(self, X):
        """Estimate log probabilities and responsibilities for each sample.
        Compute the log probabilities, weighted log probabilities per
        component and responsibilities for each sample in X with respect to
        the current state of the model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        Returns
        -------
        log_prob_norm : array, shape (n_samples,)
            log p(X)
        log_responsibilities : array, shape (n_samples, n_components)
            logarithm of the responsibilities
        """
        weighted_log_prob = self._estimate_weighted_log_prob(X)
        log_prob_norm = logsumexp(weighted_log_prob, axis=1)
        with np.errstate(under="ignore"):
            # ignore underflow
            log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
        return log_prob_norm, log_resp
    def _print_verbose_msg_init_beg(self, n_init):
        """Print verbose message on initialization."""
        if self.verbose == 1:
            print("Initialization %d" % n_init)
        elif self.verbose >= 2:
            print("Initialization %d" % n_init)
            self._init_prev_time = time()
            self._iter_prev_time = self._init_prev_time
    def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
        """Print verbose message on initialization."""
        if n_iter % self.verbose_interval == 0:
            if self.verbose == 1:
                print("  Iteration %d" % n_iter)
            elif self.verbose >= 2:
                cur_time = time()
                print(
                    "  Iteration %d\t time lapse %.5fs\t ll change %.5f"
                    % (n_iter, cur_time - self._iter_prev_time, diff_ll)
                )
                self._iter_prev_time = cur_time
    def _print_verbose_msg_init_end(self, ll):
        """Print verbose message on the end of iteration."""
        if self.verbose == 1:
            print("Initialization converged: %s" % self.converged_)
        elif self.verbose >= 2:
            print(
                "Initialization converged: %s\t time lapse %.5fs\t ll %.5f"
                % (self.converged_, time() - self._init_prev_time, ll)
            )
# @title sklearn/mixture/_gaussian_mixture.py
"""Gaussian Mixture Model."""
# sklearn/mixture/_gaussian_mixture.py
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
# from ._base import BaseMixture, _check_shape these come from cell above
from sklearn.utils import check_array
from sklearn.utils.extmath import row_norms
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
    """Check the user provided 'weights'.
    Parameters
    ----------
    weights : array-like of shape (n_components,)
        The proportions of components of each mixture.
    n_components : int
        Number of components.
    Returns
    -------
    weights : array, shape (n_components,)
    """
    weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False)
    _check_shape(weights, (n_components,), "weights")
    # check range
    if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)):
        # Bug fix: the message names "max value" first, so pass np.max first
        # (the original passed (min, max), mislabelling both numbers).
        raise ValueError(
            "The parameter 'weights' should be in the range "
            "[0, 1], but got max value %.5f, min value %.5f"
            % (np.max(weights), np.min(weights))
        )
    # check normalization
    if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0):
        raise ValueError(
            "The parameter 'weights' should be normalized, but got sum(weights) = %.5f"
            % np.sum(weights)
        )
    return weights
def _check_means(means, n_components, n_features):
    """Validate the provided 'means'.
    Parameters
    ----------
    means : array-like of shape (n_components, n_features)
        The centers of the current components.
    n_components : int
        Number of components.
    n_features : int
        Number of features.
    Returns
    -------
    means : array, (n_components, n_features)
    """
    checked_means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
    _check_shape(checked_means, (n_components, n_features), "means")
    return checked_means
def _check_precision_positivity(precision, covariance_type):
    """Raise if any entry of a precision vector is not strictly positive."""
    if (np.asarray(precision) <= 0.0).any():
        raise ValueError("'%s precision' should be positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
    """Check a precision matrix is symmetric and positive-definite."""
    is_symmetric = np.allclose(precision, precision.T)
    # eigvalsh is only evaluated for symmetric matrices (same short-circuit
    # as `a and b`).
    if is_symmetric and np.all(linalg.eigvalsh(precision) > 0.0):
        return
    raise ValueError(
        "'%s precision' should be symmetric, positive-definite" % covariance_type
    )
def _check_precisions_full(precisions, covariance_type):
    """Check that every precision matrix is symmetric and positive-definite."""
    for single_precision in precisions:
        _check_precision_matrix(single_precision, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
    """Validate user provided precisions.
    Parameters
    ----------
    precisions : array-like
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)
    covariance_type : str
    n_components : int
        Number of components.
    n_features : int
        Number of features.
    Returns
    -------
    precisions : array
    """
    precisions = check_array(
        precisions,
        dtype=[np.float64, np.float32],
        ensure_2d=False,
        allow_nd=covariance_type == "full",
    )
    # Expected shape per covariance type.
    expected_shapes = {
        "full": (n_components, n_features, n_features),
        "tied": (n_features, n_features),
        "diag": (n_components, n_features),
        "spherical": (n_components,),
    }
    _check_shape(
        precisions, expected_shapes[covariance_type], "%s precision" % covariance_type
    )
    # Dispatch to the type-specific validator (named `validators` here to
    # avoid shadowing this function's own name with a local).
    validators = {
        "full": _check_precisions_full,
        "tied": _check_precision_matrix,
        "diag": _check_precision_positivity,
        "spherical": _check_precision_positivity,
    }
    validators[covariance_type](precisions, covariance_type)
    return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
    """Estimate one full covariance matrix per component.
    Parameters
    ----------
    resp : array-like of shape (n_samples, n_components)
    X : array-like of shape (n_samples, n_features)
    nk : array-like of shape (n_components,)
    means : array-like of shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    covariances : array, shape (n_components, n_features, n_features)
        The covariance matrix of the current components.
    """
    n_components, n_features = means.shape
    covariances = np.empty((n_components, n_features, n_features))
    for k, mean in enumerate(means):
        centered = X - mean
        cov_k = np.dot(resp[:, k] * centered.T, centered) / nk[k]
        # Regularize the diagonal in place to keep the matrix well-conditioned.
        cov_k.flat[:: n_features + 1] += reg_covar
        covariances[k] = cov_k
    return covariances
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
    """Estimate the single covariance matrix shared by all components.
    Parameters
    ----------
    resp : array-like of shape (n_samples, n_components)
    X : array-like of shape (n_samples, n_features)
    nk : array-like of shape (n_components,)
    means : array-like of shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    covariance : array, shape (n_features, n_features)
        The tied covariance matrix of the components.
    """
    second_moment = np.dot(X.T, X)
    weighted_means_outer = np.dot(nk * means.T, means)
    covariance = (second_moment - weighted_means_outer) / nk.sum()
    # Add the regularization to the diagonal.
    covariance.flat[:: len(covariance) + 1] += reg_covar
    return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
    """Estimate one diagonal covariance vector per component.
    Parameters
    ----------
    responsibilities : array-like of shape (n_samples, n_components)
    X : array-like of shape (n_samples, n_features)
    nk : array-like of shape (n_components,)
    means : array-like of shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    covariances : array, shape (n_components, n_features)
        The covariance vector of the current components.
    """
    denom = nk[:, np.newaxis]
    avg_X2 = np.dot(resp.T, X * X) / denom
    avg_X_means = means * np.dot(resp.T, X) / denom
    # E[x^2] - 2 E[x] mu + mu^2  (+ regularization) per feature.
    return avg_X2 - 2 * avg_X_means + means**2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
    """Estimate one scalar variance per component.
    Parameters
    ----------
    responsibilities : array-like of shape (n_samples, n_components)
    X : array-like of shape (n_samples, n_features)
    nk : array-like of shape (n_components,)
    means : array-like of shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    variances : array, shape (n_components,)
        The variance values of each components.
    """
    # The spherical variance is the mean of the per-feature diagonal variances.
    diag_variances = _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar)
    return diag_variances.mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type, B=None):
    """Estimate the Gaussian distribution parameters.
    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data array.
    resp : array-like of shape (n_samples, n_components)
        The responsibilities for each data sample in X.
    reg_covar : float
        The regularization added to the diagonal of the covariance matrices.
    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.
    B : sequence, optional
        Vendored extension: B[k] is presumably the marker-gene (feature)
        indices of component k -- TODO confirm with callers.  When given,
        every non-marker entry of the estimated means is zeroed out below.
    Returns
    -------
    nk : array-like of shape (n_components,)
        The numbers of data samples in the current components.
    means : array-like of shape (n_components, n_features)
        The centers of the current components.
    covariances : array-like
        The covariance matrix of the current components.
        The shape depends of the covariance_type.
    """
    # print("Doing the thing..")
    nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
    means = np.dot(resp.T, X) / nk[:, np.newaxis]
    # frankie
    # get the mins for the marker genes
    # ct_mins = [means[:, i].min() for i in B]
    # NOTE(review): the assignment below is immediately overwritten two lines
    # later -- dead code (and it raises TypeError when B is None).
    marker_gene_indices = [set(np.where(i)[0]) for i in B]
    ct_mins = [means[i][B[i]].min() for i in range(means.shape[0])]
    marker_gene_indices = [set(B[i]) for i in range(means.shape[0])]
    # modify based on the min/f
    f = 2.0  # NOTE(review): unused while `new` below is hard-coded to 0
    for idx, i in enumerate(means):
        ct_min = ct_mins[idx]  # NOTE(review): unused for the same reason
        betas = means[idx]
        for jdx, b in enumerate(betas):
            if jdx not in marker_gene_indices[idx]:
                # Zero out every non-marker mean entry for this component.
                new = 0  # min(b, ct_min / f)
                means[idx][jdx] = new
    covariances = {
        "full": _estimate_gaussian_covariances_full,
        "tied": _estimate_gaussian_covariances_tied,
        "diag": _estimate_gaussian_covariances_diag,
        "spherical": _estimate_gaussian_covariances_spherical,
    }[covariance_type](resp, X, nk, means, reg_covar)
    return nk, means, covariances
def _compute_precision_cholesky(covariances, covariance_type):
    """Compute the Cholesky decomposition of the precisions.
    Parameters
    ----------
    covariances : array-like
        The covariance matrix of the current components.
        The shape depends of the covariance_type.
    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.
    Returns
    -------
    precisions_cholesky : array-like
        The cholesky decomposition of sample precisions of the current
        components. The shape depends of the covariance_type.
    """
    failure_message = (
        "Fitting the mixture model failed because some components have "
        "ill-defined empirical covariance (for instance caused by singleton "
        "or collapsed samples). Try to decrease the number of components, "
        "or increase reg_covar."
    )
    if covariance_type == "full":
        n_components, n_features, _ = covariances.shape
        precisions_chol = np.empty((n_components, n_features, n_features))
        identity = np.eye(n_features)
        for k, covariance in enumerate(covariances):
            try:
                cov_chol = linalg.cholesky(covariance, lower=True)
            except linalg.LinAlgError:
                raise ValueError(failure_message)
            # Invert the Cholesky factor by triangular solve against I.
            precisions_chol[k] = linalg.solve_triangular(
                cov_chol, identity, lower=True
            ).T
    elif covariance_type == "tied":
        _, n_features = covariances.shape
        try:
            cov_chol = linalg.cholesky(covariances, lower=True)
        except linalg.LinAlgError:
            raise ValueError(failure_message)
        precisions_chol = linalg.solve_triangular(
            cov_chol, np.eye(n_features), lower=True
        ).T
    else:
        # diag / spherical: element-wise inverse square root.
        if np.any(np.less_equal(covariances, 0.0)):
            raise ValueError(failure_message)
        precisions_chol = 1.0 / np.sqrt(covariances)
    return precisions_chol
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
The determinant of the precision matrix for each component.
"""
if covariance_type == "full":
n_components, _, _ = matrix_chol.shape
log_det_chol = np.sum(
np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1
)
elif covariance_type == "tied":
log_det_chol = np.sum(np.log(np.diag(matrix_chol)))
elif covariance_type == "diag":
log_det_chol = np.sum(np.log(matrix_chol), axis=1)
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
    """Estimate the log Gaussian probability.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
    means : array-like of shape (n_components, n_features)
    precisions_chol : array-like
        Cholesky decompositions of the precision matrices.
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)
    covariance_type : {'full', 'tied', 'diag', 'spherical'}

    Returns
    -------
    log_prob : array, shape (n_samples, n_components)
    """
    n_samples, n_features = X.shape
    n_components = means.shape[0]
    # The log-det of the precision Cholesky factor is half the log-det of
    # the full precision matrix.
    log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features)
    if covariance_type == "full":
        log_prob = np.empty((n_samples, n_components))
        for k in range(n_components):
            prec_chol = precisions_chol[k]
            diff = np.dot(X, prec_chol) - np.dot(means[k], prec_chol)
            log_prob[:, k] = np.sum(np.square(diff), axis=1)
    elif covariance_type == "tied":
        log_prob = np.empty((n_samples, n_components))
        for k in range(n_components):
            diff = np.dot(X, precisions_chol) - np.dot(means[k], precisions_chol)
            log_prob[:, k] = np.sum(np.square(diff), axis=1)
    elif covariance_type == "diag":
        precisions = np.square(precisions_chol)
        # Expanded quadratic form: ||(x - mu) * sqrt(prec)||^2 per component.
        log_prob = (
            np.sum(np.square(means) * precisions, axis=1)
            - 2.0 * np.dot(X, (means * precisions).T)
            + np.dot(np.square(X), precisions.T)
        )
    else:  # 'spherical'
        precisions = np.square(precisions_chol)
        log_prob = (
            np.sum(np.square(means), axis=1) * precisions
            - 2.0 * np.dot(X, means.T * precisions)
            + np.outer(row_norms(X, squared=True), precisions)
        )
    return -0.5 * (n_features * np.log(2.0 * np.pi) + log_prob) + log_det
class ImprovedGaussianMixture(BaseMixture):
    """Gaussian Mixture.
    Representation of a Gaussian mixture model probability distribution.
    This class allows to estimate the parameters of a Gaussian mixture
    distribution.
    Read more in the :ref:`User Guide <gmm>`.
    .. versionadded:: 0.18
    Parameters
    ----------
    n_components : int, default=1
        The number of mixture components.
    covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
        String describing the type of covariance parameters to use.
        Must be one of:
        'full'
            each component has its own general covariance matrix
        'tied'
            all components share the same general covariance matrix
        'diag'
            each component has its own diagonal covariance matrix
        'spherical'
            each component has its own single variance
    tol : float, default=1e-3
        The convergence threshold. EM iterations will stop when the
        lower bound average gain is below this threshold.
    reg_covar : float, default=1e-6
        Non-negative regularization added to the diagonal of covariance.
        Allows to assure that the covariance matrices are all positive.
    max_iter : int, default=100
        The number of EM iterations to perform.
    n_init : int, default=1
        The number of initializations to perform. The best results are kept.
    init_params : {'kmeans', 'random'}, default='kmeans'
        The method used to initialize the weights, the means and the
        precisions.
        Must be one of::
        'kmeans' : responsibilities are initialized using kmeans.
        'random' : responsibilities are initialized randomly.
    weights_init : array-like of shape (n_components, ), default=None
        The user-provided initial weights.
        If it is None, weights are initialized using the `init_params` method.
    means_init : array-like of shape (n_components, n_features), default=None
        The user-provided initial means,
        If it is None, means are initialized using the `init_params` method.
    precisions_init : array-like, default=None
        The user-provided initial precisions (inverse of the covariance
        matrices).
        If it is None, precisions are initialized using the 'init_params'
        method.
        The shape depends on 'covariance_type'::
        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'
    random_state : int, RandomState instance or None, default=None
        Controls the random seed given to the method chosen to initialize the
        parameters (see `init_params`).
        In addition, it controls the generation of random samples from the
        fitted distribution (see the method `sample`).
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    warm_start : bool, default=False
        If 'warm_start' is True, the solution of the last fitting is used as
        initialization for the next call of fit(). This can speed up
        convergence when fit is called several times on similar problems.
        In that case, 'n_init' is ignored and only a single initialization
        occurs upon the first call.
        See :term:`the Glossary <warm_start>`.
    verbose : int, default=0
        Enable verbose output. If 1 then it prints the current
        initialization and each iteration step. If greater than 1 then
        it prints also the log probability and the time needed
        for each step.
    verbose_interval : int, default=10
        Number of iteration done before the next print.
    Attributes
    ----------
    weights_ : array-like of shape (n_components,)
        The weights of each mixture components.
    means_ : array-like of shape (n_components, n_features)
        The mean of each mixture component.
    covariances_ : array-like
        The covariance of each mixture component.
        The shape depends on `covariance_type`::
        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'
    precisions_ : array-like
        The precision matrices for each component in the mixture. A precision
        matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite so the mixture of Gaussian can be
        equivalently parameterized by the precision matrices. Storing the
        precision matrices instead of the covariance matrices makes it more
        efficient to compute the log-likelihood of new samples at test time.
        The shape depends on `covariance_type`::
        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'
    precisions_cholesky_ : array-like
        The cholesky decomposition of the precision matrices of each mixture
        component. A precision matrix is the inverse of a covariance matrix.
        A covariance matrix is symmetric positive definite so the mixture of
        Gaussian can be equivalently parameterized by the precision matrices.
        Storing the precision matrices instead of the covariance matrices makes
        it more efficient to compute the log-likelihood of new samples at test
        time. The shape depends on `covariance_type`::
        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'
    converged_ : bool
        True when convergence was reached in fit(), False otherwise.
    n_iter_ : int
        Number of step used by the best fit of EM to reach the convergence.
    lower_bound_ : float
        Lower bound value on the log-likelihood (of the training data with
        respect to the model) of the best fit of EM.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    BayesianGaussianMixture : Gaussian mixture model fit with a variational
        inference.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.mixture import GaussianMixture
    >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
    >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X)
    >>> gm.means_
    array([[10.,  2.],
           [ 1.,  2.]])
    >>> gm.predict([[0, 0], [12, 3]])
    array([1, 0])
    """
    def __init__(
        self,
        n_components=1,
        *,
        covariance_type="full",
        tol=1e-3,
        reg_covar=1e-6,
        max_iter=100,
        n_init=1,
        init_params="kmeans",
        weights_init=None,
        means_init=None,
        precisions_init=None,
        random_state=None,
        warm_start=False,
        verbose=0,
        verbose_interval=10,
    ):
        super().__init__(
            n_components=n_components,
            tol=tol,
            reg_covar=reg_covar,
            max_iter=max_iter,
            n_init=n_init,
            init_params=init_params,
            random_state=random_state,
            warm_start=warm_start,
            verbose=verbose,
            verbose_interval=verbose_interval,
        )
        # Gaussian-specific parameters not handled by BaseMixture.
        self.covariance_type = covariance_type
        self.weights_init = weights_init
        self.means_init = means_init
        self.precisions_init = precisions_init
    def _check_parameters(self, X):
        """Check the Gaussian mixture parameters are well defined."""
        _, n_features = X.shape
        if self.covariance_type not in ["spherical", "tied", "diag", "full"]:
            raise ValueError(
                "Invalid value for 'covariance_type': %s "
                "'covariance_type' should be in "
                "['spherical', 'tied', 'diag', 'full']" % self.covariance_type
            )
        # User-provided initial parameters are validated (and normalized)
        # in place by the module-level _check_* helpers.
        if self.weights_init is not None:
            self.weights_init = _check_weights(self.weights_init, self.n_components)
        if self.means_init is not None:
            self.means_init = _check_means(
                self.means_init, self.n_components, n_features
            )
        if self.precisions_init is not None:
            self.precisions_init = _check_precisions(
                self.precisions_init,
                self.covariance_type,
                self.n_components,
                n_features,
            )
    def _initialize(self, X, resp, B=None):
        """Initialization of the Gaussian mixture parameters.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        resp : array-like of shape (n_samples, n_components)
        B : optional, forwarded to ``_estimate_gaussian_parameters``
            (its meaning is not visible in this file — TODO confirm).
        """
        n_samples, _ = X.shape
        weights, means, covariances = _estimate_gaussian_parameters(
            X, resp, self.reg_covar, self.covariance_type, B=B
        )
        # `weights` comes back as per-component responsibility mass (nk);
        # dividing by n_samples turns it into mixing proportions.
        weights /= n_samples
        self.weights_ = weights if self.weights_init is None else self.weights_init
        self.means_ = means if self.means_init is None else self.means_init
        if self.precisions_init is None:
            self.covariances_ = covariances
            self.precisions_cholesky_ = _compute_precision_cholesky(
                covariances, self.covariance_type
            )
        elif self.covariance_type == "full":
            # User supplied precisions: take the Cholesky factor directly.
            self.precisions_cholesky_ = np.array(
                [
                    linalg.cholesky(prec_init, lower=True)
                    for prec_init in self.precisions_init
                ]
            )
        elif self.covariance_type == "tied":
            self.precisions_cholesky_ = linalg.cholesky(
                self.precisions_init, lower=True
            )
        else:
            # 'diag'/'spherical': precisions are scalars per dimension;
            # the stored value is used as-is.
            self.precisions_cholesky_ = self.precisions_init
    def _m_step(self, X, log_resp, B=None):
        """M step.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        log_resp : array-like of shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        B : optional, forwarded to ``_estimate_gaussian_parameters``
            (its meaning is not visible in this file — TODO confirm).
        """
        n_samples, _ = X.shape
        self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters(
            X, np.exp(log_resp), self.reg_covar, self.covariance_type, B=B
        )
        # Normalize responsibility mass into mixing proportions.
        self.weights_ /= n_samples
        self.precisions_cholesky_ = _compute_precision_cholesky(
            self.covariances_, self.covariance_type
        )
    def _estimate_log_prob(self, X):
        # Per-sample, per-component Gaussian log-density.
        return _estimate_log_gaussian_prob(
            X, self.means_, self.precisions_cholesky_, self.covariance_type
        )
    def _estimate_log_weights(self):
        return np.log(self.weights_)
    def _compute_lower_bound(self, _, log_prob_norm):
        # For a plain GMM the EM lower bound equals the normalized
        # log-probability, so it is returned unchanged.
        return log_prob_norm
    def _get_parameters(self):
        return (
            self.weights_,
            self.means_,
            self.covariances_,
            self.precisions_cholesky_,
        )
    def _set_parameters(self, params):
        (
            self.weights_,
            self.means_,
            self.covariances_,
            self.precisions_cholesky_,
        ) = params
        # Attributes computation
        _, n_features = self.means_.shape
        # Recover the precision matrices from their Cholesky factors:
        # precision = L @ L.T (scalar square in the diagonal cases).
        if self.covariance_type == "full":
            self.precisions_ = np.empty(self.precisions_cholesky_.shape)
            for k, prec_chol in enumerate(self.precisions_cholesky_):
                self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
        elif self.covariance_type == "tied":
            self.precisions_ = np.dot(
                self.precisions_cholesky_, self.precisions_cholesky_.T
            )
        else:
            self.precisions_ = self.precisions_cholesky_**2
    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        _, n_features = self.means_.shape
        if self.covariance_type == "full":
            cov_params = self.n_components * n_features * (n_features + 1) / 2.0
        elif self.covariance_type == "diag":
            cov_params = self.n_components * n_features
        elif self.covariance_type == "tied":
            cov_params = n_features * (n_features + 1) / 2.0
        elif self.covariance_type == "spherical":
            cov_params = self.n_components
        mean_params = n_features * self.n_components
        # Weights sum to one, hence only n_components - 1 free weights.
        return int(cov_params + mean_params + self.n_components - 1)
    def bic(self, X):
        """Bayesian information criterion for the current model on the input X.
        Parameters
        ----------
        X : array of shape (n_samples, n_dimensions)
            The input samples.
        Returns
        -------
        bic : float
            The lower the better.
        """
        # BIC = -2 * log-likelihood + n_params * log(n_samples)
        return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log(
            X.shape[0]
        )
    def aic(self, X):
        """Akaike information criterion for the current model on the input X.
        Parameters
        ----------
        X : array of shape (n_samples, n_dimensions)
            The input samples.
        Returns
        -------
        aic : float
            The lower the better.
        """
        # AIC = -2 * log-likelihood + 2 * n_params
        return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
| 33.25856 | 88 | 0.605992 | 31,453 | 0.660874 | 0 | 0 | 1,680 | 0.035299 | 0 | 0 | 25,396 | 0.533608 |
7f040d97e1fdd272b5cef573caeb25422a650cb8 | 4,548 | py | Python | main.py | scottyplunkett/crayket | 6cc203e55865ff8c1cf4269c3a55e18b70a1b40f | [
"MIT"
] | 1 | 2020-08-25T15:53:05.000Z | 2020-08-25T15:53:05.000Z | main.py | scottyplunkett/crayket | 6cc203e55865ff8c1cf4269c3a55e18b70a1b40f | [
"MIT"
] | null | null | null | main.py | scottyplunkett/crayket | 6cc203e55865ff8c1cf4269c3a55e18b70a1b40f | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.uix.button import Button, Label
from kivy.properties import ListProperty, ObjectProperty
from game import Game
from player import Player
from helpers import new_targets
class PlayerMenuButtons(GridLayout):
    """2x2 grid of buttons used to pick the number of players (1-4)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.cols = 2
        # Create btn1..btn4, wire each to update(), and add them in order.
        for number in ('1', '2', '3', '4'):
            button = Button(text=number)
            button.bind(on_press=self.update)
            setattr(self, 'btn' + number, button)
            self.add_widget(button)

    def update(self, instance):
        """Forward the pressed button to the app root and refresh the menu."""
        print(instance)
        app = App.get_running_app()
        app.root.configure_players(instance)
        instance.parent.parent.show_current_players()
class PlayerMenu(BoxLayout):
    """Vertical menu: a prompt label above the player-count buttons."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        self.label = Label(text="Select # of Players:")
        self.add_widget(self.label)
        self.player_menu_buttons = PlayerMenuButtons()
        self.add_widget(self.player_menu_buttons)

    def show_current_players(self):
        """Replace the selection buttons with the current player count."""
        running_app = App.get_running_app()
        self.label.text = 'Current Players:'
        self.remove_widget(self.player_menu_buttons)
        self.current_players_label = Label(text=str(len(running_app.root.players)))
        self.add_widget(self.current_players_label)
class TurnMenu(BoxLayout):
    """Vertical menu holding the turn actions (Undo / Clear)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        for action in ('Undo', 'Clear'):
            self.add_widget(Button(text=action))
class Menu(BoxLayout):
    """Side menu combining the player menu and the turn menu, stacked."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        for section in (PlayerMenu(), TurnMenu()):
            self.add_widget(section)
class ScoreColumn(BoxLayout):
    """One score column: player name, one row per target, then a spacer."""

    def __init__(self, player, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        self.player = player
        rows = [player.name]
        rows.extend(str(target['shots']) for target in player.targets)
        rows.append("")
        for text in rows:
            self.add_widget(Label(text=text))
class TargetsColumn(BoxLayout):
    """Leftmost column listing the cricket targets (P header, 20-15, Bull)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "vertical"
        self.add_widget(Label(text="P", underline=True))
        for caption in ("20", "19", "18", "17", "16", "15", "B", ""):
            self.add_widget(Label(text=caption))
class Sheet(BoxLayout):
    """Score sheet: the targets column plus one score column per player."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "horizontal"
        self.targets = TargetsColumn()
        self.add_widget(self.targets)

    def sync_players_to_sheet(self, players):
        """Append a score column for every player in *players*."""
        for current_player in players:
            self.add_widget(ScoreColumn(current_player))
class Root(BoxLayout):
    """Top-level widget: instruction label, score sheet, and menu."""
    # Kivy properties shared across the widget tree.
    players = ListProperty([])
    # NOTE(review): Game(players) is evaluated once at class-definition time
    # and receives the ListProperty descriptor rather than an instance list —
    # confirm Game tolerates this; players are re-set via set_game_players().
    game = ObjectProperty(Game(players))
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.orientation = "horizontal"
        self.label = Label(id='instruction', text=self.game.state.value)
        self.add_widget(self.label)
        self.sheet = Sheet()
        self.add_widget(self.sheet)
        self.menu = Menu()
        self.add_widget(self.menu)
    def configure_players(self, instance):
        """Create Player objects for the chosen count and sync the sheet."""
        self.players = []
        # NOTE(review): print() does not %-format; this prints the literal
        # "%s" followed by the text — likely meant % formatting.
        print("Set number to %s", instance.text)
        number = int(instance.text) - 1
        while (len(self.players) <= number):
            name = str(len(self.players) + 1)
            self.players.append(Player(name, 0, new_targets()))
        print(self.players)
        self.sheet.sync_players_to_sheet(self.players)
        self.set_game_players()
    def set_game_players(self):
        """Hand the player list to the game and refresh the instruction label."""
        self.game.set_players(self.players)
        print(self.game.state.value)
        self.label.text = self.game.state.value
class CrayketApp(App):
    """Application shell; kivy calls build() to obtain the root widget."""
    def build(self):
        return Root()
# NOTE(review): runs at import time too; an ``if __name__ == '__main__'``
# guard would be the conventional script entry point.
CrayketApp().run()
| 31.583333 | 75 | 0.644019 | 4,178 | 0.918646 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.046394 |
7f052867dedcbc62e101a7a70ed0204af998d934 | 609 | py | Python | src/modular/funcion1.py | GokoshiJr/algoritmos2-py | 106dcbed31739309c193a77c671522aac17f6e45 | [
"MIT"
] | null | null | null | src/modular/funcion1.py | GokoshiJr/algoritmos2-py | 106dcbed31739309c193a77c671522aac17f6e45 | [
"MIT"
] | null | null | null | src/modular/funcion1.py | GokoshiJr/algoritmos2-py | 106dcbed31739309c193a77c671522aac17f6e45 | [
"MIT"
] | null | null | null | # 16. Evalúe la siguiente función matemática: f(a,b,c,d,e) = ((a! + b! + c!) / d!) + e^c / e!
# Example: a = 5, b = 1, c = 4, d = 3, e = 2 -> prints 32.17
# f(a, b, c, d, e) = ((a! + b! + c!) / d!) + e^c / e!
def factorial(numero):
    """Return the factorial of ``numero`` as an iterative product."""
    producto = 1
    for factor in range(2, numero + 1):
        producto *= factor
    return producto
def formula(a, b, c, d, e):
    """Evaluate ((a! + b! + c!) / d!) + e**c / e!."""
    primer_termino = (factorial(a) + factorial(b) + factorial(c)) / factorial(d)
    segundo_termino = pow(e, c) / factorial(e)
    return primer_termino + segundo_termino
a, b, c, d, e = 5, 1, 4, 3, 2
print('Despliega: {:.2f}'.format(formula(a, b, c, d, e)))
| 32.052632 | 94 | 0.533662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.301303 |
7f055b39eb9d9bedda5195c6c85467b444c1804f | 799 | py | Python | setup.py | Fragalli/brFinance | f06d7b148d20d07361c89158837d47225c4fea1f | [
"MIT"
] | 1 | 2021-07-19T19:12:11.000Z | 2021-07-19T19:12:11.000Z | setup.py | Fragalli/brFinance | f06d7b148d20d07361c89158837d47225c4fea1f | [
"MIT"
] | 1 | 2021-07-22T13:52:30.000Z | 2021-07-22T13:52:30.000Z | setup.py | Fragalli/brFinance | f06d7b148d20d07361c89158837d47225c4fea1f | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Package metadata for brfinance.
setup(
    name='brfinance',
    version='0.1',
    packages=find_packages(exclude=['tests*']),
    license='MIT',
    description='A Python package for webscraping financial data brazilian sources such as CVM, Banco Central, B3, ANBIMA, etc.',
    long_description=open('README.md').read(),
    # Fix: 'numpy' was listed twice in the original requirement list.
    # NOTE(review): 'configparser' ships with the Python 3 standard library —
    # confirm whether the PyPI backport is really required here.
    install_requires=['numpy', 'beautifulsoup4', 'bs4', 'certifi', 'charset-normalizer', 'colorama', 'configparser', 'crayons', 'fake-useragent',
                      'idna', 'lxml', 'pandas', 'python-dateutil', 'pytz', 'requests', 'selenium', 'six', 'soupsieve', 'urllib3', 'webdriver-manager'],
    # NOTE(review): URL points at an unrelated example repository — verify.
    url='https://github.com/BillMills/python-package-example',
    author='Eudes Rodrigo Nunes de Oliveira',
    author_email='eudesrodrigo@outlook.com'
)
| 49.9375 | 160 | 0.679599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 484 | 0.605757 |
7f08f99ac096d3d4fdb36c4caaac2056f5f43854 | 5,938 | py | Python | model/DeepLabv3plus2.py | THU-CVlab/JMedSeg | 1c9c66a1b2c6e4c5e3f70ca9e1ed54447b944755 | [
"MIT"
] | 26 | 2021-08-19T05:22:44.000Z | 2022-03-08T05:44:43.000Z | model/DeepLabv3plus2.py | Jittor/JMedSeg | 1c9c66a1b2c6e4c5e3f70ca9e1ed54447b944755 | [
"MIT"
] | null | null | null | model/DeepLabv3plus2.py | Jittor/JMedSeg | 1c9c66a1b2c6e4c5e3f70ca9e1ed54447b944755 | [
"MIT"
] | 3 | 2021-08-19T06:12:49.000Z | 2021-08-19T11:41:16.000Z | import jittor as jt
from jittor import nn
from jittor import Module
from jittor import init
from jittor.contrib import concat
from model.backbone import resnet50, resnet101
from model.backbone import res2net101
# Backbones accepted by DeepLab; anything else falls back to 'resnet101'.
Backbone_List = ['resnet50', 'resnet101', 'res2net101']
class DeepLab(Module):
    """DeepLabV3+ segmentation network: backbone + ASPP head + decoder."""

    def __init__(self, output_stride=16, num_classes=2, backbone = 'resnet101'):
        super(DeepLab, self).__init__()
        if backbone not in Backbone_List:
            print('Invalid Backbone! Initialized to resnet101')
            backbone = 'resnet101'
        if backbone == 'resnet50':
            self.backbone = resnet50(output_stride=output_stride)
        elif backbone == 'res2net101':
            self.backbone = res2net101(output_stride=output_stride)
        else:
            self.backbone = resnet101(output_stride=output_stride)
        self.backbone_name = backbone
        self.aspp = ASPP(output_stride)
        self.decoder = Decoder(num_classes)

    def execute(self, input):
        low_level_feat, _, _, x = self.backbone(input)
        x = self.decoder(self.aspp(x), low_level_feat)
        # Upsample logits back to the input resolution.
        return nn.resize(x, size=(input.shape[2], input.shape[3]), mode='bilinear')

    def get_backbone(self):
        return self.backbone

    def get_head(self):
        return [self.aspp, self.decoder]

    def get_loss(self, target, pred, ignore_index=None):
        return nn.cross_entropy_loss(pred, target, ignore_index=ignore_index)

    def update_params(self, loss, optimizer):
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
class Decoder(nn.Module):
    """DeepLabV3+ decoder: fuses low-level backbone features with ASPP output."""

    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_inplanes = 256  # mobilenet = 24, resnet / res2net = 256, xception = 128
        self.conv1 = nn.Conv(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv(256, num_classes, kernel_size=1, stride=1))

    def execute(self, x, low_level_feat):
        # Compress the skip connection to 48 channels before fusion.
        skip = self.relu(self.bn1(self.conv1(low_level_feat)))
        # Upsample the ASPP output to the skip-connection resolution.
        x = nn.resize(x, size=(skip.shape[2], skip.shape[3]), mode='bilinear')
        return self.last_conv(concat((x, skip), dim=1))
class Single_ASPPModule(Module):
    """One ASPP branch: atrous convolution -> batch norm -> ReLU."""

    def __init__(self, inplanes, planes, kernel_size, padding, dilation):
        super(Single_ASPPModule, self).__init__()
        self.atrous_conv = nn.Conv(inplanes, planes, kernel_size=kernel_size,
                                   stride=1, padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm(planes)
        self.relu = nn.ReLU()

    def execute(self, x):
        return self.relu(self.bn(self.atrous_conv(x)))
class ASPP(Module):
    """Atrous Spatial Pyramid Pooling head over the backbone output."""

    def __init__(self, output_stride):
        super(ASPP, self).__init__()
        inplanes = 2048  # resnet/res2net channels (mobilenet would be 320)
        if output_stride == 16:
            dilations = [1, 6, 12, 18]
        elif output_stride == 8:
            dilations = [1, 12, 24, 36]
        else:
            raise NotImplementedError
        self.aspp1 = Single_ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0])
        self.aspp2 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1])
        self.aspp3 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2])
        self.aspp4 = Single_ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3])
        self.global_avg_pool = nn.Sequential(
            GlobalPooling(),
            nn.Conv(inplanes, 256, 1, stride=1, bias=False),
            nn.BatchNorm(256),
            nn.ReLU())
        self.conv1 = nn.Conv(1280, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)

    def execute(self, x):
        branches = [self.aspp1(x), self.aspp2(x), self.aspp3(x), self.aspp4(x)]
        pooled = self.global_avg_pool(x)
        # Broadcast the 1x1 pooled features back to the branch resolution.
        pooled = pooled.broadcast((1, 1, branches[-1].shape[2], branches[-1].shape[3]))
        merged = concat(branches + [pooled], dim=1)
        merged = self.relu(self.bn1(self.conv1(merged)))
        return self.dropout(merged)
class GlobalPooling(Module):
    """Global average pooling over the spatial axes, keeping singleton dims."""

    def __init__(self):
        super(GlobalPooling, self).__init__()

    def execute(self, x):
        # Average over H (dim 2) and W (dim 3).
        return jt.mean(x, dims=[2, 3], keepdims=1)
def main():
    """Smoke-test the model with a dummy batch and report parameter counts."""
    model = DeepLab(backbone='resnet101')
    dummy = jt.ones([2, 3, 512, 512])
    out = model(dummy)
    print(out.shape)
    _ = out.data  # force evaluation of the lazy jittor graph
    # Count total and trainable parameters.
    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    total_trainable_params = sum(
        p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{total_trainable_params:,} training parameters.')
'''
DeepLab
59,572,610 total parameters.
59,462,946 training parameters.
'''
# Script entry point.
if __name__ == '__main__':
    main()
7f094e91b9871860a612632460408191babfa754 | 788 | py | Python | snakeoil/migrations/0001_initial.py | wbcsmarteezgithub/django-snakeoil | ae1a8dab9e14194e48963101ff3349f45aee0ccf | [
"BSD-2-Clause"
] | 1 | 2020-07-03T15:52:25.000Z | 2020-07-03T15:52:25.000Z | snakeoil/migrations/0001_initial.py | wbcsmarteezgithub/django-snakeoil | ae1a8dab9e14194e48963101ff3349f45aee0ccf | [
"BSD-2-Clause"
] | null | null | null | snakeoil/migrations/0001_initial.py | wbcsmarteezgithub/django-snakeoil | ae1a8dab9e14194e48963101ff3349f45aee0ccf | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 2.2 on 2020-03-30 15:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the snakeoil app: creates the SeoUrl model,
    # keyed by its URL, with per-URL SEO title and meta description.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='SeoUrl',
            fields=[
                ('head_title', models.CharField(blank=True, max_length=55, verbose_name='head title')),
                ('meta_description', models.TextField(blank=True, max_length=160, verbose_name='meta description')),
                ('url', models.CharField(max_length=255, primary_key=True, serialize=False, unique=True, verbose_name='URL')),
            ],
            options={
                'verbose_name': 'SEO URL',
                'verbose_name_plural': 'SEO URLs',
            },
        ),
    ]
7f09c4ad50e3517a9c308f3dc246fb0ef9e4dac5 | 1,090 | py | Python | appswag/tests/test_codec.py | atchoum31/appswag | 7279ccde73e2a27495228a9342c88e8526b0534a | [
"MIT"
] | null | null | null | appswag/tests/test_codec.py | atchoum31/appswag | 7279ccde73e2a27495228a9342c88e8526b0534a | [
"MIT"
] | null | null | null | appswag/tests/test_codec.py | atchoum31/appswag | 7279ccde73e2a27495228a9342c88e8526b0534a | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from appswag.primitives import MimeCodec
import unittest
class CodecTestCase(unittest.TestCase):
    """Tests for MimeCodec registration and (un)marshalling behaviour."""

    def test_register_unregister(self):
        """A codec can be registered for a MIME type and removed again."""
        registry = MimeCodec()
        mime = 'test'
        fake_codec = {}
        self.assertEqual(None, registry.codec(mime))
        registry.register(mime, fake_codec)
        self.assertEqual(fake_codec, registry.codec(mime))
        registry.unregister(mime)
        self.assertEqual(None, registry.codec(mime))

    def test_plain_codec(self):
        """text/plain passes data through unchanged in both directions."""
        registry = MimeCodec()
        mime = 'text/plain'
        text = 'plain text'
        self.assertEqual(text, registry.marshal(mime, text))
        self.assertEqual(text, registry.unmarshal(mime, text))

    def test_json_codec(self):
        """application/json round-trips between objects and JSON text."""
        registry = MimeCodec()
        mime = 'application/json'
        value = dict(key='value')
        data = '{"key": "value"}'
        self.assertEqual(data, registry.marshal(mime, value))
        self.assertEqual(value, registry.unmarshal(mime, data))
7f0b21821574f29c48de13330ac73d4176385f22 | 2,211 | py | Python | mediafeed/databases/files.py | media-feed/mediafeed | c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf | [
"MIT"
] | null | null | null | mediafeed/databases/files.py | media-feed/mediafeed | c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf | [
"MIT"
] | null | null | null | mediafeed/databases/files.py | media-feed/mediafeed | c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf | [
"MIT"
] | null | null | null | import os
from shutil import rmtree
from ..settings import DATA_PATH
class Thumbnail(object):
def __init__(self, model):
self.model = model
self.module = model.module
self.filename = os.path.join('thumbnails', model.__tablename__, self.module.id, model.id)
self.full_filename = os.path.join(DATA_PATH, self.filename)
self.url = model.thumbnail_url
def __repr__(self):
return '<Thumbnail "%s">' % self.filename
def __bool__(self):
return bool(self.path)
@property
def path(self):
if self.local_path:
return self.filename
if self.online_path:
return self.online_path
return ''
@property
def local_path(self):
if os.path.exists(self.full_filename):
return self.full_filename
@property
def online_path(self):
if self.url:
return self.url
def download(self, options=None):
if options is None:
options = getattr(self.model, 'options', None)
if self.url:
self.module.get_thumbnail(self.full_filename, self.url, options)
def remove(self):
local_path = self.local_path
if local_path:
os.remove(local_path)
def get_media_path(item):
    """Return the absolute directory holding the media files of *item*."""
    return os.path.join(DATA_PATH, 'medias', item.module_id, item.id)
def get_medias(item):
    """Return the set of Media objects found on disk for *item* (empty if none)."""
    try:
        filenames = os.listdir(get_media_path(item))
    except FileNotFoundError:
        return set()
    return {Media(item, name) for name in filenames}
def remove_medias(item):
    """Delete the whole media directory of *item*, ignoring a missing one."""
    try:
        rmtree(get_media_path(item))
    except FileNotFoundError:
        pass
class Media(object):
    """A single media file belonging to an item."""

    def __init__(self, item, media_filename):
        self.item = item
        self.media_filename = media_filename
        self.filename = os.path.join('medias', item.module_id, item.id, media_filename)
        self.full_filename = os.path.join(DATA_PATH, self.filename)

    def __repr__(self):
        return '<Media "%s:%s" "%s">' % (self.item.module_id, self.item.id, self.media_filename)

    def remove(self):
        """Delete the file from disk if it exists."""
        path = self.full_filename
        if os.path.exists(path):
            os.remove(path)
| 26.638554 | 97 | 0.635911 | 1,749 | 0.791045 | 0 | 0 | 378 | 0.170963 | 0 | 0 | 79 | 0.03573 |
7f0c072f59aca4526734d94d5badc06ae290a227 | 2,823 | py | Python | src/alipay/payment.py | davidrepos/py3alipay | e547e4918c661eb90e69a276ab3e881f28b1112a | [
"MIT"
] | null | null | null | src/alipay/payment.py | davidrepos/py3alipay | e547e4918c661eb90e69a276ab3e881f28b1112a | [
"MIT"
] | null | null | null | src/alipay/payment.py | davidrepos/py3alipay | e547e4918c661eb90e69a276ab3e881f28b1112a | [
"MIT"
] | null | null | null | from hashlib import md5
from urllib.parse import urlencode
from .exceptions import AlipayExcepton,ParameterValueErrorException,MissingParameterException,TokenAuthorizationErrorException
class Alipay(object):
    """Minimal client for Alipay's legacy MAPI gateway.

    Builds MD5-signed request URLs (e.g. for the
    ``create_direct_pay_by_user`` service).
    """

    GATEWAY_URL = 'https://mapi.alipay.com/gateway.do'
    # Fix: the original contained mojibake ("\xacify_id") where the HTML
    # entity for "&notify_id" had been mis-decoded.
    NOTIFY_GATEWAY_URL = 'https://mapi.alipay.com/gateway.do'\
                         '?service=notify_verify&partner=%s&notify_id=%s'
    # (sign parameter name, sign parameter value, sign algorithm label)
    sign_tuple = ('sign_type', 'MD5', 'MD5')
    sign_key = False

    def __init__(self, pid, key, seller_email=None, seller_id=None):
        """Store partner credentials; one of seller_id / seller_email is required."""
        self.key = key
        self.pid = pid
        self.default_params = {'_input_charset': 'utf-8',
                               'partner': pid,
                               'payment_type': '1'}
        # seller_id takes precedence over seller_email (mirrors the
        # behaviour of the Alipay interface itself).
        if seller_id is not None:
            self.default_params['seller_id'] = seller_id
        elif seller_email is not None:
            self.default_params['seller_email'] = seller_email
        else:
            raise MissingParameterException('missing parameter seller_id or seller_email')

    def encode_dict(self, params):
        """Return a copy of *params* with every value UTF-8 encoded."""
        return {k: str(v).encode('utf-8') for k, v in params.items()}

    def _generate_md5_sign(self, params):
        """MD5 sign: sorted 'k=v' pairs joined by '&', with the key appended."""
        src = '&'.join(['%s=%s' % (key, value) for key, value in sorted(params.items())]) + self.key
        return md5(src.encode('utf-8')).hexdigest()

    def _check_params(self, params, names):
        """Raise MissingParameterException if any of *names* is absent."""
        if not all(k in params for k in names):
            raise MissingParameterException(' missing parameters')

    def _build_url(self, service, paramnames=None, **kw):
        """Build a signed request URL for *service*.

        *paramnames*, when given, whitelists the parameters included in the
        signature/query string; by default all parameters are used.
        """
        params = self.default_params.copy()
        params['service'] = service
        params.update(kw)
        if paramnames:
            params = dict([(k, params[k]) for k in paramnames if k in params])
        signkey, signvalue, signdescription = self.sign_tuple
        signmethod = getattr(self, '_generate_%s_sign' % signdescription.lower(),
                             None)  # default avoids AttributeError for unknown algorithms
        if signmethod is None:
            # Fix: the original interpolated `signmethod` (always None here)
            # instead of the sign algorithm's name.
            raise NotImplementedError("This type '%s' of sign is not implemented yet." % signdescription)
        if self.sign_key:
            # Fix: the original called params.update({signkey, signvalue})
            # with a *set*, which raises ValueError at runtime.
            params[signkey] = signvalue
        params.update({signkey: signvalue, 'sign': signmethod(params)})
        return '%s?%s' % (self.GATEWAY_URL, urlencode(self.encode_dict(params)))

    def create_direct_pay_by_user_url(self, **kw):
        """Return a signed URL for an instant ("direct") payment.

        Requires ``out_trade_no`` and ``subject``, plus either ``total_fee``
        or both ``price`` and ``quantity``.
        """
        self._check_params(kw, ('out_trade_no', 'subject'))
        if not kw.get('total_fee') and \
                not (kw.get('price') and kw.get('quantity')):
            raise ParameterValueErrorException('total_fee or (price and quantity) must have one.')
        url = self._build_url('create_direct_pay_by_user', **kw)
        return url
7f0c54650b946eb5ed1cdc2900d388cb5381d2f3 | 1,998 | py | Python | src/robusta/core/triggers/error_event_trigger.py | Rutam21/robusta | 7c918d96362f607488c0e7e0056f436a06dce4ae | [
"MIT"
] | null | null | null | src/robusta/core/triggers/error_event_trigger.py | Rutam21/robusta | 7c918d96362f607488c0e7e0056f436a06dce4ae | [
"MIT"
] | null | null | null | src/robusta/core/triggers/error_event_trigger.py | Rutam21/robusta | 7c918d96362f607488c0e7e0056f436a06dce4ae | [
"MIT"
] | 1 | 2022-02-20T01:49:36.000Z | 2022-02-20T01:49:36.000Z | from ..discovery.top_service_resolver import TopServiceResolver
from ...core.playbooks.base_trigger import TriggerEvent
from ...integrations.kubernetes.autogenerated.triggers import EventAllChangesTrigger, EventChangeEvent
from ...integrations.kubernetes.base_triggers import K8sTriggerEvent
from ...utils.rate_limiter import RateLimiter
class WarningEventTrigger(EventAllChangesTrigger):
    """Fires on Kubernetes events of type "Warning", rate limited per service."""

    rate_limit: int = 3600

    def __init__(self,
                 name_prefix: str = None,
                 namespace_prefix: str = None,
                 labels_selector: str = None,
                 rate_limit: int = 3600,
                 ):
        super().__init__(
            name_prefix=name_prefix,
            namespace_prefix=namespace_prefix,
            labels_selector=labels_selector,
        )
        self.rate_limit = rate_limit

    def should_fire(self, event: TriggerEvent, playbook_id: str):
        """Fire only for Warning events, subject to the parent's checks and a rate limit."""
        parent_verdict = super().should_fire(event, playbook_id)
        if not parent_verdict:
            return parent_verdict
        if not isinstance(event, K8sTriggerEvent):
            return False
        exec_event = self.build_execution_event(event, [])
        if not isinstance(exec_event, EventChangeEvent):
            return False
        obj = exec_event.obj
        if not obj or not obj.involvedObject:
            return False
        if exec_event.get_event().type != "Warning":
            return False
        # Rate-limit per (playbook, reason, service) according to rate_limit.
        involved = obj.involvedObject
        name = involved.name
        namespace = involved.namespace or ""
        service_key = TopServiceResolver.guess_service_key(name=name, namespace=namespace)
        return RateLimiter.mark_and_test(
            f"WarningEventTrigger_{playbook_id}_{obj.reason}",
            service_key or namespace + ":" + name,
            self.rate_limit,
        )
| 38.423077 | 110 | 0.669169 | 1,657 | 0.829329 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.077578 |
7f0c64c539a14aefaa3e8de5c3955487b8cbc27a | 6,483 | py | Python | photoman/rename.py | maciejgaleja/good-content | 15d88aafec25775211672ca447a569478d1d0fbd | [
"MIT"
] | null | null | null | photoman/rename.py | maciejgaleja/good-content | 15d88aafec25775211672ca447a569478d1d0fbd | [
"MIT"
] | null | null | null | photoman/rename.py | maciejgaleja/good-content | 15d88aafec25775211672ca447a569478d1d0fbd | [
"MIT"
] | null | null | null | import logging
import exifread # type: ignore
from datetime import datetime
import pathlib
import os
import shutil
import filecmp
import subprocess
from typing import Tuple, List
# Silence exifread's logger so unreadable-tag warnings don't flood the output.
exifread.logger.disabled = True
# Fallback timestamp (EXIF layout) used whenever no date can be extracted.
date_str_default = "1970:01:01 00:00:00"
class FileIsADuplicate(Exception):
    """The destination already holds a byte-identical copy of the file."""
    pass
class FFMpegNotFound(Exception):
    """The ffprobe executable could not be found on PATH."""
    pass
class InvalidDateFormat(Exception):
    """A timestamp string matched none of the known datetime formats."""
    pass
def parse_date_str(date_str: str) -> datetime:
    """Parse *date_str* against the known timestamp layouts.

    Formats are tried in order (EXIF, European, ISO-ish variants) and the
    first one that matches wins.

    :raises InvalidDateFormat: if no known format matches.
    """
    known_formats = (
        "%Y:%m:%d %H:%M:%S",
        "%d/%m/%Y %H:%M",
        "%Y-%m-%d %H:%M:%S ",
        "%Y-%m-%d %H:%M:%S",
        "%Y-%m-%dT%H:%M:%S.%fZ",
    )
    for fmt in known_formats:
        try:
            return datetime.strptime(date_str, fmt)
        except ValueError:
            continue
    raise InvalidDateFormat
def get_date_str(filename: str, use_short_name: bool) -> Tuple[str, str]:
    """Dispatch to the format-specific date extractor by file extension.

    Returns a ``(timestamp, directory_name)`` pair.

    :raises NotImplementedError: for an unrecognised extension.
    """
    extension = os.path.splitext(filename)[1].upper()
    assert len(extension) > 0
    photo = ('.JPG', '.JPEG', '.CR2', '.DNG')
    video = ('.AVI', '.MP4', '.MOV', '.3GP', '.M4V', '.MPG')
    raw = ('.RW2',)
    if extension in photo:
        return get_date_str_image(filename, use_short_name)
    if extension in video:
        return get_date_str_video(filename)
    if extension in raw:
        return get_date_str_rw2(filename)
    raise NotImplementedError
def get_date_str_video(filename: str) -> Tuple[str, str]:
    """Extract the creation timestamp of a video via ffprobe.

    Falls back to ``date_str_default`` when no creation_time can be read.

    :raises FFMpegNotFound: if the ffprobe binary is not installed.
    """
    try:
        probe = subprocess.run(
            ["ffprobe", filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:  # pragma: no cover
        raise FFMpegNotFound
    try:
        # ffprobe writes its metadata to stderr; scan both streams anyway.
        combined = probe.stdout.decode(
            "utf-8") + "\n" + probe.stderr.decode("utf-8")
        creation_times = [":".join(entry.split(":")[1:]).strip()
                          for entry in combined.split("\n")
                          if "creation_time" in entry]
        date_str = creation_times[0]
    except:  # noqa: E722
        # Deliberate best-effort: any parse failure falls back to the default.
        date_str = date_str_default
    date = parse_date_str(date_str)
    return (date.strftime("%Y%m%d_%H%M%S"), date.strftime("%Y-%m-%d") + "-video")
def get_date_str_image(filename: str, use_short_name: bool) -> Tuple[str, str]:
    """Extract the EXIF capture timestamp (and camera model) from a photo.

    Returns ``(timestamp, directory_name)``; when *use_short_name* is False
    the directory name carries a sanitized camera-model suffix.
    """
    # BUGFIX: the file handle was previously opened and never closed.
    with open(filename, "rb") as f:
        tags = exifread.process_file(f)
    try:
        date_str = str(tags["EXIF DateTimeOriginal"])
    except KeyError:
        date_str = date_str_default
    try:
        model_name = str(tags["Image Model"])
    except KeyError:
        model_name = "UNKNOWN"
    # Sanitize the camera model so it is safe inside a directory name.
    for src, dst in ((" ", "_"), ("<", ""), (">", ""), ("\\", ""), ("/", "")):
        model_name = model_name.replace(src, dst)
    date = parse_date_str(date_str)
    day = date.strftime("%Y-%m-%d")
    if use_short_name:
        ret = (date.strftime("%Y%m%d_%H%M%S"), day)
    else:
        ret = (date.strftime("%Y%m%d_%H%M%S"), day + "-" + model_name)
    return ret
def get_date_str_rw2(filename: str) -> Tuple[str, str]:
    """Read the embedded timestamp of a Panasonic RW2 raw file.

    The 19-character ASCII timestamp sits at a fixed header offset.
    """
    with open(filename, 'rb') as raw:
        raw.seek(0x0E46)  # fixed offset of the timestamp in RW2 headers
        stamp = raw.read(19).decode('utf-8')
    date = parse_date_str(stamp)
    return (date.strftime("%Y%m%d_%H%M%S"), date.strftime("%Y-%m-%d"))
def move_file(oldname: str, newname: str, create_dirs: bool) -> None:
    """Move *oldname* to *newname*, creating parent directories as needed.

    :raises FileIsADuplicate: destination exists with identical content.
    :raises FileExistsError: destination exists with different content.

    NOTE(review): *create_dirs* is accepted but never consulted — parent
    directories are always created; confirm whether that is intended.
    """
    os.makedirs(os.path.dirname(newname), exist_ok=True)
    logging.debug("Will move file {0} to {1}".format(oldname, newname))
    if os.path.exists(newname):
        if oldname == newname:
            return  # moving a file onto itself is a no-op
        if filecmp.cmp(oldname, newname, shallow=False):
            logging.warning("File {} is a duplicate.".format(oldname))
            raise FileIsADuplicate()
        raise FileExistsError()
    try:
        os.rename(oldname, newname)
    except OSError:  # pragma: no cover
        # rename fails across filesystems; fall back to a copying move.
        shutil.move(oldname, newname)
def rename_files(filenames: List[str], output_dir: str, create_dirs: bool = False, remove_duplicates: bool = False, short_dir_names: bool = False) -> None:  # noqa: C901 E501
    """Rename every file in *filenames* to a timestamp-based name under *output_dir*.

    For each file the extractor in ``get_date_str`` supplies a timestamp and a
    directory name. Name collisions get an incrementing ``_N`` suffix; exact
    duplicates are optionally deleted. Any other per-file failure is logged
    and the file is skipped.

    :param filenames: source paths to process
    :param output_dir: destination prefix (concatenated, not joined — callers
        must include a trailing separator)
    :param create_dirs: place files into per-date subdirectories
    :param remove_duplicates: delete a source file whose content already
        exists at the destination
    :param short_dir_names: use the short (date-only) directory naming
    """
    num_total = len(filenames)
    num_current = 0
    for file_path in filenames:
        try:
            original_file_path_str = file_path
            (date_str, dir_str) = get_date_str(
                original_file_path_str, short_dir_names)
            new_file_path_str = output_dir
            original_file_path = pathlib.PurePath(file_path)
            original_filename = original_file_path.name
            extension = str(original_file_path.suffix)
            original_filename = original_filename.replace(extension, '')
            name_suffix_n = 0
            # Retry with increasing "_N" suffixes until the move succeeds.
            while True:
                date_str_to_write = ""
                if create_dirs:
                    date_str_to_write = str(os.path.join(dir_str, date_str))
                else:
                    date_str_to_write = date_str
                if name_suffix_n > 0:
                    date_str_to_write = date_str_to_write + \
                        "_" + str(name_suffix_n)
                # NOTE(review): plain string concatenation — output_dir must
                # already end with a path separator.
                new_file_path_str = output_dir + date_str_to_write + extension.upper()
                try:
                    move_file(original_file_path_str,
                              new_file_path_str, create_dirs)
                    logging.info("{:3.0f}".format(num_current*100/num_total) + "%\t" +
                                 str(original_file_path_str) + "\t-->\t" + new_file_path_str)
                except FileExistsError:
                    # Different content already at destination: bump suffix.
                    name_suffix_n += 1
                    continue
                except FileIsADuplicate:
                    if remove_duplicates:
                        logging.error("deleting {} ...".format(
                            original_file_path_str))
                        os.remove(original_file_path_str)
                break
        except KeyboardInterrupt:  # pragma: no cover
            raise
        except FFMpegNotFound as e:  # pragma: no cover
            raise e
        except:  # noqa: E722
            # Best-effort batch processing: log the failure and keep going.
            logging.warning("{:3.0f}".format(
                num_current*100/num_total) + "%\t" + str(file_path) + "\t-->\t <skipping>")
            logging.exception("ERROR")
        num_current = num_current + 1
| 33.417526 | 174 | 0.583989 | 128 | 0.019744 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.108592 |
7f0d5ca49f1919e597ff44fb16712aa31f0671e9 | 3,536 | py | Python | scripts/yaml2rdf_module.py | opencultureagency/Training.Module.Template | eb9c7a348e55d4884b4be543b00e71b3ba608ee1 | [
"CC0-1.0"
] | null | null | null | scripts/yaml2rdf_module.py | opencultureagency/Training.Module.Template | eb9c7a348e55d4884b4be543b00e71b3ba608ee1 | [
"CC0-1.0"
] | null | null | null | scripts/yaml2rdf_module.py | opencultureagency/Training.Module.Template | eb9c7a348e55d4884b4be543b00e71b3ba608ee1 | [
"CC0-1.0"
] | null | null | null | '''
Converts ASKotec training module meta-data
(from module.yaml) into an RDF/Turtle.
'''
import glob
import os
from rdflib.namespace import DC, DCTERMS, DOAP, FOAF, SKOS, OWL, RDF, RDFS, VOID, XMLNS, XSD
import wget
from yaml2rdf_shared import *
from yaml2rdf import convert
KEY_RESOURCE_URL_YAML = 'yaml-url'
KEY_RESOURCE_URL_RDF = 'rdf-url'
KEY_RESOURCE_FILE_RDF = 'rdf-file'
def download(url, path):
'''
Downloads a URL pointing to a file into a local file,
pointed to by path.
'''
print('downloading %s to %s ...' % (url, path))
if os.path.exists(path):
os.remove(path)
wget.download(url, path, None)
def ensure_resource_turtles(yaml_cont, g):
'''
Either downloads the reosurce RDF files directly,
or downloads their YAML version, an dconverts them to RDF afterwards.
'''
res_i = 0
for elem in yaml_cont['resources']:
if KEY_RESOURCE_URL_YAML in elem:
res_yml_url = elem[KEY_RESOURCE_URL_YAML]
res_yml_file = os.path.join(os.path.curdir,
'resource_%d.yml' % res_i)
res_ttl_file = os.path.join(os.path.curdir,
'resource_%d.ttl' % res_i)
res_pre_file = os.path.join(os.path.curdir,
'resource_%d.pref' % res_i)
download(res_yml_url, res_yml_file)
convert(res_yml_file, res_ttl_file, res_pre_file)
yaml_cont[KEY_RESOURCE_FILE_RDF] = res_ttl_file
elif KEY_RESOURCE_URL_RDF in elem:
res_ttl_url = elem[KEY_RESOURCE_URL_RDF]
res_ttl_file = os.path.join(os.path.curdir,
'resource_%d.ttl' % res_i)
download(res_ttl_url, res_ttl_file)
yaml_cont[KEY_RESOURCE_FILE_RDF] = res_ttl_file
else:
conv_fail('Resource needs either of %s or %s spezified'
% (KEY_RESOURCE_URL_YAML, KEY_RESOURCE_URL_RDF))
res_i = res_i + 1
def convert_module_yaml_to_rdf(yaml_cont, g):
'''
Converts ASKotec training module meta-data content
into an RDF/Turtle string.
'''
supported_version = "1.0"
if version_compare(yaml_cont['version'], supported_version) < 0:
raise 'The content version is not supported by this converter. Please get a newer version!'
y = yaml_cont['module']
pre_path = 'module'
m_s = ASKM[str2id(y['name'])]
ensure_resource_turtles(y, g)
for res_ttl in glob.glob('resource_*.ttl'):
g.parse(res_ttl, format='ttl')
for res_s,_,_ in g.triples((None, RDF['type'], ASK['Resource'])):
g.add(( m_s, ASK.resource, res_s ))
g.add(( m_s, RDF.type, ASK.Module ))
g.add(( m_s, RDFS.label, rdf_str(y['name']) ))
if 'manual' in y:
g.add(( m_s, ASK.manual, rdf_str(y['manual']) ))
elif os.path.exists('manual.md'):
g.add(( m_s, ASK.manual, rdf_str('manual.md') ))
else:
conv_fail('Entry not found "%s", and default path "%s" does not exist'
% (pre_path + '.' + 'manual', os.path.curdir + '/manual.md'))
g.add(( m_s, ASK.release, rdf_str(y['release']) ))
g.add(( m_s, ASK.duration, rdf_str(y['duration']) ))
g.add(( m_s, ASK.maxParticipians, rdf_str(y['max-participians']) ))
g.add(( m_s, ASK.compatibility, rdf_str(y['compatibility']) ))
g.add(( m_s, ASK.blogPosts, rdf_str(y['blog-posts']) ))
g.add(( m_s, ASK.issues, rdf_str(y['issues']) ))
g.add(( m_s, ASK.newIssue, rdf_str(y['new-issue']) ))
conv_authors(y, g, m_s)
conv_licenses(y, g, m_s)
| 36.081633 | 99 | 0.625283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 966 | 0.27319 |
7f0de68f1e52bb92eebc116356fe75164e1a0c39 | 8,346 | py | Python | bot/handlers/poll_handler.py | rednas174/Whatsapp_bot | 05c48f9b82ac92c77c29fb4c4d394ce0607db40f | [
"MIT"
] | 3 | 2021-01-07T21:14:43.000Z | 2021-05-24T11:43:59.000Z | bot/handlers/poll_handler.py | rednas174/Whatsapp_bot | 05c48f9b82ac92c77c29fb4c4d394ce0607db40f | [
"MIT"
] | 4 | 2021-01-09T13:05:28.000Z | 2021-01-10T12:41:47.000Z | bot/handlers/poll_handler.py | rednas174/Whatsapp_bot | 05c48f9b82ac92c77c29fb4c4d394ce0607db40f | [
"MIT"
] | 3 | 2021-01-07T21:09:18.000Z | 2021-01-12T18:53:09.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 11:43:21 2021
@author: Sander
"""
import random
# TODO: '/poll vote 88' has no output
# : invalid poll id gives no output
# Poll datastructure
#
# {
# "number or name of person":
# {
# "__id" : unique id for every poll
# "__name" : name,
# "__votees" :
# [
# ["votee", "option"],
# ["votee", "option"],
# ["votee", "option"],
# ...
# ]
# "Option 1" : votes,
# "Option 2" : votes,
# "Option 3" : votes,
# ...
# },
#
# "number or name of person":
# {
# "__id" : unique id for every poll
# "__name" : name,
# "__votees" :
# [
# ["votee", "option"],
# ["votee", "option"],
# ["votee", "option"],
# ...
# ]
# "Option 1" : votes,
# "Option 2" : votes,
# "Option 3" : votes,
# ...
# },
#
# ...
# }
# List of possible commands:
# /poll create <name with spaces> [<options 1>/<option 2>/...]
# >>> Creates a poll with that name in that persons account name
# >>> A poll can't have these '-[]/' characters in either its name nor its options.
# >>> Neither can a poll have __name of __votees as an option.
#
# /poll remove <id>
# >>> Removes the poll associated with that person
#
# /poll vote <name> -<option>
# >>> Adds a vote to that poll.
# --> Check if person already voted.
# TODO: Check if the voter votes for an existing option
polls = {}
def is_option(poll_id: str, option: str):
if option[:2] == "__":
return False
for name in polls:
if polls[name]["__id"] == int(poll_id):
if option in list(polls[name]):
return True
else:
return False
return False
def vote(poll_id, person, option):
# Loop over every poll until the ID matches
for poll_option in polls:
if polls[poll_option]["__id"] == int(poll_id):
# Loop over every vote until the voter has been found
for voter_index in range(len(polls[poll_option]["__votees"])):
# Change the vote, or return a sassy message
if polls[poll_option]["__votees"][voter_index][0] == person:
# Check if the votes votes again for the same
if polls[poll_option]["__votees"][voter_index][1] == option:
return "Can't vote for the same item twice, you clapped jonko"
else:
polls[poll_option][polls[poll_option]["__votees"][voter_index][1]] -= 1
polls[poll_option][option] += 1
polls[poll_option]["__votees"][voter_index][1] = option
return "Changed vote"
polls[poll_option]["__votees"].append([person, option])
polls[poll_option][option] += 1
return show_poll_by_id(int(poll_id))
def id_generator(name):
loop = True
while loop:
new_id = random.randint(1,999)
loop = False
for poll_name in polls:
if poll_name != name:
if polls[poll_name]["__id"] == new_id:
loop = True
break
return new_id
def create_poll(command, poll_creator):
if command.count("[") == 1 and command.count("]") == 1:
# Extract the poll name
name = command.split(" ", 2)[-1]
name = name[:name.find("[")]
# Remove last space if present
name = name[:-1] if name[-1] == " " else name
# Extract the options
options = command[command.find("[")+1:command.find("]")].split("/")
for option in options:
if option[:2] == "__":
return "Error: You can't start a poll option with '__', please follow the "
try:
polls.pop(poll_creator)
except:
pass
# Set default values
poll_id = id_generator(poll_creator)
polls[poll_creator] = {}
polls[poll_creator]["__id"] = poll_id
polls[poll_creator]["__name"] = name
polls[poll_creator]["__votees"] = []
# Insert options
for option in options:
polls[poll_creator][option] = 0
return show_poll_by_id(poll_id)
def vote_poll(command, poll_votee):
try:
command = command.split(" ", 2)[-1]
print(command)
poll_id, option = command.split(" ", 1)
for poll_name in polls:
#print("id->", polls[poll_name]["__id"], int(poll_id))
if polls[poll_name]["__id"] == int(poll_id):
if is_option(poll_id, option):
return vote(poll_id, poll_votee, option)
else:
return "Error: " + option + " isn't an option"
except:
return "Something went wrong, please make sure you follow the syntax of the command as described in '/poll help'"
def show_poll_by_id(ID):
for poll_item in polls:
if polls[poll_item]["__id"] == ID:
out = "*Poll by " + poll_item + " (" + str(polls[poll_item]["__id"]) + "): " + polls[poll_item]["__name"] + "*\n\n"
for option in list(polls[poll_item].keys()):
if option[:2] != "__":
out += "_" + option + "_ with " + str(polls[poll_item][option]) + " vote(s)\n"
return out
return "Error 404: Poll not found"
def get_poll_list():
out = "*List of all currently active polls:*\n\n"
for option in list(polls.keys()):
out += option + " (" + str(polls[option]["__id"]) + ") - " + polls[option]["__name"][:100]
return out
def handle_poll(command:str, person:str):
"""
Functionality:
This command can create a poll.
/poll create Klap die jonko [Yes/No/Unsure what to do/NOOOO]
Parameters
----------
command: Whatever the command is
creator:
Returns
-------
None.
"""
items = command.split(" ")
# Handle help command
if len(items) == 1:
return "You need to add something, perhaps 'help' to get the syntax help?"
elif len(items) == 2:
if items[1] == "help":
return ("*Syntax help for /poll:*\n"
+ "\n"
+ "You can vote for or create a poll:\n"
+ "\n"
+ "_Creating a poll:_\n"
+ "/poll create <name> [<option 1>/<option 2>/...]"
+ "\n"
+ "_Voting for a poll:_\n"
+ "/poll vote <name> <option>\n"
+ "\n"
+ "Keep in mind that you need to follow the formatting, else there might be some naming problems.\n"
+ "You can also list all polls with their IDs with /poll list.\n"
+ "then show it with /poll show <number or name (for special people ;)>")
if items[1] == "list":
return "".join("Poll (" + str(polls[x]["__id"]) + ") from: " + x[:100] + "\n\n" for x in list(polls.keys()))
elif len(items) == 3:
if items[1].lower() == "show":
return show_poll_by_id(int(items[2]))
else:
if items[1].lower() == "create":
return create_poll(command, person)
if items[1].lower() == "vote":
return vote_poll(command, person)
else:
return items[1] + " isn't a valid option"
if __name__ == "__main__":
print("\n\n>" + handle_poll("/poll create Moet die jonko geklapt worden? [Ja/Nee/Im a loser]", "Prive"))
print("\n\n>" + handle_poll("/poll list", "Prive"))
print("\n\n>" + handle_poll("/poll show 111", "Prive"))
print("\n\n>" + handle_poll("/poll vote 111 Ja", "Prive"))
print("\n\n>" + handle_poll("/poll vote 111 Nee", "Prive"))
print("\n\n>" + handle_poll("/poll vote 111 DSADSADSA", "Prive")) | 30.23913 | 127 | 0.496645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,681 | 0.44105 |
7f0e7fdf479fefea84a6252c3937890b9d28d7ee | 7,689 | py | Python | script.py | OJP98/Text-Prediction | 2ef69c82c8e1a0809e54dfbb42f5ed335f5c74d1 | [
"MIT"
] | 1 | 2020-12-18T00:10:34.000Z | 2020-12-18T00:10:34.000Z | script.py | OJP98/Text-Prediction | 2ef69c82c8e1a0809e54dfbb42f5ed335f5c74d1 | [
"MIT"
] | null | null | null | script.py | OJP98/Text-Prediction | 2ef69c82c8e1a0809e54dfbb42f5ed335f5c74d1 | [
"MIT"
] | null | null | null | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Laboratorio #3 - Predicción de textos
#
# * Oscar Juárez - 17315
# * José Pablo Cifuentes - 17509
# * Paul Belches - 17088
# %%
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.preprocessing.text import Tokenizer
from numpy import array
import random
import collections
from wordcloud import WordCloud
import matplotlib
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
import re
import nltk
nltk.download('stopwords')
# Build the English stopword list from nltk.
# NOTE(review): this rebinds the imported ``stopwords`` module name to a list.
stopwords = stopwords.words('english')
# Para el modelo
# %% [markdown]
# ## Importación y limpieza de datos
#
# ### 1. Abrir y leer archivos.
#
# Cabe mencionar que todos los archivos fueron convertidos a minúsculas, se quitan los urls y en algunas ocasiones, la mayoría de símbolos que consideramos innecesarios. También se quitan las stopwords, los números y finalmente las apostrophes. Además, se separan oraciones mediante los símbolos de **.**, **!** y **?**. Se debe validar que no hayan espacios vacíos luego de estas oraciones.
#
# #### Caso 1: Blogs
# %%
# Accumulator: one cleaned sentence per entry.
blog = []
with open('./files/en_US.blogs.txt', 'r', encoding='utf-8') as blog_txt:
    for line in blog_txt:
        # Strip the trailing newline and lowercase everything.
        line = line.rstrip('\n').lower()
        # Drop URLs.
        # NOTE(review): this pattern only matches a URL at line start followed
        # by a lone CR/LF character, so it rarely fires — confirm intent.
        line = re.sub(r'^https?:\/\/.[\r\n]', '', line)
        # Remove symbols except . ? ! and apostrophes.
        line = re.sub(r"[^\w.?!\d'\s]", '', line)
        # Replace digits with spaces.
        line = re.sub(r'[0-9]', ' ', line)
        # Trim surrounding whitespace.
        line = line.strip(' \t\n\r')
        # Drop English stopwords.
        line = [word for word in line.split(' ') if word not in stopwords]
        line = ' '.join(line)
        # Finally, drop apostrophes.
        line = line.replace("'", '')
        # Candidate sentence splits per terminator character.
        dotSentences = line.split('.')
        excSentences = line.split('!')
        queSentences = line.split('?')
        # Keep whichever terminator actually split the line; checked in the
        # fixed order '.', '!', '?' (only one branch runs per line).
        if len(dotSentences) > 1:
            for sentence in dotSentences:
                # Strip remaining punctuation from each candidate sentence.
                sentence = re.sub(r'[^\w]', ' ', sentence).strip()
                if len(sentence) > 1:
                    blog.append(sentence)
        elif len(excSentences) > 1:
            for sentence in excSentences:
                # NOTE(review): unlike the '.' branch these candidates are not
                # strip()ped — confirm whether the asymmetry is deliberate.
                sentence = re.sub(r'[^\w]', ' ', sentence)
                if len(sentence) > 1:
                    blog.append(sentence)
        elif len(queSentences) > 1:
            for sentence in queSentences:
                sentence = re.sub(r'[^\w]', ' ', sentence)
                if len(sentence) > 1:
                    blog.append(sentence)
        elif len(line.split(' ')) > 1:
            # Multi-word line without terminators: keep it whole.
            line = re.sub(r'[^\w]', ' ', line).strip()
            blog.append(line)
# %% [markdown]
# #### Caso 2: Noticias
#
# Este caso tuvo un procedimiento igual al caso 1.
# %%
# Accumulator for the news corpus; pipeline mirrors the blog-cleaning cell.
news = []
with open('./files/en_US.news.txt', 'r', encoding='utf-8') as news_txt:
    for line in news_txt:
        # Strip the trailing newline and lowercase everything.
        line = line.rstrip('\n').lower()
        # Drop URLs (same narrow pattern as the blog cell — see note there).
        line = re.sub(r'^https?:\/\/.[\r\n]', '', line)
        # Remove symbols except . ? ! and apostrophes.
        line = re.sub(r"[^\w.?!\d'\s]", '', line)
        # Replace digits with spaces.
        line = re.sub(r'[0-9]', ' ', line)
        # Trim surrounding whitespace.
        line = line.strip(' \t\n\r')
        # Drop English stopwords.
        line = [word for word in line.split(' ') if word not in stopwords]
        line = ' '.join(line)
        # Finally, drop apostrophes.
        line = line.replace("'", '')
        # Candidate sentence splits per terminator character.
        dotSentences = line.split('.')
        excSentences = line.split('!')
        queSentences = line.split('?')
        # Only one terminator branch runs per line, checked '.', '!', '?'.
        if len(dotSentences) > 1:
            for sentence in dotSentences:
                # Strip remaining punctuation from each candidate sentence.
                sentence = re.sub(r'[^\w]', ' ', sentence).strip()
                if len(sentence) > 1:
                    news.append(sentence)
        elif len(excSentences) > 1:
            for sentence in excSentences:
                # NOTE(review): not strip()ped, unlike the '.' branch.
                sentence = re.sub(r'[^\w]', ' ', sentence)
                if len(sentence) > 1:
                    news.append(sentence)
        elif len(queSentences) > 1:
            for sentence in queSentences:
                sentence = re.sub(r'[^\w]', ' ', sentence)
                if len(sentence) > 1:
                    news.append(sentence)
        elif len(line.split(' ')) > 1:
            # Multi-word line without terminators: keep it whole.
            line = re.sub(r'[^\w]', ' ', line).strip()
            news.append(line)
# %% [markdown]
# #### Caso 3: Twitter
#
# En este caso, se toma cada distinto tweet como una oración. Es necesario quitar emojis y símbolos como #, $, %, !, @, etc. Además, se quitan urls y se permiten los símbolos: **.** **,** **'**
# %%
# Each tweet is treated as one sentence after cleaning.
tweets = []
with open('./files/en_US.twitter.txt', 'r', encoding='utf-8') as twitter_txt:
    for raw_tweet in twitter_txt:
        # Drop the newline and lowercase.
        cleaned = raw_tweet.replace('\n', '').lower()
        # Drop URLs.
        cleaned = re.sub(r'^https?:\/\/.[\r\n]', '', cleaned)
        # Keep only word characters plus . , apostrophes and whitespace.
        cleaned = re.sub(r"[^\w.,\d'\s]", '', cleaned)
        # Drop free-standing numbers.
        cleaned = re.sub('^\d+\s|\s\d+\s|\s\d+$', '', cleaned)
        tweets.append(cleaned.strip())
# %%
# Pool the three corpora and shuffle so the sample below is representative.
complete_data = blog + news + tweets
random.shuffle(complete_data)
# %%
# Keep only 0.5% of the sentences to keep training time manageable.
data_size = int(len(complete_data)*0.005)
print('Se va a utilizar ' + str(data_size) + ' datos')
data = complete_data[:data_size]
# %% [markdown]
# Persist the sentences used for training as a CSV for reproducibility.
# %%
df = pd.DataFrame(data, columns=["oraciones"])
df.to_csv('training.csv', index=False)
# %% [markdown]
# Build a tokenizer: an integer representation of every word in our data.
# %%
tokenizer = Tokenizer()
# NOTE(review): fit_on_texts/texts_to_sequences receive [data] — a list whose
# single element is the sentence list — so the whole corpus is encoded as one
# long sequence; confirm this nesting is intended.
tokenizer.fit_on_texts([data])
encoded = tokenizer.texts_to_sequences([data])[0]
# %%
# Vocabulary size (+1 because Keras reserves index 0 for padding).
vocab_size = len(tokenizer.word_index) + 1
# %%
# Map every pair of consecutive words to the word that follows (trigrams).
sequences = [encoded[i - 2:i + 1] for i in range(2, len(encoded))]
max_length = max(len(seq) for seq in sequences)
# Left-pad so every sequence has the same length.
sequences = pad_sequences(sequences, maxlen=max_length, padding='pre')
# %% [markdown]
# separamos en los elementos inputs y outputs
#
# %%
# First two columns are the input context; the last column is the target word,
# one-hot encoded over the vocabulary.
sequences = array(sequences)
X = sequences[:, :-1]
y = to_categorical(sequences[:, -1], num_classes=vocab_size)
# %% [markdown]
# Define the model
# %%
model = Sequential()
# Embedding: one 10-dim vector per vocabulary word; input is the 2-word context.
model.add(Embedding(vocab_size, 10, input_length=max_length-1))
model.add(LSTM(50))
# Softmax over the whole vocabulary predicts the next word.
model.add(Dense(vocab_size, activation='softmax'))
print(model.summary())
# %% [markdown]
# Compile the model
# %%
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
# %%
# Train the model.
model.fit(X, y, epochs=150, verbose=2)
# %%
# Save the learned weights for later reuse.
model.save_weights('deep_no_stopwords')
| 31.383673 | 391 | 0.60437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,168 | 0.410575 |
7f0f365849e50a905970be37d0158ad513dfbf3f | 976 | py | Python | notebook/str_compare_re.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 174 | 2018-05-30T21:14:50.000Z | 2022-03-25T07:59:37.000Z | notebook/str_compare_re.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 5 | 2019-08-10T03:22:02.000Z | 2021-07-12T20:31:17.000Z | notebook/str_compare_re.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 53 | 2018-04-27T05:26:35.000Z | 2022-03-25T07:59:37.000Z | import re
# Demonstrations of substring/anchor matching with the re module.
s = 'aaa-AAA-123'

# re.search finds a match anywhere in the string (None when absent).
print(re.search('aaa', s))
# <re.Match object; span=(0, 3), match='aaa'>
print(re.search('xxx', s))
# None

# ^ anchors the match to the start of the string.
print(re.search('^aaa', s))
# <re.Match object; span=(0, 3), match='aaa'>
print(re.search('^123', s))
# None

# $ anchors the match to the end of the string.
print(re.search('aaa$', s))
# None
print(re.search('123$', s))
# <re.Match object; span=(8, 11), match='123'>

# Character classes match runs of characters.
print(re.search('[A-Z]+', s))
# <re.Match object; span=(4, 7), match='AAA'>

# fullmatch requires the pattern to cover the entire string.
s = '012-3456-7890'
print(re.fullmatch(r'\d{3}-\d{4}-\d{4}', s))
# <re.Match object; span=(0, 13), match='012-3456-7890'>

s = 'tel: 012-3456-7890'
print(re.fullmatch(r'\d{3}-\d{4}-\d{4}', s))
# None

# search with ^...$ anchors is equivalent to fullmatch here.
s = '012-3456-7890'
print(re.search(r'^\d{3}-\d{4}-\d{4}$', s))
# <re.Match object; span=(0, 13), match='012-3456-7890'>

s = 'tel: 012-3456-7890'
# BUGFIX: raw-string prefix added for consistency with the other patterns
# (a bare '\d' is an invalid escape sequence and warns on modern Python).
print(re.search(r'^\d{3}-\d{4}-\d{4}$', s))
# None

# Case-insensitive matching via the re.IGNORECASE flag.
s = 'ABC'
print(re.search('abc', s))
# None
print(re.search('abc', s, re.IGNORECASE))
# <re.Match object; span=(0, 3), match='ABC'>
| 18.415094 | 56 | 0.574795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 597 | 0.61168 |
7f1013721896a66800cbfdbbd69ac5bfb01f2332 | 47 | py | Python | pytype/__version__.py | jjedele/pytype | 3c5d920d26ac583bdfd68080e7db454ecb1dc900 | [
"Apache-2.0"
] | null | null | null | pytype/__version__.py | jjedele/pytype | 3c5d920d26ac583bdfd68080e7db454ecb1dc900 | [
"Apache-2.0"
] | null | null | null | pytype/__version__.py | jjedele/pytype | 3c5d920d26ac583bdfd68080e7db454ecb1dc900 | [
"Apache-2.0"
] | null | null | null | # pylint: skip-file
__version__ = '2018.12.21'
| 15.666667 | 26 | 0.702128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.659574 |
7f102b5952e499b7df63927d1e17dfc99e397355 | 291 | py | Python | keybindings/autokey/data/Mac/refresh.py | guoyiteng/linux-for-macos-user | 705baec9ddffb9ab73172cdc2b272ab123b1e402 | [
"MIT"
] | 1 | 2019-10-24T19:52:23.000Z | 2019-10-24T19:52:23.000Z | keybindings/autokey/data/Mac/refresh.py | guoyiteng/linux-for-macos-user | 705baec9ddffb9ab73172cdc2b272ab123b1e402 | [
"MIT"
] | null | null | null | keybindings/autokey/data/Mac/refresh.py | guoyiteng/linux-for-macos-user | 705baec9ddffb9ab73172cdc2b272ab123b1e402 | [
"MIT"
] | null | null | null | store.set_global_value('hotkey', '<meta>+r')
# Emit the refresh keystroke appropriate for the focused window class:
# Hyper terminal windows get the hard-refresh chord, everything else Ctrl+R.
hyper_focused = bool(re.match('.*(Hyper)', window.get_active_class()))
logging.debug('terminal refresh buffer' if hyper_focused else 'normal')
engine.set_return_value('<ctrl>+<shift>+r' if hyper_focused else '<ctrl>+r')
engine.run_script('combo')
7f10c789a41fdf99ae85a779599834f61aad15f7 | 13,722 | py | Python | mednickdb_pyapi/test_mednickdb_pyapi.py | MednickLab/python_module | 818763a70d1058e72ddecfea7e07b88e42b39f3b | [
"MIT"
] | null | null | null | mednickdb_pyapi/test_mednickdb_pyapi.py | MednickLab/python_module | 818763a70d1058e72ddecfea7e07b88e42b39f3b | [
"MIT"
] | null | null | null | mednickdb_pyapi/test_mednickdb_pyapi.py | MednickLab/python_module | 818763a70d1058e72ddecfea7e07b88e42b39f3b | [
"MIT"
] | 1 | 2018-12-06T21:51:22.000Z | 2018-12-06T21:51:22.000Z | from mednickdb_pyapi.mednickdb_pyapi import MednickAPI
import pytest
import time
# Credentials for the shared staging server used by these integration tests.
# NOTE(review): plaintext credentials committed to source — acceptable only
# for a disposable test account; confirm.
user = 'bdyetton@hotmail.com'
password = 'Pass1234'
server_address = 'http://saclab.ss.uci.edu:8000'
def test_login():
    """Smoke-test authentication: a token is issued and the account is admin."""
    api = MednickAPI(server_address, user, password)
    assert api.token
    assert api.usertype == 'admin'
def test_clear_test_study():
    """Delete every TEST-study file and datum, verifying each deletion sticks."""
    api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
    fids = api.extract_var(api.get_files(studyid='TEST'), '_id')
    if fids:
        for fid in fids:
            api.delete_file(fid, delete_all_versions=True)
            api.delete_data_from_single_file(fid)
            remaining = api.extract_var(api.get_files(studyid='TEST'), '_id')
            assert fid not in remaining
        assert remaining == []
        # Every removed file should now show up in the deleted-files listing.
        deleted_fids = api.extract_var(api.get_deleted_files(), '_id')
        assert all(dfid in deleted_fids for dfid in fids)
    api.delete_data(studyid='TEST')
    assert len(api.get_data(studyid='TEST', format='nested_dict')) == 0  # TODO after clearing up sourceid bug
@pytest.mark.dependency(['test_clear_test_study'])
def test_upload_and_download_file():
    """Upload a score file, download it back, and check bytes plus file counts."""
    api = MednickAPI(server_address, user, password)
    all_files_before = api.get_files()
    unparsed_before = api.get_unparsed_files()
    with open('testfiles/scorefile1.mat', 'rb') as fh:
        file_info = api.upload_file(fileobject=fh,
                                    fileformat='scorefile',
                                    studyid='TEST',
                                    subjectid=1,
                                    versionid=1,
                                    filetype='sleep')
    downloaded = api.download_file(file_info['_id'])
    with open('testfiles/scorefile1.mat', 'rb') as fh:
        assert downloaded == fh.read()
    # The upload should add exactly one file to both listings.
    assert len(api.get_files()) == len(all_files_before) + 1
    assert len(api.get_unparsed_files()) == len(unparsed_before) + 1
@pytest.mark.dependency(['test_clear_test_study'])
def test_upload_and_overwrite():
    """Test that a file uploaded with the same name and info overwrites the older version
    When a file with the same filename, and same location in the file servers is uploaded:
     - The previous version will be set as active=False
     - The new version will get a new FID
     -
    """
    med_api = MednickAPI(server_address, user, password)
    # First upload: original version of the demographics sheet.
    with open('testfiles/TEST_Demographics.xlsx', 'rb') as uploaded_version_1:
        file1_info_before_overwrite = med_api.upload_file(fileobject=uploaded_version_1,
                                                          fileformat='tabular',
                                                          studyid='TEST',
                                                          subjectid=1,
                                                          versionid=1,
                                                          filetype='unique_thing_1')
    downloaded_version_1 = med_api.download_file(file1_info_before_overwrite['_id'])
    file_version_before_overwrite = file1_info_before_overwrite['filename_version']
    # Second upload: same filename/location but different contents -> overwrite.
    with open('testfiles/updated_versions/TEST_Demographics.xlsx', 'rb') as uploaded_version_2:
        file1_info_after_overwrite = med_api.upload_file(fileobject=uploaded_version_2,
                                                         fileformat='tabular',
                                                         studyid='TEST',
                                                         subjectid=1,
                                                         versionid=1,
                                                         filetype='unique_thing_1')
    downloaded_version_2 = med_api.download_file(file1_info_after_overwrite['_id'])
    file_version_after_overwrite = file1_info_after_overwrite['filename_version']
    with open('testfiles/updated_versions/TEST_Demographics.xlsx', 'rb') as uploaded_version_2:
        f = uploaded_version_2.read()
        assert downloaded_version_2 == f
        assert downloaded_version_1 != f
    #Get all versions, and make sure both versions of the file match what was uploaded
    all_versions = med_api.get_files(filename='TEST_Demographics.xlsx', previous_versions=True)
    assert all([file in med_api.extract_var(all_versions, 'filename_version') for file in [file_version_after_overwrite, file_version_before_overwrite]])
    # Without previous_versions only the active (latest) file is returned.
    file = med_api.get_files(filename='TEST_Demographics.xlsx')
    assert len(file) == 1
    assert file1_info_before_overwrite['_id'] != file1_info_after_overwrite['_id'] #It gets a new fid
    assert file[0]['_id'] == file1_info_after_overwrite['_id']
    downloaded_version_current = med_api.download_file(file[0]['_id'])
    assert downloaded_version_current == downloaded_version_2
    assert downloaded_version_1 != downloaded_version_2
def test_file_query():
    """Upload a bunch of files to the server, and query them using all the types of querying available"""
    # Exercises ==, in, not in, and, or, !=, >, <, >=, <= and a combined clause
    # against the file metadata endpoint.
    test_clear_test_study() # Start Fresh
    med_api = MednickAPI(server_address, user, password)
    with open('testfiles/scorefile1.mat', 'rb') as uploaded_version:
        file_info1 = med_api.upload_file(fileobject=uploaded_version,
                                         fileformat='scorefile',
                                         studyid='TEST',
                                         subjectid=1,
                                         versionid=1,
                                         filetype='sleep')
        fid1 = file_info1['_id']
    with open('testfiles/scorefile1.mat', 'rb') as uploaded_version:
        file_info2 = med_api.upload_file(fileobject=uploaded_version,
                                         fileformat='scorefile',
                                         studyid='TEST',
                                         subjectid=2,
                                         versionid=1,
                                         filetype='sleep')
        fid2 = file_info2['_id']
    with open('testfiles/TEST_Demographics.xlsx', 'rb') as uploaded_version_1:
        file_info3 = med_api.upload_file(fileobject=uploaded_version_1,
                                         fileformat='tabular',
                                         studyid='TEST',
                                         subjectid=3,
                                         versionid=2,
                                         filetype='unique_thing_2')
        fid3 = file_info3['_id']
    # Give the server a moment to index the new files before querying.
    time.sleep(1)
    #Test ==
    fids = med_api.extract_var(med_api.get_files(query='studyid==TEST'),'_id')
    assert all([fid in fids for fid in [fid1, fid2, fid3]])
    #Test IN
    fids = med_api.extract_var(med_api.get_files(query='subjectid in [1,2]'),'_id')
    assert all([fid in fids for fid in [fid1, fid2]])
    #Test not in
    fids = med_api.extract_var(med_api.get_files(query='subjectid not in [1,2]'),'_id')
    assert all([fid in fids for fid in [fid3]])
    # Test and
    fids = med_api.extract_var(med_api.get_files(query='subjectid==1 and versionid==1'),'_id')
    assert all([fid in fids for fid in [fid1]])
    # Test or
    fids = med_api.extract_var(med_api.get_files(query='subjectid==2 or 1'),'_id')
    assert all([fid in fids for fid in [fid1, fid2]])
    #Test not =
    fids = med_api.extract_var(med_api.get_files(query='subjectid!=2'),'_id')
    assert all([fid in fids for fid in [fid1, fid3]])
    #Test >
    fids = med_api.extract_var(med_api.get_files(query='subjectid>2'),'_id')
    assert all([fid in fids for fid in [fid3]])
    #Test <
    fids = med_api.extract_var(med_api.get_files(query='subjectid<2'),'_id')
    assert all([fid in fids for fid in [fid1]])
    #Test <=
    fids = med_api.extract_var(med_api.get_files(query='subjectid<=2'),'_id')
    assert all([fid in fids for fid in [fid1, fid2]])
    #Test <=
    fids = med_api.extract_var(med_api.get_files(query='subjectid>=2'),'_id')
    assert all([fid in fids for fid in [fid2, fid3]])
    # Test complex #TODO
    fids = med_api.extract_var(med_api.get_files(query='subjectid>2 or <=1'),'_id')
    assert all([fid in fids for fid in [fid1, fid3]])
def test_data_query():
    """Upload three demographics rows and query them with every supported operator."""
    med_api = MednickAPI(server_address, user, password)
    # NOTE: dict_is_subset is defined but never used below.
    def dict_is_subset(superset, subset):
        return all(item in superset.items() for item in subset.items())
    # Keep only the keys that also appear in template, so returned rows can be
    # compared against the row dicts uploaded here.
    def strip_non_matching_keys(strip_from, template):
        return {k: v for k, v in strip_from.items() if k in template}
    test_clear_test_study()
    with open('testfiles/TEST_Demographics.xlsx', 'rb') as uploaded_version_1:
        file_info1 = med_api.upload_file(fileobject=uploaded_version_1,
                                         fileformat='tabular',
                                         studyid='TEST',
                                         subjectid=1,
                                         versionid=2,
                                         filetype='unique_thing_3')
        fid1 = file_info1['_id']
    row1 = {'sex':'M', 'age':22, 'edu':12}
    row2 = {'sex':'F', 'age':19, 'edu':8}
    row3 = {'sex':'M', 'age':29, 'edu':18}
    med_api.upload_data(data=row1,
                        studyid='TEST',
                        subjectid=1,
                        versionid=1,
                        visitid=1,
                        filetype='demographics',
                        fid=fid1)
    med_api.upload_data(data=row2,
                        studyid='TEST',
                        subjectid=2,
                        versionid=1,
                        visitid=2,
                        filetype='demographics',
                        fid=fid1
                        )
    med_api.upload_data(data=row3,
                        studyid='TEST',
                        subjectid=3,
                        versionid=1,
                        filetype='demographics',
                        fid=fid1)
    # Give the server a moment to index the uploads before querying.
    time.sleep(1)
    #sanity check to see if we have any data at all:
    data_rows = med_api.get_data(format='nested_dict', studyid='TEST')
    assert len(data_rows) > 0
    #Test ==
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.sex==M')]
    assert all([row in data_rows for row in [row1]])
    # Test IN
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age in [22,19]')]
    assert all([row in data_rows for row in [row1, row2]])
    # Test not in
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age not in [22,19]')]
    assert all([row in data_rows for row in [row3]])
    # Test and
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age==22 and versionid==1')]
    assert all([row in data_rows for row in [row1]])
    # Test or
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age==22 or 19')]
    assert all([row in data_rows for row in [row1, row2]])
    # Test not =
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age!=22')]
    assert all([row in data_rows for row in [row2, row3]])
    # Test >
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age>19')]
    assert all([row in data_rows for row in [row1, row3]])
    # Test <
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age<22')]
    assert all([row in data_rows for row in [row2]])
    # Test <=
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age>=22')]
    assert all([row in data_rows for row in [row1, row3]])
    # Test >=
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age<=22')]
    assert all([row in data_rows for row in [row1, row2]])
    # Test complex or
    data_rows = [strip_non_matching_keys(row['data']['demographics'], row1) for row in med_api.get_data(format='nested_dict',query='data.demographics.age<22 or >28')]
    assert all([row in data_rows for row in [row2, row3]])
# def test_update_file_info(): #TODO Junbai to add to simple_tests
# med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
# fids = med_api.get_files(studyid='TEST')
# file_info_1 = med_api.get_file_by_fid(fid=fids[0]['_id'])
# to_add = {'sessionid': 10}
# med_api.update_file_info(fid=fids[0]['_id'], file_info=to_add)
# file_info_1.update(to_add)
# time.sleep(file_update_time) # Give db 5 seconds to update
#
# file_info_2 = med_api.get_file_by_fid(fids[0]['_id'])
# assert (file_info_2 == file_info_1)
# def test_parsing_status(): #TODO Junbai to add to simple_tests
# med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
# fids = med_api.get_files(studyid='TEST')
# med_api.update_parsed_status(fids[0], False)
# time.sleep(5)
# fids2 = med_api.get_unparsed_files()
# assert (fids[0] in fids2)
| 44.843137 | 177 | 0.626366 | 0 | 0 | 0 | 0 | 3,871 | 0.282102 | 0 | 0 | 3,836 | 0.279551 |
7f114c9480739ed4ffd7edf4c037a334bd96a0d0 | 1,286 | py | Python | passl/hooks/evaluate_hook.py | LielinJiang/PASSL | 56be08aa50c4883e12b2a728103ccb56dd19b047 | [
"Apache-2.0"
] | null | null | null | passl/hooks/evaluate_hook.py | LielinJiang/PASSL | 56be08aa50c4883e12b2a728103ccb56dd19b047 | [
"Apache-2.0"
] | null | null | null | passl/hooks/evaluate_hook.py | LielinJiang/PASSL | 56be08aa50c4883e12b2a728103ccb56dd19b047 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tqdm import tqdm
from collections import OrderedDict
import paddle
import paddle.distributed as dist
from .hook import Hook
from .builder import HOOKS
from ..utils.logger import get_logger
from ..utils.misc import AverageMeter
@HOOKS.register()
class EvaluateHook(Hook):
    """Hook that runs the trainer's validation loop.

    Validation runs after every training epoch and, when ``init_eval`` is
    True, once before training begins.
    """

    def __init__(self, init_eval=False, eval_kargs=None):
        # Fall back to an empty kwargs dict (avoids a mutable default argument).
        self.eval_kargs = {} if eval_kargs is None else eval_kargs
        self.init_eval = init_eval

    def run_begin(self, trainer):
        # Optional evaluation of the freshly initialised model.
        if self.init_eval:
            trainer.val(**self.eval_kargs)

    def train_epoch_end(self, trainer):
        trainer.val(**self.eval_kargs)
| 31.365854 | 74 | 0.724728 | 423 | 0.328927 | 0 | 0 | 441 | 0.342924 | 0 | 0 | 596 | 0.463453 |
7f131597a81ecfde4479ee53fb1d6ae3dac7f3c1 | 13,154 | py | Python | npu/core/common.py | xloem/npu | ed6da8599b04e9a5f2eded88d6c6ca433d46aa03 | [
"MIT"
] | null | null | null | npu/core/common.py | xloem/npu | ed6da8599b04e9a5f2eded88d6c6ca433d46aa03 | [
"MIT"
] | null | null | null | npu/core/common.py | xloem/npu | ed6da8599b04e9a5f2eded88d6c6ca433d46aa03 | [
"MIT"
] | null | null | null | import hashlib
import json
import math
import os
import dill
import base64
from sys import exit
import requests
from bson import ObjectId
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
#from cryptography.hazmat.primitives.asymmetric import padding
#from cryptography.hazmat.primitives import serialization, hashes
from tqdm import tqdm
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from datetime import datetime
from .Model import Model
from .DataLoader import DataLoader
from .Dataset import Dataset
from .saving.saving import save_data, determine_model, TF_str, mxnet_str, pytorch_str
from .web.urls import TOKEN_URL, HASH_URL, UPLOAD_DATA_URL
# Logging verbosity: current level plus the three thresholds used by verbose_print.
VERBOSITY = 1
MIN_VERBOSITY = 1
MID_VERBOSITY = 2
FULL_VERBOSITY = 3
# Module-level session state populated by api().
_token = ""
_project = ""
_deployed = False
# Alias kept so npu_print/utcnow_formatted can reference a short name.
utcnow = datetime.utcnow
# Load the bundled RSA public key used to encrypt credentials (e.g. hub tokens).
with open(os.path.join(os.path.dirname(__file__), "pub_cred_key.pub"), "rb") as key_file:
    #pub_key_encryption = serialization.load_pem_public_key(key_file.read())
    pub_key_encryption = PKCS1_OAEP.new(RSA.importKey(key_file.read()), SHA256)
# from SO
class bcolors:
    """ANSI escape sequences for colourising terminal output."""
    PURPLE = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all colour/attribute state
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    ORANGE = '\33[38;5;208m'
# Maps log level name -> ANSI colour used by npu_print.
levels = {"WARNING": bcolors.ORANGE, "INFO": bcolors.PURPLE, "ERROR": bcolors.FAIL}
# Prefix stamped on every log line and on the upload progress bar.
NEURO_AI_STR = f"[{bcolors.OKBLUE}Neuro Ai{bcolors.ENDC}]"
def api(token_, project_name, verbosity, deployed):
    """Authenticate against the backend and select a project.

    Stores the token, project, verbosity level and deployment flag in the
    module-level globals used by the rest of this module.

    Raises:
        ValueError: if the backend rejects the token.
    """
    global _token
    global _project
    global VERBOSITY
    global _deployed
    if token_ == "":
        # No token passed in -- fall back to the environment variable.
        token_ = os.environ.get("NPU_API_TOKEN", "")
    _token = token_
    VERBOSITY = verbosity
    verbose_print(f"Verbosity level set to {VERBOSITY}", MID_VERBOSITY)
    _deployed = deployed
    if _deployed:
        npu_print("DEPLOYMENT MODE")
    params = {"token": _token, "project_name": project_name}
    response = post(TOKEN_URL, json=params)
    if response.status_code == 200:
        npu_print("Token successfully authenticated")
        _project = response.json()
        npu_print(f"Using project: {project_name}")
        return response
    else:
        raise ValueError(response.text)
        # "API token not valid"
def getToken():
    """Return the API token stored by :func:`api`."""
    return _token
def auth_header():
    """Return the Bearer Authorization header dict for authenticated requests."""
    return {"authorization": "Bearer " + getToken()}
def get_verbosity():
    """Return the current global verbosity level."""
    return VERBOSITY
def get_project():
    """Return the project payload stored by :func:`api`."""
    return _project
def is_deployed():
    """Return True when deployment mode was enabled via :func:`api`."""
    return _deployed
def get_response(response):
    """Return the JSON payload of *response*.

    Raises:
        ConnectionError: if the body cannot be decoded as JSON. The raw body
            text is included in the message and the original decoding error is
            preserved as ``__cause__`` (previously the caught exception was
            silently discarded).
    """
    try:
        return response.json()
    except Exception as e:
        raise ConnectionError("Invalid response received. Error: {}".format(response.text)) from e
# https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python
def convert_size(size_bytes):
    """Render a byte count as a human-readable string, e.g. ``1.5 KB``."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
def add_kwargs_to_params(params, **kwargs):
    """Return a new dict with *kwargs* merged over *params* (input not mutated)."""
    return dict(params, **kwargs)
def read_in_chunks(file_object, chunk_size=1024):
    """Lazily yield successive chunks read from *file_object*.

    Default chunk size: 1k. Stops at end-of-file (an empty read).
    """
    chunk = file_object.read(chunk_size)
    while chunk:
        yield chunk
        chunk = file_object.read(chunk_size)
def check_model(model):
    """Raise ValueError unless *model* is a Task, Model, or string reference."""
    from .Task import Task
    from .Model import Model
    if not isinstance(model, Task) and not isinstance(model, str) and not isinstance(model, Model):
        raise ValueError("Model is not a valid format. Please make sure you've compiled it first.")
def check_model_type(model, params):
    """Fill *params* in place with the appropriate model reference key.

    Model instances and non-ObjectId strings go under ``model_name`` (plus
    ``model_attr`` for Model instances); anything else non-empty that is not
    a Task is treated as an id under ``modelId``.
    """
    from .Task import Task
    if isinstance(model, Model):
        params["model_name"] = model.name
        params["model_attr"] = model.attr
    elif isinstance(model, str) and not ObjectId.is_valid(model):
        params["model_name"] = model
    elif model != "" and not isinstance(model, Task):
        params["modelId"] = model
def check_data_type(data, param_name, params):
    """Fill *params* in place with the appropriate data reference keys.

    Dataset instances / names go under ``<param_name>_name``; HubDataset
    metadata and raw ids go under ``<param_name>Id``. A ``_hub_ds`` flag
    records whether the data came from a HubDataset.
    """
    from .Task import Task
    if isinstance(data, Dataset):
        params[param_name + "_name"] = data.id
    elif isinstance(data, str) and not ObjectId.is_valid(data):
        params[param_name + "_name"] = data
    elif isinstance(data, HubDataset):
        params[param_name + "Id"] = data.hub_meta
    elif data != "" and not isinstance(data, Task):
        params[param_name + "Id"] = data
    params[f"{param_name}_hub_ds"] = isinstance(data, HubDataset)
def check_data(data, name=""):
    """Normalise/upload *data* and return a server-side reference for it.

    Dicts, existing references (.npy/.npz paths, ObjectIds, "") and Dataset
    instances pass through; hub datasets are wrapped as HubDataset with an
    encrypted token; anything else is uploaded and its id returned.

    Raises:
        ValueError: if *name* is not a string.
        ConnectionAbortedError: if an upload fails.
    """
    if not isinstance(name, str):
        raise ValueError("Name given is not valid. Please supply a string.")
    if isinstance(data, dict):
        return data
    # Best-effort hub support: any failure (hub not installed, wrong type)
    # falls through to the generic handling below.
    try:
        import hub
        hub_meta = {}
        if hasattr(data, "dataset"):
            # A hub dataset view: remember its slicing info before unwrapping.
            if hasattr(data, "indexes"):
                hub_meta["indexes"] = data.indexes
            if hasattr(data, "subpath"):
                hub_meta["subpath"] = data.subpath
            data = data.dataset
        if isinstance(data, hub.Dataset):
            # Encrypt the hub access token with the bundled RSA public key.
            encrypted_token = base64.b64encode(
                pub_key_encryption.encrypt(
                    json.dumps(data.token).encode()
                )).decode()
            #pub_key_encryption.encrypt(
            #    json.dumps(data.token).encode(),
            #    padding.OAEP(
            #        mgf=padding.MGF1(
            #            algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None))).decode()
            hub_meta = {"url": data.url, "schema": data.schema, "token": encrypted_token, **hub_meta}
            hub_meta = base64.b64encode(dill.dumps(hub_meta)).decode()
            return HubDataset(hub_meta)
    except Exception as e:
        # print(e)
        pass
    if isinstance(data, str) and (data.endswith(("npy", "npz")) or ObjectId.is_valid(data) or data == ""):
        return data
    elif isinstance(data, Dataset):
        return data
    elif isinstance(data, DataLoader):
        response = upload_data_loader(data, name)
    else:
        response = upload_data(data, name)
    status_code = response.status_code
    if status_code not in (204, 200, 201):
        raise ConnectionAbortedError("Data upload has not worked: {}".format(response.content))
    if status_code != 204:
        response = get_response(response)
        if isinstance(response, dict) and status_code == 200:
            message = response.get("message")
            npu_print(message)
            response = response["id"]
    return response
def slice_data(data):
    """Unpack a ``{'id': ..., 'indexes': ...}`` dict into ``(id, start, end)``.

    ``indexes`` may be a plain start index (``end`` is then ``None``) or a
    ``slice`` object, whose ``start``/``stop`` bounds are returned.
    """
    data_id = data["id"]
    indexes = data["indexes"]
    if isinstance(indexes, slice):
        return data_id, indexes.start, indexes.stop
    return data_id, indexes, None
def gen(dl):
    """Yield each chunk of data loader *dl* serialised via save_data."""
    for data_part in dl.numpy():
        yield save_data(data_part)
def create_callback(encoder):
    """Build a tqdm progress callback for a multipart upload *encoder*."""
    encoder_len = encoder.len
    bar = tqdm(desc=f"{NEURO_AI_STR} Uploading", unit="B", unit_scale=True, total=encoder_len, unit_divisor=1024)
    def callback(monitor):
        # Jump the bar to the number of bytes sent so far, then redraw.
        bar.n = monitor.bytes_read
        bar.refresh()
        if monitor.bytes_read == encoder_len:
            bar.close()
    return callback
def get_progress_bar_uploader(file, json):
    """Wrap *file* + *json* in a multipart encoder that reports tqdm progress."""
    encoder = create_upload(file, json)
    callback = create_callback(encoder)
    monitor = MultipartEncoderMonitor(encoder, callback)
    return monitor
def create_upload(file, _json):
    """Build the multipart body: a binary 'file' part plus a JSON metadata part."""
    return MultipartEncoder({
        'file': ('file', file, 'application/octet-stream', {'Content-Transfer-Encoding': 'binary'}),
        'json': (None, json.dumps(_json), 'application/json', {}),
    })
def upload_data_loader(dl, name=""):
    """Upload the contents of data loader *dl*, chunk by chunk, with dedup.

    The loader's hash is checked against the server first; if the data is
    already there, no upload happens. Returns the last HTTP response.
    """
    verbose_print("Hashing data locally...", MID_VERBOSITY)
    # NOTE: local name shadows the builtin ``hash``.
    hash, size, length = dl.hash()
    params = {"token": getToken(), "hash": hash, "collection": 1, "chunked": True, "is_last": False, "size": size,
              "given_name": name, "input_shape": dl.shape, "project": get_project()}
    # params = {"token": getToken(), "hash": hash, "collection": 1, "size": size, "given_name": name}
    verbose_print("Checking if data is on servers...", MID_VERBOSITY)
    response = get(HASH_URL, params=params)
    if response.status_code == 200:
        verbose_print("Data already uploaded. Will not reupload.", MID_VERBOSITY)
        return response
    npu_print("Data not on servers. Starting to upload. Total size of data is {}".format(convert_size(size)))
    if length == 1:
        # Single-chunk loaders go through the simpler non-chunked path.
        return upload_data(next(dl.numpy()), name)
    npu_print("{} chunks to upload...".format(length))
    for i, data_part in enumerate(dl.numpy()):
        verbose_print("Uploading chunk {} out of {}...".format(i + 1, length), MID_VERBOSITY)
        if i == length - 1:
            # Tell the server this is the final chunk of the set.
            params["is_last"] = True
        file = save_data(data_part)
        monitor = get_progress_bar_uploader(file, params)
        response = post(UPLOAD_DATA_URL, data=monitor,
                        headers={'Content-Type': monitor.content_type})
    return response
def upload_data(data, name=""):
    """Upload a single data object (or a file at a string path) with dedup.

    The MD5 of the serialised data is checked against the server first; if it
    is already there the lookup response is returned without uploading.
    """
    verbose_print("Saving data locally...", FULL_VERBOSITY)
    generic_file = False
    if isinstance(data, str):
        # A string is treated as a path to an arbitrary file on disk.
        file = open(data, "rb")
        generic_file = True
    else:
        file = save_data(data)
    verbose_print("Hashing...", FULL_VERBOSITY)
    # NOTE: local name shadows the builtin ``hash``.
    hash = hashlib.md5()
    for piece in read_in_chunks(file):
        hash.update(piece)
    # After the full read, tell() gives the total size in bytes.
    size = file.tell()
    hash = hash.hexdigest()
    verbose_print("Checking if data is on servers...", MID_VERBOSITY)
    params = {"token": getToken(), "hash": hash, "collection": 1, "given_name": name, "project": get_project(),
              "generic_file": generic_file}
    response = get(HASH_URL, params=params, json=params)
    if response.status_code == 200:
        verbose_print("Data already on servers. Returning result...", MID_VERBOSITY)
        file.close()
        return response
    npu_print("Data not found on servers. Total size of data is {}. Uploading now...".format(convert_size(size)))
    # Rewind so the multipart encoder re-reads the file from the start.
    file.seek(0)
    monitor = get_progress_bar_uploader(file=file, json=params)
    response = post(UPLOAD_DATA_URL, data=monitor,
                    headers={'Content-Type': monitor.content_type})
    if isinstance(data, str):
        file.close()
    return response
def upload_sample(data, params):
    """Trim *data* to its first 10 items when it holds more than 10.

    NOTE(review): the truncated ``data`` is never uploaded or returned, and
    the function implicitly returns ``None`` when trimming was required
    (``False`` otherwise). This looks unfinished -- confirm intended behaviour.
    """
    required = (len(data[0]) if isinstance(data, (tuple, list)) else len(data)) > 10
    if not required:
        return False
    data = [d[:10] for d in data] if isinstance(data, (tuple, list)) else data[:10]
def hash_file(file):
    """Return the MD5 hex digest of *file*, read in 1 KiB pieces."""
    digest = hashlib.md5()
    piece = file.read(1024)
    while piece:
        digest.update(piece)
        piece = file.read(1024)
    return digest.hexdigest()
def validate_model(model, data):
    """Smoke-test *model* by calling it once on ones-filled placeholder data.

    Skipped when *data* is a string reference or when the model's library
    cannot be mapped to a known framework.
    """
    library = determine_model(model)
    if isinstance(data, str):
        return
    # data = convert_to_numpy(data)
    # Pick the framework-appropriate ones() constructor lazily, so the heavy
    # libraries are only imported when actually needed.
    if library == pytorch_str:
        from torch import ones
    elif library == mxnet_str:
        from mxnet import nd
        ones = nd.ones
    elif library == TF_str:
        from numpy import ones
    else:
        return
        # raise ValueError("Cannot validate library: {} .".format(library))
    placeholder_data = ones(data.shape)
    model(placeholder_data)
def determine_data(data):
    """Normalise *data* into a ``(data, name, start, end)`` tuple.

    Dicts are unpacked via :func:`slice_data`; for :class:`Dataset`
    instances the dataset id is reported as the name.
    """
    start = None
    end = None
    name = ""
    if isinstance(data, dict):
        data, start, end = slice_data(data)
    if isinstance(data, Dataset):
        name = data.id
    return data, name, start, end
def npu_print(val, level="INFO"):
    """Print *val* as a timestamped, colourised Neuro Ai log line.

    *level* must be a key of ``levels`` ("INFO", "WARNING" or "ERROR").
    """
    log_str = f"{NEURO_AI_STR} {utcnow_formatted()} - [{levels[level]}{level}{bcolors.ENDC}]: {val}"
    print(f"{log_str}")
def verbose_print(str, verbosity):
    """Log *str* only when the global VERBOSITY is at least *verbosity*."""
    # NOTE: the first parameter shadows the builtin ``str``.
    if VERBOSITY >= verbosity:
        npu_print(str)
def utcnow_formatted():
    """Return the current UTC time formatted as ``HH:MM:SS``."""
    return datetime.utcnow().strftime("%H:%M:%S")
def make_request(request_type_function, url, data, headers, json, params, **kwargs):
    """Issue an HTTP request with auth headers and uniform error handling.

    Args:
        request_type_function: e.g. ``requests.post`` or ``requests.get``.
        url: endpoint to hit.
        data/headers/json/params: forwarded to requests; ``None`` becomes ``{}``.

    Returns:
        The successful requests Response.

    Raises:
        Exception: when the server answers with an HTTP error status
            (the server-provided message is logged first).
        requests.exceptions.RequestException: when the request itself fails
            (e.g. connection error) before any response exists.
    """
    if params is None:
        params = {}
    if json is None:
        json = {}
    if data is None:
        data = {}
    if headers is None:
        headers = {}
    response = None
    try:
        response = request_type_function(url, data=data, headers={**headers, **auth_header()}, json=json,
                                         params=params, **kwargs)
        response.raise_for_status()
        return response
    except requests.exceptions.RequestException as e:
        # Bug fix: if the request call itself failed there is no response
        # object to inspect -- re-raise instead of hitting an
        # UnboundLocalError on ``response`` below.
        if response is None:
            raise
        body = response.json()
        if "error" in body:
            npu_print(f"Error: {body['error']}", level="ERROR")
        elif "message" in body:
            npu_print(f"Error: {body['message']}", level="ERROR")
        raise Exception from e
        # exit(1)
def post(url, data=None, headers=None, json=None, params=None, **kwargs):
    """POST to *url* via make_request (adds auth header and error handling)."""
    return make_request(requests.post, url, data, headers, json, params, **kwargs)
def get(url, data=None, headers=None, json=None, params=None, **kwargs):
    """GET from *url* via make_request (adds auth header and error handling)."""
    return make_request(requests.get, url, data, headers, json, params, **kwargs)
class HubDataset:
    """Lightweight wrapper carrying serialised hub dataset metadata."""

    def __init__(self, hub_meta):
        self.hub_meta = hub_meta
| 31.696386 | 114 | 0.64277 | 339 | 0.025772 | 342 | 0.026 | 0 | 0 | 0 | 0 | 2,579 | 0.196062 |
7f1364a55addfc28c7b3b109ab7ac3024926b0fb | 54 | py | Python | BZOJ/BZOJ1349.py | xehoth/OnlineJudgeCodes | 013d31cccaaa1d2b6d652c2f5d5d6cb2e39884a7 | [
"Apache-2.0"
] | 7 | 2017-09-21T13:20:05.000Z | 2020-03-02T03:03:04.000Z | BZOJ/BZOJ1349.py | xehoth/OnlineJudgeCodes | 013d31cccaaa1d2b6d652c2f5d5d6cb2e39884a7 | [
"Apache-2.0"
] | null | null | null | BZOJ/BZOJ1349.py | xehoth/OnlineJudgeCodes | 013d31cccaaa1d2b6d652c2f5d5d6cb2e39884a7 | [
"Apache-2.0"
] | 3 | 2019-01-05T07:02:57.000Z | 2019-06-13T08:23:13.000Z | import math
print(int(math.ceil(math.sqrt(input()))))
| 18 | 41 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7f13a49ab964463c1066e45f31d90b6e23b47055 | 1,326 | py | Python | apps/order/migrations/0002_auto_20180710_0937.py | jakejie/ShopPro | f0cec134ae77f4449f15a0219123d6a6bce2aad2 | [
"Apache-2.0"
] | 1 | 2019-04-20T16:58:02.000Z | 2019-04-20T16:58:02.000Z | apps/order/migrations/0002_auto_20180710_0937.py | jakejie/ShopPro | f0cec134ae77f4449f15a0219123d6a6bce2aad2 | [
"Apache-2.0"
] | 6 | 2020-06-05T19:57:58.000Z | 2021-09-08T00:49:17.000Z | apps/order/migrations/0002_auto_20180710_0937.py | jakejie/ShopPro | f0cec134ae77f4449f15a0219123d6a6bce2aad2 | [
"Apache-2.0"
] | 1 | 2021-09-10T18:29:28.000Z | 2021-09-10T18:29:28.000Z | # Generated by Django 2.0.7 on 2018-07-10 09:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the foreign-key fields linking orders, items and logistics."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('product', '0001_initial'),
        ('order', '0001_initial'),
    ]

    operations = [
        # Each order list belongs to one user.
        migrations.AddField(
            model_name='orderlist',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户'),
        ),
        # Order items reference their order and product.
        migrations.AddField(
            model_name='orderitem',
            name='orderNum',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.OrderList', verbose_name='订单号'),
        ),
        migrations.AddField(
            model_name='orderitem',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Product', verbose_name='商品'),
        ),
        # Logistics entries reference their order.
        migrations.AddField(
            model_name='logistics',
            name='orderNum',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.OrderList', verbose_name='订单号'),
        ),
    ]
| 33.15 | 129 | 0.631222 | 1,187 | 0.881872 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.192422 |
7f15fb89af9a585518029d9c6d50226a9e6439c3 | 935 | py | Python | discordbot.py | yutoring/discordpy-startup | 2a1d7f46d5b83384675494805eb82d5395be622f | [
"MIT"
] | null | null | null | discordbot.py | yutoring/discordpy-startup | 2a1d7f46d5b83384675494805eb82d5395be622f | [
"MIT"
] | null | null | null | discordbot.py | yutoring/discordpy-startup | 2a1d7f46d5b83384675494805eb82d5395be622f | [
"MIT"
] | null | null | null | from discord.ext import commands
import tasks
from datetime import datetime
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
# Create the client object needed for the connection.
# NOTE(review): ``discord`` itself is never imported here (only discord.ext),
# so this line would raise NameError -- confirm the missing ``import discord``.
client = discord.Client()
# Date/times (to the minute) at which to post the reminder.
dateTimeList = [
    '2019/11/19 18:09',
    '2019/11/19 18:15',
    '2019/11/19 18:20',
]
# Runs once when the bot has connected and is ready.
@client.event
async def on_ready():
    print('ready')
# Runs when one of the scheduled times is reached.
async def SendMessage():
    # NOTE(review): ``get_channel`` and ``ctx`` are undefined in this scope --
    # calling this will raise NameError. Confirm the intended channel lookup.
    channel = get_channel(ctx)
    await channel.send('時間だよ')
# Loop running every 30 seconds.
@tasks.loop(seconds=30)
async def time_check():
    sleepTime = 0  # NOTE(review): unused variable.
    # Current time, truncated to the minute to compare against dateTimeList.
    now = datetime.now().strftime('%Y/%m/%d %H:%M')
    if now in dateTimeList :
        print(now)
        await SendMessage()
        # Wait an extra 30 seconds so the same minute is not posted twice.
        # NOTE(review): ``asyncio`` is never imported in this file -- this
        # line would raise NameError; confirm the missing import.
        await asyncio.sleep(30)
@bot.command()
async def ping(ctx):
    """Reply 'pong' to the /ping command."""
    await ctx.send('pong')
# Start the 30-second scheduling loop.
time_check.start()
# Start the bot and connect to the Discord server.
client.run(token)
# NOTE(review): client.run() blocks until disconnect, so this second call
# never executes during normal operation -- confirm which object should run.
bot.run(token)
| 17 | 51 | 0.680214 | 0 | 0 | 0 | 0 | 457 | 0.401935 | 498 | 0.437995 | 437 | 0.384345 |
7f16d84239714a967c293f97513e0e7811b10129 | 2,431 | py | Python | model-optimizer/unit_tests/extensions/ops/sparse_reshape_test.py | monroid/openvino | 8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6 | [
"Apache-2.0"
] | 2,406 | 2020-04-22T15:47:54.000Z | 2022-03-31T10:27:37.000Z | model-optimizer/unit_tests/extensions/ops/sparse_reshape_test.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 4,948 | 2020-04-22T15:12:39.000Z | 2022-03-31T18:45:42.000Z | model-optimizer/unit_tests/extensions/ops/sparse_reshape_test.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 991 | 2020-04-23T18:21:09.000Z | 2022-03-31T18:40:57.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.ops.sparse_reshape import SparseReshape
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
# Node attributes for the test graph: three data inputs feeding one
# SparseReshape op that produces two data outputs.
nodes_attributes = {'input_indices': {'shape': None, 'value': None, 'kind': 'data'},
                    'input_shape': {'shape': None, 'value': None, 'kind': 'data'},
                    'new_shape': {'shape': None, 'value': None, 'kind': 'data'},
                    'sparse_reshape_node': {'op': 'SparseReshape', 'kind': 'op'},
                    'output_indices': {'shape': None, 'value': None, 'kind': 'data'},
                    'output_shape': {'shape': None, 'value': None, 'kind': 'data'}}
# graph 1
edges1 = [('input_indices', 'sparse_reshape_node', {'in': 0}),
          ('input_shape', 'sparse_reshape_node', {'in': 1}),
          ('new_shape', 'sparse_reshape_node', {'in': 2}),
          ('sparse_reshape_node', 'output_indices', {'out': 0}),
          ('sparse_reshape_node', 'output_shape', {'out': 1})]
# Five sparse entries in a [4, 5] dense shape, reshaped to [5, -1, 2].
inputs1 = {'input_indices': {'shape': int64_array([5, 2]), 'value': None},
           'input_shape': {'shape': int64_array([2]), 'value': int64_array([4, 5])},
           'new_shape': {'shape': int64_array([3]), 'value': int64_array([5, -1, 2])}}
class TestSparseReshape(unittest.TestCase):
    def test_partial_infer1(self):
        """Infer must resolve -1 in [5, -1, 2] to 2 (20 elements total) and
        widen the indices shape from [5, 2] to [5, 3] rank-3 coordinates."""
        graph = build_graph(nodes_attributes, edges1, inputs1)
        sparse_reshape_node = Node(graph, 'sparse_reshape_node')
        SparseReshape.infer(sparse_reshape_node)
        # prepare reference results
        ref_output_indices_shape = np.array([5, 3], dtype=np.int32)
        ref_output_shape_value = np.array([5, 2, 2], dtype=np.int32)
        # get the result
        res_output_indices_shape = graph.node['output_indices']['shape']
        res_output_shape_value = graph.node['output_shape']['value']
        self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape),
                        'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape))
        self.assertTrue(np.array_equal(ref_output_shape_value, res_output_shape_value),
                        'values do not match expected: {} and given: {}'.format(ref_output_shape_value, res_output_shape_value))
| 47.666667 | 132 | 0.640889 | 1,040 | 0.427807 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.336898 |
7f17b5e719007933ccff3a44c5b4667ddd2e9fe6 | 2,425 | py | Python | app/utils/sqlalchemy_helpers.py | maricaantonacci/slat | 739a98a93884bc1defbf2b297f4225e0ba069fef | [
"Apache-2.0"
] | null | null | null | app/utils/sqlalchemy_helpers.py | maricaantonacci/slat | 739a98a93884bc1defbf2b297f4225e0ba069fef | [
"Apache-2.0"
] | null | null | null | app/utils/sqlalchemy_helpers.py | maricaantonacci/slat | 739a98a93884bc1defbf2b297f4225e0ba069fef | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2020-2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ref: https://docs.sqlalchemy.org/en/13/core/custom_types.html#backend-agnostic-guid-type
from sqlalchemy.types import TypeDecorator, CHAR, Integer
from sqlalchemy.dialects.postgresql import UUID
import uuid
class GUID(TypeDecorator):
    """Platform-independent GUID type.
    Uses PostgreSQL's UUID type, otherwise uses
    CHAR(32), storing as stringified hex values.
    """
    impl = CHAR
    def load_dialect_impl(self, dialect):
        # Native UUID column on PostgreSQL, CHAR(32) everywhere else.
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(UUID())
        else:
            return dialect.type_descriptor(CHAR(32))
    def process_bind_param(self, value, dialect):
        # Convert a Python UUID (or UUID string) into the stored form.
        if value is None:
            return value
        elif dialect.name == 'postgresql':
            return str(value)
        else:
            # Non-PostgreSQL: store as a 32-char zero-padded hex string.
            if not isinstance(value, uuid.UUID):
                return "%.32x" % uuid.UUID(value).int
            else:
                # hexstring
                return "%.32x" % value.int
    def process_result_value(self, value, dialect):
        # Convert the stored form back into a Python uuid.UUID.
        if value is None:
            return value
        else:
            if not isinstance(value, uuid.UUID):
                value = uuid.UUID(value)
            return value
class IntEnum(TypeDecorator):
    """Column type that persists a Python enum by its integer *value*.

    SQLAlchemy's default Enum handling stores the member *name* (a
    string); this type stores the numeric value instead and rebuilds the
    enum member when reading the column back.
    """
    impl = Integer

    def __init__(self, enumtype, *args, **kwargs):
        super(IntEnum, self).__init__(*args, **kwargs)
        self._enumtype = enumtype

    def process_bind_param(self, value, dialect):
        # Plain ints pass straight through; enum members contribute .value.
        return int(value) if isinstance(value, int) else value.value

    def process_result_value(self, value, dialect):
        return self._enumtype(value)
| 31.907895 | 90 | 0.654021 | 1,591 | 0.656082 | 0 | 0 | 0 | 0 | 0 | 0 | 1,044 | 0.430515 |
7f17cfaa77e2ef66b6517f601eec65ab88ad3f26 | 6,955 | py | Python | python/uw/like/scalemodels.py | coclar/pointlike | 7088724b5a40cf787371aff69e64c9bec701f578 | [
"BSD-3-Clause"
] | 1 | 2019-03-19T14:45:28.000Z | 2019-03-19T14:45:28.000Z | python/uw/like/scalemodels.py | coclar/pointlike | 7088724b5a40cf787371aff69e64c9bec701f578 | [
"BSD-3-Clause"
] | 1 | 2019-03-05T17:30:52.000Z | 2019-03-05T18:12:15.000Z | python/uw/like/scalemodels.py | coclar/pointlike | 7088724b5a40cf787371aff69e64c9bec701f578 | [
"BSD-3-Clause"
] | 3 | 2018-03-14T15:34:07.000Z | 2021-11-05T15:29:32.000Z | """ Code to implement ScaleFactor:: decorator supported
in gtlike.
The gtlike feature is documented here:
https://confluence.slac.stanford.edu/display/ST/Science+Tools+Development+Notes?focusedCommentId=103582318#comment-103582318
Author: Joshua Lande
"""
import operator
from copy import deepcopy
import numpy as np
from uw.like.Models import PowerLaw, PowerLawFlux, FileFunction, PLSuperExpCutoff, Gaussian, Constant, CompositeModel
from uw.darkmatter.spectral import DMFitFunction
def build_scale_factor(model_class):
    """
    First, create the ScaleFactorPowerLaw and a comparison PowerLaw
        >>> scale = 3.133141
        >>> sfpl=ScaleFactorPowerLaw(ScaleFactor=scale)
        >>> pl = PowerLaw()
        >>> print sfpl.name
        ScaleFactorPowerLaw
        >>> print sfpl.gtlike['name']
        ScaleFactor::PowerLaw
        >>> print sfpl.pretty_name
        ScaleFactor::PowerLaw
        >>> print sfpl.full_name()
        ScaleFactor::PowerLaw, e0=1000, ScaleFactor=3.133141
        >>> print sfpl.e0 == pl.e0
        True
        >>> sfpl.default_extra_params == pl.default_extra_params
        True
        >>> np.all(sfpl.default_p == [1] + pl.default_p)
        True
        >>> print sfpl.param_names == ['ScaleFactor'] + pl.param_names
        True
        >>> print np.all(sfpl.default_mappers == Constant.default_mappers + PowerLaw.default_mappers)
        True
        >>> sfpl.default_extra_params == pl.default_extra_params
        True
        >>> sfpl.default_extra_attrs == sfpl.default_extra_attrs
        True
        >>> print sfpl.default_oomp_limits == ['ScaleFactor'] + PowerLaw.default_oomp_limits
        True
    Make sure that default_limits acts correctly
        >>> dl=sfpl.default_limits
        >>> dl['Norm'] == pl.default_limits['Norm']
        True
        >>> dl['Index'] == pl.default_limits['Index']
        True
        >>> dl['ScaleFactor'] == Constant.default_limits['Scale']
        True
    Make sure the __call__ function is correct
        >>> energies=np.logspace(1,5,100)
        >>> np.all(sfpl(energies) == scale*pl(energies))
        True
    And that the gradient follows the chain rule:
        >>> grad = sfpl.external_gradient(energies)
        >>> np.all(grad[0] == pl(energies))
        True
        >>> np.all(grad[1:] == scale*pl.external_gradient(energies))
        True
    Note, we can set default limits for ScaleFactor objects (necessary for XML creation):
        >>> print sfpl.mappers == Constant.default_mappers + PowerLaw.default_mappers
        True
        >>> print np.all(sfpl.default_mappers == Constant.default_mappers + PowerLaw.default_mappers)
        True
        >>> sfpl.set_default_limits()
        >>> sfpl.mappers == [Constant.default_limits['Scale'],PowerLaw.default_limits['Norm'],PowerLaw.default_limits['Index']]
        True
    Also, you can obtain the unfit parameters either as values of the object or with getp/setp
        >>> sfpl.e0 == pl.e0 and sfpl['e0'] == pl.e0 and sfpl.getp('e0') == pl.e0
        True
    We can create ScaleFactor object for other models. For PowerLawFlux:
        >>> sfpl2=ScaleFactorPowerLawFlux(ScaleFactor=scale)
        >>> pl2 = PowerLawFlux()
        >>> print sfpl2.name
        ScaleFactorPowerLawFlux
        >>> print sfpl2.gtlike['name']
        ScaleFactor::PowerLaw2
        >>> sfpl2.emax == pl2.emax and sfpl2.emax == pl2.emax
        True
    And, of course, the values are just scaled
        >>> np.all(sfpl2(energies) == scale*pl2(energies))
        True
    There is also a ScaleFactorFileFunction object, which acts just like a FileFunction.
        >>> from tempfile import NamedTemporaryFile
        >>> temp = NamedTemporaryFile()
        >>> filename = temp.name
        >>> sfpl2.save_profile(filename, emin=1, emax=1e5)
        >>> temp.seek(0)
        >>> sfff = ScaleFactorFileFunction(ScaleFactor=5.5, normalization=1, file=filename)
        >>> np.allclose(sfff(energies),5.5*sfpl2(energies),rtol=1e-10, atol=1e-10)
        True
    Note, it sets default_extra_attrs correctly:
        >>> sfff.default_extra_attrs == FileFunction.default_extra_attrs
        True
        >>> sfff.file == filename
        True
    """
    # For a description of creating classes on the fly, see:
    # http://jjinux.blogspot.com/2005/03/python-create-new-class-on-fly.html
    c = type('ScaleFactor' + model_class.__name__, (CompositeModel,), {})
    # Note, default_p, param_names, default_mappers, automatically taken care of by CompositeModel
    c.default_extra_params=model_class.default_extra_params
    c.default_extra_attrs=model_class.default_extra_attrs
    # Prepend the ScaleFactor parameter to the gtlike bookkeeping of the
    # wrapped model; the scale maps to/from gtlike unchanged (operator.pos).
    c.gtlike = deepcopy(model_class.gtlike)
    c.gtlike['name']='ScaleFactor::%s' % c.gtlike['name']
    c.gtlike['param_names'].insert(0,'ScaleFactor')
    c.gtlike['topointlike'].insert(0,operator.pos)
    c.gtlike['togtlike'].insert(0,operator.pos)
    def __init__(self, **kwargs):
        # The composite is a Constant (the scale factor) times the model.
        scale = Constant(name='ScaleFactor')
        scale.default_oomp_limits=['ScaleFactor']
        if 'ScaleFactor' in kwargs:
            scale['ScaleFactor'] = kwargs.pop('ScaleFactor')
        m=model_class(**kwargs)
        super(c,self).__init__(scale,m)
        self.scale=scale
        self.model=m
    # list() makes the key concatenation work on both Python 2 and 3.
    for p in list(c.default_extra_params.keys()) + list(c.default_extra_attrs.keys()):
        # Allow getting and setting the default_extra_params and default_extra_attrs
        # directly through the self.model object.
        # Bug fix: bind p as a default argument. The previous lambdas closed
        # over the loop variable itself, so with more than one extra
        # parameter/attribute every property read/wrote the name bound last.
        getter = lambda self, p=p: getattr(self.model, p)
        setter = lambda self, value, p=p: setattr(self.model, p, value)
        # Bug fix: p is meant as the property's docstring; the third
        # positional argument of property() is fdel, so pass it as doc=.
        setattr(c, p, property(getter, setter, doc=p))
    c.__init__ = __init__
    c.__call__ = lambda self,e: self.scale.__call__(e)*self.model.__call__(e)
    c.pretty_name = property(lambda self: 'ScaleFactor::%s' % self.model.pretty_name)
    c.full_name = lambda self: 'ScaleFactor::%s, ScaleFactor=%s' % (self.model.full_name(),self['ScaleFactor'])
    def external_gradient(self, e):
        # Chain rule: d(s*f)/ds = f for the scale parameter and
        # s*(df/dp) for each parameter p of the wrapped model.
        a=self.scale.external_gradient(e)*self.model.__call__(e)
        b=self.scale.__call__(e)*self.model.external_gradient(e)
        return np.concatenate((a,b),axis=0)
    c.external_gradient = external_gradient
    return c
# Pre-built ScaleFactor:: variants of the commonly used spectral models.
ScaleFactorPowerLaw=build_scale_factor(PowerLaw)
ScaleFactorPowerLawFlux=build_scale_factor(PowerLawFlux)
ScaleFactorFileFunction=build_scale_factor(FileFunction)
ScaleFactorDMFitFunction=build_scale_factor(DMFitFunction)
ScaleFactorPLSuperExpCutoff=build_scale_factor(PLSuperExpCutoff)
ScaleFactorGaussian=build_scale_factor(Gaussian)
# Running the module directly executes the doctests embedded in
# build_scale_factor's docstring.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 36.036269 | 132 | 0.634364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,875 | 0.700935 |
7f191b51e43f5f96fe4666dba8c6b32532e8c659 | 581 | py | Python | neighbour/forms.py | Nobella-Nyarari-Ejiofor/Neighbourood | cacd9d64c944fa195a4bcb554425c9d0210b87b8 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | neighbour/forms.py | Nobella-Nyarari-Ejiofor/Neighbourood | cacd9d64c944fa195a4bcb554425c9d0210b87b8 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | neighbour/forms.py | Nobella-Nyarari-Ejiofor/Neighbourood | cacd9d64c944fa195a4bcb554425c9d0210b87b8 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | from django.contrib.auth.models import User
from django import forms
import neighbour
from .models import Business, Profile , Neighbourhood , Posts
class ProfileForm(forms.ModelForm):
    """Model form for Profile; the `user` field is excluded from the form."""
    class Meta:
        model = Profile
        exclude = ['user']
class NeighbourhoodForm(forms.ModelForm):
    """Model form for Neighbourhood; `occupants_count` is excluded from the form."""
    class Meta:
        model = Neighbourhood
        exclude = ['occupants_count']
class PostForm(forms.ModelForm):
    """Model form for Posts; `profile` and `neighbourhood` are excluded from the form."""
    class Meta:
        model = Posts
        exclude= ['profile','neighbourhood']
class BusinessForm(forms.ModelForm):
    """Model form for Business; `profile` and `neighbourhood` are excluded from the form."""
    class Meta:
        model = Business
        exclude =['profile', 'neighbourhood']
7f191d3bb5dfb78d923ae3d5e4afeb6b44cfb286 | 3,956 | py | Python | src/gp_server/utils/paths_overlay_filter.py | hellej/hope-green-path-server | 46742c413fbe4c3734edd12ec867bdf7e2b29f05 | [
"MIT"
] | 5 | 2020-05-24T12:18:54.000Z | 2021-04-25T19:07:16.000Z | src/gp_server/utils/paths_overlay_filter.py | hellej/hope-green-path-server | 46742c413fbe4c3734edd12ec867bdf7e2b29f05 | [
"MIT"
] | 43 | 2019-09-22T18:45:35.000Z | 2021-07-10T08:51:33.000Z | src/gp_server/utils/paths_overlay_filter.py | hellej/hope-green-path-server | 46742c413fbe4c3734edd12ec867bdf7e2b29f05 | [
"MIT"
] | 7 | 2020-08-04T06:50:14.000Z | 2021-01-17T11:36:13.000Z | """
This module provides functionality for filtering out paths with nearly identical geometries.
"""
from typing import List, Tuple, Union
from gp_server.app.path import Path
from gp_server.app.logger import Logger
def __get_path_overlay_candidates_by_len(
    param_path: Path,
    all_paths: List[Path],
    len_diff: int = 25
) -> List[Path]:
    """Returns the paths whose length differs (strictly) less than [len_diff]
    meters from the length of [param_path]. If [all_paths] contains
    [param_path], it is included in the returned list as well.
    """
    upper = param_path.length + len_diff
    lower = param_path.length - len_diff
    candidates = []
    for other in all_paths:
        if lower < other.length < upper:
            candidates.append(other)
    return candidates
def __get_overlapping_paths(
    log: Logger,
    param_path: Path,
    compare_paths: List[Path],
    buffer_m: int = None
) -> List[Path]:
    """Returns those of [compare_paths] whose geometry lies entirely within
    the geometry of [param_path] buffered by [buffer_m].
    [param_path] itself is always the first element of the result.
    """
    buffered_geom = param_path.geometry.buffer(buffer_m)
    overlapping = [param_path]
    overlapping.extend(
        other for other in compare_paths
        if other.path_id != param_path.path_id
        and other.geometry.within(buffered_geom)
    )
    if len(overlapping) > 1:
        log.debug(
            f'Found {len(overlapping)} overlapping paths for: '
            f'{param_path.path_id} - {[p.path_id for p in overlapping]}'
        )
    return overlapping
def __get_least_cost_path(
    paths: List[Path],
    cost_attr: str = 'nei_norm'
) -> Path:
    """Returns the least expensive (best) path by given cost attribute."""
    if len(paths) == 1:
        return next(iter(paths))

    def cost_of(path: Path):
        # Supported cost attributes: noise cost or air-quality cost.
        if cost_attr == 'nei_norm':
            return path.noise_attrs.nei_norm
        if cost_attr == 'aqc_norm':
            return path.aqi_attrs.aqc_norm

    # min() is stable in the same way as a stable sort: the first path
    # with the minimal cost is returned.
    return min(paths, key=cost_of)
def get_unique_paths_by_geom_overlay(
        log: Logger,
        all_paths: Tuple[Path],
        buffer_m: int = None,
        cost_attr: str = 'nei_norm'
) -> Union[List[str], None]:
    """Filters a list of paths by comparing buffered line geometries of the paths and selecting
    only the unique paths by given buffer_m (m).
    Args:
        all_paths: Both fastest and exposure optimized paths.
        buffer_m: A buffer size in meters with which the path geometries will be buffered when
            comparing path geometries.
        cost_attr: The name of a cost attribute to minimize when selecting the best of overlapping
            paths.
    Note:
        Filters out fastest path if an overlapping green path is found to replace it.
    Returns:
        A filtered list of paths having nearly unique line geometry with respect to the given
        buffer_m. None if PathSet contains only one path.
    """
    if len(all_paths) == 1:
        return None
    # Paths already grouped with an earlier path; they never start a new
    # overlap group of their own.
    paths_already_overlapped = []
    # IDs of the paths selected as the best representative of their group.
    filtered_paths_ids = []
    for path in all_paths:
        if path.path_id not in filtered_paths_ids and path.path_id not in paths_already_overlapped:
            # Narrow down first by similar length, then by geometric overlap.
            overlay_candidates = __get_path_overlay_candidates_by_len(path, all_paths, len_diff=25)
            overlapping_paths = __get_overlapping_paths(log, path, overlay_candidates, buffer_m)
            # Keep only the cheapest path of each overlap group.
            best_overlapping_path = __get_least_cost_path(overlapping_paths, cost_attr=cost_attr)
            if best_overlapping_path.path_id not in filtered_paths_ids:
                filtered_paths_ids.append(best_overlapping_path.path_id)
            paths_already_overlapped += [path.path_id for path in overlapping_paths]
    log.debug(
        f'Filtered {len(filtered_paths_ids)} unique paths '
        f'from {len(all_paths)} unique paths by overlay'
    )
    return filtered_paths_ids
| 36.293578 | 100 | 0.685035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,489 | 0.37639 |
7f19de1db916a2221c0fcabfd4e70d02f0ac0cdb | 1,676 | py | Python | ADT/Homework 4/heapsort_variant.py | Devang-25/CS-Jacobs | d56a70c13ce3863bcf3140990fab5b634b30a2af | [
"MIT"
] | null | null | null | ADT/Homework 4/heapsort_variant.py | Devang-25/CS-Jacobs | d56a70c13ce3863bcf3140990fab5b634b30a2af | [
"MIT"
] | null | null | null | ADT/Homework 4/heapsort_variant.py | Devang-25/CS-Jacobs | d56a70c13ce3863bcf3140990fab5b634b30a2af | [
"MIT"
] | null | null | null |
# Index helpers for a binary heap stored in a plain list.
def left(i):
    """Return the index of the left child of node i."""
    return 2*i+1

def right(i):
    """Return the index of the right child of node i."""
    return 2*i+2

def parent(i):
    """Return the index of the parent of node i (-1 for the root)."""
    return (i-1)//2

def max_heapify(arr, n, i):
    """Sift arr[i] down so the subtree rooted at i satisfies the max-heap
    property; only the first n elements of arr belong to the heap."""
    l = left(i)
    r = right(i)
    largest = i
    if n > l and arr[largest] < arr[l]:
        largest = l
    if n > r and arr[largest] < arr[r]:
        largest = r
    if largest != i:
        arr[largest], arr[i] = arr[i], arr[largest]
        # Heapify the affected subtree again.
        max_heapify(arr, n, largest)

def build_max_heap(arr):
    """Turn arr into a max-heap in place (bottom-up heapify)."""
    for i in range(len(arr)//2, -1, -1):
        max_heapify(arr, len(arr), i)

def pushup(arr, i):
    """Bubble arr[i] up towards the root while it exceeds its parent."""
    # Bug fix: stop at the root. parent(0) == -1, so the old code compared
    # the root against arr[-1] (the last list element) and could wrongly
    # swap the two.
    if i > 0 and arr[parent(i)] < arr[i]:
        arr[i], arr[parent(i)] = arr[parent(i)], arr[i]
        pushup(arr, parent(i))

def push_down(arr, n, i):
    """Walk the element at index i down along the path of larger children
    to a leaf (swapping on the way) and return the leaf index reached.
    Only the first n elements of arr belong to the heap."""
    l = left(i)
    r = right(i)
    if l >= n:
        # No children: i is already a leaf.
        return i
    elif r >= n:
        # Only the left child exists.
        arr[i], arr[l] = arr[l], arr[i]
        return l
    else:
        # Swap with the larger of the two children and continue downwards.
        if arr[l] > arr[r]:
            arr[i], arr[l] = arr[l], arr[i]
            largest = l
        else:
            arr[i], arr[r] = arr[r], arr[i]
            largest = r
        return push_down(arr, n, largest)

# Heapsort (bottom-up variant): after extracting the maximum, the displaced
# element is first pushed down to a leaf and then bubbled back up.
def heapsort_variant(arr):
    """Sort arr in place in ascending order."""
    size = len(arr)
    # Build max heap
    build_max_heap(arr)
    for i in range(size-1, 0, -1):
        # Move the current maximum behind the shrinking heap region.
        arr[i], arr[0] = arr[0], arr[i]
        # Bug fix: the remaining heap holds i elements, so push down with
        # n=i. The old call used n=i-1, which ignored the last heap element
        # and mis-sorted inputs such as [2, 3, 1].
        leafpos = push_down(arr, i, 0)
        pushup(arr, leafpos)
| 23.605634 | 51 | 0.516706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.172434 |
7f19df6d90e4eaf71ba87a40f7d247e20f11c992 | 6,625 | py | Python | kunquat/tracker/ui/model/keymapmanager.py | kagu/kunquat | 83a2e972121e6a114ecc5ef4392b501ce926bb06 | [
"CC0-1.0"
] | 13 | 2016-09-01T21:52:49.000Z | 2022-03-24T06:07:20.000Z | kunquat/tracker/ui/model/keymapmanager.py | kagu/kunquat | 83a2e972121e6a114ecc5ef4392b501ce926bb06 | [
"CC0-1.0"
] | 290 | 2015-03-14T10:59:25.000Z | 2022-03-20T08:32:17.000Z | kunquat/tracker/ui/model/keymapmanager.py | kagu/kunquat | 83a2e972121e6a114ecc5ef4392b501ce926bb06 | [
"CC0-1.0"
] | 7 | 2015-03-19T13:28:11.000Z | 2019-09-03T16:21:16.000Z | # -*- coding: utf-8 -*-
#
# Authors: Toni Ruottu, Finland 2014
# Tomi Jylhä-Ollila, Finland 2016-2019
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
class HitKeymapID:
    # Sentinel type used as the keymap ID of the hit keymap; all other
    # keymap IDs come from the notation manager (see _get_keymap_ids).
    pass
class KeyboardAction():
    """A single action that can be bound to a typewriter keyboard key."""

    # Supported action type constants.
    NOTE = 'note'
    OCTAVE_DOWN = 'octave_down'
    OCTAVE_UP = 'octave_up'
    PLAY = 'play'
    REST = 'rest'
    SILENCE = 'silence'

    def __init__(self, action_type):
        super().__init__()
        self.action_type = action_type

    def __eq__(self, other):
        # Equality (and hashing) is based on the action type alone.
        return self.action_type == other.action_type

    def __hash__(self):
        return hash(self.action_type)
class KeyboardNoteAction(KeyboardAction):
    """A note action bound to a specific (row, index) keyboard position."""

    def __init__(self, row, index):
        super().__init__(KeyboardAction.NOTE)
        self.row = row
        self.index = index

    def _get_fields(self):
        # Full identity of the action: type plus keyboard position.
        return (self.action_type, self.row, self.index)

    def __eq__(self, other):
        if isinstance(other, KeyboardNoteAction):
            return self._get_fields() == other._get_fields()
        return False

    def __hash__(self):
        return hash(self._get_fields())
# Built-in keymap used when hit mode is active (see
# KeymapManager.get_selected_keymap): hit indices 0-127 distributed over
# four keyboard rows, 32 hits per row.
_hit_keymap = {
    'is_hit_keymap': True,
    'name': 'Hits',
    'keymap': [
        [0, 7, 1, 8, 2, 9, 3, 10, 4, 11, 5, 12, 6, 13,
            14, 23, 15, 24, 16, 25, 17, 26, 18, 27, 19, 28, 20, 29, 21, 30, 22, 31],
        [32, 39, 33, 40, 34, 41, 35, 42, 36, 43, 37, 44, 38, 45,
            46, 55, 47, 56, 48, 57, 49, 58, 50, 59, 51, 60, 52, 61, 53, 62, 54, 63],
        [64, 71, 65, 72, 66, 73, 67, 74, 68, 75, 69, 76, 70, 77,
            78, 87, 79, 88, 80, 89, 81, 90, 82, 91, 83, 92, 84, 93, 85, 94, 86, 95],
        [96, 103, 97, 104, 98, 105, 99, 106, 100, 107, 101, 108, 102, 109,
            110, 119, 111, 120, 112, 121, 113, 122, 114, 123, 115, 124, 116, 125,
            117, 126, 118, 127],
    ],
}
class KeymapManager():
    """Tracks the active keymap (a notation keymap or the hit keymap) and
    the mapping between physical keyboard keys and typewriter actions.

    The actual state is stored in the controller's session object; this
    class validates and forwards it.
    """
    def __init__(self):
        self._session = None
        self._updater = None
        self._ui_model = None
    def set_controller(self, controller):
        self._session = controller.get_session()
        self._updater = controller.get_updater()
        self._share = controller.get_share()
    def set_ui_model(self, ui_model):
        self._ui_model = ui_model
    def _are_keymap_actions_valid(self, actions):
        # A complete keymap needs each single-instance action at least once
        # plus exactly 33 distinct note actions.
        ka = KeyboardAction
        req_single_action_types = (
            ka.OCTAVE_DOWN, ka.OCTAVE_UP, ka.PLAY, ka.REST, ka.SILENCE)
        for action_type in req_single_action_types:
            if ka(action_type) not in actions:
                return False
        note_actions = set(a for a in actions if a.action_type == ka.NOTE)
        if len(note_actions) != 33:
            return False
        return True
    def get_keyboard_row_sizes(self):
        # The number of buttons provided for configuration on each row
        # On a QWERTY layout, the leftmost buttons are: 1, Q, A, Z
        return (11, 11, 11, 10)
    def get_typewriter_row_sizes(self):
        return (9, 10, 7, 7)
    def get_typewriter_row_offsets(self):
        return (1, 0, 1, 0)
    def _is_row_layout_valid(self, locs):
        # Each location is a (row, index) pair; locations must be unique
        # and fall inside the keyboard row layout.
        row_sizes = self.get_keyboard_row_sizes()
        used_locs = set()
        for loc in locs:
            if loc in used_locs:
                return False
            used_locs.add(loc)
            row, index = loc
            if not (0 <= row < len(row_sizes)):
                return False
            if not (0 <= index < row_sizes[row]):
                return False
        return True
    def set_key_actions(self, actions):
        # actions: dict of (row, index) location -> KeyboardAction.
        assert self._is_row_layout_valid(actions.keys())
        assert self._are_keymap_actions_valid(actions.values())
        self._session.keyboard_key_actions = actions
        # Keep the reverse mapping (action -> location) in sync.
        action_locations = { act: loc for (loc, act) in actions.items() }
        self._session.keyboard_action_locations = action_locations
    def set_key_names(self, names):
        self._session.keyboard_key_names = names
    def set_scancode_locations(self, codes_to_locs):
        assert self._is_row_layout_valid(codes_to_locs.values())
        self._session.keyboard_scancode_locations = codes_to_locs
    def set_key_id_locations(self, ids_to_locs):
        assert self._is_row_layout_valid(ids_to_locs.values())
        self._session.keyboard_id_locations = ids_to_locs
    def get_scancode_location(self, code):
        return self._session.keyboard_scancode_locations.get(code, None)
    def get_key_id_location(self, key_id):
        return self._session.keyboard_id_locations.get(key_id, None)
    def get_key_action(self, location):
        return self._session.keyboard_key_actions.get(location, None)
    def get_key_name(self, location):
        return self._session.keyboard_key_names.get(location, None)
    def get_action_location(self, action):
        # Accept either a KeyboardAction or a plain action type string
        # (except NOTE, which always needs a full KeyboardNoteAction).
        if not isinstance(action, KeyboardAction):
            assert action != KeyboardAction.NOTE
            action = KeyboardAction(action)
        return self._session.keyboard_action_locations.get(action, None)
    def _get_keymap_ids(self):
        # All notation IDs plus the hit keymap sentinel.
        notation_mgr = self._ui_model.get_notation_manager()
        keymap_ids = notation_mgr.get_notation_ids()
        keymap_ids.append(HitKeymapID)
        return keymap_ids
    def _get_some_keymap_id(self):
        # NOTE(review): sorted() mixes notation IDs with the HitKeymapID
        # class; on Python 3 this raises TypeError when more than one ID is
        # present unless notation IDs compare cleanly -- confirm.
        keymap_ids = self._get_keymap_ids()
        if len(keymap_ids) < 2:
            return HitKeymapID
        some_id = sorted(keymap_ids)[1]
        return some_id
    def get_selected_keymap_id(self):
        #keymap_ids = self.get_keymap_ids()
        selected_id = self._session.get_selected_notation_id() or self._get_some_keymap_id()
        return selected_id
    def is_hit_keymap_active(self):
        return self._session.is_hit_keymap_active()
    def get_selected_keymap(self):
        # The hit keymap overrides the selected notation's keymap.
        if self.is_hit_keymap_active():
            return _hit_keymap
        notation_mgr = self._ui_model.get_notation_manager()
        notation = notation_mgr.get_selected_notation()
        return notation.get_keymap()
    def set_hit_keymap_active(self, active):
        self._session.set_hit_keymap_active(active)
        # Switch the typewriter to the base octave of whichever keymap is
        # now effective (hit keymaps have no octaves, so use 0).
        keymap_data = self.get_selected_keymap()
        if keymap_data.get('is_hit_keymap', False):
            base_octave = 0
        else:
            base_octave = keymap_data['base_octave']
        typewriter_mgr = self._ui_model.get_typewriter_manager()
        typewriter_mgr.set_octave(base_octave)
| 31.103286 | 92 | 0.635774 | 5,540 | 0.8361 | 0 | 0 | 0 | 0 | 0 | 0 | 608 | 0.09176 |
7f1a722e6f68f116e0ececcc7c8694b6ecbb5279 | 453 | py | Python | make_instance.py | github-nakasho/ohzeki_method | 975e5a85e0209d6f3689eaee179c19335c52e35a | [
"MIT"
] | null | null | null | make_instance.py | github-nakasho/ohzeki_method | 975e5a85e0209d6f3689eaee179c19335c52e35a | [
"MIT"
] | null | null | null | make_instance.py | github-nakasho/ohzeki_method | 975e5a85e0209d6f3689eaee179c19335c52e35a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
def make_instance(num_rands=2000, K=5):
    """Build a random instance of the K-smallest selection problem.

    Args:
        num_rands: number of uniform random values to draw (default 2000).
        K: how many of the smallest values form the optimal solution
            (default 5).

    Returns:
        dict with keys 'num_rands', 'K', 'rands' (the drawn values) and
        'optimal_obj' (the sum of the K smallest values).
    """
    # draw uniform random numbers in [0, 1)
    rands = np.random.rand(num_rands)
    # optimal solution: sum of the K smallest values, found by sorting
    rands_sort = sorted(rands)
    # Bug fix: slice with K instead of the hard-coded literal 5 so the
    # parameter and the objective cannot drift apart.
    optimal_obj = sum(rands_sort[0:K])
    print('*****')
    print(optimal_obj)
    print('*****')
    return {'num_rands': num_rands, 'K': K, 'rands': rands, 'optimal_obj': optimal_obj}
| 22.65 | 87 | 0.620309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.328918 |
7f1aa77534d7ca4bf005172321cf304ed4c252e6 | 7,175 | py | Python | ros/src/tl_detector/light_classification/tl_classifier.py | Kuan-HC/Udacity-CarND-Capstone | cfe6c44f0f46de19a7df728de3044fb2c1a80666 | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | Kuan-HC/Udacity-CarND-Capstone | cfe6c44f0f46de19a7df728de3044fb2c1a80666 | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | Kuan-HC/Udacity-CarND-Capstone | cfe6c44f0f46de19a7df728de3044fb2c1a80666 | [
"MIT"
] | null | null | null | from styx_msgs.msg import TrafficLight
import rospy
import tensorflow as tf
import numpy as np
import cv2
from PIL import Image
# for visualization
#import matplotlib.pyplot as plt
from PIL import ImageDraw
from PIL import ImageColor
import os
class TLClassifier(object):
    """Traffic light classifier: an SSD object detector (frozen TensorFlow
    graph) finds candidate traffic lights, then a red-mask brightness
    heuristic decides whether the light is red.
    """
    def __init__(self):
        #TODO load classifier
        # Path to the frozen SSD inference graph, relative to the node's
        # working directory.
        SSD_GRAPH_FILE = "light_classification/model/frozen_inference_graph.pb"
        #self.session = None
        self.detection_graph = self.__load_graph(os.path.abspath(SSD_GRAPH_FILE))
        # visualization
        cmap = ImageColor.colormap
        self.COLOR_LIST = sorted([c for c in cmap.keys()])
        # Create Tensor Session
        self.sess = tf.Session(graph = self.detection_graph)
    def __load_graph(self, graph_file):
        """Loads a frozen inference graph"""
        graph = tf.Graph()
        with graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(graph_file, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        rospy.loginfo("model loaded")
        return graph
    def get_classification(self, cv2_img):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        #TODO implement light color prediction
        #image = cv2.resize(image, (300, 300))
        # OpenCV delivers BGR; the detector and the PIL image expect RGB.
        cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(cv2_img)
        # Add a batch dimension: (1, height, width, 3).
        image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
        # The input placeholder for the image. Get_tensor_by_name` returns the Tensor with the associated name in the Graph.
        image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        # The classification of the object (integer id).
        detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        # Actual detection.
        (boxes, scores, classes) = self.sess.run([detection_boxes, detection_scores, detection_classes],
                                            feed_dict={image_tensor: image_np})
        # Remove unnecessary dimensions
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes)
        confidence_cutoff = 0.55 # was 0.8
        # Filter boxes with a confidence score less than `confidence_cutoff`
        boxes, scores, classes = self.__filter_boxes(confidence_cutoff, boxes, scores, classes)
        rospy.loginfo("Object Detector output")
        # The current box coordinates are normalized to a range between 0 and 1.
        # This converts the coordinates actual location on the image.
        width, height = image.size
        box_coords = self.__to_image_coords(boxes, height, width)
        light_state = TrafficLight.UNKNOWN
        if len(classes)>0:
            light_state = self.__classifier(cv2_img, box_coords, classes)
        rospy.loginfo("Traffic light from Detector: %d" %light_state)
        return light_state
    def __filter_boxes(self, min_score, boxes, scores, classes):
        """Return boxes with a confidence >= `min_score`"""
        n = len(classes)
        idxs = []
        for i in range(n):
            if scores[i] >= min_score:
                idxs.append(i)
        filtered_boxes = boxes[idxs, ...]
        filtered_scores = scores[idxs, ...]
        filtered_classes = classes[idxs, ...]
        return filtered_boxes, filtered_scores, filtered_classes
    def __to_image_coords(self, boxes, height, width):
        """
        The original box coordinate output is normalized, i.e [0, 1].
        This converts it back to the original coordinate based on the image
        size.
        """
        box_coords = np.zeros_like(boxes)
        box_coords[:, 0] = boxes[:, 0] * height
        box_coords[:, 1] = boxes[:, 1] * width
        box_coords[:, 2] = boxes[:, 2] * height
        box_coords[:, 3] = boxes[:, 3] * width
        return box_coords
    def __classifier(self, image, boxes, classes):
        """Average the red-channel brightness over all detected traffic
        light boxes and map the result onto a TrafficLight state."""
        traffic_counter = 0
        predict_sum = 0.
        for i in range(len(boxes)):
            # Class id 10 -- presumably 'traffic light' in the model's
            # (COCO-style) label map; confirm against the label map file.
            if (classes[i]==10):
                traffic_counter += 1
                bot, left, top, right = boxes[i, ...]
                crop_image = image[int(bot):int(top), int(left):int(right)]
                '''
                Traffic Light classifier - project from intro to self driving cars
                '''
                predict_sum += self.__estimate_label(crop_image)
        # traffic_counter ==0 means there is no object detect as traffic
        if (traffic_counter !=0):
            avg = predict_sum/traffic_counter
            rospy.loginfo("This group brightness value: %f" %avg)
        else:
            avg = 0
        '''
        Traffic light definition in UNKNOWN=4
        GREEN=2 YELLOW=1 RED=0
        '''
        # Only RED vs UNKNOWN is decided; avg > 1.0 is an empirically
        # chosen brightness threshold for the red mask.
        if (avg > 1.0):
            return TrafficLight.RED
        else:
            return TrafficLight.UNKNOWN
    '''
    Traffic Light classifier - reuse project from intro-to-self-driving-cars
    '''
    def __estimate_label(self, rgb_image):
        """Return the average brightness of the red-masked 32x32 crop."""
        rgb_image = cv2.resize(rgb_image,(32,32))
        test_image_hsv = cv2.cvtColor(np.array(rgb_image), cv2.COLOR_RGB2HSV)
        # Mask HSV channel
        masked_red = self.__mask_red(test_image_hsv, rgb_image)
        Masked_R_V = self.__Masked_Image_Brightness(masked_red)
        AVG_Masked_R = self.__AVG_Brightness(Masked_R_V)
        return AVG_Masked_R
    def __mask_red(self, HSV_image, rgb_image):
        """Black out every pixel of rgb_image that is not red in HSV space."""
        #red_mask_1 = cv2.inRange(HSV_image, (0,50,60), (10,255,255))
        red_mask = cv2.inRange(HSV_image, (140,10,100), (180,255,255)) #was (140,36,100)
        #red_mask = np.add(red_mask_1,red_mask_2)
        masked_image = np.copy(rgb_image)
        masked_image[red_mask == 0] = [0, 0, 0]
        return masked_image
    def __Masked_Image_Brightness(self, image):
        """Return the V (value/brightness) channel of the image in HSV."""
        masked_Image_HSV = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        masked_Image_V = masked_Image_HSV[:,:,2]
        return masked_Image_V
    def __AVG_Brightness(self, image):
        """Return the mean of a single-channel image."""
        height, width = image.shape
        brightness_avg = np.sum(image)/(height*width)
        return brightness_avg
| 38.783784 | 124 | 0.607387 | 6,929 | 0.965714 | 0 | 0 | 0 | 0 | 0 | 0 | 2,110 | 0.294077 |
7f1b46a68b462c9832a4e39c40ba6411d23ff59c | 3,279 | py | Python | template/plugin/datafile.py | vaMuchenje/Template-Python | 9edbf401cfeb9c33b50bd37fd0643fa0205d8a5e | [
"Artistic-2.0"
] | 1 | 2016-03-02T06:51:04.000Z | 2016-03-02T06:51:04.000Z | template/plugin/datafile.py | vaMuchenje/Template-Python | 9edbf401cfeb9c33b50bd37fd0643fa0205d8a5e | [
"Artistic-2.0"
] | 6 | 2015-10-13T13:46:10.000Z | 2019-06-17T09:39:57.000Z | template/plugin/datafile.py | vaMuchenje/Template-Python | 9edbf401cfeb9c33b50bd37fd0643fa0205d8a5e | [
"Artistic-2.0"
] | 3 | 2018-12-03T13:15:21.000Z | 2019-03-13T09:12:09.000Z | #
# The Template-Python distribution is Copyright (C) Sean McAfee 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import re
from template.plugin import Plugin
from template.util import Sequence
"""
template.plugin.datafile - Plugin to construct records from a simple data file
SYNOPSIS
[% USE mydata = datafile('/path/to/datafile') %]
[% USE mydata = datafile('/path/to/datafile', delim = '|') %]
[% FOREACH record = mydata %]
[% record.this %] [% record.that %]
[% END %]
DESCRIPTION
This plugin provides a simple facility to construct a list of
dictionaries, each of which represents a data record of known
structure, from a data file.
[% USE datafile(filename) %]
An absolute filename must be specified (for this initial implementation
at least - in a future version it might also use the INCLUDE_PATH).
An optional 'delim' parameter may also be provided to specify an
alternate delimiter character.
[% USE userlist = datafile('/path/to/file/users') %]
[% USE things = datafile('items', delim = '|') %]
The format of the file is intentionally simple. The first line
defines the field names, delimited by colons with optional surrounding
whitespace. Subsequent lines then defines records containing data
items, also delimited by colons. e.g.
id : name : email : tel
abw : Andy Wardley : abw@cre.canon.co.uk : 555-1234
neilb : Neil Bowers : neilb@cre.canon.co.uk : 555-9876
Each line is read, split into composite fields, and then used to
initialise a dictionary containing the field names as relevant keys.
The plugin returns an object that encapsulates the dictionaries in the
order as defined in the file.
[% FOREACH user = userlist %]
[% user.id %]: [% user.name %]
[% END %]
The first line of the file MUST contain the field definitions. After
the first line, blank lines will be ignored, along with comment line
which start with a '#'.
BUGS
Should handle file names relative to INCLUDE_PATH.
Doesn't permit use of ':' in a field. Some escaping mechanism is required.
"""
class Datafile(Plugin, Sequence):
    """Template Toolkit Plugin which reads a datafile and constructs a
    list object containing hashes representing records in the file.
    """
    def __init__(self, context, filename, params=None):
        # `context` is unused here but required by the plugin calling
        # convention; `params` may carry an alternate 'delim' character.
        Plugin.__init__(self)
        params = params or {}
        delim = params.get("delim") or ":"
        items = []
        line = None
        names = None
        # Fields are split on the delimiter with optional surrounding
        # whitespace.
        splitter = re.compile(r'\s*%s\s*' % re.escape(delim))
        try:
            f = open(filename)
        except IOError, e:
            # NOTE(review): returning from __init__ relies on Plugin.fail()
            # returning None -- confirm against the Plugin base class.
            return self.fail("%s: %s" % (filename, e))
        for line in f:
            line = line.rstrip("\n\r")
            # Skip blank lines and '#' comment lines.
            if not line or line.startswith("#") or line.isspace():
                continue
            fields = splitter.split(line)
            if names is None:
                # The first data line defines the field names.
                names = fields
            else:
                # Pad short records with None so every record has a value
                # for each field name.
                fields.extend([None] * (len(names) - len(fields)))
                items.append(dict(zip(names, fields)))
        f.close()
        self.items = items
    def __iter__(self):
        return iter(self.items)
    def as_list(self):
        return self.items
| 28.763158 | 78 | 0.68466 | 997 | 0.304056 | 0 | 0 | 0 | 0 | 0 | 0 | 2,363 | 0.720647 |
7f1c8a1a6999814f41798c4e38334cdaa0a22f93 | 1,544 | py | Python | src/tests/colorsensor.py | duckida/legosort | 8fb6a3810056ced7b09fa7e5db42148b54831bcc | [
"MIT"
] | 3 | 2020-04-30T18:56:32.000Z | 2020-08-03T10:20:36.000Z | src/tests/colorsensor.py | duckida/legosort | 8fb6a3810056ced7b09fa7e5db42148b54831bcc | [
"MIT"
] | 1 | 2020-08-03T23:34:03.000Z | 2020-08-03T23:34:03.000Z | src/tests/colorsensor.py | duckida/legosort | 8fb6a3810056ced7b09fa7e5db42148b54831bcc | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import time
s2 = 26
s3 = 27
signal = 17
NUM_CYCLES = 10
def setup():
    # Configure the Pi pins for the colour sensor. s2/s3 select the colour
    # filter and `signal` is the sensor's frequency output (looks like a
    # TCS3200-style sensor — confirm against the wiring).
    GPIO.setmode(GPIO.BCM)
    # Count falling edges on the pulled-up frequency output pin.
    GPIO.setup(signal,GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(s2,GPIO.OUT)
    GPIO.setup(s3,GPIO.OUT)
    print("\n")
def _measure_hz(s2_level, s3_level):
    """Select one colour filter via (s2, s3) and return the sensor's output
    frequency in Hz, measured over NUM_CYCLES falling edges."""
    GPIO.output(s2, s2_level)
    GPIO.output(s3, s3_level)
    time.sleep(0.3)  # let the sensor output settle after switching filters
    start = time.time()
    for _ in range(NUM_CYCLES):
        GPIO.wait_for_edge(signal, GPIO.FALLING)
    duration = time.time() - start  # seconds taken for NUM_CYCLES pulses
    return NUM_CYCLES / duration

def loop():
    """Continuously sample red, blue and green frequencies and report the
    dominant colour. The original body repeated the same measurement code
    three times and carried an unused `temp` variable; both are cleaned up
    (the GPIO call sequence and printed output are unchanged)."""
    while True:
        # Filter-select levels per channel: red=(LOW,LOW), blue=(LOW,HIGH),
        # green=(HIGH,HIGH), matching the original pin settings.
        red = _measure_hz(GPIO.LOW, GPIO.LOW)
        print("red value - ", red)
        blue = _measure_hz(GPIO.LOW, GPIO.HIGH)
        print("blue value - ", blue)
        green = _measure_hz(GPIO.HIGH, GPIO.HIGH)
        print("green value - ", green)
        time.sleep(2)
        # Threshold in Hz; green is checked first, as in the original.
        if green > 12500:
            print("Green")
        elif red > 12500:
            print("Red")
        elif blue > 12500:
            print("Blue")
def endprogram():
    # Release every GPIO pin this script claimed.
    GPIO.cleanup()
# Entry point: configure the pins, then sample colours until Ctrl+C;
# the KeyboardInterrupt handler below releases the GPIO state.
if __name__=='__main__':
    setup()
    try:
        loop()
    except KeyboardInterrupt:
endprogram() | 19.794872 | 64 | 0.610751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.069301 |
7f1d0f17b0c7610564f4bd552870d3ed540a3a70 | 1,045 | py | Python | example/example2.py | chrisseto/Breeze | 53fc81666ba144e4a873c5f20e77cde00a675d38 | [
"MIT"
] | null | null | null | example/example2.py | chrisseto/Breeze | 53fc81666ba144e4a873c5f20e77cde00a675d38 | [
"MIT"
] | null | null | null | example/example2.py | chrisseto/Breeze | 53fc81666ba144e4a873c5f20e77cde00a675d38 | [
"MIT"
] | null | null | null | import datetime
import peewee as p
from breeze import App, Resource, Serializable
db = p.SqliteDatabase('users.db')
class UserModel(p.Model):
    # Peewee ORM model backing the demo 'users.db' SQLite database.
    username = p.CharField(unique=True)
    password = p.CharField()
    email = p.CharField()
    # Callable default: evaluated at insert time, not at import time.
    join_date = p.DateTimeField(default=datetime.datetime.now)
    class Meta:
        # Bind the model to the module-level SQLite handle.
        database = db
class User(Serializable, Resource):
    """Breeze API resource that serializes ``UserModel`` rows.

    The password field is intentionally not exposed.
    """
    email = Serializable.String()
    username = Serializable.String()
    join_date = Serializable.DateTime()

    @classmethod
    def from_model(cls, model):
        """Build a User resource from a UserModel row.

        Bug fix: the original returned ``cls.__init__(...)``, which calls the
        unbound initializer without ``self`` (TypeError at runtime) and would
        return None even if it worked; instantiate the class instead.
        """
        return cls(
            email=model.email,
            username=model.username,
            join_date=model.join_date
        )

    @classmethod
    def list(cls, filter_options):
        """Return one page of users.

        Breeze page numbers are 0-based while peewee's paginate() is
        1-based, hence the ``+ 1``.
        """
        return [
            cls.from_model(u) for u in
            UserModel.select().paginate(
                filter_options.page + 1,
                filter_options.size
            )
        ]
# Open the database eagerly so the demo server and any importer share
# one connection, then serve the API when run as a script.
db.connect()
app = App(User, prefix='/api/v1/', debug=True)
if __name__ == '__main__':
    app.serve()
| 21.326531 | 62 | 0.614354 | 814 | 0.778947 | 0 | 0 | 434 | 0.415311 | 0 | 0 | 30 | 0.028708 |
7f1d6247aa5afbf4e9f81a212764267b491f054c | 551 | py | Python | sqlalchemy_jsonapi/constants.py | jimbobhickville/sqlalchemy-jsonapi | 40f8b5970d44935b27091c2bf3224482d23311bb | [
"MIT"
] | 73 | 2015-01-11T09:23:21.000Z | 2021-07-07T03:51:38.000Z | sqlalchemy_jsonapi/constants.py | jimbobhickville/sqlalchemy-jsonapi | 40f8b5970d44935b27091c2bf3224482d23311bb | [
"MIT"
] | 61 | 2015-03-16T22:08:14.000Z | 2018-01-12T01:43:48.000Z | sqlalchemy_jsonapi/constants.py | JanAckermann/sqlalchemy-jsonapi | 40f8b5970d44935b27091c2bf3224482d23311bb | [
"MIT"
] | 39 | 2015-01-21T15:04:40.000Z | 2020-09-24T19:54:15.000Z | """
SQLAlchemy-JSONAPI
Constants
Colton J. Provias
MIT License
"""
# Use the stdlib enum (Python 3.4+); fall back to a backport otherwise.
try:
    from enum import Enum
except ImportError:
    # NOTE(review): the enum34 backport installs itself as module `enum`,
    # so this fallback module name looks wrong — confirm the intended package.
    from enum34 import Enum
class Method(Enum):
    """ HTTP Methods used by JSON API """
    # Member values double as the literal HTTP verb strings.
    GET = 'GET'
    POST = 'POST'
    PATCH = 'PATCH'
    DELETE = 'DELETE'
class Endpoint(Enum):
    """ Four paths specified in JSON API """
    # URL templates; the <placeholders> are filled in by the routing layer.
    COLLECTION = '/<api_type>'
    RESOURCE = '/<api_type>/<obj_id>'
    RELATED = '/<api_type>/<obj_id>/<relationship>'
    RELATIONSHIP = '/<api_type>/<obj_id>/relationships/<relationship>'
| 17.774194 | 70 | 0.637024 | 397 | 0.720508 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.529946 |
7f1e39640d0d32b0f11e287faac312c0865dad46 | 2,617 | py | Python | 1.-MapReduce Spark/B-Datos Meteorologicos/Meteorologico_3.py | gorco/sgdi-lab | afd967145fe21a5237893e93a3bfda828dba09e3 | [
"Apache-2.0"
] | null | null | null | 1.-MapReduce Spark/B-Datos Meteorologicos/Meteorologico_3.py | gorco/sgdi-lab | afd967145fe21a5237893e93a3bfda828dba09e3 | [
"Apache-2.0"
] | null | null | null | 1.-MapReduce Spark/B-Datos Meteorologicos/Meteorologico_3.py | gorco/sgdi-lab | afd967145fe21a5237893e93a3bfda828dba09e3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Este fichero generado para la asignatura SGDI
# Practica 1 MapReduce Y Spark, Ejercicio B.3
# Autores: Antonio Calvo Morata y Carlos Congosto Sandoval
# Antonio Calvo Morata y Carlos Congosto Sandoval declaramos que esta solución es fruto exclusivamente de nuestro
# trabajo personal. No hemos sido ayudados por ninguna otra persona ni hemos obtenido la solución de fuentes externas,
# y tampoco hemos compartido nuestra solución con nadie. Declaramos además que no hemos realizado de manera deshonesta
# ninguna otra actividad que pueda mejorar nuestros resultados ni perjudicar los resultados de los demás.
from mrjob.job import MRJob
from operator import itemgetter
import sys
# 3.- Implementar una tarea MapReduce en mrjob que resuelva este problema utilizando las fases mapper, combiner y
# reducer
class MRMeteoOpt(MRJob):
    """MapReduce job computing min/avg/max of CSV column 8 (battery value,
    per the original comments), grouped by a key built from the first two
    '/'-separated components of the date field.
    """
    # Set on this job class rather than mutating the MRJob base globally,
    # as the original `MRJob.SORT_VALUES = True` did.
    SORT_VALUES = True

    @staticmethod
    def _fold(values):
        """Aggregate (sum-part, min-part, max-part, count) records.

        Accepts both raw mapper values (strings, count == 1) and partial
        combiner output (numbers), which makes the combiner safe for Hadoop
        to apply any number of times — the original `count += 1` was only
        correct for a single combiner pass. Seeds min/max with +/-inf
        (instead of py2-only sys.maxint and 0) so negative data is handled.
        """
        total = 0.0
        min_value = float('inf')
        max_value = float('-inf')
        count = 0
        for value in values:
            total += float(value[0])
            min_value = min(min_value, float(value[1]))
            max_value = max(max_value, float(value[2]))
            count += int(value[3])
        return total, min_value, max_value, count

    def mapper(self, key, line):
        # line is one CSV record; the header row starts with 'date-time'.
        word = line.split(',')
        if word[0] != 'date-time':
            time = word[0].split('/')
            # Emit the value three times so sum/min/max share one format.
            yield str(time[1]) + '/' + str(time[0]), (word[8], word[8], word[8], 1)

    def combiner(self, key, values):
        # Pre-aggregate mapper output locally to reduce shuffle volume.
        yield key, self._fold(values)

    def reducer(self, key, values):
        total, min_value, max_value, count = self._fold(values)
        if count > 0:
            yield key, dict(max=max_value, avg=total / count, min=min_value)
# Standard mrjob command-line entry point (runs the local/hadoop runner).
if __name__ == '__main__':
    MRMeteoOpt.run()
| 40.890625 | 119 | 0.658387 | 1,741 | 0.663997 | 1,344 | 0.512586 | 0 | 0 | 0 | 0 | 1,156 | 0.440885 |
7f20ed98a090dda844e5340489d6c208513276d2 | 226 | py | Python | components/studio/deployments/admin.py | ScilifelabDataCentre/stackn | 00a65a16ff271f04548b3ff475c72dacbfd916df | [
"Apache-2.0"
] | null | null | null | components/studio/deployments/admin.py | ScilifelabDataCentre/stackn | 00a65a16ff271f04548b3ff475c72dacbfd916df | [
"Apache-2.0"
] | null | null | null | components/studio/deployments/admin.py | ScilifelabDataCentre/stackn | 00a65a16ff271f04548b3ff475c72dacbfd916df | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import DeploymentDefinition, DeploymentInstance, HelmResource
# Expose the deployment models in the Django admin with default options.
for _model in (HelmResource, DeploymentDefinition, DeploymentInstance):
    admin.site.register(_model)
| 28.25 | 74 | 0.862832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7f223ce0b0a9bae329fb3c8299587d1e8fad0fa3 | 1,576 | py | Python | edinet_baseline_hourly_module/edinet_models/pyEMIS/ConsumptionModels/constantMonthlyModel.py | BeeGroup-cimne/module_edinet | 0cda52e9d6222a681f85567e9bf0f7e5885ebf5e | [
"MIT"
] | null | null | null | edinet_baseline_hourly_module/edinet_models/pyEMIS/ConsumptionModels/constantMonthlyModel.py | BeeGroup-cimne/module_edinet | 0cda52e9d6222a681f85567e9bf0f7e5885ebf5e | [
"MIT"
] | 13 | 2021-03-25T22:24:38.000Z | 2022-03-12T00:56:45.000Z | edinet_baseline_hourly_module/edinet_models/pyEMIS/ConsumptionModels/constantMonthlyModel.py | BeeGroup-cimne/module_edinet | 0cda52e9d6222a681f85567e9bf0f7e5885ebf5e | [
"MIT"
] | 1 | 2019-03-13T09:49:56.000Z | 2019-03-13T09:49:56.000Z | #This is a class because it stores its model parameters and has a 'prediction' function which returns predictions for input data
import numpy as np
from baseModel import baseModel, ModellingError as me
from datetime import datetime
import pandas as pd
# Module-local error type (subclassing the shared pyEMIS ModellingError,
# imported above as `me`) so callers can catch this model's failures.
class ModellingError(me): pass
class ConstantMonthlyModel(baseModel):
    """
    A constant consumption model: consumption is estimated as the average of all input data
    Input_data must respond to the method call 'consumption'
    """
    n_parameters = 1
    def __init__(self, data):
        # data: numpy record array with 'timestamp' (epoch seconds, fed to
        # datetime.fromtimestamp below) and 'consumption' fields; may also
        # carry an optional 'temperature' field.
        if len(data) <= 11:#(self.n_parameters + 2):
            # NOTE(review): these NaNs are unconditionally overwritten by the
            # assignments at the end of __init__; the commented-out raise
            # suggests this branch was meant to bail out early — confirm.
            self.mean = np.nan
            self.std = np.nan
            #raise ModellingError, "Not enough input data"
        if 'temperature' in data.dtype.names:
            x = data['temperature']
            self.xrange = [min(x), max(x)]
        data_pd = pd.DataFrame.from_records(data)
        data_pd['ts'] = data_pd['timestamp'].apply(datetime.fromtimestamp)
        data_pd = data_pd.set_index(pd.DatetimeIndex(data_pd['ts']))
        data_pd.sort_index(inplace=True)
        # Calendar month following the latest sample (December wraps to
        # January); the model's statistics come from that month's history.
        last_month = data_pd[-1:].index.month+1 if data_pd[-1:].index.month != 12 else 1
        self.mean = data_pd[data_pd.index.month==last_month]['consumption'].mean()
        self.std = data_pd[data_pd.index.month==last_month]['consumption'].std()
    def prediction(self, independent_data):
        # Constant model: every prediction is the stored monthly mean.
        return np.array([self.mean] * len(independent_data))
    def simulation(self, independent_data):
        # Gaussian noise around the mean using the stored monthly std.
        return self.std * np.random.randn(independent_data.size) + self.mean
def parameters(self):
return {'mean': self.mean, 'std': self.std} | 37.52381 | 128 | 0.701142 | 1,321 | 0.838198 | 0 | 0 | 0 | 0 | 0 | 0 | 440 | 0.279188 |
7f225c7492072715078bdd768b21a5ef8c2bd1a5 | 3,746 | py | Python | geojson_rewind/rewind.py | chris48s/geojson-rewind | fc0fe3e64cb2228b4aa35788866eb3ab973f6a94 | [
"MIT"
] | 15 | 2019-02-22T15:43:35.000Z | 2021-12-16T14:31:33.000Z | geojson_rewind/rewind.py | chris48s/geojson-rewind | fc0fe3e64cb2228b4aa35788866eb3ab973f6a94 | [
"MIT"
] | 18 | 2019-06-12T08:58:50.000Z | 2022-01-31T02:06:17.000Z | geojson_rewind/rewind.py | chris48s/geojson-rewind | fc0fe3e64cb2228b4aa35788866eb3ab973f6a94 | [
"MIT"
] | 2 | 2019-07-19T17:29:23.000Z | 2021-11-10T16:56:44.000Z | import argparse
import copy
import json
import logging
import math
import sys
RADIUS = 6378137
def rewind(geojson, rfc7946=True):
    """Return *geojson* with polygon ring winding order enforced.

    Accepts either a GeoJSON string (a JSON string is returned) or a
    mapping (a deep copy is returned; the input is left untouched).

    Bug fix: the original called _check_crs on the raw argument, so a JSON
    *string* containing "crs" crashed with a TypeError ("crs" in <str> is a
    substring test, after which <str>["crs"] fails). The CRS check now runs
    on the parsed object in both branches.
    """
    if isinstance(geojson, str):
        obj = json.loads(geojson)
        _check_crs(obj)
        return json.dumps(_rewind(obj, rfc7946))
    gj = copy.deepcopy(geojson)
    _check_crs(gj)
    return _rewind(gj, rfc7946)
def _check_crs(geojson):
if (
"crs" in geojson
and "properties" in geojson["crs"]
and "name" in geojson["crs"]["properties"]
and geojson["crs"]["properties"]["name"] != "urn:ogc:def:crs:OGC:1.3:CRS84"
):
logging.warning(
"Co-ordinates in the input data are assumed to be WGS84 with "
"(lon, lat) ordering, as per RFC 7946. Input with co-ordinates "
"using any other CRS may lead to unexpected results."
)
def _rewind(gj, rfc7946):
if gj["type"] == "FeatureCollection":
gj["features"] = list(map(lambda obj: _rewind(obj, rfc7946), gj["features"]))
return gj
if gj["type"] == "GeometryCollection":
gj["geometries"] = list(
map(lambda obj: _rewind(obj, rfc7946), gj["geometries"])
)
return gj
if gj["type"] == "Feature":
gj["geometry"] = _rewind(gj["geometry"], rfc7946)
if gj["type"] in ["Polygon", "MultiPolygon"]:
return correct(gj, rfc7946)
return gj
def correct(feature, rfc7946):
    """Fix ring winding on a Polygon or MultiPolygon geometry (in place)
    and return it; any other geometry type passes through untouched."""
    geom_type = feature["type"]
    if geom_type == "Polygon":
        feature["coordinates"] = correctRings(feature["coordinates"], rfc7946)
    elif geom_type == "MultiPolygon":
        feature["coordinates"] = [
            correctRings(polygon, rfc7946) for polygon in feature["coordinates"]
        ]
    return feature
def correctRings(rings, rfc7946):
    """Wind a polygon's rings in place: the exterior ring one way, every
    hole the opposite way, and return the (mutated) list.

    RFC 7946 wants counter-clockwise exterior rings; the legacy 2008
    convention wants clockwise — hence the negation of the flag.
    """
    exterior_clockwise = not bool(rfc7946)
    rings[0] = wind(rings[0], exterior_clockwise)
    for index, hole in enumerate(rings[1:], start=1):
        rings[index] = wind(hole, not exterior_clockwise)
    return rings
def wind(ring, clockwise):
    """Return *ring* in the requested winding, reversed when it differs."""
    return ring if is_clockwise(ring) == clockwise else ring[::-1]
def is_clockwise(ring):
    # Convention used throughout this module: a non-negative signed
    # ringArea means the ring is wound clockwise.
    return ringArea(ring) >= 0
def ringArea(coords):
    """Approximate signed spherical area of a ring of [lon, lat] points.

    Sums (lambda_3 - lambda_1) * sin(phi_2) over consecutive point triples,
    treating the ring cyclically, then scales by R^2 / 2 (the classic
    geojson-area formulation). Rings with fewer than three points have
    zero area. The sign encodes winding direction (see is_clockwise).
    """
    area = 0
    n = len(coords)
    if n > 2:
        for i in range(n):
            # Cyclic triple (p1, p2, p3); modulo indexing replaces the
            # original three-way boundary branch with identical indices.
            p1 = coords[i]
            p2 = coords[(i + 1) % n]
            p3 = coords[(i + 2) % n]
            area = area + (rad(p3[0]) - rad(p1[0])) * math.sin(rad(p2[1]))
        area = area * RADIUS * RADIUS / 2
    return area
def rad(coord):
    # Degrees -> radians. Kept as this exact expression (not math.radians)
    # because the operation order affects float rounding in ringArea.
    return coord * math.pi / 180
def main():
    """CLI entry point: rewind a GeoJSON file (or stdin) to stdout.

    Returns a process exit status (0 on success).
    """
    parser = argparse.ArgumentParser(
        description="Enforce RFC 7946 ring winding order on a GeoJSON file"
    )
    parser.add_argument(
        "file",
        nargs="?",
        help="Input file, if empty stdin is used",
        type=argparse.FileType("r"),
        default=sys.stdin,
    )
    args = parser.parse_args()
    # No file argument on an interactive terminal: print usage instead of
    # blocking while waiting for stdin.
    if args.file.isatty():
        parser.print_help()
        return 0
    sys.stdout.write(rewind(args.file.read()))
    return 0
if __name__ == "__main__":
    sys.exit(main())
| 27.343066 | 85 | 0.585424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.237587 |
7f23399a461688f3bfbab0ead1e3f84996220c76 | 1,591 | py | Python | corehq/apps/domain/project_access/middleware.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/apps/domain/project_access/middleware.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/apps/domain/project_access/middleware.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.utils.deprecation import MiddlewareMixin
from corehq.apps.domain.project_access.models import SuperuserProjectEntryRecord, ENTRY_RECORD_FREQUENCY
from corehq.util.quickcache import quickcache
from corehq.apps.users.tasks import update_domain_date
class ProjectAccessMiddleware(MiddlewareMixin):
    """Records when privileged users access a domain-scoped view:
    superuser entries go to SuperuserProjectEntryRecord, and web users get
    their per-domain last-accessed date bumped."""
    def process_view(self, request, view_func, view_args, view_kwargs):
        # Only act when the request carries both a couch_user and a domain.
        if getattr(request, 'couch_user', None) and request.couch_user.is_superuser \
                and hasattr(request, 'domain'):
            self.record_superuser_entry(request.domain, request.couch_user.username)
        if getattr(request, 'couch_user', None) and request.couch_user.is_web_user() \
                and hasattr(request, 'domain'):
            self.record_web_user_entry(request.couch_user, request.domain)
    # quickcache throttles calls to once per ENTRY_RECORD_FREQUENCY per
    # (domain, username), so repeated page loads don't hammer the backend.
    @quickcache(['domain', 'username'], timeout=ENTRY_RECORD_FREQUENCY.seconds)
    def record_superuser_entry(self, domain, username):
        if not SuperuserProjectEntryRecord.entry_recently_recorded(username, domain):
            SuperuserProjectEntryRecord.record_entry(username, domain)
        return None
    @staticmethod
    def record_web_user_entry(user, domain):
        # Bump the membership's last-accessed date at most once per day;
        # the write is dispatched asynchronously (.delay — presumably a
        # celery task, see corehq.apps.users.tasks).
        membership = user.get_domain_membership(domain)
        yesterday = (datetime.today() - timedelta(hours=24)).date()
        if membership and (not membership.last_accessed or membership.last_accessed <= yesterday):
            update_domain_date.delay(user.user_id, domain)
| 49.71875 | 104 | 0.754871 | 1,208 | 0.759271 | 0 | 0 | 648 | 0.407291 | 0 | 0 | 58 | 0.036455 |
7f238b01c0def33202fd44f0d56c115a89fb1370 | 9,288 | py | Python | languages/Natlab/src/natlab/tame/builtin/gen/classProp.py | dherre3/mclab-core | ccdcd6f46ee42285c7ad055ff0a9ea3361112e11 | [
"Apache-2.0"
] | 11 | 2015-05-31T16:11:35.000Z | 2021-02-16T00:04:48.000Z | languages/Natlab/src/natlab/tame/builtin/gen/classProp.py | sshrdp/mclab | 1843078edb13e647c0261d1944320ffbcf02ad99 | [
"Apache-2.0"
] | 12 | 2015-05-04T16:21:04.000Z | 2019-04-24T21:49:33.000Z | languages/Natlab/src/natlab/tame/builtin/gen/classProp.py | sshrdp/mclab | 1843078edb13e647c0261d1944320ffbcf02ad99 | [
"Apache-2.0"
] | 13 | 2015-05-31T17:16:45.000Z | 2021-02-09T17:08:26.000Z | # DEPRECATED - THERE IS A PARSER FOR THE CLASS LANGUAGE IN JAVA NOW
# TODO -delete
# processing Class tag - class propagation language
import processTags
import sys
# definition of the class propagation language - in a dictionary
# helper method - converts numbers to a MatlabClassVar
# (Python 2 code: `long` exists here. Plain integers are wrapped as CPNum
# AST nodes; anything else is assumed to already be a CP expression.)
def convertNum(a): return CPNum(a) if isinstance(a, (long, int)) else a;
# 1) python classes
# general matlab class used by the class tag (CP = ClassProp..Info) - defines the operators
class CP():
    # Operator overloads build the class-propagation AST:
    #   a | b -> CPUnion, a & b -> CPChain, a > b and a < b -> CPMap.
    # NOTE(review): __ror__ keeps the same operand order as __or__ (self
    # first) even though it fires for `other | self` — confirm intentional.
    def __or__ (self, other): return CPUnion(convertNum(self),convertNum(other));
    def __ror__ (self, other): return CPUnion(convertNum(self),convertNum(other));
    def __and__ (self, other): return CPChain(convertNum(self),convertNum(other));
    def __rand__(self, other): return CPChain(convertNum(other),convertNum(self));
    def __gt__ (self, other): return CPMap(convertNum(self),convertNum(other));
    def __lt__ (self, other): return CPMap(convertNum(other),convertNum(self));
    def __repr__(self): return str(self);
# AST node classes for the class-propagation language. Each node knows how
# to print itself (__str__) and how to emit the equivalent Java constructor
# expression (toJava). Emitted strings are identical to the originals.

class CPBuiltin(CP):
    # Leaf node naming a single MATLAB builtin class, e.g. <double>.
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
    def toJava(self):
        return 'new CPBuiltin("'+self.name+'")'

class CPUnion(CP):
    # class1 | class2 - multiple possibilities for one type.
    def __init__(self, a, b):
        self.class1 = convertNum(a)
        self.class2 = convertNum(b)
    def __str__(self):
        return '('+str(self.class1)+'|'+str(self.class2)+')'
    def toJava(self):
        return 'new CPUnion('+self.class1.toJava()+','+self.class2.toJava()+')'

class CPChain(CP):
    # class1 & class2 - a sequence of types.
    def __init__(self, a, b):
        self.class1 = convertNum(a)
        self.class2 = convertNum(b)
    def __str__(self):
        return '('+str(self.class1)+')&('+str(self.class2)+')'
    def toJava(self):
        return 'new CPChain('+self.class1.toJava()+','+self.class2.toJava()+')'

class CPMap(CP):
    # class1 > class2 - matches the lhs, emits the rhs.
    def __init__(self, a, b):
        self.args = convertNum(a)
        self.res = convertNum(b)
    def __str__(self):
        return str(self.args)+'>'+str(self.res)
    def toJava(self):
        return 'new CPMap('+self.args.toJava()+','+self.res.toJava()+')'

class CPNum(CP):
    # <n> - refers to another argument by index; negative counts from the
    # back (-1 is the last argument).
    def __init__(self, num):
        self.num = num
    def __str__(self):
        return str(self.num)
    def toJava(self):
        return 'new CPNum('+str(self.num)+')'

class CPCoerce(CP):
    # coerce(<replacement expr per argument>, <affected expr>), e.g.
    # coerce((char|logical)>double, (numerical&numerical)>double).
    # Note: operands are stored as-is (no convertNum), as in the original.
    def __init__(self, replaceExpr, expr):
        self.replaceExpr = replaceExpr
        self.expr = expr
    def __str__(self):
        return 'coerce('+str(self.replaceExpr)+','+str(self.expr)+')'
    def toJava(self):
        return 'new CPCoerce('+self.replaceExpr.toJava()+','+self.expr.toJava()+')'

class CPNonParametric(CP):
    # Fixed (unparameterised) language element; its textual and Java forms
    # are supplied directly by the constructor.
    def __init__(self, str, java):
        self.str = str
        self.java = java
    def __str__(self):
        return self.str
    def toJava(self):
        return self.java

class CPFunction(CP):
    # name(<expr>, <expr>, ...) - the name's textual and Java forms plus the
    # argument expressions are supplied by the constructor.
    def __init__(self, str, java, *exprs):
        self.exprs = exprs
        self.java = java
        self.str = str
    def __str__(self):
        return self.str+"("+','.join([str(e) for e in self.exprs])+")"
    def toJava(self):
        return self.java+"("+','.join([e.toJava() for e in self.exprs])+")"
# 2) set up keywords of the language in a dictionary
# `lang` is the evaluation namespace handed to eval() in parseTagArgs, so
# every key below is a keyword of the class-propagation DSL.
# basic types:
lang = dict(double=CPBuiltin('double'),single=CPBuiltin('single'),char=CPBuiltin('char'),logical=CPBuiltin('logical'),
            uint8=CPBuiltin('uint8'),uint16=CPBuiltin('uint16'),uint32=CPBuiltin('uint32'),uint64=CPBuiltin('uint64'),
            int8=CPBuiltin('int8'),int16=CPBuiltin('int16'),int32=CPBuiltin('int32'),int64=CPBuiltin('int64'),
            function_handle=CPBuiltin('function_handle'))
# union types
lang.update(dict(float=lang['single']|lang['double'], uint=(lang['uint8']|lang['uint16']|lang['uint32']|lang['uint64']),
                 sint=(lang['int8']|lang['int16']|lang['int32']|lang['int64'])));
lang['int'] = (lang['uint']|lang['sint']);
lang['numeric']= (lang['float']|lang['int']);
lang['matrix'] = (lang['numeric']|lang['char']|lang['logical']);
# non-parametric bits
lang['none'] = CPNonParametric('none', 'new CPNone()');
lang['end'] = CPNonParametric('end', 'new CPEnd()');
lang['begin'] = CPNonParametric('begin', 'new CPBegin()');
lang['any'] = CPNonParametric('any', 'new CPAny()');
lang['parent'] = CPNonParametric('parent','parentClassPropInfo'); # java code is the local variable
lang['error'] = CPNonParametric('error', 'new CPError()');
lang['natlab'] = CPNonParametric('class', 'getClassPropagationInfo()');
lang['matlab'] = CPNonParametric('class', 'getMatlabClassPropagationInfo()');
lang['scalar'] = CPNonParametric('scalar','new CPScalar()');
# other bits of the language
lang['coerce'] = lambda replaceExpr, expr: CPCoerce(replaceExpr,expr)
lang['opt'] = lambda expr: (expr|lang['none']) #note: op(x), being (x|none), will cause an error on the rhs
lang['not'] = lambda typesExpr: CPFunction('not','new CPNot',typesExpr)
lang['arg'] = lambda num : CPNum(num)
# todo - so far opt only allows up to 10 repititions
opt = lang['opt']
lang['star']= lambda expr: opt(expr)&opt(expr)&opt(expr)&opt(expr)&opt(expr)&opt(expr)&opt(expr)&opt(expr)&opt(expr)&opt(expr)
# functions
lang['typeString'] = lambda typesExpr: CPFunction('typeString','new CPTypeString',typesExpr)
# TODO - other possible language features
#variables?
#mult(x,[max],[min]) #tries to match as many as possible max,min may be 0
#matlab - allows matching the current matlab tree for a MatlabClass
# helper method - turns a sequence of CP objects into CPUnion objects
def tupleToCP(seq):
    """Right-fold a non-empty sequence of CP nodes into nested CPUnions,
    i.e. (a, b, c) becomes CPUnion(a, CPUnion(b, c))."""
    result = seq[-1]
    for expr in reversed(seq[:-1]):
        result = CPUnion(expr, result)
    return result
# produces the CP tree from the given tagArgs
def parseTagArgs(tagArgs,builtin):
    # parse arg
    try:
        args = processTags.makeArgString(tagArgs);
        # SECURITY NOTE: the tag arguments are eval'd against the `lang` DSL
        # namespace — only trusted builtin definition files may be parsed.
        tree = convertNum(eval(args,lang))
    except:
        # Bare except is deliberate here: report which builtin failed, then
        # re-raise the original error unchanged.
        sys.stderr.write(("ERROR: cannot parse/build class propagation information for builtin: "+builtin.name+"\ndef: "+tagArgs+"\n"));
        raise
    # turn tuple into chain of Unions
    if isinstance(tree, tuple): tree = tupleToCP(tree)
    return tree
# actual tag definition
def Class(builtin, tagArgs, iset):
    """Handle the 'Class' tag: parse its class-propagation expression and
    return the Java method bodies to splice into the generated builtin.
    Also registers the HasClassPropagationInfo interface in `iset`."""
    # add the interface
    iset.add("HasClassPropagationInfo");
    # create CP tree
    tree = parseTagArgs(tagArgs,builtin)
    if (processTags.DEBUG):
        print "Class args: ",tagArgs
        print "tree: ", tree
        #print "java: ", tree.toJava()
    # java expr for parent info - find if tag 'Class' is defined for a parent
    if (builtin.parent and builtin.parent.getAllTags().has_key('Class')):
        parentInfo = 'super.getClassPropagationInfo()'
    else:
        parentInfo = 'new CPNone()'
    # deal with the matlabClass info - check if there is a matlabClass tag defined - if not, emit the default
    if (not builtin.getAllTags().has_key('MatlabClass')):
        matlabClassMethod = """
    public CP getMatlabClassPropagationInfo(){{
        return getClassPropagationInfo();
    }}
    """; # there's no explicit tag for matlab - just return the class info
    else:
        matlabClassMethod = ''; # emit nothing - the matlabClass tag will deal with it
    # produce code
    return matlabClassMethod+"""
    private CP classPropInfo = null; //{tree};
    public CP getClassPropagationInfo(){{
        //set classPropInfo if not defined
        if (classPropInfo == null){{
            CP parentClassPropInfo = {parentInfo};
            classPropInfo = {tree};
        }}
        return classPropInfo;
    }}
    """.format(tree=tree.toJava(), javaName=builtin.javaName, parentInfo=parentInfo);
# matlabClass tag definition
def MatlabClass(builtin, tagArgs, iset):
    """Handle the 'MatlabClass' tag: emit the Java getter for the
    MATLAB-specific propagation info. Requires a 'Class' tag on the same
    builtin (that tag emits the rest of the scaffolding)."""
    if not builtin.getAllTags().has_key('Class'):
        raise Exception('MatlabClass tag defined for builtin '+builtin.name+', but there is no Class tag defined')
    # create CP tree
    tree = parseTagArgs(tagArgs,builtin)
    if (processTags.DEBUG):
        print "MatlabClass args: ",tagArgs
        print "tree: ", tree
    # java expr for parent info - find if tag 'Class' is defined for a parent
    if (builtin.parent and builtin.parent.getAllTags().has_key('Class')):
        parentInfo = 'super.getMatlabClassPropagationInfo()'
    else:
        parentInfo = 'new CPNone()'
    # produce code
    return """
    private CP matlabClassPropInfo = null; //{tree};
    public CP getMatlabClassPropagationInfo(){{
        //set classPropInfo if not defined
        if (matlabClassPropInfo == null){{
            CP parentClassPropInfo = {parentInfo};
            matlabClassPropInfo = {tree};
        }}
        return matlabClassPropInfo;
    }}
    """.format(tree=tree.toJava(), javaName=builtin.javaName, parentInfo=parentInfo);
| 46.673367 | 134 | 0.669681 | 2,547 | 0.274225 | 0 | 0 | 0 | 0 | 0 | 0 | 4,246 | 0.457149 |
612f623225d8c8f8a22570950f3c2e1560fdac28 | 2,392 | py | Python | python/rrc_simulation/pinocchio_utils.py | prstolpe/rrc_simulation | b430fe4e575641cdd64945cf57d0dd67a0eea17a | [
"BSD-3-Clause"
] | 39 | 2020-08-10T20:27:57.000Z | 2021-03-02T17:10:31.000Z | python/rrc_simulation/pinocchio_utils.py | prstolpe/rrc_simulation | b430fe4e575641cdd64945cf57d0dd67a0eea17a | [
"BSD-3-Clause"
] | 8 | 2020-08-12T09:14:17.000Z | 2020-09-22T12:57:40.000Z | python/rrc_simulation/pinocchio_utils.py | prstolpe/rrc_simulation | b430fe4e575641cdd64945cf57d0dd67a0eea17a | [
"BSD-3-Clause"
] | 22 | 2020-08-10T20:20:13.000Z | 2020-10-08T20:42:30.000Z | import numpy as np
import pinocchio
class PinocchioUtils:
    """
    Consists of kinematic methods for the finger platform.
    """
    def __init__(self, finger_urdf_path, tip_link_names):
        """
        Load the robot model for kinematic computations.
        Args:
            finger_urdf_path (str): Path to the robot's URDF description.
            tip_link_names (list of str): Frame names of the finger tip
                links; their order fixes the order of results below.
        """
        self.robot_model = pinocchio.buildModelFromUrdf(finger_urdf_path)
        self.data = self.robot_model.createData()
        self.tip_link_ids = [
            self.robot_model.getFrameId(link_name)
            for link_name in tip_link_names
        ]
    def forward_kinematics(self, joint_positions):
        """
        Compute end effector positions for the given joint configuration.
        Args:
            joint_positions (list): Flat list of angular joint positions.
        Returns:
            List of tip positions, one per tip link, each as a plain
            [x, y, z] list (note: lists, not np.arrays — see .tolist()).
        """
        pinocchio.framesForwardKinematics(
            self.robot_model, self.data, joint_positions,
        )
        return [
            np.asarray(self.data.oMf[link_id].translation).reshape(-1).tolist()
            for link_id in self.tip_link_ids
        ]
    def inverse_kinematics(self, fid, xdes, q0):
        """
        Method not in use right now, but is here with the intention
        of using pinocchio for inverse kinematics instead of using
        the in-house IK solver of pybullet.
        """
        raise NotImplementedError()
        # NOTE(review): everything below the raise is unreachable draft
        # code, kept for reference only.
        dt = 1.0e-3
        pinocchio.computeJointJacobians(
            self.robot_model, self.data, q0,
        )
        pinocchio.framesKinematics(
            self.robot_model, self.data, q0,
        )
        pinocchio.framesForwardKinematics(
            self.robot_model, self.data, q0,
        )
        Ji = pinocchio.getFrameJacobian(
            self.robot_model,
            self.data,
            fid,
            pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,
        )[:3, :]
        xcurrent = self.data.oMf[fid].translation
        # For a non-square 3xN Jacobian inv() always raises, so in practice
        # the pseudo-inverse branch is the one taken.
        try:
            Jinv = np.linalg.inv(Ji)
        except Exception:
            Jinv = np.linalg.pinv(Ji)
        dq = Jinv.dot(xdes - xcurrent)
        qnext = pinocchio.integrate(self.robot_model, q0, dt * dq)
        return qnext
| 31.064935 | 79 | 0.599916 | 2,352 | 0.983278 | 0 | 0 | 0 | 0 | 0 | 0 | 797 | 0.333194 |
61311e2005e71142f8cad95fe41af86e171237aa | 226 | py | Python | expense/admin.py | jramnai/ExpenseCalculator | e220ab0531dc0849e50c713f8f06f5f08be2319a | [
"MIT"
] | 1 | 2019-11-24T10:03:07.000Z | 2019-11-24T10:03:07.000Z | expense/admin.py | jramnai/ExpenseCalculator | e220ab0531dc0849e50c713f8f06f5f08be2319a | [
"MIT"
] | 2 | 2020-06-06T00:10:07.000Z | 2021-06-10T22:18:23.000Z | expense/admin.py | jramnai/ExpenseCalculator | e220ab0531dc0849e50c713f8f06f5f08be2319a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Category, Expense
# Make the expense-tracker models manageable through the Django admin.
for _model in (Category, Expense):
    admin.site.register(_model)
| 22.6 | 39 | 0.778761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.225664 |
6133a6ac1f3abd910d3fcd607ed9220a877b07c1 | 1,383 | py | Python | setup.py | dbots-pkg/dbots.py | 3076e39b30276844328dc7d14287310130c81c1e | [
"MIT"
] | 8 | 2020-04-09T05:21:29.000Z | 2021-11-01T19:47:50.000Z | setup.py | dbots-pkg/dbots.py | 3076e39b30276844328dc7d14287310130c81c1e | [
"MIT"
] | 13 | 2020-09-25T06:25:16.000Z | 2021-11-15T22:06:59.000Z | setup.py | dbots-pkg/dbots.py | 3076e39b30276844328dc7d14287310130c81c1e | [
"MIT"
] | 1 | 2021-06-02T17:26:04.000Z | 2021-06-02T17:26:04.000Z | import setuptools
import re
# Runtime dependencies are maintained in requirements.txt.
requirements = []
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

# Extract the package version from dbots/__init__.py without importing it.
version = ''
with open('dbots/__init__.py') as f:
    match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE)
    if match:
        version = match.group(1)

# Fail fast with the intended RuntimeError: the original called .group(1)
# on the raw re.search result, so a missing __version__ line raised an
# opaque AttributeError before this check could fire.
if not version:
    raise RuntimeError('version is not set')

# The long description shown on PyPI is the project README.
with open("README.md", "r") as f:
    readme = f.read()

setuptools.setup(
    name = "dbots",
    version = version,
    author = "Snazzah",
    description = "Discord bot list poster and stats retriever",
    long_description = readme,
    long_description_content_type = "text/markdown",
    url = "https://github.com/dbots-pkg/dbots.py",
    packages = setuptools.find_packages(),
    install_requires = requirements,
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Operating System :: OS Independent',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
    python_requires='>=3.6',
)
| 31.431818 | 99 | 0.617498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 681 | 0.492408 |
6134e4f18a0efb8caf473f314596ff6aa401c746 | 597 | py | Python | ml-work/test.py | numankh/HypeBeastDashboard | 8b30fe2cb972a603b6ce1d84004b418d52471a7e | [
"MIT"
] | null | null | null | ml-work/test.py | numankh/HypeBeastDashboard | 8b30fe2cb972a603b6ce1d84004b418d52471a7e | [
"MIT"
] | null | null | null | ml-work/test.py | numankh/HypeBeastDashboard | 8b30fe2cb972a603b6ce1d84004b418d52471a7e | [
"MIT"
] | null | null | null | # linear regression feature importance
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot
# Fit a linear regression on a synthetic dataset (fixed random_state keeps
# the run reproducible) and report each coefficient as a feature-importance
# score, then plot the scores as a bar chart.
X, y = make_regression(n_samples=1000, n_features=10, n_informative=5, random_state=1)
model = LinearRegression()
model.fit(X, y)
importance = model.coef_
for index, score in enumerate(importance):
    print('Feature: %0d, Score: %.5f' % (index, score))
pyplot.bar(list(range(len(importance))), importance)
pyplot.show()
| 31.421053 | 86 | 0.778894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.309883 |
61353a4f4ab02e99fc9e1983a95020c9594e9427 | 4,483 | py | Python | parl/utils/logger.py | TomorrowIsAnOtherDay/PARL | 0fc314630664a3205873a325d8c3105ec4c2500f | [
"Apache-2.0"
] | 1 | 2019-08-01T08:34:40.000Z | 2019-08-01T08:34:40.000Z | parl/utils/logger.py | TomorrowIsAnOtherDay/PARL | 0fc314630664a3205873a325d8c3105ec4c2500f | [
"Apache-2.0"
] | null | null | null | parl/utils/logger.py | TomorrowIsAnOtherDay/PARL | 0fc314630664a3205873a325d8c3105ec4c2500f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os
import os.path
import sys
from termcolor import colored
import shutil
__all__ = ['set_dir', 'get_dir', 'set_level']
# globals: logger file and directory:
LOG_DIR = None
_FILE_HANDLER = None
def _makedirs(dirname):
    """Create *dirname* (with parents), tolerating a pre-existing directory."""
    assert dirname is not None
    # Nothing to do for the empty path or a directory that already exists.
    if dirname != '' and not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except OSError as e:
            # A concurrent creator may have won the race; that is fine.
            if e.errno != errno.EEXIST:
                raise e
class _Formatter(logging.Formatter):
    """Log formatter that colors the date prefix and level tag by severity.

    WARNING -> yellow 'WRN', ERROR/CRITICAL -> red 'ERR',
    DEBUG -> blue 'DEBUG', everything else (INFO) -> green date, no tag.
    """
    def format(self, record):
        msg = '%(message)s'
        if record.levelno == logging.WARNING:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]',
                'yellow')
            fmt = date + ' ' + colored(
                'WRN', 'yellow', attrs=['blink']) + ' ' + msg
        elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]', 'red')
            # Fixed: a leftover duplicate assignment used to build (and
            # immediately discard) a 'WRN'-tagged format string here before
            # the 'ERR' one below was assigned.
            fmt = date + ' ' + colored(
                'ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg
        elif record.levelno == logging.DEBUG:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]',
                'blue')
            fmt = date + ' ' + colored(
                'DEBUG', 'blue', attrs=['blink']) + ' ' + msg
        else:
            date = colored(
                '[%(asctime)s %(threadName)s @%(filename)s:%(lineno)d]',
                'green')
            fmt = date + ' ' + msg
        if hasattr(self, '_style'):
            # Python3 compatibility
            self._style._fmt = fmt
        self._fmt = fmt
        return super(_Formatter, self).format(record)
def _getlogger():
    """Build the 'PARL' logger: DEBUG level, stdout handler, no propagation."""
    parl_logger = logging.getLogger('PARL')
    parl_logger.propagate = False
    parl_logger.setLevel(logging.DEBUG)
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(_Formatter(datefmt='%m-%d %H:%M:%S'))
    parl_logger.addHandler(stdout_handler)
    return parl_logger
# The singleton module logger used by all re-exported helpers below.
_logger = _getlogger()
_LOGGING_METHOD = [
    'info', 'warning', 'error', 'critical', 'warn', 'exception', 'debug',
    'setLevel'
]
# export logger functions
# Re-export the logger's bound methods as module-level callables so users
# can write `from parl.utils import logger; logger.info(...)`.
# (At module scope, locals() is the module namespace, so this defines
# module attributes.)
for func in _LOGGING_METHOD:
    locals()[func] = getattr(_logger, func)
    __all__.append(func)
# export Level information
# Likewise re-export the standard logging level constants (DEBUG, ...).
_LOGGING_LEVEL = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
for level in _LOGGING_LEVEL:
    locals()[level] = getattr(logging, level)
    __all__.append(level)
def _set_file(path):
    """(Re)create the log file at *path* and attach a fresh file handler."""
    global _FILE_HANDLER
    # Start from an empty file; a failed removal (permissions, races) is
    # tolerated because mode='w' truncates anyway.
    if os.path.isfile(path):
        try:
            os.remove(path)
        except OSError:
            pass
    handler = logging.FileHandler(filename=path, encoding='utf-8', mode='w')
    handler.setFormatter(_Formatter(datefmt='%m-%d %H:%M:%S'))
    _FILE_HANDLER = handler
    _logger.addHandler(handler)
def set_level(level):
    """Set the logger level after rebuilding the file handler.

    NOTE(review): relies on get_dir() being non-None, which holds once
    set_dir() has run (it does at import time in this module) -- confirm
    before calling set_level earlier than that.
    """
    # To set level, need create new handler
    set_dir(get_dir())
    _logger.setLevel(level)
def set_dir(dirname):
    """Direct file logging into ``dirname``/log.log, creating the directory.

    Any previously attached file handler is detached and closed first.
    """
    global LOG_DIR, _FILE_HANDLER
    if _FILE_HANDLER:
        # unload and close the old file handler, so that we may safely
        # delete the logger directory. (Bug fix: the handler used to be
        # discarded with `del` but never close()d, leaking the open file
        # descriptor and keeping the old log file locked on some systems.)
        _logger.removeHandler(_FILE_HANDLER)
        _FILE_HANDLER.close()
        _FILE_HANDLER = None
    if not os.path.isdir(dirname):
        _makedirs(dirname)
    LOG_DIR = dirname
    _set_file(os.path.join(dirname, 'log.log'))
def get_dir():
    """Return the current log directory (None until set_dir has been called)."""
    return LOG_DIR
# Will save log to log_dir/main_file_name/log.log by default
mod = sys.modules['__main__']
if hasattr(mod, '__file__'):
    # Directory name is derived from the entry-point script's basename with
    # its extension stripped, e.g. "train.py" -> log_dir/train/log.log.
    basename = os.path.basename(mod.__file__)
    auto_dirname = os.path.join('log_dir', basename[:basename.rfind('.')])
    # NOTE: logs from a previous run of the same script are wiped here.
    shutil.rmtree(auto_dirname, ignore_errors=True)
    set_dir(auto_dirname)
# Record the full command line of this run at import time.
_logger.info("Argv: " + ' '.join(sys.argv))
| 30.496599 | 98 | 0.61789 | 1,408 | 0.314075 | 0 | 0 | 0 | 0 | 0 | 0 | 1,517 | 0.338389 |
61361bb27deebdcaf54deab305993d93df72c9f9 | 357 | py | Python | venv/lib/python2.7/site-packages/nano-1.0a3-py2.7.egg/nano/blog/settings.py | 784134748/kubernetes-install | 5df59632c2619632e422948b667fb68eab9ff5be | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/nano-1.0a3-py2.7.egg/nano/blog/settings.py | 784134748/kubernetes-install | 5df59632c2619632e422948b667fb68eab9ff5be | [
"MIT"
] | 1 | 2021-04-30T20:41:19.000Z | 2021-04-30T20:41:19.000Z | venv/lib/python2.7/site-packages/nano-1.0a3-py2.7.egg/nano/blog/settings.py | 784134748/kubernetes-install | 5df59632c2619632e422948b667fb68eab9ff5be | [
"MIT"
] | null | null | null | from django.conf import settings
NANO_BLOG_TAGS = None
# Optional support for django-taggit
try:
if ('taggit' in settings.INSTALLED_APPS
and getattr(settings, 'NANO_BLOG_USE_TAGS', False)):
import taggit as NANO_BLOG_TAGS
except ImportError:
pass
NANO_BLOG_SPECIAL_TAGS = getattr(settings, 'NANO_BLOG_SPECIAL_TAGS', ('pinned',))
| 25.5 | 81 | 0.745098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.268908 |
6137412bca12c1594b05667ccd21c7747a8c970c | 278 | py | Python | scripts/get_article.py | theblueskies/prose | 0251669de08abd6f7b0577701d786685c4e61a34 | [
"MIT"
] | 2,906 | 2017-04-05T17:03:08.000Z | 2022-03-30T21:37:05.000Z | scripts/get_article.py | theblueskies/prose | 0251669de08abd6f7b0577701d786685c4e61a34 | [
"MIT"
] | 70 | 2017-02-22T04:41:32.000Z | 2022-01-19T19:24:33.000Z | scripts/get_article.py | theblueskies/prose | 0251669de08abd6f7b0577701d786685c4e61a34 | [
"MIT"
] | 162 | 2017-04-13T15:07:26.000Z | 2022-01-23T18:07:32.000Z | import os
from newspaper import Article

# Source page used to produce the article-extraction test fixture.
url = 'http://fox13now.com/2013/12/30/new-year-new-laws-obamacare-pot-guns-and-drones/'

# Download the page and run newspaper's extraction pipeline on it.
article = Article(url)
article.download()
article.parse()

# Persist the extracted body text under the test-data directory.
with open(os.path.join('testdata', 'article.txt'), 'w') as f:
    f.write(article.text)
| 21.384615 | 87 | 0.719424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.384892 |
613a98422c87f9037a1871ac68d9a3a2da07842e | 337 | py | Python | examples/example_02.py | vesche/juc2 | 3484175988bbf0c8f188c876641f1dd39b3c4af0 | [
"MIT"
] | null | null | null | examples/example_02.py | vesche/juc2 | 3484175988bbf0c8f188c876641f1dd39b3c4af0 | [
"MIT"
] | null | null | null | examples/example_02.py | vesche/juc2 | 3484175988bbf0c8f188c876641f1dd39b3c4af0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
juc2/examples/example_02.py
Move a rectangle across the terminal. <3
"""
from juc2 import art, Stage
stage = Stage(height=40, width=80, frame=True)
rectangle = art.Shapes.Rectangle(width=10, height=5, x=5, y=5)
while True:
stage.draw(rectangle, FPS=4)
if rectangle.x < 60:
rectangle.x += 1 | 18.722222 | 62 | 0.676558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.290801 |
613e54b69b09b894c0bfdcdad303fe36c638d48b | 18,784 | py | Python | codes/att_reader/layers.py | caglar/Attentive_reader | dcb9eb7d8fd1fd71c4ef333fed3aac7df99d663d | [
"BSD-3-Clause"
] | 31 | 2016-03-02T16:00:37.000Z | 2019-03-18T16:01:55.000Z | codes/att_reader/layers.py | caglar/Attentive_reader | dcb9eb7d8fd1fd71c4ef333fed3aac7df99d663d | [
"BSD-3-Clause"
] | 1 | 2016-05-30T16:59:05.000Z | 2016-05-30T16:59:05.000Z | codes/att_reader/layers.py | caglar/Attentive_reader | dcb9eb7d8fd1fd71c4ef333fed3aac7df99d663d | [
"BSD-3-Clause"
] | 14 | 2016-03-02T16:08:55.000Z | 2019-03-18T16:01:57.000Z | import theano
import theano.tensor as tensor
import numpy
from att_reader.utils import prfx, norm_weight, ortho_weight
from core.utils import dot, sharedX
from core.commons import Sigmoid, Tanh, Rect, global_trng, Linear, ELU
"""
We have functions to create the layers and initialize them.
"""
profile = False
layers = {
'ff': ('param_init_fflayer', 'fflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
'lstm': ('param_init_lstm', 'lstm_layer'),
'lstm_tied': ('param_init_lstm_tied', 'lstm_tied_layer'),
}
# layer lookup: resolve a registry entry to its function pair
def get_layer(name):
    """Return the (param_init_fn, apply_fn) pair registered under ``name``."""
    init_name, apply_name = layers[name]
    return (eval(init_name), eval(apply_name))
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options,
                       params,
                       prefix='ff',
                       nin=None,
                       nout=None,
                       ortho=True,
                       use_bias=True):
    """Add feedforward-layer weights (and optional bias) to ``params``."""
    # fall back to the model projection size for unspecified dimensions
    nin = options['dim_proj'] if nin is None else nin
    nout = options['dim_proj'] if nout is None else nout
    params[prfx(prefix, 'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
    if use_bias:
        params[prfx(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')
    return params
def fflayer(tparams,
            state_below,
            options,
            prefix='rconv',
            use_bias=True,
            activ='lambda x: tensor.tanh(x)',
            **kwargs):
    """Apply the feedforward layer: activ(state_below . W [+ b])."""
    activation = eval(activ)
    pre = dot(state_below, tparams[prfx(prefix, 'W')])
    if use_bias:
        pre = pre + tparams[prfx(prefix, 'b')]
    return activation(pre)
# GRU layer
def param_init_gru(options,
                   params,
                   prefix='gru',
                   nin=None,
                   dim=None,
                   hiero=False):
    """Add GRU parameters: gate weights W/U/b plus candidate weights Wx/Ux/bx."""
    nin = options['dim_proj'] if nin is None else nin
    dim = options['dim_proj'] if dim is None else dim
    if not hiero:
        # input-to-gate weights for reset and update, stacked side by side
        params[prfx(prefix, 'W')] = numpy.concatenate(
            [norm_weight(nin, dim), norm_weight(nin, dim)], axis=1)
        params[prfx(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
    # recurrent gate weights (orthogonal init)
    params[prfx(prefix, 'U')] = numpy.concatenate(
        [ortho_weight(dim), ortho_weight(dim)], axis=1)
    # candidate-state weights and bias
    params[prfx(prefix, 'Wx')] = norm_weight(nin, dim)
    params[prfx(prefix, 'Ux')] = ortho_weight(dim)
    params[prfx(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32')
    return params
def gru_layer(tparams,
              state_below,
              options,
              prefix='gru',
              mask=None,
              nsteps=None,
              truncate=None,
              init_state=None,
              **kwargs):
    """Run a GRU over ``state_below`` with theano.scan.

    Returns a one-element list holding the hidden states for every step.
    ``mask`` zeroes out updates at padded positions; assumes state_below is
    (time, batch, features) when 3-d -- TODO confirm against callers.
    """
    if nsteps is None:
        nsteps = state_below.shape[0]
    # 3-d input means a batch of sequences; 2-d means a single sample
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    param = lambda name: tparams[prfx(prefix, name)]
    dim = param('Ux').shape[1]
    if mask is None:
        # no mask given: treat every timestep as valid
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # normalize the mask to (time, batch, 1) for broadcasting
    if mask.ndim == 3 and mask.ndim == state_below.ndim:
        mask = mask.reshape((mask.shape[0], \
                mask.shape[1] * mask.shape[2])).dimshuffle(0, 1, 'x')
    elif mask.ndim == 2:
        mask = mask.dimshuffle(0, 1, 'x')
    def _slice(_x, n, dim):
        # pick the n-th `dim`-wide chunk of a fused gate matrix
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    # precompute input projections for the gates (W, b) and candidate (Wx, bx)
    state_below_ = dot(state_below, param('W')) + param('b')
    state_belowx = dot(state_below, param('Wx')) + param('bx')
    # initial/previous state
    if init_state is None:
        if not options['learn_h0']:
            init_state = tensor.alloc(0., n_samples, dim)
        else:
            # learned initial state, replicated across the batch and
            # registered as a trainable parameter
            init_state0 = sharedX(numpy.zeros((options['dim'])),
                                  name=prfx(prefix, "h0"))
            init_state = tensor.concatenate([[init_state0] \
                                             for i in xrange(options['batch_size'])],
                                            axis=0)
            tparams[prfx(prefix, 'h0')] = init_state0
    U = tparams[prfx(prefix, 'U')]
    Ux = tparams[prfx(prefix, 'Ux')]
    def _step_slice(mask, sbelow, sbelowx, sbefore, U, Ux):
        # one GRU step: reset/update gates from [Wx + Uh], candidate from Ux
        preact = dot(sbefore, U)
        preact += sbelow
        r = Sigmoid(_slice(preact, 0, dim))
        u = Sigmoid(_slice(preact, 1, dim))
        preactx = dot(r * sbefore, Ux)
        # preactx = preactx
        preactx = preactx + sbelowx
        h = Tanh(preactx)
        # interpolate with the previous state via the update gate, then
        # keep the old state wherever the mask is zero (padding)
        h = u * sbefore + (1. - u) * h
        h = mask[:, None] * h + (1. - mask)[:, None] * sbefore
        return h
    seqs = [mask, state_below_, state_belowx]
    _step = _step_slice
    rval, updates = theano.scan(_step,
                                sequences=seqs,
                                outputs_info=[init_state],
                                non_sequences=[U, Ux],
                                name=prfx(prefix, '_layers'),
                                n_steps=nsteps,
                                truncate_gradient=truncate,
                                profile=profile,
                                strict=True)
    rval = [rval]
    return rval
# LSTM layer
def param_init_lstm(options,
                    params,
                    prefix='lstm',
                    nin=None,
                    dim=None):
    """Add LSTM parameters: fused i/f/o/c weights W, U and bias b."""
    nin = options['dim_proj'] if nin is None else nin
    dim = options['dim_proj'] if dim is None else dim
    # four input-to-hidden blocks (input, forget, output, candidate)
    params[prfx(prefix, 'W')] = numpy.concatenate(
        [norm_weight(nin, dim) for _ in range(4)], axis=1)
    # four recurrent blocks, orthogonally initialized
    params[prfx(prefix, 'U')] = numpy.concatenate(
        [ortho_weight(dim) for _ in range(4)], axis=1)
    params[prfx(prefix, 'b')] = numpy.zeros((4 * dim,)).astype('float32')
    return params
def lstm_layer(tparams, state_below,
               options,
               prefix='lstm',
               mask=None, one_step=False,
               init_state=None,
               init_memory=None,
               nsteps=None,
               **kwargs):
    """Standard LSTM over ``state_below``; returns [hidden states, cells].

    With ``one_step=True`` a single LSTM step is performed instead of
    scanning the whole sequence (init_state/init_memory then act as the
    previous step's state).
    """
    if nsteps is None:
        nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    param = lambda name: tparams[prfx(prefix, name)]
    dim = param('U').shape[0]
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # initial/previous state
    if init_state is None:
        if not options['learn_h0']:
            init_state = tensor.alloc(0., n_samples, dim)
        else:
            # learned initial hidden state, broadcast over the batch
            init_state0 = sharedX(numpy.zeros((options['dim'])),
                                  name=prfx(prefix, "h0"))
            init_state = tensor.alloc(init_state0, n_samples, dim)
            tparams[prfx(prefix, 'h0')] = init_state0
    U = param('U')
    b = param('b')
    W = param('W')
    non_seqs = [U, b, W]
    # initial/previous memory
    if init_memory is None:
        init_memory = tensor.alloc(0., n_samples, dim)
    def _slice(_x, n, dim):
        # pick the n-th `dim`-wide chunk of the fused gate matrix
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    def _step(mask, sbelow, sbefore, cell_before, *args):
        # gates are sliced out of one fused affine transform: i, f, o, c
        preact = dot(sbefore, param('U'))
        preact += sbelow
        preact += param('b')
        i = Sigmoid(_slice(preact, 0, dim))
        f = Sigmoid(_slice(preact, 1, dim))
        o = Sigmoid(_slice(preact, 2, dim))
        c = Tanh(_slice(preact, 3, dim))
        c = f * cell_before + i * c
        # masked positions keep the previous cell and hidden state
        c = mask * c + (1. - mask) * cell_before
        h = o * tensor.tanh(c)
        h = mask * h + (1. - mask) * sbefore
        return h, c
    # precompute the input projection for the whole sequence at once
    lstm_state_below = dot(state_below, param('W')) + param('b')
    if state_below.ndim == 3:
        lstm_state_below = lstm_state_below.reshape((state_below.shape[0],
                                                     state_below.shape[1],
                                                     -1))
    if one_step:
        mask = mask.dimshuffle(0, 'x')
        h, c = _step(mask, lstm_state_below, init_state, init_memory)
        rval = [h, c]
    else:
        # normalize the mask for broadcasting over the feature axis
        if mask.ndim == 3 and mask.ndim == state_below.ndim:
            mask = mask.reshape((mask.shape[0], \
                    mask.shape[1]*mask.shape[2])).dimshuffle(0, 1, 'x')
        elif mask.ndim == 2:
            mask = mask.dimshuffle(0, 1, 'x')
        rval, updates = theano.scan(_step,
                                    sequences=[mask, lstm_state_below],
                                    outputs_info = [init_state,
                                                    init_memory],
                                    name=prfx(prefix, '_layers'),
                                    non_sequences=non_seqs,
                                    strict=True,
                                    n_steps=nsteps)
    return rval
# tied-gate LSTM layer (input gate tied to 1 - forget gate)
def param_init_lstm_tied(options,
                         params,
                         prefix='lstm_tied',
                         nin=None,
                         dim=None):
    """Add tied-gate LSTM parameters: fused f/o/c weights W, U and bias b."""
    nin = options['dim_proj'] if nin is None else nin
    dim = options['dim_proj'] if dim is None else dim
    # three blocks only (forget, output, candidate); the input gate is
    # derived as (1 - forget) inside lstm_tied_layer
    params[prfx(prefix, 'W')] = numpy.concatenate(
        [norm_weight(nin, dim) for _ in range(3)], axis=1)
    params[prfx(prefix, 'U')] = numpy.concatenate(
        [ortho_weight(dim) for _ in range(3)], axis=1)
    params[prfx(prefix, 'b')] = numpy.zeros((3 * dim,)).astype('float32')
    return params
def lstm_tied_layer(tparams,
                    state_below,
                    options,
                    prefix='lstm_tied',
                    mask=None,
                    one_step=False,
                    init_state=None,
                    init_memory=None,
                    nsteps=None,
                    **kwargs):
    """LSTM variant with tied gates: the input gate is (1 - forget gate).

    Returns [hidden states, cells]; supports the same single-step mode as
    lstm_layer.
    """
    if nsteps is None:
        nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    param = lambda name: tparams[prfx(prefix, name)]
    dim = param('U').shape[0]
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # initial/previous state
    if init_state is None:
        if not options['learn_h0']:
            init_state = tensor.alloc(0., n_samples, dim)
        else:
            # learned initial state, replicated across the batch
            init_state0 = sharedX(numpy.zeros((options['dim'])),
                                  name=prfx(prefix, "h0"))
            init_state = tensor.concatenate([[init_state0] \
                                             for i in xrange(options['batch_size'])],
                                            axis=0)
            tparams[prfx(prefix, 'h0')] = init_state0
    # initial/previous memory
    if init_memory is None:
        init_memory = tensor.alloc(0., n_samples, dim)
    def _slice(_x, n, dim):
        # pick the n-th `dim`-wide chunk of the fused gate matrix
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    def _step(mask, sbelow, sbefore, cell_before):
        # fused gates: forget, output, candidate (input gate = 1 - f)
        preact = dot(sbefore, param('U'))
        preact += sbelow
        preact += tparams[prfx(prefix, 'b')]
        f = Sigmoid(_slice(preact, 0, dim))
        o = Sigmoid(_slice(preact, 1, dim))
        c = Tanh(_slice(preact, 2, dim))
        c = f * cell_before + (1 - f) * c
        # masked positions keep the previous cell and hidden state
        c = mask * c + (1. - mask) * cell_before
        h = o * tensor.tanh(c)
        h = mask * h + (1. - mask) * sbefore
        return h, c
    # precompute the input projection for the whole sequence
    state_below = dot(state_below, param('W')) + param('b')
    if one_step:
        mask = mask.dimshuffle(0, 'x')
        h, c = _step(mask, state_below, init_state, init_memory)
        rval = [h, c]
    else:
        if mask.ndim == 3 and mask.ndim == state_below.ndim:
            mask = mask.reshape((mask.shape[0], mask.shape[1]*mask.shape[2])).dimshuffle(0, 1, 'x')
        elif mask.ndim == 2:
            mask = mask.dimshuffle(0, 1, 'x')
        rval, updates = theano.scan(_step,
                                    sequences=[mask, state_below],
                                    outputs_info=[init_state,
                                                  init_memory],
                                    name=prfx(prefix, '_layers'),
                                    n_steps=nsteps)
    return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options,
                        params,
                        prefix='gru_cond',
                        nin=None,
                        dim=None,
                        dimctx=None):
    """Add conditional-GRU parameters: a plain GRU plus context projections
    and the attention MLP used by gru_cond_layer."""
    nin = options['dim'] if nin is None else nin
    dim = options['dim'] if dim is None else dim
    dimctx = options['dim'] if dimctx is None else dimctx
    # start from an ordinary GRU parameter set
    params = param_init_gru(options,
                            params,
                            prefix,
                            nin=nin,
                            dim=dim)
    # context projections into the gates (Wc) and the candidate state (Wcx)
    params[prfx(prefix, 'Wc')] = norm_weight(dimctx, dim * 2)
    params[prfx(prefix, 'Wcx')] = norm_weight(dimctx, dim)
    # attention MLP: input, context and decoder-state projections plus bias
    params[prfx(prefix, 'Wi_att')] = norm_weight(nin, dimctx)
    params[prfx(prefix, 'Wc_att')] = norm_weight(dimctx)
    params[prfx(prefix, 'Wd_att')] = norm_weight(dim, dimctx)
    params[prfx(prefix, 'b_att')] = numpy.zeros((dimctx,)).astype('float32')
    # attention scoring vector and its scalar bias
    params[prfx(prefix, 'U_att')] = norm_weight(dimctx, 1)
    params[prfx(prefix, 'c_tt')] = numpy.zeros((1,)).astype('float32')
    return params
def gru_cond_layer(tparams,
                   state_below,
                   options,
                   prefix='gru',
                   mask=None,
                   context=None,
                   one_step=False,
                   init_memory=None,
                   init_state=None,
                   context_mask=None,
                   nsteps=None,
                   **kwargs):
    """Conditional GRU with attention over ``context``.

    At every step an attention distribution over the context annotations is
    computed from the previous hidden state and the current input; the
    attended context vector then conditions the GRU gates and candidate.
    Returns (hidden states, context vectors, attention weights).
    """
    assert context, 'Context must be provided'
    if one_step:
        assert init_state, 'previous state must be provided'
    if nsteps is None:
        nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    # mask
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    dim = tparams[prfx(prefix, 'Wcx')].shape[1]
    # initial/previous state
    if init_state is None:
        init_state = tensor.alloc(0., n_samples, dim)
    # projected context
    assert context.ndim == 3, 'Context must be 3-d: #annotation x #sample x dim'
    pctx_ = dot(context, tparams[prfx(prefix, 'Wc_att')]) + tparams[prfx(prefix, 'b_att')]
    def _slice(_x, n, dim):
        # pick the n-th `dim`-wide chunk of a fused gate matrix
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    # projected x
    state_belowx = dot(state_below, tparams[prfx(prefix, 'Wx')]) + \
            tparams[prfx(prefix, 'bx')]
    state_below_ = dot(state_below, tparams[prfx(prefix, 'W')]) + \
            tparams[prfx(prefix, 'b')]
    state_belowc = dot(state_below, tparams[prfx(prefix, 'Wi_att')])
    def _step_slice(mask,
                    sbelow,
                    sbelowx,
                    xc_, sbefore,
                    ctx_, alpha_,
                    pctx_, cc_,
                    U, Wc,
                    Wd_att, U_att,
                    c_tt, Ux, Wcx):
        # attention
        # score every annotation against the previous state and the input,
        # then softmax (optionally restricted by context_mask)
        pstate_ = dot(sbefore, Wd_att)
        pctx__ = pctx_ + pstate_[None, :, :]
        pctx__ += xc_
        pctx__ = Tanh(pctx__)
        alpha = dot(pctx__, U_att)+c_tt
        alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
        alpha = tensor.exp(alpha)
        if context_mask:
            alpha = alpha * context_mask
        alpha = alpha / alpha.sum(0, keepdims=True)
        # attended context vector: weighted sum of annotations
        ctx_ = (cc_ * alpha[:, :, None]).sum(0)
        # current context
        # GRU gates conditioned on input, previous state, and context
        preact = dot(sbefore, U)
        preact += sbelow
        preact += dot(ctx_, Wc)
        preact = Sigmoid(preact)
        r = _slice(preact, 0, dim)
        u = _slice(preact, 1, dim)
        preactx = dot(sbefore, Ux)
        preactx *= r
        preactx += sbelowx
        preactx += dot(ctx_, Wcx)
        h = Tanh(preactx)
        # update-gate interpolation, then respect the sequence mask
        h = u * sbefore + (1. - u) * h
        h = mask[:, None] * h + (1. - mask)[:, None] * sbefore
        return h, ctx_, alpha.T
    seqs = [mask, state_below_, state_belowx, state_belowc]
    _step = _step_slice
    shared_vars = [tparams[prfx(prefix, 'U')],
                   tparams[prfx(prefix, 'Wc')],
                   tparams[prfx(prefix, 'Wd_att')],
                   tparams[prfx(prefix, 'U_att')],
                   tparams[prfx(prefix, 'c_tt')],
                   tparams[prfx(prefix, 'Ux')],
                   tparams[prfx(prefix, 'Wcx')]]
    if one_step:
        rval = _step(*(seqs+[init_state, None, None, pctx_, context]+shared_vars))
    else:
        rval, updates = theano.scan(_step,
                                    sequences=seqs,
                                    outputs_info=[init_state,
                                                  tensor.alloc(0., n_samples, context.shape[2]),
                                                  tensor.alloc(0., n_samples, context.shape[0])],
                                    non_sequences=[pctx_,
                                                   context]+shared_vars,
                                    name=prfx(prefix, '_layers'),
                                    n_steps=nsteps,
                                    profile=profile,
                                    strict=True)
    return rval
def dropout_layer(state_before,
                  use_noise,
                  p=0.5):
    """Dropout: multiply by a Bernoulli(p) mask when ``use_noise`` is on,
    otherwise rescale by p (inference-time expectation)."""
    noise_mask = global_trng.binomial(state_before.shape,
                                      p=p, n=1,
                                      dtype=state_before.dtype)
    return tensor.switch(use_noise,
                         state_before * noise_mask,
                         state_before * p)
| 31.047934 | 101 | 0.486744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,550 | 0.082517 |
613ea9b31a2272b26363309b98b740602337ecd8 | 10,687 | py | Python | server/itk_tube.py | KitwareMedical/itk-tube-web | dbd29e6fba6f63929dea5ba5f7170ab47b1104f6 | [
"BSD-3-Clause"
] | 3 | 2017-12-31T10:16:44.000Z | 2018-06-02T21:30:46.000Z | server/itk_tube.py | KitwareMedical/itk-tube-web | dbd29e6fba6f63929dea5ba5f7170ab47b1104f6 | [
"BSD-3-Clause"
] | 2 | 2017-11-15T15:33:27.000Z | 2017-12-04T14:26:41.000Z | server/itk_tube.py | KitwareMedical/itk-tube-web | dbd29e6fba6f63929dea5ba5f7170ab47b1104f6 | [
"BSD-3-Clause"
] | 2 | 2018-01-30T14:52:54.000Z | 2018-10-10T01:50:44.000Z | r"""
This module is a ITK Web server application.
The following command line illustrates how to use it::
$ python .../server/itk-tube.py --data /.../path-to-your-data-file
--data
Path to file to load.
Any WSLink executable script comes with a set of standard arguments that can be overriden if need be::
--port 8080
Port number on which the HTTP server will listen.
--content /path-to-web-content/
Directory that you want to serve as static web content.
By default, this variable is empty which means that we rely on another
server to deliver the static content and the current process only
focuses on the WebSocket connectivity of clients.
"""
# import to process args
import os
import argparse
from json import JSONEncoder
import numpy as np
# import itk modules.
import itk
from itkTypes import itkCType
import ctypes
import sys
if sys.version_info > (3,0):
long = int
# import Twisted reactor for later callback
from twisted.internet import reactor
# import Web connectivity
from wslink import register
from wslink import server
from wslink.websocket import LinkProtocol
# import tube utils
from tubeutils import GetTubePoints
# maps itk ctype to other types
itkCTypeToOthers = {
itk.B: (ctypes.c_bool, 'UInt8Array', 1, 'i'),
itk.D: (ctypes.c_double, 'Float64Array', 8, 'f'),
itk.F: (ctypes.c_float, 'Float32Array', 4, 'f'),
itk.LD: (ctypes.c_longdouble, 'Float64Array', 8, 'f'),
itk.SC: (ctypes.c_char, 'Int8Array', 1, 'i'),
itk.SI: (ctypes.c_int, 'Int32Array', 4, 'i'),
itk.SL: (ctypes.c_long, 'Int32Array', 4, 'i'),
itk.SLL: (ctypes.c_longlong, 'Int32Array', 4, 'i'),
itk.SS: (ctypes.c_short, 'Int16Array', 2, 'i'),
itk.UC: (ctypes.c_ubyte, 'UInt8Array', 1, 'i'),
itk.UI: (ctypes.c_uint, 'UInt32Array', 4, 'i'),
itk.UL: (ctypes.c_ulong, 'UInt32Array', 4, 'i'),
itk.ULL: (ctypes.c_ulonglong, 'UInt32Array', 4, 'i'),
itk.US: (ctypes.c_ushort, 'UInt16Array', 2, 'i'),
}
# preload itk modules here so we don't incur lazy load
# on user request.
itk.TranslationTransform
itk.CompositeTransform
itk.ScaleTransform
itk.SegmentTubes
itk.Image
itk.ImageFileReader
itk.ImageIOFactory
itk.SpatialObjectReader
# Module-wide monotonically increasing tube-ID sequence.
_id_counter = 0


def get_next_id():
    '''Return the next ID in the sequence: 1 on the first call, then 2, ...'''
    global _id_counter
    _id_counter += 1
    return _id_counter


def reset_id_counter():
    '''Restart the sequence so the next get_next_id() returns 1.'''
    global _id_counter
    _id_counter = 0
class Tube(JSONEncoder):
    """A segmented tube: id, parent, extraction params, status, color, and a
    lazily computed world-space mesh.

    NOTE(review): inherits JSONEncoder, but clients receive serialize()'s
    dict; the base class appears to be used only for its constructor
    kwargs -- confirm before relying on encoder behavior.
    """
    def __init__(self, _id=-1, parent=-1, params=None, status='pending', color=None, **kwargs):
        super(Tube, self).__init__(**kwargs)
        self.id = _id
        # parent tube id; -1 means "no parent tube"
        self.parent = parent
        self.params = params or dict()
        # extraction status, e.g. 'pending' or 'done'
        self.status = status
        self.color = color or [1, 0, 0] # default to red
        # underlying itk tube spatial object, attached after extraction
        self.tube = None
        # cached mesh; built on first access of the `mesh` property
        self._mesh = None
    @property
    def mesh(self):
        '''World-space (point, radius) pairs for the tube.

        None until an itk tube object is attached; computed once, cached.
        '''
        if not self.tube:
            return None
        if self._mesh:
            return self._mesh
        # generate mesh
        points = GetTubePoints(self.tube)
        # transform tube points properly
        self.tube.ComputeObjectToWorldTransform()
        transform = self.tube.GetIndexToWorldTransform()
        # approximate an isotropic radius scale as the mean of the
        # transform matrix diagonal
        scaling = [transform.GetMatrix()(i,i) for i in range(3)]
        scale = sum(scaling) / len(scaling)
        for i in range(len(points)):
            pt, radius = points[i]
            pt = list(transform.TransformPoint(pt))
            points[i] = (pt, radius*scale)
        self._mesh = points
        return self._mesh
    def copyfrom(self, obj):
        '''Copies certain properties from a given dictionary.'''
        if type(obj) is not dict:
            raise Exception('Given object is not a dict!')
        self.id = obj.get('id', self.id)
        self.parent = obj.get('parent', self.parent)
        self.params = obj.get('params', self.params)
        self.status = obj.get('status', self.status)
        self.color = obj.get('color', self.color)
    def serialize(self):
        '''Return the JSON-friendly dict published to clients (incl. mesh).'''
        return dict(
            id=self.id,
            parent=self.parent,
            params=self.params,
            status=self.status,
            color=self.color,
            mesh=self.mesh,
        )
# =============================================================================
# Create Web Server to handle requests
# =============================================================================
class ItkTubeProtocol(LinkProtocol):
    """WSLink protocol exposing ITK tube segmentation over websockets.

    Holds the loaded ITK image, a SegmentTubes filter, and a FIFO queue of
    pending tube-extraction requests processed via the Twisted reactor.
    """
    # delay between queue-processing steps, scheduled on the reactor
    timelapse = 0.1 # Time in seconds
    def __init__(self):
        self.idToSpatialObject = dict()
        # NOTE maybe not the most memory-efficient cache since we store points
        # in array form here?
        self.tubeCache = {}
        # FIFO of tube-request dicts awaiting extraction
        self.pendingTubes = []
    def loadDataFile(self, filename):
        """Load an image and (re)build the world transform and segmenter."""
        # Load file in ITK
        self.loadItkImage(filename)
        # setup image to world transform, since segmenttubes
        # will use the world coords.
        self.imageToWorldTransform = itk.CompositeTransform[itk.D, 3].New()
        translate = itk.TranslationTransform[itk.D, 3].New()
        translate.Translate(self.itkImage.GetOrigin())
        scale = itk.ScaleTransform[itk.D, 3].New()
        scale.Scale(self.itkImage.GetSpacing())
        self.imageToWorldTransform.AppendTransform(translate)
        self.imageToWorldTransform.AppendTransform(scale)
        # setup segmenter
        imgType = itk.Image[self.itkPixelType, self.dimensions]
        self.segmentTubes = itk.SegmentTubes[imgType].New()
        self.segmentTubes.SetInputImage(self.itkImage)
        self.segmentTubes.SetDebug(True)
        # align the tube group's transform with the image geometry
        scaleVector = self.itkImage.GetSpacing()
        offsetVector = self.itkImage.GetOrigin()
        self.segmentTubes.GetTubeGroup().GetObjectToParentTransform() \
                .SetScale(scaleVector)
        self.segmentTubes.GetTubeGroup().GetObjectToParentTransform() \
                .SetOffset(offsetVector)
        self.segmentTubes.GetTubeGroup().GetObjectToParentTransform() \
                .SetMatrix(self.itkImage.GetDirection())
        self.segmentTubes.GetTubeGroup().ComputeObjectToWorldTransform()
        # reset id counter between segments
        reset_id_counter()
    def scheduleQueueProcessing(self):
        """Schedule processing of the next pending tube on the reactor."""
        if len(self.pendingTubes) > 0:
            reactor.callLater(ItkTubeProtocol.timelapse, self.processQueue)
    def processQueue(self):
        """Extract one queued tube, cache it, and publish the mesh update."""
        if len(self.pendingTubes) == 0:
            return
        itemToProcess = self.pendingTubes.pop(0)
        # extract tube
        seed = itk.Point[itk.D, self.dimensions](itemToProcess['position'])
        index = self.itkImage.TransformPhysicalPointToContinuousIndex(seed)
        # radius is normalized by the first spacing component
        scaleNorm = self.itkImage.GetSpacing()[0]
        if itemToProcess['params']['scale']/scaleNorm < 0.3:
            raise Exception('scale/scaleNorm < 0.3')
        self.segmentTubes.SetRadius(itemToProcess['params']['scale']/scaleNorm)
        tubeObj = self.segmentTubes.ExtractTube(index, itemToProcess['id'], True)
        itemToProcess['status'] = 'done'
        tube = Tube()
        tube.copyfrom(itemToProcess)
        if tubeObj:
            self.segmentTubes.AddTube(tubeObj)
            tube.tube = tubeObj
        # cache and publish even when extraction failed (tube.tube is None)
        self.tubeCache[tube.id] = tube
        # Publish any update
        self.publish('itk.tube.mesh', tube.serialize())
        # Reschedule ourself
        self.scheduleQueueProcessing()
    def loadItkImage(self, filename):
        """Read *filename* into self.itkImage as a float image.

        NOTE(review): pixels are always read as float regardless of the
        on-disk component type; `componentType` below is never used.
        """
        base = itk.ImageIOFactory.CreateImageIO(filename, itk.ImageIOFactory.ReadMode)
        base.SetFileName(filename)
        base.ReadImageInformation()
        componentType = base.GetComponentType()
        itkctype = itkCType.GetCType("float")
        imageType = itk.Image[itkctype, base.GetNumberOfDimensions()]
        reader = itk.ImageFileReader[imageType].New()
        reader.SetFileName(filename)
        reader.Update()
        self.itkImage = reader.GetOutput()
        self.itkPixelType = itkctype
        self.dimensions = base.GetNumberOfDimensions()
    @register('itk.volume.open')
    def openVolume(self, filename):
        """RPC: load a volume; return its geometry plus raw scalar bytes."""
        self.loadDataFile(str(filename))
        # Get ITK image data
        imgCType, imgJsArrType, pixelSize, pixelDType = itkCTypeToOthers[self.itkPixelType]
        # wrap the ITK buffer as a numpy array without copying it
        pointer = long(self.itkImage.GetBufferPointer())
        imageBuffer = ctypes.cast(pointer, ctypes.POINTER(imgCType))
        size = self.itkImage.GetLargestPossibleRegion().GetSize()
        length = size[0]*size[1]*size[2]
        imgArray = np.ctypeslib.as_array(
                (imgCType * length).from_address(ctypes.addressof(imageBuffer.contents)))
        # Send data to client
        return {
            "extent": (0, size[0]-1, 0, size[1]-1, 0, size[2]-1),
            "origin": list(self.itkImage.GetOrigin()),
            "spacing": list(self.itkImage.GetSpacing()),
            "typedArray": imgJsArrType,
            "scalars": self.addAttachment(imgArray.tobytes()),
        }
    @register('itk.tube.save')
    def saveTubes(self, filename):
        """RPC: write the current tube group to a spatial-object file."""
        dim = 3
        tubeGroup = self.segmentTubes.GetTubeGroup()
        writer = itk.SpatialObjectWriter[dim].New()
        writer.SetFileName(str(filename))
        writer.SetInput(tubeGroup)
        writer.Update()
    @register('itk.tube.generate')
    def generateTube(self, coords, params):
        """RPC: queue extraction of a tube seeded at image coords; returns
        the pending item (with its assigned id) immediately."""
        coords = list(self.imageToWorldTransform.TransformPoint(coords))
        itemToProcess = {
            'id': get_next_id(),
            'parent': -1, # denotes this tube's parent as not a tube
            'position': coords,
            'params': params,
            'status': 'pending',
            'color': [1, 0, 0], # default to red
        }
        self.pendingTubes.append(itemToProcess)
        self.scheduleQueueProcessing()
        return itemToProcess
    @register('itk.tube.delete')
    def deleteTube(self, tubeId):
        """RPC: remove a tube from both the segmenter and the cache."""
        tube = self.tubeCache[tubeId]
        self.segmentTubes.DeleteTube(tube.tube)
        del self.tubeCache[tubeId]
    @register('itk.tube.setcolor')
    def setTubeColor(self, tubeId, color):
        """RPC: update the display color of a cached tube."""
        self.tubeCache[tubeId].color = color
    @register('itk.tube.reparent')
    def reparentTubes(self, parent, children):
        """RPC: make `parent` the parent of each tube id in `children`."""
        if type(parent) is not int or type(children) is not list:
            raise Exception('Invalid arguments')
        if parent in children:
            raise Exception('Cannot have tube be parent of itself')
        parentTube = self.tubeCache[parent].tube
        for child in children:
            # reparents child tube to parent tube
            parentTube.AddSpatialObject(self.tubeCache[child].tube)
            self.tubeCache[child].parent = parent
| 33.606918 | 106 | 0.623561 | 7,966 | 0.745392 | 0 | 0 | 3,283 | 0.307196 | 0 | 0 | 2,453 | 0.229531 |
613ed83ec74ebf36b8cc8d8d8a4d471088eef80d | 3,388 | py | Python | SimplE/tester.py | dertilo/knowledge-graph-reasoning | e36d57ee34aa2b532f4dfa98a1e1d222037337cc | [
"MIT"
] | null | null | null | SimplE/tester.py | dertilo/knowledge-graph-reasoning | e36d57ee34aa2b532f4dfa98a1e1d222037337cc | [
"MIT"
] | null | null | null | SimplE/tester.py | dertilo/knowledge-graph-reasoning | e36d57ee34aa2b532f4dfa98a1e1d222037337cc | [
"MIT"
] | null | null | null | import torch
from tqdm import tqdm
from dataset import Dataset
import numpy as np
from measure import Measure
from os import listdir
from os.path import isfile, join
class Tester:
    """Ranking evaluator for a trained link-prediction model.

    Loads a saved model from ``model_path`` and computes raw/filtered
    ranking measures over the chosen split ('valid' or 'test') of
    ``dataset``. ``test()`` returns the filtered MRR.
    """

    def __init__(self, dataset, model_path, valid_or_test):
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = torch.load(model_path, map_location=self.device)
        self.model.eval()
        self.dataset = dataset
        self.valid_or_test = valid_or_test
        self.measure = Measure()
        # Every known fact across all splits, for filtered ranking.
        self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())

    def get_rank(self, sim_scores):
        """Rank of the test fact, assuming it is the first scored candidate."""
        return (sim_scores > sim_scores[0]).sum() + 1.0

    def create_queries(self, fact, head_or_tail):
        """All corruptions of ``fact`` with the head (or tail) replaced by
        every entity id; returns a list of (head, rel, tail) tuples."""
        head, rel, tail = fact
        if head_or_tail == "head":
            return [(i, rel, tail) for i in range(self.dataset.num_ent())]
        elif head_or_tail == "tail":
            return [(head, rel, i) for i in range(self.dataset.num_ent())]

    def add_fact_and_shred(self, fact, queries, raw_or_fil):
        """Prepend the true fact to its corruptions and split into tensors.

        In the "fil" setting, corruptions that are themselves known facts
        are removed before ranking. Raises ValueError for an unknown mode
        (previously this fell through to an UnboundLocalError).
        """
        if raw_or_fil == "raw":
            result = [tuple(fact)] + queries
        elif raw_or_fil == "fil":
            result = [tuple(fact)] + list(
                set(queries) - self.all_facts_as_set_of_tuples
            )
        else:
            raise ValueError("raw_or_fil must be 'raw' or 'fil', got %r" % (raw_or_fil,))
        return self.shred_facts(result)

    def test(self):
        """Rank every fact of the chosen split in both directions.

        Prints the accumulated measures and returns the filtered MRR.
        """
        # Raw ranking is only reported for the final test split.
        settings = ["raw", "fil"] if self.valid_or_test == "test" else ["fil"]

        for i, fact in tqdm(enumerate(self.dataset.data[self.valid_or_test])):
            for head_or_tail in ["head", "tail"]:
                queries = self.create_queries(fact, head_or_tail)
                for raw_or_fil in settings:
                    h, r, t = self.add_fact_and_shred(fact, queries, raw_or_fil)
                    sim_scores = self.model(h, r, t).cpu().data.numpy()
                    rank = self.get_rank(sim_scores)
                    self.measure.update(rank, raw_or_fil)

        self.measure.normalize(len(self.dataset.data[self.valid_or_test]))
        self.measure.print_()
        return self.measure.mrr["fil"]

    def shred_facts(self, triples):
        """Split (head, rel, tail) triples into three LongTensors on device.

        Uses a single zip pass instead of three index loops; an empty input
        now yields three empty tensors instead of crashing.
        """
        heads, rels, tails = zip(*triples) if triples else ((), (), ())
        return (
            torch.LongTensor(heads).to(self.device),
            torch.LongTensor(rels).to(self.device),
            torch.LongTensor(tails).to(self.device),
        )

    def allFactsAsTuples(self):
        """Every fact from every split, as a flat list of tuples."""
        return [
            tuple(fact)
            for spl in self.dataset.data
            for fact in self.dataset.data[spl]
        ]
| 37.644444 | 96 | 0.592975 | 3,218 | 0.949823 | 0 | 0 | 0 | 0 | 0 | 0 | 700 | 0.206612 |
613fd24f44efdf77077663143efe7c456df30295 | 14,319 | py | Python | eulxml/xmlmap/cerp.py | ig0774/eulxml | 17d71c7d98c0cebda9932b7f13e72093805e1fe2 | [
"Apache-2.0"
] | 19 | 2015-02-23T17:01:24.000Z | 2022-03-14T08:14:16.000Z | eulxml/xmlmap/cerp.py | ig0774/eulxml | 17d71c7d98c0cebda9932b7f13e72093805e1fe2 | [
"Apache-2.0"
] | 30 | 2015-07-24T17:11:52.000Z | 2021-01-19T22:29:37.000Z | eulxml/xmlmap/cerp.py | ig0774/eulxml | 17d71c7d98c0cebda9932b7f13e72093805e1fe2 | [
"Apache-2.0"
] | 13 | 2015-01-27T21:49:16.000Z | 2021-01-19T23:00:00.000Z | # file eulxml/xmlmap/cerp.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import codecs
import datetime
import email
import logging
import os
import six
from eulxml import xmlmap
from eulxml.utils.compat import u
logger = logging.getLogger(__name__)
# CERP is described at http://siarchives.si.edu/cerp/ . XML spec available at
# http://www.records.ncdcr.gov/emailpreservation/mail-account/mail-account_docs.html
# schema resolves but appears to be empty as of April 2016
# Current schema : http://www.history.ncdcr.gov/SHRAB/ar/emailpreservation/mail-account/mail-account.xsd
# internally-reused and general-utility objects
#
class _BaseCerp(xmlmap.XmlObject):
    '''Base class for all CERP objects: binds the CERP mail-account
    namespace to the ``xm:`` prefix used in the XPath field mappings.'''
    ROOT_NS = 'http://www.archives.ncdcr.gov/mail-account'
    ROOT_NAMESPACES = { 'xm': ROOT_NS }
class Parameter(_BaseCerp):
    '''A single name/value parameter pair.'''
    ROOT_NAME = 'Parameter'
    name = xmlmap.StringField('xm:Name')
    value = xmlmap.StringField('xm:Value')
    def __str__(self):
        return '%s=%s' % (self.name, self.value)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, str(self))
class Header(_BaseCerp):
    '''A single email header with its name, value and optional comments.'''
    ROOT_NAME = 'Header'
    name = xmlmap.StringField('xm:Name')
    value = xmlmap.StringField('xm:Value')
    comments = xmlmap.StringListField('xm:Comments')
    def __str__(self):
        return '%s: %s' % (self.name, self.value)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.name)
class _BaseBody(_BaseCerp):
    '''Common MIME entity header elements (content type, transfer
    encoding, content id, description, disposition) shared by body types.'''
    content_type_list = xmlmap.StringListField('xm:ContentType')
    charset_list = xmlmap.StringListField('xm:Charset')
    content_name_list = xmlmap.StringListField('xm:ContentName')
    content_type_comments_list = xmlmap.StringListField('xm:ContentTypeComments')
    content_type_param_list = xmlmap.NodeListField('xm:ContentTypeParam', Parameter)
    transfer_encoding_list = xmlmap.StringListField('xm:TransferEncoding')
    transfer_encoding_comments_list = xmlmap.StringListField('xm:TransferEncodingComments')
    content_id_list = xmlmap.StringListField('xm:ContentId')
    content_id_comments_list = xmlmap.StringListField('xm:ContentIdComments')
    description_list = xmlmap.StringListField('xm:Description')
    description_comments_list = xmlmap.StringListField('xm:DescriptionComments')
    disposition_list = xmlmap.StringListField('xm:Disposition')
    disposition_file_name_list = xmlmap.StringListField('xm:DispositionFileName')
    disposition_comments_list = xmlmap.StringListField('xm:DispositionComments')
    disposition_params = xmlmap.NodeListField('xm:DispositionParams', Parameter)
    other_mime_headers = xmlmap.NodeListField('xm:OtherMimeHeader', Header)
class Hash(_BaseCerp):
    '''A hash value together with the function used to compute it.'''
    ROOT_NAME = 'Hash'
    HASH_FUNCTION_CHOICES = [ 'MD5', 'WHIRLPOOL', 'SHA1', 'SHA224',
                              'SHA256', 'SHA384', 'SHA512', 'RIPEMD160']
    value = xmlmap.StringField('xm:Value')
    function = xmlmap.StringField('xm:Function',
                                  choices=HASH_FUNCTION_CHOICES)
    def __str__(self):
        return self.value
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.function)
class _BaseExternal(_BaseCerp):
    '''Common external entity reference elements: relative path,
    end-of-line convention, and content hash.'''
    EOL_CHOICES = [ 'CR', 'LF', 'CRLF' ]
    rel_path = xmlmap.StringField('xm:RelPath')
    eol = xmlmap.StringField('xm:Eol', choices=EOL_CHOICES)
    hash = xmlmap.NodeField('xm:Hash', Hash)
class _BaseContent(_BaseCerp):
    '''Common content encoding elements (character set, transfer encoding).'''
    charset_list = xmlmap.StringListField('xm:CharSet')
    transfer_encoding_list = xmlmap.StringListField('xm:TransferEncoding')
#
# messages and bodies
#
class BodyContent(_BaseContent):
    '''Message body content stored inline in the XML.'''
    ROOT_NAME = 'BodyContent'
    content = xmlmap.StringField('xm:Content')
class ExtBodyContent(_BaseExternal, _BaseContent):
    '''Message body content stored in an external file.'''
    ROOT_NAME = 'ExtBodyContent'
    local_id = xmlmap.IntegerField('xm:LocalId')
    xml_wrapped = xmlmap.SimpleBooleanField('xm:XMLWrapped',
                                            true='1', false='0')
class SingleBody(_BaseBody):
    '''A single (non-multipart) message body.'''
    ROOT_NAME = 'SingleBody'
    body_content = xmlmap.NodeField('xm:BodyContent', BodyContent)
    ext_body_content = xmlmap.NodeField('xm:ExtBodyContent', ExtBodyContent)
    child_message = xmlmap.NodeField('xm:ChildMessage', None) # this will be fixed below
    @property
    def content(self):
        '''Whichever content element is present: inline content,
        external content, or a nested child message.'''
        return self.body_content or \
               self.ext_body_content or \
               self.child_message
    phantom_body = xmlmap.StringField('xm:PhantomBody')
class MultiBody(_BaseCerp):
    '''A multipart message body: preamble/epilogue plus nested bodies.'''
    ROOT_NAME = 'MultiBody'
    preamble = xmlmap.StringField('xm:Preamble')
    epilogue = xmlmap.StringField('xm:Epilogue')
    single_body = xmlmap.NodeField('xm:SingleBody', SingleBody)
    multi_body = xmlmap.NodeField('xm:MultiBody', 'self')
    @property
    def body(self):
        '''The nested body, whichever of single/multi is present.'''
        return self.single_body or self.multi_body
class Incomplete(_BaseCerp):
    '''Marker recording that part of a message could not be processed.'''
    ROOT_NAME = 'Incomplete'
    error_type = xmlmap.StringField('xm:ErrorType')
    error_location = xmlmap.StringField('xm:ErrorLocation')
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.error_type)
class _BaseMessage(_BaseCerp):
    '''Common message elements: ids, standard RFC 2822 headers, and body.'''
    local_id = xmlmap.IntegerField('xm:LocalId')
    message_id = xmlmap.StringField('xm:MessageId')
    message_id_supplied = xmlmap.SimpleBooleanField('xm:MessageId/@Supplied',
                                                    true='1', false=None)
    mime_version = xmlmap.StringField('xm:MimeVersion')
    orig_date_list = xmlmap.StringListField('xm:OrigDate') # FIXME: really datetime
    # NOTE: eulxml.xmlmap.DateTimeField supports specifying format,
    # but we might need additional work since %z only works with
    # strftime, not strptime
    from_list = xmlmap.StringListField('xm:From')
    sender_list = xmlmap.StringListField('xm:Sender')
    to_list = xmlmap.StringListField('xm:To')
    cc_list = xmlmap.StringListField('xm:Cc')
    bcc_list = xmlmap.StringListField('xm:Bcc')
    in_reply_to_list = xmlmap.StringListField('xm:InReplyTo')
    references_list = xmlmap.StringListField('xm:References')
    subject_list = xmlmap.StringListField('xm:Subject')
    comments_list = xmlmap.StringListField('xm:Comments')
    keywords_list = xmlmap.StringListField('xm:Keywords')
    headers = xmlmap.NodeListField('xm:Header', Header)
    single_body = xmlmap.NodeField('xm:SingleBody', SingleBody)
    multi_body = xmlmap.NodeField('xm:MultiBody', MultiBody)
    @property
    def body(self):
        '''The message body, whichever of single/multi is present.'''
        return self.single_body or self.multi_body
    # NOTE(review): despite the ``_list`` name this is a single NodeField,
    # not a NodeListField — confirm whether that is intentional.
    incomplete_list = xmlmap.NodeField('xm:Incomplete', Incomplete)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__,
                            self.message_id or self.local_id or '(no id)')
class Message(_BaseMessage, _BaseExternal):
    """A single email message in a :class:`Folder`."""
    ROOT_NAME = 'Message'
    STATUS_FLAG_CHOICES = [ 'Seen', 'Answered', 'Flagged', 'Deleted',
                            'Draft', 'Recent']
    status_flags = xmlmap.StringListField('xm:StatusFlag',
                                          choices=STATUS_FLAG_CHOICES)
    @classmethod
    def from_email_message(cls, message, local_id=None):
        '''
        Convert an :class:`email.message.Message` or compatible message
        object into a CERP XML :class:`eulxml.xmlmap.cerp.Message`. If an
        id is specified, it will be stored in the Message <LocalId>.

        :param message: `email.message.Message` object
        :param local_id: optional message id to be set as `local_id`
        :returns: :class:`eulxml.xmlmap.cerp.Message` instance populated
            with message information
        '''
        result = cls()
        if local_id is not None:
            # fix: previously assigned the builtin ``id`` function here
            # instead of the ``local_id`` argument
            result.local_id = local_id
        message_id = message.get('Message-Id')
        if message_id:
            result.message_id_supplied = True
            result.message_id = message_id
        result.mime_version = message.get('MIME-Version')
        dates = message.get_all('Date', [])
        result.orig_date_list.extend([parse_mail_date(d) for d in dates])
        result.from_list.extend(message.get_all('From', []))
        # fix: <Sender> was previously populated from the From header;
        # it corresponds to the RFC 2822 Sender header
        result.sender_list.extend(message.get_all('Sender', []))
        try:
            result.to_list.extend(message.get_all('To', []))
        except UnicodeError:
            print(repr(message['To']))
            raise
        result.cc_list.extend(message.get_all('Cc', []))
        result.bcc_list.extend(message.get_all('Bcc', []))
        result.in_reply_to_list.extend(message.get_all('In-Reply-To', []))
        result.references_list.extend(message.get_all('References', []))
        result.subject_list.extend(message.get_all('Subject', []))
        result.comments_list.extend(message.get_all('Comments', []))
        result.keywords_list.extend(message.get_all('Keywords', []))
        headers = [ Header(name=key, value=val) for key, val in message.items() ]
        result.headers.extend(headers)
        # FIXME: skip multipart messages for now
        if not message.is_multipart():
            result.create_single_body()
            # FIXME: this is a small subset of the actual elements CERP allows.
            # we should add the rest of them, too.
            # message.get_content_type() always returns something. only
            # put it in the CERP if a Content-Type was explicitly specified.
            if message['Content-Type']:
                result.single_body.content_type_list.append(message.get_content_type())
            if message.get_content_charset():
                result.single_body.charset_list.append(message.get_content_charset())
            if message.get_filename():
                result.single_body.content_name_list.append(message.get_filename())
            # FIXME: attaching the body_content only makes sense for text
            # content types. we'll eventually need a better solution for
            # non-text messages
            result.single_body.create_body_content()
            payload = message.get_payload(decode=False)
            # if not unicode, attempt to convert
            if isinstance(payload, six.binary_type):
                charset = message.get_charset()
                # decode according to the specified character set, if any
                if charset is not None:
                    charset_decoder = codecs.getdecoder(str(charset))
                    payload, length = charset_decoder(payload)
                # otherwise, just try to convert
                else:
                    payload = u(payload)
            # remove any control characters not allowed in XML
            control_char_map = dict.fromkeys(range(32))
            for i in [9, 10, 13]: # preserve horizontal tab, line feed, carriage return
                del control_char_map[i]
            payload = u(payload).translate(control_char_map)
            result.single_body.body_content.content = payload
        else:
            # TODO: handle multipart
            # (logger.warn is a deprecated alias of logger.warning)
            logger.warning('CERP conversion does not yet handle multipart')
        # assume we've normalized newlines:
        result.eol = EOLMAP[os.linesep]
        return result
class ChildMessage(_BaseMessage):
    '''A message nested inside another message's body.'''
    ROOT_NAME = 'ChildMessage'
    # no additional elements
# Patch-up from above. FIXME: This is necessary because of recursive
# NodeFields. eulxml.xmlmap.NodeField doesn't currently support these
SingleBody.child_message.node_class = ChildMessage
#
# accounts and folders
#
class Mbox(_BaseExternal):
    '''Reference to an external mbox file.'''
    ROOT_NAME = 'Mbox'
    # no additional fields
class Folder(_BaseCerp):
    """A single email folder in an :class:`Account`, composed of multiple
    :class:`Message` objects and associated metadata."""
    ROOT_NAME = 'Folder'
    name = xmlmap.StringField('xm:Name')
    messages = xmlmap.NodeListField('xm:Message', Message)
    # folders may nest recursively ('self' maps to this same class)
    subfolders = xmlmap.NodeListField('xm:Folder', 'self')
    mboxes = xmlmap.NodeListField('xm:Mbox', Mbox)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.name)
class ReferencesAccount(_BaseCerp):
    '''A cross-reference from one account to related account content.'''
    ROOT_NAME = 'ReferencesAccount'
    REF_TYPE_CHOICES = [ 'PreviousContent', 'SubsequentContent',
                         'Supplemental', 'SeeAlso', 'SeeInstead' ]
    href = xmlmap.StringField('xm:Href')
    email_address = xmlmap.StringField('xm:EmailAddress')
    reference_type = xmlmap.StringField('xm:RefType',
                                        choices=REF_TYPE_CHOICES)
class Account(_BaseCerp):
    """A single email account associated with a single email address and
    composed of multiple :class:`Folder` objects and additional metadata."""
    ROOT_NAME = 'Account'
    # URL of the CERP mail-account XML schema used for validation
    XSD_SCHEMA = 'http://www.history.ncdcr.gov/SHRAB/ar/emailpreservation/mail-account/mail-account.xsd'
    email_address = xmlmap.StringField('xm:EmailAddress')
    global_id = xmlmap.StringField('xm:GlobalId')
    references_accounts = xmlmap.NodeListField('xm:ReferencesAccount',
                                               ReferencesAccount)
    folders = xmlmap.NodeListField('xm:Folder', Folder)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__,
                            self.global_id or self.email_address or '(no id)')
def parse_mail_date(datestr):
    '''Helper method used by :meth:`Message.from_email_message` to
    convert dates from rfc822 format to iso 8601.

    If the input cannot be parsed as an rfc822 date, it is returned
    unchanged.

    :param datestr: string containing a date in rfc822 format
    :returns: string with date in iso 8601 format
    '''
    parsed = email.utils.parsedate_tz(datestr)
    if parsed is None:
        # not an rfc822 date; pass the original value through
        return datestr
    timestamp = email.utils.mktime_tz(parsed)
    return datetime.datetime.fromtimestamp(timestamp).isoformat()
# Map an OS line separator (os.linesep) to its CERP <Eol> token.
EOLMAP = {
    '\r': 'CR',
    '\n': 'LF',
    '\r\n': 'CRLF',
}
| 35.887218 | 104 | 0.68238 | 12,232 | 0.85425 | 0 | 0 | 4,345 | 0.303443 | 0 | 0 | 5,180 | 0.361757 |
6140947de47795d851d9a57f819bb4fedb7e6c30 | 706 | py | Python | nestfit/test/__init__.py | autocorr/nestf | 7b82c42d8be75837e87c3ce4053714acb219b013 | [
"MIT"
] | 11 | 2019-08-22T17:19:10.000Z | 2021-12-10T06:43:32.000Z | nestfit/test/__init__.py | autocorr/nestf | 7b82c42d8be75837e87c3ce4053714acb219b013 | [
"MIT"
] | 3 | 2019-09-30T22:28:56.000Z | 2021-02-15T21:40:33.000Z | nestfit/test/__init__.py | autocorr/nestf | 7b82c42d8be75837e87c3ce4053714acb219b013 | [
"MIT"
] | 2 | 2020-06-30T07:18:18.000Z | 2020-07-13T16:27:33.000Z | #!/usr/bin/env python3
import warnings
from pathlib import Path
from spectral_cube import SpectralCube
from astropy.wcs import FITSFixedWarning
DATA_PATH = Path(__file__).parent / "data"
NH3_RMS_K = 0.35
def get_ammonia_cube(trans_id=1):
    """Read the bundled ammonia cutout test cube for one inversion transition.

    Parameters
    ----------
    trans_id : int
        Transition selector: ``1`` for the (1,1) cube, ``2`` for (2,2).

    Returns
    -------
    SpectralCube
        The cutout cube with the final (NaN-filled) channel dropped.

    Raises
    ------
    ValueError
        If ``trans_id`` is not 1 or 2. (Previously an ``assert``, which
        is silently stripped when Python runs with ``-O``.)
    """
    if trans_id not in (1, 2):
        raise ValueError(f"trans_id must be 1 or 2, got {trans_id!r}")
    transition = f"{trans_id}" * 2  # "11" or "22"
    fpath = DATA_PATH / f"ammonia_{transition}_cutout.fits"
    # Filter WCS "obsfix" warning about multiple OBSGEO keywords
    warnings.filterwarnings(
        "ignore",
        message=R".*Set OBSGEO-. to .* from OBSGEO-\[XYZ\]",
        category=FITSFixedWarning,
    )
    cube = SpectralCube.read(str(fpath))
    cube = cube[:-1]  # last channel contains NaNs
    return cube
| 23.533333 | 64 | 0.677054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.304533 |
614115a92e300f92dd37eb72064e8a55346da282 | 228 | py | Python | djangopeople/djangopeople/management/commands/recluster.py | timgraham/djangopeople | de595ac8d7e8540871c282294a332a835ac0c177 | [
"MIT"
] | null | null | null | djangopeople/djangopeople/management/commands/recluster.py | timgraham/djangopeople | de595ac8d7e8540871c282294a332a835ac0c177 | [
"MIT"
] | null | null | null | djangopeople/djangopeople/management/commands/recluster.py | timgraham/djangopeople | de595ac8d7e8540871c282294a332a835ac0c177 | [
"MIT"
] | null | null | null | from django.core.management.base import NoArgsCommand
from ... import clustering
class Command(NoArgsCommand):
    # Django management command (legacy NoArgsCommand API).
    help = "Re-runs the server-side clustering"
    def handle_noargs(self, **options):
        """Entry point: delegate to the clustering module's run()."""
        clustering.run()
| 20.727273 | 53 | 0.723684 | 143 | 0.627193 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.157895 |
6141442f7c66378254acb69ff2d917fdf3531f0e | 211 | py | Python | phl_courts_scraper/portal/__init__.py | PhilaController/phl-courts-scraper | d18eca8c5658f88b8f783903ab65c247f8f6d579 | [
"MIT"
] | null | null | null | phl_courts_scraper/portal/__init__.py | PhilaController/phl-courts-scraper | d18eca8c5658f88b8f783903ab65c247f8f6d579 | [
"MIT"
] | 1 | 2022-03-12T00:52:08.000Z | 2022-03-12T00:52:08.000Z | phl_courts_scraper/portal/__init__.py | PhilaController/phl-courts-scraper | d18eca8c5658f88b8f783903ab65c247f8f6d579 | [
"MIT"
] | null | null | null | """Parse the UJS court portal."""
from .core import UJSPortalScraper # noqa: F401
from .schema import PortalResult, PortalResults # noqa: F401
__all__ = ["UJSPortalScraper", "PortalResult", "PortalResults"]
| 30.142857 | 63 | 0.744076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.492891 |
61417ac842baa5fce26e656d22d5a3fc89e691b0 | 3,671 | py | Python | tuning/TensileConfiguration.py | mhbliao/Tensile | 99261adfa7fe6b30b0c76b17b87bbd6cb41bae60 | [
"MIT"
] | null | null | null | tuning/TensileConfiguration.py | mhbliao/Tensile | 99261adfa7fe6b30b0c76b17b87bbd6cb41bae60 | [
"MIT"
] | null | null | null | tuning/TensileConfiguration.py | mhbliao/Tensile | 99261adfa7fe6b30b0c76b17b87bbd6cb41bae60 | [
"MIT"
] | 5 | 2019-07-29T01:23:56.000Z | 2022-03-08T09:28:10.000Z |
import os
import sys
import argparse
################################################################################
# Print Debug
################################################################################
#def printWarning(message):
# print "Tensile::WARNING: %s" % message
# sys.stdout.flush()
def printExit(message):
    # Report a fatal Tensile error and terminate with exit code -1.
    # (Python 2 print-statement syntax, consistent with the rest of this file.)
    print "Tensile::FATAL: %s" % message
    sys.stdout.flush()
    sys.exit(-1)
try:
import yaml
except ImportError:
printExit("You must install PyYAML to use Tensile (to parse config files). See http://pyyaml.org/wiki/PyYAML for installation instructions.")
#HR = "################################################################################"
def ensurePath(path):
    # Create directory `path` (including parents) if it does not exist,
    # and return the path so the call can be used inline.
    if not os.path.exists(path):
        import errno
        try:
            os.makedirs(path)
        except OSError as e:
            # Tolerate another process creating the directory between the
            # exists() check and makedirs() (previously raised OSError).
            if e.errno != errno.EEXIST:
                raise
    return path
################################################################################
# Define Constants
################################################################################
def constant(f):
    """Decorator: expose method *f* as a read-only property.

    Reading delegates to *f*; any assignment raises TypeError.
    """
    def _get(self):
        return f(self)
    def _reject_set(self, value):
        raise TypeError
    return property(_get, _reject_set)
class _Const(object):
    '''Read-only names of the four top-level YAML configuration sections.'''
    @constant
    def GlobalParameters(self):
        return "GlobalParameters"
    @constant
    def BenchmarkProblems(self):
        return "BenchmarkProblems"
    @constant
    def LibraryLogic(self):
        return "LibraryLogic"
    @constant
    def LibraryClient(self):
        return "LibraryClient"
CONST = _Const()
################################################################################
# Tuning Configuration Container
################################################################################
class TuningConfiguration:
    """Container for a Tensile tuning configuration YAML file.

    Exposes the four top-level sections (GlobalParameters,
    BenchmarkProblems, LibraryLogic, LibraryClient) as properties; each
    is None when absent from the file or when no filename is given.
    """
    def __init__(self, filename=None):
        # (removed stray debug output 'print "implement"' left from development)
        if filename is not None:
            print ("# Reading configuration: " + filename)
            try:
                stream = open(filename, "r")
            except IOError:
                printExit("Cannot open file: %s" % filename)
            try:
                data = yaml.load(stream, yaml.SafeLoader)
            finally:
                # close the handle even if YAML parsing fails
                # (previously only closed on success)
                stream.close()
            if CONST.GlobalParameters in data:
                self.__set_globalParameters(data[CONST.GlobalParameters])
            else:
                self.__set_globalParameters(None)
            if CONST.BenchmarkProblems in data:
                self.__set_benchmarkProblems(data[CONST.BenchmarkProblems])
            else:
                self.__set_benchmarkProblems(None)
            if CONST.LibraryLogic in data:
                self.__set_libraryLogic(data[CONST.LibraryLogic])
            else:
                self.__set_libraryLogic(None)
            if CONST.LibraryClient in data:
                self.__set_libraryClient(data[CONST.LibraryClient])
            else:
                self.__set_libraryClient(None)
        else:
            self.__set_globalParameters(None)
            self.__set_benchmarkProblems(None)
            self.__set_libraryLogic(None)
            # BUG FIX: libraryLogic was previously set twice here and
            # libraryClient never, so .libraryClient on an empty
            # configuration raised AttributeError.
            self.__set_libraryClient(None)
    def __get_globalParameters(self):
        return self.__globalParameters
    def __set_globalParameters(self, value):
        self.__globalParameters = value
    # NOTE: the misspelled name is preserved for backward compatibility;
    # a correctly spelled alias follows it.
    globalParamters = property(__get_globalParameters, __set_globalParameters)
    globalParameters = globalParamters
    def __get_benchmarkProblems(self):
        return self.__benchmarkProblems
    def __set_benchmarkProblems(self, value):
        self.__benchmarkProblems = value
    benchmarkProblems = property(__get_benchmarkProblems, __set_benchmarkProblems)
    def __get_libraryLogic(self):
        return self.__libraryLogic
    def __set_libraryLogic(self, value):
        self.__libraryLogic = value
    libraryLogic = property(__get_libraryLogic, __set_libraryLogic)
    def __get_libraryClient(self):
        return self.__libraryClient
    def __set_libraryClient(self, value):
        self.__libraryClient = value
    libraryClient = property(__get_libraryClient, __set_libraryClient)
| 25.493056 | 143 | 0.602016 | 2,355 | 0.641515 | 0 | 0 | 264 | 0.071915 | 0 | 0 | 999 | 0.272133 |
61419b6e24bcba1a05210251891441000213b61a | 1,557 | py | Python | WeLearn/M3-Python/L1-Python_Intro/hello.py | Sheldon101/mycssi2019labs | 4ff1ecda475a54be79ee9ce44f1dac77418df2db | [
"Apache-2.0"
] | null | null | null | WeLearn/M3-Python/L1-Python_Intro/hello.py | Sheldon101/mycssi2019labs | 4ff1ecda475a54be79ee9ce44f1dac77418df2db | [
"Apache-2.0"
] | null | null | null | WeLearn/M3-Python/L1-Python_Intro/hello.py | Sheldon101/mycssi2019labs | 4ff1ecda475a54be79ee9ce44f1dac77418df2db | [
"Apache-2.0"
] | null | null | null | #num1=int(raw_input("Enter num #1:"))
#num2=int(raw_input("Enter num #2:"))
#total= num1 + num2
#print("The sum is: "+ str(total))
# need to be a string so computer can read it
# every integer can be converted to a string, but not every string can be converted to an integer
# num = int(raw_input("Enter a number:"))
# if num>0:
# print("That's a postive num!")
# elif num<0:
# print("That's a negative num!")
# else:
# print("Zero is neither postive nor negative!")
# string = "hello"
# for letter in string:
# print(letter.upper())
#
# for i in range(5): repeatedly executed, as many times as there are letters in the string, so "hello" would be 5
# print(i)
#
# x=1
# while x <=5:
# print(x)
# x=x+1
# my_name= "B"
# friend1= "A"
# friend2= "J"
# friend3= "M"
# print(
# "My name is %s and my friends are %s, %s, and %s" %
# (my_name,friend1,friend2,friend3 )
# )
#
# name= "C"
# age= 19
# print("My name is "+ name + "and I'm " + str(age)+"years old.") one way
# print("My name is %s and I'm %s years old." %(name,age)) second way
# def greetAgent():
# print("B. James Bond.")
# greetAgent() always call the func
#
# def greetAgent(first_name, last_name):
# print("%s. %s. %s." % (last_name, first_name, last_name))
# One way
#
#
# def createAgentGreeting(first_name, last_name):
# return"%s. %s. %s." % (last_name, first_name, last_name)
#
# print(createAgentGreeting("Citlally", "G"))
# second way
word = "computerz"
# Demonstrate the four basic slice forms, printed in order:
# prefix ("compu"), drop-last ("computer"), suffix ("uterz"),
# and last-three ("erz").
slice_demos = (word[:5], word[:-1], word[4:], word[-3:])
for piece in slice_demos:
    print(piece)
| 26.389831 | 98 | 0.626204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,422 | 0.913295 |
614460f71cfeb7b215e13c2a3a7099f7272a6039 | 11,541 | py | Python | test/test_server.py | gndu91/wsproto | 7ddbdd9d7feee16b72ef7cd50c71fe18a88ce094 | [
"MIT"
] | 179 | 2017-05-03T19:52:16.000Z | 2022-03-26T13:14:46.000Z | test/test_server.py | gndu91/wsproto | 7ddbdd9d7feee16b72ef7cd50c71fe18a88ce094 | [
"MIT"
] | 116 | 2017-05-03T20:18:50.000Z | 2022-02-27T12:33:34.000Z | test/test_server.py | gndu91/wsproto | 7ddbdd9d7feee16b72ef7cd50c71fe18a88ce094 | [
"MIT"
] | 38 | 2017-05-04T18:04:07.000Z | 2022-02-26T19:54:36.000Z | from typing import cast, List, Optional, Tuple
import h11
import pytest
from wsproto import WSConnection
from wsproto.connection import SERVER
from wsproto.events import (
AcceptConnection,
Event,
RejectConnection,
RejectData,
Request,
)
from wsproto.extensions import Extension
from wsproto.typing import Headers
from wsproto.utilities import (
generate_accept_token,
generate_nonce,
normed_header_dict,
RemoteProtocolError,
)
from .helpers import FakeExtension
def _make_connection_request(request_headers: Headers, method: str = "GET") -> Request:
    """Feed a client HTTP upgrade request into a fresh server WSConnection
    and return the resulting :class:`Request` event."""
    client = h11.Connection(h11.CLIENT)
    server = WSConnection(SERVER)
    server.receive_data(
        client.send(h11.Request(method=method, target="/", headers=request_headers))
    )
    event = next(server.events())
    assert isinstance(event, Request)
    return event
def test_connection_request() -> None:
    """A well-formed upgrade request yields a Request event whose handshake
    headers are consumed and whose extra headers are preserved."""
    event = _make_connection_request(
        [
            (b"Host", b"localhost"),
            (b"Connection", b"Keep-Alive, Upgrade"),
            (b"Upgrade", b"WebSocket"),
            (b"Sec-WebSocket-Version", b"13"),
            (b"Sec-WebSocket-Key", generate_nonce()),
            (b"X-Foo", b"bar"),
        ]
    )
    assert event.extensions == []
    assert event.host == "localhost"
    assert event.subprotocols == []
    assert event.target == "/"
    headers = normed_header_dict(event.extra_headers)
    assert b"host" not in headers
    assert b"sec-websocket-extensions" not in headers
    assert b"sec-websocket-protocol" not in headers
    assert headers[b"connection"] == b"Keep-Alive, Upgrade"
    assert headers[b"sec-websocket-version"] == b"13"
    assert headers[b"upgrade"] == b"WebSocket"
    assert headers[b"x-foo"] == b"bar"
def test_connection_request_bad_method() -> None:
    """A non-GET upgrade request is rejected with RemoteProtocolError."""
    with pytest.raises(RemoteProtocolError) as excinfo:
        _make_connection_request(
            [
                (b"Host", b"localhost"),
                (b"Connection", b"Keep-Alive, Upgrade"),
                (b"Upgrade", b"WebSocket"),
                (b"Sec-WebSocket-Version", b"13"),
                (b"Sec-WebSocket-Key", generate_nonce()),
            ],
            method="POST",
        )
    assert str(excinfo.value) == "Request method must be GET"
def test_connection_request_bad_connection_header() -> None:
    """A Connection header without the Upgrade token is rejected."""
    with pytest.raises(RemoteProtocolError) as excinfo:
        _make_connection_request(
            [
                (b"Host", b"localhost"),
                (b"Connection", b"Keep-Alive, No-Upgrade"),
                (b"Upgrade", b"WebSocket"),
                (b"Sec-WebSocket-Version", b"13"),
                (b"Sec-WebSocket-Key", generate_nonce()),
            ]
        )
    assert str(excinfo.value) == "Missing header, 'Connection: Upgrade'"
def test_connection_request_bad_upgrade_header() -> None:
    """An Upgrade header naming a protocol other than WebSocket is rejected."""
    with pytest.raises(RemoteProtocolError) as excinfo:
        _make_connection_request(
            [
                (b"Host", b"localhost"),
                (b"Connection", b"Keep-Alive, Upgrade"),
                (b"Upgrade", b"h2c"),
                (b"Sec-WebSocket-Version", b"13"),
                (b"Sec-WebSocket-Key", generate_nonce()),
            ]
        )
    assert str(excinfo.value) == "Missing header, 'Upgrade: WebSocket'"
@pytest.mark.parametrize("version", [b"12", b"not-a-digit"])
def test_connection_request_bad_version_header(version: bytes) -> None:
    """An unsupported WebSocket version is rejected, and the error's hint
    suggests a 426 response advertising version 13."""
    with pytest.raises(RemoteProtocolError) as excinfo:
        _make_connection_request(
            [
                (b"Host", b"localhost"),
                (b"Connection", b"Keep-Alive, Upgrade"),
                (b"Upgrade", b"WebSocket"),
                (b"Sec-WebSocket-Version", version),
                (b"Sec-WebSocket-Key", generate_nonce()),
            ]
        )
    assert str(excinfo.value) == "Missing header, 'Sec-WebSocket-Version'"
    assert excinfo.value.event_hint == RejectConnection(
        headers=[(b"Sec-WebSocket-Version", b"13")], status_code=426
    )
def test_connection_request_key_header() -> None:
    """A request missing Sec-WebSocket-Key is rejected."""
    with pytest.raises(RemoteProtocolError) as excinfo:
        _make_connection_request(
            [
                (b"Host", b"localhost"),
                (b"Connection", b"Keep-Alive, Upgrade"),
                (b"Upgrade", b"WebSocket"),
                (b"Sec-WebSocket-Version", b"13"),
            ]
        )
    assert str(excinfo.value) == "Missing header, 'Sec-WebSocket-Key'"
def test_upgrade_request() -> None:
    """initiate_upgrade_connection (pre-parsed headers) produces the same
    Request event as feeding raw HTTP bytes."""
    server = WSConnection(SERVER)
    server.initiate_upgrade_connection(
        [
            (b"Host", b"localhost"),
            (b"Connection", b"Keep-Alive, Upgrade"),
            (b"Upgrade", b"WebSocket"),
            (b"Sec-WebSocket-Version", b"13"),
            (b"Sec-WebSocket-Key", generate_nonce()),
            (b"X-Foo", b"bar"),
        ],
        "/",
    )
    event = next(server.events())
    event = cast(Request, event)
    assert event.extensions == []
    assert event.host == "localhost"
    assert event.subprotocols == []
    assert event.target == "/"
    headers = normed_header_dict(event.extra_headers)
    assert b"host" not in headers
    assert b"sec-websocket-extensions" not in headers
    assert b"sec-websocket-protocol" not in headers
    assert headers[b"connection"] == b"Keep-Alive, Upgrade"
    assert headers[b"sec-websocket-version"] == b"13"
    assert headers[b"upgrade"] == b"WebSocket"
    assert headers[b"x-foo"] == b"bar"
def _make_handshake(
    request_headers: Headers,
    accept_headers: Optional[Headers] = None,
    subprotocol: Optional[str] = None,
    extensions: Optional[List[Extension]] = None,
) -> Tuple[h11.InformationalResponse, bytes]:
    """Run a full accept handshake: send a valid client upgrade request
    (plus ``request_headers``), have the server accept it with the given
    subprotocol/extensions, and return the client-side h11 response event
    together with the nonce used as Sec-WebSocket-Key."""
    client = h11.Connection(h11.CLIENT)
    server = WSConnection(SERVER)
    nonce = generate_nonce()
    server.receive_data(
        client.send(
            h11.Request(
                method="GET",
                target="/",
                headers=[
                    (b"Host", b"localhost"),
                    (b"Connection", b"Keep-Alive, Upgrade"),
                    (b"Upgrade", b"WebSocket"),
                    (b"Sec-WebSocket-Version", b"13"),
                    (b"Sec-WebSocket-Key", nonce),
                ]
                + request_headers,
            )
        )
    )
    client.receive_data(
        server.send(
            AcceptConnection(
                extra_headers=accept_headers or [],
                subprotocol=subprotocol,
                extensions=extensions or [],
            )
        )
    )
    event = client.next_event()
    return event, nonce
def test_handshake() -> None:
    """Accepting yields a 101 response carrying the correct accept token."""
    response, nonce = _make_handshake([])
    response.headers = sorted(response.headers)  # For test determinism
    assert response == h11.InformationalResponse(
        status_code=101,
        headers=[
            (b"connection", b"Upgrade"),
            (b"sec-websocket-accept", generate_accept_token(nonce)),
            (b"upgrade", b"WebSocket"),
        ],
    )
def test_handshake_extra_headers() -> None:
    """Extra headers supplied to AcceptConnection appear in the 101 response."""
    response, nonce = _make_handshake([], accept_headers=[(b"X-Foo", b"bar")])
    response.headers = sorted(response.headers)  # For test determinism
    assert response == h11.InformationalResponse(
        status_code=101,
        headers=[
            (b"connection", b"Upgrade"),
            (b"sec-websocket-accept", generate_accept_token(nonce)),
            (b"upgrade", b"WebSocket"),
            (b"x-foo", b"bar"),
        ],
    )
@pytest.mark.parametrize("accept_subprotocol", ["one", "two"])
def test_handshake_with_subprotocol(accept_subprotocol: str) -> None:
    """The accepted subprotocol is echoed in Sec-WebSocket-Protocol."""
    response, _ = _make_handshake(
        [(b"Sec-Websocket-Protocol", b"one, two")], subprotocol=accept_subprotocol
    )
    headers = normed_header_dict(response.headers)
    assert headers[b"sec-websocket-protocol"] == accept_subprotocol.encode("ascii")
def test_handshake_with_extension() -> None:
    """An offered-and-accepted extension is listed in the response."""
    extension = FakeExtension(accept_response=True)
    response, _ = _make_handshake(
        [(b"Sec-Websocket-Extensions", extension.name.encode("ascii"))],
        extensions=[extension],
    )
    headers = normed_header_dict(response.headers)
    assert headers[b"sec-websocket-extensions"] == extension.name.encode("ascii")
def test_handshake_with_extension_params() -> None:
    """The extension sees the offered parameters and may respond with its
    own accepted parameters, which are echoed to the client."""
    offered_params = "parameter1=value3; parameter2=value4"
    accepted_params = "parameter1=value1; parameter2=value2"
    extension = FakeExtension(accept_response=accepted_params)
    response, _ = _make_handshake(
        [
            (
                b"Sec-Websocket-Extensions",
                (f"{extension.name}; {offered_params}").encode("ascii"),
            )
        ],
        extensions=[extension],
    )
    headers = normed_header_dict(response.headers)
    assert extension.offered == f"{extension.name}; {offered_params}"
    assert headers[b"sec-websocket-extensions"] == (
        f"{extension.name}; {accepted_params}"
    ).encode("ascii")
def test_handshake_with_extra_unaccepted_extension() -> None:
    """Extensions offered by the client but unknown to the server are
    dropped; only the accepted one appears in the response."""
    extension = FakeExtension(accept_response=True)
    response, _ = _make_handshake(
        [
            (
                b"Sec-Websocket-Extensions",
                b"pretend, %s" % extension.name.encode("ascii"),
            )
        ],
        extensions=[extension],
    )
    headers = normed_header_dict(response.headers)
    assert headers[b"sec-websocket-extensions"] == extension.name.encode("ascii")
def test_protocol_error() -> None:
    """Non-HTTP garbage fed to the server raises RemoteProtocolError."""
    server = WSConnection(SERVER)
    with pytest.raises(RemoteProtocolError) as excinfo:
        server.receive_data(b"broken nonsense\r\n\r\n")
    assert str(excinfo.value) == "Bad HTTP message"
def _make_handshake_rejection(
    status_code: int, body: Optional[bytes] = None
) -> List[Event]:
    """Send a valid client upgrade request, reject it server-side with
    ``status_code`` (and optional response ``body``), and return the h11
    events the client observes up to and including EndOfMessage."""
    client = h11.Connection(h11.CLIENT)
    server = WSConnection(SERVER)
    nonce = generate_nonce()
    server.receive_data(
        client.send(
            h11.Request(
                method="GET",
                target="/",
                headers=[
                    (b"Host", b"localhost"),
                    (b"Connection", b"Keep-Alive, Upgrade"),
                    (b"Upgrade", b"WebSocket"),
                    (b"Sec-WebSocket-Version", b"13"),
                    (b"Sec-WebSocket-Key", nonce),
                ],
            )
        )
    )
    if body is not None:
        # Two-step rejection: headers first (with content-length), then body.
        client.receive_data(
            server.send(
                RejectConnection(
                    headers=[(b"content-length", b"%d" % len(body))],
                    status_code=status_code,
                    has_body=True,
                )
            )
        )
        client.receive_data(server.send(RejectData(data=body)))
    else:
        client.receive_data(server.send(RejectConnection(status_code=status_code)))
    events = []
    while True:
        event = client.next_event()
        events.append(event)
        if isinstance(event, h11.EndOfMessage):
            return events
def test_handshake_rejection() -> None:
    """A bodiless rejection yields a 400 response with content-length 0."""
    events = _make_handshake_rejection(400)
    assert events == [
        h11.Response(headers=[(b"content-length", b"0")], status_code=400),
        h11.EndOfMessage(),
    ]
def test_handshake_rejection_with_body() -> None:
    """A rejection body is surfaced to the client as an h11 Data event."""
    expected = [
        h11.Response(headers=[(b"content-length", b"5")], status_code=400),
        h11.Data(data=b"Hello"),
        h11.EndOfMessage(),
    ]
    assert _make_handshake_rejection(400, body=b"Hello") == expected
| 32.418539 | 87 | 0.593709 | 0 | 0 | 0 | 0 | 1,112 | 0.096352 | 0 | 0 | 2,580 | 0.223551 |
6148d66b350b1b75a0fa80c7fdba2459fab09824 | 1,499 | py | Python | KnowledgeMapping/spark/connNeo4j/demo_mysql.py | nickliqian/ralph_doc_to_chinese | be120ce2bb94a8e8395630218985f5e51ae087d9 | [
"MIT"
] | 8 | 2018-05-22T01:11:33.000Z | 2020-03-19T01:44:55.000Z | KnowledgeMapping/spark/connNeo4j/demo_mysql.py | nickliqian/ralph_doc_to_chinese | be120ce2bb94a8e8395630218985f5e51ae087d9 | [
"MIT"
] | null | null | null | KnowledgeMapping/spark/connNeo4j/demo_mysql.py | nickliqian/ralph_doc_to_chinese | be120ce2bb94a8e8395630218985f5e51ae087d9 | [
"MIT"
] | 3 | 2018-07-25T09:31:53.000Z | 2019-09-14T14:05:31.000Z | import pymysql
# Paginated dump of tb_report_info_test: fetch `step` rows per SELECT until an
# empty page comes back, printing every row to stdout.
print("Connect to mysql...")
mysql_db = "report_system"
# NOTE(review): host and credentials are hard coded — move to config/env vars.
m_conn = pymysql.connect(host='192.168.20.20', port=3306, user='admin', passwd='1qaz@WSX', db=mysql_db, charset='utf8')
m_cursor = m_conn.cursor()
num_id = 0  # current page number
step = 10000  # rows per page
try:
    while True:
        # OFFSET-based paging: the offset grows with the page number, which
        # gets slow for very large tables — presumably acceptable here (TODO confirm).
        sql = "select row_key,account_id,cell_no,is_id_match,product_id,response_ref_id,query_time " \
              "from tb_report_info_test limit {},{}".format(num_id*step, step)
        m_cursor.execute(sql)
        query_results = m_cursor.fetchall()
        if not query_results:
            # empty page -> end of table reached
            print("MySQL查询结果为空 id=<{}>".format(num_id))
            break
        else:
            for index, result in enumerate(query_results):
                row_key, account_id, cell_no, is_id_match, product_id, response_ref_id, query_time = result
                print(num_id, index, row_key, account_id, cell_no, is_id_match, product_id, response_ref_id, query_time)
        num_id += 1
finally:
    # always release DB resources, even on error
    m_cursor.close()
    m_conn.close()
print("MySQL connection close...") | 45.424242 | 263 | 0.682455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 709 | 0.469226 |
614996a118e2aab5b9a38bd9e742094a6bc21b63 | 993 | py | Python | attached/delete.py | yougikou/yougikou.github.io | a8ebb2a8e525e735806ea6d8a8a7f89fbf0305c1 | [
"Apache-2.0"
] | 1 | 2022-01-25T17:24:05.000Z | 2022-01-25T17:24:05.000Z | attached/delete.py | yougikou/yougikou.github.io | a8ebb2a8e525e735806ea6d8a8a7f89fbf0305c1 | [
"Apache-2.0"
] | 6 | 2020-05-06T23:36:55.000Z | 2021-11-02T10:46:11.000Z | attached/delete.py | yougikou/yougikou.github.io | a8ebb2a8e525e735806ea6d8a8a7f89fbf0305c1 | [
"Apache-2.0"
] | null | null | null | import sys, os, time, traceback
from pdfrw import PdfReader, PdfWriter, PageMerge
def processFile(file):
    """Interactively remove pages from a PDF.

    Prompts the user for 1-based page numbers (space separated) and writes a
    copy of *file* without those pages to the ``out`` directory.

    :param file: path to the input PDF.
    """
    inpfn = file
    # os.path.join instead of 'out\\' + basename: the old form produced a
    # literal backslash in the file name on non-Windows systems.
    outfn = os.path.join('out', os.path.basename(inpfn))
    reader = PdfReader(inpfn)
    writer = PdfWriter(outfn)
    pagesNum = len(reader.pages)
    print(os.path.basename(inpfn) + ": page 1 - " + str(pagesNum))
    print("Please specify page with space (Ex. 2 4 11).")
    # ValueError from bad input propagates to the caller's except block.
    delPages = list(map(int, input().split()))
    for idx, page in enumerate(reader.pages):
        # pages are presented 1-based to the user, hence idx + 1
        if (idx + 1) not in delPages:
            writer.addPage(page)
    writer.write()
if __name__ == "__main__":
    # CLI entry point: expects the PDF path as the first argument.
    inpfn = sys.argv[1]
    try:
        # NOTE(review): `assert` is stripped under `python -O`; an explicit
        # check that raises would be more robust for input validation.
        assert inpfn
        if inpfn.lower().endswith(".pdf"):
            if not os.path.exists("out"):
                os.mkdir("out")
            processFile(inpfn)
        else:
            print("File is not pdf")
            time.sleep(10)
    except Exception as e:
        traceback.print_exc()
        # sleep keeps the console window open long enough to read the error —
        # presumably for drag-and-drop use on Windows (TODO confirm)
        time.sleep(10)
| 29.205882 | 67 | 0.562941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.109768 |
614b094bc6fe81f808b2770a13e59af2778e159e | 321 | py | Python | Audio Features/waveplot.py | kritika58/A-Novel-Framework-Using-Neutrosophy-for-Integrated-Speech-and-Text-Sentiment-Analysis | a16bd5d02f2efd34ad20f496fb59f273fdb9b60c | [
"MIT"
] | null | null | null | Audio Features/waveplot.py | kritika58/A-Novel-Framework-Using-Neutrosophy-for-Integrated-Speech-and-Text-Sentiment-Analysis | a16bd5d02f2efd34ad20f496fb59f273fdb9b60c | [
"MIT"
] | null | null | null | Audio Features/waveplot.py | kritika58/A-Novel-Framework-Using-Neutrosophy-for-Integrated-Speech-and-Text-Sentiment-Analysis | a16bd5d02f2efd34ad20f496fb59f273fdb9b60c | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import librosa.display
# Plot the waveform of librosa's bundled example audio clip.
plt.rcParams.update({'font.size': 16})
y, sr = librosa.load(librosa.util.example_audio_file())  # y: samples, sr: sample rate
plt.figure(figsize=(18, 7))
librosa.display.waveplot(y, sr=sr, x_axis='s')  # x axis in seconds
print(sr)
# NOTE(review): the y axis shows amplitude, not the sampling rate — label
# looks misleading; confirm intended wording.
plt.ylabel('Sampling Rate',fontsize=32)
plt.xlabel('Time (s)',fontsize=32)
plt.show() | 29.181818 | 55 | 0.744548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.121495 |
614c833cd5e98e613a94ad6a3e10cb9506f857ac | 49,928 | py | Python | services/dashboard/projekt/dashboard_server.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | [
"Apache-2.0"
] | 2 | 2018-05-30T08:40:26.000Z | 2018-09-06T15:37:25.000Z | services/dashboard/projekt/dashboard_server.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | [
"Apache-2.0"
] | 1 | 2021-06-01T22:37:55.000Z | 2021-06-01T22:37:55.000Z | services/dashboard/projekt/dashboard_server.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | [
"Apache-2.0"
] | 2 | 2018-05-31T14:55:04.000Z | 2018-08-29T09:38:31.000Z | # -*- coding: utf-8 -*-
'''
Autor: Joshua Prim, Philipp Krenitz, Bartos Mosch, Sophie Hagemann
Version: 1.3
    Server fuer das Hosten des FaSta-Dashboards
Copyright 2018 The Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================
'''
import sys
import dash
import dash_auth
import dash_core_components
import dash_core_components as dcc
import dash_html_components as html
import dash_table_experiments as dt
import flask
import pandas as pd
import plotly.graph_objs as go
import pymongo
import threading
from dash.dependencies import Input, Output
import os
import collections
from pprint import pprint
from pymongo.command_cursor import CommandCursor
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
from types import *
import pandas as pd
import numpy as np
from pandas import DataFrame
sys.path.append('./Clients')
import folium
from geopy.geocoders import Nominatim
#from sqlalchemy import create_engine
import psycopg2
########################################################################## #############################################################################################################################################
########################################################################## Web Application #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Constants
MONGO_URL = os.environ.get('MONGO_URI')  # MongoDB connection string from the environment
POSTGRESS_URL = os.environ.get('POSTGRES_URL')  # NOTE(review): not used in this module — createInitialData() hard codes its Postgres params
HOST_ID = '0.0.0.0'
PORT = '37002'
print('Fasta Server initialisiert!')
def createGraphDataForEscalatorPage(numberOfLastEntries: int):
    """Build per-day escalator outage counts for the dashboard bar chart.

    For every calendar day that has at least one INACTIVE record, counts the
    facilities whose *latest* INACTIVE record falls on that day.  Reads the
    module-level MongoDB collection ``facilities``.

    :param numberOfLastEntries: number of most recent days to keep.
    :return: DataFrame with columns ['Datum', 'Anzahl_Ausfälle'].
    """
    ergDF = pd.DataFrame(columns=['Datum', 'Anzahl_Ausfälle'])
    facilities_collection = facilities.find({})
    pandas_facilities = pd.DataFrame(list(facilities_collection))
    # NOTE(review): raises KeyError when the collection is empty — unchanged behaviour.
    pandas_facilities = pandas_facilities[['equipmentnumber', 'datetime', 'state']]
    facilities_distinct = pandas_facilities
    facilities_distinct.columns = ['ID', 'Datum', 'Status']
    # Normalise the raw timestamp strings down to plain dates (YYYY-MM-DD).
    facilities_distinct['Datum'] = pd.to_datetime(facilities_distinct['Datum'], format="%Y-%m-%d_%H-%M-%S")
    facilities_distinct['Datum'] = facilities_distinct['Datum'].dt.strftime('%Y-%m-%d')
    facilities_distinct_inactive = facilities_distinct[facilities_distinct['Status'] == 'INACTIVE']
    dfOnlyDatetime = pd.DataFrame(facilities_distinct_inactive['Datum'], columns=['Datum']).drop_duplicates()
    facilities_distinct_inactive_latestDate = facilities_distinct_inactive.groupby('ID')['Datum'].max()
    # Count how many IDs share each latest-outage date in ONE pass instead of
    # re-scanning the whole series for every day (was O(days * ids)).
    outages_per_day = collections.Counter(facilities_distinct_inactive_latestDate)
    for index, row in dfOnlyDatetime.iterrows():
        ergDF.loc[index] = row['Datum'], outages_per_day[row['Datum']]
    ergDF = ergDF.reset_index().drop(['index'], axis=1)
    # Keep only the most recent days; row order follows first occurrence in the data.
    ergDF = ergDF.iloc[-numberOfLastEntries:]
    return ergDF
def getDesiredState(listWithStates, state):
    """Count how many entries in *listWithStates* carry the given ``state``."""
    return sum(1 for entry in listWithStates if entry['state'] == state)
def getDesiredStateExplanation(listWithStates, state, stateExplanation):
    """Count entries whose state AND state explanation both match."""
    matches = 0
    for entry in listWithStates:
        if (entry['state'], entry['stateExplanation']) == (state, stateExplanation):
            matches += 1
    return matches
def createOverview(givenType: str):
    """Return (active, inactive, unknown) counts over the latest state of
    every facility of *givenType* in the module-level ``facilities`` collection."""
    pipeline = [
        {'$match': {'type': givenType}},
        {'$group': {
            '_id': '$equipmentnumber',
            'lastStateChangeDate': {'$last': '$datetime'},
            'state': {'$last': '$state'},
        }}
    ]
    latest_states = list(facilities.aggregate(pipeline))
    return (
        getDesiredState(latest_states, 'ACTIVE'),
        getDesiredState(latest_states, 'INACTIVE'),
        getDesiredState(latest_states, 'UNKNOWN'),
    )
def createReasonsForInactivity(givenType: str):
    """Return two parallel lists (explanations, counts) describing why
    facilities of *givenType* are currently INACTIVE.

    Zero-count explanations are omitted; order follows the order of the
    distinct explanations in MongoDB.
    """
    explanations = facilities.distinct("stateExplanation")
    pipeline = [
        {'$match': {'type': givenType}},
        {'$group': {
            '_id': '$equipmentnumber',
            'lastStateChangeDate': {'$last': '$datetime'},
            'state': {'$last': '$state'},
            'stateExplanation': {'$last': '$stateExplanation'}
        }}
    ]
    latest_states = list(facilities.aggregate(pipeline))
    reason_counts = {}
    for explanation in explanations:
        hits = getDesiredStateExplanation(latest_states, 'INACTIVE', str(explanation))
        if hits != 0:
            reason_counts[str(explanation)] = hits
    return list(reason_counts.keys()), list(reason_counts.values())
def createInitialData():
    """Connect to MongoDB and Postgres and load the base data.

    :return: tuple ``(facilities, aufzüge)`` — the Mongo ``facilities``
             collection handle and a DataFrame of elevator master data.
    """
    client = pymongo.MongoClient(MONGO_URL, maxPoolSize=50)
    dbeva = client.eva_dev
    facilities = dbeva['facilities']
    # Load elevator master data from Postgres.
    # NOTE(review): credentials are hard coded although POSTGRESS_URL exists — confirm.
    conn = psycopg2.connect(host='station-db', user='postgres', password='postgres', dbname='eva_dev', port=5432)
    try:
        cur = conn.cursor()
        try:
            cur.execute('select * from "elevator"')
            stammdaten_liste = cur.fetchall()
        finally:
            cur.close()  # was leaked before this fix
    finally:
        conn.close()  # was leaked before this fix
    aufzüge = pd.DataFrame(stammdaten_liste)
    columns = ['ID','Standort Equipment', 'TechnPlatzBezeichng', 'Equipment', 'Equipmentname', 'Ort', 'Wirtschaftseinheit',
               'Hersteller',
               'Baujahr', 'ANTRIEBSART', 'ANZAHL_HALTESTELLEN', 'ANZAHL_TUEREN_KABINE', 'ANZAHL_TUEREN_SCHACHT',
               'FOERDERGESCHWINDIGKEIT',
               'FOERDERHOEHE', 'LAGE', 'TRAGKRAFT', 'ERWEITERTE_ORTSANGABE', 'MIN_TUERBREITE', 'KABINENTIEFE',
               'KABINENBREITE',
               'KABINENHOEHE', 'TUERHOHE', 'FABRIKNUMMER', 'TUERART', 'GEOKOORDINATERECHTSWERT',
               'GEOKOORDINATEHOCHWERT', 'AUSFTEXTLICHEBESCHREIBUNG']
    aufzüge.columns = columns
    aufzüge = aufzüge.drop(0)  # drop first row — presumably a header artefact of the export (TODO confirm)
    aufzüge['Equipment'] = aufzüge['Equipment'].astype(str).astype('int64')
    aufzüge = aufzüge.drop_duplicates(['Equipment'])
    aufzüge = aufzüge.drop(columns=['ID'])
    aufzüge = aufzüge.fillna(value=np.nan)
    aufzüge['Baujahr'] = pd.to_numeric(aufzüge['Baujahr'], errors='coerce')
    print('Anzahl Aufzüge: ', len(aufzüge))
    return facilities, aufzüge
def createMap(givenType: str):
    """Split the latest facility states of *givenType* into map data.

    :return: tuple ``(inactive, active, geolocator)`` — two DataFrames with
             the currently inactive/active facilities plus a Nominatim
             geolocator used by callers to zoom onto a chosen place.
    """
    pipeline = [
        {'$match': {'type': givenType}},
        {'$group': {
            '_id': '$equipmentnumber',
            'description': {'$last': '$description'},
            'geocoordX': {'$last': '$geocoordX'},
            'geocoordY': {'$last': '$geocoordY'},
            'lastStateChangeDate': {'$last': '$datetime'},
            'state': {'$last': '$state'},
        }}
    ]
    latest = pd.DataFrame(list(facilities.aggregate(pipeline)))
    latest.columns = ['equipmentnumber', 'description', 'geocoordX', 'geocoordY', 'lastStateChangeDate', 'state']
    inactive = latest[latest['state'] == 'INACTIVE']
    active = latest[latest['state'] == 'ACTIVE']
    geolocator = Nominatim(user_agent="Eva_Dashboard")
    return inactive, active, geolocator
#####################################################################
################ Start of Code (create initial data) ################
#####################################################################
facilities, aufzüge = createInitialData()
############################################################
################## Elevators at a glance ###################
############################################################
elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
############################################################
################# Escalators at a glance ###################
############################################################
escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
####################################################
######## Reasons for inactivity: elevators #########
####################################################
elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
####################################################
######## Reasons for inactivity: escalators ########
####################################################
escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
####################################################
########## Routine to refresh the data #############
####################################################
def updateValues():
    """Rebuild every module-level data structure from the databases.

    Invoked periodically by the scheduler so the dashboard shows fresh data.
    """
    global facilities, aufzüge, elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN
    global escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN
    global elevator_key_array, elevator_value_array
    global escalator_key_array, escalator_value_array
    facilities, aufzüge = createInitialData()
    elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN = createOverview('ELEVATOR')
    escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN = createOverview('ESCALATOR')
    elevator_key_array, elevator_value_array = createReasonsForInactivity('ELEVATOR')
    escalator_key_array, escalator_value_array = createReasonsForInactivity('ESCALATOR')
# NOTE(review): the job runs every 5 minutes — the old comment claimed hourly.
scheduler = BlockingScheduler()
scheduler.add_job(updateValues, 'interval', minutes=5)
class UpdateValue(threading.Thread):
    # Background thread whose only job is to run the blocking scheduler so
    # the Dash server itself is not blocked by it.
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        scheduler.start()
print('Thread zum Updaten der Werte gestartet!')
tread = UpdateValue()
tread.start()
####################################
########## Did you know? ###########
####################################
# oldest elevator
aeltesteAufzug_datensatz = aufzüge[aufzüge['Baujahr'] == int(aufzüge['Baujahr'].min())]
aeltesteAufzug_ort = aeltesteAufzug_datensatz['Ort'].values[0]
aeltesteAufzug_jahr = int(aeltesteAufzug_datensatz['Baujahr'].values[0])
# station with the most elevators
uniquelist_orte = aufzüge['Ort'].unique()
df_anzahlProStation = pd.DataFrame(columns=['Ort', 'Anzahl_Aufzüge'])
for i in uniquelist_orte:
    tmp = len(aufzüge[aufzüge['Ort'] == i])
    df_anzahlProStation.loc[i] = i,tmp
df_anzahlProStation = df_anzahlProStation.sort_values(by=['Anzahl_Aufzüge'], ascending=False)
####################################
######## Aggregated values #########
####################################
# number of elevators per drive type
anzahl_seilAufzüge = len(aufzüge[aufzüge['ANTRIEBSART'] == 'SEIL'])
anzahl_hydraulischAufzüge = len(aufzüge[aufzüge['ANTRIEBSART'] == 'HYDRAULISCH'])
# top manufacturers by elevator count
uniquelist_hersteller = aufzüge['Hersteller'].unique()
df_anzahlAufzüge = pd.DataFrame(columns=['Hersteller', 'Anzahl_Aufzüge'])
for i in uniquelist_hersteller:
    tmp = len(aufzüge[aufzüge['Hersteller'] == i])
    df_anzahlAufzüge.loc[i] = i,tmp
df_anzahlAufzüge = df_anzahlAufzüge.sort_values(by=['Anzahl_Aufzüge'], ascending=False)
# total outages per elevator (count of INACTIVE records in Mongo)
df_anzahlAusfälle = pd.DataFrame(columns=['Aufzug_ID', 'Anzahl_Ausfälle'])
temp_count = facilities.aggregate( [
    { '$match': { 'state': 'INACTIVE' } },
    {
        '$group': {
            '_id': "$equipmentnumber",
            'count': { '$sum': 1 }
        }
    }
] )
for i in temp_count:
    df_anzahlAusfälle.loc[i['_id']] = i['_id'], i['count']
df_anzahlAusfälle = df_anzahlAusfälle.sort_values(by=['Anzahl_Ausfälle'], ascending=False)
# elevator with the most outages
aufzug_aggregiert, anzahl_aggregiert = df_anzahlAusfälle['Aufzug_ID'].iloc[0], df_anzahlAusfälle['Anzahl_Ausfälle'].iloc[0]
###############################
###### Elevator map data ######
###############################
inactive, active, geolocator = createMap('ELEVATOR')
#################################
###### Escalator map data #######
#################################
escalator_inactive, escalator_active, escalator_geolocator = createMap('ESCALATOR')
##################################
###### Escalator chart data ######
##################################
# last 14 days of escalator outages for the bar chart
graphDataEscalator = createGraphDataForEscalatorPage(14)
####################################
######          APP           ######
####################################
# NOTE(security): credentials are hard coded in the repository — keep
# passwords out of source control (use a file, env var or database instead).
VALID_USERNAME_PASSWORD_PAIRS = [
    ['Josh', '1234'],
    ['Sophie', '1234'],
    ['Phil', '1234'],
    ['Bart', '1234']
]
server = flask.Flask('EVA Dashboard')
app = dash.Dash('EVA Dashboard', server=server)
app.title = 'EVA Dashboard'
auth = dash_auth.BasicAuth(
    app,
    VALID_USERNAME_PASSWORD_PAIRS
)
# Since we're adding callbacks to elements that don't exist in app.layout,
# Dash would raise an exception to warn us.  The elements are added through
# callbacks here, so the warning can be suppressed.
app.config.suppress_callback_exceptions = True
###########################################################################
####################### Page 2: escalators ################################
###########################################################################
# Root layout: a URL router plus a container that the page callbacks fill.
app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content'),
    # hidden DataTable — presumably forces Dash to load the dash-table
    # JS/CSS bundle for tables rendered later via callbacks (TODO confirm)
    html.Div(dt.DataTable(rows=[{}]), style={'display': 'none'})
])
page_rolltreppen = html.Div(children=[
# Überschrift
html.Div([
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em'},
children='EVA Dashboard'),
]),
# Unterüberschrift
html.Div([
html.Hr(),
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em',
'color': '#000099'}, children='Der Rolltreppenwärter'),
dcc.Markdown('''
**Informationen rund um Rolltreppen in Bahnhöfen der DB Station & Service AG**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000099', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
]),
html.Div([
dcc.Link('Go to Page Aufzüge', href='/page_aufzuege')
], style={'text-align': 'left'}),
# Hauptteil
html.Div([
# Diagramme
html.Div([dcc.Graph(
id='diagramm_status',
figure={
'data': [
{'x': ['aktiv', 'inaktiv', 'keine Information'],
'y': [escalatorStateCountACTIVE, escalatorStateCountINACTIVE, escalatorStateCountUNKNOWN],
'type': 'bar', 'name': 'Rolltreppen',
'marker': dict(color=['green', 'red', 'orange'])
},
],
'layout': {
'title': 'Die Rolltreppen im Überblick',
'width': '35%',
'align': 'left'
}
}
)], style={'width': '35%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
'padding-left': 140, 'padding-bottom': 10}),
html.Div([dcc.Graph(
id='diagramm_inaktive',
figure={
'data': [
{'values': escalator_value_array, 'type': 'pie', 'name': 'GründeInaktivität',
'marker': dict(colors=['#DCDCDC', '#778899', '#C0C0C0']), 'labels': escalator_key_array
},
],
'layout': {
'title': 'Gründe für Inaktivität',
'width': '35%',
'align': 'right'
}
}
)],
style={'width': '40%', 'text-align': 'right', 'display': 'inline-block', 'padding-left': 10,
'padding-bottom': 10}),
html.Hr(),
html.Div([dcc.Graph(
figure=go.Figure(
data=[
go.Bar(
x=graphDataEscalator['Datum'],
y=graphDataEscalator['Anzahl_Ausfälle'],
name='Anzahl Ausfälle',
marker=go.Marker(
color='rgb(55, 83, 109)'
)
)
],
layout=go.Layout(
title='Anzahl der Ausfälle von Rolltreppen auf Tagesebene',
showlegend=True,
legend=go.Legend(
x=0,
y=1.0
),
margin=go.Margin(l=40, r=0, t=40, b=30)
)
),
style={'height': 300, 'width': 800},
id='escalator_mid_graph'
)], style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
'padding-left': 140, 'padding-bottom': 10}),
html.Hr(),
# unteres Drittel
html.Div([
# Titel
html.Div([
html.H3(style={'margin-right': 'auto', 'text-align': 'left',
'color': '#000099'},
children='Funktionieren die Rolltreppen an deiner Haltestelle? - Finde es heraus!'),
], style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
'padding-left': 140, 'padding-bottom': 10}), ## neu vorher gar nichts
# linker Teil ########################################## geändert alle ids + escalator
html.Div([
html.Div(['Stadt: '],
style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='escalator_stadt_input', value='Frankfurt', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Div(['Bundesland: '],
style={'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='escalator_bundesland_input', value='Hessen', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
dcc.RadioItems(
id='escalator_radio_button',
options=[
{'label': 'Aktive Rolltreppen', 'value': 'aktiv'},
{'label': 'Inaktive Rolltreppen', 'value': 'inaktiv'},
{'label': ' Alle Rolltreppen', 'value': 'beide'}
],
value='inaktiv', style={'margin-left': 10}
),
html.Iframe(id='escalator_karte', srcDoc=open('./projekt/Maps/map_inactive_elevators.html', 'r').read(),
style={'width': '90%', 'height': '30em'})
], style={'width': '49%', 'display': 'inline-block'}),
#style={'width': '60%', 'text-align': 'left', 'display': 'inline-block', 'padding-top': 10,
# 'padding-left': 140, 'padding-bottom': 10}),
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
# rechter Teil
html.Div([
html.Br(), html.Br(),
html.Div(['Rolltreppen-ID: '],
style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='rolltreppe_id_input', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Hr(),
# Tabelle
html.Div([
dt.DataTable(
rows=[{}],
columns=['Datum_Uhrzeit', 'Status', 'Erklärung des Status'],
editable=False,
row_selectable=False,
filterable=False,
sortable=False,
id='datatable-status-escalator',
selected_row_indices=[],
min_height=250
),
html.Br(),
])
], style={'width': '49%', 'display': 'inline-block', 'vertical-align': 'top'})
##########################################################################################################################################
##########################################################################################################################################
##########################################################################################################################################
], style={'margin-left': '20'}),
], style={'background-color': '#E6E6FA'}),
# Fußzeile
html.Div([], style={'height': 70}),
html.Hr(),
html.Div([
dcc.Markdown('''
**THM Friedberg**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}}),
dcc.Markdown('''
**Sophie Hagemann, Philipp Krenitz, Bartos Mosch, Joshua Prim**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
], style={'height': 70}),
], style={'marginTop': '2%', 'marginLeft': '5%', 'marginRight': '5%'})
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
page_aufzuege = html.Div(children=[
# Überschrift
html.Div([
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '15em'},
children='EVA Dashboard'),
]),
# Unterüberschrift
html.Div([
html.Hr(),
html.H1(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'center', 'width': '10em',
'color': '#000099'}, children='Der Aufzugwächter'),
dcc.Markdown('''
**Informationen rund um Aufzüge in Bahnhöfen der DB Station & Service AG**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000099', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
]),
html.Div([
dcc.Link('Go to Page Rolltreppen', href='/page-rolltreppen')
], style={'text-align':'right'}),
# Hauptteil
html.Div([
#Diagramme
html.Div([], style={'width':'10%', 'display': 'inline-block', 'vertical-align':'top'}),
html.Div([
html.Div([ dcc.Graph(
id='diagramm_status',
figure={
'data': [
{'x': ['aktiv', 'inaktiv', 'keine Information'], 'y': [elevatorStateCountACTIVE, elevatorStateCountINACTIVE, elevatorStateCountUNKNOWN], 'type': 'bar', 'name': 'Aufzüge',
'marker': dict(color=['green', 'red', 'orange'])
},
],
'layout': {
'title': 'Die Aufzüge im Überblick',
'width': '35%',
'align': 'left'
}
}
)], style={'width': '40%', 'display': 'inline-block', 'padding-top': 10, 'padding-bottom': 10}),
html.Div([ dcc.Graph(
id='diagramm_inaktive',
figure={
'data': [
{'values': elevator_value_array, 'type': 'pie', 'name': 'GründeInaktivität',
'marker': dict(colors=['#DCDCDC', '#778899', '#C0C0C0']), 'labels': elevator_key_array
},
],
'layout': {
'title': 'Gründe für Inaktivität',
'width': '35%',
'align': 'right'
}
}
)],
style={'width': '40%', 'display': 'inline-block', 'padding-left': 10, 'padding-bottom': 10}),
], style={'width':'90%', 'margin':'auto', 'display': 'inline-block', 'vertical-align':'top'}),
html.Hr(),
#mittleres Drittel: "Wusstest du schon?", aggregierte Werte etc.
html.Div([]),
html.Div([
html.H3(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'right',
'color': '#000099'}, children='Wusstest du schon?'),
html.Br(),
html.Div('Der älteste Aufzug ist aus dem Jahr {} steht in: {}'.format(aeltesteAufzug_jahr, aeltesteAufzug_ort)),
html.Div(id='aeltester_aufzug', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Div('Die Station mit den meisten Aufzügen ist: {} mit {} Aufzügen'.format(df_anzahlProStation['Ort'].iloc[0], df_anzahlProStation['Anzahl_Aufzüge'].iloc[0])),
#count wie oft eine 'stationnumber' vorkommt, kann dann die mit den meisten dann einer Stadt zugeordnet werden?
html.Div(id='meisten_aufzüge', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Div('Der Aufzug mit den meinste Ausfällen ist {} mit {} Ausfällen'.format(aufzug_aggregiert, anzahl_aggregiert)),
#count wie oft 'inactive' im Status vorkommt
html.Div(id='meiste_ausfälle', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
], style={'display': 'inline-block', 'text-align': 'right', 'width': '45%', 'margin-right':20, 'vertical-align':'top'}),
html.Hr(style={'width': 1, 'height': 200, 'display': 'inline-block'}),
html.Div([
html.H3(style={'margin-left': 'auto', 'margin-right': 'auto', 'text-align': 'left',
'color': '#000099'}, children='Aggregierte Werte'),
html.Div([
html.Div('Antriebsart:'),
html.Br(), html.Br(), html.Br(), html.Br(),
html.Div('Top Hersteller:'),
html.Br(),
], style={'display':'inline-block', 'width': '20%' }),
html.Div([
html.Div('HYDRAULISCH: {} Aufzüge'.format(anzahl_hydraulischAufzüge)),
html.Div('SEIL: {} Aufzüge'.format(anzahl_seilAufzüge)),
html.Br(), html.Br(), html.Br(),
html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[0], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[0])),
html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[1], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[1])),
html.Div('{}: {} Aufzüge'.format(df_anzahlAufzüge['Hersteller'].iloc[2], df_anzahlAufzüge['Anzahl_Aufzüge'].iloc[2]))
], style={'display':'inline-block', 'width': '80%', 'vertical-align':'top'})
], style={'display': 'inline-block', 'text-align': 'left', 'width': '50%', 'margin-left':20, 'vertical-align':'top'}),
html.Hr(),
#unteres Drittel
html.Div([
#Titel
html.Div([
html.H3(style={'margin-right': 'auto', 'text-align': 'left',
'color': '#000099'}, children='Funktionieren die Aufzüge an deiner Haltestelle? - Finde es heraus!'),
]),
#linker Teil
html.Div([
html.Div(['Stadt: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='stadt_input', value='Frankfurt', type='text', style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Div(['Bundesland: '], style={'margin-left': '15', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='bundesland_input', value='Hessen', type='text', style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
dcc.RadioItems(
id='radio_button',
options=[
{'label': 'Aktive Aufzüge', 'value': 'aktiv'},
{'label': 'Inaktive Aufzüge', 'value': 'inaktiv'},
{'label': ' Alle Aufzüge', 'value': 'beide'}
],
value='inaktiv', style={'margin-left':10}
),
html.Iframe(id='karte', srcDoc=open('./projekt/Maps/map_inactive_elevators.html', 'r').read(),
style={'width': '90%', 'height': '30em'})
], style={'width': '49%', 'display': 'inline-block'}),
#rechter Teil
html.Div([
html.Br(), html.Br(),
html.Div(['Aufzug-ID: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
dcc.Input(id='aufzug_id_input', type='text',
style={'margin-left': '5', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(),
html.Hr(),
html.Div([
html.Div(['Stationsname: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Beschreibung: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Hersteller: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Antriebsart: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(['Baujahr: '], style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
], style={'width': '20%', 'display': 'inline-block'}),
html.Div([
html.Div(id='stationsname', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='beschreibung', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='hersteller',style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='antrieb', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
html.Div(id='baujahr', style={'margin-left': 'auto', 'margin-right': 'auto', 'display': 'inline-block'}),
html.Br(), html.Br(),
], style={'width': '80%', 'display': 'inline-block'}),
# Tabelle
html.Div([
dt.DataTable(
rows=[{}],
columns=['Datum_Uhrzeit', 'Status' , 'Erklärung des Status'],
editable=False,
row_selectable=False,
filterable=False,
sortable=False,
id='datatable-status-elevator',
selected_row_indices=[],
min_height=250
),
html.Br(),
])
], style={'width': '49%','display': 'inline-block', 'vertical-align':'top'})
], style={'margin-left':'20'}),
], style = {'background-color': '#E6E6FA'}),
#Fußzeile
html.Div([ ], style={'height':70}),
html.Hr(),
html.Div([
dcc.Markdown('''
**THM Friedberg**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style': {'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}}),
dcc.Markdown('''
**Sophie Hagemann, Philipp Krenitz, Bartos Mosch, Joshua Prim**
'''.replace(' ', ''), className='beschreibung',
containerProps={
'style':{'maxWidth': '650px', 'color': '#000000', 'margin-left': 'auto',
'margin-right': 'auto', 'text-align': 'center'}})
], style={'height':70}),
], style={'marginTop': '2%', 'marginLeft': '5%', 'marginRight': '5%'})
########################################################################## #############################################################################################################################################
########################################################################## CALLBACKS #############################################################################################################################################
########################################################################## #############################################################################################################################################
# Callback Karte aktualisieren für Aufzüge
@app.callback(
    Output(component_id='karte', component_property='srcDoc'),
    [Input(component_id='stadt_input', component_property='value'),
     Input(component_id='bundesland_input', component_property='value'),
     Input(component_id='radio_button', component_property='value')]
)
def karte_aktualisieren(input_stadt, input_bland, radio_button):
    """Rebuild the elevator map for the requested city/state and filter.

    Args:
        input_stadt: city name entered by the user.
        input_bland: federal state entered by the user.
        radio_button: 'aktiv', 'inaktiv' or anything else ('beide').

    Returns:
        HTML of the freshly rendered folium map, or of a pre-rendered
        Frankfurt fallback map when geocoding/rendering fails.
    """
    def _read_html(path):
        # Use a context manager so the file handle is closed (the previous
        # open(...).read() pattern leaked handles).
        with open(path, 'r') as html_file:
            return html_file.read()

    def _add_markers(karte, df, farbe):
        # One marker per elevator with valid coordinates; rows whose
        # coordinates are NaN are skipped.
        for _, row in df.iterrows():
            if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
                continue
            tmp = str('ID: '+ str(row['equipmentnumber'])+ ' Beschreibung: '+ str(row['description']))
            folium.Marker([row['geocoordY'], row['geocoordX']],
                          popup=tmp,
                          icon=folium.Icon(color=farbe, icon='info-sign')).add_to(karte)

    try:
        input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
        location = geolocator.geocode(input_user)
        m = folium.Map(location=[location.latitude, location.longitude], zoom_start=10)
        if radio_button == 'aktiv':
            _add_markers(m, active, 'green')
            target = './projekt/Maps/map_active_elevators.html'
        elif radio_button == 'inaktiv':
            _add_markers(m, inactive, 'red')
            target = './projekt/Maps/map_inactive_elevators.html'
        else:
            # 'beide': show active (green) and inactive (red) elevators.
            _add_markers(m, active, 'green')
            _add_markers(m, inactive, 'red')
            target = './projekt/Maps/map_both_elevators.html'
        m.save(target)
        return _read_html(target)
    except Exception:
        # Geocoding or rendering failed: fall back to the pre-rendered
        # Frankfurt map. Narrowed from a bare ``except`` so SystemExit /
        # KeyboardInterrupt are no longer swallowed.
        if radio_button == 'aktiv':
            return _read_html('./projekt/Maps/map_active_elevators_FFM.html')
        return _read_html('./projekt/Maps/map_inactive_elevators_FFM.html')
######################################################################################################
# Callback Karte aktualisieren für Rolltreppen
@app.callback(
    Output(component_id='escalator_karte', component_property='srcDoc'),
    [Input(component_id='escalator_stadt_input', component_property='value'),
     Input(component_id='escalator_bundesland_input', component_property='value'),
     Input(component_id='escalator_radio_button', component_property='value')]
)
def karte_aktualisieren(input_stadt, input_bland, radio_button):
    """Rebuild the escalator map for the requested city/state and filter.

    NOTE(review): this function shadows the elevator callback of the same
    name defined above; registration appears to happen at decoration time
    so both callbacks still work — confirm, and consider renaming.

    Args:
        input_stadt: city name entered by the user.
        input_bland: federal state entered by the user.
        radio_button: 'aktiv', 'inaktiv' or anything else ('beide').

    Returns:
        HTML of the freshly rendered folium map, or of a pre-rendered
        Frankfurt fallback map when geocoding/rendering fails.
    """
    def _read_html(path):
        # Context manager instead of open(...).read() to avoid leaking
        # file handles.
        with open(path, 'r') as html_file:
            return html_file.read()

    def _add_markers(karte, df, farbe):
        # One marker per escalator with valid coordinates; NaN rows are
        # skipped.
        for _, row in df.iterrows():
            if str(row['geocoordY']) == 'nan' or str(row['geocoordX']) == 'nan':
                continue
            tmp = str('ID: ' + str(row['equipmentnumber']) + ' Beschreibung: ' + str(row['description']))
            folium.Marker([row['geocoordY'], row['geocoordX']],
                          popup=tmp,
                          icon=folium.Icon(color=farbe, icon='info-sign')).add_to(karte)

    try:
        input_user = str(input_stadt + ', ' + input_bland + ', Deutschland')
        location = escalator_geolocator.geocode(input_user)
        m = folium.Map(location=[location.latitude, location.longitude], zoom_start=10)
        if radio_button == 'aktiv':
            _add_markers(m, escalator_active, 'green')
            target = './projekt/Maps/map_active_escalators.html'
        elif radio_button == 'inaktiv':
            _add_markers(m, escalator_inactive, 'red')
            target = './projekt/Maps/map_inactive_escalators.html'
        else:
            # 'beide': show active (green) and inactive (red) escalators.
            _add_markers(m, escalator_active, 'green')
            _add_markers(m, escalator_inactive, 'red')
            target = './projekt/Maps/map_both_escalators.html'
        m.save(target)
        return _read_html(target)
    except Exception:
        # Fall back to the pre-rendered Frankfurt map; narrowed from a
        # bare ``except``.
        if radio_button == 'aktiv':
            return _read_html('./projekt/Maps/map_active_escalators_FFM.html')
        return _read_html('./projekt/Maps/map_inactive_escalators_FFM.html')
######################################################################################################
# Callback Stationsname aktualisieren
@app.callback(
    Output(component_id='stationsname', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def stationsname_aktualisieren(input_value):
    """Show the station name ('Ort') for the entered elevator ID.

    Returns an error message when the input is missing, non-numeric or
    refers to an unknown elevator.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['Ort'].values
        return attribute[0]
    except Exception:  # narrowed from bare except (keeps Ctrl-C working)
        return str('Aufzug existiert nicht!')
# Callback Hersteller aktualisieren
@app.callback(
    Output(component_id='hersteller', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def hersteller_aktualisieren(input_value):
    """Show the manufacturer ('Hersteller') for the entered elevator ID.

    Returns an empty string when the input is missing, non-numeric or
    refers to an unknown elevator.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['Hersteller'].values
        return attribute[0]
    except Exception:  # narrowed from bare except (keeps Ctrl-C working)
        return ''
# Callback Beschreibung aktualisieren
@app.callback(
    Output(component_id='beschreibung', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def beschreibung_aktualisieren(input_value):
    """Show the description ('Standort Equipment') for the elevator ID.

    Returns an empty string when the input is missing, non-numeric or
    refers to an unknown elevator.
    """
    try:
        # Renamed local from 'tmp3' to 'aufzug' for consistency with the
        # sibling callbacks.
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['Standort Equipment'].values
        return attribute[0]
    except Exception:  # narrowed from bare except (keeps Ctrl-C working)
        return ''
# Callback Antriebsart aktualisieren
@app.callback(
    Output(component_id='antrieb', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def anstriebsart_aktualisieren(input_value):
    # NOTE(review): function name has a typo ('anstriebsart' instead of
    # 'antriebsart'); kept to avoid changing the public name.
    """Show the drive type ('ANTRIEBSART') for the entered elevator ID.

    Returns an empty string when the input is missing, non-numeric or
    refers to an unknown elevator.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['ANTRIEBSART'].values
        return attribute[0]
    except Exception:  # narrowed from bare except (keeps Ctrl-C working)
        return ''
# Callback Baujahr aktualisieren
@app.callback(
    Output(component_id='baujahr', component_property='children'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def baujahr_aktualisieren(input_value):
    """Show the construction year ('Baujahr') for the entered elevator ID.

    Returns an empty string when the input is missing, non-numeric or
    refers to an unknown elevator.
    """
    try:
        aufzug = aufzüge[aufzüge['Equipment'] == int(input_value)]
        attribute = aufzug['Baujahr'].values
        return attribute[0]
    except Exception:  # narrowed from bare except (keeps Ctrl-C working)
        return ''
# Callback Tabelle aktualisieren
@app.callback(
    Output(component_id='datatable-status-elevator', component_property='rows'),
    [Input(component_id='aufzug_id_input', component_property='value')]
)
def elevator_tabelle_aktualisieren(input_value):
    """Fill the elevator status table for the entered elevator ID.

    Queries the ``facilities`` collection for all ELEVATOR status records
    of the equipment number and returns them newest-first as DataTable
    rows; on any failure an empty table is returned.
    """
    try:
        tabellen_input = facilities.find(
            {"type": "ELEVATOR", "equipmentnumber": int(input_value)})
        tabellen_input = pd.DataFrame(list(tabellen_input))
        tabellen_input = tabellen_input[['datetime', 'state', 'stateExplanation']]
        status_tabelle = tabellen_input[::-1]  # reverse: newest entries first
        status_tabelle.columns = ['Datum_Uhrzeit', 'Status', 'Erklärung des Status']
        return status_tabelle.to_dict('records')
    except Exception:  # narrowed from bare except (keeps Ctrl-C working)
        return [{}]
@app.callback(
    Output(component_id='datatable-status-escalator', component_property='rows'),
    [Input(component_id='rolltreppe_id_input', component_property='value')]
)
def escalator_tabelle_aktualisieren(input_value):
    """Fill the escalator status table for the entered escalator ID.

    Queries the ``facilities`` collection for all ESCALATOR status records
    of the equipment number and returns them newest-first as DataTable
    rows; on any failure an empty table is returned.
    """
    try:
        tabellen_input = facilities.find(
            {"type": "ESCALATOR", "equipmentnumber": int(input_value)})
        tabellen_input = pd.DataFrame(list(tabellen_input))
        tabellen_input = tabellen_input[['datetime', 'state', 'stateExplanation']]
        status_tabelle = tabellen_input[::-1]  # reverse: newest entries first
        status_tabelle.columns = ['Datum_Uhrzeit', 'Status', 'Erklärung des Status']
        return status_tabelle.to_dict('records')
    except Exception:  # narrowed from bare except (keeps Ctrl-C working)
        return [{}]
#Seite updaten für den Wechsel zwischen Aufzügen und Rolltreppen
@app.callback(dash.dependencies.Output('page-content', 'children'),
              [dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
    """Route the URL path to a page.

    '/page-rolltreppen' shows the escalator page; every other path
    (including '/page-aufzuege' and unknown paths) shows the elevator page.
    """
    if pathname == '/page-rolltreppen':
        return page_rolltreppen
    return page_aufzuege
# Abort under Python 2: the dashboard requires Python 3.
# NOTE(review): this check runs only after the whole module has already
# executed; consider moving it to the top of the file.
if sys.version_info < (3, 0):
    sys.exit("Dieses Programm erfordert Python 3.0 und höher")
# Start the Dash server; HOST_ID and PORT are defined elsewhere in the file.
app.run_server(debug=False, host=HOST_ID, port=PORT)
| 42.491915 | 232 | 0.500581 | 206 | 0.004113 | 0 | 0 | 11,694 | 0.233506 | 0 | 0 | 21,119 | 0.421705 |
614d70b8129724b5e053055af8277e59183bfa60 | 1,753 | py | Python | hardware/humidity_rev001/main.py | deniz195/json-sensor | c0e55d39bab3be6eca444273fb5436e1eafe8860 | [
"MIT"
] | 1 | 2018-10-30T11:22:33.000Z | 2018-10-30T11:22:33.000Z | hardware/humidity_rev001/main.py | deniz195/json-sensor | c0e55d39bab3be6eca444273fb5436e1eafe8860 | [
"MIT"
] | null | null | null | hardware/humidity_rev001/main.py | deniz195/json-sensor | c0e55d39bab3be6eca444273fb5436e1eafe8860 | [
"MIT"
] | null | null | null | # Trinket IO demo
# Welcome to CircuitPython 2.0.0 :)
import board
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogOut, AnalogIn
import touchio
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
import adafruit_dotstar as dotstar
import time
import neopixel
from busio import I2C
from board import SCL, SDA
import adafruit_sht31d
# Hardware setup: SHT31-D temperature/humidity sensor on the I2C bus.
i2c = I2C(SCL, SDA)
sensor = adafruit_sht31d.SHT31D(i2c)
# # Built in red LED
# led = DigitalInOut(board.D13)
# led.direction = Direction.OUTPUT
# # Analog input on D0
# analog1in = AnalogIn(board.D0)
# # Analog output on D1
# aout = AnalogOut(board.D1)
# # Digital input with pullup on D2
# button = DigitalInOut(board.D2)
# button.direction = Direction.INPUT
# button.pull = Pull.UP
# # Used if we do HID output, see below
# kbd = Keyboard()
######################### HELPERS ##############################
# # Helper to convert analog input to voltage
# def getVoltage(pin):
# return (pin.value * 3.3) / 65536
######################### MAIN LOOP ##############################
averages = 1  # number of samples accumulated before each report
# report_time = 0.0
# loop_time = report_time/averages
i = 0
temperature = 0
relative_humidity = 0
while True:
    # Accumulate readings until `averages` samples were taken.
    temperature += sensor.temperature
    relative_humidity += sensor.relative_humidity
    if i == averages - 1:
        temperature /= averages
        relative_humidity /= averages
        # Report the *averaged* values (bug fix: the output previously
        # re-read the sensor instead of using the computed averages) and
        # emit valid JSON (bug fix: no trailing comma before the brace).
        output = ""
        output += '{'
        output += ' "guid": "btrn-tmp-sensor-0001",'
        output += ' "temperature": %0.2f,' % temperature
        output += ' "relative_humidity": %0.2f' % relative_humidity
        output += '}'
        print(output)
        temperature = 0
        relative_humidity = 0
    i = (i + 1) % averages
    # time.sleep(loop_time)
| 20.869048 | 72 | 0.654307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 835 | 0.476326 |
614f16ac6495c02324492c1a2db96f18b5c7f6dc | 20,681 | py | Python | tests/lib/raw.py | Defense-Cyber-Crime-Center/dfvfs | da2ccbc4c989ced5ad651057bd8f5a4b18af6d37 | [
"Apache-2.0"
] | 2 | 2016-02-18T12:46:26.000Z | 2022-03-13T03:05:05.000Z | tests/lib/raw.py | Defense-Cyber-Crime-Center/dfvfs | da2ccbc4c989ced5ad651057bd8f5a4b18af6d37 | [
"Apache-2.0"
] | null | null | null | tests/lib/raw.py | Defense-Cyber-Crime-Center/dfvfs | da2ccbc4c989ced5ad651057bd8f5a4b18af6d37 | [
"Apache-2.0"
] | 5 | 2016-12-18T08:05:39.000Z | 2019-11-19T21:18:00.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the storage media RAW image support helper functions."""
import unittest
from dfvfs.lib import raw
from dfvfs.lib import definitions
from dfvfs.path import fake_path_spec
from dfvfs.path import raw_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import fake_file_system
class GlobRawFileTest(unittest.TestCase):
"""The unit test for the storage media RAW image file glob functionality."""
def _BuildFileFakeFileSystem(
self, segment_filenames, segment_file_path_specs):
"""Builds a fake file system containing storage media RAW segment files.
Args:
filename: the filename of the first segment file with extension.
segment_filenames: a list of segment filenames.
segment_file_path_specs: a list to store the segment file path
specifications in.
Returns:
The fake file system (instance of dvfvs.FakeFileSystem).
"""
resolver_context = context.Context()
file_system = fake_file_system.FakeFileSystem(resolver_context)
file_system.AddFileEntry(
u'/', file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)
for segment_filename in segment_filenames:
path = u'/{0:s}'.format(segment_filename)
file_system.AddFileEntry(path)
segment_file_path_specs.append(fake_path_spec.FakePathSpec(location=path))
return file_system
def testGlobRawSinglecExtension(self):
"""Test the glob function for a RAW single extension scheme."""
# Test single segment file: dd.
segment_filenames = [u'ímynd.dd']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/ímynd.dd')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test single segment file: dmg.
segment_filenames = [u'image.dmg']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.dmg')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test single segment file: img.
segment_filenames = [u'image.img']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.img')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test single segment file: raw.
segment_filenames = [u'image.raw']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.raw')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
def testGlobRawAlphabeticalExtension(self):
"""Test the glob function for a RAW alphabetical extension scheme."""
segment_filenames = [u'image.aaa']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
# Test single segment file: aaa.
path_spec = fake_path_spec.FakePathSpec(location=u'/image.aaa')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test non exiting segment file: aaa.
expected_segment_file_path_specs = []
path_spec = fake_path_spec.FakePathSpec(location=u'/bogus.aaa')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: aaa-aak.
segment_filenames = [
u'image.aaa', u'image.aab', u'image.aac', u'image.aad', u'image.aae',
u'image.aaf', u'image.aag', u'image.aah', u'image.aai', u'image.aaj',
u'image.aak']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.aaa')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: AAA-AAk.
segment_filenames = [
u'image.AAA', u'image.AAB', u'image.AAC', u'image.AAD', u'image.AAE',
u'image.AAF', u'image.AAG', u'image.AAH', u'image.AAI', u'image.AAJ',
u'image.AAK']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.AAA')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
def testGlobRawAlphabeticalSuffix(self):
"""Test the glob function for a RAW alphabetical suffix scheme."""
segment_filenames = [u'imageaaa']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
# Test single segment file: aaa.
path_spec = fake_path_spec.FakePathSpec(location=u'/imageaaa')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test non exiting segment file: aaa.
expected_segment_file_path_specs = []
path_spec = fake_path_spec.FakePathSpec(location=u'/bogusaaa')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: aaa-aak.
segment_filenames = [
u'imageaaa', u'imageaab', u'imageaac', u'imageaad', u'imageaae',
u'imageaaf', u'imageaag', u'imageaah', u'imageaai', u'imageaaj',
u'imageaak']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/imageaaa')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: AAA-AAk.
segment_filenames = [
u'imageAAA', u'imageAAB', u'imageAAC', u'imageAAD', u'imageAAE',
u'imageAAF', u'imageAAG', u'imageAAH', u'imageAAI', u'imageAAJ',
u'imageAAK']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/imageAAA')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
def testGlobRawNumericExtension(self):
"""Test the glob function for a RAW numeric extension scheme."""
segment_filenames = [u'image.000']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
# Test single segment file: 000.
path_spec = fake_path_spec.FakePathSpec(location=u'/image.000')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test non exiting segment file: 000.
expected_segment_file_path_specs = []
path_spec = fake_path_spec.FakePathSpec(location=u'/bogus.000')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: 000-010.
segment_filenames = [
u'image.000', u'image.001', u'image.002', u'image.003', u'image.004',
u'image.005', u'image.006', u'image.007', u'image.008', u'image.009',
u'image.010']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.000')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: 001-010.
segment_filenames = [
u'image.001', u'image.002', u'image.003', u'image.004', u'image.005',
u'image.006', u'image.007', u'image.008', u'image.009', u'image.010']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.001')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: 1-10.
segment_filenames = [
u'image.1', u'image.2', u'image.3', u'image.4', u'image.5',
u'image.6', u'image.7', u'image.8', u'image.9', u'image.10']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image.1')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
def testGlobRawNumericSuffix(self):
"""Test the glob function for a RAW numeric suffix scheme."""
segment_filenames = [u'image1']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
# Test single segment file: 000.
path_spec = fake_path_spec.FakePathSpec(location=u'/image1')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test non exiting segment file: 000.
expected_segment_file_path_specs = []
path_spec = fake_path_spec.FakePathSpec(location=u'/bogus1')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: 000-010.
segment_filenames = [
u'image0', u'image1', u'image2', u'image3', u'image4', u'image5',
u'image6', u'image7', u'image8', u'image9', u'image10']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image0')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: 1-10.
segment_filenames = [
u'image1', u'image2', u'image3', u'image4', u'image5',
u'image6', u'image7', u'image8', u'image9', u'image10']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image1')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
# Test multiple segment files: 001-010.
segment_filenames = [
u'image001', u'image002', u'image003', u'image004', u'image005',
u'image006', u'image007', u'image008', u'image009', u'image010']
expected_segment_file_path_specs = []
file_system = self._BuildFileFakeFileSystem(
segment_filenames, expected_segment_file_path_specs)
path_spec = fake_path_spec.FakePathSpec(location=u'/image001')
path_spec = raw_path_spec.RawPathSpec(parent=path_spec)
segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
self.assertEqual(
len(segment_file_path_specs), len(expected_segment_file_path_specs))
self.assertEqual(
segment_file_path_specs, expected_segment_file_path_specs)
def testGlobRawAsbExtension(self):
    """Test the glob function for a RAW ASB extension scheme."""

    def check_glob(file_system, location, expected):
        # Build a RAW path spec on top of a fake OS-layer path spec and
        # verify that globbing yields exactly the expected segment specs.
        parent_path_spec = fake_path_spec.FakePathSpec(location=location)
        test_path_spec = raw_path_spec.RawPathSpec(parent=parent_path_spec)
        globbed_path_specs = raw.RawGlobPathSpec(file_system, test_path_spec)
        self.assertEqual(len(globbed_path_specs), len(expected))
        self.assertEqual(globbed_path_specs, expected)

    # Test single segment file: 001.
    segment_filenames = [u'image001.asb']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)
    check_glob(
        file_system, u'/image001.asb', expected_segment_file_path_specs)

    # Test non-existing segment file: 001.
    check_glob(file_system, u'/bogus000.asb', [])

    # Test multiple segment files: 001-010.
    segment_filenames = [
        u'image001.asb', u'image002.asb', u'image003.asb', u'image004.asb',
        u'image005.asb', u'image006.asb', u'image007.asb', u'image008.asb',
        u'image009.asb', u'image010.asb']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)
    check_glob(
        file_system, u'/image001.asb', expected_segment_file_path_specs)
def testGlobRawVmdkExtension(self):
    """Test the glob function for a RAW VMDK extension scheme."""

    def check_glob(file_system, location, expected):
        # Build a RAW path spec on top of a fake OS-layer path spec and
        # verify that globbing yields exactly the expected segment specs.
        parent_path_spec = fake_path_spec.FakePathSpec(location=location)
        test_path_spec = raw_path_spec.RawPathSpec(parent=parent_path_spec)
        globbed_path_specs = raw.RawGlobPathSpec(file_system, test_path_spec)
        self.assertEqual(len(globbed_path_specs), len(expected))
        self.assertEqual(globbed_path_specs, expected)

    # Test single segment file: 001.
    segment_filenames = [u'image-f001.vmdk']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)
    check_glob(
        file_system, u'/image-f001.vmdk', expected_segment_file_path_specs)

    # Test non-existing segment file: 001.
    check_glob(file_system, u'/bogus-f000.vmdk', [])

    # Test multiple segment files: 001-010.
    segment_filenames = [
        u'image-f001.vmdk', u'image-f002.vmdk', u'image-f003.vmdk',
        u'image-f004.vmdk', u'image-f005.vmdk', u'image-f006.vmdk',
        u'image-f007.vmdk', u'image-f008.vmdk', u'image-f009.vmdk',
        u'image-f010.vmdk']
    expected_segment_file_path_specs = []
    file_system = self._BuildFileFakeFileSystem(
        segment_filenames, expected_segment_file_path_specs)
    check_glob(
        file_system, u'/image-f001.vmdk', expected_segment_file_path_specs)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 41.695565 | 80 | 0.748175 | 20,294 | 0.981192 | 0 | 0 | 0 | 0 | 0 | 0 | 4,054 | 0.196006 |
614f585cc17a886444d4e45f07eb1b61a7b2e1d3 | 2,062 | py | Python | src/tools/plot_training_log.py | motherapp/CenterNet | 8a8a80e21637fab1462916b1a555fd2b2bfb6988 | [
"MIT"
] | 6 | 2019-11-22T05:54:16.000Z | 2020-03-27T07:35:32.000Z | src/tools/plot_training_log.py | motherapp/CenterNet | 8a8a80e21637fab1462916b1a555fd2b2bfb6988 | [
"MIT"
] | null | null | null | src/tools/plot_training_log.py | motherapp/CenterNet | 8a8a80e21637fab1462916b1a555fd2b2bfb6988 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
import sys
import time
def get_train_loss(line):
splitted_line = line.split(" ")
return float(splitted_line[2]), float(splitted_line[4])
def get_val_loss(line):
splitted_line = line.split(" ")
if len(splitted_line)>19:
return float(splitted_line[19])
return None
def read(logfile):
    """Tail *logfile* forever, yielding accumulated loss curves.

    Generator that follows the log file like ``tail -f``: each time a new
    line appears it is parsed and the growing lists
    ``(train_epochs, train_losses, val_epochs, val_losses)`` are yielded.
    The same list objects are yielded every time (they are extended in
    place).  Never returns; at EOF it sleeps briefly and retries.
    """
    with open(logfile) as f:
        train_epochs, train_losses = [], []
        val_epochs, val_losses = [], []
        while True:
            line = f.readline()
            if not line:
                # At EOF: wait for the training process to append more.
                time.sleep(0.1)
                continue
            epoch, train_loss = get_train_loss(line)
            train_epochs.append(epoch)
            train_losses.append(train_loss)
            val_loss = get_val_loss(line)
            if val_loss is not None:
                val_epochs.append(epoch)
                val_losses.append(val_loss)
            yield train_epochs, train_losses, val_epochs, val_losses
def main():
    """Entry point: live-plot train/val loss curves from a training log.

    Usage: ``python plot_training_log.py [training log file]``
    """
    if len(sys.argv)<2:
        print("Usage: python %s [training log file]" % sys.argv[0])
        return
    log_file = sys.argv[1]
    fig, ax = plt.subplots()
    ax.set_xlabel('epochs')
    ax.set_ylabel('loss')
    ax.grid()
    # Two initially-empty line artists; their data is replaced in animate().
    train_line, = ax.plot([], [])
    val_line, = ax.plot([], [])
    train_line.set_label("Train")
    val_line.set_label("Val")
    ax.legend()
    def animate(values):
        # values is the (train_x, train_y, val_x, val_y) tuple yielded by
        # read(); update both curves and rescale the axes to fit the data.
        train_x, train_y, val_x, val_y = values
        print(train_x[-1], train_y[-1])
        train_line.set_data(train_x, train_y)
        val_line.set_data(val_x, val_y)
        ax.set_xlim([train_x[0]-1, train_x[-1]])
        max_y = max(train_y)
        min_y = min(train_y)
        if val_y:
            max_y = max(max_y, max(val_y))
            min_y = min(min_y, min(val_y))
        # Clamp the upper bound so that very large early losses do not
        # squash the rest of the curve.
        max_y = min(max_y, 10)
        ax.set_ylim([min_y, max_y])
    # Keep a reference to the animation object; if it is garbage-collected
    # the periodic updates stop.
    ani = FuncAnimation(fig, animate, frames=read(log_file), interval=1)
    plt.show()
# Invoke the plotter only when run as a script.
if __name__ == '__main__':
    main()
| 25.775 | 72 | 0.567895 | 0 | 0 | 668 | 0.323957 | 0 | 0 | 0 | 0 | 80 | 0.038797 |
61507394f52ac1e05ac449ce9da47bed8f214087 | 50,220 | py | Python | pyxb/binding/content.py | thorstenb/pyxb | 634e86f61dfb73a2900f32fc3d819e9c25365a49 | [
"Apache-2.0"
] | null | null | null | pyxb/binding/content.py | thorstenb/pyxb | 634e86f61dfb73a2900f32fc3d819e9c25365a49 | [
"Apache-2.0"
] | null | null | null | pyxb/binding/content.py | thorstenb/pyxb | 634e86f61dfb73a2900f32fc3d819e9c25365a49 | [
"Apache-2.0"
] | null | null | null | # Copyright 2009, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Helper classes that maintain the content model of XMLSchema in the binding
classes.
L{AttributeUse} and L{ElementUse} record information associated with a binding
class, for example the types of values, the original XML QName or NCName, and
the Python field in which the values are stored. They also provide the
low-level interface to set and get the corresponding values in a binding
instance.
@todo: Document new content model
L{Wildcard} holds content-related information used in the content model.
"""
import pyxb
import pyxb.namespace
import basis
import xml.dom
class ContentState_mixin (pyxb.cscRoot):
    """Declares methods used by classes that hold state while validating a
    content model component.

    This is an abstract interface: the validation methods raise
    C{NotImplementedError} and must be overridden by concrete state
    classes."""

    def accepts (self, particle_state, instance, value, element_use):
        """Determine whether the provided value can be added to the instance
        without violating state validation.

        This method must not throw any non-catastrophic exceptions; general
        failures should be transformed to a C{False} return value.

        @param particle_state: The L{ParticleState} instance serving as the
        parent to this state.  The implementation must inform that state when
        the proposed value completes the content model.

        @param instance: An instance of a subclass of
        {basis.complexTypeDefinition}, into which the provided value will be
        stored if it is consistent with the current model state.

        @param value: The value that is being validated against the state.

        @param element_use: An optional L{ElementUse} instance that specifies
        the element to which the value corresponds.  This will be available
        when the value is extracted by parsing a document, but will be absent
        if the value was passed as a constructor positional parameter.

        @return: C{True} if the value was successfully matched against the
        state.  C{False} if the value did not match against the state."""
        # NotImplementedError (rather than a bare Exception) marks the
        # abstract intent; callers catching Exception are unaffected.
        raise NotImplementedError('ContentState_mixin.accepts not implemented in %s' % (type(self),))

    def notifyFailure (self, sub_state, particle_ok):
        """Invoked by a sub-state to indicate that validation cannot proceed
        in the current state.

        Normally this is used when an intermediate content model must reset
        itself to permit alternative models to be evaluated.

        @param sub_state: the state that was unable to accept a value

        @param particle_ok: C{True} if the particle that rejected the value is
        in an accepting terminal state
        """
        raise NotImplementedError('ContentState_mixin.notifyFailure not implemented in %s' % (type(self),))

    def _verifyComplete (self, parent_particle_state):
        """Determine whether the deep state is complete without further elements.

        No-op for non-aggregate state.  For aggregate state, all contained
        particles should be checked to see whether the overall model can be
        satisfied if no additional elements are provided.

        This method does not have a meaningful return value; violations of the
        content model should produce the corresponding exception (generally,
        L{MissingContentError}).

        @param parent_particle_state: the L{ParticleState} for which this state
        is the term.
        """
        # Default: non-aggregate states are trivially complete.
        pass
class ContentModel_mixin (pyxb.cscRoot):
    """Declares methods used by classes representing content model components.

    Abstract methods raise C{NotImplementedError}; the C{_validateClone*}
    and C{_validateReplaceResults} helpers are shared concrete utilities."""

    def newState (self, parent_particle_state):
        """Return a L{ContentState_mixin} instance that will validate the
        state of this model component.

        @param parent_particle_state: The L{ParticleState} instance for which
        this instance is a term.  C{None} for the top content model of a
        complex data type.
        """
        raise NotImplementedError('ContentModel_mixin.newState not implemented in %s' % (type(self),))

    def _validateCloneSymbolSet (self, symbol_set_im):
        """Create a mutable copy of the symbol set.

        The top-level map is copied, as are the lists of values.  The values
        within the lists are unchanged, as validation does not affect them."""
        rv = symbol_set_im.copy()
        for (k, v) in rv.items():
            # Shallow-copy each value list so callers can pop from the clone
            # without disturbing the immutable original.
            rv[k] = v[:]
        return rv

    def _validateCloneOutputSequence (self, output_sequence_im):
        """Return a shallow copy of the output sequence list."""
        return output_sequence_im[:]

    def _validateReplaceResults (self, symbol_set_out, symbol_set_new, output_sequence_out, output_sequence_new):
        """In-place update of symbol set and output sequence structures.

        Use this to copy from temporary mutable structures updated by local
        validation into the structures that were passed in once the validation
        has succeeded."""
        symbol_set_out.clear()
        symbol_set_out.update(symbol_set_new)
        output_sequence_out[:] = output_sequence_new

    def _validate (self, symbol_set, output_sequence):
        """Determine whether an output sequence created from the symbols can
        be made consistent with the model.

        The symbol set represents letters in an alphabet; the output sequence
        orders those letters in a way that satisfies the regular expression
        expressed in the model.  Both are changed as a result of a successful
        validation; both remain unchanged if the validation failed.  In
        recursing, implementers may assume that C{output_sequence} is
        monotonic: its length remains unchanged after an invocation iff the
        symbol set also remains unchanged.  The L{_validateCloneSymbolSet},
        L{_validateCloneOutputSequence}, and L{_validateReplaceResults}
        methods are available to help preserve this behavior.

        @param symbol_set: A map from L{ElementUse} instances to a list of
        values.  The order of the values corresponds to the order in which
        they should appear.  A key of C{None} identifies values that are
        stored as wildcard elements.  Values are removed from the lists as
        they are used; when the last value of a list is removed, its key is
        removed from the map.  Thus an empty dictionary is the indicator that
        no more symbols are available.

        @param output_sequence: A mutable list to which should be appended
        tuples C{( eu, val )} where C{eu} is an L{ElementUse} from the set of
        symbol keys, and C{val} is a value from the corresponding list.  A
        document generated by producing the elements in the given order is
        expected to validate.

        @return: C{True} iff the model validates.  C{symbol_set} and
        C{output_path} must be unmodified if returns C{False}.
        """
        # Message corrected: it previously named ContentState_mixin, which
        # is the wrong class for this method.
        raise NotImplementedError('ContentModel_mixin._validate not implemented in %s' % (type(self),))
class AttributeUse (pyxb.cscRoot):
    """A helper class that encapsulates everything we need to know
    about the way an attribute is used within a binding class.

    Attributes are stored internally as pairs C{(provided, value)}, where
    C{provided} is a boolean indicating whether a value for the attribute was
    provided externally, and C{value} is an instance of the attribute
    datatype.  The C{provided} flag is used to determine whether an XML
    attribute should be added to a created DOM node when generating the XML
    corresponding to a binding instance.
    """

    __name = None            # ExpandedName of the attribute
    __id = None              # Identifier used for this attribute within the owning class
    __key = None             # Private attribute used in instances to hold the attribute value
    __dataType = None        # PST datatype
    __unicodeDefault = None  # Default value as a unicode string, or None
    __defaultValue = None    # Default value as an instance of datatype, or None
    __fixed = False          # If True, value cannot be changed
    __required = False       # If True, attribute must appear
    __prohibited = False     # If True, attribute must not appear

    def __init__ (self, name, id, key, data_type, unicode_default=None, fixed=False, required=False, prohibited=False):
        """Create an AttributeUse instance.

        @param name: The name by which the attribute is referenced in the XML
        @type name: L{pyxb.namespace.ExpandedName}

        @param id: The Python identifier for the attribute within the
        containing L{pyxb.basis.binding.complexTypeDefinition}.  This is a
        public identifier, derived from the local part of the attribute name
        and modified to be unique, and is usually used as the name of the
        attribute's inspector method.
        @type id: C{str}

        @param key: The string used to store the attribute
        value in the dictionary of the containing
        L{pyxb.basis.binding.complexTypeDefinition}.  This is mangled so
        that it is unique among and is treated as a Python private member.
        @type key: C{str}

        @param data_type: The class reference to the subclass of
        L{pyxb.binding.basis.simpleTypeDefinition} of which the attribute
        values must be instances.
        @type data_type: C{type}

        @keyword unicode_default: The default value of the attribute as
        specified in the schema, or None if there is no default attribute
        value.  The default value (of the keyword) is C{None}.
        @type unicode_default: C{unicode}

        @keyword fixed: If C{True}, indicates that the attribute, if present,
        must have the value that was given via C{unicode_default}.  The
        default value is C{False}.
        @type fixed: C{bool}

        @keyword required: If C{True}, indicates that the attribute must appear
        in the DOM node used to create an instance of the corresponding
        L{pyxb.binding.basis.complexTypeDefinition}.  The default value is
        C{False}.  No more than one of L{required} and L{prohibited} should be
        assigned C{True}.
        @type required: C{bool}

        @keyword prohibited: If C{True}, indicates that the attribute must
        B{not} appear in the DOM node used to create an instance of the
        corresponding L{pyxb.binding.basis.complexTypeDefinition}.  The
        default value is C{False}.  No more than one of L{required} and
        L{prohibited} should be assigned C{True}.
        @type prohibited: C{bool}

        @raise pyxb.BadTypeValueError: the L{unicode_default} cannot be used
        to initialize an instance of L{data_type}
        """
        self.__name = name
        self.__id = id
        self.__key = key
        self.__dataType = data_type
        self.__unicodeDefault = unicode_default
        if self.__unicodeDefault is not None:
            # Convert the lexical default through the datatype factory; this
            # both validates it and caches the typed default value.
            self.__defaultValue = self.__dataType.Factory(self.__unicodeDefault)
        self.__fixed = fixed
        self.__required = required
        self.__prohibited = prohibited

    def name (self):
        """The expanded name of the element.

        @rtype: L{pyxb.namespace.ExpandedName}
        """
        return self.__name

    def defaultValue (self):
        """The default value of the attribute."""
        return self.__defaultValue

    def fixed (self):
        """C{True} iff the value of the attribute cannot be changed."""
        return self.__fixed

    def required (self):
        """Return True iff the attribute must be assigned a value."""
        return self.__required

    def prohibited (self):
        """Return True iff the attribute must not be assigned a value."""
        return self.__prohibited

    def provided (self, ctd_instance):
        """Return True iff the given instance has been explicitly given a
        value for the attribute.

        This is used for things like only generating an XML attribute
        assignment when a value was originally given (even if that value
        happens to be the default).
        """
        return self.__getProvided(ctd_instance)

    def id (self):
        """Tag used within Python code for the attribute.

        This is not used directly in the default code generation template."""
        return self.__id

    def key (self):
        """String used as key within object dictionary when storing attribute value."""
        return self.__key

    def dataType (self):
        """The subclass of L{pyxb.binding.basis.simpleTypeDefinition} of which any attribute value must be an instance."""
        return self.__dataType

    def __getValue (self, ctd_instance):
        """Retrieve the value information for this attribute in a binding instance.

        @param ctd_instance: The instance object from which the attribute is to be retrieved.
        @type ctd_instance: subclass of L{pyxb.binding.basis.complexTypeDefinition}
        @return: C{(provided, value)} where C{provided} is a C{bool} and
        C{value} is C{None} or an instance of the attribute's datatype.
        """
        return getattr(ctd_instance, self.__key, (False, None))

    def __getProvided (self, ctd_instance):
        # Convenience accessor for just the "provided" flag.
        return self.__getValue(ctd_instance)[0]

    def value (self, ctd_instance):
        """Get the value of the attribute from the instance."""
        return self.__getValue(ctd_instance)[1]

    def __setValue (self, ctd_instance, new_value, provided):
        # Store the (provided, value) pair under the mangled key.
        return setattr(ctd_instance, self.__key, (provided, new_value))

    def reset (self, ctd_instance):
        """Set the value of the attribute in the given instance to be its
        default value, and mark that it has not been provided."""
        self.__setValue(ctd_instance, self.__defaultValue, False)

    def addDOMAttribute (self, dom_support, ctd_instance, element):
        """If this attribute has been set, add the corresponding attribute to the DOM element."""
        ( provided, value ) = self.__getValue(ctd_instance)
        if provided:
            assert value is not None
            dom_support.addAttribute(element, self.__name, value.xsdLiteral())
        return self

    def validate (self, ctd_instance):
        """Validate the instance's attribute value against the use.

        Checks prohibited/required constraints and that the stored value is
        a valid instance of the datatype satisfying its facet constraints.

        @return: C{True} if validation succeeds; otherwise raises a
        L{pyxb.BindingValidationError} subclass."""
        (provided, value) = self.__getValue(ctd_instance)
        if value is not None:
            if self.__prohibited:
                raise pyxb.ProhibitedAttributeError('Value given for prohibited attribute %s' % (self.__name,))
            if self.__required and not provided:
                # Only a fixed attribute can have a value without having been
                # explicitly provided.
                assert self.__fixed
                raise pyxb.MissingAttributeError('Fixed required attribute %s was never set' % (self.__name,))
            if not self.__dataType._IsValidValue(value):
                raise pyxb.BindingValidationError('Attribute %s value type %s not %s' % (self.__name, type(value), self.__dataType))
            self.__dataType.XsdConstraintsOK(value)
        else:
            if self.__required:
                raise pyxb.MissingAttributeError('Required attribute %s does not have a value' % (self.__name,))
        return True

    def set (self, ctd_instance, new_value):
        """Set the value of the attribute.

        This validates the value against the data type, creating a new instance if necessary.

        @param ctd_instance: The binding instance for which the attribute
        value is to be set
        @type ctd_instance: subclass of L{pyxb.binding.basis.complexTypeDefinition}
        @param new_value: The value for the attribute
        @type new_value: An C{xml.dom.Node} instance, or any value that is
        permitted as the input parameter to the C{Factory} method of the
        attribute's datatype.
        """
        provided = True
        if isinstance(new_value, xml.dom.Node):
            # Extract the lexical value from the DOM node; fall back to the
            # schema default when the attribute is absent.
            unicode_value = self.__name.getAttribute(new_value)
            if unicode_value is None:
                if self.__required:
                    raise pyxb.MissingAttributeError('Required attribute %s from %s not found' % (self.__name, ctd_instance._ExpandedName or type(ctd_instance)))
                provided = False
                unicode_value = self.__unicodeDefault
            if unicode_value is None:
                # Must be optional and absent
                provided = False
                new_value = None
            else:
                new_value = unicode_value
        else:
            assert new_value is not None
        if self.__prohibited:
            raise pyxb.ProhibitedAttributeError('Value given for prohibited attribute %s' % (self.__name,))
        if (new_value is not None) and (not isinstance(new_value, self.__dataType)):
            # Coerce through the datatype factory; raises BadTypeValueError
            # if the value cannot be converted.
            new_value = self.__dataType.Factory(new_value)
        if self.__fixed and (new_value != self.__defaultValue):
            raise pyxb.AttributeChangeError('Attempt to change value of fixed attribute %s' % (self.__name,))
        self.__setValue(ctd_instance, new_value, provided)
        return new_value

    def _description (self, name_only=False, user_documentation=True):
        """Return a human-readable description of this attribute use."""
        if name_only:
            return str(self.__name)
        assert issubclass(self.__dataType, basis._TypeBinding_mixin)
        desc = [ str(self.__id), ': ', str(self.__name), ' (', self.__dataType._description(name_only=True, user_documentation=False), '), ' ]
        if self.__required:
            desc.append('required')
        elif self.__prohibited:
            desc.append('prohibited')
        else:
            desc.append('optional')
        if self.__defaultValue is not None:
            desc.append(', ')
            if self.__fixed:
                desc.append('fixed')
            else:
                desc.append('default')
            desc.extend(['=', self.__unicodeDefault ])
        return ''.join(desc)
class ElementUse (ContentState_mixin, ContentModel_mixin):
    """Aggregate the information relevant to an element of a complex type.

    This includes the L{original tag name<name>}, the spelling of L{the
    corresponding object in Python <id>}, an L{indicator<isPlural>} of whether
    multiple instances might be associated with the field, and other relevant
    information..
    """

    def name (self):
        """The expanded name of the element.

        @rtype: L{pyxb.namespace.ExpandedName}
        """
        return self.__name
    __name = None

    def id (self):
        """The string name of the binding class field used to hold the element
        values.

        This is the user-visible name, and excepting disambiguation will be
        equal to the local name of the element."""
        return self.__id
    __id = None

    # The dictionary key used to identify the value of the element.  The value
    # is the same as that used for private member variables in the binding
    # class within which the element declaration occurred.
    __key = None

    def elementBinding (self):
        """The L{basis.element} instance identifying the information
        associated with the element declaration.
        """
        return self.__elementBinding

    def _setElementBinding (self, element_binding):
        # Set the element binding for this use.  Only visible at all because
        # we have to define the uses before the element instances have been
        # created.
        self.__elementBinding = element_binding
        return self
    __elementBinding = None

    def isPlural (self):
        """True iff the content model indicates that more than one element
        can legitimately belong to this use.

        This includes elements in particles with maxOccurs greater than one,
        and when multiple elements with the same NCName are declared in the
        same type.
        """
        return self.__isPlural
    __isPlural = False

    def __init__ (self, name, id, key, is_plural, element_binding=None):
        """Create an ElementUse instance.

        @param name: The name by which the element is referenced in the XML
        @type name: L{pyxb.namespace.ExpandedName}

        @param id: The Python name for the element within the containing
        L{pyxb.basis.binding.complexTypeDefinition}.  This is a public
        identifier, albeit modified to be unique, and is usually used as the
        name of the element's inspector method or property.
        @type id: C{str}

        @param key: The string used to store the element
        value in the dictionary of the containing
        L{pyxb.basis.binding.complexTypeDefinition}.  This is mangled so
        that it is unique among and is treated as a Python private member.
        @type key: C{str}

        @param is_plural: If C{True}, documents for the corresponding type may
        have multiple instances of this element.  As a consequence, the value
        of the element will be a list.  If C{False}, the value will be C{None}
        if the element is absent, and a reference to an instance of the type
        identified by L{pyxb.binding.basis.element.typeDefinition} if present.
        @type is_plural: C{bool}

        @param element_binding: Reference to the class that serves as the
        binding for the element.
        """
        self.__name = name
        self.__id = id
        self.__key = key
        self.__isPlural = is_plural
        self.__elementBinding = element_binding

    def defaultValue (self):
        """Return the default value for this element.

        @todo: Right now, this returns C{None} for non-plural and an empty
        list for plural elements.  Need to support schema-specified default
        values for simple-type content.
        """
        if self.isPlural():
            return []
        return None

    def value (self, ctd_instance):
        """Return the value for this use within the given instance."""
        return getattr(ctd_instance, self.__key, self.defaultValue())

    def reset (self, ctd_instance):
        """Set the value for this use in the given element to its default."""
        setattr(ctd_instance, self.__key, self.defaultValue())
        return self

    def set (self, ctd_instance, value):
        """Set the value of this element in the given instance."""
        if value is None:
            return self.reset(ctd_instance)
        assert self.__elementBinding is not None
        if basis._TypeBinding_mixin._PerformValidation:
            # Coerce the value to one compatible with the element binding;
            # raises BadTypeValueError on mismatch.
            value = self.__elementBinding.compatibleValue(value, is_plural=self.isPlural())
        setattr(ctd_instance, self.__key, value)
        # Record the value in document order for mixed-content generation.
        ctd_instance._addContent(value, self.__elementBinding)
        return self

    def setOrAppend (self, ctd_instance, value):
        """Invoke either L{set} or L{append}, depending on whether the element
        use is plural."""
        if self.isPlural():
            return self.append(ctd_instance, value)
        return self.set(ctd_instance, value)

    def append (self, ctd_instance, value):
        """Add the given value as another instance of this element within the binding instance.
        @raise pyxb.StructuralBadDocumentError: invoked on an element use that is not plural
        """
        if not self.isPlural():
            raise pyxb.StructuralBadDocumentError('Cannot append to element with non-plural multiplicity')
        values = self.value(ctd_instance)
        if basis._TypeBinding_mixin._PerformValidation:
            value = self.__elementBinding.compatibleValue(value)
        values.append(value)
        ctd_instance._addContent(value, self.__elementBinding)
        return values

    def toDOM (self, dom_support, parent, value):
        """Convert the given value to DOM as an instance of this element.

        @param dom_support: Helper for managing DOM properties
        @type dom_support: L{pyxb.utils.domutils.BindingDOMSupport}
        @param parent: The DOM node within which this element should be generated.
        @type parent: C{xml.dom.Element}
        @param value: The content for this element.  May be text (if the
        element allows mixed content), or an instance of
        L{basis._TypeBinding_mixin}.
        """
        if isinstance(value, basis._TypeBinding_mixin):
            element_binding = self.__elementBinding
            # Substitution group member: emit under the substituting
            # element's own name rather than this use's name.
            if value._substitutesFor(element_binding):
                element_binding = value._element()
            assert element_binding is not None
            if element_binding.abstract():
                raise pyxb.DOMGenerationError('Element %s is abstract but content %s not associated with substitution group member' % (self.name(), value))
            element = dom_support.createChildElement(element_binding.name(), parent)
            elt_type = element_binding.typeDefinition()
            val_type = type(value)
            if isinstance(value, basis.complexTypeDefinition):
                assert isinstance(value, elt_type)
            else:
                if isinstance(value, basis.STD_union) and isinstance(value, elt_type._MemberTypes):
                    val_type = elt_type
            if dom_support.requireXSIType() or elt_type._RequireXSIType(val_type):
                # Emit an xsi:type attribute so the reader can recover the
                # dynamic type of the content.
                val_type_qname = value._ExpandedName.localName()
                tns_prefix = dom_support.namespacePrefix(value._ExpandedName.namespace())
                if tns_prefix is not None:
                    val_type_qname = '%s:%s' % (tns_prefix, val_type_qname)
                dom_support.addAttribute(element, pyxb.namespace.XMLSchema_instance.createExpandedName('type'), val_type_qname)
            value._toDOM_csc(dom_support, element)
        elif isinstance(value, (str, unicode)):
            element = dom_support.createChildElement(self.name(), parent)
            element.appendChild(dom_support.document().createTextNode(value))
        else:
            raise pyxb.LogicError('toDOM with unrecognized value type %s: %s' % (type(value), value))

    def _description (self, name_only=False, user_documentation=True):
        """Return a human-readable description of this element use."""
        if name_only:
            return str(self.__name)
        desc = [ str(self.__id), ': ']
        if self.isPlural():
            desc.append('MULTIPLE ')
        desc.append(self.elementBinding()._description(user_documentation=user_documentation))
        return ''.join(desc)

    def newState (self, parent_particle_state):
        """Implement parent class method."""
        # An element use is its own (stateless) validation state.
        return self

    def accepts (self, particle_state, instance, value, element_use):
        # Implement ContentState_mixin.accepts; bump the particle count on a
        # successful match.
        rv = self._accepts(instance, value, element_use)
        if rv:
            particle_state.incrementCount()
        return rv

    def _accepts (self, instance, value, element_use):
        if element_use == self:
            self.setOrAppend(instance, value)
            return True
        if element_use is not None:
            # If there's a known element, and it's not this one, the content
            # does not match.  This assumes we handled xsi:type and
            # substitution groups earlier, which may be true.
            return False
        if isinstance(value, xml.dom.Node):
            # If we haven't been able to identify an element for this before,
            # then we don't recognize it, and will have to treat it as a
            # wildcard.
            return False
        try:
            self.setOrAppend(instance, self.__elementBinding.compatibleValue(value, _convert_string_values=False))
            return True
        except pyxb.BadTypeValueError, e:
            # Value is not compatible with this element; report no match.
            pass
        #print '%s %s %s in %s' % (instance, value, element_use, self)
        return False

    def _validate (self, symbol_set, output_sequence):
        # Implement ContentModel_mixin._validate: consume the next pending
        # value for this use, if any, and record it in the output sequence.
        values = symbol_set.get(self)
        #print 'values %s' % (values,)
        if values is None:
            return False
        used = values.pop(0)
        output_sequence.append( (self, used) )
        if 0 == len(values):
            # Last value consumed: remove the key so an empty map signals
            # exhaustion.
            del symbol_set[self]
        return True

    def __str__ (self):
        return 'EU.%s@%x' % (self.__name, id(self))
class Wildcard (ContentState_mixin, ContentModel_mixin):
    """Placeholder for wildcard objects."""

    NC_any = '##any'            #<<< The namespace constraint "##any"
    NC_not = '##other'          #<<< A flag indicating constraint "##other"
    NC_targetNamespace = '##targetNamespace'
    NC_local = '##local'

    __namespaceConstraint = None
    def namespaceConstraint (self):
        """A constraint on the namespace for the wildcard.

        Valid values are:

         - L{Wildcard.NC_any}
         - A tuple ( L{Wildcard.NC_not}, a L{namespace<pyxb.namespace.Namespace>} instance )
         - set(of L{namespace<pyxb.namespace.Namespace>} instances)

        Namespaces are represented by their URIs.  Absence is
        represented by None, both in the "not" pair and in the set.
        """
        return self.__namespaceConstraint

    PC_skip = 'skip'            #<<< No constraint is applied
    PC_lax = 'lax'              #<<< Validate against available uniquely determined declaration
    PC_strict = 'strict'        #<<< Validate against declaration or xsi:type which must be available

    # One of PC_*
    __processContents = None
    def processContents (self): return self.__processContents

    def __normalizeNamespace (self, nsv):
        # Accept None, a URI string, or a Namespace instance; return the
        # corresponding Namespace instance (or None).
        if nsv is None:
            return None
        if isinstance(nsv, basestring):
            nsv = pyxb.namespace.NamespaceForURI(nsv, create_if_missing=True)
        assert isinstance(nsv, pyxb.namespace.Namespace), 'unexpected non-namespace %s' % (nsv,)
        return nsv

    def __init__ (self, *args, **kw):
        # Namespace constraint and process contents are required parameters.
        nsc = kw['namespace_constraint']
        if isinstance(nsc, tuple):
            # ( NC_not, namespace ): normalize the excluded namespace.
            nsc = (nsc[0], self.__normalizeNamespace(nsc[1]))
        elif isinstance(nsc, set):
            nsc = set([ self.__normalizeNamespace(_uri) for _uri in nsc ])
        self.__namespaceConstraint = nsc
        self.__processContents = kw['process_contents']

    def matches (self, instance, value):
        """Return True iff the value is a valid match against this wildcard.

        Validation per U{Wildcard allows Namespace Name<http://www.w3.org/TR/xmlschema-1/#cvc-wildcard-namespace>}.
        """
        # Determine the namespace of the candidate value.
        ns = None
        if isinstance(value, xml.dom.Node):
            if value.namespaceURI is not None:
                ns = pyxb.namespace.NamespaceForURI(value.namespaceURI)
        elif isinstance(value, basis._TypeBinding_mixin):
            elt = value._element()
            if elt is not None:
                ns = elt.name().namespace()
            else:
                ns = value._ExpandedName.namespace()
        else:
            raise pyxb.LogicError('Need namespace from value')
        if isinstance(ns, pyxb.namespace.Namespace) and ns.isAbsentNamespace():
            # Normalize absent namespace to None for constraint checks.
            ns = None
        if self.NC_any == self.__namespaceConstraint:
            return True
        if isinstance(self.__namespaceConstraint, tuple):
            # "##other": match any non-absent namespace except the one named.
            (_, constrained_ns) = self.__namespaceConstraint
            assert self.NC_not == _
            if ns is None:
                return False
            if constrained_ns == ns:
                return False
            return True
        return ns in self.__namespaceConstraint

    def newState (self, parent_particle_state):
        # A wildcard is its own (stateless) validation state.
        return self

    def accepts (self, particle_state, instance, value, element_use):
        # Implement ContentState_mixin.accepts for wildcard content.
        if isinstance(value, xml.dom.Node):
            value_desc = 'value in %s' % (value.nodeName,)
        else:
            value_desc = 'value of type %s' % (type(value),)
        if not self.matches(instance, value):
            return False
        if not isinstance(value, basis._TypeBinding_mixin):
            # Diagnostic: the value could not be resolved to a binding class.
            print 'NOTE: Created unbound wildcard element from %s' % (value_desc,)
        assert isinstance(instance.wildcardElements(), list), 'Uninitialized wildcard list in %s' % (instance._ExpandedName,)
        instance._appendWildcardElement(value)
        particle_state.incrementCount()
        return True

    def _validate (self, symbol_set, output_sequence):
        # @todo check node against namespace constraint and process contents
        #print 'WARNING: Accepting node as wildcard match without validating.'
        # Wildcard values are stored under the None key in the symbol set.
        wc_values = symbol_set.get(None)
        if wc_values is None:
            return False
        used = wc_values.pop(0)
        output_sequence.append( (None, used) )
        if 0 == len(wc_values):
            del symbol_set[None]
        return True
class SequenceState (ContentState_mixin):
    """Validation state for a sequence model group: particles must be
    matched in declaration order, advancing to the next particle as each
    one finishes."""

    __failed = False
    __satisfied = False

    def __init__ (self, group, parent_particle_state):
        super(SequenceState, self).__init__(group)
        self.__sequence = group
        self.__parentParticleState = parent_particle_state
        self.__particles = group.particles()
        # __index starts at -1; the notifyFailure call below advances it to
        # 0 and installs the ParticleState for the first particle.
        self.__index = -1
        self.__satisfied = False
        self.__failed = False
        self.notifyFailure(None, False)
        #print 'SS.CTOR %s: %d elts' % (self, len(self.__particles))

    def accepts (self, particle_state, instance, value, element_use):
        assert self.__parentParticleState == particle_state
        assert not self.__failed
        #print 'SS.ACC %s: %s %s %s' % (self, instance, value, element_use)
        # Offer the value to successive particles.  A particle that cannot
        # consume it invokes notifyFailure, which replaces __particleState
        # with the next particle's state (or None at the end).
        while self.__particleState is not None:
            (consume, underflow_exc) = self.__particleState.step(instance, value, element_use)
            if consume:
                return True
            if underflow_exc is not None:
                self.__failed = True
                raise underflow_exc
        return False

    def _verifyComplete (self, parent_particle_state):
        # Drain the remaining particles; each verifyComplete call advances
        # __particleState via notifyFailure until the sequence is exhausted.
        while self.__particleState is not None:
            self.__particleState.verifyComplete()

    def notifyFailure (self, sub_state, particle_ok):
        # Advance to the next particle.  When none remain, the sequence as a
        # whole is satisfied iff the last particle completed cleanly, in
        # which case the parent particle records one full occurrence.
        self.__index += 1
        self.__particleState = None
        if self.__index < len(self.__particles):
            self.__particleState = ParticleState(self.__particles[self.__index], self)
        else:
            self.__satisfied = particle_ok
            if particle_ok:
                self.__parentParticleState.incrementCount()
        #print 'SS.NF %s: %d %s %s' % (self, self.__index, particle_ok, self.__particleState)
class ChoiceState (ContentState_mixin):
    """Validation state for a choice model group: the first alternative that
    consumes content becomes the single active choice; the others are
    discarded."""

    def __init__ (self, group, parent_particle_state):
        self.__parentParticleState = parent_particle_state
        super(ChoiceState, self).__init__(group)
        self.__choices = [ ParticleState(_p, self) for _p in group.particles() ]
        self.__activeChoice = None
        #print 'CS.CTOR %s: %d choices' % (self, len(self.__choices))

    def accepts (self, particle_state, instance, value, element_use):
        #print 'CS.ACC %s %s: %s %s %s' % (self, self.__activeChoice, instance, value, element_use)
        if self.__activeChoice is None:
            # Not committed yet: probe each alternative; the first one that
            # consumes the value is selected permanently.
            for choice in self.__choices:
                #print 'CS.ACC %s candidate %s' % (self, choice)
                try:
                    (consume, underflow_exc) = choice.step(instance, value, element_use)
                except Exception, e:
                    # A raising candidate merely disqualifies itself here.
                    consume = False
                    underflow_exc = e
                #print 'CS.ACC %s: candidate %s : %s' % (self, choice, consume)
                if consume:
                    self.__activeChoice = choice
                    self.__choices = None
                    return True
            return False
        # Committed: only the active choice may consume further content.
        (consume, underflow_exc) = self.__activeChoice.step(instance, value, element_use)
        #print 'CS.ACC %s : active choice %s %s %s' % (self, self.__activeChoice, consume, underflow_exc)
        if consume:
            return True
        if underflow_exc is not None:
            self.__failed = True
            raise underflow_exc
        return False

    def _verifyComplete (self, parent_particle_state):
        rv = True
        #print 'CS.VC %s: %s' % (self, self.__activeChoice)
        if self.__activeChoice is None:
            # Never committed: the choice is complete if any alternative is
            # individually complete (e.g. has minOccurs 0).
            # Use self.__activeChoice as the iteration value so that it's
            # non-None when notifyFailure is invoked.
            for self.__activeChoice in self.__choices:
                try:
                    #print 'CS.VC: try %s' % (self.__activeChoice,)
                    self.__activeChoice.verifyComplete()
                    return
                except Exception, e:
                    pass
            #print 'Missing components %s' % ("\n".join([ "\n ".join([str(_p2.term()) for _p2 in _p.particle().term().particles()]) for _p in self.__choices ]),)
            raise pyxb.MissingContentError('choice')
        self.__activeChoice.verifyComplete()

    def notifyFailure (self, sub_state, particle_ok):
        #print 'CS.NF %s %s' % (self, particle_ok)
        # The group contributes one occurrence to its parent only when a
        # committed choice completed successfully.
        if particle_ok and (self.__activeChoice is not None):
            self.__parentParticleState.incrementCount()
        pass
class AllState (ContentState_mixin):
    """Validation state for an "all" model group: each member may be matched
    in any order; members are removed from the candidate pool as they become
    active."""

    __activeChoice = None
    __needRetry = False

    def __init__ (self, group, parent_particle_state):
        self.__parentParticleState = parent_particle_state
        super(AllState, self).__init__(group)
        # Candidate particles not yet consumed.
        self.__choices = set([ ParticleState(_p, self) for _p in group.particles() ])
        #print 'AS.CTOR %s: %d choices' % (self, len(self.__choices))

    def accepts (self, particle_state, instance, value, element_use):
        #print 'AS.ACC %s %s: %s %s %s' % (self, self.__activeChoice, instance, value, element_use)
        # notifyFailure may clear __activeChoice and request another pass
        # over the remaining candidates, hence the retry loop.
        self.__needRetry = True
        while self.__needRetry:
            self.__needRetry = False
            if self.__activeChoice is None:
                # No active member: probe each remaining candidate.
                for choice in self.__choices:
                    #print 'AS.ACC %s candidate %s' % (self, choice)
                    try:
                        (consume, underflow_exc) = choice.step(instance, value, element_use)
                    except Exception, e:
                        # A raising candidate merely disqualifies itself.
                        consume = False
                        underflow_exc = e
                    #print 'AS.ACC %s: candidate %s : %s' % (self, choice, consume)
                    if consume:
                        # Commit to this member and remove it from the pool.
                        self.__activeChoice = choice
                        self.__choices.discard(self.__activeChoice)
                        return True
                return False
            (consume, underflow_exc) = self.__activeChoice.step(instance, value, element_use)
            #print 'AS.ACC %s : active choice %s %s %s' % (self, self.__activeChoice, consume, underflow_exc)
            if consume:
                return True
            if underflow_exc is not None:
                self.__failed = True
                raise underflow_exc
        return False

    def _verifyComplete (self, parent_particle_state):
        #print 'AS.VC %s: %s, %d left' % (self, self.__activeChoice, len(self.__choices))
        # The active member and every untried candidate must each satisfy
        # their own occurrence constraints.
        if self.__activeChoice is not None:
            self.__activeChoice.verifyComplete()
        while self.__choices:
            self.__activeChoice = self.__choices.pop()
            self.__activeChoice.verifyComplete()

    def notifyFailure (self, sub_state, particle_ok):
        #print 'AS.NF %s %s' % (self, particle_ok)
        # Release the active member so accepts() can retry the remaining
        # candidates; the group completes when the pool is empty.
        self.__needRetry = True
        self.__activeChoice = None
        if particle_ok and (0 == len(self.__choices)):
            self.__parentParticleState.incrementCount()
class ParticleState (pyxb.cscRoot):
    """Tracks how many complete instances of a particle's term have been
    consumed, delegating acceptance of individual values to a fresh state
    object for the term at each occurrence."""

    def __init__ (self, particle, parent_state=None):
        self.__particle = particle
        self.__parentState = parent_state
        # Starts at -1; incrementCount() below raises it to 0 and creates
        # the state for the first potential term instance.
        self.__count = -1
        #print 'PS.CTOR %s: particle %s' % (self, particle)
        self.incrementCount()

    # Accessor for the particle being tracked.
    def particle (self): return self.__particle

    def incrementCount (self):
        #print 'PS.IC %s' % (self,)
        # One more term instance completed: bump the count and start a
        # fresh term state for the next potential instance.
        self.__count += 1
        self.__termState = self.__particle.term().newState(self)
        self.__tryAccept = True

    def verifyComplete (self):
        # @TODO@ Set a flag so we can make verifyComplete safe to call
        # multiple times?
        #print 'PS.VC %s entry' % (self,)
        # Let the term state finish any pending sub-particles before
        # re-testing the occurrence constraints.
        if not self.__particle.satisfiesOccurrences(self.__count):
            self.__termState._verifyComplete(self)
        if not self.__particle.satisfiesOccurrences(self.__count):
            print 'PS.VC %s incomplete' % (self,)
            raise pyxb.MissingContentError('incomplete')
        if self.__parentState is not None:
            self.__parentState.notifyFailure(self, True)

    def step (self, instance, value, element_use):
        """Attempt to apply the value as a new instance of the particle's term.

        The L{ContentState_mixin} created for the particle's term is consulted
        to determine whether the instance can accept the given value.  If so,
        the particle's maximum occurrence limit is checked; if not, and the
        particle has a parent state, it is informed of the failure.

        @param instance: An instance of a subclass of
        {basis.complexTypeDefinition}, into which the provided value will be
        stored if it is consistent with the current model state.

        @param value: The value that is being validated against the state.

        @param element_use: An optional L{ElementUse} instance that specifies
        the element to which the value corresponds.  This will be available
        when the value is extracted by parsing a document, but will be absent
        if the value was passed as a constructor positional parameter.

        @return: C{( consumed, underflow_exc )} A tuple where the first element
        is C{True} iff the provided value was accepted in the current state.
        When this first element is C{False}, the second element will be
        C{None} if the particle's occurrence requirements have been met, and
        is an instance of C{MissingElementError} if the observed number of
        terms is less than the minimum occurrence count.  Depending on
        context, the caller may raise this exception, or may try an
        alternative content model.

        @raise pyxb.UnexpectedElementError: if the value satisfies the particle,
        but having done so exceeded the allowable number of instances of the
        term.
        """
        #print 'PS.STEP %s: %s %s %s' % (self, instance, value, element_use)
        # Only try if we're not already at the upper limit on occurrences
        consumed = False
        underflow_exc = None
        # We can try the value against the term if we aren't at the maximum
        # count for the term.  Also, if we fail to consume, but as a side
        # effect of the test the term may have reset itself, we can try again.
        self.__tryAccept = True
        while self.__tryAccept and (self.__count != self.__particle.maxOccurs()):
            self.__tryAccept = False
            consumed = self.__termState.accepts(self, instance, value, element_use)
            #print 'PS.STEP %s: ta %s %s' % (self, self.__tryAccept, consumed)
            self.__tryAccept = self.__tryAccept and (not consumed)
        #print 'PS.STEP %s: %s' % (self, consumed)
        if consumed:
            if not self.__particle.meetsMaximum(self.__count):
                raise pyxb.UnexpectedElementError('too many')
        else:
            if self.__parentState is not None:
                self.__parentState.notifyFailure(self, self.__particle.satisfiesOccurrences(self.__count))
            if not self.__particle.meetsMinimum(self.__count):
                # @TODO@ Use better exception; changing this will require
                # changing some unit tests.
                #underflow_exc = pyxb.MissingElementError('too few')
                underflow_exc = pyxb.UnrecognizedContentError('too few')
        return (consumed, underflow_exc)

    def __str__ (self):
        # Diagnostic form: count, occurrence bounds, term, and identity.
        particle = self.__particle
        return 'ParticleState(%d:%d,%s:%s)@%x' % (self.__count, particle.minOccurs(), particle.maxOccurs(), particle.term(), id(self))
class ParticleModel (ContentModel_mixin):
    """Content model dealing with particles: terms with occurrence restrictions"""

    # Accessors for the occurrence bounds and the governed term.
    def minOccurs (self): return self.__minOccurs
    def maxOccurs (self): return self.__maxOccurs
    def term (self): return self.__term

    def meetsMaximum (self, count):
        """@return: C{True} iff there is no maximum on term occurrence, or the
        provided count does not exceed that maximum"""
        return (self.__maxOccurs is None) or (count <= self.__maxOccurs)

    def meetsMinimum (self, count):
        """@return: C{True} iff the provided count meets the minimum number of
        occurrences"""
        return count >= self.__minOccurs

    def satisfiesOccurrences (self, count):
        """@return: C{True} iff the provided count satisfies the occurrence
        requirements"""
        return self.meetsMinimum(count) and self.meetsMaximum(count)

    def __init__ (self, term, min_occurs=1, max_occurs=1):
        # A max_occurs of None denotes an unbounded particle (see
        # meetsMaximum above).
        self.__term = term
        self.__minOccurs = min_occurs
        self.__maxOccurs = max_occurs

    def newState (self):
        return ParticleState(self)

    def validate (self, symbol_set):
        """Determine whether the particle requirements are satisfiable by the
        given symbol set.

        The symbol set represents letters in an alphabet.  If those letters
        can be combined in a way that satisfies the regular expression
        expressed in the model, a satisfying sequence is returned and the
        symbol set is reduced by the letters used to form the sequence.  If
        the content model cannot be satisfied, C{None} is returned and the
        symbol set remains unchanged.

        @param symbol_set: A map from L{ElementUse} instances to a list of
        values.  The order of the values corresponds to the order in which
        they should appear.  A key of C{None} identifies values that are
        stored as wildcard elements.  Values are removed from the lists as
        they are used; when the last value of a list is removed, its key is
        removed from the map.  Thus an empty dictionary is the indicator that
        no more symbols are available.

        @return: returns C{None}, or a list of tuples C{( eu, val )} where
        C{eu} is an L{ElementUse} from the set of symbol keys, and C{val} is a
        value from the corresponding list.
        """
        output_sequence = []
        #print 'Start: %d %s %s : %s' % (self.__minOccurs, self.__maxOccurs, self.__term, symbol_set)
        result = self._validate(symbol_set, output_sequence)
        #print 'End: %s %s %s' % (result, symbol_set, output_sequence)
        if result:
            return (symbol_set, output_sequence)
        return None

    def _validate (self, symbol_set, output_sequence):
        # Work on scratch copies; the caller's structures are replaced only
        # when the occurrence constraints end up satisfied.
        symbol_set_mut = self._validateCloneSymbolSet(symbol_set)
        output_sequence_mut = self._validateCloneOutputSequence(output_sequence)
        count = 0
        #print 'VAL start %s: %d %s' % (self.__term, self.__minOccurs, self.__maxOccurs)
        last_size = len(output_sequence_mut)
        # Repeatedly validate the term until the maximum is reached or the
        # term can no longer be satisfied.
        while (count != self.__maxOccurs) and self.__term._validate(symbol_set_mut, output_sequence_mut):
            #print 'VAL %s old cnt %d, left %s' % (self.__term, count, symbol_set_mut)
            this_size = len(output_sequence_mut)
            if this_size == last_size:
                # Validated without consuming anything.  Assume we can
                # continue to do so, jump to the minimum, and exit.
                if count < self.__minOccurs:
                    count = self.__minOccurs
                break
            count += 1
            last_size = this_size
        result = self.satisfiesOccurrences(count)
        if (result):
            self._validateReplaceResults(symbol_set, symbol_set_mut, output_sequence, output_sequence_mut)
        #print 'VAL end PRT %s res %s: %s %s %s' % (self.__term, result, self.__minOccurs, count, self.__maxOccurs)
        return result
class _Group (ContentModel_mixin):
    """Base class for content information pertaining to a U{model
    group<http://www.w3.org/TR/xmlschema-1/#Model_Groups>}.

    There is a specific subclass for each group compositor.
    """

    _StateClass = None
    """A reference to a L{ContentState_mixin} class that maintains state when
    validating an instance of this group."""

    # Accessor for the particles composing this group.
    def particles (self): return self.__particles

    def __init__ (self, *particles):
        self.__particles = particles

    def newState (self, parent_particle_state):
        # Instantiate the compositor-specific validation state.
        return self._StateClass(self, parent_particle_state)

    # All and Sequence share the same validation code, so it's up here.
    def _validate (self, symbol_set, output_sequence):
        # Work on scratch copies; commit the results to the caller's
        # structures only if every particle validates.
        symbol_set_mut = self._validateCloneSymbolSet(symbol_set)
        output_sequence_mut = self._validateCloneOutputSequence(output_sequence)
        for p in self.particles():
            if not p._validate(symbol_set_mut, output_sequence_mut):
                return False
        self._validateReplaceResults(symbol_set, symbol_set_mut, output_sequence, output_sequence_mut)
        return True
class GroupChoice (_Group):
    """Content model for a "choice" compositor: exactly one alternative
    needs to validate."""

    _StateClass = ChoiceState

    # Choice requires a different validation algorithm
    def _validate (self, symbol_set, output_sequence):
        # Try each alternative against scratch copies of the inputs.  The
        # copies are rebuilt only when the previous failed attempt consumed
        # something (changed the output length); an attempt that consumed
        # nothing left the scratch copies unchanged, so they can be reused.
        reset_mutables = True
        for p in self.particles():
            if reset_mutables:
                symbol_set_mut = self._validateCloneSymbolSet(symbol_set)
                output_sequence_mut = self._validateCloneOutputSequence(output_sequence)
            if p._validate(symbol_set_mut, output_sequence_mut):
                # Commit the first successful alternative.
                self._validateReplaceResults(symbol_set, symbol_set_mut, output_sequence, output_sequence_mut)
                return True
            reset_mutables = len(output_sequence) != len(output_sequence_mut)
        return False
class GroupAll (_Group):
    # "all" compositor: members may appear in any order (see AllState).
    _StateClass = AllState
class GroupSequence (_Group):
    # "sequence" compositor: members must appear in order (see SequenceState).
    _StateClass = SequenceState
## Local Variables:
## fill-column:78
## End:
| 44.168865 | 162 | 0.652708 | 48,964 | 0.97499 | 0 | 0 | 0 | 0 | 0 | 0 | 24,564 | 0.489128 |
6150aa4c86be0709bceaea10c0cd77d46a394ca4 | 2,447 | py | Python | scripts/imglss-query-tycho-veto.py | desihub/imaginglss | 09258d20015869fead9bad6020da2bc0d161f670 | [
"MIT"
] | 6 | 2015-04-30T18:58:28.000Z | 2020-11-23T16:52:02.000Z | scripts/imglss-query-tycho-veto.py | desihub/imaginglss | 09258d20015869fead9bad6020da2bc0d161f670 | [
"MIT"
] | 15 | 2015-07-21T18:44:30.000Z | 2018-09-07T22:40:31.000Z | scripts/imglss-query-tycho-veto.py | desihub/imaginglss | 09258d20015869fead9bad6020da2bc0d161f670 | [
"MIT"
] | 2 | 2017-11-29T16:58:34.000Z | 2018-07-23T15:52:59.000Z | #!/usr/bin/env python
#
# Code to query the VETO mask of objects/randoms
# It takes the NOISES extension as an input
# It writers a VETO extension.
# Usage, see python query_veto.py -h
#
from __future__ import print_function
__author__ = "Yu Feng and Martin White"
__version__ = "1.0"
__email__ = "yfeng1@berkeley.edu or mjwhite@lbl.gov"
import numpy as np
import h5py
from imaginglss import DECALS
from imaginglss.analysis import tycho_veto
from imaginglss.analysis import veto
from imaginglss.cli import CLI
# Command-line interface.  The triple-quoted text is the program description
# shown in --help output (runtime data; not modified here).
cli = CLI(
"""
Query the TYCHOVETO flags of input data. The position is taken from the NOISES extension of input.
The result is written to the TYCHOVETO extension of output.
Currently, only veto by proximity to tycho stars are implemented. Each veto in
imaginglss.analysis.tycho_veto is calculated as a column in the TYCHOVETO extension.
Unfortunately, this script is not sufficiently smart to decide the correct TYCHOVETO for the target type.
Therefore, no combined veto flag is generated.
"""
)
cli.add_argument("catalogue", help="HDF5 catalogue file, can be either random or objects. TYCHO_VETO dataset will be added ")
ns = cli.parse_args()
# Survey data handle built from the configuration named on the command line.
decals = DECALS(ns.conf)
# Globally ignore numpy divide-by-zero / invalid-value floating point errors.
np.seterr(divide='ignore', invalid='ignore')
def query_veto(decals, ns):
    """
    calculate VETO flag for all proximity vetos defined in tycho_veto.

    Reads RA/DEC from the catalogue named on the command line and returns a
    structured boolean array with one column per veto defined in
    imaginglss.analysis.tycho_veto; an entry is True when the position falls
    inside that veto's exclusion radius around a Tycho star.
    """
    with h5py.File(ns.catalogue, 'r') as ff:
        RA = ff['RA'][:]
        DEC = ff['DEC'][:]
    # Every public name in the tycho_veto module is treated as a veto
    # definition (dir() already yields strings; no str() needed).
    allvetos = [name for name in dir(tycho_veto) if not name.startswith('_')]
    # One boolean ('?') column per veto, initialized to False.
    dataset = np.zeros(len(RA), dtype=[(name, '?') for name in allvetos])
    for vetoname in allvetos:
        vetotype = getattr(tycho_veto, vetoname)
        # Each veto yields star centers and per-star exclusion radii.
        RAc, DECc, R = vetotype(decals)
        print(R.min(), R.max())
        centers = (RAc, DECc)
        mask = veto.veto((RA, DEC), centers, R)
        # if we want to combine the bits, do it here.
        # but there is no point of doing so for all tycho based proximity
        # vetos. we will assembly the full veto bitmask later in the pipeline.
        dataset[vetoname][mask] = True
        print(vetoname, dataset[vetoname].sum())
    return dataset
if __name__=="__main__":
    # Compute the veto columns and rewrite the catalogue's TYCHO_VETO
    # dataset in place (replacing any previous version).
    VETO = query_veto(decals, ns)
    with h5py.File(ns.catalogue, 'r+') as ff:
        if 'TYCHO_VETO' in ff:
            del ff['TYCHO_VETO']
        ds = ff.create_dataset('TYCHO_VETO', data=VETO)
| 30.974684 | 125 | 0.679199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,186 | 0.484675 |
615124af15bbf9dc72d9e0297469abcf9e8f9255 | 5,573 | py | Python | model.py | hafizur-rahman/CarND-Behavioral-Cloning-P3 | 2a094918c2901d0db4496649d66c89d7c5d512c8 | [
"MIT"
] | null | null | null | model.py | hafizur-rahman/CarND-Behavioral-Cloning-P3 | 2a094918c2901d0db4496649d66c89d7c5d512c8 | [
"MIT"
] | null | null | null | model.py | hafizur-rahman/CarND-Behavioral-Cloning-P3 | 2a094918c2901d0db4496649d66c89d7c5d512c8 | [
"MIT"
] | null | null | null | import csv
import cv2
import numpy as np
from sklearn.utils import shuffle
class DrivingLogReader:
    """Loads one or more driving-log CSV files, merges their rows, and
    provides a shuffled train/validation split of the combined records."""

    def __init__(self, driving_data):
        # driving_data: iterable of paths to driving_log.csv files.
        self.driving_data = driving_data
        self.driving_log = self.read_all(self.driving_data)
        self.record_count = len(self.driving_log)

    def read_csv_data(self, path):
        """Return the rows of a single CSV file as a list of string lists."""
        with open(path) as csvfile:
            return [row for row in csv.reader(csvfile)]

    def read_all(self, driving_data):
        """Read every log file and concatenate their rows into one array."""
        chunks = []
        for path in driving_data:
            print("Reading driving log from {}".format(path))
            chunks.append(self.read_csv_data(path))
        return np.concatenate(chunks)

    def train_valid_split(self, ratio=0.8):
        """Shuffle the records in place and split at the given ratio."""
        np.random.shuffle(self.driving_log)
        n_train = int(ratio * self.record_count)
        return self.driving_log[:n_train], self.driving_log[n_train:]
def read_image_rgb(source_path):
    """Load the image at source_path and return it in RGB channel order
    (cv2.imread yields BGR)."""
    bgr = cv2.imread(source_path)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
# Generators
import random
class Generator:
    """Endless generator of (images, steering angles) training batches.

    Each draw picks a random log row and one of its three camera images
    (index 0/1/2 with steering corrections 0/+0.2/-0.2), crops and resizes
    the frame, and also emits a horizontally flipped copy with the negated
    steering angle for augmentation.
    """

    def next_batch(self, driving_log, batch_size, shape=(140, 50)):
        """Yield batches forever.

        @param driving_log: sequence of log rows (image paths in [0..2],
            steering angle as string in [3]).
        @param batch_size: number of samples per yielded batch.
        @param shape: (width, height) passed to cv2.resize; note the buffers
            below are allocated (height, width).
        """
        correction_factor = [0, 0.2, -0.2]
        # Preallocated batch buffers, reused (and overwritten) every cycle.
        batch_images = np.zeros((batch_size, shape[1], shape[0], 3))
        batch_measurements = np.zeros((batch_size, 1))
        while True:
            i = 0
            while i < batch_size:
                # choose random index in features
                log = random.choice(driving_log)
                img_index = random.choice(range(0, 3))
                image_rgb = read_image_rgb(log[img_index])
                # Crop top/bottom rows and side columns before resizing.
                cropped = image_rgb[60:-10, 20:-20]
                image = cv2.resize(cropped, shape, interpolation=cv2.INTER_AREA)
                measurement = float(log[3]) + correction_factor[img_index]
                batch_images[i] = image
                batch_measurements[i] = measurement
                i += 1
                # Write the mirrored copy only if a slot remains: the
                # original wrote to i+1 unconditionally, which raised
                # IndexError for odd batch sizes.
                if i < batch_size:
                    batch_images[i] = np.fliplr(image)
                    batch_measurements[i] = -measurement
                    i += 1
            yield batch_images, batch_measurements
from keras.models import Sequential
from keras.layers import Flatten, Dense,Lambda, Cropping2D, Dropout, Conv2D
from keras.layers.convolutional import Convolution2D
class SteeringAnglePredictor:
    """Convolutional network that maps a cropped, resized camera frame to a
    single steering-angle value, trained from batches produced by
    Generator.  NOTE(review): the layer stack resembles the NVIDIA
    end-to-end driving architecture — confirm before documenting as such."""

    def __init__(self, input_shape=(50,140, 3)):
        # input_shape is (height, width, channels), matching Generator's
        # resize target of (140, 50).
        self.input_shape = input_shape
        self.model = self.create_model(self.input_shape)
        self.generator = Generator()

    def create_model(self, input_shape):
        """Build and compile the Keras model (Keras 1.x API:
        Convolution2D/subsample)."""
        model = Sequential()
        # Normalize pixel values to [-0.5, 0.5].
        model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=input_shape))
        model.add(Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
        model.add(Convolution2D(64, 3, 3, activation='relu'))
        model.add(Dropout(0.4))
        model.add(Flatten())
        model.add(Dense(1164))
        model.add(Dense(100))
        model.add(Dense(50))
        model.add(Dense(10))
        # Single regression output: the steering angle.
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='adam')
        model.summary()
        # from keras.utils.visualize_util import plot
        # plot(model, to_file='output_images/model.png', show_shapes=True, show_layer_names=True)
        return model

    def train(self, train_data, validation_data, batch_size=64, epochs=10):
        """Fit the model from generator-produced batches (Keras 1.x
        fit_generator keywords)."""
        samples_per_epoch = batch_size * 100
        self.model.fit_generator(self.generator.next_batch(train_data, batch_size),
                                 validation_data=self.generator.next_batch(validation_data, batch_size),
                                 samples_per_epoch=samples_per_epoch,
                                 nb_val_samples=len(validation_data),
                                 nb_epoch=epochs
                                 )

    def save_model(self, file_name):
        # Persist architecture + weights + optimizer state in one file.
        self.model.save(file_name)
def train_model(driving_data):
    """Load the driving logs, train the steering model, and save it."""
    reader = DrivingLogReader(driving_data=driving_data)
    print("Total driving log records: {}".format(reader.record_count))

    # Shuffle and split into training/validation subsets.
    train, valid = reader.train_valid_split()
    print("Training dataset shape: {}".format(train.shape))
    print("Validation dataset shape: {}".format(valid.shape))

    # Build, fit, and persist the network.
    predictor = SteeringAnglePredictor()
    predictor.train(train, valid, batch_size=64, epochs=10)
    predictor.save_model('model.h5')
if __name__== "__main__":
    import argparse
    # CLI: --base_path is the recording root; --tracks lists per-track
    # subdirectories each containing a driving_log.csv.
    parser = argparse.ArgumentParser(description='Steering Angle Predictor')
    parser.add_argument('--base_path', help='Driving log record base path')
    parser.add_argument('--tracks', nargs='*', help='Track records')
    args = parser.parse_args()
    print("Parsed arguments:\n\tBase path: {}\n\tTracks: {}\n".format(args.base_path, args.tracks))
    driving_data = ['{}/{}/driving_log.csv'.format(args.base_path, track) for track in args.tracks]
    train_model(driving_data)
| 33.371257 | 99 | 0.616903 | 3,927 | 0.704647 | 1,158 | 0.207788 | 0 | 0 | 0 | 0 | 674 | 0.12094 |
615449c99a13e49491115643772692dd1ae14eec | 3,078 | py | Python | backend/bundle/controller.py | FlickerSoul/Graphery | 8b1390e1ba96fd2867f0cd8e5fc1d4ad6108121e | [
"MIT"
] | 5 | 2020-08-26T00:15:01.000Z | 2021-01-11T17:24:51.000Z | backend/bundle/controller.py | FlickerSoul/Graphery | 8b1390e1ba96fd2867f0cd8e5fc1d4ad6108121e | [
"MIT"
] | 69 | 2020-08-02T23:45:44.000Z | 2021-04-17T03:04:32.000Z | backend/bundle/controller.py | FlickerSoul/Graphery | 8b1390e1ba96fd2867f0cd8e5fc1d4ad6108121e | [
"MIT"
] | 4 | 2020-09-10T05:40:49.000Z | 2020-12-20T11:44:16.000Z | from __future__ import annotations
import logging
import pathlib
from logging.handlers import TimedRotatingFileHandler
from os import getenv
from typing import Union, List, Mapping
from bundle.utils.recorder import Recorder
from bundle.utils.cache_file_helpers import CacheFolder, USER_DOCS_PATH
from bundle.seeker import tracer
_CACHE_FOLDER_AUTO_DELETE_ENV_NAME = 'CONTROLLER_CACHE_AUTO_DELETE'
is_auto_delete = bool(int(getenv(_CACHE_FOLDER_AUTO_DELETE_ENV_NAME, False)))
_CACHE_PATH_ENV_NAME = 'CONTROLLER_CACHE_PATH'
controller_cache_path = pathlib.Path(getenv(_CACHE_PATH_ENV_NAME, USER_DOCS_PATH))
class _Controller:
    """Owns the cache folders, the rotating execution logger, and the
    tracer/recorder pair used to capture execution changes.  Also acts as a
    context manager that attaches the logger to the tracer class on entry
    and detaches it on exit."""

    _LOG_FILE_NAME = f'graphery_controller_execution.log'

    def __init__(self, cache_path=controller_cache_path, auto_delete: bool = is_auto_delete):
        self.main_cache_folder = CacheFolder(cache_path, auto_delete=auto_delete)
        self.log_folder = CacheFolder(cache_path / 'log', auto_delete=auto_delete)
        # TODO think about this, and the log file location in the sight class
        self.log_folder.mkdir(parents=True, exist_ok=True)
        self.tracer_cls = tracer
        self.recorder = Recorder()
        self.controller_logger = self._init_logger()
        # Enter the cache-folder context for this object's lifetime;
        # __del__ performs the matching __exit__.
        self.main_cache_folder.__enter__()
        self.tracer_cls.set_new_recorder(self.recorder)

    def _init_logger(self) -> logging.Logger:
        """Create a DEBUG logger writing INFO+ records to a midnight-rotating
        file (30 backups kept)."""
        log_file_path = self.log_folder.cache_folder_path / self._LOG_FILE_NAME
        logger = logging.getLogger('controller.tracer')
        logger.setLevel(logging.DEBUG)
        log_file_handler = TimedRotatingFileHandler(log_file_path, when='midnight', backupCount=30)
        log_file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)-15s::%(levelname)s::%(message)s'
        )
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
        return logger

    def get_recorded_content(self) -> List[Mapping]:
        # Raw change list as captured by the recorder.
        return self.recorder.get_change_list()

    def get_processed_result(self) -> List[Mapping]:
        # Post-processed version of the change list.
        return self.recorder.get_processed_change_list()

    def get_processed_result_json(self) -> str:
        # JSON serialization of the change list.
        return self.recorder.get_change_list_json()

    def purge_records(self) -> None:
        # Discard everything recorded so far.
        self.recorder.purge()

    def __call__(self, dir_name: Union[str, pathlib.Path] = None,
                 mode: int = 0o777,
                 auto_delete: bool = False,
                 *args, **kwargs) -> CacheFolder:
        """Return a named sub-cache-folder, or the main cache folder when no
        name is given."""
        if dir_name:
            return self.main_cache_folder.add_cache_folder(dir_name, mode, auto_delete)
        else:
            return self.main_cache_folder

    def __enter__(self) -> _Controller:
        # Attach the controller's logger to the tracer for this session.
        self.tracer_cls.set_logger(self.controller_logger)
        # TODO give a prompt that the current session is under this time stamp
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Detach the logger when the tracing session ends.
        self.tracer_cls.set_logger(None)

    def __del__(self) -> None:
        # Leave the cache-folder context entered in __init__.
        self.main_cache_folder.__exit__(None, None, None)
controller = _Controller()
| 37.084337 | 99 | 0.71345 | 2,437 | 0.791748 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.099415 |
6154b821741459ad2d01df318a4ca52a34575108 | 9,773 | py | Python | CPC training/tune.py | haoyudong-97/tg2019task | 4f31968cc49105d13bc95487136dc3ae986bf4d1 | [
"MIT"
] | null | null | null | CPC training/tune.py | haoyudong-97/tg2019task | 4f31968cc49105d13bc95487136dc3ae986bf4d1 | [
"MIT"
] | null | null | null | CPC training/tune.py | haoyudong-97/tg2019task | 4f31968cc49105d13bc95487136dc3ae986bf4d1 | [
"MIT"
] | null | null | null | # Copyright 2018 Dong-Hyun Lee, Kakao Brain.
# (Strongly inspired by original Google BERT code and Hugging Face's code)
""" Fine-tuning on A Classification Task with pretrained Transformer """
import itertools
import csv
import fire
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import tokenization
import models
import optim
import train
import pdb
import numpy as np
import pandas as pd
from utils import set_seeds, get_device, truncate_tokens_pair
import os
def read_explanations(path):
    """Read a tab-separated explanation table.

    Columns starting with '[SKIP]' are excluded from the text; the first
    such column containing 'UID' is used as the row identifier.  Returns a
    list of (uid, text) tuples where text joins all non-missing values of
    the remaining columns, or [] when the file has no UID column or no rows.
    """
    df = pd.read_csv(path, sep='\t', dtype=str)
    uid = None
    header = []
    for name in df.columns:
        if not name.startswith('[SKIP]'):
            header.append(name)
        elif uid is None and 'UID' in name:
            uid = name
    if uid is None or len(df) == 0:
        print('Possibly misformatted file: ' + path)
        return []

    def row_to_pair(r):
        text = ' '.join(str(s) for s in list(r[header]) if not pd.isna(s))
        return (r[uid], text)

    return df.apply(row_to_pair, 1).tolist()
# Locations of the explanation table files and the annotated question TSV.
# NOTE(review): absolute paths specific to the original author's machine —
# parameterize before reuse elsewhere.
tables = '/data/jacob/code/nlp/tfidf/data/annotation/expl-tablestore-export-2017-08-25-230344/tables'
questions = '/data/jacob/code/nlp/tfidf/data/questions/ARC-Elementary+EXPL-Dev.tsv'
def parse_e(e):
    """Split an explanation annotation into its UID parts.

    The annotation is space-separated entries of the form 'uid|role';
    only the uid before the '|' is kept for each entry."""
    return [entry.split('|')[0] for entry in e.split(' ')]
class CsvDataset(Dataset):
    """ Dataset Class for CSV file

    Pairs each annotated question with one positive explanation drawn from
    its gold explanation set and ``num_neg`` negatives sampled uniformly
    from the whole explanation store.  Every (label, question, explanation)
    triple is run through the preprocessing ``pipeline`` and the resulting
    fields are stacked into long tensors.
    """
    labels = None

    def __init__(self, pipeline=None):  # pipeline: list of preprocessing callables
        Dataset.__init__(self)
        # Collect every explanation row from every table file.
        explanations = []
        for path, _, files in os.walk(tables):
            for file in files:
                explanations += read_explanations(os.path.join(path, file))
        if not explanations:
            # Local import: the original referenced ``warnings`` without
            # importing it anywhere in the module (NameError on this path).
            import warnings
            warnings.warn('Empty explanations')

        df_q = pd.read_csv(questions, sep='\t', dtype=str)
        df_e = pd.DataFrame(explanations, columns=('uid', 'text'))

        # Map explanation uid -> explanation text.
        dict_e = {}
        for i in range(len(df_e['uid'])):
            dict_e[df_e['uid'][i]] = df_e['text'][i]

        # Keep only questions that carry a gold explanation.  pd.isna
        # replaces the original's fragile ``is np.nan`` identity test.
        q_list = []
        e_list = []
        for i in range(len(df_q['questionID'])):
            if not pd.isna(df_q['explanation'][i]):
                q_list.append(df_q['Question'][i])
                e_list.append(parse_e(df_q['explanation'][i]))

        self.q_list = q_list
        self.e_list = e_list
        self.dict_e = dict_e
        # Avoid a mutable default argument: [] is created per instance.
        self.pipeline = [] if pipeline is None else pipeline
        self.es = list(dict_e.keys())
        self.num_neg = 75  # negatives sampled per item

    def _preprocess(self, instance):
        # Run one (label, text_a, text_b) triple through the full pipeline.
        for proc in self.pipeline:
            instance = proc(instance)
        return instance

    def __len__(self):
        return len(self.q_list)

    def __getitem__(self, index):
        q = self.q_list[index]
        gold_uids = self.e_list[index]
        # One positive drawn at random from the gold explanation set.
        pos = self.dict_e[np.random.choice(gold_uids)]
        samples = [self._preprocess(('1', q, pos))]
        # num_neg negatives drawn uniformly from all explanations.
        for _ in range(self.num_neg):
            neg = self.dict_e[np.random.choice(self.es)]
            samples.append(self._preprocess(('0', q, neg)))
        # Stack field-wise: one long tensor per pipeline output field.
        return [torch.tensor(x, dtype=torch.long) for x in zip(*samples)]
class Pipeline():
    """ Preprocess Pipeline Class : callable

    Abstract base for a single preprocessing step; subclasses transform a
    (label, text_a, text_b) instance and return the transformed instance.
    """
    def __init__(self):
        super().__init__()

    def __call__(self, instance):
        # Subclasses must implement the transformation.
        raise NotImplementedError
class Tokenizing(Pipeline):
    """ Tokenizing sentence pair

    Normalizes the label and both texts with ``preprocessor`` and splits
    the texts into token lists with ``tokenize``; an empty/absent second
    text yields an empty token list."""

    def __init__(self, preprocessor, tokenize):
        super().__init__()
        self.preprocessor = preprocessor  # e.g. text normalization
        self.tokenize = tokenize  # tokenize function

    def __call__(self, instance):
        raw_label, text_a, text_b = instance
        normalized_label = self.preprocessor(raw_label)
        tokens_a = self.tokenize(self.preprocessor(text_a))
        if text_b:
            tokens_b = self.tokenize(self.preprocessor(text_b))
        else:
            tokens_b = []
        return (normalized_label, tokens_a, tokens_b)
class AddSpecialTokensWithTruncation(Pipeline):
    """ Add special tokens [CLS], [SEP] with truncation

    Produces '[CLS] tokens_a [SEP]' and, when a second sentence is present,
    'tokens_b [SEP]', truncating the pair so the combined length (including
    special tokens) fits max_len."""

    def __init__(self, max_len=512):
        super().__init__()
        self.max_len = max_len

    def __call__(self, instance):
        label, tokens_a, tokens_b = instance

        # -3 special tokens for [CLS] text_a [SEP] text_b [SEP]
        # -2 special tokens for [CLS] text_a [SEP]
        _max_len = self.max_len - 3 if tokens_b else self.max_len - 2
        # truncate_tokens_pair (from utils) presumably trims the token lists
        # in place until they fit _max_len — confirm against utils.
        truncate_tokens_pair(tokens_a, tokens_b, _max_len)

        # Add Special Tokens
        tokens_a = ['[CLS]'] + tokens_a + ['[SEP]']
        tokens_b = tokens_b + ['[SEP]'] if tokens_b else []

        return (label, tokens_a, tokens_b)
class TokenIndexing(Pipeline):
    """Map tokens to vocabulary ids, build masks, and zero-pad to max_len."""

    def __init__(self, indexer, labels, max_len=512):
        super().__init__()
        self.indexer = indexer  # function: tokens -> vocabulary indexes
        # Map from a label name to an integer class id.
        self.label_map = {name: i for i, name in enumerate(labels)}
        self.max_len = max_len

    def __call__(self, instance):
        label, tokens_a, tokens_b = instance
        len_a, len_b = len(tokens_a), len(tokens_b)
        input_ids = self.indexer(tokens_a + tokens_b)
        segment_ids = [0] * len_a + [1] * len_b  # token type ids
        input_mask = [1] * (len_a + len_b)
        label_id = self.label_map[label]
        # Right-pad every sequence with zeros up to max_len.
        pad = [0] * (self.max_len - len(input_ids))
        input_ids = input_ids + pad
        segment_ids = segment_ids + pad
        input_mask = input_mask + pad
        return (input_ids, segment_ids, input_mask, label_id)
class Classifier(nn.Module):
    """ Classifier with Transformer """

    def __init__(self, cfg, n_labels):
        super().__init__()
        self.transformer = models.Transformer(cfg)
        self.fc = nn.Linear(cfg.dim, cfg.dim)  # pooler projection
        self.activ = nn.Tanh()  # pooler activation
        self.drop = nn.Dropout(cfg.p_drop_hidden)
        self.classifier = nn.Linear(cfg.dim, n_labels)

    def forward(self, input_ids, segment_ids, input_mask):
        h = self.transformer(input_ids, segment_ids, input_mask)
        # only use the first h in the sequence ([CLS]-style pooling)
        pooled_h = self.activ(self.fc(h[:, 0]))
        logits = self.classifier(self.drop(pooled_h))
        # Map raw logits to positive scores capped at 100 (exp would
        # overflow otherwise); neg_logloss consumes these as unnormalized
        # probabilities.
        logits = torch.exp(logits).clamp(0, 100)
        return logits
#pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
#pretrain_file='../exp/bert/pretrain_100k/model_epoch_3_steps_9732.pt',
def neg_logloss(logits):
    """Negative log of the first score's share of the total score mass.

    ``logits`` holds positive scores; index 0 corresponds to the positive
    sample and the rest to negatives (see the dataset's ``__getitem__``).
    """
    total = logits.sum()
    positive_share = logits[0] / total
    # Small epsilon keeps the log finite if the share underflows to 0.
    return -torch.log(positive_share + 1e-4)
def main(task='mrpc',
         train_cfg='config/train_mrpc.json',
         model_cfg='config/bert_base.json',
         data_file='../glue/MRPC/train.tsv',
         model_file=None,
         pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
         data_parallel=True,
         vocab='../uncased_L-12_H-768_A-12/vocab.txt',
         save_dir='../exp/bert/mrpc',
         max_len=128,
         mode='train'):
    """Train or evaluate the Transformer ranking classifier.

    :param task: task name (not referenced in the visible code path)
    :param train_cfg: path to the training-config JSON
    :param model_cfg: path to the model-config JSON
    :param data_file: path to training data. NOTE(review): not referenced
        below -- CsvDataset is built from `pipeline` only; confirm intent.
    :param model_file: checkpoint to resume from (or None)
    :param pretrain_file: pretrained BERT checkpoint to initialize from
    :param data_parallel: enable multi-GPU execution in the trainer
    :param vocab: path to the WordPiece vocabulary file
    :param save_dir: directory for saved checkpoints
    :param max_len: maximum token sequence length
    :param mode: 'train' or 'eval'
    """
    cfg = train.Config.from_json(train_cfg)
    model_cfg = models.Config.from_json(model_cfg)
    set_seeds(cfg.seed)  # reproducibility
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)
    # Preprocessing chain: tokenize -> add [CLS]/[SEP] -> ids/masks/padding.
    pipeline = [Tokenizing(tokenizer.convert_to_unicode, tokenizer.tokenize),
                AddSpecialTokensWithTruncation(max_len),
                TokenIndexing(tokenizer.convert_tokens_to_ids,
                              ('0', '1'), max_len)]
    dataset = CsvDataset(pipeline)
    # print(dataset[0])
    # pdb.set_trace()
    # batch_size=1: each dataset item already bundles 1 positive + num_neg negatives.
    data_iter = DataLoader(dataset, batch_size=1, shuffle=True)
    model = Classifier(model_cfg, 1)  # single output score per sample
    criterion = nn.CrossEntropyLoss()  # NOTE(review): unused while get_loss uses neg_logloss
    trainer = train.Trainer(cfg,
                            model,
                            data_iter,
                            optim.optim4GPU(cfg, model),
                            save_dir, get_device())
    if mode == 'train':
        def get_loss(model, batch, global_step):  # make sure loss is a scalar tensor
            # pdb.set_trace()
            # Drop the leading batch dimension added by DataLoader (batch_size=1).
            input_ids, segment_ids, input_mask, label_id = [b[0] for b in batch]
            # pdb.set_trace()
            logits = model(input_ids, segment_ids, input_mask)
            # pdb.set_trace()
            loss = neg_logloss(logits)
            # loss = criterion(logits, label_id)
            return loss
        trainer.train(get_loss, model_file, pretrain_file, data_parallel)
    elif mode == 'eval':
        def evaluate(model, batch):
            input_ids, segment_ids, input_mask, label_id = batch
            logits = model(input_ids, segment_ids, input_mask)
            _, label_pred = logits.max(1)
            result = (label_pred == label_id).float()  # .cpu().numpy()
            accuracy = result.mean()
            return accuracy, result
        results = trainer.eval(evaluate, model_file, data_parallel)
        total_accuracy = torch.cat(results).mean().item()
        print('Accuracy:', total_accuracy)
# Expose `main` as a command line interface via python-fire.
if __name__ == '__main__':
    fire.Fire(main)
| 31.525806 | 112 | 0.600941 | 5,781 | 0.591528 | 0 | 0 | 0 | 0 | 0 | 0 | 2,240 | 0.229203 |
6156a185a0ec9e2713a8896fbea860c9a2788539 | 2,069 | py | Python | test/teos/unit/test_extended_appointment.py | ritikramuka/python-teos | daa9d8dc89d5ddc3f29f2b7b2dda05bf0c79c82d | [
"MIT"
] | 86 | 2020-03-20T23:58:40.000Z | 2022-02-27T11:25:11.000Z | test/teos/unit/test_extended_appointment.py | sr-gi/python-teos | f8f214125a2f7c82fed278dc21ee5d517e7de72a | [
"MIT"
] | 160 | 2020-03-23T16:31:51.000Z | 2022-03-05T13:22:21.000Z | test/teos/unit/test_extended_appointment.py | sr-gi/python-teos | f8f214125a2f7c82fed278dc21ee5d517e7de72a | [
"MIT"
] | 17 | 2020-03-21T10:28:37.000Z | 2022-02-23T12:22:00.000Z | import pytest
from teos.extended_appointment import ExtendedAppointment
@pytest.fixture
def ext_appointment_data(generate_dummy_appointment):
    # Dict form of a freshly generated dummy appointment
    # (factory provided by a fixture defined elsewhere in the suite).
    return generate_dummy_appointment().to_dict()
# Parent methods are not tested.
def test_init_ext_appointment(ext_appointment_data):
    """The constructor must store every supplied field verbatim."""
    # The appointment has no checks whatsoever, since the inspector is the one taking care of that, and the only one
    # creating appointments.
    ext_appointment = ExtendedAppointment(
        ext_appointment_data["locator"],
        ext_appointment_data["encrypted_blob"],
        ext_appointment_data["to_self_delay"],
        ext_appointment_data["user_id"],
        ext_appointment_data["user_signature"],
        ext_appointment_data["start_block"],
    )
    assert (
        ext_appointment_data["locator"] == ext_appointment.locator
        and ext_appointment_data["to_self_delay"] == ext_appointment.to_self_delay
        and ext_appointment_data["encrypted_blob"] == ext_appointment.encrypted_blob
        and ext_appointment_data["user_id"] == ext_appointment.user_id
        and ext_appointment_data["user_signature"] == ext_appointment.user_signature
        and ext_appointment_data["start_block"] == ext_appointment.start_block
    )
def test_get_summary(ext_appointment_data):
    """get_summary() must expose only the locator and user_id."""
    assert ExtendedAppointment.from_dict(ext_appointment_data).get_summary() == {
        "locator": ext_appointment_data["locator"],
        "user_id": ext_appointment_data["user_id"],
    }
def test_from_dict(ext_appointment_data):
    """from_dict() builds an appointment iff every field is present."""
    # The appointment should be built if we don't miss any field
    ext_appointment = ExtendedAppointment.from_dict(ext_appointment_data)
    assert isinstance(ext_appointment, ExtendedAppointment)
    # Otherwise it should fail
    for key in ext_appointment_data.keys():
        prev_val = ext_appointment_data[key]
        ext_appointment_data[key] = None
        with pytest.raises(ValueError, match="Wrong appointment data"):
            ExtendedAppointment.from_dict(ext_appointment_data)
        # Restore the field so each iteration nulls exactly one field.
        ext_appointment_data[key] = prev_val
| 36.946429 | 116 | 0.744804 | 0 | 0 | 0 | 0 | 119 | 0.057516 | 0 | 0 | 470 | 0.227163 |
6156bf0e3221f1152ca5fede44ceb1bca071a86d | 1,246 | py | Python | toolchain/riscv/MSYS/python/Lib/test/encoded_modules/__init__.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 207 | 2018-10-01T08:53:01.000Z | 2022-03-14T12:15:54.000Z | toolchain/riscv/MSYS/python/Lib/test/encoded_modules/__init__.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 8 | 2019-06-29T14:18:51.000Z | 2022-02-19T07:30:27.000Z | toolchain/riscv/MSYS/python/Lib/test/encoded_modules/__init__.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 76 | 2020-03-16T01:47:46.000Z | 2022-03-21T16:37:07.000Z | # -*- encoding: utf-8 -*-
# This is a package that contains a number of modules that are used to
# test import from the source files that have different encodings.
# This file (the __init__ module of the package), is encoded in utf-8
# and contains a list of strings from various unicode planes that are
# encoded differently to compare them to the same strings encoded
# differently in submodules. The following list, test_strings,
# contains a list of tuples. The first element of each tuple is the
# suffix that should be prepended with 'module_' to arrive at the
# encoded submodule name, the second item is the encoding and the last
# is the test string. The same string is assigned to the variable
# named 'test' inside the submodule. If the decoding of modules works
# correctly, from module_xyz import test should result in the same
# string as listed below in the 'xyz' entry.
# Each entry: (submodule suffix, source encoding, expected decoded text).
# module, encoding, test string
test_strings = (
    ('iso_8859_1', 'iso-8859-1', "Les hommes ont oublié cette vérité, "
     "dit le renard. Mais tu ne dois pas l'oublier. Tu deviens "
     "responsable pour toujours de ce que tu as apprivoisé."),
    ('koi8_r', 'koi8-r', "Познание бесконечности требует бесконечного времени.")
)
| 51.916667 | 81 | 0.731942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,218 | 0.93909 |
6157139686e8da9fc83cd2b69a5a6860b9518492 | 2,554 | py | Python | kinoko/text/patch_tsv.py | koyo922/kinoko | 4750d8e6b1a68ba771cd89b352989ef05b293d45 | [
"MIT"
] | 13 | 2019-10-27T12:19:27.000Z | 2022-03-12T13:30:31.000Z | kinoko/text/patch_tsv.py | iloveslq/kinoko | 4750d8e6b1a68ba771cd89b352989ef05b293d45 | [
"MIT"
] | null | null | null | kinoko/text/patch_tsv.py | iloveslq/kinoko | 4750d8e6b1a68ba771cd89b352989ef05b293d45 | [
"MIT"
] | 10 | 2019-10-27T12:21:21.000Z | 2022-03-12T13:30:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 expandtab number
"""
使用参考文件,对 csv/tsv 文件进行修补
e.g.
<reference.txt>内容如下:
jiaose 角色 juese
xxx 色情词 <DEL>
<file_to_patch>内容如下:
field1 field2 角色 jiaose field4
field1 field2 色情词 xxx field4
<result直接写到stdout>,内容如下:
field1 field2 角色 juese field4
命令行用法举例
patchtsv -r input/ref1.txt input/ref2.txt \
-i ./input/infile.txt \
-o ./input/outfile.txt \
-k 0 1 -v 0
Authors: qianweishuo<qzy922@gmail.com>
Date: 2019/6/27 下午11:20
"""
from __future__ import unicode_literals, print_function
import errno
import sys
from typing import Text
import argparse
from ..func import strip_fields
from ..text.io import file_wrapper
def main(console_args=sys.argv[1:], key_fmt_fn=strip_fields):
    """
    Entry point: patch a csv/tsv stream according to reference rule files.

    :param console_args: command-line arguments. NOTE(review): the default
        is evaluated once at import time, so later changes to sys.argv
        are not picked up.
    :param key_fmt_fn: formatting function applied to each key tuple;
        defaults to kinoko.func.strip_fields
    :return: None
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--ref', required=True, nargs='+', type=Text, help='path to reference file(s)')
    parser.add_argument('-d', '--delimeter', default='\t')
    parser.add_argument('-i', '--input', type=Text, default='/dev/stdin', help='path to the file being patched')
    parser.add_argument('-o', '--output', type=Text, default='/dev/stdout')
    parser.add_argument('-k', nargs='+', type=int, default=[0])  # key column index(es)
    parser.add_argument('-v', type=int, default=-1)  # value column index to patch
    args = parser.parse_args(console_args)
    # Load the reference file(s) into replacement rules: key tuple -> new value.
    rules = dict()
    for ref_path in args.ref:  # iterate over the reference files (may be a single one)
        for line in file_wrapper(ref_path):
            sp = line.rstrip('\n').split(args.delimeter)
            rules[tuple(sp[:-1])] = sp[-1]
    fout = file_wrapper(args.output, mode='wt')
    for line in file_wrapper(args.input):
        segs = line.rstrip('\n').split(args.delimeter)
        key = key_fmt_fn(tuple(segs[i] for i in args.k))
        tgt_value = rules.get(key)
        if tgt_value is None:
            pass  # no rule for this key: copy the line through unchanged
        elif tgt_value == '<DEL>':
            continue  # rule says delete: skip writing this line entirely
        else:
            # Note: don't substitute the whole field; replace only the
            # non-whitespace part so surrounding spaces are preserved.
            old_tgt_val = segs[args.v]  # the old value may carry spaces, e.g. ' いまか'
            # the replacement keeps the original spacing
            segs[args.v] = old_tgt_val.replace(old_tgt_val.strip(), tgt_value)
        try:
            fout.write(args.delimeter.join(segs) + '\n')
        except IOError as ex:  # pragma: no cover
            if ex.errno == errno.EPIPE:  # broken pipe by downstream `head`
                break
            else:
                raise
# Script entry point (coverage excluded: exercised via the CLI, not tests).
if __name__ == '__main__':
    main()  # pragma: no cover
| 28.377778 | 112 | 0.627643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,226 | 0.436921 |
6158ddbd923564b0fc6dc5759ee9c0f2fdcd6fb9 | 3,078 | py | Python | toolkit/utils/report_utils.py | suraj-testing2/Flowers_Toilet | 21c981531a505a8b74ee42c33a3f4d68ef72d7f3 | [
"Apache-2.0"
] | 22 | 2015-01-22T12:10:50.000Z | 2021-10-12T03:30:56.000Z | toolkit/utils/report_utils.py | suraj-testing2/Flowers_Toilet | 21c981531a505a8b74ee42c33a3f4d68ef72d7f3 | [
"Apache-2.0"
] | null | null | null | toolkit/utils/report_utils.py | suraj-testing2/Flowers_Toilet | 21c981531a505a8b74ee42c33a3f4d68ef72d7f3 | [
"Apache-2.0"
] | 17 | 2016-01-28T04:54:39.000Z | 2021-10-12T03:30:49.000Z | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions used in reporting.
Used by both command line tools and ui tools.
"""
from operator import itemgetter
import pprint
import textwrap
# Report layout constants.
DISPLAY_WIDTH = 80
TAB_WIDTH = 4
BORDER = DISPLAY_WIDTH * '-'
# Python 2 integer division: 40 dashes (this module uses py2 print statements).
SEPARATOR = (DISPLAY_WIDTH / 2) * '-'
# Only needed for wrapping with 3-level indenting.
wrapper = textwrap.TextWrapper(width=DISPLAY_WIDTH,
                               initial_indent=3 * TAB_WIDTH * ' ',
                               subsequent_indent=3 * TAB_WIDTH * ' ')
def PrintReportLine(text, indent=False, indent_level=1):
  """Helper to allow report string formatting (e.g. set tabs to 4 spaces).

  Args:
    text: String text to print.
    indent: If True, indent the line.
    indent_level: If indent, indent this many tabs.
  """
  if indent:
    # Prefix with indent_level literal tabs; expandtabs() widens them below.
    fmt = '%s%%s' % (indent_level * '\t')
  else:
    fmt = '%s'
  # Python 2 print statement; tabs are expanded to TAB_WIDTH spaces.
  print str(fmt % text).expandtabs(TAB_WIDTH)
def WrapReportText(text):
  """Helper to allow report string wrapping (e.g. wrap and indent).

  Actually invokes textwrap.fill() which returns a string instead of a list.
  We always double-indent our wrapped blocks.

  Args:
    text: String text to be wrapped.

  Returns:
    String of wrapped and indented text.
  """
  # Delegates to the module-level TextWrapper configured above.
  return wrapper.fill(text)
class Counter(object):
  """Replaces Collections.Counter when Python 2.7 is not available."""

  def __init__(self):
    """Establish internal data structures for counting."""
    self._counter = {}  # key -> accumulated int count

  def DebugPrint(self):
    """For debugging show the data structure."""
    pprint.pprint(self._counter)

  def Increment(self, counter_key, counter_increment=1):
    """Increment a key.

    Args:
      counter_key: String key that will collect a count.
      counter_increment: Int to increment the count; usually 1.
    """
    # setdefault creates the key with 0 on first sight; += then accumulates.
    self._counter.setdefault(counter_key, 0)
    self._counter[counter_key] += counter_increment

  @property
  def data(self):
    """Give access to the dictionary for retrieving keys/values."""
    return self._counter

  def FilterAndSortMostCommon(self, top_n=None):
    """Determine the top_n keys with highest counts in order descending.

    Args:
      top_n: Int count of the number of keys of interest. If None, list all.

    Returns:
      List of 2-tuples (key, count) in descending order.
    """
    # iteritems: this module targets Python 2 (see print statement above).
    results = [(k, v) for k, v in self._counter.iteritems()]
    if not top_n:
      top_n = len(results)
    return sorted(results, key=itemgetter(1), reverse=True)[:top_n]
6158e50e667f81e2d5e70166a7f7dc89a015b4a3 | 323 | py | Python | Sinavro/Types/BaseClass.py | pl-Steve28-lq/SinavroLang | 8f8344bf9d92ba5498f9bbf103fb2c80cde724d2 | [
"MIT"
] | 4 | 2020-12-24T10:11:18.000Z | 2021-09-09T01:24:05.000Z | Sinavro/Types/BaseClass.py | pl-Steve28-lq/SinavroLang | 8f8344bf9d92ba5498f9bbf103fb2c80cde724d2 | [
"MIT"
] | null | null | null | Sinavro/Types/BaseClass.py | pl-Steve28-lq/SinavroLang | 8f8344bf9d92ba5498f9bbf103fb2c80cde724d2 | [
"MIT"
] | null | null | null | class SinavroObject: pass
def init(self, val): self.value = val
gencls = lambda n: type(f'Sinavro{n.title()}', (SinavroObject,), {'__init__': init, 'type': n})
SinavroInt = gencls('int')
SinavroFloat = gencls('float')
SinavroString = gencls('string')
SinavroBool = gencls('bool')
SinavroArray = gencls('array')
| 29.363636 | 96 | 0.681115 | 25 | 0.077399 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.216718 |
615ad8eba732cef089d5695b78282f0eeaae7437 | 2,824 | py | Python | HUGS/Interface/_upload.py | hugs-cloud/hugs | 65aef97d662746f1382bd03b15f46a6647c7b7f5 | [
"Apache-2.0"
] | null | null | null | HUGS/Interface/_upload.py | hugs-cloud/hugs | 65aef97d662746f1382bd03b15f46a6647c7b7f5 | [
"Apache-2.0"
] | 5 | 2020-08-18T12:22:46.000Z | 2020-09-30T14:30:11.000Z | HUGS/Interface/_upload.py | hugs-cloud/hugs | 65aef97d662746f1382bd03b15f46a6647c7b7f5 | [
"Apache-2.0"
] | 2 | 2020-06-11T20:30:24.000Z | 2020-10-29T17:30:21.000Z | import tempfile
from pathlib import Path
import ipywidgets as widgets
from HUGS.Client import Process
from HUGS.Interface import Credentials
class Upload:
    """Small ipywidgets front-end for uploading data files to a HUGS service.

    Handles user login via Acquire credentials and hands uploaded files to
    the HUGS ``Process`` client for ingestion.
    """

    def __init__(self):
        self._credentials = Credentials()
        self._user = None
        # Fix: initialise so get_results() is safe to call before any
        # upload (previously raised AttributeError until a transfer ran).
        self._results = None

    def login(self):
        """Start the credentials login flow and return its result."""
        return self._credentials.login()

    def get_user(self):
        """Return the currently authenticated user object."""
        return self._credentials.get_user()

    def get_results(self):
        """Return the result of the last processed upload (None if none yet)."""
        return self._results

    def upload(self):
        """Build and return the upload UI (type picker, file picker, button)."""
        type_widget = widgets.Dropdown(
            options=["CRDS", "GC", "ICOS", "NOAA", "TB", "EUROCOM"],
            description="Data type:",
            disabled=False,
        )
        base_url = "https://hugs.acquire-aaai.com/t"
        upload_widget = widgets.FileUpload(multiple=False, label="Select")
        transfer_button = widgets.Button(
            description="Transfer",
            button_style="info",
            layout=widgets.Layout(width="10%"),
        )

        def do_upload(a):
            # Click handler; `status_text` is defined after this closure but
            # exists by the time the button can be clicked.
            if type_widget.value is False:
                status_text.value = (
                    "<font color='red'>Please select a data type</font>"
                )
                return
            user = self.get_user()
            data_type = type_widget.value
            if not user.is_logged_in():
                status_text.value = "<font color='red'>User not logged in</font>"
                return
            # Here we get the data as bytes, write it to a tmp directory so we can
            # process it using HUGS
            # TODO - better processing method? Allow HUGS to accept bytes?
            with tempfile.TemporaryDirectory() as tmpdir:
                file_content = upload_widget.value
                filename = list(file_content.keys())[0]
                data_bytes = file_content[filename]["content"]
                tmp_filepath = Path(tmpdir).joinpath(filename)
                with open(tmp_filepath, "wb") as f:
                    f.write(data_bytes)
                p = Process(service_url=base_url)
                result = p.process_files(
                    user=user, files=tmp_filepath, data_type=data_type
                )
                self._results = result
                # Upload the file to HUGS
                if result:
                    status_text.value = "<font color='green'>Upload successful</font>"
                else:
                    status_text.value = (
                        "<font color='red'>Unable to process file</font>"
                    )

        transfer_button.on_click(do_upload)
        data_hbox = widgets.HBox(children=[type_widget, upload_widget, transfer_button])
        status_text = widgets.HTML(
            value="<font color='#00BCD4'>Waiting for file</font>"
        )
        return widgets.VBox(children=[data_hbox, status_text])
| 31.032967 | 88 | 0.555949 | 2,678 | 0.9483 | 0 | 0 | 0 | 0 | 0 | 0 | 541 | 0.191572 |
615cff61d92d443e22f1204db917f9dba1c6f6b4 | 10,987 | py | Python | tools/run_tests/xds_k8s_test_driver/tests/url_map/metadata_filter_test.py | minerba/grpc | 775362a2cea21363339d73215e3b9a1394ad55b2 | [
"Apache-2.0"
] | null | null | null | tools/run_tests/xds_k8s_test_driver/tests/url_map/metadata_filter_test.py | minerba/grpc | 775362a2cea21363339d73215e3b9a1394ad55b2 | [
"Apache-2.0"
] | null | null | null | tools/run_tests/xds_k8s_test_driver/tests/url_map/metadata_filter_test.py | minerba/grpc | 775362a2cea21363339d73215e3b9a1394ad55b2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from typing import Tuple
from absl import flags
from absl.testing import absltest
from framework import xds_url_map_testcase
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
_NUM_RPCS = 150
_TEST_METADATA_KEY = 'xds_md'
_TEST_METADATA_VALUE_EMPTY = 'empty_ytpme'
_TEST_METADATA = ((RpcTypeEmptyCall, _TEST_METADATA_KEY,
_TEST_METADATA_VALUE_EMPTY),)
match_labels = [{
'name': 'TRAFFICDIRECTOR_NETWORK_NAME',
'value': 'default-vpc'
}]
not_match_labels = [{'name': 'fake', 'value': 'fail'}]
class TestMetadataFilterMatchAll(xds_url_map_testcase.XdsUrlMapTestCase):
    """The test url-map has two routeRules: the higher priority routes to
    the default backends, but is supposed to be filtered out by TD because
    of non-matching metadata filters. The lower priority routes to alternative
    backends and metadata filter matches. Thus, it verifies that TD evaluates
    metadata filters correctly."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        # Priority 0: MATCH_ALL over non-matching labels -> Traffic Director
        # must drop this rule from the generated route config.
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': not_match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Empty',
                'headerMatches': [{
                    'headerName': _TEST_METADATA_KEY,
                    'exactMatch': _TEST_METADATA_VALUE_EMPTY
                }],
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # Only the matching rule plus the catch-all route should survive.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
            "/grpc.testing.TestService/Empty")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['name'], _TEST_METADATA_KEY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['exactMatch'], _TEST_METADATA_VALUE_EMPTY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "")

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # All tagged EmptyCall RPCs must land on the alternative backends.
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeEmptyCall],
                                                   metadata=_TEST_METADATA,
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.empty_call_alternative_service_rpc_count)
class TestMetadataFilterMatchAny(xds_url_map_testcase.XdsUrlMapTestCase):
    """MATCH_ANY semantics: a rule whose filter labels contain at least one
    matching label is kept (priority 1 here), while a rule with only
    non-matching labels is filtered out by TD (priority 0)."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    'filterLabels': not_match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Unary',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    # Mixed labels: one non-matching + one matching is enough
                    # for MATCH_ANY.
                    'filterLabels': not_match_labels + match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
            "/grpc.testing.TestService/Unary")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "")

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeUnaryCall],
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.unary_call_alternative_service_rpc_count)
class TestMetadataFilterMatchAnyAndAll(xds_url_map_testcase.XdsUrlMapTestCase):
    """Mixed criteria: with the same mixed label set, MATCH_ALL (priority 0)
    must be filtered out (one label does not match), while MATCH_ANY
    (priority 1) must be kept (one label does match)."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': not_match_labels + match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Unary',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    'filterLabels': not_match_labels + match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 2)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['prefix'],
            "/grpc.testing.TestService/Unary")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "")

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeUnaryCall],
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.unary_call_alternative_service_rpc_count)
class TestMetadataFilterMatchMultipleRules(
        xds_url_map_testcase.XdsUrlMapTestCase):
    """Both rules carry matching metadata filters, so both must survive into
    the generated route config (3 routes total, including the catch-all)."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        path_matcher["routeRules"] = [{
            'priority': 0,
            'matchRules': [{
                'prefixMatch':
                    '/',
                'headerMatches': [{
                    'headerName': _TEST_METADATA_KEY,
                    'exactMatch': _TEST_METADATA_VALUE_EMPTY
                }],
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ANY',
                    'filterLabels': match_labels
                }]
            }],
            'service': GcpResourceManager().alternative_backend_service()
        }, {
            'priority': 1,
            'matchRules': [{
                'prefixMatch':
                    '/grpc.testing.TestService/Unary',
                'metadataFilters': [{
                    'filterMatchCriteria': 'MATCH_ALL',
                    'filterLabels': match_labels
                }]
            }],
            'service': GcpResourceManager().default_backend_service()
        }]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        # Both rules kept + catch-all route = 3 routes.
        self.assertNumEndpoints(xds_config, 2)
        self.assertEqual(len(xds_config.rds['virtualHosts'][0]['routes']), 3)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['name'], _TEST_METADATA_KEY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][0]['match']['headers']
            [0]['exactMatch'], _TEST_METADATA_VALUE_EMPTY)
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][1]['match']['prefix'],
            "/grpc.testing.TestService/Unary")
        self.assertEqual(
            xds_config.rds['virtualHosts'][0]['routes'][2]['match']['prefix'],
            "")

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        # Tagged EmptyCall traffic matches rule 0 -> alternative backends.
        rpc_distribution = self.configure_and_send(test_client,
                                                   rpc_types=[RpcTypeEmptyCall],
                                                   metadata=_TEST_METADATA,
                                                   num_rpcs=_NUM_RPCS)
        self.assertEqual(
            _NUM_RPCS,
            rpc_distribution.empty_call_alternative_service_rpc_count)
# Run via absl's test runner.
if __name__ == '__main__':
    absltest.main()
| 39.521583 | 80 | 0.576408 | 9,343 | 0.850369 | 0 | 0 | 4,403 | 0.400746 | 0 | 0 | 2,869 | 0.261127 |
615eb18c148fdcb7f52ffdef75ab23706867bb3b | 2,268 | py | Python | tmpFile.py | yingyulou/tmpFile | 557e74351a0412b70c0ef1179479920e86b31bc4 | [
"MIT"
] | 2 | 2019-06-11T10:30:35.000Z | 2020-08-27T07:05:41.000Z | tmpFile.py | yingyulou/tmpFile | 557e74351a0412b70c0ef1179479920e86b31bc4 | [
"MIT"
] | null | null | null | tmpFile.py | yingyulou/tmpFile | 557e74351a0412b70c0ef1179479920e86b31bc4 | [
"MIT"
] | 1 | 2019-03-17T02:52:21.000Z | 2019-03-17T02:52:21.000Z | #!/bin/env python
# coding=UTF-8
'''
DESCRIPTION
tmpFile
A module for creating temporary files and folders.
VERSION
1.4.0
LATEST UPDATE
2019.3.4
'''
# Import Python Lib
from os import remove, mkdir
from os.path import join, exists, abspath
from uuid import uuid4
from shutil import rmtree
################################################################################
# Tmp File
################################################################################
class tmpFile(object):
    '''
    DESCRIPTION
        Create a tmp file: the context manager yields a unique absolute
        file name; on exit the file (if the caller created it) is deleted.
    USAGE
        from tmpFile import tmpFile
        with tmpFile() as tmpFileName:
            with open(tmpFileName, 'w') as fo:
                ...
        # File: "tmpFileName" will be deleted automatically (if exist)
    ARGUMENT
        * ext = '', str
            The extension of the tempfile.
        * path = './', str
            The path of the tempfile.
    '''

    __slots__ = ('_target',)

    def __init__(self, ext = '', path = './'):
        # uuid4 makes the name unique; abspath pins it against later chdir.
        self._target = abspath(join(path, '%s%s' % (uuid4(), ext)))

    def __enter__(self):
        return self._target

    def __exit__(self, *exc_info):
        # Best-effort cleanup: only remove what actually exists.
        if exists(self._target):
            remove(self._target)
################################################################################
# Tmp Folder
################################################################################
class tmpFolder(object):
    '''
    DESCRIPTION
        Create a tmp folder: the context manager creates a uniquely named
        directory, yields its absolute path, and removes the whole tree on
        exit.
    USAGE
        from tmpFile import tmpFolder
        with tmpFolder() as tmpFolderPath:
            ...
        # Folder: "tmpFolderPath" will be deleted automatically (if exist)
    ARGUMENT
        * path = './', str
            The path of the tmp folder.
    '''

    __slots__ = ('_target',)

    def __init__(self, path = './'):
        # uuid4 makes the directory name unique under `path`.
        self._target = abspath(join(path, str(uuid4())))

    def __enter__(self):
        mkdir(self._target)
        return self._target

    def __exit__(self, *exc_info):
        # Recursive best-effort cleanup of everything created inside.
        if exists(self._target):
            rmtree(self._target)
615ee1b639ae928638b3d914af60405b9ed2a109 | 364 | py | Python | hw_asr/augmentations/wave_augmentations/__init__.py | isdevnull/asr_hw | 9650506b80d4e38574b63390f79a6f01786b7d18 | [
"MIT"
] | null | null | null | hw_asr/augmentations/wave_augmentations/__init__.py | isdevnull/asr_hw | 9650506b80d4e38574b63390f79a6f01786b7d18 | [
"MIT"
] | null | null | null | hw_asr/augmentations/wave_augmentations/__init__.py | isdevnull/asr_hw | 9650506b80d4e38574b63390f79a6f01786b7d18 | [
"MIT"
] | null | null | null | from hw_asr.augmentations.wave_augmentations.Gain import Gain
from hw_asr.augmentations.wave_augmentations.ImpulseResponse import ImpulseResponse
from hw_asr.augmentations.wave_augmentations.Noise import GaussianNoise
from hw_asr.augmentations.wave_augmentations.TimeStretch import TimeStretch
__all__ = [
"Gain",
"ImpulseResponse",
"GaussianNoise"
]
| 33.090909 | 83 | 0.837912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.104396 |
6160876a4005d2218239e0d5b0c8b1ee445778c9 | 2,408 | py | Python | python/ambassador/compile.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | 3,438 | 2017-04-23T23:10:18.000Z | 2021-06-02T10:11:45.000Z | python/ambassador/compile.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | 1,906 | 2017-04-11T17:47:54.000Z | 2021-06-02T14:20:11.000Z | python/ambassador/compile.py | Asher-Wang/ambassador | 393a52832d081e0d8d0e0ecd5a14cfe18c62b837 | [
"Apache-2.0"
] | 591 | 2017-04-17T17:50:08.000Z | 2021-06-01T08:20:34.000Z | # Copyright 2020 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from typing import Any, Dict, Optional, Union
import logging
from .cache import Cache
from .config import Config
from .ir import IR
from .ir.ir import IRFileChecker
from .envoy import EnvoyConfig
from .fetch import ResourceFetcher
from .utils import SecretHandler, NullSecretHandler, Timer
def Compile(logger: logging.Logger, input_text: str,
            cache: Optional[Cache]=None,
            file_checker: Optional[IRFileChecker]=None,
            secret_handler: Optional[SecretHandler]=None,
            k8s=False, envoy_version="V2") -> Dict[str, Union[IR, EnvoyConfig]]:
    """
    Compile is a helper function to take a bunch of YAML and compile it into an
    IR and an Envoy config.

    The output is a dictionary:

    {
        "ir": the IR data structure,
        envoy_version.lower() (e.g. "v2"): the generated EnvoyConfig
    }

    :param logger: logger for the fetcher and IR
    :param cache: optional Cache reused across compiles
    :param file_checker: optional predicate the IR uses to validate file paths
        (defaults to accepting every path)
    :param secret_handler: optional SecretHandler (a NullSecretHandler with a
        fake secret root is used if omitted)
    :param input_text: The input text (WATT snapshot JSON or K8s YAML per 'k8s')
    :param k8s: If true, input_text is K8s YAML, otherwise it's WATT snapshot JSON
    :param envoy_version: which Envoy config flavor to generate (e.g. "V2")
    """
    if not file_checker:
        file_checker = lambda path: True
    if not secret_handler:
        secret_handler = NullSecretHandler(logger, None, None, "fake")
    aconf = Config()
    fetcher = ResourceFetcher(logger, aconf)
    # Parse the snapshot in the caller-declared format.
    if k8s:
        fetcher.parse_yaml(input_text, k8s=True)
    else:
        fetcher.parse_watt(input_text)
    aconf.load_all(fetcher.sorted())
    ir = IR(aconf, cache=cache, file_checker=file_checker, secret_handler=secret_handler)
    out: Dict[str, Union[IR, EnvoyConfig]] = { "ir": ir }
    # Only generate the Envoy config when the IR built successfully.
    if ir:
        out[envoy_version.lower()] = EnvoyConfig.generate(ir, envoy_version.upper(), cache=cache)
    return out
| 31.272727 | 97 | 0.703904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,161 | 0.482143 |