Columns:
blob_id: stringlengths 40 to 40
directory_id: stringlengths 40 to 40
path: stringlengths 2 to 616
content_id: stringlengths 40 to 40
detected_licenses: listlengths 0 to 69
license_type: stringclasses, 2 values
repo_name: stringlengths 5 to 118
snapshot_id: stringlengths 40 to 40
revision_id: stringlengths 40 to 40
branch_name: stringlengths 4 to 63
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64, 2.91k to 686M, nullable (⌀)
star_events_count: int64, 0 to 209k
fork_events_count: int64, 0 to 110k
gha_license_id: stringclasses, 23 values
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: stringclasses, 213 values
src_encoding: stringclasses, 30 values
language: stringclasses, 1 value
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 2 to 10.3M
extension: stringclasses, 246 values
content: stringlengths 2 to 10.3M
authors: listlengths 1 to 1
author_id: stringlengths 0 to 212
75c0615c374382484f2465bf88eac27a1919acd2
|
31d10be1bd37fe037f36134b80a089f746086e69
|
/C_CaptchaBinaryzation.py
|
24dbb0ac55279ac72832cfee3525954ebda2aa3e
|
[] |
no_license
|
ipxplay/CaptchaRecognition
|
de4d9411f62862a4ab4b30cc2403b3a55fc35916
|
a283a69da39121261de1103da311e1288960dbc5
|
refs/heads/master
| 2020-05-16T04:16:48.937682
| 2019-04-20T03:56:20
| 2019-04-20T03:56:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
from PIL import Image
import os
def binaryzation(image):
    threshold = 128  # threshold in 0-255; only pixels at least this bright become white
width = image.size[0]
height = image.size[1]
image_array = image.load()
for x in range(width):
for y in range(height):
            if image_array[x, y] >= threshold:
                image_array[x, y] = 255  # set to white
            else:
                image_array[x, y] = 0  # set to black
return image
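# A minimal alternative sketch (helper name hypothetical, not part of the
# original file): the same thresholding via PIL's point() lookup, which runs
# in C instead of a per-pixel Python loop, and forces the 8-bit grayscale
# ("L") mode that the loop above implicitly assumes.
def binaryzation_point(image, threshold=128):
    return image.convert('L').point(lambda p: 255 if p >= threshold else 0)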
if __name__ == '__main__':
path = os.getcwd() + r'\TrainSet_fanfou'
task_path = path + r'\GrayScale'
save_folder = path + r'\Binaryzation'
if not os.path.isdir(save_folder):
os.makedirs(save_folder)
for filename in os.listdir(task_path):
if filename.find('_grayscale') >= 0:
image = Image.open(task_path + '\\' + filename)
            print('Binarizing ' + filename + '..')
image = binaryzation(image)
bin_filename = filename.replace('_grayscale.jpg', '_binary.bmp')
image.save(save_folder + '\\' + bin_filename)
|
[
"noreply@github.com"
] |
ipxplay.noreply@github.com
|
f7863b634ba4afdfcda2da57856af0e4cb9c2f82
|
57b423223a77b713db18e62a0f936edde768ba0e
|
/Ex_PygalDemo/dice_visual.py
|
8bc879f72bcee5a665a945a240973a6e1db47a74
|
[] |
no_license
|
beenlyons/dataAnalysis
|
980ff2b1a3d592ec13c0657e2087e58da9e04ac1
|
abd2f1f0474edb8d9f5e509002a4030acef960a1
|
refs/heads/master
| 2020-03-17T19:59:38.912245
| 2018-05-18T01:44:37
| 2018-05-18T01:44:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
from Ex_PygalDemo.die import Die
import pygal
# Create two dice
die_1 = Die()
die_2 = Die()
# Roll the dice many times and store each result in a list
results = []
for roll_num in range(1000):
result = die_1.roll() + die_2.roll()
results.append(result)
# Analyze the results
frequencies = []
max_result = die_2.num_sides + die_1.num_sides
for value in range(2, max_result+1):
frequency = results.count(value)
frequencies.append(frequency)
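# Equivalence check (a sketch, not in the original script): the counting loop
# above is a collections.Counter lookup; the assertion verifies it rather than
# replacing it.
from collections import Counter
assert frequencies == [Counter(results)[v] for v in range(2, max_result + 1)]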
# Visualize the results
hist = pygal.Bar()
hist.title = "Results of rolling two D6 dice 1000 times"
hist.x_labels = [str(i) for i in range(2, 13)]
hist.x_title = "Results"
hist.y_title = "Frequency of Result"
hist.add("D6 + D6", frequencies)
hist.render_to_file("dice_visual.svg")
|
[
"1335682725@qq.com"
] |
1335682725@qq.com
|
c2a7984c672209a8770e637e54b2f6cb06104785
|
428d315f31fe04f12235244f796e7cc6140e4680
|
/static/tiles/createtiles.py
|
469917191aedaadb7d686f8ea96e9b56449809f9
|
[] |
no_license
|
peterbe/gaffwall
|
a0233a2a9bab7f32734083a4c166985ff8d129b5
|
68f40c0c67db65fd86cf746b1746fc7dd1c4a81c
|
refs/heads/master
| 2021-01-01T15:59:57.860717
| 2011-01-06T01:13:20
| 2011-01-06T01:13:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
#!/usr/bin/env python
import logging
import os
import shutil
import sys
from PIL import Image
LOG_FILENAME = 'tiler.log'
logging.basicConfig(filename=LOG_FILENAME,
level=logging.DEBUG)
def generate_tiles(base_image, size, zoom, point=None, save_directory=None):
assert zoom >= 1
img = Image.open(base_image)
if not os.path.isdir(str(size)):
os.mkdir(str(size))
if save_directory is None:
save_directory = os.path.dirname(base_image)
if not os.path.isdir(os.path.join(save_directory, '256/%s' % zoom)):
os.mkdir(os.path.join(save_directory, '256/%s' % zoom))
width = size * 2 ** zoom
if zoom >= 6 and not point:
raise IOError("Too slow. Doing all of this will be too slow")
    img = img.resize((width, width), Image.LANCZOS)  # LANCZOS == 1; the original passed the bare truthy value True here
if point:
x, y = point
assert x in range(2 ** zoom) and y in range(2 ** zoom)
yield _crop_and_save(save_directory, img, size, zoom, x, y)
else:
for i in range(2** zoom):
for j in range(2 ** zoom):
#yield (i*size, j*size), (size+i*size, size+j*size)
yield _crop_and_save(save_directory, img, size, zoom, i, j)
def _crop_and_save(save_directory, img, size, zoom, x, y):
region = img.crop((x*size, y*size, size+x*size, size+y*size))
filename = os.path.join(save_directory,
'%s/%s/%s,%s.png' % (size, zoom, x, y))
region.save(filename, img.format)
return filename
def run(*args):
args = list(args)
if '--stdout' in args:
stdout = True
args.remove('--stdout')
else:
stdout = False
base_directory = args[0]
if not os.path.isdir(base_directory):
raise OSError("No directory called %s" % base_directory)
zoom = int(args[1])
try:
(x, y) = [int(e) for e in args[2:]]
point = (x, y)
except ValueError:
point = None
logging.info("zoom=%r, point=%r" %(zoom, point))
base_image = os.path.join(base_directory, 'worldmap.png')
for filename in generate_tiles(base_image, 256, zoom, point):
logging.info("Created: %s" % str(filename))
        if stdout:
            sys.stdout.buffer.write(open(filename, 'rb').read())
        else:
            print(filename)
return 0
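# Example invocation (a sketch; the directory name is hypothetical): given a
# folder "world" containing worldmap.png,
#
#   python createtiles.py world 3
#
# resizes the map to 256 * 2**3 = 2048 pixels square and writes
# 2**3 x 2**3 = 64 tiles of 256x256 pixels each.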
if __name__ == '__main__':
import sys
sys.exit(run(*sys.argv[1:]))
|
[
"peter@fry-it.com"
] |
peter@fry-it.com
|
c9255d6daf5ede13987b0e13b69a1815a0a1978c
|
4912c8be7225cfe36cbb9c1511b7e472749b4269
|
/tests/core/middleware/test_middleware_result.py
|
5ae7a461f2e53788628c455583e7c09417cd0420
|
[
"MIT"
] |
permissive
|
nariman/concord
|
7f572c4de2800137ba26b93ef96de5b8f07104e1
|
f7e9f9d05288b41a4725bffd6636bffd2d774397
|
refs/heads/dev
| 2023-04-09T12:48:55.506884
| 2018-11-18T13:18:14
| 2018-11-18T13:18:14
| 95,794,226
| 0
| 0
|
MIT
| 2021-04-29T19:12:17
| 2017-06-29T15:54:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
"""
The MIT License (MIT)
Copyright (c) 2017-2018 Nariman Safiulin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from concord.middleware import MiddlewareResult, is_successful_result
@pytest.mark.parametrize(
"value",
[MiddlewareResult.OK, MiddlewareResult.IGNORE, None, True, False, 42, "42"],
)
def test_successful_result_helper(value):
if value == MiddlewareResult.IGNORE:
assert is_successful_result(value) is False
else:
assert is_successful_result(value) is True
|
[
"woofilee@gmail.com"
] |
woofilee@gmail.com
|
ac30cf6be4ee828b04e5e3c94a1dde35657467a6
|
3364e2d0884c69dae183f3872ab70b4ee47ee18e
|
/blog/urls.py
|
6a1b63c01808155fbc352d4e01b65dc2b089ea80
|
[] |
no_license
|
MichaelLiaoFeng/sae-blog
|
d874682a856a4656586ab7d0623fd5ca79b092ab
|
6533dd531c4b7a093fbaa9dc8bae61c6c4b2d535
|
refs/heads/master
| 2020-05-17T19:22:21.596183
| 2014-04-26T14:38:39
| 2014-04-26T14:38:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from django.conf import settings
from filebrowser.sites import site
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/filebrowser/', include(site.urls)),
(r'^grappelli/', include('grappelli.urls')),
url(r'^', include('dpress.urls')),
# Examples:
#url('^$', 'demoapp.views.index', name='idx'),
#url(r'^demo/', include('demoapp.urls')),
)
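# NOTE: patterns() and django.conf.urls.defaults are pre-Django-1.8 idioms and
# were removed in later releases; on modern Django, urlpatterns is a plain
# list of url()/path() entries.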
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"name.michael.l@gmail.com"
] |
name.michael.l@gmail.com
|
1286cc4f1ea5a639c41dd026c0ced84a440dc88f
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1237-141/sdB_PG_1237-141_lc.py
|
d2479ff4825fb387622fe336fbb0056cf6d66a68
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[189.985708,-14.413278], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1237-141 /sdB_PG_1237-141_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
a5983923a9a99c6545300d6f0651e7501d64ed6d
|
f95a6085e9d8247637f8cac8436657b46b7f888b
|
/Unit3/assignment4.py
|
4d199c5e46e602c9e108e7ab2d3502d2279939b8
|
[] |
no_license
|
vmayoral/cs373-code
|
4b65ca7fe39ec2c2d06dbf0ac906e09ed8b7bab2
|
3a65b9b38b7ff7112050cb1a49eea64696110941
|
refs/heads/master
| 2020-04-08T08:33:03.894225
| 2012-03-26T21:40:59
| 2012-03-26T21:40:59
| 3,585,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,703
|
py
|
# --------------
# USER INSTRUCTIONS
#
# Write a function in the class robot called move()
#
# that takes self and a motion vector (this
# motion vector contains a steering* angle and a
# distance) as input and returns an instance of the class
# robot with the appropriate x, y, and orientation
# for the given motion.
#
# *steering is defined in the video
# which accompanies this problem.
#
# For now, please do NOT add noise to your move function.
#
# Please do not modify anything except where indicated
# below.
#
# There are test cases which you are free to use at the
# bottom. If you uncomment them for testing, make sure you
# re-comment them before you submit.
from math import *
import random
# --------
#
# the "world" has 4 landmarks.
# the robot's initial coordinates are somewhere in the square
# represented by the landmarks.
#
# NOTE: Landmark coordinates are given in (y, x) form and NOT
# in the traditional (x, y) format!
landmarks = [[0.0, 100.0], [0.0, 0.0], [100.0, 0.0], [100.0, 100.0]] # position of 4 landmarks
world_size = 100.0 # world is NOT cyclic. Robot is allowed to travel "out of bounds"
max_steering_angle = pi/4 # You don't need to use this value, but it is good to keep in mind the limitations of a real car.
# ------------------------------------------------
#
# this is the robot class
#
class robot:
# --------
# init:
# creates robot and initializes location/orientation
#
def __init__(self, length = 10.0):
self.x = random.random() * world_size # initial x position
self.y = random.random() * world_size # initial y position
self.orientation = random.random() * 2.0 * pi # initial orientation
self.length = length # length of robot
self.bearing_noise = 0.0 # initialize bearing noise to zero
self.steering_noise = 0.0 # initialize steering noise to zero
self.distance_noise = 0.0 # initialize distance noise to zero
def __repr__(self):
return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
# --------
# set:
# sets a robot coordinate
#
def set(self, new_x, new_y, new_orientation):
if new_orientation < 0 or new_orientation >= 2 * pi:
            raise ValueError('Orientation must be in [0..2pi]')
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation)
# --------
# set_noise:
# sets the noise parameters
#
def set_noise(self, new_b_noise, new_s_noise, new_d_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.bearing_noise = float(new_b_noise)
self.steering_noise = float(new_s_noise)
self.distance_noise = float(new_d_noise)
############# ONLY ADD/MODIFY CODE BELOW HERE ###################
# --------
# move:
# move along a section of a circular path according to motion
#
def move(self, motion): # Do not change the name of this function
alpha = motion[0]
d = motion[1]
if alpha > max_steering_angle:
            raise ValueError('Car cannot turn with angle greater than pi/4')
if(alpha == 0.0):
beta = 0.0
else:
beta = (d/self.length)*tan(alpha)
R = d/beta
if(abs(beta) < 0.001):
# straight movement
x = self.x + d*cos(self.orientation)
y = self.y + d*sin(self.orientation)
thetanew = (self.orientation + beta) % (2*pi)
else:
CX = self.x - sin(self.orientation)*R
CY = self.y + cos(self.orientation)*R
x = CX + sin(self.orientation + beta)*R
y = CY - cos(self.orientation + beta)*R
thetanew = (self.orientation + beta) % (2*pi)
result = robot(self.length)
result.set(x,y,thetanew)
result.set_noise(0.0,0.0,0.0)
return result # make sure your move function returns an instance
# of the robot class with the correct coordinates.
############## ONLY ADD/MODIFY CODE ABOVE HERE ####################
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
## --------
## TEST CASE:
##
## 1) The following code should print:
## Robot: [x=0.0 y=0.0 orient=0.0]
## Robot: [x=10.0 y=0.0 orient=0.0]
## Robot: [x=19.861 y=1.4333 orient=0.2886]
## Robot: [x=39.034 y=7.1270 orient=0.2886]
##
##
##length = 20.
##bearing_noise = 0.0
##steering_noise = 0.0
##distance_noise = 0.0
##
##myrobot = robot(length)
##myrobot.set(0.0, 0.0, 0.0)
##myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
##
##motions = [[0.0, 10.0], [pi / 6.0, 10], [0.0, 20.0]]
##
##T = len(motions)
##
##print 'Robot: ', myrobot
##for t in range(T):
## myrobot = myrobot.move(motions[t])
## print 'Robot: ', myrobot
##
##
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
## 2) The following code should print:
## Robot: [x=0.0 y=0.0 orient=0.0]
## Robot: [x=9.9828 y=0.5063 orient=0.1013]
## Robot: [x=19.863 y=2.0201 orient=0.2027]
## Robot: [x=29.539 y=4.5259 orient=0.3040]
## Robot: [x=38.913 y=7.9979 orient=0.4054]
## Robot: [x=47.887 y=12.400 orient=0.5067]
## Robot: [x=56.369 y=17.688 orient=0.6081]
## Robot: [x=64.273 y=23.807 orient=0.7094]
## Robot: [x=71.517 y=30.695 orient=0.8108]
## Robot: [x=78.027 y=38.280 orient=0.9121]
## Robot: [x=83.736 y=46.485 orient=1.0135]
##
##
##length = 20.
##bearing_noise = 0.0
##steering_noise = 0.0
##distance_noise = 0.0
##
##myrobot = robot(length)
##myrobot.set(0.0, 0.0, 0.0)
##myrobot.set_noise(bearing_noise, steering_noise, distance_noise)
##
##motions = [[0.2, 10.] for row in range(10)]
##
##T = len(motions)
##
##print 'Robot: ', myrobot
##for t in range(T):
## myrobot = myrobot.move(motions[t])
## print 'Robot: ', myrobot
## IMPORTANT: You may uncomment the test cases below to test your code.
## But when you submit this code, your test cases MUST be commented
## out. Our testing program provides its own code for testing your
## move function with randomized motion data.
|
[
"v.mayoralv@gmail.com"
] |
v.mayoralv@gmail.com
|
6084e98698ef23eaf775a72086eaeb3ba1528eed
|
5477c7ed0e67c1fa973d05adae4ae2369511d79e
|
/sqs_consumer/testing.py
|
5b7a2e5dade8fdb439d4dd6a9eff2a10007318ef
|
[
"BSD-3-Clause"
] |
permissive
|
gjo/sqs_consumer
|
635d1a804bf662be46271b2430180431e9c63acd
|
8a5f2ad11b293297a37840140161fcb105bcd620
|
refs/heads/master
| 2021-07-08T22:07:01.379422
| 2016-11-08T11:16:36
| 2016-11-08T11:16:36
| 33,021,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,686
|
py
|
# -*- coding: utf-8 -*-
import logging
from mock import Mock
from zope.interface import classImplements
from .interfaces import IApplication, IMessage, ITransport, IWorker
logger = logging.getLogger(__name__)
# http://programmaticallyspeaking.com/mocking-zope-interfaces.html
def _iface_mock(iface):
"""
:type iface: zope.interface.Interface
:rtype: Mock
"""
def init(self, *args, **kwargs):
Mock.__init__(self, spec=list(iface.names()), *args, **kwargs)
name = iface.__name__ + 'Mock'
cls = type(name, (Mock,), {'__init__': init})
classImplements(cls, iface)
# globals()[name] = cls
return cls # for IDE support
IApplicationMock = _iface_mock(IApplication)
IMessageMock = _iface_mock(IMessage)
ITransportMock = _iface_mock(ITransport)
IWorkerMock = _iface_mock(IWorker)
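# Usage sketch (not in the original module): instances of the generated
# classes behave like Mock while also passing zope.interface provider checks.
assert ITransport.providedBy(ITransportMock())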
def dummy_sqs_message():
"""
:rtype: dict[bytes, bytes]
"""
return {
'MessageId': '__dummy_id__',
'Body': '__dummy_body__',
'ReceiptHandle': '__dummy_receipt_handle__',
}
def sqs_client_mock():
"""
:rtype: Mock
"""
client = Mock(spec=['delete_message', 'get_queue_url', 'receive_message'])
client.get_queue_url.return_value = {'QueueUrl': '__dummy_url__'}
return client
def transport_mock_factory(config, prefix='transport.mock.'):
"""
:type config: dict
:type prefix: str
:rtype: ITransport
"""
transport = ITransportMock()
return transport
def worker_mock_factory(app, config, prefix='worker.mock.'):
"""
:type app: callable
:type config: dict
:type prefix: str
:rtype: IWorker
"""
worker = IWorkerMock()
return worker
|
[
"gjo.ext@gmail.com"
] |
gjo.ext@gmail.com
|
1b25e140876b304583f62bf4f1a799e1726b29b5
|
339c61a88691ebad76d818673297967a52d6b8b8
|
/utils.py
|
a7c199441bf9775d71899e92b00ecf222a5a4f38
|
[
"MIT"
] |
permissive
|
RyanWhitell/Deep-Learning-for-Music-Recommendation
|
269dc36e3807a965695ff09f2d0d963fea888e8e
|
019feb61896ca1d5ab87e71910f3f31fcb6b08fe
|
refs/heads/master
| 2020-04-26T19:43:59.623766
| 2020-02-04T19:16:28
| 2020-02-04T19:16:28
| 173,784,848
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,472
|
py
|
import os
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
import itertools
import keras
from keras.models import Sequential, Model
from keras.layers import Conv2D
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
import math
def convert_labels(y, class_map):
inv_map = {v: k for k, v in class_map.items()}
labels = []
for x in y:
labels.append(inv_map[x])
return np.array(labels)
def get_metrics(logits, predicted, y_true_oh, y_true, class_map, show_cm=True, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    # `show_cm` was originally named `plot_cm`, which shadowed the plot_cm()
    # function below and made the call at the bottom of this function a TypeError.
genres_never = set(np.unique(y_true)) - set(np.unique(predicted))
print(' {} out of {} genres were never predicted: {}'.format(len(genres_never), len(np.unique(y_true)), genres_never))
loss = metrics.log_loss(y_true_oh, logits)
print(' Mean log loss: {:.7f}'.format(loss))
acc = sum(predicted == y_true) / len(predicted)
print(' Accuracy: {:.2%}'.format(acc))
f1 = metrics.f1_score(y_true, predicted, average='micro')
print(' Micro F1 score: {:.2%}'.format(f1))
f1 = metrics.f1_score(y_true, predicted, average='weighted')
print(' Weighted F1 score: {:.2%}'.format(f1))
    if show_cm:
        plot_cm(predicted, y_true, class_map, figsize=(15, 10), normalize=normalize, title=title, cmap=cmap)
def plot_cm(predicted, y_true, class_map, figsize=(15,10), normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
y_true = convert_labels(y_true, class_map)
predicted = convert_labels(predicted, class_map)
cm = metrics.confusion_matrix(y_true, predicted, labels=list(class_map.keys()))
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.rcParams['figure.figsize'] = figsize
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(class_map.keys()))
plt.xticks(tick_marks, class_map.keys(), rotation=45)
plt.yticks(tick_marks, class_map.keys())
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
def get_conv_output(out_channels, kh, kw, sh, sw, ih, iw, ic, padding='VALID', framework='KERAS'):
if (framework == 'KERAS'):
model = Sequential()
model.add(Conv2D(out_channels, kernel_size=(kh,kw), strides=(sh,sw), input_shape=(ih, iw, ic), padding=padding, name='conv'))
out_h = model.get_layer('conv').output_shape[1]
out_w = model.get_layer('conv').output_shape[2]
out_c = model.get_layer('conv').output_shape[3]
print(out_h, out_w, out_c)
if (framework == 'TORCH'):
if (padding == 'VALID'):
ph, pw = 0, 0
if (padding == 'SAME'):
if (kh % 2 == 0):
ph = kh//2
else:
ph = math.ceil(kh//2)
if (kw % 2 == 0):
pw = kw//2
else:
pw = math.ceil(kw//2)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(ic, out_channels, kernel_size=(kh,kw), stride=(sh,sw), padding=(ph,pw), bias=False)
def forward(self, x):
return self.conv1(x)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # PyTorch v0.4.0
model = Net().to(device)
summary(model, (ic, ih, iw))
def _list_data_files(self, filepath):
    # NOTE: the original file is truncated here; this def line is a
    # reconstruction (name and signature are assumptions inferred from the
    # body, which uses both `filepath` and `self.FILES_FAULTY`) so that the
    # orphaned block below parses.
    names = os.listdir(filepath)
    names.remove('README.txt')
    names.remove('checksums')
    files = []
    for name in names:
        i_names = os.listdir(filepath + f'/{name}/')
        for n in i_names:
            if int(n[:6]) in self.FILES_FAULTY:
                continue
            files.append(filepath + f'/{name}/{n}')
    return np.asarray(files)
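# Usage sketch (not in the original file): compare the two frameworks' output
# shapes for a 3x3, stride-2 convolution over a 128x128 RGB input.
#
#   get_conv_output(16, 3, 3, 2, 2, 128, 128, 3, padding='SAME', framework='KERAS')
#   get_conv_output(16, 3, 3, 2, 2, 128, 128, 3, padding='SAME', framework='TORCH')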
|
[
"ryanwhitell@hotmail.com"
] |
ryanwhitell@hotmail.com
|
b9197d8540c36bc11b798d2530940cd59de288f0
|
9340bb810b996c6296078200ba4320aad34b297a
|
/runtests.py
|
45cc48368c5bd4bdb2b8a53510a939cb14c27b75
|
[
"BSD-2-Clause"
] |
permissive
|
resmio/django-sendgrid
|
fa2a66b6df676549d07174b95316c9a254b10b19
|
4abbc6a5d426000b6e546be1d934b69e97ab3577
|
refs/heads/master
| 2022-02-01T01:49:00.154453
| 2022-01-26T07:57:48
| 2022-01-26T07:57:48
| 20,224,087
| 8
| 11
|
BSD-2-Clause
| 2022-01-21T07:14:10
| 2014-05-27T15:01:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
#!/usr/bin/env python
import sys
import os
import django
from django.conf import settings
if not settings.configured:
# Choose database for settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sendgrid',
'USER': 'root',
})
elif test_db == 'postgres':
DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sendgrid',
'OPTIONS': {
'autocommit': True,
}
})
settings.configure(
DATABASES=DATABASES,
INSTALLED_APPS=(
'django.contrib.contenttypes',
'sendgrid',
),
SITE_ID=1,
SECRET_KEY='this-is-just-for-tests-so-not-that-secret',
ROOT_URLCONF='sendgrid.urls',
TIME_ZONE='UTC', # so we can switch USE_TZ on and off in-flight with postgres
MIDDLEWARE_CLASSES=('django.middleware.csrf.CsrfViewMiddleware', )
)
from django.test.utils import get_runner
def run_tests():
if hasattr(django, 'setup'):
django.setup()
apps = sys.argv[1:] or ['sendgrid', ]
test_runner = get_runner(settings)
test_runner = test_runner(verbosity=1, interactive=True, failfast=False)
failures = test_runner.run_tests(apps)
sys.exit(failures)
if __name__ == '__main__':
run_tests()
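# Usage sketch: `python runtests.py` runs against the in-memory sqlite default;
# set DB=mysql or DB=postgres in the environment to switch backends, and pass
# app labels as arguments to limit the run (defaults to 'sendgrid').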
|
[
"jann.kleen@freshx.de"
] |
jann.kleen@freshx.de
|
3fa2ada0f15c38d61cdd038610a73009669e0897
|
ffb72f901652b05b7afa6ef6861ce75f819b38be
|
/spark_work_1_28.py
|
1149d07039f0b1d7c6f3d8103f64f7356f7bce05
|
[] |
no_license
|
enaj1125/Insight_project_2017
|
f8080d7de0f9c1c079a5924e3e34e21ee3752d2c
|
ffcd99cce26ae5b6ab35d06e54d784e103c647ac
|
refs/heads/master
| 2021-01-11T16:15:14.000425
| 2017-02-09T00:23:40
| 2017-02-09T00:23:40
| 80,049,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
import pyspark
import sys
import json
from pyspark import SparkConf, SparkContext
from elasticsearch import Elasticsearch
conf = SparkConf().setAppName("YanJ_app").setMaster("spark://ip-172-31-1-12:7077")
sc = SparkContext(conf = conf)
# Input files
textFile = sc.textFile("s3n://timo-twitter-data/2015/05/01/00/30.json")
# Map reduce
def map_func(line):
each_line = json.loads(line)
es = Elasticsearch(['ip-172-31-1-8'], http_auth=('elastic', 'changeme'), verify_certs=False)
if "created_at" in each_line:
#ret = (each_line['id'], each_line['user']['id'], each_line['timestamp_ms'], each_line['text'] )
if 'user' in each_line:
doc = {'usr_id': each_line['user']['id'], 'ttext': each_line['text'], 'time': each_line['timestamp_ms']}
# es.index(index='test_3', doc_type='inputs', body=doc)
if 'geo' in each_line:
            print(each_line['geo'])
else:
if 'user' in each_line:
doc = {'usr_id': each_line['user']['id'], 'ttext': ''}
# es.index(index='test_3', doc_type='inputs', body=doc)
def reduce_func(a, b):
return a + b
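# NOTE: map_func has no return statement, so the RDD saved below consists of
# None entries; the commented-out `ret = ...` line above shows the intended
# payload. reduce_func is defined but never used, and the Elasticsearch client
# is constructed once per input line, which is costly at scale.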
counts = textFile.map(map_func)
counts.saveAsTextFile("/tmp/result1")
print "----------------------------------------------"
|
[
"noreply@github.com"
] |
enaj1125.noreply@github.com
|
7f74516731d4590aaa29094755644a564edeece0
|
e167965757b28e56fb9c09edf508e83b87b64535
|
/tests/test_schema.py
|
012011812727005fbd0cce15ae342ca308f6e0fa
|
[
"MIT"
] |
permissive
|
pOctav/starlette-jsonapi
|
b284a315a9a7ff1e2053b4cc71bf6867f51b3a55
|
bec6a1fb5f19f1a6d48174aba1e7999ebd7670e5
|
refs/heads/master
| 2022-12-08T13:32:28.695724
| 2020-07-18T14:59:45
| 2020-07-18T14:59:45
| 280,895,286
| 0
| 0
| null | 2020-07-19T15:33:21
| 2020-07-19T15:33:20
| null |
UTF-8
|
Python
| false
| false
| 3,349
|
py
|
import pytest
from marshmallow_jsonapi import fields
from starlette.applications import Starlette
from starlette_jsonapi.resource import BaseResource
from starlette_jsonapi.schema import JSONAPISchema
def test_schema_urls(app: Starlette):
class TResource(BaseResource):
type_ = 'test-resource'
TResource.register_routes(app, '/')
class TSchema(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_route = 'test-resource:get'
self_route_kwargs = {'id': '<id>'}
self_route_many = 'test-resource:get_all'
rv = TSchema().dump(dict(id='foo', name='foo-name'))
assert rv == {
'data': {
'id': 'foo',
'type': 'test-resource',
'attributes': {
'name': 'foo-name',
}
}
}
rv = TSchema(app=app).dump(dict(id='foo', name='foo-name'))
assert rv == {
'data': {
'id': 'foo',
'type': 'test-resource',
'attributes': {
'name': 'foo-name',
},
'links': {
'self': '/test-resource/foo',
},
},
'links': {
'self': '/test-resource/foo',
},
}
def test_schema_raises_wrong_meta_parameters():
with pytest.raises(ValueError) as exc:
class TSchema(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_url = 'foo'
assert str(exc.value) == 'Use `self_route` instead of `self_url` when using the Starlette extension.'
with pytest.raises(ValueError) as exc:
class TSchema2(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_url_kwargs = 'foo'
assert str(exc.value) == 'Use `self_route_kwargs` instead of `self_url_kwargs` when using the Starlette extension.'
with pytest.raises(ValueError) as exc:
class TSchema3(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_url_many = 'foo'
assert str(exc.value) == 'Use `self_route_many` instead of `self_url_many` when using the Starlette extension.'
with pytest.raises(ValueError) as exc:
class TSchema4(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_route_kwargs = 'foo'
assert str(exc.value) == 'Must specify `self_route` Meta option when `self_route_kwargs` is specified.'
def test_schema_excludes_unknown():
class TSchema(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
d = TSchema().loads('{"data": {"type": "test-resource", "id": "foo", "attributes": {"unknown": "bar"}}}')
assert d == {}
d = TSchema().loads('{"data": {"type": "test-resource", "id": "foo", "attributes": {"name": "bar"}, "unknown": 1}}')
assert d == {'name': 'bar'}
|
[
"vladstefanmunteanu@gmail.com"
] |
vladstefanmunteanu@gmail.com
|
547e254f2d69e00cdc87d6d3926109a7cae6eaea
|
e481d33cd77bd51d7e489e49415a04c860a4099b
|
/0x08-python-more_classes/6-rectangle.py
|
9c1879b41a7e6553c462d7a19d6e9daedc8556cc
|
[] |
no_license
|
ksualcode/holbertonschool-higher_level_programming
|
ece13504176a6f95a34e000043a9cd63a832b398
|
b151454adcd3b3e182bafe94a6ab1580a5630d5c
|
refs/heads/main
| 2023-07-27T04:34:05.428218
| 2021-09-14T20:42:20
| 2021-09-14T20:42:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
#!/usr/bin/python3
" Module that creates a rectangle "
class Rectangle:
"""
Creates a rectangle class
"""
number_of_instances = 0
def __init__(self, width=0, height=0):
""" Constructor method """
self.width = width
self.height = height
Rectangle.number_of_instances += 1
def __del__(self):
""" Delete method """
print("Bye rectangle...")
Rectangle.number_of_instances -= 1
@property
def height(self):
""" Getter of height """
return self.__height
@height.setter
def height(self, value):
""" Setter of height """
if type(value) is not int:
raise TypeError("height must be an integer")
if value < 0:
raise ValueError("width must be >= 0")
self.__height = value
@property
def width(self):
""" Getter of width """
return self.__width
@width.setter
def width(self, value):
""" Setter of width """
if type(value) is not int:
raise TypeError("width must be an integer")
if value < 0:
raise ValueError("width must be >= 0")
self.__width = value
def area(self):
""" Calculates the area of a rectangle """
return self.__width * self.__height
def perimeter(self):
""" Calculates the perimeter of a rectangle """
if self.__width == 0 or self.__height == 0:
return 0
return 2 * (self.__width + self.__height)
def __str__(self):
""" Prints a rectangle """
square = ""
if self.__width == 0 or self.__height == 0:
return square
for i in range(self.height):
square += "#" * self.width
if i != self.height - 1:
square += "\n"
return square
def __repr__(self):
""" Returns a string representation """
return ("Rectangle({}, {})".format(self.__width, self.__height))
|
[
"2758@holbertonschool.com"
] |
2758@holbertonschool.com
|
43720931ad61ccde99c8de7cd7a620bdb65d9248
|
e05f8d36c70336a8714cc260c02fe85ecee2e62e
|
/subject/cmd/api.py
|
7de04f2ed78f5b7f5b7b8f1e897949b0bc88a7ae
|
[
"Apache-2.0"
] |
permissive
|
laoyigrace/subject
|
eafa442b5d9ebf83c78a01ce3bb5d088d08d620d
|
e6ed989fdc250917a19788112b22322b73b3550f
|
refs/heads/master
| 2021-01-11T00:06:54.790751
| 2016-10-24T02:13:32
| 2016-10-24T02:13:32
| 70,754,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,253
|
py
|
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance API Server
"""
import os
import sys
import eventlet
from oslo_utils import encodeutils
# Monkey patch socket, time, select, threads
eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
select=True, thread=True, os=True)
# If ../subject/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'subject', '__init__.py')):
sys.path.insert(0, possible_topdir)
import subject_store
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
import osprofiler.notifier
import osprofiler.web
from subject.common import config
from subject.common import exception
from subject.common import wsgi
from subject import notifier
CONF = cfg.CONF
CONF.import_group("profiler", "subject.common.wsgi")
logging.register_options(CONF)
KNOWN_EXCEPTIONS = (RuntimeError,
exception.WorkerCreationFailure,
subject_store.exceptions.BadStoreConfiguration)
def fail(e):
global KNOWN_EXCEPTIONS
return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1
sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e))
sys.exit(return_code)
def main():
try:
config.parse_api_args()
config.set_config_defaults()
wsgi.set_eventlet_hub()
logging.setup(CONF, 'subject')
notifier.set_defaults()
if cfg.CONF.profiler.enabled:
_notifier = osprofiler.notifier.create("Messaging",
oslo_messaging, {},
notifier.get_transport(),
"subject", "api",
cfg.CONF.bind_host)
osprofiler.notifier.set(_notifier)
osprofiler.web.enable(cfg.CONF.profiler.hmac_keys)
else:
osprofiler.web.disable()
server = wsgi.Server(initialize_subject_store=True)
server.start(config.load_paste_app('subject-api'), default_port=9292)
server.wait()
except KNOWN_EXCEPTIONS as e:
fail(e)
if __name__ == '__main__':
main()
|
[
"yibo_grace@163.com"
] |
yibo_grace@163.com
|
6945be5828ba5a3cb5e7ddeffbf005e2959775e1
|
9026fa54f4e5f636fff252686f4fd019198e81c7
|
/algorithms/utils/torch.py
|
3e732c4f390e6f35276f4e7912f54054b7044c56
|
[
"MIT"
] |
permissive
|
ZikangXiong/ToyRLAlgorithms
|
7c44f7fbf4c0f0e9b235c7774de1a2f415d3df31
|
6b54a3a845fdc227b1fb619f4a682859a36060fc
|
refs/heads/master
| 2023-03-19T12:48:02.980975
| 2021-03-11T05:25:49
| 2021-03-11T05:25:49
| 339,210,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
import torch as th
def change_optim_lr(optim: th.optim.Optimizer, lr):
for param_group in optim.param_groups:
param_group["lr"] = lr
def grad_clip(model, clip_val):
for param in model.parameters():
param.grad.data.clamp_(-clip_val, clip_val)
def polyak_update(original_model, target_model, tau):
# polyak update
for target_param, param in zip(target_model.parameters(), original_model.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau
)
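# Usage sketch (variable names hypothetical): softly track an online network
# with its target copy, as in DDPG/SAC-style training loops.
#
#   polyak_update(online_net, target_net, tau=0.005)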
|
[
"zikangxiong@icloud.com"
] |
zikangxiong@icloud.com
|
30adef4a3467a6d90dfc793d1c1c8c875ceb2ba1
|
52c93868150e17ce39b021907076954fccfce060
|
/myshop/myshop/urls.py
|
c8c1942c7df10e724c93bea5e91d739f464cce0b
|
[] |
no_license
|
Abdeljalil97/shop_app
|
22bbdf78abc4b7006565c73f5eda771ade254202
|
6aa26519bd84610476598ea378ca54c2ef3d1026
|
refs/heads/main
| 2023-07-24T14:27:39.055245
| 2021-09-02T10:33:13
| 2021-09-02T10:33:13
| 402,379,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
"""myshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('payment/', include('payment.urls', namespace='payment')),
path('cart/', include('cart.urls', namespace='cart')),
path('', include('shop.urls', namespace='shop')),
path('orders/', include('orders.urls', namespace='orders')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
[
"abdeljalilbensoudane97@gmail.com"
] |
abdeljalilbensoudane97@gmail.com
|
d7b3aa5dbfc539c9679fb4770e347c3a9c923b17
|
3df8b0c2963c66e5068d95b3721ec70647bb63f1
|
/Week8Assignment1.py
|
3af8a48d21bbd9fa6a2223a2ead10a1d898e7bec
|
[] |
no_license
|
jcidras/Coursera
|
d5ec02d1dc127bd3492e09493cd24c3bad6803e6
|
48149a005c023ffd3732d229058fb178e5e7fb11
|
refs/heads/master
| 2016-09-05T22:52:58.785670
| 2015-04-06T18:09:19
| 2015-04-06T18:09:35
| 32,801,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
words = []
fname = input("Enter file name: ")
try:
    fh = open(fname)
except IOError:
    # exit instead of falling through with fh unbound (the original continued)
    raise SystemExit("Cannot open file.")
for line in fh:
    line = line.rstrip()
    wordsInLine = line.split()  # split() skips runs of whitespace, unlike split(' ')
    for word in wordsInLine:
        if word not in words:
            words.append(word)
words.sort()
print(words)
|
[
"jcidras@me.com"
] |
jcidras@me.com
|
ad66e95a0dbe9085608b81fcdc6ac357b678332d
|
462e0b4a7b271c288e99f6c9ac5882b7dd27e6a5
|
/quiz.py
|
d93bd9e89300c948ed2a633ffcc9940e72fe5ed3
|
[] |
no_license
|
dennyhong96/learn-python
|
7c08f2fc7cfce86043fcd5b969b086d98d6f8cea
|
2177d8fa8495b0d3468e11e5407c47adec6e7ab8
|
refs/heads/master
| 2022-11-17T16:41:39.206535
| 2020-07-10T01:54:21
| 2020-07-10T01:54:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
from Question import Question
question_prompts = [
"What color are apples?\n(a) Red/Green\n(b) Purple\n(c) Orange\n\n",
"What color are Bananas?\n(a) Teal\n(b) Magenta\n(c) Yellow\n\n",
"What color are strawberries?\n(a) Yellow\n(b) Red\n(c) Blue\n\n",
]
questions = [
Question(question_prompts[0], "a"),
Question(question_prompts[1], "c"),
Question(question_prompts[2], "b"),
]
def run_test(questions):
score = 0
for question in questions:
user_answer = input(question.prompt)
if (question.check_anser(user_answer)):
score += 1
print('You are correct!')
else:
print('You are wrong!')
print(f"You got {score} out of {len(questions)} correct!")
run_test(questions)
|
[
"haiyanghong@Haiyangs-iMac.local"
] |
haiyanghong@Haiyangs-iMac.local
|
e3934fd38de621d01fbd6fe77f1e2441b445abb7
|
58ba995b5d430246d28f1cf048fdae83808bf71d
|
/src/classes/model/SketchCodeModel.py
|
b018c1a233308ec39985de3fae95710f40ee0ee8
|
[] |
no_license
|
sarvasvkulpati/PageBasic
|
039b7677fe047695f458ac1334c978feded0ec94
|
f4978b3ba58fe5417e85c5a2fa9b242bb9e0f856
|
refs/heads/master
| 2020-04-17T11:24:35.567398
| 2019-01-21T05:06:17
| 2019-01-21T05:06:17
| 166,539,390
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,489
|
py
|
from __future__ import absolute_import
from keras.models import Model, Sequential, model_from_json
from keras.callbacks import ModelCheckpoint, CSVLogger, Callback
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers import Embedding, GRU, TimeDistributed, RepeatVector, LSTM, concatenate, Input, Reshape
from keras.layers.convolutional import Conv2D
from keras.optimizers import RMSprop
from .ModelUtils import *
from classes.dataset.Dataset import *
MAX_LENGTH = 48
MAX_SEQ = 150
class SketchCodeModel():
def __init__(self, model_output_path, model_json_file=None, model_weights_file=None):
# Create model output path
self.model_output_path = model_output_path
# If we have an existing model json / weights, load in that model
if model_json_file is not None and model_weights_file is not None:
self.model = self.load_model(model_json_file, model_weights_file)
optimizer = RMSprop(lr=0.0001, clipvalue=1.0)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print("Loaded pretrained model from disk")
# Create a new model if we don't have one
else:
self.create_model()
print("Created new model, vocab size: {}".format(self.vocab_size))
print(self.model.summary())
def load_model(self, model_json_file, model_weights_file):
json_file = open(model_json_file, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_weights_file)
return loaded_model
def save_model(self):
model_json = self.model.to_json()
with open("{}/model_json.json".format(self.model_output_path), "w") as json_file:
json_file.write(model_json)
self.model.save_weights("{}/weights.h5".format(self.model_output_path))
def create_model(self):
tokenizer, vocab_size = Dataset.load_vocab()
self.vocab_size = vocab_size
# Image encoder
image_model = Sequential()
image_model.add(Conv2D(16, (3, 3), padding='valid', activation='relu', input_shape=(256, 256, 3,)))
image_model.add(Conv2D(16, (3,3), activation='relu', padding='same', strides=2))
image_model.add(Conv2D(32, (3,3), activation='relu', padding='same'))
image_model.add(Conv2D(32, (3,3), activation='relu', padding='same', strides=2))
image_model.add(Conv2D(64, (3,3), activation='relu', padding='same'))
image_model.add(Conv2D(64, (3,3), activation='relu', padding='same', strides=2))
image_model.add(Conv2D(128, (3,3), activation='relu', padding='same'))
image_model.add(Flatten())
image_model.add(Dense(1024, activation='relu'))
image_model.add(Dropout(0.3))
image_model.add(Dense(1024, activation='relu'))
image_model.add(Dropout(0.3))
image_model.add(RepeatVector(MAX_LENGTH))
visual_input = Input(shape=(256, 256, 3,))
encoded_image = image_model(visual_input)
# Language encoder
language_input = Input(shape=(MAX_LENGTH,))
language_model = Embedding(vocab_size, 50, input_length=MAX_LENGTH, mask_zero=True)(language_input)
language_model = GRU(128, return_sequences=True)(language_model)
language_model = GRU(128, return_sequences=True)(language_model)
# Decoder
decoder = concatenate([encoded_image, language_model])
decoder = GRU(512, return_sequences=True)(decoder)
decoder = GRU(512, return_sequences=False)(decoder)
decoder = Dense(vocab_size, activation='softmax')(decoder)
# Compile the model
self.model = Model(inputs=[visual_input, language_input], outputs=decoder)
optimizer = RMSprop(lr=0.0001, clipvalue=1.0)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def train(self, training_path, validation_path, epochs):
# Setup data generators
training_generator, train_steps_per_epoch = Dataset.create_generator(training_path, max_sequences=MAX_SEQ)
validation_generator, val_steps_per_epoch = Dataset.create_generator(validation_path, max_sequences=MAX_SEQ)
# Setup model callbacks
callbacks_list = self.construct_callbacks(validation_path)
# Begin training
print("\n### Starting model training ###\n")
self.model.fit_generator(generator=training_generator, validation_data=validation_generator, epochs=epochs, shuffle=False, validation_steps=val_steps_per_epoch, steps_per_epoch=train_steps_per_epoch, callbacks=callbacks_list, verbose=1)
print("\n### Finished model training ###\n")
self.save_model()
def construct_callbacks(self, validation_path):
checkpoint_filepath="{}/".format(self.model_output_path) + "weights-epoch-{epoch:04d}--val_loss-{val_loss:.4f}--loss-{loss:.4f}.h5"
csv_logger = CSVLogger("{}/training_val_losses.csv".format(self.model_output_path))
checkpoint = ModelCheckpoint(checkpoint_filepath,
verbose=0,
save_weights_only=True,
save_best_only=True,
mode= 'min',
period=2)
callbacks_list = [checkpoint, csv_logger]
return callbacks_list
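# Usage sketch (paths hypothetical): train a fresh model and save it.
#
#   model = SketchCodeModel(model_output_path='../output')
#   model.train('../data/train', '../data/validation', epochs=10)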
|
[
"sarvasvkulpati@gmail.com"
] |
sarvasvkulpati@gmail.com
|
fc2a5c414be6f24fb81993bd57bacd41c5ff4ece
|
d46577a83f627dd46c9f6ffe8aab8d432da34999
|
/src/ghutil/cli/milestone/open.py
|
6fa11e5ac06bdaa89d5f3b1e4fcfd05dee7a6929
|
[
"MIT"
] |
permissive
|
jwodder/ghutil
|
4426872613aa080db33d948c7eb6521e9ad0d151
|
763ab2a4b33292b0a5538df38a7bda2e408e3632
|
refs/heads/master
| 2023-01-07T10:14:41.878228
| 2022-12-26T22:09:19
| 2022-12-26T22:09:19
| 91,839,769
| 6
| 1
| null | 2017-08-21T15:04:10
| 2017-05-19T19:40:57
|
Python
|
UTF-8
|
Python
| false
| false
| 342
|
py
|
import click
from ghutil.types import Repository
@click.command()
@Repository.option(
"-R",
"--repo",
"--repository",
"repo",
help="Repository to which the milestone belongs",
)
@click.argument("milestone")
def cli(repo, milestone):
"""Open a milestone"""
repo.milestone(milestone).patch(json={"state": "open"})
|
[
"git@varonathe.org"
] |
git@varonathe.org
|
8d7fe868b6b0bcc163e60dbcbbbdebd3155b7ba2
|
20a53d9a52f839ddec0cacff6ac12b63626c9548
|
/phonopy/interface/dftbp.py
|
aa082ea33c7ec05e0f8db2d0d85df9a67d5ab5d0
|
[] |
permissive
|
ntq1982/phonopy
|
339c6756c38cd7301167fc26fa117afdf1343b90
|
fc73c9ba8815180bff8428174495c157d9444c68
|
refs/heads/master
| 2021-09-25T01:00:58.826347
| 2021-09-15T07:50:17
| 2021-09-15T07:50:17
| 238,440,992
| 0
| 0
|
BSD-3-Clause
| 2020-02-05T12:06:21
| 2020-02-05T12:06:20
| null |
UTF-8
|
Python
| false
| false
| 6,577
|
py
|
# Copyright (C) 2015 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
from phonopy.structure.atoms import Atoms
from phonopy.units import dftbpToBohr
from phonopy.file_IO import collect_forces
from phonopy.interface.vasp import (get_scaled_positions_lines, check_forces,
get_drift_forces)
def parse_set_of_forces(num_atoms, forces_filenames, verbose=True):
hook = 'forces :real:2:'
is_parsed = True
force_sets = []
for i, filename in enumerate(forces_filenames):
if verbose:
sys.stdout.write("%d. " % (i + 1))
f = open(filename)
dftbp_forces = collect_forces(f, num_atoms, hook, [0, 1, 2])
if check_forces(dftbp_forces, num_atoms, filename, verbose=verbose):
drift_force = get_drift_forces(dftbp_forces,
filename=filename,
verbose=verbose)
force_sets.append(np.array(dftbp_forces) - drift_force)
else:
is_parsed = False
if is_parsed:
return force_sets
else:
return []
#
# read dftbp-files
#
def read_dftbp(filename):
""" Reads DFTB+ structure files in gen format.
Args:
filename: name of the gen-file to be read
Returns:
atoms: an object of the phonopy.Atoms class, representing the structure
found in filename
"""
infile = open(filename, 'r')
lines = infile.readlines()
# remove any comments
for ss in lines:
if ss.strip().startswith('#'):
lines.remove(ss)
natoms = int(lines[0].split()[0])
symbols = lines[1].split()
if (lines[0].split()[1].lower() == 'f'):
is_scaled = True
scale_pos = 1
scale_latvecs = dftbpToBohr
else:
is_scaled = False
scale_pos = dftbpToBohr
scale_latvecs = dftbpToBohr
# assign positions and expanded symbols
positions = []
    expanded_symbols = []
    for ii in range(2, natoms+2):
        lsplit = lines[ii].split()
        expanded_symbols.append(symbols[int(lsplit[1]) - 1])
        positions.append([float(ss)*scale_pos for ss in lsplit[2:5]])
# origin is ignored, may be used in future
origin = [float(ss) for ss in lines[natoms+2].split()]
# assign coords of unitcell
cell = []
for ii in range(natoms+3, natoms+6):
lsplit = lines[ii].split()
cell.append([float(ss)*scale_latvecs for ss in lsplit[:3]])
cell = np.array(cell)
    if is_scaled:
        atoms = Atoms(symbols=expanded_symbols,
                      cell=cell,
                      scaled_positions=positions)
    else:
        atoms = Atoms(symbols=expanded_symbols,
                      cell=cell,
                      positions=positions)
return atoms
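# Sketch of the gen layout parsed above (fractional 'F' variant, values
# illustrative):
#
#   2  F
#   Ga As
#   1 1  0.00 0.00 0.00
#   2 2  0.25 0.25 0.25
#   0.0 0.0 0.0          <- origin (read but ignored)
#   <lattice vector 1>
#   <lattice vector 2>
#   <lattice vector 3>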
#
# write dftb+ .gen-file
#
def get_reduced_symbols(symbols):
"""Reduces expanded list of symbols.
Args:
symbols: list containing any chemical symbols as often as
the atom appears in the structure
Returns:
reduced_symbols: any symbols appears only once
"""
reduced_symbols = []
for ss in symbols:
if not (ss in reduced_symbols):
reduced_symbols.append(ss)
return reduced_symbols
def write_dftbp(filename, atoms):
"""Writes DFTB+ readable, gen-formatted structure files
Args:
filename: name of the gen-file to be written
atoms: object containing information about structure
"""
scale_pos = dftbpToBohr
lines = ""
# 1. line, use absolute positions
natoms = atoms.get_number_of_atoms()
lines += str(natoms)
lines += ' S \n'
# 2. line
    expanded_symbols = atoms.get_chemical_symbols()
    symbols = get_reduced_symbols(expanded_symbols)
    lines += ' '.join(symbols) + '\n'
    atom_numbers = []
    for ss in expanded_symbols:
        atom_numbers.append(symbols.index(ss) + 1)
positions = atoms.get_positions()/scale_pos
for ii in range(natoms):
pos = positions[ii]
pos_str = "{:3d} {:3d} {:20.15f} {:20.15f} {:20.15f}\n".format(
ii + 1, atom_numbers[ii], pos[0], pos[1], pos[2])
lines += pos_str
# origin arbitrary
    lines += '0.0 0.0 0.0\n'
cell = atoms.get_cell()/scale_pos
for ii in range(3):
cell_str = "{:20.15f} {:20.15f} {:20.15f}\n".format(
cell[ii][0], cell[ii][1], cell[ii][2])
lines += cell_str
outfile = open(filename, 'w')
outfile.write(lines)
def write_supercells_with_displacements(supercell, cells_with_disps, filename="geo.gen"):
"""Writes perfect supercell and supercells with displacements
Args:
supercell: perfect supercell
cells_with_disps: supercells with displaced atoms
filename: root-filename
"""
# original cell
write_dftbp(filename + "S", supercell)
# displaced cells
for ii in range(len(cells_with_disps)):
write_dftbp(filename + "S-{:03d}".format(ii+1), cells_with_disps[ii])
|
[
"benjamin.hourahine@strath.ac.uk"
] |
benjamin.hourahine@strath.ac.uk
|
df673618a14a4adaaa4757a50d9f63bde011f0b3
|
6f7ef265722d442c8ea782a2af6fb957a6b36865
|
/mozdns/models.py
|
d8c54db69672c478387617cc3c9f78fe34bd61dc
|
[] |
no_license
|
Marlburo/inventory
|
751149a0085c8ec8ef82d81ca6e1edc79d4a293d
|
bb2aa99e41f532c35eda47a210a12be134ca62c0
|
refs/heads/master
| 2021-01-22T16:38:58.901807
| 2015-01-28T15:37:07
| 2015-01-28T15:37:07
| 33,836,752
| 1
| 0
| null | 2015-04-12T22:39:12
| 2015-04-12T22:39:10
| null |
UTF-8
|
Python
| false
| false
| 9,439
|
py
|
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
import mozdns
from mozdns.domain.models import Domain
from mozdns.view.models import View
from mozdns.mixins import ObjectUrlMixin, DisplayMixin
from mozdns.validation import validate_first_label, validate_name
from mozdns.validation import validate_ttl
class DomainMixin(models.Model):
domain = models.ForeignKey(Domain, null=False, help_text="FQDN of the "
"domain after the short hostname. "
"(Ex: <i>Vlan</i>.<i>DC</i>.mozilla.com)")
class Meta:
abstract = True
class LabelMixin(models.Model):
# "The length of any one label is limited to between 1 and 63 octets."
    # -- RFC 2181
label = models.CharField(max_length=63, blank=True, null=True,
validators=[validate_first_label],
help_text="Short name of the fqdn")
class Meta:
abstract = True
class FQDNMixin(models.Model):
fqdn = models.CharField(max_length=255, blank=True, null=True,
validators=[validate_name], db_index=True)
class Meta:
abstract = True
class LabelDomainMixin(LabelMixin, DomainMixin, FQDNMixin):
"""
This class provides common functionality that many DNS record
classes share. This includes a foreign key to the ``domain`` table
and a ``label`` CharField.
If you plan on using the ``unique_together`` constraint on a Model
that inherits from ``LabelDomainMixin``, you must include ``domain`` and
``label`` explicitly.
All common records have a ``fqdn`` field. This field is updated
every time the object is saved::
fqdn = name + domain.name
or if name == ''
fqdn = domain.name
``fqdn`` makes searching for records much easier. Instead of
looking at ``obj.label`` together with ``obj.domain.name``, you can
just search the ``obj.fqdn`` field.
"the total number of octets that represent a name (i.e., the sum of
all label octets and label lengths) is limited to 255" - RFC 4471
"""
class Meta:
abstract = True
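# Illustration: for label "www" on domain "mozilla.com", MozdnsRecord.set_fqdn()
# (defined below) yields fqdn == "www.mozilla.com"; an empty label yields
# "mozilla.com".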
class ViewMixin(models.Model):
    def validate_views(instance, views):
        # clean_views() already iterates over the full list, so one call
        # suffices (the original looped and passed the whole list each time).
        instance.clean_views(views)
views = models.ManyToManyField(
View, blank=True, validators=[validate_views]
)
class Meta:
abstract = True
def clean_views(self, views):
"""cleaned_data is the data that is going to be called with for
updating an existing or creating a new object. Classes should implement
this function according to their specific needs.
"""
for view in views:
if hasattr(self, 'domain') and self.domain:
self.check_no_ns_soa_condition(self.domain, view=view)
if hasattr(self, 'reverse_domain') and self.reverse_domain:
self.check_no_ns_soa_condition(self.reverse_domain, view=view)
def check_no_ns_soa_condition(self, domain, view=None):
if domain.soa:
fail = False
root_domain = domain.soa.root_domain
if root_domain and not root_domain.nameserver_set.exists():
fail = True
elif (view and
not root_domain.nameserver_set.filter(views=view).exists()):
fail = True
if fail:
raise ValidationError(
"The zone you are trying to assign this record into does "
"not have an NS record, thus cannnot support other "
"records.")
class TTLRRMixin(object):
def check_for_illegal_rr_ttl(self, field_name='fqdn', rr_value=None):
"""
"You have different records in the same RRset <name,type,class>
with different TTLs. This is not allowed and is being
corrected."
-- Mark Andrews
BUG 892531
A new record's ttl will override any old record's TTL if those records
belong to a set or rr (Round Robin) records.
"""
if not rr_value:
rr_value = getattr(self, field_name)
for record in self.__class__.objects.filter(**{field_name: rr_value}):
if self.pk and record.pk == self.pk:
continue
if self.ttl != record.ttl:
# This sucks because I'm bypassing the records' save/clean
# call.
self.__class__.objects.filter(pk=record.pk).update(
ttl=self.ttl
)
class MozdnsRecord(ViewMixin, TTLRRMixin, DisplayMixin, ObjectUrlMixin):
ttl = models.PositiveIntegerField(default=None, blank=True, null=True,
validators=[validate_ttl],
help_text="Time to Live of this record")
description = models.CharField(max_length=1000, blank=True, null=True,
help_text="A description of this record.")
def __str__(self):
self.set_fqdn()
return self.bind_render_record()
def __repr__(self):
return "<{0} '{1}'>".format(self.rdtype, str(self))
class Meta:
abstract = True
@classmethod
def get_api_fields(cls):
"""
The purpose of this is to help the API decide which fields to expose
to the user when they are creating and updating an Object. This
function should be implemented in inheriting models and overridden to
provide additional fields. Tastypie ignores any relational fields on
the model. See the ModelResource definitions for view and domain
fields.
"""
return ['fqdn', 'ttl', 'description', 'views']
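# A minimal sketch of the override pattern described above (the
# 'MyRecord' subclass and its extra field are hypothetical):
#
# @classmethod
# def get_api_fields(cls):
#     return super(MyRecord, cls).get_api_fields() + ['my_field']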
def clean(self):
# The Nameserver and subclasses of BaseAddressRecord do not call this
# function
self.set_fqdn()
self.check_TLD_condition()
self.check_for_illegal_rr_ttl()
self.check_no_ns_soa_condition(self.domain)
self.check_for_delegation()
if self.rdtype != 'CNAME':
self.check_for_cname()
def delete(self, *args, **kwargs):
if self.domain.soa:
self.domain.soa.schedule_rebuild()
from mozdns.utils import prune_tree
call_prune_tree = kwargs.pop('call_prune_tree', True)
objs_domain = self.domain
super(MozdnsRecord, self).delete(*args, **kwargs)
if call_prune_tree:
prune_tree(objs_domain)
def save(self, *args, **kwargs):
self.full_clean()
if self.pk:
# We need to get the domain from the db. If it's not our current
# domain, call prune_tree on the domain in the db later.
db_domain = self.__class__.objects.get(pk=self.pk).domain
if self.domain == db_domain:
db_domain = None
else:
db_domain = None
no_build = kwargs.pop("no_build", False)
super(MozdnsRecord, self).save(*args, **kwargs)
if no_build:
pass
else:
# Mark the soa
if self.domain.soa:
self.domain.soa.schedule_rebuild()
if db_domain:
from mozdns.utils import prune_tree
prune_tree(db_domain)
def set_fqdn(self):
try:
if self.label == '':
self.fqdn = self.domain.name
else:
self.fqdn = "{0}.{1}".format(self.label, self.domain.name)
except ObjectDoesNotExist:
return
def check_for_cname(self):
"""
"If a CNAME RR is preent at a node, no other data should be
present; this ensures that the data for a canonical name and its
aliases cannot be different."
-- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_
Call this function in models that can't overlap with an existing
CNAME.
"""
CNAME = mozdns.cname.models.CNAME
if hasattr(self, 'label'):
if CNAME.objects.filter(domain=self.domain,
label=self.label).exists():
raise ValidationError("A CNAME with this name already exists.")
else:
if CNAME.objects.filter(label='', domain=self.domain).exists():
raise ValidationError("A CNAME with this name already exists.")
def check_for_delegation(self):
"""
If an object's domain is delegated it should not be able to
be changed. Delegated domains cannot have objects created in
them.
"""
if not (self.domain and self.domain.delegated):
return
if self.domain.nameserver_set.filter(server=self.fqdn).exists():
return
else:
raise ValidationError(
"You can only create a record in a delegated domain if "
"there is an NS record pointing to the record's fqdn."
)
def check_TLD_condition(self):
domain = Domain.objects.filter(name=self.fqdn)
if not domain:
return
if self.label == '' and domain[0] == self.domain:
return # This is allowed
else:
raise ValidationError("You cannot create an record that points "
"to the top level of another domain.")
|
[
"uberj@onid.orst.edu"
] |
uberj@onid.orst.edu
|
9a0329062e8d096820e8bc53ae2c7631b65eacbf
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnconventu.py
|
2bcf29cae970aba9fb65a9069b495efd0c277ce3
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 175
|
py
|
ii = [('ShawHDE.py', 1), ('ClarGE.py', 1), ('DaltJMA.py', 1), ('WadeJEB.py', 1), ('CoopJBT.py', 1), ('SoutRD2.py', 1), ('MackCNH.py', 1), ('DequTKM.py', 1), ('BeckWRE.py', 9)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
48cc31dbeacf007c6f1773c6ddbbf598a018c38d
|
33374c7d37fa7027ef6d86c0afa9bb0833cea1bd
|
/src/tests/test_influx_logs.py
|
8ea73a744551c44fb24966eb4bb6eefd6b54a255
|
[
"MIT"
] |
permissive
|
optimuspaul/themis
|
8a86f11a63a8b3b029017e2dd3c525b964e0b478
|
e088315cdae7e915aece8af9965914c4c89bd776
|
refs/heads/master
| 2020-12-11T09:22:58.983644
| 2015-10-22T19:50:09
| 2015-10-22T19:50:09
| 44,275,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
import logging
import unittest
import uuid
from themis.log import TSData
from tests import influx
class TestEmit(unittest.TestCase):
def test_emit_001(self):
logger = logging.getLogger(__name__)
tag = uuid.uuid4()
measurement = "test.themis.emit.{0}".format(tag)
td = TSData(measurement, {"value": 1}, {"test": "TestEmit.test_emit_001", "test_id": tag})
logger.debug("just me testing", extra={"ts_data": td})
logger.error("is this an error?", extra={"ts_data": td})
results = influx.query("SELECT sum(value) FROM \"themis_tests\".\"default\".\"{1}\" WHERE time > now() - 1m AND test_id = '{0}'".format(tag, measurement))
points = list(results.get_points())
self.assertEqual(len(points), 1)
self.assertEqual(points[0]["sum"], 2)
influx.query("DROP MEASUREMENT \"{0}\"".format(measurement), database="themis_tests")
|
[
"paul@decoursey.net"
] |
paul@decoursey.net
|
abf408ac905e606fd464fe14a63a5afca7ee754b
|
1581d6667c7534ff3413348d3e643fac43aa92ec
|
/07-06.py
|
3ea2d30197f71c51940cc99bb781e136e7993c2a
|
[
"MIT"
] |
permissive
|
Michael-E-Rose/Woolridge_IntroEconometrics_Solutions
|
a3d72051d4ec2140e11b8e807cc3f5195b8dc6dc
|
8d56856ba7e0efc9ef66011af555df389409414d
|
refs/heads/master
| 2022-06-18T05:25:36.206499
| 2020-04-23T17:32:26
| 2020-04-23T17:32:26
| 239,853,791
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
"""Python script for C6, Chapter 7 of Wooldridge: Intr. Economometrics"""
import pandas as pd
import statsmodels.formula.api as smf
import numpy as np
import statsmodels.api as sm
# Read in df
file = "./data/sleep75.dta"
df = pd.read_stata(file)
print(df.head())
print(">>>>")
# i)
print("Results for men:")
dfmen = df.loc[df['male'] == 1]
lm1 = smf.ols('sleep ~ totwrk + educ + age + agesq + yngkid', data=dfmen).fit()
print(lm1.summary())
print("Results for women:")
dfwomen = df.loc[df['male'] == 0]
lm2 = smf.ols('sleep ~ totwrk + educ + age + agesq + yngkid', data=dfwomen).fit()
print(lm2.summary())
print(">>>>")
# ii)
df['totwrk_male'] = df['totwrk'] * df['male']
df['educ_male'] = df['educ'] * df['male']
df['age_male'] = df['age'] * df['male']
df['agesq_male'] = df['agesq'] * df['male']
df['yngkid_male'] = df['yngkid'] * df['male']
lm3 = smf.ols('sleep ~ totwrk + educ + age + agesq + yngkid + male + totwrk_male + educ_male + age_male + agesq_male + yngkid_male', data=df).fit()
print("F-test of joint statistical significance:")
# CHECK AGAIN
# Standard F-test not possible -> ANOVA alternative?
#print(sm.stats.anova_lm(lm3))
#print(lm3.f_test('(male = 0), (totwrk_male = 0), (educ_male = 0), (age_male = 0), (agesq_male = 0), (yngkid_male = 0)'))
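# One runnable ANOVA-style alternative (a sketch): compare the restricted
# model without any gender terms against lm3 via statsmodels' anova_lm.
lm_restricted = smf.ols('sleep ~ totwrk + educ + age + agesq + yngkid', data=df).fit()
print(sm.stats.anova_lm(lm_restricted, lm3))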
print(">>>>")
# iii)
print("F-test of joint statistical significance:")
print(lm3.f_test('(totwrk_male = 0), (educ_male = 0), (age_male = 0), (agesq_male = 0), (yngkid_male = 0)'))
print(">>>>")
|
[
"carolin.formella@web.de"
] |
carolin.formella@web.de
|
ec404df2e18a084d728987b46dd0434c5cfe01f1
|
d25e581e62ca383bbefbd4dc8e460625922126e1
|
/w select/utils/options.py
|
d9c9a28561a6661b9b46e1bed488c8d07cda47a9
|
[] |
no_license
|
GowayGoway/Federated-Learning
|
60c589fa6aabba37bf0d5f445620df79c7944197
|
24674cd3f4e0a1810e3b62fb20bb8dee9223923f
|
refs/heads/main
| 2023-06-18T06:08:19.227761
| 2021-07-19T03:24:39
| 2021-07-19T03:24:39
| 372,105,380
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,078
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import argparse
def args_parser():
parser = argparse.ArgumentParser()
# federated arguments
parser.add_argument('--epochs', type=int, default=10, help="rounds of training")
parser.add_argument('--num_users', type=int, default=20, help="number of users: K")  # number of users
parser.add_argument('--frac', type=float, default=1, help="the fraction of clients: C")  # fraction of clients; frac * num_users clients are picked each round
parser.add_argument('--local_bs', type=int, default=50, help="local batch size: B")  # B: batch size used for the client updates
parser.add_argument('--local_ep', type=int, default=5, help="the number of local epochs: E")  # E: number of training passes each client makes over its local dataset per round
parser.add_argument('--bs', type=int, default=128, help="test batch size")
parser.add_argument('--lr', type=float, default=0.01, help="learning rate")
parser.add_argument('--momentum', type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument('--split', type=str, default='user', help="train-test split type, user or sample")
# model arguments
#parser.add_argument('--model', type=str, default='mlp', help='model name')
parser.add_argument('--model', type=str, default='cnn', help='model name')
parser.add_argument('--kernel_num', type=int, default=9, help='number of each kind of kernel')
parser.add_argument('--kernel_sizes', type=str, default='3,4,5',
help='comma-separated kernel size to use for convolution')
parser.add_argument('--norm', type=str, default='batch_norm', help="batch_norm, layer_norm, or None")
parser.add_argument('--num_filters', type=int, default=32, help="number of filters for conv nets")
parser.add_argument('--max_pool', type=str, default='True',
help="Whether use max pooling rather than strided convolutions")
# other arguments
parser.add_argument('--dataset', type=str, default='mnist', help="name of dataset")
#parser.add_argument('--dataset', type=str, default='cifar', help="name of dataset")
#parser.add_argument('--iid', action='store_true', help='whether i.i.d or not')  # i.i.d. (independent and identically distributed)
parser.add_argument('--iid', default=True, help='whether i.i.d or not')
parser.add_argument('--num_classes', type=int, default=10, help="number of classes")
parser.add_argument('--num_channels', type=int, default=1, help="number of channels of images")
parser.add_argument('--gpu', type=int, default=0, help="GPU ID, -1 for CPU")
parser.add_argument('--stopping_rounds', type=int, default=10, help='rounds of early stopping')
parser.add_argument('--verbose', action='store_true', help='verbose print')
#parser.add_argument('--verbose', default=True, help='verbose print')
parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
args = parser.parse_args()
return args
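if __name__ == '__main__':
    # Quick self-check (an addition, not part of the original script):
    # parse and print the default arguments.
    print(vars(args_parser()))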
|
[
"noreply@github.com"
] |
GowayGoway.noreply@github.com
|
1059dd02112d27e3e8c595fd50f49e552c76655e
|
400957aa41d1de8572a4708e78482fc94529b483
|
/python/ccpnmr/update/temp/python__temp_ccpnmr__temp_analysis__temp_popups_EditExperiment.py
|
356ebd27583509eb820a4c40384b5871974f37f2
|
[] |
no_license
|
bopopescu/ccpnmr2.4-2
|
c3e4323b73ad56a631db0ac3bef6c4df802ace08
|
c39ed7f0a840be006543d1c6bd1e4ccb37376962
|
refs/heads/master
| 2022-11-21T23:59:53.507265
| 2016-09-21T01:53:44
| 2016-09-21T01:53:44
| 282,246,114
| 0
| 0
| null | 2020-07-24T14:52:05
| 2020-07-24T14:52:04
| null |
UTF-8
|
Python
| false
| false
| 110,065
|
py
|
"""
======================COPYRIGHT/LICENSE START==========================
EditExperiment.py: Part of the CcpNmr Analysis program
Copyright (C) 2003-2010 Wayne Boucher and Tim Stevens (University of Cambridge)
=======================================================================
The CCPN license can be found in ../../../../license/CCPN.license.
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- email: ccpn@bioc.cam.ac.uk
- contact the authors: wb104@bioc.cam.ac.uk, tjs23@cam.ac.uk
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
from memops.gui.ButtonList import ButtonList, UtilityButtonList
from memops.gui.CheckButton import CheckButton
from memops.gui.DataEntry import askInteger, askString
from memops.gui.Entry import Entry
from memops.gui.FloatEntry import FloatEntry
from memops.gui.Frame import Frame
from memops.gui.IntEntry import IntEntry
from memops.gui.LabelDivider import LabelDivider
from memops.gui.Label import Label
from memops.gui.LabelFrame import LabelFrame
from memops.gui.MessageReporter import showYesNo, showOkCancel, showWarning
from memops.gui.MultiWidget import MultiWidget
from memops.gui.PulldownList import PulldownList
from memops.gui.ScrolledMatrix import ScrolledMatrix
from memops.gui.TabbedFrame import TabbedFrame
from ccpnmr.analysis.popups.BasePopup import BasePopup
from ccpnmr.analysis.core.ExperimentBasic import newShiftList, isSpectrum, setExperimentShiftList
from ccpnmr.analysis.core.ExperimentBasic import getPrimaryExpDimRef, getRefExperimentCategories
from ccpnmr.analysis.core.ExperimentBasic import getFilteredRefExperiments, getRefExperiments, setRefExperiment, initExpTransfers
from ccpnmr.analysis.core.ExperimentBasic import cloneExperiment, defaultExperiment
from ccpnmr.analysis.core.AssignmentBasic import getShiftLists
from ccpnmr.analysis.core.MoleculeBasic import STANDARD_ISOTOPES
from ccp.api.nmr import Nmr
VOLUME_UNITS = ['ul','ml','l']
SAMPLE_STATES = ['liquid', 'solid', 'ordered', 'powder', 'crystal']
SHIFT_REF_COMPOUNDS = ['DSS','TSP','TMS','HDO','CHCl3','DMSO','DMSO-d5',
'dioxane','CH3OH','acetone','acetone-d5','TFE','TFA',
'H3PO4','TMP','PCr','liquid NH3']
STANDARD_UNITS = ['ppm','ppt','ppb']
NMR_PROBE_TYPES = ['liquid', 'solid', 'nano', 'flow', 'MAS']
class EditExperimentPopup(BasePopup):
"""
**Curate Experiment Parameters**
This popup window is used to control parameters that relate to the NMR
experiment entities within a CCPN project. It should be noted that an
experiment is specifically a record of *what was done*, not a reference to the
data that was obtained; this is what Analysis refers to as a spectrum. Also,
an experiment doesn't just refer to what kind of NMR experiment was performed,
but rather to something that was done on a particular occasion to a particular
sample. The kinds of information that are stored at the experiment level
describe how the experiment was performed and how certain data that derives
from the experiment should be interpreted. Several tabs are used to sub-divide
the popup window into several sections to control different aspects of the
experiments.
**Main "Experiments" Tab**
This table is the main display of all of the NMR experiments described within
the CCPN project. Each experiment may refer to one or more spectra (and even
an FID) that resulted from the experimental operation; the names of these are
listed in the "Data Sources" column. Several experimental parameters may be
adjusted in this table and, as far as resonance assignment is concerned, the
most important of these are the "Shift List" and "Mol Systems". It should
be noted that the "Shift Ref" column can only be configured if chemical
shift reference details have been entered at the "Shift References" tab.
The shift list of an experiment states how chemical shift measurements should
be grouped. An experiment's assignments, on the peak of its spectra, only
contribute to the chemical shift measurenemts in one shift list. In normal
operation each shift list corresponds to a given set of conditions, where
resonance positions in spectra a fairly static. Different experiments may use
different shift lists if their sample conditions are different enough to cause
peaks to move. Accordingly, a resonance derived from a given atom may have
several different recorded chemical shift values, each of which resides in a
different chemical shift list. Because each experiment is associated with a
single shift list it is thus known which chemical shift average the
assignments in its spectra contribute to and which chemical shift values to
look at when suggesting assignments for peaks. The shift list that an
experiment is linked to may be changed at any time, and when an experiment is
moved from one shift list to another (which may be new and empty) the
contributions that its spectrum peaks make to the calculation of average
chemical shift values will automatically be adjusted; the experiments that are
linked to a given shift list dictate which peaks to average chemical shift
values over.
The "Mol Systems" for an experiment specify which molecular systems were
present in the sample. In essence this means which group of molecular chains
the spectrum peaks that come from the experiment could be assigned to (i.e.
caused by). In normal operation the molecular information need not specifically be
set in this table, because the connection is made automatically when a peak is
assigned to a resonance that is known to be from a specific atom in a specific
residue. Once an experiment is associated with a particular molecular system,
subsequent attempts to assign its peaks to atoms in a different molecular system will
trigger a warning. The molecular system for an experiment may nonetheless be
set via this table. Sometimes this is to preemptively associate particular
experiments, and hence spectra, with specific molecular systems so that there
is less chance of accidentally assigning a peak to the wrong thing. The
molecular system, and hence residues, that an experiment is linked to is used
to facilitate several operations in Analysis. For example, when operating on a
peak to associate one of its assigned resonances with an atom, the molecular
system link allows the correct group of chains to be displayed
automatically.
**Experiment Types**
This table is used to specify what kind of NMR experiment was done for each of
the experiment records within the project. The general idea is that the basic
magnetisation transfer pathway is given and how this relates to the
experimental dimensions. Setting such information helps facilitate several
operations in Analysis. For example when making NMR derived distance
restraints only peaks from "through-space" experiments like NOESY are listed.
Also, and perhaps most importantly, the linking of experimental dimensions to
references, which correspond to particular points of the magnetisation transfer
pathway, enables the assignment system to have knowledge of experimental
dimensions that are linked together via covalent "one-bond" connectivity.
Thus, if an experiment is set to be of 15N HSQC type, then it is known that
any spectrum peaks represent one-bond correlations between hydrogen and
nitrogen. This dictates what the isotope type of any assignments must be, and
if a peak dimension is assigned to a specific atom then the assignment on the
other peak dimension must be to the covalently bound atom.
Setting the experiment type means setting the 'Full Type' column. The many
possibilities and difficult names makes this hard to do directly, and we have
tried to help with the process. The 'Type Synonym' column also sets the 'Full
Type' column but gives alternative and more human-readable names like 'HNCA'
instead of 'H[N[CA]]' or '15N HSQC-NOESY' instead of 'H[N]_H.NOESY'. Some
'Type Synonyms' correspond to more than one Full Type; anything in the 'Alt
Types' is an alternative Full Type. The possibilities given for these columns
are filtered, so that experiments that do not fit the nuclei on the axes are
not shown. The most common experiments are shown at the top of the selection
menu, the rest can be found in one or more of the categories lower down. The
'Categories' column divides experiments into a few rough groups and is used
to filter the possibilities in the other columns. Experiments that could
belong to more than one group are listed only in the most 'interesting' one.
The 'use external' category is a special case; it means that the program
should use the information in the 'External Source' and 'External Name'
columns to set the Full Type. This option is selected automatically when you
are loading a spectrum that contains information about the Full Type. At the
moment the program understands only Bruker pulse program names, and only if
they follow the standard naming conventions used by the Bruker Applications
department.
In normal operation the user specifies the type of NMR experiment that was run
by selecting options in the "Category", "Type Synonym" and "Full Type" columns.
If the full CCPN name of the experiment type is known then the user can go
straight for the "Full Type", but selecting the category and/or synonym first
allows the number of available options to be reduced dramatically; without this
all possible experiment types that have matching isotopes are shown.
Setting the category for the NMR experiment gives a rough sub division between
through-bond, through-space, quantification and other types. Strictly speaking
an experiment may belong to more than one category, but in this system it is
only listed in the least populous. For example a 15N HSQC-NOESY has both
through-bond and though-space transfers but is categorised as through-space.
If the category for an experiment is unknown, or not particularly helpful, the
user may set the synonym in the first instance. The "synonym" of an
experimental type in Analysis is a common human-readable name, for example
"HNCA" or "15N HSQC NOESY", but this may still not be sufficient to fully
specify the exact NMR experiment that was run. To do this the full CCPN type
should be considered. The External Source and corresponding name columns are
only used in situations where the loading of a spectrum specifies what kind of
experiment was run. At present this only occurs for data loaded from Bruker
files, and then only if the pulse sequence name in the parameters is known to
the system. Nevertheless, if this data is present the experiment type
information can be automatically be filled in.
The full CCPN type for an experiment uses a special nomenclature that is
described in the CCPN `experiment nomenclature paper`_ (slightly out of date
now). In essence the user can distinguish between different magnetisation
transfer pathways, some of which may have the same common name (synonym). For
example a 15N HSQC-TOCSY could have either the HSQC step or the TOCSY step
first. In this instance the system offers a choice between H[N]_H.TOCSY (HSQC
first) and H_H[N].TOCSY (TOCSY first). The experiment naming system for the
full CCPN type is fairly complex, and is designed to give a precise
specification of the magnetisation steps, which atom sites they visit and what
measurements are made; giving rise to an experimental dimension. It should be
noted however, that this system does not describe the precise NMR pulse
sequence that was used. For example no distinction is made between HSQC and
HMQC. The essential features of the nomenclature are as follows: capital
letters indicate atom sites that were recorded and result in an experimental
dimension; lower case letters are atom sites that are part of the pathway but
not recorded, e.g. carbonyl in H[N[co[CA]]]; square brackets represent
out-and-back transfers; curly brackets with "|" sub-divisions represent
alternative pathways; underscores represent transfers that are not one-bond or
J-coupling (the transfer type is listed after at the end after a dot).
The lower tables are used to show how the dimensions of the actual experiment
relate to the reference dimensions that described in the experiment type. Even
when an experiment type is set it will not always be possible to
automatically determine which of the experimental dimensions relates to which
part of the magnetisation transfer pathway. For example a 3D HCCH TOCSY
experiment (full type HC_cH.TOCSY) has two hydrogen dimensions; one dimension
is the hydrogen bound to the measured carbon, and one dimension is the
hydrogen in the acquisition dimension. Deciding which is which is crucial for
the correct assignment and interpretation of spectra. The problem only arises
when there are two or more dimensions with the same nucleus. Sometimes
Analysis guesses wrong and the user has to check. Changing the dimension
mapping is a matter of looking in the lower left table and seeing how one
dimension relates to another. Each dimension that has a direct transfer to
another recorded dimension is listed. For example, in an HCCH TOCSY dimension
1 (hydrogen) might be 'onebond' to dimension 3 (carbon), but the user may know
that it is actually dimension 2 that is really 'onebond' to the carbon. This
problem may be fixed by double-clicking either the "First Dim" or the "Second
Dim" to change the transfer pathways so that dimension 3 (carbon) is listed
as 'onebond' to dimension 2 (hydrogen) (the other rows in the table will
adjust automatically). The Numbering of dimensions in this table is the same
as that presented when assigning a peak, i.e. in the `Assignment Panel`_. It
helps to know that dimension '1' is usually (but not always) the acquisition
dimension.
The lower right "Reference Dimension Mapping" is an alternative way of looking
at the same information and shows how the experimental dimensions have been
mapped to their reference counterpart in the experiment type database. Here,
the "Ref Measurement" column can be used to follow the steps in the
magnetisation transfer pathway by following increasing measurement numbers.
Changing the "Ref Exp Dim" column in this table is equivalent to making
changes in the lower left table, but is perhaps more difficult to understand.
**Experimental Details, Instruments and Shift References**
The "Experimental Details" table is used to list and edit details about the
recorded experiments in terms of its physical setup. The user may specify
which instruments were used and information about the sample and NMR tube. It
should be noted that in order to specify a spectrometer or probe the
specification for the instrument must first be entered in the "NMR
Instruments" tab. Currently, none of the NMR details given in this table have
any influence on resonance assignment or NMR data analysis, although spinning
information may be used for solid-state spectra at some point. However, any
experimental details entered into the CCPN project will be present when
submissions to the BioMagResBank database are made; initially using the
`CcpNmr ECI`_.
The "Shift References" table is use to enter chemical shift reference
information into the CCPN project. This may them be linked to experiments via
the first "Experiments" tab, and such information is required for database
deposition. To add a chemical shift reference specification the user first
clicks on either "Add Internal Reference" (internal to the sample) or "Add
External Reference" as appropriate. Then for the rows that appear in the table
the user double-clicks to edit the columns to specify: which atom in which
kind of molecule was used, what the reference value and unit is, and whether
the reference is direct or indirect. A reference atom allows the direct
referencing of resonances that have the same kind of isotope, but other
isotopes may be referenced indirectly by using a shift ratio to relate to a
direct reference.
The "NMR Instruments" section contains two table that allows the user to add
descriptions of the NMR probe and spectrometer that were used during the
experiments. To achieve this the user adds a new specification for the
appropriate kind of instrument then double-clicks to fill in the details for
each of the rows that appears in the table.
**Caveats & Tips**
An experiment may be linked temporarily with a new shift list; selecting
"<New>" in the Shift List column of the first tab then reseting the shift list
back to the original one, in order to make a shift list that contains only
chemical shift value for that experiment at that time. Without any experiment
links these chemical shift values will not alter as peaks and assignments
change.
**References**
*A nomenclature and data model to describe NMR experiments.
Fogh RH, Vranken WF, Boucher W, Stevens TJ, Laue ED.
J Biomol NMR. 2006 Nov;36(3):147-55* (link_)
.. _`experiment nomenclature paper`: http://www.ncbi.nlm.nih.gov/pubmed/17031528
.. _`link`: http://www.ncbi.nlm.nih.gov/pubmed/17031528
.. _`Assignment Panel`: EditAssignmentPopup.html
.. _`CcpNmr ECI`: EntryCompletionPopup.html
"""
def __init__(self, parent, isModal=False, *args, **kw):
self.guiParent = parent
self.detailsExp = None
self.experiment = None
self.waiting = False
self.waitingT = False
self.waitingD = False
self.waitingS = False
self.waitingI = False
self.isModal = isModal
self.typeExpt = None
self.typeExps = []
self.shiftReference = None
self.probe = None
self.spectrometer = None
self.expDimRef = None
self.transferExpDimRefs = None
BasePopup.__init__(self, parent, modal=isModal, title="Experiments : Experiments", **kw)
def body(self, guiFrame):
self.geometry('750x500')
guiFrame.expandGrid(0,0)
tipTexts = ['A table listing the NMR experiments available within the current project including details of which shift lists they use',
'The experiment type definitions that are associated with the current NMR experiments',
'A table of ancillary experimental details, relating to samples and instrumentation',
'A table of chemical shift reference information that is used by the experiments of the project',
'Descriptions of NMR spectrometers and probes used in the experiments']
options = ['Experiments','Experiment Types','Experimental Details',
'Shift References','NMR Instruments']
if self.isModal:
options[1] = 'Specify ' + options[1]
tabbedFrame = TabbedFrame(guiFrame, options=options,
tipTexts=tipTexts, grid=(0, 0))
self.tabbedFrame = tabbedFrame
frameA, frameB, frameC, frameD, frameE = tabbedFrame.frames
if self.isModal:
tabbedFrame.select(1)
# Experiment frame
frameA.grid_columnconfigure(0, weight=1)
frameA.grid_rowconfigure(0, weight=1)
self.detailsEntry = Entry(self,text='', returnCallback = self.setDetails, width=30)
self.acqDimPulldown = PulldownList(self, callback=self.setAcqDim)
self.nameEntry = Entry(self,text='', returnCallback = self.setName, width=20)
self.shiftListPulldown = PulldownList(self, callback=self.setShiftList)
self.shiftRefSelect = MultiWidget(self, CheckButton, callback=self.setShiftRefs, minRows=0, useImages=False)
self.molSystemSelect = MultiWidget(self, CheckButton, callback=self.setMolSystems, minRows=0, useImages=False)
row = 0
tipTexts = ['The serial number of the experiment',
'A short identifying name for the experiment, for graphical display',
'The number of independent dimensions recorded in the experiment',
'The name of the reference type for the experiment; to indicate what kind of experiment was run',
'Which of the recorded experimental dimensions was the final acquisition dimension',
'Which shift list the experiment uses to record the chemical shifts for its assigned peaks',
'The names of the spectra (or other data sources) that are derived form the experiment',
'The molecular systems (groups of chains) that assignments in the experiment are made to',
'The chemical shift reference records used by the experiment',
'The number of fully processed spectra resulting from the experiment',
'Whether there is an unprocessed data source (spectrum) associated with the experiment',
'A user-editable textual comment for the experiment']
colHeadings = ['#','Name','Num.\nDim','Exp. Type', 'Acquisition\nDim',
'Shift List','Data\nSources','Mol\nSystems',
'Shift Ref','Num.\nSpectra','Raw\nData','Details']
editWidgets = [None, self.nameEntry, None, None, self.acqDimPulldown,
self.shiftListPulldown, None,
self.molSystemSelect, self.shiftRefSelect,
None, None, self.detailsEntry]
editGetCallbacks = [None, self.getName, None, None, self.getAcqDim,
self.getShiftList, None,
self.getMolSystems, self.getShiftRefs,
None, None, self.getDetails]
editSetCallbacks = [None, self.setName, None, None, self.setAcqDim,
self.setShiftList, None,
self.setMolSystems, self.setShiftRefs,
None, None, self.setDetails]
self.scrolledMatrix = ScrolledMatrix(frameA, tipTexts=tipTexts,
multiSelect=True,
initialRows=10, grid=(0, 0),
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
headingList=colHeadings,
callback=self.selectCell,
deleteFunc=self.deleteExperiment)
row += 1
tipTexts = ['Show a table of NMR measurement lists (chemical shifts, T1, J-coupling etc) that relate to the selected experiment',
'Clone an existing experiment and spectrum (excluding peakLists)',
'Make a new experiment and spectrum',
'Delete the selected experiments from the project; any spectrum data on disk is preserved']
texts = ['Measurement Lists','Clone Experiment...','New Experiment...','Delete']
commands = [self.showMeasurementLists, self.cloneExperiment,
self.newExperiment, self.deleteExperiment]
self.experimentButtons = ButtonList(frameA, commands=commands, texts=texts,
tipTexts=tipTexts, grid=(row, 0))
# Experiment Types
frameB.grid_columnconfigure(0, weight=1)
frameB.grid_columnconfigure(1, weight=1)
frameB.grid_rowconfigure(0, weight=1)
self.categoryPulldown = PulldownList(self, texts=self.getCategories(),
callback=self.setCategory )
self.extSourcePulldown = PulldownList(self, texts=self.getExtSources(),
callback=self.setExtSource )
self.extNameEntry = Entry(self, width=16, returnCallback=self.setExtName)
self.synonymPulldown = PulldownList(self, callback=self.setSynonym )
self.fullTypePulldown = PulldownList(self, callback=self.setFullType )
#dimNums = [1,2,3]
#self.refExpDimPulldown = PulldownList(self, texts=[str(x) for x in dimNums],
# objects=dimNums, callback=self.setRefExpDim)
self.refExpDimPulldown = PulldownList(self, callback=self.setRefExpDim)
self.transferPulldownA = PulldownList(self, callback=self.setTransferExpDimA)
self.transferPulldownB = PulldownList(self, callback=self.setTransferExpDimB)
row = 0
tipTexts = ['The serial number of the experiment',
'The name of the experiment used in graphical displays',
'Whether the experiment type information comes from an external source, e.g. Bruker pulse programs',
'The name of the experiment type as specified from an external source, e.g. from the Bruker pulse program',
'A loose, mutually exclusive classification of the experiment to help simplify subsequent setting of experiment type',
'A common human readable name for the type of experiment',
'The precise CCPN name for the reference experiment type; indicating what experiment was performed',
'Alternative reference experiment names which share the same human readable synonym']
headingList = ['#','Experiment\nName', 'External\nSource', 'External\nName',
'Category','Type\nSynonym','Full\nType', 'Alt\nTypes']
editWidgets = [None, None, self.extSourcePulldown, self.extNameEntry,
self.categoryPulldown, self.synonymPulldown,
self.fullTypePulldown, None]
editGetCallbacks = [None, None, self.getExtSource, self.getExtName,
self.getCategory, self.getSynonym, self.getFullType,
None]
editSetCallbacks = [None, None, self.setCategory, self.setExtSource,
self.setExtName, self.setSynonym, self.setFullType,
None]
self.expTypeMatrix = ScrolledMatrix(frameB, headingList=headingList,
callback=self.selectTypeExpt,
multiSelect=True, grid=(0,0),
gridSpan=(1,2), tipTexts=tipTexts,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets)
row += 1
tipTexts = ['Spread the experiment type specification, where possible, from the last selected experiment to all highlighted experiments',
'Open a popup to view and administer the reference experiment type specifications; can be use to describe new experiment types']
texts = ['Propagate Experiment Type','Edit Experiment Prototypes']
commands = [self.propagateExpType,self.guiParent.editExpPrototype]
self.expTypeButtons = ButtonList(frameB, commands=commands, grid=(row,0),
gridSpan=(1,2),texts=texts, tipTexts=tipTexts)
row += 1
frame = LabelFrame(frameB, text='Experiment Dim-Dim Transfers', grid=(row,0))
frame.expandGrid(0,0)
tipTexts = ['The first recorded experimental dimension involved in a magnetisation transfer',
'The type of magnetisation transfer (e.g. NOESY, TOCSY, J-coupling, one-bond) between the experimental dimensions',
'The second recorded experimental dimension involved in a magnetisation transfer']
headingList = ['First Dim','Transfer Type\nBetween Dims','Second Dim']
editWidgets = [self.transferPulldownA, None, self.transferPulldownB]
editGetCallbacks = [self.getTransferExpDimA, None, self.getTransferExpDimB]
editSetCallbacks = [self.setTransferExpDimA, None, self.setTransferExpDimB]
self.transferMatrix = ScrolledMatrix(frame, headingList=headingList,
callback = self.selectExpTransfer,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
grid=(0,0), tipTexts=tipTexts)
frame = LabelFrame(frameB, text='Reference Dimension Mapping', grid=(row,1))
frame.expandGrid(0,0)
tipTexts = ['The number of the experimental dimension',
'Which dimension in the reference experiment (prototype) specification the real experimental dimension corresponds to',
'The isotope involved with the reference experimental dimension',
'What is recorded on the experimental dimension, and which steps of the magnetisation transfer pathway it applies to']
headingList = ['Exp\nDim','Ref Exp\nDim(.Ref)','Isotope','Ref Measurement']
editWidgets = [None, self.refExpDimPulldown, None, None]
editGetCallbacks = [None, self.getRefExpDim, None, None]
editSetCallbacks = [None, self.setRefExpDim, None, None]
self.refExpDimMatrix = ScrolledMatrix(frame, headingList=headingList,
callback = self.selectExpDimRef,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
grid=(0,0), editWidgets=editWidgets,
tipTexts=tipTexts)
if self.isModal:
row +=1
buttons = ButtonList(frameB, commands=[self.close,], grid=(row,0),
gridSpan=(1,2), texts=['Close - All Done',],
expands=True)
buttons.buttons[0].config(bg='#B0FFB0')
# Experimental details
frameC.grid_columnconfigure(0, weight=1)
frameC.grid_rowconfigure(0, weight=1)
row = 0
self.scansEntry = IntEntry(self, text=0, width=10,
returnCallback=self.setNumScans)
self.spectrometerPulldown = PulldownList(self, callback=self.setSpectrometer)
self.probePulldown = PulldownList(self, callback=self.setProbe)
self.statePulldown = PulldownList(self, texts=SAMPLE_STATES,
callback=self.setSampleState)
self.volumeEntry = FloatEntry(self, text=0.0, width=10,
returnCallback=self.setSampleVolume)
self.unitPulldown = PulldownList(self, texts=VOLUME_UNITS, index=1,
callback=self.setSampleVolUnit)
self.tubeEntry = Entry(self, text='', width=10,
returnCallback=self.setNmrTube)
self.spinRateEntry = FloatEntry(self, text=0.0, width=10,
returnCallback=self.setSpinRate)
self.spinAngleEntry = FloatEntry(self, text=0.0, width=10,
returnCallback=self.setSpinAngle)
tipTexts = ['The serial number of the experiment',
'The textual name for the experiment, for graphical displays',
'The specification of the spectrometer used to record the NMR experiment',
'The specification of the NMR probe which was used in the experiment',
'The number of repeated scans made during the experiment',
'The state that best describes the sample and any molecular ordering; liquid (solution), solid, powder, ordered or crystalline',
'The total volume of sample used in the experiment',
'The physical units used to describe the sample volume',
'A description of the type of NMR tube used in the experiment',
'If the experiment involved a spinning sample, what the angle of spin was',
'If the experiment involved a spinning sample, what the rate of spin was, in Hz']
colHeadings = ['#','Name','Spectrometer','Probe',
'Num.\nScans','Sample\nState',
'Sample\nVolume','Volume\nUnit',
'NMR Tube\nType','Spinning\nAngle',
'Spinning\nRate (Hz)']
editWidgets = [None, None,
self.spectrometerPulldown,self.probePulldown,
self.scansEntry,self.statePulldown,
self.volumeEntry,self.unitPulldown,
self.tubeEntry,self.spinAngleEntry,
self.spinRateEntry]
editGetCallbacks = [None, None,
self.getSpectrometer,self.getProbe,
self.getNumScans,self.getSampleState,
self.getSampleVolume,self.getSampleVolUnit,
self.getNmrTube,self.getSpinAngle,
self.getSpinRate]
editSetCallbacks = [None, None,
self.setSpectrometer,self.setProbe,
self.setNumScans,self.setSampleState,
self.setSampleVolume,self.setSampleVolUnit,
self.setNmrTube,self.setSpinAngle,
self.setSpinRate]
self.detailsMatrix = ScrolledMatrix(frameC, multiSelect=True,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
headingList=colHeadings,
callback=self.selectExperiment,
tipTexts=tipTexts, grid=(row, 0))
row += 1
div = LabelDivider(frameC, text='Sample Conditions')
#frame4.grid(row=row,column=0,sticky='nsew')
#frame4.grid_columnconfigure(1, weight=1)
#frame4.grid_rowconfigure(1, weight=1)
label = Label(frameC, text='Current set:')
#label.grid(row=0,column=0,sticky='w')
self.conditionsPulldown = PulldownList(frameC, callback=self.setConditionSet)
#self.conditionsPulldown.grid(row=0,column=1,sticky='w')
tipTexts = ['The type of experimental condition that is described for the sample',
'The value of the specified condition',
'The error in the measurement of the condition value',
'The units of measurement for the condition value and error']
colHeadings = ['Condition','Value','Error','Unit']
editWidgets = [None,None,None,None]
editGetCallbacks = [None,None,None,None]
editSetCallbacks = [None,None,None,None]
self.conditionsMatrix = ScrolledMatrix(frameC, tipTexts=tipTexts,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
initialRows=5,
headingList=colHeadings,
callback=None)
#self.conditionsMatrix.grid(row=1, column=0, sticky='nsew')
texts = ['Edit Conditions',]
tipTexts = ['Open a table to view and edit the specification of experimental and sample conditions',]
commands = [self.editConditions,]
self.conditionsButtons = ButtonList(frameC,texts=texts, commands=commands, tipTexts=tipTexts)
#self.conditionsButtons.grid(row=2, column=0, sticky='ew')
row += 1
tipTexts = ['Display a table to view and administer NMR spectrometer and probe specifications',
'Spread the experimental details in the table from the last selected experiment to all highlighted experiments']
texts = ['Edit NMR Instruments','Propagate Experimental Details']
commands = [self.editInstruments, self.propagateDetails]
self.expDetailsButtons = ButtonList(frameC, texts=texts, commands=commands,
tipTexts=tipTexts, grid=(row, 0), gridSpan=(1,2))
# Shift references
frameD.grid_columnconfigure(0, weight=1)
frameD.grid_rowconfigure(0, weight=1)
self.isotopePulldown = PulldownList(self, texts=STANDARD_ISOTOPES,
callback=self.setShiftRefIsotope)
self.molNamePulldown = PulldownList(self, texts=SHIFT_REF_COMPOUNDS,
callback=self.setShiftRefMolName)
self.atomGroupEntry = Entry(self, text='', width=8, returnCallback=self.setShiftRefAtomGroup)
self.valueEntry = FloatEntry(self, text=0.0, width=6, returnCallback=self.setShiftRefValue)
self.ratioEntry = FloatEntry(self, text=0.0, width=18, formatPlaces=16,
returnCallback=self.setShiftRefRatio)
self.unitPulldown2 = PulldownList(self, texts=STANDARD_UNITS,
callback=self.setShiftRefUnit)
self.geometryEntry = Entry(self, text='', returnCallback=self.setShiftRefGeometry)
self.locationEntry = Entry(self, text='', returnCallback=self.setShiftRefLocation)
self.axisEntry = Entry(self, text='', returnCallback=self.setShiftRefAxis)
tipTexts = ['The serial number of the chemical shift reference specification',
'Whether the chemical shift reference is internal or external to the sample',
'The kind of nuclear isotope to which the reference applies',
'The number of experiments in the project which use the shift reference specification',
'The name of the molecule used to give a reference value to chemical shifts',
'Which atom of the reference molecule provides the reference chemical shift value',
'The reference value of the chemical shift for the specified atom',
'Which measurement unit the chemical shift reference value is in; ppm, ppb or ppt',
'Whether the chemical shift referencing is direct or indirect (and thus uses a shift ratio)',
'The precise numeric ratio used to indirectly get the reference shift value of an isotope, given the direct measurement of a different isotope',
'For external references, a description of the geometry of the container used to hold the reference compound, e.g. cylindrical or spherical',
'For external references, a description of the location of the reference',
'For external references, orientation of the reference container with respect to external magnetic field, e.g. parallel or perpendicular']
colHeadings = ['#','Class','Isotope','Experiments',
'Mol. Name','Atom','Value','Unit',
'Ref Type','Indirect\nShift Ratio',
'Sample\nGeometry','Location','Axis']
editWidgets = [None, None, self.isotopePulldown, None, self.molNamePulldown,
self.atomGroupEntry,self.valueEntry, self.unitPulldown2,None,
self.ratioEntry,self.geometryEntry,self.locationEntry,self.axisEntry]
editGetCallbacks = [None, None, self.getShiftRefIsotope, self.editExperiments, self.getShiftRefMolName,
self.getShiftRefAtomGroup,self.getShiftRefValue,self.getShiftRefUnit,self.toggleShiftRefType,
self.getShiftRefRatio ,self.getShiftRefGeometry,self.getShiftRefLocation,self.getShiftRefAxis]
editSetCallbacks = [None, None, self.setShiftRefIsotope, None, self.setShiftRefMolName,
self.setShiftRefAtomGroup,self.setShiftRefValue,self.setShiftRefUnit,None,
self.setShiftRefRatio ,self.setShiftRefGeometry,self.setShiftRefLocation,self.setShiftRefAxis]
self.shiftRefMatrix = ScrolledMatrix(frameD, multiSelect=True,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
headingList=colHeadings,
callback=self.selectShiftRef,
tipTexts=tipTexts, grid=(0, 0))
self.shiftRefMatrix.doEditMarkExtraRules = self.doShiftRefEditMarkExtraRules
tipTexts = ['Add a new record for a chemical shift reference that is internal to the sample',
'Add a new record for a chemical shift reference that is external to the sample',
'Delete the selected chemical shift reference records']
texts = ['Add Internal\nReference','Add External\nReference','Delete\nSelected']
commands = [self.addInternalShiftRef,self.addExternalShiftRef,self.removeShiftRefs]
self.shiftRefButtons = ButtonList(frameD, texts=texts, tipTexts=tipTexts,
commands=commands, grid=(1, 0))
# NMR Instruments
frameE.grid_columnconfigure(0, weight=1)
frameE.grid_rowconfigure(1, weight=1)
row = 0
div = LabelDivider(frameE, text='NMR Probe')
div.grid(row=row,column=0,sticky='ew')
row += 1
self.probeNameEntry = Entry(self, text='', width=10,
returnCallback=self.setProbeName)
self.probeTypePulldown = PulldownList(self, texts=NMR_PROBE_TYPES,
callback=self.setProbeType)
self.probeModelEntry = Entry(self, text='', width=10,
returnCallback=self.setProbeModel)
self.probeSerialEntry = Entry(self, text='', width=10,
returnCallback=self.setProbeSerial)
self.probeDiameterEntry = FloatEntry(self, text=0.0, width=10,
returnCallback=self.setProbeDiameter)
self.probeDetailsEntry = Entry(self, text='', width=10,
returnCallback=self.setProbeDetails)
tipTexts = ['Serial number of NMR probe specification',
'The name of the probe for graphical representation',
'A classification for the kind of probe used, e.g. liquid, solid, nano, flow or MAS',
'The manufacturer\'s definition of the probe model',
'The manufacturer\'s serial number for the specific NMR probe',
'The probe diameter in cm',
'A user-specified textual comment about the probe']
colHeadings = ['#','Name','Type','Model','Serial #','Diameter (cm)','Details']
editWidgets = [None,self.probeNameEntry, self.probeTypePulldown,
self.probeModelEntry, self.probeSerialEntry,
self.probeDiameterEntry, self.probeDetailsEntry]
editGetCallbacks = [None,self.getProbeName,self.getProbeType,
self.getProbeModel,self.getProbeSerial,
self.getProbeDiameter,self.getProbeDetails]
editSetCallbacks = [None,self.setProbeName,self.setProbeType,
self.setProbeModel,self.setProbeSerial,
self.setProbeDiameter,self.setProbeDetails]
self.probeMatrix = ScrolledMatrix(frameE, tipTexts=tipTexts,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
headingList=colHeadings,
callback=self.selectProbe,
grid=(row, 0))
row += 1
tipTexts = ['Add a new NMR probe specification to the CCPN project',
'Delete the selected NMR probe specification']
texts = ['New Probe Specification','Delete Probe Specification']
commands = [self.newProbe,self.deleteProbe]
self.probeButtons = ButtonList(frameE,texts=texts, tipTexts=tipTexts,
commands=commands, grid=(row, 0))
row += 1
div = LabelDivider(frameE, text='Spectrometer', grid=(row, 0))
row += 1
self.spectrometerNameEntry = Entry(self, text='', width=10,
returnCallback=self.setSpectrometerName)
self.spectrometerFreqEntry = FloatEntry(self, text='', width=12,
returnCallback=self.setSpectrometerFreq)
self.spectrometerModelEntry = Entry(self, text='', width=10,
returnCallback=self.setSpectrometerModel)
self.spectrometerSerialEntry = Entry(self, text='', width=10,
returnCallback=self.setSpectrometerSerial)
self.spectrometerDetailsEntry = Entry(self, text='', width=10,
returnCallback=self.setSpectrometerDetails)
tipTexts = ['Serial number of the NMR spectrometer specification',
'A name for the spectrometer, for graphical displays',
'The rounded spectrometer frequency, from the 1H resonance frequency in MHz, used in textual description, e.g. "500", "900"',
'The actual numeric magnetic field strength expressed as a 1H resonance frequency in MHz, e.g. "500.013"',
'The manufacturer\'s definition of the spectrometer model',
'The manufacturer\'s serial number for the specific NMR spectrometer',
'A user-specified textual comment about the NMR spectrometer']
colHeadings = ['#','Name','Nominal Freq.','Proton Freq. (MHz)','Model','Serial #','Details']
editWidgets = [None,self.spectrometerNameEntry,
None,self.spectrometerFreqEntry,
self.spectrometerModelEntry, self.spectrometerSerialEntry,
self.spectrometerDetailsEntry]
editGetCallbacks = [None,self.getSpectrometerName,
None,self.getSpectrometerFreq,
self.getSpectrometerModel,self.getSpectrometerSerial,
self.getSpectrometerDetails]
editSetCallbacks = [None,self.setSpectrometerName,
None,self.setSpectrometerFreq,
self.setSpectrometerModel,self.setSpectrometerSerial,
self.setSpectrometerDetails]
self.spectrometerMatrix = ScrolledMatrix(frameE, tipTexts=tipTexts,
editSetCallbacks=editSetCallbacks,
editGetCallbacks=editGetCallbacks,
editWidgets=editWidgets,
headingList=colHeadings,
callback=self.selectSpectrometer,
grid=(row, 0))
row += 1
tipTexts = ['Add a new NMR spectrometer specification to the CCPN project',
'Delete the selected NMR spectrometer specification']
texts = ['New Spectrometer Specification','Delete Spectrometer Specification']
commands = [self.newSpectrometer,self.deleteSpectrometer]
self.spectrometerButtons = ButtonList(frameE,texts=texts, tipTexts=tipTexts,
commands=commands, grid=(row, 0))
# Main window
dismissText = None
if self.isModal:
dismissText = 'Done'
bottomButtons = UtilityButtonList(self.tabbedFrame.sideFrame, helpUrl=self.help_url,
closeText=dismissText)
bottomButtons.grid(row=0, column=0, sticky='e')
self.updateShiftRefsAfter()
self.updateExpDetailsAfter()
self.updateExpTypesAfter()
self.updateInstrumentsAfter()
self.update()
self.administerNotifiers(self.registerNotify)
def administerNotifiers(self, notifyFunc):
for func in ('__init__', 'delete','setName'):
for clazz in ('ccp.nmr.Nmr.Experiment','ccp.nmr.Nmr.DataSource'):
notifyFunc(self.updateAfter,clazz, func)
for func in ('__init__', 'delete'):
notifyFunc(self.updateAfter,'ccp.nmr.Nmr.ShiftList', func)
for func in ('setDetails', 'setName', 'setExperimentType','setShiftList',
'setShiftReferences','addShiftReference','removeShiftReference',
'setMolSystems','addMolSystem','removeMolSystem', 'setVolumeUnit'):
notifyFunc(self.updateAfter,'ccp.nmr.Nmr.Experiment', func)
for clazz in ('ccp.nmr.Nmr.DataSource','ccp.nmr.Nmr.ShiftList'):
notifyFunc(self.updateAfter, clazz, 'setName')
for func in ('setIsotopeCode','setMolName'):
for clazz in ('ccp.nmr.Nmr.ExternalShiftReference','ccp.nmr.Nmr.InternalShiftReference'):
notifyFunc(self.updateAfter,clazz, func)
notifyFunc(self.updateAfter, 'ccp.nmr.Nmr.ExpDim', 'setIsAcquisition')
# Experiment Types
for func in ('setRefExperiment','__init__','delete','setName'):
for clazz in ('ccp.nmr.Nmr.Experiment',):
notifyFunc(self.updateExpTypesAfter,clazz, func)
for func in ('setRefExpDimRef',):
for clazz in ('ccp.nmr.Nmr.ExpDimRef',):
notifyFunc(self.updateExpTypesAfter,clazz, func)
# Experiment Details
for func in ('__init__', 'delete','setName',
'setProbe','setSampleConditionSet',
'setSpectrometer',
'setName','setNmrTubeType',
'setNumScans','setSampleState',
'setSampleVolume','setSpinningAngle',
'setSpinningRate','setVolumeUnit'):
notifyFunc(self.updateExpDetailsAfter,'ccp.nmr.Nmr.Experiment', func)
for func in ('__init__','delete','setName'):
notifyFunc(self.updateConditionSets,'ccp.nmr.Nmr.SampleConditionSet', func)
# Shift References
for func in ('__init__','delete','setAtomGroup','setIndirectShiftRatio',
'setValue','setIsotopeCode','setMolName','setReferenceType',
'setUnit','setExperiments','addExperiment','removeExperiment'):
for clazz in ('ccp.nmr.Nmr.ExternalShiftReference','ccp.nmr.Nmr.InternalShiftReference'):
notifyFunc(self.updateShiftRefsAfter,clazz, func)
for func in ('setShiftReferences',
'addShiftReference','removeShiftReference'):
notifyFunc(self.updateShiftRefsAfter,'ccp.nmr.Nmr.Experiment', func)
# NMR Instruments
for func in ('__init__','delete','setName',
'setSerialNumber','setDetails',
'setModel','setNominalFreq',
'setProtonFreq','setExperiments',
'addExperiment','removeExperiment'):
notifyFunc(self.updateInstrumentsAfter,'ccp.general.Instrument.NmrSpectrometer',func)
for func in ('__init__','delete','setName',
'setSerialNumber','setDetails',
'setModel','setProbeType',
'setDiameter','setExperiments',
'addExperiment','removeExperiment'):
notifyFunc(self.updateInstrumentsAfter, 'ccp.general.Instrument.NmrProbe', func)
def open(self):
self.updateAfter()
self.updateExpTypesAfter()
self.updateExpDetailsAfter()
self.updateShiftRefsAfter()
self.updateInstrumentsAfter()
BasePopup.open(self)
def close(self):
if self.isModal:
names = []
for experiment in self.typeExps:
if not experiment.refExperiment:
names.append(experiment.name)
if names:
if len(names) == 1:
msg = 'Experiment %s does not have a reference experiment type set. Try again?' \
% names[0]
else:
msg = 'Experiments %s and %s do not have reference experiment types set. Try again?' \
% (','.join(names[:-1]), names[-1])
if showYesNo('Warning', msg, parent=self):
return
BasePopup.close(self)
def editInstruments(self):
self.tabbedFrame.select(4)
"""
def editExperimentTypes(self):
self.guiParent.editExpType()
"""
"""
def readMdd(self):
from gothenburg import Usf3Io
from memops.gui.FileSelectPopup import FileSelectPopup
popup = FileSelectPopup(self)
file = popup.getFile()
popup.destroy()
if file:
Usf3Io.readDataSource(self.nmrProject, file)
"""
def getName(self, experiment):
if experiment :
width = max(20, len(experiment.name))
self.nameEntry.config(width=width)
self.nameEntry.set(experiment.name)
def setName(self, event):
text = self.nameEntry.get()
if text and text != ' ':
if text != self.experiment.name:
if self.experiment.nmrProject.findFirstExperiment(name=text):
showWarning('Failure','Name %s already in use' % text, parent=self)
return
self.experiment.setName( text )
def getDetails(self, experiment):
if experiment and experiment.details:
self.detailsEntry.set(experiment.details)
def setDetails(self, event):
text = self.detailsEntry.get()
if text and text != ' ':
self.experiment.setDetails( text )
def getMolSystems(self, experiment):
molSystems = self.project.sortedMolSystems()
names = []
values = []
for molSystem in molSystems:
names.append(molSystem.code)
if molSystem in experiment.molSystems:
values.append(True)
else:
values.append(False)
self.molSystemSelect.set(values=values,options=names)
def setMolSystems(self, obj):
if self.experiment:
if obj is None:
self.scrolledMatrix.keyPressEscape()
else:
molSystems = self.project.sortedMolSystems()
values = self.molSystemSelect.get()
selectedMolSystems = [molSystems[i] for i in range(len(values)) if values[i]]
self.experiment.setMolSystems(selectedMolSystems)
self.scrolledMatrix.keyPressEscape()
def getShiftRefs(self, experiment):
shiftRefs = self.nmrProject.sortedShiftReferences()
names = []
values = []
for shiftReference in shiftRefs:
data = (shiftReference.serial,
shiftReference.isotopeCode,
shiftReference.molName)
names.append('%d:%s:%s' % data)
if shiftReference in experiment.shiftReferences:
values.append(True)
else:
values.append(False)
self.shiftRefSelect.set(values=values,options=names)
def setShiftRefs(self, obj):
if self.experiment:
if obj is None:
self.scrolledMatrix.keyPressEscape()
else:
shiftRefs = self.nmrProject.sortedShiftReferences()
values = self.shiftRefSelect.get()
selectedRefs = [shiftRefs[i] for i in range(len(values)) if values[i]]
self.experiment.setShiftReferences(selectedRefs)
self.scrolledMatrix.keyPressEscape()
def getShiftListNames(self, shiftLists):
names = []
for shiftList in shiftLists:
if shiftList is None:
names.append('<New>')
continue
if not shiftList.name:
shiftList.name = 'ShiftList %d' % shiftList.serial
names.append('%s [%d]' % (shiftList.name, shiftList.serial))
return names
def getAcqDim(self, experiment):
expDims = experiment.sortedExpDims()
objects = [None] + expDims
names = ['None']
for expDim in expDims:
expDimRef = getPrimaryExpDimRef(expDim)
if expDimRef and expDimRef.isotopeCodes:
name = '%d (%s)' % (expDim.dim, ','.join(expDimRef.isotopeCodes))
else:
name = '%d' % expDim.dim
names.append(name)
expDim = experiment.findFirstExpDim(isAcquisition=True)
index = objects.index(expDim)
self.acqDimPulldown.setup(names, objects, index)
def setAcqDim(self, *extra):
if self.experiment:
acqExpDim = self.acqDimPulldown.getObject() # could be None
for expDim in self.experiment.sortedExpDims():
expDim.isAcquisition = (expDim is acqExpDim)
def getShiftList(self, experiment):
index = 0
shiftLists = getShiftLists(self.nmrProject) + [None,]
names = self.getShiftListNames(shiftLists)
shiftList = experiment.shiftList
if shiftList and (shiftList in shiftLists):
index = shiftLists.index(shiftList)
self.shiftListPulldown.setup(names, shiftLists, index)
def setShiftList(self, null):
shiftList = self.shiftListPulldown.getObject()
if self.experiment:
project = self.experiment.root
if shiftList is None:
shiftList = newShiftList(project, unit='ppm')
if shiftList and (shiftList is not self.experiment.shiftList):
setExperimentShiftList(self.experiment, shiftList)
"""
def showExperimentalDetails(self):
self.guiParent.editExpDetails(experiment=self.experiment)
"""
def showMeasurementLists(self):
if self.experiment:
self.guiParent.editMeasurementLists(experiment=self.experiment)
else:
self.guiParent.editMeasurementLists(experiment='<Any>')
def cloneExperiment(self):
if self.experiment:
popup = NewExperimentPopup(self, self.experiment)
popup.destroy()
else:
showWarning('Warning','Need to select experiment to clone', parent=self)
def newExperiment(self):
popup = NewExperimentPopup(self)
popup.destroy()
"""
numDim = int(askInteger('Experiment Dimensions','Number of dimensions?',2,parent=self) or 0)
if numDim:
n = len(self.nmrProject.experiments)+1
name = 'New Exp.' + str(n)
while self.nmrProject.findFirstExperiment(name=name):
n += 1
name = 'New Exp.' + str(n)
if numDim < 6:
experiment = Nmr.Experiment(self.nmrProject,name=name,numDim=numDim)
else:
showWarning('Error','Experiment dimensionality greater\nthan 5 not implemented', parent=self)
"""
def deleteExperiment(self, *event):
experiments = self.scrolledMatrix.currentObjects[:]
if len(experiments) == 1:
if showOkCancel('Delete Experiment','Really delete experiment and\nany spectra, lists, peaks etc... ?', parent=self):
self.experiment.delete()
self.experiment = None
elif len(experiments) > 1:
if showOkCancel('Delete Experiment','Really delete %d experiments and\nany spectra, lists, peaks etc... ?' % (len(experiments)), parent=self):
for experiment in experiments:
experiment.delete()
self.experiment = None
def selectCell(self, object, row, col):
self.experiment = object
if self.experiment:
for n in (1, 3):
self.experimentButtons.buttons[n].enable()
def updateAfter(self, *opt):
if self.waiting:
return
else:
self.waiting = True
self.after_idle(self.update)
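# The self.waiting flag plus after_idle() coalesce bursts of notifier
# callbacks into a single update() call once Tk is next idle.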
def update(self):
if self.experiment:
for n in (1, 3):
self.experimentButtons.buttons[n].enable()
else:
for n in (1, 3):
self.experimentButtons.buttons[n].disable()
allMolSys = len(self.project.molSystems)
objectList = []
textMatrix = []
for experiment in self.nmrProject.sortedExperiments():
objectList.append(experiment)
shiftList = experiment.shiftList
if shiftList:
shiftListText = '%s [%d]' % (shiftList.name or '<No name>', shiftList.serial)
else:
shiftListText = ''
experimentType = None
if experiment.refExperiment:
experimentType = experiment.refExperiment.name
dataSourcesText = ''
numSpectra = 0
dataSources = experiment.sortedDataSources()
for dataSource in dataSources:
dataSourcesText += dataSource.name
if isSpectrum(dataSource):
numSpectra += 1
if dataSource is not dataSources[-1]:
dataSourcesText += ' '
rawData = 'No'
if experiment.rawData:
rawData = 'Yes'
shiftRefs = ['%d:%s:%s' % (sr.serial,sr.isotopeCode,sr.molName) for sr in experiment.shiftReferences]
msCodesSorted = [(ms.code, ms) for ms in experiment.molSystems]
msCodesSorted.sort()
numMolSys = len(msCodesSorted)
msCodes = []
if (numMolSys == allMolSys) and (numMolSys > 5):
msCodes = ['** ALL **',]
elif numMolSys > 5:
msCodes = ['** %d **' % numMolSys,]
else:
for i, (code, ms) in enumerate(msCodesSorted):
if i > 0:
if i%5 == 0:
msCodes.append('\n%s' % code)
else:
msCodes.append(' %s' % code)
else:
msCodes.append(code)
expDims = experiment.findAllExpDims(isAcquisition=True)
names = []
for expDim in expDims:
expDimRef = getPrimaryExpDimRef(expDim)
if expDimRef and expDimRef.isotopeCodes:
name = '%d (%s)' % (expDim.dim, ','.join(expDimRef.isotopeCodes))
else:
name = '%d' % expDim.dim
names.append(name)
acqDim = ', '.join(names)
datum = [experiment.serial,
experiment.name,
experiment.numDim,
experimentType,
acqDim,
shiftListText,
dataSourcesText,
''.join(msCodes),
','.join(shiftRefs),
numSpectra,
rawData,
experiment.details or '']
textMatrix.append(datum)
if not objectList:
textMatrix.append([])
self.scrolledMatrix.update(objectList=objectList, textMatrix=textMatrix)
self.waiting = False
def destroy(self):
self.administerNotifiers(self.unregisterNotify)
BasePopup.destroy(self)
# Experiment Type functions
def propagateExpType(self):
experiments = self.expTypeMatrix.currentObjects
if self.typeExpt and (len(experiments) > 1):
refExpDict = {}
refExperiment = None
for experiment in experiments:
refExperiment0 = experiment.refExperiment
if refExperiment0:
refExpDict[refExperiment0] = True
refExperiment = refExperiment0
if len(refExpDict.keys()) > 1:
showWarning('Propagate type failed',
'Ambiguous experiment types in current selection', parent=self)
return
if not refExperiment:
showWarning('Warning',
'No experiment type to propagate', parent=self)
return
name = refExperiment.name
text = 'Really propagate type %s to selected experiments?' % name
if showWarning('Confirm',text, parent=self):
for experiment in experiments:
if experiment.refExperiment:
continue
refExperiments = getFilteredRefExperiments(experiment)
if refExperiment not in refExperiments:
data = (name, experiment.name)
showWarning('Propagate type error',
'Experiment type %s not possible for experiment %s' % data, parent=self)
continue
setRefExperiment(experiment, refExperiment)
initExpTransfers(experiment)
if self.isModal:
self.updateExpTypesAfter() # Modal version doesn't let the notifier calls get through...
def selectExpTransfer(self, obj, row, col):
self.transferExpDimRefs = obj
def selectExpDimRef(self, obj, row, col):
self.expDimRef = obj
def getCategories(self):
names = ['<None>', 'through-bond', 'through-space',
'quantification', 'use external', 'other']
return names
def getExtSources(self):
names = ['<None>','ccpn', 'bruker']
return names
def getSynonyms(self):
names = []
refExperimentSets = []
if self.typeExpt:
refExperiments = getRefExperiments(self.typeExpt)
namesDict = {}
for refExperiment in refExperiments:
nmrExpPrototype = refExperiment.nmrExpPrototype
if nmrExpPrototype.synonym:
name = nmrExpPrototype.synonym
else:
name = nmrExpPrototype.name
if name not in namesDict:
namesDict[name] = set()
namesDict[name].add(refExperiment)
names = namesDict.keys()
names.sort()
refExperimentSets = [namesDict[name] for name in names]
return names, refExperimentSets
def getFullTypes(self, experiment=None):
if experiment is None:
experiment = self.typeExpt
names = []
refExperiments = []
if experiment:
if experiment.refExperiment:
if not (hasattr(experiment, 'pulProgName') and
hasattr(experiment, 'pulProgType') and
hasattr(experiment, 'category') and
experiment.category == 'use external'):
# no external source, try to use nmrExpPrototype synonym
synonym = experiment.refExperiment.nmrExpPrototype.synonym
if synonym:
names = [(rx.name, rx) for rx in getRefExperiments(experiment)
if rx.nmrExpPrototype.synonym == synonym]
if not names:
names = [(rx.name,rx) for rx in getRefExperiments(experiment)]
names.sort()
refExperiments = [x[1] for x in names]
names = [x[0] for x in names]
#names = ['<None>',] + names
return names, refExperiments
def getCategory(self, experiment):
names = self.getCategories()
if names:
index = 0
if experiment:
if experiment.refExperiment:
name = experiment.refExperiment.nmrExpPrototype.category or '<None>'
if name in names:
index = names.index(name)
elif hasattr(experiment, 'category'):
name = experiment.category or '<None>'
if name in names:
index = names.index(name)
elif (hasattr(experiment, 'pulProgName') and
hasattr(experiment, 'pulProgType')):
if experiment.pulProgName and experiment.pulProgType:
index = names.index('use external')
self.categoryPulldown.setup(names,names,index)
else:
self.categoryPulldown.setup([],[], 0)
def getExtSource(self, experiment):
names = self.getExtSources()
if names:
index = 0
if experiment and hasattr(experiment, 'pulProgType'):
name = experiment.pulProgType or '<None>'
if name in names:
index = names.index(name)
self.extSourcePulldown.setup(names,names,index)
else:
self.extSourcePulldown.setup([],[], 0)
def getSynonym(self, experiment):
synonyms, refExperimentSets = self.getSynonyms()
if synonyms:
cats = [None,]
names = ['<None>',] + synonyms
for refExperiments in refExperimentSets:
cat = set()
nn = 0
for refExperiment in refExperiments:
xx = getRefExperimentCategories(refExperiment)
if 'Projected' in xx:
nn += 1
cat.update(xx)
if 'Projected' in cat and nn == len(refExperiments):
# This means that if a synonym matches only because of
# projection experiments, then the synonym appears only
# in the 'Projected' category
cat = set(['Projected'])
cats.append(cat or None)
index = 0
if experiment and experiment.refExperiment:
name = experiment.refExperiment.nmrExpPrototype.synonym
if name in names:
index = names.index(name)
self.synonymPulldown.setup(names,names, index, None, cats)
else:
self.synonymPulldown.setup([],[], 0)
def getExtName(self, experiment):
if experiment and hasattr(experiment, 'pulProgName'):
self.extNameEntry.set(experiment.pulProgName)
def getFullType(self, experiment):
names, refExperiments = self.getFullTypes()
if names:
if len(refExperiments) > 5:
cats = [None,]
for refExperiment in refExperiments:
cat = getRefExperimentCategories(refExperiment)
if 'Projected' in cat:
cat = set(['Projected'])
cats.append(cat or None)
else:
cats = None
names.insert(0,'<None>')
index = 0
if experiment and experiment.refExperiment:
name = experiment.refExperiment.name
if name in names:
index = names.index(name)
self.fullTypePulldown.setup(names, names, index, None, cats)
else:
self.fullTypePulldown.setup([],[], 0)
def getExpDimRefName(self, expDimRef):
expDim = expDimRef.expDim
name = '%d' % expDim.dim
if len(expDim.expDimRefs) > 1:
name += '.%d' % (expDimRef.serial)
return name
def getRefExpDimRefName(self, refExpDimRef):
name = '%d' % refExpDimRef.refExpDim.dim
if len(refExpDimRef.refExpDim.refExpDimRefs) > 1:
name += '.%d' % (refExpDimRef.serial)
return name
def getRefExpDimRefs(self, expDimRef):
isotopeCodes = list(expDimRef.isotopeCodes)
isotopeCodes.sort()
refExperiment = expDimRef.expDim.experiment.refExperiment
refExpDimRefs = []
if refExperiment:
for refExpDim in refExperiment.sortedRefExpDims():
for refExpDimRef in refExpDim.sortedRefExpDimRefs():
refIsotopeCodes = [atomSite.isotopeCode for atomSite in refExpDimRef.expMeasurement.atomSites]
refIsotopeCodes.sort()
if refIsotopeCodes == isotopeCodes:
refExpDimRefs.append(refExpDimRef)
return refExpDimRefs
def getRefExpDim(self, expDimRef):
refExpDimRefs = self.getRefExpDimRefs(expDimRef)
if refExpDimRefs:
names = [self.getRefExpDimRefName(x) for x in refExpDimRefs]
index = 0
if expDimRef.refExpDimRef:
name = self.getRefExpDimRefName(expDimRef.refExpDimRef)
if name in names:
index = names.index(name)
self.refExpDimPulldown.setup(names, refExpDimRefs, index)
else:
self.refExpDimPulldown.setup([],[],0)
def getTransferExpDimA(self, transferExpDimRefs):
expDimRefA, expDimRefB = transferExpDimRefs
expDimRefs = []
for expDim in self.typeExpt.expDims:
for expDimRef in expDim.expDimRefs:
if expDimRef is expDimRefB:
continue
if expDimRef.isotopeCodes == expDimRefA.isotopeCodes:
expDimRefs.append(expDimRef)
names = [self.getExpDimRefName(x) for x in expDimRefs]
index = expDimRefs.index(expDimRefA)
self.transferPulldownA.setup(names, expDimRefs, index)
def getTransferExpDimB(self, transferExpDimRefs):
expDimRefA, expDimRefB = transferExpDimRefs
expDimRefs = []
for expDim in self.typeExpt.expDims:
for expDimRef in expDim.expDimRefs:
if expDimRef is expDimRefA:
continue
if expDimRef.isotopeCodes == expDimRefB.isotopeCodes:
expDimRefs.append(expDimRef)
names = [self.getExpDimRefName(x) for x in expDimRefs]
index = expDimRefs.index(expDimRefB)
self.transferPulldownB.setup(names, expDimRefs, index)
def swapRefExpDimRefs(self, expDimRef1, expDimRef2):
# Old ref mappings
expDim1 = expDimRef1.expDim
expDim2 = expDimRef2.expDim
refExpDim1 = expDim1.refExpDim
refExpDim2 = expDim2.refExpDim
refExpDimRef1 = expDimRef1.refExpDimRef
refExpDimRef2 = expDimRef2.refExpDimRef
# Swap expDimRef refMappings
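# Both links are cleared first, presumably so the cross-assignment below
# never leaves two ExpDimRefs pointing at the same RefExpDimRef mid-swap
# (an assumption about the data model's uniqueness constraints).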
expDim1.setRefExpDim(None)
expDimRef1.setRefExpDimRef(None)
expDim2.setRefExpDim(None)
expDimRef2.setRefExpDimRef(None)
expDim1.setRefExpDim(refExpDim2)
expDimRef1.setRefExpDimRef(refExpDimRef2)
expDim2.setRefExpDim(refExpDim1)
expDimRef2.setRefExpDimRef(refExpDimRef1)
def setTransferExpDimA(self, null):
if self.transferExpDimRefs:
expDimRefA, expDimRefB = self.transferExpDimRefs
expDimRefC = self.transferPulldownA.getObject()
if expDimRefC is not expDimRefA:
self.swapRefExpDimRefs(expDimRefA, expDimRefC)
initExpTransfers(self.typeExpt)
def setTransferExpDimB(self, null):
if self.transferExpDimRefs:
expDimRefA, expDimRefB = self.transferExpDimRefs
expDimRefC = self.transferPulldownB.getObject()
if expDimRefC is not expDimRefB:
self.swapRefExpDimRefs(expDimRefB, expDimRefC)
initExpTransfers(self.typeExpt)
def setCategory(self, null):
name = self.categoryPulldown.getText()
if self.typeExpt:
if name == '<None>':
self.typeExpt.category = None
if self.typeExpt.refExperiment:
setRefExperiment(self.typeExpt,None)
else:
self.typeExpt.category = name
if (self.typeExpt.refExperiment and
self.typeExpt.refExperiment.nmrExpPrototype.category != name):
setRefExperiment(self.typeExpt,None)
self.updateExpTypesAfter()
def setExtSource(self, null):
name = self.extSourcePulldown.getText()
if self.typeExpt:
if name == '<None>':
self.typeExpt.pulProgType = None
else:
self.typeExpt.pulProgType = name
self.updateExpTypesAfter()
def setSynonym(self, null):
name = self.synonymPulldown.getText()
if self.typeExpt:
if not self.typeExpt.refExperiment or (self.typeExpt.refExperiment.nmrExpPrototype.synonym != name ):
if name == '<None>':
if self.typeExpt.refExperiment:
setRefExperiment(self.typeExpt,None)
initExpTransfers(self.typeExpt)
else:
refExperiments = getRefExperiments(self.typeExpt)
for refExperiment in refExperiments:
if refExperiment.nmrExpPrototype.synonym == name:
setRefExperiment(self.typeExpt, refExperiment)
initExpTransfers(self.typeExpt)
break
else:
for refExperiment in refExperiments:
if refExperiment.nmrExpPrototype.name == name:
setRefExperiment(self.typeExpt, refExperiment)
initExpTransfers(self.typeExpt)
break
if self.isModal:
self.updateExpTypesAfter()
def setExtName(self, null):
if self.typeExpt:
text = self.extNameEntry.get()
if text and text != ' ':
self.typeExpt.pulProgName = text
else:
self.typeExpt.pulProgName = None
if self.isModal:
self.updateExpTypesAfter()
def setFullType(self, null):
name = self.fullTypePulldown.getText()
if self.typeExpt:
if not self.typeExpt.refExperiment or (self.typeExpt.refExperiment.name != name ):
if name == '<None>':
if self.typeExpt.refExperiment:
setRefExperiment(self.typeExpt,None)
initExpTransfers(self.typeExpt)
else:
refExperiments = getRefExperiments(self.typeExpt)
for refExperiment in refExperiments:
if refExperiment.name == name:
setRefExperiment(self.typeExpt, refExperiment)
initExpTransfers(self.typeExpt)
break
if self.isModal:
self.updateExpTypesAfter()
def setRefExpDim(self, null):
expDimRef = self.refExpDimMatrix.currentObject
if expDimRef:
refExpDimRef = self.refExpDimPulldown.getObject()
if not expDimRef.refExpDimRef or \
(expDimRef.refExpDimRef is not refExpDimRef):
expDimRef2 = None
refExpDimRef2 = expDimRef.refExpDimRef
for expDim in self.typeExpt.expDims:
for expDimRef3 in expDim.expDimRefs:
if expDimRef3.refExpDimRef and \
(expDimRef3.refExpDimRef is refExpDimRef):
expDimRef2 = expDimRef3
break
else:
continue
break
expDimRef.setRefExpDimRef(None)
expDimRef.expDim.setRefExpDim(None)
if refExpDimRef:
if refExpDimRef2 and expDimRef2:
expDimRef2.expDim.setRefExpDim(refExpDimRef2.refExpDim)
expDimRef2.setRefExpDimRef(refExpDimRef2)
expDimRef.expDim.setRefExpDim(refExpDimRef.refExpDim)
expDimRef.setRefExpDimRef(refExpDimRef)
initExpTransfers(expDimRef.expDim.experiment)
def selectTypeExpt(self, object, row, col):
if object is not self.typeExpt:
self.typeExpt = object
self.updateExpTransfers()
self.updateRefDims()
def updateExpTypes(self, experiments=None):
textMatrix = []
objectList = []
if experiments:
self.typeExps = experiments
else:
self.typeExps = self.nmrProject.sortedExperiments()
for experiment in self.typeExps:
# get extSource and extName,
extSource = None
if hasattr(experiment,'pulProgName'):
extName = experiment.pulProgName
if hasattr(experiment,'pulProgType'):
# extSource is irrelevant without an extName
extSource = experiment.pulProgType
else:
extName = None
# initialise: use external source, if available, to set refExperiment
refExperiment = experiment.refExperiment
if extSource is not None and not hasattr(experiment, 'category'):
# this is first time we get here, and we have external name and source
# use external source to set fullType
experiment.category = 'use external'
refExperiments = getRefExperiments(experiment)
if refExperiments:
if len(refExperiments) < 8:
# set to first refExp found. NB len() check should not be necessary.
refExperiment = refExperiments[0]
setRefExperiment(experiment, refExperiment)
initExpTransfers(experiment)
else:
# no match found. use category None instead.
del experiment.category
# get list of allowed type strings (NB must be after initialisation)
altTypes, altRefExps = self.getFullTypes(experiment)
altTypeStr = None
if refExperiment:
# refExperiment was set - set parameters
category = refExperiment.nmrExpPrototype.category
synonym = refExperiment.nmrExpPrototype.synonym
fullType = refExperiment.name
if fullType in altTypes:
# type fits possibilities - remove from altTypes
altTypes.remove(fullType)
else:
# fullType not in possibilities - add warning
altTypeStr = 'NB! Inconsistent full type'
else:
# no refExperiment - set up without it
if hasattr(experiment, 'category'):
category = experiment.category
else:
category = None
synonym = None
fullType = None
if altTypeStr is None:
# set string for altTypes, if not pre-set
if len(altTypes) > 4:
altTypeStr = ' ... '
elif altTypes:
altTypeStr = '; '.join(altTypes)
else:
altTypeStr = '<None>'
datum = [experiment.serial,
experiment.name,
extSource,
extName,
category,
synonym,
fullType,
altTypeStr]
objectList.append(experiment)
textMatrix.append(datum)
self.expTypeMatrix.update(textMatrix=textMatrix, objectList=objectList)
if self.typeExpt not in objectList:
self.typeExpt = None
self.updateExpTransfers()
self.updateRefDims()
self.waitingT = False
def updateRefDims(self):
textMatrix = []
objectList = []
if self.typeExpt:
for expDim in self.typeExpt.sortedExpDims():
for expDimRef in expDim.sortedExpDimRefs():
measurementText = None
redr = expDimRef.refExpDimRef
if redr:
if redr.expSteps:
steps = ','.join([str(step.stepNumber) for step in redr.expSteps])
else:
steps = ','.join([str(step.stepNumber) for step in redr.expMeasurement.expSteps])
measure = redr.expMeasurement.measurementType
atoms = '(%s)' % ( ','.join([site.name for site in redr.expMeasurement.atomSites]) )
time = ' %s timing' % redr.constantTime
couple = ''
if redr.coupledIsotopeCodes:
couple = ' Coupling:%s' % ( ','.join(redr.coupledIsotopeCodes))
measurementText = '%s %s%s%s%s' % (steps,measure,atoms,couple,time)
expDimText = '%d' % expDim.dim
if len(expDim.expDimRefs) > 1:
expDimText += '.%d' % expDimRef.serial
ss = expDimRef.displayName
if ss:
expDimText += ' (%s)' % ss
refExpDimText = ''
if redr:
refExpDimText = self.getRefExpDimRefName(redr)
datum = [ expDimText,
refExpDimText,
','.join(expDimRef.isotopeCodes),
measurementText]
objectList.append(expDimRef)
textMatrix.append(datum)
self.refExpDimMatrix.update(textMatrix=textMatrix, objectList=objectList)
def updateExpTransfers(self):
textMatrix = []
objectList = []
if self.typeExpt:
expTransfers = set()
for expDim in self.typeExpt.expDims:
for expDimRef in expDim.expDimRefs:
for expTransfer in expDimRef.expTransfers:
expTransfers.add(expTransfer)
for expTransfer in expTransfers:
expDimRefA, expDimRefB = list(expTransfer.expDimRefs)
isotopesA = ','.join(expDimRefA.isotopeCodes)
isotopesB = ','.join(expDimRefB.isotopeCodes)
if expDimRefA.serial > 1:
dimA = '%d.%d (%s)' % (expDimRefA.expDim.dim,expDimRefA.serial,isotopesA)
else:
dimA = '%d (%s)' % (expDimRefA.expDim.dim,isotopesA)
if expDimRefB.serial > 1:
dimB = '%d.%d (%s)' % (expDimRefB.expDim.dim,expDimRefB.serial,isotopesB)
else:
dimB = '%d (%s)' % (expDimRefB.expDim.dim,isotopesB)
(dimA, expDimRefA), (dimB, expDimRefB) = sorted([(dimA, expDimRefA), (dimB, expDimRefB)])
datum = [dimA, expTransfer.transferType, dimB]
objectList.append( (expDimRefA, expDimRefB) )
textMatrix.append(datum)
self.transferMatrix.update(textMatrix=textMatrix,
objectList=objectList)
def updateExpTypesAfter(self, *obj):
if self.waitingT:
return
else:
self.waitingT = True
self.after_idle(self.updateExpTypes)
# Experiment Details Functions
def propagateDetails(self):
experiments = self.detailsMatrix.currentObjects
if len(experiments) < 2:
return
expt = self.detailsExp
experiments.remove(expt)
names = ','.join([e.name for e in experiments])
msg = 'Propagate details from experiment %s to %s?' % (expt.name,names)
if showOkCancel('Confirm',msg, parent=self):
spectrometer = expt.spectrometer
probe = expt.probe
numScans = expt.numScans
sampleState = expt.sampleState
sampleVolume = expt.sampleVolume
volumeUnit = expt.volumeUnit
nmrTubeType = expt.nmrTubeType
spinningAngle = expt.spinningAngle
spinningRate = expt.spinningRate
for expt2 in experiments:
expt2.spectrometer = spectrometer
expt2.probe = probe
expt2.numScans = numScans
expt2.sampleState = sampleState
expt2.sampleVolume = sampleVolume
expt2.volumeUnit = volumeUnit
expt2.nmrTubeType = nmrTubeType
expt2.spinningAngle = spinningAngle
expt2.spinningRate = spinningRate
def selectExperiment(self, obj, row, col):
self.detailsExp = obj
def setNumScans(self, event):
if self.detailsExp:
value = self.scansEntry.get()
if (value is not None) and (value < 1):
value = None
self.detailsExp.numScans = value
def getSpectrometers(self):
store = self.project.currentInstrumentStore
spectrometers = [None,]
spectrometers += store.findAllInstruments(className='NmrSpectrometer')
return spectrometers
def getProbes(self):
store = self.project.currentInstrumentStore
probes = [None, ]
probes += store.findAllInstruments(className='NmrProbe')
return probes
def setSpectrometer(self, null):
spectrometer = self.spectrometerPulldown.getObject()
if self.detailsExp:
self.detailsExp.spectrometer = spectrometer
def setProbe(self, null):
probe = self.probePulldown.getObject()
if self.detailsExp:
self.detailsExp.probe = probe
def setSampleState(self, null):
sampleState = self.statePulldown.getText()
if self.detailsExp:
self.detailsExp.sampleState = sampleState
def setSampleVolume(self, event):
if self.detailsExp:
value = self.volumeEntry.get()
if (value is not None) and (value <= 0.0):
value = None
self.detailsExp.sampleVolume = value
def setSampleVolUnit(self, index, name=None):
volumeUnit = self.unitPulldown.getText()
if self.detailsExp:
self.detailsExp.volumeUnit = volumeUnit
def setNmrTube(self, event):
if self.detailsExp:
self.detailsExp.nmrTubeType = self.tubeEntry.get() or None
def setSpinRate(self, event):
if self.detailsExp:
value = self.spinRateEntry.get()
if (value is not None) and (value < 0.0):
value = None
self.detailsExp.spinningRate = value
def setSpinAngle(self, event):
if self.detailsExp:
self.detailsExp.spinningAngle = self.spinAngleEntry.get()
def getNumScans(self, experiment):
value = 0
if experiment:
value = experiment.numScans
self.scansEntry.set(value)
def getSampleState(self, experiment):
if experiment and experiment.sampleState:
self.statePulldown.set(experiment.sampleState)
def getSpectrometer(self, experiment):
index = 0
names = ['<None>',]
if experiment:
spectrometers = self.getSpectrometers()
names += ['%d:%s' % (s.serial,s.name) for s in spectrometers[1:]]
index = spectrometers.index(experiment.spectrometer)
self.spectrometerPulldown.setup(names, spectrometers, index)
def getProbe(self, experiment):
index = 0
names = ['<None>',]
if experiment:
probes = self.getProbes()
names += ['%d:%s' % (p.serial,p.name) for p in probes[1:]]
index = probes.index(experiment.probe)
self.probePulldown.setup(names, probes, index)
def getSampleVolume(self, experiment):
value = 0.0
if experiment:
value = experiment.sampleVolume
self.volumeEntry.set(value)
def getSampleVolUnit(self, experiment):
index = 1
if experiment and (experiment.volumeUnit in VOLUME_UNITS): # guard: experiment may be None, like the other getters
index = VOLUME_UNITS.index(experiment.volumeUnit)
self.unitPulldown.setup(VOLUME_UNITS, VOLUME_UNITS, index)
def getNmrTube(self, experiment):
text = ''
if experiment:
text = experiment.nmrTubeType or ''
self.tubeEntry.set(text)
def getSpinRate(self, experiment):
value = 0.0
if experiment:
value = experiment.spinningRate
self.spinRateEntry.set(value)
def getSpinAngle(self, experiment):
value = 0.0
if experiment:
value = experiment.spinningAngle
if value is None:
value = 54.74
self.spinAngleEntry.set(value)
def setConditionSet(self, null):
sampleConditionSet = self.conditionsPulldown.getObject()
if self.detailsExp:
self.detailsExp.setSampleConditionSet(sampleConditionSet)
# Could be none
def updateConditionSets(self, sampleConditionSet=None):
index = 0
sampleConditionSets = self.nmrProject.sortedSampleConditionSets()
names = ['%d' % scs.serial for scs in sampleConditionSets]
sampleConditionSets.append(None)
names.append('<None>')
if self.detailsExp and self.detailsExp.sampleConditionSet:
index = sampleConditionSets.index(self.detailsExp.sampleConditionSet)
self.conditionsPulldown.setup(names, sampleConditionSets, index)
def editConditions(self):
self.parent.editSampleConditionSets(sampleConditionSet=self.detailsExp.sampleConditionSet)
def setExperiment(self, index, name):
experiment = self.nmrProject.findFirstExperiment(name=name)
if experiment is not self.detailsExp:
self.detailsExp = experiment
self.updateExpDetailsAfter()
def updateExpDetailsAfter(self, *opt):
if self.waitingD:
return
else:
self.waitingD = True
self.after_idle(self.updateExpDetails)
def updateExpDetails(self):
# Exp details
objectList = []
textMatrix = []
for expt in self.nmrProject.sortedExperiments():
probe = expt.probe
if probe:
probe = '%d:%s' % (probe.serial,probe.name)
spectrometer = expt.spectrometer
if spectrometer:
spectrometer = '%d:%s' % (spectrometer.serial,spectrometer.name)
datum = [expt.serial,
expt.name,
spectrometer,
probe,
expt.numScans,
expt.sampleState,
expt.sampleVolume,
expt.volumeUnit,
expt.nmrTubeType,
expt.spinningAngle,
expt.spinningRate]
objectList.append(expt)
textMatrix.append(datum)
self.detailsMatrix.update(objectList=objectList, textMatrix=textMatrix)
# Conditions
objectList = []
textMatrix = []
if self.detailsExp and self.detailsExp.sampleConditionSet:
for sampleCondition in self.detailsExp.sampleConditionSet.sampleConditions:
datum = [sampleCondition.condition,
sampleCondition.value,
sampleCondition.error,
sampleCondition.unit]
textMatrix.append(datum)
objectList.append(sampleCondition)
self.conditionsMatrix.update(objectList=objectList, textMatrix=textMatrix)
self.waitingD = False
# Shift Reference Functions
def doShiftRefEditMarkExtraRules(self, obj, row, col):
if (col > 9) and (obj.className != 'ExternalShiftReference'):
return False
return True
def editExperiments(self, obj):
self.parent.editExperiment()
def toggleShiftRefType(self, shiftReference):
if shiftReference:
if shiftReference.referenceType == 'direct':
shiftReference.setReferenceType('indirect')
else:
shiftReference.setReferenceType('direct')
def getShiftRefValue(self, shiftReference):
value = 0.0
if shiftReference:
value = shiftReference.value
self.valueEntry.set(value)
def getShiftRefRatio(self, shiftReference):
value = 0.0
if shiftReference:
value = shiftReference.indirectShiftRatio
self.ratioEntry.set(value)
def getShiftRefGeometry(self, shiftReference):
text = ''
if shiftReference and (shiftReference.className == 'ExternalShiftReference'):
text = shiftReference.sampleGeometry
self.geometryEntry.set(text)
def getShiftRefLocation(self, shiftReference):
text = ''
if shiftReference and (shiftReference.className == 'ExternalShiftReference'):
text = shiftReference.location
self.locationEntry.set(text)
def getShiftRefAxis(self, shiftReference):
text = ''
if shiftReference and (shiftReference.className == 'ExternalShiftReference'):
text = shiftReference.axis
self.axisEntry.set(text)
def getShiftRefAtomGroup(self, shiftReference):
text = ''
if shiftReference:
text = shiftReference.atomGroup
self.atomGroupEntry.set(text)
def getShiftRefIsotope(self, shiftReference):
self.isotopePulldown.set(shiftReference.isotopeCode)
def getShiftRefMolName(self, shiftReference):
self.molNamePulldown.set(shiftReference.molName)
def getShiftRefUnit(self, shiftReference):
self.unitPulldown2.set(shiftReference.unit)
def setShiftRefValue(self, event):
if self.shiftReference:
value = self.valueEntry.get() or 0.0
self.shiftReference.value = value
def setShiftRefRatio(self, event):
if self.shiftReference:
value = self.ratioEntry.get() or None
self.shiftReference.indirectShiftRatio = value
def setShiftRefGeometry(self, event):
if self.shiftReference:
text = self.geometryEntry.get() or None
self.shiftReference.sampleGeometry = text
def setShiftRefLocation(self, event):
if self.shiftReference:
text = self.locationEntry.get() or None
self.shiftReference.location = text
def setShiftRefAxis(self, event):
if self.shiftReference:
text = self.axisEntry.get() or None
self.shiftReference.axis = text
def setShiftRefAtomGroup(self, event):
if self.shiftReference:
text = self.atomGroupEntry.get() or None
self.shiftReference.atomGroup = text
def setShiftRefIsotope(self, null):
isotopeCode = self.isotopePulldown.getText()
self.shiftReference.isotopeCode = isotopeCode
def setShiftRefMolName(self, null):
molName = self.molNamePulldown.getText()
self.shiftReference.molName = molName
def setShiftRefUnit(self, null):
unit = self.unitPulldown2.getText()
self.shiftReference.unit = unit
def addInternalShiftRef(self):
if self.nmrProject:
newRef = self.nmrProject.newInternalShiftReference
self.shiftReference = newRef(isotopeCode='1H', molName='TSP',
atomGroup='H', value=0.000,
referenceType='direct')
def addExternalShiftRef(self):
if self.nmrProject:
newRef = self.nmrProject.newExternalShiftReference
self.shiftReference = newRef(isotopeCode='1H', molName='TSP',
atomGroup='H', value=0.000,
referenceType='direct')
def removeShiftRefs(self):
haveExpts = False
for shiftReference in self.shiftRefMatrix.currentObjects:
if shiftReference.experiments:
haveExpts = True
break
if haveExpts and not showOkCancel('Confirm','Really delete shift references with links to experiments?'):
return
for shiftReference in self.shiftRefMatrix.currentObjects:
shiftReference.delete()
def selectShiftRef(self, object, row, col):
if object:
self.shiftReference = object
def updateShiftRefsAfter(self, *opt):
if self.waitingS:
return
else:
self.waitingS = True
self.after_idle(self.updateShiftRefs)
def updateShiftRefs(self):
objectList = []
textMatrix = []
if self.nmrProject:
for shiftReference in self.nmrProject.sortedShiftReferences():
refClass = shiftReference.className[:8]
if refClass == 'External':
geometry = shiftReference.sampleGeometry
location = shiftReference.location
axis = shiftReference.axis
else:
geometry = location = axis = None
#' '.join([e.name for e in shiftReference.experiments]),
datum = [shiftReference.serial,
refClass,
shiftReference.isotopeCode,
len(shiftReference.experiments),
shiftReference.molName,
shiftReference.atomGroup,
shiftReference.value,
shiftReference.unit,
shiftReference.referenceType,
shiftReference.indirectShiftRatio,
geometry,location,axis]
textMatrix.append(datum)
objectList.append(shiftReference)
self.shiftRefMatrix.update(objectList=objectList, textMatrix=textMatrix)
self.waitingS = False
# NMR Instrument functions
def selectProbe(self, obj, row, col):
self.probe = obj
def selectSpectrometer(self, obj, row, col):
self.spectrometer = obj
def newProbe(self):
instrumentStore = self.project.currentInstrumentStore
if instrumentStore is None:
instrumentStore = self.project.findFirstInstrumentStore()
if instrumentStore is None:
instrumentStore = self.project.newInstrumentStore(name='Default')
name = ''
while not name:
name = askString('Text Entry','Enter NMR Probe Name')
probe = instrumentStore.newNmrProbe(name=name)
def newSpectrometer(self):
instrumentStore = self.project.currentInstrumentStore
if instrumentStore is None:
instrumentStore = self.project.findFirstInstrumentStore()
if instrumentStore is None:
instrumentStore = self.project.newInstrumentStore(name='Default')
name = ''
while not name:
name = askString('Text Entry','Enter NMR Spectrometer Name')
spectrometer = instrumentStore.newNmrSpectrometer(name=name)
def deleteProbe(self):
if self.probe:
msg = 'Really remove specification of NMR probe %s?' % self.probe.name
if showOkCancel('query',msg):
self.probe.delete()
self.probe = None
def deleteSpectrometer(self):
if self.spectrometer:
msg = 'Really remove specification of NMR Spectrometer %s?' % self.spectrometer.name
if showOkCancel('query',msg):
self.spectrometer.delete()
self.spectrometer = None
def setProbeName(self, event):
if self.probe:
default = 'Probe%d' % self.probe.serial
self.probe.name = self.probeNameEntry.get() or default
def setProbeType(self, index, name=None):
if name is None:
index = self.probeTypePulldown.getSelectedIndex()
if self.probe:
self.probe.setProbeType( NMR_PROBE_TYPES[index] )
self.updateInstrumentsAfter()
def setProbeModel(self, event):
if self.probe:
self.probe.setModel( self.probeModelEntry.get() or None )
def setProbeSerial(self, event):
if self.probe:
self.probe.serialNumber = self.probeSerialEntry.get() or None
def setProbeDiameter(self, event):
if self.probe:
self.probe.setDiameter( self.probeDiameterEntry.get() or None )
self.updateInstrumentsAfter()
def setProbeDetails(self, event):
if self.probe:
self.probe.setDetails( self.probeDetailsEntry.get() or None )
def getProbeName(self,probe):
text = ''
if probe:
text = probe.name
self.probeNameEntry.set(text)
def getProbeType(self,probe):
index = -1
if probe and probe.probeType:
index = NMR_PROBE_TYPES.index(probe.probeType)
self.probeTypePulldown.setIndex(index)
def getProbeModel(self,probe):
text = ''
if probe:
text = probe.model
self.probeModelEntry.set(text)
def getProbeSerial(self,probe):
text = ''
if probe:
text = probe.serialNumber
self.probeSerialEntry.set(text)
def getProbeDiameter(self,probe):
value = 0.0
if probe:
value = probe.diameter
self.probeDiameterEntry.set(value)
def getProbeDetails(self,probe):
text = ''
if probe:
text = probe.details
self.probeDetailsEntry.set(text)
def setSpectrometerName(self, event):
if self.spectrometer:
default = 'Spectrometer%d' % self.spectrometer.serial
self.spectrometer.name = self.spectrometerNameEntry.get() or default
def setSpectrometerFreq(self, event):
if self.spectrometer:
value = self.spectrometerFreqEntry.get() or None
self.spectrometer.setProtonFreq( value )
if value is not None:
value = '%d' % round(value)
self.spectrometer.setNominalFreq( value )
self.updateInstrumentsAfter()
def setSpectrometerModel(self, event):
if self.spectrometer:
self.spectrometer.setModel( self.spectrometerModelEntry.get() or None )
def setSpectrometerSerial(self, event):
if self.spectrometer:
self.spectrometer.serialNumber = self.spectrometerSerialEntry.get() or None
def setSpectrometerDetails(self, event):
if self.spectrometer:
self.spectrometer.setDetails( self.spectrometerDetailsEntry.get() or None )
def getSpectrometerName(self, spectrometer):
text = ''
if spectrometer:
text = spectrometer.name
self.spectrometerNameEntry.set(text)
def getSpectrometerFreq(self, spectrometer):
value = 0.0
if spectrometer:
value = spectrometer.protonFreq
self.spectrometerFreqEntry.set(value)
def getSpectrometerModel(self, spectrometer):
text = ''
if spectrometer:
text = spectrometer.model
self.spectrometerModelEntry.set(text)
def getSpectrometerSerial(self, spectrometer):
text = ''
if spectrometer:
text = spectrometer.serialNumber
self.spectrometerSerialEntry.set(text)
def getSpectrometerDetails(self, spectrometer):
text = ''
if spectrometer:
text = spectrometer.details
self.spectrometerDetailsEntry.set(text)
def updateInstrumentsAfter(self, *opt):
if self.waitingI:
return
else:
self.waitingI = True
self.after_idle(self.updateInstruments)
def updateInstruments(self):
store = self.project.currentInstrumentStore
getInstruments = store.findAllInstruments
# Probe
objectList = []
textMatrix = []
probes = [(p.serial, p) for p in getInstruments(className='NmrProbe')]
probes.sort()
for serial, probe in probes:
datum = [serial,
probe.name,
probe.probeType,
probe.model,
probe.serialNumber,
probe.diameter,
probe.details]
objectList.append(probe)
textMatrix.append(datum)
self.probeMatrix.update(objectList=objectList, textMatrix=textMatrix)
# Spectrometer
objectList = []
textMatrix = []
spectrometers = [(s.serial, s) for s in getInstruments(className='NmrSpectrometer')]
spectrometers.sort()
for serial, spectrometer in spectrometers:
datum = [serial,
spectrometer.name,
spectrometer.nominalFreq,
spectrometer.protonFreq,
spectrometer.model,
spectrometer.serialNumber,
spectrometer.details]
objectList.append(spectrometer)
textMatrix.append(datum)
self.spectrometerMatrix.update(objectList=objectList, textMatrix=textMatrix)
self.waitingI = False
class NewExperimentPopup(BasePopup):
def __init__(self, parent, experiment=None):
self.guiParent = parent
self.experiment = experiment
BasePopup.__init__(self, parent, modal=True, title="Experiment detail")
def body(self, guiFrame):
experiment = self.experiment
self.geometry('400x100')
guiFrame.expandGrid(0,1)
row = 0
if not experiment:
Label(guiFrame, text='Number of dimensions: ', grid=(row, 0))
objects = range(1,7)
texts = ['%s' % obj for obj in objects]
self.numDimList = PulldownList(guiFrame, texts=texts, objects=objects, index=1,
tipText='The number of dimensions of the experiment',
callback=self.updateIsotopeLists, grid=(row,1), sticky='w')
row += 1
Label(guiFrame, text='Isotopes: ', grid=(row, 0))
self.isotopeFrame = Frame(guiFrame, grid=(row, 1), sticky='w')
self.isotopeLists = []
self.updateIsotopeLists()
row += 1
Label(guiFrame, text='Experiment name: ', grid=(row, 0))
name = ''
self.exptNameEntry = Entry(guiFrame, text=name, tipText='The name of the experiment', width=30, grid=(row,1), sticky='ew')
row += 1
self.fileCheckButton = CheckButton(guiFrame, text='Include link for data file?',
tipText='Whether the new spectrum should have a link to a data file',
selected=True, grid=(row,0), gridSpan=(1,2), sticky='w')
if experiment:
text = 'Clone %s' % experiment.name
ss = ''
else:
text = 'Create new'
ss = ' and number of dimensions'
tipTexts = ['%s experiment and spectrum using this name%s' % (text, ss)]
texts = [text]
commands = [self.ok]
self.buttons = UtilityButtonList(guiFrame, commands=commands, texts=texts,
tipTexts=tipTexts, doClone=False, grid=(row, 1), sticky='e')
def updateIsotopeLists(self, *extra):
numDim = self.numDimList.getObject()
for n, isotopeList in enumerate(self.isotopeLists):
if n < numDim:
isotopeList.grid(row=0, column=n)
else:
isotopeList.grid_forget()
# create pulldowns only for dimensions not yet covered
# (range(n, numDim) would recreate the last existing column)
for m in range(len(self.isotopeLists), numDim):
texts = ('1H', '2H', '13C', '15N', '19F', '31P', '79Br')
isotopeList = PulldownList(self.isotopeFrame, texts=texts,
tipText='The isotope for dimension %s' % (m+1), grid=(0,m))
self.isotopeLists.append(isotopeList)
def apply(self):
experimentOld = self.experiment
if experimentOld:
numDim = experimentOld.numDim
else:
numDim = self.numDimList.getObject()
isotopeCodes = [isotopeList.getText() for isotopeList in self.isotopeLists[:numDim]]
exptNameNew = self.exptNameEntry.get()
spectrumNameNew = None
if not exptNameNew:
showWarning('Experiment name', 'Experiment name not specified', parent=self)
return False
names = [expt.name for expt in self.nmrProject.experiments]
if exptNameNew in names:
showWarning('Name used', 'Name %s already used' % exptNameNew, parent=self)
return False
includeDataFile = self.fileCheckButton.get()
if experimentOld:
experimentNew = cloneExperiment(experimentOld, exptNameNew, cloneDataFile=includeDataFile)
dataSourceNew = experimentNew.findFirstDataSource()
else:
if includeDataFile:
import os
dataPath = os.path.join(os.getcwd(), 'dataFile.spc')
else:
dataPath = None
experimentNew = defaultExperiment(self.nmrProject, exptNameNew, isotopeCodes, dataPath=dataPath)
dataSourceNew = experimentNew.findFirstDataSource()
from ccpnmr.analysis.popups.EditSpectrum import EditSpectrumPopup
popup = EditSpectrumPopup(self.guiParent, transient=False, modal=True,
useReducedDim=False, spectrum=dataSourceNew)
cancelled = popup.cancelled
popup.destroy()
if cancelled:
dataSourceNew.delete()
else:
self.parent.parent.finishInitSpectrum(dataSourceNew)
return True
|
[
"pigeonfeng@gmail.com"
] |
pigeonfeng@gmail.com
|
704dcd8dcdbc31cfe46bdd55ec4093ec4fe5e5d3
|
5d271f987365c22ce3c3a4c2bbe63697bfec5bd7
|
/manage.py
|
74f787b176d847c2a586328508743cbc862622a6
|
[] |
no_license
|
CCowens93/project_4
|
2f5c57db858221747b7ad1ea210bab70ed46c34e
|
025146c2f86cf01ec8b2e9ef62da122c11a2db88
|
refs/heads/master
| 2022-02-12T01:26:37.381167
| 2019-12-11T21:23:12
| 2019-12-11T21:23:12
| 224,011,131
| 0
| 0
| null | 2021-09-22T18:11:08
| 2019-11-25T18:05:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 636
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wellness_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
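# Typical invocations (standard Django management commands, for illustration):
#   python manage.py migrate
#   python manage.py runserver
#   python manage.py createsuperuser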
|
[
"cassandracowens93@gmail.com"
] |
cassandracowens93@gmail.com
|
9d34e91d6cfc0b1259b7ebb93596b35090238932
|
771608a893fcd90da7414730bb5c6f90ec286884
|
/model.py
|
11f19ff081d37c4ba58ef770b0b9d9a95a93a706
|
[
"MIT"
] |
permissive
|
aheadlead/PyTetris
|
fc03463cada21926e0b116cd11250baa3a6fb8c9
|
681c932699867203a57317e3365dd51e19c727f7
|
refs/heads/master
| 2021-01-22T06:45:15.542615
| 2015-04-07T03:08:59
| 2015-04-07T03:08:59
| 33,247,929
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,161
|
py
|
#!/usr/bin/env python
# coding=utf-8
__author__ = 'weiyulan'
from exception import *
from timer import Timer
from random import choice, randint
from threading import Thread, Lock
from sys import stderr
from copy import deepcopy
# Difficulty constants
#
# Difficulty affects both the interval between block drops and the score calculation.
DIFFICULTY = {'so easy': {'interval': 3, 'score_factor': 0.5},
'easy': {'interval': 2, 'score_factor': 1},
'normal': {'interval': 1, 'score_factor': 1.5},
'hard': {'interval': .5, 'score_factor': 2},
'crazy': {'interval': .25, 'score_factor': 2.5}}
class Block(object):
def __init__(self, content):
self._content = content
@property
def content(self):
return self._content
@content.setter
def content(self, value):
# TODO for debug
if len(self._content) > 5:
stderr.write("model: Block: member content has been changed\n")
self._content = value
def rotate(self):
"""顺时针旋转当前方块。
旋转前
[[0, 1, 1],
[1, 1, 0]]
旋转后
[[1, 0],
[1, 1],
[0, 1]]"""
new_content = [[] for i in range(len(self.content[0]))]
for a in self.content:
for index, b in enumerate(a):
new_content[index].append(b)
for new_row in new_content:
new_row.reverse()
self.content = new_content
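# Equivalent one-liner (a sketch of the same clockwise rotation):
# self.content = [list(reversed(col)) for col in zip(*self.content)]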
def is_conflicting(self, map_, position):
"""给定一个棋盘 map_ ,判断方块放在 position 所描述的位置上是否会引起冲突。
引起冲突则不能放在这个位置。
:param map_: 给定的棋盘。
:param position: 描述方块位于棋盘的位置。
:type position: 二维元组
:returns: 是否能放在 position 所描述的位置上。
:rtype: Bool
"""
map_size = (len(map_[0]), len(map_)) # width and height of map_
self_size = (len(self.content[0]), len(self.content))
# reject positions that fall outside the board
if position[0] < 0 or position[1] < 0:
return True
if position[0] > map_size[0]-self_size[0] or \
position[1] > map_size[1]-self_size[1]:
return True
# reject positions that overlap cells already occupied on the board
for row_index, row in enumerate(self.content):
for cell_index, cell in enumerate(row):
if cell is not None and \
map_[position[1]+row_index][position[0]+cell_index] is not None:
return True
return False
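# Worked example (hypothetical values): on an empty 3x3 map, the 1x2 block
# [[1, 1]] at position (0, 0) returns False (it fits), while position (2, 0)
# returns True because position[0] > map_width - block_width (2 > 3 - 2).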
def __len__(self):
return len(self.content)
def __getitem__(self, item):
return self.content[item]
class PyTetrisModel(object):
blockList = {"I": Block([[1, 1, 1, 1]]),
"J": Block([[1, 0, 0],
[1, 1, 1]]),
"L": Block([[0, 0, 1],
[1, 1, 1]]),
"O": Block([[1, 1],
[1, 1]]),
"S": Block([[0, 1, 1],
[1, 1, 0]]),
"T": Block([[0, 1, 0],
[1, 1, 1]]),
"Z": Block([[1, 1, 0],
[0, 1, 1]])}
blockColorList = [(0, 0, 0),
(194, 54, 33),
(37, 188, 36),
(173, 173, 39),
(73, 46, 255),
(211, 56, 211),
(51, 187, 200),
(203, 204, 205)]
def __init__(self):
self._state = "initialized"
self._difficulty = 'crazy'
# map uses the top-left corner as its origin; x increases to the right,
# y increases downward.
#
# (0, 0) (1, 0) (2, 0) ... (9, 0)
# (0, 1) (1, 1) (2, 1) ... (9, 1)
# ... ... ... ... ...
# (0, 19) (1, 19) (2, 19) ... (9, 19)
#
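# Note: the grid is indexed as map[y][x], so a board position (x, y) is read
# back as self.map[position[1]][position[0]] (see Block.is_conflicting).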
self.map = Block([[None]*10 for i in range(20)])
self.timer = None
self.next_block = Block([[]])
self.active_block = Block([[]])
self.active_block_position = (0, 0)
self.gameover_callback = None
# Pressing the left/right arrow keys can race with the tick function,
# which accesses this object concurrently, so a Lock is kept here.
self.lock = Lock()
self.score = 0
@property
def difficulty(self):
return self._difficulty
@difficulty.setter
def difficulty(self, value):
if self.state == "initialized":
if value in DIFFICULTY.keys():
self._difficulty = value # write the backing field; 'self.difficulty = value' would recurse into this setter
else:
raise NoSuchDifficulty
else:
raise WrongState
@property
def state(self):
return self._state
@state.setter
def state(self, value):
stderr.write("model: set state to " + str(value) + "\n")
self._state = value
def start(self):
if self.state == "initialized":
self.state = "start"
self.timer = Timer(target=self.tick,
interval=DIFFICULTY[self.difficulty]['interval'])
self.timer.run()
else:
stderr.write("model: the state is not initialized, can not start the game")
def pause(self):
self.timer.pause()
def resume(self):
self.timer.resume()
def press_arrow_key(self, direction):
if self.state == "falling":
with self.lock:
class Apple(BaseException):
def __init__(self):
pass
try:
self.active_block_position[0] -= 1 if direction == "left" else -1 # move one cell
if self.active_block.is_conflicting(map_=self.map,
position=self.active_block_position) is True:
raise Apple()
except Apple:
# the block cannot move
self.active_block_position[0] += 1 if direction == "left" else -1 # roll back the change
return
def press_rotate_key(self):
if self.state == "falling":
with self.lock:
self.active_block.rotate()
if self.active_block.is_conflicting(self.map, self.active_block_position) is True:
self.active_block.rotate()
self.active_block.rotate()
self.active_block.rotate()
def press_hard_drop(self):
if self.state == "falling":
with self.lock:
while 1:
self.active_block_position[1] += 1
if self.active_block.is_conflicting(self.map, self.active_block_position) is True:
self.active_block_position[1] -= 1
break
def tick(self):
# TODO for debug
stderr.write("model: ticking!!! state: " + self.state + "\n")
def start(): # the start state
self.map = Block([[None]*10 for i in range(20)]) # clear the map (range, not Py2 xrange, for consistency with the rest of the file)
self.next_block = self.choice_a_block() # pick the next block
self.state = "new_block"
return
def new_block(): # a new block appears
self.active_block_position = [3, 0] # a block first appears at point (3, 0) on the map
# every block's anchor is its top-left corner, just like the map's origin.
self.active_block = self.next_block
self.next_block = self.choice_a_block()
# if the new block collides with what is already on the map, the game is over
if self.active_block.is_conflicting(map_=self.map,
position=self.active_block_position) is True:
self.state = "gameover"
return
# if the top row has any occupied cell, the game is over.
for cell in self.map[0]:
if cell is not None:
self.state = "gameover"
return
self.state = "falling"
def falling(): # the falling state
class Apple(BaseException):
def __init__(self):
pass
try:
stderr.write("model: try to fall block (" + str(self.active_block_position) + ")\n")
self.active_block_position[1] += 1 # fall one cell
if self.active_block.is_conflicting(map_=self.map,
position=self.active_block_position) is True:
raise Apple()
except Apple:
# the block cannot fall any further
stderr.write("model: falling: stop falling\n")
self.active_block_position[1] -= 1 # roll back the change
# write active_block into the map
for y in range(len(self.active_block)):
for x in range(len(self.active_block[0])):
if self.active_block[y][x] is not None:
self.map[y+self.active_block_position[1]][x+self.active_block_position[0]] = self.active_block[y][x]
self.state = "line_clear"
else:
self.state = "falling"
def line_clear(): # line clearing
def tmp_check(row): # check whether row contains a blank cell
try:
row.index(None)
return True # there is a blank cell, keep the row
except ValueError:
return False # no blank cells, so the row can be cleared
self.map.content = filter(tmp_check, self.map.content) # drop the full rows (Python 2 filter returns a list)
clear_row_n = 20 - len(self.map.content) # count how many rows were cleared
self.map.content = [[None]*10 for i in range(clear_row_n)] + self.map.content # pad the top with as many empty rows as were cleared
# the float() below exists only to appease a PEP8 checker
score_delta = float(DIFFICULTY[self.difficulty]['score_factor']) * \
int(2**(clear_row_n-1)) * 10 # score calculation (the int() handles the no-rows-cleared case)
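# Worked example: on 'normal' (score_factor 1.5), clearing 2 rows scores
# 1.5 * 2**1 * 10 = 30.0; clearing no rows gives int(2**-1) == 0, hence 0.0.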
self.score += score_delta
stderr.write("model: line_clear: score_delta = " + str(score_delta) + "\n")
self.state = "new_block"
def gameover(): # the game-over state
self.timer.stop()
if hasattr(self.gameover_callback, '__call__') is True:
gameover_thread = Thread(target=self.gameover_callback)
gameover_thread.setDaemon(True)
gameover_thread.start() # start(), not run(): run() would execute the callback synchronously on this thread
self.state = "stopped"
with self.lock:
fsm = {"start": start,
"new_block": new_block,
"falling": falling,
"line_clear": line_clear,
"gameover": gameover}
if self.state in fsm.keys():
fsm[self.state]()
else:
raise WrongState("状态 " + str(self.state) + " 不应该出现在 tick 过程中。")
@staticmethod
def choice_a_block():
"""choice_a_block() -> String
:rtype : String
随机返回一个方块。"""
block = deepcopy(choice(PyTetrisModel.blockList.values())) # randomly pick a block
# TODO for debug
stderr.write("model: choice_a_block: step 1: " + str(block.content) + "\n")
for i in range(randint(0, 3)): # randomly rotate the block
block.rotate()
color = choice(PyTetrisModel.blockColorList) # randomly choose a colour
for index, row in enumerate(block.content):
block.content[index] = [color if item == 1 else None for item in row]
# TODO for debug
stderr.write("model: choice_a_block: step 2: " + str(block.content) + "\n")
return block
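# Minimal driver sketch (hypothetical usage; the real view layer is expected
# to wire this up via the gameover_callback hook):
# if __name__ == '__main__':
#     model = PyTetrisModel()
#     model.gameover_callback = lambda: stderr.write("model: game over\n")
#     model.start() # the Timer then drives tick() at the difficulty interval
#     model.press_arrow_key("left") # controller-style input while falling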
|
[
"aheadlead@dlifep.com"
] |
aheadlead@dlifep.com
|
5fb0d972d6642aaba5be0b90b8fbdfa1dc89a4b9
|
2947efe1efd6e19981d0aa5c55dfc5f3700b8a1b
|
/segm/eval/miou.py
|
eff9501d15cfce65950eea3de7c75009fa185cff
|
[] |
no_license
|
srdg/segmenter
|
f71effdade6d11da5ab041cadcb283123e9f1126
|
4f8a4435ea67c8611c5180edc7bec1d24f7342ad
|
refs/heads/master
| 2023-08-01T12:55:21.474549
| 2021-09-14T16:19:46
| 2021-09-14T16:19:46
| 402,280,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,027
|
py
|
import sys
import click
from pathlib import Path
import yaml
import numpy as np
from PIL import Image
import shutil
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from segm.utils import distributed
from segm.utils.logger import MetricLogger
import segm.utils.torch as ptu
from segm.model.factory import load_model
from segm.data.factory import create_dataset
from segm.metrics import gather_data, compute_metrics
from segm.model.utils import inference
from segm.data.utils import seg_to_rgb, rgb_denormalize, IGNORE_LABEL
from segm import config
def blend_im(im, seg, alpha=0.5):
pil_im = Image.fromarray(im)
pil_seg = Image.fromarray(seg)
im_blend = Image.blend(pil_im, pil_seg, alpha).convert("RGB")
return np.asarray(im_blend)
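# PIL's Image.blend computes out = im1*(1 - alpha) + im2*alpha per pixel,
# so the default alpha=0.5 mixes the input image and segmentation equally.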
def save_im(save_dir, save_name, im, seg_pred, seg_gt, colors, blend, normalization):
seg_rgb = seg_to_rgb(seg_gt[None], colors)
pred_rgb = seg_to_rgb(seg_pred[None], colors)
im_unnorm = rgb_denormalize(im, normalization)
save_dir = Path(save_dir)
# save images
im_uint = (im_unnorm.permute(0, 2, 3, 1).cpu().numpy()).astype(np.uint8)
seg_rgb_uint = (255 * seg_rgb.cpu().numpy()).astype(np.uint8)
seg_pred_uint = (255 * pred_rgb.cpu().numpy()).astype(np.uint8)
for i in range(pred_rgb.shape[0]):
if blend:
blend_pred = blend_im(im_uint[i], seg_pred_uint[i])
blend_gt = blend_im(im_uint[i], seg_rgb_uint[i])
ims = (im_uint[i], blend_pred, blend_gt)
else:
ims = (im_uint[i], seg_pred_uint[i], seg_rgb_uint[i])
for im, im_dir in zip(
ims,
(save_dir / "input", save_dir / "pred", save_dir / "gt"),
):
pil_out = Image.fromarray(im)
im_dir.mkdir(exist_ok=True)
pil_out.save(im_dir / save_name)
def process_batch(
model,
batch,
window_size,
window_stride,
window_batch_size,
):
ims = batch["im"]
ims_metas = batch["im_metas"]
ori_shape = ims_metas[0]["ori_shape"]
ori_shape = (ori_shape[0].item(), ori_shape[1].item())
filename = batch["im_metas"][0]["ori_filename"][0]
model_without_ddp = model
if ptu.distributed:
model_without_ddp = model.module
seg_pred = inference(
model_without_ddp,
ims,
ims_metas,
ori_shape,
window_size,
window_stride,
window_batch_size,
)
seg_pred = seg_pred.argmax(0)
im = F.interpolate(ims[-1], ori_shape, mode="bilinear")
return filename, im.cpu(), seg_pred.cpu()
def eval_dataset(
model,
multiscale,
model_dir,
blend,
window_size,
window_stride,
window_batch_size,
save_images,
frac_dataset,
dataset_kwargs,
):
db = create_dataset(dataset_kwargs)
normalization = db.dataset.normalization
dataset_name = dataset_kwargs["dataset"]
im_size = dataset_kwargs["image_size"]
cat_names = db.base_dataset.names
n_cls = db.unwrapped.n_cls
if multiscale:
db.dataset.set_multiscale_mode()
logger = MetricLogger(delimiter=" ")
header = ""
print_freq = 50
ims = {}
seg_pred_maps = {}
idx = 0
for batch in logger.log_every(db, print_freq, header):
colors = batch["colors"]
filename, im, seg_pred = process_batch(
model,
batch,
window_size,
window_stride,
window_batch_size,
)
ims[filename] = im
seg_pred_maps[filename] = seg_pred
idx += 1
if idx > len(db) * frac_dataset:
break
seg_gt_maps = db.dataset.get_gt_seg_maps()
if save_images:
save_dir = model_dir / "images"
if ptu.dist_rank == 0:
if save_dir.exists():
shutil.rmtree(save_dir)
save_dir.mkdir()
if ptu.distributed:
torch.distributed.barrier()
for name in sorted(ims):
instance_dir = save_dir
filename = name
if dataset_name == "cityscapes":
filename_list = name.split("/")
instance_dir = instance_dir / filename_list[0]
filename = filename_list[-1]
if not instance_dir.exists():
instance_dir.mkdir()
save_im(
instance_dir,
filename,
ims[name],
seg_pred_maps[name],
torch.tensor(seg_gt_maps[name]),
colors,
blend,
normalization,
)
if ptu.dist_rank == 0:
shutil.make_archive(save_dir, "zip", save_dir)
# shutil.rmtree(save_dir)
print(f"Saved eval images in {save_dir}.zip")
if ptu.distributed:
torch.distributed.barrier()
seg_pred_maps = gather_data(seg_pred_maps)
scores = compute_metrics(
seg_pred_maps,
seg_gt_maps,
n_cls,
ignore_index=IGNORE_LABEL,
ret_cat_iou=True,
distributed=ptu.distributed,
)
if ptu.dist_rank == 0:
scores["inference"] = "single_scale" if not multiscale else "multi_scale"
suffix = "ss" if not multiscale else "ms"
scores["cat_iou"] = np.round(100 * scores["cat_iou"], 2).tolist()
for k, v in scores.items():
if k != "cat_iou" and k != "inference":
scores[k] = v.item()
if k != "cat_iou":
print(f"{k}: {scores[k]}")
scores_str = yaml.dump(scores)
with open(model_dir / f"scores_{suffix}.yml", "w") as f:
f.write(scores_str)
@click.command()
@click.argument("model_path", type=str)
@click.argument("dataset_name", type=str)
@click.option("--im-size", default=None, type=int)
@click.option("--multiscale/--singlescale", default=False, is_flag=True)
@click.option("--blend/--no-blend", default=True, is_flag=True)
@click.option("--window-size", default=None, type=int)
@click.option("--window-stride", default=None, type=int)
@click.option("--window-batch-size", default=4, type=int)
@click.option("--save-images/--no-save-images", default=False, is_flag=True)
@click.option("-frac-dataset", "--frac-dataset", default=1.0, type=float)
def main(
model_path,
dataset_name,
im_size,
multiscale,
blend,
window_size,
window_stride,
window_batch_size,
save_images,
frac_dataset,
):
model_dir = Path(model_path).parent
# start distributed mode
ptu.set_gpu_mode(True)
distributed.init_process()
model, variant = load_model(model_path)
patch_size = model.patch_size
model.eval()
model.to(ptu.device)
if ptu.distributed:
model = DDP(model, device_ids=[ptu.device], find_unused_parameters=True)
cfg = config.load_config()
dataset_cfg = cfg["dataset"][dataset_name]
normalization = variant["dataset_kwargs"]["normalization"]
if im_size is None:
im_size = dataset_cfg.get("im_size", variant["dataset_kwargs"]["image_size"])
if window_size is None:
window_size = dataset_cfg.get(
"window_size", variant["dataset_kwargs"]["crop_size"]
)
if window_stride is None:
window_stride = dataset_cfg.get(
"window_stride", variant["dataset_kwargs"]["crop_size"]
)
dataset_kwargs = dict(
dataset=dataset_name,
image_size=im_size,
crop_size=im_size,
patch_size=patch_size,
batch_size=1,
num_workers=10,
split="val",
normalization=normalization,
crop=False,
rep_aug=False,
)
eval_dataset(
model,
multiscale,
model_dir,
blend,
window_size,
window_stride,
window_batch_size,
save_images,
frac_dataset,
dataset_kwargs,
)
distributed.barrier()
distributed.destroy_process()
    sys.exit(0)  # exit cleanly after a successful evaluation
if __name__ == "__main__":
main()
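# Example invocation (hypothetical script name and checkpoint path; the
# positional arguments and flags come from the click options above):
#   python eval_miou.py runs/seg_tiny/checkpoint.pth ade20k \
#       --singlescale --save-images --window-batch-size 4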
|
[
"rstrudel@gmail.com"
] |
rstrudel@gmail.com
|
94358be1fc47c6d41306692c5fcb12bf08bfbe8d
|
caba0ed4459dbec2faf2ea0a4d08e207b6315d1b
|
/lib/3d_visualization.py
|
951ac7c7b63f953bf76c386b0f52dba0e38f9ce4
|
[] |
no_license
|
ZhaoQii/CSC-AI-Study-Group-Project
|
8ea7475aefde899cc4e307c643e9dce3a0273865
|
1534c97357d4036002090339836c4068fd467438
|
refs/heads/master
| 2021-07-07T07:13:19.053647
| 2017-10-05T15:44:44
| 2017-10-05T15:44:44
| 105,844,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the 3d projection on older matplotlib
def ThreeD_Visualization(X, real_labels):
    # one colour per cluster label (supports up to 10 distinct labels)
    colors = ['g','r','c','y','b','m','black','purple','orange','brown']
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') was removed in matplotlib 3.7
    for i in range(len(X)):
        label = int(real_labels[i])  # np.int was removed in numpy 1.24; use the builtin
        ax.scatter(X[i,0], X[i,1], X[i,2], c=colors[label])
    plt.show()
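# A minimal usage sketch (added for illustration; random stand-in data, not
# part of the original project):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    offsets = np.repeat(np.arange(3)[:, None] * 4.0, 20, axis=0)  # three well-separated clusters
    X_demo = rng.normal(size=(60, 3)) + offsets
    labels_demo = np.repeat(np.arange(3), 20)
    ThreeD_Visualization(X_demo, labels_demo)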
|
[
"noreply@github.com"
] |
ZhaoQii.noreply@github.com
|
e679e27af8377c625974f3edc4c2c03dbc054b70
|
754c47dcd6e944caeeb529c6a68eaa1fc5f5de76
|
/my_wifie_django_taobao/products_app/views/goods_view.py
|
b165a4f762bede27069ba5e293889ca76e2988ad
|
[] |
no_license
|
B9527/my_wifie_django_taobao
|
2b506575374d08e9a9df1aa30e1b7fccaf2ec3e1
|
e8485b9ca61ff5c3303e0e1b1cf49c9464550c33
|
refs/heads/master
| 2020-09-26T15:48:13.416740
| 2019-12-27T03:13:33
| 2019-12-27T03:13:33
| 226,285,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,977
|
py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
__title__ = ''
__author__ = 'BY'
__mtime__ = '2019/12/4'
"""
from django.http import HttpResponse
from django.http import JsonResponse
from myshop.settings import local_url_host
from products_app.models import Products, Category, ProductsImage
from django.views import View
from products_app.serializers.products_serializers import ProductsSerializer, CategorySerializer, ProductsAllSerializer
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
class QueryProducts(View):
def get(self, request):
products_store_on = Products.objects.filter(is_on=1, is_store=1)
products_list_on = Products.objects.filter(is_on=1)
store_serializer = ProductsSerializer(products_store_on, many=True)
no_store_serializer = ProductsSerializer(products_list_on, many=True)
return_data = {"main_data": no_store_serializer.data, "home_data": store_serializer.data}
return JsonResponse(return_data)
class QueryCategoryProducts(View):
def get(self, request):
category_id = request.GET.get("mId", None)
product_name = request.GET.get("name", None)
products_all = Products.objects.filter(is_on=1)
if category_id is not None:
products_all = products_all.filter(category_id=category_id)
if product_name is not None:
products_all = products_all.filter(product_name__icontains=product_name)
products_all_serializer = ProductsSerializer(products_all, many=True)
return_data = {"cate_goods_data": products_all_serializer.data, "mid": category_id}
return JsonResponse(return_data)
class QueryCategory(View):
def get(self, request):
category_id = request.GET.get("mid")
category = Category.objects.all()
category_serializer = CategorySerializer(category, many=True)
return_data = {"left_data": category_serializer.data, "mid": category_id}
return JsonResponse(return_data)
class GoodsDetailView(View):
def get(self, request):
p_id = request.GET.get("p_id")
image_url_list = []
products_one = Products.objects.get(product_id=p_id)
p_serializer = ProductsAllSerializer(products_one, many=False)
goods_data = p_serializer.data
products_image_list = ProductsImage.objects.filter(product_id=p_id)
for products_image_obj in products_image_list:
image_url_list.append(local_url_host+products_image_obj.image_url.name)
if len(image_url_list) == 0:
if "http" in products_one.product_img_url.name:
image_url_list.append(products_one.product_img_url.name)
else:
image_url_list.append(local_url_host + products_one.product_img_url.name)
return_data = {"goods_data": goods_data, "p_id": p_id,
'image_url_list': image_url_list}
return JsonResponse(return_data)
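# Illustrative requests (hypothetical URL paths; the real routes live in the
# project's urls.py, which is not shown here):
#   GET /products/category?mId=2&name=shirt   -> QueryCategoryProducts
#   GET /products/detail?p_id=17              -> GoodsDetailView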
|
[
"1335239218@qq.com"
] |
1335239218@qq.com
|
fafcd1b833fa564d2038d2f4ef4dd091990df367
|
d6674d40b5afe4b6ba8d8fef558890c5dad8e309
|
/music.py
|
55a21a9daca497d464188239d23fac29a420da46
|
[] |
no_license
|
IamNaN/music
|
9c3094ab129e448c3253b21205efd87da7c28da0
|
9396c482a4335be212240dddbe7410957fd1f002
|
refs/heads/master
| 2021-05-15T05:45:55.503104
| 2017-12-28T07:06:44
| 2017-12-28T07:06:44
| 115,595,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,132
|
py
|
#!/usr/bin/env python
# ssh -R 52698:localhost:52698 root@10.0.1.9
# password: musicbox
# rmate music.py
# rmate music.ini
# rmate /opt/musicbox/startup.sh
# rmate /music/playlists/ROTS.m3u
# rmate /var/log/musicbox_startup.log
# rmate /etc/rc.local
#
import RPi.GPIO as GPIO
import os
import time
import ConfigParser
LRoAPin = 23
LRoBPin = 17
LRoPPin = 27
RRoPPin = 24
volume = 50
track = 0
config = ConfigParser.ConfigParser()
config.read('music.ini')
flag = 0
Last_RoB_Status = 0
Current_RoB_Status = 0
def setup():
global config
global volume
global track
global localtrack
volume = config.getint('DEFAULT', 'volume')
track = config.getint('DEFAULT', 'track')
localtrack = config.getint('DEFAULT', 'localtrack')
GPIO.setmode(GPIO.BCM) # Numbers GPIOs by chip location
GPIO.setup(LRoAPin, GPIO.IN,pull_up_down=GPIO.PUD_DOWN) # input mode
GPIO.setup(LRoBPin, GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(LRoPPin, GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(RRoPPin, GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
setVolume(volume)
os.system('mpc -q clear')
os.system('mpc -q single off')
os.system('mpc -q random off')
os.system('mpc -q repeat off')
os.system('mpc -q consume off')
os.system('mpc -q add file:///music/sounds/start.mp3')
os.system('mpc -q play')
queueTrack(track)
with open('music.ini', 'wb') as configfile:
config.write(configfile)
def playLocal():
global config
global track
global localtrack
localtrack = localtrack + 1
if localtrack > 607:
localtrack = 1
config.set('DEFAULT', 'localtrack', localtrack)
with open('music.ini', 'wb') as configfile:
config.write(configfile)
os.system('mpc -q clear')
os.system('mpc -q load ROTS')
os.system("mpc -q play %d" % localtrack)
queueTrack(track)
time.sleep(1)
def changeTrack(extra):
global config
global track
track = track + 1
if track > 4:
track = 0
print "Track %d selected" % track
os.system('mpc -q clear')
os.system('mpc -q add file:///music/sounds/ding.mp3')
os.system('mpc -q play')
queueTrack(track)
config.set('DEFAULT', 'track', track)
with open('music.ini', 'wb') as configfile:
config.write(configfile)
time.sleep(1)
def queueTrack(t):
if t == 0:
os.system('mpc -q insert http://dir.xiph.org/listen/5456/listen.m3u')
elif t == 1:
os.system('mpc -q insert http://files.hawaiipublicradio.org/hpr1.m3u')
elif t == 2:
os.system('mpc -q insert http://www2.kuow.org/stream/kuowhb.m3u')
elif t == 3:
os.system('mpc -q insert http://quarrel.str3am.com:7040/live-aac.m3u')
else:
os.system('mpc -q insert http://wsdownload.bbc.co.uk/worldservice/meta/live/shoutcast/mp3/eieuk.pls')
print "Track %d added" % t
def changeVolume(direction):
global volume
global config
volume = volume + direction
volume = 0 if volume < 0 else volume
volume = 100 if volume > 100 else volume
setVolume(volume)
config.set('DEFAULT', 'volume', volume)
with open('music.ini', 'wb') as configfile:
config.write(configfile)
def setVolume(v):
if v > 90:
v = 90
os.system("mpc -q volume %d" % v)
def rotaryDeal():
global flag
global Last_RoB_Status
global Current_RoB_Status
Last_RoB_Status = GPIO.input(LRoBPin)
while(not GPIO.input(LRoAPin) and not GPIO.input(LRoPPin) and not GPIO.input(RRoPPin)):
if GPIO.input(LRoPPin):
time.sleep(0.3)
changeTrack(0)
elif GPIO.input(RRoPPin):
playLocal()
else:
Current_RoB_Status = GPIO.input(LRoBPin)
flag = 1
if flag == 1:
flag = 0
if (Last_RoB_Status == 0) and (Current_RoB_Status == 1):
changeVolume(1)
if (Last_RoB_Status == 1) and (Current_RoB_Status == 0):
changeVolume(-1)
def loop():
while True:
rotaryDeal()
def destroy():
GPIO.cleanup() # Release resource
setup()
# GPIO.add_event_detect(LRoPPin, GPIO.RISING, callback=changeTrack, bouncetime=500)
if __name__ == '__main__': # Program start from here
try:
loop()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()
|
[
"dgerton@gmail.com"
] |
dgerton@gmail.com
|
3356bb672f605644398d97b58d23466904bbf6dd
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/pkgs/sympy-1.0-py27_0/lib/python2.7/site-packages/sympy/matrices/sparsetools.py
|
06bbafda94526ec6da0c43903ff143073c10c3b5
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy import SparseMatrix
def _doktocsr(dok):
"""Converts a sparse matrix to Compressed Sparse Row (CSR) format.
Parameters
==========
A : contains non-zero elements sorted by key (row, column)
JA : JA[i] is the column corresponding to A[i]
IA : IA[i] contains the index in A for the first non-zero element
of row[i]. Thus IA[i+1] - IA[i] gives number of non-zero
elements row[i]. The length of IA is always 1 more than the
number of rows in the matrix.
"""
row, JA, A = [list(i) for i in zip(*dok.row_list())]
IA = [0]*((row[0] if row else 0) + 1)
for i, r in enumerate(row):
IA.extend([i]*(r - row[i - 1])) # if i = 0 nothing is extended
IA.extend([len(A)]*(dok.rows - len(IA) + 1))
shape = [dok.rows, dok.cols]
return [A, JA, IA, shape]
def _csrtodok(csr):
"""Converts a CSR representation to DOK representation"""
smat = {}
A, JA, IA, shape = csr
for i in range(len(IA) - 1):
indices = slice(IA[i], IA[i + 1])
for l, m in zip(A[indices], JA[indices]):
smat[i, m] = l
return SparseMatrix(*(shape + [smat]))
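# A hedged round-trip check (added for illustration; not part of the sympy
# source). It exercises both converters on a small matrix:
if __name__ == "__main__":
    m = SparseMatrix(3, 3, {(0, 0): 1, (1, 2): 5, (2, 1): 7})
    A, JA, IA, shape = _doktocsr(m)
    # A == [1, 5, 7], JA == [0, 2, 1], IA == [0, 1, 2, 3], shape == [3, 3]
    assert _csrtodok([A, JA, IA, shape]) == m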
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
1bdf3446c8ad0cf38eaa3de1da247fad10e2b25a
|
22603ac91c4113bac013de7a1ba669077e11729f
|
/Searching/find_max_in_array_first_increasing_then_decreasing.py
|
b838728fb3633b18a2d92f2235c35934f4995e8b
|
[] |
no_license
|
edyoda/DSA-with-Rudrangshu-310321
|
14e522a4c5251bb7148d22c61cc3a018f2adbde1
|
2c2e0a9e51b73ea2d52a62a175c06d757eafdb20
|
refs/heads/main
| 2023-04-07T05:20:47.474823
| 2021-04-24T15:29:23
| 2021-04-24T15:29:23
| 353,604,283
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
def findMaximum(arr, low, high):
if low == high:
return arr[low]
if high == low + 1 and arr[low] >= arr[high]:
return arr[low]
if high == low + 1 and arr[low] < arr[high]:
return arr[high]
    mid = (low + high)//2  # equivalently: low + (high - low)//2, which avoids overflow in fixed-width languages
if arr[mid] > arr[mid + 1] and arr[mid] > arr[mid - 1]:
return arr[mid]
if arr[mid] > arr[mid + 1] and arr[mid] < arr[mid - 1]:
return findMaximum(arr, low, mid-1)
else:
return findMaximum(arr, mid + 1, high)
# Driver program to check the function above
arr = [1, 3, 50, 10, 9, 7, 6]
n = len(arr)
print ("The maximum element is %d"% findMaximum(arr, 0, n-1))
|
[
"rudrangshu.das@gmail.com"
] |
rudrangshu.das@gmail.com
|
8edad1734bc355ec7f461b5b068362f8717f7cf4
|
2bd851979ca23b1289cffb2304be2b4a0d612328
|
/0x02-python-import_modules/5-variable_load.py
|
0f647da7c672bdca27d67c4b6576fd88cf102cfe
|
[] |
no_license
|
fikepaci/alx-higher_level_programming
|
6fdeda0a538147483a401e95fb1ac70b3b1da87b
|
66f15e86c5ba40a26e8a889ed87546b1e43ac1b5
|
refs/heads/master
| 2023-05-01T05:47:42.465608
| 2021-05-12T17:13:42
| 2021-05-12T17:13:42
| 319,888,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
#!/usr/bin/python3
if __name__ == "__main__":
from variable_load_5 import a
print("{:d}".format(a))
|
[
"pacifiquemachine@gmail.com"
] |
pacifiquemachine@gmail.com
|
96a8b3631b2441f86f62ee118a1bc9ef58b0ca16
|
fdb0e068aaa12199a61f09177b9d1c4bbe7a1969
|
/misc_functs.py
|
89beb39096c80ccce8fdca02244b2ccf0e70d479
|
[] |
no_license
|
emiudeh/Project-Statistical_Natural_Language_Processing
|
e9936a64937a7f4dc1ad37534724f207e527b5f7
|
3752635bbf038dc151e0119c5912e733a00425fd
|
refs/heads/master
| 2020-05-30T09:58:26.795466
| 2019-05-31T21:37:51
| 2019-05-31T21:37:51
| 189,661,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,151
|
py
|
import collections
# File with miscellaneous functions used to handle nouns in task2.py
# Treat macron (long-vowel) variants of a character as equal
def umlauted(char1, char2):
same = {'ā': 'a', 'ō': 'o', 'ē': 'e', 'ī': 'i', 'ū': 'u'}
if char1 in same and same[char1] == char2:
return True
if char2 in same and same[char2] == char1:
return True
return False
# Get the stem as the longest common substring of the two words
def get_stem(str1, str2):
lcs = ""
longest_temp = ""
for i in range(0, len(str1)):
for j in range(0, len(str2)):
            # Ignore macron changes
if str1[i] == str2[j] or umlauted(str1[i], str2[j]):
(i_temp, j_temp) = (i, j)
# Find longest possible match between strings
while i_temp < len(str1) and j_temp < len(str2) and (str1[i_temp] == str2[j_temp] or umlauted(str1[i_temp], str2[j_temp])):
longest_temp += str1[i_temp]
i_temp += 1
j_temp += 1
lcs = longest_temp if len(longest_temp) > len(lcs) else lcs
longest_temp = ""
return lcs
# Use the longest common substring to split words
# into prefix, stem and suffix parts
def get_word_parts(lemma, inflected):
lem_st = get_stem(lemma.lower(), inflected.lower())
temp = lemma.lower()
pre_cnt = temp.find(lem_st)
# end block -----
inf_st = get_stem(inflected.lower(), lemma.lower())
inf_pre_cnt = inflected.lower().find(inf_st)
suf_index = len(lem_st)
return [pre_cnt, suf_index, inf_pre_cnt]
# creates prefix and suffix dictionaries
# for all nouns
def get_noun_transformation_dicts(list_arg):
suf_dict = collections.defaultdict(dict)
pre_dict = collections.defaultdict(dict)
for entry in list_arg:
(lemma, inflected, inflection) = tuple(entry)
(pre_cnt, suf_index, inf_pre_cnt) = tuple(get_word_parts(lemma, inflected))
# populate the suffix dictionary
i = pre_cnt+suf_index
y = inf_pre_cnt + suf_index
while i >= pre_cnt:
if not inflection in suf_dict[lemma[i:len(lemma)]]:
suf_dict[lemma[i:len(lemma)]][inflection] = inflected[y:len(inflected)]
i -= 1
y -= 1
# populate the prefix dictionary
pre_dict[lemma[0:pre_cnt]][inflection] = inflected[0:inf_pre_cnt]
return[suf_dict, pre_dict]
def inflect_noun(noun_entry, suffix_dict):
lemma = noun_entry[0]
inflected = noun_entry[0]
inflection = noun_entry[2]
# get the new suffix
for i in range(0, len(lemma)):
if lemma[i:len(lemma)] in suffix_dict:
if inflection in suffix_dict[lemma[i:len(lemma)]]:
new_suffix = suffix_dict[lemma[i:len(lemma)]][inflection]
# replaces lemma suffix with inflected suffix
inflected = lemma[0:i] + new_suffix
continue
return inflected
def a_pre_change(verb):
verb = list(verb)
vowels = ["a", "e", "i", "o", "u", "io", "ā", "ē", "ī", "ō", "ū"]
if verb[0] == "a" and verb[2] in vowels:
verb[0] = "ā"
return "".join(verb)
|
[
"noreply@github.com"
] |
emiudeh.noreply@github.com
|
0780ef8728b4dc930bc979222c530958e034d200
|
0b7e49c48971330c37d0eff9bfdeb03973a12fe0
|
/app.py
|
e7942756af9acc78e976b4a21bd0b96666bc769a
|
[] |
no_license
|
Rohithyeravothula/Markup-UI
|
99dd121fb125e2b9bf585339eddad8c4cf070dd2
|
420693bda6634d5c143fe54217253b45d1d57726
|
refs/heads/master
| 2021-03-19T16:02:10.438924
| 2017-10-19T23:11:45
| 2017-10-19T23:11:45
| 106,876,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
from flask import Flask
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
return render_template("index.html")
@app.route('/document', methods=['GET'])
def get_document():
docId = request.args.get("id")
return "document with id " + docId
@app.route('/documents', methods=['GET'])
def list_documents():
return "documents list is shown here"
@app.route('/document/', methods=['POST'])
def add_markup():
content = request.get_json()
markup = content["markup"]
return "ack"
@app.route('/documents/annotations', methods=['POST', 'GET'])
def dummy():
if request.method == 'POST':
content = request.get_json()
print(content)
return ""
elif request.method == 'GET':
return ""
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=True)
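# Example requests (hypothetical; host/port match the app.run call above):
#   curl 'http://localhost:8000/document?id=42'
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"markup": "<b>hi</b>"}' 'http://localhost:8000/document/'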
|
[
"rohithiitj@gmail.com"
] |
rohithiitj@gmail.com
|
aa24bfe7c2a20f5fdcd30858d2a3052d5b72e047
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03838/s751440828.py
|
bca7e9d63e5acfca8551c9d1c6da13b60eef5ce5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
import sys
readline = sys.stdin.buffer.readline
sys.setrecursionlimit(10 ** 8)
INF = float('inf')
MOD = 10 ** 9 + 7
def main():
x, y = map(int, readline().split())
if x * y >= 0:
if x < y:
print(y - x)
else:
print(x - y + 2 if x * y > 0 else x - y + 1)
else:
print(abs(abs(x) - abs(y)) + 1)
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
072d0f66c15746e5a318b7f51e6453973b8e2be5
|
dadc3e0df62e9a0876138623ef5c311d0dafcda3
|
/Python Assignment 2/prg2.py
|
10010f652814364fbaa89017c2b3cdadcc376036
|
[] |
no_license
|
Simran0401/Python-Django-LPU
|
9987fb3888cb81263c632b572ef5013377e61210
|
e6558c1d7bc8a33369c4378e2e0502fdc83eb7bf
|
refs/heads/main
| 2023-06-02T17:27:56.107048
| 2021-06-20T09:59:30
| 2021-06-20T09:59:30
| 373,412,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
'''
Q2. Print all odd numbers and even numbers between 1 to 100
'''
start = 1
end = 100
print("All odd numbers from 1 to 100 are:")
for i in range(start, end + 1):
if(i % 2 != 0):
print(i, end = " ")
print("\n")
print("All even numbers from 1 to 100 are:")
for i in range(start, end + 1):
if(i % 2 == 0):
print(i, end = " ")
|
[
"simrandas0401@gmail.com"
] |
simrandas0401@gmail.com
|
a7b858ecd17b16a70e8c47d93ad6977a3c5a1af9
|
fa35b9b97ebc9bb3eed940d83f5d06bb5e0237ef
|
/App/errors/__init__.py
|
53aed05bb31815a3275387dfcabb6a02798fbf80
|
[] |
no_license
|
qinyang-bao/Shopify_Api
|
9d19c7bbd38e669929cc644e7ba3e4c981d1cef9
|
7e3c7ebd64a2ccb15fe99f703f845b70b6732f52
|
refs/heads/master
| 2022-12-22T13:51:13.694925
| 2019-03-02T23:29:07
| 2019-03-02T23:29:07
| 150,023,900
| 1
| 0
| null | 2022-12-08T02:54:11
| 2018-09-23T20:42:50
|
Python
|
UTF-8
|
Python
| false
| false
| 96
|
py
|
from flask import Blueprint
bp = Blueprint('errors', __name__)
from App.errors import handlers
|
[
"q7bao@edu.uwaterloo.ca"
] |
q7bao@edu.uwaterloo.ca
|
91b1c7687c5d44deed1a42842d28e8f92f94d1c1
|
2adebb657870d7af4d4b01560f898da555def8af
|
/naloge/2016/dn1/test/LukaLajovic/cheapmatrix.py
|
b0c9bc246ba4ae3f6f943923f92c7bd154306542
|
[
"MIT"
] |
permissive
|
lukalajovic/PSA1
|
3181b1c92a8f4220ff626656c456c9180e98a38a
|
046701e1156096a25373eb1e9d8fd9f078532922
|
refs/heads/master
| 2020-12-24T14:09:32.033917
| 2016-12-26T18:40:05
| 2016-12-26T18:40:05
| 76,550,969
| 0
| 0
| null | 2016-12-26T18:40:06
| 2016-12-15T10:43:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,468
|
py
|
# -*- coding: utf-8 -*-
from .slowmatrix import SlowMatrix
class CheapMatrix(SlowMatrix):
"""
    Matrix with space-efficient (workspace-based) multiplication.
"""
    # performs multiplication of this matrix with druga ("other");
    # the parameter nova ("new") is the matrix that receives the result
def dm(self,druga,nova=None):
n=self.nrow()
m=self.ncol()
k=druga.nrow()
l=druga.ncol()
        if nova is None:
nova=self.nicelna_matrika(n,l)
if m!=k:
return "ni mogoce"
elif m==1 or n==1 or k==1 or l==1:
return self.slabo_mnozenje2(druga,nova)
elif n%2==1:
print("hucklberry finn")
c1=self[:n-1,:m].dm(druga,nova[:m-1,:l])
c2=self[n-1,:m].dm(druga,nova[m-1,:l])
nova[:m-1,:l]=c1
nova[m-1,:l]=c2
return nova
elif m%2==1:
print("tom sawyer")
c1=self[:n,:m-1].dm(druga[:k-1,:l],nova)
c2=self[:n,m-1].dm(druga[k-1,:l],nova)
return c1+c2
elif l%2==1:
print("indijanec joe")
c1=self.dm(druga[:k,:l-1],nova[:m,:l-1])
c2=self.dm(druga[:k,l-1],nova[:m,l-1])
nova[:m,:l-1]=c1
nova[:m,l-1]=c2
return nova
else:
print("jim")
mors=nova[:m//2,:l//2]
a=self[:n//2,:m//2]
b=self[:n//2,m//2:]
c=self[n//2:,:m//2]
d=self[n//2:,m//2:]
e=druga[:k//2,:l//2]
f=druga[:k//2,l//2:]
g=druga[k//2:,:l//2]
h=druga[k//2:,l//2:]
#nova[:n//2,:l//2]=p4+p5+p6-p2
#nova[:n//2,l//2:]=p1+p2
#nova[n//2:,:l//2]=p3+p4
#nova[n//2:,l//2:]=p1+p5-p3-p7
p1=a.dm(f-h,mors)
p2=(a+b).dm(h,mors)
p3=(c+d).dm(e,mors)
p4=d.dm(g-e,mors)
p5=(a+d).dm(e+h,mors)
p6=(b-d).dm(g+h,mors)
p7=(a-c).dm(e+f,mors)
#nova[:n//2,:l//2]=p4
#nova[:n//2,:l//2]+=p5
#nova[:n//2,:l//2]+=p6
#nova[:n//2,:l//2]-=p4
#nova[:n//2,l//2:]=p1
#nova[:n//2,l//2:]+=p2
#nova[n//2:,:l//2]=p3
#nova[n//2:,:l//2]+=p4
#nova[n//2:,l//2:]=p1
#nova[n//2:,l//2:]+=p5
#nova[n//2:,l//2:]-=p3
#nova[n//2:,l//2:]-=p7
nova[:n//2,:l//2]=p4+p5+p6-p2
nova[:n//2,l//2:]=p1+p2
nova[n//2:,:l//2]=p3+p4
nova[n//2:,l//2:]=p1+p5-p3-p7
return nova
    # performs naive (triple-loop) multiplication of this matrix with another, writing into nula ("zero" workspace)
def slabo_mnozenje2(self,druga,nula):
if self.ncol()!=druga.nrow():
return "ni mogoce"
else:
vrstice1=self.nrow()
stolpci1=self.ncol()
vrstice2=druga.nrow()
stolpci2=druga.ncol()
for i in range(vrstice1):
for j in range(stolpci2):
k=0
for x in range(stolpci1):
k+=self[i,x]*druga[x,j]
nula[i,j]=k
return nula
    # slabo_mnozenje3 ("naive multiplication 3") naively multiplies two matrices; used when some dimension equals one
def slabo_mnozenje3(self,prva,druga,nula):
vrstice1=prva.nrow()
stolpci1=prva.ncol()
vrstice2=druga.nrow()
stolpci2=druga.ncol()
for i in range(vrstice1):
for j in range(stolpci2):
k=0
for x in range(stolpci1):
k+=prva[i,x]*druga[x,j]
nula[i,j]=k
return nula
def multiply(self, left, right,delovna=None):
"""
        Writes the product of the given matrices into the current matrix.
        A working matrix may optionally be passed as an extra argument.
"""
assert left.ncol() == right.nrow(), \
"Dimenzije matrik ne dopuščajo množenja!"
assert self.nrow() == left.nrow() and right.ncol() == self.ncol(), \
"Dimenzije ciljne matrike ne ustrezajo dimenzijam produkta!"
if delovna is None:
delovna = self.__class__(nrow = self.nrow(), ncol = self.ncol())
else:
assert self.nrow() == delovna.nrow() and self.ncol() == delovna.ncol(), \
"Dimenzije delovne matrike ne ustrezajo dimenzijam produkta!"
        # name the row and column counts for brevity
n=left.nrow()
m=left.ncol()
k=right.nrow()
l=right.ncol()
self[:n,:l]=left.dm(right,delovna)
v1=CheapMatrix([[1],[1]])
m1=CheapMatrix([[1,2,3],[1,2,3]])
m2=CheapMatrix([[1,1],[1,1],[1,1]])
m3=CheapMatrix([[1,1],[1,1]])
nula=CheapMatrix([[0]])
#print(m2*m3)
m4=CheapMatrix([[1,1]])
m5=CheapMatrix([[1,1],[1,1]])
#print(m4*m5)
m6=CheapMatrix([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])
m7=CheapMatrix([[1,1],[1,1],[1,1],[1,1]])
iv1=CheapMatrix([[1,1,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1]])
m5=CheapMatrix([[1,1,1],[1,1,1],[1,1,1]])
print(m5*m5)
|
[
"noreply@github.com"
] |
lukalajovic.noreply@github.com
|
3c271f34c10b2767e3531eb63ec9f63933f48b00
|
f26e5592d46676675d8b92d7ac03f4aa8dfd7f2e
|
/users/migrations/0004_auto_20200528_1017.py
|
f82df93d5fcb0444857c11c9d089054eabf965ed
|
[] |
no_license
|
nkouki98/django-blog
|
5dad6f5b4d1ee8f5339d10c5118b3a822c6a0c61
|
877418f0d29917cd6ef4c0ea85e3b6892f25f990
|
refs/heads/master
| 2022-11-27T05:07:04.638544
| 2020-08-03T14:31:36
| 2020-08-03T14:31:36
| 267,048,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# Generated by Django 3.0.6 on 2020-05-28 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20200528_0900'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='email',
field=models.EmailField(max_length=100, unique=True, verbose_name='email'),
),
]
|
[
"farhan1998@live.com"
] |
farhan1998@live.com
|
ea54dfd466048f67b7e78228e9ace2787bd816e3
|
6a245c01d87a75f4719006e3d741e7819fa7825e
|
/apps/taiga/back/django/taiga/taiga_contrib_github_extended_auth/services.py
|
d7c931c446ffa743d3f4c86928a0fba4e46f092c
|
[
"Apache-2.0"
] |
permissive
|
BCDevOps/openshift-components
|
8f6365dfc37a7e749116fc0d0f0d99bb75e44e68
|
0ae3ba611c677056360943b47c95fb25b0ad74a0
|
refs/heads/master
| 2023-04-11T21:08:26.235077
| 2020-11-26T22:01:58
| 2020-11-26T22:01:58
| 52,387,849
| 9
| 28
|
Apache-2.0
| 2020-11-26T22:01:59
| 2016-02-23T20:04:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,873
|
py
|
from taiga_contrib_github_auth import connector
from taiga_contrib_github_auth.services import github_register
from taiga.auth.services import make_auth_response_data
from urllib.parse import urljoin
from django.conf import settings
import logging
import requests
from taiga.base import exceptions as exc
from django.utils.translation import ugettext as _  # needed by the error message below (assumes Django < 4)
logger = logging.getLogger(__name__)
def check_org_membership(github_id, org, headers:dict=connector.HEADERS):
logger.debug("Checking membership of user with github id '{0}' in org {1}...".format(github_id, org))
"""
Get authenticated user organization membership.
"""
url = urljoin(connector.API_URL, "orgs/{0}/members/{1}".format(org, github_id))
logger.debug("Checking via URL {0}.".format(url))
logger.debug("Headers: {0}".format(headers))
response = requests.get(url, headers=headers)
if response.status_code not in [204, 302]:
logger.debug("User was not a member of GitHub organization {0}.Status was {1}".format(org, response.status_code))
return False
else:
return True
# a twiddled replacement for the login method in taiga-contrib-github-auth/connector.py that requests a broader scope
def login(access_code:str, client_id: str=connector.CLIENT_ID, client_secret: str=connector.CLIENT_SECRET,
headers: dict=connector.HEADERS):
"""
    Get an access_token from a user-authorized code, the client id and the client secret key.
(See https://developer.github.com/v3/oauth/#web-application-flow).
"""
if not connector.CLIENT_ID or not connector.CLIENT_SECRET:
        raise connector.GitHubApiError({"error_message": _("Login with a github account is disabled. Contact "
                                                           "the sysadmins. Maybe they're snoozing in a "
                                                           "secret hideout of the data center.")})
url = urljoin(connector.URL, "login/oauth/access_token")
# note -> scope: read:user instead of "user:email"; required to determine *private* org membership
params={"code": access_code,
"client_id": client_id,
"client_secret": client_secret,
"scope": "user:emails"}
data = connector._post(url, params=params, headers=headers)
return connector.AuthInfo(access_token=data.get("access_token", None))
def github_login_func(request):
logger.debug("Attempting login using taiga_contrib_github_extended_auth plugin....")
code = request.DATA.get('code', None)
token = request.DATA.get('token', None)
auth_info = login(code)
headers = connector.HEADERS.copy()
headers["Authorization"] = "token {}".format(auth_info.access_token)
user_info = connector.get_user_profile(headers=headers)
username = user_info.username
logger.debug("username: {0}".format(username))
organization = getattr(settings, "TAIGA_GITHUB_EXTENDED_AUTH_ORG", None)
logger.debug("organization: {0}".format(organization))
if organization and check_org_membership(username, organization, headers=headers):
logger.debug("confirmed membership...")
emails = connector.get_user_emails(headers=headers)
primary_email = next(filter(lambda x: x.is_primary, emails)).email
logger.debug("Primary email is {}".format(primary_email))
user = github_register(username=username,
email=primary_email.lower(),
full_name=user_info.full_name,
github_id=user_info.id,
bio=user_info.bio,
token=token)
return make_auth_response_data(user)
else:
raise exc.PermissionDenied(detail="User {0} was not a member of GitHub organization {1} and is not permitted to register for access to this Taiga instance.".format(username, organization))
|
[
"shea.phillips@gmail.com"
] |
shea.phillips@gmail.com
|
aff5d23268e689afe37f9ab11d1375ee684ae6bc
|
e9d4070993cbf2a2df0f0e4e0fae320c1331d439
|
/Assignments/Lab 03/primenum.py
|
f2a77350bacc898f6ce5251340908338d7433329
|
[] |
no_license
|
dennywachira/Bootcamp
|
d7ab70be5452318dcee4c953a9d73eae0c281b62
|
0e98ced8d58a8c9b742cf95e928283a34263f29f
|
refs/heads/main
| 2023-07-23T17:18:56.715820
| 2021-08-31T12:17:12
| 2021-08-31T12:17:12
| 385,849,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
# print prime numbers using trial division in a for loop
for x in range(0, 100):
    if x > 1:
        for i in range(2, x):
            if (x % i == 0):
                break
        else:
            print(x)  # the for-else runs only if no divisor was found
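# A slightly faster variant (a sketch, not in the original): trial division
# only needs to test divisors up to the square root of x.
import math
for x in range(2, 100):
    if all(x % i != 0 for i in range(2, int(math.sqrt(x)) + 1)):
        print(x)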
|
[
"denniz.wachira@gmail.com"
] |
denniz.wachira@gmail.com
|
b7b97d0aefd294d357c39e6a2c805e82c3b297f8
|
92b2b0f75b1525edd4a054cb0f7db2c3941cadc0
|
/kaitai/python/kaitai_sbp/ssr.py
|
51bd34091262eb09bbc051807190782dbc7a708b
|
[
"MIT"
] |
permissive
|
swift-nav/libsbp
|
d3463cf77a5a701328d2a151ce166fd6ea7fe3e2
|
c4daa6fff203c7c26d7ab06234755a14085458f4
|
refs/heads/master
| 2023-08-31T02:09:46.844536
| 2023-08-28T17:20:02
| 2023-08-28T17:20:02
| 26,985,599
| 73
| 117
|
MIT
| 2023-09-04T23:13:49
| 2014-11-22T02:02:43
|
C
|
UTF-8
|
Python
| false
| false
| 29,328
|
py
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from .gnss import *
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Ssr(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
pass
class GridDefinitionHeaderDepA(KaitaiStruct):
"""Defines the grid for MSG_SSR_GRIDDED_CORRECTION messages. Also includes
an RLE encoded validity list.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.region_size_inverse = self._io.read_u1()
self.area_width = self._io.read_u2le()
self.lat_nw_corner_enc = self._io.read_u2le()
self.lon_nw_corner_enc = self._io.read_u2le()
self.num_msgs = self._io.read_u1()
self.seq_num = self._io.read_u1()
class StecResidualNoStd(KaitaiStruct):
"""STEC residual for the given satellite at the grid point.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sv_id = Gnss.SvId(self._io, self, self._root)
self.residual = self._io.read_s2le()
class TroposphericDelayCorrectionNoStd(KaitaiStruct):
"""Troposphere vertical delays at the grid point.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.hydro = self._io.read_s2le()
self.wet = self._io.read_s1()
class MsgSsrStecCorrectionDep(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.StecHeader(self._io, self, self._root)
self.stec_sat_list = []
i = 0
while not self._io.is_eof():
self.stec_sat_list.append(Ssr.StecSatElement(self._io, self, self._root))
i += 1
class StecSatElementIntegrity(KaitaiStruct):
"""STEC polynomial and bounds for the given satellite.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.stec_residual = Ssr.StecResidual(self._io, self, self._root)
self.stec_bound_mu = self._io.read_u1()
self.stec_bound_sig = self._io.read_u1()
self.stec_bound_mu_dot = self._io.read_u1()
self.stec_bound_sig_dot = self._io.read_u1()
class MsgSsrOrbitClockDepA(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.sid = Gnss.GnssSignal(self._io, self, self._root)
self.update_interval = self._io.read_u1()
self.iod_ssr = self._io.read_u1()
self.iod = self._io.read_u1()
self.radial = self._io.read_s4le()
self.along = self._io.read_s4le()
self.cross = self._io.read_s4le()
self.dot_radial = self._io.read_s4le()
self.dot_along = self._io.read_s4le()
self.dot_cross = self._io.read_s4le()
self.c0 = self._io.read_s4le()
self.c1 = self._io.read_s4le()
self.c2 = self._io.read_s4le()
class MsgSsrStecCorrection(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.BoundsHeader(self._io, self, self._root)
self.ssr_iod_atmo = self._io.read_u1()
self.tile_set_id = self._io.read_u2le()
self.tile_id = self._io.read_u2le()
self.n_sats = self._io.read_u1()
self.stec_sat_list = []
i = 0
while not self._io.is_eof():
self.stec_sat_list.append(Ssr.StecSatElement(self._io, self, self._root))
i += 1
class MsgSsrOrbitClock(KaitaiStruct):
"""The precise orbit and clock correction message is to be applied as a
delta correction to broadcast ephemeris and is an equivalent to the 1060
/1066 RTCM message types.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.sid = Gnss.GnssSignal(self._io, self, self._root)
self.update_interval = self._io.read_u1()
self.iod_ssr = self._io.read_u1()
self.iod = self._io.read_u4le()
self.radial = self._io.read_s4le()
self.along = self._io.read_s4le()
self.cross = self._io.read_s4le()
self.dot_radial = self._io.read_s4le()
self.dot_along = self._io.read_s4le()
self.dot_cross = self._io.read_s4le()
self.c0 = self._io.read_s4le()
self.c1 = self._io.read_s4le()
self.c2 = self._io.read_s4le()
class BoundsHeader(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.num_msgs = self._io.read_u1()
self.seq_num = self._io.read_u1()
self.update_interval = self._io.read_u1()
self.sol_id = self._io.read_u1()
class MsgSsrGriddedCorrection(KaitaiStruct):
"""STEC residuals are per space vehicle, troposphere is not.
It is typically equivalent to the QZSS CLAS Sub Type 9 messages.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.GriddedCorrectionHeader(self._io, self, self._root)
self.index = self._io.read_u2le()
self.tropo_delay_correction = Ssr.TroposphericDelayCorrection(self._io, self, self._root)
self.stec_residuals = []
i = 0
while not self._io.is_eof():
self.stec_residuals.append(Ssr.StecResidual(self._io, self, self._root))
i += 1
class StecResidual(KaitaiStruct):
"""STEC residual (mean and standard deviation) for the given satellite at
the grid point.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sv_id = Gnss.SvId(self._io, self, self._root)
self.residual = self._io.read_s2le()
self.stddev = self._io.read_u1()
class CodePhaseBiasesSatSig(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sat_id = self._io.read_u1()
self.signal_id = self._io.read_u1()
self.code_bias_bound_mu = self._io.read_u1()
self.code_bias_bound_sig = self._io.read_u1()
self.phase_bias_bound_mu = self._io.read_u1()
self.phase_bias_bound_sig = self._io.read_u1()
class SatelliteApc(KaitaiStruct):
"""Contains phase center offset and elevation variation corrections for one
signal on a satellite.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sid = Gnss.GnssSignal(self._io, self, self._root)
self.sat_info = self._io.read_u1()
self.svn = self._io.read_u2le()
self.pco = []
for i in range(3):
self.pco.append(self._io.read_s2le())
self.pcv = []
for i in range(21):
self.pcv.append(self._io.read_s1())
class GriddedCorrectionHeader(KaitaiStruct):
"""The LPP message contains nested variable length arrays which are not
supported in SBP, so each grid point will be identified by the index.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.tile_set_id = self._io.read_u2le()
self.tile_id = self._io.read_u2le()
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.num_msgs = self._io.read_u2le()
self.seq_num = self._io.read_u2le()
self.update_interval = self._io.read_u1()
self.iod_atmo = self._io.read_u1()
self.tropo_quality_indicator = self._io.read_u1()
class StecSatElement(KaitaiStruct):
"""STEC polynomial for the given satellite.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sv_id = Gnss.SvId(self._io, self, self._root)
self.stec_quality_indicator = self._io.read_u1()
self.stec_coeff = []
for i in range(4):
self.stec_coeff.append(self._io.read_s2le())
class StecHeaderDepA(KaitaiStruct):
"""A full set of STEC information will likely span multiple SBP messages,
        since SBP messages are limited to 255 bytes. The header is used to tie
multiple SBP messages into a sequence.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.num_msgs = self._io.read_u1()
self.seq_num = self._io.read_u1()
self.update_interval = self._io.read_u1()
self.iod_atmo = self._io.read_u1()
class StecHeader(KaitaiStruct):
"""A full set of STEC information will likely span multiple SBP messages,
        since SBP messages are limited to 255 bytes. The header is used to tie
multiple SBP messages into a sequence.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.tile_set_id = self._io.read_u2le()
self.tile_id = self._io.read_u2le()
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.num_msgs = self._io.read_u1()
self.seq_num = self._io.read_u1()
self.update_interval = self._io.read_u1()
self.iod_atmo = self._io.read_u1()
class MsgSsrStecCorrectionDepA(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.StecHeaderDepA(self._io, self, self._root)
self.stec_sat_list = []
i = 0
while not self._io.is_eof():
self.stec_sat_list.append(Ssr.StecSatElement(self._io, self, self._root))
i += 1
class GriddedCorrectionHeaderDepA(KaitaiStruct):
"""The 3GPP message contains nested variable length arrays which are not
supported in SBP, so each grid point will be identified by the index.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.num_msgs = self._io.read_u2le()
self.seq_num = self._io.read_u2le()
self.update_interval = self._io.read_u1()
self.iod_atmo = self._io.read_u1()
self.tropo_quality_indicator = self._io.read_u1()
class MsgSsrOrbitClockBounds(KaitaiStruct):
"""Note 1: Range: 0-17.5 m. i<=200, mean=0.01i; 200<i<=230,
mean=2+0.1(i-200); i>230, mean=5+0.5(i-230).
Note 2: Range: 0-17.5 m. i<=200, std=0.01i; 200<i<=230, std=2+0.1(i-200)
i>230, std=5+0.5(i-230).
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.BoundsHeader(self._io, self, self._root)
self.ssr_iod = self._io.read_u1()
self.const_id = self._io.read_u1()
self.n_sats = self._io.read_u1()
self.orbit_clock_bounds = []
i = 0
while not self._io.is_eof():
self.orbit_clock_bounds.append(Ssr.OrbitClockBound(self._io, self, self._root))
i += 1
class MsgSsrGriddedCorrectionBounds(KaitaiStruct):
"""Note 1: Range: 0-17.5 m. i<= 200, mean = 0.01i; 200<i<=230,
mean=2+0.1(i-200); i>230, mean=5+0.5(i-230).
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.BoundsHeader(self._io, self, self._root)
self.ssr_iod_atmo = self._io.read_u1()
self.tile_set_id = self._io.read_u2le()
self.tile_id = self._io.read_u2le()
self.tropo_qi = self._io.read_u1()
self.grid_point_id = self._io.read_u2le()
self.tropo_delay_correction = Ssr.TroposphericDelayCorrection(self._io, self, self._root)
self.tropo_v_hydro_bound_mu = self._io.read_u1()
self.tropo_v_hydro_bound_sig = self._io.read_u1()
self.tropo_v_wet_bound_mu = self._io.read_u1()
self.tropo_v_wet_bound_sig = self._io.read_u1()
self.n_sats = self._io.read_u1()
self.stec_sat_list = []
i = 0
while not self._io.is_eof():
self.stec_sat_list.append(Ssr.StecSatElementIntegrity(self._io, self, self._root))
i += 1
class MsgSsrGridDefinitionDepA(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.GridDefinitionHeaderDepA(self._io, self, self._root)
self.rle_list = []
i = 0
while not self._io.is_eof():
self.rle_list.append(self._io.read_u1())
i += 1
class CodeBiasesContent(KaitaiStruct):
"""Code biases are to be added to pseudorange. The corrections conform with
RTCMv3 MT 1059 / 1065.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.code = self._io.read_u1()
self.value = self._io.read_s2le()
class MsgSsrSatelliteApc(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.update_interval = self._io.read_u1()
self.sol_id = self._io.read_u1()
self.iod_ssr = self._io.read_u1()
self.apc = []
i = 0
while not self._io.is_eof():
self.apc.append(Ssr.SatelliteApc(self._io, self, self._root))
i += 1
class PhaseBiasesContent(KaitaiStruct):
"""Phase biases are to be added to carrier phase measurements.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.code = self._io.read_u1()
self.integer_indicator = self._io.read_u1()
self.widelane_integer_indicator = self._io.read_u1()
self.discontinuity_counter = self._io.read_u1()
self.bias = self._io.read_s4le()
class MsgSsrTileDefinitionDepA(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.tile_set_id = self._io.read_u2le()
self.tile_id = self._io.read_u2le()
self.corner_nw_lat = self._io.read_s2le()
self.corner_nw_lon = self._io.read_s2le()
self.spacing_lat = self._io.read_u2le()
self.spacing_lon = self._io.read_u2le()
self.rows = self._io.read_u2le()
self.cols = self._io.read_u2le()
self.bitmask = self._io.read_u8le()
class OrbitClockBound(KaitaiStruct):
"""Orbit and clock bound.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sat_id = self._io.read_u1()
self.orb_radial_bound_mu = self._io.read_u1()
self.orb_along_bound_mu = self._io.read_u1()
self.orb_cross_bound_mu = self._io.read_u1()
self.orb_radial_bound_sig = self._io.read_u1()
self.orb_along_bound_sig = self._io.read_u1()
self.orb_cross_bound_sig = self._io.read_u1()
self.clock_bound_mu = self._io.read_u1()
self.clock_bound_sig = self._io.read_u1()
class MsgSsrGriddedCorrectionDepA(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.GriddedCorrectionHeaderDepA(self._io, self, self._root)
self.index = self._io.read_u2le()
self.tropo_delay_correction = Ssr.TroposphericDelayCorrection(self._io, self, self._root)
self.stec_residuals = []
i = 0
while not self._io.is_eof():
self.stec_residuals.append(Ssr.StecResidual(self._io, self, self._root))
i += 1
class MsgSsrOrbitClockBoundsDegradation(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.BoundsHeader(self._io, self, self._root)
self.ssr_iod = self._io.read_u1()
self.const_id = self._io.read_u1()
self.sat_bitmask = self._io.read_u8le()
self.orbit_clock_bounds_degradation = Ssr.OrbitClockBoundDegradation(self._io, self, self._root)
class MsgSsrGriddedCorrectionNoStdDepA(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.GriddedCorrectionHeaderDepA(self._io, self, self._root)
self.index = self._io.read_u2le()
self.tropo_delay_correction = Ssr.TroposphericDelayCorrectionNoStd(self._io, self, self._root)
self.stec_residuals = []
i = 0
while not self._io.is_eof():
self.stec_residuals.append(Ssr.StecResidualNoStd(self._io, self, self._root))
i += 1
class MsgSsrCodePhaseBiasesBounds(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = Ssr.BoundsHeader(self._io, self, self._root)
self.ssr_iod = self._io.read_u1()
self.const_id = self._io.read_u1()
self.n_sats_signals = self._io.read_u1()
self.satellites_signals = []
i = 0
while not self._io.is_eof():
self.satellites_signals.append(Ssr.CodePhaseBiasesSatSig(self._io, self, self._root))
i += 1
class MsgSsrCodeBiases(KaitaiStruct):
"""The precise code biases message is to be added to the pseudorange of the
corresponding signal to get corrected pseudorange. It is an equivalent
to the 1059 / 1065 RTCM message types.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.sid = Gnss.GnssSignal(self._io, self, self._root)
self.update_interval = self._io.read_u1()
self.iod_ssr = self._io.read_u1()
self.biases = []
i = 0
while not self._io.is_eof():
self.biases.append(Ssr.CodeBiasesContent(self._io, self, self._root))
i += 1
class MsgSsrTileDefinition(KaitaiStruct):
"""Provides the correction point coordinates for the atmospheric correction
values in the MSG_SSR_STEC_CORRECTION and MSG_SSR_GRIDDED_CORRECTION
messages.
Based on ETSI TS 137 355 V16.1.0 (LTE Positioning Protocol) information
element GNSS-SSR-CorrectionPoints. SBP only supports gridded arrays of
correction points, not lists of points.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.update_interval = self._io.read_u1()
self.sol_id = self._io.read_u1()
self.iod_atmo = self._io.read_u1()
self.tile_set_id = self._io.read_u2le()
self.tile_id = self._io.read_u2le()
self.corner_nw_lat = self._io.read_s2le()
self.corner_nw_lon = self._io.read_s2le()
self.spacing_lat = self._io.read_u2le()
self.spacing_lon = self._io.read_u2le()
self.rows = self._io.read_u2le()
self.cols = self._io.read_u2le()
self.bitmask = self._io.read_u8le()
class OrbitClockBoundDegradation(KaitaiStruct):
"""Orbit and clock bound degradation.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.orb_radial_bound_mu_dot = self._io.read_u1()
self.orb_along_bound_mu_dot = self._io.read_u1()
self.orb_cross_bound_mu_dot = self._io.read_u1()
self.orb_radial_bound_sig_dot = self._io.read_u1()
self.orb_along_bound_sig_dot = self._io.read_u1()
self.orb_cross_bound_sig_dot = self._io.read_u1()
self.clock_bound_mu_dot = self._io.read_u1()
self.clock_bound_sig_dot = self._io.read_u1()
class MsgSsrSatelliteApcDep(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.apc = []
i = 0
while not self._io.is_eof():
self.apc.append(Ssr.SatelliteApc(self._io, self, self._root))
i += 1
class TroposphericDelayCorrection(KaitaiStruct):
"""Troposphere vertical delays (mean and standard deviation) at the grid
point.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.hydro = self._io.read_s2le()
self.wet = self._io.read_s1()
self.stddev = self._io.read_u1()
class MsgSsrTileDefinitionDepB(KaitaiStruct):
"""Deprecated.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ssr_sol_id = self._io.read_u1()
self.tile_set_id = self._io.read_u2le()
self.tile_id = self._io.read_u2le()
self.corner_nw_lat = self._io.read_s2le()
self.corner_nw_lon = self._io.read_s2le()
self.spacing_lat = self._io.read_u2le()
self.spacing_lon = self._io.read_u2le()
self.rows = self._io.read_u2le()
self.cols = self._io.read_u2le()
self.bitmask = self._io.read_u8le()
class MsgSsrPhaseBiases(KaitaiStruct):
"""The precise phase biases message contains the biases to be added to the
carrier phase of the corresponding signal to get corrected carrier phase
measurement, as well as the satellite yaw angle to be applied to compute
the phase wind-up correction. It is typically an equivalent to the 1265
RTCM message types.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.time = Gnss.GpsTimeSec(self._io, self, self._root)
self.sid = Gnss.GnssSignal(self._io, self, self._root)
self.update_interval = self._io.read_u1()
self.iod_ssr = self._io.read_u1()
self.dispersive_bias = self._io.read_u1()
self.mw_consistency = self._io.read_u1()
self.yaw = self._io.read_u2le()
self.yaw_rate = self._io.read_s1()
self.biases = []
i = 0
while not self._io.is_eof():
self.biases.append(Ssr.PhaseBiasesContent(self._io, self, self._root))
i += 1
|
[
"noreply@github.com"
] |
swift-nav.noreply@github.com
|
428b3420fa6e2b8ea2b2f3fc5889ff9f6ca35207
|
89744412683f818356c0db394b8f1cb9c5235b40
|
/02_列表和元组/list_集合.py
|
0db22c71458cf12717b7d7941751813b76a46047
|
[] |
no_license
|
pleuvoir/python-tutorial
|
a84835ae1d62446fd54eab2c3a69407ed68e2ba8
|
9785bcf06956bc73c86ce0ab83f5e82e6ab2c3e5
|
refs/heads/master
| 2022-04-26T12:07:45.050277
| 2020-05-02T07:21:14
| 2020-05-02T07:21:14
| 245,397,536
| 0
| 0
| null | 2020-04-08T13:00:25
| 2020-03-06T11:01:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# define a list
a = [1, 2, [3, 4], 5]
"""Append an element"""
a.append(6)
print(a) # [1, 2, [3, 4], 5, 6]
"""Two empty lists are equal"""
empty_list1 = []
empty_list2 = []
print(empty_list1 == empty_list2) # True
# check whether a list is empty
if not empty_list1:
    print('is empty') # it is empty
else:
    print('is not empty')
"""Two lists can be concatenated directly with +"""
language_java = ['java']
language_python = ['python']
language_all = language_java + language_python
print(language_all) # ['java', 'python']
"""Note: lists cannot be subtracted with -"""
# language_java_new = language_all - language_python
# print(language_java_new)
"""Insert at a given position"""
language_all.insert(1, 'c++')
print(language_all) # ['java', 'c++', 'python']
"""List comprehension"""
list_old = [1, 2, 3, 4] # to get [2,3,4,5]: the plain way is a loop, the quick way is a list comprehension
list_new = [item + 1 for item in list_old]
print('list_new', list_new) # [2, 3, 4, 5]
# filtering can be applied at the same time
list_new = [item + 1 for item in list_old if item % 2 == 0]
print(list_new) # [3, 5]: the filter on the right runs first, then the +1 on the left is applied
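# (Added illustration, in the spirit of the notes above) a conditional
# expression inside the comprehension: double the odd items, keep the even ones.
list_cond = [item * 2 if item % 2 != 0 else item for item in list_old]
print(list_cond) # [2, 2, 6, 4]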
|
[
"fuwei@daojia-inc.com"
] |
fuwei@daojia-inc.com
|
ca82545eeabbbaccd5dba5dcb3b7cae40e22558a
|
c23a4970a5f596326f69d282c01582acdaf00a24
|
/shop/carts/carts.py
|
2fe8675235b204b935f20a865779157a2d4707c3
|
[] |
no_license
|
miftahcoiri354/flask_ecommerce
|
7917f10040f1fb1f74626d787ec84e0619c123a4
|
79751c729fb2662b7302624f7bdfc7ed245eaaac
|
refs/heads/main
| 2023-03-04T12:25:40.715772
| 2021-02-19T09:57:45
| 2021-02-19T09:57:45
| 339,663,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,814
|
py
|
from flask import redirect, render_template, url_for, flash, request, session, current_app
from shop import db , app
from shop.products.models import Addproduct
from shop.products.routes import brands, categories
import json
def MagerDicts(dict1,dict2):
if isinstance(dict1, list) and isinstance(dict2,list):
return dict1 + dict2
if isinstance(dict1, dict) and isinstance(dict2, dict):
return dict(list(dict1.items()) + list(dict2.items()))
@app.route('/addcart', methods=['POST'])
def AddCart():
try:
product_id = request.form.get('product_id')
quantity = int(request.form.get('quantity'))
color = request.form.get('colors')
product = Addproduct.query.filter_by(id=product_id).first()
if request.method =="POST":
DictItems = {product_id:{'name':product.name,'price':float(product.price),'discount':product.discount,'color':color,'quantity':quantity,'image':product.image_1, 'colors':product.colors}}
if 'Shoppingcart' in session:
print(session['Shoppingcart'])
if product_id in session['Shoppingcart']:
for key, item in session['Shoppingcart'].items():
if int(key) == int(product_id):
session.modified = True
                            item['quantity'] += quantity  # add the posted quantity, not a fixed 1
else:
                    session['Shoppingcart'] = MergeDicts(session['Shoppingcart'], DictItems)
return redirect(request.referrer)
else:
session['Shoppingcart'] = DictItems
return redirect(request.referrer)
except Exception as e:
print(e)
finally:
return redirect(request.referrer)
@app.route('/carts')
def getCart():
if 'Shoppingcart' not in session or len(session['Shoppingcart']) <= 0:
return redirect(url_for('home'))
subtotal = 0
grandtotal = 0
for key,product in session['Shoppingcart'].items():
discount = (product['discount']/100) * int(product['price'])
subtotal += int(product['price']) * int(product['quantity'])
subtotal -= discount
tax =("%.0f" %(.06 * float(subtotal)))
grandtotal = int("%.0f" % (1.06 * subtotal))
return render_template('products/carts.html',tax=tax, grandtotal=grandtotal,brands=brands(),categories=categories())
@app.route('/updatecart/<int:code>', methods=['POST'])
def updatecart(code):
if 'Shoppingcart' not in session or len(session['Shoppingcart']) <= 0:
return redirect(url_for('home'))
if request.method =="POST":
quantity = request.form.get('quantity')
color = request.form.get('color')
try:
session.modified = True
for key , item in session['Shoppingcart'].items():
if int(key) == code:
item['quantity'] = quantity
item['color'] = color
flash('Item is updated!')
return redirect(url_for('getCart'))
except Exception as e:
print(e)
return redirect(url_for('getCart'))
@app.route('/deleteitem/<int:id>')
def deleteitem(id):
if 'Shoppingcart' not in session or len(session['Shoppingcart']) <= 0:
return redirect(url_for('home'))
try:
session.modified = True
for key , item in session['Shoppingcart'].items():
if int(key) == id:
session['Shoppingcart'].pop(key, None)
return redirect(url_for('getCart'))
except Exception as e:
print(e)
return redirect(url_for('getCart'))
@app.route('/clearcart')
def clearcart():
try:
session.pop('Shoppingcart', None)
return redirect(url_for('home'))
    except Exception as e:
        print(e)
        return redirect(url_for('home'))  # a view must return a response even on failure
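
# Illustrative helper (added; not part of the original app): the cart math from
# getCart() factored into a pure function so it can be unit-tested; the 6% tax
# mirrors the hard-coded 1.06 multiplier above.
def compute_totals(cart):
    subtotal = 0
    for product in cart.values():
        discount = (product['discount'] / 100) * int(product['price'])
        subtotal += int(product['price']) * int(product['quantity'])
        subtotal -= discount
    tax = int("%.0f" % (.06 * float(subtotal)))
    grandtotal = int("%.0f" % (1.06 * subtotal))
    return subtotal, tax, grandtotal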
|
[
"miftahcoiri354@gmail.com"
] |
miftahcoiri354@gmail.com
|
517b72f16ed6285858a7fc9ac76b0de4fe68e469
|
cc98fc664002361105e34d388b267589a788f9ee
|
/Apriori Algorithm/apriori.py
|
3894e3132f519d363aa541d4a027523362059b64
|
[] |
no_license
|
ShreenidhiN/Data-Mining
|
42ee7f4418145b88e031301c063f2006d49697bd
|
d84bdf07514bf7873707a8ca02569524fb2192bc
|
refs/heads/main
| 2023-04-22T06:54:22.112093
| 2021-05-06T16:14:42
| 2021-05-06T16:14:42
| 343,116,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,877
|
py
|
from itertools import combinations
import pandas as pd
def count_singletons(filePath):
n_buckets = 0
c1 = {}
with open(filePath) as f:
for line in f:
bucket = line.split(" ")
if '\n' in bucket: bucket.remove('\n')
for item in bucket:
itemset = frozenset({item})
if itemset in c1.keys():
c1[itemset] = c1[itemset] + 1
else:
c1[itemset] = 1
n_buckets = n_buckets + 1
return c1, n_buckets
def filter_frequent(c : dict, min_count):
return {itemset: count for itemset, count in c.items() if count >= min_count}
def construct_candidates(frequent_itemsets, file_path):
c = {}
last_l = list(frequent_itemsets[-1].keys())
l1 = list(frequent_itemsets[0].keys())
if last_l:
k = len(last_l[0]) + 1
else:
k = 1
if len(last_l) < k :
return {}
with open(file_path) as f:
for line in f:
bucket = line.split(" ")
if '\n' in bucket: bucket.remove('\n')
filtered_items = [item for item in bucket if frozenset({item}) in l1]
filtered_items = [item for item in filtered_items if not count_presence(item, last_l) < k - 1 ]
comb = list(combinations(filtered_items, k))
for itemset in comb:
if frozenset(itemset) not in c.keys():
subsets = list(combinations(itemset, k - 1))
if all(frozenset(s) in last_l for s in subsets):
c[frozenset(itemset)] = 1
else:
c[frozenset(itemset)] = c[frozenset(itemset)] + 1
return c
def frequent_itemsets(file_path, s) :
c1, n_buckets = count_singletons(file_path)
min_count = s
frequent_itemsets = []
c = c1
while len(c) != 0:
l = filter_frequent(c, min_count)
frequent_itemsets.append(l)
c = construct_candidates(frequent_itemsets, file_path)
return frequent_itemsets
def count_presence(item, l):
return sum(1 for i in l if item in i)
def output_itemsets(file_path,freq_itemsets):
f = open(file_path, "w")
for itemsets in freq_itemsets:
for item in sorted(itemsets, key=itemsets.get, reverse = True) :
string = ' '.join(item)
string += " (" + str(itemsets[item]) + ")"
f.write(string)
f.write("\n")
f.write("\n")
f.close()
minsupport = int(input("Minimum Support Value: "))
input_path = input("Input file name: ")
output_path = input("Output file name: ")
print("MINIMUM SUPPORT THRESHOLD : ", minsupport)
print("Finding frequent itemsets...")
freq_itemsets = frequent_itemsets(input_path, minsupport)
output_itemsets(output_path,freq_itemsets)
print("Frequent itemsets written in " + output_path)
|
[
"shreenidhin23@gmail.com"
] |
shreenidhin23@gmail.com
|
b0acb77910bf7cf31a0ffac8243c4a59273c8843
|
da86a8cd3fff6cc982ccff09c86746bfd08f8af8
|
/scripts/test-script.py
|
e9da1232659e47cce16a9e23481d97203acade81
|
[
"Apache-2.0"
] |
permissive
|
milebril/Temporal-SBMC-extension
|
ea86bd80b799f3791ae642ce6825e8eecc7e657e
|
57c56b73786e49d233facffde4ba80f212a00fa8
|
refs/heads/master
| 2023-04-18T20:19:40.488398
| 2021-05-05T07:28:24
| 2021-05-05T07:28:24
| 308,298,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
import os
import argparse
import shutil
import tempfile
import time
import pyexr
import torch as th
import numpy as np
from torch.utils.data import DataLoader
import skimage.io as skio
from multiprocessing import Pool, JoinableQueue, cpu_count, Process
import ttools
from denoise import denoise
import sbmc
inputs = "/home/emil/Documents/sbmc/output/emil/training_sequence/render_samples_seq"
checkpoint = "/home/emil/Documents/sbmc/data/pretrained_models/gharbi2019_sbmc"
if not os.path.exists(inputs):
raise ValueError("input {} does not exist".format(args.input))
data_root = os.path.abspath(inputs)
print("ROOT: ", data_root)
# Load everything into a tmp folder and link it up
name = os.path.basename(data_root)
tmpdir = tempfile.mkdtemp()
os.symlink(data_root, os.path.join(tmpdir, name))
# LOG.info("Loading model {}".format(checkpoint))
meta_params = ttools.Checkpointer.load_meta(checkpoint)
# LOG.info("Setting up dataloader")
data_params = meta_params["data_params"]
data_params["spp"] = 4
data = sbmc.FullImagesDataset(os.path.join(tmpdir, name), **data_params)
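
# Possible continuation (added, illustrative): wrap the dataset in a standard
# torch DataLoader; batch_size=1 keeps whole frames together.
dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=0)
for idx, batch in enumerate(dataloader):
    print("batch", idx, type(batch))
    break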
|
[
"milebril1@hotmail.com"
] |
milebril1@hotmail.com
|
1fb6bb92aff933a2a2c035f31eee5bb3a02ff373
|
37d21b78f712be90e56d2704c686b64e6b5aab97
|
/jULIE_recordings/jULIE_recording_setting.py
|
61cd87ea1a53e75850786f251929e06367df9252
|
[] |
no_license
|
warnerwarner/bash_scripts
|
844ad7ed10a0ff0e1c7ac9a90c58de9779331c8f
|
a1efc61778b959c654e5a671a547586afaeba535
|
refs/heads/master
| 2021-02-26T03:27:30.160488
| 2020-09-17T16:44:16
| 2020-09-17T16:44:16
| 245,492,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
import sys
sys.path.append('/home/camp/warnert/neurolytics')
import threshold_recording as tr
import matplotlib.pyplot as plt
import numpy as np
dir_index = int(list(sys.argv)[1])
dirs = ["/home/camp/warnert/working/Recordings/2018_general/181205/2018-12-05_18-17-18",
'/home/camp/warnert/working/Recordings/2019_general/190121/2019-01-21_18-10-30',
"/home/camp/warnert/working/Recordings/2019_general/190207/2019-02-07_18-31-33",
"/home/camp/warnert/working/Recordings/2019_general/190211/2019-02-11_16-35-46",
"/home/camp/warnert/working/Recordings/2019_general/190704/2019-07-04_15-21-04",
"/home/camp/warnert/working/Recordings/2019_general/190801/2019-08-01_16-38-19"]
chan_counts = [18, 18, 18, 18, 18, 32]
tc = tr.Threshold_Recording(dirs[dir_index], channel_count=chan_counts[dir_index], dat_name='dat_for_jULIE_analysis.dat')
bp_data = tc.set_threshold_crossings(return_bp=True, bp_indiv_chans=True)
tc.set_all_tcs_amplitudes(bp_data=bp_data)
|
[
"tom.p.a.warner@gmail.com"
] |
tom.p.a.warner@gmail.com
|
d3299a4987bd52c90b6123ec61084b03178ec140
|
866e3bd4b148a7c8f5af1ace397c1c82c9683fed
|
/STR/opencv-text-recognition/text_recognition.py
|
6b3744a6e080cfdd2f8d1bfac8d9230b558b6be4
|
[] |
no_license
|
GYLee1618/MARTH
|
e908a4d297477adfc7f5681372d2640b57d6013e
|
b895eaaa38743ad1acb36a5f13607d182848accc
|
refs/heads/master
| 2020-04-01T15:50:27.435716
| 2019-04-26T20:09:50
| 2019-04-26T20:09:50
| 153,354,105
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,557
|
py
|
# USAGE
# python text_recognition.py --east frozen_east_text_detection.pb --image images/example_01.jpg
# python text_recognition.py --east frozen_east_text_detection.pb --image images/example_04.jpg --padding 0.05
# import the necessary packages
from imutils.object_detection import non_max_suppression
import numpy as np
import pytesseract
import argparse
import cv2
def decode_predictions(scores, geometry):
# grab the number of rows and columns from the scores volume, then
# initialize our set of bounding box rectangles and corresponding
# confidence scores
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the
# geometrical data used to derive potential bounding box
# coordinates that surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability,
# ignore it
if scoresData[x] < args["min_confidence"]:
continue
# compute the offset factor as our resulting feature
# maps will be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and
# then compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height
# of the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates
# for the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score
# to our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# return a tuple of the bounding boxes and associated confidences
return (rects, confidences)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str,
help="path to input image")
ap.add_argument("-east", "--east", type=str,
help="path to input EAST text detector")
ap.add_argument("-c", "--min-confidence", type=float, default=0.5,
help="minimum probability required to inspect a region")
ap.add_argument("-w", "--width", type=int, default=320,
help="nearest multiple of 32 for resized width")
ap.add_argument("-e", "--height", type=int, default=320,
help="nearest multiple of 32 for resized height")
ap.add_argument("-p", "--padding", type=float, default=0.0,
help="amount of padding to add to each border of ROI")
args = vars(ap.parse_args())
# load the input image and grab the image dimensions
image = cv2.imread(args["image"])
orig = image.copy()
(origH, origW) = image.shape[:2]
# set the new width and height and then determine the ratio in change
# for both the width and height
(newW, newH) = (args["width"], args["height"])
rW = origW / float(newW)
rH = origH / float(newH)
# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
# decode the predictions, then apply non-maxima suppression to
# suppress weak, overlapping bounding boxes
(rects, confidences) = decode_predictions(scores, geometry)
boxes = non_max_suppression(np.array(rects), probs=confidences)
# initialize the list of results
results = []
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective
# ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
# in order to obtain a better OCR of the text we can potentially
# apply a bit of padding surrounding the bounding box -- here we
# are computing the deltas in both the x and y directions
dX = int((endX - startX) * args["padding"])
dY = int((endY - startY) * args["padding"])
# apply padding to each side of the bounding box, respectively
startX = max(0, startX - dX)
startY = max(0, startY - dY)
endX = min(origW, endX + (dX * 2))
endY = min(origH, endY + (dY * 2))
# extract the actual padded ROI
roi = orig[startY:endY, startX:endX]
	# in order to apply Tesseract v4 to OCR text we must supply
	# (1) a language, (2) an OEM flag of 1, indicating that we
	# wish to use the LSTM neural net model for OCR, and finally
	# (3) a PSM value, in this case 7, which implies that we are
	# treating the ROI as a single line of text
config = ("-l eng --oem 1 --psm 7")
text = pytesseract.image_to_string(roi, config=config)
# add the bounding box coordinates and OCR'd text to the list
# of results
results.append(((startX, startY, endX, endY), text))
# sort the results bounding box coordinates from top to bottom
results = sorted(results, key=lambda r:r[0][1])
# loop over the results
for ((startX, startY, endX, endY), text) in results:
# display the text OCR'd by Tesseract
print("OCR TEXT")
print("========")
print("{}\n".format(text))
# strip out non-ASCII text so we can draw the text on the image
# using OpenCV, then draw the text and a bounding box surrounding
# the text region of the input image
text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
	cv2.rectangle(orig, (startX, startY), (endX, endY),
		(0, 0, 255), 10)
	cv2.putText(orig, text, (startX, startY - 20),
		cv2.FONT_HERSHEY_SIMPLEX, 15, (0, 0, 255), 20)

# show the output image (boxes from every detection accumulate on `orig`;
# the per-iteration copy in the original missed the last box when displayed)
cv2.imwrite("output.png", orig)
cv2.imshow("Text Detection", orig)
cv2.waitKey(0)
|
[
"ialb998@gmail.com"
] |
ialb998@gmail.com
|
b54c8a31ae3f67f7f13df8c43907dc2d37fcfb92
|
c69e4cf8e3ff7fb9b0dd180065e7b3fbc49dea0f
|
/encoder.py
|
1326037a6ddd833e6f8f1a361b0f5c0ee2d38f55
|
[] |
no_license
|
fontysrobotics/AGV_control_ROS_MCU
|
f20d2e272f5ace86cc81f72cf6536a3e1b5d4677
|
d8160ecea303845d0e55954d7af41e77c92dfaf3
|
refs/heads/master
| 2023-06-13T23:08:56.765907
| 2021-07-09T08:06:33
| 2021-07-09T08:06:33
| 369,505,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
import board
import time
from digitalio import DigitalInOut, Direction, Pull
import rotaryio
from pwmio import PWMOut
import math
class motor_enc():
# Encoder resolution: 48 counts per motor shaft revolution
# Gear ratio: 30:1
def __init__(self, enc_pin_A, enc_pin_B):
# self.encoder = rotaryio.IncrementalEncoder(enc_pin_A, enc_pin_B)
self.count = 0
self.position = 0
self.last_position = -1
self.enc_a = DigitalInOut(enc_pin_A)
self.enc_a.direction = Direction.INPUT
self.enc_b = DigitalInOut(enc_pin_B)
self.enc_b.direction = Direction.INPUT
    def get_motor_speed(self):
        # NOTE: despite the name, this returns the cumulative quadrature count;
        # callers are expected to differentiate the count over time to get speed
        self.position = self.enc_a.value
if self.position != self.last_position:
if (self.enc_b.value != self.position):
self.count = self.count + 1
else:
self.count = self.count - 1
self.last_position = self.position
return self.count
class odom_calc():
def __init__(self):
self.time_last = time.time()
self.old_counter_1 = 0
self.old_counter_2 = 0
def Odometry(self, counter_1, counter_2):
time_now = time.time()
delta_vel_right = ((counter_1 - self.old_counter_1) * 2 * math.pi * 0.05 / 64)/(time_now-self.time_last) # 0.05m = wheel radius, 64 = encoder steps
delta_vel_left = ((counter_2 - self.old_counter_2) * 2 * math.pi * 0.05 / 64)/(time_now-self.time_last)
self.old_counter_1 = counter_1
self.old_counter_2 = counter_2
self.time_last = time_now
        delta_vel_center = (delta_vel_right + delta_vel_left) / 2
        # v / r: this is the mean wheel angular velocity; the robot's yaw rate
        # would instead be (delta_vel_right - delta_vel_left) / track_width
        delta_ang_center = delta_vel_center / 0.05
        return delta_vel_center, delta_ang_center
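
# Usage sketch (added, illustrative; the pin names are hypothetical -- use the
# pads your encoder is actually wired to):
#
#   enc = motor_enc(board.D5, board.D6)
#   odom = odom_calc()
#   while True:
#       count = enc.get_motor_speed()        # cumulative quadrature count
#       v, w = odom.Odometry(count, count)   # same count reused for both wheels
#       time.sleep(0.1)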
|
[
"quirineengbers@teamrembrandts.com"
] |
quirineengbers@teamrembrandts.com
|
cdbd0d9436036542fab660fed27a6a9af604f0d5
|
67c0d7351c145d756b2a49e048500ff361f7add6
|
/xpresso/ai/admin/controller/authentication/ldap_manager.py
|
7bfcf63d0ab790a42497df4cd589343641ea9831
|
[] |
no_license
|
Krishnaarunangsu/XpressoDataHandling
|
ba339ae85b52e30715f47406ddb74966350848aa
|
0637a465088b468d6fdb6d1bb6f7b087547cec56
|
refs/heads/master
| 2020-06-27T19:58:43.358340
| 2019-08-29T16:59:08
| 2019-08-29T16:59:08
| 200,035,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
__all__ = ['LdapManager']
__author__ = 'Srijan Sharma'
from xpresso.ai.core.logging.xpr_log import XprLogger
from xpresso.ai.core.utils.xpr_config_parser import XprConfigParser
import ldap
import ldap.modlist as modlist
class LdapManager():
"""
Creates a class to perform LDAP operations i.e. authenticating user etc.
"""
LDAP_SECTION = 'ldap'
URL = 'ldap_url'
def __init__(self):
self.config = XprConfigParser(XprConfigParser.DEFAULT_CONFIG_PATH)
self.logger = XprLogger()
self.adminuser = "admin"
self.adminpassword = "admin"
def authenticate(self, username, password):
"""
Authenticates user using LDAP server
Args:
username(str): unique username provided
password(str):user account password
Returns:
bool : return True if user authenticated successfully,
            else raises the corresponding Exception
"""
self.logger.info("Authenticating using LDAP")
ldap_server = self.config[self.LDAP_SECTION][self.URL]
user_dn = f'cn={username},dc=abzooba,dc=com'
connect = ldap.initialize(ldap_server)
        try:
            connect.bind_s(user_dn, password)
            self.logger.info(
                "User:{} Successfully Authenticated".format(username))
            return True
        except ldap.INVALID_CREDENTIALS:
            # the original `return False` inside `finally` overrode the
            # successful `return True` above, so authenticate() always failed
            self.logger.error(
                "Invalid credentials for user {}".format(username))
            return False
        finally:
            connect.unbind_s()
def add(self, username, password):
"""
Adds a new user
Args:
username(str): Name of the user account to be added
password(str): Password specified for the account
Returns:
bool : return True if user added successfully,
            else raises the corresponding exception
"""
ldap_server = self.config[self.LDAP_SECTION][self.URL]
connect = ldap.initialize(ldap_server)
user_dn = f'cn={self.adminuser},dc=abzooba,dc=com'
add_dn = f'cn={username},dc=abzooba,dc=com'
attrs = {}
attrs['objectclass'] = [b'simpleSecurityObject', b'organizationalRole']
attrs['cn'] = [str.encode(username)]
attrs['userPassword'] = [str.encode(password)]
attrs['description'] = [b'Xpresso User']
        try:
            connect.bind_s(user_dn, self.adminpassword)
            connect.add_s(add_dn, modlist.addModlist(attrs))
            self.logger.info("Successfully added user {}".format(username))
            return True
        except ldap.INVALID_CREDENTIALS as e:
            self.logger.error("Invalid credentials provided : {}".format(e))
            raise e
        except ldap.LDAPError as e:
            self.logger.error("Error : {}".format(e))
            raise e
        finally:
            # unbind only; the original `return False` here silently swallowed
            # both the raised exceptions and the successful `return True`
            connect.unbind_s()
    def update_password(self, username, old_password, new_password):
        """
        Updates an already existing user account password
        Args:
            username(str): Name of the user account to be updated
            old_password(str): Already existing password
            new_password(str): New user password
        Returns:
            bool : return True if user password updated successfully,
            else raises the corresponding Exception
        """
ldap_server = self.config[self.LDAP_SECTION][self.URL]
connect = ldap.initialize(ldap_server)
user_dn = f'cn={username},dc=abzooba,dc=com'
        try:
            connect.bind_s(user_dn, old_password)
            add_pass = [(ldap.MOD_REPLACE, 'userPassword',
                         [str.encode(new_password)])]
            connect.modify_s(user_dn, add_pass)
            self.logger.info("Successfully updated password for {}".format(
                username))
            return True
        except ldap.LDAPError as e:
            # return the failure from the handler; a `return False` in `finally`
            # (as in the original) would override the `return True` above
            self.logger.error("Error : {}".format(e))
            return False
        finally:
            connect.unbind_s()
if __name__ == "__main__":
ld = LdapManager()
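    # Illustrative only (added): assumes a reachable LDAP server configured at
    # XprConfigParser.DEFAULT_CONFIG_PATH and a 'jdoe' account to manage
    # ld.add("jdoe", "initial-password")
    # assert ld.authenticate("jdoe", "initial-password")
    # ld.update_password("jdoe", "initial-password", "new-password")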
|
[
"arunangsutech@gmail.com"
] |
arunangsutech@gmail.com
|
91daa9eb50f7dcfd4c5a62ce370d6c24252cb435
|
d71a5202e396c90a03e6d091b6537cba2c39c4af
|
/zinnia/tests/views.py
|
296dfa5df018061296d5b9d68a22e3ba7e54e658
|
[
"BSD-3-Clause"
] |
permissive
|
kelsta/django-blog-zinnia
|
7616de493b8ef3ec13acbb3943407839e2ea504f
|
4b72619c93519456677bf9a1cd996d6ae0d029f8
|
refs/heads/master
| 2020-12-30T19:23:39.673992
| 2011-04-05T13:57:02
| 2011-04-05T13:57:02
| 1,402,802
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,450
|
py
|
"""Test cases for Zinnia's views"""
from datetime import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.template import TemplateDoesNotExist
from django.utils.translation import ugettext_lazy as _
from zinnia.models import Entry
from zinnia.models import Category
from zinnia.managers import PUBLISHED
from zinnia.settings import PAGINATION
class ZinniaViewsTestCase(TestCase):
"""Test cases for generic views used in the application,
for reproducing and correcting issue :
http://github.com/Fantomas42/django-blog-zinnia/issues#issue/3
"""
urls = 'zinnia.tests.urls'
fixtures = ['zinnia_test_data.json']
def setUp(self):
self.site = Site.objects.get_current()
self.author = User.objects.get(username='admin')
self.category = Category.objects.get(slug='tests')
def create_published_entry(self):
params = {'title': 'My test entry',
'content': 'My test content',
'slug': 'my-test-entry',
'tags': 'tests',
'creation_date': datetime(2010, 1, 1),
'status': PUBLISHED}
entry = Entry.objects.create(**params)
entry.sites.add(self.site)
entry.categories.add(self.category)
entry.authors.add(self.author)
return entry
def check_publishing_context(self, url, first_expected,
second_expected=None):
"""Test the numbers of entries in context of an url,"""
response = self.client.get(url)
self.assertEquals(len(response.context['object_list']), first_expected)
if second_expected:
self.create_published_entry()
response = self.client.get(url)
self.assertEquals(len(response.context['object_list']), second_expected)
return response
def test_zinnia_entry_archive_index(self):
self.check_publishing_context('/', 2, 3)
def test_zinnia_entry_archive_year(self):
self.check_publishing_context('/2010/', 2, 3)
def test_zinnia_entry_archive_month(self):
self.check_publishing_context('/2010/01/', 1, 2)
def test_zinnia_entry_archive_day(self):
self.check_publishing_context('/2010/01/01/', 1, 2)
def test_zinnia_entry_detail(self):
entry = self.create_published_entry()
entry.sites.clear()
        # Check for a 404 error, but the 404.html template may not exist
try:
self.assertRaises(TemplateDoesNotExist, self.client.get,
'/2010/01/01/my-test-entry/')
except AssertionError:
response = self.client.get('/2010/01/01/my-test-entry/')
self.assertEquals(response.status_code, 404)
entry.template = 'zinnia/_entry_detail.html'
entry.save()
entry.sites.add(Site.objects.get_current())
response = self.client.get('/2010/01/01/my-test-entry/')
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'zinnia/_entry_detail.html')
def test_zinnia_entry_detail_login(self):
entry = self.create_published_entry()
entry.login_required = True
entry.save()
response = self.client.get('/2010/01/01/my-test-entry/')
self.assertTemplateUsed(response, 'zinnia/login.html')
def test_zinnia_entry_detail_password(self):
entry = self.create_published_entry()
entry.password = 'password'
entry.save()
response = self.client.get('/2010/01/01/my-test-entry/')
self.assertTemplateUsed(response, 'zinnia/password.html')
self.assertEquals(response.context['error'], False)
response = self.client.post('/2010/01/01/my-test-entry/',
{'password': 'bad_password'})
self.assertTemplateUsed(response, 'zinnia/password.html')
self.assertEquals(response.context['error'], True)
response = self.client.post('/2010/01/01/my-test-entry/',
{'password': 'password'})
self.assertEquals(response.status_code, 302)
def test_zinnia_entry_channel(self):
self.check_publishing_context('/channel-test/', 2, 3)
def test_zinnia_category_list(self):
self.check_publishing_context('/categories/', 1)
entry = Entry.objects.all()[0]
entry.categories.add(Category.objects.create(title='New category',
slug='new-category'))
self.check_publishing_context('/categories/', 2)
def test_zinnia_category_detail(self):
response = self.check_publishing_context('/categories/tests/', 2, 3)
self.assertTemplateUsed(response, 'zinnia/category/entry_list.html')
def test_zinnia_category_detail_paginated(self):
"""Test case reproducing issue #42 on category
detail view paginated"""
for i in range(PAGINATION):
params = {'title': 'My entry %i' % i,
'content': 'My content %i' % i,
'slug': 'my-entry-%i' % i,
'creation_date': datetime(2010, 1, 1),
'status': PUBLISHED}
entry = Entry.objects.create(**params)
entry.sites.add(self.site)
entry.categories.add(self.category)
entry.authors.add(self.author)
response = self.client.get('/categories/tests/')
self.assertEquals(len(response.context['object_list']), PAGINATION)
response = self.client.get('/categories/tests/?page=2')
self.assertEquals(len(response.context['object_list']), 2)
response = self.client.get('/categories/tests/page/2/')
self.assertEquals(len(response.context['object_list']), 2)
def test_zinnia_author_list(self):
self.check_publishing_context('/authors/', 1)
entry = Entry.objects.all()[0]
entry.authors.add(User.objects.create(username='new-user',
email='new_user@example.com'))
self.check_publishing_context('/authors/', 2)
def test_zinnia_author_detail(self):
response = self.check_publishing_context('/authors/admin/', 2, 3)
self.assertTemplateUsed(response, 'zinnia/author/entry_list.html')
def test_zinnia_tag_list(self):
self.check_publishing_context('/tags/', 1)
entry = Entry.objects.all()[0]
entry.tags = 'tests, tag'
entry.save()
self.check_publishing_context('/tags/', 2)
def test_zinnia_tag_detail(self):
response = self.check_publishing_context('/tags/tests/', 2, 3)
self.assertTemplateUsed(response, 'zinnia/tag/entry_list.html')
def test_zinnia_entry_search(self):
self.check_publishing_context('/search/?pattern=test', 2, 3)
response = self.client.get('/search/?pattern=ab')
self.assertEquals(len(response.context['object_list']), 0)
self.assertEquals(response.context['error'], _('The pattern is too short'))
response = self.client.get('/search/')
self.assertEquals(len(response.context['object_list']), 0)
self.assertEquals(response.context['error'], _('No pattern to search found'))
def test_zinnia_sitemap(self):
response = self.client.get('/sitemap/')
self.assertEquals(len(response.context['entries']), 2)
self.assertEquals(len(response.context['categories']), 1)
entry = self.create_published_entry()
entry.categories.add(Category.objects.create(title='New category',
slug='new-category'))
response = self.client.get('/sitemap/')
self.assertEquals(len(response.context['entries']), 3)
self.assertEquals(len(response.context['categories']), 2)
def test_zinnia_trackback(self):
        # Check for a 404 error, but the 404.html template may not exist
try:
self.assertRaises(TemplateDoesNotExist, self.client.post,
'/trackback/404/')
except AssertionError:
response = self.client.post('/trackback/404/')
self.assertEquals(response.status_code, 404)
self.assertEquals(self.client.post('/trackback/test-1/').status_code, 302)
self.assertEquals(self.client.get('/trackback/test-1/').status_code, 302)
entry = Entry.objects.get(slug='test-1')
entry.pingback_enabled = False
entry.save()
self.assertEquals(self.client.post('/trackback/test-1/', {'url': 'http://example.com'}).content,
'<?xml version="1.0" encoding="utf-8"?>\n<response>\n \n <error>1</error>\n '
'<message>Trackback is not enabled for Test 1</message>\n \n</response>\n')
entry.pingback_enabled = True
entry.save()
self.assertEquals(self.client.post('/trackback/test-1/', {'url': 'http://example.com'}).content,
'<?xml version="1.0" encoding="utf-8"?>\n<response>\n \n <error>0</error>\n \n</response>\n')
self.assertEquals(self.client.post('/trackback/test-1/', {'url': 'http://example.com'}).content,
'<?xml version="1.0" encoding="utf-8"?>\n<response>\n \n <error>1</error>\n '
'<message>Trackback is already registered</message>\n \n</response>\n')
|
[
"fantomas42@gmail.com"
] |
fantomas42@gmail.com
|
3b7be3e7b1b0af7eeb2bb0fc441591be9c5bef7d
|
fabc11016d9bf3be41df03fe9a76053b905cede6
|
/showcase_backend/projects/migrations/0007_merge.py
|
f0521f52c18bfaa61860ceb4601182d183bf79d9
|
[] |
no_license
|
eluciano11/showcase-backend
|
ee48b681fa2ea63be8dbeae7d316fed5fe410378
|
a046c59ebd48eddecacb75bf09c593d29248ec57
|
refs/heads/develop
| 2016-09-09T23:49:28.180670
| 2015-03-01T14:00:00
| 2015-03-01T14:00:00
| 28,466,872
| 2
| 0
| null | 2015-02-11T14:32:25
| 2014-12-25T01:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0006_auto_20150123_0113'),
('projects', '0005_auto_20150120_0347'),
]
operations = [
]
|
[
"jpadilla1@users.noreply.github.com"
] |
jpadilla1@users.noreply.github.com
|
0443caff8d52c15be2d25b2895615ab1a0edbb8e
|
ac00afbcefb76cbe271e0cdff5526f8aa1c71da7
|
/lifxlanexamples/sniffer.py
|
a80dc84329492f4cc9b955e3936720a5ea0d4e70
|
[] |
no_license
|
redding1/lifxcontrol
|
3aaeea6fb34a4021e9d23af6af8a9871d1fa512b
|
afb894a9057d7870a877aff61688c48e1f6d4875
|
refs/heads/master
| 2021-01-10T05:06:22.118785
| 2015-11-09T12:09:57
| 2015-11-09T12:09:57
| 45,006,408
| 2
| 0
| null | 2015-11-09T12:09:58
| 2015-10-27T00:21:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,486
|
py
|
#!/usr/bin/env python
# sniffer.py
# Author: Meghan Clark
# Listens to broadcast UDP messages. If you are using the LIFX app to control a bulb,
# you might see some things.
from socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, SO_BROADCAST, timeout
from lifxlan import *
from time import time
class Sniffer(object):
def __init__(self):
self.port = UDP_BROADCAST_PORT
self.sock = None
self.sniff()
def sniff(self):
self.initialize_socket()
try:
while(True):
try:
data = self.sock.recv(1024)
request = unpack_lifx_message(data)
print("\nRECV:"),
print(request)
except timeout:
pass
except KeyboardInterrupt:
self.sock.close()
    def send(self, msg):
        if self.sock is None:
            self.initialize_socket()
        msg.origin = 1
        print("SEND:")
        print(msg)
        self.sock.sendto(msg.packed_message, (UDP_BROADCAST_IP, self.port))
def initialize_socket(self):
self.sock = socket(AF_INET, SOCK_DGRAM)
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.sock.settimeout(0.5)
port = UDP_BROADCAST_PORT
self.sock.bind(("", port))
if __name__ == "__main__":
Sniffer()
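
# Design note (added): __init__() calls sniff(), which blocks until Ctrl-C, so
# send() is unreachable on a live instance; moving the sniff() call out of
# __init__ and invoking it explicitly under __main__ would make send() usable.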
|
[
"matthewreddin@gmail.com"
] |
matthewreddin@gmail.com
|
1e0997427e3255e41b59d8fce3dfb08af2d054f8
|
aaafa161d2f507249dfb984a1d322f803c927177
|
/python/tll/config.pyx
|
3ebd441cc9e77a301056c3bddb532142cfc68130
|
[
"MIT"
] |
permissive
|
shramov/tll
|
4daf76af334c877b99a3f964a66536a1072aa3e7
|
72338ff3dcc351666ed86a814ebf093491820dc1
|
refs/heads/master
| 2023-08-17T13:06:39.833241
| 2023-08-16T08:12:10
| 2023-08-17T06:40:47
| 204,337,361
| 7
| 2
|
MIT
| 2021-11-04T19:54:37
| 2019-08-25T18:59:54
|
C++
|
UTF-8
|
Python
| false
| false
| 10,491
|
pyx
|
#!/usr/bin/env python
# vim: sts=4 sw=4 et
from .buffer cimport *
from .config cimport *
from .s2b cimport *
from cpython.ref cimport Py_INCREF, Py_DECREF
from libc.errno cimport ENOENT
from libc.stdlib cimport malloc
from libc.string cimport memcpy
from .conv import getT
from .error import TLLError
__default_tag = object()
cdef object _check_error(int r, object message):
if r == ENOENT:
raise KeyError(message)
elif r:
raise TLLError(message, r)
return
cdef class Callback:
cdef object _cb
def __init__(self, cb):
self._cb = cb
@property
def callback(self): return self._cb
def __call__(self):
return self._cb()
cdef char * pyvalue_callback(int * length, void * data) with gil:
cdef object cb = <object>data
cdef Py_buffer * buf
cdef char * ptr
try:
v = cb()
if v is None:
return NULL
elif isinstance(v, bytes):
pass
else:
if not isinstance(v, str):
v = str(v)
v = v.encode('utf-8')
v = memoryview(v)
buf = PyMemoryView_GET_BUFFER(v)
ptr = <char *>malloc(buf.len)
memcpy(ptr, buf.buf, buf.len)
length[0] = buf.len
return ptr
except:
return NULL
cdef void pyvalue_callback_free(tll_config_value_callback_t f, void * data) with gil:
if f != pyvalue_callback:
return
cdef cb = <object>data
if not isinstance(cb, Callback):
return
Py_DECREF(cb)
cdef class Config:
def __init__(self, bare=False):
pass
def __cinit__(self, bare=False):
self._ptr = NULL
self._const = 0
if not bare:
self._ptr = tll_config_new()
def __dealloc__(self):
if self._ptr != NULL:
tll_config_unref(self._ptr)
self._ptr = NULL
@staticmethod
cdef Config wrap(tll_config_t * ptr, int ref = False, int _const = False):
r = Config(bare=True)
if ref:
tll_config_ref(ptr)
r._ptr = ptr
r._const = _const
return r
@staticmethod
cdef Config wrap_const(const tll_config_t * ptr, int ref = False):
return Config.wrap(<tll_config_t *>(ptr), ref, True)
@classmethod
def load(self, path):
p = s2b(path)
cdef tll_config_t * cfg = tll_config_load(p, len(p))
if cfg == NULL:
raise TLLError("Failed to load {}".format(path))
return Config.wrap(cfg)
@classmethod
def load_data(self, proto, data):
p = s2b(proto)
d = s2b(data)
cdef tll_config_t * cfg = tll_config_load_data(p, len(p), d, len(d))
if cfg == NULL:
raise TLLError("Failed to load '{}' from '{}'".format(proto, data))
return Config.wrap(cfg)
def copy(self):
return Config.wrap(tll_config_copy(self._ptr), False, self._const)
__copy__ = copy
def __deepcopy__(self, memo):
return self.copy()
def sub(self, path, create=False, throw=True):
p = s2b(path)
cdef tll_config_t * cfg = tll_config_sub(self._ptr, p, len(p), 1 if create else 0)
if cfg == NULL:
if throw:
raise KeyError("Sub-config {} not found".format(path))
return
return Config.wrap(cfg, False, self._const)
def merge(self, cfg, overwrite=True):
        if not isinstance(cfg, Config):
            raise TypeError("Merge argument must be Config object, got {}".format(type(cfg)))
r = tll_config_merge(self._ptr, (<Config>cfg)._ptr, overwrite)
if r:
raise TLLError("Failed to merge config", r)
def process_imports(self, key):
k = s2b(key)
r = tll_config_process_imports(self._ptr, k, len(k))
if r:
raise TLLError("Failed to process imports {}".format(key), r)
def value(self):
return bool(tll_config_value(self._ptr))
def set(self, key, value):
if self._const:
raise RuntimeError("Can not modify const Config")
if isinstance(value, Config):
return self.set_config(key, value)
elif callable(value):
return self.set_callback(key, value)
k = s2b(key)
v = s2b(value)
r = tll_config_set(self._ptr, k, len(k), v, len(v))
if r:
raise TLLError("Failed to set key {}".format(key), r)
def set_link(self, key, value):
if self._const:
raise RuntimeError("Can not modify const Config")
k = s2b(key)
v = s2b(value)
r = tll_config_set_link(self._ptr, k, len(k), v, len(v))
if r:
raise TLLError("Failed to set link {} -> {}".format(key, value), r)
def set_config(self, key, value):
if self._const:
raise RuntimeError("Can not modify const Config")
k = s2b(key)
r = tll_config_set_config(self._ptr, k, len(k), (<Config>value)._ptr, 0)
if r:
raise TLLError("Failed to set sub config {}".format(key), r)
def set_callback(self, key, value):
if self._const:
raise RuntimeError("Can not modify const Config")
k = s2b(key)
cb = Callback(value)
r = tll_config_set_callback(self._ptr, k, len(k), pyvalue_callback, <void *>cb, pyvalue_callback_free)
if r:
raise TLLError(f"Failed to set callback at {key}: {value}", r)
Py_INCREF(cb)
def _get(self, decode=True):
if tll_config_value(self._ptr) == 0: return None
cdef int len = 0;
cdef char * buf = tll_config_get_copy(self._ptr, NULL, 0, &len)
if buf == NULL:
return None
try:
if decode:
return b2s(buf[:len])
else:
return buf[:len]
finally:
tll_config_value_free(buf)
def get(self, key=None, default=__default_tag, decode=True):
if key is None: return self._get(decode=decode)
k = s2b(key)
cdef tll_config_t * cfg = tll_config_sub(self._ptr, k, len(k), 0)
if cfg == NULL:
            if default is __default_tag:
raise KeyError("Key {} not found".format(key))
return default
return Config.wrap(cfg).get(decode=decode)
def get_url(self, key=None, default=__default_tag):
cdef Config sub = self
if key is not None:
sub = self.sub(key, create=False, throw=False)
if sub is None:
                if default is __default_tag:
raise KeyError(f"Key {key} not found")
return default
r = Config.wrap(tll_config_get_url(sub._ptr, NULL, 0))
error = r.get()
if error is not None:
raise ValueError(f"Invalid url at '{key}': {error}")
return Url(r)
def getT(self, key, default):
return getT(self, key, default)
def unlink(self, key):
if self._const:
raise RuntimeError("Can not modify const Config")
k = s2b(key)
_check_error(tll_config_unlink(self._ptr, k, len(k)), f'Failed to unlink "{key}"')
def unset(self, key):
if self._const:
raise RuntimeError("Can not modify const Config")
k = s2b(key)
_check_error(tll_config_unset(self._ptr, k, len(k)), f'Failed to unset "{key}"')
def remove(self, key):
if self._const:
raise RuntimeError("Can not modify const Config")
k = s2b(key)
_check_error(tll_config_remove(self._ptr, k, len(k)), f'Failed to remove "{key}"')
def has(self, key):
k = s2b(key)
return tll_config_has(self._ptr, k, len(k))
def browse(self, mask, subpath=False, cb=None):
m = s2b(mask)
class appender(list):
def __init__(self, sub):
self.sub = sub
def __call__(self, k, v):
if v.value() or self.sub:
self.append((k, v.get()))
_cb = appender(subpath) if cb is None else cb
tll_config_browse(self._ptr, m, len(m), browse_cb, <void *>_cb)
if cb is None:
return list(_cb)
@staticmethod
def from_dict(d):
r = Config()
for k,v in d.items():
if isinstance(v, dict):
v = Config.from_dict(v)
elif isinstance(v, (list, tuple)):
v = Config.from_dict({f'{i:04d}':x for (i, x) in enumerate(v)})
r.set(k, v)
return r
def as_dict(self):
if self.value():
return self.get()
class cb:
def __init__(self):
self.r = {}
def __call__(self, k, v):
if self.r == {} and k == '0000':
self.r = []
if isinstance(self.r, dict):
self.r[k] = v.as_dict()
else:
self.r.append(v.as_dict())
_cb = cb()
self.browse('*', cb=_cb)
return _cb.r
def __contains__(self, key): return self.has(key)
def __getitem__(self, key): return self.get(key)
def __setitem__(self, key, value): self.set(key, value)
def __delitem__(self, key): self.remove(key)
cdef int browse_cb(const char * key, int klen, const tll_config_t *value, void * data):
cb = <object>data
cfg = Config.wrap(<tll_config_t *>value, ref=True, _const=True)
cb(b2s(key[:klen]), cfg)
return 0
cdef class Url(Config):
def __init__(self, cfg = None):
if cfg is None:
Config.__init__(self)
return
elif not isinstance(cfg, Config):
raise ValueError("Url can be constructed from Config, got {}".format(cfg))
Config.__init__(self, bare=True)
self._ptr = (<Config>cfg)._ptr
tll_config_ref(self._ptr)
def copy(self):
return Url(Config.copy(self))
__copy__ = copy
def __deepcopy__(self, memo):
return self.copy()
@classmethod
def parse(self, s):
return Url(Config.load_data("url", s))
@property
def proto(self): return self.get('tll.proto', '')
@proto.setter
def proto(self, v): self['tll.proto'] = v
@property
def host(self): return self.get('tll.host', '')
@host.setter
def host(self, v): self['tll.host'] = v
def __str__(self):
return '{}://{};{}'.format(self.proto, self.host, ';'.join(['{}={}'.format(k,v) for k,v in self.browse('**') if k not in {'tll.proto', 'tll.host'}]))
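
# Usage sketch (added, illustrative; assumes the extension is built and that
# dotted paths address sub-configs, as tll_config_sub suggests):
#
#   from tll.config import Config, Url
#   cfg = Config.from_dict({'a': {'b': '1'}})
#   assert cfg.get('a.b') == '1'
#   url = Url.parse('tcp://127.0.0.1:5555;mode=client')
#   print(url.proto, url.host)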
|
[
"psha@nguni.psha.org.ru"
] |
psha@nguni.psha.org.ru
|
04326309a2f691efb6a25b07619b9636fcc15d74
|
b15e9e84cf248f69dd6c046f19828da6327bf0f6
|
/Personalenv/bin/isort
|
53537ec29a93146b3364935bf526395414d1c59c
|
[] |
no_license
|
Hanker00/PersonalWebsite
|
fc7bb36a1a1a90125c37c82e774d30cfc63b9137
|
2757e6db9d61c47afd3c8d5c941032334b6638a8
|
refs/heads/master
| 2020-04-10T15:23:15.838056
| 2019-01-11T07:20:23
| 2019-01-11T07:20:23
| 161,107,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
#!/Users/timothyn/Documents/PersonalWebsite/Personalenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nilsson.timothy@gmail.com"
] |
nilsson.timothy@gmail.com
|
|
b1359e0855558c1f67da78c708cd51127a79c159
|
abf3a3712cf12d1de4b2657e7dd38063d2f83534
|
/test_deconvolve_crop_reconvolve_algorithm.py
|
47729761146dceaac7dc4207817ab7539992c94a
|
[] |
no_license
|
anupgp/ephys_analysis
|
2876ca4371c5ced8b7d3db4e6c068bdf284b54b1
|
888ace8a440509de6007bd8e8cacdf6f0e5ddf81
|
refs/heads/master
| 2021-08-11T06:33:37.531018
| 2021-08-08T00:16:06
| 2021-08-08T00:16:06
| 246,890,724
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,799
|
py
|
from ephys_class import EphysClass
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import os
import re
ephysdatapath="/Volumes/GoogleDrive/Shared drives/Beique Lab DATA/Ephys Data/Olympus 2P/Anup/"
ophysdatapath="/Volumes/GoogleDrive/Shared drives/Beique Lab DATA/Imaging Data/Olympus 2P/Anup/"
masterdfpath = "/Users/macbookair/goofy/data/beiquelab/pfc_clusterd_inputs/pfc_clustered_glu_uncaging.xlsx"
masterdf = pd.read_excel(masterdfpath, header=0, usecols=10)  # 'use_cols' is not a pandas kwarg; the keyword is 'usecols'
masterdf["datefolder"] = masterdf["datefolder"].astype(int)
masterdf["date"] = pd.to_datetime(masterdf["datefolder"],format="%Y%m%d")
masterdf["dob"] = pd.to_datetime(masterdf["dob"],format="%Y%m%d")
masterdf["age"] = (masterdf["date"]-masterdf["dob"]).dt.days
print(masterdf)
channels = ['ImLEFT', 'IN1', 'IN7']
resch = "ImLEFT"
clampch = "IN1"
trgch = "IN7"
for row in range(0,masterdf.shape[0]):
# for row in range(20,30):
datefolder = str(masterdf.loc[row,"datefolder"])
cellfolder = str(masterdf.loc[row,"cellfolder"])
ephysfile = os.path.join(ephysdatapath,datefolder,cellfolder,str(masterdf.loc[row,"ephysfile"]))
ophysfile = os.path.join(ophysdatapath,datefolder,cellfolder,str(masterdf.loc[row,"imagefile"]))
print(ephysfile,"\n",ophysfile)
neuronid = masterdf.loc[row,"neuronid"]
branchid = masterdf.loc[row,"neuronid"]
spineid = masterdf.loc[row,"neuronid"]
clampmode = masterdf.loc[row,"clampmode"]
stimmode = masterdf.loc[row,"stim-mode"]
ephys = EphysClass(ephysfile,loaddata=True)
# ephys.info()
ephys.extract_stim_props(trgch)
ephys.extract_res_props(resch,trgch)
if(clampmode == "cc"):
# ephys.estimate_tau_access_res_from_epsp(resch,clampch)
ephys.deconvolve_crop_reconvolve(resch,clampch,trgch)
input()
|
[
"anupgp@gmail.com"
] |
anupgp@gmail.com
|
1d0319089d855a7689fd5f2fbff4a88ceda7d1b9
|
c42624a4207fcf4e92dfc1b854bba4d8d47413e3
|
/txt2pinyin.py
|
7a9e3466b95041ac4fc196efd93cfe7f57868fe0
|
[] |
no_license
|
Jiff-Zhang/labToDo
|
545106f0f73e9445760b638f492cf6c6df3deae4
|
bbec1fbb5162b2c6e80da390e18df12c9acce39a
|
refs/heads/master
| 2021-09-04T21:55:57.726009
| 2018-01-22T08:38:29
| 2018-01-22T08:38:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
# -*- encoding: UTF-8 -*-
import sys
import re
from pypinyin import pinyin, Style
consonant_list = ['b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k',
'h', 'j', 'q', 'x', 'zh', 'ch', 'sh', 'r', 'z',
'c', 's', 'y', 'w']
def pinyinformat(syllabel):
'''format pinyin to system's format'''
translate_dict = {'ju':'jv', 'qu':'qv', 'xu':'xv', 'zi':'zic',
'ci':'cic', 'si':'sic', 'zhi':'zhih',
'chi':'chih', 'shi':'shih', 'ri':'rih',
'yuan':'yvan', 'yue':'yve', 'yun':'yvn',
'iu':'iou', 'ui':'uei', 'un':'uen'}
translate_dict_more = {'ya':'yia', 'ye':'yie', 'yao':'yiao',
'you':'yiou', 'yan':'ian', 'yin':'yin',
'yang':'yiang', 'ying':'ying', 'yong':'yiong',
'wa':'wua', 'wo':'wuo', 'wai':'wuai',
'wei':'wuei', 'wan':'wuan', 'wen':'wuen',
'weng':'wueng', 'wang':'wuang'}
    # 'yun' must be replaced with 'yvn' first, and only after that 'un' with 'uen'
for key, value in translate_dict.items():
syllabel = syllabel.replace(key, value)
for key, value in translate_dict_more.items():
syllabel = syllabel.replace(key, value)
if not syllabel[-1].isdigit():
syllabel = syllabel + '5'
return syllabel
def seprate_syllabel(syllabel):
'''seprate syllable to consonant + ' ' + vowel '''
assert syllabel[-1].isdigit()
if syllabel[0:2] in consonant_list:
return syllabel[0:2].encode('utf-8'),syllabel[2:].encode('utf-8')
elif syllabel[0] in consonant_list:
return syllabel[0].encode('utf-8'),syllabel[1:].encode('utf-8')
else:
return (syllabel.encode('utf-8'),)
def txt2pinyin(txt):
    # NOTE: this script targets Python 2 (`unicode` below); on Python 3 the
    # text can be passed directly: pinyin(txt, style=Style.TONE3)
    phone_list = []
    pinyin_list = pinyin(unicode(txt, 'utf-8'), style=Style.TONE3)
    for item in pinyin_list:
        phone_list.append(seprate_syllabel(pinyinformat(item[0])))
    return phone_list
if __name__ == '__main__':
print(txt2pinyin('你好看'))
'''
Usage example:
print(txt2pinyin('中华人民共和国论居然'))
['zh ong1', 'h ua2', 'r en2', 'm in2', 'g ong4', 'h e2', 'g uo2', 'l uen4', 'jv1', 'r an2']
'''
|
[
"src_dis@163.com"
] |
src_dis@163.com
|
e0c4964c2b67277616700f6906273b9c35a62822
|
578a567fc0d18dfc03098f3860c2fdc446258d10
|
/assig4.py
|
24e2c0f9de12bbc84ca46f73fbb31b3761a4e67f
|
[] |
no_license
|
321910303027/assig.py
|
6afa338f79451757a32378882c90dc72d62fceb9
|
da88bcd826be0694bdaef572122a6dcb551aa3c2
|
refs/heads/master
| 2022-11-04T21:21:37.856086
| 2020-07-02T05:22:15
| 2020-07-02T05:22:15
| 276,554,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
b = {1: 'a', 2: 'b', 3: 'c', 4: 'd'}
print(len(b))
|
[
"noreply@github.com"
] |
321910303027.noreply@github.com
|
c2dead0ca11828c797760a9bc8cec2504b67abbd
|
05fe579c12f0013ce83a106083ddb66ace5e8f47
|
/mindinsight/modelarts/adapter_notebook/__init__.py
|
b57e85b5889e5f1b2c2549ee4f0f1ff4a7f132cb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] |
permissive
|
mindspore-ai/mindinsight
|
59d3f47144ada9a12d2c82d9826ad5f5288aed78
|
a774d893fb2f21dbc3edb5cd89f9e6eec274ebf1
|
refs/heads/master
| 2023-07-22T22:46:43.075617
| 2023-07-17T11:26:58
| 2023-07-17T11:26:58
| 250,692,948
| 224
| 24
|
Apache-2.0
| 2020-12-29T12:22:51
| 2020-03-28T01:58:56
|
Python
|
UTF-8
|
Python
| false
| false
| 801
|
py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""base_notebook module for MindInsight."""
from .base_notebook import BaseNotebook
from .registry import NOTEBOOK
|
[
"9107532+xiaocongcong666@user.noreply.gitee.com"
] |
9107532+xiaocongcong666@user.noreply.gitee.com
|
2bb6baded6434e580fa772bd583a638141d05369
|
a3312431229bcb710e592986dd09617806cb1baf
|
/mysite/settings.py
|
31ca1cb4a7733aa962d3627055ad99e808603c2c
|
[] |
no_license
|
hane1818/Django-Girls
|
09416a97bb0465a0d5b96385d0cfddc92f1fa71c
|
b5ffa065ccb65bfc48c8d3800d101bf23472c64d
|
refs/heads/master
| 2021-01-10T11:04:00.945313
| 2015-10-06T11:45:35
| 2015-10-06T11:45:35
| 43,746,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,703
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9jqjwz^@_x%59nbuqo)vw6&m^_2ph5j^c$b5l9l4j#0!_ne1v4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'trips',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
[
"hane0131@gmail.com"
] |
hane0131@gmail.com
|
6f3d0ff6e741110c553962a4b39ef175db211f25
|
4d3f1884b9aa6cc15b599151c56d7c07ea30d2a9
|
/AutomaticMultipleDatabases/AutoMultDataDemo/app2/models.py
|
821d6b5d92102e94c871c655cd3b2acca0371025
|
[] |
no_license
|
WangYyyyy/DjangoAutomaticDatabaseRouting
|
a203f6cac73087a0a72bfc999c01cacc173800df
|
6155ed6018a535929811ee31ddb1144c8c93af2b
|
refs/heads/master
| 2020-06-27T16:21:49.285702
| 2017-07-13T01:51:15
| 2017-07-13T01:51:15
| 97,065,490
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from django.db import models
# Create your models here.
class App2Model(models.Model):
line1 = models.CharField(max_length=50)
line2 = models.CharField(max_length=50)
line3 = models.IntegerField()
|
[
"5877156@qq.com"
] |
5877156@qq.com
|
1fe074e8abfc274c977fd83d5c91793d3514d173
|
22c0904569696140ea68ad76011314be1ccf8ae8
|
/myApp/migrations/0001_initial.py
|
75f422a25fccbf5deb155f66e5df2cfcb1812dfd
|
[] |
no_license
|
LuizBoina/WebScraping
|
734c0876ed25f916fbe636b388880f189a7bea40
|
e0393a25167decd2c3e039eee450bac308b58423
|
refs/heads/master
| 2022-12-13T03:28:53.313387
| 2020-03-21T22:28:38
| 2020-03-21T22:28:38
| 249,071,816
| 0
| 0
| null | 2022-12-08T03:51:27
| 2020-03-21T22:27:16
|
Python
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
# Generated by Django 3.0.4 on 2020-03-21 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('search', models.CharField(max_length=500)),
('created', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"lipeboina.boina@hotmail.com"
] |
lipeboina.boina@hotmail.com
|
44d6f0b9112f8e9441f77e4a96fe7f53f4cb6499
|
9e6bd9c1c9b775471b9573dfce29e21f737f0a0d
|
/app/api/v2/models_v2/models.py
|
52bcd78e340a4063ec7f8b6acc24c9a6ed179044
|
[] |
no_license
|
sylviawanjiku/Store_Manager_Api_v2
|
c3510c06fa3f4d660b7ab51564fff31486f62786
|
d8385828b1eca0eda90c777a6fa52915b46e7f45
|
refs/heads/develop
| 2022-12-10T04:48:38.615057
| 2018-11-02T12:25:28
| 2018-11-02T12:25:28
| 154,459,958
| 1
| 0
| null | 2022-05-25T00:35:07
| 2018-10-24T07:46:37
|
Python
|
UTF-8
|
Python
| false
| false
| 12,083
|
py
|
from werkzeug.security import generate_password_hash, check_password_hash
import psycopg2
from psycopg2 import extras, connect
# local imports
from flask import current_app
class Data_base:
""""database connection model"""
def __init__(self):
self.db_host = current_app.config['DB_HOST']
self.db_username = current_app.config['DB_USERNAME']
self.db_password = current_app.config['DB_PASSWORD']
self.db_name = current_app.config['DB_NAME']
# connect to the storemanager database
self.connect = psycopg2.connect(
host=self.db_host,
user=self.db_username,
password=self.db_password,
database=self.db_name
)
# open cursor for performing database operations
self.cur = self.connect.cursor(cursor_factory=extras.RealDictCursor)
# self.cur =self.connect.cursor()
def create_table(self, schema):
"""method for creating tables"""
self.cur.execute(schema)
self.save()
def drop_table(self, name):
"""method for dropping tables"""
self.cur.execute("DROP TABLE IF EXISTS " + name)
self.save()
def save(self):
"""method for saving a change made"""
self.connect.commit()
def close(self):
"""method for closing the cursor"""
self.cur.close()
class User(Data_base):
def __init__(self, username=None, first_name=None, last_name=None,
password=None, email=None, is_admin=False):
super().__init__()
self.username = username
self.first_name = first_name
self.last_name = last_name
if password:
self.hash_password = generate_password_hash(password)
self.email = email
self.is_admin = is_admin
def create(self):
"""create a table for the users"""
self.create_table(
"""
CREATE TABLE users(
id serial PRIMARY KEY,
username VARCHAR NOT NULL,
first_name VARCHAR NOT NULL,
last_name VARCHAR NOT NULL,
password VARCHAR NOT NULL,
email VARCHAR NOT NULL,
is_admin BOOLEAN NOT NULL
);
"""
)
def drop(self):
"""Drop the table for users if it exists"""
self.drop_table('users')
def add(self):
"""Add a user to the created table users"""
insert_user = "INSERT INTO users(username,first_name,last_name,password,email,is_admin) VALUES( %s, %s, %s, %s, %s, %s)"
user_data = (self.username, self.first_name, self.last_name, self.hash_password, self.email, self.is_admin)
self.cur.execute(insert_user, user_data)
self.save()
    def make_admin(self, user_id):
        is_admin = True
        # parameterized query instead of string formatting (avoids SQL injection)
        self.cur.execute("UPDATE users SET is_admin = %s WHERE id = %s", (is_admin, user_id))
        self.save()
        self.close()
    # NOTE: this method is shadowed on instances by the `self.is_admin`
    # attribute set in __init__, so it is only reachable via the class
    def is_admin(self, username):
self.cur.execute("SELECT * FROM users WHERE username =%s", (username,))
selected_user = self.cur.fetchone()
if selected_user:
if selected_user["is_admin"] == True :
return True
return False
return False
    def update(self, user_id):
        """make store attendant admin"""
        # the original statement targeted the products table and embedded a
        # malformed parameter tuple inside the SQL string; users is the intended table
        self.cur.execute(
            "UPDATE users SET is_admin = %s WHERE id = %s",
            (self.is_admin, user_id)
        )
        self.save()
        self.close()
def fetch_user_by_id(self, user_id):
"""fetch a single product by user_id"""
self.cur.execute("SELECT * FROM users WHERE id = %s",(user_id,))
selected_user = self.cur.fetchone()
return selected_user
    def mapped_user(self, user_data):
        """Map a user row to an object"""
        # NOTE: positional indexing assumes a plain tuple cursor; the class's
        # RealDictCursor returns dict rows, which are indexed by column name
        self.id = user_data[0]
self.username = user_data[1]
self.first_name = user_data[2]
self.last_name = user_data[3]
self.hash_password = user_data[4]
self.email = user_data[5]
self.is_admin = user_data[6]
return self
def fetch_by_email(self,email):
"Fetch a user through email"
self.cur.execute("SELECT * FROM users WHERE email =%s", (email,))
selected_user = self.cur.fetchone()
return selected_user
def fetch_by_username(self,username):
"Fetch a user through username"
self.cur.execute("SELECT * FROM users WHERE username =%s", (username,))
selected_user = self.cur.fetchone()
return selected_user
def fetch_by_id(self,user_id):
"Fetch a user through id"
self.cur.execute("SELECT * FROM users WHERE id =%s", (user_id,))
selected_user = self.cur.fetchone()
return selected_user
    def serialize(self):
        """put the user data into a dictionary form"""
        return dict(
            id=self.id,
            username=self.username,
            first_name=self.first_name,
            last_name=self.last_name,
            hash_password=self.hash_password,
            email=self.email,
            is_admin=self.is_admin
        )
class Product(Data_base):
products = []
def __init__(self, product_name=None, brand=None, quantity=None, price=None,avail_stock=None ,min_stock=None, uom=None, category=None):
super().__init__()
self.product_name = product_name
self.brand = brand
self.quantity =quantity
self.price = price
self.avail_stock = avail_stock
self.min_stock = min_stock
self.uom = uom
self.category = category
def create(self):
""" create table products """
self.create_table(
"""
CREATE TABLE products (
id serial PRIMARY KEY,
product_name VARCHAR NOT NULL,
brand VARCHAR NOT NULL,
quantity INTEGER,
price INTEGER,
avail_stock INTEGER,
min_stock INTEGER,
uom VARCHAR,
category VARCHAR
);
"""
)
def drop(self):
""" drop table products if it already exists """
self.drop_table('products')
def add(self):
"""Add a product to the created table products """
insert_product="INSERT INTO products(product_name, brand, quantity, price,avail_stock ,min_stock, uom, category) VALUES (%s, %s,%s,%s,%s,%s,%s,%s )"
product_data = (self.product_name,self.brand,self.quantity,self.price,self.avail_stock,self.min_stock,self.uom,self.category)
self.cur.execute(insert_product, product_data)
self.save()
def mapped_product(self,product_data):
"""map the product details to an object"""
self.id = product_data[0]
self.product_name = product_data[1]
self.brand = product_data[2]
self.quantity = product_data[3]
self.price = product_data[4]
self.avail_stock = product_data[5]
self.min_stock = product_data[6]
self.uom = product_data[7]
self.category = product_data[8]
return self
def fetch_by_id(self,product_id):
"""fetch a single product by product_id"""
self.cur.execute("SELECT * FROM products WHERE id = %s",(product_id,))
selected_product = self.cur.fetchone()
return selected_product
def fetch_by_name(self,product_name):
"""fetch a single product by product_name"""
self.cur.execute("SELECT * FROM products WHERE product_name = %s",(product_name,))
selected_product = self.cur.fetchone()
return selected_product
def fetch_min_stock(self,product_name):
"""fetch a single product by product_name"""
self.cur.execute("SELECT * FROM products WHERE product_name = %s",(product_name,))
selected_product = self.cur.fetchone()
return selected_product["min_stock"]
def fetch_product_price(self,product_name):
"""fetch a single product by product_name"""
self.cur.execute("SELECT * FROM products WHERE product_name = %s",(product_name,))
selected_product = self.cur.fetchone()
return selected_product["price"]
def fetch_available_quantity(self,product_name):
"""fetch a single product by product_name"""
self.cur.execute("SELECT * FROM products WHERE product_name = %s",(product_name,))
selected_product = self.cur.fetchone()
return selected_product["avail_stock"]
def fetch_all_products(self):
""" fetch all products"""
self.cur.execute("SELECT * FROM products")
products = self.cur.fetchall()
self.save()
self.close()
return products
def update(self, product_id):
"""update an existing product details"""
self.cur.execute(
""" UPDATE products SET product_name =%s, brand= %s,quantity= %s,
price = %s, avail_stock = %s, min_stock = %s, uom = %s,category= %s
WHERE id =%s""", (elf.product_name, self.brand, self.quantity,
self.price, self.avail_stock, self.min_stock, self.uom, self.category,product_id)
)
self.save()
self.close()
def delete(self, product_id):
"""Delete a product"""
self.cur.execute("DELETE FROM products where id = %s",(product_id,))
self.save()
self.close()
    def serialize(self):
        """put the product data in form of a dictionary"""
        return dict(
            # id=self.id,
            product_name=self.product_name,
            brand=self.brand,
            quantity=self.quantity,
            price=self.price,
            avail_stock=self.avail_stock,
            min_stock=self.min_stock,
            uom=self.uom,
            category=self.category
        )
class Sales(Data_base):
sales = []
def __init__(self,attendant_name = None,product_name = None,quantity = None,price = None ,total_price = None):
super().__init__()
self.attendant_name =attendant_name
self.product_name = product_name
self.quantity = quantity
self.price =price
self.total_price =total_price
def create(self):
""" create table sales """
self.create_table(
"""
CREATE TABLE sales (
id serial PRIMARY KEY,
attendant_name VARCHAR NOT NULL,
product_name VARCHAR NOT NULL,
quantity INTEGER,
price INTEGER,
total_price INTEGER
);
"""
)
def drop(self):
""" drop table sales if it already exists """
self.drop_table('sales')
def add(self):
"""Add a sale to the created table products """
insert_sale = "INSERT INTO sales(attendant_name,product_name,quantity,price,total_price) VALUES( %s, %s, %s, %s, %s)"
sale_data = (self.attendant_name, self.product_name, self.quantity ,self.price ,self.total_price)
self.cur.execute(insert_sale,sale_data)
self.save()
def fetch_all_sales(self):
""" fetch all sales """
self.cur.execute("SELECT * FROM sales")
sales = self.cur.fetchall()
self.save()
self.close()
return sales
    def fetch_all_sales_attendant_name(self, username):
        """ fetch all sales made by the given attendant """
        self.cur.execute("SELECT * FROM sales WHERE attendant_name = %s", (username,))
        sales = self.cur.fetchall()
        self.save()
        self.close()
        return sales
def serialize(self):
"""put the product data in form of a dictionary"""
return dict(
attendant_name = self.attendant_name,
product_name = self.product_name,
quantity = self.quantity,
price = self.price,
total_price = self.total_price
)
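A minimal usage sketch for the classes above, assuming a configured Flask application whose config supplies DB_HOST, DB_USERNAME, DB_PASSWORD and DB_NAME, plus a reachable PostgreSQL instance; the app, credentials and sample data here are all hypothetical:
from flask import Flask

app = Flask(__name__)
app.config.update(DB_HOST='localhost', DB_USERNAME='postgres',
                  DB_PASSWORD='secret', DB_NAME='storemanager')  # placeholders

with app.app_context():
    user = User(username='jdoe', first_name='Jane', last_name='Doe',
                password='pass123', email='jdoe@example.com')
    user.drop()     # start from a clean table
    user.create()
    user.add()
    print(user.fetch_by_username('jdoe'))
    user.close()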
|
[
"sylviakatherine22@gmail.com"
] |
sylviakatherine22@gmail.com
|
752ea36f1314b2805b1d1b909b1fd8d9bf264848
|
43ec1c06825a39c31b976906f7bded21c19b6019
|
/google/cloud/talent_v4beta1/types/completion_service.py
|
7dbd241555f611a65bf5c9548fd643089fc1d7d7
|
[
"Apache-2.0"
] |
permissive
|
renovate-bot/python-talent
|
b70b036c9d5a6b2887ef428fca853940df067c4b
|
0413f5114256b8a8c2c157b33aa0cbc1eb9feca5
|
refs/heads/master
| 2023-06-07T05:27:59.879356
| 2021-08-30T16:12:17
| 2021-08-30T16:12:17
| 238,039,237
| 0
| 0
|
Apache-2.0
| 2020-02-03T19:01:22
| 2020-02-03T19:01:22
| null |
UTF-8
|
Python
| false
| false
| 5,044
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.talent_v4beta1.types import common
__protobuf__ = proto.module(
package="google.cloud.talent.v4beta1",
manifest={"CompleteQueryRequest", "CompleteQueryResponse",},
)
class CompleteQueryRequest(proto.Message):
r"""Auto-complete parameters.
Attributes:
parent (str):
Required. Resource name of tenant the completion is
performed within.
The format is "projects/{project_id}/tenants/{tenant_id}",
for example, "projects/foo/tenant/bar".
If tenant id is unspecified, the default tenant is used, for
example, "projects/foo".
query (str):
Required. The query used to generate
suggestions.
The maximum number of allowed characters is 255.
language_codes (Sequence[str]):
The list of languages of the query. This is the BCP-47
language code, such as "en-US" or "sr-Latn". For more
information, see `Tags for Identifying
Languages <https://tools.ietf.org/html/bcp47>`__.
The maximum number of allowed characters is 255.
page_size (int):
Required. Completion result count.
The maximum allowed page size is 10.
company (str):
If provided, restricts completion to specified company.
The format is
"projects/{project_id}/tenants/{tenant_id}/companies/{company_id}",
for example, "projects/foo/tenants/bar/companies/baz".
If tenant id is unspecified, the default tenant is used, for
example, "projects/foo".
scope (google.cloud.talent_v4beta1.types.CompleteQueryRequest.CompletionScope):
The scope of the completion. The defaults is
[CompletionScope.PUBLIC][google.cloud.talent.v4beta1.CompleteQueryRequest.CompletionScope.PUBLIC].
type_ (google.cloud.talent_v4beta1.types.CompleteQueryRequest.CompletionType):
The completion topic. The default is
[CompletionType.COMBINED][google.cloud.talent.v4beta1.CompleteQueryRequest.CompletionType.COMBINED].
"""
class CompletionScope(proto.Enum):
r"""Enum to specify the scope of completion."""
COMPLETION_SCOPE_UNSPECIFIED = 0
TENANT = 1
PUBLIC = 2
class CompletionType(proto.Enum):
r"""Enum to specify auto-completion topics."""
COMPLETION_TYPE_UNSPECIFIED = 0
JOB_TITLE = 1
COMPANY_NAME = 2
COMBINED = 3
parent = proto.Field(proto.STRING, number=1,)
query = proto.Field(proto.STRING, number=2,)
language_codes = proto.RepeatedField(proto.STRING, number=3,)
page_size = proto.Field(proto.INT32, number=4,)
company = proto.Field(proto.STRING, number=5,)
scope = proto.Field(proto.ENUM, number=6, enum=CompletionScope,)
type_ = proto.Field(proto.ENUM, number=7, enum=CompletionType,)
class CompleteQueryResponse(proto.Message):
r"""Response of auto-complete query.
Attributes:
completion_results (Sequence[google.cloud.talent_v4beta1.types.CompleteQueryResponse.CompletionResult]):
Results of the matching job/company
candidates.
metadata (google.cloud.talent_v4beta1.types.ResponseMetadata):
Additional information for the API
invocation, such as the request tracking id.
"""
class CompletionResult(proto.Message):
r"""Resource that represents completion results.
Attributes:
suggestion (str):
The suggestion for the query.
type_ (google.cloud.talent_v4beta1.types.CompleteQueryRequest.CompletionType):
The completion topic.
image_uri (str):
The URI of the company image for
[COMPANY_NAME][google.cloud.talent.v4beta1.CompleteQueryRequest.CompletionType.COMPANY_NAME].
"""
suggestion = proto.Field(proto.STRING, number=1,)
type_ = proto.Field(
proto.ENUM, number=2, enum="CompleteQueryRequest.CompletionType",
)
image_uri = proto.Field(proto.STRING, number=3,)
completion_results = proto.RepeatedField(
proto.MESSAGE, number=1, message=CompletionResult,
)
metadata = proto.Field(proto.MESSAGE, number=2, message=common.ResponseMetadata,)
__all__ = tuple(sorted(__protobuf__.manifest))
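For reference, a short sketch of building the request message defined above; the parent resource name is a placeholder, and actually issuing the query would additionally require an authenticated completion client:
from google.cloud.talent_v4beta1.types import CompleteQueryRequest

request = CompleteQueryRequest(
    parent="projects/my-project/tenants/my-tenant",  # placeholder tenant
    query="soft",
    page_size=5,
    scope=CompleteQueryRequest.CompletionScope.PUBLIC,
    type_=CompleteQueryRequest.CompletionType.COMBINED,
)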
|
[
"noreply@github.com"
] |
renovate-bot.noreply@github.com
|
dee8d64aff5b170d25bc626d7d2c905942ea54a1
|
2aa9f05b6136ae82b25f25f51d3f7f5291cc9e8d
|
/src/security.py
|
1f3c91e4665cc8f201c76c968cc67d1e5c0fdcd7
|
[] |
no_license
|
popovegor/povodochek
|
98792573e0fb137b767001082b0a6a2258932768
|
4d218b04264582140b1e223169afe91d0a6068a3
|
refs/heads/master
| 2022-09-03T11:29:21.932029
| 2014-10-01T12:31:06
| 2014-10-01T12:31:06
| 8,465,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from werkzeug.security import generate_password_hash, \
check_password_hash
def hash_password(pwd):
return generate_password_hash(pwd)
def check_password(pwd_hash, pwd):
return check_password_hash(pwd_hash, pwd)
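A quick, purely illustrative round trip through the two helpers:
pwd_hash = hash_password("s3cret")
assert check_password(pwd_hash, "s3cret")
assert not check_password(pwd_hash, "wrong")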
|
[
"root@egor-pc-povodochek.ri.domain"
] |
root@egor-pc-povodochek.ri.domain
|
b0489090abf122939c8f4de0122cc7acd26f5b43
|
167d211594951aeaafe08a01d3172238462902c3
|
/django/seminar2/assignment2/survey/urls.py
|
b28ebe0f9e1ff9a43d0ce101d3ec23e440b815fa
|
[] |
no_license
|
dongjangoon/19.5-rookies
|
50458b15160cc9aadcb686a9a7e2ae64db0fb9e8
|
d847e28a7ecc1258a951628c685c32fe84bd7d87
|
refs/heads/master
| 2023-08-27T21:22:24.981421
| 2021-11-14T16:56:58
| 2021-11-14T16:56:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
from django.urls import include, path
from rest_framework.routers import SimpleRouter
from survey.views import OperatingSystemViewSet, SurveyResultViewSet, top_50
app_name = 'survey'
router = SimpleRouter()
router.register('survey', SurveyResultViewSet, basename='survey')
router.register('os', OperatingSystemViewSet, basename='os')
urlpatterns = [
path('', include(router.urls)),
path('template', top_50)
]
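For context, these routes would typically be mounted from the project's root urls.py along the following lines; the 'api/' prefix is an assumption:
from django.urls import include, path

urlpatterns = [
    path('api/', include('survey.urls')),  # prefix is a placeholder
]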
|
[
"wlgur7238@snu.ac.kr"
] |
wlgur7238@snu.ac.kr
|
06b889d5e925efb566ec29ec9a08378211f8ba80
|
c0973d6939ef419ed3d261d95167d537499a553a
|
/OnePy/constants.py
|
541ebf3a5bc8a3618a0828c529779f52a22aca7d
|
[
"MIT"
] |
permissive
|
mj3428/OnePy
|
0c6e4be9b4bb36ae66b566dfa85cd44bae2a07de
|
8dc13fc21502daa5786aecaa4451ccba32fc8a14
|
refs/heads/master
| 2020-04-05T10:28:33.550915
| 2018-11-08T04:07:05
| 2018-11-08T04:07:05
| 134,518,682
| 0
| 0
|
MIT
| 2018-05-23T05:38:12
| 2018-05-23T05:38:11
| null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
from enum import Enum
class ActionType(Enum):
Buy = 'Buy'
Sell = 'Sell'
Short = 'Short'
Cover = 'Cover'
Exit_all = 'Exit_all'
Cancel = 'Cancel'
class OrderType(Enum):
Market = 'Market'
Limit = 'Limit'
Stop = 'Stop'
Trailing_stop = 'Trailing_stop'
Limit_pct = 'Limit_pct'
Stop_pct = 'Stop_pct'
Trailing_stop_pct = 'Trailing_stop_pct'
class OrderStatus(Enum):
Created = "Created"
Submitted = "Submitted"
Partial = "Partial"
Completed = "Completed"
Canceled = "Canceled"
Expired = "Expired"
Margin = "Margin"
Rejected = "Rejected"
Triggered = "Triggered"
class EVENT(Enum):
Market_updated = 'Market_updated'
Data_cleaned = 'Data_cleaned'
Signal_generated = 'Signal_generated'
Submit_order = 'Submit_order'
Record_result = 'Record_result'
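These are standard `enum.Enum` members, so a typical dispatch over them looks like this (illustrative only):
action = ActionType.Buy

if action is ActionType.Buy:
    print(action.value)  # -> 'Buy'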
|
[
"chenjiayicjy@126.com"
] |
chenjiayicjy@126.com
|
eb0dd808b12a66e0297e9f4db5f0228fd397cb14
|
31fce98c1f0c271f39e3d321dfd099ac0b00105a
|
/Machine-Learning/svm.py
|
63bb0eb2eb7dc39fde1809fa31ff383948aad9a5
|
[] |
no_license
|
aung2phyowai/MI-BCI_ML
|
b4024c9472904b32d45f73728f7bf4131d129189
|
0a728c1230ad47bc1e5586101d7d634296048da9
|
refs/heads/master
| 2023-06-23T18:44:47.811555
| 2021-07-24T09:48:10
| 2021-07-24T09:48:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 23:14:24 2019
@author: omi
"""
from sklearn import svm
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report,confusion_matrix
eeg_data=pd.read_csv("E:\\Research\\Ensemble DT in BMI\\2_3_class_data\\2omitrain.csv")
eeg_test_data=pd.read_csv("E:\\Research\\Ensemble DT in BMI\\2_3_class_data\\2omitest.csv")
eeg_data.head()
featute_col=['Theta','Alpha','Low_beta','High_beta','Gamma']
x=eeg_data[featute_col]
y=eeg_data['Class']
y.head()
X_train=eeg_data[featute_col]
y_train=eeg_data['Class']
X_test=eeg_test_data[featute_col]
y_test=eeg_test_data['Class']
#X_train, X_test, y_train, y_test = train_test_split(x,y, random_state=0)
#scaler = StandardScaler()
#X_train_scaled = scaler.fit(X_train).transform(X_train)
#X_test_scaled = scaler.fit(X_test).transform(X_test)
#print(X_train)
#print(X_test)
#print(y_train)
#print(y_test)
print('ok')
#mlp = MLPClassifier(hidden_layer_sizes=(50,40,30),max_iter=1000,random_state=42)
#mlp = MLPClassifier(max_iter=1000,alpha=1,random_state=42)
sv = svm.SVC(kernel='linear')
print('ok')
sv.fit(X_train, y_train)
print('ok')
#print('Accuracy of the training set: {:.2f}'.format(mlp.score(x_train,y_train)*100)+ ' %')
#print('Accuracy of the test set: {:.2f}'.format(mlp.score(x_test,y_test)*100)+' %')
#print('Accuracy on the training subset: {:.3f}'.format(sv.score(X_train, y_train)))
#print('ok')
print('Accuracy on the test subset: {:.3f}'.format(sv.score(X_test, y_test)))
predicted=sv.predict(X_test)
print('completed')
#confusion=confusion_matrix(y_test, predicted, labels=["lefthand", "steady", "righthand"])
confusion=confusion_matrix(y_test, predicted, labels=["steady", "righthand"])
print(confusion)
print(classification_report(y_test, predicted))
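If the commented-out standardization above is re-enabled, the scaler should be fitted on the training split only and then applied to both splits, to avoid leaking test statistics; a sketch using the same variables as above:
scaler = StandardScaler().fit(X_train)        # fit on training data only
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)      # reuse the training statistics
sv.fit(X_train_scaled, y_train)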
|
[
"mmiah131145@bscse.uiu.ac.bd"
] |
mmiah131145@bscse.uiu.ac.bd
|
a7f8c158febbe9dc1db2d51925d25ffd77c09d63
|
f88dc13215e988a4bd8606add59605f9b7a001ef
|
/Gui/Test/SampleQDialog.py
|
558c7a1c9077f195dcdc12fff734bd01d6011cb5
|
[] |
no_license
|
whitePenumbra/SARdrone
|
ebcaf5b3f8577198f28af604b4aef18b549e227b
|
d91b957f04459340dd3b8ce701ca240ffff0b948
|
refs/heads/master
| 2020-12-18T11:42:40.529090
| 2020-06-29T03:34:59
| 2020-06-29T03:34:59
| 235,364,643
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SampleQDialog.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(444, 201)
Dialog.setMinimumSize(QtCore.QSize(444, 201))
Dialog.setMaximumSize(QtCore.QSize(444, 201))
Dialog.setWindowFlags(QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowCloseButtonHint)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(80, 40, 281, 71))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(160, 130, 121, 41))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Sample Text for QDialog"))
self.pushButton.setText(_translate("Dialog", "Close"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
[
"saballoamiel@gmail.com"
] |
saballoamiel@gmail.com
|
b7fea66f0718d33279aa223bcfb2ff4cdc38d704
|
ebb56da73d9585f83b088d6c9db98a1b525392ed
|
/server/storageusers/main/models.py
|
0c1aca1ef46ac25fb168b902f6ddbe5e8ee6738c
|
[] |
no_license
|
MuraHika/SplicingRNA
|
03cac8b348deb5031c4f3e9bb967b706455a26d6
|
c69c4f7f5dc73ef8376a86f719dd1d4cb9e7fb98
|
refs/heads/main
| 2023-05-12T05:10:17.807016
| 2021-06-03T19:44:17
| 2021-06-03T19:44:17
| 373,615,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,769
|
py
|
from django.db import models
from django.conf import settings
from django.core import validators
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
import jwt
import random
from datetime import datetime
from datetime import timedelta
class UserManager(BaseUserManager):
def _create_user(self, username, password=None, name=None, isTeacher=False, **extra_fields):
if not username:
            raise ValueError('The username must be set')
user = self.model(username=username, name=name, isTeacher=isTeacher, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, password=None, name=None, isTeacher=False, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, password, name, isTeacher, **extra_fields)
def create_superuser(self, username, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, password, **extra_fields)
def get_user_from_token(self, token):
_id = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])
return self.filter(id=_id['id'])
class MyUser(AbstractBaseUser, PermissionsMixin):
username = models.CharField(db_index=True, max_length=255, unique=True)
name = models.CharField(max_length=255, null=True)
isTeacher = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ()
objects = UserManager()
def __str__(self):
return self.username
@property
def token(self):
return self._generate_jwt_token()
def _generate_jwt_token(self):
dt = datetime.now() + timedelta(days=60)
token = jwt.encode({
'id': self.pk,
'exp': int((dt-datetime(1970,1,1)).total_seconds())
}, settings.SECRET_KEY, algorithm='HS256')
# print(token)
return token
class Content(models.Model):
text = models.TextField(null=True, blank=True)
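A small sketch of the JWT round trip implemented above, assuming Django settings (including SECRET_KEY) are configured and migrations have run; the user details are placeholders:
user = MyUser.objects.create_user(username='alice', password='pw', name='Alice')
token = user.token                                        # HS256-signed, 60-day expiry
same_user_qs = MyUser.objects.get_user_from_token(token)  # queryset filtered by id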
|
[
"kima.bright@mail.ru"
] |
kima.bright@mail.ru
|
30debb2b661504201123dbaf25d48166d3d2ceed
|
2776195dc0863f5e43c5394767f1f950ce7672bb
|
/twitchbot/command_whitelist.py
|
72f3c340143f0e18145e5165b26a8fbb963d21f3
|
[
"MIT"
] |
permissive
|
sharkbound/PythonTwitchBotFramework
|
a5e6f55c89a0639cb8e3dd16b99bb6388ee5f5f8
|
3d9aff994d531272d53b869c3dac6602b04a9d70
|
refs/heads/master
| 2023-09-04T06:34:44.456338
| 2023-08-16T21:32:58
| 2023-08-16T21:32:58
| 134,095,615
| 111
| 47
|
MIT
| 2023-09-14T20:40:04
| 2018-05-19T20:24:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
from typing import List
from .config import cfg
__all__ = [
'command_whitelist_enabled',
'whitelisted_commands',
'remove_command_from_whitelist',
'add_command_to_whitelist',
'is_command_whitelisted',
'reload_whitelisted_commands',
'send_message_on_command_whitelist_deny',
]
def send_message_on_command_whitelist_deny():
return cfg.send_message_on_command_whitelist_deny
def command_whitelist_enabled() -> bool:
return cfg.use_command_whitelist
def whitelisted_commands() -> List[str]:
return cfg.command_whitelist
def add_command_to_whitelist(cmd_name: str, save: bool = True):
cmd_name = cmd_name.lower()
cmds = whitelisted_commands()
if cmd_name not in cmds:
cmds.append(cmd_name)
if save:
cfg.save()
def remove_command_from_whitelist(cmd_name: str, save: bool = True):
cmd_name = cmd_name.lower()
cmds = whitelisted_commands()
if cmd_name in cmds:
cmds.remove(cmd_name)
if save:
cfg.save()
def is_command_whitelisted(cmd_name: str):
if not command_whitelist_enabled():
return True
return cmd_name.lower() in whitelisted_commands()
def reload_whitelisted_commands():
cfg.load()
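Typical use from bot code, assuming the configuration behind `cfg` is present (illustrative):
add_command_to_whitelist('help')              # persists via cfg.save()
if not is_command_whitelisted('ban'):
    print('command not whitelisted')
remove_command_from_whitelist('help', save=False)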
|
[
"laptopblaster@gmail.com"
] |
laptopblaster@gmail.com
|
9c140560e878ac2eb94474966729d562927829a5
|
1302a9807b75ec383a9dd7608a62910533fa0a10
|
/lung_cancer_detection/data/nodule.py
|
3c0edc979521420c1cad9dde9fca7f11518aa528
|
[] |
no_license
|
HanXiaoyou/lung-cancer-detection
|
b0e8d9bbcc2289a8e4ca62a865c0f26e3417cdf2
|
a5ff17aaeb420236456b45fb6eda0e9788a9728c
|
refs/heads/master
| 2023-06-24T20:16:45.770284
| 2021-07-28T09:03:04
| 2021-07-28T09:03:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,515
|
py
|
import os
import random
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple
import numpy as np
import pytorch_lightning as pl
from monai.data import Dataset, PersistentDataset, list_data_collate
from monai.transforms import (AddChanneld, CenterSpatialCropd, Compose,
LoadImaged, MapLabelValued, RandAffined, ScaleIntensityd,
SelectItemsd, Spacingd, SpatialPadd, ToTensord)
from monai.utils import set_determinism
from torch.utils.data import DataLoader
from .reader import LIDCReader
class ClassificationDataModule(pl.LightningDataModule):
def __init__(self,
data_dir: Path,
cache_dir: Path,
splits: Sequence[Sequence[Dict]],
target: str = "malignancy",
min_anns: int = 3,
exclude_labels: Sequence[int] = [3],
label_mapping: Tuple[Sequence[int]] = (
[1, 2, 4, 5], [0, 0, 1, 1]),
batch_size: int = 16,
spacing: Sequence[float] = (1.5, 1.5, 2.0),
roi_size: Sequence[int] = [40, 40, 30],
aug_prob: float = 0.0,
seed: int = 47):
"""Handles all things data related for classifying lung nodules from the LIDC-IDRI dataset. Adheres to the PyTorch Lightning DataModule interface.
Args:
data_dir (Path): Directory with preprocessed LIDC dataset, as outputted by `preprocess_data` script.
cache_dir (Path): Directory where deterministic transformations of input samples will be cached.
splits (Sequence[Dict]): Dictionaries containing metadata of training and validation sets. See `split_data` script for more information.
target (str): Target variable, as denoted in splits dictionary. Defaults to malignancy.
            min_anns (int): Minimum number of annotations required for including nodule. Defaults to 3.
exclude_labels (Sequence[int]): Label values to exclude in dataset.
label_mapping (Tuple[Sequence[int]]): Label mapping for discretization.
batch_size (int, optional): Batch size for training and validation. Defaults to 16.
spacing (Sequence[float], optional): Pixel spacing (in mm) that inputs will be transformed into. Defaults to (1.5, 1.5, 2.0).
roi_size (Sequence[int], optional): Shape that inputs will be transformed into. Defaults to [40, 40, 30].
aug_prob (float): Probability of applying random data augmentation. Defaults to 0.0.
seed (int, optional): Random seed for transformations etc. Defaults to 47.
"""
super().__init__()
self.data_dir = data_dir
self.cache_dir = cache_dir
self.splits = splits
self.batch_size = batch_size
self.spacing = spacing
self.roi_size = roi_size
self.seed = seed
self.target = target
self.min_anns = min_anns
self.exclude_labels = exclude_labels
self.label_mapping = label_mapping
self.aug_prob = aug_prob
self.hparams = {
"batch_size": self.batch_size,
"spacing": self.spacing,
"roi_size": self.roi_size,
"seed": self.seed,
"target": self.target,
"min_anns": self.min_anns,
"exclude_labels": self.exclude_labels,
"label_mapping": self.label_mapping,
}
reader = LIDCReader(self.data_dir, nodule_mode=True)
self.train_transforms = Compose([
LoadImaged(keys=["image"], reader=reader),
AddChanneld(keys=["image"]),
Spacingd(keys=["image"], pixdim=self.spacing, mode="bilinear"),
ScaleIntensityd(keys=["image"]),
SpatialPadd(keys=["image"], spatial_size=self.roi_size,
mode="constant"),
CenterSpatialCropd(keys=["image"], roi_size=self.roi_size),
MapLabelValued(keys=["label"], orig_labels=self.label_mapping[0],
target_labels=self.label_mapping[1]),
RandAffined(
keys=["image"],
spatial_size=self.roi_size,
prob=self.aug_prob,
mode="bilinear",
rotate_range=(np.pi/18, np.pi/18, np.pi/4),
scale_range=(0.1, 0.1, 0.1),
padding_mode="border",
),
ToTensord(keys=["image", "label"]),
SelectItemsd(keys=["image", "label"]),
])
self.val_transforms = Compose([
LoadImaged(keys=["image"], reader=reader),
AddChanneld(keys=["image"]),
Spacingd(keys=["image"], pixdim=self.spacing, mode="bilinear"),
ScaleIntensityd(keys=["image"]),
SpatialPadd(keys=["image"], spatial_size=self.roi_size,
mode="constant"),
CenterSpatialCropd(keys=["image"], roi_size=self.roi_size),
MapLabelValued(keys=["label"], orig_labels=self.label_mapping[0],
target_labels=self.label_mapping[1]),
ToTensord(keys=["image", "label"]),
SelectItemsd(keys=["image", "label"]),
])
return
def prepare_data(self):
"""Not needed in current library version.
"""
return
def setup(self, stage: Optional[str] = None):
"""Creates persistent training and validation sets based on provided splits.
Args:
stage (Optional[str], optional): Stage (e.g., "fit", "eval") for more efficient setup. Defaults to None.
"""
set_determinism(seed=self.seed)
if stage == "fit" or stage is None:
train_scans, val_scans = self.splits
self.train_dicts = [
{"image": nod["image"], "label": nod[self.target]} for
scan in train_scans for nod in scan["nodules"] if
nod["annotations"] >= self.min_anns and nod[self.target] not in
self.exclude_labels
]
self.val_dicts = [
{"image": nod["image"], "label": nod[self.target]} for
scan in val_scans for nod in scan["nodules"] if
nod["annotations"] >= self.min_anns and nod[self.target] not in
self.exclude_labels
]
self.train_ds = PersistentDataset(
self.train_dicts, transform=self.train_transforms,
cache_dir=self.cache_dir)
self.val_ds = PersistentDataset(
self.val_dicts, transform=self.val_transforms,
cache_dir=self.cache_dir)
return
def train_dataloader(self) -> DataLoader:
"""Creates training data loader.
Returns:
DataLoader: PyTorch data loader
"""
return DataLoader(self.train_ds, batch_size=self.batch_size,
shuffle=True, num_workers=os.cpu_count(),
collate_fn=list_data_collate)
def val_dataloader(self) -> DataLoader:
"""Creates validation data loader.
Returns:
DataLoader: PyTorch data loader
"""
return DataLoader(self.val_ds, batch_size=self.batch_size,
shuffle=True, num_workers=os.cpu_count(),
collate_fn=list_data_collate)
def test_dataloader(self):
"""Not needed in current library version.
"""
return
def query_by_label(self, split: str = "train", n: int = 20, labels: Sequence[int] =
None, sort: bool = True) -> Dataset:
"""Returns data sample containing nodules which match the given labels.
Args:
split (str): Data split to query. Defaults to training set.
n (int): Number of samples to return. Defaults to 20.
labels (Sequence[int]): Only return samples with given labels.
sort (bool): Whether to sort returned samples by label. Defaults to
true.
Returns:
Dataset: Dataset containing samples. Transformations depend on
which split was used.
"""
ds = self.train_ds if split == "train" else self.val_ds
if labels:
ds = [item for item in ds if int(item["label"]) in labels]
if n:
ds = ds[:n]
return ds
def query_by_case(self, patient_id: str) -> Dataset:
"""Return nodule volumes for one specific case.
Args:
patient_id (str): Patient ID of desired case.
Returns:
Dataset: Dataset containing case nodules.
"""
train_cases, valid_cases = self.splits
train_pids = [case["pid"] for case in train_cases]
valid_pids = [case["pid"] for case in valid_cases]
if patient_id in train_pids:
data_dict = [
{"image": nod["image"], "label": nod[self.target]} for
case in train_cases if case["pid"] == patient_id for nod in case["nodules"]
]
elif patient_id in valid_pids:
data_dict = [
{"image": nod["image"], "label": nod[self.target]} for
case in valid_cases if case["pid"] == patient_id for nod in case["nodules"]
]
else:
raise ValueError("Case with given ID could not be found.")
return Dataset(data_dict, transform=self.val_transforms)
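A minimal instantiation sketch, assuming a preprocessed LIDC directory and a `splits` pair as produced by the project's `split_data` script; all paths and the split variables are placeholders:
from pathlib import Path

dm = ClassificationDataModule(
    data_dir=Path('data/lidc'),          # hypothetical preprocessed dataset
    cache_dir=Path('data/cache'),
    splits=(train_scans, val_scans),     # hypothetical output of split_data
    batch_size=8,
)
dm.setup(stage='fit')
train_loader = dm.train_dataloader()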
|
[
"felix@felixpeters.me"
] |
felix@felixpeters.me
|
e019a88e40c96e48c5c022e57cc08499dc193e39
|
d6c117812a618ff34055488337aaffea8cf81ca1
|
/scenes/SceneMovingComplexObjects.py
|
72215bb54c43b5a908ca09beb9e2a058a32f6954
|
[] |
no_license
|
c0ns0le/Pythonista
|
44829969f28783b040dd90b46d08c36cc7a1f590
|
4caba2d48508eafa2477370923e96132947d7b24
|
refs/heads/master
| 2023-01-21T19:44:28.968799
| 2016-04-01T22:34:04
| 2016-04-01T22:34:04
| 55,368,932
| 3
| 0
| null | 2023-01-22T01:26:07
| 2016-04-03T21:04:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
# coding: utf-8
# https://forum.omz-software.com/topic/2531/animation-of-complex-objects
from scene import *
import ui
class MyScene(Scene):
def draw(self):
startx = 20
starty = 20
length = 100
width = 200
#simple shape
# begin location
fill(.5,.5,.5)
rect(startx, starty, width, length )
fill(0,1,0)
rect(startx*2, starty, width/2, length/2)
fill(1,0,0)
ellipse(startx*2, starty*2, 10,10)
ellipse(startx*8, starty*2, 10,10)
def touch_began(self, touch):
#end location
print touch.location.x, touch.location.y
push_matrix()
scale(1.5, 1.5)
translate(touch.location.x, touch.location.y)
rotate(180)
pop_matrix()
class SceneViewer(ui.View):
def __init__(self, in_scene):
self.present('fullscreen')
self.scene_view = SceneView(frame=self.bounds)
self.scene_view.scene = in_scene
self.add_subview(self.scene_view)
SceneViewer(MyScene())
|
[
"itdamdouni@gmail.com"
] |
itdamdouni@gmail.com
|
b33d12b7b484a4ab60fb7387c55714c1af7da98b
|
3ed6c8176ece7d368f388f9598c87e9b0735d287
|
/Q12.py
|
bac7ff51625a815adb3b35631caab1d8529a06e5
|
[] |
no_license
|
PhamKhoa96/pandas
|
8373e3494c54427ca8b94ad2b2f4b9ca5c59f253
|
59a31291e6b9f2938ca74f92a451cf88f0037b9c
|
refs/heads/master
| 2022-12-17T22:07:45.413536
| 2020-09-22T09:40:15
| 2020-09-22T09:40:15
| 297,603,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 18:08:11 2020
@author: phamk
"""
import numpy as np
import pandas as pd
df = pd.read_excel('Superstore.xls')
#df = pd.read_csv('Superstore.csv')
df2 = df.pivot_table(index=['Category' , 'Sub-Category'], aggfunc={'Sales': np.sum, 'Profit': np.sum})
df2['Ratio'] = df2['Profit']/df2['Sales']
print(df2)
|
[
"="
] |
=
|
b4c9caae26d89dc0881150a7570cc45e61117797
|
2d5e825aeadffd9b99e7ad27d18324e791bff609
|
/source/openwarpgui/bundled/paraview.app/Contents/Python/vtk/web/testing.py
|
2003ce26b02f49bf8db7ed8febe101c26a49b51e
|
[
"Apache-2.0"
] |
permissive
|
binod65/OpenWARP
|
fe6ea495be8a8db9f74b52bd1335e10e86fe8c87
|
566c024def1695f196124287cc41976f1e609983
|
refs/heads/master
| 2021-01-18T12:29:24.685442
| 2016-09-24T14:21:13
| 2016-09-24T14:21:13
| 68,737,943
| 0
| 0
| null | 2016-09-24T14:24:32
| 2016-09-20T17:46:11
|
Python
|
UTF-8
|
Python
| false
| false
| 29,542
|
py
|
r"""
This module provides some testing functionality for paraview and
vtk web applications. It provides the ability to run an arbitrary
test script in a separate thread and communicate the results back
to the service so that the CTest framework can be notified of the
success or failure of the test.
This test harness will notice when the test script has finished
running and will notify the service to stop. At this point, the
test results will be checked in the main thread which ran the
service, and in the case of failure an exeception will be raised
to notify CTest of the failure.
Test scripts need to follow some simple rules in order to work
within the test harness framework:
1) implement a function called "runTest(args)", where the args
parameter contains all the arguments given to the web application
upon starting. Among other important items, args will contain the
port number where the web application is listening.
2) import the testing module so that the script has access to
the functions which indicate success and failure. Also the
testing module contains convenience functions that might be of
use to the test scripts.
from vtk.web import testing
3) Call the "testPass(testName)" or "testFail(testName)" functions
from within the runTest(args) function to indicate to the framework
whether the test passed or failed.
"""
import_warning_info = ""
test_module_comm_queue = None
import vtk
import server
# Try standard Python imports
try :
import os, re, time, datetime, threading, imp, inspect, Queue, types, io
except :
import_warning_info += "\nUnable to load at least one basic Python module"
# Image comparison imports
try:
try:
from PIL import Image
except ImportError:
import Image
except:
raise
import base64
import itertools
except:
import_warning_info += "\nUnable to load at least one modules necessary for image comparison"
# Browser testing imports
try :
import selenium
from selenium import webdriver
except :
import_warning_info += "\nUnable to load at least one module necessary for browser tests"
# HTTP imports
try :
import requests
except :
import_warning_info += "\nUnable to load at least one module necessary for HTTP tests"
# Define some infrastructure to support different (or no) browsers
test_module_browsers = ["firefox", "chrome", "internet_explorer", "safari", "nobrowser"]
class TestModuleBrowsers:
firefox, chrome, internet_explorer, safari, nobrowser = range(5)
# =============================================================================
# We can use this exception type to indicate that the test shouldn't actually
# "fail", rather that it was unable to run because some dependencies were not
# met.
# =============================================================================
class DependencyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# =============================================================================
# Checks whether test script supplied, if so, safely imports needed modules
# =============================================================================
def initialize(opts, reactor=None) :
"""
This function should be called to initialize the testing module. The first
important thing it does is to store the options for later, since the
startTestThread function will need them. Then it checks the arguments that
were passed into the server to see if a test was actually requested, making
a note of this fact. Then, if a test was required, this function then
checks if all the necessary testing modules were safely imported, printing
a warning if not. If tests were requested and all modules were present,
then this function sets "test_module_do_testing" to True and sets up the
startTestThread function to be called after the reactor is running.
"""
global import_warning_info
global testModuleOptions
testModuleOptions = opts
# Check if a test was actually requested
if (testModuleOptions.testScriptPath != "" and testModuleOptions.testScriptPath is not None) :
# Check if we ran into trouble with any of the testing imports
if import_warning_info != "" :
print "WARNING: Some tests may have unmet dependencies"
print import_warning_info
if reactor is not None :
# Add startTest callback to the reactor callback queue, so that
# the test thread get started after the reactor is running. Of
# course this should only happen if everything is good for tests.
reactor.callWhenRunning(_start_test_thread)
else :
# Otherwise, our aim is to start the thread from another process
# so just call the start method.
_start_test_thread()
# =============================================================================
# Grab out the command-line arguments needed for by the testing module.
# =============================================================================
def add_arguments(parser) :
"""
This function retrieves any command-line arguments that the client-side
tester needs. In order to run a test, you will typically just need the
following:
--run-test-script => This should be the full path to the test script to
be run.
--baseline-img-dir => This should be the 'Baseline' directory where the
baseline images for this test are located.
--test-use-browser => This should be one of the supported browser types,
or else 'nobrowser'. The choices are 'chrome', 'firefox', 'internet_explorer',
'safari', or 'nobrowser'.
"""
parser.add_argument("--run-test-script",
default="",
help="The path to a test script to run",
dest="testScriptPath")
parser.add_argument("--baseline-img-dir",
default="",
help="The path to the directory containing the web test baseline images",
dest="baselineImgDir")
parser.add_argument("--test-use-browser",
default="nobrowser",
help="One of 'chrome', 'firefox', 'internet_explorer', 'safari', or 'nobrowser'.",
dest="useBrowser")
parser.add_argument("--temporary-directory",
default=".",
help="A temporary directory for storing test images and diffs",
dest="tmpDirectory")
parser.add_argument("--test-image-file-name",
default="",
help="Name of file in which to store generated test image",
dest="testImgFile")
# =============================================================================
# Initialize the test client
# =============================================================================
def _start_test_thread() :
"""
This function checks whether testing is required and if so, sets up a Queue
    for the purpose of communicating with the thread. Then it starts the test
    thread, which runs the test once the server is up.
"""
global test_module_comm_queue
test_module_comm_queue = Queue.Queue()
t = threading.Thread(target=launch_web_test,
args = [],
kwargs = { 'serverOpts': testModuleOptions,
'commQueue': test_module_comm_queue,
'serverHandle': server,
'testScript': testModuleOptions.testScriptPath })
t.start()
# =============================================================================
# Test scripts call this function to indicate passage of their test
# =============================================================================
def test_pass(testName) :
"""
Test scripts should call this function to indicate that the test passed. A
note is recorded that the test succeeded, and is checked later on from the
main thread so that CTest can be notified of this result.
"""
global test_module_comm_queue
resultObj = { testName: 'pass' }
test_module_comm_queue.put(resultObj)
# =============================================================================
# Test scripts call this function to indicate failure of their test
# =============================================================================
def test_fail(testName) :
"""
Test scripts should call this function to indicate that the test failed. A
note is recorded that the test did not succeed, and this note is checked
later from the main thread so that CTest can be notified of the result.
The main thread is the only one that can signal test failure in
CTest framework, and the main thread won't have a chance to check for
passage or failure of the test until the main loop has terminated. So
here we just record the failure result, then we check this result in the
processTestResults() function, throwing an exception at that point to
indicate to CTest that the test failed.
"""
global test_module_comm_queue
resultObj = { testName: 'fail' }
test_module_comm_queue.put(resultObj)
# =============================================================================
# Concatenate any number of strings into a single path string.
# =============================================================================
def concat_paths(*pathElts) :
"""
A very simple convenience function so that test scripts can build platform
independent paths out of a list of elements, without having to import the
os module.
pathElts: Any number of strings which should be concatenated together
in a platform independent manner.
"""
return os.path.join(*pathElts)
# =============================================================================
# So we can change our time format in a single place, this function is
# provided.
# =============================================================================
def get_current_time_string() :
"""
This function returns the current time as a string, using ISO 8601 format.
"""
return datetime.datetime.now().isoformat(" ")
# =============================================================================
# Uses vtkTesting to compare images. According to comments in the vtkTesting
# C++ code (and this seems to work), if there are multiple baseline images in
# the same directory as the baseline_img, and they follow the naming pattern:
# 'img.png', 'img_1.png', ... , 'img_N.png', then all of these images will be
# tried for a match.
# =============================================================================
def compare_images(test_img, baseline_img, tmp_dir="."):
"""
This function creates a vtkTesting object, and specifies the name of the
baseline image file, using a fully qualified path (baseline_img must be
fully qualified). Then it calls the vtkTesting method which compares the
image (test_img, specified only with a relative path) against the baseline
image as well as any other images in the same directory as the baseline
image which follow the naming pattern: 'img.png', 'img_1.png', ... , 'img_N.png'
    test_img: File name of output image to be compared against baseline.
baseline_img: Fully qualified path to first of the baseline images.
tmp_dir: Fully qualified path to a temporary directory for storing images.
"""
# Create a vtkTesting object and specify a baseline image
t = vtk.vtkTesting()
t.AddArgument("-T")
t.AddArgument(tmp_dir)
t.AddArgument("-V")
t.AddArgument(baseline_img)
# Perform the image comparison test and print out the result.
return t.RegressionTest(test_img, 0.0)
# =============================================================================
# Provide a wait function
# =============================================================================
def wait_with_timeout(delay=None, limit=0, criterion=None):
"""
This function provides the ability to wait for a certain number of seconds,
or else to wait for a specific criterion to be met.
"""
for i in itertools.count():
if criterion is not None and criterion():
return True
elif delay * i > limit:
return False
else:
time.sleep(delay)
# =============================================================================
# Define a WebTest class with five stages of testing: initialization, setup,
# capture, postprocess, and cleanup.
# =============================================================================
class WebTest(object) :
"""
This is the base class for all automated web-based tests. It defines five
stages that any test must run through, and allows any or all of these
stages to be overridden by subclasses. This class defines the run_test
method to invoke the five stages overridden by subclasses, one at a time:
1) initialize, 2) setup, 3) capture, 4) postprocess, and 5) cleanup.
"""
class Abort:
pass
def __init__(self, url=None, testname=None, **kwargs) :
self.url = url
self.testname = testname
def run_test(self):
try:
self.checkdependencies()
self.initialize()
self.setup()
self.capture()
self.postprocess()
except WebTest.Abort:
# Placeholder for future option to return failure result
pass
except :
self.cleanup()
raise
self.cleanup()
def checkdependencies(self):
pass
def initialize(self):
pass
def setup(self):
pass
def capture(self):
pass
def postprocess(self):
pass
def cleanup(self):
pass
# =============================================================================
# Define a WebTest subclass designed specifically for browser-based tests.
# =============================================================================
class BrowserBasedWebTest(WebTest):
"""
This class can be used as a base for any browser-based web tests. It
introduces the notion of a selenium browser and overrides phases (1) and
(3), initialization and cleanup, of the test phases introduced in the base
class. Initialization involves selecting the browser type, setting the
browser window size, and asking the browser to load the url. Cleanup
involves closing the browser window.
"""
def __init__(self, size=None, browser=None, **kwargs):
self.size = size
self.browser = browser
self.window = None
WebTest.__init__(self, **kwargs)
def initialize(self):
try:
if self.browser is None or self.browser == TestModuleBrowsers.chrome:
self.window = webdriver.Chrome()
elif self.browser == TestModuleBrowsers.firefox:
self.window = webdriver.Firefox()
elif self.browser == TestModuleBrowsers.internet_explorer:
self.window = webdriver.Ie()
else:
raise DependencyError("self.browser argument has illegal value %r" % (self.browser))
except DependencyError as dErr:
raise
except Exception as inst:
raise DependencyError(inst)
if self.size is not None:
self.window.set_window_size(self.size[0], self.size[1])
if self.url is not None:
self.window.get(self.url)
def cleanup(self):
try:
self.window.quit()
except:
print 'Unable to call window.quit, perhaps this is expected because of unmet browser dependency.'
# =============================================================================
# Extend BrowserBasedWebTest to handle vtk-style image comparison
# =============================================================================
class ImageComparatorWebTest(BrowserBasedWebTest):
"""
This class extends browser based web tests to include image comparison. It
overrides the capture phase of testing with some functionality to simply
grab a screenshot of the entire browser window. It overrides the
postprocess phase with a call to vtk image comparison functionality.
Derived classes can then simply override the setup function with a series
of selenium-based browser interactions to create a complete test. Derived
classes may also prefer to override the capture phase to capture only
certain portions of the browser window for image comparison.
"""
def __init__(self, filename=None, baseline=None, temporaryDir=None, **kwargs):
if filename is None:
raise TypeError("missing argument 'filename'")
if baseline is None:
raise TypeError("missing argument 'baseline'")
BrowserBasedWebTest.__init__(self, **kwargs)
self.filename = filename
self.baseline = baseline
self.tmpDir = temporaryDir
def capture(self):
self.window.save_screenshot(self.filename)
def postprocess(self):
result = compare_images(self.filename, self.baseline, self.tmpDir)
if result == 1 :
test_pass(self.testname)
else :
test_fail(self.testname)
# =============================================================================
# Given a css selector to use in finding the image element, get the element,
# then base64 decode the "src" attribute and return it.
# =============================================================================
def get_image_data(browser, cssSelector) :
"""
This function takes a selenium browser and a css selector string and uses
them to find the target HTML image element. The desired image element
    should contain its image data as a Base64-encoded JPEG image string.
The 'src' attribute of the image is read, Base64-decoded, and then
returned.
browser: A selenium browser instance, as created by webdriver.Chrome(),
for example.
cssSelector: A string containing a CSS selector which will be used to
find the HTML image element of interest.
"""
# Here's maybe a better way to get at that image element
imageElt = browser.find_element_by_css_selector(cssSelector)
# Now get the Base64 image string and decode it into image data
base64String = imageElt.get_attribute("src")
b64RegEx = re.compile(ur'data:image/jpeg;base64,(.+)', re.UNICODE)
b64Matcher = b64RegEx.match(base64String)
imgdata = base64.b64decode(b64Matcher.group(1))
return imgdata
# =============================================================================
# Combines a variation on above function with the write_image_to_disk function.
# converting jpg to png in the process, if necessary.
# =============================================================================
def save_image_data_as_png(browser, cssSelector, imgfilename) :
"""
This function takes a selenium browser instance, a css selector string,
and a file name. It uses the css selector string to finds the target HTML
Image element, which should contain a Base64 encoded JPEG image string,
it decodes the string to image data, and then saves the data to the file.
The image type of the written file is determined from the extension of the
provided filename.
browser: A selenium browser instance as created by webdriver.Chrome(),
for example.
cssSelector: A string containing a CSS selector which will be used to
find the HTML image element of interest.
imgFilename: The filename to which to save the image. The extension is
used to determine the type of image which should be saved.
"""
imageElt = browser.find_element_by_css_selector(cssSelector)
base64String = imageElt.get_attribute("src")
b64RegEx = re.compile(ur'data:image/jpeg;base64,(.+)', re.UNICODE)
b64Matcher = b64RegEx.match(base64String)
img = Image.open(io.BytesIO(base64.b64decode(b64Matcher.group(1))))
img.save(imgfilename)
# =============================================================================
# Given a decoded image and the full path to a file, write the image to the
# file.
# =============================================================================
def write_image_to_disk(imgData, filePath) :
"""
This function takes an image data, as returned by this module's
get_image_data() function for example, and writes it out to the file given by
the filePath parameter.
imgData: An image data object
filePath: The full path, including the file name and extension, where
the image should be written.
"""
with open(filePath, 'wb') as f:
f.write(imgData)
# =============================================================================
# There could be problems if the script file has more than one class defn which
# is a subclass of vtk.web.testing.WebTest, so we should write some
# documentation to help people avoid that.
# =============================================================================
def instantiate_test_subclass(pathToScript, **kwargs) :
"""
This function takes the fully qualified path to a test file, along with
any needed keyword arguments, then dynamically loads the file as a module
and finds the test class defined inside of it via inspection. It then
    uses the keyword arguments to instantiate the test class and return the
instance.
pathToScript: Fully qualified path to python file containing defined
subclass of one of the test base classes.
kwargs: Keyword arguments to be passed to the constructor of the
testing subclass.
"""
# Load the file as a module
moduleName = imp.load_source('dynamicTestModule', pathToScript)
instance = None
# Inspect dynamically loaded module members
for name, obj in inspect.getmembers(moduleName) :
# Looking for classes only
if inspect.isclass(obj) :
instance = obj.__new__(obj)
# And only classes defined in the dynamically loaded module
if instance.__module__ == 'dynamicTestModule' :
try :
instance.__init__(**kwargs)
break;
except Exception as inst:
print 'Caught exception: ' + str(type(inst))
print inst
raise
return instance
# =============================================================================
# For testing purposes, define a function which can interact with a running
# paraview or vtk web application service.
# =============================================================================
def launch_web_test(*args, **kwargs) :
"""
This function loads a python file as a module (with no package), and then
instantiates the class it must contain, and finally executes the run_test()
method of the class (which the class may override, but which is defined in
both of the testing base classes, WebTest and ImageComparatorBaseClass).
After the run_test() method finishes, this function will stop the web
server if required. This function expects some keyword arguments will be
    present in order for it to complete its task:
kwargs['serverHandle']: A reference to the vtk.web.server should be
passed in if this function is to stop the web service after the test
is finished. This should normally be the case.
kwargs['serverOpts']: An object containing all the parameters used
to start the web service. Some of them will be used in the test script
in order perform the test. For example, the port on which the server
was started will be required in order to connect to the server.
kwargs['testScript']: The full path to the python file containing the
testing subclass.
"""
serverHandle = None
serverOpts = None
testScriptFile = None
# If we got one of these, we'll use it to stop the server afterward
if 'serverHandle' in kwargs :
serverHandle = kwargs['serverHandle']
# This is really the thing all test scripts will need: access to all
# the options used to start the server process.
if 'serverOpts' in kwargs :
serverOpts = kwargs['serverOpts']
# print 'These are the serverOpts we got: '
# print serverOpts
# Get the full path to the test script
if 'testScript' in kwargs :
testScriptFile = kwargs['testScript']
testName = 'unknown'
# Check for a test file (python file)
if testScriptFile is None :
print 'No test script file found, no test script will be run.'
test_fail(testName)
# The test name will be generated from the python script name, so
# match and capture a bunch of contiguous characters which are
# not '.', '\', or '/', followed immediately by the string '.py'.
fnamePattern = re.compile('([^\.\/\\\]+)\.py')
fmatch = re.search(fnamePattern, testScriptFile)
if fmatch :
testName = fmatch.group(1)
else :
        print 'Unable to parse testScriptFile (' + str(testScriptFile) + '), no test will be run'
test_fail(testName)
# If we successfully got a test name, we are ready to try and run the test
if testName != 'unknown' :
# Output file and baseline file names are generated from the test name
imgFileName = testName + '.png'
knownGoodFileName = concat_paths(serverOpts.baselineImgDir, imgFileName)
tempDir = serverOpts.tmpDirectory
testImgFileName = serverOpts.testImgFile
testBrowser = test_module_browsers.index(serverOpts.useBrowser)
# Now try to instantiate and run the test
try :
testInstance = instantiate_test_subclass(testScriptFile,
testname=testName,
host=serverOpts.host,
port=serverOpts.port,
browser=testBrowser,
filename=testImgFileName,
baseline=knownGoodFileName,
temporaryDir=tempDir)
# If we were able to instantiate the test, run it, otherwise we
# consider it a failure.
if testInstance is not None :
try:
testInstance.run_test()
except DependencyError as derr:
# TODO: trigger return SKIP_RETURN_CODE when CMake 3 is required
print 'Some dependency of this test was not met, allowing it to pass'
test_pass(testName)
else :
print 'Unable to instantiate test instance, failing test'
test_fail(testName)
return
except Exception as inst :
import sys, traceback
tb = sys.exc_info()[2]
print 'Caught an exception while running test script:'
print ' ' + str(type(inst))
print ' ' + str(inst)
print ' ' + ''.join(traceback.format_tb(tb))
test_fail(testName)
# If we were passed a server handle, then use it to stop the service
if serverHandle is not None :
serverHandle.stop_webserver()
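# A minimal usage sketch (hypothetical names: 'server' and 'opts' stand in
# for the vtk.web server handle and the parsed startup options created
# elsewhere in the harness):
#
#   launch_web_test(serverHandle=server,
#                   serverOpts=opts,
#                   testScript='/path/to/my_web_test.py')
#
# The test name is derived from the script's basename ('my_web_test' here),
# so the baseline image is expected at <baselineImgDir>/my_web_test.png.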
# =============================================================================
# To keep the service module clean, we'll process the test results here, given
# the test result object we generated in "launch_web_test". It is
# passed back to this function after the service has completed. Failure of
# the test is indicated by raising an exception in here.
# =============================================================================
def finalize() :
"""
This function checks the module's global test_module_comm_queue variable for a
test result. If one is found and the result is 'fail', then this function
raises an exception to communicate the failure to the CTest framework.
In order for a test result to be found in the test_module_comm_queue variable,
the test script must have called either the test_pass or test_fail functions
provided by this test module before returning.
"""
global test_module_comm_queue
if test_module_comm_queue is not None :
resultObject = test_module_comm_queue.get()
failedATest = False
for testName in resultObject :
testResult = resultObject[testName]
if testResult == 'fail' :
print ' Test -> ' + testName + ': ' + testResult
failedATest = True
if failedATest is True :
raise Exception("At least one of the requested tests failed. " +
"See detailed output, above, for more information")
|
[
"binit92"
] |
binit92
|
324129f2df389187ece896e98fe0893e6c5bb427
|
062aa3b492cee3042a50b3e1ed524b49452aa95d
|
/apps/budgets/models.py
|
da184b62c0e921944c733cc3f72cacd736b501a1
|
[] |
no_license
|
andresow/SICAPD
|
c3258767aba620c661305fe84ed36f96b42d8f2b
|
ab047460325c048044f9a85098052129ce1c2ffc
|
refs/heads/main
| 2023-02-04T18:17:28.733526
| 2020-10-02T14:06:20
| 2020-10-02T14:06:20
| 300,633,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,427
|
py
|
from django.db import models
class Bussines(models.Model):
# user = models.ForeignKey(Nodes, null=True, blank=True, on_delete=models.CASCADE)
name = models.CharField(max_length=100, unique=True)
nit = models.CharField(max_length=100, unique=True)
description = models.TextField()
address = models.CharField(max_length=100)
phone = models.CharField(max_length=20)
representative = models.CharField(max_length=100)
rubroPattern = models.CharField(max_length=100)
accountPattern = models.CharField(max_length=100)
def __str__(self):
return '{}'.format(self.name)
class AccountPeriod(models.Model):
CHOICES = [('Activo','Activo'),('Inactivo', 'Inactivo')]
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
state = models.CharField(max_length=100,choices=CHOICES)
initialDate = models.DateField()
finalDate = models.DateField()
def __str__(self):
return '{}'.format(self.name)
class Origin(models.Model):
# Optionally, the business could be linked here
# bussines = models.ForeignKey(Bussines, null=False, blank=True, on_delete=models.CASCADE)
accountPeriod = models.ForeignKey(AccountPeriod, null=False, blank=True, on_delete=models.CASCADE)
nameOrigin = models.CharField(max_length=100)
codeOrigin = models.CharField(max_length=100)
descriptionOrigin = models.TextField()
orderOrigin = models.IntegerField()
finalDateOrigin = models.DateField()
def __str__(self):
return '{}'.format(self.nameOrigin)
class Operation(models.Model):
CHOICES = [('+','+'),('-', '-'),('*', '*'),('/', '/')]
origin = models.ManyToManyField(Origin, blank=True)
codeOp = models.CharField(max_length=2)
nameOp = models.CharField(max_length=100)
descriptionOp = models.TextField()
operation = models.CharField(max_length=10,choices=CHOICES)
orderOp = models.IntegerField()
contraOperar = models.BigIntegerField(null=True, blank=True)
contraOrigin = models.BigIntegerField(null=True, blank=True)
# Optionally, the period could be linked here for convenience
# accountPeriod = models.ForeignKey(AccountPeriod, null=False, blank=True, on_delete=models.CASCADE)
class TypeAgreement(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
codeTA = models.CharField(max_length=100)
nameTA = models.CharField(max_length=100)
descriptionTA = models.TextField()
ordenTA = models.IntegerField()
validacionTA = models.CharField(max_length=100, blank=True)
mensajeTA = models.CharField(max_length=100, blank=True)
def __str__(self):
return '{}'.format(self.nameTA)
class Agreement(models.Model):
origin = models.ForeignKey(Origin, null=True, blank=True, on_delete=models.CASCADE)
numberAg = models.BigIntegerField()
descriptionAg = models.TextField()
dateAg = models.DateField()
typeAgreement = models.ForeignKey(TypeAgreement, null=False, blank=True, on_delete=models.CASCADE)
class Inform(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
nameI = models.CharField(max_length=100)
category = models.CharField(max_length=100)
digitI = models.BigIntegerField()
class InformDetall(models.Model):
inform = models.ForeignKey(Inform, null=True, blank=True, on_delete=models.CASCADE)
codeInfD = models.CharField(max_length=100)
descriptionInfD = models.TextField()
activity = models.CharField(max_length=100)
class Rubro(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
origin = models.ForeignKey(Origin, null=True, blank=True, on_delete=models.CASCADE)
rubro = models.CharField(max_length=100)
rubroFather = models.BigIntegerField(null=True)
nivel = models.IntegerField()
description = models.TextField()
dateCreation = models.DateField()
initialBudget = models.BigIntegerField()
typeRubro = models.CharField(max_length=100)
inform = models.ManyToManyField(Inform)
informdetall = models.ManyToManyField(InformDetall)
realBudget = models.BigIntegerField()
imported = models.CharField(max_length=100, null=True)
class Movement(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
agreement = models.ForeignKey(Agreement, null=True, blank=True, on_delete=models.CASCADE)
nameRubro = models.BigIntegerField(null=True)
concept = models.CharField(max_length=100)
value = models.BigIntegerField()
balance = models.BigIntegerField()
date = models.DateField()
disponibility = models.BigIntegerField(null=True)
register = models.BigIntegerField(null=True)
obligation = models.BigIntegerField(null=True)
origin = models.ForeignKey(Origin, null=True, blank=True, on_delete=models.CASCADE)
observation = models.TextField(null=True)
class RubroMovement(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
value = models.BigIntegerField()
valueP = models.BigIntegerField()
balance = models.BigIntegerField()
date = models.DateField()
nameRubro = models.BigIntegerField()
movement = models.ForeignKey(Movement, null=True, blank=True, on_delete=models.CASCADE)
class RubroBalanceOperation(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
typeOperation = models.CharField(max_length=100)
value = models.BigIntegerField()
balance = models.BigIntegerField()
date = models.DateField()
nameRubro = models.BigIntegerField()
class Voucher(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
code = models.CharField(max_length=100)
name = models.CharField(max_length=100)
description = models.TextField()
order = models.IntegerField()
number = models.IntegerField()
category = models.CharField(max_length=100)
dateCreation = models.DateField()
class Third(models.Model):
CHOICES = [('Cédula de Ciudadanía', 'Cédula de Ciudadanía'), ('Cédula de Extranjería', 'Cédula de Extranjería'), ('Pasaporte', 'Pasaporte')]
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
typeIdentification = models.CharField(max_length=64, choices=CHOICES)
identification = models.BigIntegerField()
name = models.CharField(max_length=100)
surnames = models.CharField(max_length=100)
reason = models.CharField(max_length=100)
phone = models.CharField(max_length=20)
city = models.CharField(max_length=100)
class TypeContract(models.Model):
bussines = models.ForeignKey(Bussines, null=True, blank=True, on_delete=models.CASCADE)
nameTC = models.CharField(max_length=100)
description = models.TextField()
categoryTC = models.CharField(max_length=100)
digitsTC = models.BigIntegerField()
class TypeContractDetail(models.Model):
typeContract = models.ForeignKey(TypeContract, null=True, blank=True, on_delete=models.CASCADE)
codeTypeC = models.CharField(max_length=100)
descriptionTypeC = models.TextField()
activity = models.CharField(max_length=100)
|
[
"andres.serrato@correounivalle.edu.co"
] |
andres.serrato@correounivalle.edu.co
|
36b0dfedbde47dbdd0fbbb96eb13ca88de1d0468
|
4e21454de5c5b373fab45fe41c65ada8d13bffc8
|
/musiques/regexp/expreg1.py
|
64ce4c66d93c26de909dda6b76f498e9cde9c212
|
[] |
no_license
|
mamagalubru/python
|
5b8c03ff13ec34cabb01a92a36f5ce7f361e7655
|
e7cccec79257b5d1d14d70795fbeea6ebf1430ad
|
refs/heads/master
| 2021-08-24T10:31:02.526807
| 2017-12-09T06:55:26
| 2017-12-09T06:55:26
| 112,942,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
#!C:\Python34\python.exe
#-*- coding: utf-8 -*-
''' always start small and build brick by brick '''
# import the appropriate module
import re
# initialise the test string
string = "TEfdsST Test TEdSTtest TEST"
# initialise the search pattern
regexp = "(TEST)"
# check for a match
if re.match(regexp, string) is not None:
    # success
    print("Found")
    # search and display the captured group
    print(re.search(regexp, string).groups())
else:
    # failure
    print("Not found")
|
[
"noreply@github.com"
] |
mamagalubru.noreply@github.com
|
6a5a1e8d3edb3b03cd56bc7af1ff161a61def715
|
15248595074a9165402bff44ae21331b4dbf8025
|
/demo/nodes_data.py
|
057f2992d9c876f7065f9f2e99e18a787a4f1c69
|
[] |
no_license
|
Hyyudu/mgl_api
|
ecabd1802632768a63b6a07c6dafb5f48f1dc6c6
|
e3b8c16ad9f30b511727ad0fd4d62e0817715dfb
|
refs/heads/master
| 2020-03-11T16:13:13.960767
| 2018-08-18T14:59:01
| 2018-08-18T14:59:01
| 130,109,775
| 1
| 3
| null | 2018-08-16T18:25:33
| 2018-04-18T19:08:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
node_names = {"hull": "Корпус", "march_engine": "Маршевый двигатель", "shields": "Щиты"}
param_names = {
"radiation_def": "Радиационная защита",
"desinfect_level": "Уровень дезинфекции",
"mechanical_def": "Механическая защита",
"weight": "Масса",
"volume": "Объем",
"thrust": "Тяга",
"thrust_rev": "Реверсная тяга",
"thrust_acc": "Ускорение тяги",
"thrust_rev_acc": "Ускорение реверсной тяги",
"thrust_slow": "Сброс тяги",
"thrust_rev_slow": "Сброс реверсной тяги",
"heat_capacity": "Теплоемкость",
"heat_sink": "Теплоотвод",
"heat_prod": "Тепловыделение",
"reflection": "Уровень отражения",
}
node_params = {
123: {
"type": "hull",
"name": "Геракл-5Е",
"params": {
"weight": 2500,
"volume": 1200
},
"slots": {
"!": {1: 1},
"*": {2: 8, 3: 8, 4: 2},
"+": {2: 8, 3: 9, 4: 2, 5: 1}
}
},
124: {
"type": "shields",
"name": "Эгида-12",
"params": {
"radiation_def": 150,
"desinfect_level": 120,
"mechanical_def": 1000,
"reflection": 25,
"heat_capacity": 350000,
"heat_sink": 1200,
"weight": 200,
"volume": 100,
}
},
125: {
"type": "march_engine",
"name": "Ласточка-GT",
"params": {
"thrust": 1000,
"thrust_rev": 200,
"thrust_acc": 10,
"thrust_rev_acc": 50,
"thrust_slow": 30,
"thrust_rev_slow": 50,
"heat_prod": 30000,
"heat_sink": 800,
"weight": 120,
"volume": 80
}
}
}
|
[
"hyyudu@gmail.com"
] |
hyyudu@gmail.com
|
fa908ca1cbd019bdee113d7e0752453a93d32e45
|
52017ca16e38ccedda18053c374b82ab7961557c
|
/logfile2sqldb2.py
|
7f162959d84adbc98bb49d0cc474fa85a909522d
|
[] |
no_license
|
ubx/canlog-correct-ts
|
2ec7a38c0e8369d15e4ca743297891845c091fbf
|
53e91a3ee76c4a9cc98d6be88e63671fd9ef30f1
|
refs/heads/master
| 2023-08-01T14:18:55.087986
| 2021-09-14T09:26:32
| 2021-09-14T09:26:32
| 406,295,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
#!/usr/bin/env python
# coding: utf-8
"""
Import a can-bus logfile into sqlite3 db.
Note: don't forget to add an index to ts field:
sqlite3 -line log-data.db 'CREATE unique INDEX ts_idx ON messages (ts);'
"""
from __future__ import absolute_import, print_function
import sqlite3
import sys
import argparse
from datetime import datetime
import can
from can import LogReader, MessageSync
def my_logger(conn, messages):
conn.executemany("INSERT INTO messages VALUES (?, ?, ?, ?, ?, ?, ?)", messages)
conn.commit()
def main():
parser = argparse.ArgumentParser(
"python logfile2sql",
description="Import can-bus logfile into sqlite3 db.")
parser.add_argument('infile', metavar='input-file', type=str,
help='The file to read. For supported types see can.LogReader.')
parser.add_argument('outfile', metavar='output-file', type=str,
help='The file to write. For supported types see can.LogReader.')
parser.add_argument("-v", action="count", dest="verbosity",
help='''How much information do you want to see at the command line?
You can add several of these e.g., -vv is DEBUG''', default=2)
# print help message when no arguments were given
if len(sys.argv) < 2:
parser.print_help(sys.stderr)
import errno
raise SystemExit(errno.EINVAL)
results = parser.parse_args()
verbosity = results.verbosity
logging_level_name = ['critical', 'error', 'warning', 'info', 'debug', 'subdebug'][min(5, verbosity)]
can.set_logging_level(logging_level_name)
reader = LogReader(results.infile)
in_nosync = MessageSync(reader, timestamps=False, skip=3600)
print('Can LogReader (Started on {})'.format(datetime.now()))
conn = sqlite3.connect(results.outfile)
conn.cursor().execute("""
CREATE TABLE IF NOT EXISTS messages
(
ts REAL,
arbitration_id INTEGER,
extended INTEGER,
remote INTEGER,
error INTEGER,
dlc INTEGER,
data BLOB
)""")
conn.commit()
messages = []
m = 0
try:
for msg in in_nosync:
if verbosity >= 3:
print(msg)
messages.append((
msg.timestamp,
msg.arbitration_id,
msg.is_extended_id,
msg.is_remote_frame,
msg.is_error_frame,
msg.dlc,
memoryview(msg.data)))
if len(messages) >= 100_000:
my_logger(conn, messages)
m += len(messages)
print('Commits', m)
messages = []
except KeyboardInterrupt:
pass
finally:
reader.stop()
if len(messages) > 0:
my_logger(conn, messages)
conn.close()
if __name__ == "__main__":
main()
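# Example invocation (hypothetical file names; any format supported by
# can.LogReader will do for the input):
#   python logfile2sqldb2.py candump-2021.log log-data.db
# then add the index suggested in the module docstring:
#   sqlite3 -line log-data.db 'CREATE unique INDEX ts_idx ON messages (ts);'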
|
[
"andreas.luethi@gmx.net"
] |
andreas.luethi@gmx.net
|
77417b95d8e039d76c30d7899ff7fdb9a2d761e3
|
4a512701620b812e72f0bdf2d70a06d47b810cb3
|
/src/portafolio_app/apps.py
|
f94c108f3e823a7f05abe76a833a958daa01bd06
|
[
"MIT"
] |
permissive
|
kmacho16/Portafolio-Django
|
7f734abda1592f5f4fae9c245b59d57ff8b5684e
|
2a4409dbf5c904ad146cd00734b8e7cf4993ec2a
|
refs/heads/master
| 2022-10-22T18:51:27.693530
| 2017-04-25T21:03:13
| 2017-04-25T21:03:13
| 89,301,018
| 0
| 2
|
MIT
| 2022-10-01T03:36:02
| 2017-04-25T00:54:47
|
Python
|
UTF-8
|
Python
| false
| false
| 102
|
py
|
from django.apps import AppConfig
class PortafolioAppConfig(AppConfig):
name = 'portafolio_app'
|
[
"Videos"
] |
Videos
|
824b38d43ef4dfb3c81559322218ad104da11564
|
b99061bd825d2486b663dfa43a394d375ee9d7c9
|
/src/plansys2_turtlesim/launch/noplan_example.launch.py
|
bacc3d3bdfb09d5fa8e773f3ab0a3c6ae54c0200
|
[
"Apache-2.0"
] |
permissive
|
devis12/ros2_ws_src
|
6e7ab53b5a66bbbb8aa87a58ba6d3f9320d50a42
|
3a3552a2240767a2d6776c424681b70cad83e989
|
refs/heads/main
| 2023-06-15T04:43:04.635330
| 2021-07-02T10:04:38
| 2021-07-02T10:04:38
| 361,429,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
ld = LaunchDescription()
turtlesim = Node(
package="turtlesim",
executable="turtlesim_node"
)
remap_mykill_srv = ("master_kill", "eat")
spawn_turtles = Node(
package="plansys2_turtlesim",
executable="turtles_master",
parameters=[{"spawning_frequency": 0.5}],
remappings=[remap_mykill_srv]
)
controller = Node(
package="plansys2_turtlesim",
executable="turtle_controller",
parameters=[{"turtle_name": "turtle1"},{"step": 0.16}],
remappings=[remap_mykill_srv]
)
ld.add_action(turtlesim)
ld.add_action(spawn_turtles)
ld.add_action(controller)
return ld
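# Typical invocation, assuming the workspace has been built and sourced:
#   ros2 launch plansys2_turtlesim noplan_example.launch.py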
|
[
"devisdalmoro@gmail.com"
] |
devisdalmoro@gmail.com
|
e3f744b8f38c24fa0e6c5172b34e030e993c24ed
|
6c26a9bd075d3d54a307d7c1e5a0bc67b50df8c2
|
/python_intermediate/python3/09_convert_birth.py
|
30e29cff21e2d39d7c5f1c16223b0736e767edda
|
[] |
no_license
|
marialobillo/dataquest
|
86efc49c0339c07e6263d428b5ecd2f80d395ecb
|
49e8b653adf23a12fb9eb6a972d85bc1797dba0a
|
refs/heads/master
| 2021-08-28T08:01:36.301087
| 2017-12-11T16:02:18
| 2017-12-11T16:02:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
for row in legislators:
parts = row[2].split('-')
try:
birth_year = int(parts[0])
except Exception:
birth_year = 0
row.append(birth_year)
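# e.g. a row whose third column is "1920-01-01" gets birth_year 1920
# appended; unparsable values (such as an empty string) fall back to 0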
|
[
"maria.lobillo.santos@gmail.com"
] |
maria.lobillo.santos@gmail.com
|
eb4dc7337dcb18b1bf04a50f56b47ba5e8d1282a
|
b5afbb78aa0e5259e00687b02c891bd186fb068c
|
/detector.py
|
ce6afab1accf4bc62118076e68342ec99a6f84eb
|
[] |
no_license
|
idanbuller/BullCrack
|
a21a2b5e8fca9c087b7d31bc12475c6bf88b4575
|
42c9c91c45f09b94d4c920763a5136b992f2da6b
|
refs/heads/master
| 2022-11-25T07:02:24.929960
| 2020-07-24T14:12:11
| 2020-07-24T14:12:11
| 282,235,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
import re
HASH_TYPE_REGEX = {
re.compile(r"^[a-f0-9]{32}(:.+)?$", re.IGNORECASE): ["MD5"],
re.compile(r"^[a-f0-9]{64}(:.+)?$", re.IGNORECASE): ["SHA-256"],
re.compile(r"^[a-f0-9]{128}(:.+)?$", re.IGNORECASE): ["SHA-512"],
re.compile(r"^[a-f0-9]{56}$", re.IGNORECASE): ["SHA-224"],
re.compile(r"^[a-f0-9]{40}(:.+)?$", re.IGNORECASE): ["SHA-1"],
re.compile(r"^[a-f0-9]{96}$", re.IGNORECASE): ["SHA-384"],
re.compile(r"^[a-f0-9]{16}$", re.IGNORECASE): ["MySQL323"],
re.compile(r"^[a-f0-9]{48}$", re.IGNORECASE): ["Haval-192"]
}
def obtain_hash_type(check_hash):
    # return the name of the first algorithm whose pattern matches
    for algorithm, items in HASH_TYPE_REGEX.items():
        if algorithm.match(check_hash):
            return items[0]
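# Usage sketch (the digests below are the well-known hashes of the empty
# string, used here purely as length examples):
#   obtain_hash_type("d41d8cd98f00b204e9800998ecf8427e")          # -> "MD5" (32 hex chars)
#   obtain_hash_type("da39a3ee5e6b4b0d3255bfef95601890afd80709")  # -> "SHA-1" (40 hex chars)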
|
[
"noreply@github.com"
] |
idanbuller.noreply@github.com
|
779b8ec6257873ac5253cce4489f3ef673ddf560
|
15656b9c1aff60d1c9c85712f5cbe4ffee40a588
|
/chapter3/power.py
|
e397464539fb33310a7f629f6d0b789e8d8ea84b
|
[] |
no_license
|
Super-Louis/key2offer
|
23dceffc40253d013874074f629bc2a321dc21a3
|
341cc50773ad0614eb5e9eaeeed2dd1d8858bde9
|
refs/heads/master
| 2022-12-25T16:30:50.643487
| 2020-09-30T14:39:09
| 2020-09-30T14:39:09
| 299,948,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
# -*- coding: utf-8 -*-
# Author : Super~Super
# FileName: power.py
# Python : python3.6
# Time : 18-10-2 16:16
"""
题目十六:数值的整数次方
实现函数power, 求base的n次方
"""
def power(base, exp):
# 代码完整性
if base == 0 and exp < 0:
raise RuntimeError
if base == 0 and exp >= 0:
return 0
if exp == 0:
return 1
if exp == 1:
return base
if exp == -1:
return 1.0/base
# 包含exp取负值的情况
if exp % 2 == 0:
result = power(base, exp/2)
return result * result
else:
result = power(base, (exp-1)/2)
return result * result * base
if __name__ == '__main__':
print(power(-2, 3))
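# expected output: -8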
|
[
"liuchao15@xiaomi.com"
] |
liuchao15@xiaomi.com
|
415aaa59ff9c0d19c55e3d6bf2d9e60bb5cc54dd
|
4566360f507441e994171c5c349b6f8f83619414
|
/msk/__main__.py
|
81c0145e348d1e1c9f9de8c89f8375e9ecf7f76a
|
[
"Apache-2.0"
] |
permissive
|
KathyReid/mycroft-skills-kit
|
48951fffb0308160a6301d270a5562e829f4caa2
|
bf8859869034134b66645b9f798a7b8af1c332a9
|
refs/heads/master
| 2020-03-19T10:22:12.055473
| 2018-06-06T15:13:12
| 2018-06-06T15:22:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,590
|
py
|
# Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Light
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from argparse import ArgumentParser
from msm import MycroftSkillsManager, SkillRepo
from msk.actions.create import CreateAction
from msk.actions.create_test import CreateTestAction
from msk.actions.upgrade import UpgradeAction
from msk.actions.upload import UploadAction
from msk.exceptions import MskException
from msk.global_context import GlobalContext
console_actions = {
'upgrade': UpgradeAction,
'upload': UploadAction,
'create': CreateAction,
'create-test': CreateTestAction
}
def main():
parser = ArgumentParser()
parser.add_argument('-l', '--lang', default='en-us')
parser.add_argument('-u', '--repo-url', help='Url of GitHub repo to upload skills to')
parser.add_argument('-b', '--repo-branch', help='Branch of skills repo to upload to')
parser.add_argument('-s', '--skills-dir', help='Directory to look for skills in')
parser.add_argument('-c', '--repo-cache', help='Location to store local skills repo clone')
subparsers = parser.add_subparsers(dest='action')
subparsers.required = True
for action, cls in console_actions.items():
cls.register(subparsers.add_parser(action))
args = parser.parse_args(sys.argv[1:])
context = GlobalContext()
context.lang = args.lang
context.msm = MycroftSkillsManager(
skills_dir=args.skills_dir, repo=SkillRepo(url=args.repo_url, branch=args.repo_branch)
)
try:
return console_actions[args.action](args).perform()
except MskException as e:
print('{}: {}'.format(e.__class__.__name__, str(e)))
except (KeyboardInterrupt, EOFError):
pass
if __name__ == '__main__':
main()
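# Example invocation (this file is msk/__main__.py, so the tool runs as a
# module; 'create' is one of the registered console_actions):
#   python -m msk create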
|
[
"matthew331199@gmail.com"
] |
matthew331199@gmail.com
|
f62e5b821207c19165f276f316ece8645c9a718a
|
91c6942f6d82c614c4006b405e9107396db3ba13
|
/tests/mvc_vf_product/other/compare/test_space_compair.py
|
fcd45e69eed7011ecdcfcb1d0284899f13a5eb55
|
[] |
no_license
|
WJJPro/nmc_verification
|
51ff6128abf62efdfdc90577bf2723bb532df1fd
|
488b06468f7be559c43c127c819e4a8fd08e3f68
|
refs/heads/master
| 2021-02-08T06:17:30.977303
| 2020-02-24T01:53:20
| 2020-02-24T01:53:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
from nmc_verification.nmc_vf_product.base.space_compare import *
import nmc_verification.nmc_vf_base as nvb
grid = nvb.grid([73,135,0.25],[18,53,0.25])
#grid = nvb.grid([110,120,0.25],[20,45,0.25])
data_fo = nvb.io.read_griddata.read_from_nc('I:/ppt/ec/grid/rain24/BT18070108.024.nc')
#nvb.set_coords(data_fo,dtime= [24],member= "ecmwf")
data_ob = nvb.io.read_stadata.read_from_micaps3('I:/ppt/ob/sta/rain24/BT18070208.000')
data_ob = nvb.function.get_from_sta_data.sta_between_value_range(data_ob, 0, 1000)
#space_compair.rain_24h_sg(data_fo, data_ob) # simple comparison plot
#space_compair.rain_24h_comprehensive_sg(data_ob,data_fo, filename="H:/rain24.png") # comprehensive comparison plot
data_fo = nvb.function.gxy_gxy.interpolation_linear(data_fo,grid)
#space_compair.rain_24h_comprehensive_sg(data_ob, data_fo,filename="H:/rain24.png") # after changing the region, regenerate the comprehensive comparison plot
rain_24h_comprehensive_chinaland_sg(data_ob,data_fo,filename=r"H:\test_data\output\nmc_vf_produce\continue\rain24_china.png") # comprehensive comparison plot with the display area locked to mainland China; the layout is specifically optimized for this
#grid = None
#grd_fo = nvb.io.read_griddata.read_from_nc(r"H:\test_data\ecmwf\temp_2m\19111608.024.nc",grid=grid)
#print(grd_fo)
#nvb.set_coords(grd_fo,dtime= [24],member= "ecmwf")
#grd_ob = nvb.io.read_griddata.read_from_nc(r"H:\task\develop\python\git\nmc_verification\tests\data\ecmwf\temp_2m\19111708.000.nc",grid=grid)
#space_compare.temper_gg(grd_ob, grd_fo, "H:/temp.png")
|
[
"liucouhua@163.com"
] |
liucouhua@163.com
|
a7cb757e88826ac4688f3015dc656cb6576717dc
|
a1465eddf03203c704df4ee8ebd1e58d6cd3e09a
|
/Final proyect/GUI-Examples/app.py
|
bc49a7ccdb6e29d0ef803992a65730beaa7a3ce9
|
[] |
no_license
|
dtapian94/numericalMethods
|
02bd8fdea6223eb92732536c1976f48108ae3173
|
f4c384a3ebc1890e06f641b15ec01122b92c8943
|
refs/heads/master
| 2021-08-08T04:54:47.736225
| 2018-11-26T19:41:32
| 2018-11-26T19:41:32
| 146,498,988
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
# A ScreenManager only accepts Screen widgets, each identified by a name
class Menu(Screen):
    pass
class BracketAndClosedMethods(Screen):
    pass
class MenuApp(App):
    def build(self):
        root = ScreenManager()
        root.add_widget(Menu(name='menu'))
        root.add_widget(BracketAndClosedMethods(name='bracket_and_closed_methods'))
        return root
menu = MenuApp()
menu.run()
|
[
"eugenio.leal.333@gmail.com"
] |
eugenio.leal.333@gmail.com
|
5866de6756a1868129d74cb1d3b2a55187fdd30b
|
781f408fd9dc9fd111d5ac47009ab580636625e5
|
/examples/test_window_switching.py
|
ae716575c6b862f53b84476de77172fcbcf51085
|
[
"MIT"
] |
permissive
|
doiteachday/SeleniumBase
|
fb003257b63e157b734d2b34a9c5794d74748322
|
8ded5fac84b85f1d4f43384d0836dbf4a1fc390e
|
refs/heads/master
| 2023-04-10T10:13:50.372864
| 2021-05-04T02:51:43
| 2021-05-04T02:51:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
"""
Sometimes tests open new tabs/windows, and you'll need
to switch to them first in order to interact with them.
The starting window is window(0). Then increments by 1.
"""
from seleniumbase import BaseCase
class TabSwitchingTests(BaseCase):
def test_switch_to_tabs(self):
self.open("data:text/html,<h1>Page A</h1>")
self.assert_text("Page A")
self.open_new_window()
self.open("data:text/html,<h1>Page B</h1>")
self.assert_text("Page B")
self.switch_to_window(0)
self.assert_text("Page A")
self.assert_text_not_visible("Page B")
self.switch_to_window(1)
self.assert_text("Page B")
self.assert_text_not_visible("Page A")
|
[
"mdmintz@gmail.com"
] |
mdmintz@gmail.com
|
a3fb783c2f4748e486c8c1020d9480c1df098e2c
|
31266820a547fbe85fbb3a47cbee3cf052a9f2b7
|
/hello_world.py
|
35c5c1ab12843c16d80ee724e11e6559e1b7938f
|
[] |
no_license
|
JaiTorres13/Python_Practice
|
b272ec65ec404713c1d3be3c768fc94d9a0dfe49
|
3b7babae05cbb6d43beb5f892424a762c9f1118a
|
refs/heads/master
| 2020-03-10T20:13:11.215711
| 2018-04-15T01:04:11
| 2018-04-15T01:04:11
| 129,565,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
"""
First Python script I've created.
Practicing with print method.
"""
print("Hello World!")
print()
message = "Hello Python"
print(message)
print()
message = "I will rule the world"
print(message)
print(message.upper())
print()
sum_num = 10 + 2
print(sum_num)
print()
screech = "legend of zelda"
print(screech.upper())
print(screech.title())
print()
white_space = " hi "
# These methods strip whitespace from the ends of the string
print(white_space.strip())
print(white_space.lstrip())
print(white_space.rstrip())
|
[
"jainel.torres@upr.edu"
] |
jainel.torres@upr.edu
|
3bf48bc5482d25603c821893ec51cb3b9b9ade29
|
5c95077c2d1a01ac7c2834c126f1b277b4fa64c9
|
/qa/rpc-tests/invalidateblock.py
|
4759a2c1d771a4471f004ccb97251cc5af85b177
|
[
"MIT"
] |
permissive
|
itgoldcoins/itgoldcoins
|
335209e8bdecefe883335e8aab138ff665463b8c
|
2373f5877d8294063ac89ff106d7fe2347b2f5cb
|
refs/heads/master
| 2020-04-26T04:12:26.268230
| 2019-03-03T01:43:40
| 2019-03-03T01:43:40
| 173,294,052
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,127
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Itgoldcoins Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework.test_framework import ItgoldcoinsTestFramework
from test_framework.util import *
class InvalidateTest(ItgoldcoinsTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:"
print "Mine 4 blocks on Node 0"
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print "Mine competing 6 blocks on Node 1"
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
print "Connect nodes to force a reorg"
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print "\nMake sure we won't reorg to a lower work chain:"
connect_nodes_bi(self.nodes,1,2)
print "Sync node 2 to node 1 so both have 6 blocks"
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print "Invalidate block 5 on node 1 so its tip is now at 4"
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print "Invalidate block 3 on node 2, so its tip is now 2"
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print "..and then mine a block"
self.nodes[2].generate(1)
print "Verify all nodes are at the right height"
time.sleep(5)
for i in xrange(3):
print i,self.nodes[i].getblockcount()
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
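# Hypothetical invocation: like the other qa/rpc-tests scripts, this is run
# directly (it needs the built daemon and the test_framework package):
#   python2 qa/rpc-tests/invalidateblock.py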
|
[
"fantasyfatey@gmail.com"
] |
fantasyfatey@gmail.com
|
e49ff9b52e0f8a30dac020a3c7b080d333e92588
|
85a1d522b4917ae7235f7637ee0ca262c79e402d
|
/lexconvert.py
|
969841d0d4de5f3f9b763cda6570c70d096277c0
|
[] |
no_license
|
leecody/Trans-Actor
|
63b51cc2ed3a720e8724a13cd97e76e7dbc753d1
|
a54f296bc4eeb6cc22d459b27b1d0911471b3636
|
refs/heads/master
| 2020-12-25T14:14:20.187596
| 2016-08-24T17:43:38
| 2016-08-24T17:43:38
| 66,484,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165,750
|
py
|
#!/usr/bin/env python
"""lexconvert v0.24 - convert phonemes between different speech synthesizers etc
(c) 2007-16 Silas S. Brown. License: GPL"""
# Run without arguments for usage information
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Old versions of this code are being kept in the E-GuideDog SVN repository at
# http://svn.code.sf.net/p/e-guidedog/code/ssb22/lexconvert
# although some early ones are missing.
def Phonemes():
"""Create phonemes by calling vowel(), consonant(),
variant() and other().
For the variants, if a particular variant does not
exist in the destination format then we will treat it
as equivalent to the last non-variant we created.
For anything else that does not exist in the
destination format, we will first try to break the
source's phoneme into parts (e.g. see the treatment
of opt_ol_as_in_gold by eSpeak and bbcmicro), and if
that still doesn't work then we drop a character
(warning depending on the source format's setting of
safe_to_drop_characters). makeDic does however warn
about any non-variant consonants, or non-variant
vowels that weren't marked optional, missing from a
format. """
a_as_in_ah = vowel()
_, var1_a_as_in_ah = variant()
_, var3_a_as_in_ah = variant()
_, var4_a_as_in_ah = variant()
_, var5_a_as_in_ah = variant()
a_as_in_apple = vowel()
u_as_in_but = vowel() # or the first part of un as in hunt
_, var1_u_as_in_but = variant()
o_as_in_orange = vowel()
_, var1_o_as_in_orange = variant()
_, var2_o_as_in_orange = variant()
o_as_in_now = vowel()
_, var1_o_as_in_now = variant()
a_as_in_ago = vowel()
_, var1_a_as_in_ago = variant()
e_as_in_herd = vowel()
_, ar_as_in_year = variant()
eye = vowel()
_, var1_eye = variant()
b = consonant()
ch = consonant()
d = consonant()
th_as_in_them = consonant()
e_as_in_them = vowel()
_, var1_e_as_in_them = variant()
a_as_in_air = vowel()
_, var1_a_as_in_air = variant()
_, var2_a_as_in_air = variant()
_, var3_a_as_in_air = variant()
_, var4_a_as_in_air = variant()
a_as_in_ate = vowel()
_, var1_a_as_in_ate = variant()
f = consonant()
g = consonant()
h = consonant()
i_as_in_it = vowel()
_, var1_i_as_in_it = variant()
_, var2_i_as_in_it = variant()
ear = vowel()
_, var1_ear = variant()
_, var2_ear = variant()
e_as_in_eat = vowel()
_, var1_e_as_in_eat = variant()
j_as_in_jump = consonant()
k = consonant()
_, opt_scottish_loch = variant()
l = consonant()
_, var1_l = variant()
m = consonant()
n = consonant()
ng = consonant()
o_as_in_go = vowel()
_, var1_o_as_in_go = variant()
_, var2_o_as_in_go = variant()
opt_ol_as_in_gold = opt_vowel() # see eSpeak / bbcmicro
oy_as_in_toy = vowel()
_, var1_oy_as_in_toy = variant()
p = consonant()
r = consonant()
_, var1_r = variant()
s = consonant()
sh = consonant()
t = consonant()
_, var1_t = variant()
th_as_in_think = consonant()
oor_as_in_poor = vowel()
_, var1_oor_as_in_poor = variant()
_, opt_u_as_in_pull = variant()
opt_ul_as_in_pull = opt_vowel() # see eSpeak / bbcmicro
oo_as_in_food = vowel()
_, var1_oo_as_in_food = variant()
_, var2_oo_as_in_food = variant()
close_to_or = vowel()
_, var1_close_to_or = variant()
_, var2_close_to_or = variant()
_, var3_close_to_or = variant()
v = consonant()
w = consonant()
_, var1_w = variant()
y = consonant()
z = consonant()
ge_of_blige_etc = consonant()
glottal_stop = other()
syllable_separator = other()
_, primary_stress = variant()
_, secondary_stress = variant()
text_sharp = other()
text_underline = other()
text_question = other()
text_exclamation = other()
text_comma = other()
ipa_colon = other() # for catching missed cases
del _ ; return locals()
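    # (Every phoneme defined above is a local variable, so returning
    # locals() hands the caller a name -> phoneme mapping; LexFormats
    # below injects it into globals() before building its tables.)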
def LexFormats():
"""Makes the phoneme conversion tables of each format.
Each table has string to phoneme entries and phoneme
to string entries. The string to phoneme entries are
used when converting OUT of that format, and the
phoneme to string entries are used when converting IN
(so you can recognise phonemes you don't support and
convert them to something else). By default, a tuple
of the form (string,phoneme) will create entries in
BOTH directions; one-directional entries are created
via (string,phoneme,False) or (phoneme,string,False).
The makeDic function checks the keys are unique.
First parameter is always a description of the
format, then come the phoneme entries as described
above, then any additional settings:
stress_comes_before_vowel (default False means any
stress mark goes AFTER the affected vowel; set to
True if the format requires stress placed before)
word_separator (default same as phoneme_separator)
phoneme_separator (default " ")
clause_separator (default newline)
(For a special case, clause_separator can also be
set to a function. If that happens, the function
will be called whenever lexconvert needs to output
a list of (lists of words) in this format. See
bbcmicro for an example function clause_separator)
safe_to_drop_characters (default False, can be a
string of safe characters or True = all; controls
warnings when unrecognised characters are found)
approximate_missing (default False) - if True,
makeDic will attempt to compensate for missing
phonemes by approximating them to others, instead of
warning about them. This is useful for American codes
that can't cope with all the British English phonemes.
(Approximation is done automatically anyway in the
case of variant phonemes; approximate_missing adds in
some additional approximations - see comments in code)
cleanup_regexps (default none) - optional list of
(search,replace) regular expressions to "clean up"
after converting each word INTO this format
cleanup_func (default none) - optional special-case
function to pass result through after cleanup_regexps
cvtOut_regexps (default none) - optional list of
(search,replace) regular expressions to "clean up"
before starting to convert OUT of this format
cvtOut_func (default none) - optional special-case
function to pass through before any cvtOut_regexps
inline_format (default "%s") the format string for
printing a word with --phones or --phones2phones
(can be used to put markup around each word)
(can also be a function taking the phonetic word
and returning the resulting string, e.g. bbcmicro)
output_is_binary (default False) - True if the output
is almost certainly unsuitable for a terminal; will
cause lexconvert to refuse to print phonemes unless
its standard output is redirected to a file or pipe
(affects the --phones and --phones2phones options)
inline_header (default none) text to print first
when outputting from --phones or --phones2phones
inline_footer (default none) text to print last
inline_oneoff_header (default none) text to print
before inline_header on the first time only
lex_filename - filename of a lexicon file. If this
is not specified, there is no support for writing a
lexicon in this format: there can still be READ
support if you define lex_read_function to open the
lexicon by itself, but otherwise the format can be
used only with --phones and --phones2phones.
lex_entry_format - format string for writing each
(word, pronunciation) entry to the lexicon file.
This is also needed for lexicon-write support.
lex_header, lex_footer - optional strings to write
at the beginning and at the end of the lexicon file
(can also be functions that take the open file as a
parameter, e.g. for bbcmicro; lex_footer is
allowed to close the file if it needs to do
something with it afterwards)
lex_word_case - optional "upper" or "lower" to
force a particular case for lexicon words (not
pronunciations - they're determined by the table).
The default is to allow words to be in either case.
lex_type (default "") - used by the --formats
option when summarising the support for each format
lex_read_function - Python function to READ the
lexicon file and return a (word,definition) list.
If this is not specified, there's no read support
for lexicons in this format (but there can still be
write support - see above - and you can still use
--phones and --phones2phones). If lex_filename is
specified then this function will be given the open
file as a parameter. """
phonemes = Phonemes() ; globals().update(phonemes)
return { "festival" : makeDic(
"Festival's British voice",
('0',syllable_separator),
('1',primary_stress),
('2',secondary_stress),
('aa',a_as_in_ah),
('a',a_as_in_apple),
('uh',u_as_in_but),
('o',o_as_in_orange),
('au',o_as_in_now),
('@',a_as_in_ago),
('@@',e_as_in_herd),
('ai',eye),
('b',b),
('ch',ch),
('d',d),
('dh',th_as_in_them),
('e',e_as_in_them),
(ar_as_in_year,'@@',False),
('e@',a_as_in_air),
('ei',a_as_in_ate),
('f',f),
('g',g),
('h',h),
('i',i_as_in_it),
('i@',ear),
('ii',e_as_in_eat),
('jh',j_as_in_jump),
('k',k),
('l',l),
('m',m),
('n',n),
('ng',ng),
('ou',o_as_in_go),
('oi',oy_as_in_toy),
('p',p),
('r',r),
('s',s),
('sh',sh),
('t',t),
('th',th_as_in_think),
('u@',oor_as_in_poor),
('u',opt_u_as_in_pull),
('uu',oo_as_in_food),
('oo',close_to_or),
('v',v),
('w',w),
('y',y),
('z',z),
('zh',ge_of_blige_etc),
lex_filename=ifset("HOME",os.environ.get("HOME","")+os.sep)+".festivalrc",
lex_entry_format="(lex.add.entry '( \"%s\" n %s))\n",
lex_header=";; -*- mode: lisp -*-\n(eval (list voice_default))\n",
lex_read_function = lambda *args:eval('['+commands.getoutput("grep '^(lex.add.entry' ~/.festivalrc | sed -e 's/;.*//' -e 's/[^\"]*\"/[\"/' -e 's/\" . /\",(\"/' -e 's/$/\"],/' -e 's/[()]/ /g' -e 's/ */ /g'")+']'),
safe_to_drop_characters=True, # TODO: really? (could instead give a string of known-safe characters)
cleanup_func = festival_group_stress,
),
"example" : makeVariantDic(
"A small built-in example lexicon for testing when you don't have your full custom lexicon to hand. Use --convert to write it in one of the other formats and see if a synth can import it.",
lex_read_function = lambda *args: [
("Shadrach","shei1drak"),
("Meshach","mii1shak"),
("Abednego","@be1dniigou"),
], cleanup_func = None,
lex_filename=None, lex_entry_format=None),
"espeak" : makeDic(
"eSpeak's default British voice", # but eSpeak's phoneme representation isn't always that simple, hence the regexps at the end
('%',syllable_separator),
("'",primary_stress),
(',',secondary_stress),
# TODO: glottal_stop? (in regional pronunciations etc)
('A:',a_as_in_ah),
('A@',a_as_in_ah,False),
('A',var1_a_as_in_ah),
('a',a_as_in_apple),
('aa',a_as_in_apple,False),
('a2',a_as_in_apple,False), # TODO: this is actually an a_as_in_apple variant in espeak; festival @1 is not in mrpa PhoneSet
('&',a_as_in_apple,False),
('V',u_as_in_but),
('0',o_as_in_orange),
('aU',o_as_in_now),
('@',a_as_in_ago),
('a#',a_as_in_ago,False), # (TODO: eSpeak sometimes uses a# in 'had' when in a sentence, and this doesn't always sound good on other synths; might sometimes want to convert it to a_as_in_apple; not sure what contexts would call for this though)
('3:',e_as_in_herd),
('3',var1_a_as_in_ago),
('@2',a_as_in_ago,False),
('@-',a_as_in_ago,False), # (eSpeak @- sounds to me like a shorter version of @, TODO: double-check the relationship between @ and @2 in Festival)
('aI',eye),
('aI2',eye,False),
('aI;',eye,False),
('aI2;',eye,False),
('b',b),
('tS',ch),
('d',d),
('D',th_as_in_them),
('E',e_as_in_them),
(ar_as_in_year,'3:',False),
('e@',a_as_in_air),
('eI',a_as_in_ate),
('f',f),
('g',g),
('h',h),
('I',i_as_in_it),
('I;',i_as_in_it,False),
('i',i_as_in_it,False),
('I2',var2_i_as_in_it,False),
('I2;',var2_i_as_in_it,False),
('i@',ear),
('i@3',var2_ear),
('i:',e_as_in_eat),
('i:;',e_as_in_eat,False),
('dZ',j_as_in_jump),
('k',k),
('x',opt_scottish_loch),
('l',l),
('L',l,False),
('m',m),
('n',n),
('N',ng),
('oU',o_as_in_go),
('oUl',opt_ol_as_in_gold), # (espeak says "gold" in a slightly 'posh' way though) (if dest format doesn't have opt_ol_as_in_gold, it'll get o_as_in_go + the l)
('OI',oy_as_in_toy),
('p',p),
('r',r),
('r-',r,False),
('s',s),
('S',sh),
('t',t),
('T',th_as_in_think),
('U@',oor_as_in_poor),
('U',opt_u_as_in_pull),
('@5',opt_u_as_in_pull,False),
('Ul',opt_ul_as_in_pull), # if dest format doesn't have this, it'll get opt_u_as_in_pull from the U, then the l
('u:',oo_as_in_food),
('O:',close_to_or),
('O@',var3_close_to_or),
('o@',var3_close_to_or,False),
('O',var3_close_to_or,False),
('v',v),
('w',w),
('j',y),
('z',z),
('Z',ge_of_blige_etc),
lex_filename = "en_extra",
lex_entry_format = "%s %s\n",
lex_read_function = lambda lexfile: [x for x in [l.split()[:2] for l in lexfile.readlines()] if len(x)==2 and not '//' in x[0]],
lex_footer=lambda f:(f.close(),os.system("espeak --compile=en")), # see also a bit of special-case code in mainopt_convert
inline_format = "[[%s]]",
word_separator=" ",phoneme_separator="",
stress_comes_before_vowel=True,
safe_to_drop_characters="_: !",
cleanup_regexps=[
("k'a2n","k'@n"),
("ka2n","k@n"),
("gg","g"),
("@U","oU"), # (eSpeak uses oU to represent @U; difference is given by its accent parameters)
("([iU]|([AO]:))@r$","\1@"),
("([^e])@r",r"\1_remove_3"),("_remove_",""),
# (r"([^iU]@)l",r"\1L") # only in older versions of espeak (not valid in more recent versions)
("rr$","r"),
("3:r$","3:"),
# TODO: 'declared' & 'declare' the 'r' after the 'E' sounds a bit 'regional' (but pretty). but sounds incomplete w/out 'r', and there doesn't seem to be an E2 or E@
# TODO: consider adding 'g' to words ending in 'N' (if want the 'g' pronounced in '-ng' words) (however, careful of words like 'yankee' where the 'g' would be followed by a 'k'; this may also be a problem going into the next word)
],
cvtOut_regexps = [
("e@r$","e@"), ("e@r([bdDfghklmnNprsStTvwjzZ])",r"e@\1"), # because the 'r' is implicit in other synths (but DO have it if there's another vowel to follow)
],
),
"sapi" : makeDic(
"Microsoft Speech API (American English)",
('-',syllable_separator),
('1',primary_stress),
('2',secondary_stress),
('aa',a_as_in_ah),
('ae',a_as_in_apple),
('ah',u_as_in_but),
('ao',o_as_in_orange),
('aw',o_as_in_now),
('ax',a_as_in_ago),
('er',e_as_in_herd),
('ay',eye),
('b',b),
('ch',ch),
('d',d),
('dh',th_as_in_them),
('eh',e_as_in_them),
('ey',var1_e_as_in_them),
(a_as_in_ate,'ey',False),
('f',f),
('g',g),
('h',h), # Jan suggested 'hh', but I can't get this to work on Windows XP (TODO: try newer versions of Windows)
('ih',i_as_in_it),
('iy',e_as_in_eat),
('jh',j_as_in_jump),
('k',k),
('l',l),
('m',m),
('n',n),
('ng',ng),
('ow',o_as_in_go),
('oy',oy_as_in_toy),
('p',p),
('r',r),
('s',s),
('sh',sh),
('t',t),
('th',th_as_in_think),
('uh',oor_as_in_poor),
('uw',oo_as_in_food),
('AO',close_to_or),
('v',v),
('w',w),
# ('x',var1_w), # suggested by Jan, but I can't get this to work on Windows XP (TODO: try newer versions of Windows)
('y',y),
('z',z),
('zh',ge_of_blige_etc),
approximate_missing=True,
lex_filename="run-ptts.bat", # write-only for now
lex_header = "rem You have to run this file\nrem with ptts.exe in the same directory\nrem to add these words to the SAPI lexicon\n\n",
lex_entry_format='ptts -la %s "%s"\n',
inline_format = '<pron sym="%s"/>',
safe_to_drop_characters=True, # TODO: really?
),
"cepstral" : makeDic(
"Cepstral's British English SSML phoneset",
('0',syllable_separator),
('1',primary_stress),
('a',a_as_in_ah),
('ae',a_as_in_apple),
('ah',u_as_in_but),
('oa',o_as_in_orange),
('aw',o_as_in_now),
('er',e_as_in_herd),
('ay',eye),
('b',b),
('ch',ch),
('d',d),
('dh',th_as_in_them),
('eh',e_as_in_them),
('e@',a_as_in_air),
('ey',a_as_in_ate),
('f',f),
('g',g),
('h',h),
('ih',i_as_in_it),
('i',e_as_in_eat),
('jh',j_as_in_jump),
('k',k),
('l',l),
('m',m),
('n',n),
('ng',ng),
('ow',o_as_in_go),
('oy',oy_as_in_toy),
('p',p),
('r',r),
('s',s),
('sh',sh),
('t',t),
('th',th_as_in_think),
('uh',oor_as_in_poor),
('uw',oo_as_in_food),
('ao',close_to_or),
('v',v),
('w',w),
('j',y),
('z',z),
('zh',ge_of_blige_etc),
approximate_missing=True,
lex_filename="lexicon.txt",
lex_entry_format = "%s 0 %s\n",
lex_read_function = lambda lexfile: [(word,pronunc) for word, ignore, pronunc in [l.split(None,2) for l in lexfile.readlines()]],
lex_word_case = "lower",
inline_format = "<phoneme ph='%s'>p</phoneme>",
safe_to_drop_characters=True, # TODO: really?
cleanup_regexps=[(" 1","1"),(" 0","0")],
),
"mac" : makeDic(
"approximation in American English using the [[inpt PHON]] notation of Apple's US voices",
('=',syllable_separator),
('1',primary_stress),
('2',secondary_stress),
('AA ',a_as_in_ah),
('aa ',var5_a_as_in_ah),
('AE ',a_as_in_apple),
('UX ',u_as_in_but),
(o_as_in_orange,'AA ',False),
('AW ',o_as_in_now),
('AX ',a_as_in_ago),
(e_as_in_herd,'AX ',False), # TODO: is this really the best approximation?
('AY ',eye),
('b ',b),
('C ',ch),
('d ',d),
('D ',th_as_in_them),
('EH ',e_as_in_them),
('EY ',a_as_in_ate),
('f ',f),
('g ',g),
('h ',h),
('IH ',i_as_in_it),
('IX ',var2_i_as_in_it),
('IY ',e_as_in_eat),
('J ',j_as_in_jump),
('k ',k),
('l ',l),
('m ',m),
('n ',n),
('N ',ng),
('OW ',o_as_in_go),
('OY ',oy_as_in_toy),
('p ',p),
('r ',r),
('s ',s),
('S ',sh),
('t ',t),
('T ',th_as_in_think),
('UH ',oor_as_in_poor),
('UW ',oo_as_in_food),
('AO ',close_to_or),
('v ',v),
('w ',w),
('y ',y),
('z ',z),
('Z ',ge_of_blige_etc),
approximate_missing=True,
lex_filename="substitute.sh", # write-only for now
lex_type = "substitution script",
lex_header = "#!/bin/bash\n\n# I don't yet know how to add to the Apple US lexicon,\n# so here is a 'sed' command you can run on your text\n# to put the pronunciation inline:\n\nsed -E -e :S \\\n",
lex_entry_format=r" -e 's/(^|[^A-Za-z])%s($|[^A-Za-z[12=])/\1[[inpt PHON]]%s[[inpt TEXT]]\2/g'"+" \\\n",
# but /g is non-overlapping matches and won't catch 2 words in the lex right next to each other with only one non-alpha in between, so we put :S at start and tS at end to make the whole operation repeat until it hasn't done any more substitutions (hence also the exclusion of [, 1, 2 or = following a word so it doesn't try to substitute stuff inside the phonemes; TODO: assert the lexicon does not contain "inpt", "PHON" or "TEXT")
lex_footer = lambda f:(f.write(" -e tS\n"),f.close(),os.chmod("substitute.sh",0755)),
inline_format = "%s",
word_separator=" ",phoneme_separator="",
safe_to_drop_characters=True, # TODO: really?
),
"mac-uk" : makeDic(
"Scansoft/Nuance British voices in Mac OS 10.7+ (system lexicon editing required, see --mac-uk option)",
('.',syllable_separator),
("'",primary_stress),
(secondary_stress,'',False),
('A',a_as_in_ah),
('@',a_as_in_apple),
('$',u_as_in_but),
(a_as_in_ago,'$',False),
('A+',o_as_in_orange),
('a&U',o_as_in_now),
('E0',e_as_in_herd),
('a&I',eye),
('b',b),
('t&S',ch),
('d',d),
('D',th_as_in_them),
('E',e_as_in_them),
('0',ar_as_in_year),
('E&$',a_as_in_air),
('e&I',a_as_in_ate),
('f',f),
('g',g),
('h',h),
('I',i_as_in_it),
('I&$',ear),
('i',e_as_in_eat),
('d&Z',j_as_in_jump),
('k',k),
('l',l),
('m',m),
('n',n),
('nK',ng),
('o&U',o_as_in_go),
('O&I',oy_as_in_toy),
('p',p),
('R+',r),
('s',s),
('S',sh),
('t',t),
('T',th_as_in_think),
('O',oor_as_in_poor),
('U',opt_u_as_in_pull),
('u',oo_as_in_food),
(close_to_or,'O',False),
('v',v),
('w',w),
('j',y),
('z',z),
('Z',ge_of_blige_etc),
# lex_filename not set (mac-uk code does not permanently save the lexicon; see --mac-uk option to read text)
inline_oneoff_header = "(mac-uk phonemes output is for information only; you'll need the --mac-uk or --trymac-uk options to use it)\n",
word_separator=" ",phoneme_separator="",
stress_comes_before_vowel=True,
safe_to_drop_characters=True, # TODO: really?
cleanup_regexps=[(r'o\&U\.Ol', r'o\&Ul')],
),
"x-sampa" : makeDic(
"General X-SAMPA notation (contributed by Jan Weiss)",
('.',syllable_separator),
('"',primary_stress),
('%',secondary_stress),
('A',a_as_in_ah),
(':',ipa_colon),
('A:',var3_a_as_in_ah),
('Ar\\',var4_a_as_in_ah),
('a:',var5_a_as_in_ah),
('{',a_as_in_apple),
('V',u_as_in_but),
('Q',o_as_in_orange),
(var1_o_as_in_orange,'A',False),
('O',var2_o_as_in_orange),
('aU',o_as_in_now),
('{O',var1_o_as_in_now),
('@',a_as_in_ago),
('3:',e_as_in_herd),
('aI',eye),
('Ae',var1_eye),
('b',b),
('tS',ch),
('d',d),
('D',th_as_in_them),
('E',e_as_in_them),
('e',var1_e_as_in_them),
(ar_as_in_year,'3:',False),
('E@',a_as_in_air),
('Er\\',var1_a_as_in_air),
('e:',var2_a_as_in_air),
('E:',var3_a_as_in_air),
('e@',var4_a_as_in_air),
('eI',a_as_in_ate),
('{I',var1_a_as_in_ate),
('f',f),
('g',g),
('h',h),
('I',i_as_in_it),
('1',var1_i_as_in_it),
('I@',ear),
('Ir\\',var1_ear),
('i',e_as_in_eat),
('i:',var1_e_as_in_eat),
('dZ',j_as_in_jump),
('k',k),
('x',opt_scottish_loch),
('l',l),
('m',m),
('n',n),
('N',ng),
('@U',o_as_in_go),
('oU',var2_o_as_in_go),
('@}',var1_u_as_in_but),
('OI',oy_as_in_toy),
('oI',var1_oy_as_in_toy),
('p',p),
('r\\',r),
(var1_r,'r',False),
('s',s),
('S',sh),
('t',t),
('T',th_as_in_think),
('U@',oor_as_in_poor),
('Ur\\',var1_oor_as_in_poor),
('U',opt_u_as_in_pull),
('}:',oo_as_in_food),
('u:',var1_oo_as_in_food),
(var2_oo_as_in_food,'u:',False),
('O:',close_to_or),
(var1_close_to_or,'O',False),
('o:',var2_close_to_or),
('v',v),
('w',w),
('W',var1_w),
('j',y),
('z',z),
('Z',ge_of_blige_etc),
lex_filename="acapela.txt",
lex_entry_format = "%s\t#%s\tUNKNOWN\n", # TODO: may be able to convert part-of-speech (NOUN etc) to/from some other formats e.g. Festival
lex_read_function=lambda lexfile:[(word,pronunc.lstrip("#")) for word, pronunc, ignore in [l.split(None,2) for l in lexfile.readlines()]],
# TODO: inline_format ?
word_separator=" ",phoneme_separator="",
safe_to_drop_characters=True, # TODO: really?
),
"android-pico" : makeVariantDic(
'X-SAMPA phonemes for the default \"Pico\" voice in Android (1.6+, American), wrapped in Java code', # you could put en-GB instead of en-US, but it must be installed on the phone
('A:',a_as_in_ah), # won't sound without the :
(var5_a_as_in_ah,'A:',False), # a: won't sound
('@U:',o_as_in_go),
('I',var1_i_as_in_it), # '1' won't sound
('i:',e_as_in_eat), # 'i' won't sound
('u:',oo_as_in_food), # }: won't sound
('a_I',eye),('a_U',o_as_in_now),('e_I',a_as_in_ate),('O_I',oy_as_in_toy),(var1_oy_as_in_toy,'O_I',False),('o_U',var2_o_as_in_go),
cleanup_regexps=[(r'\\',r'\\\\'),('"','"'),('::',':')],
lex_filename="",lex_entry_format="",
lex_read_function=None,
inline_oneoff_header=r'class Speak { public static void speak(android.app.Activity a,String s) { class OnInit implements android.speech.tts.TextToSpeech.OnInitListener { public OnInit(String s) { this.s = s; } public void onInit(int i) { mTts.speak(this.s, android.speech.tts.TextToSpeech.QUEUE_ADD, null); } private String s; }; if(mTts==null) mTts=new android.speech.tts.TextToSpeech(a,new OnInit(s),"com.svox.pico"); else mTts.speak(s, android.speech.tts.TextToSpeech.QUEUE_ADD, null); } private static android.speech.tts.TextToSpeech mTts = null; };'+'\n',
inline_header=r'Speak.speak(this,"<speak xml:lang=\"en-US\">',
inline_format=r'<phoneme alphabet=\"xsampa\" ph=\"%s\"/>',
clause_separator=r".\n", # note r"\n" != "\n"
inline_footer='</speak>");',
),
"acapela-uk" : makeDic(
'Acapela-optimised X-SAMPA for UK English voices (e.g. "Peter"), contributed by Jan Weiss',
('.',syllable_separator),('"',primary_stress),('%',secondary_stress), # copied from "x-sampa", not tested
('A:',a_as_in_ah),
('{',a_as_in_apple),
('V',u_as_in_but),
('Q',o_as_in_orange),
('A',var1_o_as_in_orange),
('O',var2_o_as_in_orange),
('aU',o_as_in_now),
('{O',var1_o_as_in_now),
('@',a_as_in_ago),
('3:',e_as_in_herd),
('aI',eye),
('A e',var1_eye),
('b',b),
('t S',ch),
('d',d),
('D',th_as_in_them),
('e',e_as_in_them),
(ar_as_in_year,'3:',False),
('e @',a_as_in_air),
('e r',var1_a_as_in_air),
('e :',var2_a_as_in_air),
(var3_a_as_in_air,'e :',False),
('eI',a_as_in_ate),
('{I',var1_a_as_in_ate),
('f',f),
('g',g),
('h',h),
('I',i_as_in_it),
('1',var1_i_as_in_it),
('I@',ear),
('I r',var1_ear),
('i',e_as_in_eat),
('i:',var1_e_as_in_eat),
('dZ',j_as_in_jump),
('k',k),
('x',opt_scottish_loch),
('l',l),
('m',m),
('n',n),
('N',ng),
('@U',o_as_in_go),
('o U',var2_o_as_in_go),
('@ }',var1_u_as_in_but),
('OI',oy_as_in_toy),
('o I',var1_oy_as_in_toy),
('p',p),
('r',r),
('s',s),
('S',sh),
('t',t),
('T',th_as_in_think),
('U@',oor_as_in_poor),
('U r',var1_oor_as_in_poor),
('U',opt_u_as_in_pull),
('u:',oo_as_in_food),
('O:',close_to_or),
(var1_close_to_or,'O',False),
('v',v),
('w',w),
('j',y),
('z',z),
('Z',ge_of_blige_etc),
lex_filename="acapela.txt",
lex_entry_format = "%s\t#%s\tUNKNOWN\n", # TODO: part-of-speech (as above)
lex_read_function=lambda lexfile:[(word,pronunc.lstrip("#")) for word, pronunc, ignore in [l.split(None,2) for l in lexfile.readlines()]],
inline_format = "\\Prn=%s\\",
safe_to_drop_characters=True, # TODO: really?
),
"cmu" : makeDic(
'format of the US-English Carnegie Mellon University Pronouncing Dictionary (contributed by Jan Weiss)', # http://www.speech.cs.cmu.edu/cgi-bin/cmudict
('0',syllable_separator),
('1',primary_stress),
('2',secondary_stress),
('AA',a_as_in_ah),
(var1_a_as_in_ah,'2',False),
(ipa_colon,'1',False),
('AE',a_as_in_apple),
('AH',u_as_in_but),
(o_as_in_orange,'AA',False),
('AW',o_as_in_now),
(a_as_in_ago,'AH',False),
('ER',e_as_in_herd), # TODO: check this one
('AY',eye),
('B ',b),
('CH',ch),
('D ',d),
('DH',th_as_in_them),
('EH',e_as_in_them),
(ar_as_in_year,'ER',False),
(a_as_in_air,'ER',False),
('EY',a_as_in_ate),
('F ',f),
('G ',g),
('HH',h),
('IH',i_as_in_it),
('EY AH',ear),
('IY',e_as_in_eat),
('JH',j_as_in_jump),
('K ',k),
('L ',l),
('M ',m),
('N ',n),
('NG',ng),
('OW',o_as_in_go),
('OY',oy_as_in_toy),
('P ',p),
('R ',r),
('S ',s),
('SH',sh),
('T ',t),
('TH',th_as_in_think),
('UH',oor_as_in_poor),
('UW',oo_as_in_food),
('AO',close_to_or),
('V ',v),
('W ',w),
('Y ',y),
('Z ',z),
('ZH',ge_of_blige_etc),
# lex_filename not set (does CMU have a lex file?)
safe_to_drop_characters=True, # TODO: really?
),
# BEGIN PRE-32bit ERA SYNTHS (TODO: add an attribute to JS-hide them by default in HTML? what about the SpeakJet which probably isn't a 32-bit chip but is post 32-bit era? and then what about the 'approximation' formats - kana etc - would they need hiding by default also? maybe best to just leave it)
"apollo" : makeDic(
'Dolphin Apollo 2 serial-port and parallel-port hardware synthesizers (in case anybody still uses those)',
(syllable_separator,'',False), # I don't think the Apollo had anything to mark stress; TODO: control the pitch instead like bbcmicro ?
('_QQ',syllable_separator,False), # a slight pause
('_AA',a_as_in_apple),
('_AI',a_as_in_ate),
('_AR',a_as_in_ah),
('_AW',close_to_or),
('_A',a_as_in_ago),
('_B',b),
('_CH',ch),
('_D',d),
('_DH',th_as_in_them),
('_EE',e_as_in_eat),
('_EI',a_as_in_air),
('_ER',e_as_in_herd),
('_E',e_as_in_them),
('_F',f),
('_G',g),
('_H',h),
('_IA',ear),
('_IE',eye),
('_I',i_as_in_it),
('_J',j_as_in_jump),
('_K',k),
('_KK',k,False), # sCHool
('_L',l),
('_M',m),
('_NG',ng),
('_N',n),
('_OA',o_as_in_go),
('_OO',opt_u_as_in_pull),
('_OR',var3_close_to_or),
('_OW',o_as_in_now),
('_OY',oy_as_in_toy),
('_O',o_as_in_orange),
('_P',p),
('_PP',p,False), # sPeech (a stronger P ?)
# _Q = k w - done by cleanup_regexps below
('_R',r),
('_SH',sh),
('_S',s),
('_TH',th_as_in_think),
('_T',t), ('_TT',t,False),
('_UU',oo_as_in_food),
('_U',u_as_in_but),
('_V',v),
('_W',w),
# _X = k s - done by cleanup_regexps below
('_Y',y),
('_ZH',ge_of_blige_etc),
('_Z',z),
# lex_filename not set (the hardware doesn't have one; HAL has an "exceptions dictionary" but I don't know much about it)
approximate_missing=True,
safe_to_drop_characters=True, # TODO: really?
word_separator=" ",phoneme_separator="",
cleanup_regexps=[('_K_W','_Q'),('_K_S','_X')],
cvtOut_regexps=[('_Q','_K_W'),('_X','_K_S')],
),
"dectalk" : makeDic(
'DECtalk hardware synthesizers (American English)', # (1984-ish serial port; later ISA cards)
(syllable_separator,'',False),
("'",primary_stress),
('aa',o_as_in_orange),
('ae',a_as_in_apple),
('ah',u_as_in_but),
('ao',close_to_or), # bought
('aw',o_as_in_now),
('ax',a_as_in_ago),
('ay',eye),
('b',b),
('ch',ch),
('d',d), ('dx',d,False),
('dh',th_as_in_them),
('eh',e_as_in_them),
('el',l,False), # -le of bottle, allophone ?
# TODO: en: -on of button (2 phonemes?)
('ey',a_as_in_ate),
('f',f),
('g',g),
('hx',h),
('ih',i_as_in_it), ('ix',i_as_in_it,False),
('iy',e_as_in_eat), ('q',e_as_in_eat,False),
('jh',j_as_in_jump),
('k',k),
('l',l), ('lx',l,False),
('m',m),
('n',n),
('nx',ng),
('ow',o_as_in_go),
('oy',oy_as_in_toy),
('p',p),
('r',r), ('rx',r,False),
('rr',e_as_in_herd),
('s',s),
('sh',sh),
('t',t), ('tx',t,False),
('th',th_as_in_think),
('uh',opt_u_as_in_pull),
('uw',oo_as_in_food),
('v',v),
('w',w),
('yx',y),
('z',z),
('zh',ge_of_blige_etc),
('ihr',ear), # DECtalk makes this from ih + r
approximate_missing=True,
cleanup_regexps=[('yxuw','yu')], # TODO: other allophones ("x',False" stuff above)?
cvtOut_regexps=[('yu','yxuw')],
# lex_filename not set (depends on which model etc)
stress_comes_before_vowel=True,
safe_to_drop_characters=True, # TODO: really?
word_separator=" ",phoneme_separator="",
inline_header="[:phoneme on]\n",
inline_format="[%s]",
),
"doubletalk" : makeDic(
'DoubleTalk PC/LT serial-port hardware synthesizers (American English; assumes DOS driver by default, otherwise set DTALK_COMMAND_CODE to your current command-code binary value, e.g. export DTALK_COMMAND_CODE=1)', # (1 is the synth's default; the DOS driver lets you put * instead)
(syllable_separator,'',False),
("/",primary_stress), # TODO: check it doesn't need a balancing \ afterwards (docs do say it's a "temporary" change of pitch, but it's unclear how long a 'temporary')
('M',m),('N',n),('NX',ng),('O',o_as_in_go),
('OW',o_as_in_go,False), # allophone
(o_as_in_orange,'O',False), # TODO: is this the best approximation we can do?
('OY',oy_as_in_toy),('P',p),
('R',r),('S',s),('SH',sh),('T',t),
('TH',th_as_in_think),('V',v),('W',w),('Z',z),
('ZH',ge_of_blige_etc),('K',k),('L',l),
('PX',p,False), ('TX',t,False), # aspirated allophones
('WH',w,False), ('KX',k,False), # ditto
('YY',y),('Y',y,False),
('UH',opt_u_as_in_pull),('UW',oo_as_in_food),
('AA',a_as_in_ah),('AE',a_as_in_apple),
('AH',u_as_in_but),('AO',close_to_or),
('AW',o_as_in_now),('AX',a_as_in_ago),
('AY',eye),('B',b),('CH',ch),('D',d),
('DH',th_as_in_them),
('DX',t,False), # an American "d"-like "t"
('EH',e_as_in_them),('ER',e_as_in_herd),
('EY',a_as_in_ate),('F',f),('G',g),('H',h),
('IH',i_as_in_it),('IX',i_as_in_it,False),
('IY',e_as_in_eat),('JH',j_as_in_jump),
approximate_missing=True,
stress_comes_before_vowel=True,
inline_format=markup_doubleTalk_word,
format_is_binary=ifset('DTALK_COMMAND_CODE',True),
# DoubleTalk does have a loadable "exceptions dictionary" but usually relies on a DOS tool to write it; I don't have the documentation about it (and don't know how much RAM is available for it - it's taken from the input buffer)
),
"keynote" : makeDic(
'Phoneme-read and lexicon-add codes for Keynote Gold hardware synthesizers (American English)', # ISA, PCMCIA, serial, etc; non-serial models give you an INT 2Fh param to get the address of an API function to call; not sure which software can send these codes directly to it)
(syllable_separator,'',False),
(primary_stress,"'"),(secondary_stress,'"'),
('w',w),('y',y),('h',h),('m',m),('n',n),('ng',ng),
('l',l),('r',r),('f',f),('v',v),('s',s),('z',z),
('th',th_as_in_think),('dh',th_as_in_them),('k',k),
('ch',ch),('zh',ge_of_blige_etc),('sh',sh),('g',g),
('jh',j_as_in_jump),('b',b),('p',p),('d',d),('t',t),
('i',e_as_in_eat),('I',i_as_in_it),
('e',a_as_in_ate),('E',e_as_in_them),
('ae',a_as_in_apple),('u',oo_as_in_food),
('U',opt_u_as_in_pull),('o',o_as_in_go),
('O',close_to_or),('a',o_as_in_orange),
('^',u_as_in_but),('R',e_as_in_herd),
('ay',eye),('Oy',oy_as_in_toy),('aw',o_as_in_now),
('=',a_as_in_ago),
approximate_missing=True,
inline_format="[p]%s[t]",
lex_filename="keynote.dat", # you have to somehow get this directly dumped to the card, see comment above
lex_entry_format="[x]%s %s", lex_footer="[t]\n",
stress_comes_before_vowel=False, # even though it's "'"
),
"audapter" : makeVariantDic(
"Audapter Speech System, an old hardware serial/parallel-port synthesizer (American English)", # 1989 I think. The phonemes themselves are the same as the Keynote above, but there's an extra binary byte in the commands and the lex format is stricter. I haven't checked but my guess is Audapter came before Keynote.
inline_format='\x05[p] %s\x05[t]',
format_is_binary=True,
lex_filename="audapter.dat",
lex_entry_format="\x05[x]%s %s\x05[t]\n", lex_footer="",
),
"bbcmicro" : makeDic(
"BBC Micro Speech program from 1985 (see comments in lexconvert.py for more details)",
# Speech was written by David J. Hoskins and published by Superior Software. It took 7.5k of RAM including 3.1k of samples (49 phonemes + 1 for fricatives at 64 bytes each, 4-bit ~5.5kHz), 2.2k of lexicon, and 2.2k of machine code; sounds "retro" by modern standards but quite impressive for the BBC Micro in 1985. Samples are played by amplitude-modulating the BBC's tone generator.
# If you use an emulator like BeebEm, you'll need diskimg/Speech.ssd. This can be made from your original Speech disc, or you might be able to find one but beware of copyright! Same goes with the ROM images included in BeebEm (you might want to delete ones you didn't have). There has been considerable discussion over whether UK copyright law does or should allow "format-shifting" your own legally-purchased media, and I don't fully understand all the discussion so I don't want to give advice on it here. The issue is "format-shifting" your legally-purchased BBC Micro ROM code and Speech disc to emulator images; IF this is all right then I suspect downloading someone else's copy is arguably allowed as long as you bought it legally "back in the day", but I'm not a solicitor so I don't know.
# (Incidentally, yes I was the Silas Brown referred to in Beebug 11.1 p.59, 11.9 p.50/11.10 p.47 and 12.10 p.24, and, no, the question in the final issue wasn't quite how I put it, but all taken in good humour.)
# lexconvert's --phones bbcmicro option creates *SPEAK commands which you can type into the BBC Micro or paste into an emulator, either at the BASIC prompt, or in a listing with line numbers provided by AUTO. You have to load the Speech program first of course.
# To script this on BeebEm, first turn off the Speech disc's boot option (by turning off File / Disc options / Write protect and entering "*OPT 4,0"; use "*OPT 4,3" if you want it back later; if you prefer to edit the disk image outside of the emulator then change byte 0x106 from 0x33 to 0x03), and then you can do (e.g. on a Mac) open /usr/local/BeebEm3/diskimg/Speech.ssd && sleep 1 && (echo '*SPEECH';python lexconvert.py --phones bbcmicro "Greetings from 19 85") | pbcopy && osascript -e 'tell application "System Events" to keystroke "v" using command down'
# or if you know it's already loaded: echo "Here is some text" | python lexconvert.py --phones bbcmicro | pbcopy && osascript -e 'tell application "BeebEm3" to activate' && osascript -e 'tell application "System Events" to keystroke "v" using command down'
# (unfortunately there doesn't seem to be a way of doing it without giving the emulator window focus)
# If you want to emulate a Master, you might need a *DISK before the *SPEECH (to take it out of ADFS mode).
# You can also put Speech into ROM, but this can cause problems: see comments on SP8000 later.
(syllable_separator,'',False),
('4',primary_stress),
('5',secondary_stress), # (these are pitch numbers on the BBC; normal pitch is 6, and lower numbers are higher pitches, so try 5=secondary and 4=primary; 3 sounds less calm)
('AA',a_as_in_ah),
('AE',a_as_in_apple),
('AH',u_as_in_but),
('O',o_as_in_orange),
('AW',o_as_in_now),
(a_as_in_ago,'AH',False),
('ER',e_as_in_herd),
('IY',eye),
('B',b),
('CH',ch),
('D',d),
('DH',th_as_in_them),
('EH',e_as_in_them),
(ar_as_in_year,'ER',False),
('AI',a_as_in_air),
('AY',a_as_in_ate),
('F',f),
('G',g),
('/H',h),
('IH',i_as_in_it),
('IX',var2_i_as_in_it), # (IX sounds to me like a slightly shorter version of IH)
('IXAH',ear),
('EER',var2_ear), # e.g. 'hear', 'near' - near enough
('EE',e_as_in_eat),
('J',j_as_in_jump),
('K',k),
('C',k,False), # for CT as in "fact", read out as K+T
('L',l),
('M',m),
('N',n),
('NX',ng),
('OW',o_as_in_go),
('OL',opt_ol_as_in_gold), # (if dest format doesn't have this, it'll get o_as_in_orange from the O, then the l)
('OY',oy_as_in_toy),
('P',p),
('R',r),
('S',s),
('SH',sh),
('T',t),
('TH',th_as_in_think),
('AOR',oor_as_in_poor),
('UH',oor_as_in_poor,False), # TODO: really? (espeak 'U' goes to opt_u_as_in_pull, and eSpeak also used U for the o in good, which sounds best with Speech's default UH4, hence the line below, but where did we get UH->oor_as_in_poor from? Low-priority though because how often do you convert OUT of bbcmicro format)
(opt_u_as_in_pull,'UH',False),
('/U',opt_u_as_in_pull,False),
('/UL',opt_ul_as_in_pull), # if dest format doesn't have this, it'll get opt_u_as_in_pull from the /U, then l
('UW',oo_as_in_food),
('UX',oo_as_in_food,False),
('AO',close_to_or),
('V',v),
('W',w),
('Y',y),
('Z',z),
('ZH',ge_of_blige_etc),
lex_filename=ifset("MAKE_SPEECH_ROM","SPEECH.ROM","BBCLEX"),
lex_entry_format="> %s_"+chr(128)+"%s", # (specifying 'whole word' for now; remove the space before or the _ after if you want)
lex_read_function = lambda lexfile: [(w[0].lstrip().rstrip('_').lower(),w[1]) for w in filter(lambda x:len(x)==2,[w.split(chr(128)) for w in lexfile.read().split('>')])], # TODO: this reads back the entries we generate, but is unlikely to work well with the wildcards in the default lexicon that would have been added if SPEECH_DISK was set (c.f. trying to read eSpeak's en_rules instead of en_extra)
lex_word_case = "upper",
lex_header = bbc_prepDefaultLex,
lex_footer = bbc_appendDefaultLex, # + ">**"
inline_format = markup_bbcMicro_word,
word_separator=" ",phoneme_separator="",
clause_separator=write_bbcmicro_phones, # special case
safe_to_drop_characters=True, # TODO: really?
cleanup_regexps=[
('KT','CT'), # Speech instructions: "CT as in fact"
('DYUW','DUX'), # "DUX as in duke"
('AHR$','AH'), # usually sounds a bit better
],
cvtOut_regexps=[('DUX','DYUW')], # CT handled above
),
"amiga" : makeDic(
'AmigaOS speech synthesizer (American English)', # shipped with the 1985 Amiga release; developed by SoftVoice Inc
# All I had to go by for this was a screenshot on Marcos Miranda's "blog". I once saw this synth demonstrated but never tried it. My early background was the BBC Micro, not Amigas etc. But I know some people are keen on Amigas so I might as well include it.
# (By the way I think David Hoskins had it harder than SoftVoice. Yes they were both in 1985, but the Amiga was a new 16-bit machine while the BBC was an older 8-bit one. See the "sam" format for an even older one though, although probably not written by one person.)
(syllable_separator,'',False),
('4',primary_stress),('3',secondary_stress),
('/H',h),
('EH',e_as_in_them),
('L',l),
('OW',o_as_in_go),
('AY',eye),
('AE',a_as_in_apple),
('M',m),
('DH',th_as_in_them),
('IY',e_as_in_eat),
('AH',a_as_in_ago),
('G',g),
('K',k),
('U',u_as_in_but),
('P',p),
('Y',y),
('UW',oo_as_in_food),
('T',t),
('ER',var1_a_as_in_ago),
('IH',i_as_in_it),
('S',s),
('Z',z),
('AW',o_as_in_now),
('AA',a_as_in_ah),
('R',r),
('D',d),('F',f),('N',n),('NX',ng),('J',j_as_in_jump),
('B',b),('V',v),('TH',th_as_in_think),
('OH',close_to_or),('EY',a_as_in_ate),
# The following consonants were not on the screenshot
# (or at least I couldn't find them) so I'm guessing.
# I think this should work given the way the other
# consonants work in this table.
('W',w),('CH',ch),('SH',sh),
# The following vowels were not in the screenshot and
# we just have to hope this guess is right - when
# someone tries it on an Amiga and says it doesn't
# work, maybe we can update this....
('O',o_as_in_orange),('OY',oy_as_in_toy),
# and these ones we can approximate to ones we already know (given that we're having to approximate British to an American voice anyway, it can't hurt TOO much more)
(a_as_in_air,'EH',False),
(e_as_in_herd,'ER',False),
(ar_as_in_year,'ER',False),
(ear,'IYAH',False), # or try IYER, or there might be a phoneme for it
(ge_of_blige_etc,'J',False),
(oor_as_in_poor,'OH',False),
# lex_filename not set (I have no idea how the Amiga lexicon worked)
safe_to_drop_characters=True, # TODO: really?
word_separator=" ",phoneme_separator="",
),
"sam" : makeDic(
'Software Automatic Mouth (1982 American English synth that ran on C64, Atari 400/800/etc and Apple II/etc)', # *might* be similar to MacinTalk on the 1st Macintosh in 1984
(syllable_separator,'',False),
(primary_stress,'4'),
(secondary_stress,'5'),
('IY',e_as_in_eat),
('IH',i_as_in_it),
('EH',e_as_in_them),
('AE',a_as_in_apple),
('AA',o_as_in_orange),
('AH',u_as_in_but),
('AO',close_to_or),
('OH',o_as_in_go),
('UH',opt_u_as_in_pull),
('UX',oo_as_in_food),
('ER',e_as_in_herd),
('AX',a_as_in_apple,False), # allophone?
('IX',i_as_in_it,False), # allophone?
('EY',a_as_in_ate),
('AY',eye),('OY',oy_as_in_toy),
('AW',o_as_in_now),('OW',o_as_in_go,False),
('UW',oo_as_in_food,False), # allophone?
('R',r),('L',l),('W',w),('WH',w,False),('Y',y),('M',m),
('N',n),('NX',ng),('B',b),('D',d),('G',g),('Z',z),
('J',j_as_in_jump),('ZH',ge_of_blige_etc),('V',v),
('DH',th_as_in_them),('S',s),('SH',sh),('F',f),
('TH',th_as_in_think),('P',p),('T',t),('K',k),
('CH',ch),('/H',h),('Q',glottal_stop),
approximate_missing=True,
word_separator=" ",phoneme_separator="",
# TODO: inline_format etc similar to bbcmicro?
# In Atari BASIC, you set SAM$ to the phonemes and then
# do A=USR(8192). I don't know about the C64 etc versions.
# (max 255 phonemes per string; don't know max line len.)
),
# END (?) PRE-32bit ERA SYNTHS (but see TODO above re SpeakJet, which is below)
"speakjet" : makeDic(
'Allophone codes for the American English "SpeakJet" speech synthesis chip (the conversion from phonemes to allophones might need tweaking). Set the SPEAKJET_SYM environment variable to use mnemonics, otherwise numbers are used (set SPEAKJET_BINARY for binary output).',
# TODO: might want to do something similar for the older Votrax SC-02 chip, but would need to check how exactly its phoneme interface was exposed to software by the PC cards that used it (Heathkit HV-2000 etc; not sure if any are still in use though)
(syllable_separator,'',False), # TODO: instead of having emphasis, the Speakjet has a 'faster' code for all NON-emphasized syllables
(speakjet('IY',128),e_as_in_eat),
(speakjet('IH',129),i_as_in_it),
(speakjet('EY',130),a_as_in_ate),
(speakjet('EH',131),e_as_in_them),
(speakjet('AY',132),a_as_in_apple),
(speakjet('AX',133),a_as_in_ago),
(speakjet('UX',134),u_as_in_but),
(speakjet('OH',135),o_as_in_orange),
(speakjet('AW',136),a_as_in_ah),
(speakjet('OW',137),o_as_in_go),
(speakjet('UH',138),opt_u_as_in_pull),
(speakjet('UW',139),oo_as_in_food),
(speakjet('MM',140),m),
(speakjet('NE',141),n,False),
(speakjet('NO',142),n),
(speakjet('NGE',143),ng,False),
(speakjet('NGO',144),ng),
(speakjet('LE',145),l,False),
(speakjet('LO',146),l),
(speakjet('WW',147),w),
(speakjet('RR',148),r),
(speakjet('IYRR',149),ear),
(speakjet('EYRR',150),a_as_in_air),
(speakjet('AXRR',151),e_as_in_herd),
(speakjet('AWRR',152),a_as_in_ah,False),
(speakjet('OWRR',153),close_to_or),
(speakjet('EYIY',154),a_as_in_ate,False),
(speakjet('OHIY',155),eye),
(speakjet('OWIY',156),oy_as_in_toy),
(speakjet('OHIH',157),eye,False),
(speakjet('IYEH',158),y),
(speakjet('EHLL',159),l,False),
(speakjet('IYUW',160),oo_as_in_food,False),
(speakjet('AXUW',161),o_as_in_now),
(speakjet('IHUW',162),oo_as_in_food,False),
# TODO: 163 AYWW = o_as_in_now a_as_in_ago ? handle in cleanup_regexps + cvtOut_regexps ?
(speakjet('OWWW',164),o_as_in_go,False),
(speakjet('JH',165),j_as_in_jump),
(speakjet('VV',166),v),
(speakjet('ZZ',167),z),
(speakjet('ZH',168),ge_of_blige_etc),
(speakjet('DH',169),th_as_in_them),
# TODO: get cleanup_regexps to clean up some of these according to what's coming next etc:
(speakjet('BE',170),b,False),
(speakjet('BO',171),b),
(speakjet('EB',172),b,False),
(speakjet('OB',173),b,False),
(speakjet('DE',174),d,False),
(speakjet('DO',175),d),
(speakjet('ED',176),d,False),
(speakjet('OD',177),d,False),
(speakjet('GE',178),g,False),
(speakjet('GO',179),g),
(speakjet('EG',180),g,False),
(speakjet('OG',181),g,False),
(speakjet('CH',182),ch),
(speakjet('HE',183),h,False),
(speakjet('HO',184),h),
(speakjet('WH',185),w,False),
(speakjet('FF',186),f),
(speakjet('SE',187),s,False),
(speakjet('SO',188),s),
(speakjet('SH',189),sh),
(speakjet('TH',190),th_as_in_think),
(speakjet('TT',191),t),
(speakjet('TU',192),t,False),
# TODO: 193 TS in cleanup_regexps and cvtOut_regexps
(speakjet('KE',194),k,False),
(speakjet('KO',195),k),
(speakjet('EK',196),k,False),
(speakjet('OK',197),k,False),
(speakjet('PE',198),p,False),
(speakjet('PO',199),p),
# lex_filename not set (I think the front-end software might have one, but don't know if it's accessible; chip itself just takes phonemes)
approximate_missing=True,
word_separator=ifset('SPEAKJET_BINARY',""," "),
phoneme_separator=ifset('SPEAKJET_BINARY',""," "),
clause_separator=ifset('SPEAKJET_BINARY',"","\n"), # TODO: is there a pause code?
output_is_binary=ifset('SPEAKJET_BINARY',True),
safe_to_drop_characters=True, # TODO: really?
),
"rsynth" : makeDic(
'rsynth text-to-speech C library (American English)', # TODO: test
(syllable_separator,'',False), # TODO: emphasis?
("i:",e_as_in_eat),
("I",i_as_in_it),
("eI",a_as_in_ate),
("E",e_as_in_them),
("{",a_as_in_apple),
("V",u_as_in_but),
("Q",o_as_in_orange),
("A:",a_as_in_ah),
("oU",o_as_in_go),
("U",opt_u_as_in_pull),
("u:",oo_as_in_food),
("m",m),
("n",n),
("N",ng),
("l",l),
("w",w),
("r",r),
("I@",ear),
("e@",a_as_in_air),
("3:",e_as_in_herd),
("Qr",close_to_or),
("OI",oy_as_in_toy),
("aI",eye),
("j",y),
("U@",oo_as_in_food,False),
("aU",o_as_in_now),
("@U",o_as_in_go,False),
("dZ",j_as_in_jump),
("v",v),
("z",z),
("Z",ge_of_blige_etc),
("D",th_as_in_them),
("b",b),
("d",d),
("g",g),
("tS",ch),
("h",h),
("f",f),
("s",s),
("S",sh),
("T",th_as_in_think),
("t",t),
("k",k),
("p",p),
approximate_missing=True,
# lex_filename not set (TODO: check what sort of lexicon is used by rsynth's "say" front-end)
safe_to_drop_characters=True, # TODO: really?
word_separator=" ",phoneme_separator="",
),
"unicode-ipa" : makeDic(
"IPA symbols in Unicode, as used by an increasing number of dictionary programs, websites etc",
('.',syllable_separator,False),
(syllable_separator,'',False),
(u'\u02c8',primary_stress),
(u'\u02cc',secondary_stress),
('#',text_sharp),
('_',text_underline),
('?',text_question),
('!',text_exclamation),
(',',text_comma),
(u'\u0251',a_as_in_ah),
(u'\u02d0',ipa_colon),
(u'\u0251\u02d0',var3_a_as_in_ah),
(u'\u0251\u0279',var4_a_as_in_ah),
('a\u02d0',var5_a_as_in_ah),
(u'\xe6',a_as_in_apple),
('a',a_as_in_apple,False),
(u'\u028c',u_as_in_but),
(u'\u0252',o_as_in_orange),
(var1_o_as_in_orange,u'\u0251',False),
(u'\u0254',var2_o_as_in_orange),
(u'a\u028a',o_as_in_now),
(u'\xe6\u0254',var1_o_as_in_now),
(u'\u0259',a_as_in_ago),
(u'\u0259\u02d0',e_as_in_herd),
(u'\u025a',var1_a_as_in_ago),
(u'a\u026a',eye), (u'\u028c\u026a',eye,False),
(u'\u0251e',var1_eye),
('b',b),
(u't\u0283',ch),
(u'\u02a7',ch,False),
('d',d),
(u'\xf0',th_as_in_them),
(u'\u025b',e_as_in_them),
('e',var1_e_as_in_them),
(u'\u025d',ar_as_in_year),
(u'\u025c\u02d0',ar_as_in_year,False),
(u'\u025b\u0259',a_as_in_air),
(u'\u025b\u0279',var1_a_as_in_air),
(u'e\u02d0',var2_a_as_in_air),
(u'\u025b\u02d0',var3_a_as_in_air),
(u'e\u0259',var4_a_as_in_air),
(u'e\u026a',a_as_in_ate),
(u'\xe6\u026a',var1_a_as_in_ate),
('f',f),
(u'\u0261',g), ('g',g,False),
('h',h),
(u'\u026a',i_as_in_it),
(u'\u0268',var1_i_as_in_it),
(u'\u026a\u0259',ear),
(u'\u026a\u0279',var1_ear),
(u'\u026a\u0279\u0259',var2_ear), # ?
('i',e_as_in_eat),
(u'i\u02d0',var1_e_as_in_eat),
(u'd\u0292',j_as_in_jump),
(u'\u02a4',j_as_in_jump,False),
('k',k),
('x',opt_scottish_loch),
('l',l),
(u'd\u026b',var1_l),
('m',m),
('n',n),
(u'\u014b',ng),
(u'\u0259\u028a',o_as_in_go),
('o',var1_o_as_in_go),
(u'o\u028a',var2_o_as_in_go),
(u'\u0259\u0289',var1_u_as_in_but),
(u'\u0254\u026a',oy_as_in_toy),
(u'o\u026a',var1_oy_as_in_toy),
('p',p),
(u'\u0279',r), ('r',r,False),
(var1_r,'r',False),
('s',s),
(u'\u0283',sh),
('t',t),
(u'\u027e',var1_t),
(u'\u03b8',th_as_in_think),
(u'\u028a\u0259',oor_as_in_poor),
(u'\u028a\u0279',var1_oor_as_in_poor),
(u'\u028a',opt_u_as_in_pull),
(u'\u0289\u02d0',oo_as_in_food),
(u'u\u02d0',var1_oo_as_in_food),
('u',var2_oo_as_in_food),
(u'\u0254\u02d0',close_to_or),
(var1_close_to_or,u'\u0254',False),
(u'o\u02d0',var2_close_to_or),
('v',v),
('w',w),
(u'\u028d',var1_w),
('j',y),
('z',z),
(u'\u0292',ge_of_blige_etc),
(u'\u0294',glottal_stop),
lex_filename="words-ipa.html", # write-only for now
lex_type = "HTML",
lex_header = '<html><head><meta name="mobileoptimized" content="0"><meta name="viewport" content="width=device-width"><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head><body><table>',
lex_entry_format="<tr><td>%s</td><td>%s</td></tr>\n",
lex_footer = "</table></body></html>\n",
word_separator=" ",phoneme_separator="",
stress_comes_before_vowel=True,
safe_to_drop_characters=True, # TODO: really? (at least '-' should be safe to drop)
cvtOut_func=unicode_preprocess,
),
"yinghan" : makeVariantDic(
"As unicode-ipa but, when converting a user lexicon, generates Python code that reads Wenlin Yinghan dictionary entries and adds IPA bands to matching words",
lex_filename="yinghan-ipa.py", # write-only for now
lex_type = "Python script",
lex_header = '#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys; d={',
lex_entry_format='"%s":u"%s",\n',
lex_footer = "}\nfor k in d.keys(): d[k.lower()]=d[k]\nnextIsHead=False\nfor l in sys.stdin:\n sys.stdout.write(l)\n if nextIsHead and l.strip():\n w=l.split()\n if w[0]=='ehw': l=' '.join(w[1:])\n if l.strip().lower() in d: sys.stdout.write('ipa '+d[l.strip().lower()].encode('utf-8')+'\\n')\n if l.startswith('*** '): nextIsHead=True\n",
noInherit=True
),
"unicode-rough" : makeVariantDic(
"A non-standard notation that's reminiscent of unicode-ipa but changed so that more of the characters show in old browsers with incomplete fonts",
("'",primary_stress),
(',',secondary_stress),
('ar-',a_as_in_ah),
(':',ipa_colon),
(var3_a_as_in_ah,'ar-',False),
(var4_a_as_in_ah,'ar-',False),
('uh',u_as_in_but),
(u'\u0259:',e_as_in_herd),
('ai',eye),
('ch',ch),
('e',e_as_in_them),
('3:',ar_as_in_year),
(a_as_in_air,'e:',False),
(var1_a_as_in_air,'e:',False),
(var2_a_as_in_air,'e:',False),
(var3_a_as_in_air,'e:',False),
(var4_a_as_in_air,'e:',False),
(u'ei',a_as_in_ate),
(u'\xe6i',var1_a_as_in_ate),
('g',g),
('i',i_as_in_it), (var1_i_as_in_it,'i',False),
('eeuh-',ear), (var2_ear,'eeuh-',False),
('ee',e_as_in_eat), (var1_e_as_in_eat,'ee',False),
('j',j_as_in_jump),
('ng',ng),
('o',o_as_in_go),
(var2_o_as_in_go,'o',False), # override unicode-ipa
(var1_u_as_in_but,'o',False), # ditto (?? '+'?)
('oy',oy_as_in_toy), (var1_oy_as_in_toy,'oy',False),
('r',r),
('sh',sh),
(var1_t,'t',False),
('th',th_as_in_think),
('or',oor_as_in_poor),
(var1_oor_as_in_poor,'or',False),
('u',opt_u_as_in_pull), ('oo',oo_as_in_food),
(var1_oo_as_in_food,'oo',False),
(var2_oo_as_in_food,'oo',False),
(close_to_or,'or',False),
(var1_close_to_or,'or',False),
(var2_close_to_or,'or',False),
(var1_w,'w',False),
('y',y),
('3',ge_of_blige_etc),
cleanup_regexps=[('-$','')],
cvtOut_func=None,
),
"braille-ipa" : makeDic(
"IPA symbols in Braille (2008 BANA standard). By default Braille ASCII is output; if you prefer to see the Braille dots via Unicode, set the BRAILLE_UNICODE environment variable.", # BANA = Braille Authority of North America. TODO: check if the UK accepted this standard.
# TODO: add Unicode IPA signs that aren't used in English IPA, so we can do a general IPA conversion
('_B',primary_stress),
('_2',secondary_stress),
('*',a_as_in_ah),
('3',ipa_colon),
('*3',var3_a_as_in_ah),
('*#',var4_a_as_in_ah),
('A3',var5_a_as_in_ah),
('%',a_as_in_apple),
('A',a_as_in_apple,False),
('+',u_as_in_but),
('4*',o_as_in_orange),
(var1_o_as_in_orange,'*',False),
('<',var2_o_as_in_orange),
('A(',o_as_in_now),
('%<',var1_o_as_in_now),
('5',a_as_in_ago),
('53',e_as_in_herd),
('5"R.',var1_a_as_in_ago),
('A/',eye),
('*E',var1_eye),
('B',b),
('T:',ch),
('T":.',ch,False),
('D',d),
(']',th_as_in_them),
('>',e_as_in_them),
('E',var1_e_as_in_them),
('4>3',ar_as_in_year), # (from \u025c\u02d0; TODO: check what happens to \u025d)
('>5',a_as_in_air),
('>#',var1_a_as_in_air),
('E3',var2_a_as_in_air),
('>3',var3_a_as_in_air),
('E5',var4_a_as_in_air),
('E/',a_as_in_ate),
('%/',var1_a_as_in_ate),
('F',f),
('G',g),
('H',h),
('/',i_as_in_it),
('0I',var1_i_as_in_it),
('/5',ear),
('/#',var1_ear),
('/#5',var2_ear), # ?
('I',e_as_in_eat),
('I3',var1_e_as_in_eat),
('D!',j_as_in_jump),
('K',k),
('X',opt_scottish_loch),
('L',l),
('D6L',var1_l),
('M',m),
('N',n),
('$',ng),
('5(',o_as_in_go),
('O',var1_o_as_in_go),
('O(',var2_o_as_in_go),
('50U',var1_u_as_in_but),
('</',oy_as_in_toy),
('O/',var1_oy_as_in_toy),
('P',p),
('#',r),
(var1_r,'R',False),
('S',s),
(':',sh),
('T',t),
('6R',var1_t),
('.?',th_as_in_think),
('(5',oor_as_in_poor),
('(#',var1_oor_as_in_poor),
('(',opt_u_as_in_pull),
('0U3',oo_as_in_food),
('U3',var1_oo_as_in_food),
('U',var2_oo_as_in_food),
('<3',close_to_or),
(var1_close_to_or,'<',False),
('O3',var2_close_to_or),
('V',v),
('W',w),
('6W',var1_w),
('J',y),
('Z',z),
('!',ge_of_blige_etc),
('2',glottal_stop),
lex_filename=ifset("BRAILLE_UNICODE","words-ipa.txt","words-ipa.brl"), # write-only for now
lex_type = "document",
# inline_format=",7%s7'", # -> do this in cleanup_func so it's included in BRAILLE_UNICODE if necessary
lex_entry_format="%s = %s\n", # ditto with the markers
word_separator=" ",phoneme_separator="",
stress_comes_before_vowel=True,
safe_to_drop_characters=True, # TODO: really?
cleanup_func=lambda r:ifset("BRAILLE_UNICODE",ascii_braille_to_unicode,lambda x:x)(",7"+r+"7'"),
),
"latex-ipa" : makeDic(
'IPA symbols for typesetting in LaTeX using the "tipa" package',
('.',syllable_separator,False),
('"',primary_stress),
('\\textsecstress{}',secondary_stress),
('\\#',text_sharp),
('\\_',text_underline),
('?',text_question),
('!',text_exclamation),
(',',text_comma),
('A',a_as_in_ah),
(':',ipa_colon),
('A:',var3_a_as_in_ah),
('A\\textturnr{}',var4_a_as_in_ah),
('a:',var5_a_as_in_ah),
('\\ae{}',a_as_in_apple),
('2',u_as_in_but),
('6',o_as_in_orange),
(var1_o_as_in_orange,'A',False),
('O',var2_o_as_in_orange),
('aU',o_as_in_now),
('\\ae{}O',var1_o_as_in_now),
('@',a_as_in_ago),
('@:',e_as_in_herd),
('\\textrhookschwa{}',var1_a_as_in_ago),
('aI',eye),
('Ae',var1_eye),
('b',b),
('tS',ch),
('d',d),
('D',th_as_in_them),
('E',e_as_in_them),
('e',var1_e_as_in_them),
('3:',ar_as_in_year),
('E@',a_as_in_air),
('E\\textturnr{}',var1_a_as_in_air),
('e:',var2_a_as_in_air),
('E:',var3_a_as_in_air),
('e@',var4_a_as_in_air),
('eI',a_as_in_ate),
('\\ae{}I',var1_a_as_in_ate),
('f',f),
('g',g),
('h',h),
('I',i_as_in_it),
('1',var1_i_as_in_it),
('I@',ear),
('I\\textturnr{}',var1_ear),
('I@\\textturnr{}',var2_ear), # ?
('i',e_as_in_eat),
('i:',var1_e_as_in_eat),
('dZ',j_as_in_jump),
('k',k),
('x',opt_scottish_loch),
('l',l),
('d\\textltilde{}',var1_l),
('m',m),
('n',n),
('N',ng),
('@U',o_as_in_go),
('o',var1_o_as_in_go),
('oU',var2_o_as_in_go),
('@0',var1_u_as_in_but),
('OI',oy_as_in_toy),
('oI',var1_oy_as_in_toy),
('p',p),
('\\textturnr{}',r),
(var1_r,'r',False),
('s',s),
('S',sh),
('t',t),
('R',var1_t),
('T',th_as_in_think),
('U@',oor_as_in_poor),
('U\\textturnr{}',var1_oor_as_in_poor),
('U',opt_u_as_in_pull),
('0:',oo_as_in_food),
('u:',var1_oo_as_in_food),
('u',var2_oo_as_in_food),
('O:',close_to_or),
(var1_close_to_or,'O',False),
('o:',var2_close_to_or),
('v',v),
('w',w),
('\\textturnw{}',var1_w),
('j',y),
('z',z),
('Z',ge_of_blige_etc),
('P',glottal_stop),
lex_filename="words-ipa.tex", # write-only for now
lex_type = "document",
lex_header = r'\documentclass[12pt,a4paper]{article} \usepackage[safe]{tipa} \usepackage{longtable} \begin{document} \begin{longtable}{ll}',
lex_entry_format=r"%s & \textipa{%s}\\"+"\n",
lex_footer = r"\end{longtable}\end{document}"+"\n",
inline_format = "\\textipa{%s}",
inline_oneoff_header = r"% In preamble, put \usepackage[safe]{tipa}"+"\n", # (the [safe] part is recommended if you're mixing with other TeX)
word_separator=" ",phoneme_separator="",
clause_separator=r"\\"+"\n",
stress_comes_before_vowel=True,
safe_to_drop_characters=True, # TODO: really?
),
"pinyin-approx" : makeDic(
"Rough approximation using roughly the spelling rules of Chinese Pinyin (for getting Chinese-only voices to speak some English words - works with some words better than others)", # write-only for now
('4',primary_stress),
('2',secondary_stress),
('a5',a_as_in_ah),
('ya5',a_as_in_apple),
('e5',u_as_in_but),
('yo5',o_as_in_orange),
('ao5',o_as_in_now),
(e_as_in_herd,'e5',False),
('ai5',eye),
('bu0',b),
('che0',ch),
('de0',d),
('ze0',th_as_in_them),
('ye5',e_as_in_them),
(a_as_in_air,'ye5',False),
('ei5',a_as_in_ate),
('fu0',f),
('ge0',g),
('he0',h),
('yi5',i_as_in_it),
('yi3re5',ear),
(e_as_in_eat,'yi5',False),
('zhe0',j_as_in_jump),
('ke0',k),
('le0',l),
('me0',m),
('ne0',n),
('eng0',ng),
('ou5',o_as_in_go),
('ruo2yi5',oy_as_in_toy),
('pu0',p),
('re0',r),
('se0',s),
('she0',sh),
('te0',t),
(th_as_in_think,'zhe0',False),
(oor_as_in_poor,'wu5',False),
('yu5',oo_as_in_food),
('huo5',close_to_or),
(v,'fu0',False),
('wu0',w),
('yu0',y),
(z,'ze0',False),
(ge_of_blige_etc,'zhe0',False),
approximate_missing=True,
lex_filename="words-pinyin-approx.txt", # write-only for now
lex_type = "text",
lex_header = "Pinyin approxmations (very approximate!)\n----------------------------------------\n",
lex_entry_format = "%s ~= %s\n",
word_separator=" ",phoneme_separator="",
cleanup_regexps=[
("te0ye","tie"),
("e0e5","e5"),("([^aeiou][uo])0e(5)",r"\1\2"),
("yu0y","y"),
("wu0yo5","wo5"),
("([bdfghklmnpwz])[euo]0ei",r"\1ei"),
("([bdghklmnpstwz])[euo]0ai",r"\1ai"),
("([ghklmnpstyz])[euo]0ya",r"\1a"),("([ghklmnpstz])a([0-5]*)ne0",r"\1an\2"),
("([bdfghklmnpstwyz])[euo]0a([1-5])",r"\1a\2"),
("([bdjlmnpt])[euo]0yi",r"\1i"),("([bjlmnp])i([1-5]*)ne0",r"\1in\2"),
("([zs])he0ei",r"\1hei"),
("([dfghklmnprstyz])[euo]0ou",r"\1ou"),
("([dghklnrst])[euo]0huo",r"\1uo"),
("([bfpm])[euo]0huo",r"\1o"),
("([bdghklmnprstyz])[euo]0ao",r"\1ao"),
("([zcs])h[eu]0ao",r"\1hao"),
("re0r","r"),
("zhe0ne0","zhun5"),
("54","4"),
("52","2"),
("([bdjlmnpty])i([1-9])eng0",r"\1ing\2"),
("ya([1-9])eng0",r"yang\1"),
("ya([1-9])ne0",r"an\1"),
("ye([1-9])ne0",r"yan\1"),("([wr])[eu]0yan",r"\1en"),
("yi([1-9])ne0",r"yin\1"),
("yu0","yu5"),("eng0","eng5"), # they won't work unvoiced anyway
("0","5"), # comment out if the synth supports 'tone 0 for unvoiced'
#("[euo]0","0"), # comment in if it expects consonants only when doing that
],
),
"kana-approx" : makeDic(
"Rough approximation using kana (for getting Japanese computer voices to speak some English words - works with some words better than others). Set KANA_TYPE environment variable to hiragana or katakana (which can affect the sounds of some voices); default is hiragana", # for example on Mac OS 10.7+ (with Japanese voice installed in System Preferences) try PHONES_PIPE_COMMAND='say -v Kyoko' (this voice has a built-in converter from English as well, but lexconvert --phones kana-approx can work better with some complex words, although the built-in converter does seem to have access to slightly more phonemes and can therefore produce words like "to" better). Default is hiragana because I find hiragana easier to read than katakana, although the Kyoko voice does seem to be able to say 'v' a little better when using kata. Mac OS 10.7+'s Korean voices (Yuna and Narae) can also read kana, and you could try doing a makeVariantDic and adding in some Korean jamo letters for them (you'd be pushed to represent everything in jamo but kana+jamo seems more hopeful in theory), but again some words work better than others (not all phonetic combinations are supported and some words aren't clear at all).
# This kana-approx format is 'write-only' for now (see comment in cleanup_regexps re possible reversal)
(u'\u30fc',primary_stress),
(secondary_stress,ifset('KANA_MORE_EMPH',u'\u30fc'),False), # set KANA_MORE_EMPH environment variable if you want to try doubling the secondary-stressed vowels as well (doesn't always work very well; if it did, I'd put this line in a makeVariantDic called kana-approx-moreEmph or something)
# The following Unicode codepoints are hiragana; KANA_TYPE is handled by cleanup_func below
(u'\u3042',a_as_in_apple),
(u'\u3044',e_as_in_eat),
(u'\u3046',oo_as_in_food),
(u'\u3048',e_as_in_them),
(u'\u304a',o_as_in_orange),
(u'\u3042\u3044',eye), # ai
(u'\u3042\u304a',o_as_in_now), # ao
(u'\u3048\u3044',a_as_in_ate), # ei
(u'\u304a\u3044',oy_as_in_toy), # oi
(u'\u304a\u3046',o_as_in_go), # ou
(a_as_in_ah,u'\u3042',False),
(a_as_in_ago,u'\u3046\u304a',False), # TODO: \u3042, \u304a or \u3046 depending on the word?
(e_as_in_herd,u'\u3042',False), # TODO: really?
(i_as_in_it,u'\u3044',False), # TODO: really?
(u_as_in_but,u'\u3046',False), # TODO: really?
(ar_as_in_year,u'\u3048',False), # TODO: really?
(ear,u'\u3044\u304a',False), # TODO: really?
(a_as_in_air,u'\u3048',False), # TODO: really?
(oor_as_in_poor,u'\u304a',False), # TODO: really?
(close_to_or,u'\u304a\u30fc'), # TODO: really?
(u'\u3076',b), # bu (with vowel replacements later)
(u'\u3061\u3047',ch), # chu (ditto)
(u'\u3065',d), # du (and so on)
(u'\u3066\u3085',th_as_in_think), (th_as_in_them,u'\u3066\u3085',False),
(u'\u3075',f),
(u'\u3050',g),
(u'\u306f',h), # ha (as hu == fu)
(u'\u3058\u3085',j_as_in_jump), (ge_of_blige_etc,u'\u3058\u3085',False),
(u'\u304f',k),
(u'\u308b',l), (r,u'\u308b',False),
(u'\u3080',m),
(u'\u306c',n),
(u'\u3093\u3050',ng),
(u'\u3077',p),
(u'\u3059',s),
(u'\u3057\u3085',sh),
(u'\u3064',t),
(u'\u308f',w), # use 'wa' (as 'wu' == 'u')
(v,ifset('KANA_V_AS_W',u'\u308f',u'\u3094'),False), # TODO: document KANA_V_AS_W variable. Is vu always supported? (it doesn't seem to show up in all fonts)
(u'\u3086',y),
(u'\u305a',z),
lex_filename="words-kana-approx.txt",
lex_type = "text",
lex_header = "Kana approxmations (very approximate!)\n--------------------------------------\n",
lex_entry_format = "%s ~= %s\n",
word_separator=" ",phoneme_separator="",
clause_separator=u"\u3002\n".encode('utf-8'),
cleanup_regexps=[(u"\u306c$",u"\u3093\u30fc"), # TODO: or u"\u3093\u3093" ?
# now the vowel replacements (bu+a -> ba, etc) (in most cases these can be reversed into cvtOut_regexps if you want to use the kana-approx table to convert hiragana into approximate English phonemes (plus add a (u"\u3093\u30fc*",u"\u306c") and perhaps de-doubling rules to convert back to emphasis) but the result is unlikely to be any good)
(u"\u3076\u3042",u"\u3070"),(u"\u3076\u3044",u"\u3073"),(u"\u3076\u3048",u"\u3079"),(u"\u3076\u304a",u"\u307c"),(u"\u3076\u3046",u"\u3076"),
(u"\u3061\u3085\u3042",u"\u3061\u3083"),(u"\u3061\u3085\u3046",u"\u3061\u3085"),(u"\u3061\u3085\u3048",u"\u3061\u3047"),(u"\u3061\u3085\u304a",u"\u3061\u3087"),(u"\u3061\u3085\u3044",u"\u3061"),
(u"\u3065\u3042",u"\u3060"),(u"\u3065\u3044",u"\u3062"),(u"\u3065\u3048",u"\u3067"),(u"\u3065\u304a",u"\u3069"),(u"\u3065\u3046",u"\u3065"),
(u"\u3066\u3085\u3042",u"\u3066\u3083"),(u"\u3066\u3085\u3044",u"\u3066\u3043"),(u"\u3066\u3043\u3046",u"\u3066\u3085"),(u"\u3066\u3085\u3048",u"\u3066\u3047"),(u"\u3066\u3085\u304a",u"\u3066\u3087"),
(u"\u3075\u3042",u"\u3075\u3041"),(u"\u3075\u3044",u"\u3075\u3043"),(u"\u3075\u3048",u"\u3075\u3047"),(u"\u3075\u304a",u"\u3075\u3049"),(u"\u3075\u3046",u"\u3075"),
(u"\u306f\u3044",u"\u3072"),(u"\u306f\u3046",u"\u3075"),(u"\u306f\u3048",u"\u3078"),(u"\u306f\u304a",u"\u307b"),(u"\u306f\u3042",u"\u306f"),
(u"\u3050\u3042",u"\u304c"),(u"\u3050\u3044",u"\u304e"),(u"\u3050\u3048",u"\u3052"),(u"\u3050\u304a",u"\u3054"),(u"\u3050\u3046",u"\u3050"),
(u"\u3058\u3085\u3042",u"\u3058\u3083"),(u"\u3058\u3085\u3046",u"\u3058\u3085"),(u"\u3058\u3085\u3048",u"\u3058\u3047"),(u"\u3058\u3085\u304a",u"\u3058\u3087"),(u"\u3058\u3085\u304a",u"\u3058"),
(u"\u304f\u3042",u"\u304b"),(u"\u304f\u3044",u"\u304d"),(u"\u304f\u3048",u"\u3051"),(u"\u304f\u304a",u"\u3053"),(u"\u304f\u3046",u"\u304f"),
(u"\u308b\u3042",u"\u3089"),(u"\u308b\u3044",u"\u308a"),(u"\u308b\u3048",u"\u308c"),(u"\u308b\u304a",u"\u308d"),(u"\u308b\u3046",u"\u308b"),
(u"\u3080\u3042",u"\u307e"),(u"\u3080\u3044",u"\u307f"),(u"\u3080\u3048",u"\u3081"),(u"\u3080\u304a",u"\u3082"),(u"\u3080\u3046",u"\u3080"),
(u"\u306c\u3042",u"\u306a"),(u"\u306c\u3044",u"\u306b"),(u"\u306c\u3048",u"\u306d"),(u"\u306c\u304a",u"\u306e"),(u"\u306c\u3046",u"\u306c"),
(u"\u3077\u3042",u"\u3071"),(u"\u3077\u3044",u"\u3074"),(u"\u3077\u3048",u"\u307a"),(u"\u3077\u304a",u"\u307d"),(u"\u3077\u3046",u"\u3077"),
(u"\u3059\u3042",u"\u3055"),(u"\u3059\u3048",u"\u305b"),(u"\u3059\u304a",u"\u305d"),(u"\u3059\u3046",u"\u3059"),
(u"\u3057\u3085\u3042",u"\u3057\u3083"),(u"\u3057\u3085\u3046",u"\u3057\u3085"),(u"\u3057\u3085\u3048",u"\u3057\u3047"),(u"\u3057\u3085\u304a",u"\u3057\u3087"),(u"\u3057\u3085\u3044",u"\u3057"),
(u"\u3064\u3042",u"\u305f"),(u"\u3064\u3044",u"\u3061"),(u"\u3064\u3048",u"\u3066"),(u"\u3064\u304a",u"\u3068"),(u"\u3064\u3046",u"\u3064"),
(u"\u3086\u3042",u"\u3084"),(u"\u3086\u3048",u"\u3044\u3047"),(u"\u3086\u304a",u"\u3088"),(u"\u3086\u3046",u"\u3086"),
(u"\u305a\u3042",u"\u3056"),(u"\u305a\u3044",u"\u3058"),(u"\u305a\u3048",u"\u305c"),(u"\u305a\u304a",u"\u305e"),(u"\u305a\u3046",u"\u305a"),
(u"\u308f\u3044",u"\u3046\u3043"),(u"\u308f\u3046",u"\u3046"),(u"\u308f\u3048",u"\u3046\u3047"),(u"\u308f\u304a",u"\u3092"),(u"\u308f\u3042",u"\u308f"),
(u'\u3046\u3043\u3066\u3085', u'\u3046\u3043\u3065'), # sounds a bit better for words like 'with'
(u'\u3085\u3046',u'\u3085'), # and 'the' (especially with a_as_in_ago mapping to u'\u3046\u304a'; it's hard to get a convincing 'the' though, especially in isolation)
(u'\u3050\u3050',u'\u3050'), # gugu -> gu, sometimes comes up with 'gl-' combinations
(u'\u30fc\u30fc+',u'\u30fc'), # in case we put 30fc in the table AND a stress mark has been applied to it
(u'^(.)$',ur'\1\u30fc'), # lengthen any word that ends up as a single kana (otherwise can be clipped badly)
(u'^([\u3042\u3070\u3060\u304c\u304b\u3089\u307e\u306a\u3071\u3055\u305f\u3084\u3056\u308f]\u3044)$',ur'\1\u30fc'), # ditto for -ai (TODO: -ao might need lengthening sometimes?? depends on context. -ei, -oi, -ou seem OK)
],
cleanup_func = hiragana_to_katakana
),
"names" : makeDic(
"Lexconvert internal phoneme names (sometimes useful with the --phones option while developing new formats)",
*[(phName,phVal) for phName,phVal in phonemes.items()])}
# The mainopt_...() functions are the main options
# (if you implement a new one, main() will detect it);
# 1st line of doc string should be parameter summary
# (start the doc string with \n if no parameters); if 1st
# character of doc string is * then this function is put
# among the first in the help (otherwise alphabetically).
# If function returns a string, that's taken to be a
# message to be printed with error exit. Same if it raises
# an exception of type Message.
def mainopt_try(i):
"""*<format> [<pronunciation>]
Convert input from <format> into eSpeak and try it out.
(Requires the 'espeak' command.)
E.g.: python lexconvert.py --try festival h @0 l ou1
or: python lexconvert.py --try unicode-ipa '\\u02c8\\u0279\\u026adn\\u0329' (for Unicode put '\\uNNNN' or UTF-8)"""
format = sys.argv[i+1]
if not format in lexFormats: return "No such format "+repr(format)+" (use --formats to see a list of formats)"
for phones in getInputText(i+2,"phonemes in "+format+" format",'maybe'):
espeak = convert(phones,format,'espeak')
os.popen("espeak -x","w").write(markup_inline_word("espeak",espeak)+'\n') # separate process each item for more responsiveness from the console (sending 'maybe' to getInputText means won't lose efficiency if not read from console)
def mainopt_trymac(i):
"""*<format> [<pronunciation>]
Convert phonemes from <format> into Mac and try it using the Mac OS 'say' command"""
format = sys.argv[i+1]
if not format in lexFormats: return "No such format "+repr(format)+" (use --formats to see a list of formats)"
for resp in getInputText(i+2,"phonemes in "+format+" format",'maybe'):
mac = convert(resp,format,'mac')
toSay = markup_inline_word("mac",mac)
print toSay
os.popen(macSayCommand()+" -v Vicki","w").write(toSay) # Need to specify a voice because the default voice might not be able to take Apple phonemes. Vicki has been available since 10.3, as has the 'say' command (previous versions need osascript, see Gradint's code)
def mainopt_trymac_uk(i):
"""*<format> [<pronunciation>]
Convert phonemes from <format> and try it with Mac OS British voices (see --mac-uk for details)"""
format = sys.argv[i+1]
if not format in lexFormats: return "No such format "+repr(format)+" (use --formats to see a list of formats)"
for resp in getInputText(i+2,"phonemes in "+format+" format",'maybe'):
macuk = convert(resp,format,'mac-uk')
m = MacBritish_System_Lexicon("",os.environ.get("MACUK_VOICE","Daniel"))
try:
try: m.speakPhones(macuk.split())
finally: m.close()
except KeyboardInterrupt:
sys.stderr.write("Interrupted\n")
def mainopt_phones(i):
"""*<format> [<words>]
Use eSpeak to convert text to phonemes, and then convert the phonemes to format 'format'.
E.g.: python lexconvert.py --phones unicode-ipa This is a test sentence.
Set environment variable PHONES_PIPE_COMMAND to an additional command to which to write the phones as well as standard output. (If standard input is a terminal then this will be done separately after each line.)
(Some commercial speech synthesizers do not work well when driven entirely from phonemes, because their internal format is different and is optimised for normal text.)
Set format to 'all' if you want to see the phonemes in ALL supported formats.
"""
format = sys.argv[i+1]
if format=="example": return "The 'example' format cannot be used with --phones; try --convert, or did you mean --phones festival" # could allow example anyway as it's basically Festival, but save confusion as eSpeak might not generate the same phonemes if our example words haven't been installed in the system's eSpeak. (Still allow it to be used in --try etc though.)
if not format in lexFormats and not format=="all": return "No such format "+repr(format)+" (use --formats to see a list of formats)"
hadOneoff = False
for response in getInputText(i+2,"text",'maybe'):
response = pipeThroughEspeak(response.replace(u'\u2032'.encode('utf-8'),'').replace(u'\u00b4'.encode('utf-8'),'').replace(u'\u02b9'.encode('utf-8'),'').replace(u'\u00b7'.encode('utf-8'),'')) # (remove any 2032 and b7 pronunciation marks before passing to eSpeak)
if not '\n' in response.rstrip() and 'command' in response: return response.strip() # 'bad cmd' / 'cmd not found'
if format=="all": formats = sorted(k for k in lexFormats.keys() if not k=="example")
else: formats = [format]
for format in formats:
def out(doOneoff=True):
if len(formats)>1: writeFormatHeader(format)
if doOneoff: sys.stdout.write(checkSetting(format,"inline_oneoff_header"))
sys.stdout.write(checkSetting(format,"inline_header"))
output_clauses(format,convert(parseIntoWordsAndClauses("espeak",response),"espeak",format))
sys.stdout.write(checkSetting(format,"inline_footer"))
print
sys.stdout.flush() # in case it's being piped
out(not hadOneoff) ; hadOneoff = True
if os.environ.get("PHONES_PIPE_COMMAND",""):
o,sys.stdout = sys.stdout,os.popen(os.environ["PHONES_PIPE_COMMAND"],'w')
out()
sys.stdout = o
def pipeThroughEspeak(inpt):
"Writes inpt to espeak -q -x (in chunks if necessary) and returns the result"
bufsize = 8192 # careful not to set this too big, as the OS might limit it (TODO can we check?)
ret = []
while len(inpt) > bufsize:
splitAt = inpt.rfind('\n',0,bufsize)+1
if not splitAt: # no newline, try to split on space
splitAt = inpt.rfind(' ',0,bufsize)+1
if not splitAt:
sys.stderr.write("Note: had to split eSpeak input and couldn't find a newline or space to do it on\n")
splitAt = bufsize
response = pipeThroughEspeak(inpt[:splitAt])
if not '\n' in response.rstrip() and 'command' in response: return response.strip() # 'bad cmd' / 'cmd not found'
ret.append(response) ; inpt=inpt[splitAt:]
w,r=os.popen4("espeak -q -x",bufsize=bufsize)
w.write(inpt) ; w.close()
return "\n".join(ret) + r.read()
def writeFormatHeader(format):
"Writes a header for 'format' when outputting in all formats. Assumes the output MIGHT end up being more than one line."
global writeFormatHeader_called
if writeFormatHeader_called: print
print format
print '-'*len(format)
writeFormatHeader_called = True
writeFormatHeader_called = False
def mainopt_check_variants(i):
# undocumented (won't appear in help text)
groups = {}
for k,v in lexFormats['espeak'].items():
if type(k)==str:
intV = int(v)
if not intV in consonants:
if not intV in groups: groups[intV] = []
groups[intV].append((v,k))
i = groups.items() ; i.sort()
for k,v in i:
if len(v)==1: continue
v.sort()
while True:
print "Group",k
os.popen("espeak -x","w").write('\n'.join([markup_inline_word("espeak",w) for _,w in v]))
if not input("Again? 1/0: "): break
def mainopt_check_for_similar_formats(i):
# undocumented (won't appear in help text)
items = lexFormats.items() ; r = []
while items:
k1,dic1 = items[0]
for k2,dic2 in items[1:]:
diff = 0
for kk,vv in dic1.items():
if not type(kk)==int: continue
if kk==syllable_separator: continue
if not dic2.get(kk,"!"+vv)==vv: diff += 1
r.append((diff,k1,k2))
items = items[1:]
r.sort() ; had = set()
for diffs,format1,format2 in r:
if format1 in had and format2 in had: continue
had.add(format1) ; had.add(format2)
if "names" in had: break
print diffs,"phoneme differences between",format1,"and",format2
def festival_group_stress(pronunc):
"Special-case cleanup_func for the Festival format"
# TODO: do we ever need to add extra consonants to the
# previous group instead of the next group? (not sure
# what difference it makes to the synthesis, but it
# might make the entry a bit more readable)
groups = [] ; thisGroup = [[],'0',False] # phon,stress,complete
for phon in pronunc.split():
if phon in ['0','1','2']:
if groups and phon >= groups[-1][1]:
groups[-1][1]=phon
continue
thisGroup[0].append(phon)
if phon[0] in 'aeiou@':
thisGroup[2]=True
groups.append(thisGroup)
thisGroup = [[],'0',False]
if thisGroup[0]: groups.append(thisGroup)
if len(groups)>=2 and not groups[-1][2]:
groups[-2][0] += groups[-1][0]
del groups[-1]
return "("+' '.join(("(("+' '.join(g[0])+') '+g[1]+")") for g in groups)+")"
def mainopt_convert(i):
"""*<from-format> <to-format>
Convert a user lexicon (generally from its default filename; if this cannot be found then lexconvert will tell you what it should be).
E.g.: python lexconvert.py --convert festival cepstral"""
fromFormat = sys.argv[i+1]
toFormat = sys.argv[i+2]
if fromFormat==toFormat: return "Cannot convert a lexicon to its own format (that could result in it being truncated)"
if toFormat=="mac-uk": return "Cannot permanently save a Mac-UK lexicon; please use the --mac-uk option to read text"
if toFormat=="example": return "Cannot overwrite the built-in example lexicon"
for f in [fromFormat,toFormat]:
if not f in lexFormats: return "No such format "+repr(f)+" (use --formats to see a list of formats)"
try:
fname=getSetting(toFormat,"lex_filename")
getSetting(toFormat,"lex_entry_format") # convert_user_lexicon will need this
except KeyError: fname = None
if not fname: return "Write support for lexicons of format '%s' not yet implemented (need at least lex_filename and lex_entry_format); try using --phones or --phones2phones options instead" % (toFormat,)
if toFormat=="espeak":
assert fname=="en_extra", "If you changed eSpeak's lex_filename in the table you also need to change the code below"
if os.system("mv en_extra en_extra~ && grep \" // \" en_extra~ > en_extra"): sys.stderr.write("Warning: en_extra not found, making a new one\n(espeak compile will probably fail in this directory)\n") # otherwise keep the commented entries, so can incrementally update the user lexicon only
outFile=open(fname,"a")
else:
l = 0
try: l = open(fname).read()
except: pass
assert not l, "File "+replHome(fname)+" already exists and is not empty; are you sure you want to overwrite it? (Delete it first if so)" # (if you run with python -O then this is ignored, as are some other checks so be careful)
outFile=open(fname,"w")
print "Writing %s lexicon entries to %s file %s" % (fromFormat,toFormat,fname)
try: convert_user_lexicon(fromFormat,toFormat,outFile)
except Message:
print " - error, deleting",fname
os.remove(fname) ; raise
def mainopt_festival_dictionary_to_espeak(i):
"""<location>
Convert the Festival Oxford Advanced Learners Dictionary (OALD) pronunciation lexicon to eSpeak.
You need to specify the location of the OALD file in <location>,
e.g. for Debian festlex-oald package: python lexconvert.py --festival-dictionary-to-espeak /usr/share/festival/dicts/oald/all.scm
or if you can't install the Debian package, try downloading http://ftp.debian.org/debian/pool/non-free/f/festlex-oald/festlex-oald_1.4.0.orig.tar.gz, unpack it into /tmp, and do: python lexconvert.py --festival-dictionary-to-espeak /tmp/festival/lib/dicts/oald/oald-0.4.out
In all cases you need to cd to the eSpeak source directory before running this. en_extra will be overwritten. Converter will also read your ~/.festivalrc if it exists. (You can later incrementally update from ~/.festivalrc using the --convert option; the entries from the system dictionary will not be overwritten in this case.) Specify --without-check to bypass checking the existing eSpeak pronunciation for OALD entries (much faster, but makes a larger file and in some cases compromises the pronunciation quality)."""
try: festival_location=sys.argv[i+1]
except IndexError: return "Error: --festival-dictionary-to-espeak must be followed by the location of the festival OALD file (see help text)"
try: open(festival_location)
except: return "Error: The specified OALD location '"+festival_location+"' could not be opened"
try: open("en_list")
except: return "Error: en_list could not be opened (did you remember to cd to the eSpeak dictsource directory first?"
convert_system_festival_dictionary_to_espeak(festival_location,not '--without-check' in sys.argv,not os.system("test -e ~/.festivalrc"))
def mainopt_syllables(i):
"""[<words>]
Attempt to break 'words' into syllables for music lyrics (uses espeak to determine how many syllables are needed)"""
# Normally, espeak -x output can't be relied on to always put a space between every input word. So we put a newline after every input word instead. This might affect eSpeak's output (not recommended for mainopt_phones, hence no 'interleave words and phonemes' option), but it should be OK for just counting the syllables. (Also, the assumption that the input words have been taken from song lyrics usefully rules out certain awkward punctuation cases.)
for txt in getInputText(i+1,"word(s)",'maybe'):
words=txt.split()
response = pipeThroughEspeak('\n'.join(words).replace("!","").replace(":",""))
if not '\n' in response.rstrip() and 'command' in response: return response.strip() # 'bad cmd' / 'cmd not found'
rrr = response.split("\n")
print " ".join([hyphenate(word,sylcount(convert(line,"espeak","example"))) for word,line in zip(words,filter(lambda x:x,rrr))])
sys.stdout.flush() # in case piped
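# Example (output is only approximate - the hyphen positions come from
# the hyphenate() heuristic; eSpeak supplies just the syllable count):
#   python lexconvert.py --syllables remember
# might print something like "re-mem-ber"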
def wordSeparator(format):
"""Returns the effective word separator of format (remembering that it defaults to same as phoneme_separator"""
return checkSetting(format,"word_separator",checkSetting(format,"phoneme_separator"," "))
def mainopt_phones2phones(i):
"""*<format1> <format2> [<phonemes in format1>]
Perform a one-off conversion of phonemes from format1 to format2 (format2 can be 'all' if you want)""" # If format1 is 'example' and you don't specify phonemes, we take the words from the example lexicon. But don't say that in the help string because it might confuse the issue about phonemes being optional on the command line and prompted for if not specified and stdin is not piped in all formats other than 'example'.
format1,format2 = sys.argv[i+1],sys.argv[i+2]
if not format1 in lexFormats: return "No such format "+repr(format1)+" (use --formats to see a list of formats)"
if not format2 in lexFormats and not format2=="all": return "No such format "+repr(format2)+" (use --formats to see a list of formats)"
if format1=="example" and len(sys.argv)<=i+3:
if stdin_is_terminal(): txt=""
else: txt=sys.stdin.read() # and it might still be ""
if txt: parseIntoWordsAndClauses(format1,txt)
else: clauses=[[x[1]] for x in getSetting('example','lex_read_function')()]
else: clauses = parseIntoWordsAndClauses(format1,getInputText(i+3,"phonemes in "+format1+" format"))
if format2=="all": formats = sorted(k for k in lexFormats.keys() if not k=="example")
else: formats = [format2]
for format2 in formats:
if len(formats)>1: writeFormatHeader(format2)
sys.stdout.write(checkSetting(format2,"inline_header"))
output_clauses(format2,convert(clauses,format1,format2))
sys.stdout.write(checkSetting(format2,"inline_footer")) ; print
def parseIntoWordsAndClauses(format,phones):
"Returns list of clauses, each of which is a list of words, assuming 'phones' are in format 'format'"
wordSep = checkSetting(format,"word_separator") # don't use wordSeparator() here - we're splitting, not joining, so we don't want it to default to phoneme_separator
clauseSep = checkSetting(format,"clause_separator","\n")
def s(sep):
if sep==" ": return None # " " means ANY whitespace (TODO: document this?)
else: return sep
if clauseSep and type(clauseSep) in [str,unicode]:
clauses = phones.split(s(clauseSep))
else: clauses = [phones]
for i in range(len(clauses)):
if wordSep: clauses[i]=clauses[i].split(s(wordSep))
else: clauses[i] = [clauses[i]]
clauses[i] = filter(lambda x:x, clauses[i])
return filter(lambda x:x,clauses)
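# Example: unicode-ipa sets word_separator=" " and inherits the default
# clause_separator "\n", so
#   parseIntoWordsAndClauses("unicode-ipa", "ab cd\nef")
# returns [['ab', 'cd'], ['ef']]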
def mainopt_mac_uk(i):
"""<from-format> [<text>]
Speak text in Mac OS 10.7+ British voices while using a lexicon converted in from <from-format>. As these voices do not have user-modifiable lexicons, lexconvert must binary-patch your system's master lexicon; this is at your own risk! (Superuser privileges are needed the first time. A backup of the system file is made, and all changes are restored on normal exit but if you force-quit then you might need to restore the backup manually. Text speaking needs to be under lexconvert's control because it usually has to change the input words to make them fit the available space in the binary lexicon.) By default the Daniel voice is used; Emily or Serena can be selected by setting the MACUK_VOICE environment variable."""
# If you have xterm etc, then text will also be printed, with words from the altered lexicon underlined.
fromFormat = sys.argv[i+1]
if not fromFormat in lexFormats: return "No such format "+repr(fromFormat)+" (use --formats to see a list of formats)"
lex = get_macuk_lexicon(fromFormat)
try:
for line in getInputText(i+2,"text",True):
m = MacBritish_System_Lexicon(line,os.environ.get("MACUK_VOICE","Daniel"))
try: m.readWithLex(lex)
finally: m.close()
except KeyboardInterrupt:
sys.stderr.write("Interrupted\n")
class Counter(object):
"A simple class with two static members, count and subcount, for use by the consonant(), vowel() and other() functions"
c=sc=0
def other():
"Used by Phonemes() when creating something that is neither a vowel nor a consonant, e.g. a stress mark"
Counter.c += 1 ; Counter.sc=0 ; return Counter.c
consonants = set() ; mainVowels = set()
def consonant():
"Used by Phonemes() when creating a consonant"
r = other() ; consonants.add(r) ; return r
def vowel():
"Used by Phonemes() when creating a vowel"
r = other() ; mainVowels.add(r) ; return r
def opt_vowel():
"Used by Phonemes() when creating an optional vowel (one that has no warning issued if some format doesn't support it)"
return other()
def variant():
"Used by Phonemes() when creating a variant of the just-defined vowel/consonant/etc"
Counter.sc += 1
while str(Counter.sc).endswith('0'): Counter.sc += 1
return 0, float('%d.%d' % (Counter.c,Counter.sc))
# the 0 is so we can say _, name = variant()
# so as to get some extra indentation
def ifset(var,a,b=""):
"Checks the environment variable var; if it is set (non-empty), return a, otherwise return b. Used in LexFormats to create tables with variations set by the environment."
import os
if os.environ.get(var,""): return a
else: return b
def speakjet(symbol,opcode):
"Special-case function for the Speakjet table"
if ifset('SPEAKJET_BINARY',1):
assert not ifset('SPEAKJET_SYM',1), "Cannot set both SPEAKJET_SYM and SPEAKJET_BINARY"
return chr(opcode)
else: return ifset('SPEAKJET_SYM',symbol,str(opcode))
def makeDic(doc,*args,**kwargs):
"Make a dictionary with a doc string, default-bidirectional mappings and extra settings; see LexFormats for how this is used."
d = {} ; duplicates = set()
for a in args:
assert type(a)==tuple and (len(a)==2 or len(a)==3)
k=a[0]
if k in d: duplicates.add(k)
v=a[1]
assert (type(k) in [str,unicode] and type(v) in [int,float]) or (type(v) in [str,unicode] and type(k) in [int,float]), "Wrong types "+repr(a)+" (did you forget a _, before calling variant() or something?)"
d[k] = v
if len(a)==3: bidir=a[2]
else: bidir=True
if bidir:
# (k,v,True) = both (k,v) and (v,k)
if v in d: duplicates.add(v)
d[v] = k
assert not duplicates, " Duplicate key(s) in "+repr(doc)+": "+", ".join((repr(dup)+"".join(" (="+g+")" for g,val in globals().items() if val==dup)) for dup in sorted(list(duplicates)))+". Did you forget a ,False to suppress bidirectional mapping?" # by the way, Python does not detect duplicate keys in {...} notation - it just lets you overwrite
missing = [l for l in (list(consonants)+list(mainVowels)) if not l in d]
# did_approx = False
if missing and 'approximate_missing' in kwargs:
for miss,approxTo in [
# TODO: put this table somewhere else?
# (If the thing on the right is just 1 item, we could make the thing on the left a variant of it. But that might not be a good idea unless they're really very close, since if it's a variant then the substitution is done without warning even if approximate_missing is not set.)
(a_as_in_ago, [u_as_in_but]),
(a_as_in_air, [e_as_in_them,r]),
(ear, [e_as_in_eat,u_as_in_but]),
(oor_as_in_poor, [close_to_or]), # TODO: ,r?
(a_as_in_ah,[a_as_in_apple]), # this seems to be missing in some American voices (DecTalk, Keynote, SAM); TODO: is this the best approximation we can do?
]:
if miss in missing and all(x in d for x in approxTo):
d[miss]=kwargs.get("phoneme_separator"," ").join(d[x] for x in approxTo)
# did_approx = True
missing.remove(miss)
# if did_approx: doc="(approx.) "+doc # and see also the code in makeVariantDic. Commenting out because this is misleading: the formats where we didn't do a did_approx might also contain approximations of some kind. Incidentally there are some British English voices that need approximate_missing (e.g. Apollo 2)
d[("settings","doc")] = doc
if missing:
import sys ; sys.stderr.write("WARNING: Some non-optional vowels/consonants are missing from "+repr(doc)+"\nThe following are missing: "+", ".join("/".join(g for g,val in globals().items() if val==m) for m in missing)+"\n")
for k,v in kwargs.items(): d[('settings',k)] = v
wsep = d.get(('settings','word_separator'),None)
psep = d.get(('settings','phoneme_separator'),' ')
if not wsep==None: assert not wsep in d, "word_separator duplicates with a key in "+repr(doc)
if not psep==None: assert not psep in d, "phoneme_separator duplicates with a key (did you forget to change the default, or to add a ,False somewhere?) in "+repr(doc)
global lastDictionaryMade ; lastDictionaryMade = d
return d
def makeVariantDic(doc,*args,**kwargs):
"Like makeDic but create a new 'variant' version of the last-made dictionary, modifying some phonemes and settings (and giving it a new doc string) but keeping everything else the same. Any list settings (e.g. cleanup_regexps) are ADDED to by the variant; other settings and phonemes are REPLACED if they are specified in the variant. If you don't want subsequent variants to inherit the changes made by this variant, add noInherit=True to the keyword args."
global lastDictionaryMade
ldmOld = lastDictionaryMade
toUpdate = lastDictionaryMade.copy()
global mainVowels,consonants
oldV,oldC = mainVowels,consonants
mainVowels,consonants = [],[] # so makeDic doesn't complain if some vowels/consonants are missing
if 'noInherit' in kwargs:
noInherit = kwargs['noInherit']
del kwargs['noInherit']
else: noInherit = False
d = makeDic(doc,*args,**kwargs)
if noInherit: lastDictionaryMade = ldmOld
mainVowels,consonants = oldV,oldC
# if toUpdate[("settings","doc")].startswith("(approx.) ") and not d[("settings","doc")].startswith("(approx.) "): d[("settings","doc")]="(approx.) "+d[("settings","doc")] # TODO: always?
for k,v in toUpdate.items():
if type(v)==list and k in d: d[k] = v+d[k]
toUpdate.update(d) ; return toUpdate
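# Illustrative sketch (added commentary, not part of the original lexconvert):
# a miniature table in the shape makeDic expects. The format name and symbols
# are hypothetical; a_as_in_ah and a_as_in_ago are the module-level phoneme
# constants created above. This is defined for reading only and never called
# by the program; calling it would warn on stderr about all the phonemes the
# two-vowel table omits, which is exactly what makeDic's 'missing' check does.
def _demo_makeDic():
    return makeDic("demo 0.1 (hypothetical two-vowel format)",
        ('A:', a_as_in_ah), # bidirectional: 'A:' -> a_as_in_ah and back
        ('@', a_as_in_ago),
        ('a', a_as_in_ah, False), # read-only variant spelling: never written out
        phoneme_separator=' ')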
def getSetting(formatName,settingName):
"Gets a setting from lexFormats, exception if not there"
return lexFormats[formatName][('settings',settingName)]
def checkSetting(formatName,settingName,default=""):
"Gets a setting from lexFormats, default if not there"
return lexFormats[formatName].get(('settings',settingName),default)
import commands,sys,re,os
cached_sourceName,cached_destName,cached_dict = None,None,None
def make_dictionary(sourceName,destName):
"Uses lexFormats to make a mapping dictionary from a particular source format to a particular dest format, and also sets module variables for that particular conversion (TODO: put those module vars into an object in case someone wants to use this code in a multithreaded server)"
global cached_sourceName,cached_destName,cached_dict
if (sourceName,destName) == (cached_sourceName,cached_destName): return cached_dict
source = lexFormats[sourceName]
dest = lexFormats[destName]
d = {}
global dest_consonants ; dest_consonants = set()
global dest_syllable_sep ; dest_syllable_sep = dest.get(syllable_separator,"")
global implicit_vowel_before_NL
implicit_vowel_before_NL = None
for k,v in source.items():
if type(k)==tuple: continue # settings
if type(v) in [str,unicode]: continue # (num->string entries are for converting IN to source; we want the string->num entries for converting out)
if not v in dest: v = int(v) # (try the main version of a variant)
if not v in dest: continue # (haven't got it - will have to ignore or break into parts)
d[k] = dest[v]
if int(v) in consonants: dest_consonants.add(d[k])
if int(v)==e_as_in_herd and (not implicit_vowel_before_NL or v==int(v)): # TODO: or u_as_in_but ? used by festival and some other synths before words ending 'n' or 'l' (see usage of implicit_vowel_before_NL later)
implicit_vowel_before_NL = d[k]
cached_sourceName,cached_destName,cached_dict=sourceName,destName,d
return d
warnedAlready = set()
def convert(pronunc,source,dest):
"Convert pronunc from source to dest. pronunc can be a string or a list; if a list then we'll recurse on each of the list elements and return a new list (this is meant for batch-converting clauses etc)"
if source==dest: return pronunc # essential for --try experimentation with codes not yet supported by lexconvert
if type(pronunc)==list: return [convert(p,source,dest) for p in pronunc]
func = checkSetting(source,'cvtOut_func')
if func: pronunc=func(pronunc)
for s,r in checkSetting(source,'cvtOut_regexps'):
pronunc=re.sub(s,r,pronunc)
ret = [] ; toAddAfter = None
dictionary = make_dictionary(source,dest)
maxLen=max(len(l) for l in dictionary.keys())
debugInfo=""
separator = checkSetting(dest,'phoneme_separator',' ')
safe_to_drop = checkSetting(source,"safe_to_drop_characters")
while pronunc:
for lettersToTry in range(maxLen,-1,-1):
if not lettersToTry:
                if safe_to_drop==True: pass
                elif ((not safe_to_drop) or not pronunc[0] in safe_to_drop) and not (pronunc[0],debugInfo) in warnedAlready:
                    warnedAlready.add((pronunc[0],debugInfo))
                    sys.stderr.write("Warning: ignoring "+source+" character "+repr(pronunc[0])+debugInfo+" (unsupported in "+dest+")\n")
                pronunc=pronunc[1:] # ignore
elif dictionary.has_key(pronunc[:lettersToTry]):
debugInfo=" after "+pronunc[:lettersToTry]
toAdd=dictionary[pronunc[:lettersToTry]]
isStressMark=(toAdd and toAdd in [lexFormats[dest].get(primary_stress,''),lexFormats[dest].get(secondary_stress,''),lexFormats[dest].get(syllable_separator,'')])
if isStressMark and not checkSetting(dest,"stress_comes_before_vowel"):
if checkSetting(source,"stress_comes_before_vowel"): toAdd, toAddAfter = "",toAdd # move stress marks from before vowel to after
else: # stress is already after, but:
# With Cepstral synth (and kana-approx), stress mark should be placed EXACTLY after the vowel and not any later. Might as well do this for others also.
r=len(ret)-1
while ret[r] in dest_consonants or ret[r].endswith("*added"): r -= 1 # (if that raises IndexError then the input had a stress mark before any vowel) ("*added" condition is there so that implicit vowels don't get the stress)
ret.insert(r+1,toAdd) ; toAdd=""
elif isStressMark and not checkSetting(source,"stress_comes_before_vowel"): # it's a stress mark that should be moved from after the vowel to before it
i=len(ret)
while i and (ret[i-1] in dest_consonants or ret[i-1].endswith("*added")): i -= 1
if i: i-=1
ret.insert(i,toAdd)
if dest_syllable_sep: ret.append(dest_syllable_sep) # (TODO: this assumes stress marks are at end of syllable rather than immediately after vowel; correct for Festival; check others; probably a harmless assumption though; mac-uk is better with syllable separators although espeak basically ignores them)
toAdd = ""
# attempt to sort out the festival dictionary's (and other's) implicit_vowel_before_NL
elif implicit_vowel_before_NL and ret and ret[-1] and toAdd in ['n','l'] and ret[-1] in dest_consonants: ret.append(implicit_vowel_before_NL+'*added')
elif len(ret)>2 and ret[-2].endswith('*added') and toAdd and not toAdd in dest_consonants and not toAdd==dest_syllable_sep: del ret[-2]
if toAdd:
# Add it, but if toAdd is multiple phonemes, try to put toAddAfter after the FIRST phoneme
if separator: toAdd=toAdd.split(separator)
else: toAdd = [toAdd] # TODO: won't work for formats that don't have a phoneme separator (doesn't really matter for eSpeak though)
ret.append(toAdd[0])
if toAddAfter and not toAdd[0] in dest_consonants:
ret.append(toAddAfter)
toAddAfter=None
ret += toAdd[1:]
pronunc=pronunc[lettersToTry:]
break
if toAddAfter: ret.append(toAddAfter)
if ret and ret[-1]==dest_syllable_sep: del ret[-1] # spurious syllable separator at end
ret=separator.join(ret).replace('*added','')
for s,r in checkSetting(dest,'cleanup_regexps'):
ret=re.sub(s,r,ret)
func = checkSetting(dest,'cleanup_func')
if func: return func(ret)
else: return ret
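# Illustrative sketch (added commentary, not part of the original lexconvert):
# a minimal convert() call between two real format names. festival's 'aa' and
# eSpeak's 'A:' both name the "ah" vowel in the tables above, so this likely
# prints A: (not asserted, since the tables' cleanup rules could adjust it).
def _demo_convert():
    print convert("aa", "festival", "espeak")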
def unicode_preprocess(pronunc):
"Special-case cvtOut_func for unicode-ipa: tries to catch \\uNNNN etc"
if "\\u" in pronunc and not '"' in pronunc: # maybe \uNNNN copied from Gecko on X11, can just evaluate it to get the unicode
# (NB make sure to quote the \'s if pasing in on the command line)
try: pronunc=eval('u"'+pronunc+'"')
except: pass
else: # see if it makes sense as utf-8
try: pronunc = pronunc.decode('utf-8')
except: pass
return pronunc
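# Worked example (added commentary, not part of the original lexconvert):
# unicode_preprocess turns a literal backslash-u escape into the character it
# names, so IPA copied as escapes still converts; plain ASCII falls through
# the utf-8 branch unchanged.
def _demo_unicode_preprocess():
    assert unicode_preprocess(r"\u0259") == u"\u0259" # schwa
    assert unicode_preprocess("abc") == u"abc"
    return True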
def ascii_braille_to_unicode(a):
"Special-case cleanup_func for braille-ipa (set by braille-ipa if BRAILLE_UNICODE is set). Converts Braille ASCII to Unicode dot patterns."
d=dict(zip(list(" A1B'K2L@CIF/MSP\"E3H9O6R^DJG>NTQ,*5<-U8V.%[$+X!&;:4\\0Z7(_?W]#Y)="),[unichr(c) for c in range(0x2800,0x2840)]))
return u''.join(d.get(c,c) for c in list(a))
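# Worked example (added commentary, not part of the original lexconvert):
# the 64-character string above is standard Braille ASCII in dot-pattern
# order, so 'A' (dots-1) maps to U+2801 and space to the blank cell U+2800.
def _demo_braille():
    assert ascii_braille_to_unicode("A") == unichr(0x2801)
    assert ascii_braille_to_unicode(" ") == unichr(0x2800)
    return True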
def hiragana_to_katakana(u):
"Special-case cleanup_func for kana-approx; converts all hiragana characters in unicode string 'u' into katakana if KANA_TYPE is set to anything beginning with a 'k'"
assert type(u)==unicode
if not os.environ.get("KANA_TYPE","").lower().startswith("k"): return u
u = list(u)
for i in xrange(len(u)):
if 0x3041 <= ord(u[i]) <= 0x3096:
u[i]=unichr(ord(u[i])+0x60)
return u"".join(u)
def espeak_probably_right_already(existing_pronunc,new_pronunc):
"""Used by convert_system_festival_dictionary_to_espeak to compare a "new" pronunciation with eSpeak's existing pronunciation. As the transcription from OALD to eSpeak is only approximate, it could be that our new pronunciation is not identical to the existing one but the existing one is actually correct; try to detect when this happens by checking if the pronunciations are the same after some simplifications."""
if existing_pronunc==new_pronunc: return True
def simplify(pronunc): return \
pronunc.replace(";","").replace("%","") \
.replace("a2","@") \
.replace("3","@") \
.replace("L","l") \
.replace("I2","i:") \
.replace("I","i:").replace("i@","i:@") \
.replace(",","") \
.replace("s","z") \
.replace("aa","A:") \
.replace("A@","A:") \
.replace("O@","O:") \
.replace("o@","O:") \
.replace("r-","r")
# TODO: rewrite @ to 3 whenever not followed by a vowel?
if simplify(existing_pronunc)==simplify(new_pronunc): return True # almost the same, and festival @/a2 etc seems to be a bit ambiguous so leave it alone
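# Worked example (added commentary, not part of the original lexconvert):
# simplify() collapses near-equivalent spellings, so an existing eSpeak
# pronunciation differing only by I vs i: counts as "probably right already"
# and is kept out of en_extra.
def _demo_probably_right():
    assert espeak_probably_right_already("mI", "mi:") # differ only by I/i:
    assert not espeak_probably_right_already("mA:", "mi:") # a real difference
    return True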
def parse_festival_dict(festival_location):
"For OALD; yields word,part-of-speech,pronunciation"
for line in open(festival_location).xreadlines():
line=line.strip()
if "((pos" in line: line=line[:line.index("((pos")]
if line.startswith('( "'): line=line[3:]
line=line.replace('"','').replace('(','').replace(')','')
try:
word, pos, pronunc = line.split(None,2)
except ValueError: continue # malformed line
if pos not in ['n','v','a','cc','dt','in','j','k','nil','prp','uh']: continue # two or more words
yield (word.lower(), pos, pronunc)
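# Worked example (added commentary, not part of the original lexconvert):
# after the quote/bracket stripping above, each usable line splits into word,
# part-of-speech and festival phonemes. The dictionary line below is
# hypothetical; real OALD entries have the same shape.
def _demo_parse_festival_dict():
    import tempfile
    f = tempfile.NamedTemporaryFile(suffix=".scm", delete=False)
    f.write('( "hello" n h @ l ou 1)\n') ; f.close()
    word,pos,pronunc = list(parse_festival_dict(f.name))[0]
    assert (word,pos,pronunc.strip()) == ("hello","n","h @ l ou 1")
    os.unlink(f.name) ; return True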
class Message(Exception): pass
def convert_system_festival_dictionary_to_espeak(festival_location,check_existing_pronunciation,add_user_dictionary_also):
"See mainopt_festival_dictionary_to_espeak"
os.system("mv en_extra en_extra~") # start with blank 'extra' dictionary
if check_existing_pronunciation: os.system("espeak --compile=en") # so that the pronunciation we're checking against is not influenced by a previous version of en_extra
outFile=open("en_extra","w")
print "Reading dictionary lists"
wordDic = {} ; ambiguous = {}
    for line in filter(lambda x:x.split() and not re.match(r'^[a-z]* *\$',x),open("en_list").read().split('\n')): ambiguous[line.split()[0]]=ambiguous[line.split()[0]+'s']=True # this stops the code below from overriding anything already in espeak's en_list. If you take this out then you need to think carefully about words like "a", "the" etc.
for word,pos,pronunc in parse_festival_dict(festival_location):
pronunc=pronunc.replace("i@ 0 @ 0","ii ou 2 ").replace("i@ 0 u 0","ii ou ") # (hack for OALD's "radio"/"video"/"stereo"/"embryo" etc)
pronunc=pronunc.replace("0","") # 0's not necessary, and OALD sometimes puts them in wrong places, confusing the converter
if word in ['mosquitoes']: continue # OALD bug (TODO: any others?)
if wordDic.has_key(word):
ambiguous[word] = True
del wordDic[word] # better not go there
if not ambiguous.has_key(word):
wordDic[word] = (pronunc, pos)
toDel = []
if check_existing_pronunciation:
print "Checking existing pronunciation"
proc=os.popen("espeak -q -x -v en-rp > /tmp/.pronunc 2>&1","w")
wList = []
progressCount=0 ; oldPercent=-1
for word,(pronunc,pos) in wordDic.items():
if check_existing_pronunciation:
percent = int(progressCount*100/len(wordDic))
if not percent==oldPercent: sys.stdout.write(str(percent)+"%\r") ; sys.stdout.flush()
oldPercent=percent
progressCount += 1
if not re.match("^[A-Za-z]*$",word): # (some versions of eSpeak also OK with "-", but not all)
# contains special characters - better not go there
toDel.append(word)
elif word.startswith("plaque") or word in "friday saturday sunday tuesday thursday yesterday".split():
# hack to accept eSpeak's pl'ak instead of pl'A:k - order was reversed in the March 2009 draft
toDel.append(word)
elif word[-1]=="s" and wordDic.has_key(word[:-1]):
# unnecessary plural (espeak will pick up on them anyway)
toDel.append(word)
elif word.startswith("year") or "quarter" in word: toDel.append(word) # don't like festival's pronunciation of those (TODO: also 'memorial' why start with [m'I])
elif check_existing_pronunciation:
proc.write(word+"\n")
proc.flush() # so the progress indicator works
wList.append(word)
if check_existing_pronunciation:
proc.close() ; print
oldPronDic = {}
for k,v in zip(wList,open("/tmp/.pronunc").read().split("\n")): oldPronDic[k]=v.strip().replace(" ","")
for w in toDel: del wordDic[w]
print "Doing the conversion"
lines_output = 0
total_lines = 0
not_output_because_ok = []
items = wordDic.items() ; items.sort() # necessary because of the hacks below which check for the presence of truncated versions of the word (want to have decided whether or not to output those truncated versions before reaching the hacks)
for word,(pronunc,pos) in items:
total_lines += 1
new_e_pronunc = convert(pronunc,"festival","espeak")
if new_e_pronunc.count("'")==2 and not '-' in word: new_e_pronunc=new_e_pronunc.replace("'",",",1) # if 2 primary accents then make the first one a secondary (except on hyphenated words)
# TODO if not en-rp? - if (word.endswith("y") or word.endswith("ie")) and new_e_pronunc.endswith("i:"): new_e_pronunc=new_e_pronunc[:-2]+"I"
unrelated_word = None
if check_existing_pronunciation: espeakPronunc = oldPronDic.get(word,"")
else: espeakPronunc = ""
if word[-1]=='e' and wordDic.has_key(word[:-1]): unrelated_word, espeakPronunc = word[:-1],"" # hack: if word ends with 'e' and dropping the 'e' leaves a valid word that's also in the dictionary, we DON'T want to drop this word on the grounds that espeak already gets it right, because if we do then adding 's' to this word may cause espeak to add 's' to the OTHER word ('-es' rule).
if espeak_probably_right_already(espeakPronunc,new_e_pronunc):
not_output_because_ok.append(word)
continue
if not unrelated_word: lines_output += 1
outFile.write(word+" "+new_e_pronunc+" // from Festival's ("+pronunc+")")
if espeakPronunc: outFile.write(", not [["+espeakPronunc+"]]")
elif unrelated_word: outFile.write(" (here to stop espeak's affix rules getting confused by Festival's \""+unrelated_word+"\")")
outFile.write("\n")
print "Corrected(?) %d entries out of %d" % (lines_output,total_lines)
if add_user_dictionary_also: convert_user_lexicon("festival","espeak",outFile)
outFile.close()
os.system("espeak --compile=en")
if not_output_because_ok:
print "Checking for unwanted side-effects of those corrections" # e.g. terrible as Terr + ible, inducing as in+Duce+ing
proc=os.popen("espeak -q -x -v en-rp > /tmp/.pronunc 2>&1","w")
progressCount = 0
for w in not_output_because_ok:
proc.write(w+"\n") ; proc.flush()
percent = int(progressCount*100/len(not_output_because_ok))
if not percent==oldPercent: sys.stdout.write(str(percent)+"%\r") ; sys.stdout.flush()
oldPercent = percent
progressCount += 1
proc.close()
outFile=open("en_extra","a") # append to it
for word,pronunc in zip(not_output_because_ok,open("/tmp/.pronunc").read().split("\n")):
pronunc = pronunc.strip().replace(" ","")
if not pronunc==oldPronDic[word] and not espeak_probably_right_already(oldPronDic[word],pronunc):
outFile.write(word+" "+oldPronDic[word]+" // (undo affix-side-effect from previous words that gave \""+pronunc+"\")\n")
outFile.close()
os.system("espeak --compile=en")
return not_output_because_ok
def read_user_lexicon(fromFormat):
"Calls the appropriate lex_read_function, opening lex_filename first if supplied"
readFunction = checkSetting(fromFormat,"lex_read_function")
if not readFunction: raise Message("Reading from '%s' lexicon file not yet implemented (no lex_read_function); try using --phones or --phones2phones options instead" % (fromFormat,))
try:
lexFilename = getSetting(fromFormat,"lex_filename")
lexfile = open(lexFilename)
if not os.environ.get("LEXCONVERT_OMIT_READING_FROM",""): print "Reading from",lexFilename # TODO: document LEXCONVERT_OMIT_READING_FROM (might be useful for the --mac-uk option)
except KeyError: lexfile = None # lex_read_function without lex_filename is allowed, if the read function can take null param and fetch the lexicon itself
except IOError: raise Message(fromFormat+"'s lexicon is expected to be in a file called "+replHome(lexFilename)+" which could not be read - please fix and try again")
return readFunction(lexfile)
def replHome(fname):
"Format fname for printing, substituting ~ for HOME if appropriate"
h = os.environ.get('HOME','')
if h and fname.startswith(h+os.sep):
return "~"+fname[len(h):]
else: return fname
def get_macuk_lexicon(fromFormat):
"Converts lexicon from fromFormat and returns a list suitable for MacBritish_System_Lexicon's readWithLex"
return [(word,convert(pronunc,fromFormat,"mac-uk")) for word, pronunc in read_user_lexicon(fromFormat)]
def convert_user_lexicon(fromFormat,toFormat,outFile):
"See mainopt_convert"
lex = read_user_lexicon(fromFormat)
lex_header = checkSetting(toFormat,"lex_header")
if type(lex_header)==str: outFile.write(lex_header)
else: lex_header(outFile)
entryFormat=getSetting(toFormat,"lex_entry_format")
wordCase=checkSetting(toFormat,"lex_word_case")
for word, pronunc in lex:
pronunc = convert(pronunc,fromFormat,toFormat)
if type(pronunc)==unicode: pronunc=pronunc.encode('utf-8')
if wordCase=="upper": word=word.upper()
elif wordCase=="lower": word=word.lower()
outFile.write(entryFormat % (word,pronunc))
footer = checkSetting(toFormat,"lex_footer")
if type(footer)==str: outFile.write(footer)
else: footer(outFile)
def bbcMicro_partPhonemeCount(pronunc):
"""Returns the number of 'part phonemes' (at least that's what I'm calling them) for the BBC Micro phonemes in pronunc. The *SPEAK command cannot take more than 117 part-phonemes at a time before saying "Line too long", and in some cases it takes less than that (I'm not sure why); 115 is a safer limit."""
partCount = 0 ; pronunc0 = pronunc
while pronunc:
found = 0
for p in ' ,AA,AE,AH,AI,AO,AW,AY,B,CH,CT,DH,DUX,D,EE,EH,ER,F,G,/H,IH,IX,IY,J,K,L,M,NX,N,OW,OL,OY,O,P,R,SH,S,TH,T,UH,/UL,/U,UW,UX,V,W,Y,ZH,Z'.split(','): # phonemes and space count, but pitch numbers do not count
if pronunc.startswith(p):
partCount += {
# *SPEAK can take 117 of most single-letter phonemes, or 116 (limited by the 232+6-character input limit) of most 2-letter phonemes
'AW':2,'IY':2,'OW':2,'OL':2,'UW':2,'/UL':2, # *SPEAK can take 58 of these
'DUX':3,'AY':3,'CH':3,'J':3,'OY':3, # *SPEAK can take 39 of these
'CT':4, # *SPEAK can take 29 of these
}.get(p,1)
pronunc=pronunc[len(p):] ; found=1 ; break
if not found:
assert pronunc[0] in '12345678',"Unrecognised BBC Micro phoneme at "+pronunc+" in "+pronunc0
pronunc=pronunc[1:]
return partCount
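# Worked example (added commentary, not part of the original lexconvert):
# spaces and phonemes count towards the *SPEAK limit but pitch digits do not,
# and some multi-letter phonemes cost several 'part phonemes'.
def _demo_partPhonemeCount():
    assert bbcMicro_partPhonemeCount("K AA2") == 3 # K=1, space=1, AA=1, '2' free
    assert bbcMicro_partPhonemeCount("AY") == 3 # one phoneme costing 3
    return True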
def markup_inline_word(format,pronunc):
"Returns pronunc with any necessary markup for putting it in a text (using the inline_format setting)"
if type(pronunc)==unicode: pronunc=pronunc.encode('utf-8') # UTF-8 output - ok for pasting into Firefox etc *IF* the terminal/X11 understands utf-8 (otherwise redirect to a file, point the browser at it, and set encoding to utf-8, or try --convert'ing which will o/p HTML)
format = checkSetting(format,"inline_format","%s")
if type(format) in [str,unicode]:
if type(format)==unicode: format=format.encode('utf-8') # see above
return format % pronunc
else: return format(pronunc)
def markup_doubleTalk_word(pronunc):
"Special-case function set as inline_format in doubletalk (checks environment variables for command code)"
cmd = os.environ.get('DTALK_COMMAND_CODE','')
if cmd: cmd=chr(int(cmd))
else: cmd = '*'
return "%sD%s%sT" % (cmd,pronunc,cmd)
def markup_bbcMicro_word(pronunc):
"Special-case function set as inline_format in bbcmicro. Begins a new *SPEAK command when necessary. See also write_bbcmicro_phones."
global bbc_partsSoFar,bbc_charsSoFar
thisPartCount = bbcMicro_partPhonemeCount(pronunc)
if (not bbc_partsSoFar or bbc_partsSoFar+thisPartCount > 115) or (not bbc_charsSoFar or bbc_charsSoFar+len(pronunc) > 238): # 238 is max len of BBC BASIC prompt (both the immediate prompt and the one with line number supplied by AUTO, in both BASIC II and BASIC IV); re other limit see bbcMicro_partPhonemeCount
if bbc_charsSoFar: r="\n"
else: r=""
cmd="*SPEAK" # (could add a space if want to make it more readable, at the expense of an extra keystroke in the paste buffer; by the way, when not using the ROM version you must use *SPEAK not OS.("SPEAK"), at least on a Model B; seems OSCLI doesn't go through quite the same vectors as star)
bbc_charsSoFar = len(cmd)+len(pronunc)+1 # +1 for the space that'll be after this word if we don't start a new line
bbc_partsSoFar = thisPartCount+1 # ditto
return r+cmd+pronunc
else:
bbc_charsSoFar += len(pronunc)+1
bbc_partsSoFar += thisPartCount+1
return pronunc
bbc_partsSoFar=bbc_charsSoFar=0
def sylcount(example_format_festival):
"""Tries to count the number of syllables in a Festival string (see mainopt_syllables). We treat @ as counting the same as the previous syllable (e.g. "fire", "power"), but this can vary in different songs, so the result will likely need a bit of proofreading."""
count = inVowel = maybeCount = hadAt = 0
festival = example_format_festival.split() # no brackets, emphasis by vowels, but spaces between each syllable
for phone,i in zip(festival,range(len(festival))):
if phone[0] in "aeiou": inVowel=0 # unconditionally start new syllable
if phone[0] in "aeiou@12":
if not inVowel: count += 1
elif phone[0]=="@" and not hadAt: maybeCount = 1 # (e.g. "loyal", but NOT '1', e.g. "world")
if "@" in phone: hadAt = 1 # for words like "cheerful" ("i@ 1 @" counts as one)
inVowel = 1
if phone[0]=="@" and i>=3 and festival[i-2:i]==["ai","1"] and festival[i-3] in ["s","h"]: # special rule for higher, Messiah, etc - like "fire" but usually 2 syllables
maybeCount = 0 ; count += 1
else:
if not phone[0] in "drz": count += maybeCount # not 'r/z' e.g. "ours", "fired" usually 1 syllable in songs, "desirable" usually 4 not 5
# TODO steward? y u@ 1 d but usally 2 syllables
inVowel = maybeCount = hadAt = 0
return count
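# Worked example (added commentary, not part of the original lexconvert):
# a festival-style "h @ l ou" (roughly "hello") counts 2 syllables, while a
# trailing @ after "ai 1" ("fire"-like words) folds into the previous one.
def _demo_sylcount():
    assert sylcount("h @ l ou") == 2
    assert sylcount("f ai 1 @") == 1
    return True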
def hyphenate(word,numSyls):
"See mainopt_syllables"
orig = word
try: word,isu8 = word.decode('utf-8'),True
except: isu8 = False
pre=[] ; post=[]
while word and not 'a'<=word[0].lower()<='z':
pre.append(word[0]) ; word=word[1:]
while word and not 'a'<=word[-1].lower()<='z':
post.insert(0,word[-1]) ; word=word[:-1]
if numSyls>len(word): return orig # probably numbers or something
l = int((len(word)+numSyls/2)/numSyls) ; syls = []
for i in range(numSyls):
if i==numSyls-1: syls.append(word[i*l:])
else: syls.append(word[i*l:(i+1)*l])
if len(syls)>1:
if len(syls[-1])>2 and syls[-1][0]==syls[-1][1] and not syls[-1][0].lower() in "aeiou":
# repeated consonant at start - put one on previous
syls[-2] += syls[-1][0]
syls[-1] = syls[-1][1:]
elif ((len(syls[-2])>2 and syls[-2][-1]==syls[-2][-2] and not syls[-2][-1].lower() in "aeiou") \
or (syls[-1] and syls[-1][0].lower() in "aeiouy" and len(syls[-2])>2)) \
and filter(lambda x:x.lower() in "aeiou",list(syls[-2][:-1])):
# repeated consonant at end - put one on next
# or vowel on right: move a letter over (sometimes the right thing to do...)
# (unless doing so leaves no vowels)
syls[-1] = syls[-2][-1]+syls[-1]
syls[-2] = syls[-2][:-1]
word = ''.join(pre)+"- ".join(syls)+''.join(post)
if isu8: word=word.encode('utf-8')
return word
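# Worked example (added commentary, not part of the original lexconvert):
# hyphenate slices the word into roughly equal parts, then nudges doubled
# consonants so the break falls between them.
def _demo_hyphenate():
    assert hyphenate("running", 2) == "run- ning"
    return True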
def macSayCommand():
"""Return the environment variable SAY_COMMAND if it is set and if it is non-empty, otherwise return "say".
E.g. SAY_COMMAND="say -o file.aiff" (TODO: document this in the help text?)
In Gradint you can set (e.g. if you have a ~/.festivalrc) extra_speech=[("en","python lexconvert.py --mac-uk festival")] ; extra_speech_tofile=[("en",'echo %s | SAY_COMMAND="say -o /tmp/said.aiff" python lexconvert.py --mac-uk festival && sox /tmp/said.aiff /tmp/said.wav',"/tmp/said.wav")]"""
s = os.environ.get("SAY_COMMAND","")
if s: return s
else: return "say"
def stdin_is_terminal():
"Returns True if it seems the standard input is connected to a terminal (rather than piped from a file etc)"
return (not hasattr(sys.stdin,"isatty")) or sys.stdin.isatty()
def getInputText(i,prompt,as_iterable=False):
"""Gets text either from the command line or from standard input. Issue prompt if there's nothing on the command line and standard input is connected to a tty instead of a pipe or file. If as_iterable, return an iterable object over the lines instead of reading and returning all text at once. If as_iterable=='maybe', return the iterable but if not reading from a tty then read everything into one item."""
txt = ' '.join(sys.argv[i:])
if txt:
if as_iterable=='maybe': return [txt]
elif as_iterable: return txt.split('\n')
else: return txt
if stdin_is_terminal(): sys.stderr.write("Enter "+prompt+" (EOF when done)\n")
elif as_iterable=='maybe': return [sys.stdin.read()]
if as_iterable: return my_xreadlines()
else:
try: return sys.stdin.read()
except KeyboardInterrupt: raise SystemExit
def my_xreadlines():
"On some platforms this might be a bit more responsive than sys.stdin.xreadlines"
while True:
# sys.stderr.write('got here\n')
try: yield raw_input()
except EOFError: return
except KeyboardInterrupt: raise SystemExit
def output_clauses(format,clauses):
"Writes out clauses and words in format 'format' (clauses is a list of lists of words in the phones of 'format'). By default, calls markup_inline_word and join as appropriate. If however the format's 'clause_separator' has been set to a special case, calls that."
if checkSetting(format,"output_is_binary") and hasattr(sys.stdout,"isatty") and sys.stdout.isatty():
print "This is a binary format - not writing to terminal.\nPlease direct output to a file or pipe."
return
clause_sep = checkSetting(format,"clause_separator","\n")
if type(clause_sep) in [str,unicode]: sys.stdout.write(clause_sep.join(wordSeparator(format).join(markup_inline_word(format,word) for word in clause) for clause in clauses))
else: clause_sep(clauses)
def write_bbcmicro_phones(clauses):
"""Special-case function set as clause_separator in bbcmicro format. Must be a special case because it needs to track any extra keystrokes to avoid "Line too long". And while we're at it, we might as well start a new *SPEAK command with each clause, using the natural brief delay between commands; this should minimise the occurrence of additional delays in arbitrary places. Also calls print_bbc_warnings"""
totalKeystrokes = 0 ; lines = 0
for clause in clauses:
global bbc_charsSoFar ; bbc_charsSoFar=0
l=" ".join([markup_inline_word("bbcmicro",word) for word in clause])
print l.replace(" \n","\n")
totalKeystrokes += len(l)+1 ; lines += 1
print_bbc_warnings(totalKeystrokes,lines)
def print_bbc_warnings(keyCount,lineCount):
"Print any relevant size warnings regarding sending 'keyCount' keys in 'lineCount' lines to the BBC Micro"
sys.stdout.flush() # try to keep in sync if someone's doing 2>&1 | less
limits_exceeded = [] ; severe=0
if keyCount >= 32768:
severe=1 ; limits_exceeded.append("BeebEm 32K keystroke limit") # At least in version 3, the clipboard is defined in beebwin.h as a char of size 32768 and its bounds are not checked. Additionally, if you script a second paste before the first has finished (or if you try to use BeebEm's Copy command) then the first paste will be interrupted. So if you really want to make BeebEm read more then I suggest setting a printer destination file, putting a VDU 2,10,3 after each batch of commands, and waiting for that \n to appear in that printer file before sending the next batch, or perhaps write a set of programs to a disk image and have them CHAIN each other or whatever.
shadow_himem=0x8000 # if using a 'shadow mode' on the Master/B+/Integra-B (modes 128-135, which leave all main RAM free)
mode7_himem=0x7c00 # (40x25 characters = 1000 bytes, by default starting at 7c00 with 24 bytes spare at the top, but the scrolling system uses the full 1024 bytes and can tell the video controller to start rendering at any one of them; if you get Jeremy Ruston's book and program the VIDC yourself then you could fix it at 7c18 if you really want, or just set HIMEM=&8000 and don't touch the screen, but that doesn't give you very much more room)
default_speech_loc=0x5500
overhead_per_program_line = 4
for page,model in [
(0x1900,"Model B"), # with Acorn DFS (a reasonable assumption although alternate DFS ROMs are different)
(0xE00,"Master")]: # (the Master has 8k of special paged-in "filing system RAM", so doesn't need 2816 bytes of main RAM for DFS)
top = page+keyCount+lineCount*(overhead_per_program_line-1)+2 # the -1 is because keyCount includes a carriage return at the end of each line
if model=="Master": x=" (use Speech's Sideways RAM version instead, e.g. *SRLOAD SP8000 8000 7 and reset, but sound quality might be worse)" # I don't know why but SP8000 can play higher and more distorted than SPEECH, at least on emulation (and changing the emulation speed doesn't help, because that setting, at least in BeebEm3, just controls extra usleep every frame; it doesn't actually slow down the 6502 *between* frames; anyway timing of sound changes is done by CyclesToSamples stuff in beebsound.cc's SoundTrigger). If on the Master you go into View (*WORD) and then try SP8000, it plays _lower_ than *SPEECH (even if you do *BASIC first) and *SAY can corrupt a View document; ViewSheet (*SHEET) doesn't seem to have this effect; neither does *TERMINAL but *SAY can confuse the terminal.
# Re bank numbers, by default banks 4 to 7 are Sideways RAM (4*16k=64k) and I suppose filling up from 7 makes sense because banks 8-F are ROMs (ANFS,DFS,ViewSheet,Edit,BASIC,ADFS,View,Terminal; OS is a separate 16k so there's scope for 144k of supplied ROM). Banks 0-3 are ROM expansion slots. The "128" in the name "Master 128" comes from 32k main RAM, 64k Sideways RAM, 20k shadow RAM (for screen modes 128-135), 4k OS "private RAM" (paged on top of 8000-8FFF) and 8k filing system RAM (paged on top of C000-DFFF) = 128k. Not sure what happened on the B+.
# By the way BeebEm's beebsound.cc also shows us why SOUND was always out of tune especially in the higher pitches. The 16-bit freqval given to the chip is 125000/freq and must be an integer, so the likely temperament in cents for non-PCM is given by [int(math.log(125000.0/math.ceil(125000/freq)/freq,2**(1.0/1200))) for freq in [440*((2**(1.0/12))**semi) for semi in range(-12*3+2,12*2+6)]] (the actual temperament will depend on the OS's implementation of mapping SOUND pitch values to freqval's, unless you program the chip directly, but this list is indicative and varies over 10% in the top 2 octaves)
# Some other ROMs (e.g. Alan Blundell's "Informant" 1989) seem to result in a crash after the *SPEECH and/or *SPEAK commands complete, at least in some emulator configurations; this may or may not be resolved via timing adjustments or adjustments in the ROM order; not sure exactly what the problem is
else: x=" (Speech program will be overwritten unless relocated)" # (could use Sideways RAM for it instead if you have it fitted, see above)
if top > default_speech_loc: limits_exceeded.append("%s TOP=&%X limit%s" % (model,default_speech_loc,x)) # The Speech program does nothing to stop your program (or its variables etc) from growing large enough to overwrite &5500, nor does it stop the stack pointer (coming down from HIMEM) from overwriting &72FF. For more safety on a Model B you could use RELOCAT to put Speech at &5E00 and be sure to set HIMEM=&5E00 before loading, but then you must avoid commands that change HIMEM, such as MODE (but selecting any non-shadow mode other than 7 will overwrite Speech anyway, although if you set the mode before loading Speech then it'll overwrite screen memory and still work as long as the affected part of the screen is undisturbed). You can't do tricks like ditching the lexicon because RELOCAT won't let you go above 5E00 (unless you fix it, but I haven't looked in detail; if you can fix RELOCAT to go above 5E00 then you can create a lexicon-free Speech by taking the 1st 0x1560 bytes of SPEECH and append two * bytes, relocate to &6600 and set HIMEM, but don't expect *SAY to work, unless you put a really small lexicon into the spare 144 bytes that are left - RELOCAT needs an xx00 address so you can't have those bytes at the bottom). You could even relocate to &6A00 and overwrite (non-shadow) screen memory if you don't mind the screen being filled with gibberish that you'd better not erase! (well if you program the VIDC as mentioned above and you didn't re-add a small lexicon then you could get yourself 3.6 lines of usable Mode 7 display from the spare bytes but it's probably not worth the effort)
if top > mode7_himem:
if model=="Master":
if top > shadow_himem: limits_exceeded.append(model+" 32k HIMEM limit (even for shadow modes)") # TODO: maybe add instructions for using BAS128 on the B+ or Master; this sets PAGE=&10000 and HIMEM=&20000 (i.e. 64k for programs), which uses all 4 SRAM slots so you can't use SP8000 (unless it's on a real ROM); if using Speech in main memory you need to RELOCAT it to leave &3000 upwards for Bas128 code; putting it at &1900 for B+/DFS leaves you only 417 bytes for lexicon (which might not matter if you're using only *SPEECH: just create a shortened lexicon); putting it at &E00 for Master allows space for the default 2204-byte lexicon with 1029 bytes to spare; TODO check if Bas128 uses any workspace between &E00 and &3000 though. Alternatively (if you really want to store such a long program on the BBC) then you'd better split it into several programs that CHAIN each other (as mentioned above).
else: limits_exceeded.append(model+" Mode 7 HIMEM limit (use shadow modes 128-135)")
else: limits_exceeded.append(model+" Mode 7 HIMEM limit") # unless you overwrite the screen (see above) - let's assume the Model B hasn't been fitted with shadow modes (although the Integra-B add-on does give them to the Model B, and leaves PAGE at &1900; B+ has shadow modes but I don't know what's supposed to happen to PAGE on it). 65C02 Tube doesn't help much (it'll try to run Speech on the coprocessor instead of the host, and this results in silence because it can't send its sound back across the Tube; don't know if there's a way to make it run on the host in these circumstances or what the host's memory map is like)
if lineCount > 32768: limits_exceeded.append("BBC BASIC line number limit") # and you wouldn't get this far without filling the memory, even with 128k (4 bytes per line)
elif 10*lineCount > 32767: limits_exceeded.append("AUTO line number limit (try AUTO 0,1)") # (default AUTO increments in steps of 10; you can use AUTO 0,1 to start at 0 and increment in steps of 1. BBC BASIC stores its line info in a compact form which allows a range of 0-32767.)
if severe: warning,after="WARNING: ",""
else: warning,after="Note: ","It should still work if pasted into BeebEm as immediate commands. "
after = ". "+after+"See comments in lexconvert for more details.\n"
if len(limits_exceeded)>1: sys.stderr.write(warning+"this text may be too big for the BBC Micro. The following limits were exceeded: "+", ".join(limits_exceeded)+after)
elif limits_exceeded: sys.stderr.write(warning+"this text may be too big for the BBC Micro because it exceeds the "+limits_exceeded[0]+after)
def bbc_prepDefaultLex(outFile):
"""Special-case function set as lex_header in bbcmicro format. If SPEECH_DISK and MAKE_SPEECH_ROM is set, then read the ROM code from SPEECH_DISK and write to outFile (meant to go before the lexicon, to make a modified BBC Micro Speech ROM with custom lexicon)"""
if not os.environ.get("MAKE_SPEECH_ROM",0): return
d=open(os.environ['SPEECH_DISK']).read() # if this fails, SPEECH_DISK was not set or was set incorrectly (it's required for MAKE_SPEECH_ROM)
i=d.index('LO\x80LP\x80\x82\x11') # start of SP8000 file (if this fails, it wasn't a Speech disk)
j=d.index('>OUS_',i) # start of lexicon (ditto)
assert j-i==0x1683, "Is this really an original disk image?"
outFile.write(d[i:j])
def bbc_appendDefaultLex(outFile):
"""Special-case function set as lex_footer in bbcmicro format. If SPEECH_DISK is set, read Speech's default lexicon from it and append this to outFile. Otherwise just write a terminating >** to outFile. In either case, check for exceeding 16k if we're MAKE_SPEECH_ROM, close the file and call print_bbclex_instructions."""
if os.environ.get("SPEECH_DISK",""):
d=open(os.environ['SPEECH_DISK']).read()
i=d.index('>OUS_') # if this fails, it wasn't a Speech disk
j=d.index(">**",i)
assert j-i==2201, "Lexicon on SPEECH_DISK is wrong size (%d). Is this really an original disk image?" % (j-i)
outFile.write(d[i:j])
# TODO: can we compress the BBC lexicon? i.e. detect if a rule will happen anyway due to subsequent wildcard rules, and delete it if so (don't know how many bytes that would save)
outFile.write(">**")
fileLen = outFile.tell()
assert not os.environ.get("MAKE_SPEECH_ROM",0) or fileLen <= 16384, "Speech ROM file got too big (%d)" % fileLen
outFile.close()
print_bbclex_instructions(getSetting("bbcmicro","lex_filename"),fileLen)
def bbcshortest(n):
"""Convert integer n into the shortest possible number of BBC Micro keystrokes; prefer hex if and only if the extra '&' keystroke won't make it any longer than its decimal equivalent"""
if len(str(n)) < len('&%X'%n): return str(n)
else: return '&%X'%n
def bbcKeystrokes(data,start):
"Return BBC BASIC keystrokes to put data into RAM starting at address start, without using the BASIC heap in the process (although we do use one of the page-4 integer variables to save some keystrokes). Assumes the data is mostly ASCII so the $ operator is the least-keystrokes method of getting it in (rather than ? and ! operators, assembler EQUB/EQUW/EQUS, 6502 mnemonics, etc); we don't mind about overwriting the byte after with a CHR$(13). Keystrokes are limited to ASCII for easier copy/paste. See comments for more details."
# Taken to the extreme, a 'find the least keystrokes' function would be some kind of data compressor; we're not doing that here as we assume this is going to be used to poke in a lexicon, which is basically ASCII with a few CHR$(128)s thrown in; this '$ operator' method is highly likely to yield the least keystrokes for that kind of data, apart from setting and using temporary string variables, but then (1) you're in the realms of data compression and (2) you require heap memory, which might not be a good idea depending on where we're putting our lexicon.
# I suppose it wouldn't hurt in most cases to have an A$=CHR$(128), but not doing this for now because you might be in a situation where you can't touch the heap at all (I'm not sure where the workspace for assembling strings is though).
# However, just to be pedantic about saving a few bytes, there is one thing we CAN do: if we have a lexicon with a lot of CHR$(128)s in it, let's set up BASIC's page-4 integer variables such that $A%=CHR$(128), saving 6 keystrokes per entry without needing the heap (an additional 1 keystroke per entry could be saved if we didn't mind putting an A$ on the heap).
use_int_hack = ((start>=1030 or start+len(data)<=1026) and len(data.split(chr(128))) >= 4)
i=0 ; ret=[]
if use_int_hack: thisLine = "A%=&408:B%=&D80:" # (@% is at &400 and each is 4 byte LSB-MSB; $x reads to next 0D)
# (If we're guaranteed to NOT be using Bas128 and therefore all memory addresses are effectively masked by &FFFF, we can instead set A%=&D800406 (using A%'s low 2 bytes to point to A%'s high 2 bytes) for a 1-off saving of 5 keystrokes and 1 page-4 variable, but this saving is not really worth the readability compromise and the risk posed by the possibility of Bas128 - I don't know how Bas128 treats addresses above &1FFFF)
# (An even 'nastier' trick would be to put !13=&D80 and then use $13, as those bytes are used by BASIC's random number generator, which presumably isn't called during the paste and we don't mind disrupting it; again I don't know about Bas128. But you can't do it because BASIC gives a "$ range" error on anything below 256.)
# (I suppose one thing you _could_ do is LOMEM=&400:A$=CHR$(13) and end with LOMEM=TOP, which would overwrite 3 page-4 variables and let you use just A$ instead of $A%, saving keystrokes over A%=&D800406 after 21 more lexicon words, at the expense of losing track of any variables you had on the heap. But this is getting silly.)
else: thisLine = ""
bbc_max_line_len = 238
inQuote=needPlus=0 ; needCmd=1
while i<len(data):
if needCmd:
thisLine += ('$'+bbcshortest(start)+'=')
inQuote=needPlus=needCmd=0
if data[i]=='"': c,inQ = '""',1 # inQ MUST be 0 or 1, not False/True, because it's also used as 'len of necessary close quote' below
elif 32<=ord(data[i])<127: c,inQ = data[i],1
elif use_int_hack and ord(data[i])==128: c,inQ="$A%",0
else: c,inQ=("CHR$("+str(ord(data[i]))+")"),0
addToLine = [] ; newNeedPlus = needPlus
if inQ and not inQuote:
if needPlus: addToLine.append('+')
addToLine.append('"')
newNeedPlus=0
elif inQuote and not inQ:
addToLine.append('"+')
newNeedPlus=1 # after what we'll add
elif not inQ:
if needPlus: addToLine.append('+')
newNeedPlus=1 # after what we'll add
addToLine.append(c)
addToLine=''.join(addToLine)
if len(thisLine)+len(addToLine)+inQ > bbc_max_line_len: # oops, we've gone too far, back off and end prev line
if inQuote: thisLine += '"'
ret.append(thisLine)
thisLine="" ; needCmd=1 ; continue
thisLine += addToLine ; inQuote=inQ
needPlus=newNeedPlus ; i += 1 ; start += 1
if inQuote: thisLine += '"'
if not needCmd: ret.append(thisLine)
return '\n'.join(ret)+'\n'
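# Worked example (added commentary, not part of the original lexconvert):
# plain ASCII data becomes a single $-assignment at the given address
# (&5500 being Speech's default location, as used elsewhere in this file).
def _demo_bbcKeystrokes():
    assert bbcKeystrokes("HELLO", 0x5500) == '$&5500="HELLO"\n'
    return True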
def print_bbclex_instructions(fname,size):
"""Print suitable instructions for a BBC Micro lexicon of the given filename and size (the exact nature of the instructions depends on the size). If appropriate, create a .key file containing keystrokes for transferring to an emulator."""
if os.environ.get("MAKE_SPEECH_ROM",0): print "%s (%d bytes, hex %X) can now installed on an emulator (set in Roms.cfg or whatever), or loaded onto a chip. The sound quality of this might be worse than that of the main-RAM version." % (fname,size,size) # (at least on emulation - see comment on sound quality above)
else:
print "The size of this lexicon is %d bytes (hex %X)" % (size,size) # (the default lexicon is 2204 bytes)
bbcStart=None
noSRAM_lex_offset=0x155F # (on the BBC Micro, SRAM means Sideways RAM, not Static RAM as it does elsewhere; for clarity we'd better say "Sideways RAM" in all output)
SRAM_lex_offset=0x1683
SRAM_max=0x4000 # 16k
noSRAM_default_addr=0x5500
noSRAM_min_addr=0xE00 # minimum supported by RELOCAT
page=0x1900 # or 0xE00 for Master (but OK to just leave this at 0x1900 regardless of model; it harmlessly increases the range where special_relocate_instructions 'kick in')
noSRAM_himem=0x7c00 # unless you're in a shadow mode or something (see comments on himem above), however leaving this at 0x7c00 is usually harmless (just causes the 'need to relocate' to 'kick in' earlier, although if memory is really full it might say 'too big' 1k too early)
def special_relocate_instructions(reloc_addr):
pagemove_min,pagemove_max = max(0xE00,page-0x1E00), page+0xE00 # if relocating to within this range, must move PAGE before loading RELOCAT. RELOCAT's supported range is 0xE00 to 0x5E00, omitting (PAGE-&1E00) to (PAGE+&E00)
if reloc_addr < 0x1900: extra=" On a Model B with Acorn DFS you won't be able to use the disk after relocating below &1900, and you can't run star commands from tape so you have to initialise via CALL. (On a Master, DFS is not affected as it doesn't use &E00-&1900.)"
else: extra = ""
if not pagemove_min<=reloc_addr<pagemove_max:
return extra # no other special instructions needed
newpage = reloc_addr+0x1E00
page_max = min(0x5E00,noSRAM_default_addr-0xE00)
if newpage > page_max: return False # "Unfortunately RELOCAT can't put it at &%X even with PAGE changes." % reloc_addr
return " Please run RELOCAT with PAGE in the range of &%X to &%X for this relocation to work.%s" % (newpage,page_max,extra)
if noSRAM_default_addr+noSRAM_lex_offset+size > noSRAM_himem:
reloc_addr = noSRAM_himem-noSRAM_lex_offset-size
reloc_addr -= (reloc_addr%256)
if reloc_addr >= noSRAM_min_addr:
instr = special_relocate_instructions(reloc_addr)
if instr==False: print "This lexicon is too big for Speech in main RAM even with relocation, unless RELOCAT is rewritten to work from files."
else:
bbcStart = reloc_addr+noSRAM_lex_offset
reloc_call = reloc_addr + 0xB00
print "This lexicon is too big for Speech at its default address of &%X, but you could use RELOCAT to put a version at &%X and then initialise it with CALL %s (or do the suggested *SAVE, reset, and run *SP). Be sure to set HIMEM=&%X. Then *LOAD %s %X or change the relocated SP file from offset &%X.%s" % (noSRAM_default_addr,reloc_addr,bbcshortest(reloc_call),reloc_addr,fname,bbcStart,noSRAM_lex_offset,instr)
else: print "This lexicon is too big for Speech in main RAM even with relocation."
else: # fits at default location - no relocation needed
bbcStart = noSRAM_default_addr+noSRAM_lex_offset
print "You can load this lexicon by *LOAD %s %X or change the SPEECH file from offset &%X. Suggest you also set HIMEM=&%X for safety." % (fname,bbcStart,noSRAM_lex_offset,noSRAM_default_addr)
if bbcStart: # we managed to fit it into main RAM
keys = bbcKeystrokes(open(fname).read(),bbcStart)
open(fname+".key","w").write(keys)
print "For ease of transfer to emulators etc, a self-contained keystroke file for putting %s data at &%X has been written to %s.key" % (fname,bbcStart,fname)
if len(keys) > 32767: print "(This file looks too big for BeebEm to paste though)" # see comments elsewhere
# Instructions for replacing lex in SRAM:
if size > SRAM_max-SRAM_lex_offset: print "This lexicon is too big for Speech in Sideways RAM." # unless you can patch Speech to run in SRAM but read its lexicon from main RAM, or run in main RAM but page in multiple banks of SRAM for the lexicon (but even then there'll be a limit)
else: print "You can load this lexicon into Sideways RAM by *SRLOAD %s %X 7 (or whichever bank number you're using), or change the SP8000 file from offset &%X." % (fname,SRAM_lex_offset+0x8000,SRAM_lex_offset)
if not os.environ.get("SPEECH_DISK",""): print "If you want to append the default lexicon to this one, set SPEECH_DISK to the image of the original Speech disk before running lexconvert, e.g. export SPEECH_DISK=/usr/local/BeebEm3/diskimg/Speech.ssd"
if size <= SRAM_max-SRAM_lex_offset: print "You can also set MAKE_SPEECH_ROM=1 (along with SPEECH_DISK) to create a SPEECH.ROM file instead"
print "If you get 'Mistake in speech' when testing some words, try starting with '*SAY, ' (this seems to be a Speech bug)" # - can't track down which words it does and doesn't apply to
print "It might be better to load your lexicon into eSpeak and use lexconvert's --phones option to drive the BBC with phonemes."
def mainopt_version(i):
# TODO: doc string for the help? (or would this option clutter it needlessly) - just print lexconvert's version number and nothing else
print __doc__.split("\n")[0].split(" - ")[0]
def main():
"""Introspect the module to find the mainopt_ functions, and either call one of them or print the help. Returns the error code to send back to the OS."""
def funcToOpt(n): return "--"+n[n.index("_")+1:].replace("_","-")
for k,v in globals().items():
if k.startswith('mainopt_') and funcToOpt(k) in sys.argv:
try: msg = v(sys.argv.index(funcToOpt(k)))
except Message,e: msg=e.message
if msg:
sys.stdout.flush()
sys.stderr.write(msg+"\n") ; return 1
else: return 0
html = ('--htmlhelp' in sys.argv) # (undocumented option used for my website, don't rely on it staying)
    def htmlify(h): return h.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;').replace('\n','<br>').replace('--','<tt>--</tt>') # (the last bit is so typography.js doesn't try to rewrite options stuff to en-dash)
if not html: htmlify = lambda x:x
print htmlify(__doc__)
if html: missALine = "<p>"
else: missALine = ""
print missALine
if '--formats' in sys.argv: # non-HTML mode only (format descriptions are included in HTML anyway, and don't worry about the capability summary)
print "Available pronunciation formats (and support levels):"
keys=lexFormats.keys() ; keys.sort()
for k in keys:
types = []
if not k=="example": types.append("phones")
if k=="mac-uk": types.append("speaking")
else:
if checkSetting(k,"lex_read_function"): types.append("lex-read")
if checkSetting(k,"lex_filename") and checkSetting(k,"lex_entry_format"):
ltype = checkSetting(k,"lex_type")
if ltype: ltype=" as "+ltype
types.append("lex-write"+ltype)
print "\n"+k+" ("+", ".join(types)+")"
print getSetting(k,"doc")
return 0
elif html:
print "Available pronunciation formats:"
if html: print '<table id="formats">'
keys=lexFormats.keys() ; keys.sort()
for k in keys: print '<tr><td valign="top"><nobr>'+k+'</nobr></td><td valign="top">'+htmlify(getSetting(k,"doc"))+"</td></tr>"
print "</table><script><!-- try to be more readable on some smartphones\nif(screen && screen.width<600 && document.getElementById && document.getElementById('formats').outerHTML) document.getElementById('formats').outerHTML = document.getElementById('formats').outerHTML.replace(/<table/g,'<dl').replace(/<.table/g,'<'+'/dl').replace(/<tr><td/g,'<dt').replace(/<.td><td/g,'<'+'/dt><dd').replace(/<.td><.tr/g,'<'+'/dd');\n//--></script>"
else: print "Available pronunciation formats: "+", ".join(sorted(lexFormats.keys()))+"\n(Use --formats to see their descriptions)"
print missALine
print "Program options:"
print missALine
if html: print "<dl>"
for _,opt,desc in sorted([(v.__doc__ and not v.__doc__.startswith('*'),k,v.__doc__) for k,v in globals().items()]):
if not opt.startswith("mainopt_"): continue
opt = funcToOpt(opt)
if not desc: continue # undocumented option
params,rest = desc.split("\n",1)
if params.startswith('*'): params=params[1:]
if params: opt += (' '+params)
if html: print "<dt>"+htmlify(opt)+"</dt><dd>"+htmlify(rest)+"</dd>"
else: print opt+"\n"+rest+"\n"
if html: print "</dl>"
return 0
catchingSigs = inSigHandler = False
def catchSignals():
"We had better try to catch all signals if using MacBritish_System_Lexicon so we can safely clean it up. We raise KeyboardInterrupt instead (need to catch this). Might not work with multithreaded code."
global catchingSigs
if catchingSigs: return
catchingSigs = True
import signal
def f(sigNo,*args):
global inSigHandler
if inSigHandler: return
inSigHandler = True
os.killpg(os.getpgrp(),sigNo)
sys.stderr.write("\nCaught signal %d\n" % sigNo)
raise KeyboardInterrupt
for n in xrange(1,signal.NSIG):
if not n in [
signal.SIGCHLD, # sent on subprocess completion
signal.SIGTSTP,signal.SIGCONT, # Ctrl-Z / fg
signal.SIGWINCH, # window-size change
] and not signal.getsignal(n)==signal.SIG_IGN:
try: signal.signal(n,f)
except: pass
class MacBritish_System_Lexicon(object):
"""Overwrites some of the pronunciations in the system
lexicon (after backing up the original). Cannot
change the actual words in the system lexicon, so just
alters pronunciations of words you don't intend to use
so you can substitute these into your texts.
Restores the lexicon on close()."""
instances = {}
def __init__(self,text="",voice="Daniel"):
"""text is the text you want to speak (so that any
words used in it that are not mentioned in your
lexicon are unchanged in the system lexicon);
text="" means you just want to speak phonemes.
voice can be Daniel, Emily or Serena."""
self.voice = False
assert not voice in MacBritish_System_Lexicon.instances, "There is already another instance of MacBritish_System_Lexicon for the "+voice+" voice"
assert not os.system("lockfile -1 -r 10 /tmp/"+voice+".PCMWave.lock") # in case some other process has it (note: if you run with python -O, this check won't happen!)
self.voice = voice
self.filename = "/System/Library/Speech/Voices/"+voice+".SpeechVoice/Contents/Resources/PCMWave"
assert os.path.exists(self.filename),"Cannot find an installation of '"+voice+"' on this system"
if not os.path.exists(self.filename+"0"):
sys.stderr.write("Backing up "+self.filename+" to "+self.filename+"0...\n") # (you'll need a password if you're not running as root)
err = os.system("sudo mv \""+self.filename+"\" \""+self.filename+"0\"; sudo cp \""+self.filename+"0\" \""+self.filename+"\"; sudo chown "+str(os.getuid())+" \""+self.filename+"\"")
assert not err, "Error creating backup"
lexFile = self.filename+".lexdir"
if not os.path.exists(lexFile):
sys.stderr.write("Creating lexdir file...\n")
err = os.system("sudo touch \""+lexFile+"\" ; sudo chown "+str(os.getuid())+" \""+lexFile+"\"")
assert not err, "Error creating lexdir"
import cPickle
if os.stat(lexFile).st_size: self.wordIndexStart,self.wordIndexEnd,self.phIndexStart,self.phIndexEnd = cPickle.Unpickler(open(lexFile)).load()
else:
dat = open(self.filename).read()
def findW(word,rtnPastEnd=0):
i = re.finditer(re.escape(word+chr(0)),dat)
try: n = i.next()
except StopIteration: raise Exception("word not found in voice file")
try:
n2 = i.next()
raise Exception("word does not uniquely identify a byte position (has at least %d and %d)" % (n.start(),n2.start()))
except StopIteration: pass
if rtnPastEnd: return n.end()
else: return n.start()
            self.wordIndexStart = findW("808s") # first word in the voice's word index
            self.phIndexStart = findW("'e&It.o&U.e&Its") # its phonemes ("eight-oh-eights")
            self.wordIndexEnd = findW("zombie",1) # last word in the index (past-the-end position)
            self.phIndexEnd = findW("'zA+m.bI",1) # its phonemes (past-the-end position)
cPickle.Pickler(open(lexFile,"w")).dump((self.wordIndexStart,self.wordIndexEnd,self.phIndexStart,self.phIndexEnd))
self.dFile = open(self.filename,'r+')
assert len(self.allWords()) == len(self.allPh())
MacBritish_System_Lexicon.instances[voice] = self
self.textToAvoid = text.replace(unichr(160).encode('utf-8'),' ') ; self.restoreDic = {}
catchSignals()
def allWords(self):
"Returns a list of words that are defined in the system lexicon (which won't be changed, but see allPh)"
self.dFile.seek(self.wordIndexStart)
return [x for x in self.dFile.read(self.wordIndexEnd-self.wordIndexStart).split(chr(0)) if x]
def allPh(self):
"Returns a list of (file position, phoneme string) for each of the primary phoneme entries from the system lexicon. These entries can be changed in-place by writing to the said file position, and then spoken by giving the voice the corresponding word from allWords (but see also usable_words)."
self.dFile.seek(self.phIndexStart)
def f(l):
last = None ; r = [] ; pos = self.phIndexStart
for i in l:
if re.search(r'[ -~]',i) and not i in ["'a&I.'fo&Un","'lI.@n","'so&Un.j$"] and not (i==last and i in ["'tR+e&I.si"]): r.append((pos,i)) # (the listed pronunciations are secondary ones that for some reason are in the list)
if re.search(r'[ -~]',i): last = i
pos += (len(i)+1) # +1 for the \x00
assert pos==self.phIndexEnd+1 # +1 because the last \00 will result in a "" item after; the above +1 will be incorrect for that item
return r
return f([x for x in self.dFile.read(self.phIndexEnd-self.phIndexStart).split(chr(0))])
def usable_words(self,words_ok_to_redefine=[]):
"Returns a list of (word,phoneme_file_position,original_phonemes) by combining allWords with allPh, but omitting any words that don't seem 'usable' (for example words that contain spaces, since these lexicon entries don't seem to be actually used by the voice). Words that occur in self.textToAvoid are also considered non-usable, unless they also occur in words_ok_to_redefine (user lexicon)."
for word,(pos,phonemes) in zip(self.allWords(),self.allPh()):
if not re.match("^[a-z0-9]*$",word): continue # it seems words not matching this regexp are NOT used by the engine
if not (phonemes and 32<ord(phonemes[0])<127): continue # better not touch those, just in case
if word in self.textToAvoid and not word in words_ok_to_redefine: continue
yield word,pos,phonemes
def check_redef(self,wordsAndPhonemes):
"Diagnostic function to list on standard error the redefinitions we want to make. wordsAndPhonemes is a list of (original system-lexicon word, proposed new phonemes). The old phonemes are also listed, fetched from allPh."
aw = self.allWords() ; ap = 0
for w,p in wordsAndPhonemes:
w = w.lower()
if not re.match("^[a-z0-9]*$",w): continue
if not w in aw: continue
if not ap:
ap = self.allPh()
sys.stderr.write("Warning: some words were already in system lexicon\nword\told\tnew\n")
sys.stderr.write(w+"\t"+ap[aw.index(w)][1]+"\t"+p+"\n")
def speakPhones(self,phonesList):
"Speaks every phonetic word in phonesList"
words = [str(x)+"s" for x in range(len(phonesList))]
d = self.setMultiple(words,phonesList)
os.popen(macSayCommand()+" -v \""+self.voice+"\"",'w').write(" ".join(d.get(w,"") for w in words))
def readWithLex(self,lex):
"Reads the text given in the constructor after setting up the lexicon with the given (word,phoneme) list"
# self.check_redef(lex) # uncomment if you want to know about these
textToPrint = u' '+self.textToAvoid.decode('utf-8')+u' '
tta = ' '+self.textToAvoid.replace(u'\u2019'.encode('utf-8'),"'").replace(u'\u2032'.encode('utf-8'),'').replace(u'\u00b4'.encode('utf-8'),'').replace(u'\u02b9'.encode('utf-8'),'').replace(u'\u00b7'.encode('utf-8'),'').replace(u'\u2014'.encode('utf-8'),' ')+' ' # (ignore pronunciation marks 2032 and b7 that might be in the text, but still print them in textToPrint; also normalise apostrophes but not in textToPrint, and be careful with dashes as lex'ing the word after a hyphen or em-dash won't work BUT we still want to support hyphenated words IN the lexicon, so em-dashes are replaced here and hyphens are included in nonWordBefore below)
words2,phonemes2 = [],[] # keep only the ones actually used in the text (no point setting whole lexicon)
nonWordBefore=r"(?i)(?<=[^A-Za-z"+chr(0)+"-])" # see below for why chr(0) is included, and see comment above for why hyphen is at the end; (?i) = ignore case
nonWordAfter=r"(?=([^A-Za-z'"+unichr(0x2019)+"-]|['"+unichr(0x2019)+r"-][^A-Za-z]))" # followed by non-letter non-apostrophe, or followed by apostrophe non-letter (so not if followed by "'s", because the voice won't use our custom lex entry if "'s" is added to the lex'd word, TODO: automatically add "'s" versions to the lexicon via +s or +iz?) (also not if followed by hyphen-letters; hyphen before start is handled above, although TODO preceded by non-letter + hyphen might be OK)
ttal = tta.lower()
for ww,pp in lex:
if ww.lower() in ttal and re.search(nonWordBefore+re.escape(ww)+nonWordAfter,tta):
words2.append(ww) ; phonemes2.append(pp)
for k,v in self.setMultiple(words2,phonemes2).iteritems():
tta = re.sub(nonWordBefore+re.escape(k)+nonWordAfter,chr(0)+v,tta)
textToPrint = re.sub(nonWordBefore+'('+u'[\u2032\u00b4\u02b9\u00b7]*'.join(re.escape(c) for c in k)+')'+nonWordAfter,chr(0)+r'\1'+chr(1),textToPrint)
tta = tta.replace(chr(0),'')
term = os.environ.get("TERM","")
if ("xterm" in term or term=="screen") and sys.stdout.isatty(): # we can probably underline words (inverse is more widely supported than underline, e.g. should work even on an old Linux console in case someone's using that to control an OS X server, but there might be a *lot* of words, which wouldn't be very good in inverse if user needs dark background and inverse is bright. Unlike Annogen, we're dealing primarily with Latin letters.)
import textwrap
textwrap.len = lambda x: len(x.replace(chr(0),"").replace(chr(1),"")) # a 'hack' to make (at least the 2.x implementations of) textwrap ignore our chr(0) and chr(1) markers in their calculations. Relies on textwrap calling len().
            print textwrap.fill(textToPrint,stdout_width_unix(),break_on_hyphens=False).encode('utf-8').replace(chr(0),"\x1b[4m").replace(chr(1),"\x1b[0m").strip() # break_on_hyphens=False because we don't really want hyphenated NAMES to be split across lines, and anyway textwrap in (at least) Python 2.7 has a bug that sometimes causes a line break to be inserted before a syllable marker symbol like 'prime'
# else don't print anything (saves confusion)
os.popen(macSayCommand()+" -v \""+self.voice+"\"",'w').write(tta)
def setMultiple(self,words,phonemes):
"Sets phonemes for words, returning dict of word to substitute word. Flushes file buffer before return."
avail = [] ; needed = []
for word,pos,phon in self.usable_words(words):
avail.append((len(phon),word,pos,phon))
for word,phon in zip(words,phonemes):
needed.append((len(phon),word,phon))
avail.sort() ; needed.sort() # shortest phon first
i = 0 ; wDic = {} ; iDone=set() ; mustBeAlpha=True
# mustBeAlpha: prefer alphabetical words, since
# these can be capitalised at start of sentence
# (the prosody doesn't always work if it isn't)
for l,word,phon in needed:
while avail[i][0] < l or (mustBeAlpha and not re.match("[A-Za-z]",avail[i][1])) or i in iDone:
i += 1
if i==len(avail):
if mustBeAlpha: # desperate situation: we HAVE to use the non-alphabetical slots now (ideally we should pick words that never occur at start of sentence for them, but this branch is hopefully a rare situation in practice)
mustBeAlpha=False ; i=0; continue
sys.stderr.write("Could not find enough lexicon slots!\n") # TODO: we passed 'words' to usable_words's words_ok_to_redefine - this might not be the case if we didn't find enough slots
self.dFile.flush() ; return wDic
iDone.add(i)
_,wSubst,pos,oldPhon = avail[i] ; i += 1
            if pos in self.restoreDic: oldPhon=None # shouldn't happen if setMultiple is called only once, but might be useful for small experiments in the Python interpreter etc (note: the check must use pos, the slot just taken; checking avail[i] after the increment would inspect the NEXT slot and could index past the end)
self.set(pos,phon,oldPhon)
wDic[word] = wSubst[0].upper()+wSubst[1:] # always capitalise it so it can be used at start of sentence too (TODO: copy original capitalisation of each instance instead, in case it happens to come directly after a dotted abbreviation? although if it's something that's always capitalised anyway, e.g. most names, then this won't make any difference)
self.dFile.flush() ; return wDic
def set(self,phPos,val,old=None):
"""Sets phonemes at position phPos to new value.
Caller should flush the file buffer when done."""
# print "Debugger: setting %x to %s" % (phPos,val)
if old:
assert not phPos in self.restoreDic, "Cannot call set() twice on same phoneme while re-specifying 'old'"
assert len(val) <= len(old), "New phoneme is too long!"
self.restoreDic[phPos] = old
else: assert phPos in self.restoreDic, "Must specify old values (for restore) when setting for first time"
self.dFile.seek(phPos)
self.dFile.write(val+chr(0))
def __del__(self):
"WARNING - this might not be called before exit - best to call close() manually"
if not self.voice: return
self.close()
def close(self):
for phPos,val in self.restoreDic.items():
self.set(phPos,val)
self.dFile.close()
del MacBritish_System_Lexicon.instances[self.voice]
assert not os.system("rm -f /tmp/"+self.voice+".PCMWave.lock")
self.voice=None
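# A minimal usage sketch (added for illustration; not part of the original
# file -- the word and the Apple phoneme string below are assumptions only):
#
#   lex = MacBritish_System_Lexicon("please say compute", "Daniel")
#   try: lex.readWithLex([("compute", "k@m.'pjut")])
#   finally: lex.close()  # always restore the backed-up system lexicon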
def stdout_width_unix(): # assumes isatty
    import struct,fcntl,termios
    # TIOCGWINSZ fills a struct winsize (rows, cols, xpixels, ypixels);
    # unpack the first two shorts and return the column count
    return struct.unpack('hh', fcntl.ioctl(1,termios.TIOCGWINSZ,'1234'))[1]
lexFormats = LexFormats() # at end, in case it refers to anything that was defined later
if __name__ == "__main__": sys.exit(main())
|
[
"leejcody@gmail.com"
] |
leejcody@gmail.com
|
0379d4bbe345fe0dbca192d0ac27043c4d7b4f57
|
6e423cddd8698bc662bcc3208eb7a8fdb2eb0d72
|
/mcenter_client/tests/mcenter_server_api/encoder.py
|
9cb35bd7d768a99107b103b4881feb672e2e089a
|
[
"Apache-2.0"
] |
permissive
|
theromis/mlpiper
|
7d435343af7b739767f662b97a988c2ccc7665ed
|
738356ce6d5e691a5d813acafa3f0ff730e76136
|
refs/heads/master
| 2020-05-05T04:44:00.494105
| 2019-04-03T19:53:01
| 2019-04-03T22:02:53
| 179,722,926
| 0
| 0
|
Apache-2.0
| 2019-04-05T17:06:02
| 2019-04-05T17:06:01
| null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
from connexion.apps.flask_app import FlaskJSONEncoder
import six
from mcenter_server_api.models.base_model_ import Model
class JSONEncoder(FlaskJSONEncoder):
include_nulls = False
    def default(self, o):
        if isinstance(o, Model):
            # Serialize generated Model instances attribute by attribute,
            # translating Python attribute names back to their JSON keys
            dikt = {}
            for attr, _ in six.iteritems(o.openapi_types):
                value = getattr(o, attr)
                if value is None and not self.include_nulls:
                    continue  # skip null fields unless include_nulls is set
                attr = o.attribute_map[attr]
                dikt[attr] = value
            return dikt
        return FlaskJSONEncoder.default(self, o)
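# Hedged usage sketch (the wiring below is the usual connexion pattern,
# assumed rather than taken from this file):
#
#   import connexion
#   app = connexion.App(__name__, specification_dir='./openapi/')
#   app.app.json_encoder = JSONEncoder  # attach to the underlying Flask app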
|
[
"lior.amar@parallelmachines.com"
] |
lior.amar@parallelmachines.com
|
1779c30030ca6febb2833e99d27a6cde0844200c
|
d2874ae74c83303d04520f0b79d8fe6eba87fd81
|
/ex8.py
|
e728e2a94219f629793b0cea5d1cd51aed1cd233
|
[] |
no_license
|
lakshmipraba/New-one
|
0036639203b1d06960e90aa366eaae8b8bb043f4
|
8347ec7b7317e15bdeb7544f9a2ea42e2ccf231d
|
refs/heads/master
| 2020-04-29T03:23:25.008782
| 2014-02-06T15:58:09
| 2014-02-06T15:58:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
formatter="%r %r %r %r"
print formatter % (1,2,3,4)
print formatter % ("one","two","three","four")
print formatter % (True,False,False,True)
print formatter % (formatter,formatter,"formatter",formatter)
print formatter %("i had this thing.",
"that you could type up right.",
"but it did't sing.",
"so i said goodnight.")
|
[
"lakshmipraba@users.noreply.github.com"
] |
lakshmipraba@users.noreply.github.com
|
eb2d843edd7819c4b6b0efc4fbe526ac08e3bcaa
|
243adc83bbb79277509d042165bfc1ec15626849
|
/RandomQG/settings.py
|
6b3ba1d38dd6dc13b0c9b2a45d48bd7a1fc9d875
|
[] |
no_license
|
Rohith-Bellamkonda/RandomQuotesGenerator
|
d80f554da8134054e9f5baf8ed548c13603e1ea1
|
3d07135527c1176cebe3bff132818285d0b6ecd0
|
refs/heads/main
| 2023-06-28T09:47:30.381503
| 2021-07-27T09:14:48
| 2021-07-27T09:14:48
| 389,919,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,292
|
py
|
"""
Django settings for RandomQG project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-9hltd!^*@qdf7zf7239ux&gy4odtz+$2c1(636ob7tf^f#-=r='
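# For production, a common pattern (a sketch, not part of the generated file;
# the environment-variable name is an assumption) is to load the key from the
# environment instead of hard-coding it:
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)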
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quotesapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'RandomQG.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR/'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'RandomQG.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"Rohith4560@gmail.com"
] |
Rohith4560@gmail.com
|
a837cdef15b5851a122127c2ff923cef5050feda
|
cb7a446b35e05c3dc11cf0df8510e5dcdcd40703
|
/test.py
|
398718b007dd1f0776110266cf1df83492cfba5e
|
[] |
no_license
|
iampaavan/Udemy-Python-Data-Structures-Algorithms
|
8a59082eed4605571c7859a53878d7a38dca11d9
|
9fd685504a799202234e7364d44dc688df871241
|
refs/heads/master
| 2020-06-04T17:47:40.499407
| 2019-06-25T19:31:24
| 2019-06-25T19:31:24
| 192,130,929
| 0
| 0
| null | 2019-06-27T19:47:58
| 2019-06-15T23:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
class Solution:
def productExceptSelf(self, nums):
# The length of the input array
length = len(nums)
# The left and right arrays as described in the algorithm
L, R, answer = [0]*length, [0]*length, [0]*length
# L[i] contains the product of all the elements to the left
# Note: for the element at index '0', there are no elements to the left,
# so the L[0] would be 1
L[0] = 1
for i in range(1, length):
# L[i - 1] already contains the product of elements to the left of 'i - 1'
# Simply multiplying it with nums[i - 1] would give the product of all
# elements to the left of index 'i'
L[i] = nums[i - 1] * L[i - 1]
# R[i] contains the product of all the elements to the right
# Note: for the element at index 'length - 1', there are no elements to the right,
# so the R[length - 1] would be 1
R[length - 1] = 1
for i in reversed(range(length - 1)):
# R[i + 1] already contains the product of elements to the right of 'i + 1'
# Simply multiplying it with nums[i + 1] would give the product of all
# elements to the right of index 'i'
R[i] = nums[i + 1] * R[i + 1]
# Constructing the answer array
for i in range(length):
# For the first element, R[i] would be product except self
# For the last element of the array, product except self would be L[i]
# Else, multiple product of all elements to the left and to the right
answer[i] = L[i] * R[i]
return answer
t = Solution()
print(t.productExceptSelf([1, 2, 3, 4]))
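# Follow-up sketch (added; not part of the original solution): the same
# left/right-product idea with O(1) extra space besides the output array,
# folding the right-side products into a single running scalar.
def product_except_self_o1(nums):
    n = len(nums)
    answer = [1] * n
    for i in range(1, n):
        answer[i] = answer[i - 1] * nums[i - 1]  # product of everything left of i
    right = 1  # running product of everything right of i
    for i in reversed(range(n)):
        answer[i] *= right
        right *= nums[i]
    return answer

print(product_except_self_o1([1, 2, 3, 4]))  # [24, 12, 8, 6]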
|
[
"noreply@github.com"
] |
iampaavan.noreply@github.com
|
e454ce2d0c662cd6193e06cb607a0575506a8faf
|
c70327483ad0f71756cb77d60649497a91dfe1b4
|
/scrape_the_names.py
|
2fbb59d9e72eea61f77e1ac017ff6c30715dabcb
|
[] |
no_license
|
ilkeroralkasim/name_generation
|
e081f688b64d9464bf76675174b18ec989485c5e
|
abc4dc4183eed51c8bac9ff2fb3613e9c68a75c5
|
refs/heads/master
| 2022-09-11T07:18:13.619947
| 2020-05-16T22:50:20
| 2020-05-16T22:50:20
| 264,542,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
import time
name_list = []
urls = ['https://www.behindthename.com/names']
urls = urls + [urls[0] + '/' + str(i) for i in range(2, 78)]
filenames = ['page' + str(i) + ".txt" for i in range(77)]
f_u = zip(urls, filenames)
for url, filename in f_u:
with open(filename, "w") as target_file:
print('Writing file: ', filename)
web_page = requests.get(url)
time.sleep(5)
soup = BeautifulSoup(web_page.text, 'html.parser')
results = soup.find_all('span', attrs={'class':'listname'})
for result in results:
n = result.find('a').text
name_list.append(n)
target_file.write(n + '\n')
print(len(name_list))
print(name_list[20:30])
uni_list = sorted(set(name_list))
print(len(uni_list))
print(uni_list)
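# Optional follow-up (a sketch; the output filename is an assumption, not
# part of the original script): persist the deduplicated names for later use.
with open("all_names.txt", "w") as out:
    out.write("\n".join(uni_list))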
|
[
"ilker.oralkasim@gmail.com"
] |
ilker.oralkasim@gmail.com
|