blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
72e8659e812c879108daec1f80e0a561e34e3ddb
|
3bcf8677aaaf6deb112deb3be4972f040b016356
|
/optimizing/pi/numba_random.py
|
330ed58350f4e1dfe1070491f3268cad963f7789
|
[] |
no_license
|
hobson/pycon2014
|
5db411e88cc369e2ba3c856fd73596aad8cdf73b
|
a4316057f336cf66ab5b7e5bb5d9e27b908b6290
|
refs/heads/master
| 2021-01-23T09:52:33.522982
| 2014-04-16T16:15:08
| 2014-04-16T16:15:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
# file: numba_random.py
"""Random numbers with numba.
"""
from numba import autojit, jit, random
import numba_no_debug
STATE = random.state_p
PREC = 0xffffffff
@autojit
def rand_auto():
    """Return a pseudo random float in (0, 1), compiled via numba autojit.

    rk_interval(PREC, STATE) draws from numba's Mersenne-Twister state;
    dividing by PREC maps the draw onto the unit interval — presumably
    rk_interval returns a float here; TODO confirm against numba.random.
    """
    return random.rk_interval(PREC, STATE) / PREC
@jit('float64()')
def rand_jit():
    """Return a pseudo random float in (0, 1), JIT-compiled with an
    explicit float64() signature (decorator form of `rand` below).
    """
    return random.rk_interval(PREC, STATE) / PREC
def rand_no_jit():
    """Return a pseudo random float in (0, 1) — plain-Python baseline for
    the timing comparison in the __main__ block.
    """
    return random.rk_interval(PREC, STATE) / PREC
# Same body as rand_jit, but compiled after the fact: rand_no_jit stays
# interpreted while `rand` is its JIT-wrapped counterpart.
rand = jit('float64()')(rand_no_jit)
if __name__ == '__main__':
    import timeit
    def test():
        """Time the execution.

        Benchmarks the interpreted, post-hoc-compiled, and
        decorator-compiled variants (Python 2 print statements).
        """
        for name in ['rand_no_jit', 'rand', 'rand_jit']:
            print name + ':',
            print timeit.timeit('{}()'.format(name),
                                'from numba_random import {}'.format(name))
    test()
|
[
"hobsonlane@gmail.com"
] |
hobsonlane@gmail.com
|
0e3d40df10f95376e4dd2509d5331063f8b17ef3
|
1311a5502b496d0139befba3efa6981b9d081aca
|
/fake_bpy_modules_2.82-20200514/addon_utils.py
|
275bc349d06cffd36eb5279f5adeab8fe50c9083
|
[] |
no_license
|
MegaMotion/SVR_Blender_Automation_Plugin
|
0982aa4e91d038a36f002fda91e8e89b7789d568
|
48ba2d94698ce184bec93711502ba2b368d86e6f
|
refs/heads/master
| 2022-10-25T20:08:57.173092
| 2020-06-19T19:19:30
| 2020-06-19T19:19:30
| 271,391,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
import sys
import typing
def check(module_name):
    """Fake-bpy stub of addon_utils.check: accepts the name, does nothing."""
    return None
def disable(module_name, default_set, handle_error):
    """Fake-bpy stub of addon_utils.disable: no-op placeholder."""
    return None
def disable_all():
    """Fake-bpy stub of addon_utils.disable_all: no-op placeholder."""
    return None
def enable(module_name, default_set, persistent, handle_error):
    """Fake-bpy stub of addon_utils.enable: no-op placeholder."""
    return None
def module_bl_info(mod, info_basis):
    """Fake-bpy stub of addon_utils.module_bl_info: no-op placeholder."""
    return None
def modules(module_cache, refresh):
    """Fake-bpy stub of addon_utils.modules: no-op placeholder."""
    return None
def modules_refresh(module_cache):
    """Fake-bpy stub of addon_utils.modules_refresh: no-op placeholder."""
    return None
def paths():
    """Fake-bpy stub of addon_utils.paths: no-op placeholder."""
    return None
def reset_all(reload_scripts):
    """Fake-bpy stub of addon_utils.reset_all: no-op placeholder."""
    return None
|
[
"connerlindsley@gmail.com"
] |
connerlindsley@gmail.com
|
5e70c107390a34fb914c5ad7d4ad1f5c0b1a8ecd
|
b8ef2fba17d8e993b4f6350936ed26d01f8b727d
|
/GUI_LEGO/GUI/read_data.py
|
39a7ef27d45d0c97b50cb49e19e5e71314fac174
|
[] |
no_license
|
Nabot/GUI-master
|
96e1c777acff1b4b2777b103d5d6e8a9b441395c
|
6065e11e9347e94f534f1af30b8b781bad4fc621
|
refs/heads/master
| 2021-01-21T06:42:04.752838
| 2017-05-17T13:46:34
| 2017-05-17T13:46:34
| 91,581,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
import serial
import time
# # Serial port parameters
serial_speed = 9600
serial_port = '/dev/tty.HC-06-DevB' # bluetooth shield hc-06
def print_data():
    """Open the HC-06 bluetooth serial port, read one line, and return it.

    Python 2 code (print statements). Blocks up to 1 s per read (timeout).
    Returns the raw line read from the Arduino (may be an empty string
    when the read timed out).
    NOTE(review): `ser` is never closed — fine for a one-shot script, but
    the port leaks if this is called repeatedly.
    """
    print "conecting to serial port ..."
    ser = serial.Serial(serial_port, serial_speed, timeout=1)
    # print "sending message to turn on PIN 13 ..."
    # ser.write('1')
    # while(True):
    print "recieving message from arduino ..."
    data = ser.readline()
    if (data != ""):
        print "arduino says: %s" % data
    #time.sleep(1)
    return(data)
|
[
"nabot@Nabot.local"
] |
nabot@Nabot.local
|
6eb7e4074f7c6505c26194d0571858f0402223e5
|
8b22730050501c72421abda7f55db583773aa37c
|
/ecvheo1/Queue & Deque/2346.py
|
f867f5d5b85f74aca122be29e669a467964dd6dd
|
[] |
no_license
|
naekang/uos-algorithm
|
5f98b1eae7c3ee1bde4942976ce18d12cf0d5f19
|
a0c03cfc5571c6c6dda5a710bd6e40e7c699e5e0
|
refs/heads/main
| 2023-08-06T18:28:39.447373
| 2021-10-04T05:43:19
| 2021-10-04T05:43:19
| 396,826,675
| 0
| 0
| null | 2021-08-16T14:18:20
| 2021-08-16T14:18:20
| null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
# Balloon-popping game (BOJ 2346): each balloon holds an offset; pop a
# balloon, then move that many positions around the shrinking circle
# (negative = left, positive = right) to pick the next one.  Prints the
# original 1-based positions in popping order.
n = int(input())
idx = 0
data = list(map(int, input().split()))
index = [x for x in range(1, n + 1)]  # original balloon numbers
temp = data.pop(idx)  # pop the first balloon; temp is its offset
print(index.pop(idx), end=" ")
while data:
    if temp < 0:
        # Moving left: Python's % keeps idx within range(len(data)).
        idx = (idx + temp) % len(data)
    else:
        # Moving right: -1 because the pop already advanced the cursor.
        idx = (idx + (temp - 1)) % len(data)
    temp = data.pop(idx)
    print(index.pop(idx), end=" ")
|
[
"ecvheo@naver.com"
] |
ecvheo@naver.com
|
edbc3aede980c504e73f2c1928cd6849bf79efa7
|
5e4e01601fb6c0dd4410705ac874b88b05abb779
|
/list_challenge/double_index.py
|
c28e321c7bb52a949018fcee6e6e3d691e45815c
|
[] |
no_license
|
marlenaJakubowska/codecademy_py
|
259c2d9d94407310295c4bab1bceb71e88c2fe58
|
49f980d73753a81fbc38b18b65d11facd2139da1
|
refs/heads/master
| 2020-09-26T17:38:25.948857
| 2019-12-06T10:34:48
| 2019-12-06T10:34:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
# Create a function named double_index that has two parameters named lst and index.
# The function should return a new list where all elements are the same as in lst except
# for the element at index, which should be double the value of the element at index of lst.
# If index is not a valid index, the function should return the original list.
# For example, the following code should return [1,2,6,4] because the element at index 2 has been doubled:
# double_index([1, 2, 3, 4], 2)
def double_index(lst, index):
    """Return a copy of lst with the element at `index` doubled.

    If `index` is not a valid non-negative position (0 <= index < len(lst)),
    the original list is returned unchanged, per the exercise spec.
    The input list is never mutated.
    """
    # Bug fix: the original guard only caught index >= len(lst), so a
    # negative index slipped through and produced a garbled result
    # (lst[0:-1] + [lst[-1]*2] + lst[0:]) instead of returning lst.
    if not 0 <= index < len(lst):
        return lst
    return lst[:index] + [lst[index] * 2] + lst[index + 1:]
print(double_index([3, 8, -10, 12], 2))
|
[
"mar.jakubowska90@gmail.com"
] |
mar.jakubowska90@gmail.com
|
07d05ba45c472bf799a1f23648b8c051d0afc099
|
55c9d534eb5a1c11f507d4b0e5bb1dbfe359f988
|
/icedata/datasets/ochuman/tests/test_parsers.py
|
c8c84d443740c747318c856f3821b50d8aaba4e2
|
[
"Apache-2.0"
] |
permissive
|
yrodriguezmd/icedata
|
a1aa5e2d8c5af829310ff16493f20408fc303f13
|
ef97358fa5b01ce9b707310571262e6f26ff0694
|
refs/heads/master
| 2023-07-26T08:30:12.831216
| 2021-09-09T16:49:28
| 2021-09-09T16:49:28
| 400,196,590
| 0
| 0
|
Apache-2.0
| 2021-09-04T19:05:51
| 2021-08-26T14:24:26
|
Python
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
import icedata
from icevision.all import *
def test_parser(data_dir):
    """Smoke-test the OCHuman parser against the bundled sample dataset.

    `data_dir` is presumably a pytest fixture pointing at the sample data
    root (defined elsewhere in the suite) — TODO confirm.  The expected
    values below are pinned to the sample annotations file.
    """
    parser = icedata.ochuman.parser(
        data_dir / "annotations/ochuman.json", data_dir / "images"
    )
    # Single split: take the only (first) record list.
    records = parser.parse(data_splitter=SingleSplitSplitter())[0]
    assert len(records) == 10
    record = records[3]
    assert record.filepath.name == "000004.jpg"
    assert record.record_id == 3
    assert record.detection.labels == ["person", "person"]
    assert record.height == 333
    assert record.width == 500
|
[
"noreply@github.com"
] |
yrodriguezmd.noreply@github.com
|
48a5901961e5d91bff29c809c3e59e9a8471778e
|
8d81eb76afdd16d4f417d6e98e9bc93287e1fcff
|
/Apps/phskypebusiness/skypeforbusiness_consts.py
|
abd0fb25b75eaf243bc09e0eba95c876c5d58601
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
philroyer-phantom/phantom-apps
|
29e02c299a3db78870b44be3ba6ab51c5e975160
|
2e83bda83cd130e240d5107e54ee9055dc6fcae2
|
refs/heads/master
| 2021-08-08T03:38:51.344427
| 2020-03-17T22:59:27
| 2020-03-17T22:59:27
| 248,769,317
| 1
| 0
|
Apache-2.0
| 2020-03-20T14:00:18
| 2020-03-20T14:00:17
| null |
UTF-8
|
Python
| false
| false
| 2,066
|
py
|
# File: skypeforbusiness_consts.py
# Copyright (c) 2019 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# Define your constants here
SKYPE4B_LOGIN_BASE_URL = 'https://login.microsoftonline.com'
SKYPE4B_AUTHORIZE_URL = '/{tenant_id}/oauth2/authorize?client_id={client_id}&redirect_uri={redirect_uri}' \
'&response_type={response_type}&state={state}&resource={resource}'
SKYPE4B_SERVER_TOKEN_URL = '/{tenant_id}/oauth2/token'
SKYPE4B_FIRST_HUB_URL_ENDPOINT = 'https://webdir.online.lync.com/autodiscover/autodiscoverservice.svc/root'
SKYPE4B_AUTODISCOVERY_ENDPOINT = '/autodiscover/autodiscoverservice.svc/root/oauth/user'
SKYPE4B_PHANTOM_BASE_URL = '{phantom_base_url}rest'
SKYPE4B_PHANTOM_SYS_INFO_URL = '/system_info'
SKYPE4B_PHANTOM_ASSET_INFO_URL = '/asset/{asset_id}'
SKYPE4B_APPLICATIONS_ENDPOINT = '/ucwa/oauth/v1/applications'
SKYPE4B_REST_URL_NOT_AVAILABLE_MSG = 'Rest URL not available. Error: {error}'
SKYPE4B_TEST_CONNECTIVITY_FAILED_MSG = 'Test connectivity failed'
SKYPE4B_TEST_CONNECTIVITY_PASSED_MSG = 'Test connectivity passed'
SKYPE4B_OAUTH_URL_MSG = 'Using OAuth URL:'
SKYPE4B_BASE_URL_NOT_FOUND_MSG = 'Phantom Base URL not found in System Settings. ' \
'Please specify this value in System Settings.'
SKYPE4B_AUTHORIZE_USER_MSG = 'Please authorize user in a separate tab using URL'
SKYPE4B_TOKEN_NOT_AVAILABLE_MSG = 'Token not available. Please run test connectivity first.'
SKYPE4B_RUN_TEST_CONN_MSG = 'Resource URL not available. Please run test connectivity first.'
SKYPE4B_JSON_CONTACT = 'contact_email'
SKYPE4B_JSON_MESSAGE = 'message'
SKYPE4B_CONFIG_CLIENT_ID = 'client_id'
SKYPE4B_CONFIG_CLIENT_SECRET = 'client_secret'
SKYPE4B_CONFIG_TENANT = 'tenant'
SKYPE4B_DEFAULT_TENANT = 'common'
SKYPE4B_ACCESS_TOKEN = 'access_token'
SKYPE4B_REFRESH_TOKEN = 'refresh_token'
SKYPE4B_TOKEN_STRING = 'token'
SKYPE4B_TC_FILE = 'oauth_task.out'
SKYPE4B_HEADERS_APP_JSON = 'application/json'
SKYPE4B_AUTHORIZE_WAIT_TIME = 15
SKYPE4B_TC_STATUS_SLEEP = 3
|
[
"noreply@github.com"
] |
philroyer-phantom.noreply@github.com
|
254d29df377255ac6f245a33ca81d90de2a36670
|
f5f99147fb0932b5233c98c410be0b2e4d35dc3e
|
/Updated_0714/model.py
|
043752360ae41ab204be651bed9ad5825480983f
|
[] |
no_license
|
inzaghi7891/ML-in-OFC-Learned-DBP
|
7cc17e73155231cf0322b70739704ea9a04dba5c
|
f33dfc1e8d5a36ac5f7b14b1998da7b823f437f0
|
refs/heads/master
| 2022-12-23T14:28:23.618694
| 2020-10-07T03:46:39
| 2020-10-07T03:46:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,646
|
py
|
import tensorflow as tf
import numpy as np
from Weight_Transform import Weight_Transform
from Weight_Transform_whole import Weight_Transform_whole
from scipy.io import loadmat
N = 2048
M = 1024
k = 6
Lsp = 100000
Nsp = 1
M_step = 2
STPS = 1 # Step per Span
delta = Lsp/M_step
A = loadmat('init.mat')['real']
A = np.array(A)
real_init = np.squeeze(A)
A = loadmat('init.mat')['image']
A = np.array(A)
imag_init = np.squeeze(A)
A = loadmat('init_MF.mat')['real_MF']
A = np.array(A)
MF_real_init = np.squeeze(A)
A = loadmat('init_MF.mat')['image_MF']
A = np.array(A)
MF_image_init = np.squeeze(A)
class Model(object):
    """Full network: Nsp Span blocks chained in sequence, followed by a
    trainable matched filter (MF) applied via four real matmuls.

    Built on TF1-style graph execution (tf.compat.v1 placeholders); the
    repo context suggests this implements learned digital back-propagation
    for optical fibre — confirm against the accompanying paper/README.
    """
    def __init__(self):
        # Graph inputs: received waveform (length N) and targets (length M).
        self.X_real = tf.compat.v1.placeholder(tf.float32, shape=(None, N), name="X_real")
        self.X_image = tf.compat.v1.placeholder(tf.float32, shape=(None, N), name="X_image")
        self.y_real = tf.compat.v1.placeholder(tf.float32, shape=(None, M), name="y_real")
        self.y_image = tf.compat.v1.placeholder(tf.float32, shape=(None, M), name="y_image")
        # Fixed 0.1 input scaling (placeholders can't be reassigned directly).
        self.X_OP_real = tf.math.scalar_mul(0.1, self.X_real)  # Since placeholder cannot assigned by another tensor
        self.X_OP_image = tf.math.scalar_mul(0.1, self.X_image)
        # Chain Nsp spans; each consumes the previous span's output.
        self.spanlist = []
        for ii in range(1, Nsp+1):  # for loop ready for large network
            self.spanlist.append(Span(X_real=self.X_OP_real, X_image=self.X_OP_image, span_number=ii))
            self.out_real, self.out_image = self.spanlist[ii-1].get_span_out()
            self.X_OP_real = self.out_real
            self.X_OP_image = self.out_image
        # MF
        # self.rr = tf.contrib.layers.fully_connected(self.out_real, num_outputs=M, activation_fn=None, biases_initializer=None,
        #                                             trainable=True, scope='MF_Real')
        # self.ri = tf.contrib.layers.fully_connected(self.out_real, num_outputs=M, activation_fn=None, biases_initializer=None,
        #                                             trainable=True, scope='MF_Image')
        # self.ir = tf.contrib.layers.fully_connected(self.out_image, num_outputs=M, activation_fn=None, biases_initializer=None,
        #                                             trainable=True, reuse=True, scope='MF_Real')
        # self.ii = tf.contrib.layers.fully_connected(self.out_image, num_outputs=M, activation_fn=None, biases_initializer=None,
        #                                             trainable=True, reuse=True, scope='MF_Image')
        # Trainable matched filter, initialised from init_MF.mat; the
        # complex multiply is expanded into four real matmuls below.
        self.MF_real = tf.Variable(MF_real_init, trainable=True, dtype=tf.float32, name='MF_Real')  # Filter Initialization Needed
        self.MF_Processed_real = Weight_Transform(self.MF_real, k=10, n=N, m=M)
        self.MF_image = tf.Variable(MF_image_init, trainable=True, dtype=tf.float32, name='MF_Image')  # Filter Initialization Needed
        self.MF_Processed_image = Weight_Transform(self.MF_image, k=10, n=N, m=M)
        self.rr = tf.matmul(self.out_real, self.MF_Processed_real)
        self.ri = tf.matmul(self.out_real, self.MF_Processed_image)
        self.ir = tf.matmul(self.out_image, self.MF_Processed_real)
        self.ii = tf.matmul(self.out_image, self.MF_Processed_image)
        # (a + jb)(c + jd): real = ac - bd, imag = ad + bc.
        self.Si = tf.math.add(self.ri, self.ir)
        self.Sr = tf.math.subtract(self.rr, self.ii)
        self.out_MF_real = self.Sr
        self.out_MF_image = self.Si
    def get_reconstruction(self):
        # Final output after the matched filter.
        return self.out_MF_real, self.out_MF_image
    def get_middle_para(self):
        # Output of the last span, before the matched filter.
        return self.out_real, self.out_image
class Span(object):
    """One fibre span: STPS Layer blocks chained back-to-back, each
    consuming the previous layer's output.
    """
    def __init__(self, X_real, X_image, span_number):
        self.X_real = X_real
        self.X_image = X_image
        self.span_number = span_number
        self.layerlist = []
        for ii in range(1, STPS+1):  # for loop ready for large network
            self.layerlist.append(Layer(X_real=self.X_real, X_image=self.X_image, layer_number=ii, span_number=self.span_number))
            self.out_real, self.out_image = self.layerlist[ii-1].get_layer_output()
            self.X_real = self.out_real
            self.X_image = self.out_image
    def get_span_out(self):
        # Output tensors of the last layer in this span.
        return self.out_real, self.out_image
    def get_w1(self):
        # NOTE(review): with STPS == 1 layerlist holds a single element, so
        # [1] raises IndexError if this is ever called — probably meant [0]
        # or [-1]; confirm before use.
        return self.layerlist[1].get_w1()
class Layer(object):
    """One DBP step: a linear W1 stage followed by the nonlinear phase
    stage (the commented-out W2 stage is currently disabled).
    """
    def __init__(self, X_real, X_image, layer_number, span_number):
        self.X_real = X_real
        self.X_image = X_image
        self.layer_number = layer_number
        self.span_number = span_number
        # Linear W1
        self.Linear1 = Linear_W1(X_real=self.X_real, X_image=self.X_image, layer_number=self.layer_number, span_number=self.span_number)
        self.Sr, self.Si = self.Linear1.get_linear_W1_out()
        # Nonlinear
        self.Nonlinear = NonLinear_OP(S_real=self.Sr, S_image=self.Si, layer_number=self.layer_number, span_number=self.span_number)
        self.out_real, self.out_image = self.Nonlinear.get_nonlinear_out()
        # # Linear W2
        # self.Linear2 = Linear_W2(X_real=self.y_real, X_image=self.y_image, layer_number=self.layer_number, span_number=self.span_number)
        # self.out_real, self.out_image = self.Linear2.get_linear_W2_out()
    def get_layer_output(self):
        return self.out_real, self.out_image
    def get_w1(self):
        return self.Linear1.get_w1()
class Linear_W1(object):
    """Trainable linear (dispersion-compensation) stage: a complex NxN
    matmul expanded into four real matmuls, with weights initialised from
    init.mat and reshaped by Weight_Transform.
    """
    def __init__(self, X_real, X_image, layer_number, span_number):
        self.X_real = X_real
        self.X_image = X_image
        # Unique variable names per (span, layer) so TF variables don't clash.
        self.name_real = 'weight_real_' + str(span_number) + '_' + str(layer_number) + '_' + '1'
        self.name_image = 'weight_image_' + str(span_number) + '_' + str(layer_number) + '_' + '1'
        # NOTE(review): x is only referenced by the commented-out tf.case
        # lines below; vestigial as the code stands.
        x = 1
        self.W1_real = tf.Variable(real_init, trainable=True, dtype=tf.float32, name=self.name_real)  # Filter Initialization Needed
        # f1 = lambda: self.W1_real  # Filter Initialization Needed
        # f2 = lambda: self.W1_real*0.1  # Filter Initialization Needed
        # self.W1_real = tf.case([(tf.less(x, layer_number), f1)], default=f2)
        self.W1_Processed_real = Weight_Transform(self.W1_real, k=69, n=N, m=N)
        self.W1_image = tf.Variable(imag_init, trainable=True, dtype=tf.float32, name=self.name_image)
        # f1 = lambda: self.W1_image  # Filter Initialization Needed
        # f2 = lambda: self.W1_image*0.1  # Filter Initialization Needed
        # self.W1_image = tf.case([(tf.less(x, layer_number), f1)], default=f2)
        self.W1_Processed_image = Weight_Transform(self.W1_image, k=69, n=N, m=N)
        self.rr = tf.matmul(self.X_real, self.W1_Processed_real)
        self.ri = tf.matmul(self.X_real, self.W1_Processed_image)
        self.ir = tf.matmul(self.X_image, self.W1_Processed_real)
        self.ii = tf.matmul(self.X_image, self.W1_Processed_image)
        # (a + jb)(c + jd): real = ac - bd, imag = ad + bc.
        self.Si = tf.math.add(self.ri, self.ir)
        self.Sr = tf.math.subtract(self.rr, self.ii)
    def get_linear_W1_out(self):
        return self.Sr, self.Si
    def get_w1(self):
        # Raw (pre-Weight_Transform) real weight variable.
        return self.W1_real
class Linear_W2(object):
    """Alternative linear stage with a small k-tap filter (k = 6 at module
    level) initialised to 0.3; currently unused (only referenced from the
    commented-out block in Layer).

    NOTE(review): Weight_Transform is called here without the m= argument
    used elsewhere — verify the expected signature before re-enabling.
    """
    def __init__(self, X_real, X_image, layer_number, span_number):
        self.X_real = X_real
        self.X_image = X_image
        self.name_real = 'weight_real_' + str(span_number) + '_' + str(layer_number) + '_' + '2'
        self.name_image = 'weight_image_' + str(span_number) + '_' + str(layer_number) + '_' + '2'
        self.W2_real = tf.Variable([0.3,0.3,0.3,0.3,0.3,0.3,0.3], trainable=True, dtype=tf.float32, name=self.name_real)
        self.W2_Processed_real = Weight_Transform(self.W2_real, k=k, n=N)
        self.W2_image = tf.Variable([0.3,0.3,0.3,0.3,0.3,0.3,0.3], trainable=True, dtype=tf.float32, name=self.name_image)
        self.W2_Processed_image = Weight_Transform(self.W2_image, k=k, n=N)
        self.rr = tf.matmul(self.X_real, tf.transpose(self.W2_Processed_real))
        self.ri = tf.matmul(self.X_real, tf.transpose(self.W2_Processed_image))
        self.ir = tf.matmul(self.X_image, tf.transpose(self.W2_Processed_real))
        self.ii = tf.matmul(self.X_image, tf.transpose(self.W2_Processed_image))
        # (a + jb)(c + jd): real = ac - bd, imag = ad + bc.
        self.Si = tf.math.add(self.ri, self.ir)
        self.Sr = tf.math.subtract(self.rr, self.ii)
    def get_linear_W2_out(self):
        return self.Sr, self.Si
class NonLinear_OP(object):
    """Nonlinear phase stage: rotates each complex sample by a trainable
    multiple of its own power, y = S * exp(j * alph * |S|^2), expanded
    into real sin/cos arithmetic below.
    """
    def __init__(self, S_real, S_image, layer_number, span_number):
        self.Sr = S_real
        self.Si = S_image
        self.name = 'alph_' + str(span_number) + '_' + str(layer_number)
        self.alph = tf.Variable(-1.0*84.8, trainable=True, dtype=tf.float32, name=self.name)  # Alpha Initialization
        # |S|^2, then theta = alph * |S|^2 (S_power is reused for theta).
        self.S_power = tf.math.add(tf.math.square(self.Sr), tf.math.square(self.Si))
        self.S_power = tf.math.scalar_mul(self.alph, self.S_power)
        self.sin = tf.math.sin(self.S_power)
        self.cos = tf.math.cos(self.S_power)
        # (Sr + j*Si) * (cos + j*sin), split into real and imaginary parts.
        self.y_real = tf.math.subtract(tf.math.multiply(self.Sr, self.cos), tf.math.multiply(self.Si, self.sin))
        self.y_image = tf.math.add(tf.math.multiply(self.Si, self.cos), tf.math.multiply(self.Sr, self.sin))
    def get_nonlinear_out(self):
        return self.y_real, self.y_image
|
[
"noreply@github.com"
] |
inzaghi7891.noreply@github.com
|
b5e2ea0ffd7e6eb73051a11f835d678c0ffb9d9b
|
8025346dbb746d9ae4653d1a20900a822fdc2641
|
/nodes/sub_thread
|
83c6d1dfbfef25827634ce16fa62de16592cbb19
|
[
"MIT"
] |
permissive
|
m-elwin/me495_threads
|
9d8c92f82b26d02795710c0d246f3cd6016ceb03
|
25a2cbe733b8332841e36cc38b23fb74decec300
|
refs/heads/main
| 2022-02-21T06:11:50.956026
| 2022-02-07T14:27:33
| 2022-02-07T14:27:33
| 216,602,506
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
#!/usr/bin/env python
""" Used to test the python threading model.
SUBSCRIBERS:
sleep (std_msgs/Duration) - duration of sleep, in seconds
A single subscriber that, upon receiving a message,
logs diagnostic information and sleeps for a specified duration.
By publishing different messages at different rates, the threading
model can be explored.
"""
import rospy
import threading
from std_msgs.msg import Duration
def sleeper(msg):
    """Subscriber callback: log the handling thread, sleep for the
    requested duration, then log the thread again on wake-up.

    Publishing messages at different rates lets you observe which thread
    rospy dispatches callbacks on and whether they overlap.
    """
    rospy.loginfo("Sleeping for %d sec, %d nsec on thread ID %s.", msg.data.secs, msg.data.nsecs, threading.current_thread())
    rospy.sleep(msg.data)
    rospy.loginfo("Woke up on Thread ID %s", threading.current_thread())
if __name__ == "__main__":
rospy.init_node("sub_thread")
rospy.loginfo("Main Thread: %s", threading.current_thread())
rospy.Subscriber("sleep", Duration, sleeper)
rospy.spin()
|
[
"elwin@northwestern.edu"
] |
elwin@northwestern.edu
|
|
fec71cd0be62a88856dede6cbc59942c71b6ea75
|
1d4df37989259d36942b601ae5c786a2061eb08d
|
/session five/files/2. color spaces.py
|
5f0f18787d7dea400aecb2ac908c699408fe6860
|
[] |
no_license
|
AhMedMubarak20/full-computer-vision-course
|
47246b00623720c38e6b87529f1dd63aba83c64c
|
ca75f9e3ae6b74f1f96da723c13fb84836ae5e10
|
refs/heads/main
| 2023-07-01T21:26:13.961564
| 2021-07-24T19:05:50
| 2021-07-24T19:05:50
| 336,350,127
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
import numpy as np
import cv2
# Demo: show the same image in three colour spaces (grayscale, HSV,
# L*a*b) alongside the original BGR frame.  Windows stay open until a
# key is pressed.  NOTE(review): the numpy import is unused here.
image = cv2.imread("images/khwarizmy.jpg")
###? Color Spaces ###
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("Gray", gray)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cv2.imshow("HSV", hsv)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
cv2.imshow("L*a*b", lab)
cv2.imshow("ORG", image)
cv2.waitKey(0)
|
[
"noreply@github.com"
] |
AhMedMubarak20.noreply@github.com
|
47db407d57ca8add03329b54fca47c7f724b89fc
|
7dd91a3aad021f27e2dc1808be0acf46d70c98aa
|
/run.py
|
109b661960cb6c34c31e5a3b6a69f8336e7e7027
|
[] |
no_license
|
ricardo1470/server
|
d4e9af0dc9dcb488a8876008f4f12c7fa4b72a92
|
9f295e5dc72181ff4085257bca0667d38f394ee0
|
refs/heads/master
| 2022-11-28T07:43:46.611574
| 2020-08-04T09:27:32
| 2020-08-04T09:27:32
| 275,807,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
#!/usr/bin/python3
from flask import Flask
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the landing page template."""
    return render_template("index.html")
@app.route('/client')
def client():
    """Render the client page with a hard-coded demo client list."""
    list_name = ['client1', 'client2', 'client3', 'client4']
    # `list` here is the template variable name, not the builtin.
    return render_template("client.html", list=list_name)
@app.route('/params')
@app.route('/params/<name>/')
@app.route('/params/<name>/<int:num>')
def params(name="value", num="nothing"):
    """Echo the optional URL parameters back as plain text.

    Either parameter falls back to its default when the shorter route
    variants match ("value" / the string "nothing").
    """
    reply = f'the parameter is: {name} {num}'
    return reply
if __name__ == '__main__':
    # Development server only: debug reloader enabled, bound to
    # localhost on port 9000.
    app.run(debug=True, port=9000, host='127.0.0.1')  # run server port 9000
|
[
"ricardo.alfonso.camayo@gmail.com"
] |
ricardo.alfonso.camayo@gmail.com
|
e324dc2d2052c11d81a5d928308b5cee3cac91c0
|
084c7c66bdb33cb07ddb965c71429546061c21a0
|
/Face Detection using image/tempCodeRunnerFile.py
|
6db9a3f0f245c30817bfdad21b3b008550633ea0
|
[] |
no_license
|
Kamalesh9483/Face-Detection-using-Open-CV2
|
ce571b06cd3c340f1feae457667e122acd987a62
|
bac86f0e88452ac703b0385ad7dc275819dc5e5e
|
refs/heads/master
| 2023-04-18T17:25:07.823552
| 2021-05-06T15:37:51
| 2021-05-06T15:37:51
| 364,898,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# Fragment left behind by VS Code's Code Runner: saves the grayscale
# frame (computed in the real script) to disk.  Not runnable standalone —
# depends on `cv2` and `imgGray` defined elsewhere.
# NOTE(review): cv2.imwrite returns a bool, so this clobbers imgGray.
imgGray = cv2.imwrite("C:/Users/Kamalesh/Python program_ test/Virtual paint assignment/Face Detection/Face Detection - image/group_new.jpg",imgGray)
|
[
"kamalesh9483@gmail.com"
] |
kamalesh9483@gmail.com
|
0aa71140e205fe90dc957e3378e51fe861fa9cef
|
19c09273188fd65fd356429e1816823af9abf852
|
/loaders/arroyomolinos_payments_loader.py
|
bec9cc8a90b7f1941c5048bd10c73bad8d364096
|
[] |
no_license
|
civio/presupuesto-arroyomolinos
|
036231a5b9ccf996d50086d556e999b56006dc1e
|
c5d25430ec823da3a9bf7e49b4faecab8ec4fc42
|
refs/heads/master
| 2022-06-04T13:44:03.336727
| 2020-12-13T10:32:48
| 2020-12-13T10:32:48
| 63,264,643
| 0
| 0
| null | 2022-04-09T15:53:12
| 2016-07-13T17:04:48
|
SCSS
|
UTF-8
|
Python
| false
| false
| 1,471
|
py
|
# -*- coding: UTF-8 -*-
import datetime
from budget_app.loaders import PaymentsLoader
from budget_app.models import Budget
class ArroyomolinosPaymentsLoader(PaymentsLoader):
    """Payments loader for the Arroyomolinos budget (Python 2 code: note
    the `unicode` call below)."""
    # Parse an input line into fields
    def parse_item(self, budget, line):
        """Map one CSV row to a payment dict for the base loader.

        `line` is presumably a list of string columns from the source CSV
        — confirm column layout against the input files.
        """
        policy_id = line[1].strip()[:2]  # First two digits of the programme make the policy id
        # But what we want as area is the policy description
        policy = Budget.objects.get_all_descriptions(budget.entity)['functional'][policy_id]
        # We need to limit the length of the descriptions, they're way over the limit
        description = unicode(line[6].strip(), encoding='utf8')[:300]
        # Flag items as anonymized or not, so they don't show in the list of biggest providers
        payee = self._titlecase(line[5].strip())
        anonymized = payee.startswith('Este concepto recoge')
        return {
            'area': policy,
            'programme': None,
            'fc_code': None,  # We don't try (yet) to have foreign keys to existing records
            'ec_code': None,
            'date': datetime.datetime.strptime(line[3].strip(), "%d/%m/%Y"),
            'payee': payee,
            'anonymized': anonymized,
            'description': description,
            # The Excel files are inconsistent, in2csv renders different number formats (?!)
            'amount': self._read_english_number(line[7]) if budget.year==2016 else self._read_spanish_number(line[7])
        }
|
[
"david.cabo@gmail.com"
] |
david.cabo@gmail.com
|
e388796953cac08221d94cf729dd2cd4dec45a43
|
ebbdc0969343ac4d035b40b07af986af384759c2
|
/serverConf/scripts/hostsSpfSetup/hostsSpfSetup.py
|
8aff7957042b0a7a23988857d8000800bcc01deb
|
[] |
no_license
|
surajbhosale409/server-configuration-scripts
|
4f8998662945d71960534f8bba53a9a09aa9a86b
|
44db318b1e71c339f2908d71d2e1f4f20299f13b
|
refs/heads/master
| 2020-03-20T04:46:54.770284
| 2018-06-13T13:17:16
| 2018-06-13T13:17:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
import os
def hostsSpfSetup(ipDomainList):
    """Append (ip, domain) pairs to /etc/hosts and build SPF record strings.

    Side effects: appends one "ip domain" line per pair to /etc/hosts and
    sets the machine hostname to the first pair's domain (requires root).

    :param ipDomainList: non-empty list of (ip, domain) pairs.
    :return: [True, spfVals] on success, [False, spfVals] on any failure,
        where spfVals holds one "v=spf1 ..." string per pair written so far.
    """
    spfVals = []
    try:
        # `with` guarantees the handle is closed even on error — the
        # original leaked the open file when an exception was raised
        # before hosts.close().
        with open("/etc/hosts", "a") as hosts:
            for rec in ipDomainList:
                hosts.writelines(rec[0] + " " + rec[1] + "\n")
                spfVals.append("v=spf1 +a +mx +ip4:" + rec[0] + " -all")
        # Setting hostname to server
        os.system("hostname " + ipDomainList[0][1])
        return [True, spfVals]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; best-effort failure result preserved.
        return [False, spfVals]
|
[
"surajbhosale409@gmail.com"
] |
surajbhosale409@gmail.com
|
2f73b1ec347dad059288650b9a14abf4b0fd7c4a
|
4c3094a869f59be8836993469b28f088fef9fff1
|
/Questions/Q_125_PricingLICProduct.py
|
5fa85f491cab24ad64a1b0f7ff3a683c908fadc9
|
[] |
no_license
|
Bharadwaja92/DataInterviewQuestions
|
d885d40da4d546a164eee37e7250ddb519fc8954
|
5b002f34c3b1440f4347a098f7ce1db84fc80e7f
|
refs/heads/master
| 2022-11-06T08:57:49.283013
| 2020-06-22T09:10:40
| 2020-06-22T09:10:40
| 269,247,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
"""""""""
Suppose you're working for a life insurance company that is trying to price a one year policy for a 24-year old male.
If the individual has a .99972 probability of surviving the year, and the payout is $300,000,
what should the company price the policy at if they want to target an expected value of $150?
"""
"""
Prob of paying $300,000 = 1 - 0.99972 = 0.00028
Expected value = $150
Policy_price * prob of paying = expected value
Policy_price = 150 / 0.00028 = $535714.2857142858
"""
|
[
"saibharadwaj.kh@gaiansolutions.com"
] |
saibharadwaj.kh@gaiansolutions.com
|
ae1d83bb5c90007b36e92b1587ad877c336ef1c1
|
8ed49272e93624a65fead2aeb2924230bd69dab6
|
/problem1169.py
|
80ec582608ded4b7f6eada47e0f2843b5f95915f
|
[] |
no_license
|
digitalladder/leetcode
|
c1c6cb833f8df65b59f75defaa30aa3116b8d667
|
aba3b3627c1510cf61eaf8555389aad09cd112bb
|
refs/heads/master
| 2022-08-29T00:11:55.829581
| 2020-05-31T02:55:24
| 2020-05-31T02:55:24
| 166,723,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
#problem 1169 / invalid transactions
class Solution(object):
    def invalidTransactions(self, transactions):
        """Return the set of invalid transaction strings (LeetCode 1169).

        A transaction "name,time,amount,city" is invalid if amount > 1000,
        or if the same name transacts in a different city within 60 minutes
        (both transactions become invalid).

        :type transactions: List[str]
        :rtype: Set[str] (note: the original code returns a set, not a list)
        """
        # Bug fix: the snippet used `collections` without importing it,
        # raising NameError on the first call; import locally here.
        import collections
        seen = collections.defaultdict(list)
        invalid = set()
        for tran in transactions:
            name, time, amount, city = tran.split(',')
            if int(amount) > 1000:
                invalid.add(tran)
            # Compare against every earlier transaction for this name.
            for tt, aa, cc in seen[name]:
                if city != cc and abs(int(time) - int(tt)) <= 60:
                    # Reconstruct the earlier transaction's original string.
                    invalid.add(','.join([name, tt, aa, cc]))
                    invalid.add(tran)
            seen[name].append([time, amount, city])
        return invalid
|
[
"junjiehou@houjunjiedeMacBook-Pro.local"
] |
junjiehou@houjunjiedeMacBook-Pro.local
|
e058f512c4f79e240b59e9cb2f0c7df5e816cb9c
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/transformers/models/mmbt/__init__.py
|
9d5ff9cc73014065f1c941915e384c3142dcf3fe
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6636209f6f9ddd864af8a961c522aa5cf9cca41e1f27284ac0b8b147cbddd191
size 1773
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
84d8368e72641ef892afd8f64075be6b140a3945
|
5ba905ceb10b4c194580325e8d90a104ab4e3f46
|
/hackit/recommendations.py
|
4ba5d20ff929c116139dc9aa2cdff6d870033b70
|
[] |
no_license
|
Achintyabhatta/hackathon-Frontliners
|
1f3aeaacfdc666152a2b1f5272959f37800484c5
|
7a19ac494cd183f2ed56bcf810f5efb48e70b216
|
refs/heads/master
| 2022-07-02T22:48:16.746083
| 2020-05-11T19:01:17
| 2020-05-11T19:01:17
| 263,082,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
import string,os
from django.conf import settings
import pandas as pd
import csv
def perform(essentials):
    """Return the set of recommended products for the given essentials.

    Reads association rules from MEDIA_ROOT/all_rules.csv, where each row
    maps a comma-separated `ordered_product` basket to one
    `recommended_product`.  Products already in `essentials` are excluded.
    """
    df = pd.read_csv(os.path.join(settings.MEDIA_ROOT, 'all_rules.csv'))
    # Build a product -> {recommended products} lookup from the rules.
    mydata = {}
    for index, row in df.iterrows():
        for each in row['ordered_product'].split(','):
            if each not in mydata.keys():
                mydata[each] = set()
            mydata[each].add(row['recommended_product'])
    recommended = set()
    print(essentials)  # debug output
    for each in essentials:
        if each in mydata.keys():
            for every in mydata[each]:
                if every not in essentials:
                    recommended.add(every)
    return recommended
|
[
"noreply@github.com"
] |
Achintyabhatta.noreply@github.com
|
f9f920c7088ace3cce8b09dd5e897827686f8e8f
|
b67e25ab636349954522d6e055273630f31abe46
|
/opencv/test_seuillage.py
|
61ff3073222078f0affe1c710318b41470757d8a
|
[] |
no_license
|
sabeaussan/ROS_Unity
|
bb859854159489f1a2a9995c7268a613543bb9cd
|
a963975e7c4d63aca8adb6825a992c7c15cfab28
|
refs/heads/master
| 2022-11-08T22:12:48.330875
| 2020-06-30T13:13:16
| 2020-06-30T13:13:16
| 275,931,638
| 11
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
#!/usr/bin/python3
import cv2 as cv
# Threshold test: isolate the orange ball by masking a hand-tuned HSV
# range, then display the binary mask until a key is pressed.
img = cv.imread("balle_orange_2.jpg")
hsv = cv.cvtColor(img,cv.COLOR_BGR2HSV)
# HSV bounds tuned for this ball under this lighting — adjust per scene.
mask = cv.inRange(hsv,(0,122,150),(25,194,246))
cv.imshow("orange",mask)
cv.waitKey()
cv.destroyAllWindows()
|
[
"sambeda86@gmail.com"
] |
sambeda86@gmail.com
|
56d1024800be8e9370491967fdd7c0330dfd8c65
|
8a55b7428cc3962482fd70aaba74997f2205471b
|
/mysite/settings.py
|
6bd125a69207e16381aff3e095fbe552123f864a
|
[] |
no_license
|
spiroskatsamakas/djangotest11
|
418abf6a2ccee9ad1bc5be6f07e4bb4c552fb077
|
e10897105388cb97f1843a1288492e553e30e58f
|
refs/heads/master
| 2023-07-05T11:56:56.201697
| 2021-08-19T22:08:31
| 2021-08-19T22:08:31
| 398,075,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,193
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.24.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5h&!wlte8@44z8_5q@58%rmxn)s$f57lv%qq+m+5al(0(e_%^6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'el-GR'
TIME_ZONE = 'Europe/Athens'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"spiroskatsamakas@gmail.com"
] |
spiroskatsamakas@gmail.com
|
b219fbfea800f1870a3e624308fe40693f3b3efd
|
9835151ed6b99222d660af90b31e133d6bbc6a3d
|
/fhs/usr/share/python/syncwerk/restapi/restapi/forms.py
|
99cf0b1f1aafe142edcab9f4772c77375ab3803a
|
[
"Apache-2.0"
] |
permissive
|
syncwerk/syncwerk-server-restapi
|
33c6108c00b086260c62a0408f8f7f12ee7e1685
|
13b3ed26a04248211ef91ca70dccc617be27a3c3
|
refs/heads/main
| 2023-04-10T20:09:15.966129
| 2021-04-20T11:53:17
| 2021-04-20T11:53:17
| 292,826,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,939
|
py
|
# Copyright (c) 2012-2016 Seafile Ltd.
# encoding: utf-8
from django.conf import settings
from django import forms
from django.utils.translation import ugettext_lazy as _
from pyrpcsyncwerk import RpcsyncwerkError
from restapi.base.accounts import User
from restapi.constants import DEFAULT_USER, GUEST_USER
from restapi.utils import is_valid_dirent_name
from restapi.utils.licenseparse import user_number_over_limit
from restapi.role_permissions.utils import get_available_roles
class AddUserForm(forms.Form):
    """
    Form for adding a user.

    Validates that the licensed user limit has not been reached, that the
    email is not already registered, that the name contains no '/', and
    that both password fields match.
    """
    email = forms.EmailField()
    name = forms.CharField(max_length=64, required=False)
    department = forms.CharField(max_length=512, required=False)
    role = forms.ChoiceField(choices=[ (i, i) for i in get_available_roles() ])
    password1 = forms.CharField(widget=forms.PasswordInput())
    password2 = forms.CharField(widget=forms.PasswordInput())

    def clean_email(self):
        # Reject new accounts once the licensed user limit is exceeded.
        if user_number_over_limit():
            raise forms.ValidationError(_("The number of users exceeds the limit."))
        email = self.cleaned_data['email']
        try:
            # Only the existence check matters; the fetched user object was
            # previously bound to an unused local.
            User.objects.get(email=email)
            raise forms.ValidationError(_("A user with this email already exists."))
        except User.DoesNotExist:
            return email

    def clean_name(self):
        """
        should not include '/'
        """
        if "/" in self.cleaned_data["name"]:
            raise forms.ValidationError(_(u"Name should not include '/'."))
        return self.cleaned_data["name"]

    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two passwords didn't match."))
        return self.cleaned_data
class RepoCreateForm(forms.Form):
    """
    Form for creating repo and org repo.

    Encrypted repos (encryption != 0) must additionally supply the
    uuid / magic_str / encrypted_file_key triple.
    """
    repo_name = forms.CharField(
        max_length=settings.MAX_FILE_NAME,
        error_messages={
            'required': _(u'Name can\'t be empty'),
            'max_length': _(u'Name is too long (maximum is 255 characters)')
        })
    repo_desc = forms.CharField(
        max_length=100,
        error_messages={
            'required': _(u'Description can\'t be empty'),
            'max_length': _(u'Description is too long (maximum is 100 characters)')
        })
    encryption = forms.CharField(max_length=1)
    uuid = forms.CharField(required=False)
    magic_str = forms.CharField(required=False)
    encrypted_file_key = forms.CharField(required=False)

    def clean_repo_name(self):
        name = self.cleaned_data['repo_name']
        if is_valid_dirent_name(name):
            return name
        raise forms.ValidationError(_(u"Name %s is not valid") % name)

    def clean(self):
        cleaned = self.cleaned_data
        # Unencrypted repos need no key material.
        if int(cleaned['encryption']) == 0:
            return cleaned
        # Encrypted repos must supply all three pieces of key material.
        if not (cleaned['uuid'] and cleaned['magic_str'] and cleaned['encrypted_file_key']):
            raise forms.ValidationError(_("Argument missing"))
        return cleaned
class SharedRepoCreateForm(RepoCreateForm):
    """Form used for creating a group repo or a public repo."""
    # Sharing permission: read-write or read-only.
    permission = forms.ChoiceField(
        choices=(('rw', 'read-write'), ('r', 'read-only')))
class RepoRenameDirentForm(forms.Form):
    """
    Form for renaming a file or directory.
    """
    oldname = forms.CharField(error_messages={'required': _("Oldname is required")})
    newname = forms.CharField(max_length=settings.MAX_FILE_NAME,
                              error_messages={
                                  'max_length': _("It's too long."),
                                  'required': _("It's required."),
                              })

    def clean_newname(self):
        newname = self.cleaned_data['newname']
        try:
            if not is_valid_dirent_name(newname):
                error_msg = _(u'Name "%s" is not valid') % newname
                raise forms.ValidationError(error_msg)
            else:
                return newname
        # Fix: use 'except X as e' (valid on Python 2.6+ AND Python 3)
        # instead of the legacy 'except X, e' syntax, which is a
        # SyntaxError on Python 3.
        except RpcsyncwerkError as e:
            raise forms.ValidationError(str(e))
class RepoNewDirentForm(forms.Form):
    """
    Form for creating a new empty directory or a new empty file.
    """
    dirent_name = forms.CharField(max_length=settings.MAX_FILE_NAME,
                                  error_messages={
                                      'max_length': _("It's too long."),
                                      'required': _("It's required."),
                                  })

    def clean_dirent_name(self):
        dirent_name = self.cleaned_data['dirent_name']
        try:
            if not is_valid_dirent_name(dirent_name):
                error_msg = _(u'Name "%s" is not valid') % dirent_name
                raise forms.ValidationError(error_msg)
            else:
                return dirent_name
        # Fix: use 'except X as e' (valid on Python 2.6+ AND Python 3)
        # instead of the legacy 'except X, e' syntax, which is a
        # SyntaxError on Python 3.
        except RpcsyncwerkError as e:
            raise forms.ValidationError(str(e))
class SetUserQuotaForm(forms.Form):
    """Form for setting a user's space quota (non-negative integer)."""
    space_quota = forms.IntegerField(
        min_value=0,
        error_messages={'required': _('Space quota can\'t be empty'),
                        'min_value': _('Space quota is too low (minimum value is 0)')})
class RepoSettingForm(forms.Form):
    """Form for saving repo settings (name plus optional history days)."""
    repo_name = forms.CharField(error_messages={'required': _('Library name is required')})
    days = forms.IntegerField(
        required=False,
        error_messages={'invalid': _('Please enter a number')})

    def clean_repo_name(self):
        name = self.cleaned_data['repo_name']
        if not is_valid_dirent_name(name):
            raise forms.ValidationError(_(u"Name %s is not valid") % name)
        return name
class BatchAddUserForm(forms.Form):
    """Form for importing users in bulk from an XLSX file."""
    file = forms.FileField()
class TermsAndConditionsForm(forms.Form):
    """Form to save Terms & Conditions (name, version, body text)."""
    name = forms.CharField(error_messages={'required': _('Name is required')})
    version_number = forms.DecimalField(
        required=True,
        error_messages={'invalid': _('Please enter a valid number')})
    text = forms.CharField(error_messages={'required': _('Text is required')})
|
[
"alexander.jackson@syncwerk.de"
] |
alexander.jackson@syncwerk.de
|
4d7e399e8b436b464a53bfe1bcf35ca760e04af1
|
4588e37a3c78eefe5637166b35d02b89fe97f80c
|
/authentication/urls.py
|
97b5037d43d53e39a7c2133814f1b0d7f32f27c0
|
[] |
no_license
|
David127/pachaqtecProyecto
|
83295fb0e862f0cdb8b01cd597b46ad22375e851
|
10d5f82192a56e2b27325fbc56d1edd93035c35f
|
refs/heads/master
| 2023-06-29T18:35:46.876464
| 2021-08-07T05:51:24
| 2021-08-07T05:51:24
| 393,721,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
from django.urls import path
from .views import RegisterView, EmailVerify, LoginView, LogoutView, RequestPasswordResetEmail, PasswordTokenCheckAPI, PasswordChangeAPI
from rest_framework_simplejwt.views import TokenRefreshView
# URL routes for the authentication API.
urlpatterns = [
    path('login/', LoginView.as_view(), name='login'),
    path('logout/', LogoutView.as_view(), name='logout'),
    path('register/', RegisterView.as_view(), name='register'),
    # Endpoint hit by the account-activation link sent by e-mail.
    path('mail_verify/', EmailVerify.as_view(), name='mail_verify'),
    # JWT access-token refresh (rest_framework_simplejwt).
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('password_reset_email/', RequestPasswordResetEmail.as_view(), name='password_reset_email'),
    # uidb64/token pair produced by the password-reset e-mail.
    path('password_reset/<uidb64>/<token>', PasswordTokenCheckAPI.as_view(), name='password_reset'),
    path('password_change', PasswordChangeAPI.as_view(), name='password_change')
]
|
[
"david.ffdbhjf@gmail.com"
] |
david.ffdbhjf@gmail.com
|
3089a19ad50523efcd56b5526b45246846f872d7
|
7cb3e5e16fd93e6f8a1c07c211cee16dc248ef5d
|
/venv/lib/python3.6/site-packages/django/conf/locale/uk/formats.py
|
e0c6f7fd820ce7fbe5bfa33ee6be778e75855f5f
|
[] |
no_license
|
JustynaJBroniszewska/Blog
|
d74a8cb19fa037b834f5218522ff1397eb60d370
|
cfd8efbcce3e23c7ebeea82b2e732de63c663ac8
|
refs/heads/master
| 2022-11-03T22:01:07.165652
| 2020-06-05T14:25:01
| 2020-06-05T14:25:01
| 266,791,768
| 0
| 0
| null | 2020-06-05T14:25:02
| 2020-05-25T13:52:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
"""Ukrainian (uk) locale format definitions for Django."""
DATE_FORMAT = "d E Y р."
TIME_FORMAT = "H:i"
DATETIME_FORMAT = "d E Y р. H:i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "d F"
SHORT_DATE_FORMAT = "d.m.Y"
SHORT_DATETIME_FORMAT = "d.m.Y H:i"
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    "%d.%m.%Y",  # '25.10.2006'
    "%d %B %Y",  # '25 October 2006'
]
TIME_INPUT_FORMATS = [
    "%H:%M:%S",  # '14:30:59'
    "%H:%M:%S.%f",  # '14:30:59.000200'
    "%H:%M",  # '14:30'
]
DATETIME_INPUT_FORMATS = [
    "%d.%m.%Y %H:%M:%S",  # '25.10.2006 14:30:59'
    "%d.%m.%Y %H:%M:%S.%f",  # '25.10.2006 14:30:59.000200'
    "%d.%m.%Y %H:%M",  # '25.10.2006 14:30'
    "%d.%m.%Y",  # '25.10.2006'
    "%d %B %Y %H:%M:%S",  # '25 October 2006 14:30:59'
    "%d %B %Y %H:%M:%S.%f",  # '25 October 2006 14:30:59.000200'
    "%d %B %Y %H:%M",  # '25 October 2006 14:30'
    "%d %B %Y",  # '25 October 2006'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0"  # non-breaking space
NUMBER_GROUPING = 3
|
[
"jj.broniszewska@gmail.com"
] |
jj.broniszewska@gmail.com
|
2042449133f03be90aac3394907857928da24237
|
f703840a152c1e164e87ed29075def4fdb3cef7c
|
/9_Classification_Plant_Disease/image_mining_tools.py
|
fab2b0d9f705bf81afb598c43a7cf89190c73cd6
|
[] |
no_license
|
meryemman/Image-Mining-Projects
|
3fa9292bd7b8218167a6e55e88fecd04adc8ad30
|
c5f55cb1a7561f8e8216394c15648f95247d3c46
|
refs/heads/master
| 2023-02-18T20:52:20.657763
| 2021-01-15T12:57:37
| 2021-01-15T12:57:37
| 328,022,697
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,722
|
py
|
#---------------------------------------------------------------------------------
# Install necessary library and packages
#---------------------------------------------------------------------------------
# !pip install -U -r requirements.txt
#---------------------------------------------------------------------------------
# Import necessary library
#---------------------------------------------------------------------------------
import cv2, sys, os, joblib
from glob import glob
from tqdm import trange, tqdm
import numpy as np
import pandas as pd
# import math
import random
from skimage import feature as ft
import skimage.feature.texture as sft
import mahotas
from scipy import stats
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
# from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.feature_selection import SelectKBest, f_classif
from ReliefF import ReliefF
import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
import seaborn as sns
# Root directory of the plant-disease dataset; all caches/models are stored under it.
# NOTE(review): machine-specific absolute path — consider making it configurable.
home = 'C:/Users/mhmh2/Desktop/PlantDiseaseDataSet/'
#---------------------------------------------------------------------------------
# Problem Definition
#---------------------------------------------------------------------------------
'''
Problem definition is all about understanding the problem being solved in order to find
the most suitable ML techniques for use. This starts by deciding the scope of the problem.
That is whether it is a supervised problem (classification or regression) or an
unsupervised problem (clustering). After defining the problem scope, next is to decide
which ML algorithms to be used. For example, if it is a supervised problem then which
ML algorithm to be used. It is linear or non-linear, parametric or non-parametric, and so on.
Defining the problem serves the next step in the ML pipeline which is data preparation.
The machine learns by examples. Each example has inputs and outputs. If the problem is a
classification problem for example in which each sample is to be classified to one of
the pre-defined categories, then the outputs are labels. If it is a regression problem
in which the outputs are represented as a continuous line, then the output is no longer
a label but a number. So, by defining the problem we are able to prepare the data in a
suitable form.
'''
#---------------------------------------------------------------------------------
# Data Preparation
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# Helper function to load images from given directories
#---------------------------------------------------------------------------------
# Pre-prepared dataset
def indexing(dir_path, step):
    """Index a pre-split dataset: one sub-folder per class label.

    Extracts a feature vector per image, caches features/labels as .npy
    files under <home>/tools, and returns them as numpy arrays.

    dir_path: dataset root containing one folder per class label.
    step: split name ('train', 'valid', ...) used in the cache file names.
    """
    features, labels = [], []
    # One sub-directory per class label.
    folders = glob(str(dir_path)+os.path.sep+"*"+os.path.sep)
    for dir in folders:  # tqdm(folders, desc='DB ', file=sys.stdout):
        # for d in trange(len(folders), desc='DB '+step, file=sys.stdout):
        # tqdm._instances.clear()
        # dir = folders[d]
        if not os.path.isdir(dir):
            continue
        # The class label is the folder name (second-to-last path component).
        label = dir.split(os.path.sep)[-2]
        files = glob(str(dir)+os.path.sep+"*.*")
        for img_path in tqdm(files, desc=label+"\t", leave=False):
            # tqdm._instances.clear()
            img = cv2.imread(img_path)
            feature = extract_features(img)
            if feature is None:
                continue
            features.append(feature)
            labels.append(label)
    features, labels = np.array(features), np.array(labels)
    print(labels.shape, features.shape)
    # Cache the extracted features so later runs can skip re-extraction.
    if not os.path.exists(home+'tools'):
        os.makedirs(home+'tools')
    np.save(home+'tools/features_'+str(step)+'.npy', features, allow_pickle=True)
    np.save(home+'tools/labels_'+str(step)+'.npy', labels, allow_pickle=True)
    return np.array(features), np.array(labels)
# dataset "mrawna" (1 folder) (labels in the name of image)
def indexing2(dir_path, step, features=None, labels=None):
    """Index the "mrawna" dataset layout (single tree, label encoded in the
    image file name: characters 4:6 map to an expression label).

    Bug fix: the original used mutable default arguments
    (``features=[], labels=[]``), so the same lists were shared and kept
    growing across successive calls. They now default to fresh lists.

    Returns (features_array, labels_array, features_array.shape).
    """
    # Avoid mutable default arguments: fresh lists unless the caller supplies some.
    features = [] if features is None else features
    labels = [] if labels is None else labels
    dictionary = {'NE': 'neutral', 'HA': 'happy', 'SA': 'sadness',
                  'SU': 'surprise', 'AN': 'anger', 'DI': 'disgust', 'AF': 'fear'}
    imgs_path = glob(str(dir_path)+os.path.sep+"*"+os.path.sep+"*.JPG")
    for img_path in tqdm(imgs_path, desc='DB ', file=sys.stdout):
        try:
            img_name = img_path.split(os.path.sep)[-1]
        except:
            continue
        # Skip profile views (FR/FL) — only frontal images are used.
        if img_name[6:8] in ['FR', 'FL']:
            continue
        img = cv2.imread(img_path)
        feature = extract_features(img)
        if feature is None:
            continue
        features.append(feature)
        labels.append(dictionary[img_name[4:6]])
    features, labels = np.array(features), np.array(labels)
    shape = features.shape
    print(labels.shape, features.shape)
    if not os.path.exists(home+'tools'):
        os.makedirs(home+'tools')
    np.save(home+'tools/features_'+str(step)+'.npy', features, allow_pickle=True)
    np.save(home+'tools/labels_'+str(step)+'.npy', labels, allow_pickle=True)
    return np.array(features), np.array(labels), shape
# Half-prepared dataset
def indexing3(directorys, csv_path, step, features=None, labels=None):
    """Index a half-prepared dataset: images in one folder, labels looked
    up from a CSV file with columns 'image_id' and 'melanoma'.

    Bug fix: the original used mutable default arguments
    (``features=[], labels=[]``), so the same lists were shared and kept
    growing across successive calls. They now default to fresh lists.

    Returns (features_array, labels_array, features_array.shape).
    """
    # Avoid mutable default arguments: fresh lists unless the caller supplies some.
    features = [] if features is None else features
    labels = [] if labels is None else labels
    df = pd.read_csv(csv_path)
    all_files = glob(str(directorys)+os.path.sep+"*.*")
    for img_path in tqdm(all_files, desc='DB ', file=sys.stdout):
        feature = extract_features(cv2.imread(img_path))
        if feature is None:
            continue
        # Match the file name (without extension) against the CSV's image_id.
        img_name = img_path.split(os.path.sep)[-1].split(".")[0]
        label = np.array(df[df['image_id']==img_name]['melanoma'])[0]
        if label is None:
            continue
        labels.append(label)
        features.append(feature)
    features, labels = np.array(features), np.array(labels)
    shape = features.shape
    print(labels.shape, features.shape)
    if not os.path.exists(home+'tools'):
        os.makedirs(home+'tools')
    np.save(home+'tools/features_'+str(step)+'.npy', features, allow_pickle=True)
    np.save(home+'tools/labels_'+str(step)+'.npy', labels, allow_pickle=True)
    return np.array(features), np.array(labels), shape
#---------------------------------------------------------------------------------
# Extract Feature Descriptors
#---------------------------------------------------------------------------------
def extract_features(img):
    """Extract a global feature vector from a BGR image.

    Concatenates: per-channel color moments (mean/std), a 3-D HSV color
    histogram, four GLCM texture properties, Haralick texture features,
    and Hu shape moments. Disabled LBP/HOG extraction is kept below for
    reference.
    """
    global_features = []
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Extract color moments (mean and std per channel), mean-normalized.
    R, G, B = img[:,:,0], img[:,:,1], img[:,:,2]
    feature = [np.mean(R), np.std(R), np.mean(G), np.std(G), np.mean(B), np.std(B)]
    global_features.extend(feature/np.mean(feature))
    # extract a 3D color histogram from the HSV color space using
    # the supplied number of 'bins' per channel // bins=(8, 2, 2)
    hist = cv2.calcHist([img_hsv], [0, 1, 2], None, (8, 2, 2), [0, 256, 0, 256, 0, 256])
    cv2.normalize(hist, hist)
    global_features.extend(hist.flatten())
    # Texture analysis based on the GLCM (Gray-Level Co-Occurrence Matrix).
    glcm = sft.greycomatrix(img_gray, distances = [1], angles = [0], symmetric=True, normed=True)
    # glcm = sft.greycomatrix(img_gray, distances = [1], angles = [0, np.pi/4, np.pi/2, 3*np.pi/4], symmetric=True, normed=True)
    props = ['contrast', 'correlation', 'energy', 'homogeneity']
    feature = [sft.greycoprops(glcm, prop).ravel()[0] for prop in props]
    feature = feature / np.sum(feature)
    global_features.extend(feature)
    # Compute the haralick texture feature vector
    feature = mahotas.features.haralick(img_gray).ravel()
    global_features.extend(feature)
    # Hu Moments that quantify the shape of the object in the image.
    feature = cv2.HuMoments(cv2.moments(img_gray)).flatten()
    global_features.extend(feature)
    # ** Texture-based feature extraction (currently disabled): **
    """
    The LBP method captures the distribution of local micro-patterns.
    Facial expressions can be seen as a composition of micro-patterns
    that are effectively described by LBP features.
    An LBP histogram records only the occurrences of the micro-patterns,
    without any indication of their location.
    """
    # numPoints, radius, eps = 25, 8, 1e-7
    # lbp = ft.local_binary_pattern(img_gray, numPoints, radius, method="uniform")
    # (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, numPoints + 3), range=(0, numPoints + 2))
    # # normalize the histogram
    # hist = hist.astype("float")
    # hist /= (hist.sum() + eps)
    # global_features.extend(hist)
    # # Feature extraction using Histogram of Oriented Gradients (disabled).
    # feature = ft.hog(img_gray, orientations=6, pixels_per_cell=(9, 9), cells_per_block=(1, 1))
    # global_features.extend(feature)
    # global_features = stats.zscore(np.array(global_features))
    return global_features
#---------------------------------------------------------------------------------
# Feature Engineering
#---------------------------------------------------------------------------------
class FeatureEngineering:
    """Feature-reduction pipeline: PCA -> standard scaling -> ReliefF
    feature selection, with helpers to persist/restore the fitted models."""

    def __init__(self, n_PCA, n_Relief):
        self.pca = PCA(n_components=n_PCA)
        self.scaler = StandardScaler()
        self.relief = ReliefF(n_features_to_keep=n_Relief)

    def normalization_train(self, data_train, target):
        """Fit the pipeline on training data and return the reduced features."""
        reduced = self.scaler.fit_transform(self.pca.fit_transform(data_train))
        # ReliefF needs numeric class labels: map each distinct label to an int.
        label_to_int = {val: nb for nb, val in enumerate(set(target))}
        numeric_target = np.array([label_to_int[l] for l in target])
        return self.relief.fit_transform(np.array(reduced), numeric_target)

    def normalization_test(self, data_test):
        """Apply the already-fitted pipeline to unseen data."""
        projected = self.scaler.transform(self.pca.transform(data_test))
        return self.relief.transform(np.array(projected))

    def export_modules(self):
        """Persist the fitted PCA / scaler / ReliefF models to disk."""
        joblib.dump(self.pca, home+"model_PCA.sav")
        joblib.dump(self.scaler, home+"model_Scaler.sav")
        joblib.dump(self.relief, home+"model_reliefF.sav")

    def import_modules(self):
        """Load previously exported PCA / scaler / ReliefF models."""
        self.pca = joblib.load(home+"model_PCA.sav")
        self.scaler = joblib.load(home+"model_Scaler.sav")
        self.relief = joblib.load(home+"model_reliefF.sav")
# Module-level default pipeline (50 PCA components, 30 ReliefF features).
FE = FeatureEngineering(50,30)
#---------------------------------------------------------------------------------
# Training ML Algorithm
#---------------------------------------------------------------------------------
def training(X_train, y_train):
    """Fit an RBF-kernel SVM on the given features/labels and persist it
    to <home>/tools/model.sav."""
    # Alternatives tried previously:
    # model = KNeighborsClassifier(n_neighbors=1)
    # model = SVC(gamma=0.01, C=100)
    # model = SVC(gamma='scale')
    # model = SVC(kernel='linear', probability=True, tol=1e-3)
    classifier = SVC(kernel='rbf')
    classifier.fit(X_train, y_train)
    joblib.dump(classifier, home+'tools/model.sav')
# def training(shape):
# print("\n[Read DATA]")
# features = np.load(home+'tools/f'+str(shape)+'.npy', allow_pickle=True)
# labels = np.load(home+'tools/l'+str(shape)+'.npy', allow_pickle=True)
# print(features.shape, labels.shape)
# print("\n[NORMALIZATION]")
# # features = [Ft.normalization_zscore(f) for f in features]
# features = FE.normalization_train(features, labels)
# print("\n[TRAINING]")
# (trainFeat, testFeat, trainLabels, testLabels) = train_test_split(
# features, labels, test_size=0.1, random_state=random.seed())
# # train and evaluate a k-NN classifer on the raw pixel intensities
# # model = KNeighborsClassifier(n_neighbors=1)
# # model = SVC(gamma=0.01, C=100)
# # model = SVC(gamma='scale')
# # , verbose = True) #Set the classifier as a support vector machines with polynomial kernel
# model = SVC(kernel='linear', probability=True, tol=1e-3)
# model.fit(trainFeat,trainLabels)
# # model.fit(features, labels)
# # save the model to disk
# joblib.dump(model, home+'tools/model'+str(shape)+'.sav')
# # np.save('model.npy',model,allow_pickle=True)
# score = model.score(testFeat, testLabels)*100
# print("[DONE]\n[INFO]\tAccuracy = ", score, "%")
def best_SVM(X_train, y_train, X_test, y_test):
    """Sweep the SVM regularization parameter C over [1, 200), plot the
    validation-accuracy curve, and return (best_C, best_accuracy).

    Bug fix: the original trained every model with the constant ``C=C``
    (i.e. C=200), so all 199 iterations were identical and the returned
    "best C" was meaningless. Each iteration now uses ``C=n``.
    """
    C = 200
    mean_acc = np.zeros((C-1))
    for n in tqdm(range(1, C)):
        # Train one model per candidate C value.
        model = SVC(gamma=0.01, C=n, kernel='rbf').fit(X_train, y_train)
        mean_acc[n-1] = model.score(X_test, y_test)
    plt.plot(range(1,C), mean_acc, 'g')
    plt.legend(('Accuracy ', '+/- 3xstd'))
    plt.ylabel('Accuracy ')
    # Fix: the x axis is the regularization parameter, not a neighbor count.
    plt.xlabel('Regularization parameter (C)')
    plt.tight_layout()
    plt.show()
    print("The best accuracy was with", mean_acc.max(), "with C=", mean_acc.argmax()+1)
    return mean_acc.argmax()+1, mean_acc.max()
def bestK_KNN(X_train, y_train, X_test, y_test):
    """Sweep k-NN's k over [1, 200), plot the validation-accuracy curve,
    and return (best_k, best_accuracy)."""
    Ks = 200
    mean_acc = np.zeros((Ks-1))
    for k in tqdm(range(1, Ks)):
        classifier = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
        mean_acc[k-1] = classifier.score(X_test, y_test)
    plt.plot(range(1, Ks), mean_acc, 'g')
    plt.legend(('Accuracy ', '+/- 3xstd'))
    plt.ylabel('Accuracy ')
    plt.xlabel('Number of Nabors (K)')
    plt.tight_layout()
    plt.show()
    print("The best accuracy was with", mean_acc.max(), "with k=", mean_acc.argmax()+1)
    return mean_acc.argmax()+1, mean_acc.max()
#---------------------------------------------------------------------------------
# Testing Trained Model ( Evalute Model Performance )
#---------------------------------------------------------------------------------
# Classification : Accuracy, Sensitivity, Specificity, MCC
def evalute_model(X_test, y_test):
    """Load the persisted model, print its accuracy on the test set, and
    display the confusion matrix."""
    model = joblib.load(home+'tools/model.sav')
    score = model.score(X_test, y_test)*100
    print("[INFO]\tAccuracy = ", score, "%")
    predictions = model.predict(X_test)
    print('Confusion Matrix :\n', confusion_matrix(y_test, predictions))
    plot_confusion_matrix(y_test, predictions, model.classes_)
def plot_confusion_matrix(test_y, predict_y, labels):
    """Render the confusion matrix for the given predictions as a heatmap."""
    matrix = confusion_matrix(test_y, predict_y)
    # Normalization variants kept for reference:
    # divide each element by the sum of its row:
    # matrix = (((matrix.T)/(matrix.sum(axis=1))).T)
    # divide each element by the sum of its column:
    # matrix = (matrix/matrix.sum(axis=0))
    plt.figure(figsize=(20, 8))
    sns.heatmap(matrix, annot=True, cmap="YlGnBu", fmt=".3f",
                xticklabels=labels, yticklabels=labels)
    plt.xlabel('Predicted Class')
    plt.ylabel('Original Class')
    plt.show()
def best_nb_features(features_train, labels_train, features_valid, labels_valid):
    """Grid-search the feature-engineering hyperparameters.

    Tries n_PCA in [20, 70) and n_ReliefF in [10, n_PCA) (roughly), fits
    an RBF SVM per combination, and returns the (n_PCA, n_ReliefF) pair
    with the best validation accuracy.

    NOTE(review): training() is also called each iteration and overwrites
    <home>/tools/model.sav even though its model is not used here —
    presumably intentional caching of the latest model; confirm.
    """
    scores, index = [], []
    for i in trange(50, desc='PCA', file= sys.stdout):
        for j in tqdm(range(10,20+i), desc='ReliefF (n_PCA = '+str(i+20)+')', leave=False, file= sys.stdout):
            # Fresh pipeline per combination: i+20 PCA components, j ReliefF features.
            FE = FeatureEngineering(i+20, j)
            features_t = FE.normalization_train(features_train, labels_train)
            features_v = FE.normalization_test(features_valid)
            training(features_t, labels_train)
            model = SVC(kernel='rbf').fit(features_t, labels_train)
            score = model.score(features_v, labels_valid)*100
            scores.append(score)
            index.append((i+20,j))
    # Recover the hyperparameter pair with the highest validation score.
    (i,j) = index[np.argmax(scores)]
    print("[INFO]\tAccuracy = ", np.max(scores), "%\n\tn_PCA = "+str(i)+",\tn_ReliefF = "+str(j) )
    return i, j
#---------------------------------------------------------------------------------
# Model Deployment
#---------------------------------------------------------------------------------
def deployment():
    """Train the final model on train + validation data and persist both
    the feature-engineering pipeline and the fitted SVM to disk."""
    feats_train = np.load(home+'tools/features_train.npy', allow_pickle=True)
    labs_train = np.load(home+'tools/labels_train.npy', allow_pickle=True)
    feats_valid = np.load(home+'tools/features_valid.npy', allow_pickle=True)
    labs_valid = np.load(home+'tools/labels_valid.npy', allow_pickle=True)
    # Use every labeled sample for the deployed model.
    all_feats = np.concatenate([feats_train, feats_valid])
    all_labs = np.concatenate([labs_train, labs_valid])
    pipeline = FeatureEngineering(40, 30)
    all_feats = pipeline.normalization_train(all_feats, all_labs)
    pipeline.export_modules()
    final_model = SVC(kernel='rbf')
    final_model.fit(all_feats, all_labs)
    joblib.dump(final_model, home+'model.sav')
def prediction(n_pca=40, n_relieff=30):
    """Classify every image in the test folder and write `csv_file.csv`.

    Loads the persisted SVM and the fitted feature-engineering modules,
    extracts features from each test image (silently skipping images whose
    extraction fails), then writes a CSV with columns
    (nom_image, classe_predite).

    Parameters
    ----------
    n_pca, n_relieff : int
        Feature-engineering sizes; must match the values used by
        deployment() (defaults are the previously hard-coded 40 and 30).
    """
    loaded_model = joblib.load(home+'model.sav')
    FE = FeatureEngineering(n_pca, n_relieff)
    FE.import_modules()
    features_test = []
    imgIds = []
    files = glob(home+'test'+os.path.sep+"*.*")
    for img_path in tqdm(files, desc="Test", file=sys.stdout):
        img = cv2.imread(img_path)
        feature = extract_features(img)
        if feature is None:
            # Unreadable image or failed extraction: skip it entirely.
            continue
        features_test.append(feature)
        imgIds.append(img_path.split(os.path.sep)[-1])
    features_test = np.array(features_test)
    features_test = FE.normalization_test(features_test)
    labels_test = [loaded_model.predict([feature])[0] for feature in features_test]
    labels_test = ['Plante_'+str(l) for l in labels_test]
    df = pd.DataFrame({'nom_image': imgIds, 'classe_predite': labels_test})
    df.to_csv(home+'csv_file.csv', index=False, sep=",")
#---------------------------------------------------------------------------------
# La fonction main
#---------------------------------------------------------------------------------
if __name__ == '__main__':
    # Extract handcrafted features from the raw image folders (slow step;
    # indexing caches the arrays under tools/ as .npy files).
    features_train, labels_train = indexing(home+"train",'train')
    features_valid, labels_valid = indexing(home+"validation",'valid')
    # if you have all ready extracting the images features
    # features_train = np.load(home+'tools/features_train.npy', allow_pickle=True)
    # labels_train = np.load(home+'tools/labels_train.npy', allow_pickle=True)
    # features_valid = np.load(home+'tools/features_valid.npy', allow_pickle=True)
    # labels_valid = np.load(home+'tools/labels_valid.npy', allow_pickle=True)
    print(features_train.shape, features_valid.shape)
    # Search for the best (n_PCA, n_ReliefF) pair; the commented lines below
    # show the rest of the training / evaluation / deployment pipeline.
    i, j = best_nb_features(features_train, labels_train, features_valid, labels_valid)
    # i, j = 24, 21
    # FE = FeatureEngineering(i, j)
    # features_train = FE.normalization_train(features_train, labels_train)
    # features_valid = FE.normalization_test(features_valid)
    # training(features_train, labels_train)
    # evalute_model(features_valid, labels_valid)
    # best_SVM(features_train, labels_train, features_valid, labels_valid)
    # deployment(i, j)
    # prediction()
    print("[DONE]")
'''
Pour installer mahotas :
Install Microsoft Visual C++ 14.0 build tools : https://go.microsoft.com/fwlink/?LinkId=691126
or
pip install --upgrade setuptools
pip install mahotas
or the Binary install it the simple way!:
pip install --only-binary :all: mahotas
or :
conda config --add channels conda-forge
conda install mahotas
or :
conda install -c https://conda.anaconda.org/conda-forge mahotas
'''
|
[
"meryem.manessouri@gmail.com"
] |
meryem.manessouri@gmail.com
|
33863f53f57b85e29606bccfb0fb260ba7964e4c
|
b9803afa320b270a7c824fcda56a5441d31239fa
|
/node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/kerberos/build/config.gypi
|
ad2884351bb95dfb13c8f1148352bada2e52473c
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
hswolf/learn_mean
|
567404191edcebf3b16b24f761fcc3492d94deda
|
0e85d5428969e167122a887ef6f01d71d1bf8597
|
refs/heads/master
| 2021-01-01T18:38:10.560527
| 2015-08-11T09:41:54
| 2015-08-11T09:41:54
| 40,533,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,690
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 44,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "/",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/data/opt/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/vinh/.node-gyp/0.12.7",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/2.11.3 node/v0.12.7 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/home/vinh/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/vinh/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "0.12.7",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/vinh/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": "",
"spin": "true"
}
}
|
[
"vinhtt0305@gmail.com"
] |
vinhtt0305@gmail.com
|
c696966a5e45cb48ec75452928bb72c2fff807ec
|
d0cfe5a2e14c00b6752bb6f5e042f2d8a8d4f37d
|
/students/johnwachter/session04/dict_set_lab.py
|
c1d9feb6a5093fac284f71d928f4449ebf2d2f9e
|
[] |
no_license
|
johnwachter/Python210-W19
|
d917fcb8d6af1c060536b82d88eebaf8635781c5
|
cdc6e24a820897cc1b6cef175d1e3bf06928aec7
|
refs/heads/master
| 2021-07-11T08:34:40.896895
| 2020-08-07T03:25:08
| 2020-08-07T03:25:08
| 166,606,178
| 0
| 0
| null | 2019-01-19T23:58:33
| 2019-01-19T23:58:33
| null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
# Dictionaries 1: basic dict operations.
mydict = {'Name': 'Chris', 'City': 'Seattle', 'Cake': 'Chocolate'}
print(mydict)
del mydict['Cake']
print(mydict)
mydict['Fruit'] = 'Mango'
print(mydict)
print(mydict.keys())
print(mydict.values())
print(mydict.get('Cake', 'Cake not in dictionary'))
if 'Mango' in mydict.values():
    print('Mango is in the dictionary')
else:
    print('Mango not in dictionary')

# Sets 1: multiples of 2, 3 and 4 within 0..20, built with comprehensions
# instead of the original triple append loops.
s2 = {i for i in range(0, 21) if i % 2 == 0}
s3 = {i for i in range(0, 21) if i % 3 == 0}
s4 = {i for i in range(0, 21) if i % 4 == 0}
print(s2, s3, s4)
print(s3 < s2)  # False: 3, 9, 15 are not multiples of 2
print(s4 < s2)  # True: every multiple of 4 is a multiple of 2

# Sets 2
pyset = {'p', 'y', 't', 'h', 'o', 'n'}
pyset.add('i')
print(pyset)
# Bug fix: the original wrote `marathonset = frozenset = {...}`, which
# rebound the BUILTIN `frozenset` to a plain mutable set (and marathonset
# was never actually frozen). Build a real frozenset instead.
marathonset = frozenset('marathon')
print(marathonset)
union = pyset | marathonset
print(union)
intersection = pyset & marathonset
print(intersection)
|
[
"johnwachter29@gmail.com"
] |
johnwachter29@gmail.com
|
865d7f6ad41f7524c4479a83da8611b4b6a94258
|
6bee9512be40b149df77e0694c210c9fc6a6289a
|
/lesson8/task7/basic_inheritance.py
|
5b0ffd2eb4fe6430685ad7c1a14a93892a829dd9
|
[] |
no_license
|
sergio9977/SUMMER_BOOTCAMP_2018_Python
|
ced5508ca0c8605eb3fa2af9b254ed7d97055a81
|
351ab57bd9f7b282364936c3e228d6c5e315e3b9
|
refs/heads/master
| 2021-05-09T09:14:33.512871
| 2018-02-26T13:19:15
| 2018-02-26T13:19:15
| 119,430,972
| 0
| 0
| null | 2018-01-29T19:32:45
| 2018-01-29T19:32:45
| null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
class Person:
    """A person identified by a first and a last name."""

    def __init__(self, first, last):
        self.firstname = first
        self.lastname = last

    def getName(self):
        """Return the full name formatted as 'First Last'."""
        return " ".join((self.firstname, self.lastname))
class Employee(Person):
    """A Person who additionally carries a staff number."""

    def __init__(self, first, last, staffnum):
        super().__init__(first, last)
        self.staffnum = staffnum

    def getInfo(self):
        """Return 'First Last <staffnum>'."""
        return "{} {}".format(Person.getName(self), self.staffnum)
# Smoke test: exercise both classes and print their formatted output.
x = Person("Marge", "Simpson")
y = Employee("Homer", "Simpson", "1007")
print(x.getName())
print(y.getInfo())
|
[
"sergio9961937@gmail.com"
] |
sergio9961937@gmail.com
|
d75045d5c0f1a6949b9e7cfc4acd511c80730b5a
|
c70502b90675d271000e37fc140ee36659e68ea7
|
/foss_finder/utils/user_defined_info/fields/additional_info.py
|
744502ce0d33b6d23777a7b851d88825c6448833
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT"
] |
permissive
|
iotile/foss_finder
|
85936b4eb3b862256f9c8b0092cdad05bc095521
|
0c31530cf2158d4565a8168aecda18ffbcc68f04
|
refs/heads/master
| 2021-07-09T10:41:19.118859
| 2019-03-23T23:26:27
| 2019-03-23T23:26:27
| 145,917,271
| 6
| 1
|
MIT
| 2018-10-01T17:10:56
| 2018-08-23T23:18:59
|
Python
|
UTF-8
|
Python
| false
| false
| 845
|
py
|
from foss_finder.config.config import DEFAULT_COLUMNS, OPTIONAL_COLUMNS
from foss_finder.config.strings import ADDITIONAL_INFO_NAME
from .field import UserDefinedInformationField
class AdditionalInfo(UserDefinedInformationField):
    """
    Implements a field of the user-defined information file.
    This field's purpose is to add information about a dependency.
    In other words, fill optional columns (provided in the INI file) for a given package.
    """

    # Identifier of this field in the user-defined information file.
    NAME = ADDITIONAL_INFO_NAME

    def _transform_row(self, row, package_info):
        # A row is: the default columns plus one extra column, followed by
        # one slot per optional column declared in the INI file.
        initial_row_length = len(DEFAULT_COLUMNS) + 1
        assert len(row) == initial_row_length + len(OPTIONAL_COLUMNS)
        # Work on a copy so the caller's row is never mutated.
        res = row.copy()
        # Write each user-provided value into its optional-column slot;
        # columns absent from package_info keep their current value.
        for column, value in package_info.items():
            res[initial_row_length + OPTIONAL_COLUMNS.index(column)] = value
        return res
|
[
"noreply@github.com"
] |
iotile.noreply@github.com
|
286e4156131250a3656262d4864ec215b37dbbce
|
55662961770dfacae3ac835bc1f81ac3093f9de4
|
/week8/3-Cinema-Reservation-System/reserve_cimnema_sys.py
|
30e277f4217bb06b5816defd5413706cb2570382
|
[] |
no_license
|
AntonAtanasov/Programming-101v3
|
c857388424ddbed88a3798ae6a155f7d972c9c07
|
2873897de67a99989805f2d31585d6c2bc249422
|
refs/heads/master
| 2021-01-10T18:23:42.027990
| 2015-06-06T18:07:10
| 2015-06-06T18:07:10
| 36,734,341
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,006
|
py
|
from magic_reservation_system import CinemaReservation
from settings import DB_NAME
import sqlite3
import copy
# Reservation wizard steps as (prompt, expected input type) pairs; step 5's
# prompt is completed at runtime with the ticket number.
reserve_msg = [("Step 1(user)>", str),
               ("Step 2(number of tickets)>", int),
               ("Step 3(choose a movie)>", int),
               ("Step 4(choose projection)>", int),
               ("Step 5(choose seats for ", tuple),
               ("Step 6(Confirm - type 'finalize')>", str)]

# Single shared connection for the whole CLI session (closed on 'exit').
db_connection = sqlite3.connect(DB_NAME)
def main():
    """Command loop for the cinema reservation CLI.

    Dispatches the commands help / show_movies / show_projections /
    make_reservation / cancel_reservation / exit; anything else prints the
    unknown-command message. Runs until 'exit' closes the DB connection.
    """
    while True:
        command = CinemaReservation.parse_command(input("Enter command>"))
        if CinemaReservation.is_command(command, "help"):
            print(CinemaReservation.create_help())
        elif CinemaReservation.is_command(command, "show_movies"):
            print(CinemaReservation.show_movies(db_connection))
        elif CinemaReservation.is_command(command, "show_projections"):
            # Optional extra arguments narrow the listing.
            if len(command) == 3:
                print(CinemaReservation.show_movie_projections(db_connection, command[1], command[2]))
            elif len(command) == 2:
                print(CinemaReservation.show_movie_projections(db_connection, command[1]))
            else:
                print(CinemaReservation.show_movie_projections(db_connection))
        elif CinemaReservation.is_command(command, "make_reservation"):
            print('You are about to make reservation! Just folloow the steps. You can give_up @ any time :)')
            user_data = reservation_flow()
            if user_data:
                print(CinemaReservation.make_reservation(db_connection, user_data))
        elif CinemaReservation.is_command(command, "cancel_reservation"):
            if len(command) != 2:
                print('Projection ID not given')
                continue
            user = input("User?>")
            print(CinemaReservation.cancel_reservation(db_connection, command[1], user))
        elif CinemaReservation.is_command(command, "exit"):
            db_connection.close()
            break
        else:
            # Bug fix: the original used `command[0] is ''`, an identity test
            # against a string literal — unreliable (depends on interning).
            # Use equality to skip empty input.
            if command[0] == '':
                continue
            print(CinemaReservation.trigger_unknown_command())
def reservation_flow():
    """Walk the user through the reservation wizard defined in reserve_msg.

    Returns a dict {'Step-1': ..., ..., 'Step-6': ...} with the collected
    data, or False as soon as the user gives up at any step.
    """
    recv_data = {}
    # Deep-copy so popping steps never mutates the module-level template.
    current_reservation = copy.deepcopy(reserve_msg)
    while current_reservation:
        current_step, data_type = current_reservation[0]
        current_reservation.pop(0)
        # Steps 3-6 need dedicated handlers; the rest are plain prompts.
        if current_step == reserve_msg[2][0]:
            cur_step_data = get_movie(current_step, data_type)
        elif current_step == reserve_msg[3][0]:
            cur_step_data = get_projection(current_step, data_type, recv_data['Step-3'], recv_data['Step-2'])
        elif current_step == reserve_msg[4][0]:
            cur_step_data = check_seats(recv_data['Step-2'], current_step, data_type, recv_data['Step-4'])
        elif current_step == reserve_msg[5][0]:
            cur_step_data = final_notice(current_step, data_type, recv_data)
        else:
            cur_step_data = take_user_data(current_step, data_type)
        # Handlers return a falsy value when the user aborted.
        if not cur_step_data:
            print('Reservation process aborted!')
            return False
        # Step number = how many steps have been consumed so far.
        step_key = len(reserve_msg) - len(current_reservation)
        recv_data['Step-{}'.format(step_key)] = cur_step_data
    return recv_data
def final_notice(step, data_type, usr_data):
    """Show a reservation summary and ask the user to type 'finalize'.

    Returns the confirmation string, or False when the user aborts or
    types anything other than the exact keyword.
    """
    print('This is Sum-Up for your Reservation')
    print(CinemaReservation.get_reservation_info(db_connection, usr_data))
    is_fin_ok = take_user_data(step, data_type)
    # Bug fix: the original tested `is_fin_ok in 'finalize'`, a SUBSTRING
    # check — any fragment such as 'fin', 'ize' or even 'f' would confirm
    # the reservation. Require the exact keyword the prompt asks for.
    if is_fin_ok == 'finalize':
        return is_fin_ok
    return False
def get_movie(step, data_type):
    """Show the movie list and prompt until a movie with projections is chosen.

    Returns the movie id, or False when the user gives up.
    """
    print(CinemaReservation.show_movies(db_connection))
    movie_id = None
    # Keep prompting until the chosen movie has at least one projection
    # (show_movie_projections returns a falsy value otherwise).
    while not CinemaReservation.show_movie_projections(db_connection, movie_id):
        movie_id = take_user_data(step, data_type)
        if not movie_id:
            return False
    return movie_id
def get_projection(step, data_type, movie_id, numb_tickets):
    """Prompt for a projection of `movie_id` that has enough free seats.

    Returns the projection id, or False when the user gives up.
    """
    print(CinemaReservation.show_movie_projections(db_connection, movie_id))
    l_id = []
    proj_ids = CinemaReservation.get_id_of_projections(db_connection, movie_id)
    # get_id_of_projections yields 1-tuples; flatten them to plain ids.
    for ids in proj_ids:
        l_id.append(ids[0])
    no_space = True
    proj_id = None
    # Re-prompt until a valid projection with enough capacity is chosen.
    while proj_id not in l_id or no_space:
        proj_id = take_user_data(step, data_type)
        taken_seats = CinemaReservation.get_taken_seats_by_proj(db_connection, proj_id)
        # NOTE(review): assumes a 100-seat hall — confirm against the layout.
        if numb_tickets > 100 - len(taken_seats):
            print('There are not enough free seats for your reservation')
            continue
        no_space = False
    if not proj_id:
        return False
    return proj_id
def check_seats(numb_of_seats, step, d_type, proj_id):
    """Prompt for one 'row,col' seat per ticket.

    Rejects seats that are taken, already chosen in this session, or out of
    range. Returns the list of seat tuples, or False when the user aborts.
    """
    print(CinemaReservation.show_hall_layout(db_connection, proj_id))
    taken_seats = CinemaReservation.get_taken_seats_by_proj(db_connection, proj_id)
    seats = []
    for tick_num, seat in enumerate(range(numb_of_seats)):
        while True:
            try:
                data = input(step + 'Tiket-{}>'.format(tick_num + 1))
                if is_give_up(data):
                    return False
                seat_pos = d_type(int(x.strip()) for x in data.split(','))
                # Bug fix: the original bounds `0 < x < 10` only admitted
                # rows/columns 1..9 (81 seats), while get_projection budgets
                # for a 100-seat hall. Accept 1..10 — assumes a 10x10 layout
                # per that capacity check; confirm against show_hall_layout.
                in_row = 1 <= seat_pos[0] <= 10
                in_col = 1 <= seat_pos[1] <= 10
                if seat_pos in taken_seats or not in_row or not in_col or seat_pos in seats:
                    print('This seat is already taken Or Out of Range')
                    continue
                seats.append(seat_pos)
                break
            except Exception as e:
                # Malformed input (non-numeric, missing comma, ...): re-prompt.
                print(e)
                continue
    return seats
def take_user_data(step, data_type):
    """Prompt with `step` until the input converts to `data_type`.

    Returns False when the user types the abort keyword. Empty input and
    conversion failures simply re-prompt.
    """
    while True:
        try:
            answer = input(step)
            if is_give_up(answer):
                return False
            if answer:
                return data_type(answer)
        except Exception as err:
            print(err)
def is_give_up(data):
    """Return True when the user typed the session-abort keyword."""
    abort_keyword = 'give_up'
    return data == abort_keyword
# Run the CLI only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
[
"anton.p.atanasov@gmail.com"
] |
anton.p.atanasov@gmail.com
|
622f0a2831b81efc0fd4c2318bca84ad78309dee
|
9797b289a89012f381c6b1de794374ab0d73ed1a
|
/Chapter_11/11.1.test.py
|
ccb6bb67cbad0915c4e73b0c56cff85a3845a231
|
[] |
no_license
|
qh4321/1_python_crash_course
|
23c26d1c39cd0808e987fb8992e092a34aacc489
|
aa2d72857e3173a3951e52538607b6e331f50a24
|
refs/heads/master
| 2022-12-31T09:03:47.660232
| 2020-10-22T13:33:13
| 2020-10-22T13:33:13
| 301,715,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
import unittest
from city_function import city_country
class CityTestCase(unittest.TestCase):
    """Tests for city_function.city_country."""

    def test_ll(self):
        # NOTE(review): expects title-cased city/country joined by a comma
        # with no space — confirm against city_country's format string.
        double = city_country('beijing', 'china')
        self.assertEqual(double, 'Beijing,China')
unittest.main()  # discover and run the test case above when executed
|
[
"52791020+qh4321@users.noreply.github.com"
] |
52791020+qh4321@users.noreply.github.com
|
9f38ff1b368aeb8c210558580b8f2c492e943a39
|
c8b29b23b3363ae246516818f59fc95e2b2b76f7
|
/app/main.py
|
02b0893279609287d241e0a0443fe34f1c2b35ea
|
[
"MIT"
] |
permissive
|
ayushmaskey/bike_rental_system
|
49910dee9a27628306cfe852cc4c033222170aa8
|
7c53b295b41701d7852e71bef0f110d4ce4813f5
|
refs/heads/master
| 2020-06-04T23:24:32.189816
| 2019-06-17T00:07:32
| 2019-06-17T00:07:32
| 192,232,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
from bike_rental import BikeRental
from customer import Customer
def main():
    """Interactive menu loop for the bike rental shop.

    Creates a 100-bike shop and a customer, then dispatches on the menu
    choice until the user exits (choice 6).
    """
    shop = BikeRental(100)
    customer = Customer()
    while True:
        print("""
        ===== bike rental shop =======
        1. Display available
        2. Request a bike on hourly basis $5
        3. Request a bike on daily basis $20
        4. Request a bike on weekly basis $60
        5. Return a bike
        6. Exit
        """)
        choice = input("Enter choice:")
        try:
            choice = int(choice)
        except ValueError:
            print("Not an int")
            continue
        if choice == 1:
            shop.display_stock()
        elif choice == 2:
            customer.rentalTime = shop.rentBikeOnHourlyBasis( customer.requestBike() )
            customer.rentalBasis = 1
        elif choice == 3:
            customer.rentalTime = shop.rentBikeOnDailyBasis( customer.requestBike() )
            customer.rentalBasis = 2
        elif choice == 4:
            # NOTE(review): 'rentBikeObWeeklyBasis' looks like a typo of
            # 'rentBikeOnWeeklyBasis', but it must match BikeRental's actual
            # method name — confirm before renaming.
            customer.rentalTime = shop.rentBikeObWeeklyBasis( customer.requestBike() )
            # Bug fix: the weekly branch never recorded a rental basis,
            # unlike the hourly (1) and daily (2) branches, even though
            # returning a bike resets rentalBasis to 0 below.
            customer.rentalBasis = 3
        elif choice == 5:
            customer.bill = shop.returnBike( customer.returnBike() )
            customer.rentalBasis, customer.rentalTime, customer.bikes = 0,0,0
        elif choice == 6:
            break
        else:
            # Bug fix: corrected the typo 'betweek' in the user-facing message.
            print("invalid input. enter number between 1 and 6")
    print("Thank you")
# Start the menu loop only when executed as a script.
if __name__ == "__main__":
    main()
|
[
"maskey.maskey@gmail.com"
] |
maskey.maskey@gmail.com
|
97fc78b6c5686fb9bbb8b11faf77bdd250d89984
|
8c098094e61fe80f8542e90ef57d6cd96c4bae56
|
/models.py
|
f89c94e8ca9fca6b6054ec7bb3bda0a5b1d51f09
|
[] |
no_license
|
jugg3rn4u7/ConnectifyAPI
|
11ff41b1ba0495fd349e24defb3407b7790708d4
|
0b8335b4e36ac23302d1a20124910fa97b393375
|
refs/heads/master
| 2020-12-31T03:34:43.124948
| 2016-09-09T04:18:56
| 2016-09-09T04:18:56
| 62,965,333
| 0
| 0
| null | 2016-07-09T19:17:23
| 2016-07-09T19:17:23
| null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
# Models
# Template record for a new user account: all fields start empty/zero.
User = {
    "phoneNumber": "",
    "password": "",
    "verified": 0,
    "code": 0,
    "salt": "",
    "avatar": 0,
    "avatarName": 0,
}
|
[
"shriku4u27@gmail.com"
] |
shriku4u27@gmail.com
|
d38fe46075d0bd0c00e53082701f1e24d6fc4a1a
|
5e9bda2d6082f62d889df1c28973436af905faaa
|
/demo/agw/FlatMenu.py
|
8a6f08fc91b3beade65596ae3cf19deb606b9cc2
|
[] |
no_license
|
FXCMAPI/Phoenix
|
cab74db4e8f141f4d27b4b2e3d5bab06994b2e52
|
040763282f04a5b98f89c054b254993d1a8ca618
|
refs/heads/master
| 2020-03-16T15:42:10.660845
| 2018-05-09T01:41:51
| 2018-05-09T01:45:33
| 132,755,174
| 1
| 1
| null | 2018-05-09T12:41:54
| 2018-05-09T12:41:53
| null |
UTF-8
|
Python
| false
| false
| 31,541
|
py
|
#!/usr/bin/env python
import wx
import math
import random
import os
import sys
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
bitmapDir = os.path.join(dirName, 'bitmaps')
sys.path.append(os.path.split(dirName)[0])
try:
from agw import flatmenu as FM
from agw.artmanager import ArtManager, RendererBase, DCSaver
from agw.fmresources import ControlFocus, ControlPressed
from agw.fmresources import FM_OPT_SHOW_CUSTOMIZE, FM_OPT_SHOW_TOOLBAR, FM_OPT_MINIBAR
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.flatmenu as FM
from wx.lib.agw.artmanager import ArtManager, RendererBase, DCSaver
from wx.lib.agw.fmresources import ControlFocus, ControlPressed
from wx.lib.agw.fmresources import FM_OPT_SHOW_CUSTOMIZE, FM_OPT_SHOW_TOOLBAR, FM_OPT_MINIBAR
import images
if wx.VERSION >= (2,7,0,0):
import wx.lib.agw.aui as AUI
AuiPaneInfo = AUI.AuiPaneInfo
AuiManager = AUI.AuiManager
_hasAUI = True
else:
try:
import PyAUI as AUI
_hasAUI = True
AuiPaneInfo = AUI.PaneInfo
AuiManager = AUI.FrameManager
except:
_hasAUI = False
#----------------------------------------------------------------------
#-------------------------------
# Menu items IDs
#-------------------------------
# Ids allocated at runtime for the style / help / option menu items.
MENU_STYLE_DEFAULT = wx.NewId()
MENU_STYLE_XP = wx.NewId()
MENU_STYLE_2007 = wx.NewId()
MENU_STYLE_VISTA = wx.NewId()
MENU_STYLE_MY = wx.NewId()
MENU_USE_CUSTOM = wx.NewId()
MENU_LCD_MONITOR = wx.NewId()
MENU_HELP = wx.NewId()
MENU_DISABLE_MENU_ITEM = wx.NewId()
MENU_REMOVE_MENU = wx.NewId()
MENU_TRANSPARENCY = wx.NewId()

# Fixed ids for the file-menu items that are mirrored as toolbar tools.
MENU_NEW_FILE = 10005
MENU_SAVE = 10006
MENU_OPEN_FILE = 10007
MENU_NEW_FOLDER = 10008
MENU_COPY = 10009
MENU_CUT = 10010
MENU_PASTE = 10011
def switchRGBtoBGR(colour):
    """Return a new wx.Colour with the red and blue channels swapped."""
    blue, green, red = colour.Blue(), colour.Green(), colour.Red()
    return wx.Colour(blue, green, red)
def CreateBackgroundBitmap():
    """Build a 200x300 bitmap filled with a concentric blue gradient.

    Used as a custom menu background image.
    """
    mem_dc = wx.MemoryDC()
    bmp = wx.Bitmap(200, 300)
    mem_dc.SelectObject(bmp)
    mem_dc.Clear()
    # colour the menu face with background colour
    top = wx.Colour("blue")
    bottom = wx.Colour("light blue")
    filRect = wx.Rect(0, 0, 200, 300)
    mem_dc.GradientFillConcentric(filRect, top, bottom, wx.Point(100, 150))
    # Deselect the bitmap from the DC before handing it to the caller.
    mem_dc.SelectObject(wx.NullBitmap)
    return bmp
#------------------------------------------------------------
# A custom renderer class for FlatMenu
#------------------------------------------------------------
class FM_MyRenderer(FM.FMRenderer):
    """ My custom style: flat rounded buttons and a flat menu-bar fill. """

    def __init__(self):
        FM.FMRenderer.__init__(self)

    def DrawMenuButton(self, dc, rect, state):
        """Draws the highlight on a FlatMenu"""
        self.DrawButton(dc, rect, state)

    def DrawMenuBarButton(self, dc, rect, state):
        """Draws the highlight on a FlatMenuBar"""
        self.DrawButton(dc, rect, state)

    def DrawButton(self, dc, rect, state, colour=None):
        """Draw a rounded button; pen/brush colours are picked from `state`."""
        if state == ControlFocus:
            penColour = switchRGBtoBGR(ArtManager.Get().FrameColour())
            brushColour = switchRGBtoBGR(ArtManager.Get().BackgroundColour())
        elif state == ControlPressed:
            penColour = switchRGBtoBGR(ArtManager.Get().FrameColour())
            brushColour = switchRGBtoBGR(ArtManager.Get().HighlightBackgroundColour())
        else:  # ControlNormal, ControlDisabled, default
            penColour = switchRGBtoBGR(ArtManager.Get().FrameColour())
            brushColour = switchRGBtoBGR(ArtManager.Get().BackgroundColour())
        # Draw the button borders
        dc.SetPen(wx.Pen(penColour))
        dc.SetBrush(wx.Brush(brushColour))
        dc.DrawRoundedRectangle(rect.x, rect.y, rect.width, rect.height, 4)

    def DrawMenuBarBackground(self, dc, rect):
        """Fill the menu bar with a flat light tint of the face colour."""
        # DCSaver restores the DC's pen/brush when it is garbage-collected.
        dcsaver = DCSaver(dc)
        startColour = self.menuBarFaceColour
        endColour = ArtManager.Get().LightColour(startColour, 90)
        dc.SetPen(wx.Pen(endColour))
        dc.SetBrush(wx.Brush(endColour))
        dc.DrawRectangle(rect)

    def DrawToolBarBg(self, dc, rect):
        """Fill the toolbar background when the 'raise toolbar' option is on."""
        if not ArtManager.Get().GetRaiseToolbar():
            return
        # Bug fix: menuBarFaceColour is used as an ATTRIBUTE in
        # DrawMenuBarBackground above; the original called it here
        # (`self.menuBarFaceColour()`), which would raise TypeError.
        startColour = self.menuBarFaceColour
        dc.SetPen(wx.Pen(startColour))
        dc.SetBrush(wx.Brush(startColour))
        dc.DrawRectangle(0, 0, rect.GetWidth(), rect.GetHeight())
#------------------------------------------------------------
# Declare our main frame
#------------------------------------------------------------
class FlatMenuDemo(wx.Frame):
    def __init__(self, parent, log):
        """Build the demo frame: menu bar, main panel, minibar pane and status bar."""
        wx.Frame.__init__(self, parent, size=(700, 500), style=wx.DEFAULT_FRAME_STYLE |
                          wx.NO_FULL_REPAINT_ON_RESIZE)
        self.SetIcon(images.Mondrian.GetIcon())
        # Keep toolbar bitmap colours as authored (disable MSW remapping).
        wx.SystemOptions.SetOption("msw.remap", "0")
        self.SetTitle("FlatMenu wxPython Demo ;-D")
        if _hasAUI:
            self._mgr = AuiManager()
            self._mgr.SetManagedWindow(self)
        self._popUpMenu = None
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        # Create a main panel and place some controls on it
        mainPanel = wx.Panel(self, wx.ID_ANY)
        panelSizer = wx.BoxSizer(wx.VERTICAL)
        mainPanel.SetSizer(panelSizer)
        # Create minibar Preview Panel
        minibarPanel = wx.Panel(self, wx.ID_ANY)
        self.CreateMinibar(minibarPanel)
        miniSizer = wx.BoxSizer(wx.VERTICAL)
        miniSizer.Add(self._mtb, 0, wx.EXPAND)
        minibarPanel.SetSizer(miniSizer)
        # Add log window
        self.log = log
        hs = wx.BoxSizer(wx.HORIZONTAL)
        btn = wx.Button(mainPanel, wx.ID_ANY, "Press me for pop up menu!")
        hs.Add(btn, 0, wx.ALL, 5)
        # Connect a button
        btn.Bind(wx.EVT_BUTTON, self.OnButtonClicked)
        btn = wx.Button(mainPanel, wx.ID_ANY, "Press me for a long menu!")
        hs.Add(btn, 0, wx.ALL, 5)
        panelSizer.Add(hs, 0, wx.ALL, 5)
        # Connect a button
        btn.Bind(wx.EVT_BUTTON, self.OnLongButtonClicked)
        statusbar = self.CreateStatusBar(2)
        statusbar.SetStatusWidths([-2, -1])
        # statusbar fields
        statusbar_fields = [("FlatMenu wxPython Demo, Andrea Gavana @ 01 Nov 2006"),
                            ("Welcome To wxPython!")]
        for i in range(len(statusbar_fields)):
            statusbar.SetStatusText(statusbar_fields[i], i)
        self.CreateMenu()
        self.ConnectEvents()
        mainSizer.Add(self._mb, 0, wx.EXPAND)
        mainSizer.Add(mainPanel, 1, wx.EXPAND)
        self.SetSizer(mainSizer)
        mainSizer.Layout()
        if _hasAUI:
            # AUI support: dock the main panel centre and the minibar right.
            self._mgr.AddPane(mainPanel, AuiPaneInfo().Name("main_panel").
                              CenterPane())
            self._mgr.AddPane(minibarPanel, AuiPaneInfo().Name("minibar_panel").
                              Caption("Minibar Preview").Right().
                              MinSize(wx.Size(150, 200)))
            self._mb.PositionAUI(self._mgr)
            self._mgr.Update()
        ArtManager.Get().SetMBVerticalGradient(True)
        ArtManager.Get().SetRaiseToolbar(False)
        self._mb.Refresh()
        self._mtb.Refresh()
        self.CenterOnScreen()
    def CreateMinibar(self, parent):
        """Create the small preview toolbar (self._mtb) with check and radio tools."""
        # create mini toolbar
        self._mtb = FM.FlatMenuBar(parent, wx.ID_ANY, 16, 6, options = FM_OPT_SHOW_TOOLBAR|FM_OPT_MINIBAR)
        checkCancelBmp = wx.Bitmap(os.path.join(bitmapDir, "ok-16.png"), wx.BITMAP_TYPE_PNG)
        viewMagBmp = wx.Bitmap(os.path.join(bitmapDir, "viewmag-16.png"), wx.BITMAP_TYPE_PNG)
        viewMagFitBmp = wx.Bitmap(os.path.join(bitmapDir, "viewmagfit-16.png"), wx.BITMAP_TYPE_PNG)
        viewMagZoomBmp = wx.Bitmap(os.path.join(bitmapDir, "viewmag-p-16.png"), wx.BITMAP_TYPE_PNG)
        viewMagZoomOutBmp = wx.Bitmap(os.path.join(bitmapDir, "viewmag-m-16.png"), wx.BITMAP_TYPE_PNG)
        # Two independent check tools, then a mutually-exclusive radio group.
        self._mtb.AddCheckTool(wx.ID_ANY, "Check Settings Item", checkCancelBmp)
        self._mtb.AddCheckTool(wx.ID_ANY, "Check Info Item", checkCancelBmp)
        self._mtb.AddSeparator()
        self._mtb.AddRadioTool(wx.ID_ANY, "Magnifier", viewMagBmp)
        self._mtb.AddRadioTool(wx.ID_ANY, "Fit", viewMagFitBmp)
        self._mtb.AddRadioTool(wx.ID_ANY, "Zoom In", viewMagZoomBmp)
        self._mtb.AddRadioTool(wx.ID_ANY, "Zoom Out", viewMagZoomOutBmp)
def CreateMenu(self):
# Create the menubar
self._mb = FM.FlatMenuBar(self, wx.ID_ANY, 32, 5, options = FM_OPT_SHOW_TOOLBAR | FM_OPT_SHOW_CUSTOMIZE)
fileMenu = FM.FlatMenu()
styleMenu = FM.FlatMenu()
editMenu = FM.FlatMenu()
multipleMenu = FM.FlatMenu()
historyMenu = FM.FlatMenu()
subMenu = FM.FlatMenu()
helpMenu = FM.FlatMenu()
subMenu1 = FM.FlatMenu()
subMenuExit = FM.FlatMenu()
self.newMyTheme = self._mb.GetRendererManager().AddRenderer(FM_MyRenderer())
# Load toolbar icons (32x32)
copy_bmp = wx.Bitmap(os.path.join(bitmapDir, "editcopy.png"), wx.BITMAP_TYPE_PNG)
cut_bmp = wx.Bitmap(os.path.join(bitmapDir, "editcut.png"), wx.BITMAP_TYPE_PNG)
paste_bmp = wx.Bitmap(os.path.join(bitmapDir, "editpaste.png"), wx.BITMAP_TYPE_PNG)
open_folder_bmp = wx.Bitmap(os.path.join(bitmapDir, "fileopen.png"), wx.BITMAP_TYPE_PNG)
new_file_bmp = wx.Bitmap(os.path.join(bitmapDir, "filenew.png"), wx.BITMAP_TYPE_PNG)
new_folder_bmp = wx.Bitmap(os.path.join(bitmapDir, "folder_new.png"), wx.BITMAP_TYPE_PNG)
save_bmp = wx.Bitmap(os.path.join(bitmapDir, "filesave.png"), wx.BITMAP_TYPE_PNG)
context_bmp = wx.Bitmap(os.path.join(bitmapDir, "contexthelp-16.png"), wx.BITMAP_TYPE_PNG)
colBmp = wx.Bitmap(os.path.join(bitmapDir, "month-16.png"), wx.BITMAP_TYPE_PNG)
view1Bmp = wx.Bitmap(os.path.join(bitmapDir, "view_choose.png"), wx.BITMAP_TYPE_PNG)
view2Bmp = wx.Bitmap(os.path.join(bitmapDir, "view_detailed.png"), wx.BITMAP_TYPE_PNG)
view3Bmp = wx.Bitmap(os.path.join(bitmapDir, "view_icon.png"), wx.BITMAP_TYPE_PNG)
view4Bmp = wx.Bitmap(os.path.join(bitmapDir, "view_multicolumn.png"), wx.BITMAP_TYPE_PNG)
# Set an icon to the exit/help/transparency menu item
exitImg = wx.Bitmap(os.path.join(bitmapDir, "exit-16.png"), wx.BITMAP_TYPE_PNG)
helpImg = wx.Bitmap(os.path.join(bitmapDir, "help-16.png"), wx.BITMAP_TYPE_PNG)
ghostBmp = wx.Bitmap(os.path.join(bitmapDir, "field-16.png"), wx.BITMAP_TYPE_PNG)
# Create a context menu
context_menu = FM.FlatMenu()
# Create the menu items
menuItem = FM.FlatMenuItem(context_menu, wx.ID_ANY, "Test Item", "", wx.ITEM_NORMAL, None, context_bmp)
context_menu.AppendItem(menuItem)
item = FM.FlatMenuItem(fileMenu, MENU_NEW_FILE, "&New File\tCtrl+N", "New File", wx.ITEM_NORMAL)
fileMenu.AppendItem(item)
item.SetContextMenu(context_menu)
self._mb.AddTool(MENU_NEW_FILE, "New File", new_file_bmp)
item = FM.FlatMenuItem(fileMenu, MENU_SAVE, "&Save File\tCtrl+S", "Save File", wx.ITEM_NORMAL)
fileMenu.AppendItem(item)
self._mb.AddTool(MENU_SAVE, "Save File", save_bmp)
item = FM.FlatMenuItem(fileMenu, MENU_OPEN_FILE, "&Open File\tCtrl+O", "Open File", wx.ITEM_NORMAL)
fileMenu.AppendItem(item)
self._mb.AddTool(MENU_OPEN_FILE, "Open File", open_folder_bmp)
self._mb.AddSeparator() # Toolbar separator
item = FM.FlatMenuItem(fileMenu, MENU_NEW_FOLDER, "N&ew Folder\tCtrl+E", "New Folder", wx.ITEM_NORMAL)
fileMenu.AppendItem(item)
self._mb.AddTool(MENU_NEW_FOLDER, "New Folder",new_folder_bmp)
self._mb.AddSeparator() # Toobar separator
item = FM.FlatMenuItem(fileMenu, MENU_COPY, "&Copy\tCtrl+C", "Copy", wx.ITEM_NORMAL)
fileMenu.AppendItem(item)
self._mb.AddTool(MENU_COPY, "Copy", copy_bmp)
item = FM.FlatMenuItem(fileMenu, MENU_CUT, "Cut\tCtrl+X", "Cut", wx.ITEM_NORMAL)
fileMenu.AppendItem(item)
self._mb.AddTool(MENU_CUT, "Cut", cut_bmp)
item = FM.FlatMenuItem(fileMenu, MENU_PASTE, "Paste\tCtrl+V", "Paste", wx.ITEM_NORMAL, subMenuExit)
fileMenu.AppendItem(item)
self._mb.AddTool(MENU_PASTE, "Paste", paste_bmp)
self._mb.AddSeparator() # Separator
# Add a wx.ComboBox to FlatToolbar
combo = wx.ComboBox(self._mb, -1, choices=["Hello", "World", "wxPython"])
self._mb.AddControl(combo)
self._mb.AddSeparator() # Separator
stext = wx.StaticText(self._mb, -1, "Hello")
#stext.SetBackgroundStyle(wx.BG_STYLE_CUSTOM )
self._mb.AddControl(stext)
self._mb.AddSeparator() # Separator
# Add another couple of bitmaps
self._mb.AddRadioTool(wx.ID_ANY, "View Column", view1Bmp)
self._mb.AddRadioTool(wx.ID_ANY, "View Icons", view2Bmp)
self._mb.AddRadioTool(wx.ID_ANY, "View Details", view3Bmp)
self._mb.AddRadioTool(wx.ID_ANY, "View Multicolumn", view4Bmp)
# Add non-toolbar item
item = FM.FlatMenuItem(subMenuExit, wx.ID_EXIT, "E&xit\tAlt+X", "Exit demo", wx.ITEM_NORMAL, None, exitImg)
subMenuExit.AppendItem(item)
fileMenu.AppendSeparator()
item = FM.FlatMenuItem(subMenuExit, wx.ID_EXIT, "E&xit\tAlt+Q", "Exit demo", wx.ITEM_NORMAL, None, exitImg)
fileMenu.AppendItem(item)
# Second menu
item = FM.FlatMenuItem(styleMenu, MENU_STYLE_DEFAULT, "Menu style Default\tAlt+N", "Menu style Default", wx.ITEM_RADIO)
styleMenu.AppendItem(item)
item.Check(True)
item = FM.FlatMenuItem(styleMenu, MENU_STYLE_MY, "Menu style Custom \tAlt+C", "Menu style Custom", wx.ITEM_RADIO)
styleMenu.AppendItem(item)
item = FM.FlatMenuItem(styleMenu, MENU_STYLE_XP, "Menu style XP\tAlt+P", "Menu style XP", wx.ITEM_RADIO)
styleMenu.AppendItem(item)
item = FM.FlatMenuItem(styleMenu, MENU_STYLE_2007, "Menu style 2007\tAlt+O", "Menu style 2007", wx.ITEM_RADIO)
styleMenu.AppendItem(item)
item = FM.FlatMenuItem(styleMenu, MENU_STYLE_VISTA, "Menu style Vista\tAlt+V", "Menu style Vista", wx.ITEM_RADIO)
styleMenu.AppendItem(item)
styleMenu.AppendSeparator()
item = FM.FlatMenuItem(styleMenu, MENU_USE_CUSTOM, "Show Customize DropDown", "Shows the customize drop down arrow", wx.ITEM_CHECK)
# Demonstrate how to set custom font and text colour to a FlatMenuItem
item.SetFont(wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_ITALIC, wx.FONTWEIGHT_BOLD, False, "Courier New"))
item.SetTextColour(wx.RED)
item.Check(True)
styleMenu.AppendItem(item)
styleMenu.AppendSeparator()
item = FM.FlatMenuItem(styleMenu, MENU_LCD_MONITOR, "Use LCD monitors option", "Instructs FlatMenu to use LCD drawings", wx.ITEM_CHECK)
styleMenu.AppendItem(item)
# Add some radio items
styleMenu.AppendSeparator()
# Add sub-menu to main menu
item = FM.FlatMenuItem(styleMenu, wx.ID_ANY, "Sub-&menu radio items", "", wx.ITEM_NORMAL, subMenu1)
styleMenu.AppendItem(item)
item.SetContextMenu(context_menu)
item = FM.FlatMenuItem(subMenu1, wx.ID_ANY, "Radio Item 1", "Radio Item 1", wx.ITEM_RADIO)
subMenu1.AppendItem(item)
item = FM.FlatMenuItem(subMenu1, wx.ID_ANY, "Radio Item 2", "Radio Item 2", wx.ITEM_RADIO)
subMenu1.AppendItem(item)
item.Check(True)
item = FM.FlatMenuItem(subMenu1, wx.ID_ANY, "Radio Item 3", "Radio Item 3", wx.ITEM_RADIO)
subMenu1.AppendItem(item)
item = FM.FlatMenuItem(editMenu, MENU_REMOVE_MENU, "Remove menu", "Remove menu", wx.ITEM_NORMAL)
editMenu.AppendItem(item)
item = FM.FlatMenuItem(editMenu, MENU_DISABLE_MENU_ITEM, "Disable Menu Item ...", "Disable Menu Item", wx.ITEM_NORMAL)
editMenu.AppendItem(item)
editMenu.AppendSeparator()
item = FM.FlatMenuItem(editMenu, MENU_TRANSPARENCY, "Set FlatMenu transparency...", "Sets the FlatMenu transparency",
wx.ITEM_NORMAL, None, ghostBmp)
editMenu.AppendItem(item)
# Add some dummy entries to the sub menu
# Add sub-menu to main menu
item = FM.FlatMenuItem(editMenu, 9001, "Sub-&menu items", "", wx.ITEM_NORMAL, subMenu)
editMenu.AppendItem(item)
# Create the submenu items and add them
item = FM.FlatMenuItem(subMenu, 9002, "&Sub-menu Item 1", "", wx.ITEM_NORMAL)
subMenu.AppendItem(item)
item = FM.FlatMenuItem(subMenu, 9003, "Su&b-menu Item 2", "", wx.ITEM_NORMAL)
subMenu.AppendItem(item)
item = FM.FlatMenuItem(subMenu, 9004, "Sub-menu Item 3", "", wx.ITEM_NORMAL)
subMenu.AppendItem(item)
item = FM.FlatMenuItem(subMenu, 9005, "Sub-menu Item 4", "", wx.ITEM_NORMAL)
subMenu.AppendItem(item)
maxItems = 17
numCols = 2
switch = int(math.ceil(maxItems/float(numCols)))
fnt = wx.Font(9, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_ITALIC, wx.FONTWEIGHT_BOLD, False, "Courier New")
colours = [wx.RED, wx.GREEN, wx.BLUE]
for i in range(17):
row, col = i%switch, i/switch
result = random.randint(0, 1) == 1
bmp = (result and [colBmp] or [wx.NullBitmap])[0]
item = FM.FlatMenuItem(multipleMenu, wx.ID_ANY, "Row %d, Col %d"%((row+1, col+1)), "", wx.ITEM_NORMAL, None, bmp)
if result == 0:
# Demonstrate how to set custom font and text colour to a FlatMenuItem
col = random.randint(0, 2)
item.SetFont(fnt)
item.SetTextColour(colours[col])
multipleMenu.AppendItem(item)
multipleMenu.SetNumberColumns(2)
historyMenu.Append(wx.ID_OPEN, "&Open...")
self.historyMenu = historyMenu
self.filehistory = FM.FileHistory()
self.filehistory.UseMenu(self.historyMenu)
self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnFileOpenDialog, id=wx.ID_OPEN)
item = FM.FlatMenuItem(helpMenu, MENU_HELP, "&About\tCtrl+A", "About...", wx.ITEM_NORMAL, None, helpImg)
helpMenu.AppendItem(item)
fileMenu.SetBackgroundBitmap(CreateBackgroundBitmap())
# Add menu to the menu bar
self._mb.Append(fileMenu, "&File")
self._mb.Append(styleMenu, "&Style")
self._mb.Append(editMenu, "&Edit")
self._mb.Append(multipleMenu, "&Multiple Columns")
self._mb.Append(historyMenu, "File Histor&y")
self._mb.Append(helpMenu, "&Help")
def ConnectEvents(self):
    """Bind all menu-selection, mouse-over/out and UI-update events to handlers."""
    # Attach menu events to some handlers
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnQuit, id=wx.ID_EXIT)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnEdit, id=MENU_DISABLE_MENU_ITEM, id2=MENU_REMOVE_MENU)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnStyle, id=MENU_STYLE_XP, id2=MENU_STYLE_VISTA)
    # Range binding: covers every popup-menu command id up to 20013.
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnFlatMenuCmd, id=MENU_NEW_FILE, id2=20013)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnAbout, id=MENU_HELP)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnStyle, id=MENU_STYLE_MY)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnStyle, id=MENU_STYLE_DEFAULT)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnShowCustom, id=MENU_USE_CUSTOM)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnLCDMonitor, id=MENU_LCD_MONITOR)
    self.Bind(FM.EVT_FLAT_MENU_SELECTED, self.OnTransparency, id=MENU_TRANSPARENCY)
    self.Bind(FM.EVT_FLAT_MENU_ITEM_MOUSE_OVER, self.OnMouseOver, id=MENU_NEW_FILE)
    self.Bind(FM.EVT_FLAT_MENU_ITEM_MOUSE_OUT, self.OnMouseOut, id=MENU_NEW_FILE)
    self.Bind(wx.EVT_UPDATE_UI, self.OnFlatMenuCmdUI, id=20001, id2=20013)
    if "__WXMAC__" in wx.Platform:
        # Mac-only: explicit size handler keeps the layout current.
        self.Bind(wx.EVT_SIZE, self.OnSize)
    self.Bind(FM.EVT_FLAT_MENU_RANGE, self.OnFileHistory, id=wx.ID_FILE1, id2=wx.ID_FILE9+1)
def OnSize(self, event):
    """Refresh the AUI pane layout on resize (bound only under __WXMAC__)."""
    self._mgr.Update()
    self.Layout()
def OnQuit(self, event):
    """Tear down the AUI manager (when present) and destroy the demo frame."""
    if _hasAUI:
        self._mgr.UnInit()
    self.Destroy()
def OnButtonClicked(self, event):
    """Pop up self._popUpMenu just below the clicked button.

    Demonstrates using a FlatMenu without a menu bar.
    """
    btn = event.GetEventObject()
    # Create the popup menu (lazy: built once, reused afterwards)
    self.CreatePopupMenu()
    # Position the menu:
    # The menu should be positioned at the bottom left corner of the button.
    btnSize = btn.GetSize()
    btnPt = btn.GetPosition()
    # Since the btnPt (button position) is in client coordinates,
    # and the menu coordinates is relative to screen we convert
    # the coords
    btnPt = btn.GetParent().ClientToScreen(btnPt)
    # A nice feature with the Popup menu, is the ability to provide an
    # object that we wish to handle the menu events, in this case we
    # pass 'self'
    # if we wish the menu to appear under the button, we provide its height
    self._popUpMenu.SetOwnerHeight(btnSize.y)
    self._popUpMenu.Popup(wx.Point(btnPt.x, btnPt.y), self)
def OnLongButtonClicked(self, event):
    """Pop up the long (30-item, scrollable) menu just below the clicked button."""
    btn = event.GetEventObject()
    # Create the popup menu (lazy: built once, reused afterwards)
    self.CreateLongPopupMenu()
    # Position the menu:
    # The menu should be positioned at the bottom left corner of the button.
    btnSize = btn.GetSize()
    # btnPt is returned relative to its parent
    # so, we need to convert it to screen
    btnPt = btn.GetPosition()
    btnPt = btn.GetParent().ClientToScreen(btnPt)
    # if we wish the menu to appear under the button, we provide its height
    self._longPopUpMenu.SetOwnerHeight(btnSize.y)
    self._longPopUpMenu.Popup(wx.Point(btnPt.x, btnPt.y), self)
def CreatePopupMenu(self):
    """Lazily build self._popUpMenu: three checkable items, a disabled item,
    and a two-level nested sub-menu tree (command ids 20001-20013)."""
    if not self._popUpMenu:
        self._popUpMenu = FM.FlatMenu()
        #-----------------------------------------------
        # Flat Menu test
        #-----------------------------------------------
        # First we create the sub-menu item
        subMenu = FM.FlatMenu()
        subSubMenu = FM.FlatMenu()
        # Create the menu items
        menuItem = FM.FlatMenuItem(self._popUpMenu, 20001, "First Menu Item", "", wx.ITEM_CHECK)
        self._popUpMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(self._popUpMenu, 20002, "Sec&ond Menu Item", "", wx.ITEM_CHECK)
        self._popUpMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(self._popUpMenu, wx.ID_ANY, "Checkable-Disabled Item", "", wx.ITEM_CHECK)
        menuItem.Enable(False)
        self._popUpMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(self._popUpMenu, 20003, "Third Menu Item", "", wx.ITEM_CHECK)
        self._popUpMenu.AppendItem(menuItem)
        self._popUpMenu.AppendSeparator()
        # Add sub-menu to main menu
        menuItem = FM.FlatMenuItem(self._popUpMenu, 20004, "Sub-&menu item", "", wx.ITEM_NORMAL, subMenu)
        self._popUpMenu.AppendItem(menuItem)
        # Create the submenu items and add them
        menuItem = FM.FlatMenuItem(subMenu, 20005, "&Sub-menu Item 1", "", wx.ITEM_NORMAL)
        subMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(subMenu, 20006, "Su&b-menu Item 2", "", wx.ITEM_NORMAL)
        subMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(subMenu, 20007, "Sub-menu Item 3", "", wx.ITEM_NORMAL)
        subMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(subMenu, 20008, "Sub-menu Item 4", "", wx.ITEM_NORMAL)
        subMenu.AppendItem(menuItem)
        # Create the sub-sub-menu items and add them
        menuItem = FM.FlatMenuItem(subSubMenu, 20009, "Sub-menu Item 1", "", wx.ITEM_NORMAL)
        subSubMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(subSubMenu, 20010, "Sub-menu Item 2", "", wx.ITEM_NORMAL)
        subSubMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(subSubMenu, 20011, "Sub-menu Item 3", "", wx.ITEM_NORMAL)
        subSubMenu.AppendItem(menuItem)
        menuItem = FM.FlatMenuItem(subSubMenu, 20012, "Sub-menu Item 4", "", wx.ITEM_NORMAL)
        subSubMenu.AppendItem(menuItem)
        # Add sub-sub-menu to submenu
        menuItem = FM.FlatMenuItem(subMenu, 20013, "Sub-menu item", "", wx.ITEM_NORMAL, subSubMenu)
        subMenu.AppendItem(menuItem)
def CreateLongPopupMenu(self):
    """Lazily build self._longPopUpMenu: 30 entries, the first of which
    owns a 5-item sub-menu."""
    if hasattr(self, "_longPopUpMenu"):
        return

    self._longPopUpMenu = FM.FlatMenu()
    sub = FM.FlatMenu()

    for index in range(30):
        if index == 0:
            # First entry carries the sub-menu.
            entry = FM.FlatMenuItem(self._longPopUpMenu, wx.ID_ANY,
                                    "Menu Item #%ld" % (index + 1), "",
                                    wx.ITEM_NORMAL, sub)
            self._longPopUpMenu.AppendItem(entry)
            for j in range(5):
                sub.AppendItem(
                    FM.FlatMenuItem(sub, wx.ID_ANY, "Sub Menu Item #%ld" % (j + 1)))
        else:
            self._longPopUpMenu.AppendItem(
                FM.FlatMenuItem(self._longPopUpMenu, wx.ID_ANY,
                                "Menu Item #%ld" % (index + 1)))
# ------------------------------------------
# Event handlers
# ------------------------------------------
def OnStyle(self, event):
    """Switch the menu-bar renderer theme chosen from the Style menu, then
    force a full redraw of the menu bar and toolbar."""
    eventId = event.GetId()
    manager = self._mb.GetRendererManager()

    if eventId == MENU_STYLE_MY:
        # Custom theme registered on this frame.
        manager.SetTheme(self.newMyTheme)
    else:
        builtin = {MENU_STYLE_DEFAULT: FM.StyleDefault,
                   MENU_STYLE_2007: FM.Style2007,
                   MENU_STYLE_XP: FM.StyleXP,
                   MENU_STYLE_VISTA: FM.StyleVista}
        if eventId in builtin:
            manager.SetTheme(builtin[eventId])

    # Cached bitmaps belong to the old theme; drop them and repaint.
    self._mb.ClearBitmaps()
    self._mb.Refresh()
    self._mtb.Refresh()
    self.Update()
def OnShowCustom(self, event):
    """Toggle the customize drop-down arrow on the menu bar."""
    self._mb.ShowCustomize(event.IsChecked())
def OnLCDMonitor(self, event):
    """Toggle LCD-optimized drawing for the menu bar."""
    self._mb.SetLCDMonitor(event.IsChecked())
def OnTransparency(self, event):
    """Prompt for a FlatMenu transparency value (0-255) and apply it.

    Invalid input (non-numeric or out of range) shows an error dialog and
    leaves the current transparency unchanged.
    """
    transparency = ArtManager.Get().GetTransparency()
    dlg = wx.TextEntryDialog(self, 'Please enter a value for menu transparency',
                             'FlatMenu Transparency', str(transparency))

    if dlg.ShowModal() != wx.ID_OK:
        dlg.Destroy()
        return

    value = dlg.GetValue()
    dlg.Destroy()

    try:
        value = int(value)
    except ValueError:
        # BUG FIX: the original (bare except) fell through after this dialog
        # and compared a str with an int below, raising TypeError; bail out.
        dlg = wx.MessageDialog(self, "Invalid transparency value!", "Error",
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        return

    if value < 0 or value > 255:
        # BUG FIX: the original showed the error dialog but still applied the
        # out-of-range value; reject it instead.
        dlg = wx.MessageDialog(self, "Invalid transparency value!", "Error",
                               wx.OK | wx.ICON_ERROR)
        dlg.ShowModal()
        dlg.Destroy()
        return

    ArtManager.Get().SetTransparency(value)
def OnMouseOver(self, event):
    """Log mouse-enter events for the New File menu item."""
    self.log.write("Received Flat menu mouse enter ID: %d\n"%(event.GetId()))
def OnMouseOut(self, event):
    """Log mouse-leave events for the New File menu item."""
    self.log.write("Received Flat menu mouse leave ID: %d\n"%(event.GetId()))
def OnFlatMenuCmd(self, event):
    """Log every FlatMenu command event (bound over the id range up to 20013)."""
    self.log.write("Received Flat menu command event ID: %d\n"%(event.GetId()))
def OnFlatMenuCmdUI(self, event):
    """Log UI-update events for the popup-menu id range 20001-20013."""
    self.log.write("Received Flat menu update UI event ID: %d\n"%(event.GetId()))
def GetStringFromUser(self, msg):
    """Show a text-entry dialog with prompt *msg*.

    Returns the entered text, or "" if the user cancelled.
    """
    dlg = wx.TextEntryDialog(self, msg, "Enter Text")
    userString = dlg.GetValue() if dlg.ShowModal() == wx.ID_OK else ""
    dlg.Destroy()
    return userString
def OnFileOpenDialog(self, evt):
    """Show a file-open dialog; log the chosen path and add it to the file history."""
    dlg = wx.FileDialog(self, defaultDir = os.getcwd(),
                        wildcard = "All Files|*", style = wx.FD_OPEN | wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        self.log.write("You selected %s\n" % path)
        # add it to the history
        self.filehistory.AddFileToHistory(path)
    dlg.Destroy()
def OnFileHistory(self, evt):
    """Re-open a file chosen from the history menu and promote it in the list."""
    # get the file based on the menu ID
    fileNum = evt.GetId() - wx.ID_FILE1
    path = self.filehistory.GetHistoryFile(fileNum)
    self.log.write("You selected %s\n" % path)
    # add it back to the history so it will be moved up the list
    self.filehistory.AddFileToHistory(path)
def OnEdit(self, event):
    """Handle Edit-menu actions: remove a whole menu, or disable one menu item.

    Both actions prompt the user for a numeric value; non-numeric input is
    ignored instead of crashing.
    """
    if event.GetId() == MENU_REMOVE_MENU:
        idxStr = self.GetStringFromUser("Insert menu index to remove:")
        if idxStr.strip() != "":
            try:
                idx = int(idxStr)
            except ValueError:
                # BUG FIX: non-numeric input used to raise an unhandled
                # ValueError; ignore it instead.
                return
            self._mb.Remove(idx)
    elif event.GetId() == MENU_DISABLE_MENU_ITEM:
        idxStr = self.GetStringFromUser("Insert menu item ID to be disabled (10005 - 10011):")
        if idxStr.strip() != "":
            try:
                idx = int(idxStr)
            except ValueError:
                return  # ignore non-numeric input (see above)
            mi = self._mb.FindMenuItem(idx)
            if mi:
                mi.Enable(False)
def OnAbout(self, event):
    """Display the demo's About dialog."""
    # Implicit string concatenation keeps the message byte-identical to the
    # original while avoiding the '+' chain.
    msg = ("This is the About Dialog of the FlatMenu demo.\n\n"
           "Author: Andrea Gavana @ 03 Nov 2006\n\n"
           "Please report any bug/requests or improvements\n"
           "to Andrea Gavana at the following email addresses:\n\n"
           "andrea.gavana@gmail.com\nandrea.gavana@maerskoil.com\n\n"
           "Welcome to wxPython " + wx.VERSION_STRING + "!!")

    dlg = wx.MessageDialog(self, msg, "FlatMenu wxPython Demo",
                           wx.OK | wx.ICON_INFORMATION)
    dlg.ShowModal()
    dlg.Destroy()
#---------------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Demo launcher panel: a single button that opens the FlatMenu demo frame."""

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)
        b = wx.Button(self, -1, " Test FlatMenu ", (50,50))
        self.Bind(wx.EVT_BUTTON, self.OnButton, b)

    def OnButton(self, evt):
        # Open the demo frame; keep a reference so it is not garbage-collected.
        self.win = FlatMenuDemo(self, self.log)
        self.win.Show(True)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Demo-framework entry point.

    Returns the launcher panel, or an explanatory message panel on Mac where
    the demo is unsupported.
    """
    if wx.Platform == '__WXMAC__':
        from Main import MessagePanel
        return MessagePanel(nb, 'This demo only works on MSW and GTK.',
                            'Sorry', wx.ICON_WARNING)
    return TestPanel(nb, log)
#----------------------------------------------------------------------
# The demo framework displays the FlatMenu module docstring as overview text.
overview = FM.__doc__

if __name__ == '__main__':
    import sys,os
    import run
    # Launch this module inside the standard wxPython demo runner.
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
[
"robin@alldunn.com"
] |
robin@alldunn.com
|
c17b386db26d84061a9618a4fd6d5402792e6a3b
|
f7f6980743ca3f728d89e012ba3ac1aa87978a58
|
/PK_Gateway.py
|
96ba4d7a75ef6edd5d2710b9895e52f00ad063ab
|
[] |
no_license
|
amadea-system/PluralKit_API_Gateway
|
d9cc076d1d4468a2b7e9bb7fd1ac490d53a70794
|
a682a4d503821fdc2b1c5467896a864e7e3e9726
|
refs/heads/master
| 2023-02-25T02:13:40.127648
| 2021-01-29T16:51:49
| 2021-01-29T16:51:49
| 333,824,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,727
|
py
|
"""
This is an API Gateway for the Plural Kit API.
It's primary purpose is to take the load from our internal programs and scripts
and thus remove that burden from the Plural Kit servers,
Copyright 2020 Amadea System
"""
import logging
import json
from flask import Flask
from utils import pluralKitAPI as pk
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
log = logging.getLogger(__name__)

app = Flask(__name__)

# Load runtime configuration (PluralKit system id and token) from disk.
with open('config.json') as conf_file:
    config = json.load(conf_file)

system_id = config['system_id']
pk_token = config['pk_token']
# system_id_debug = config['system_id_debug']
# pk_token_debug = config['pk_token_debug']

# Fetch the PluralKit system once at startup and cache it module-wide.
amadea_system = pk.System.get_by_hid(system_id, pk_token)
# NOTE(review): this writes a private attribute of pk.System -- confirm it is
# the intended configuration hook rather than a public setter.
amadea_system._fronter_decay_time = config['fronter_decay_time']
@app.route('/')
def hello_world():
    """Landing endpoint; confirms the gateway is running."""
    return 'Welcome to the PK API Gateway'
@app.route('/raw/s/amadea/fronters')
def get_raw_fronters():
    """Force-refresh the fronter list from the PluralKit API and return it.

    Returns the raw fronters JSON, an empty object when no JSON is available,
    or the upstream error text with a 502 status on failure.
    """
    try:
        amadea_system.update_fronters(forced=True)
        fronters = amadea_system.fronters
    except pk.PluralKitError as e:
        # BUG FIX: the original returned the exception object itself, which is
        # not a valid Flask response; return its message with a 502 status.
        return str(e), 502
    if fronters.json is not None:
        return fronters.json
    else:
        return {}
@app.route('/s/amadea/fronters')
def get_cached_fronters():
    """Return the cached fronter list without forcing a PluralKit refresh.

    Mirrors get_raw_fronters' response handling: empty object when no JSON is
    available, upstream error text with a 502 status on failure.
    """
    try:
        fronters = amadea_system.fronters
    except pk.PluralKitError as e:
        # BUG FIX: the original returned the exception object itself, which is
        # not a valid Flask response; return its message with a 502 status.
        return str(e), 502
    # Consistency with get_raw_fronters: guard against a missing JSON body.
    if fronters.json is not None:
        return fronters.json
    else:
        return {}
if __name__ == '__main__':
    # NOTE(review): basicConfig was already called at import time above; this
    # second call is a no-op once the root logger has handlers.
    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
    app.run(host="0.0.0.0", port=5003, debug=False)
    # app.run(host="0.0.0.0", port=8080, debug=True)
|
[
"53259552+amadea-system@users.noreply.github.com"
] |
53259552+amadea-system@users.noreply.github.com
|
ee2ce6ef73683d2e4409cea21ef2c5ac5967e0d3
|
a0c1c35c7d2549c4252a2c1f5ebdbe641c99de8e
|
/workspace/SRM147/GoldenChain.py
|
520a66e5c385ac896fa871767e550a6d95b5ebc0
|
[] |
no_license
|
teruterubohz/topcoder
|
01aa4383d2df186ed1d7dda7a08055929ec0ca79
|
520eab20976a169c5b34603b6be09d3e39038c0f
|
refs/heads/master
| 2021-01-19T02:52:31.594307
| 2019-01-18T12:04:05
| 2019-01-18T12:04:05
| 52,022,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,461
|
py
|
import math,string,itertools,fractions,heapq,collections,re,array,bisect,random
class GoldenChain:
    """TopCoder SRM 147 GoldenChain: join all chain sections into one circle."""

    def minCuts(self, sections):
        """Return the minimum number of link cuts to close *sections* into a
        single circular necklace.

        Strategy: fully dismantle the k smallest sections (one cut per link)
        and use the opened links as connectors; joining the remaining n - k
        pieces into a cycle needs n - k open links, and any shortfall is cut
        from the next-smallest section (leaving one leftover piece, which is
        already counted).  The cost for a given k is therefore
        max(n - k, sum of the k smallest sections); minimize over k.
        Runs in O(n log n) for the sort.
        """
        chain = sorted(sections)
        n = len(chain)
        prefix = 0   # total links in the k smallest sections
        best = n     # k = 0: cut one connector per section
        for k in range(n):
            best = min(best, max(n - k, prefix))
            prefix += chain[k]
        return best
# BEGIN KAWIGIEDIT TESTING
# Generated by KawigiEdit-pf 2.3.0
import sys
import time
def KawigiEdit_RunTest(testNum, p0, hasAnswer, p1):
    """Run one minCuts test case, print the verdict and timing, and return
    True on pass (or when no expected answer was supplied and no timeout)."""
    sys.stdout.write(str("Test ") + str(testNum) + str(": [") + str("{"))
    for i in range(len(p0)):
        if (i > 0):
            sys.stdout.write(str(","))
        sys.stdout.write(str(p0[i]))
    sys.stdout.write(str("}"))
    print(str("]"))

    obj = GoldenChain()
    # BUG FIX: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8; time.perf_counter() is the documented replacement for measuring
    # short intervals.
    startTime = time.perf_counter()
    answer = obj.minCuts(p0)
    endTime = time.perf_counter()

    res = True
    print(str("Time: ") + str((endTime - startTime)) + str(" seconds"))
    if (hasAnswer):
        res = answer == p1
    if (not res):
        print(str("DOESN'T MATCH!!!!"))
        if (hasAnswer):
            print(str("Desired answer:"))
            print(str("\t") + str(p1))
        print(str("Your answer:"))
        print(str("\t") + str(answer))
    elif ((endTime - startTime) >= 2):
        # TopCoder-style 2-second time limit.
        print(str("FAIL the timeout"))
        res = False
    elif (hasAnswer):
        print(str("Match :-)"))
    else:
        print(str("OK, but is it right?"))
    print(str(""))
    return res
# Drive the KawigiEdit harness: each case folds its pass/fail into all_right
# and records whether it was skipped in tests_disabled.
all_right = True
tests_disabled = False

# ----- test 0 -----
disabled = False
p0 = (3,3,3,3)
p1 = 3
all_right = (disabled or KawigiEdit_RunTest(0, p0, True, p1) ) and all_right
tests_disabled = tests_disabled or disabled
# ------------------

# ----- test 1 -----
disabled = False
p0 = (2000000000,)
p1 = 1
all_right = (disabled or KawigiEdit_RunTest(1, p0, True, p1) ) and all_right
tests_disabled = tests_disabled or disabled
# ------------------

# ----- test 2 -----
disabled = False
p0 = (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50)
p1 = 42
all_right = (disabled or KawigiEdit_RunTest(2, p0, True, p1) ) and all_right
tests_disabled = tests_disabled or disabled
# ------------------

# ----- test 3 -----
disabled = False
p0 = (20000000,20000000,2000000000)
p1 = 3
all_right = (disabled or KawigiEdit_RunTest(3, p0, True, p1) ) and all_right
tests_disabled = tests_disabled or disabled
# ------------------

# ----- test 4 -----
disabled = False
p0 = (10,10,10,10,10,1,1,1,1,1)
p1 = 5
all_right = (disabled or KawigiEdit_RunTest(4, p0, True, p1) ) and all_right
tests_disabled = tests_disabled or disabled
# ------------------

# ----- test 5 -----
disabled = False
p0 = (1,10)
p1 = 1
all_right = (disabled or KawigiEdit_RunTest(5, p0, True, p1) ) and all_right
tests_disabled = tests_disabled or disabled
# ------------------

# Final verdict over all cases.
if (all_right):
    if (tests_disabled):
        print(str("You're a stud (but some test cases were disabled)!"))
    else:
        print(str("You're a stud (at least on given cases)!"))
else:
    print(str("Some of the test cases had errors."))
# PROBLEM STATEMENT
# Background
#
# I remember a chain problem from my childhood. Suppose you have four sections of a golden chain. Each consists of three links joined together in a line. You would like to connect all four sections into a necklace. The obvious solution is to cut the last link of each section and use it to connect the first section to the second one, then the second to the third, then the third to the fourth, then the fourth to the first one. If you want to minimize the number of cuts, you can do better. You can cut one of the three link sections into its individual links. Using the three loose links you can join the three remaining sections together.
#
# Your task is, given the lengths of the sections, to return the minimum number of cuts to make one big circular necklace out of all of them.
#
#
# DEFINITION
# Class:GoldenChain
# Method:minCuts
# Parameters:tuple (integer)
# Returns:integer
# Method signature:def minCuts(self, sections):
#
#
# CONSTRAINTS
# -sections has between 1 and 50 elements inclusive
# -each element of sections is between 1 and 2,147,483,647 inclusive
# -the sum of all elements of sections is between 3 and 2,147,483,647 inclusive
#
#
# EXAMPLES
#
# 0)
# {3,3,3,3}
#
# Returns: 3
#
# 1)
# {2000000000}
#
# Returns: 1
#
# 2)
# {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
# 21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,
# 38,39,40,41,42,43,44,45,46,47,48,49,50}
#
# Returns: 42
#
# 3)
# {20000000,20000000,2000000000}
#
# Returns: 3
#
# 4)
# {10,10,10,10,10,1,1,1,1,1}
#
# Returns: 5
#
# 5)
# {1,10}
#
# Returns: 1
#
# END KAWIGIEDIT TESTING
#Powered by KawigiEdit-pf 2.3.0!
|
[
"teruterubohz@gmail.com"
] |
teruterubohz@gmail.com
|
68160073063770730333e5a0d2fb1645cc4cf84d
|
2e2b50bc9338670d5ce938aaa72621f364509b41
|
/dags/kube_pod_operator.py
|
eba59149d6f42dfb8f49da98165f954d1176bb37
|
[] |
no_license
|
houstonj1/airflow-kube
|
75588dc34a6a474c96d67e91c0e86eaef53572d6
|
2fdd6546a0a3328a09f541c448be2e2ef935f417
|
refs/heads/main
| 2023-05-25T12:35:21.562554
| 2020-10-29T16:11:27
| 2020-10-29T16:11:27
| 303,440,702
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
from datetime import timedelta
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to operate!
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.utils.dates import days_ago
# Arguments applied to every task in the DAG unless overridden per-task.
default_args = {
    "owner": "airflow",
    "depends_on_past": False,
    "start_date": days_ago(2),
    "email": ["airflow@example.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(seconds=15),
}

# NOTE(review): "kube_pod_opereator" looks like a typo for
# "kube_pod_operator", but the string is the DAG id -- renaming it changes
# the DAG's identity in Airflow, so confirm before fixing.
dag = DAG(
    "kube_pod_opereator",
    default_args=default_args,
    description="A DAG using K8s pod operator",
    schedule_interval=None,  # trigger manually only
    catchup=False,
)

# Smoke test: run the stock hello-world image in the cluster.
hello_world = KubernetesPodOperator(
    task_id="hello-world",
    namespace="default",
    image="hello-world",
    name="hello-world",
    in_cluster=True,
    get_logs=True,
    log_events_on_failure=True,
    dag=dag,
)

python_version = KubernetesPodOperator(
    task_id="python-version",
    namespace="default",
    image="python:3.8-slim",
    cmds=["python", "--version"],
    name="python-version",
    in_cluster=True,
    get_logs=True,
    dag=dag,
)

node_version = KubernetesPodOperator(
    task_id="node-version",
    namespace="default",
    image="node:lts-alpine",
    cmds=["node", "--version"],
    name="node-version",
    in_cluster=True,
    get_logs=True,
    dag=dag,
)

go_version = KubernetesPodOperator(
    task_id="go-version",
    namespace="default",
    image="golang:alpine",
    cmds=["go", "version"],
    name="go-version",
    in_cluster=True,
    get_logs=True,
    dag=dag,
)

# Fan out: the three version checks run in parallel after hello-world.
hello_world >> [python_version, node_version, go_version]
|
[
"james-houston@pluralsight.com"
] |
james-houston@pluralsight.com
|
9be7874e668a88d0d9693cde61c18f8a0d179c3c
|
5fb68f25a20066118f85d510020a23da766ff0ce
|
/abandon/release_puppet_unity_ths.py
|
2dc0520e19d1a885ca7227149023583dbdd5ef59
|
[
"MIT"
] |
permissive
|
yutiansut/puppet
|
fe4d2c239ad9755507f24760871e3268a4ed9419
|
c8ca97b5b8720798f585cf9f7266cbdf247407c3
|
refs/heads/master
| 2021-01-16T19:26:38.375253
| 2020-08-14T06:02:35
| 2020-08-14T06:02:35
| 100,169,753
| 2
| 1
|
MIT
| 2020-08-14T06:02:36
| 2017-08-13T10:37:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,778
|
py
|
__author__ = '睿瞳深邃(https://github.com/Raytone-D)'
__project__ = "扯线木偶(puppet for THS trader)"
#增加账户id_btn = 1691
# coding: utf-8
import ctypes
from ctypes.wintypes import BOOL, HWND, LPARAM
from time import sleep
import win32clipboard as cp
WM_COMMAND, WM_SETTEXT, WM_GETTEXT, WM_KEYDOWN, WM_KEYUP, VK_CONTROL = \
273, 12, 13, 256, 257, 17 # 消息命令
F1, F2, F3, F4, F5, F6 = \
112, 113, 114, 115, 116, 117 # keyCode
op = ctypes.windll.user32
buffer = ctypes.create_unicode_buffer
def keystroke(hCtrl, keyCode, param=0):  # simulate a key press
    """Post a key-down/key-up pair for *keyCode* to window handle *hCtrl*."""
    op.PostMessageW(hCtrl, WM_KEYDOWN, keyCode, param)
    op.PostMessageW(hCtrl, WM_KEYUP, keyCode, param)
def get_data():
    """Copy the focused grid to the clipboard via Ctrl+C and return it split
    into whitespace-separated fields."""
    sleep(0.3)  # the sleep length determines whether the copy succeeds
    op.keybd_event(17, 0, 0, 0)   # Ctrl down
    op.keybd_event(67, 0, 0, 0)   # 'C' down
    sleep(0.1)  # without this pause the copy fails
    op.keybd_event(67, 0, 2, 0)   # 'C' up
    op.keybd_event(17, 0, 2, 0)   # Ctrl up
    cp.OpenClipboard(None)
    raw = cp.GetClipboardData(13)  # 13 = CF_UNICODETEXT
    data = raw.split()
    cp.CloseClipboard()
    return data
class unity():
    """Driver for one THS client's two-way (buy/sell) trading panel.

    All interaction is done by posting Win32 messages to the child controls
    of the trading window identified by *hwnd*.
    """

    def __init__(self, hwnd):
        keystroke(hwnd, F6)  # switch the client to the two-way order page
        self.buff = buffer(32)
        # Dialog-control ids: buy code/price/qty/button, sell code/price/qty/
        # button, cancel-all, cancel-buys, cancel-sells, refresh, balance,
        # grid, cancel-last, cancel-same.
        id_members = 1032, 1033, 1034, 1006, 1035, 1058, 1039, 1008, 30001, 30002, 30003, \
                     32790, 1038, 1047, 2053, 30022
        self.two_way = hwnd
        sleep(0.1)  # tune 0.01-0.5 by CPU speed so the child window resolves
        for i in (59648, 59649):
            self.two_way = op.GetDlgItem(self.two_way, i)
        self.members = {i: op.GetDlgItem(self.two_way, i) for i in id_members}

    def buy(self, symbol, price, qty):
        """Fill the buy-side fields and press the buy button (B)."""
        op.SendMessageW(self.members[1032], WM_SETTEXT, 0, symbol)
        op.SendMessageW(self.members[1033], WM_SETTEXT, 0, price)
        op.SendMessageW(self.members[1034], WM_SETTEXT, 0, qty)
        op.PostMessageW(self.two_way, WM_COMMAND, 1006, self.members[1006])

    def sell(self, symbol, price, qty):
        """Fill the sell-side fields and press the sell button (S).

        BUG FIX: the original signature was ``sell(self, *args)`` while the
        body referenced undefined names ``symbol``/``price``/``qty``, so every
        call raised NameError.  Positional callers are unaffected by the fix.
        """
        op.SendMessageW(self.members[1035], WM_SETTEXT, 0, symbol)
        op.SendMessageW(self.members[1058], WM_SETTEXT, 0, price)
        op.SendMessageW(self.members[1039], WM_SETTEXT, 0, qty)
        op.PostMessageW(self.two_way, WM_COMMAND, 1008, self.members[1008])

    def refresh(self):
        """Refresh the panel (F5)."""
        op.PostMessageW(self.two_way, WM_COMMAND, 32790, self.members[32790])

    def cancel(self, way=0):
        """Cancel a single order -- not implemented yet."""
        pass

    def cancelAll(self):
        """Cancel all open orders (Z)."""
        op.PostMessageW(self.two_way, WM_COMMAND, 30001, self.members[30001])

    def cancelBuy(self):
        """Cancel all open buy orders (X)."""
        op.PostMessageW(self.two_way, WM_COMMAND, 30002, self.members[30002])

    def cancelSell(self):
        """Cancel all open sell orders (C)."""
        op.PostMessageW(self.two_way, WM_COMMAND, 30003, self.members[30003])

    def cancelLast(self):
        """Cancel the most recent order -- Huatai customised build only."""
        op.PostMessageW(self.two_way, WM_COMMAND, 2053, self.members[2053])

    def cancelSame(self):
        """Cancel orders with the same symbol -- Huatai customised build only."""
        # op.PostMessageW(self.two_way, WM_COMMAND, 30022, self.members[30022])
        pass

    def balance(self):
        """Return the available cash balance as text read from the panel."""
        op.SendMessageW(self.members[1038], WM_GETTEXT, 32, self.buff)
        return self.buff.value

    def position(self):
        """Switch to the positions view (W) and return the copied grid data."""
        keystroke(self.two_way, 87)
        op.SetForegroundWindow(self.members[1047])
        return get_data()

    def tradeRecord(self):
        """Switch to the filled-trades view (E) and return the copied grid data."""
        keystroke(self.two_way, 69)
        op.SetForegroundWindow(self.members[1047])
        return get_data()

    def orderRecord(self):
        """Switch to the order-history view (R) and return the copied grid data."""
        keystroke(self.two_way, 82)
        op.SetForegroundWindow(self.members[1047])
        return get_data()
def finder(register):
    """Enumerate all visible broker trading terminals and wrap each in a
    unity driver, keyed by the nickname looked up in *register*."""
    team = set()
    buff = buffer(32)

    @ctypes.WINFUNCTYPE(BOOL, HWND, LPARAM)
    def check(hwnd, extra):
        # Collect every visible top-level window whose title contains the
        # trading-system marker.
        if op.IsWindowVisible(hwnd):
            op.GetWindowTextW(hwnd, buff, 32)
            if '交易系统' in buff.value:
                team.add(hwnd)
        return 1  # non-zero continues the enumeration

    op.EnumWindows(check, 0)

    def get_nickname(hwnd):
        # Drill down to the account-number control, read it, and map its last
        # three digits to a nickname via *register* (None if unregistered).
        account = hwnd
        for i in 59392, 0, 1711:
            account = op.GetDlgItem(account, i)
        op.SendMessageW(account, WM_GETTEXT, 32, buff)
        return register.get(buff.value[-3:])

    return {get_nickname(hwnd): unity(hwnd) for hwnd in team if hwnd}
if __name__ == '__main__':
    # Last three digits of each login (broker customer) number mapped to a
    # nickname; the digit suffixes must be unique and nicknames must not repeat.
    myRegister = {'888': '股神','509': 'gf', '966': '女神', '167': '虚拟盘', '743': '西门吹雪'}
    trader = finder(myRegister)
    if not trader:
        print("没发现可用的交易端。")
    else:
        #print(trader.keys())
        # Query the available cash balance of every discovered terminal.
        x = {nickname: broker.balance() for (nickname, broker) in trader.items()}
        print("可用余额:%s" %x)
        buy = '000078', '6.6', '300'
        #trader['虚拟盘'].buy(*buy)
        #p = trader['虚拟盘'].orderRecord()
        #p = trader['虚拟盘'].tradeRecord()
        p = trader['虚拟盘'].position()
        print(p)
        #trader['西门吹雪'].cancelLast()
|
[
"noreply@github.com"
] |
yutiansut.noreply@github.com
|
0fb93d3fbb0befa9bc95f8f2daa2dd0579410dfe
|
5835ec8ce289f21f0d5be130ff5a725d6fbad9ee
|
/rlmeta/storage/tensor_circular_buffer.py
|
9d1a716c539142d63263675eef942fddf16b39e0
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
facebookresearch/rlmeta
|
4f3b8649380666a0661a58e33f5cc9857c98acf9
|
06b2dc2b04c78241ba30addc60d71fba8b9aec76
|
refs/heads/main
| 2023-05-23T11:00:24.819345
| 2023-02-11T06:20:05
| 2023-02-11T06:20:05
| 438,839,283
| 296
| 24
|
MIT
| 2023-02-11T06:20:07
| 2021-12-16T02:47:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional, Sequence, Tuple, Union
import numpy as np
import _rlmeta_extension
from rlmeta.core.types import NestedTensor, Tensor
from rlmeta.storage import Storage
IndexType = Union[int, Tensor]
KeyType = Union[int, Tensor]
ValueType = NestedTensor
class TensorCircularBuffer(Storage):
    """Fixed-capacity circular buffer of nested tensors.

    Thin Python wrapper that delegates every operation to the C++
    ``_rlmeta_extension.TensorCircularBuffer`` implementation.
    """

    def __init__(self, capacity: int) -> None:
        self._impl = _rlmeta_extension.TensorCircularBuffer(capacity)

    def __getitem__(self, index: IndexType) -> Tuple[KeyType, ValueType]:
        """Return the (key, value) pair(s) at *index*."""
        return self._impl[index]

    @property
    def capacity(self) -> int:
        # Maximum number of entries the buffer can hold.
        return self._impl.capacity

    @property
    def size(self) -> int:
        # Current number of stored entries.
        return self._impl.size

    def empty(self) -> bool:
        """Return True when the buffer holds no entries."""
        return self._impl.empty()

    def reset(self) -> None:
        """Reset the buffer to its initial state."""
        self._impl.reset()

    def clear(self) -> None:
        """Remove all stored entries."""
        self._impl.clear()

    def front(self) -> Tuple[KeyType, ValueType]:
        """Return the oldest (key, value) pair."""
        return self._impl.front()

    def back(self) -> Tuple[KeyType, ValueType]:
        """Return the newest (key, value) pair."""
        return self._impl.back()

    def at(self, index: IndexType) -> Tuple[KeyType, ValueType]:
        """Return the (key, value) pair(s) at positional *index*."""
        return self._impl.at(index)

    def get(self, key: KeyType) -> ValueType:
        """Return the value stored under *key*."""
        return self._impl.get(key)

    def append(self, data: NestedTensor) -> Tuple[int, Optional[int]]:
        """Append one entry.

        NOTE(review): the (int, Optional[int]) return is inferred from the
        annotation only -- confirm its meaning against the C++ extension.
        """
        return self._impl.append(data)

    def extend(self,
               data: Union[NestedTensor, Sequence[NestedTensor]],
               stacked: bool = False) -> Tuple[np.ndarray, np.ndarray]:
        """Append many entries; *stacked* selects the batched C++ path."""
        if stacked:
            return self._impl.extend_stacked(data)
        else:
            return self._impl.extend(data)
|
[
"bit.yangxm@gmail.com"
] |
bit.yangxm@gmail.com
|
019f111fd520525a2832379a4ae3c01253ee4c84
|
908655251066427f654ee33ebdf804f9f302fcc3
|
/Toolbox/garage/garage/envs/mujoco/__init__.py
|
a75a710c6b5ab76c82b6b6863989a1ab540a650e
|
[
"MIT"
] |
permissive
|
maxiaoba/MCTSPO
|
be567f80f1dcf5c35ac857a1e6690e1ac599a59d
|
eedfccb5a94e089bd925b58f3d65eef505378bbc
|
refs/heads/main
| 2023-07-05T02:20:16.752650
| 2021-07-06T06:04:40
| 2021-07-06T06:04:40
| 381,811,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
# Package namespace for the MuJoCo-based environments: re-export every
# environment class so callers can import them from garage.envs.mujoco.
from garage.envs.mujoco.mujoco_env import MujocoEnv
from garage.envs.mujoco.ant_env import AntEnv  # noqa: I100
from garage.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from garage.envs.mujoco.hopper_env import HopperEnv
from garage.envs.mujoco.point_env import PointEnv
from garage.envs.mujoco.simple_humanoid_env import SimpleHumanoidEnv
from garage.envs.mujoco.swimmer_env import SwimmerEnv
from garage.envs.mujoco.swimmer3d_env import Swimmer3DEnv  # noqa: I100
from garage.envs.mujoco.walker2d_env import Walker2DEnv

# Public API of this package.
__all__ = [
    "MujocoEnv",
    "AntEnv",
    "HalfCheetahEnv",
    "HopperEnv",
    "PointEnv",
    "SimpleHumanoidEnv",
    "SwimmerEnv",
    "Swimmer3DEnv",
    "Walker2DEnv",
]
|
[
"xiaobaima@DNab421bb2.stanford.edu"
] |
xiaobaima@DNab421bb2.stanford.edu
|
ccb61474f529d018e2009f567ba58421b973862b
|
f3e06697a63e1bdb4cd8c711f504d19ff273c957
|
/bin/pull_cn10ybondsh300.py
|
c39015b41197b9b8a1638b6e12acf6ca917fb05b
|
[] |
no_license
|
kingfuzhu/52etf.net.new
|
74338fb27fa3606bd4516c5bad2df2c3686153bf
|
1c408cb16d441dff46cefdb6fdc3309eb8443be8
|
refs/heads/main
| 2023-08-24T17:07:16.621675
| 2021-10-21T01:17:11
| 2021-10-21T01:17:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
"""Plot China's 10-year government bond yield against the CSI 300 index
and save the chart as <dest>/cn10ybondsh300.png."""
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import akshare as ak
# FIX: the original did `import urllib` but called urllib.request.urlopen;
# the `request` submodule must be imported explicitly.
import urllib.request
import sys

# Destination directory for the rendered chart (first CLI argument).
dest = sys.argv[1]

# Font setup: download a CJK-capable font so Chinese labels render.
import matplotlib.font_manager as fm
github_url = 'https://github.com/adobe-fonts/source-han-sans/blob/release/OTF/SimplifiedChinese/SourceHanSansSC-Normal.otf'
url = github_url + '?raw=true'  # You want the actual file, not some html
from tempfile import NamedTemporaryFile
response = urllib.request.urlopen(url)
f = NamedTemporaryFile(delete=False, suffix='.ttf')
f.write(response.read())
f.close()
fontprop = fm.FontProperties(fname=f.name, size=13)

# x-axis tick locators/formatter (major ticks per year, minor per month).
years = mdates.YearLocator()    # every year
months = mdates.MonthLocator()  # every month
years_fmt = mdates.DateFormatter('%Y')

# Monthly closes of the Chinese 10-year government bond yield.
df = ak.bond_investing_global(country="中国", index_name="中国10年期国债", period="每月", start_date="1990-01-01", end_date="2020-06-13")
df = df[['收盘']]
df.reset_index(inplace=True)
# (the duplicate `import matplotlib.dates as mdates` was removed)
df['日期'] = df['日期'].map(mdates.date2num)

# Yield series on the left axis.
fig, ax = plt.subplots(figsize=(25, 8))
ax.plot(df['日期'], df['收盘'], label='十年期国债收益率')
ax.xaxis_date()
ax.xaxis.set_major_locator(years)
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_major_formatter(years_fmt)
ax.set_ylabel(ylabel='到期收益率', fontproperties=fontprop)
ax.grid(True)

# CSI 300 daily closes on a twin right axis.
stock_df = ak.stock_zh_index_daily(symbol="sh000300")
stock_df.reset_index(inplace=True)
stock_df['date'] = stock_df['date'].map(mdates.date2num)
ax2 = ax.twinx()
ax2.plot(stock_df['date'], stock_df['close'],
         color='red', alpha=0.8, label='沪深300')
ax2.set_ylabel(ylabel='沪深300', fontproperties=fontprop)

from datetime import datetime
updated_at = datetime.now().strftime('%Y-%m-%d')
fig.suptitle('中国十年期国债收益率 v.s. 沪深300\n公众号:结丹记事本儿,更新于{}'.format(updated_at), fontproperties=fontprop, fontsize=16)
fig.legend(loc="upper left", prop=fontprop, bbox_to_anchor=(0, 1), bbox_transform=ax.transAxes)
# fig.tight_layout()

import os
fname = os.path.abspath(dest + 'cn10ybondsh300.png')
# FIX: dropped the deprecated `quality` kwarg (ignored for PNG output and
# removed entirely in matplotlib >= 3.6).
fig.savefig(fname=fname, dpi=100)
|
[
"lileilei999@qq.com"
] |
lileilei999@qq.com
|
1c2e55d37cc03de8dd12ad539843d907c6cc7c43
|
f4505246c73acf36a6cbf0703c586a97744ecf44
|
/backend/chat/admin.py
|
c056b07794a7e09fa093df4e48bd19592f95b10d
|
[] |
no_license
|
crowdbotics-apps/fun-for-me-22426
|
176a5e3bc801a89951ddf699d2aae9d9f9d0bac0
|
3efedf39e307f0b139a23937a233ef6f3e2b9c1d
|
refs/heads/master
| 2023-01-06T17:20:40.332249
| 2020-11-09T03:32:15
| 2020-11-09T03:32:15
| 311,213,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
from django.contrib import admin
from .models import (
    Message,
    ThreadMember,
    MessageAction,
    ThreadAction,
    ForwardedMessage,
    Thread,
)

# Expose every chat model in the Django admin site
# (same registration order as the original explicit calls).
for _model in (Message, ForwardedMessage, MessageAction,
               Thread, ThreadMember, ThreadAction):
    admin.site.register(_model)
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
337747116aa59f6577a365dcff23d7eeaa3eae24
|
97d03b14743fa897183e4573ecf0c0cdcf04f2ad
|
/lef_def_parser/cell_learn.py
|
c13413d5e98bb2eebabcd7d56de05aaf933a07f3
|
[
"MIT"
] |
permissive
|
HanyMoussa/SPEF_EXTRACTOR
|
0d1f9244f02e1584c159d7dd3304a4c80376ce3c
|
4241190ce2bd8b44cf75092dd70b8ef9f4837c2c
|
refs/heads/master
| 2022-12-20T14:54:52.196847
| 2020-09-23T15:07:04
| 2020-09-23T15:07:04
| 284,084,394
| 11
| 3
|
MIT
| 2020-09-23T14:20:58
| 2020-07-31T16:44:02
|
Python
|
UTF-8
|
Python
| false
| false
| 20,932
|
py
|
"""
Train a ML model to predict cells based on vias location
Name: Tri Minh Cao
Email: tricao@utdallas.edu
Date: October 2016
"""
import pickle
import random
import os
from def_parser import *
from lef_parser import *
import util
from sklearn.linear_model import LogisticRegression
import numpy as np
import plot_layout
# Length of one training feature vector (see predict_cell for the layout:
# via count, per-via (y, type) padded to 4 vias, then pairwise distances).
FEATURE_LEN = 21
def save_data_pickle(dataset, filename):
    """Pickle *dataset* into *filename*; print a notice on failure
    instead of raising."""
    try:
        with open(filename, 'wb') as fh:
            pickle.dump(dataset, fh, pickle.HIGHEST_PROTOCOL)
    except Exception as err:
        print('Unable to save data to', filename, ':', err)
def merge_data(data_folder, num_cells):
    """
    Load every training pickle in *data_folder* and build a balanced
    dataset of at most *num_cells* feature vectors per cell macro.

    :param data_folder: directory containing (samples, labels) pickles
    :param num_cells: cap on samples kept per macro
    :return: dict macro name -> list of feature vectors
    """
    random.seed(12345)
    samples = []
    labels = []
    for fname in os.listdir(data_folder):
        part = load_data_pickle(os.path.join(data_folder, fname))
        samples.extend(part[0])
        labels.extend(part[1])
    # One bucket per supported macro, filled in random order below.
    dataset = {name: [] for name in
               ('AND2X1', 'INVX1', 'INVX8', 'NAND2X1', 'NOR2X1', 'OR2X1')}
    order = list(range(len(samples)))
    random.shuffle(order)
    for idx in order:
        label = labels[idx]
        if len(dataset[label]) < num_cells:
            dataset[label].append(samples[idx])
        # stop early once every bucket is full
        if all(len(bucket) >= num_cells for bucket in dataset.values()):
            break
    for macro in dataset:
        print(macro)
        print(len(dataset[macro]))
    return dataset
def train_model(dataset, data_len, num_to_label):
    """
    Train a logistic-regression cell classifier.

    :param dataset: dict macro name -> list of feature vectors, each of
                    length FEATURE_LEN
    :param data_len: total number of samples to draw (train + test)
    :param num_to_label: dict class index (0-5) -> macro name
    :return: (fitted model, X_train, y_train, X_test, y_test)
    """
    all_dataset = np.ndarray(shape=(data_len, FEATURE_LEN),
                             dtype=np.int32)
    all_label = np.ndarray(data_len,
                           dtype=np.int32)
    current_size = 0
    num_selected = [0, 0, 0, 0, 0, 0]  # samples consumed so far, per class
    # Randomly interleave classes so the drawn set is roughly balanced.
    # NOTE(review): assumes every class list holds enough samples for the
    # draw; otherwise this raises IndexError — confirm against merge_data.
    while current_size < data_len:
        choice = random.randrange(6)  # we have 6 types of cells
        cur_label = num_to_label[choice]
        cur_idx = num_selected[choice]
        cur_data = dataset[cur_label][cur_idx]
        all_dataset[current_size, :] = np.array(dataset[cur_label][cur_idx],
                                                dtype=np.int32)
        all_label[current_size] = choice
        current_size += 1
        num_selected[choice] += 1
    # shuffle the dataset (fixed seed for reproducibility)
    random.seed(6789)
    all_dataset, all_label = util.randomize(all_dataset, all_label)
    # 85/15 train/test split
    num_train = int(0.85 * data_len)
    # print(max(all_label))
    test_dataset = all_dataset[num_train:]
    test_label = all_label[num_train:]
    train_dataset = all_dataset[:num_train]
    train_label = all_label[:num_train]
    # train a logistic regression model
    regr = LogisticRegression()
    X_train = train_dataset
    y_train = train_label
    X_test = test_dataset
    y_test = test_label
    regr.fit(X_train, y_train)
    score = regr.score(X_test, y_test)
    pred_labels = regr.predict(X_test)
    print(pred_labels[:100])
    print(score)
    # Save the trained model for later use
    # filename = "./trained_models/logit_model_103116.pickle"
    # save_data_pickle(regr, filename)
    # return the trained model
    return regr, X_train, y_train, X_test, y_test
def predict_cell(candidates, row, model, lef_data, std_cells):
    """
    Use the trained model to choose the most probable cell from via groups.

    :param candidates: one entry per cell type: a via group (list of vias)
        or -1 if that type cannot start at the current via
    :param row: row index; combined with module-level CELL_HEIGHT to get
        the cell's y origin
    :param model: trained classifier exposing decision_function()
    :param lef_data: parsed LEF data (currently unused)
    :param std_cells: dict idx -> (min_vias, max_vias, width, pin_left_dist)
    :return: (via group, class index) for the first candidate whose own
        class score is the maximum, or None if no candidate wins

    NOTE(review): relies on module globals CELL_HEIGHT, FEATURE_LEN and
    labels (and util.nCr when a group has fewer than 4 vias).
    """
    for i in range(len(candidates)):
        if candidates[i] != -1:
            each_group = candidates[i]
            left_margin = std_cells[i][-1]
            # candidate cell origin: first via shifted left by the pin
            # offset, y at the bottom of this row
            left_pt = [each_group[0][0][0] - left_margin, CELL_HEIGHT * row]
            num_vias = len(each_group)
            # feature vector: via count, then (y offset, via type) per via
            # padded to 4 vias, then pairwise via distances (padded).
            features = [num_vias]
            y_bound = left_pt[1]
            for each_via in each_group:
                features.append(each_via[0][1] - y_bound)
                features.append(each_via[3])
            if num_vias < 4:
                features.extend([-1] * ((4 - num_vias) * 2))
            # BUG FIX: the original reused loop variable `i` for these
            # inner distance loops, clobbering the candidate index used
            # by labels[i] / result[i] below.
            for a in range(num_vias - 1):
                for b in range(a + 1, num_vias):
                    features.append(each_group[b][0][0] - each_group[a][0][0])
                    features.append(each_group[b][0][1] - each_group[a][0][1])
            if num_vias < 4:
                if num_vias == 1:
                    remain_dists = 2 * int(util.nCr(4, 2))
                else:
                    remain_dists = 2 * (int(util.nCr(4, 2) - util.nCr(num_vias, 2)))
                features.extend([0] * remain_dists)
            # score the candidate; decision_function yields one row of
            # per-class scores
            dataset = np.array(features, dtype=np.int32)
            X_test = dataset.reshape(1, FEATURE_LEN)
            result = model.decision_function(X_test)[0]
            print(labels[i])
            print(features)
            print(result)
            # accept the first candidate whose own class tops the scores
            if result[i] == max(result):
                return candidates[i], i
def predict_row():
    """
    Legacy driver: predict the cells in layout row(s) with a pickled
    logistic-regression model and compare against the actual components.

    NOTE(review): references names that are not in this function's scope
    (def_parser, lef_parser, via1_sorted, CELL_HEIGHT, MAX_DISTANCE,
    predict_score); calling it as-is raises NameError. It appears to be
    kept for reference only.
    """
    # FIXME: restructure this method
    # We can load the trained model
    pickle_filename = "./trained_models/logit_model_101716.pickle"
    logit_model = load_data_pickle(pickle_filename)
    labels = {0: 'and2', 1: 'invx1', 2: 'invx8', 3: 'nand2', 4: 'nor2',
              5: 'or2'}
    cell_labels = {'AND2X1': 'and2', 'INVX1': 'invx1', 'NAND2X1': 'nand2',
                   'NOR2X1': 'nor2', 'OR2X1': 'or2', 'INVX8': 'invx8'}
    # process: group components by row, sorted left-to-right
    components = util.sorted_components(def_parser.diearea[1], CELL_HEIGHT,
                                        def_parser.components.comps)
    num_rows = len(components)
    # print the sorted components
    correct = 0
    total_cells = 0
    predicts = []
    actuals = []
    # via_groups is only one row
    # for i in range(len(via1_sorted)):
    for i in range(0, 1):
        via_groups = util.group_via(via1_sorted[i], 3, MAX_DISTANCE)
        visited_vias = []  # later, make visited_vias a set to run faster
        cells_pred = []
        for each_via_group in via_groups:
            first_via = each_via_group[0][0]
            # print (first_via)
            # skip groups whose leading via was already consumed by a
            # previously accepted prediction
            if not first_via in visited_vias:
                best_group, prediction = predict_cell(each_via_group, i,
                                                      logit_model, lef_parser)
                print(best_group)
                print(labels[prediction])
                cells_pred.append(labels[prediction])
                for each_via in best_group:
                    visited_vias.append(each_via)
        # print (best_group)
        # print (labels[prediction])
        print(cells_pred)
        print(len(cells_pred))
        # collect the ground-truth macros for this row
        actual_comp = []
        actual_macro = []
        for each_comp in components[i]:
            actual_comp.append(cell_labels[each_comp.macro])
            actual_macro.append(each_comp.macro)
        print(actual_comp)
        print(len(actual_comp))
        num_correct, num_cells = predict_score(cells_pred, actual_comp)
        correct += num_correct
        total_cells += num_cells
        predicts.append(cells_pred)
        actuals.append(actual_comp)
    print()
    print(correct)
    print(total_cells)
    print(correct / total_cells * 100)
def load_data_pickle(filename):
    """
    Load and return a pickled object from *filename*.

    Returns None (after printing a notice) when the file cannot be read
    or unpickled. BUG FIX: the original left ``dataset`` unbound on
    failure, so the final ``return`` raised NameError instead.
    """
    dataset = None
    try:
        with open(filename, 'rb') as f:
            dataset = pickle.load(f)
    except Exception as e:
        print('Unable to read data from', filename, ':', e)
    return dataset
def old_main_class():
    """
    Legacy main routine, superseded by the ``if __name__ == '__main__'``
    block at the bottom of the file.

    NOTE(review): never called anywhere visible; kept for reference only.
    """
    num_cells_required = 900
    # merge_data()
    # load data from selected pickle
    set_filename = "./merged_data/selected_10_17_16.pickle"
    dataset = load_data_pickle(set_filename)
    # build the numpy array
    label_to_num = {'AND2X1': 0, 'INVX1': 1, 'INVX8': 2, 'NAND2X1': 3,
                    'NOR2X1': 4, 'OR2X1': 5}
    num_to_label = {0: 'AND2X1', 1: 'INVX1', 2: 'INVX8', 3: 'NAND2X1',
                    4: 'NOR2X1', 5: 'OR2X1'}
    # train_model()
    #######
    # DO SOME PREDICTION
    def_path = './libraries/layout_freepdk45/c880a.def'
    def_parser = DefParser(def_path)
    def_parser.parse()
    scale = def_parser.scale
    lef_file = "./libraries/FreePDK45/gscl45nm.lef"
    lef_parser = LefParser(lef_file)
    lef_parser.parse()
    print("Process file:", def_path)
    # row height in DEF database units
    CELL_HEIGHT = int(float(scale) * lef_parser.cell_height)
    all_via1 = util.get_all_vias(def_parser, via_type="M2_M1_via")
    # print (all_via1)
    # sort the vias by row
    via1_sorted = util.sort_vias_by_row(def_parser.diearea[1], CELL_HEIGHT, all_via1)
    MAX_DISTANCE = 2280  # OR2 cell width, can be changed later
    # predict_row()
    ################
    # new section
    # FIXME: need to build the netlist
    # test the image-based method
    ##############
    # List of standard cells
    std_cell_info = {}
    # info includes (min num vias, max num vias, width,
    # distance from left boundary to first pin)
    # I wonder if max num vias should be used, actually I don't know what is the
    # maximum number of vias, but I guess +1 is fine.
    # 0 is and2, 1 is invx1, etc.
    std_cell_info[0] = (3, 4, 2280, 295)
    std_cell_info[1] = (2, 3, 1140, 315)
    std_cell_info[2] = (2, 3, 2660, 695)
    std_cell_info[3] = (3, 4, 1520, 90)
    std_cell_info[4] = (3, 4, 1520, 315)
    std_cell_info[5] = (3, 4, 2280, 695)
def get_candidates(first_via_idx, via_list, std_cells):
    """
    Generate a list of candidates from the first via.

    Each standard cell type is considered in turn. If that cell cannot be
    placed at the first via, the entry is -1; otherwise it is the list of
    vias that fall inside the cell's footprint.

    :param first_via_idx: index of the leading via in via_list
    :param via_list: all vias in one row, sorted by x
    :param std_cells: dict idx -> (min_vias, max_vias, width, pin_left_dist)
    :return: list with one entry per cell type: a via group, or -1
    """
    candidates = []
    first_via = via_list[first_via_idx]
    first_via_x = first_via[0][0]
    for i in range(len(std_cells)):
        cell_width = std_cells[i][2]
        # BUG FIX: the original read the module-level std_cell_info global
        # here instead of the std_cells parameter.
        min_vias = std_cells[i][0]
        max_vias = std_cells[i][1]
        pin_left_dist = std_cells[i][3]
        # rightmost x a via may occupy if this cell starts at first_via
        boundary = first_via_x + cell_width - pin_left_dist
        # possible vias contain the vias inside the boundary
        possible_vias = [first_via]
        for j in range(first_via_idx + 1, len(via_list)):
            if via_list[j][0][0] <= boundary:
                possible_vias.append(via_list[j])
            else:
                break
        # check the candidate against the cell's via-count range
        if len(possible_vias) > max_vias or len(possible_vias) < min_vias:
            candidates.append(-1)
        else:
            if possible_vias not in candidates:
                candidates.append(possible_vias)
    print(candidates)
    print(len(candidates))
    return candidates
def get_inputs_outputs(def_info):
    """
    Collect the input and output net names from a parsed DEF file.

    :param def_info: parsed DEF object exposing .pins.pins
    :return: (inputs, outputs) lists of pin names

    BUG FIX: the original ignored the def_info parameter and read the
    module-level def_parser global instead.
    """
    pins = def_info.pins.pins
    inputs = []
    outputs = []
    for each_pin in pins:
        pin_name = each_pin.name
        direction = each_pin.direction.lower()
        if direction == 'input':
            inputs.append(pin_name)
        elif direction == 'output':
            outputs.append(pin_name)
    return inputs, outputs
# Main Class: train the classifier, then run reverse-engineering
# prediction over a DEF/LEF layout.
if __name__ == '__main__':
    random.seed(12345)
    # CONSTANTS
    label_to_num = {'AND2X1': 0, 'INVX1': 1, 'INVX8': 2, 'NAND2X1': 3,
                    'NOR2X1': 4, 'OR2X1': 5}
    num_to_label = {0: 'AND2X1', 1: 'INVX1', 2: 'INVX8', 3: 'NAND2X1',
                    4: 'NOR2X1', 5: 'OR2X1'}
    # merge the data (up to 1100 samples per macro)
    pickle_folder = './training_data/'
    dataset = merge_data(pickle_folder, 1100)
    # study the data
    # and2_data = dataset['AND2X1']
    # print(and2_data[:50])
    # pickle the merged data
    set_filename = "./merged_data/selected_11_03_16_less_feats.pickle"
    # save_data_pickle(dataset, set_filename)
    # train the model on 5500 drawn samples and persist it
    regr_model, X_train, y_train, X_test, y_test = train_model(dataset, 5500, num_to_label)
    save_data_pickle(regr_model, './trained_models/logit_110316_no_x.pickle')
    # study the test set: dump label, features and per-class scores
    for i in range(1, 100):
        print(num_to_label[y_test[i:i+1][0]])
        print(X_test[i:i+1])
        print(regr_model.decision_function(X_test[i:i+1]))
        print()
    # make up some cases here and see the result
    makeup = []
    # makeup.append([3, 190, 1710, 0, 950, 1710, 0, 1140, 1330, 1, -1, -1, -1])
    # no input/output data
    # makeup.append([3, 190+400, 1710, -1, 950+400, 1710, -1, 1140+400, 1330, -1, -1, -1, -1])
    # labels = []
    # labels.append(3)
    # X_makeup = np.array(makeup, dtype=np.int32)
    # for i in range(len(makeup)):
    #     print(num_to_label[labels[i]])
    #     print(X_makeup[i:i+1])
    #     print(regr_model.decision_function(X_makeup[i:i+1]))
    #     print(num_to_label[regr_model.predict(X_makeup[i:i+1])[0]])
    #     print()
    # load the model
    # model_file = './trained_models/logit_110316_no_x.pickle'
    # regr_model = load_data_pickle(model_file)
    #######
    # PREDICTION
    # get information from DEF and LEF files
    def_path = './libraries/layout_freepdk45/c432.def'
    def_parser = DefParser(def_path)
    def_parser.parse()
    scale = def_parser.scale
    lef_file = "./libraries/FreePDK45/gscl45nm.lef"
    lef_parser = LefParser(lef_file)
    lef_parser.parse()
    print("Process file:", def_path)
    # row height in DEF database units
    CELL_HEIGHT = int(float(scale) * lef_parser.cell_height)
    all_via1 = util.get_all_vias(def_parser, via_type="M2_M1_via")
    # print (all_via1[:50])
    # build the net_via dictionary
    nets = def_parser.nets.nets
    # initialize the nets_via_dict
    nets_vias_dict = {}
    for net in nets:
        net_name = net.name
        nets_vias_dict[net_name] = []
    # add vias to nets_dict
    for each_via in all_via1:
        net = each_via[2]
        nets_vias_dict[net].append(each_via)
    # sort the vias by row
    via1_sorted = util.sort_vias_by_row(def_parser.diearea[1], CELL_HEIGHT, all_via1)
    # add inputs and outputs from the design to via info: tag each via's
    # 4th field with 0 (input net) or 1 (output net)
    inputs, outputs = get_inputs_outputs(def_parser)
    # print(inputs)
    # print(outputs)
    for each_in in inputs:
        for each_via in nets_vias_dict[each_in]:
            each_via[3] = 0
    for each_out in outputs:
        for each_via in nets_vias_dict[each_out]:
            each_via[3] = 1
    # get candidates
    labels = {0: 'and2', 1: 'invx1', 2: 'invx8', 3: 'nand2', 4: 'nor2',
              5: 'or2'}
    cell_labels = {'AND2X1': 'and2', 'INVX1': 'invx1', 'NAND2X1': 'nand2',
                   'NOR2X1': 'nor2', 'OR2X1': 'or2', 'INVX8': 'invx8'}
    ##############
    # List of standard cells
    std_cell_info = {}
    # info includes (min num vias, max num vias, width,
    # distance from left boundary to first pin)
    # I wonder if max num vias should be used, actually I don't know what is the
    # maximum number of vias, but I guess +1 is fine.
    # 0 is and2, 1 is invx1, etc.
    std_cell_info[0] = (3, 4, 2280, 295)
    std_cell_info[1] = (2, 3, 1140, 315)
    std_cell_info[2] = (2, 3, 2660, 695)
    std_cell_info[3] = (3, 4, 1520, 90)
    std_cell_info[4] = (3, 4, 1520, 315)
    std_cell_info[5] = (3, 4, 2280, 695)
    # find the sorted components
    # NOTE(review): sorted_components is called unqualified here while
    # predict_row uses util.sorted_components — confirm which is in scope.
    components = sorted_components(def_parser.diearea[1], CELL_HEIGHT,
                                   def_parser.components.comps)
    correct = 0
    total_cells = 0
    predicts = []
    actuals = []
    # via_groups is only one row
    # for i in range(len(via1_sorted)):
    for i in range(0, 1):
        print('Process row', (i + 1))
        visited_vias = []  # later, make visited_vias a set to run faster
        cells_pred = []
        via_idx = 3
        while via_idx < len(via1_sorted[i]):
            # while via_idx < 3:
            # choosing candidates
            candidates = get_candidates(via_idx, via1_sorted[i], std_cell_info)
            best_group, prediction = predict_cell(candidates, i, regr_model,
                                                  lef_parser, std_cell_info)
            via_idx += len(best_group)
            print(best_group)
            print(labels[prediction])
            # cells_pred.append(labels[prediction])
            # for each_via in best_group:
            #     visited_vias.append(each_via)
        # Scoring block below is disabled in the original (string literal),
        # kept verbatim for reference.
        """
        print (cells_pred)
        print (len(cells_pred))
        actual_comp = []
        actual_macro = []
        for each_comp in components[i]:
            actual_comp.append(cell_labels[each_comp.macro])
            actual_macro.append(each_comp.macro)
        print (actual_comp)
        print (len(actual_comp))
        # check predictions vs actual cells
        # for i in range(len(actual_comp)):
        #     if cells_pred[i] == actual_comp[i]:
        #         correct += 1
        num_correct, num_cells = predict_score(cells_pred, actual_comp)
        correct += num_correct
        total_cells += num_cells
        predicts.append(cells_pred)
        actuals.append(actual_comp)
        print ()
        print (correct)
        print (total_cells)
        print (correct / total_cells * 100)
        """
|
[
"57135988+ramezmoussa@users.noreply.github.com"
] |
57135988+ramezmoussa@users.noreply.github.com
|
a378c572205cbaf5e36a297e746385342022b3ae
|
a995f917e307be0d427cc9cfd3dbdd045abdd097
|
/剑指offer/面试题46. 把数字翻译成字符串(dp).py
|
42c6faab283bd0074eae08f41fff90c186b170cb
|
[] |
no_license
|
Andrewlearning/Leetcoding
|
80d304e201588efa3ac93626021601f893bbf934
|
819fbc523f3b33742333b6b39b72337a24a26f7a
|
refs/heads/master
| 2023-04-02T09:50:30.501811
| 2023-03-18T09:27:24
| 2023-03-18T09:27:24
| 243,919,298
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
class Solution(object):
    def translateNum(self, num):
        """
        :type num: int
        :rtype: int

        Count the decodings of str(num): every digit translates on its
        own, and any two-digit window in [10, 25] may also translate as a
        pair. Rolling Fibonacci-style DP, O(1) extra space.
        """
        digits = str(num)
        two_back, one_back = 1, 1  # dp[i-2], dp[i-1]
        for pos in range(2, len(digits) + 1):
            window = digits[pos - 2:pos]
            if "10" <= window <= "25":
                # the pair is valid: both split and merge contribute
                two_back, one_back = one_back, one_back + two_back
            else:
                # windows like "01" or "90" cannot merge: single digit only
                two_back = one_back
        return one_back
"""
https://leetcode-cn.com/problems/ba-shu-zi-fan-yi-cheng-zi-fu-chuan-lcof/solution/mian-shi-ti-46-ba-shu-zi-fan-yi-cheng-zi-fu-chua-6/
time: O(N)
space: O(N)
"""
|
[
"yifu3@ualberta.ca"
] |
yifu3@ualberta.ca
|
e7474d9c5c1e8516b6e0cd7305371e9b583dbedf
|
70cea82aa3e5c574c7cb134253061dcdc45d08a7
|
/opencv-ex/ex03/cv_ex10.py
|
931881a60ba844706ed27decc56a2631b36c602a
|
[] |
no_license
|
kkc926/raspberrypi
|
e21b9431e8532790bab5b0132a8d3e232ffd292a
|
53018babb2395680013d77bf02bfce409e5bbfe0
|
refs/heads/master
| 2022-12-28T21:20:12.431215
| 2020-10-10T09:58:52
| 2020-10-10T09:58:52
| 299,950,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
"""Resize demo: load Lena as grayscale and resize it two ways —
to a fixed 320x240, and by scale factors fx/fy."""
import cv2
import numpy as np

src = cv2.imread('./data/lena.jpg', cv2.IMREAD_GRAYSCALE)
dst = cv2.resize(src, dsize=(320, 240))
# dsize=(0, 0) tells OpenCV to derive the size from fx/fy
dst2 = cv2.resize(src, dsize=(0, 0), fx=1.5, fy=1.2)
cv2.imshow('dst', dst)
cv2.imshow('dst2', dst2)
cv2.waitKey()
# BUG FIX: cv2 has no destroyWindows(); the API is destroyAllWindows().
cv2.destroyAllWindows()
|
[
"kkc926@naver.com"
] |
kkc926@naver.com
|
f4ee30e31222eff3c38aa983ee03c0fd28637c2b
|
ff303bc4218950faec67d4f53bb4acbbde586725
|
/jt65.py
|
723b3bd3eaab67552b98a30b543270596484d5c3
|
[] |
no_license
|
mcogoni/weakmon
|
86455868773b17a216ca49ae96ff57bd72db3d86
|
81a5cacd05ae589dcb76c249cfb466ec81de2964
|
refs/heads/master
| 2021-08-10T18:52:11.785312
| 2020-05-28T13:35:37
| 2020-05-28T13:35:37
| 185,056,638
| 0
| 0
| null | 2019-05-05T16:35:57
| 2019-05-05T16:35:57
| null |
UTF-8
|
Python
| false
| false
| 59,165
|
py
|
#!/usr/local/bin/python
#
# decode JT65
#
# inspired by the QEX May/June 2016 article by K9AN and K1JT
# about soft-decision JT65 decoding.
#
# much information and code from the WSJT-X source distribution.
#
# uses Phil Karn's Reed-Solomon software.
#
# Robert Morris, AB1HL
#
import numpy
import wave
import weakaudio
import weakutil
import scipy
import scipy.signal
import sys
import os
import math
import time
import copy
import calendar
import subprocess
import threading
import re
import random
import multiprocessing
from scipy.signal import lfilter
import ctypes
from ctypes import c_int, byref, cdll
import resource
import collections
import gc
#
# performance tuning parameters.
# (numbers in parentheses are values tried during development)
#
budget = 9        # CPU seconds (9)
noffs = 4         # look for sync every jblock/noffs (2)
off_scores = 1    # consider off_scores*noffs starts per freq bin (3, 4)
pass1_frac = 0.2  # fraction budget to spend before subtracting (0.5, 0.9, 0.5)
hetero_thresh = 6 # zero out bin that wins too many times (9, 5, 7)
soft_iters = 75   # try r-s soft decode this many times (35, 125, 75)
subslop = 0.01    # search in this window to match subtraction symbols
subgap = 1.3      # extra subtract()s this many hz on either side of main bin
# information about one decoded signal.
class Decode:
    """Container for one decoded JT65 signal and its decoding metadata."""
    def __init__(self,
                 hza,
                 nerrs,
                 msg,
                 snr,
                 minute,
                 start,
                 twelve,
                 decode_time):
        self.hza = hza              # frequency estimates (Hz); hz() averages them
        self.nerrs = nerrs          # Reed-Solomon error count
        self.msg = msg              # decoded message text
        self.snr = snr
        self.minute = minute        # cycle number the signal was heard in
        self.start = start          # sample offset of the signal start
        self.dt = 0.0  # XXX
        self.twelve = twelve
        self.decode_time = decode_time  # wall-clock time of the decode
    def hz(self):
        """Mean of the frequency estimates."""
        return numpy.mean(self.hza)
# Phil Karn's Reed-Solomon decoder.
# copied from wsjt-x, along with wrapkarn.c.
librs = cdll.LoadLibrary("librs/librs.so")

# the JT65 sync pattern (+1 = sync tone present, -1 = data symbol slot)
pattern = [
  1,-1,-1,1,1,-1,-1,-1,1,1,1,1,1,1,-1,1,-1,1,-1,-1,-1,1,-1,1,1,-1,-1,1,-1,-1,
  -1,1,1,1,-1,-1,1,1,1,1,-1,1,1,-1,1,1,1,1,-1,-1,-1,1,1,-1,1,-1,1,-1,1,1,
  -1,-1,1,1,-1,1,-1,1,-1,1,-1,-1,1,-1,-1,-1,-1,-1,-1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,
  -1,1,-1,-1,1,-1,1,1,-1,1,-1,1,-1,1,-1,-1,1,1,-1,-1,1,-1,-1,1,-1,-1,-1,-1,1,1,
  1,1,1,1,1,1
  ]

# start of special 28-bit callsigns, e.g. CQ.
NBASE = 37*36*10*27*27*27

# start of special grid locators for sig strength &c.
NGBASE = 180*180
# does this decoded message contain text that's generated
# mistakenly from noise by the reed-solomon decoder?
def broken_msg(msg):
    """True if msg contains any known noise-generated junk substring."""
    bogus = [ "OL6MWK", "1S9LND", "9M3QHC", "TIKK+", "J87FOE", "000AAA",
              "TG7HQQ", "475IVR", "L16RAH", "XO2QLH", "5E8HML", "HF7VBA",
              "F11XTN", "7T4EUZ", "EF5KYD", "A80CCM", "HF7VBA",
              "VV3EZD", "DT8ZBT", "8Z9RTD", "7U0NNP", "6P8CGY", "WH9ASY",
              "V96TCU", "BF3AUF", "7B5JDP", "1HFXR1", "28NTV",
              "388PNI", "TN2CQQ", "Y99CGR", "R21KIC", "X26DPX", "QG4YMT",
              "Y99CGR", "0L6MWK", "KG0EEY", "777SZP", "JU3SJO", "J76LH4XC5EO20",
              "A7FVFZOQH3", "GI5OF44MGO", "LN3CWS", "QTJNYSW6", "1FHXR1",
              "RG9CP6Z", "HIKGWR", "U5A9R7", "MF0ZG3", "9OOATN", "SVUW5S",
              "7MD2HY", "D5F2Q4Y", "L9HTT", "51FLJM", "6ZNDRN", "HTTROP",
              "ED0Z9O", "CDP7W2", "Q0TZ20VS", "TYKFVKV", "12VPKMR", "XNC34V",
              "GO950IZ", "MU6BNL", "302KDY", " CM5 K ", "892X722B8CSC",
              "8+YL.D0E-MUR.", "W7LH./", "HHW7LH.",
              ]
    return any(junk in msg for junk in bogus)
# weighted choice, to pick symbols to ignore in soft decode.
# a[i] = [ value, weight ]
def wchoice(a, n):
    """
    Draw n distinct values from a, without replacement, each picked with
    probability proportional to its weight.

    :param a: list of [value, weight] pairs
    :param n: number of values to draw (must be <= len(a))
    :return: list of n drawn values
    """
    total = 0.0
    for e in a:
        total += e[1]
    ret = [ ]
    got = [ False ] * len(a)   # entries already taken
    while len(ret) < n:
        # throw a dart into the remaining total weight, then walk the
        # not-yet-taken entries until the dart lands in one
        x = random.random() * total
        for ai in range(0, len(a)):
            if got[ai] == False:
                e = a[ai]
                if x <= e[1]:
                    ret.append(e[0])
                    total -= e[1]   # shrink the wheel by the taken weight
                    got[ai] = True
                    break
                x -= e[1]
    return ret
def wchoice_test():
    """Smoke test for wchoice(): draw pairs repeatedly and print how often
    each value was picked."""
    weighted = [ [ "a", .1 ], [ "b", .1 ], [ "c", .4 ], [ "d", .3 ], [ "e", .1 ] ]
    tally = { }
    for _ in range(0, 500):
        picked = wchoice(weighted, 2)
        assert len(picked) == 2
        for value in picked:
            tally[value] = tally.get(value, 0) + 1
    print(tally)
# one-shot flag: the first process() call runs inline ("warm things up")
# instead of forking worker processes.
very_first_time = True
class JT65:
debug = False
offset = 0
    def __init__(self):
        """Set up decoder state, processing rates, and the minute clock."""
        self.done = False                 # set by close() to stop gocard()
        self.msgs_lock = threading.Lock() # guards self.msgs
        self.msgs = [ ]                   # Decodes not yet fetched by get_msgs()
        self.verbose = False
        self.enabled = True  # True -> run process(); False -> don't
        self.jrate = int(11025/2)  # sample rate for processing (FFT &c)
        self.jblock = int(4096/2)  # samples per symbol
        weakutil.init_freq_from_fft(self.jblock)
        # set self.start_time to the UNIX time of the start
        # of a UTC minute.
        now = int(time.time())
        gm = time.gmtime(now)
        self.start_time = now - gm.tm_sec
    # seconds per cycle
    def cycle_seconds(self):
        """Length of one JT65 transmit/receive cycle, in seconds."""
        return 60
# return the minute number for t, a UNIX time in seconds.
# truncates down, so best to pass a time mid-way through a minute.
def minute(self, t):
dt = t - self.start_time
dt /= 60.0
return int(dt)
# convert cycle number to UNIX time.
def minute2time(self, m):
return (m * 60) + self.start_time
def second(self, t):
dt = t - self.start_time
dt /= 60.0
m = int(dt)
return 60.0 * (dt - m)
    def seconds_left(self, t):
        """Seconds remaining until the end of the minute containing t."""
        return 60 - self.second(t)
# printable UTC timestamp, e.g. "07/07/15 16:31:00"
# dd/mm/yy hh:mm:ss
# t is unix time.
def ts(self, t):
gm = time.gmtime(t)
return "%02d/%02d/%02d %02d:%02d:%02d" % (gm.tm_mday,
gm.tm_mon,
gm.tm_year - 2000,
gm.tm_hour,
gm.tm_min,
gm.tm_sec)
    def openwav(self, filename):
        """Open a WAV file and cache its channel count, sample width and rate."""
        self.wav = wave.open(filename)
        self.wav_channels = self.wav.getnchannels()
        self.wav_width = self.wav.getsampwidth()
        self.cardrate = self.wav.getframerate()
def readwav(self, chan):
z = self.wav.readframes(1024)
if self.wav_width == 1:
zz = numpy.fromstring(z, numpy.int8)
elif self.wav_width == 2:
if (len(z) % 2) == 1:
return numpy.array([])
zz = numpy.fromstring(z, numpy.int16)
else:
sys.stderr.write("oops wave_width %d" % (self.wav_width))
sys.exit(1)
if self.wav_channels == 1:
return zz
elif self.wav_channels == 2:
return zz[chan::2] # chan 0/1 => left/right
else:
sys.stderr.write("oops wav_channels %d" % (self.wav_channels))
sys.exit(1)
    def gowav(self, filename, chan):
        """Decode a whole WAV file: hand process() one 60-second minute at
        a time, then any final partial minute of at least 49 seconds."""
        self.openwav(filename)
        bufbuf = [ ]
        n = 0  # samples accumulated in bufbuf
        while True:
            buf = self.readwav(chan)
            if buf.size < 1:
                break
            bufbuf.append(buf)
            n += len(buf)
            if n >= 60*self.cardrate:
                samples = numpy.concatenate(bufbuf)
                self.process(samples[0:60*self.cardrate], 0)
                # carry the overflow into the next minute
                bufbuf = [ samples[60*self.cardrate:] ]
                n = len(bufbuf[0])
        if n >= 49*self.cardrate:
            samples = numpy.concatenate(bufbuf)
            bufbuf = None
            self.process(samples, 0)
    def opencard(self, desc):
        """Open the sound card described by desc at the processing rate."""
        # self.cardrate = 11025 # XXX
        self.cardrate = int(11025 / 2)  # XXX jrate
        self.audio = weakaudio.new(desc, self.cardrate)
    def gocard(self):
        """Capture loop: accumulate sound-card samples and hand each whole
        minute to process(); runs until close() sets self.done."""
        samples_time = time.time()
        bufbuf = [ ]
        nsamples = 0
        while self.done == False:
            sec = self.second(samples_time)
            if sec < 48 or nsamples < 48*self.cardrate:
                # give lower-level audio a chance to use
                # bigger batches, may help resampler() quality.
                time.sleep(1.0)
            else:
                time.sleep(0.2)
            [ buf, buf_time ] = self.audio.read()
            if len(buf) > 0:
                bufbuf.append(buf)
                nsamples += len(buf)
                samples_time = buf_time
                # near-full-scale samples suggest clipping; flag it
                if numpy.max(buf) > 30000 or numpy.min(buf) < -30000:
                    sys.stderr.write("!")
            # wait until we have enough samples through 49th second of minute.
            # we want to start on the minute (i.e. a second before nominal
            # start time), and end a second after nominal end time.
            # thus through 46.75 + 2 = 48.75.
            sec = self.second(samples_time)
            if sec >= 49 and nsamples >= 49*self.cardrate:
                # we have >= 49 seconds of samples, and second of minute is >= 49.
                samples = numpy.concatenate(bufbuf)
                bufbuf = [ ]
                # sample # of start of minute.
                i0 = len(samples) - self.cardrate * self.second(samples_time)
                i0 = int(i0)
                # UNIX time of samples[i0]
                t = samples_time - (len(samples)-i0) * (1.0/self.cardrate)
                self.process(samples[i0:], t)
                samples = None
                nsamples = 0
    def close(self):
        """Request shutdown of the capture loop."""
        # ask gocard() thread to stop.
        self.done = True
    # received a message, add it to the list.
    # dec is a Decode.
    def got_msg(self, dec):
        """Record a decode, de-duplicating against the ~40 most recent
        entries (same minute, within 10 Hz, same text); a duplicate with
        fewer Reed-Solomon errors replaces the stored one."""
        self.msgs_lock.acquire()
        # already in msgs with worse nerrs?
        found = False
        for i in range(max(0, len(self.msgs)-40), len(self.msgs)):
            xm = self.msgs[i]
            if xm.minute == dec.minute and abs(xm.hz() - dec.hz()) < 10 and xm.msg == dec.msg:
                # we already have this msg
                found = True
                if dec.nerrs < xm.nerrs:
                    self.msgs[i] = dec
        if found == False:
            self.msgs.append(dec)
        self.msgs_lock.release()
# return a list of all messages received
# since the last call to get_msgs().
# each msg is a Decode.
def get_msgs(self):
self.msgs_lock.acquire()
a = self.msgs
self.msgs = [ ]
self.msgs_lock.release()
return a
# fork the real work, to try to get more multi-core parallelism.
def process(self, samples, samples_time):
global budget
global very_first_time
if very_first_time:
# warm things up.
very_first_time = False
thunk = (lambda dec : self.got_msg(dec))
self.process0(samples, samples_time, thunk, 0, 2580)
return
sys.stdout.flush()
# parallelize the work by audio frequency: one thread
# gets the low half, the other thread gets the high half.
# the ranges have to overlap so each can decode
# overlapping (interfering) transmissions.
rca = [ ] # connections on which we'll receive
pra = [ ] # child processes
tha = [ ] # a thread to read from each child
txa = [ ]
npr = 2
for pi in range(0, npr):
min_hz = pi * int(2580 / npr)
min_hz = max(min_hz - 50, 0)
max_hz = (pi + 1) * int(2580 / npr)
max_hz = min(max_hz + 175, 2580)
txa.append(time.time())
recv_conn, send_conn = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=self.process00,
args=[samples, samples_time, send_conn,
min_hz, max_hz])
p.start()
send_conn.close()
pra.append(p)
rca.append(recv_conn)
th = threading.Thread(target=lambda c=recv_conn: self.readchild(c))
th.start()
tha.append(th)
for pi in range(0, len(rca)):
t0 = time.time()
pra[pi].join(budget+2.0)
if pra[pi].is_alive():
print("\n%s child process still alive, enabled=%s\n" % (self.ts(time.time()),
self.enabled))
pra[pi].terminate()
pra[pi].join(2.0)
t1 = time.time()
tha[pi].join(2.0)
if tha[pi].isAlive():
t2 = time.time()
print("\n%s reader thread still alive, enabled=%s, %.1f %.1f %.1f\n" % (self.ts(t2),
self.enabled,
t0-txa[pi],
t1-t0,
t2-t1))
rca[pi].close()
def readchild(self, recv_conn):
while True:
try:
dec = recv_conn.recv()
# x is a Decode
self.got_msg(dec)
except:
break
# in child process.
def process00(self, samples, samples_time, send_conn, min_hz, max_hz):
gc.disable() # no point since will exit soon
thunk = (lambda dec : send_conn.send(dec))
self.process0(samples, samples_time, thunk, min_hz, max_hz)
send_conn.close()
    # for each decode, call thunk(Decode).
    # only look at sync tones from min_hz .. max_hz.
    def process0(self, samples, samples_time, thunk, min_hz, max_hz):
        """Find and decode every readable JT65 signal in one minute.

        Resamples to self.jrate, pads to a standard length, ranks
        candidate sync frequencies with scores(), decodes the best
        candidates, then subtracts each decoded signal and makes a
        second decoding pass over the residue.  All work is limited
        by the global time budget.
        """
        global budget, noffs, off_scores, pass1_frac, subgap
        if self.enabled == False:
            return
        if self.verbose:
            print("len %d %.1f, type %s, rates %.1f %.1f" % (len(samples),
                                                             len(samples) / float(self.cardrate),
                                                             type(samples[0]),
                                                             self.cardrate,
                                                             self.jrate))
            sys.stdout.flush()
        # for budget.
        t0 = time.time()
        # samples_time is UNIX time that samples[0] was
        # sampled by the sound card.
        samples_minute = self.minute(samples_time + 30)
        if self.cardrate != self.jrate:
            # reduce rate from self.cardrate to self.jrate.
            assert self.jrate >= 2 * 2500
            if False:
                filter = weakutil.butter_lowpass(2500.0, self.cardrate, order=10)
                samples = scipy.signal.lfilter(filter[0],
                                               filter[1],
                                               samples)
                samples = weakutil.resample(samples, self.cardrate, self.jrate)
            else:
                # resample in pieces so that we can preserve float32,
                # since lfilter insists on float64.
                rs = weakutil.Resampler(self.cardrate, self.jrate)
                resampleblock = self.cardrate # exactly one second works best
                si = 0
                ba = [ ]
                while si < len(samples):
                    block = samples[si:si+resampleblock]
                    nblock = rs.resample(block)
                    nblock = nblock.astype(numpy.float32)
                    ba.append(nblock)
                    si += resampleblock
                samples = numpy.concatenate(ba)
        # assume samples[0] is at the start of the minute, so that
        # signals ought to start one second into samples[].
        # pad so that there two seconds before the start of
        # the minute, and a few seconds after 0:49.
        pad0 = 2 # add two seconds to start
        endsec = 49 + 4 # aim to have padded samples end on 0:53
        # pad with noise at roughly the input's own level, so the
        # padding doesn't bias the FFTs.
        sm = numpy.mean(abs(samples[2000:5000]))
        r0 = (numpy.random.random(self.jrate*pad0) - 0.5) * sm * 2
        r0 = r0.astype(numpy.float32)
        if len(samples) >= endsec*self.jrate:
            # trim at end
            samples = numpy.concatenate([ r0, samples[0:endsec*self.jrate] ])
        else:
            # pad at end
            needed = endsec*self.jrate - len(samples)
            r1 = (numpy.random.random(needed) - 0.5) * sm * 2
            r1 = r1.astype(numpy.float32)
            samples = numpy.concatenate([ r0, samples, r1 ])
        [ noise, scores ] = self.scores(samples, min_hz, max_hz)
        # scores[i] = [ bin, correlation, valid, start ]
        bin_hz = self.jrate / float(self.jblock)
        ssamples = numpy.copy(samples) # for subtraction
        already = { } # suppress duplicate msgs
        subalready = { }
        decodes = 0
        # first without subtraction.
        # don't blow the whole budget, to ensure there's time
        # to start decoding on subtracted signals.
        i = 0
        while i < len(scores) and ((decodes < 1 and (time.time() - t0) < budget) or
                                   (decodes > 0 and (time.time() - t0) < budget * pass1_frac)):
            hz = scores[i][0] * (self.jrate / float(self.jblock))
            dec = self.process1(samples, hz, noise, scores[i][3], already)
            if dec != None:
                decodes += 1
                dec.minute = samples_minute
                thunk(dec)
                if not dec.msg in subalready:
                    # subtract at the decoded frequency and at +/- subgap,
                    # to cancel more of the signal's energy.
                    ssamples = self.subtract_v4(ssamples, dec.hza,
                                                dec.start, dec.twelve)
                    ssamples = self.subtract_v4(ssamples, numpy.add(dec.hza, subgap),
                                                dec.start, dec.twelve)
                    ssamples = self.subtract_v4(ssamples, numpy.add(dec.hza, -subgap),
                                                dec.start, dec.twelve)
                    subalready[dec.msg] = True
            i += 1
        nfirst = i
        # re-score subtracted samples.
        [ junk_noise, scores ] = self.scores(ssamples, min_hz, max_hz)
        # now try again, on subtracted signal.
        # we do a complete new pass since a strong signal might have
        # been unreadable due to another signal at a somewhat higher
        # frequency.
        i = 0
        while i < len(scores) and (time.time() - t0) < budget:
            hz = scores[i][0] * (self.jrate / float(self.jblock))
            dec = self.process1(ssamples, hz, noise, scores[i][3], already)
            if dec != None:
                decodes += 1
                dec.minute = samples_minute
                thunk(dec)
                # this subtract() is important for performance.
                if not dec.msg in subalready:
                    ssamples = self.subtract_v4(ssamples, dec.hza,
                                                dec.start, dec.twelve)
                    ssamples = self.subtract_v4(ssamples, numpy.add(dec.hza, subgap),
                                                dec.start, dec.twelve)
                    ssamples = self.subtract_v4(ssamples, numpy.add(dec.hza, -subgap),
                                                dec.start, dec.twelve)
                    subalready[dec.msg] = True
            i += 1
        if self.verbose:
            print("%d..%d, did %d of %d, %d hits, maxrss %.1f MB" % (
                min_hz,
                max_hz,
                nfirst+i,
                len(scores),
                decodes,
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024.0*1024.0)))
    # assign a score to each frequency bin,
    # according to how similar it seems to a sync tone pattern.
    # samples should have already been padded.
    # returns [ noise, scores ]
    # noise is for SNR.
    # scores[i] is [ sync_bin, score, True, start ]
    def scores(self, samples, min_hz, max_hz):
        """Rank candidate sync frequencies by correlation with the
        126-symbol JT65 sync pattern, tried at noffs time offsets.
        """
        bin_hz = self.jrate / float(self.jblock)
        minbin = max(5, int(min_hz / bin_hz))
        maxbin = int(max_hz / bin_hz)
        # the candidate symbol-boundary alignments, in samples.
        offs = [ int((x*self.jblock)/noffs) for x in range(0, noffs) ]
        m = []
        noises = numpy.zeros(self.jblock//2 + 1) # for SNR
        nnoises = 0
        # m[oi][k] is the magnitude spectrum of the k'th symbol-length
        # block starting at offset offs[oi].
        for oi in range(0, len(offs)):
            m.append([])
            si = offs[oi]
            while si + self.jblock <= len(samples):
                block = samples[si:si+self.jblock]
                # block = block * scipy.signal.blackmanharris(len(block))
                # a = numpy.fft.rfft(block)
                # a = abs(a)
                a = weakutil.arfft(block)
                m[oi].append(a)
                noises = numpy.add(noises, a)
                nnoises += 1
                si += self.jblock
        noises /= nnoises
        # calculate noise for snr, mimicing wsjtx wsprd.c.
        # first average in freq domain over 7-bin window.
        # then noise from 30th percentile.
        nn = numpy.convolve(noises, [ 1, 1, 1, 1, 1, 1, 1 ])
        nn = nn / 7.0
        nn = nn[6:]
        nns = sorted(nn[minbin:maxbin])
        noise = nns[int(0.3*len(nns))]
        # scores[i] = [ bin, correlation, valid, start ]
        scores = [ ]
        # for each frequency bin, strength of correlation with sync pattern.
        # searches (w/ correlation) for best match to sync pattern.
        # tries different offsets in time (from offs[]).
        for j in range(minbin, maxbin):
            for oi in range(0, len(offs)):
                v = [ ]
                for mx in m[oi]:
                    v.append(mx[j])
                cc = numpy.correlate(v, pattern)
                # keep the off_scores best-correlating start positions
                # for this (bin, offset) pair.
                indices = list(range(0, len(cc)))
                indices = sorted(indices, key=lambda i : -cc[i])
                indices = indices[0:off_scores]
                for ii in indices:
                    scores.append([ j, cc[ii], True, offs[oi] + ii*self.jblock ])
        # highest scores first.
        scores = sorted(scores, key=lambda sc : -sc[1])
        return [ noise, scores ]
    # subtract a decoded signal (hz/start/twelve) from the samples,
    # so that we can then decode weaker signals underneath it.
    # i.e. interference cancellation.
    # generates the right tone for each symbol, finds the best
    # offset w/ correlation, finds the amplitude, subtracts in the time domain.
    def subtract_v4(self, osamples, hza, start, twelve):
        """Return a copy of osamples with the given decoded transmission
        cancelled out.

        hza: [start_hz, end_hz] sync frequency; start: sample index of
        the transmission; twelve: the 12 packed message symbols.
        """
        global subslop
        sender = JT65Send()
        bin_hz = self.jrate / float(self.jblock)
        # the 126 symbols, each 0..66
        symbols = sender.symbols(twelve)
        samples = numpy.copy(osamples)
        # shift samples[] so the transmission begins at samples[0].
        if start < 0:
            samples = numpy.append([0.0]*(-start), samples)
        else:
            samples = samples[start:]
        bigslop = int(self.jblock * subslop)
        #bigslop = int((self.jblock / hza[0]) / 2.0) + 1
        #bigslop = int(self.jblock / hza[0]) + 1
        # find amplitude of each symbol.
        amps = [ ]
        offs = [ ]
        tones = [ ]
        i = 0
        while i < 126:
            # runs of identical consecutive symbols are fitted as one
            # longer tone.
            nb = 1
            while i+nb < 126 and symbols[i+nb] == symbols[i]:
                nb += 1
            sync_hz = self.sync_hz(hza, i)
            hz = sync_hz + symbols[i] * bin_hz
            tone = weakutil.costone(self.jrate, hz, self.jblock*nb)
            # nominal start of symbol in samples[]
            i0 = i * self.jblock
            i1 = i0 + nb*self.jblock
            # search +/- slop.
            # we search separately for each symbol b/c the
            # phase may drift over the minute, and we
            # want the tone to match exactly.
            i0 = max(0, i0 - bigslop)
            i1 = min(len(samples), i1 + bigslop)
            cc = numpy.correlate(samples[i0:i1], tone)
            mm = numpy.argmax(cc) # thus samples[i0+mm]
            # what is the amplitude?
            # if actual signal had a peak of 1.0, then
            # correlation would be sum(tone*tone).
            cx = cc[mm]
            c1 = numpy.sum(tone * tone)
            a = cx / c1
            amps.append(a)
            offs.append(i0+mm)
            tones.append(tone)
            i += nb
        # subtract each fitted tone at its best offset and amplitude.
        ai = 0
        while ai < len(amps):
            a = amps[ai]
            off = offs[ai]
            tone = tones[ai]
            samples[off:off+len(tone)] -= tone * a
            ai += 1
        # undo the start shift applied above.
        if start < 0:
            nsamples = samples[(-start):]
        else:
            nsamples = numpy.append(osamples[0:start], samples)
        return nsamples
    # this doesn't work, probably because phase is not
    # coherent over the message.
    # NOTE(review): apparently abandoned in favor of subtract_v4(),
    # which fits each symbol run separately.
    def subtract_v5(self, osamples, hza, start, twelve):
        """Whole-message interference cancellation: synthesize the full
        transmission once, correlate for the best offset and amplitude,
        and subtract it in the time domain.  (Known not to work well.)
        """
        sender = JT65Send()
        samples = numpy.copy(osamples)
        assert start >= 0
        symbols = sender.symbols(twelve)
        bin_hz = self.jrate / float(self.jblock)
        msg = sender.fsk(symbols, hza, bin_hz, self.jrate, self.jblock)
        # search within about half a carrier cycle of the nominal start.
        slop = int((self.jblock / hza[0]) / 2.0) + 1
        i0 = start - slop
        i1 = start + len(msg) + slop
        cc = numpy.correlate(samples[i0:i1], msg)
        mm = numpy.argmax(cc) # thus msg starts at samples[i0+mm]
        # what is the amplitude?
        # if actual signal had a peak of 1.0, then
        # correlation would be sum(tone*tone).
        cx = cc[mm]
        c1 = numpy.sum(msg * msg)
        a = cx / c1
        samples[i0+mm:i0+mm+len(msg)] -= msg * a
        return samples
    # a signal begins near samples[start0], at frequency hza[0]..hza[1].
    # return a better guess at the start.
    def guess_start(self, samples, hza, start0):
        """Refine start0 by correlating each run of sync symbols against
        an ideal sync tone and taking the median measured offset.
        """
        bin_hz = self.jrate / float(self.jblock)
        offs = [ ]
        slop = self.jblock // noffs
        i = 0
        while i < 126:
            # find the next run of consecutive sync symbols.
            nb = 0
            while i+nb < 126 and pattern[nb+i] == 1:
                nb += 1
            if nb > 0:
                hz = self.sync_hz(hza, i)
                tone = weakutil.costone(self.jrate, hz, self.jblock*nb)
                # nominal sample range of this sync run.
                i0 = start0 + i * self.jblock
                i1 = i0 + nb*self.jblock
                cc = numpy.correlate(samples[i0-slop:i1+slop], tone)
                mm = numpy.argmax(cc)
                offs.append(i0-slop+mm - i0)
                i += nb
            else:
                i += 1
        # the median is robust against sync runs corrupted by
        # interference.
        medoff = numpy.median(offs)
        start = int(start0 + medoff)
        return start
    # the sync tone is believed to be hz to within one fft bin.
    # return hz with higher resolution.
    # returns a two-element array of hz at start, hz at end.
    def guess_freq(self, samples, hz):
        """Estimate the sync tone's frequency and drift.

        Measures the frequency in each sync symbol slot, then
        extrapolates from the two half-message medians to
        [ hz_at_start, hz_at_end ].  Returns None if no sync symbol
        yields a usable measurement.
        """
        bin_hz = self.jrate / float(self.jblock)
        bin = int(round(hz / bin_hz))
        freqs = [ ]
        for i in range(0, len(pattern)):
            if pattern[i] == 1:
                sx = samples[i*self.jblock:(i+1)*self.jblock]
                ff = weakutil.freq_from_fft(sx, self.jrate,
                                            bin_hz * (bin - 1),
                                            bin_hz * (bin + 2))
                if ff != None and not numpy.isnan(ff):
                    freqs.append(ff)
        if len(freqs) < 1:
            return None
        # nhz = numpy.median(freqs)
        # nhz = numpy.mean(freqs)
        # return nhz
        # medians of the first and second halves approximate the
        # frequency at the 1/4 and 3/4 points through the message.
        n = len(freqs)
        m1 = numpy.median(freqs[0:n//2])
        m2 = numpy.median(freqs[n//2:])
        # extrapolate to the frequencies at start and end.
        m0 = m1 - (m2 - m1) / 2.0
        m3 = m2 + (m2 - m1) / 2.0
        hza = [ m0, m3 ]
        return hza
# given hza[hz0,hzn] from guess_freq(),
# and a symbol number (0..126),
# return the sync bin.
# the point is to correct for frequency drift.
def sync_bin(self, hza, sym):
hz = self.sync_hz(hza, sym)
bin_hz = self.jrate / float(self.jblock) # FFT bin size, in Hz
bin = int(round(hz / bin_hz))
return bin
def sync_hz(self, hza, sym):
hz = hza[0] + (hza[1] - hza[0]) * (sym / 126.0)
return hz
# xhz is the sync tone frequency.
# returns None or a Decode
def process1(self, samples, xhz, noise, start, already):
if len(samples) < 126*self.jblock:
return None
bin_hz = self.jrate / float(self.jblock) # FFT bin size, in Hz
dec = self.process1a(samples, xhz, start, noise, already)
return dec
    # returns a Decode, or None
    def process1a(self, samples, xhz, start, noise, already):
        """Try to decode one transmission with sync tone near xhz,
        starting near samples[start].

        Refines the frequency and start estimates, demodulates the 126
        symbol slots, suppresses bins that win implausibly often
        (likely syncs of other transmissions), then hands the 63 data
        symbols to process2() for error correction and unpacking.
        """
        global hetero_thresh
        bin_hz = self.jrate / float(self.jblock) # FFT bin size, in Hz
        assert start >= 0
        #if start < 0:
        #    samples = numpy.append([0.0]*(-start), samples)
        #else:
        #    samples = samples[start:]
        if len(samples) - start < 126*self.jblock:
            return None
        # refine frequency; hza is [hz_at_start, hz_at_end].
        hza = self.guess_freq(samples[start:], xhz)
        if hza == None:
            return None
        # discard candidates whose 2+64 tone bins would not fit
        # within the FFT.
        if self.sync_bin(hza, 0) < 5:
            return None
        if self.sync_bin(hza, 125) < 5:
            return None
        if self.sync_bin(hza, 0) + 2+64 > self.jblock/2:
            return None
        if self.sync_bin(hza, 125) + 2+64 > self.jblock/2:
            return None
        start = self.guess_start(samples, hza, start)
        if start < 0:
            return None
        if len(samples) - start < 126*self.jblock:
            return None
        samples = samples[start:]
        # m[i] is the magnitude spectrum of symbol slot i, shifted so
        # the sync tone falls exactly on its FFT bin.
        m = [ ]
        for i in range(0, 126):
            # block = block * scipy.signal.blackmanharris(len(block))
            sync_bin = self.sync_bin(hza, i)
            sync_hz = self.sync_hz(hza, i)
            freq_off = sync_hz - (sync_bin * bin_hz)
            block = samples[i*self.jblock:(i+1)*self.jblock]
            # block = weakutil.freq_shift(block, -freq_off, 1.0/self.jrate)
            # a = numpy.fft.rfft(block)
            a = weakutil.fft_of_shift(block, -freq_off, self.jrate)
            a = abs(a)
            m.append(a)
        # look for bins that win too often, perhaps b/c they are
        # syncs from higher-frequency JT65 transmissions.
        wins = [ 0 ] * 66
        for pi in range(0,126):
            if pattern[pi] == -1:
                bestj = None
                bestv = None
                sync_bin = self.sync_bin(hza, pi)
                for j in range(sync_bin+2, sync_bin+2+64):
                    if j < len(m[pi]) and (bestj == None or m[pi][j] > bestv):
                        bestj = j
                        bestv = m[pi][j]
                if bestj != None:
                    wins[bestj-sync_bin] += 1
        # zero out bins that win too often. a given symbol
        # (bin) should only appear two or three times in
        # a transmission.
        for j in range(2, 66):
            if wins[j] >= hetero_thresh:
                # zero bin j
                for pi in range(0,126):
                    sync_bin = self.sync_bin(hza, pi)
                    m[pi][sync_bin+j] = 0
        # for each non-sync time slot, decide which tone is strongest,
        # which yields the channel symbol.
        sa = [ ]
        strength = [ ] # symbol signal / mean of bins in same time slot
        sigs = [ ] # for SNR
        for pi in range(0,126):
            if pattern[pi] == -1:
                sync_bin = self.sync_bin(hza, pi)
                a = sorted(list(range(0,64)), key=lambda bin: -m[pi][sync_bin+2+bin])
                sa.append(a[0])
                b0 = sync_bin+2+a[0] # bucket w/ strongest signal
                s0 = m[pi][b0] # level of strongest symbol
                sigs.append(s0)
                # alternative strength metrics below were tried and
                # left disabled; only the mean-based one is active.
                if False:
                    # bucket w/ 2nd-strongest signal
                    b1 = sync_bin+2+a[1]
                    s1 = m[pi][b1] # second-best bin power
                    if s1 != 0.0:
                        strength.append(s0 / s1)
                    else:
                        strength.append(0.0)
                if True:
                    # mean of bins in same time slot
                    s1 = numpy.mean(m[pi][sync_bin+2:sync_bin+2+64])
                    if s1 != 0.0:
                        strength.append(s0 / s1)
                    else:
                        strength.append(0.0)
                if False:
                    # median of bins in same time slot
                    s1 = numpy.median(m[pi][sync_bin+2:sync_bin+2+64])
                    if s1 != 0.0:
                        strength.append(s0 / s1)
                    else:
                        strength.append(0.0)
        [ nerrs, msg, twelve ] = self.process2(sa, strength)
        if nerrs < 0 or broken_msg(msg):
            return None
        # SNR
        sig = numpy.mean(sigs)
        # power rather than voltage.
        rawsnr = (sig*sig) / (noise*noise)
        # the "-1" turns (s+n)/n into s/n
        rawsnr -= 1
        if rawsnr < 0.1:
            rawsnr = 0.1
        rawsnr /= (2500.0 / 2.7) # 2.7 hz noise b/w -> 2500 hz b/w
        snr = 10 * math.log10(rawsnr)
        snr = snr - 63 # empirical, to match wsjt-x 1.7
        if self.verbose and not (msg in already):
            print("%6.1f %5d: %2d %3.0f %s" % ((hza[0]+hza[1])/2.0, start, nerrs, snr, msg))
        already[msg] = True
        return Decode(hza, nerrs, msg, snr, None,
                      start, twelve, time.time())
    # sa[] is 63 channel symbols, each 0..63.
    # it needs to be un-gray-coded, un-interleaved,
    # un-reed-solomoned, &c.
    # strength[] indicates how sure we are about each symbol
    # (ratio of winning FFT bin to second-best bin).
    def process2(self, sa, strength):
        """Turn 63 channel symbols into a message.

        Returns [ nerrs, msg, twelve ]; nerrs < 0 means failure.
        First tries a plain Reed-Solomon decode; if that fails, makes
        soft_iters randomized soft-decision attempts, erasing symbols
        with probability related to their estimated unreliability, and
        keeps the candidate whose re-encoding best matches the input.
        """
        global soft_iters
        # un-gray-code
        for i in range(0, len(sa)):
            sa[i] = weakutil.gray2bin(sa[i], 6)
        # un-interleave
        un = [ 0 ] * 63
        un_strength = [ 0 ] * 63
        for c in range(0, 7):
            for r in range(0, 9):
                un[(r*7)+c] = sa[(c*9)+r]
                un_strength[(r*7)+c] = strength[(c*9)+r]
        sa = un
        strength = un_strength
        [nerrs,twelve] = self.rs_decode(sa, [])
        if nerrs >= 0:
            # successful decode.
            sym0 = twelve[0]
            if numpy.array_equal(twelve, [sym0]*12):
                # a JT69 signal...
                return [-1, "???", None]
            msg = self.unpack(twelve)
            if not broken_msg(msg):
                #self.analyze1(sa, strength, twelve)
                return [nerrs, msg, twelve]
        if True:
            # attempt soft decode
            # at this point we know there must be at least 25
            # errors, since otherwise Reed-Solomon would have
            # decoded.
            # map from strength to probability of incorrectness,
            # from analyze1() and analyze1.py < analyze1
            # this are for strength = sym / (mean of other sym bins in this time slot)
            sm = [ 1.0, 1.0, 0.837, 0.549, 0.318, 0.276, 0.215, 0.171,
                   0.126, 0.099, 0.079, 0.055, 0.041, 0.034, 0.027, 0.020, 0.018, 0.013,
                   0.012, 0.008, 0.022, 0.000, 0.004, 0.014, 0.008, ]
            # map for strongest / second-strongest
            #sm = [ 1.0, 0.4, 0.07, 0.015, 0.01 ]
            # map for strongest / median
            #sm = [ 1.0, 1.0, 0.829, 0.619, 0.379, 0.250, 0.251, 0.193, 0.195, 0.172,
            #       0.154, 0.152, 0.139, 0.125, 0.099, 0.107, 0.105, 0.112, 0.096, 0.086,
            #       0.061, 0.060, 0.059, 0.056, 0.050, 0.047, 0.045, 0.045, 0.027, 0.056,
            #       0.030, 0.028, 0.023, 0.043, 0.058, 0.038, 0.082, 0.031, 0.025, 0.022,
            #       0.025, 0.070, 0.034, 0.052, 0.036, 0.062, 0.028, 0.013, 0.016, 0.032,
            #       0.028, 0.050, 0.024, 0.03, 0.033, 0.03, 0.037, 0.022, 0.015, 0.02,
            #       0.078, 0.035, 0.043, 0.080, 0.020, 0.020, 0.02, 0.050, 0.062, 0.02,
            #       0.021, 0.02, 0.02, 0.02, ]
            # for each symbol time, how likely to be wrong.
            #weights = numpy.divide(1.0, numpy.add(strength, 1.0))
            weights = [ ]
            for i in range(0, len(strength)):
                ss = int(round(strength[i]))
                if ss >= len(sm):
                    weights.append(0.01) # 1% chance of wrong
                else:
                    weights.append(sm[ss])
            total_weight = numpy.sum(weights)
            expected_errors = total_weight
            # weakest first
            worst = sorted(list(range(0, 63)), key = lambda i: strength[i])
            best = None # [ matching_symbols, nerrs, msg, twelve ]
            wa = [ ]
            for si in range(0, 63):
                wa.append([ si, weights[si] ])
            # try various numbers of erasures.
            for iter in range(0, soft_iters):
                # pick an erasure count near the expected error count.
                xmin = max(0, int(expected_errors) - 10)
                xmin = min(xmin, 35)
                xmax = min(int(expected_errors) + 10, 40)
                nera = int((random.random() * (xmax - xmin)) + xmin)
                if True:
                    # randomly erase weak symbols, biased by weight.
                    eras = [ ]
                    for j in range(0, 63):
                        if len(eras) >= nera:
                            break
                        si = worst[j]
                        if random.random() < nera*(weights[si] / total_weight):
                            # rs_decode() has this weird convention for erasures.
                            eras.append(63-1-si)
                if False:
                    eras = wchoice(wa, nera)
                    for j in range(0, len(eras)):
                        eras[j] = 63 - 1 - eras[j]
                [nerrs,twelve] = self.rs_decode(sa, eras)
                if nerrs >= 0:
                    msg = self.unpack(twelve)
                    if broken_msg(msg):
                        continue
                    # re-encode, count symbols that match input to decoder, as score.
                    sy1 = self.rs_encode(twelve)
                    eqv = numpy.equal(sy1, sa)
                    neq = collections.Counter(eqv)[True]
                    if best == None or neq > best[0]:
                        sys.stdout.flush()
                        sys.stderr.flush()
                        if best != None:
                            sys.stdout.write("nerrs=%d neq=%d %s -> " % (best[1], best[0], best[2]))
                        sys.stdout.write("nera=%d nerrs=%d neq=%d %s\n" % (nera, nerrs, neq, msg))
                        sys.stdout.flush()
                        best = [ neq, nerrs, msg, twelve ]
            if best != None:
                sys.stdout.flush()
                return best[1:]
        # Reed Solomon could not decode.
        return [-1, "???", None ]
# we have a good decode.
# record strength vs whether the symbol was OK or not.
# to derive a better mapping from strength to
# probability of correctness.
# feed output into analyze1.py to generate
# mapping from strength to probability of incorrectness,
# which process2() uses.
def analyze1(self, sa, strength, twelve):
# re-encode to find the correct symbols.
sa1 = self.rs_encode(twelve)
f = open("analyze1", "a")
for i in range(0, len(sa)):
if sa[i] == sa1[i]:
ok = 1
else:
ok = 0
f.write("%f %s\n" % (strength[i], ok))
f.close()
# convert packed character to Python string.
# 0..9 a..z space
def charn(self, c):
if c >= 0 and c <= 9:
return chr(ord('0') + c)
if c >= 10 and c < 36:
return chr(ord('A') + c - 10)
if c == 36:
return ' '
# sys.stderr.write("jt65 charn(%d) bad\n" % (c))
return '?'
# x is an integer, e.g. nc1 or nc2, containing all the
# call sign bits from a packed message.
# 28 bits.
def unpackcall(self, x):
a = [ 0, 0, 0, 0, 0, 0 ]
a[5] = self.charn((x % 27) + 10) # + 10 b/c only alpha+space
x = int(x / 27)
a[4] = self.charn((x % 27) + 10)
x = int(x / 27)
a[3] = self.charn((x % 27) + 10)
x = int(x / 27)
a[2] = self.charn(x%10) # digit only
x = int(x / 10)
a[1] = self.charn(x % 36) # letter or digit
x = int(x / 36)
a[0] = self.charn(x)
return ''.join(a)
# extract maidenhead locator
def unpackgrid(self, ng):
if ng == NGBASE+1:
return " "
if ng >= NGBASE+1 and ng < NGBASE+31:
return " -%02d" % (ng - (NGBASE+1)) # sig str, -01 to -30 DB
if ng >= NGBASE+31 and ng < NGBASE+62:
return "R-%02d" % (ng - (NGBASE+31))
if ng == NGBASE+62:
return "RO "
if ng == NGBASE+63:
return "RRR "
if ng == NGBASE+64:
return "73 "
lat = (ng % 180) - 90
ng = int(ng / 180)
lng = (ng * 2) - 180
g = "%c%c%c%c" % (ord('A') + int((179-lng)/20),
ord('A') + int((lat+90)/10),
ord('0') + int(((179-lng)%20)/2),
ord('0') + (lat+90)%10)
#print "lat %d, long %d, %s" % (lat, lng, g)
return g
def unpack(self, a):
# a[] has 12 0..63 symbols, or 72 bits.
# turn them into the original human-readable message.
# unpack([61, 37, 30, 28, 9, 27, 61, 58, 26, 3, 49, 16]) -> "G3LTF DL9KR JO40"
nc1 = 0 # 28 bits of first call
nc1 |= a[4] >> 2 # 4 bits
nc1 |= a[3] << 4 # 6 bits
nc1 |= a[2] << 10 # 6 bits
nc1 |= a[1] << 16 # 6 bits
nc1 |= a[0] << 22 # 6 bits
nc2 = 0 # 28 bits of second call
nc2 |= (a[4] & 3) << 26 # 2 bits
nc2 |= a[5] << 20 # 6 bits
nc2 |= a[6] << 14 # 6 bits
nc2 |= a[7] << 8 # 6 bits
nc2 |= a[8] << 2 # 6 bits
nc2 |= a[9] >> 4 # 2 bits
ng = 0 # 16 bits of grid
ng |= (a[9] & 15) << 12 # 4 bits
ng |= a[10] << 6 # 6 bits
ng |= a[11]
if ng >= 32768:
txt = self.unpacktext(nc1, nc2, ng)
return txt
if nc1 == NBASE+1:
c2 = self.unpackcall(nc2)
grid = self.unpackgrid(ng)
return "CQ %s %s" % (c2, grid)
if nc1 >= 267649090 and nc1 <= 267698374:
# CQ with suffix (e.g. /QRP)
n = nc1 - 267649090
sf = self.charn(n % 37)
n /= 37
sf = self.charn(n % 37) + sf
n /= 37
sf = self.charn(n % 37) + sf
n /= 37
c2 = self.unpackcall(nc2)
grid = self.unpackgrid(ng)
return "CQ %s/%s %s" % (c2, sf, grid)
c1 = self.unpackcall(nc1)
if c1 == "CQ9DX ":
c1 = "CQ DX "
m = re.match(r'^ *E9([A-Z][A-Z]) *$', c1)
if m != None:
c1 = "CQ " + m.group(1)
c2 = self.unpackcall(nc2)
grid = self.unpackgrid(ng)
return "%s %s %s" % (c1, c2, grid)
def unpacktext(self, nc1, nc2, nc3):
c = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ +-./?"
nc3 &= 32767
if (nc1 & 1) != 0:
nc3 += 32768
nc1 >>= 1
if (nc2 & 1) != 0:
nc3 += 65536
nc2 >>= 1
msg = [""] * 22
for i in range(4, -1, -1):
j = nc1 % 42
msg[i] = c[j]
nc1 = nc1 // 42
for i in range(9, 4, -1):
j = nc2 % 42
msg[i] = c[j]
nc2 = nc2 // 42
for i in range(12, 9, -1):
j = nc3 % 42
msg[i] = c[j]
nc3 = nc3 // 42
return ''.join(msg)
# call the Reed-Solomon decoder.
# symbols is 63 integers, the channel symbols after
# un-gray-coding and un-interleaving.
# era is an array of integers indicating which
# symbols are erasures.
# returns 12 original symbols of the packed message,
# or none.
def rs_decode(self, symbols, era):
int63 = c_int * 63
int12 = c_int * 12
recd0 = int63()
for i in range(0, 63):
recd0[i] = symbols[i]
era0 = int63()
numera0 = c_int()
numera0.value = len(era)
for i in range(0, len(era)):
era0[i] = era[i]
decoded = int12()
nerr = c_int()
nerr.value = 0
librs.rs_decode_(recd0, era0, byref(numera0), decoded, byref(nerr))
if nerr.value < 0:
# could not decode
return [-1, None]
a = [ ]
for i in range(0, 12):
a.append(decoded[i])
return [ nerr.value, a ]
# call the Reed-Solomon encoder.
# twelve is 12 6-bit symbol numbers (after packing).
# returns 63 symbols.
def rs_encode(self, twelve):
int63 = c_int * 63
int12 = c_int * 12
tw = int12()
for i in range(0, 12):
tw[i] = twelve[i]
out = int63()
librs.rs_encode_(byref(tw), byref(out))
a = [ ]
for i in range(0, 63):
a.append(out[i])
return a
class JT65Send:
    """Transmit-side JT65A helpers: message packing, Reed-Solomon
    encoding, symbol generation, and FSK audio synthesis.
    """
    def __init__(self):
        pass

    # convert a character into a number; order is
    # 0..9 A..Z space
    def nchar(self, ch):
        """Map '0'-'9', 'A'-'Z', ' ' to JT65 character codes 0..36."""
        if ch >= '0' and ch <= '9':
            return ord(ch) - ord('0')
        if ch >= 'A' and ch <= 'Z':
            return ord(ch) - ord('A') + 10
        if ch == ' ':
            return 36
        print("NT65Send.nchar(%s) oops" % (ch))
        return 0

    # returns a 28-bit number.
    # we need call to be:
    #   lds lds d ls ls ls
    # l-etter, d-igit, s-pace
    # 28-bit number's high bits correspond to first call sign character.
    def packcall(self, call):
        """Pack a call sign into 28 bits; -1 if it doesn't fit the pattern."""
        call = call.strip()
        call = call.upper()
        # special pseudo-calls with fixed encodings.
        if call == "CQ":
            return NBASE + 1
        if call == "QRZ":
            return NBASE + 2
        if call == "DE":
            return 267796945
        # calls like "K1JT" get a leading space so the digit lands
        # in the third position.
        if len(call) > 2 and len(call) < 6 and not call[2].isdigit():
            call = " " + call
        while len(call) < 6:
            call = call + " "
        if re.search(r'^[A-Z0-9 ][A-Z0-9 ][0-9][A-Z ][A-Z ][A-Z ]$', call) == None:
            return -1
        # mixed-radix packing, most significant character first.
        x = 0
        x += self.nchar(call[0])
        x *= 36
        x += self.nchar(call[1])
        x *= 10
        x += self.nchar(call[2])
        x *= 27
        x += self.nchar(call[3]) - 10
        x *= 27
        x += self.nchar(call[4]) - 10
        x *= 27
        x += self.nchar(call[5]) - 10
        return x

    # returns 16-bit number.
    # g is maidenhead grid, or signal strength, or 73.
    def packgrid(self, g):
        """Pack a grid/report/73 string into 16 bits; -1 on bad input."""
        g = g.strip()
        g = g.upper()
        if g[0] == '-':
            return NGBASE + 1 + int(g[1:])
        if g[0:2] == 'R-':
            return NGBASE + 31 + int(g[2:])
        if g == "RO":
            return NGBASE + 62
        if g == "RRR":
            return NGBASE + 63
        if g == "73":
            return NGBASE+64
        if re.match(r'^[A-R][A-R][0-9][0-9]$', g) == None:
            return -1
        lng = (ord(g[0]) - ord('A')) * 20
        lng += (ord(g[2]) - ord('0')) * 2
        lng = 179 - lng
        lat = (ord(g[1]) - ord('A')) * 10
        lat += (ord(g[3]) - ord('0')) * 1
        lat -= 90
        # floor division: lng is always odd here, so "/" would yield a
        # float, and pack3() then fails shifting a non-integer.
        x = (lng + 180) // 2
        x *= 180
        x += lat + 90
        return x

    # turn three numbers into 12 6-bit symbols.
    def pack3(self, nc1, nc2, g):
        """Distribute nc1 (28 bits), nc2 (28 bits), g (16 bits) over
        twelve 6-bit symbols."""
        a = [0] * 12
        a[0] = (nc1 >> 22) & 0x3f
        a[1] = (nc1 >> 16) & 0x3f
        a[2] = (nc1 >> 10) & 0x3f
        a[3] = (nc1 >> 4) & 0x3f
        a[4] = ((nc1 & 0xf) << 2) | ((nc2 >> 26) & 0x3)
        a[5] = (nc2 >> 20) & 0x3f
        a[6] = (nc2 >> 14) & 0x3f
        a[7] = (nc2 >> 8) & 0x3f
        a[8] = (nc2 >> 2) & 0x3f
        a[9] = ((nc2 & 0x3) << 4) | ((g >> 12) & 0xf)
        a[10] = (g >> 6) & 0x3f
        a[11] = (g >> 0) & 0x3f
        return a

    def pack(self, msg):
        """Pack "CALL CALL GRID" text into 12 six-bit symbols.

        Free text is not implemented; unparsable input yields [0]*12.
        """
        msg = msg.strip()
        # collapse runs of spaces to one.  r' *' also matched the empty
        # string between every pair of characters, which inserted
        # spaces everywhere and broke the split below.
        msg = re.sub(r' +', ' ', msg)
        msg = re.sub(r'^CQ DX ', 'CQ9DX ', msg)
        # try CALL CALL GRID
        a = msg.split(' ')
        if len(a) == 3:
            nc1 = self.packcall(a[0])
            nc2 = self.packcall(a[1])
            g = self.packgrid(a[2])
            if nc1 >= 0 and nc2 >= 0 and g >= 0:
                return self.pack3(nc1, nc2, g)
        # never finished this -- no text &c.
        sys.stderr.write("JT65Send.pack(%s) -- cannot parse\n" % (msg))
        # sys.exit(1)
        return [0] * 12

    def testpack(self):
        """Round-trip self-test of pack/unpack against JT65's unpackers."""
        r = JT65()
        for g in [ "FN42", "-22", "R-01", "RO", "RRR", "73", "AA00", "RR99" ]:
            pg = self.packgrid(g)
            upg = r.unpackgrid(pg)
            if g != upg.strip():
                print("packgrid oops %s" % (g))
        for call in [ "AB1HL", "K1JT", "M0TRJ", "KK4BMV", "2E0CIN", "HF9D",
                      "6Y4K", "D4Z", "8P6DR", "ZS2I", "3D2RJ",
                      "WB3D", "S59GCD", "T77C", "4Z5AD", "A45XR", "OJ0V",
                      "6Y6N", "S57V", "3Z0R" ]:
            # XXX 3XY1T doesn't work
            pc = self.packcall(call)
            upc = r.unpackcall(pc)
            if call != upc.strip():
                print("packcall oops %s %d %s" % (call, pc, upc))
        for msg in [ "AB1HL K1JT FN42", "CQ DX CO3HMR EL82", "KD6HWI PY7VI R-12",
                     "KD5RBW TU 73", "CQ N5OSK EM25", "PD9BG KG7EZ RRR",
                     "W1JET KE0HQZ 73", "WB3D OM4SX -16", "WA3ETR IZ2QGB RR73",
                     "BG THX JOE 73"]:
            pm = self.pack(msg)
            upm = r.unpack(pm)
            # one-or-more spaces; see the note in pack().
            upm = re.sub(r' +', ' ', upm)
            if msg != upm.strip():
                print("pack oops %s %s %s" % (msg, pm, upm))
        for bf in bfiles:
            wsa = bf[1].split("\n")
            for wsx in wsa:
                wsx = wsx.strip()
                m = re.search(r'# (.*)', wsx)
                if m != None:
                    msg = m.group(1)
                    pm = self.pack(msg)
                    upm = r.unpack(pm)
                    upm = re.sub(r' +', ' ', upm)
                    if msg != upm.strip():
                        print("pack oops %s %s %s" % (msg, pm, upm))

    # call the Reed-Solomon encoder.
    # twelve is 12 6-bit symbol numbers (after packing).
    # returns 63 symbols.
    def rs_encode(self, twelve):
        """Encode 12 packed symbols into 63 RS(63,12) channel symbols."""
        int63 = c_int * 63
        int12 = c_int * 12
        tw = int12()
        for i in range(0, 12):
            tw[i] = twelve[i]
        out = int63()
        librs.rs_encode_(byref(tw), byref(out))
        a = [ ]
        for i in range(0, 63):
            a.append(out[i])
        return a

    def sync_hz(self, hza, sym):
        """Linearly interpolated sync-tone frequency at symbol sym."""
        hz = hza[0] + (hza[1] - hza[0]) * (sym / 126.0)
        return hz

    # ba should be 126 symbols, each 0..66.
    # hza is [start,end] frequency,
    # as from guess_freq().
    # spacing is inter-symbol frequency spacing.
    def fsk(self, ba, hza, spacing, rate, symsamples):
        """Synthesize phase-continuous FSK audio for the 126 symbols."""
        # the frequency needed at each sample.
        hzv = numpy.array([])
        for bi in range(0, len(ba)):
            base = self.sync_hz(hza, bi)
            fr = base + (ba[bi] * spacing)
            block = numpy.repeat(fr, symsamples)
            hzv = numpy.append(hzv, block)
        # cumulative angle keeps the phase continuous across symbols.
        angles = numpy.cumsum(2.0 * math.pi / (float(rate) / hzv))
        a = numpy.sin(angles)
        return a

    # twelve[] is 12 6-bit symbols to send.
    # returns an array of 126 symbol numbers, each 0..66,
    # including sync tones.
    def symbols(self, twelve):
        """RS-encode, interleave, gray-code, and merge in the sync pattern."""
        # Reed-Solomon -> 63 symbols
        enc = self.rs_encode(twelve)
        # interleave
        inter = [ 0 ] * 63
        for c in range(0, 7):
            for r in range(0, 9):
                inter[(c*9)+r] = enc[(r*7)+c]
        # gray-code
        gray = [ weakutil.bin2gray(x, 6) for x in inter ]
        # sync pattern -> 126 "symbols", each 0..66
        synced = [ 0 ] * 126
        i = 0
        for j in range(0, 126):
            if pattern[j] == 1:
                synced[j] = 0
            else:
                synced[j] = gray[i] + 2
                i += 1
        return synced

    # twelve[] is 12 6-bit symbols to send.
    # tone is Hz of sync tone.
    # returns an array of audio samples.
    def tones(self, twelve, tone, rate):
        """Synthesize one transmission's audio at the given sample rate."""
        synced = self.symbols(twelve)
        # 4096 samples per symbol at the native 11025 Hz rate.
        samples_per_symbol = int(round(rate * (4096 / 11025.0)))
        samples = self.fsk(synced, [tone, tone], 2.6918, rate, samples_per_symbol)
        return samples

    def testsend(self):
        """Synthesize four overlapping transmissions plus noise and
        run the decoder on the mixture."""
        random.seed(0) # XXX determinism
        # G3LTF DL9KR JO40
        x1 = self.tones([61, 37, 30, 28, 9, 27, 61, 58, 26, 3, 49, 16], 1000, 11025)
        x1 = numpy.concatenate(([0]*1, x1, [0]*(8192-1) ))
        #rv = numpy.concatenate( [ [random.random()]*4096 for i in range(0, 128) ] )
        #x1 = x1 * rv
        # RA3Y VE3NLS 73
        x2 = self.tones([46, 6, 32, 22, 55, 20, 11, 32, 53, 23, 59, 16], 1050, 11025)
        x2 = numpy.concatenate(([0]*4096, x2, [0]*(8192-4096) ))
        #rv = numpy.concatenate( [ [random.random()]*4096 for i in range(0, 128) ] )
        #x2 = x2 * rv
        # CQ DL7ACA JO40
        x3 = self.tones([62, 32, 32, 49, 37, 27, 59, 2, 30, 19, 49, 16], 1100, 11025)
        x3 = numpy.concatenate(([0]*5120, x3, [0]*(8192-5120) ))
        #rv = numpy.concatenate( [ [random.random()]*4096 for i in range(0, 128) ] )
        #x3 = x3 * rv
        # VA3UG F1HMR 73
        x4 = self.tones([52, 54, 60, 12, 55, 54, 7, 19, 2, 23, 59, 16], 1150, 11025)
        x4 = numpy.concatenate(([0]*1, x4, [0]*(8192-1) ))
        #rv = numpy.concatenate( [ [random.random()]*4096 for i in range(0, 128) ] )
        #x4 = x4 * rv
        x = 3*x1 + 2*x2 + 1.0*x3 + 0.5*x4
        x += numpy.random.rand(len(x)) * 1.0
        x *= 1000.0
        x = numpy.append(x, [0]*(12*11025))
        r = JT65()
        r.cardrate = 11025.0
        # NOTE(review): gotsamples() is defined elsewhere in this file,
        # and self.samples here belongs to JT65Send, not to r -- confirm
        # this test path still works (r.samples may be intended).
        r.gotsamples(x)
        r.process(self.samples)

    def send(self, msg):
        # transmission is not implemented: msg is ignored and the
        # synthesis self-test runs instead.
        self.testsend()
def usage():
    """Print command-line help to stderr, list sound cards, and exit."""
    lines = [
        "Usage: jt65.py -in CARD:CHAN [-center xxx]\n",
        " jt65.py -file fff [-center xxx] [-chan xxx]\n",
        " jt65.py -bench dir/decodes.txt\n",
        " jt65.py -send msg\n",
    ]
    for ln in lines:
        sys.stderr.write(ln)
    # list sound cards
    weakaudio.usage()
    sys.exit(1)
# manual test snippets, disabled; flip one to True to run it.
if False:
    # quick unpack() sanity checks against known symbol vectors.
    r = JT65()
    print(r.unpack([61, 37, 30, 28, 9, 27, 61, 58, 26, 3, 49, 16])) # G3LTF DL9KR JO40
    print(r.unpack([61, 37, 30, 28, 5, 27, 61, 58, 26, 3, 49, 16])) # G3LTE DL9KR JO40
    print(r.unpack([61, 37, 30, 28, 9, 27, 61, 58, 26, 3, 49, 17])) # G3LTF DL9KR JO41
    sys.exit(0)
if False:
    # process2() on a known set of received channel symbols.
    r = JT65()
    # G3LTF DL9KR JO40
    print(r.process2([
        14, 16, 9, 18, 4, 60, 41, 18, 22, 63, 43, 5, 30, 13, 15, 9, 25, 35, 50, 21, 0,
        36, 17, 42, 33, 35, 39, 22, 25, 39, 46, 3, 47, 39, 55, 23, 61, 25, 58, 47, 16, 38,
        39, 17, 2, 36, 4, 56, 5, 16, 15, 55, 18, 41, 7, 26, 51, 17, 18, 49, 10, 13, 24
    ], None))
    sys.exit(0)
if False:
    # end-to-end: synthesize a signal, corrupt some symbols, decode it.
    s = JT65Send()
    # G3LTF DL9KR JO40
    x = s.tones([61, 37, 30, 28, 9, 27, 61, 58, 26, 3, 49, 16], 1000, 11025)
    # inject some bad symbols
    # note x[] has sync in it.
    # 1 2 5 6 7 14 16 18 19 20
    n = 28
    for pi in range(0, len(pattern)):
        if pattern[pi] < 0 and n > 0:
            #x[si*4096:(si+1)*4096] = numpy.random.random(4096)
            x = numpy.concatenate((x[0:pi*4096], numpy.random.random(4096), x[(pi+1)*4096:]))
            n -= 1
    r = JT65()
    r.cardrate = 11025.0
    r.verbose = True
    r.gotsamples(x)
    r.process(r.samples, 0)
    sys.exit(0)
def benchmark1(dir, bfiles, verbose):
    """Decode a set of benchmark .wav files and score against wsjt-x.

    dir: directory containing the .wav files.
    bfiles: list of [enabled, filename, wsjt-x-output-text] entries.
    verbose: print a per-message yes/no line as each is checked.
    Returns [score, wanted]: how many wsjt-x decodes we reproduced, and
    how many wsjt-x reported in total.
    """
    global chan
    chan = 0
    score = 0   # how many wsjt-x decodes we also produced
    wanted = 0  # how many decodes wsjt-x reported
    for bf in bfiles:
        if not bf[0]:
            continue
        if verbose:
            print(bf[1])
        wsa = bf[2].split("\n")
        filename = dir + "/" + bf[1]
        r = JT65()
        r.verbose = False
        r.gowav(filename, chan)
        # our decodes for this file; renamed from `all`, which shadowed
        # the builtin. each msg has minute, hz(), nerrs, snr, msg.
        decodes = r.get_msgs()
        got = { }  # messages wsjt-x also saw, keyed by msg text
        for wsx in wsa:
            wsx = wsx.strip()
            m = re.search(r'# (.*)', wsx)
            if m is not None:
                wanted += 1
                # compare space-insensitively
                wsmsg = m.group(1).replace(" ", "")
                found = False
                for x in decodes:
                    if x.msg.replace(" ", "") == wsmsg:
                        found = True
                        got[x.msg] = True
                if found:
                    score += 1
                    if verbose:
                        print("yes %s" % (m.group(1)))
                else:
                    if verbose:
                        print("no %s" % (m.group(1)))
            sys.stdout.flush()
        if verbose:
            # strong decodes of ours that wsjt-x did not report
            for x in decodes:
                if x.nerrs < 25 and not (x.msg in got):
                    print("EXTRA: %6.1f %d %.0f %s" % (x.hz(), x.nerrs, x.snr, x.msg))
    if verbose:
        print("score %d of %d" % (score, wanted))
    return [ score, wanted ]
# given a file with wsjt-x 1.6.0 results, sitting in a directory
# fill of the corresponding .wav files, do our own decoding and
# compare results with wsjt-x.
# e.g. benchmark("nov/wsjt.txt") or benchmark("jt65files/big.txt").
# wsjt-x output is cut-and-paste from wsjt-x decode display.
# 2211 -1 -0.3 712 # S56IZW WB8CQV R-13
# 2211 -12 0.0 987 # VE2NCG K8GLC EM88
# wav file names look like 161122_2211.wav
def benchmark(wsjtfile, verbose):
    """Compare our decoder against wsjt-x 1.6.0 on a directory of .wav files.

    wsjtfile: text file of wsjt-x decodes, cut-and-paste from its decode
    window, e.g.
        2211  -1 -0.3  712 # S56IZW WB8CQV R-13
    The corresponding .wav files (named like 161122_2211.wav) must sit in
    the same directory as wsjtfile.
    Returns [score, wanted] from benchmark1().
    """
    wavdir = os.path.dirname(wsjtfile)
    minutes = { }  # hhmm -> concatenated wsjt-x output lines for that minute
    # `with` guarantees the file is closed even if a line fails to parse.
    with open(wsjtfile, "r") as wsjtf:
        for line in wsjtf:
            line = re.sub(r'\xA0', ' ', line)  # 0xA0 -> space
            line = re.sub(r'[\r\n]', '', line)
            m = re.match(r'^([0-9]{4}) +[0-9.-]+ +[0-9.-]+ +[0-9]+ +# *(.*)$', line)
            if m is None:
                print("oops: " + line)
                continue
            hhmm = m.group(1)
            if hhmm not in minutes:
                minutes[hhmm] = ""
            minutes[hhmm] += line + "\n"
    info = [ ]
    for hhmm in sorted(minutes.keys()):
        # find the one .wav whose name ends with this minute
        ff = [ x for x in os.listdir(wavdir) if re.match('......_' + hhmm + '.wav', x) is not None ]
        if len(ff) == 1:
            info.append([ True, ff[0], minutes[hhmm] ])
        elif len(ff) == 0:
            sys.stderr.write("could not find .wav file in %s for %s\n" % (wavdir, hhmm))
        else:
            sys.stderr.write("multiple files in %s for %s: %s\n" % (wavdir, hhmm, ff))
    return benchmark1(wavdir, info, verbose)
def optimize(wsjtfile):
    """One-dimensional parameter sweep: for each tunable below, try each
    candidate value (all other tunables at their defaults) and print the
    benchmark score, so a human can pick better defaults."""
    # [ name-to-eval, candidate values ]; dotted names are attributes of
    # another module, bare names are globals of this file.
    vars = [
        # [ "weakutil.use_numpy_rfft", [ False, True ] ],
        # [ "weakutil.use_numpy_arfft", [ False, True ] ],
        # [ "weakutil.fos_threshold", [ 0.125, 0.25, 0.5, 0.75, 1.0 ] ],
        [ "subslop", [ 0.005, 0.01, 0.02, 0.04, 0.08, 0.16 ] ],
        [ "noffs", [ 1, 2, 3, 4, 6, 8, 10, 12 ] ],
        [ "pass1_frac", [ 0.05, 0.1, 0.2, 0.3, 0.4, 0.5 ] ],
        [ "soft_iters", [ 0, 25, 50, 75, 100, 200, 400, 800 ] ],
        [ "subgap", [ 0.3, 0.7, 1.0, 1.3, 1.6, 2.0, 2.3, 2.6 ] ],
        [ "budget", [ 6, 9, 15 ] ],
        [ "hetero_thresh", [ 4, 6, 8, 10 ] ],
        [ "off_scores", [ 1, 2, 4 ] ],
    ]
    # print the default value of every tunable first
    sys.stdout.write("# ")
    for v in vars:
        sys.stdout.write("%s=%s " % (v[0], eval(v[0])))
    sys.stdout.write("\n")
    # warm up any caches, JIT, &c.
    r = JT65()
    r.verbose = False
    r.gowav("jt65files/j7.wav", 0)
    for v in vars:
        for val in v[1]:
            old = None
            # bare names need a `global` declaration inside the exec'd
            # statement so the assignment hits module scope.
            if "." in v[0]:
                xglob = ""
            else:
                xglob = "global %s ; " % (v[0])
            # save the current value, install the candidate, benchmark,
            # then restore.
            # NOTE(review): under Python 3, exec() cannot rebind the
            # enclosing function's local `old`, so this save/restore may
            # not behave as intended -- confirm on the target Python.
            exec("%sold = %s" % (xglob, v[0]))
            exec("%s%s = %s" % (xglob, v[0], val))
            #sys.stdout.write("# ")
            #for vx in vars:
            #    sys.stdout.write("%s=%s " % (vx[0], eval(vx[0])))
            #sys.stdout.write("\n")
            sc = benchmark(wsjtfile, False)
            exec("%s%s = old" % (xglob, v[0]))
            sys.stdout.write("%s=%s : " % (v[0], val))
            sys.stdout.write("%d\n" % (sc[0]))
            sys.stdout.flush()
def main():
    """Parse command-line options and dispatch to send / benchmark /
    optimize / file-decode / live-card-decode modes."""
    # gc.set_debug(gc.DEBUG_STATS)
    # raise the GC thresholds: decoding allocates heavily and the default
    # collection frequency costs real time.
    thv = gc.get_threshold()
    gc.set_threshold(10*thv[0], 10*thv[1], 10*thv[2])
    filenames = [ ]
    desc = None
    bench = None
    opt = None
    send_msg = None
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        # every recognized option takes exactly one value; previously a
        # trailing option with no value raised IndexError instead of
        # printing usage.
        if arg in ("-in", "-file", "-bench", "-opt", "-send") and i + 1 >= len(sys.argv):
            usage()
        if arg == "-in":
            desc = sys.argv[i+1]
        elif arg == "-file":
            filenames.append(sys.argv[i+1])
        elif arg == "-bench":
            bench = sys.argv[i+1]
        elif arg == "-opt":
            opt = sys.argv[i+1]
        elif arg == "-send":
            send_msg = sys.argv[i+1]
        else:
            usage()
        i += 2
    if send_msg is not None:
        js = JT65Send()
        js.send(send_msg)
        sys.exit(0)
    if bench is not None:
        benchmark(bench, True)
        sys.exit(0)
    if opt is not None:
        optimize(opt)
        sys.exit(0)
    if len(filenames) > 0 and desc is None:
        # decode one or more recorded files
        r = JT65()
        r.verbose = True
        for filename in filenames:
            r.gowav(filename, 0)
    elif len(filenames) == 0 and desc is not None:
        # decode live from a sound card
        r = JT65()
        r.verbose = True
        r.opencard(desc)
        r.gocard()
    else:
        usage()
# Module-level FFT configuration, executed on import.
weakutil.which_fft = "numpy" # XXX mysteriously, fftw doesn't work for jt65
weakutil.init_fft([2048])
if __name__ == '__main__':
    if False:
        # flip to True to run under cProfile and dump caller statistics
        pfile = "cprof.out"
        sys.stderr.write("jt65: cProfile -> %s\n" % (pfile))
        import cProfile
        import pstats
        cProfile.run('main()', pfile)
        p = pstats.Stats(pfile)
        p.strip_dirs().sort_stats('time')
        # p.print_stats(10)
        p.print_callers()
    else:
        main()
|
[
"rtm@csail.mit.edu"
] |
rtm@csail.mit.edu
|
23b2e637ae86701c527f04da0037fb4b2d80e3d3
|
e9b8923a4d270a2670d75d13ce10bf90057022b2
|
/main.py
|
a574908dc7d6b853f0d52361ef0473906e0f5ddb
|
[] |
no_license
|
jmash/kivycalc
|
12d7b138e48412a6361138a6fe0603eab54e28b5
|
272a24b5951b448aa36690a2ff45346cfcee4834
|
refs/heads/master
| 2021-01-02T14:28:57.292799
| 2020-02-11T02:50:31
| 2020-02-11T02:50:31
| 239,662,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,913
|
py
|
import kivy # gotta kivy if you wanna shivvy
import re # regular expressions for determining input
import math # for sqrt
kivy.require('1.9.1') # necessary for _reasons_
from kivy.uix.screenmanager import Screen # the App is loaded onto the screen widget
from kivy.properties import ObjectProperty
# the ObjectProperty lets us link the CalcDisplay widget to the MainScreen in the kv file.
# it's set to None so that it just takes whatever type is assigned to it, although it could
# be specified explicity beforehand. it doesn't really matter here so I just leave it.
# there are other kinds of properties (like number, string, list, etc) that can be used
# to link values from child widgets to their parents.
from kivy.app import App # gotta app if you wanna rap
from kivy.uix.button import Button
# Can he withstand the temptation ...to push the button that, even now,
# beckons him closer? Will he succumb to the maddening urge to eradicate history?
# At the MERE PUSH of a SINGLE BUTTON! The beeyootiful shiny button!
# The jolly candy-like button! Will he hold out, folks? CAN he hold out?
from kivy.uix.textinput import TextInput
# What's funny is that I'm not even using this for its intended purpose.
# the CalcDisplay is the disabled TextInput at the top of the calculator that shows
# the results of the calcuations. this is an old-school calculator where you can ONLY
# press the buttons, as God intended.
class CalcDisplay(TextInput):
    """Read-only display widget for the calculator.

    Simple two-operand state machine: primaryBuffer holds the first
    operand, secondaryBuffer the second, opBuffer the pending Python
    operator, and bufferFlag is False while typing the first operand and
    True once an operator has been chosen.
    """
    # Precompiled patterns classifying each button's face text.
    numREComp = re.compile(r"\d")
    CEREComp = re.compile(r"CE")
    opsREComp = re.compile(r"\÷|\×|\+|\-|\%|x\^y")
    sqrtREComp = re.compile(r"sqrt")
    decREComp = re.compile(r"\.")
    eqREComp = re.compile(r"\=")
    # the primary buffer (the first thing you enter, before choosing an operation)
    primaryBuffer = "0"
    # the secondary buffer (the second thing you enter, after choosing an operation)
    secondaryBuffer = "0"
    # the operation of choice (as a Python operator string)
    opBuffer = ""
    # whether or not an operation is in the pipe
    bufferFlag = False

    def __init__(self, **kwargs):
        # super() call is required so kivy Property linking works.
        super(CalcDisplay, self).__init__(**kwargs)
        # show the (zeroed) primary buffer when the widget first appears
        self.text = self.primaryBuffer

    def press_button(self, buttonInput):
        """Handle one calculator button press; event hookup is in the kv file.

        buttonInput is the button's face text (digit, operator, 'CE',
        'sqrt', '.', or '=').
        """
        # digits: replace a lone "0", otherwise append to the active buffer
        if(self.numREComp.match(buttonInput)):
            if(self.bufferFlag == False):
                if(self.primaryBuffer == "0"):
                    self.primaryBuffer = buttonInput
                else:
                    self.primaryBuffer += buttonInput
            else:
                if(self.secondaryBuffer == "0"):
                    self.secondaryBuffer = buttonInput
                else:
                    self.secondaryBuffer += buttonInput
        # decimal point: not implemented (its button is styled disabled in
        # the kv file)
        if(self.decREComp.match(buttonInput)):
            pass
        # CE clears the buffers and pending operator.
        # NOTE(review): bufferFlag is deliberately not reset here (matches
        # the original behavior) -- confirm whether that is intended.
        if(self.CEREComp.match(buttonInput)):
            self.primaryBuffer = "0"
            self.secondaryBuffer = "0"
            self.opBuffer = ""
        # operators: translate face symbols to Python operators and
        # switch input to the secondary buffer
        if(self.opsREComp.match(buttonInput)):
            if(self.bufferFlag == False):
                if(buttonInput == "÷"):
                    self.opBuffer = "/"
                elif(buttonInput == "×"):
                    self.opBuffer = "*"
                elif(buttonInput == "x^y"):
                    self.opBuffer = "**"
                else:
                    self.opBuffer = buttonInput
                self.bufferFlag = True
        # sqrt replaces the active buffer with its square root.
        # bug fix: use float(), not int() -- a previous result such as
        # "3.0" crashed int(), so pressing sqrt twice in a row failed.
        if(self.sqrtREComp.match(buttonInput)):
            if(self.bufferFlag == False):
                self.primaryBuffer = str(math.sqrt(float(self.primaryBuffer)))
            else:
                self.secondaryBuffer = str(math.sqrt(float(self.secondaryBuffer)))
        # '=' joins the buffers into a Python expression and evaluates it.
        # eval() is acceptable here because the operands are built only
        # from calculator button faces, never free-form user text.
        if(self.eqREComp.match(buttonInput)):
            if(self.bufferFlag == True):
                fullBuffer = self.primaryBuffer + self.opBuffer + self.secondaryBuffer
                result = str(eval(fullBuffer))
                self.bufferFlag = False
                self.primaryBuffer = result
                self.secondaryBuffer = "0"
                self.opBuffer = ""
        # finally, reflect whichever buffer is active in the display
        if(self.bufferFlag == False):
            self.text = self.primaryBuffer
        else:
            self.text = self.secondaryBuffer
# defines a CalcButton. not technically necessary but useful for
# the kv file, where it's now clear where the calculator buttons are
# supposed to be
class CalcButton(Button):
    # plain Button subclass; exists only so the kv file can style and
    # select calculator buttons as a distinct widget class
    pass
# the MainScreen is a Screen widget, which is allowed to contain layout
# widgets (like BoxLayout, which is the type I'm using)
class MainScreen(Screen):
    # bound to the CalcDisplay child in the kv file via ObjectProperty;
    # None means "take whatever type gets assigned"
    cDisplay = ObjectProperty(None)
# Application root: Kivy calls build() once to obtain the window's root
# widget, and run() (inherited from App) starts the event loop.
class KivyCalcApp(App):
    """Top-level Kivy application for the calculator."""

    def build(self):
        # construct and hand back the initial screen
        return MainScreen()
# standard script entry point: start the Kivy event loop
if __name__ == '__main__':
    KivyCalcApp().run()
|
[
"jaredmashcraft@gmail.com"
] |
jaredmashcraft@gmail.com
|
1c162fd2e9a8262027bb99cdd87c22081f60188b
|
203da3abd0f3d3c6191390cb3d8025fb5d512c62
|
/app.py
|
41b7848f3e2b7d543da7fdd4db20e0849ffa09c4
|
[
"MIT"
] |
permissive
|
Sh4yy/CherryAuth
|
dc34898b1333ee49c928589874ade01f12df536b
|
5787f18cc388f32560c7acf729ab7001abc929d4
|
refs/heads/master
| 2022-12-12T12:33:24.743788
| 2020-02-16T02:19:51
| 2020-02-16T02:19:51
| 194,908,331
| 2
| 0
|
MIT
| 2022-12-08T05:50:29
| 2019-07-02T17:37:12
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
from sanic import Sanic
from Routes import bp
from models import *
def create_db():
    """
    initialize and create db models
    :return: True on success
    """
    models = [User, Session, Credentials]
    db.create_tables(models)
    return True
def create_app():
    """
    initialize the web server
    :return: app on success
    """
    application = Sanic(__name__)
    # register the route blueprint before returning the server
    application.blueprint(bp)
    return application
def create_secret():
    """
    create jwt secret if doesnt exist
    :return: True if created
    """
    try:
        JWT.gen_secret()
    except Exception:
        # best-effort: report failure instead of propagating
        return False
    else:
        return True
|
[
"shayan@umd.edu"
] |
shayan@umd.edu
|
0d5901e78e5132eccc386099896af3dc15a658c2
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/requests_html__examples/ru_sefon_cc__news.py
|
13bbf32d02bbb6880bb19dd90d69147c6ef4a4c9
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164
| 2021-06-11T22:59:07
| 2021-06-11T22:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install requests-html
from requests_html import HTMLSession
# Demo: print the direct mp3 preview URLs from ru.sefon.cc's news page.
session = HTMLSession()
rs = session.get('https://ru.sefon.cc/news/')
rs.html.render()  # without this, the page's js code is not executed
# each track row exposes its mp3 link in a data-url attribute
for a in rs.html.find('.mp3 > .btns > a[data-url]'):
    print(a.attrs['data-url'])
# https://cdn5.sefon.pro/files/prev/193/Sontry%20-%20%D0%94%D0%B8%D1%81%D0%B1%D0%B0%D0%BB%D0%B0%D0%BD%D1%81%20%28192kbps%29.mp3
# https://cdn5.sefon.pro/files/prev/193/Orlando%20-%20%D0%9D%D0%B5%20%D0%A1%D1%82%D0%B5%D1%81%D0%BD%D1%8F%D0%B9%D1%81%D1%8F%20%28192kbps%29.mp3
# https://cdn8.sefon.pro/files/prev/193/%D0%A1%D0%B0%D1%88%D0%B0%20%D0%94%D0%B6%D0%B0%D0%B7%20-%20%D0%9F%D1%8C%D1%8F%D0%BD%D1%8B%D0%B9%20%D0%9F%D0%BE%20%D0%94%D0%B2%D0%BE%D1%80%D0%B0%D0%BC%20%28192kbps%29.mp3
# https://cdn1.sefon.pro/files/prev/193/%D0%9E%D0%BA%D1%81%D0%B0%D0%BD%D0%B0%20%D0%9A%D0%BE%D0%B2%D0%B0%D0%BB%D0%B5%D0%B2%D1%81%D0%BA%D0%B0%D1%8F%20%26%20Andery%20Toronto%20-%20%D0%94%D0%B5%D0%B2%D0%BE%D1%87%D0%BA%D0%B0%20%D0%9D%D0%B5%20%D0%9F%D0%BB%D0%B0%D1%87%D1%8C%20%28192kbps%29.mp3
# https://cdn2.sefon.pro/files/prev/193/%D0%9F%D0%B0%D1%88%D0%B0%20Proorok%20-%20%D0%9A%20%D0%9D%D0%B5%D0%B1%D0%B5%D1%81%D0%B0%D0%BC%20%28192kbps%29.mp3
# ...
# https://cdn5.sefon.pro/files/prev/193/%D0%90%D1%80%D0%B8%D1%82%D0%BC%D0%B8%D1%8F%20feat.%20Lazy%20Cat%20-%20%D0%9A%D0%BE%D1%81%D0%BC%D0%BE%D1%81%20%28192kbps%29.mp3
# https://cdn3.sefon.pro/files/prev/193/Delaney%20Jane%20-%20Want%20You%20Now%20%28192kbps%29.mp3
# https://cdn8.sefon.pro/files/prev/193/%D0%9B%D0%B5%D0%B2%D0%B0%D0%BD%20%D0%93%D0%BE%D1%80%D0%BE%D0%B7%D0%B8%D1%8F%20-%20%D0%9F%D0%B0%D1%80%D0%BA%20%D0%93%D0%BE%D1%80%D1%8C%D0%BA%D0%BE%D0%B3%D0%BE%20%28192kbps%29.mp3
# https://cdn6.sefon.pro/files/prev/193/Carlie%20Hanson%20-%20Good%20Enough%20%28192kbps%29.mp3
|
[
"ilya.petrash@inbox.ru"
] |
ilya.petrash@inbox.ru
|
0fd47bd134edd98708784732dfea0ce0b7f45262
|
5e0e4a16f2a55dac38ae7583a044afe13240d04f
|
/authentication/urls.py
|
31fb936db3da16c906c4bc7e03d2d327daf0aeeb
|
[] |
no_license
|
murgesh2000/expenseswebsite
|
4217f806461d5c59b6af7741889a392c14e42bd0
|
62fcc3547da5631abe281b4982c7e8fc152f6029
|
refs/heads/main
| 2023-03-01T12:29:51.312131
| 2021-01-27T06:08:44
| 2021-01-27T06:08:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
from django.urls import path
from .views import RegistrationView, UsernameValidationView
# authentication URL patterns: user registration plus the AJAX
# username-availability check used by the registration form
urlpatterns = [
    path('register', RegistrationView.as_view(), name="register"),
    path('validate-username', UsernameValidationView.as_view(), name="validate-username")
]
|
[
"nikhilngowda18@gmail.com"
] |
nikhilngowda18@gmail.com
|
a132614def95032aef90db94e0854951c33fcf2f
|
31b1fb0b9e610c63a4a81f6db6b211d586116f62
|
/test/runtime/operators_test/sum_test.py
|
1ebc77a74e5d372484060f8b03fc5153f7a6f62a
|
[
"Zlib",
"MIT"
] |
permissive
|
LabBros/webdnn
|
aa3103425c87e742dae260a808ecd392197571ad
|
a91e28f11a253e55a3b582af0fb924ddd925cef0
|
refs/heads/master
| 2021-08-20T08:59:21.102962
| 2017-11-28T17:15:35
| 2017-11-28T17:15:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.axis import Axis
from webdnn.graph.graph import Graph
from webdnn.graph.operators.sum import Sum
from webdnn.graph.order import OrderNHWC, OrderNCHW, Order
from webdnn.graph.variable import Variable
OrderNHW = Order([Axis.N, Axis.H, Axis.W])
@wrap_template
def template(x_order=OrderNHWC, y_order=OrderNHW, axis=Axis.C, description: str = ""):
    """Build a Sum-operator kernel test case: y = sum of x over `axis`.

    NOTE(review): vx is always generated in NHWC and the expected output
    is transposed via OrderNHW.axes_dict, so axis values other than C
    presumably require a matching y_order -- confirm against callers.
    """
    # reference input (2, 3, 4, 5) in NHWC, and its numpy reduction
    vx = np.arange(120).reshape(2, 3, 4, 5)
    vy = np.sum(vx, axis=OrderNHWC.axes_dict[axis])
    x = Variable(vx.shape, order=OrderNHWC)
    y, = Sum(None, axis=axis)(x)
    # re-lay-out the graph variables into the orders under test
    x.change_order(x_order)
    y.change_order(y_order)
    generate_kernel_test_case(
        description=f"Sum {description}",
        graph=Graph([x], [y]),
        backend=["webgpu", "webgl", "webassembly"],
        # transpose the reference arrays to match the chosen orders
        inputs={x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])},
        expected={y: np.transpose(vy, [OrderNHW.axes_dict[a] for a in y.order.axes])},
    )
def test():
    # default case: NHWC input, sum over the C axis
    template()

def test_different_order():
    # same reduction with the input laid out NCHW
    template(x_order=OrderNCHW)
|
[
"y.kikura@gmail.com"
] |
y.kikura@gmail.com
|
6c4e9fecab06fa7db3764811ceb340f58a4f01b3
|
fb99dfa9ad948543e28851b17209e737eb6c37e2
|
/meiduo/apps/goods/migrations/0003_auto_20200822_1538.py
|
21441bd0ff80bc6c3090dd4f158096b502c84545
|
[] |
no_license
|
lutaikang/meiduo
|
98eeae4372829669fcc4a6ccdd6b91e98d2d0f7a
|
cac921c361c76bc40fd51a8fa012bf68f02e266a
|
refs/heads/master
| 2022-12-26T19:00:26.596273
| 2020-10-14T00:18:28
| 2020-10-14T00:18:28
| 284,571,839
| 0
| 0
| null | 2020-08-20T11:14:27
| 2020-08-03T01:05:59
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 468
|
py
|
# Generated by Django 3.0.8 on 2020-08-22 07:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes sku.default_image optional
    # (blank/null allowed, empty-string default). Generated files should
    # not be hand-edited beyond comments.
    dependencies = [
        ('goods', '0002_auto_20200821_1825'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sku',
            name='default_image',
            field=models.ImageField(blank=True, default='', max_length=200, null=True, upload_to='', verbose_name='默认图片'),
        ),
    ]
|
[
"1554284589@qq.com"
] |
1554284589@qq.com
|
01b768ce505eb7f4bb2fc092621cc197579058eb
|
1fb342bb494e92a5f8757c6990a98d1b0cfd1c42
|
/toyClassification/MC-Dropout-MAP-01-Adam/eval.py
|
cf81b7c7ef19f626a449bd9adc2f47db45c6c422
|
[
"MIT"
] |
permissive
|
abdo-eldesokey/evaluating_bdl
|
111b99c2dbf9e3846949eca263fe2d83330af6ca
|
93e386319573d6a1c465f479e534db334ea30eda
|
refs/heads/master
| 2020-07-16T16:24:04.700096
| 2019-09-02T09:39:29
| 2019-09-02T09:39:29
| 205,823,556
| 0
| 0
|
MIT
| 2019-09-02T09:32:31
| 2019-09-02T09:32:30
| null |
UTF-8
|
Python
| false
| false
| 3,575
|
py
|
# code-checked
# server-checked
from model import ToyNet
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
# Evaluate an MC-dropout toy classifier: average M stochastic forward
# passes at every point of a 2-D grid to estimate P(class 0 | x).
batch_size = 32
M = 4  # number of MC-dropout forward passes per grid point
x_min = -6.0
x_max = 6.0
num_points = 60  # grid resolution per axis
network = ToyNet("eval_MC-Dropout-MAP-01-Adam_1_M10_0", project_dir="/root/evaluating_bdl/toyClassification").cuda()
network.load_state_dict(torch.load("/root/evaluating_bdl/toyClassification/training_logs/model_MC-Dropout-MAP-01-Adam_1_M10_0/checkpoints/model_MC-Dropout-MAP-01-Adam_1_M10_epoch_300.pth"))
M_float = float(M)
print (M_float)
# NOTE(review): eval() normally disables dropout; the M passes only
# differ if ToyNet keeps its dropout active at inference -- confirm.
network.eval()
false_prob_values = np.zeros((num_points, num_points))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
for x_1_i, x_1_value in enumerate(x_values):
    for x_2_i, x_2_value in enumerate(x_values):
        x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda() # (shape: (1, 2))
        mean_prob_vector = np.zeros((2, ))
        # average the softmax outputs of M stochastic passes
        for i in range(M):
            logits = network(x) # (shape: (1, num_classes)) (num_classes==2)
            prob_vector = F.softmax(logits, dim=1) # (shape: (1, num_classes))
            prob_vector = prob_vector.data.cpu().numpy()[0] # (shape: (2, ))
            mean_prob_vector += prob_vector/M_float
        # note the (row, col) = (x_2, x_1) convention for pcolormesh
        false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]
def _plot_density(xx, yy, values, cmap, title, filename):
    """Render one pcolormesh heatmap and save it under the model dir.

    Extracted from four near-identical copy-pasted plotting sections.
    """
    plt.figure(1)
    plt.pcolormesh(xx, yy, values, cmap=cmap)
    plt.xlabel("x_1")
    plt.ylabel("x_2")
    plt.title(title)
    plt.colorbar()
    plt.savefig("%s/%s" % (network.model_dir, filename))
    plt.close(1)

# predictive density of the MC-dropout ensemble (num_points x num_points grid)
x_1, x_2 = np.meshgrid(x_values, x_values)
_plot_density(x_1, x_2, false_prob_values, "RdBu", "Predictive Density", "predictive_density.png")
_plot_density(x_1, x_2, false_prob_values, "binary", "Predictive Density", "predictive_density_gray.png")

# ground truth: class boundary is a circle of radius 2.4 (fine 1000x1000 grid)
x_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
dist = np.sqrt(x_1**2 + x_2**2)
false_prob_values_GT = np.zeros(dist.shape)
false_prob_values_GT[dist < 2.4] = 1.0
_plot_density(x_1, x_2, false_prob_values_GT, "RdBu", "Predictive Density - Ground Truth", "predictive_density_GT.png")
_plot_density(x_1, x_2, false_prob_values_GT, "binary", "Predictive Density - Ground Truth", "predictive_density_gray_GT.png")

# side-by-side comparison against the HMC reference predictive density
with open("/root/evaluating_bdl/toyClassification/HMC/false_prob_values.pkl", "rb") as file: # (needed for python3)
    false_prob_values_HMC = pickle.load(file) # (shape: (60, 60))
x_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
x_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)
x_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)
fig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))
im = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap="RdBu", vmin=0, vmax=1)
im = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap="RdBu", vmin=0, vmax=1)
fig.colorbar(im, ax=axes.flat)
plt.savefig("%s/predictive_density_comparison.png" % network.model_dir)
plt.close()
|
[
"fregu856@gmail.com"
] |
fregu856@gmail.com
|
1ff38dfba3794d876797fb0375becaa9cb290fbc
|
327921706393c17005470544656e96fa42cdffb0
|
/package/DynamoCompas/bin/compas/numerical/matrices.py
|
65f089654bd04980afada7b647938feca1b9c25b
|
[] |
no_license
|
garciadelcastillo/DynamoCompas
|
efdc5df6151dac876b9f2bbb825c0fbc5ad832a9
|
c929226739209bceee951f49f82cb207fa0f3235
|
refs/heads/master
| 2021-01-20T01:28:12.281236
| 2018-02-16T11:56:44
| 2018-02-16T11:56:44
| 101,290,004
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,135
|
py
|
from __future__ import print_function
from compas.geometry import dot_vectors
from compas.geometry import length_vector
from compas.geometry import cross_vectors
from numpy import abs
from numpy import array
from numpy import asarray
from numpy import float32
from numpy import tile
from numpy import ones
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import diags
from scipy.sparse import spdiags
from scipy.sparse import vstack as svstack
__author__ = ['Tom Van Mele <vanmelet@ethz.ch>',
'Andrew Liew <liew@arch.ethz.ch>']
__copyright__ = 'Copyright 2016, Block Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = 'vanmelet@ethz.ch'
__all__ = [
'adjacency_matrix',
'degree_matrix',
'connectivity_matrix',
'laplacian_matrix',
'face_matrix',
'mass_matrix',
'stiffness_matrix',
'equilibrium_matrix'
]
def _return_matrix(M, rtype):
if rtype == 'list':
return M.toarray().tolist()
if rtype == 'array':
return M.toarray()
if rtype == 'csr':
return M.tocsr()
if rtype == 'csc':
return M.tocsc()
if rtype == 'coo':
return M.tocoo()
return M
# ==============================================================================
# adjacency
# ==============================================================================
def adjacency_matrix(adjacency, rtype='array'):
    """Build the vertex adjacency matrix from an adjacency list.

    adjacency[i] lists the neighbour indices of vertex i; entry (i, j)
    of the result is 1 where j is a neighbour of i.
    """
    rows = []
    cols = []
    data = []
    for i, nbrs in enumerate(adjacency):
        for j in nbrs:
            data.append(1)
            rows.append(i)
            cols.append(j)
    A = coo_matrix((data, (rows, cols))).asfptype()
    return _return_matrix(A, rtype)
def face_matrix(face_vertices, rtype='array'):
    """Creates a face-vertex adjacency matrix.

    Parameters:
        face_vertices (list of list) : List of vertices per face.
        rtype (str) : The return type.
    """
    rows, cols, data = [], [], []
    for i, vertices in enumerate(face_vertices):
        for j in vertices:
            rows.append(i)
            cols.append(j)
            data.append(1)
    F = coo_matrix((data, (rows, cols))).asfptype()
    return _return_matrix(F, rtype)
# def network_adjacency_matrix(network, rtype='array'):
# key_index = {key: index for index, key in enumerate(network.vertices())}
# adjacency = [[key_index[nbr] for nbr in network.vertex_neighbours(key)] for key in network.vertices()]
# return adjacency_matrix(adjacency, rtype=rtype)
# def mesh_adjacency_matrix(mesh, rtype='csr'):
# key_index = mesh.key_index()
# adjacency = [[key_index[nbr] for nbr in mesh.vertex_neighbours(key)] for key in mesh.vertices()]
# return adjacency_matrix(adjacency, rtype=rtype)
# def network_face_matrix(network, rtype='csr'):
# r"""Construct the face matrix of a network.
# Parameters:
# network (compas.datastructures.network.network.Network) :
# A ``compas`` network datastructure object.
# rtype (str) : Optional.
# The type of matrix to be returned. The default is ``'csr'``.
# Returns:
# array-like: The face matrix in the format specified by ``rtype``.
# Possible values of ``rtype`` are ``'list'``, ``'array'``, ``'csr'``, ``'csc'``, ``'coo'``.
# The face matrix represents the relationship between faces and vertices.
# Each row of the matrix represents a face. Each column represents a vertex.
# The matrix is filled with zeros except where a relationship between a vertex
# and a face exist.
# .. math::
# F_{ij} =
# \cases{
# 1 & if vertex j is part of face i \cr
# 0 & otherwise
# }
# The face matrix can for example be used to compute the centroids of all
# faces of a network.
# Example:
# .. code-block:: python
# import compas
# from compas.datastructures.network.network import Network
# network = Network.from_obj(compas.find_resource('lines.obj'))
# F = face_matrix(network, 'csr')
# xyz = array([network.vertex_coordinates(key) for key in network])
# c = F.dot(xyz)
# """
# key_index = {key: index for index, key in enumerate(network.vertices())}
# face_vertices = [[key_index[key] for key in network.face_vertices(fkey)] for fkey in network.faces()]
# return face_matrix(face_vertices, rtype=rtype)
# ==============================================================================
# degree
# ==============================================================================
def degree_matrix(adjacency, rtype='array'):
    """Build the diagonal degree matrix of an adjacency list: entry
    (i, i) is the number of neighbours of vertex i."""
    indices = list(range(len(adjacency)))
    degrees = [len(nbrs) for nbrs in adjacency]
    D = coo_matrix((degrees, (indices, indices))).asfptype()
    return _return_matrix(D, rtype)
# def network_degree_matrix(network, rtype='array'):
# key_index = {key: index for index, key in enumerate(network.vertices())}
# adjacency = [[key_index[nbr] for nbr in network.vertex_neighbours(key)] for key in network.vertices()]
# return degree_matrix(adjacency, rtype=rtype)
# ==============================================================================
# connectivity
# ==============================================================================
def connectivity_matrix(edges, rtype='array'):
    r"""Creates a connectivity matrix from a list of vertex index pairs.

    Each row of the result represents one edge, with -1 in the column of
    its start vertex and +1 in the column of its end vertex:

    .. math::

        \mathbf{C}_{ij} =
        \cases{
            -1 & if edge i starts at vertex j \cr
            +1 & if edge i ends at vertex j \cr
            0  & otherwise
        }

    Note:
        A connectivity matrix is generally sparse and will perform superior
        in numerical calculations as a sparse matrix.

    Parameters:
        edges (list of list): List of lists [[node_i, node_j], [node_k, node_l]].
        rtype (str): Format of the result, 'array', 'csc', 'csr', 'coo'.

    Returns:
        sparse: If ``rtype`` is ``None``, ``'csc'``, ``'csr'``, ``'coo'``.
        array: If ``rtype`` is ``'array'``.

    Examples:
        >>> connectivity_matrix([[0, 1], [0, 2], [0, 3]], rtype='array')
        [[-1  1  0  0]
         [-1  0  1  0]
         [-1  0  0  1]]
    """
    m = len(edges)
    edge_rows = list(range(m))
    starts = [edge[0] for edge in edges]
    ends = [edge[1] for edge in edges]
    # stack the -1 (start) triplet block on top of the +1 (end) block
    data = array([-1] * m + [1] * m)
    rows = array(edge_rows + edge_rows)
    cols = array(starts + ends)
    C = coo_matrix((data, (rows, cols))).asfptype()
    return _return_matrix(C, rtype)
# def network_connectivity_matrix(network, rtype='array'):
# key_index = {key: index for index, key in enumerate(network.vertices())}
# edges = [(key_index[u], key_index[v]) for u, v in network.edges()]
# return connectivity_matrix(edges, rtype=rtype)
# def mesh_connectivity_matrix(mesh, rtype='csr'):
# key_index = mesh.key_index()
# edges = [(key_index[u], key_index[v]) for u, v in mesh.wireframe()]
# return connectivity_matrix(edges, rtype=rtype)
# ==============================================================================
# laplacian
# ==============================================================================
# change this to a procedural approach
# constructing (fundamental) matrices should not involve matrix operations
def laplacian_matrix(edges, normalize=False, rtype='array'):
    r"""Creates a laplacian matrix from a list of edge topologies.

    The laplacian matrix is defined as

    .. math::

        \mathbf{L} = \mathbf{C} ^ \mathrm{T} \mathbf{C}

    Note:
        The current implementation only supports umbrella weights,
        as other weighting schemes are not generally applicable.

    See also:
        :func:`compas.datastructures.network.numerical.matrices.network_laplacian_matrix`
        :func:`compas.datastructures.mesh.numerical.matrices.mesh_laplacian_matrix`
        :func:`compas.datastructures.mesh.numerical.matrices.trimesh_cotangent_laplacian_matrix`
        :func:`compas.datastructures.mesh.numerical.matrices.trimesh_positive_cotangent_laplacian_matrix`

    Parameters:
        edges (list of list): List of lists [[node_i, node_j], [node_k, node_l]].
        normalize (bool): Scale each row by its diagonal entry so the
            diagonal of the result is all ones.
        rtype (str): Format of the result, 'array', 'csc', 'csr', 'coo'.

    Returns:
        sparse: If ''rtype'' is ``None, 'csc', 'csr', 'coo'``.
        array: If ''rtype'' is ``'array'``.

    Examples:
        >>> laplacian_matrix([[0, 1], [0, 2], [0, 3]], rtype='array')
        [[ 3 -1 -1 -1]
         [-1  1  0  0]
         [-1  0  1  0]
         [-1  0  0  1]]
    """
    C = connectivity_matrix(edges, rtype='csr')
    # C^T C puts the vertex degree on the diagonal and -1 per edge
    L = C.transpose().dot(C)
    if normalize:
        # row-wise division densifies the result, so re-sparsify after
        L = L / L.diagonal().reshape((-1, 1))
        L = csr_matrix(L)
    return _return_matrix(L, rtype)
# def network_laplacian_matrix(network, rtype='array', normalize=False):
# r"""Construct the Laplacian matrix of a network.
# Parameters:
# network (compas.datastructures.network.network.Network) :
# The network datastructure.
# rtype (str) :
# Optional.
# The format in which the Laplacian should be returned.
# Default is `'array'`.
# normalize (bool):
# Optional.
# Normalize the entries such that the value on the diagonal is ``1``.
# Default is ``False``.
# Returns:
# array-like: The Laplacian matrix in the format specified by ``rtype``.
# Possible values of ``rtype`` are ``'list'``, ``'array'``, ``'csr'``, ``'csc'``, ``'coo'``.
# Note:
# ``d = L.dot(xyz)`` is currently a vector that points from the centroid to the vertex.
# Therefore ``c = xyz - d``.
# By changing the signs in the laplacian,
# the dsiplacement vectors could be used in a more natural way ``c = xyz + d``.
# Example:
# .. plot::
# :include-source:
# from numpy import array
# import compas
# from compas.datastructures.network import Network
# from compas.datastructures.network.numerical import network_laplacian_matrix
# network = Network.from_obj(compas.get_data('grid_irregular.obj'))
# xy = array([network.vertex_coordinates(key, 'xy') for key in network])
# L = network_laplacian_matrix(network, rtype='csr', normalize=True)
# d = L.dot(xy)
# lines = [{'start': xy[i], 'end': xy[i] - d[i]} for i, k in network.vertices_enum()]
# network.plot(lines=lines)
# """
# key_index = {key: index for index, key in enumerate(network.vertices())}
# edges = [(key_index[u], key_index[v]) for u, v in network.edges()]
# return laplacian_matrix(edges, normalize=normalize, rtype=rtype)
# def mesh_laplacian_matrix(mesh, rtype='csr'):
# data, rows, cols = [], [], []
# key_index = mesh.key_index()
# for key in mesh.vertices():
# r = key_index[key]
# data.append(1)
# rows.append(r)
# cols.append(r)
# # provide anchor clause?
# nbrs = mesh.vertex_neighbours(key)
# w = len(nbrs)
# d = - 1. / w
# for nbr in nbrs:
# c = key_index[nbr]
# data.append(d)
# rows.append(r)
# cols.append(c)
# L = coo_matrix((data, (rows, cols)))
# return _return_matrix(L, rtype)
def trimesh_edge_cotangent(mesh, u, v):
    """Compute the cotangent of the angle opposite the halfedge (u, v).

    Parameters:
        mesh : The triangular mesh.
        u : Key of the start vertex of the halfedge.
        v : Key of the end vertex of the halfedge.

    Returns:
        float : The cotangent of the angle at the vertex of the adjacent face
        that is opposite the halfedge, or ``0.0`` for a boundary halfedge.
    """
    fkey = mesh.halfedge[u][v]
    if fkey is None:
        # Boundary halfedge: there is no adjacent face, hence no opposite angle.
        return 0.0
    # Vertex of the adjacent face that follows v (the vertex opposite the edge).
    opposite = mesh.face[fkey][v]
    vec_u = mesh.edge_vector(opposite, u)
    vec_v = mesh.edge_vector(opposite, v)
    # cot(theta) = cos/sin = (a . b) / |a x b|
    return dot_vectors(vec_u, vec_v) / length_vector(cross_vectors(vec_u, vec_v))
def trimesh_edge_cotangents(mesh, u, v):
    """Return the cotangents of the two angles opposite the edge (u, v).

    Parameters:
        mesh : The triangular mesh.
        u : Key of the start vertex of the edge.
        v : Key of the end vertex of the edge.

    Returns:
        tuple : ``(cot_a, cot_b)``, one cotangent per adjacent face
        (zero for a missing/boundary side).
    """
    # BUG FIX: trimesh_edge_cotangent takes the mesh as its first argument;
    # the original called it as trimesh_edge_cotangent(u, v), which raised
    # a TypeError at runtime.
    a = trimesh_edge_cotangent(mesh, u, v)
    b = trimesh_edge_cotangent(mesh, v, u)
    return a, b
def trimesh_cotangent_laplacian_matrix(mesh):
    """Construct the Laplacian of a triangular mesh with cotangent weights.

    Parameters:
        mesh (compas.datastructures.mesh.tri.TriMesh) :
            The triangular mesh.

    Returns:
        array-like :
            The Laplacian matrix with cotangent weights, as a ``scipy.sparse``
            CSR matrix of shape (n, n) with n the number of vertices.

    Note:
        The matrix is constructed such that the diagonal contains the sum of the
        weights of the adjacent vertices, multiplied by `-1`.
        The entries of the matrix are thus

        .. math::

            \mathbf{L}_{ij} =
                \begin{cases}
                    - \sum_{(i, k) \in \mathbf{E}_{i}} w_{ik} & if i = j \\
                    w_{ij} & if (i, j) \in \mathbf{E} \\
                    0 & otherwise
                \end{cases}
    """
    # minus sum of the adjacent weights on the diagonal
    # cotangent weights on the neighbours
    key_index = mesh.key_index()
    n = mesh.number_of_vertices()
    data = []
    rows = []
    cols = []
    # compute the weight of each halfedge
    # as the cotangent of the angle at the opposite vertex
    # NOTE(review): assumes mesh.wireframe() yields each undirected edge once,
    # and that mesh exposes an edge_cotangents method -- confirm against the
    # TriMesh API (a module-level trimesh_edge_cotangents also exists here).
    for u, v in mesh.wireframe():
        a, b = mesh.edge_cotangents(u, v)
        i = key_index[u]
        j = key_index[v]
        data.append(0.5 * a)  # not sure why multiplication with 0.5 is necessary
        rows.append(i)
        cols.append(j)
        data.append(0.5 * b)  # not sure why multiplication with 0.5 is necessary
        rows.append(j)
        cols.append(i)
    # coo_matrix sums duplicate (row, col) entries on conversion.
    L = coo_matrix((data, (rows, cols)), shape=(n, n))
    L = L.tocsr()
    # subtract from the diagonal the sum of the weights of the neighbours of the
    # vertices corresponding to the diagonal entries.
    L = L - spdiags(L * ones(n), 0, n, n)
    L = L.tocsr()
    return L
def trimesh_positive_cotangent_laplacian_matrix(mesh):
    """Placeholder: positive cotangent Laplacian is not implemented yet."""
    raise NotImplementedError
# ==============================================================================
# structural
# ==============================================================================
def mass_matrix(Ct, ks, q=0, c=1, tiled=True):
    r"""Create a network's nodal mass matrix.

    The mass of each node is the sum of the member axial stiffnesses (EA / L)
    of the elements connected to it, plus the force density. The force density
    keeps the value non-zero in form-finding/pre-stress modelling where E=0.

    .. math::

        \mathbf{m} =
        |\mathbf{C}^\mathrm{T}|
        (\mathbf{E} \circ \mathbf{A} \oslash \mathbf{l} + \mathbf{f} \oslash \mathbf{l})

    Parameters:
        Ct (sparse): Sparse transpose of the connectivity matrix (n x m).
        ks (array): Vector of member EA / L (m x 1).
        q (array): Vector of member force densities (m x 1).
        c (float): Convergence factor.
        tiled (boolean): Whether to tile horizontally by 3 for x, y, z.

    Returns:
        array : Mass vector, (n x 1), or (n x 3) when ``tiled``.
    """
    stiffness = ks + q
    masses = c * abs(Ct).dot(stiffness)
    if not tiled:
        return masses
    # Repeat the nodal column for the x, y and z directions.
    return tile(masses, (1, 3))
def stiffness_matrix():
    """Placeholder: stiffness matrix assembly is not implemented yet."""
    raise NotImplementedError
def equilibrium_matrix(C, xyz, free, rtype='array'):
    r"""Construct the equilibrium matrix of a structural system.

    The equilibrium matrix has size (2 ni x m), with ni the number of free
    (internal) nodes and m the number of edges:

    .. math::

        \mathbf{E}
        =
        \left[
            \begin{array}{c}
                \mathbf{C}^{\mathrm{T}}_{\mathrm{i}}\mathbf{U} \\[0.3em]
                \hline \\[-0.7em]
                \mathbf{C}^{\mathrm{T}}_{\mathrm{i}}\mathbf{V}
            \end{array}
        \right].

    Note:
        The matrix of vertex coordinates is vectorised to speed up the
        calculations.

    Parameters:
        C (array, sparse): Connectivity matrix (m x n).
        xyz (array, list): Array of vertex coordinates (n x 3).
        free (list): The index values of the free vertices.
        rtype (str): Format of the result, 'array', 'csc', 'csr', 'coo'.

    Returns:
        sparse : If ``rtype`` is ``'csc', 'csr', 'coo'``.
        array : If ``rtype`` is ``'array'``.

    Examples:
        >>> C = connectivity_matrix([[0, 1], [0, 2], [0, 3]])
        >>> xyz = [[0, 0, 1], [0, 1, 0], [-1, -1, 0], [1, -1, 0]]
        >>> equilibrium_matrix(C, xyz, [0], rtype='array')
        [[ 0.  1. -1.]
         [-1.  1.  1.]]
    """
    # Only the planar (x, y) coordinates enter the equilibrium matrix.
    coords = asarray(xyz, dtype=float32)[:, :2]
    connectivity = csr_matrix(C)
    # Per-edge coordinate differences, split into diagonal matrices U and V.
    diff = connectivity.dot(coords)
    U = diags([diff[:, 0].flatten()], [0])
    V = diags([diff[:, 1].flatten()], [0])
    # Restrict the transposed connectivity to the free vertices.
    Ct_free = connectivity.transpose()[free, :]
    E = svstack((Ct_free.dot(U), Ct_free.dot(V)))
    return _return_matrix(E, rtype)
# ==============================================================================
# Debugging
# ==============================================================================
if __name__ == "__main__":

    # Scratch/debugging code. The commented-out sections below prototype a
    # force-density solve and Laplacian/centroid consistency checks; only the
    # imports and the Network construction near the bottom are live.

    # import compas
    # from compas.datastructures.network import Network
    # from scipy.sparse.linalg import spsolve
    # class Network(Network):
    #     def __init__(self, **kwargs):
    #         super(Network, self).__init__(**kwargs)
    #         self.dea.update({'q': 1.0})
    # network = Network.from_obj(compas.get_data('lines.obj'))
    # k_i = dict((key, index) for index, key in network.vertices_enum())
    # i_k = dict(network.vertices_enum())
    # xyz = [network.vertex_coordinates(key) for key in network]
    # edges = [(k_i[u], k_i[v]) for u, v in network.edges_iter()]
    # n = len(xyz)
    # m = len(edges)
    # fixed = [k_i[key] for key in network.leaves()]
    # free = list(set(range(n)) - set(fixed))
    # q = [float(1.0) for i in range(m)]
    # ij_q = dict(((k_i[u], k_i[v]), attr['q']) for u, v, attr in network.edges_iter(True))
    # ij_q.update(((k_i[v], k_i[u]), attr['q']) for u, v, attr in network.edges_iter(True))
    # xyz = array(xyz)
    # C = connectivity_matrix(edges, 'csr')
    # Q = diags([q], [0])
    # Ci = C[:, free]
    # Cf = C[:, fixed]
    # Cit = Ci.transpose()
    # CitQCi = Cit.dot(Q).dot(Ci)
    # CitQCf = Cit.dot(Q).dot(Cf)
    # print(CitQCf.dot(xyz[fixed]))
    # CtQC = [[0.0 for j in range(n)] for i in range(n)]
    # for i in range(n):
    #     key = i_k[i]
    #     Q = 0
    #     for nbr in network.neighbours(key):
    #         j = k_i[nbr]
    #         q = ij_q[(i, j)]
    #         Q += q
    #         CtQC[i][j] = - q
    #     CtQC[i][i] = Q
    # CitQCi = [[CtQC[i][j] for j in free] for i in free]
    # CitQCf = [[CtQC[i][j] for j in fixed] for i in free]
    # CtQC = array(CtQC)
    # CitQCi = csr_matrix(array(CitQCi))
    # CitQCf = csr_matrix(array(CitQCf))
    # xyz[free] = spsolve(CitQCi, - CitQCf.dot(xyz[fixed]))
    # for key, attr in network.vertices_iter(True):
    #     index = k_i[key]
    #     attr['x'] = xyz[index, 0]
    #     attr['y'] = xyz[index, 1]
    # vlabel = dict((key, str(index)) for index, key in network.vertices_enum())
    # elabel = dict(((u, v), str(index)) for index, u, v in network.edges_enum())
    # network.plot(vlabel=vlabel, elabel=None)

    import compas
    from compas.datastructures.network import Network
    from numpy import allclose

    network = Network.from_obj(compas.get_data('grid_irregular.obj'))

    # key_index = {key: index for index, key in enumerate(network.vertices())}
    # A = network_adjacency_matrix(network)
    # C = network_connectivity_matrix(network)
    # L = network_laplacian_matrix(network, normalize=True, rtype='csr')
    # D = network_degree_matrix(network)
    # xy = [network.vertex_coordinates(key, 'xy') for key in network.vertices()]
    # xy = array(xy, dtype=float).reshape((-1, 2))
    # centroids1 = [network.vertex_neighbourhood_centroid(key) for key in network.vertices()]
    # centroids1 = array(centroids1, dtype=float)[:, 0:2]
    # d = L.dot(xy)
    # centroids2 = xy - d
    # centroids3 = A.dot(xy) / D.diagonal().reshape((-1, 1))
    # print(allclose(centroids1, centroids2))
    # print(allclose(centroids2, centroids3))
    # print(allclose(centroids1, centroids3))
|
[
"personal@garciadelcastillo.es"
] |
personal@garciadelcastillo.es
|
13d46102ff76652081c82b9442c20444fbacedde
|
d2efafce692a0e5b5b6bc6d9996c8f733f9928a6
|
/day_01/built_in_functions.py
|
130d4089fa1a5969e60a54c12885b8b6f320fa0d
|
[] |
no_license
|
jamessonfaria/maratona_python
|
32e0c480e543cfc331224a56a7961a6af75fa228
|
13c88b0e895c91fddafdb6f54a1fd1ad824b9f4a
|
refs/heads/master
| 2023-03-08T13:40:06.371140
| 2021-02-19T04:01:19
| 2021-02-19T04:01:19
| 337,261,468
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
### Built in Functions
# Small demo script: exercises a handful of Python built-ins on literals.
print(len("xxxxxxx"))  # 7 -- length of the string
print(bool(0))  # False -- zero is falsy
print(int("3") + 3)  # 6 -- string parsed to int before adding
print("Hello" + str(33))  # 'Hello33' -- int converted to str for concatenation
print("dsadasasda".upper())  # upper-cased copy of the string
print("BRBRBRBRBRBRBRBRBRB".replace("B", ""))  # every 'B' removed
|
[
"jamessonjr@gmail.com"
] |
jamessonjr@gmail.com
|
51293bc0e2bf53f80a7163096f53637727f9c3e1
|
008d7b53df2631a2d0634b3c1a3be7d3e8f7354b
|
/data_analysis/database.py
|
7db7ef559c16dbf42cc84725e44be72f50286829
|
[
"BSD-3-Clause"
] |
permissive
|
Trax-/data_analysis
|
00bc2549fdd4a60ff804d388c03a1afb60644793
|
2ffb772e95566d38727ae741017750cbbebad08d
|
refs/heads/master
| 2021-01-20T01:11:10.890103
| 2017-07-04T16:10:52
| 2017-07-04T16:10:52
| 89,230,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,958
|
py
|
from os import path
from sqlalchemy import (create_engine, Column, String, Integer, Boolean, Table, ForeignKey, DateTime)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# Location of the SQLite database file, resolved next to this module.
database_filename = 'twitter.sqlite3'
directory = path.abspath(path.dirname(__file__))
database_filepath = path.join(directory, database_filename)
engine_url = f'sqlite:///{database_filepath}'
engine = create_engine(engine_url)
# Our database class objects are going to inherit from
# this class
Base = declarative_base(bind=engine)
# create a configured “Session” class
Session = sessionmaker(bind=engine, autoflush=False)
# Create a Session
session = Session()
# Association table for the many-to-many Hashtag <-> Tweet relationship.
hashtag_tweet = Table('hashtag_tweet', Base.metadata,
                      Column('hashtag_id', Integer, ForeignKey('hashtags.id'), nullable=False),
                      Column('tweet_id', Integer, ForeignKey('tweets.id'), nullable=False))
class Tweet(Base):
    """A stored tweet, linked to its author and its hashtags."""

    __tablename__ = 'tweets'

    id = Column(Integer, primary_key=True)
    # Twitter's own status id, stored as text; unique per tweet.
    tid = Column(String(100), nullable=False, unique=True)
    tweet = Column(String(300), nullable=False)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
    coordinates = Column(String(50), nullable=True)
    user = relationship('User', backref='tweets')
    created_at = Column(DateTime, nullable=False)
    favorite_count = Column(Integer)
    in_reply_to_screen_name = Column(String)
    in_reply_to_status_id = Column(String)
    in_reply_to_user_id = Column(String)
    lang = Column(String)
    quoted_status_id = Column(String)
    retweet_count = Column(Integer)
    source = Column(String)
    is_retweet = Column(Boolean)
    # Many-to-many link via the hashtag_tweet association table.
    hashtags = relationship('Hashtag', secondary='hashtag_tweet', back_populates='tweets')

    def __repr__(self):
        return '<Tweet {}>'.format(self.id)
class User(Base):
    """A Twitter account referenced by at least one stored tweet."""

    __tablename__ = 'users'

    id = Column(Integer, primary_key=True)
    # Twitter's account id, stored as text; unique per user.
    uid = Column(String(50), nullable=False, unique=True)
    name = Column(String(100), nullable=False)
    screen_name = Column(String)
    created_at = Column(DateTime)
    # Nullable
    description = Column(String)
    followers_count = Column(Integer)
    friends_count = Column(Integer)
    statuses_count = Column(Integer)
    favourites_count = Column(Integer)
    listed_count = Column(Integer)
    geo_enabled = Column(Boolean)
    lang = Column(String)

    def __repr__(self):
        return '<User {}>'.format(self.id)
class Hashtag(Base):
    """A unique hashtag text, shared by all tweets that use it."""

    __tablename__ = 'hashtags'

    id = Column(Integer, primary_key=True)
    text = Column(String(200), nullable=False, unique=True)
    # Many-to-many back to tweets via the hashtag_tweet association table.
    tweets = relationship('Tweet',
                          secondary='hashtag_tweet',
                          back_populates='hashtags')

    def __repr__(self):
        return '<Hashtag {}>'.format(self.text)
def init_db():
    """Create all tables declared on ``Base`` in the bound engine."""
    Base.metadata.create_all()


# Bootstrap the schema at import time when the database file does not exist yet.
if not path.isfile(database_filepath):
    init_db()
|
[
"tlo@ocsnet.com"
] |
tlo@ocsnet.com
|
f640d5c86f8bc347ef20387cc8e9a637e6ca719e
|
528006e19b587da8e55e65dba14e9adac118688e
|
/Naive Bayes Classifier/Bayes_Classifier.py
|
c70e38532f94000250ae7cc0defba6898824f5db
|
[] |
no_license
|
tonnykwon/Data-Mining
|
0490f84cb0d5145012f155c6536cdd57c1dcd315
|
5067f3d587f80784b31f2eb2ea4dbbaf177ead9e
|
refs/heads/master
| 2020-04-21T06:50:43.674303
| 2019-05-04T10:50:49
| 2019-05-04T10:50:49
| 169,376,502
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,207
|
py
|
## read data
# Naive Bayes classifier over CSV records on stdin. Rows whose last column is
# '-1' form the (unlabelled) test set; all prints are the predicted labels.
import sys
import csv
from collections import Counter

data = sys.stdin.readlines()
csvreader = csv.reader(data, delimiter=',')
data = []
for row in csvreader:
    data.append(row)

# train test split: '-1' in the class column marks a test record.
class_type = list(zip(*data))[-1]
train_idx = [idx for idx, label in enumerate(class_type) if label!='-1' ][1:]
test_idx = [idx for idx, label in enumerate(class_type) if label=='-1' ]
# NOTE(review): `idx in train_idx` is an O(n) list scan per record; a set
# would be faster on large inputs.
train_data = [list(map(int,column[1:])) for idx, column in enumerate(data) if idx in train_idx]
test_data = [list(map(int,column[1:])) for idx, column in enumerate(data) if idx in test_idx]

# calculate py: smoothed prior probability for each of the 7 classes.
train_class_type = list(zip(*train_data))[-1]
# counts for each class. Setting default 0
counts = {}
for label in range(1,8):
    counts[label] = 0
counts.update(Counter(train_class_type))
n = sum(counts.values())
# NOTE(review): the smoothing denominator uses `item*0.1` (the per-class
# count) rather than the usual `num_classes*0.1` -- confirm this is intended.
py = [(key, (item+0.1)/(n+item*0.1)) for key, item in counts.items()]

# calculate set of features and number of features:
# each entry is (allowed attribute values, number of attribute values).
feature_set = []
for feature_idx, column in enumerate(list(zip(*train_data))):
    # leg features
    if feature_idx == 12:
        feature_set.append(((0,2,4,5,6,8), 6))
    # class
    elif feature_idx == 16:
        feature_set.append((tuple(range(1,8)), 7))
    else:
        feature_set.append(((0,1), 2))

# separate data by class
seperated = {}
for label in list(range(1,8)):
    seperated[label] = []
for column in train_data:
    seperated[column[-1]].append(column)

# counts each feature for each class
# for each items' feature, count each feature attributes
feature_counts = {}
for key, items in seperated.items():
    if not items:
        # Class with no training rows: keep zero counts for every attribute.
        for idx, features in enumerate(feature_set):
            feature_counts[(key, idx)] = Counter({feature:0 for feature in features[0]})
    for idx, item in enumerate(list(zip(*items))):
        # get counts for zero counts
        if (key,idx) not in feature_counts:
            feature_counts[(key,idx)] = Counter({key:0 for key in feature_set[idx][0]})
        feature_counts[(key, idx)].update(list(item))

# calculate pxy based on counts of each attribute and feature number
pxy = {}
for key, item in feature_counts.items():
    num_class = len(list(item.elements()))
    # feature: feature attributes, num: feature attributes counts
    for feature, num in item.items():
        # class, feature, attribute
        pxy[(key[0], key[1], feature)] = (num+0.1)/(num_class+0.1*feature_set[key[1]][1])

# predict test sets by calculating pxy * py
test_pxy = []
for test_set in test_data:
    test_set_pxy=[]
    for feature_idx, feature in enumerate(test_set[:-1]):
        # pxy.get returns None for unseen (class, feature, value) triples --
        # NOTE(review): that would make the product below raise; presumably
        # all attribute values are covered by feature_set.
        test_set_pxy.append([pxy.get((label, feature_idx, feature)) for label, prob in py])
    test_pxy.append(test_set_pxy)

# multiply all elements in pxy
pxy_list = []
for test_set_pxy in test_pxy:
    pxy_set_list = []
    for elements in list(zip(*test_set_pxy)):
        result = 1
        for element in elements:
            result *= element
        pxy_set_list.append(result)
    pxy_list.append(pxy_set_list)

# calculate pxy and py
results = [[pxy_set_list[label-1]*prob for label, prob in py] for pxy_set_list in pxy_list]

# predict test label: argmax over the class posteriors (1-based labels).
for result in results:
    print(result.index(max(result))+1)
|
[
"tonnykwon@naver.com"
] |
tonnykwon@naver.com
|
29887f53b9e6b254706c83f259d4b7d80e1a123a
|
d7bf9017a1951bab00c025f842dac68fff0fabce
|
/HW3/ACGAN/models.py
|
75f0bb1ec4770b702e0fb3e92945addacc733a01
|
[] |
no_license
|
rmanzanedo/NTU_DLCV
|
e7e54f57670a93c5a4aa7e0457db21eadaf0c999
|
287ec4ddb93fe8ed7c55e1936015ed6f6efd9b68
|
refs/heads/master
| 2022-11-26T11:02:46.027132
| 2020-01-18T06:26:57
| 2020-01-18T06:26:57
| 234,541,233
| 0
| 0
| null | 2022-11-22T04:19:46
| 2020-01-17T12:11:20
|
Python
|
UTF-8
|
Python
| false
| false
| 6,891
|
py
|
import torch
import torch.nn as nn
import torchvision
import numpy as np
# class generator(nn.Module):
# def __init__(self):
# super(generator, self).__init__()
# self.label_emb = nn.Embedding(1, 100)
# self.init_size = 64 // 4 # Initial size before upsampling
# self.l1 = nn.Sequential(nn.Linear(100, 128 * self.init_size ** 2))
# self.conv_blocks = nn.Sequential(
# nn.BatchNorm2d(128),
# nn.Upsample(scale_factor=2),
# nn.Conv2d(128, 128, 3, stride=1, padding=1),
# nn.BatchNorm2d(128, 0.8),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Upsample(scale_factor=2),
# nn.Conv2d(128, 64, 3, stride=1, padding=1),
# nn.BatchNorm2d(64, 0.8),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Conv2d(64, 3, 3, stride=1, padding=1),
# nn.Tanh(),
# )
# def forward(self, noise, labels):
# gen_input = torch.mul(self.label_emb(labels), noise)
# out = self.l1(gen_input)
# print(out.size())
# print(out.shape[0])
# out = out.view(10, 128, self.init_size, self.init_size)
# print(out.size())
# exit()
# img = self.conv_blocks(out)
# return img
class generator(nn.Module):
    """DCGAN-style generator.

    Maps a (N, 101, 1, 1) input (100-d noise with the class label appended)
    to a (N, 3, 64, 64) image in [-1, 1] via Tanh.
    """

    def __init__(self):
        super(generator, self).__init__()
        channels = 64
        layers = [
            # input: (101) x 1 x 1
            nn.ConvTranspose2d(101, channels * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(channels * 8),
            nn.ReLU(True),
            # (64*8) x 4 x 4
            nn.ConvTranspose2d(channels * 8, channels * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(channels * 4),
            nn.ReLU(True),
            # (64*4) x 8 x 8
            nn.ConvTranspose2d(channels * 4, channels * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(channels * 2),
            nn.ReLU(True),
            # (64*2) x 16 x 16
            nn.ConvTranspose2d(channels * 2, channels, 4, 2, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(True),
            # (64) x 32 x 32
            nn.ConvTranspose2d(channels, 3, 4, 2, 1, bias=False),
            nn.Tanh(),
            # output: (3) x 64 x 64
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)
# # defining discriminator class
# class discriminator(nn.Module):
# def __init__(self):
# super(discriminator, self).__init__()
# def discriminator_block(in_filters, out_filters, bn=True):
# """Returns layers of each discriminator block"""
# block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
# if bn:
# block.append(nn.BatchNorm2d(out_filters, 0.8))
# return block
# self.conv_blocks = nn.Sequential(
# *discriminator_block(3, 16, bn=False),
# *discriminator_block(16, 32),
# *discriminator_block(32, 64),
# *discriminator_block(64, 128),
# )
# # The height and width of downsampled image
# ds_size = 64 // 2 ** 4
# # Output layers
# self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
# self.aux_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Softmax())
# def forward(self, img):
# out = self.conv_blocks(img)
# out = out.view(out.shape[0], -1)
# validity = self.adv_layer(out)
# label = self.aux_layer(out)
# return validity, label
import torch.nn as nn
# custom weights initialization called on netG and discriminator
def weights_init(m):
    """DCGAN weight initialisation, applied via ``module.apply(weights_init)``.

    Conv layers get weights ~ N(0, 0.02); batch-norm layers get weights
    ~ N(1, 0.02) and zero bias. Other layer types are left untouched.
    """
    kind = type(m).__name__
    if 'Conv' in kind:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in kind:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# class generator(nn.Module):
# def __init__(self):
# super(generator, self).__init__()
# self.ReLU = nn.ReLU(True)
# self.Tanh = nn.Tanh()
# self.conv1 = nn.ConvTranspose2d(101, 64 * 8, 4, 1, 0, bias=False)
# self.BatchNorm1 = nn.BatchNorm2d(64 * 8)
# self.conv2 = nn.ConvTranspose2d(64 * 8, 64 * 4, 4, 2, 1, bias=False)
# self.BatchNorm2 = nn.BatchNorm2d(64 * 4)
# self.conv3 = nn.ConvTranspose2d(64 * 4, 64 * 2, 4, 2, 1, bias=False)
# self.BatchNorm3 = nn.BatchNorm2d(64 * 2)
# self.conv4 = nn.ConvTranspose2d(64 * 2, 64 * 1, 4, 2, 1, bias=False)
# self.BatchNorm4 = nn.BatchNorm2d(64 * 1)
# self.conv5 = nn.ConvTranspose2d(64 * 1, 3, 4, 2, 1, bias=False)
# self.apply(weights_init)
# def forward(self, input):
# x = self.conv1(input)
# x = self.BatchNorm1(x)
# x = self.ReLU(x)
# x = self.conv2(x)
# x = self.BatchNorm2(x)
# x = self.ReLU(x)
# x = self.conv3(x)
# x = self.BatchNorm3(x)
# x = self.ReLU(x)
# x = self.conv4(x)
# x = self.BatchNorm4(x)
# x = self.ReLU(x)
# x = self.conv5(x)
# output = self.Tanh(x)
# return output
class discriminator(nn.Module):
    """ACGAN discriminator.

    Takes a (N, 3, 64, 64) image and returns ``(s, c)``: the real/fake
    score and the auxiliary class score, both sigmoid-squashed and of
    shape (N, 1, 1, 1).
    """

    def __init__(self):
        super(discriminator, self).__init__()
        self.LeakyReLU = nn.LeakyReLU(0.2, inplace=True)
        self.conv1 = nn.Conv2d(3, 64, 4, 2, 1, bias=False)            # -> 64 x 32 x 32
        self.conv2 = nn.Conv2d(64, 64 * 2, 4, 2, 1, bias=False)       # -> 128 x 16 x 16
        self.BatchNorm2 = nn.BatchNorm2d(64 * 2)
        self.conv3 = nn.Conv2d(64 * 2, 64 * 4, 4, 2, 1, bias=False)   # -> 256 x 8 x 8
        self.BatchNorm3 = nn.BatchNorm2d(64 * 4)
        self.conv4 = nn.Conv2d(64 * 4, 64 * 8, 4, 2, 1, bias=False)   # -> 512 x 4 x 4
        self.BatchNorm4 = nn.BatchNorm2d(64 * 8)
        # NOTE(review): conv5/BatchNorm5 are never used in forward(); they are
        # kept so existing checkpoints retain their state_dict keys.
        self.conv5 = nn.Conv2d(64 * 8, 64 * 1, 4, 1, 0, bias=False)
        self.BatchNorm5 = nn.BatchNorm2d(64)
        self.disc_linear = nn.Conv2d(64 * 8, 1, 4, 1, 0, bias=False)  # real/fake head
        self.aux_linear = nn.Conv2d(64 * 8, 1, 4, 1, 0, bias=False)   # class head
        self.sigmoid1 = nn.Sigmoid()
        self.sigmoid2 = nn.Sigmoid()
        self.apply(weights_init)

    def forward(self, input):
        x = self.LeakyReLU(self.conv1(input))
        x = self.LeakyReLU(self.BatchNorm2(self.conv2(x)))
        x = self.LeakyReLU(self.BatchNorm3(self.conv3(x)))
        x = self.LeakyReLU(self.BatchNorm4(self.conv4(x)))
        c = self.sigmoid1(self.aux_linear(x))
        s = self.sigmoid2(self.disc_linear(x))
        return s, c
|
[
"r.manzanedo@alumnos.upm.es"
] |
r.manzanedo@alumnos.upm.es
|
d49bdac0b43197f6952ced81795d782ccac9a13d
|
97a447e2a319ccaf932b0220fcdc523bbdfb4499
|
/qd/data.py
|
8c2009707f9b3b393ef87cd12893882591eae97e
|
[] |
no_license
|
tbenthompson/qd
|
d0999dd899173e8313836332cb5d4d8fe644387f
|
70f08671ba3fd5281609d3e44195356bf0a2c25a
|
refs/heads/master
| 2020-03-19T14:52:03.756175
| 2019-02-02T16:15:31
| 2019-02-02T16:15:31
| 124,702,158
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,841
|
py
|
import os
import numpy as np
import cloudpickle
"""
Don't save anything!
"""
class NullData:
    """No-op data sink: satisfies the saver interface but records nothing."""

    def initialized(self, _):
        """Ignore the initialization notification."""
        pass

    def stepped(self, _):
        """Ignore the step notification."""
        pass
"""
Do not use. Exists for compatibility with old data.
"""
class MonolithicDataSaver:
def __init__(self, save_freq = 500, filename = 'results.npy'):
self.filename = filename
def initialized(self):
pass
def stepped(self, integrator):
if integrator.step_idx() == self.save_freq:
np.save(
self.filename,
np.array([integrator.model.m, qd_cfg, h_t, h_y], dtype = np.object)
)
def initial_data_path(folder_name):
    """Return the path of the pickled initial-data file inside *folder_name*."""
    filename = 'initial_data.npy'
    return os.path.join(folder_name, filename)
def skip_existing_prefixed_folders(prefix):
    """Return the first path '<prefix><n>' (n = 0, 1, 2, ...) that does not
    exist on disk yet."""
    counter = 0
    candidate = prefix + '0'
    while os.path.exists(candidate):
        counter += 1
        candidate = prefix + str(counter)
    return candidate
"""
Saves chunks of time steps in separate files for easy reloading.
"""
class ChunkedDataSaver:
    """Save chunks of time steps in separate files for easy reloading.

    One folder holds ``initial_data.npy`` (mesh, config, initial conditions)
    plus one ``<end_step>.npy`` file per chunk of ``chunk_size`` steps.
    """

    def __init__(self, chunk_size = 100, folder_prefix = 'data', existing_folder = None):
        self.chunk_size = chunk_size
        self.folder_prefix = folder_prefix
        self.folder_name = existing_folder
        if self.folder_name is None:
            self.folder_name = skip_existing_prefixed_folders(self.folder_prefix)
        # BUG FIX: the unconditional os.makedirs raised FileExistsError when
        # resuming into an existing_folder that is already on disk; exist_ok
        # is harmless for the fresh-folder path (which never exists yet).
        os.makedirs(self.folder_name, exist_ok = True)

    def initialized(self, integrator):
        """Pickle the mesh, config and initial conditions once, up front."""
        with open(initial_data_path(self.folder_name), 'wb') as f:
            cloudpickle.dump(
                # dtype=object replaces np.object, removed in numpy >= 1.24.
                np.array([
                    integrator.model.m, integrator.model.cfg,
                    integrator.init_conditions
                ], dtype = object),
                f
            )

    def stepped(self, integrator):
        """Every chunk_size steps, dump the trailing chunk of (t, y) history."""
        step_idx = integrator.step_idx()
        if step_idx % self.chunk_size == 0:
            step_data_path = os.path.join(self.folder_name, f'{step_idx}.npy')
            np.save(
                step_data_path,
                np.array([
                    integrator.h_t[-self.chunk_size:],
                    integrator.h_y[-self.chunk_size:]
                ], dtype = object)
            )
class ChunkedDataLoader:
    """Load a simulation saved by ChunkedDataSaver.

    Reads the pickled initial data once, then concatenates the per-chunk
    '<end_step>.npy' files into flat ``ts``/``ys`` arrays; load_new_files()
    can be called again later to pick up chunks written since.
    """

    def __init__(self, folder_name, model_type):
        self.model_type = model_type
        self.folder_name = folder_name
        self.idxs = []  # chunk end-step indices loaded so far (sorted)
        self.ts = None  # time value per step, shape (steps,)
        self.ys = None  # state vector per step, shape (steps, n_dofs)
        self.load_initial_data()
        self.load_new_files()

    def load_initial_data(self):
        """Unpickle mesh, config and initial conditions, and build the model."""
        with open(initial_data_path(self.folder_name), 'rb') as f:
            self.m, self.cfg, self.init_conditions = cloudpickle.load(f)
        self.n_dofs = self.init_conditions[1].shape[0]
        self.n_tris = self.m.tris.shape[0]
        self.model = self.model_type(self.m, self.cfg)

    def load_new_files(self):
        """Scan the folder for chunk files and merge them into ts/ys."""
        # Chunk files are named '<end_step>.npy'; collect the numeric names.
        new_idxs = []
        for f in os.listdir(self.folder_name):
            if not os.path.isfile(os.path.join(self.folder_name, f)):
                continue
            base_name, ext = os.path.splitext(f)
            if not base_name.isdecimal():
                continue
            new_idxs.append(int(base_name))
        new_idxs.sort()
        # NOTE(review): assumes at least one chunk file exists; new_idxs[-1]
        # raises IndexError on a folder with no chunks. Also note new_idxs
        # includes already-loaded chunks, so they are re-read (and re-appended
        # to self.idxs) on every call -- confirm this is intended.
        new_ts = np.empty(new_idxs[-1])
        new_ys = np.empty((new_idxs[-1], self.n_dofs))
        if self.ts is not None:
            # Carry over the history loaded on previous calls.
            new_ts[:self.idxs[-1]] = self.ts
            new_ys[:self.idxs[-1]] = self.ys
        self.ts = new_ts
        self.ys = new_ys
        for i in new_idxs:
            # Each chunk is [h_t_slice, h_y_slice] covering steps [i-n_steps, i).
            chunk = np.load(os.path.join(self.folder_name, f'{i}.npy'))
            n_steps = len(chunk[0])
            self.ts[i - n_steps:i] = chunk[0]
            for j in range(n_steps):
                self.ys[i - n_steps + j] = chunk[1][j]
        self.idxs += new_idxs
def load(folder_name, model_type):
    """Convenience wrapper: build a ChunkedDataLoader for *folder_name*."""
    loader = ChunkedDataLoader(folder_name, model_type)
    return loader
|
[
"t.ben.thompson@gmail.com"
] |
t.ben.thompson@gmail.com
|
436fc219c2e1886f8a8788e7fd3d92d7b7357493
|
eb90252fc88342309fc1917467aaa7b3ba9da218
|
/test/test_freeze.py
|
034191f57d4ecdddd9b91c6aeced468a2d4528c3
|
[
"MIT"
] |
permissive
|
DataGreed/datafreeze
|
100bbdccd494134b74fe484c2e8b28a83250d184
|
8d75e0cfb2c6f6f59112c0c86abd264d7078aaa8
|
refs/heads/master
| 2023-01-19T05:11:59.661284
| 2020-11-19T15:26:21
| 2020-11-19T15:26:21
| 314,287,724
| 0
| 0
|
MIT
| 2020-11-19T15:26:22
| 2020-11-19T15:22:44
| null |
UTF-8
|
Python
| false
| false
| 4,979
|
py
|
# coding: utf8
from __future__ import unicode_literals
import os
from csv import reader
import unittest
from tempfile import mkdtemp
from shutil import rmtree
from six import PY3, text_type, binary_type
from dataset import connect
from datafreeze.app import freeze
from datafreeze.format.fcsv import value_to_str
from .sample_data import TEST_DATA
class FreezeTestCase(unittest.TestCase):
    """End-to-end tests for freeze(): exporting a small table to CSV/JSON."""

    def setUp(self):
        # In-memory SQLite database seeded with the sample rows, plus a
        # scratch directory for the exported files.
        self.db = connect('sqlite://')
        self.tbl = self.db['weather']
        for row in TEST_DATA:
            self.tbl.insert(row)
        self.d = mkdtemp()

    def tearDown(self):
        rmtree(self.d, ignore_errors=True)

    def test_freeze(self):
        # Non-ASCII filenames must work whether passed as bytes or as text.
        freeze(self.tbl.all(), format='csv',
               filename=u'wäther.csv'.encode('utf8'), prefix=self.d)
        self.assertTrue(os.path.exists(os.path.join(self.d, u'wäther.csv')))
        freeze(self.tbl.all(), format='csv',
               filename=u'wäther.csv', prefix=self.d)
        self.assertTrue(os.path.exists(os.path.join(self.d, u'wäther.csv')))

    def test_freeze_csv(self):
        # Every exported CSV cell must match the stringified original value.
        freeze(self.tbl.all(), format='csv',
               filename='weather.csv', prefix=self.d)
        path = os.path.join(self.d, 'weather.csv')
        if PY3:
            fh = open(path, 'rt', encoding='utf8', newline='')
        else:
            fh = open(path, 'rU')
        try:
            rows = list(reader(fh))
            keys = rows[0]
            for i, d1 in enumerate(TEST_DATA):
                d2 = dict(zip(keys, rows[i + 1]))
                for k in d1.keys():
                    v2 = d2[k]
                    if not PY3:
                        v2 = v2.decode('utf8')
                    v1 = value_to_str(d1[k])
                    # Normalise the expected value to text for the comparison.
                    if not isinstance(v1, text_type):
                        if isinstance(v1, binary_type):
                            v1 = text_type(v1, encoding='utf8')
                        else:
                            v1 = '%s' % v1
                    self.assertEqual(v2, v1)
        finally:
            fh.close()

    def test_memory_streams(self):
        # freeze() must write to a caller-supplied stream without closing it.
        if PY3:
            from io import StringIO
        else:
            from io import BytesIO as StringIO
        for fmt in ('csv', 'json', 'tabson'):
            with StringIO() as fd:
                freeze(self.tbl.all(), format=fmt, fileobj=fd)
                self.assertFalse(fd.closed, 'fileobj was closed for format %s' % fmt)
                fd.getvalue()  # should not throw

    def test_freeze_json_no_wrap(self):
        freeze(self.tbl.all(), format='json',
               filename='weather.csv', prefix=self.d, wrap=False)
        path = os.path.join(self.d, 'weather.csv')
        if PY3:
            fh = open(path, 'rt', encoding='utf8', newline='')
        else:
            fh = open(path, 'rU')
        try:
            import json
            data = json.load(fh)
            self.assertIsInstance(data, list,
                                  'Without wrapping, returned JSON should be a list')
        finally:
            fh.close()

    def test_freeze_json_wrap(self):
        freeze(self.tbl.all(), format='json',
               filename='weather.csv', prefix=self.d, wrap=True)
        path = os.path.join(self.d, 'weather.csv')
        if PY3:
            fh = open(path, 'rt', encoding='utf8', newline='')
        else:
            fh = open(path, 'rU')
        try:
            import json
            data = json.load(fh)
            self.assertIsInstance(data, dict,
                                  'With wrapping, returned JSON should be a dict')
            self.assertIn('results', data.keys())
            self.assertIn('count', data.keys())
            self.assertIn('meta', data.keys())
        finally:
            fh.close()
class SerializerTestCase(unittest.TestCase):
    """Unit tests for the Serializer base class and value_to_str()."""

    def test_serializer(self):
        from datafreeze.format.common import Serializer
        from datafreeze.config import Export
        from datafreeze.util import FreezeException
        # A config without a filename must be rejected.
        self.assertRaises(FreezeException, Serializer, {}, {})
        # An unknown mode makes the 'wrap' property raise.
        s = Serializer(Export({'filename': 'f'}, {'mode': 'nomode'}), '')
        self.assertRaises(FreezeException, getattr, s, 'wrap')
        s = Serializer(Export({'filename': 'f'}, {}), '')
        s.wrap
        # NOTE(review): '-' presumably selects a standard stream; the test
        # only checks that a fileobj is available in that case.
        s = Serializer(Export({'filename': '-'}, {}), '')
        self.assertTrue(s.fileobj)

    def test_value_to_str1(self):
        # datetimes serialise to ISO-8601.
        assert '2011-01-01T00:00:00' == value_to_str(TEST_DATA[0]['date']), \
            value_to_str(TEST_DATA[0]['date'])

    def test_value_to_str2(self):
        # Unicode text round-trips (bytes on Python 2, str on Python 3).
        if PY3:
            assert 'hóla' == value_to_str('\u0068\u00f3\u006c\u0061')
        else:
            assert u'hóla'.encode('utf-8') == value_to_str(u'\u0068\u00f3\u006c\u0061'), \
                [value_to_str(u'\u0068\u00f3\u006c\u0061')]

    def test_value_to_str3(self):
        # None becomes the empty string.
        assert '' == value_to_str(None)

    def test_value_to_str4(self):
        # Non-scalar values pass through unchanged.
        assert [] == value_to_str([])
# Allow running this test module directly: `python test_freeze.py`.
if __name__ == '__main__':
    unittest.main()
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
b3127ace3de5be20b96e7bb7b961e2a5860dcc68
|
9a47456b7e609f1cbf5987134c2a987e21119062
|
/initialFRM/frmapp/prevevent.py
|
e563ea5f277bfb9d5f7eacfdbf749fc7d914ac2f
|
[] |
no_license
|
daisedandconfused/frmapp
|
40dab22e8b11ad4bb6108b5a2e79f4a8b99bc526
|
ea1e17f668c3436c7b1191cc770d5ad339fa1d3f
|
refs/heads/master
| 2020-04-29T05:31:18.095844
| 2019-04-28T21:24:09
| 2019-04-28T21:24:09
| 175,886,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
import itertools
import numpy as np
def previousevent(n):
    """Return every (n - 2)-long tuple over {1, 2}, in lexicographic order.

    For n = 4 this yields [(1, 1), (1, 2), (2, 1), (2, 2)].
    """
    repeats = n - 2
    return list(itertools.product([1, 2], repeat=repeats))
# matrix = []
# while p !=[]:
# matrix.append(p[:1])
# p = p[1:]
# return matrix
# v = previousevent(4)
# y = v[1][0]
# print(v, y)
|
[
"noreply@github.com"
] |
daisedandconfused.noreply@github.com
|
875e0a5d40dbb7a1d924349d80042bed4b1229e8
|
3ef329a37aacd7edaa61e6f1af0a8d5acba0572d
|
/bonustest/shop/urls.py
|
056379dfbc3c04a26b752d160efcd0172e43b5cb
|
[] |
no_license
|
MasterDingo/django-bonus-angular
|
6c6f8e0e36838efc66e37b331462c8c0baead661
|
32d59a44374704bfd49a4f29c9d8ba5ed6497694
|
refs/heads/master
| 2021-01-11T15:50:08.791967
| 2017-02-22T01:28:37
| 2017-02-22T01:28:37
| 79,937,264
| 0
| 1
| null | 2017-10-24T15:17:41
| 2017-01-24T17:50:35
|
Python
|
UTF-8
|
Python
| false
| false
| 834
|
py
|
from django.conf.urls import url
from .views import (
CategoryListView, CategoryView, ProductsListView, ProductView,
IndexView
)
from .api import urls as api_urls
from rest_framework.urlpatterns import format_suffix_patterns
"""
urlpatterns = [
url(r'login', login_view, {"template_name": "login.html"}, name="login"),
url(r'logout', logout_view, {"next_page": "/"}, name="logout"),
url(r'^$', CategoryListView.as_view(), name="categories_list"),
url(r'category/(?P<cat_id>\d+)', CategoryView.as_view(), name="category"),
url(r'products/$', ProductsListView.as_view(),
name="products_list"),
url(r'products/(?P<pk>\d+)/$', ProductView.as_view(), name="product"),
]
# urlpatterns = format_suffix_patterns(urlpatterns)
"""
urlpatterns = [
    # Single-page-app entry point: r'' matches every URL, so IndexView serves
    # all paths and client-side (Angular) routing handles the rest. The older
    # per-view patterns are kept above in the commented-out block.
    url(r'', IndexView.as_view(), name="index"),
]
|
[
"v.derbentsev@gmail.com"
] |
v.derbentsev@gmail.com
|
4e4b3405861bc07f77f7a898adc75b1a3a43e93d
|
bd0b254b89f55781d53e51d146b4cf5f278ccff8
|
/platforms/nuttx/NuttX/tools/menuconfig.py
|
c6a512d318f5f9ba8b2e371a88d1ac82dee7d844
|
[
"BSD-3-Clause"
] |
permissive
|
XingGX/Firmware
|
e4a05caeefae241ddc83a61b7fd4a43e52630e62
|
f26e7801f8f76a371f05dcb86f61f0bcd8df9f30
|
refs/heads/master
| 2021-04-30T08:30:24.146609
| 2019-03-30T08:28:05
| 2019-03-30T08:28:05
| 121,375,065
| 1
| 0
|
BSD-3-Clause
| 2019-03-30T08:28:06
| 2018-02-13T11:32:40
|
C++
|
UTF-8
|
Python
| false
| false
| 101,613
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019, Nordic Semiconductor ASA and Ulf Magnusson
# SPDX-License-Identifier: ISC
"""
Overview
========
A curses-based menuconfig implementation. The interface should feel familiar to
people used to mconf ('make menuconfig').
Supports the same keys as mconf, and also supports a set of keybindings
inspired by Vi:
J/K : Down/Up
L : Enter menu/Toggle item
H : Leave menu
Ctrl-D/U: Page Down/Page Up
G/End : Jump to end of list
g/Home : Jump to beginning of list
The mconf feature where pressing a key jumps to a menu entry with that
character in it in the current menu isn't supported. A jump-to feature for
jumping directly to any symbol (including invisible symbols), choice, menu or
comment (as in a Kconfig 'comment "Foo"') is available instead.
Space and Enter are "smart" and try to do what you'd expect for the given menu
entry.
A few different modes are available:
F: Toggle show-help mode, which shows the help text of the currently selected
item in the window at the bottom of the menu display. This is handy when
browsing through options.
C: Toggle show-name mode, which shows the symbol name before each symbol menu
entry
A: Toggle show-all mode, which shows all items, including currently invisible
items and items that lack a prompt. Invisible items are drawn in a different
style to make them stand out.
Running
=======
menuconfig.py can be run either as a standalone executable or by calling the
menuconfig() function with an existing Kconfig instance. The second option is a
bit inflexible in that it will still load and save .config, etc.
When run in standalone mode, the top-level Kconfig file to load can be passed
as a command-line argument. With no argument, it defaults to "Kconfig".
The KCONFIG_CONFIG environment variable specifies the .config file to load (if
it exists) and save. If KCONFIG_CONFIG is unset, ".config" is used.
$srctree is supported through Kconfiglib.
Color schemes
=============
It is possible to customize the color scheme by setting the MENUCONFIG_STYLE
environment variable. For example, setting it to 'aquatic' will enable an
alternative, less yellow, more 'make menuconfig'-like color scheme, contributed
by Mitja Horvat (pinkfluid).
This is the current list of built-in styles:
- default classic Kconfiglib theme with a yellow accent
- monochrome colorless theme (uses only bold and standout) attributes,
this style is used if the terminal doesn't support colors
- aquatic blue tinted style loosely resembling the lxdialog theme
It is possible to customize the current style by changing colors of UI
elements on the screen. This is the list of elements that can be stylized:
- path Top row in the main display, with the menu path
- separator Separator lines between windows. Also used for the top line
in the symbol information display.
- list List of items, e.g. the main display
- selection Style for the selected item
- inv-list Like list, but for invisible items. Used in show-all mode.
- inv-selection Like selection, but for invisible items. Used in show-all
mode.
- help Help text windows at the bottom of various fullscreen
dialogs
- show-help Window showing the help text in show-help mode
- frame Frame around dialog boxes
- body Body of dialog boxes
- edit Edit box in pop-up dialogs
- jump-edit Edit box in jump-to dialog
- text Symbol information text
The color definition is a comma separated list of attributes:
- fg:COLOR Set the foreground/background colors. COLOR can be one of
* or * the basic 16 colors (black, red, green, yellow, blue,
- bg:COLOR magenta,cyan, white and brighter versions, for example,
brightred). On terminals that support more than 8 colors,
you can also directly put in a color number, e.g. fg:123
(hexadecimal and octal constants are accepted as well).
Colors outside the range -1..curses.COLORS-1 (which is
terminal-dependent) are ignored (with a warning). The COLOR
can be also specified using a RGB value in the HTML
notation, for example #RRGGBB. If the terminal supports
color changing, the color is rendered accurately.
Otherwise, the visually nearest color is used.
If the background or foreground color of an element is not
specified, it defaults to -1, representing the default
terminal foreground or background color.
Note: On some terminals a bright version of the color
implies bold.
- bold Use bold text
- underline Use underline text
- standout Standout text attribute (reverse color)
More often than not, some UI elements share the same color definition. In such
cases the right value may specify an UI element from which the color definition
will be copied. For example, "separator=help" will apply the current color
definition for "help" to "separator".
A keyword without the '=' is assumed to be a style template. The template name
is looked up in the built-in styles list and the style definition is expanded
in-place. With this, built-in styles can be used as basis for new styles.
For example, take the aquatic theme and give it a red selection bar:
MENUCONFIG_STYLE="aquatic selection=fg:white,bg:red"
If there's an error in the style definition or if a missing style is assigned
to, the assignment will be ignored, along with a warning being printed on
stderr.
The 'default' theme is always implicitly parsed first (or the 'monochrome'
theme if the terminal lacks colors), so the following two settings have the
same effect:
MENUCONFIG_STYLE="selection=fg:white,bg:red"
MENUCONFIG_STYLE="default selection=fg:white,bg:red"
Other features
==============
- Seamless terminal resizing
- No dependencies on *nix, as the 'curses' module is in the Python standard
library
- Unicode text entry
- Improved information screen compared to mconf:
* Expressions are split up by their top-level &&/|| operands to improve
readability
* Undefined symbols in expressions are pointed out
* Menus and comments have information displays
* Kconfig definitions are printed
* The include path is shown, listing the locations of the 'source'
statements that included the Kconfig file of the symbol (or other
item)
Limitations
===========
- Python 3 only
This is mostly due to Python 2 not having curses.get_wch(), which is needed
for Unicode support.
- Doesn't work out of the box on Windows
Has been tested to work with the wheels provided at
https://www.lfd.uci.edu/~gohlke/pythonlibs/#curses though.
"""
import curses
import errno
import locale
import os
import platform
import re
import sys
import textwrap
from kconfiglib import Symbol, Choice, MENU, COMMENT, MenuNode, \
BOOL, TRISTATE, STRING, INT, HEX, UNKNOWN, \
AND, OR, \
expr_str, expr_value, split_expr, \
standard_sc_expr_str, \
TRI_TO_STR, TYPE_TO_STR, \
standard_kconfig, standard_config_filename
#
# Configuration variables
#
# If True, try to convert LC_CTYPE to a UTF-8 locale if it is set to the C
# locale (which implies ASCII). This fixes curses Unicode I/O issues on systems
# with bad defaults. ncurses configures itself from the locale settings.
#
# Related PEP: https://www.python.org/dev/peps/pep-0538/
_CONVERT_C_LC_CTYPE_TO_UTF8 = True
# How many steps an implicit submenu will be indented. Implicit submenus are
# created when an item depends on the symbol before it. Note that symbols
# defined with 'menuconfig' create a separate menu instead of indenting.
_SUBMENU_INDENT = 4
# Number of steps for Page Up/Down to jump
_PG_JUMP = 6
# Height of the help window in show-help mode
_SHOW_HELP_HEIGHT = 8
# How far the cursor needs to be from the edge of the window before it starts
# to scroll. Used for the main menu display, the information display, the
# search display, and for text boxes.
_SCROLL_OFFSET = 5
# Minimum width of dialogs that ask for text input
_INPUT_DIALOG_MIN_WIDTH = 30
# Number of arrows pointing up/down to draw when a window is scrolled
_N_SCROLL_ARROWS = 14
# Lines of help text shown at the bottom of the "main" display
_MAIN_HELP_LINES = """
[Space/Enter] Toggle/enter [ESC] Leave menu [S] Save
[O] Load [?] Symbol info [/] Jump to symbol
[F] Toggle show-help mode [C] Toggle show-name mode [A] Toggle show-all mode
[Q] Quit (prompts for save) [D] Save minimal config (advanced)
"""[1:-1].split("\n")
# Lines of help text shown at the bottom of the information dialog
_INFO_HELP_LINES = """
[ESC/q] Return to menu [/] Jump to symbol
"""[1:-1].split("\n")
# Lines of help text shown at the bottom of the search dialog
_JUMP_TO_HELP_LINES = """
Type text to narrow the search. Regexes are supported (via Python's 're'
module). The up/down cursor keys step in the list. [Enter] jumps to the
selected symbol. [ESC] aborts the search. Type multiple space-separated
strings/regexes to find entries that match all of them. Type Ctrl-F to
view the help of the selected item without leaving the dialog.
"""[1:-1].split("\n")
#
# Styling
#
_STYLES = {
"default": """
path=fg:black,bg:white,bold
separator=fg:black,bg:yellow,bold
list=fg:black,bg:white
selection=fg:white,bg:blue,bold
inv-list=fg:red,bg:white
inv-selection=fg:red,bg:blue
help=path
show-help=list
frame=fg:black,bg:yellow,bold
body=fg:white,bg:black
edit=fg:white,bg:blue
jump-edit=edit
text=list
""",
# This style is forced on terminals that do no support colors
"monochrome": """
path=bold
separator=bold,standout
list=
selection=bold,standout
inv-list=bold
inv-selection=bold,standout
help=bold
show-help=
frame=bold,standout
body=
edit=standout
jump-edit=
text=
""",
# Blue tinted style loosely resembling lxdialog
"aquatic": """
path=fg:cyan,bg:blue,bold
separator=fg:white,bg:cyan,bold
help=path
frame=fg:white,bg:cyan,bold
body=fg:brightwhite,bg:blue
edit=fg:black,bg:white
"""
}
# Standard colors definition
_STYLE_STD_COLORS = {
# Basic colors
"black": curses.COLOR_BLACK,
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"magenta": curses.COLOR_MAGENTA,
"cyan": curses.COLOR_CYAN,
"white": curses.COLOR_WHITE,
# Bright versions
"brightblack": curses.COLOR_BLACK + 8,
"brightred": curses.COLOR_RED + 8,
"brightgreen": curses.COLOR_GREEN + 8,
"brightyellow": curses.COLOR_YELLOW + 8,
"brightblue": curses.COLOR_BLUE + 8,
"brightmagenta": curses.COLOR_MAGENTA + 8,
"brightcyan": curses.COLOR_CYAN + 8,
"brightwhite": curses.COLOR_WHITE + 8,
# Aliases
"purple": curses.COLOR_MAGENTA,
"brightpurple": curses.COLOR_MAGENTA + 8,
}
def _rgb_to_6cube(rgb):
# Converts an 888 RGB color to a 3-tuple (nice in that it's hashable)
# representing the closest xterm 256-color 6x6x6 color cube color.
#
# The xterm 256-color extension uses a RGB color palette with components in
# the range 0-5 (a 6x6x6 cube). The catch is that the mapping is nonlinear.
# Index 0 in the 6x6x6 cube is mapped to 0, index 1 to 95, then 135, 175,
# etc., in increments of 40. See the links below:
#
# https://commons.wikimedia.org/wiki/File:Xterm_256color_chart.svg
# https://github.com/tmux/tmux/blob/master/colour.c
# 48 is the middle ground between 0 and 95.
return tuple(0 if x < 48 else int(round(max(1, (x - 55)/40))) for x in rgb)
def _6cube_to_rgb(r6g6b6):
# Returns the 888 RGB color for a 666 xterm color cube index
return tuple(0 if x == 0 else 40*x + 55 for x in r6g6b6)
def _rgb_to_gray(rgb):
# Converts an 888 RGB color to the index of an xterm 256-color grayscale
# color with approx. the same perceived brightness
# Calculate the luminance (gray intensity) of the color. See
# https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
# and
# https://www.w3.org/TR/AERT/#color-contrast
luma = 0.299*rgb[0] + 0.587*rgb[1] + 0.114*rgb[2]
# Closest index in the grayscale palette, which starts at RGB 0x080808,
# with stepping 0x0A0A0A
index = int(round((luma - 8)/10))
# Clamp the index to 0-23, corresponding to 232-255
return max(0, min(index, 23))
def _gray_to_rgb(index):
# Convert a grayscale index to its closet single RGB component
return 3*(10*index + 8,) # Returns a 3-tuple
# Obscure Python: We never pass a value for rgb2index, and it keeps pointing to
# the same dict. This avoids a global.
def _alloc_rgb(rgb, rgb2index={}):
    # Initialize a new entry in the xterm palette to the given RGB color,
    # returning its index. If the color has already been initialized, the index
    # of the existing entry is returned.
    #
    # ncurses is palette-based, so we need to overwrite palette entries to make
    # new colors.
    #
    # The colors from 0 to 15 are user-defined, and there's no way to query
    # their RGB values, so we better leave them untouched. Also leave any
    # hypothetical colors above 255 untouched (though we're unlikely to
    # allocate that many colors anyway).
    #
    # NOTE: The mutable default argument 'rgb2index' is deliberate (see the
    # comment above this function): it acts as a persistent rgb -> palette
    # index cache shared across calls, avoiding a global.

    if rgb in rgb2index:
        return rgb2index[rgb]

    # Many terminals allow the user to customize the first 16 colors. Avoid
    # changing their values.
    color_index = 16 + len(rgb2index)
    if color_index >= 256:
        _warn("Unable to allocate new RGB color ", rgb, ". Too many colors "
              "allocated.")
        return 0

    # Map each RGB component from the range 0-255 to the range 0-1000, which is
    # what curses uses
    curses.init_color(color_index, *(int(round(1000*x/255)) for x in rgb))
    rgb2index[rgb] = color_index

    return color_index
def _color_from_num(num):
    # Returns the index of a color that looks like color 'num' in the xterm
    # 256-color palette (but that might not be 'num', if we're redefining
    # colors)

    # - _alloc_rgb() won't touch the first 16 colors or any (hypothetical)
    #   colors above 255, so we can always return them as-is
    #
    # - If the terminal doesn't support changing color definitions, or if
    #   curses.COLORS < 256, _alloc_rgb() won't touch any color, and all colors
    #   can be returned as-is
    if num < 16 or num > 255 or not curses.can_change_color() or \
       curses.COLORS < 256:
        return num

    # _alloc_rgb() might redefine colors, so emulate the xterm 256-color
    # palette by allocating new colors instead of returning color numbers
    # directly
    if num < 232:
        # Colors 16-231 are the 6x6x6 cube; decode num into its cube triple
        num -= 16
        return _alloc_rgb(_6cube_to_rgb(((num//36)%6, (num//6)%6, num%6)))

    # Colors 232-255 are the 24-entry grayscale ramp
    return _alloc_rgb(_gray_to_rgb(num - 232))
def _color_from_rgb(rgb):
    # Returns the index of a color matching the 888 RGB color 'rgb'. The
    # returned color might be an ~exact match or an approximation, depending on
    # terminal capabilities.

    # Calculates the squared Euclidean distance between two RGB colors
    # (squared is fine here -- only used for comparisons)
    def dist(r1, r2): return sum((x - y)**2 for x, y in zip(r1, r2))

    if curses.COLORS >= 256:
        # Assume we're dealing with xterm's 256-color extension

        if curses.can_change_color():
            # Best case -- the terminal supports changing palette entries via
            # curses.init_color(). Initialize an unused palette entry and
            # return it.
            return _alloc_rgb(rgb)

        # Second best case -- pick between the xterm 256-color extension colors

        # Closest 6-cube "color" color
        c6 = _rgb_to_6cube(rgb)
        # Closest gray color
        gray = _rgb_to_gray(rgb)

        if dist(rgb, _6cube_to_rgb(c6)) < dist(rgb, _gray_to_rgb(gray)):
            # Use the "color" color from the 6x6x6 color palette. Calculate the
            # color number from the 6-cube index triplet.
            return 16 + 36*c6[0] + 6*c6[1] + c6[2]

        # Use the color from the gray palette
        return 232 + gray

    # Terminal not in xterm 256-color mode. Brute-force search the palette for
    # the visually nearest color. This is probably the best we can
    # do, or is it? Submit patches. :)
    min_dist = float('inf')
    best = -1
    for color in range(curses.COLORS):
        # ncurses uses the range 0..1000. Scale that down to 0..255.
        d = dist(rgb, tuple(int(round(255*c/1000))
                            for c in curses.color_content(color)))
        if d < min_dist:
            min_dist = d
            best = color

    return best
def _parse_style(style_str, parsing_default):
    """Parse a style string into the global _style dict.

    style_str:
      String with whitespace-separated '<element>=<style>' assignments.
      Anything not containing '=' is assumed to be a reference to a built-in
      style template (see _STYLES), which is treated as if all the
      assignments from that template were inserted at that point.

    parsing_default:
      True when we're implicitly parsing the 'default'/'monochrome' style,
      to prevent warnings about keys not existing yet.
    """
    for sline in style_str.split():
        # Words without a "=" character represents a style template
        if "=" in sline:
            key, data = sline.split("=", 1)

            # The 'default' style template is assumed to define all keys. We
            # run _style_to_curses() for non-existing keys as well, so that we
            # print warnings for errors to the right of '=' for those too.
            if key not in _style and not parsing_default:
                _warn("Ignoring non-existent style", key)

            # If data is a reference to another key, copy its style
            if data in _style:
                _style[key] = _style[data]
            else:
                _style[key] = _style_to_curses(data)

        elif sline in _STYLES:
            # Recursively parse style template. Ignore styles that don't exist,
            # for backwards/forwards compatibility.
            _parse_style(_STYLES[sline], parsing_default)

        else:
            _warn("Ignoring non-existent style template", sline)
# Dictionary mapping element types to the curses attributes used to display
# them
_style = {}
def _style_to_curses(style_def):
    """Parse one style definition string (the part right of '=').

    Returns a combined curses attribute: the color-pair attribute for the
    parsed foreground/background colors OR'd with any text attributes
    (bold/underline/standout). Unknown fields produce a warning and are
    ignored; bad colors fall back to -1 (the terminal default).
    """

    def parse_color(color_def):
        # Parses a single "fg:COLOR"/"bg:COLOR" field into a curses color
        # number, returning -1 (terminal default) on errors
        color_def = color_def.split(":", 1)[1]

        # Predefined color name?
        if color_def in _STYLE_STD_COLORS:
            return _color_from_num(_STYLE_STD_COLORS[color_def])

        # HTML format, #RRGGBB. fullmatch() so that trailing garbage (e.g.
        # "#11223344") is rejected instead of silently using only the first
        # six hex digits.
        if re.fullmatch("#[A-Fa-f0-9]{6}", color_def):
            return _color_from_rgb((
                int(color_def[1:3], 16),
                int(color_def[3:5], 16),
                int(color_def[5:7], 16)))

        # Color number? Base 0 accepts decimal, hex, and octal constants.
        try:
            color_num = _color_from_num(int(color_def, 0))
        except ValueError:
            _warn("Ignoring color", color_def, "that's neither predefined "
                  "nor a number")
            return -1

        if not -1 <= color_num < curses.COLORS:
            _warn("Ignoring color {}, which is outside the range "
                  "-1..curses.COLORS-1 (-1..{})"
                  .format(color_def, curses.COLORS - 1))
            return -1

        return color_num

    fg_color = -1
    bg_color = -1
    attrs = 0

    if style_def:
        for field in style_def.split(","):
            if field.startswith("fg:"):
                fg_color = parse_color(field)
            elif field.startswith("bg:"):
                bg_color = parse_color(field)
            elif field == "bold":
                # A_BOLD tends to produce faint and hard-to-read text on the
                # Windows console, especially with the old color scheme, before
                # the introduction of
                # https://blogs.msdn.microsoft.com/commandline/2017/08/02/updating-the-windows-console-colors/
                attrs |= curses.A_NORMAL if _IS_WINDOWS else curses.A_BOLD
            elif field == "standout":
                attrs |= curses.A_STANDOUT
            elif field == "underline":
                attrs |= curses.A_UNDERLINE
            else:
                _warn("Ignoring unknown style attribute", field)

    return _style_attr(fg_color, bg_color, attrs)
def _init_styles():
    """Populate the global _style dict after curses has been initialized."""
    if curses.has_colors():
        curses.use_default_colors()

    # Use the 'monochrome' style template as the base on terminals without
    # color
    _parse_style("default" if curses.has_colors() else "monochrome", True)

    # Add any user-defined style from the environment, overriding the base
    if "MENUCONFIG_STYLE" in os.environ:
        _parse_style(os.environ["MENUCONFIG_STYLE"], False)
# color_attribs holds the color pairs we've already created, indexed by a
# (<foreground color>, <background color>) tuple.
#
# Obscure Python: We never pass a value for color_attribs, and it keeps
# pointing to the same dict. This avoids a global.
def _style_attr(fg_color, bg_color, attribs, color_attribs={}):
    # Returns an attribute with the specified foreground and background color
    # and the attributes in 'attribs'. Reuses color pairs already created if
    # possible, and creates a new color pair otherwise.
    #
    # Returns 'attribs' if colors aren't supported.
    #
    # NOTE: 'color_attribs' is a deliberate mutable default (see the comment
    # above this function): a persistent (fg, bg) -> color-pair-attribute
    # cache shared across calls, avoiding a global.

    if not curses.has_colors():
        return attribs

    if (fg_color, bg_color) not in color_attribs:
        # Create new color pair. Color pair number 0 is hardcoded and cannot be
        # changed, hence the +1s.
        curses.init_pair(len(color_attribs) + 1, fg_color, bg_color)
        color_attribs[(fg_color, bg_color)] = \
            curses.color_pair(len(color_attribs) + 1)

    return color_attribs[(fg_color, bg_color)] | attribs
#
# Main application
#
# Used as the entry point in setup.py
def _main():
    """Entry point used by setup.py: configure the standard Kconfig tree."""
    menuconfig(standard_kconfig())
def menuconfig(kconf):
    """
    Launches the configuration interface, returning after the user exits.

    kconf:
      Kconfig instance to be configured
    """
    global _kconf
    global _show_all
    global _conf_changed

    _kconf = kconf

    # Load existing configuration and set _conf_changed True if it is outdated
    _conf_changed = _load_config()

    # Any visible items in the top menu?
    _show_all = False
    if not _shown_nodes(kconf.top_node):
        # Nothing visible. Start in show-all mode and try again.
        _show_all = True
        if not _shown_nodes(kconf.top_node):
            # Give up. The implementation relies on always having a selected
            # node.
            print("Empty configuration -- nothing to configure.\n"
                  "Check that environment variables are set properly.")
            return

    # Disable warnings. They get mangled in curses mode, and we deal with
    # errors ourselves.
    kconf.disable_warnings()

    # Make curses use the locale settings specified in the environment
    locale.setlocale(locale.LC_ALL, "")

    # Try to fix Unicode issues on systems with bad defaults
    if _CONVERT_C_LC_CTYPE_TO_UTF8:
        _convert_c_lc_ctype_to_utf8()

    # Get rid of the delay between pressing ESC and jumping to the parent menu,
    # unless the user has set ESCDELAY (see ncurses(3)). This makes the UI much
    # smoother to work with.
    #
    # Note: This is strictly pretty iffy, since escape codes for e.g. cursor
    # keys start with ESC, but I've never seen it cause problems in practice
    # (probably because it's unlikely that the escape code for a key would get
    # split up across read()s, at least with a terminal emulator). Please
    # report if you run into issues. Some suitable small default value could be
    # used here instead in that case. Maybe it's silly to not put in the
    # smallest imperceptible delay here already, though I don't like guessing.
    #
    # (From a quick glance at the ncurses source code, ESCDELAY might only be
    # relevant for mouse events there, so maybe escapes are assumed to arrive
    # in one piece already...)
    os.environ.setdefault("ESCDELAY", "0")

    # Enter curses mode. _menuconfig() returns a string to print on exit, after
    # curses has been de-initialized.
    print(curses.wrapper(_menuconfig))
def _load_config():
    # Loads any existing .config file. See the Kconfig.load_config() docstring.
    #
    # Returns True if .config is missing or outdated. We always prompt for
    # saving the configuration in that case.

    if not _kconf.load_config() or _kconf.missing_syms:
        # Either no .config, or assignments to undefined symbols in the
        # existing .config (which would get removed when saving)
        return True

    for sym in _kconf.unique_defined_syms:
        if sym.user_value is None:
            if sym.config_string:
                # Unwritten symbol: would appear in a saved .config but has no
                # user value yet
                return True
        elif sym.type in (BOOL, TRISTATE):
            if sym.tri_value != sym.user_value:
                # Written bool/tristate symbol, new value
                return True
        elif sym.str_value != sym.user_value:
            # Written string/int/hex symbol, new value
            return True

    # No need to prompt for save
    return False
# Global variables used below:
#
# _stdscr:
# stdscr from curses
#
# _cur_menu:
# Menu node of the menu (or menuconfig symbol, or choice) currently being
# shown
#
# _shown:
# List of items in _cur_menu that are shown (ignoring scrolling). In
# show-all mode, this list contains all items in _cur_menu. Otherwise, it
# contains just the visible items.
#
# _sel_node_i:
# Index in _shown of the currently selected node
#
# _menu_scroll:
# Index in _shown of the top row of the main display
#
# _parent_screen_rows:
# List/stack of the row numbers that the selections in the parent menus
# appeared on. This is used to prevent the scrolling from jumping around
# when going in and out of menus.
#
# _show_help/_show_name/_show_all:
# If True, the corresponding mode is on. See the module docstring.
#
# _conf_changed:
# True if the configuration has been changed. If False, we don't bother
# showing the save-and-quit dialog.
#
# We reset this to False whenever the configuration is saved explicitly
# from the save dialog.
def _menuconfig(stdscr):
    """Main event loop for the list-of-symbols display.

    Runs inside curses.wrapper(); returns the message to print after curses
    has been de-initialized (or loops forever until the user quits).
    """
    global _stdscr
    global _conf_changed
    global _show_help
    global _show_name

    _stdscr = stdscr

    _init()

    while True:
        _draw_main()
        curses.doupdate()

        c = _get_wch_compat(_menu_win)

        if c == curses.KEY_RESIZE:
            _resize_main()

        elif c in (curses.KEY_DOWN, "j", "J"):
            _select_next_menu_entry()

        elif c in (curses.KEY_UP, "k", "K"):
            _select_prev_menu_entry()

        elif c in (curses.KEY_NPAGE, "\x04"):  # Page Down/Ctrl-D
            # Keep it simple. This way we get sane behavior for small windows,
            # etc., for free.
            for _ in range(_PG_JUMP):
                _select_next_menu_entry()

        elif c in (curses.KEY_PPAGE, "\x15"):  # Page Up/Ctrl-U
            for _ in range(_PG_JUMP):
                _select_prev_menu_entry()

        elif c in (curses.KEY_END, "G"):
            _select_last_menu_entry()

        elif c in (curses.KEY_HOME, "g"):
            _select_first_menu_entry()

        elif c in (curses.KEY_RIGHT, " ", "\n", "l", "L"):
            # Do appropriate node action. Only Space is treated specially,
            # preferring to toggle nodes rather than enter menus.

            sel_node = _shown[_sel_node_i]

            if sel_node.is_menuconfig and not \
               (c == " " and _prefer_toggle(sel_node.item)):

                _enter_menu(sel_node)

            else:
                _change_node(sel_node)
                if _is_y_mode_choice_sym(sel_node.item) and not sel_node.list:
                    # Immediately jump to the parent menu after making a choice
                    # selection, like 'make menuconfig' does, except if the
                    # menu node has children (which can happen if a symbol
                    # 'depends on' a choice symbol that immediately precedes
                    # it).
                    _leave_menu()

        elif c in ("n", "N"):
            _set_sel_node_tri_val(0)

        elif c in ("m", "M"):
            _set_sel_node_tri_val(1)

        elif c in ("y", "Y"):
            _set_sel_node_tri_val(2)

        elif c in (curses.KEY_LEFT, curses.KEY_BACKSPACE, _ERASE_CHAR,
                   "\x1B", "h", "H"):  # \x1B = ESC

            if c == "\x1B" and _cur_menu is _kconf.top_node:
                # ESC in the top menu asks to quit instead of leaving a menu
                res = _quit_dialog()
                if res:
                    return res
            else:
                _leave_menu()

        elif c in ("o", "O"):
            if _conf_changed:
                c = _key_dialog(
                    "Load",
                    "You have unsaved changes. Load new\n"
                    "configuration anyway?\n"
                    "\n"
                    "        (Y)es  (C)ancel",
                    "yc")

                if c is None or c == "c":
                    continue

            if _load_dialog():
                _conf_changed = True

        elif c in ("s", "S"):
            if _save_dialog(_kconf.write_config, standard_config_filename(),
                            "configuration"):
                _conf_changed = False

        elif c in ("d", "D"):
            _save_dialog(_kconf.write_min_config, "defconfig",
                         "minimal configuration")

        elif c == "/":
            _jump_to_dialog()
            # The terminal might have been resized while the fullscreen jump-to
            # dialog was open
            _resize_main()

        elif c == "?":
            _info_dialog(_shown[_sel_node_i], False)
            # The terminal might have been resized while the fullscreen info
            # dialog was open
            _resize_main()

        elif c in ("f", "F"):
            _show_help = not _show_help
            _set_style(_help_win, "show-help" if _show_help else "help")
            _resize_main()

        elif c in ("c", "C"):
            _show_name = not _show_name

        elif c in ("a", "A"):
            _toggle_show_all()

        elif c in ("q", "Q"):
            res = _quit_dialog()
            if res:
                return res
def _quit_dialog():
    """Ask whether to save before quitting.

    Returns None if the user cancels quitting, and otherwise the message to
    print after de-initializing curses.
    """
    if not _conf_changed:
        return "No changes to save"

    while True:
        c = _key_dialog(
            "Quit",
            " Save configuration?\n"
            "\n"
            "(Y)es  (N)o  (C)ancel",
            "ync")

        if c is None or c == "c":
            return None

        if c == "y":
            if _try_save(_kconf.write_config, standard_config_filename(),
                         "configuration"):
                # Saved successfully -- safe to quit
                return "Configuration saved to '{}'" \
                    .format(standard_config_filename())
            # Save failed: loop and ask again

        elif c == "n":
            return "Configuration was not saved"
def _init():
    # Initializes the main display with the list of symbols, etc. Also does
    # misc. global initialization that needs to happen after initializing
    # curses.

    global _ERASE_CHAR

    global _path_win
    global _top_sep_win
    global _menu_win
    global _bot_sep_win
    global _help_win

    global _parent_screen_rows
    global _cur_menu
    global _shown
    global _sel_node_i
    global _menu_scroll

    global _show_help
    global _show_name

    # Looking for this in addition to KEY_BACKSPACE (which is unreliable) makes
    # backspace work with TERM=vt100. That makes it likely to work in sane
    # environments.
    #
    # erasechar() returns a 'bytes' object. Since we use get_wch(), we need to
    # decode it. Just give up and avoid crashing if it can't be decoded.
    _ERASE_CHAR = curses.erasechar().decode("utf-8", "ignore")

    _init_styles()

    # Hide the cursor
    _safe_curs_set(0)

    # Initialize windows

    # Top row, with menu path
    _path_win = _styled_win("path")

    # Separator below menu path, with title and arrows pointing up
    _top_sep_win = _styled_win("separator")

    # List of menu entries with symbols, etc.
    _menu_win = _styled_win("list")
    _menu_win.keypad(True)

    # Row below menu list, with arrows pointing down
    _bot_sep_win = _styled_win("separator")

    # Help window with keys at the bottom. Shows help texts in show-help mode.
    _help_win = _styled_win("help")

    # The rows we'd like the nodes in the parent menus to appear on. This
    # prevents the scroll from jumping around when going in and out of menus.
    _parent_screen_rows = []

    # Initial state: start at the top menu with nothing toggled on

    _cur_menu = _kconf.top_node
    _shown = _shown_nodes(_cur_menu)
    _sel_node_i = _menu_scroll = 0

    _show_help = _show_name = False

    # Give windows their initial size
    _resize_main()
def _resize_main():
    # Resizes the main display, with the list of symbols, etc., to fill the
    # terminal

    global _menu_scroll

    screen_height, screen_width = _stdscr.getmaxyx()

    _path_win.resize(1, screen_width)
    _top_sep_win.resize(1, screen_width)
    _bot_sep_win.resize(1, screen_width)

    help_win_height = _SHOW_HELP_HEIGHT if _show_help else \
        len(_MAIN_HELP_LINES)

    # 3 fixed rows: path row, top separator, bottom separator
    menu_win_height = screen_height - help_win_height - 3

    if menu_win_height >= 1:
        _menu_win.resize(menu_win_height, screen_width)
        _help_win.resize(help_win_height, screen_width)

        _top_sep_win.mvwin(1, 0)
        _menu_win.mvwin(2, 0)
        _bot_sep_win.mvwin(2 + menu_win_height, 0)
        _help_win.mvwin(2 + menu_win_height + 1, 0)
    else:
        # Degenerate case. Give up on nice rendering and just prevent errors.

        menu_win_height = 1

        _menu_win.resize(1, screen_width)
        _help_win.resize(1, screen_width)

        for win in _top_sep_win, _menu_win, _bot_sep_win, _help_win:
            win.mvwin(0, 0)

    # Adjust the scroll so that the selected node is still within the window,
    # if needed
    if _sel_node_i - _menu_scroll >= menu_win_height:
        _menu_scroll = _sel_node_i - menu_win_height + 1
def _height(win):
# Returns the height of 'win'
return win.getmaxyx()[0]
def _width(win):
# Returns the width of 'win'
return win.getmaxyx()[1]
def _prefer_toggle(item):
    # For nodes with menus, determines whether Space should change the value of
    # the node's item or enter its menu. We toggle symbols (which have menus
    # when they're defined with 'menuconfig') and choices that can be in more
    # than one mode (e.g. optional choices). In other cases, we enter the menu.
    return isinstance(item, Symbol) or \
        (isinstance(item, Choice) and len(item.assignable) > 1)
def _enter_menu(menu):
    # Makes 'menu' the currently displayed menu. "Menu" here includes choices
    # and symbols defined with the 'menuconfig' keyword.

    global _cur_menu
    global _shown
    global _sel_node_i
    global _menu_scroll

    shown_sub = _shown_nodes(menu)
    # Never enter empty menus. We depend on having a current node.
    if shown_sub:
        # Remember where the current node appears on the screen, so we can try
        # to get it to appear in the same place when we leave the menu
        _parent_screen_rows.append(_sel_node_i - _menu_scroll)

        # Jump into menu
        _cur_menu = menu
        _shown = shown_sub
        _sel_node_i = _menu_scroll = 0

        # For choices, land the cursor on the selected symbol rather than the
        # first entry
        if isinstance(menu.item, Choice):
            _select_selected_choice_sym()
def _select_selected_choice_sym():
    """Put the cursor on the currently selected (y-valued) choice symbol.

    Does nothing if the choice has no selection (is not visible/in y mode).
    Assumes the current menu (_cur_menu) is a choice.
    """
    global _sel_node_i
    choice = _cur_menu.item
    if choice.selection:
        # Search through all menu nodes to handle choice symbols being defined
        # in multiple locations
        for node in choice.selection.nodes:
            if node in _shown:
                _sel_node_i = _shown.index(node)
                _center_vertically()
def _jump_to(node):
    """Jump directly to the menu node 'node'.

    Turns on show-all mode if 'node' would otherwise be invisible, and
    clears the remembered parent-menu screen locations (we might not even
    have been in the parent menus before the jump).
    """
    global _cur_menu
    global _shown
    global _sel_node_i
    global _menu_scroll
    global _show_all
    global _parent_screen_rows
    # Clear remembered menu locations. We might not even have been in the
    # parent menus before.
    _parent_screen_rows = []
    old_show_all = _show_all
    jump_into = (isinstance(node.item, Choice) or node.item == MENU) and \
                node.list
    # If we're jumping to a non-empty choice or menu, jump to the first entry
    # in it instead of jumping to its menu node
    if jump_into:
        _cur_menu = node
        node = node.list
    else:
        _cur_menu = _parent_menu(node)
    _shown = _shown_nodes(_cur_menu)
    if node not in _shown:
        # The node wouldn't be shown. Turn on show-all to show it.
        _show_all = True
        _shown = _shown_nodes(_cur_menu)
    _sel_node_i = _shown.index(node)
    if jump_into and not old_show_all and _show_all:
        # If we're jumping into a choice or menu and were forced to turn on
        # show-all because the first entry wasn't visible, try turning it off.
        # That will land us at the first visible node if there are visible
        # nodes, and is a no-op otherwise.
        _toggle_show_all()
    _center_vertically()
    # If we're jumping to a non-empty choice, jump to the selected symbol, if
    # any
    if jump_into and isinstance(_cur_menu.item, Choice):
        _select_selected_choice_sym()
def _leave_menu():
    """Jump to the parent menu of the current menu.

    Does nothing if we're already in the top menu. Tries to make the menu
    entry we leave appear on the same screen row as before we entered it.
    """
    global _cur_menu
    global _shown
    global _sel_node_i
    global _menu_scroll
    if _cur_menu is _kconf.top_node:
        return
    # Jump to parent menu
    parent = _parent_menu(_cur_menu)
    _shown = _shown_nodes(parent)
    _sel_node_i = _shown.index(_cur_menu)
    _cur_menu = parent
    # Try to make the menu entry appear on the same row on the screen as it did
    # before we entered the menu.
    if _parent_screen_rows:
        # The terminal might have shrunk since we were last in the parent menu
        screen_row = min(_parent_screen_rows.pop(), _height(_menu_win) - 1)
        _menu_scroll = max(_sel_node_i - screen_row, 0)
    else:
        # No saved parent menu locations, meaning we jumped directly to some
        # node earlier
        _center_vertically()
def _select_next_menu_entry():
    """Select the menu entry after the current one.

    Adjusts the scroll if necessary. Does nothing if we're already at the
    last menu entry.
    """
    global _sel_node_i
    global _menu_scroll
    if _sel_node_i < len(_shown) - 1:
        # Jump to the next node
        _sel_node_i += 1
        # If the new node is sufficiently close to the edge of the menu window
        # (as determined by _SCROLL_OFFSET), increase the scroll by one. This
        # gives nice and non-jumpy behavior even when
        # _SCROLL_OFFSET >= _height(_menu_win).
        if _sel_node_i >= _menu_scroll + _height(_menu_win) - _SCROLL_OFFSET \
           and _menu_scroll < _max_scroll(_shown, _menu_win):
            _menu_scroll += 1
def _select_prev_menu_entry():
    """Select the menu entry before the current one.

    Adjusts the scroll if necessary. Does nothing if we're already at the
    first menu entry.
    """
    global _sel_node_i
    global _menu_scroll
    if _sel_node_i > 0:
        # Jump to the previous node
        _sel_node_i -= 1
        # See _select_next_menu_entry()
        if _sel_node_i <= _menu_scroll + _SCROLL_OFFSET:
            _menu_scroll = max(_menu_scroll - 1, 0)
def _select_last_menu_entry():
    """Select the last menu entry in the current menu, scrolling to the
    bottom of the list."""
    global _sel_node_i
    global _menu_scroll
    _sel_node_i = len(_shown) - 1
    _menu_scroll = _max_scroll(_shown, _menu_win)
def _select_first_menu_entry():
    """Select the first menu entry in the current menu, scrolling to the top
    of the list."""
    global _sel_node_i
    global _menu_scroll
    _sel_node_i = _menu_scroll = 0
def _toggle_show_all():
    """Toggle show-all mode on/off.

    If turning it off would give no visible items in the current menu, it is
    left on. Tries to keep the cursor on the same screen row (and on the
    previously selected node, if it is still visible).
    """
    global _show_all
    global _shown
    global _sel_node_i
    global _menu_scroll
    # Row on the screen the cursor is on. Preferably we want the same row to
    # stay highlighted.
    old_row = _sel_node_i - _menu_scroll
    _show_all = not _show_all
    # List of new nodes to be shown after toggling _show_all
    new_shown = _shown_nodes(_cur_menu)
    # Find a good node to select. The selected node might disappear if show-all
    # mode is turned off.
    # If there are visible nodes before the previously selected node, select
    # the closest one. This will select the previously selected node itself if
    # it is still visible.
    for node in reversed(_shown[:_sel_node_i + 1]):
        if node in new_shown:
            _sel_node_i = new_shown.index(node)
            break
    else:
        # No visible nodes before the previously selected node. Select the
        # closest visible node after it instead.
        for node in _shown[_sel_node_i + 1:]:
            if node in new_shown:
                _sel_node_i = new_shown.index(node)
                break
        else:
            # No visible nodes at all, meaning show-all was turned off inside
            # an invisible menu. Don't allow that, as the implementation relies
            # on always having a selected node.
            _show_all = True
            return
    _shown = new_shown
    # Try to make the cursor stay on the same row in the menu window. This
    # might be impossible if too many nodes have disappeared above the node.
    _menu_scroll = max(_sel_node_i - old_row, 0)
def _center_vertically():
    """Center the selected node vertically in the menu window, if possible
    (clamped so the scroll stays within valid range)."""
    global _menu_scroll
    _menu_scroll = min(max(_sel_node_i - _height(_menu_win)//2, 0),
                       _max_scroll(_shown, _menu_win))
def _draw_main():
    """Draw the "main" display, with the list of symbols, the header, and
    the footer.

    This could be optimized to only update the windows that have actually
    changed, but keep it simple for now and let curses sort it out.
    """
    term_width = _width(_stdscr)
    #
    # Update the separator row below the menu path
    #
    _top_sep_win.erase()
    # Draw arrows pointing up if the symbol window is scrolled down. Draw them
    # before drawing the title, so the title ends up on top for small windows.
    if _menu_scroll > 0:
        _safe_hline(_top_sep_win, 0, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS)
    # Add the 'mainmenu' text as the title, centered at the top
    _safe_addstr(_top_sep_win,
                 0, max((term_width - len(_kconf.mainmenu_text))//2, 0),
                 _kconf.mainmenu_text)
    _top_sep_win.noutrefresh()
    # Note: The menu path at the top is deliberately updated last. See below.
    #
    # Update the symbol window
    #
    _menu_win.erase()
    # Draw the _shown nodes starting from index _menu_scroll up to either as
    # many as fit in the window, or to the end of _shown
    for i in range(_menu_scroll,
                   min(_menu_scroll + _height(_menu_win), len(_shown))):
        node = _shown[i]
        # The 'not _show_all' test avoids showing invisible items in red
        # outside show-all mode, which could look confusing/broken. Invisible
        # symbols show up outside show-all mode if an invisible symbol has
        # visible children in an implicit (indented) menu.
        if not _show_all or (node.prompt and expr_value(node.prompt[1])):
            style = _style["selection" if i == _sel_node_i else "list"]
        else:
            style = _style["inv-selection" if i == _sel_node_i else "inv-list"]
        _safe_addstr(_menu_win, i - _menu_scroll, 0, _node_str(node), style)
    _menu_win.noutrefresh()
    #
    # Update the bottom separator window
    #
    _bot_sep_win.erase()
    # Draw arrows pointing down if the symbol window is scrolled up
    if _menu_scroll < _max_scroll(_shown, _menu_win):
        _safe_hline(_bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
    # Indicate when show-name/show-help/show-all mode is enabled
    enabled_modes = []
    if _show_help:
        enabled_modes.append("show-help (toggle with [F])")
    if _show_name:
        enabled_modes.append("show-name")
    if _show_all:
        enabled_modes.append("show-all")
    if enabled_modes:
        s = " and ".join(enabled_modes) + " mode enabled"
        _safe_addstr(_bot_sep_win, 0, max(term_width - len(s) - 2, 0), s)
    _bot_sep_win.noutrefresh()
    #
    # Update the help window, which shows either key bindings or help texts
    #
    _help_win.erase()
    if _show_help:
        node = _shown[_sel_node_i]
        if isinstance(node.item, (Symbol, Choice)) and node.help:
            help_lines = textwrap.wrap(node.help, _width(_help_win))
            for i in range(min(_height(_help_win), len(help_lines))):
                _safe_addstr(_help_win, i, 0, help_lines[i])
        else:
            _safe_addstr(_help_win, 0, 0, "(no help)")
    else:
        for i, line in enumerate(_MAIN_HELP_LINES):
            _safe_addstr(_help_win, i, 0, line)
    _help_win.noutrefresh()
    #
    # Update the top row with the menu path.
    #
    # Doing this last leaves the cursor on the top row, which avoids some minor
    # annoying jumpiness in gnome-terminal when reducing the height of the
    # terminal. It seems to happen whenever the row with the cursor on it
    # disappears.
    #
    _path_win.erase()
    # Draw the menu path ("(top menu) -> menu -> submenu -> ...")
    menu_prompts = []
    menu = _cur_menu
    while menu is not _kconf.top_node:
        # Promptless choices can be entered in show-all mode. Use
        # standard_sc_expr_str() for them, so they show up as
        # '<choice (name if any)>'.
        menu_prompts.append(menu.prompt[0] if menu.prompt else
                            standard_sc_expr_str(menu.item))
        menu = _parent_menu(menu)
    menu_prompts.append("(top menu)")
    menu_prompts.reverse()
    # Hack: We can't put ACS_RARROW directly in the string. Temporarily
    # represent it with NULL. Maybe using a Unicode character would be better.
    menu_path_str = " \0 ".join(menu_prompts)
    # Scroll the menu path to the right if needed to make the current menu's
    # title visible
    if len(menu_path_str) > term_width:
        menu_path_str = menu_path_str[len(menu_path_str) - term_width:]
    # Print the path with the arrows reinserted
    split_path = menu_path_str.split("\0")
    _safe_addstr(_path_win, split_path[0])
    for s in split_path[1:]:
        _safe_addch(_path_win, curses.ACS_RARROW)
        _safe_addstr(_path_win, s)
    _path_win.noutrefresh()
def _parent_menu(node):
# Returns the menu node of the menu that contains 'node'. In addition to
# proper 'menu's, this might also be a 'menuconfig' symbol or a 'choice'.
# "Menu" here means a menu in the interface.
menu = node.parent
while not menu.is_menuconfig:
menu = menu.parent
return menu
def _shown_nodes(menu):
    """Return the list of menu nodes from 'menu' (see _parent_menu()) that
    would be shown when entering it.

    Includes nodes from implicit (indented) submenus, and handles choices
    defined in multiple locations by gathering all their child nodes.
    """
    def rec(node):
        # Returns the shown nodes in the menu node list starting at 'node',
        # flattening implicit submenus into the same list
        res = []
        while node:
            # If a node has children but doesn't have the is_menuconfig flag
            # set, the children come from a submenu created implicitly from
            # dependencies, and are shown (indented) in the same menu as the
            # parent node
            shown_children = \
                rec(node.list) if node.list and not node.is_menuconfig else []
            # Always show the node if it is the root of an implicit submenu
            # with visible items, even if the node itself is invisible. This
            # can happen e.g. if the symbol has an optional prompt
            # ('prompt "foo" if COND') that is currently invisible.
            if shown(node) or shown_children:
                res.append(node)
            res += shown_children
            node = node.next
        return res
    def shown(node):
        # Show the node if its prompt is visible. For menus, also check
        # 'visible if'. In show-all mode, show everything.
        return _show_all or \
            (node.prompt and expr_value(node.prompt[1]) and not
             (node.item == MENU and not expr_value(node.visibility)))
    if isinstance(menu.item, Choice):
        # For named choices defined in multiple locations, entering the choice
        # at a particular menu node would normally only show the choice symbols
        # defined there (because that's what the MenuNode tree looks like).
        #
        # That might look confusing, and makes extending choices by defining
        # them in multiple locations less useful. Instead, gather all the child
        # menu nodes for all the choices whenever a choice is entered. That
        # makes all choice symbols visible at all locations.
        #
        # Choices can contain non-symbol items (people do all sorts of weird
        # stuff with them), hence the generality here. We really need to
        # preserve the menu tree at each choice location.
        #
        # Note: Named choices are pretty broken in the C tools, and this is
        # super obscure, so you probably won't find much that relies on this.
        # This whole 'if' could be deleted if you don't care about defining
        # choices in multiple locations to add symbols (which will still work,
        # just with things being displayed in a way that might be unexpected).
        # Do some additional work to avoid listing choice symbols twice if all
        # or part of the choice is copied in multiple locations (e.g. by
        # including some Kconfig file multiple times). We give the prompts at
        # the current location precedence.
        seen_syms = {node.item for node in rec(menu.list)
                     if isinstance(node.item, Symbol)}
        res = []
        for choice_node in menu.item.nodes:
            for node in rec(choice_node.list):
                # 'choice_node is menu' checks if we're dealing with the
                # current location
                if node.item not in seen_syms or choice_node is menu:
                    res.append(node)
                    if isinstance(node.item, Symbol):
                        seen_syms.add(node.item)
        return res
    return rec(menu.list)
def _change_node(node):
    """Change the value of the menu node 'node' if it is a symbol or choice.

    Bools and tristates are toggled (cycled through .assignable), while
    other symbol types pop up a text entry dialog. Does nothing for
    non-symbol/choice nodes and for invisible symbols.
    """
    if not isinstance(node.item, (Symbol, Choice)):
        return
    # This will hit for invisible symbols, which appear in show-all mode and
    # when an invisible symbol has visible children (which can happen e.g. for
    # symbols with optional prompts)
    if not (node.prompt and expr_value(node.prompt[1])):
        return
    # sc = symbol/choice
    sc = node.item
    if sc.type in (INT, HEX, STRING):
        s = sc.str_value
        while True:
            s = _input_dialog("{} ({})".format(
                node.prompt[0], TYPE_TO_STR[sc.type]),
                s, _range_info(sc))
            if s is None:
                break
            if sc.type in (INT, HEX):
                s = s.strip()
                # 'make menuconfig' does this too. Hex values not starting with
                # '0x' are accepted when loading .config files though.
                if sc.type == HEX and not s.startswith(("0x", "0X")):
                    s = "0x" + s
            if _check_validity(sc, s):
                _set_val(sc, s)
                break
    elif len(sc.assignable) == 1:
        # Handles choice symbols for choices in y mode, which are a special
        # case: .assignable can be (2,) while .tri_value is 0.
        _set_val(sc, sc.assignable[0])
    elif sc.assignable:
        # Set the symbol to the value after the current value in
        # sc.assignable, with wrapping
        val_index = sc.assignable.index(sc.tri_value)
        _set_val(sc, sc.assignable[(val_index + 1) % len(sc.assignable)])
def _set_sel_node_tri_val(tri_val):
    """Set the value of the currently selected menu entry to 'tri_val'.

    Does nothing unless the entry is a symbol/choice and 'tri_val' is one of
    its currently assignable values.
    """
    sc = _shown[_sel_node_i].item
    if isinstance(sc, (Symbol, Choice)) and tri_val in sc.assignable:
        _set_val(sc, tri_val)
def _set_val(sc, val):
    """Set the value of the symbol/choice 'sc' to 'val'.

    Wrapper around Symbol/Choice.set_value() that also updates the menu
    state and the _conf_changed flag. Values that don't change anything are
    ignored.
    """
    global _conf_changed
    # Use the string representation of tristate values. This makes the format
    # consistent for all symbol types.
    if val in TRI_TO_STR:
        val = TRI_TO_STR[val]
    if val != sc.str_value:
        sc.set_value(val)
        _conf_changed = True
        # Changing the value of the symbol might have changed what items in the
        # current menu are visible. Recalculate the state.
        _update_menu()
def _update_menu():
    """Update the current menu after the value of a symbol or choice has
    been changed.

    Changing a value might change which items in the menu are visible. Tries
    to preserve the location of the cursor on the screen when items
    disappear above it.
    """
    global _shown
    global _sel_node_i
    global _menu_scroll
    # Row on the screen the cursor was on
    old_row = _sel_node_i - _menu_scroll
    sel_node = _shown[_sel_node_i]
    # New visible nodes
    _shown = _shown_nodes(_cur_menu)
    # New index of selected node
    _sel_node_i = _shown.index(sel_node)
    # Try to make the cursor stay on the same row in the menu window. This
    # might be impossible if too many nodes have disappeared above the node.
    _menu_scroll = max(_sel_node_i - old_row, 0)
def _input_dialog(title, initial_text, info_text=None):
    """Pop up a dialog that prompts the user for a string.

    title:
      Title to display at the top of the dialog window's border

    initial_text:
      Initial text to prefill the input field with

    info_text:
      String to show next to the input field. If None, just the input field
      is shown.

    Return value:
      The entered string on Enter, or None if the dialog was canceled with
      ESC.
    """
    win = _styled_win("body")
    win.keypad(True)
    info_lines = info_text.split("\n") if info_text else []
    # Give the input dialog its initial size
    _resize_input_dialog(win, title, info_lines)
    _safe_curs_set(2)
    # Input field text
    s = initial_text
    # Cursor position
    i = len(initial_text)
    def edit_width():
        # Width of the editable area inside the dialog frame
        return _width(win) - 4
    # Horizontal scroll offset
    hscroll = max(i - edit_width() + 1, 0)
    while True:
        # Draw the "main" display with the menu, etc., so that resizing still
        # works properly. This is like a stack of windows, only hardcoded for
        # now.
        _draw_main()
        _draw_input_dialog(win, title, info_lines, s, i, hscroll)
        curses.doupdate()
        c = _get_wch_compat(win)
        if c == curses.KEY_RESIZE:
            # Resize the main display too. The dialog floats above it.
            _resize_main()
            _resize_input_dialog(win, title, info_lines)
        elif c == "\n":
            _safe_curs_set(0)
            return s
        elif c == "\x1B":  # \x1B = ESC
            _safe_curs_set(0)
            return None
        else:
            s, i, hscroll = _edit_text(c, s, i, hscroll, edit_width())
def _resize_input_dialog(win, title, info_lines):
    """Resize the input dialog 'win' to a size appropriate for the terminal
    size, and center it on the screen."""
    screen_height, screen_width = _stdscr.getmaxyx()
    win_height = 5
    if info_lines:
        win_height += len(info_lines) + 1
    win_height = min(win_height, screen_height)
    win_width = max(_INPUT_DIALOG_MIN_WIDTH,
                    len(title) + 4,
                    *(len(line) + 4 for line in info_lines))
    win_width = min(win_width, screen_width)
    win.resize(win_height, win_width)
    win.mvwin((screen_height - win_height)//2,
              (screen_width - win_width)//2)
def _draw_input_dialog(win, title, info_lines, s, i, hscroll):
    """Draw the input dialog: the input field with text 's' (cursor at index
    'i', horizontally scrolled by 'hscroll'), the 'info_lines' below it, and
    a frame titled 'title'."""
    edit_width = _width(win) - 4
    win.erase()
    # Note: Perhaps having a separate window for the input field would be nicer
    visible_s = s[hscroll:hscroll + edit_width]
    _safe_addstr(win, 2, 2, visible_s + " "*(edit_width - len(visible_s)),
                 _style["edit"])
    for linenr, line in enumerate(info_lines):
        _safe_addstr(win, 4 + linenr, 2, line)
    # Draw the frame last so that it overwrites the body text for small windows
    _draw_frame(win, title)
    _safe_move(win, 2, 2 + i - hscroll)
    win.noutrefresh()
def _load_dialog():
    """Dialog for loading a new configuration.

    Return value:
      True if a new configuration was loaded, and False if the user canceled
      the dialog
    """
    global _show_all
    filename = ""
    while True:
        filename = _input_dialog("File to load", filename, _load_save_info())
        if filename is None:
            return False
        filename = os.path.expanduser(filename)
        if _try_load(filename):
            sel_node = _shown[_sel_node_i]
            # Turn on show-all mode if the current node is (no longer) visible
            if not (sel_node.prompt and expr_value(sel_node.prompt[1])):
                _show_all = True
            _update_menu()
            # The message dialog indirectly updates the menu display, so _msg()
            # must be called after the new state has been initialized
            _msg("Success", "Loaded {}".format(filename))
            return True
def _try_load(filename):
    """Try to load a configuration file.

    filename:
      Configuration file to load

    Return value:
      True on success. Pops up an error dialog and returns False on failure.
    """
    try:
        _kconf.load_config(filename)
        return True
    except OSError as e:
        # e.errno can be None (e.g. for manually-raised OSErrors) or missing
        # from errno.errorcode, and indexing errorcode directly would then
        # raise a KeyError from inside the error handler, masking the real
        # error. Fall back to "?" for unknown codes.
        _error("Error loading '{}'\n\n{} (errno: {})"
               .format(filename, e.strerror,
                       errno.errorcode.get(e.errno, "?")))
        return False
def _save_dialog(save_fn, default_filename, description):
    """Dialog for saving the current configuration.

    save_fn:
      Function to call with 'filename' to save the file

    default_filename:
      Prefilled filename in the input field

    description:
      String describing the thing being saved

    Return value:
      True if the configuration was saved, and False if the user canceled
      the dialog
    """
    filename = default_filename
    while True:
        filename = _input_dialog("Filename to save {} to".format(description),
                                 filename, _load_save_info())
        if filename is None:
            return False
        filename = os.path.expanduser(filename)
        if _try_save(save_fn, filename, description):
            _msg("Success", "{} saved to {}".format(description, filename))
            return True
def _try_save(save_fn, filename, description):
# Tries to save a configuration file. Pops up an error and returns False on
# failure.
#
# save_fn:
# Function to call with 'filename' to save the file
#
# description:
# String describing the thing being saved
try:
save_fn(filename)
return True
except OSError as e:
_error("Error saving {} to '{}'\n\n{} (errno: {})"
.format(description, e.filename, e.strerror,
errno.errorcode[e.errno]))
return False
def _key_dialog(title, text, keys):
    """Pop up a dialog that can be closed by pressing a key.

    title:
      Title to display at the top of the dialog window's border

    text:
      Text to show in the dialog

    keys:
      List of keys that will close the dialog. Other keys (besides ESC) are
      ignored. The caller is responsible for providing a hint about which
      keys can be pressed in 'text'.

    Return value:
      The key that was pressed to close the dialog. Uppercase characters are
      converted to lowercase. ESC will always close the dialog, and returns
      None.
    """
    win = _styled_win("body")
    win.keypad(True)
    _resize_key_dialog(win, text)
    while True:
        # See _input_dialog()
        _draw_main()
        _draw_key_dialog(win, title, text)
        curses.doupdate()
        c = _get_wch_compat(win)
        if c == curses.KEY_RESIZE:
            # Resize the main display too. The dialog floats above it.
            _resize_main()
            _resize_key_dialog(win, text)
        elif c == "\x1B":  # \x1B = ESC
            return None
        elif isinstance(c, str):
            c = c.lower()
            if c in keys:
                return c
def _resize_key_dialog(win, text):
    """Resize the key dialog 'win' to a size appropriate for the terminal
    size (based on the dimensions of 'text'), and center it on the
    screen."""
    screen_height, screen_width = _stdscr.getmaxyx()
    lines = text.split("\n")
    win_height = min(len(lines) + 4, screen_height)
    win_width = min(max(len(line) for line in lines) + 4, screen_width)
    win.resize(win_height, win_width)
    win.mvwin((screen_height - win_height)//2,
              (screen_width - win_width)//2)
def _draw_key_dialog(win, title, text):
    """Draw the key dialog: 'text' inside a frame titled 'title'."""
    win.erase()
    for i, line in enumerate(text.split("\n")):
        _safe_addstr(win, 2 + i, 2, line)
    # Draw the frame last so that it overwrites the body text for small windows
    _draw_frame(win, title)
    win.noutrefresh()
def _draw_frame(win, title):
    """Draw a frame around the inner edges of 'win', with 'title' centered
    at the top."""
    win_height, win_width = win.getmaxyx()
    win.attron(_style["frame"])
    # Draw top/bottom edge
    _safe_hline(win, 0, 0, " ", win_width)
    _safe_hline(win, win_height - 1, 0, " ", win_width)
    # Draw left/right edge
    _safe_vline(win, 0, 0, " ", win_height)
    _safe_vline(win, 0, win_width - 1, " ", win_height)
    # Draw title
    _safe_addstr(win, 0, max((win_width - len(title))//2, 0), title)
    win.attroff(_style["frame"])
def _jump_to_dialog():
    """Implement the jump-to dialog, where symbols can be looked up via
    incremental search and jumped to.

    Returns True if the user jumped to a symbol, and False if the dialog was
    canceled.
    """
    # Search text
    s = ""
    # Previous search text
    prev_s = None
    # Search text cursor position
    s_i = 0
    # Horizontal scroll offset
    hscroll = 0
    # Index of selected row
    sel_node_i = 0
    # Index in 'matches' of the top row of the list
    scroll = 0
    # Edit box at the top
    edit_box = _styled_win("jump-edit")
    edit_box.keypad(True)
    # List of matches
    matches_win = _styled_win("list")
    # Bottom separator, with arrows pointing down
    bot_sep_win = _styled_win("separator")
    # Help window with instructions at the bottom
    help_win = _styled_win("help")
    # Give windows their initial size
    _resize_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
                           sel_node_i, scroll)
    _safe_curs_set(2)
    # TODO: Code duplication with _select_{next,prev}_menu_entry(). Can this be
    # factored out in some nice way?
    def select_next_match():
        # Moves the selection one row down in the match list, scrolling if
        # needed
        nonlocal sel_node_i
        nonlocal scroll
        if sel_node_i < len(matches) - 1:
            sel_node_i += 1
            if sel_node_i >= scroll + _height(matches_win) - _SCROLL_OFFSET \
               and scroll < _max_scroll(matches, matches_win):
                scroll += 1
    def select_prev_match():
        # Moves the selection one row up in the match list, scrolling if
        # needed
        nonlocal sel_node_i
        nonlocal scroll
        if sel_node_i > 0:
            sel_node_i -= 1
            if sel_node_i <= scroll + _SCROLL_OFFSET:
                scroll = max(scroll - 1, 0)
    while True:
        if s != prev_s:
            # The search text changed. Find new matching nodes.
            prev_s = s
            try:
                # We could use re.IGNORECASE here instead of lower(), but this
                # is noticeably less jerky while inputting regexes like
                # '.*debug$' (though the '.*' is redundant there). Those
                # probably have bad interactions with re.search(), which
                # matches anywhere in the string.
                #
                # It's not horrible either way. Just a bit smoother.
                regex_searches = [re.compile(regex).search
                                  for regex in s.lower().split()]
                # No exception thrown, so the regexes are okay
                bad_re = None
                # List of matching nodes
                matches = []
                # Search symbols and choices
                for node in _sorted_sc_nodes():
                    # Symbol/choice
                    sc = node.item
                    for search in regex_searches:
                        # Both the name and the prompt might be missing, since
                        # we're searching both symbols and choices
                        # Does the regex match either the symbol name or the
                        # prompt (if any)?
                        if not (sc.name and search(sc.name.lower()) or
                                node.prompt and search(node.prompt[0].lower())):
                            # Give up on the first regex that doesn't match, to
                            # speed things up a bit when multiple regexes are
                            # entered
                            break
                    else:
                        matches.append(node)
                # Search menus and comments
                for node in _sorted_menu_comment_nodes():
                    for search in regex_searches:
                        if not search(node.prompt[0].lower()):
                            break
                    else:
                        matches.append(node)
            except re.error as e:
                # Bad regex. Remember the error message so we can show it.
                bad_re = "Bad regular expression"
                # re.error.msg was added in Python 3.5
                if hasattr(e, "msg"):
                    bad_re += ": " + e.msg
                matches = []
            # Reset scroll and jump to the top of the list of matches
            sel_node_i = scroll = 0
        _draw_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
                             s, s_i, hscroll,
                             bad_re, matches, sel_node_i, scroll)
        curses.doupdate()
        c = _get_wch_compat(edit_box)
        if c == "\n":
            if matches:
                _jump_to(matches[sel_node_i])
                _safe_curs_set(0)
                return True
        elif c == "\x1B":  # \x1B = ESC
            _safe_curs_set(0)
            return False
        elif c == curses.KEY_RESIZE:
            # We adjust the scroll so that the selected node stays visible in
            # the list when the terminal is resized, hence the 'scroll'
            # assignment
            scroll = _resize_jump_to_dialog(
                edit_box, matches_win, bot_sep_win, help_win,
                sel_node_i, scroll)
        elif c == "\x06":  # \x06 = Ctrl-F
            if matches:
                _safe_curs_set(0)
                _info_dialog(matches[sel_node_i], True)
                _safe_curs_set(2)
                scroll = _resize_jump_to_dialog(
                    edit_box, matches_win, bot_sep_win, help_win,
                    sel_node_i, scroll)
        elif c == curses.KEY_DOWN:
            select_next_match()
        elif c == curses.KEY_UP:
            select_prev_match()
        elif c in (curses.KEY_NPAGE, "\x04"):  # Page Down/Ctrl-D
            # Keep it simple. This way we get sane behavior for small windows,
            # etc., for free.
            for _ in range(_PG_JUMP):
                select_next_match()
        # Page Up (no Ctrl-U, as it's already used by the edit box)
        elif c == curses.KEY_PPAGE:
            for _ in range(_PG_JUMP):
                select_prev_match()
        elif c == curses.KEY_END:
            sel_node_i = len(matches) - 1
            scroll = _max_scroll(matches, matches_win)
        elif c == curses.KEY_HOME:
            sel_node_i = scroll = 0
        else:
            s, s_i, hscroll = _edit_text(c, s, s_i, hscroll,
                                         _width(edit_box) - 2)
# Obscure Python: We never pass a value for cached_nodes, and it keeps pointing
# to the same list. This avoids a global.
def _sorted_sc_nodes(cached_nodes=[]):
    """Return a sorted list of symbol and choice nodes to search.

    The symbol nodes appear first, sorted by name, and then the choice
    nodes, sorted by prompt and (secondarily) name.

    The mutable default argument deliberately acts as a cache shared across
    calls (see the comment above the function) -- the list is built on the
    first call and returned as-is afterwards.
    """
    if not cached_nodes:
        # Add symbol nodes
        for sym in sorted(_kconf.unique_defined_syms,
                          key=lambda sym: sym.name):
            # += is in-place for lists
            cached_nodes += sym.nodes
        # Add choice nodes
        choices = sorted(_kconf.unique_choices,
                         key=lambda choice: choice.name or "")
        cached_nodes += sorted(
            [node
             for choice in choices
             for node in choice.nodes],
            key=lambda node: node.prompt[0] if node.prompt else "")
    return cached_nodes
def _sorted_menu_comment_nodes(cached_nodes=[]):
    """Return a list of menu and comment nodes to search, sorted by prompt,
    with the menus first.

    The mutable default argument deliberately acts as a cache shared across
    calls, like in _sorted_sc_nodes().
    """
    if not cached_nodes:
        def prompt_text(mc):
            # Sort key: the prompt text of the menu/comment node
            return mc.prompt[0]
        cached_nodes += sorted(_kconf.menus, key=prompt_text)
        cached_nodes += sorted(_kconf.comments, key=prompt_text)
    return cached_nodes
def _resize_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
                           sel_node_i, scroll):
    """Resize the jump-to dialog to fill the terminal.

    Returns the new scroll index. We adjust the scroll if needed so that the
    selected node stays visible.
    """
    screen_height, screen_width = _stdscr.getmaxyx()
    bot_sep_win.resize(1, screen_width)
    help_win_height = len(_JUMP_TO_HELP_LINES)
    matches_win_height = screen_height - help_win_height - 4
    if matches_win_height >= 1:
        edit_box.resize(3, screen_width)
        matches_win.resize(matches_win_height, screen_width)
        help_win.resize(help_win_height, screen_width)
        matches_win.mvwin(3, 0)
        bot_sep_win.mvwin(3 + matches_win_height, 0)
        help_win.mvwin(3 + matches_win_height + 1, 0)
    else:
        # Degenerate case. Give up on nice rendering and just prevent errors.
        matches_win_height = 1
        edit_box.resize(screen_height, screen_width)
        matches_win.resize(1, screen_width)
        help_win.resize(1, screen_width)
        for win in matches_win, bot_sep_win, help_win:
            win.mvwin(0, 0)
    # Adjust the scroll so that the selected row is still within the window, if
    # needed
    if sel_node_i - scroll >= matches_win_height:
        return sel_node_i - matches_win_height + 1
    return scroll
def _draw_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
                         s, s_i, hscroll,
                         bad_re, matches, sel_node_i, scroll):
    """Draw the jump-to dialog: the list of 'matches' (or the 'bad_re' error
    message), the bottom separator with scroll arrows, the help lines, and
    the search edit box with text 's' (cursor at 's_i', horizontally
    scrolled by 'hscroll')."""
    edit_width = _width(edit_box) - 2
    #
    # Update list of matches
    #
    matches_win.erase()
    if matches:
        for i in range(scroll,
                       min(scroll + _height(matches_win), len(matches))):
            node = matches[i]
            if isinstance(node.item, (Symbol, Choice)):
                node_str = _name_and_val_str(node.item)
                if node.prompt:
                    node_str += ' "{}"'.format(node.prompt[0])
            elif node.item == MENU:
                node_str = 'menu "{}"'.format(node.prompt[0])
            else:  # node.item == COMMENT
                node_str = 'comment "{}"'.format(node.prompt[0])
            _safe_addstr(matches_win, i - scroll, 0, node_str,
                         _style["selection" if i == sel_node_i else "list"])
    else:
        # bad_re holds the error message from the re.error exception on errors
        _safe_addstr(matches_win, 0, 0, bad_re or "No matches")
    matches_win.noutrefresh()
    #
    # Update bottom separator line
    #
    bot_sep_win.erase()
    # Draw arrows pointing down if the symbol list is scrolled up
    if scroll < _max_scroll(matches, matches_win):
        _safe_hline(bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
    bot_sep_win.noutrefresh()
    #
    # Update help window at bottom
    #
    help_win.erase()
    for i, line in enumerate(_JUMP_TO_HELP_LINES):
        _safe_addstr(help_win, i, 0, line)
    help_win.noutrefresh()
    #
    # Update edit box. We do this last since it makes it handy to position the
    # cursor.
    #
    edit_box.erase()
    _draw_frame(edit_box, "Jump to symbol/choice/menu/comment")
    # Draw arrows pointing up if the symbol list is scrolled down
    if scroll > 0:
        # TODO: Bit ugly that _style["frame"] is repeated here
        _safe_hline(edit_box, 2, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS,
                    _style["frame"])
    visible_s = s[hscroll:hscroll + edit_width]
    _safe_addstr(edit_box, 1, 1, visible_s)
    _safe_move(edit_box, 1, 1 + s_i - hscroll)
    edit_box.noutrefresh()
def _info_dialog(node, from_jump_to_dialog):
    # Shows a fullscreen window with information about 'node'.
    #
    # If 'from_jump_to_dialog' is True, the information dialog was opened from
    # within the jump-to-dialog. In this case, we make '/' from within the
    # information dialog just return, to avoid a confusing recursive invocation
    # of the jump-to-dialog.

    # Top row, with title and arrows point up
    top_line_win = _styled_win("separator")

    # Text display
    text_win = _styled_win("text")
    text_win.keypad(True)

    # Bottom separator, with arrows pointing down
    bot_sep_win = _styled_win("separator")

    # Help window with keys at the bottom
    help_win = _styled_win("help")

    # Give windows their initial size
    _resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)

    # Get lines of help text
    lines = _info_str(node).split("\n")

    # Index of first row in 'lines' to show
    scroll = 0

    # Event loop: redraw, wait for a key, handle scrolling/exit
    while True:
        _draw_info_dialog(node, lines, scroll, top_line_win, text_win,
                          bot_sep_win, help_win)
        curses.doupdate()

        c = _get_wch_compat(text_win)

        if c == curses.KEY_RESIZE:
            _resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)

        elif c in (curses.KEY_DOWN, "j", "J"):
            if scroll < _max_scroll(lines, text_win):
                scroll += 1

        elif c in (curses.KEY_NPAGE, "\x04"):  # Page Down/Ctrl-D
            scroll = min(scroll + _PG_JUMP, _max_scroll(lines, text_win))

        elif c in (curses.KEY_PPAGE, "\x15"):  # Page Up/Ctrl-U
            scroll = max(scroll - _PG_JUMP, 0)

        elif c in (curses.KEY_END, "G"):
            scroll = _max_scroll(lines, text_win)

        elif c in (curses.KEY_HOME, "g"):
            scroll = 0

        elif c in (curses.KEY_UP, "k", "K"):
            if scroll > 0:
                scroll -= 1

        elif c == "/":
            # Support starting a search from within the information dialog

            if from_jump_to_dialog:
                # Avoid recursion
                return

            if _jump_to_dialog():
                # Jumped to a symbol. Cancel the information dialog.
                return

            # Stay in the information dialog if the jump-to dialog was
            # canceled. Resize it in case the terminal was resized while the
            # fullscreen jump-to dialog was open.
            _resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)

        elif c in (curses.KEY_LEFT, curses.KEY_BACKSPACE, _ERASE_CHAR,
                   "\x1B",  # \x1B = ESC
                   "q", "Q", "h", "H"):

            return
def _resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win):
    # Resizes the info dialog to fill the terminal

    screen_height, screen_width = _stdscr.getmaxyx()

    top_line_win.resize(1, screen_width)
    bot_sep_win.resize(1, screen_width)

    help_win_height = len(_INFO_HELP_LINES)
    # Whatever is left after the top line, separator, and help window goes to
    # the text display
    text_win_height = screen_height - help_win_height - 2

    if text_win_height >= 1:
        text_win.resize(text_win_height, screen_width)
        help_win.resize(help_win_height, screen_width)

        # Stack the windows vertically: top line, text, separator, help
        text_win.mvwin(1, 0)
        bot_sep_win.mvwin(1 + text_win_height, 0)
        help_win.mvwin(1 + text_win_height + 1, 0)
    else:
        # Degenerate case. Give up on nice rendering and just prevent errors.
        text_win.resize(1, screen_width)
        help_win.resize(1, screen_width)

        for win in text_win, bot_sep_win, help_win:
            win.mvwin(0, 0)
def _draw_info_dialog(node, lines, scroll, top_line_win, text_win,
                      bot_sep_win, help_win):
    # Redraws the information dialog.
    #
    # node:
    #   Menu node the information is about (determines the window title)
    #
    # lines:
    #   Information text, pre-split into lines
    #
    # scroll:
    #   Index of the first row in 'lines' to show

    text_win_height, text_win_width = text_win.getmaxyx()

    # Note: The top row is deliberately updated last. See _draw_main().

    #
    # Update text display
    #

    text_win.erase()

    for i, line in enumerate(lines[scroll:scroll + text_win_height]):
        _safe_addstr(text_win, i, 0, line)

    text_win.noutrefresh()

    #
    # Update bottom separator line
    #

    bot_sep_win.erase()

    # Draw arrows pointing down if the symbol window is scrolled up
    if scroll < _max_scroll(lines, text_win):
        _safe_hline(bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)

    bot_sep_win.noutrefresh()

    #
    # Update help window at bottom
    #

    help_win.erase()

    for i, line in enumerate(_INFO_HELP_LINES):
        _safe_addstr(help_win, i, 0, line)

    help_win.noutrefresh()

    #
    # Update top row
    #

    top_line_win.erase()

    # Draw arrows pointing up if the information window is scrolled down. Draw
    # them before drawing the title, so the title ends up on top for small
    # windows.
    if scroll > 0:
        _safe_hline(top_line_win, 0, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS)

    title = ("Symbol" if isinstance(node.item, Symbol) else
             "Choice" if isinstance(node.item, Choice) else
             "Menu" if node.item == MENU else
             "Comment") + " information"
    # Center the title horizontally
    _safe_addstr(top_line_win, 0, max((text_win_width - len(title))//2, 0),
                 title)

    top_line_win.noutrefresh()
def _info_str(node):
    # Returns information about the menu node 'node' as a string.
    #
    # The helper functions are responsible for adding newlines. This allows
    # them to return "" if they don't want to add any output.

    if isinstance(node.item, Symbol):
        sym = node.item

        return (
            _name_info(sym) +
            _prompt_info(sym) +
            "Type: {}\n".format(TYPE_TO_STR[sym.type]) +
            _value_info(sym) +
            _help_info(sym) +
            _direct_dep_info(sym) +
            _defaults_info(sym) +
            _select_imply_info(sym) +
            _kconfig_def_info(sym)
        )

    if isinstance(node.item, Choice):
        choice = node.item

        return (
            _name_info(choice) +
            _prompt_info(choice) +
            "Type: {}\n".format(TYPE_TO_STR[choice.type]) +
            'Mode: {}\n'.format(choice.str_value) +
            _help_info(choice) +
            _choice_syms_info(choice) +
            _direct_dep_info(choice) +
            _defaults_info(choice) +
            _kconfig_def_info(choice)
        )

    # node.item in (MENU, COMMENT): only the Kconfig definition is shown
    return _kconfig_def_info(node)
def _name_info(sc):
# Returns a string with the name of the symbol/choice. Names are optional
# for choices.
return "Name: {}\n".format(sc.name) if sc.name else ""
def _prompt_info(sc):
# Returns a string listing the prompts of 'sc' (Symbol or Choice)
s = ""
for node in sc.nodes:
if node.prompt:
s += "Prompt: {}\n".format(node.prompt[0])
return s
def _value_info(sym):
    """Return a 'Value: ...' line showing 'sym's current value."""
    # Only string symbols get quotes around the value
    value = sym.str_value
    if sym.orig_type == STRING:
        value = '"{}"'.format(value)
    return "Value: {}\n".format(value)
def _choice_syms_info(choice):
# Returns a string listing the choice symbols in 'choice'. Adds
# "(selected)" next to the selected one.
s = "Choice symbols:\n"
for sym in choice.syms:
s += " - " + sym.name
if sym is choice.selection:
s += " (selected)"
s += "\n"
return s + "\n"
def _help_info(sc):
# Returns a string with the help text(s) of 'sc' (Symbol or Choice).
# Symbols and choices defined in multiple locations can have multiple help
# texts.
s = "\n"
for node in sc.nodes:
if node.help is not None:
s += "Help:\n\n{}\n\n" \
.format(textwrap.indent(node.help, " "))
return s
def _direct_dep_info(sc):
    # Returns a string describing the direct dependencies of 'sc' (Symbol or
    # Choice). The direct dependencies are the OR of the dependencies from each
    # definition location. The dependencies at each definition location come
    # from 'depends on' and dependencies inherited from parent items.

    # A direct dependency of _kconf.y means "no dependencies"; show nothing
    if sc.direct_dep is _kconf.y:
        return ""

    return 'Direct dependencies (={}):\n{}\n' \
           .format(TRI_TO_STR[expr_value(sc.direct_dep)],
                   _split_expr_info(sc.direct_dep, 2))
def _defaults_info(sc):
    # Returns a string describing the defaults of 'sc' (Symbol or Choice),
    # including the condition of each conditional default

    if not sc.defaults:
        return ""

    s = "Defaults:\n"

    for val, cond in sc.defaults:
        s += " - "
        if isinstance(sc, Symbol):
            s += _expr_str(val)
            # Skip the tristate value hint if the expression is just a single
            # symbol. _expr_str() already shows its value as a string.
            #
            # This also avoids showing the tristate value for string/int/hex
            # defaults, which wouldn't make any sense.
            if isinstance(val, tuple):
                s += ' (={})'.format(TRI_TO_STR[expr_value(val)])
        else:
            # Don't print the value next to the symbol name for choice
            # defaults, as it looks a bit confusing
            s += val.name
        s += "\n"

        # _kconf.y as the condition means the default is unconditional
        if cond is not _kconf.y:
            s += " Condition (={}):\n{}" \
                 .format(TRI_TO_STR[expr_value(cond)],
                         _split_expr_info(cond, 4))

    return s + "\n"
def _split_expr_info(expr, indent):
    # Returns a string with 'expr' split into its top-level && or || operands,
    # with one operand per line, together with the operand's value. This is
    # usually enough to get something readable for long expressions. A fancier
    # recursive thingy would be possible too.
    #
    # indent:
    #   Number of leading spaces to add before the split expression.

    # Prefer splitting on && when there is more than one && operand; a
    # single-operand expression behaves the same either way
    if len(split_expr(expr, AND)) > 1:
        split_op = AND
        op_str = "&&"
    else:
        split_op = OR
        op_str = "||"

    s = ""
    for i, term in enumerate(split_expr(expr, split_op)):
        # First line gets padding in place of the operator for alignment
        s += "{}{} {}".format(" "*indent,
                              " " if i == 0 else op_str,
                              _expr_str(term))

        # Don't bother showing the value hint if the expression is just a
        # single symbol. _expr_str() already shows its value.
        if isinstance(term, tuple):
            s += " (={})".format(TRI_TO_STR[expr_value(term)])

        s += "\n"

    return s
def _select_imply_info(sym):
    # Returns a string with information about which symbols 'select' or 'imply'
    # 'sym'. The selecting/implying symbols are grouped according to which
    # value they select/imply 'sym' to (n/m/y).

    s = ""

    def add_sis(expr, val, title):
        # Appends a section with heading 'title' listing the operands of
        # 'expr' (an OR of selects/implies) whose current value is 'val'.
        # Builds up the enclosing 's' via nonlocal.
        nonlocal s

        # sis = selects/implies
        sis = [si for si in split_expr(expr, OR) if expr_value(si) == val]

        if sis:
            s += title
            for si in sis:
                # The first && operand is the selecting/implying symbol itself
                s += " - {}\n".format(split_expr(si, AND)[0].name)
            s += "\n"

    # rev_dep: combined 'select' conditions on this symbol (kconfiglib API)
    if sym.rev_dep is not _kconf.n:
        add_sis(sym.rev_dep, 2,
                "Symbols currently y-selecting this symbol:\n")
        add_sis(sym.rev_dep, 1,
                "Symbols currently m-selecting this symbol:\n")
        add_sis(sym.rev_dep, 0,
                "Symbols currently n-selecting this symbol (no effect):\n")

    # weak_rev_dep: combined 'imply' conditions (kconfiglib API)
    if sym.weak_rev_dep is not _kconf.n:
        add_sis(sym.weak_rev_dep, 2,
                "Symbols currently y-implying this symbol:\n")
        add_sis(sym.weak_rev_dep, 1,
                "Symbols currently m-implying this symbol:\n")
        add_sis(sym.weak_rev_dep, 0,
                "Symbols currently n-implying this symbol (no effect):\n")

    return s
def _kconfig_def_info(item):
    # Returns a string with the definition of 'item' in Kconfig syntax,
    # together with the definition location(s) and their include and menu paths

    # A MenuNode is a single definition location; Symbols/Choices can have
    # several definition nodes
    nodes = [item] if isinstance(item, MenuNode) else item.nodes

    s = "Kconfig definition{}, with propagated dependencies\n" \
        .format("s" if len(nodes) > 1 else "")
    # Underline the heading with '=' characters (len(s) - 1 skips the newline)
    s += (len(s) - 1)*"="

    for node in nodes:
        s += "\n\n" \
             "At {}:{}\n" \
             "{}" \
             "Menu path: {}\n\n" \
             "{}" \
             .format(node.filename, node.linenr,
                     _include_path_info(node),
                     _menu_path_info(node),
                     textwrap.indent(node.custom_str(_name_and_val_str), " "))

    return s
def _include_path_info(node):
if not node.include_path:
# In the top-level Kconfig file
return ""
return "Included via {}\n".format(
" -> ".join("{}:{}".format(filename, linenr)
for filename, linenr in node.include_path))
def _menu_path_info(node):
    # Returns a string describing the menu path leading up to 'node'

    path = ""

    # Walk up the chain of parent menus, prepending each prompt
    node = _parent_menu(node)
    while node is not _kconf.top_node:
        # Promptless choices might appear among the parents. Use
        # standard_sc_expr_str() for them, so that they show up as
        # '<choice (name if any)>'.
        path = " -> " + (node.prompt[0] if node.prompt else
                         standard_sc_expr_str(node.item)) + path

        node = _parent_menu(node)

    return "(top menu)" + path
def _name_and_val_str(sc):
    # Custom symbol/choice printer that shows symbol values after symbols

    # Show the values of non-constant (non-quoted) symbols that don't look like
    # numbers. Things like 123 are actually symbol references, and only work as
    # expected due to undefined symbols getting their name as their value.
    # Showing the symbol value for those isn't helpful though.
    if isinstance(sc, Symbol) and not sc.is_constant and not _is_num(sc.name):
        if not sc.nodes:
            # Undefined symbol reference
            return "{}(undefined/n)".format(sc.name)

        return '{}(={})'.format(sc.name, sc.str_value)

    # For other items (choices, constant symbols, numbers), use the standard
    # format
    return standard_sc_expr_str(sc)
def _expr_str(expr):
    # Custom expression printer that shows symbol values
    # (thin wrapper that plugs _name_and_val_str() into kconfiglib's
    # expr_str())
    return expr_str(expr, _name_and_val_str)
def _styled_win(style):
    # Returns a new curses window with style 'style' and space as the fill
    # character. The initial dimensions are (1, 1), so the window needs to be
    # sized and positioned separately.

    win = curses.newwin(1, 1)
    _set_style(win, style)
    return win

def _set_style(win, style):
    # Changes the style of an existing window. 'style' is a key into the
    # module-level _style dictionary.
    win.bkgdset(" ", _style[style])
def _max_scroll(lst, win):
    """Return the maximum number of steps 'win' can be scrolled down when
    displaying the items in 'lst'.

    Scrolling stops once the bottom item is visible, so the result is never
    negative.
    """
    overflow = len(lst) - _height(win)
    return overflow if overflow > 0 else 0
def _edit_text(c, s, i, hscroll, width):
    # Implements text editing commands for edit boxes. Takes a character (which
    # could also be e.g. curses.KEY_LEFT) and the edit box state, and returns
    # the new state after the character has been processed.
    #
    # c:
    #   Character from user
    #
    # s:
    #   Current contents of string
    #
    # i:
    #   Current cursor index in string
    #
    # hscroll:
    #   Index in s of the leftmost character in the edit box, for horizontal
    #   scrolling
    #
    # width:
    #   Width in characters of the edit box
    #
    # Return value:
    #   An (s, i, hscroll) tuple for the new state

    if c == curses.KEY_LEFT:
        if i > 0:
            i -= 1

    elif c == curses.KEY_RIGHT:
        if i < len(s):
            i += 1

    elif c in (curses.KEY_HOME, "\x01"):  # \x01 = CTRL-A
        i = 0

    elif c in (curses.KEY_END, "\x05"):  # \x05 = CTRL-E
        i = len(s)

    elif c in (curses.KEY_BACKSPACE, _ERASE_CHAR):
        # Delete the character before the cursor
        if i > 0:
            s = s[:i-1] + s[i:]
            i -= 1

    elif c == curses.KEY_DC:
        # Delete the character under the cursor
        s = s[:i] + s[i+1:]

    elif c == "\x17":  # \x17 = CTRL-W
        # Delete the word before the cursor.
        # The \W removes characters like ',' one at a time
        new_i = re.search(r"(?:\w*|\W)\s*$", s[:i]).start()
        s = s[:new_i] + s[i:]
        i = new_i

    elif c == "\x0B":  # \x0B = CTRL-K
        # Kill from the cursor to the end of the line
        s = s[:i]

    elif c == "\x15":  # \x15 = CTRL-U
        # Kill from the start of the line to the cursor
        s = s[i:]
        i = 0

    elif isinstance(c, str):
        # Insert character
        s = s[:i] + c + s[i:]
        i += 1

    # Adjust the horizontal scroll so that the cursor never touches the left or
    # right edges of the edit box, except when it's at the beginning or the end
    # of the string
    if i < hscroll + _SCROLL_OFFSET:
        hscroll = max(i - _SCROLL_OFFSET, 0)
    elif i >= hscroll + width - _SCROLL_OFFSET:
        max_scroll = max(len(s) - width + 1, 0)
        hscroll = min(i - width + _SCROLL_OFFSET + 1, max_scroll)

    return s, i, hscroll
def _load_save_info():
# Returns an information string for load/save dialog boxes
return "(Relative to {})\n\nRefer to your home directory with ~" \
.format(os.path.join(os.getcwd(), ""))
def _msg(title, text):
    # Pops up a message dialog with heading 'title' and body 'text' that can
    # be dismissed with Space/Enter/ESC
    _key_dialog(title, text, " \n")

def _error(text):
    # Pops up an error dialog showing 'text' that can be dismissed with
    # Space/Enter/ESC
    _msg("Error", text)
def _node_str(node):
    # Returns the complete menu entry text for a menu node.
    #
    # Example return value: "[*] Support for X"

    # Calculate the indent to print the item with by checking how many levels
    # above it the closest 'menuconfig' item is (this includes menus and
    # choices as well as menuconfig symbols)
    indent = 0
    parent = node.parent
    while not parent.is_menuconfig:
        indent += _SUBMENU_INDENT
        parent = parent.parent

    # This approach gives nice alignment for empty string symbols ("() Foo")
    s = "{:{}}".format(_value_str(node), 3 + indent)

    if _should_show_name(node):
        if isinstance(node.item, Symbol):
            s += " <{}>".format(node.item.name)
        else:
            # For choices, use standard_sc_expr_str(). That way they show up as
            # '<choice (name if any)>'.
            s += " " + standard_sc_expr_str(node.item)

    if node.prompt:
        if node.item == COMMENT:
            s += " *** {} ***".format(node.prompt[0])
        else:
            s += " " + node.prompt[0]

        if isinstance(node.item, Symbol):
            sym = node.item

            # Print "(NEW)" next to symbols without a user value (from e.g. a
            # .config), but skip it for choice symbols in choices in y mode,
            # and for symbols of UNKNOWN type (which generate a warning though)
            if sym.user_value is None and \
               sym.type != UNKNOWN and \
               not (sym.choice and sym.choice.tri_value == 2):

                s += " (NEW)"

        if isinstance(node.item, Choice) and node.item.tri_value == 2:
            # Print the prompt of the selected symbol after the choice for
            # choices in y mode

            sym = node.item.selection
            if sym:
                for sym_node in sym.nodes:
                    # Use the prompt used at this choice location, in case the
                    # choice symbol is defined in multiple locations
                    if sym_node.parent is node and sym_node.prompt:
                        s += " ({})".format(sym_node.prompt[0])
                        break
                else:
                    # If the symbol isn't defined at this choice location, then
                    # just use whatever prompt we can find for it
                    for sym_node in sym.nodes:
                        if sym_node.prompt:
                            s += " ({})".format(sym_node.prompt[0])
                            break

    # Print "--->" next to nodes that have menus that can potentially be
    # entered. Print "----" if the menu is empty. We don't allow those to be
    # entered.
    if node.is_menuconfig:
        s += " --->" if _shown_nodes(node) else " ----"

    return s
def _should_show_name(node):
    # Returns True if 'node' is a symbol or choice whose name should be shown
    # (if any, as names are optional for choices)

    # The 'not node.prompt' case only hits in show-all mode, for promptless
    # symbols and choices. _show_name is the user-toggled name-display flag.
    return not node.prompt or \
           (_show_name and isinstance(node.item, (Symbol, Choice)))
def _value_str(node):
    # Returns the value part ("[*]", "<M>", "(foo)" etc.) of a menu node

    item = node.item

    # Menus and comments have no value
    if item in (MENU, COMMENT):
        return ""

    # Wouldn't normally happen, and generates a warning
    if item.type == UNKNOWN:
        return ""

    if item.type in (STRING, INT, HEX):
        return "({})".format(item.str_value)

    # BOOL or TRISTATE

    if _is_y_mode_choice_sym(item):
        # Choice symbols in y-mode choices render as radio buttons
        return "(X)" if item.choice.selection is item else "( )"

    tri_val_str = (" ", "M", "*")[item.tri_value]

    if len(item.assignable) <= 1:
        # Pinned to a single value
        return "" if isinstance(item, Choice) else "-{}-".format(tri_val_str)

    if item.type == BOOL:
        return "[{}]".format(tri_val_str)

    # item.type == TRISTATE
    if item.assignable == (1, 2):
        # Can only be m or y (not n)
        return "{{{}}}".format(tri_val_str)  # {M}/{*}

    return "<{}>".format(tri_val_str)
def _is_y_mode_choice_sym(item):
    # The choice mode is an upper bound on the visibility of choice symbols, so
    # we can check the choice symbols' own visibility to see if the choice is
    # in y mode (2 is the tristate value for 'y')
    return isinstance(item, Symbol) and item.choice and item.visibility == 2
def _check_validity(sym, s):
    # Returns True if the string 's' is a well-formed value for 'sym'.
    # Otherwise, displays an error and returns False.

    if sym.type not in (INT, HEX):
        # Anything goes for non-int/hex symbols
        return True

    base = 10 if sym.type == INT else 16

    try:
        int(s, base)
    except ValueError:
        _error("'{}' is a malformed {} value"
               .format(s, TYPE_TO_STR[sym.type]))
        return False

    # Check the value against the symbol's 'range', if any
    for low_sym, high_sym, cond in sym.ranges:
        if expr_value(cond):
            low = int(low_sym.str_value, base)
            val = int(s, base)
            high = int(high_sym.str_value, base)

            if not low <= val <= high:
                _error("{} is outside the range {}-{}"
                       .format(s, low_sym.str_value, high_sym.str_value))
                return False

            # Only the first range whose condition is satisfied applies
            break

    return True
def _range_info(sym):
    # Returns a string with information about the valid range for the symbol
    # 'sym', or None if 'sym' doesn't have a range

    if sym.type in (INT, HEX):
        # Only the first range whose condition is satisfied applies
        for low, high, cond in sym.ranges:
            if expr_value(cond):
                return "Range: {}-{}".format(low.str_value, high.str_value)

    return None
def _is_num(name):
# Heuristic to see if a symbol name looks like a number, for nicer output
# when printing expressions. Things like 16 are actually symbol names, only
# they get their name as their value when the symbol is undefined.
try:
int(name)
except ValueError:
if not name.startswith(("0x", "0X")):
return False
try:
int(name, 16)
except ValueError:
return False
return True
def _get_wch_compat(win):
    # Decent resizing behavior on PDCurses requires calling resize_term(0, 0)
    # after receiving KEY_RESIZE, while ncurses (usually) handles terminal
    # resizing automatically in get(_w)ch() (see the end of the
    # resizeterm(3NCURSES) man page).
    #
    # resize_term(0, 0) reliably fails and does nothing on ncurses, so this
    # hack gives ncurses/PDCurses compatibility for resizing. I don't know
    # whether it would cause trouble for other implementations.

    c = win.get_wch()
    if c == curses.KEY_RESIZE:
        try:
            curses.resize_term(0, 0)
        except curses.error:
            # Expected failure on ncurses; nothing to do
            pass

    return c
def _warn(*args):
    # Temporarily returns from curses to shell mode and prints a warning to
    # stderr. The warning would get lost in curses mode.
    curses.endwin()
    print("menuconfig warning: ", end="", file=sys.stderr)
    print(*args, file=sys.stderr)
    # Re-enter curses mode and flush any pending screen updates
    curses.doupdate()
# Ignore exceptions from some functions that might fail, e.g. for small
# windows. They usually do reasonable things anyway.
def _safe_curs_set(visibility):
    # curs_set() raises curses.error on terminals that don't support changing
    # cursor visibility; just ignore that
    try:
        curses.curs_set(visibility)
    except curses.error:
        pass
def _safe_addstr(win, *args):
    # Clip the line to avoid wrapping to the next line, which looks glitchy.
    # addchstr() would do it for us, but it's not available in the 'curses'
    # module.
    #
    # Accepts the same argument forms as window.addstr():
    # (str[, attr]) or (y, x, str[, attr])

    attr = None
    if isinstance(args[0], str):
        # (str[, attr]) form: write at the current cursor position
        y, x = win.getyx()
        s = args[0]
        if len(args) == 2:
            attr = args[1]
    else:
        # (y, x, str[, attr]) form
        y, x, s = args[:3]
        if len(args) == 4:
            attr = args[3]

    # Clip to the space remaining on the row
    maxlen = _width(win) - x
    # Tabs would throw off the column arithmetic in addnstr()
    s = s.expandtabs()

    try:
        # The 'curses' module uses wattr_set() internally if you pass 'attr',
        # overwriting the background style, so setting 'attr' to 0 in the first
        # case won't do the right thing
        if attr is None:
            win.addnstr(y, x, s, maxlen)
        else:
            win.addnstr(y, x, s, maxlen, attr)
    except curses.error:
        pass
def _safe_addch(win, *args):
    # window.addch() that swallows curses errors (e.g. from writing to the
    # bottom-right corner of a window)
    try:
        win.addch(*args)
    except curses.error:
        pass

def _safe_hline(win, *args):
    # window.hline() that swallows curses errors for small windows
    try:
        win.hline(*args)
    except curses.error:
        pass

def _safe_vline(win, *args):
    # window.vline() that swallows curses errors for small windows
    try:
        win.vline(*args)
    except curses.error:
        pass

def _safe_move(win, *args):
    # window.move() that swallows curses errors for out-of-window coordinates
    try:
        win.move(*args)
    except curses.error:
        pass
def _convert_c_lc_ctype_to_utf8():
    # See _CONVERT_C_LC_CTYPE_TO_UTF8

    if _IS_WINDOWS:
        # Windows rarely has issues here, and the PEP 538 implementation avoids
        # changing the locale on it. None of the UTF-8 locales below were
        # supported from some quick testing either. Play it safe.
        return

    def _try_set_locale(loc):
        # Returns True if LC_CTYPE could be switched to the locale 'loc'
        try:
            locale.setlocale(locale.LC_CTYPE, loc)
            return True
        except locale.Error:
            return False

    # Is LC_CTYPE set to the C locale?
    if locale.setlocale(locale.LC_CTYPE, None) == "C":
        # This list was taken from the PEP 538 implementation in the CPython
        # code, in Python/pylifecycle.c
        for loc in "C.UTF-8", "C.utf8", "UTF-8":
            if _try_set_locale(loc):
                # Tell the user we changed their environment behind their back
                print("Note: Your environment is configured to use ASCII. To "
                      "avoid Unicode issues, LC_CTYPE was changed from the "
                      "C locale to the {} locale.".format(loc))
                break
# Are we running on Windows?
_IS_WINDOWS = (platform.system() == "Windows")

# Script entry point when run directly (rather than imported as a library)
if __name__ == "__main__":
    _main()
|
[
"daniel@agar.ca"
] |
daniel@agar.ca
|
cf93695b96b1a6a0089146e52e457aad1e3a56ab
|
e8d2d04e1e03c5cbd5882964f7a908307a2e4c01
|
/MITOCWHomeWork1.py
|
30989f6af297cd7b8d7f3543473a5da982c87c6d
|
[] |
no_license
|
APstring/Technology-Portfolio-of-Anand-Parthiban
|
90c7f3b689cad9524894006bd2a0f5b6a269f6ca
|
9d90cea88bb350ba4c87a7d57ffe7e2f552a4718
|
refs/heads/master
| 2021-06-10T01:36:01.865712
| 2017-01-14T20:15:22
| 2017-01-14T20:15:22
| 54,732,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,586
|
py
|
print('Hello World') #Ex 1.1
print(' | |\n--------\n | |\n--------\n | |') #Ex 1.2, prints out tictactoe
def tictactoe():
    """Print a tic-tac-toe board skeleton (Ex 1.3)."""
    # Wrapped in a function so the board is easy to reprint on demand
    row = '\n | | \n'
    divider = ' --------'
    print(row, divider, row, divider, row)
# Ex 1.4 part 2: evaluate each expression, then print it.
#
# BUG FIX: the original lines were written as e.g.
#   a = (3*5)/(2+3), print(a)
# which evaluates print(a) *before* 'a' is assigned (NameError on a fresh
# run) and would otherwise bind 'a' to a (value, None) tuple. Assign first,
# then print.
a = (3*5)/(2+3)           #Ex 1.4 part 2 1st
print(a)
b = 2*((7+9)**0.5)        #Ex 1.4 part 2 2nd; had to use powers instead of square roots
print(b)
c = (4-7)**3              #Ex 1.4 part 2 3rd
print(c)
d = (-19 + 100)**(0.25)   #Ex 1.4 part 2 4th
print(d)
e = 6%4                   #Ex 1.4 part 2 5th
print(e)
(3+2)*(4+5), 3+2*4+5 #Ex 1.4 part 3; 1st evaluates to 45, 2nd evaluates to 16
# Ex. 1.5: read the user's name and date of birth, then echo them back
fname = input("Enter your first name:")
lname = input("Enter your last name:")
print('Enter your date of birth:')
m = input("Month?")
d = input("Day?")
y = input("Year?")
# print() inserts a space between arguments, which pads the comma
print(fname, lname,'was born on', m, d,',',y)
#As suggested by the homework, I went on to do 1.9-1.11 and continued through till 1.15 before continueing with the rest of the problems.
#Ex 1.9 problems
#1. and: since 'and' is an existing phrase in Python with it's own syntax, it would be unfit for naming a variable.
#2. _and: This name is legal due to the underscore before 'and'
#3.var:This name is legal; no existing phrase 'var' in-built into python
#4.var1:This name is legal
#5.1var: The leading '1' causes a syntax error, since an identifier cannot begin with a digit
#6.my-name:The dash causes 'my' and 'name' to be treated as seperate so the phrase can't be used in assignment
#7.your_name: This name is legal; the underscore between ' your' and 'name' allows for this phrase to be unrelated to any inbuilt phrases in python
#8.COLOR: This name is legal; the casing of the letters does not affect assignment, unless changing the casing causes the phrase to gain a meaning, such as with 'class' and 'Class'
#Ex 1.10 problems
#1. a = False: This is a boolean value
#2. b = 3.7:This is a float value since 3.7 has a decimal place
#3. c = 'Alex': This is a string since the phrase is surrounded by quotes
#4. d = 7: This is an integer since the input is an number that is whole
#5. e = 'True': This is a string due to the quotes surrounding the phrase
#6. f = 17: This is an integer since the input is a whole number
#7. g = '17':This is a string since the phrase is surrounded by quotes
#8. h = True: This is a boolean value since the input is the phrase True
#9. i = '3.14159': This is a string since the phrase is surrounded by quotes
#Ex 1.11
#1. The girl saw a boy on a hill, and the boy was using a telescope.
#2. The girl saw a boy on a hill, and this girl was using a telescope to see the boy.
#3.Above sentences act as a description and answers this part
#4. Most programming languages are designed to have a preset order of events that occur in the code, similar to an order of operations, which tells the program how to run and when to run, cutting out ambiguity from trying to think which order the programs would be executed in.
#Ex. 1.12
a = False
b = True
c = False
#1. b and c: is True and False; 'and' is only True when both operands are True, so False is produced
#2. b or c: is True or False; 'or' is True when at least one operand is True, so the result is True
#3. not a and b: is not False and True; not False is True, and True and True is True, so output is True
#4. (a and b) or not c: is (False and True) or not False; False and True is False, giving False or not False, i.e. False or True, so the output is True
#5. not b and not(a or c): is not True and not(False or False), i.e. False and True; 'and' with a False operand gives False (the original answer of True was incorrect)
#Ex. 1.13
#1. Massachusetts, 50,000: No way; Massachusetts requires 100,000 for the work, otherwise not taken
#2. Iowa, 50,000: No thanks, I can find something better;
#3.California, 50,000: "I’ll take it!"
#4. U.S.S. Enterprise, 1: So long, suckers! I’ll take it!
#5. California, 25,000: No thanks, I can find something better
#Ex. 1.14
#1. prints 10, 9, 8, 7, 6, 5, 4; since it is a while loop, it loops until condition is not met which is that num <= 3
#2. prints 0, 1.0, 2.0, 3.0, 4.0; since range(0,10,2) means range of 0 to 10 for every even number in the range, with every number of that range divided by two and printed out
#3. prints 10, 9, 8 7; skips the num < 7 and prints the num, then reduces by 1, then repeats the while loop until num < 7 condition is fulfilled and breaks, which stops loop
#4. prints Letter #0 is S, Letter #1 is n, Letter #2 is o, Letter #3 is w, Letter #4 is !
#Ex. 1.15
#1. n is always equal to 10; i output(10, 5, 6, 3, 4, 2, 1, 2,1, ... alternating between 1 and 2)
#2.This code has an initial i that is set, acting as the input; the program outputs a number and loops based on whether the current number is even or odd. This program appears to have two problems: n has no relevance to this program and this program doesn't end. To fix that, the while loop could have i>1 as the condition; n could just be removed from the program since it has no relevance.
#Returning to problems 1.7- 1.8(Ex. 1.6 has no problems, just examples)
#Ex. 1.7: rock-paper-scissors. Moves are read as strings since only input()
#is available. A win table replaces the original chain of comparisons.
""" Player 1: R P S R P S R P S
    Player 2: R P S P S R S R P
    Outcome: For Player 1- Tie(first 3), Lose(middle 3), Win(Last 3)
    For Player 2- opposite #not a doctest"""
# Each (winner_move, loser_move) pair in this set is a win for the first move
_BEATS = {('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')}
player1 = input('Player 1?')
player2 = input('Player 2?')
if (player1, player2) in _BEATS:
    print('Player 1 wins.')
elif (player2, player1) in _BEATS:
    print('Player 2 wins.')
elif player1 == player2:
    print('Tie game')
else:
    print('This is not a valid object selection.')
#Ex. 1.8
# NOTE(review): eval() on raw user input executes arbitrary code; int() (or
# float()) would be the safe choice for all the numeric prompts below.
#part 1: print the reciprocals 1/2 through 1/10
for num in range(2,11):
    print(1/num)
#part 2: count down from a user-supplied number to 0
n = eval(input('Starting number?'))
if n < 0:
    # (the "negatve" typo is in the original prompt string; left as-is)
    print("Can't countdown to zero from a negatve number!")
while n > -1:
    print(n)
    n -= 1
#part 3: exponentiation via the built-in pow()
b = eval(input("What is your base?"))
e = eval(input("What is your exponent?"))
print(pow(b,e))
#part 4: keep prompting until an even number is entered
n = eval(input("Enter an even number."))
while n%2 != 0:
    print('\n'"Sorry, try again.")
    n = eval(input('\n'"Enter an even number."))
print('\n'"You did it!")
#Ex. OPT.1: compute the day of the week for a birth date using a
# Zeller-style congruence with the shifted month/year encoding described in
# the input prompts
fname = input("Enter your first name:")
lname = input("Enter your last name:")
# NOTE(review): eval() on raw user input executes arbitrary code - int()
# would be safer for these numeric prompts
A = eval(input("What number month is your birthday?\ni.e. March = 1, April = 2, ..., December = 10, Janurary = 11, Febraury = 12"'\n'))
B = eval(input("What day of the month was your birthday?"))
C = eval(input("What is the year of the century you were born on?\nif Janurary or february was submitted previously, enter the number corresponding to the year before the desired year."'\n'))
D = eval(input("What century were you born in?(Enter a two-digit number i.e. for 1900s, input 19 for 19th century"'\n'))
# NOTE(review): these use true division; day-of-week formulas of this kind
# normally use floor division (//) - confirm against the exercise statement
W = (13*A - 1)/5
X = C/4
Y = D/4
Z = W + X + Y + B + C - 2*D
# Reduce to a day index 0-6 (Sunday-Saturday)
R = Z%7
# Python's % already returns a non-negative result for a positive modulus,
# so this loop is a no-op safeguard
while R < 0:
    R = R + 7
day = ['Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']#used a list to simplify process
print(fname, lname,'was born on a',day[round(R)])
|
[
"APstring@users.noreply.github.com"
] |
APstring@users.noreply.github.com
|
34b9e38e60666d4c37a8a80f5c36ccc9b83a0ba7
|
c76d96457f3b5f58de75097152b133b30aee3296
|
/starter/starter/settings.py
|
582ba7d2c9b05c4bff53d590e17b5d69dff763c5
|
[] |
no_license
|
philolo1/python-starter
|
3d8b83a5b5fe74c2831bee70459a6364bee6ea3b
|
cceb1160bce69548efe3ae8c58ca91b21ed251e9
|
refs/heads/master
| 2021-01-13T07:23:24.391851
| 2016-10-10T20:24:45
| 2016-10-10T20:24:45
| 71,341,245
| 0
| 0
| null | 2016-10-19T09:32:09
| 2016-10-19T09:32:09
| null |
UTF-8
|
Python
| false
| false
| 5,404
|
py
|
"""
Django settings for the project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
import datetime
import json
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
with open(os.path.join(BASE_DIR, '../shared_config.json')) as common_config_file:
common_config = json.load(common_config_file)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET')
ALLOWED_HOSTS = ['*']
APPEND_SLASH = True
# Application definition
INSTALLED_APPS = (
'django_gulp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'storages',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'static'
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
# Include the default Django email handler for errors
# This is what you'd get without configuring logging at all.
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
'level': 'ERROR',
# But the emails are plain text by default - HTML is nicer
'include_html': True,
},
'console': {
'class': 'logging.StreamHandler'
},
},
'loggers': {
# Again, default Django configuration to email unhandled exceptions
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
# Might as well log any errors anywhere else in Django
'django': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': False,
}
},
}
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend'
]
ANONYMOUS_USER_ID = -1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware'
)
ROOT_URLCONF = 'starter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'starter.context_processors.environment',
],
},
},
]
WSGI_APPLICATION = 'starter.wsgi.application'
# Django Rest Framework
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
}
# JSON Web Tokens
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_ALLOW_REFRESH': True
}
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config()
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
# Frontend settings
SITE_NAME = "Starter Project"
# Email settings
EMAIL_HOST = os.getenv('EMAIL_HOST', 'localhost')
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD', '')
EMAIL_PORT = os.getenv('EMAIL_PORT', 25)
EMAIL_USE_TLS = bool(os.getenv('EMAIL_USE_TLS', False))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "../frontend/dist"),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
WEBPACK_ROOT = ''
# Load local settings
env = os.getenv('ENVIRONMENT')
if env == "local":
from .env_settings.local import *
|
[
"morgante.pell@morgante.net"
] |
morgante.pell@morgante.net
|
106b146a73be9c5a10c9d1516d98943b6345cdc9
|
8ec766830804b782f72e489a82f92600ab2357b0
|
/xgboost/accuracy.py
|
fb63b3bfb7a5d25b603899a8bd665c814adfc584
|
[] |
no_license
|
daytonpe/continuous-fault-diagnostic
|
d40f9d82ffcedee8dffa8639f26102948faa19a1
|
de1ea1a6c68a2353d2d12a96ace2809d915270bd
|
refs/heads/master
| 2022-06-01T00:23:29.940267
| 2020-04-17T04:13:07
| 2020-04-17T04:13:07
| 242,881,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,726
|
py
|
import pandas as pd
import sklearn
from sklearn import metrics
import numpy as np
from influxdb import InfluxDBClient
import datetime
import time
import arg_inputs
# connect to the TSDB
client = InfluxDBClient(host='localhost', port=8086,
username='admin', password='password')
client.switch_database('timeseriesdb')
i = 0
def calc_accuracy(mins):
    """Compute the rolling accuracy over the last `mins` minutes and store it.

    Pulls (label, prediction) pairs written inside the window from InfluxDB,
    scores them with sklearn, writes the score back in line protocol as a
    `norm_acc` field, and echoes it to stdout.  Returns None; when the window
    holds no rows the function silently does nothing.
    """
    window_start = datetime.datetime.utcnow() - datetime.timedelta(minutes=mins)
    query = "SELECT label, prediction FROM timeseriesdb.autogen.labeled_data where time > '{}'".format(
        window_start)
    results = client.query(query)
    try:
        rows = results.raw['series'][0]['values']
    except KeyError:
        # No 'series' key means the window contained no data points.
        return
    frame = pd.DataFrame(data=rows, columns=[
        "time", "label", "prediction"])
    labels = frame['label'].astype(str).astype(int).to_numpy()
    predictions = frame['prediction'].astype(str).astype(int).to_numpy()
    accuracy = sklearn.metrics.accuracy_score(
        labels, predictions, normalize=True, sample_weight=None)
    points = ["{},metric={} norm_acc={}".format(
        'labeled_data', 'accuracy', str(accuracy))]
    client.write_points(points, database='timeseriesdb',
                        time_precision='s', batch_size=1, protocol='line')
    print(accuracy)
# Poll loop: recompute the rolling accuracy every 5 seconds, forever.
args = arg_inputs.get_input_args()
while True:
    # Advance the module-level iteration counter; the original never
    # incremented it, so the error message always reported iteration 0.
    i += 1
    try:
        calc_accuracy(args.mins)
        time.sleep(5)
    except ValueError:
        # A malformed query result must not kill the poller; log and retry.
        print('Value Error caught, skipping iteration ', i)
|
[
"daytonpe@Pats-MacBook-Pro.local"
] |
daytonpe@Pats-MacBook-Pro.local
|
a8c12c092afa894c3e4d5a4d602626f98cede1e4
|
865bd0c84d06b53a39943dd6d71857e9cfc6d385
|
/645-set-mismatch/set-mismatch.py
|
195dc65ad7e9c23df1db4800f971b262c7a02801
|
[] |
no_license
|
ANDYsGUITAR/leetcode
|
1fd107946f4df50cadb9bd7189b9f7b7128dc9f1
|
cbca35396738f1fb750f58424b00b9f10232e574
|
refs/heads/master
| 2020-04-01T18:24:01.072127
| 2019-04-04T08:38:44
| 2019-04-04T08:38:44
| 153,473,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
#
# The set S originally contains numbers from 1 to n. But unfortunately, due to the data error, one of the numbers in the set got duplicated to another number in the set, which results in repetition of one number and loss of another number.
#
#
#
# Given an array nums representing the data status of this set after the error. Your task is to firstly find the number occurs twice and then find the number that is missing. Return them in the form of an array.
#
#
#
# Example 1:
#
# Input: nums = [1,2,2,4]
# Output: [2,3]
#
#
#
# Note:
#
# The given array size will in the range [2, 10000].
# The given array's numbers won't have any order.
#
#
class Solution:
    def findErrorNums(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Tally each value 1..n: the value observed a second time is the
        duplicate; the value whose tally stays at zero is the missing one.
        """
        tally = dict.fromkeys(range(1, len(nums) + 1), 0)
        for value in nums:
            if tally[value]:
                duplicate = value
            else:
                tally[value] = 1
        for candidate, seen in tally.items():
            if seen == 0:
                return [duplicate, candidate]
|
[
"andyandwei@163.com"
] |
andyandwei@163.com
|
ce674133fe7706ef28daa1c17e98f8e508626e9d
|
cafb0f090143b8645de00e64acc440bec855235a
|
/blog/migrations/0006_blogcategory.py
|
738731313c656a4a7f3bf9b2d82d8ffb54318f88
|
[] |
no_license
|
thomasdahlberg/wag_tutorial
|
a7e2854650f65abd096d4cc9367e3c6aee1aebc2
|
03c31efd2c304929d188be9dafda8d457f5a0f0d
|
refs/heads/master
| 2023-02-25T23:01:45.444431
| 2021-01-30T02:21:01
| 2021-01-30T02:21:01
| 334,007,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
# Generated by Django 3.1.5 on 2021-01-17 18:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration introducing the BlogCategory model.

    BlogCategory is a named category with an optional Wagtail image icon;
    SET_NULL keeps the category alive if its image is deleted.
    """

    dependencies = [
        ('wagtailimages', '0022_uploadedimage'),
        ('blog', '0005_blogtagindexpage'),
    ]
    operations = [
        migrations.CreateModel(
            name='BlogCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('icon', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
            ],
            options={
                'verbose_name_plural': 'blog categories',
            },
        ),
    ]
|
[
"thomas.dahlberg8@gmail.com"
] |
thomas.dahlberg8@gmail.com
|
37e3e5505aa27d63359e5dc5b20c0ef9ee7d8818
|
a0d2a90c21ff3e05e0fd939698a6dfb7e54d16d9
|
/GServer/utils/lora_crypto.py
|
23c63c1274b822d1fb70e070bc83afb121956f5b
|
[
"MIT"
] |
permissive
|
soybean217/lora-python
|
4a72407607d2201a91b5e0a7dcd115d7788b7e65
|
9c4324f81bae8b20f6c353447189f724a5cf54c6
|
refs/heads/master
| 2022-12-13T08:24:13.267783
| 2017-12-06T08:20:40
| 2017-12-06T08:20:40
| 102,331,484
| 0
| 0
|
MIT
| 2022-12-07T23:56:50
| 2017-09-04T07:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 6,466
|
py
|
from ctypes import CDLL, c_uint8, c_uint32, create_string_buffer, byref
import os
class LoRaCrypto:
    """ctypes wrapper around the native LoRaMAC crypto shared library.

    Each static method marshals Python bytes into C buffers, invokes the
    corresponding library entry point, and returns the result as `bytes`.
    """

    # Load the platform-specific shared library once, at class-definition time.
    if os.name == 'nt':
        sys_path = 'D:\lora_server\GServer'
        Crypto = CDLL(sys_path + "\DLLs\LoRaMacCrypto.dll")
    else:
        Crypto = CDLL("./lora_encrypt/libloraCrypto.so")
    # initial_dir = os.getcwd()
    # os.chdir('/home/gaozhi/GServer/www/')
    # Crypto = CDLL("./lora_encrypt/libloraCrypto.so")
    # os.chdir(initial_dir)

    @staticmethod
    def compute_mic(msg, key, address, dir, sequenceCounter):
        """Compute the 4-byte message integrity code for a frame.

        NOTE(review): len(msg) is passed as c_uint8, so messages longer than
        255 bytes would be truncated -- confirm against the C signature.
        """
        mic = (c_uint32 * 1)()
        LoRaCrypto.Crypto.LoRaMacComputeMic(create_string_buffer(msg),
                                            c_uint8(len(msg)),
                                            create_string_buffer(key),
                                            c_uint32(address),
                                            c_uint8(dir),
                                            c_uint32(sequenceCounter),
                                            byref(mic))
        return bytes(mic)

    @staticmethod
    def payload_encrypt(buffer, key, address, dir, sequenceCounter):
        """Encrypt a payload; returns ciphertext bytes of the same length."""
        enBuffer = (c_uint8 * len(buffer))()
        LoRaCrypto.Crypto.LoRaMacPayloadEncrypt(create_string_buffer(buffer),
                                                c_uint8(len(buffer)),
                                                create_string_buffer(key),
                                                c_uint32(address),
                                                c_uint8(dir),
                                                c_uint32(sequenceCounter),
                                                byref(enBuffer))
        # print('encryptbuffer:',hexlify(bytes(enBuffer)[:len(buffer)]).decode())
        return bytes(enBuffer)

    @staticmethod
    def payload_decrypt(encbuffer, key, address, dir, sequenceCounter):
        """Decrypt a payload; returns plaintext bytes of the same length."""
        Buffer = (c_uint8 * len(encbuffer))()
        LoRaCrypto.Crypto.LoRaMacPayloadDecrypt(create_string_buffer(bytes(encbuffer)),
                                                c_uint8(len(encbuffer)),
                                                create_string_buffer(bytes(key)),
                                                c_uint32(address),
                                                c_uint8(dir),
                                                c_uint32(sequenceCounter),
                                                byref(Buffer))
        # print('dncryptbuffer result:',hexlify(bytes(Buffer)[:len(encbuffer)]).decode())
        return bytes(Buffer)

    @staticmethod
    def join_compute_skey(key, appNonce, devNonce):
        '''
        Derive the network and application session keys after a join.
        :param key: bytes 16
        :param appNonce: bytes 3
        :param devNonce: bytes 2
        :return: (nwkSKey, appSKey), two 16-byte strings
        '''
        nwkSKey = (c_uint8 * 16)()
        appSKey = (c_uint8 * 16)()
        devnonce = int.from_bytes(devNonce, byteorder='little')
        # NOTE(review): devNonce is a 16-bit value but is passed as c_uint8,
        # truncating it to the low byte -- confirm against the C signature.
        LoRaCrypto.Crypto.LoRaMacJoinComputeSKeys(create_string_buffer(key),
                                                  create_string_buffer(appNonce),
                                                  c_uint8(devnonce),
                                                  byref(nwkSKey),
                                                  byref(appSKey))
        return bytes(nwkSKey), bytes(appSKey)

    @staticmethod
    def join_compute_mic(join_request, key):
        """Compute the 4-byte MIC over a join-request message."""
        mic = (c_uint32 * 1)()
        LoRaCrypto.Crypto.LoRaMacJoinComputeMic(create_string_buffer(join_request),
                                                c_uint8(len(join_request)),
                                                create_string_buffer(key),
                                                byref(mic))
        return bytes(mic)

    @staticmethod
    def join_encrypt(clear,key):
        """Encrypt a join message; returns bytes of the same length as `clear`."""
        cypher = (c_uint8 * len(clear))()
        LoRaCrypto.Crypto.LoRaMacJoinEncrypt(create_string_buffer(bytes(clear)),
                                             c_uint8(len(clear)),
                                             create_string_buffer(bytes(key)),
                                             byref(cypher))
        # print('encryptbuffer result:',hexlify(bytes(cypher)).decode())
        return bytes(cypher)

    @staticmethod
    def join_decrypt(cypher,key):
        """Decrypt a join message; returns bytes of the same length as `cypher`."""
        clear = (c_uint8 * len(cypher))()
        LoRaCrypto.Crypto.LoRaMacJoinDecrypt(create_string_buffer(bytes(cypher)),
                                             c_uint8(len(cypher)),
                                             create_string_buffer(bytes(key)),
                                             byref(clear))
        # print('decryptbuffer result:',hexlify(bytes(clear)).decode())
        return bytes(clear)

    @staticmethod
    def ping_rand_compute(key, beacon_time, dev_addr):
        """Compute the 16-byte ping-slot randomization block for a device."""
        enBuffer = (c_uint8 * 16)()
        LoRaCrypto.Crypto.LoRaPingRandencrypt(
            create_string_buffer(key),
            c_uint32(beacon_time),
            c_uint32(dev_addr),
            byref(enBuffer))
        return bytes(enBuffer)
# void LoRaMacJoinDecrypt( uint8_t *buffer, uint16_t size, const uint8_t *key, uint8_t *decBuffer );
#
# void LoRaMacJoinEncrypt(uint8_t *buffer, uint16_t size, const uint8_t *key, uint8_t *decBuffer);
if __name__ == '__main__':
    # Manual smoke test: decode a captured uplink frame and compare the MIC it
    # carries against a locally computed one.
    from binascii import a2b_base64, hexlify

    def base64_decode(data):
        """Decode base64 text, restoring padding stripped in transit.

        The original computed ``4 - len(data) % 4``, which appended FOUR
        superfluous '=' characters when the input length was already a
        multiple of four; ``-len(data) % 4`` is zero in that case.
        (The old ``except binascii.Error: raise error`` merely re-raised, and
        the trailing ``return None`` was unreachable, so both were removed.)
        """
        missing_padding = -len(data) % 4
        if missing_padding:
            data += '=' * missing_padding
        return a2b_base64(data)

    data = base64_decode("gA4AcImAAQDycpx+oEjC1A==")
    print(data)
    print(hexlify(data[1:5]))
    # DevAddr occupies bytes 1-4 of the frame, little-endian.
    dev_addr = int.from_bytes(data[1:5], byteorder='little')
    print(dev_addr)
    mic = data[len(data)-4:]  # the last four bytes of the frame carry the MIC
    nwkskey = b'\xfc\n\x83\xb7\xab\x04\xaa\xa0\x14\x94u\x8b\xea\xcf\x97:'
    fcnt = 1
    addr = 2305818638
    dir_up = 0
    # MIC is computed over everything except the trailing MIC itself.
    mic_compute = LoRaCrypto.compute_mic(data[0:len(data)-4], nwkskey, addr, dir_up, fcnt)
    print()
    print('mic', mic)
    print('mic_compute', mic_compute)
|
[
"13565644@qq.com"
] |
13565644@qq.com
|
97a9f2739416e4a50695d2f42ddad60c3f00e90e
|
375d97944171f1300a979a7603adfa2a0741f921
|
/HW1/projectSet_2.py
|
e68143949a888a2d335511331b31a4dd4333ca3c
|
[] |
no_license
|
yasinBursali/neural-network-lecture-projects
|
741ad111658c8d3b5c9b2d185a7d4fd81c605003
|
9d8f3096ae4ad2507493aa5ea3a094e7d456dd15
|
refs/heads/master
| 2022-11-12T13:49:05.142575
| 2020-06-29T16:28:06
| 2020-06-29T16:28:06
| 275,821,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,175
|
py
|
import numpy as np
# Input dataset for Training: four 3-bit patterns (the third column is a
# constant 1, acting as a bias input).
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])
# Output class labels for Training: the XOR of the first two input bits.
y = np.array([[0],
              [1],
              [1],
              [0]])
# Held-out inputs used by Question 2 at the bottom of the script.
X1_test = np.array([1, 1, 0])
X2_test = np.array([1, 1, 1])
# eq 1 in the slide
def sigmoid(z):
    """Logistic activation: squashes any real `z` into the open interval (0, 1)."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
# eq 2 in the slide
def sigmoid_prime(z):
    """Derivative of the logistic function.

    `z` is expected to ALREADY be a sigmoid output, so the derivative is
    simply z * (1 - z) with no further sigmoid call.
    """
    derivative = z * (1 - z)
    return derivative
def test_NN(X, w0, w1):
    """Forward pass of the 3-4-1 network: return the output activation for `X`."""
    hidden = sigmoid(np.dot(X, w0))
    output = sigmoid(np.dot(hidden, w1))
    return output
def SGD(X, y, alpha, epoch, batch_size):
    """Mini-batch stochastic gradient descent for a 1-D linear model y = m*x + b.

    Args:
        X, y: arrays of inputs and targets (np.take flattens them as needed).
        alpha: learning rate.
        epoch: number of update steps.
        batch_size: samples drawn (with replacement) per step.

    Returns:
        (m, b, log, mse): final slope/intercept, the per-step (m, b) history,
        and the per-step mean squared error over the FULL dataset.
    """
    m, b = 0.33, 0.48  # initial parameters
    log, mse = [], []  # lists to store learning process
    for _ in range(epoch):
        indexes = np.random.randint(0, len(X), batch_size)  # random sample
        Xs = np.take(X, indexes)
        ys = np.take(y, indexes)
        N = len(Xs)
        f = ys - (m * Xs + b)
        # Updating parameters m and b.  The original referenced an undefined
        # name `lr`; the learning rate is this function's `alpha` parameter.
        m -= alpha * (-2 * Xs.dot(f).sum() / N)
        b -= alpha * (-2 * f.sum() / N)
        log.append((m, b))
        # `mean_squared_error` (sklearn) is never imported in this file, so
        # compute the MSE directly with NumPy instead.
        mse.append(float(np.mean((y - (m * X + b)) ** 2)))
    return m, b, log, mse
# to ensure that generated random numbers
# are the same no matter how many times you run this.
# (The original `m,b, log, mse = np.random.random(1)` raised a ValueError:
# a one-element array cannot be unpacked into four names; seeding the PRNG
# is what the comment above actually describes.)
np.random.seed(1)
alpha = 0.4
batch = 10000
a0 = X
# weights, initialize it randomly
# make sure first weight matrix (weights connecting the input layer
# into hidden layer is 3 by 4
# and second weight matrix, weights connecting Hidden layer to output layer
# We assign random weights with values in the range -1 to 1
# and mean 0.
w0 = 2 * np.random.random((3, 4)) - 1  # Weights between input and first hidden layer
w1 = 2 * np.random.random((4, 1)) - 1
# `apha` was a typo that raised NameError; the learning rate is `alpha`.
SGD(X, y, alpha, 5, 2)
""""
for cntr in range(batch):
    batch_x = X
    batch_y = y
    n = batch_x.shape[0]
    # first input layer no activation functions
    a0 = batch_x
    # Perform Feedforward operation
    z1 = np.dot(a0, w0)
    a1 = sigmoid(z1)  # a1: activation values of the first layer, zeroth layer is input layer!
    # do the calcuation for the z values, notice that instead of sum operator
    # as we have seen in the class, we utilize the matrix operation dot product
    z2 = np.dot(a1, w1)
    a2 = sigmoid(z2)  # a2: second layer activation aka NN's output values
    l2_error = (a2 - batch_y) / n
    # print the total error sum for our gradient descent algorithm
    if cntr % 1000 == 0:
        print('Error:' + str(np.mean(np.mean(np.abs(l2_error)))))
    # eq. 6 in the slide
    l2_delta = l2_error * sigmoid_prime(a2)
    l1_error = l2_delta.dot(w1.T)
    # eq 7 in the slide
    l1_delta = l1_error * sigmoid_prime(a1)
    # eq 5 in the slide
    w1 -= alpha * a1.T.dot(l2_delta)
    w0 -= alpha * a0.T.dot(l1_delta)
"""
# `a2` was only assigned inside the (disabled) training loop above, so the
# original print raised NameError; run a forward pass to obtain the network's
# current output instead.
a2 = test_NN(a0, w0, w1)
print('Output after training:')
print(a2)
print("\n********** QUESTION 2 **********\n")
print("Output for X1_test:")
print(test_NN(X1_test, w0, w1))
print("Output for X2_test:")
print(test_NN(X2_test, w0, w1))
|
[
"noreply@github.com"
] |
yasinBursali.noreply@github.com
|
97c2fc574e11b5a778ff83ecdac089b988d5387d
|
5e06a943a9a73f122cf4b520ab6cc5f695a1abc3
|
/test.py
|
06408d75b26e5e78f9eac323dc851d9f04a38279
|
[] |
no_license
|
kundan8239/salescalculater
|
0074bb10eb8537e4a6c435f6217cb6a4b25c5790
|
35ca12558c42c8c2c8f45af8be6015489d7e89eb
|
refs/heads/master
| 2021-07-16T08:46:00.971869
| 2017-10-23T18:31:43
| 2017-10-23T18:31:43
| 108,022,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
from Inputs import getInput
from ItemType import getTaxStatus
from ItemTax import getItemTax
import csv
def csv_writer(data):
    """Write one [salesTax, total] row (plus a header) to output.csv.

    data: two-element list of already-formatted values.
    NOTE: the file is re-created ("w" mode) on every call, so only the most
    recent receipt survives.  (Removed: an unused `content` accumulator and a
    commented-out loop that never executed.)
    """
    with open("output.csv", "w") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['SaleTax', 'Total'])
        writer.writerow(data)
# Interactive driver (Python 2): repeatedly process receipt files until the
# user types 'Done'.  For each file it prints per-item taxes and writes the
# totals to output.csv via csv_writer.
while True:
    filename = raw_input('Enter \'Filename\' or \'Done\' to exit\t')
    print "\n"
    if filename.lower()=='done':
        break
    all_items = getInput(filename)
    total_without_tax = 0
    total_with_tax = 0
    for item in all_items:
        # item[0] is the description line (first char = quantity), item[1] the price.
        qty = int(item[0][0])
        taxStatus, imported = getTaxStatus(item[0]) # Returns True if tax is applied on item and True if item is imported.
        itemTax = getItemTax(qty, taxStatus, imported, item[1])
        total_without_tax += float(item[1])
        total_with_tax += float(itemTax)
        print item[0], " : ", itemTax
    salesTax = total_with_tax - total_without_tax
    # format() converts the totals to their string form for display/CSV.
    salesTax = format(salesTax)
    total_with_tax = format(total_with_tax)
    # NOTE(review): `datalist=list()` is immediately overwritten by the literal
    # on the next line, and output.csv is rewritten on every loop iteration.
    datalist=list()
    datalist=[salesTax,total_with_tax];
    csv_writer(datalist)
    print "Sales Taxes : ", salesTax
    print "Total : ", total_with_tax,"\n"
|
[
"singhkundan7631@gmail.com"
] |
singhkundan7631@gmail.com
|
ff043dffe53eb0008d006afac66a7fda19bcde0f
|
9c713425498c8366c47c3a4ce1a50c791c24572f
|
/src/controller/python/test/test_scripts/base.py
|
5afa974834dcebfe182c8b98e53286b4be6193e2
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
krzysztofziobro/connectedhomeip
|
d17a754c88bf9a4703baf1bef222e34327042139
|
f07ff95183d33af17bc833c92c64e7409c6fd2eb
|
refs/heads/master
| 2023-07-17T22:46:22.035503
| 2021-08-28T16:08:30
| 2021-08-28T16:08:30
| 385,177,354
| 0
| 0
|
Apache-2.0
| 2021-07-12T11:39:14
| 2021-07-12T08:28:33
| null |
UTF-8
|
Python
| false
| false
| 10,364
|
py
|
#
# Copyright (c) 2021 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass
from typing import Any
from chip import ChipDeviceCtrl
import chip.interaction_model as IM
import threading
import os
import sys
import logging
import time
# Module-level logger: INFO and above, timestamp/name/level format, emitted to
# stdout so the test harness captures it alongside the test output.
logger = logging.getLogger('PythonMatterControllerTEST')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(
    logging.Formatter(
        '%(asctime)s [%(name)s] %(levelname)s %(message)s'))
sh.setStream(sys.stdout)
logger.addHandler(sh)
def TestFail(message):
    """Log `message` as a fatal test failure and terminate immediately.

    os._exit skips interpreter cleanup so background threads (e.g. the
    watchdog) cannot block process exit.
    """
    logger.fatal("Testfail: {}".format(message))
    os._exit(1)
def FailIfNot(cond, message):
    """Abort the whole test run with `message` unless `cond` is truthy."""
    if not cond:
        TestFail(message)
class TestTimeout(threading.Thread):
    """Watchdog thread: fails the whole run if it outlives `timeout` seconds."""

    def __init__(self, timeout: int):
        threading.Thread.__init__(self)
        self._timeout = timeout  # seconds until the watchdog fires
        self._should_stop = False  # set by stop() to cancel the watchdog
        self._cv = threading.Condition()  # guards _should_stop and wakes run()

    def stop(self):
        """Cancel the watchdog and wait for its thread to exit."""
        with self._cv:
            self._should_stop = True
            self._cv.notify_all()
        self.join()

    def run(self):
        """Sleep until stop() or the deadline; on deadline, abort the process."""
        stop_time = time.time() + self._timeout
        logger.info("Test timeout set to {} seconds".format(self._timeout))
        with self._cv:
            wait_time = stop_time - time.time()
            # Condition.wait can wake spuriously, hence the re-checked loop.
            while wait_time > 0 and not self._should_stop:
                self._cv.wait(wait_time)
                wait_time = stop_time - time.time()
        if time.time() > stop_time:
            TestFail("Timeout")
class BaseTestHelper:
    """Shared driver for the Python controller integration tests.

    Wraps a ChipDeviceController and exposes one Test* method per scenario;
    methods log their progress and return True on success / False on failure
    so callers can chain them with FailIfNot().
    """

    def __init__(self, nodeid: int):
        # Controller instance used by every test method below.
        self.devCtrl = ChipDeviceCtrl.ChipDeviceController(
            controllerNodeId=nodeid)
        self.logger = logger

    def TestKeyExchange(self, ip: str, setuppin: int, nodeid: int):
        """Establish a secure session with the device at `ip` using `setuppin`."""
        self.logger.info("Conducting key exchange with device {}".format(ip))
        if not self.devCtrl.ConnectIP(ip.encode("utf-8"), setuppin, nodeid):
            self.logger.info(
                "Failed to finish key exchange with device {}".format(ip))
            return False
        self.logger.info("Device finished key exchange.")
        return True

    def TestCloseSession(self, nodeid: int):
        """Close any open sessions with `nodeid`; True on success."""
        self.logger.info(f"Closing sessions with device {nodeid}")
        try:
            self.devCtrl.CloseSession(nodeid)
            return True
        except Exception as ex:
            self.logger.exception(
                f"Failed to close sessions with device {nodeid}: {ex}")
            return False

    def TestNetworkCommissioning(self, nodeid: int, endpoint: int, group: int, dataset: str, network_id: str):
        """Provision a Thread network: AddThreadNetwork then EnableNetwork."""
        self.logger.info("Commissioning network to device {}".format(nodeid))
        try:
            self.devCtrl.ZCLSend("NetworkCommissioning", "AddThreadNetwork", nodeid, endpoint, group, {
                "operationalDataset": bytes.fromhex(dataset),
                "breadcrumb": 0,
                "timeoutMs": 1000}, blocking=True)
        except Exception as ex:
            self.logger.exception("Failed to send AddThreadNetwork command")
            return False
        self.logger.info(
            "Send EnableNetwork command to device {}".format(nodeid))
        try:
            self.devCtrl.ZCLSend("NetworkCommissioning", "EnableNetwork", nodeid, endpoint, group, {
                "networkID": bytes.fromhex(network_id),
                "breadcrumb": 0,
                "timeoutMs": 1000}, blocking=True)
        except Exception as ex:
            self.logger.exception("Failed to send EnableNetwork command")
            return False
        return True

    def TestOnOffCluster(self, nodeid: int, endpoint: int, group: int):
        """Send OnOff.On then OnOff.Off; True only when both round-trips succeed."""
        self.logger.info(
            "Sending On/Off commands to device {} endpoint {}".format(nodeid, endpoint))
        err, resp = self.devCtrl.ZCLSend("OnOff", "On", nodeid,
                                         endpoint, group, {}, blocking=True)
        if err != 0 or resp is None or resp.ProtocolCode != 0:
            self.logger.error(
                "failed to send OnOff.On: error is {} with im response{}".format(err, resp))
            return False
        err, resp = self.devCtrl.ZCLSend("OnOff", "Off", nodeid,
                                         endpoint, group, {}, blocking=True)
        if err != 0 or resp is None or resp.ProtocolCode != 0:
            self.logger.error(
                "failed to send OnOff.Off: error is {} with im response {}".format(err, resp))
            return False
        return True

    def TestResolve(self, fabricid, nodeid):
        """Resolve the device's operational address on the given fabric.

        NOTE(review): unlike its siblings this returns None on both success
        and failure, so callers cannot FailIfNot() it -- confirm intended.
        """
        self.logger.info(
            "Resolve {} with fabric id: {}".format(nodeid, fabricid))
        try:
            self.devCtrl.ResolveNode(fabricid=fabricid, nodeid=nodeid)
        except Exception as ex:
            self.logger.exception("Failed to resolve. {}".format(ex))

    def TestReadBasicAttribiutes(self, nodeid: int, endpoint: int, group: int):
        """Read every Basic-cluster attribute and compare with expected values.

        (The "Attribiutes" typo in the name is preserved: external callers
        depend on it.)
        """
        basic_cluster_attrs = {
            "VendorName": "TEST_VENDOR",
            "VendorID": 9050,
            "ProductName": "TEST_PRODUCT",
            "ProductID": 65279,
            "UserLabel": "",
            "Location": "",
            "HardwareVersion": 0,
            "HardwareVersionString": "TEST_VERSION",
            "SoftwareVersion": 0,
            "SoftwareVersionString": "prerelease",
        }
        failed_zcl = {}
        for basic_attr, expected_value in basic_cluster_attrs.items():
            try:
                res = self.devCtrl.ZCLReadAttribute(cluster="Basic",
                                                    attribute=basic_attr,
                                                    nodeid=nodeid,
                                                    endpoint=endpoint,
                                                    groupid=group)
                if res is None:
                    raise Exception(
                        "Read {} attribute: no value get".format(basic_attr))
                elif res.status != 0:
                    raise Exception(
                        "Read {} attribute: non-zero status code {}".format(basic_attr, res.status))
                elif res.value != expected_value:
                    raise Exception("Read {} attribute: expect {} got {}".format(
                        basic_attr, repr(expected_value), repr(res.value)))
            except Exception as ex:
                failed_zcl[basic_attr] = str(ex)
        if failed_zcl:
            # NOTE(review): logger.exception outside an except block logs
            # "NoneType: None" as the traceback; logger.error may be intended.
            self.logger.exception(f"Following attributes failed: {failed_zcl}")
            return False
        return True

    def TestWriteBasicAttributes(self, nodeid: int, endpoint: int, group: int):
        """Write Basic-cluster attributes (one valid, one deliberately invalid)
        and verify each write's status, reading back the successful ones."""
        @dataclass
        class AttributeWriteRequest:
            cluster: str
            attribute: str
            value: Any
            expected_status: IM.ProtocolCode = IM.ProtocolCode.Success

        requests = [
            AttributeWriteRequest("Basic", "UserLabel", "Test"),
            AttributeWriteRequest("Basic", "Location",
                                  "a pretty loooooooooooooog string", IM.ProtocolCode.InvalidValue),
        ]
        failed_zcl = []
        for req in requests:
            try:
                res = self.devCtrl.ZCLWriteAttribute(cluster=req.cluster,
                                                     attribute=req.attribute,
                                                     nodeid=nodeid,
                                                     endpoint=endpoint,
                                                     groupid=group,
                                                     value=req.value)
                if res is None:
                    raise Exception(
                        f"Write {req.cluster}.{req.attribute} attribute: no value get")
                elif res.status != req.expected_status:
                    raise Exception(
                        f"Write {req.cluster}.{req.attribute} attribute: expected status is {req.expected_status} got {res.status}")
                if req.expected_status != IM.ProtocolCode.Success:
                    # The write above was EXPECTED to fail, so there is no
                    # stored value to read back; skip the verification below.
                    continue
                res = self.devCtrl.ZCLReadAttribute(
                    cluster=req.cluster, attribute=req.attribute, nodeid=nodeid, endpoint=endpoint, groupid=group)
                if res is None:
                    raise Exception(
                        f"Read written {req.cluster}.{req.attribute} attribute: failed to read attribute")
                elif res.status != 0:
                    raise Exception(
                        f"Read written {req.cluster}.{req.attribute} attribute: non-zero status code {res.status}")
                elif res.value != req.value:
                    raise Exception(
                        f"Read written {req.cluster}.{req.attribute} attribute: expected {req.value} got {res.value}")
            except Exception as ex:
                failed_zcl.append(str(ex))
        if failed_zcl:
            self.logger.exception(f"Following attributes failed: {failed_zcl}")
            return False
        return True

    def TestNonControllerAPIs(self):
        '''
        This function validates various APIs provided by chip package which is not related to controller.
        TODO: Add more tests for APIs
        '''
        try:
            cluster = self.devCtrl.GetClusterHandler()
            clusterInfo = cluster.GetClusterInfoById(0x50F)  # TestCluster
            if clusterInfo["clusterName"] != "TestCluster":
                raise Exception(
                    f"Wrong cluster info clusterName: {clusterInfo['clusterName']} expected TestCluster")
        except Exception as ex:
            self.logger.exception(f"Failed to finish API test: {ex}")
            return False
        return True
|
[
"noreply@github.com"
] |
krzysztofziobro.noreply@github.com
|
3f6bdf1c025180bb27170573672938f6f4319cf0
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/examples/error_handling/handle_partial_failure.py
|
193a562a1fbb34bfa2c42d7a72038a932e4d70e1
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 8,924
|
py
|
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This shows how to handle responses that may include partial_failure errors.
"""
import argparse
import sys
import uuid
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, campaign_id):
    """Demonstrates handling of partial failures in a mutate request.

    Creates three Ad Groups, two of which are built to fail, so the mutate
    response contains a partial failure; shows how to detect it and log the
    individual error messages.

    Args:
        client: An initialized GoogleAdsClient instance.
        customer_id: A valid customer account ID.
        campaign_id: The ID for a campaign to create Ad Groups under.
    """
    try:
        response = _create_ad_groups(client, customer_id, campaign_id)
    except GoogleAdsException as ex:
        # A whole-request failure (not a partial one): report every error and
        # exit non-zero.  sys.exit raises SystemExit, so the success path
        # below is never reached in this branch.
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}" and includes the following errors:'
        )
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f"\t\tOn field: {field_path_element.field_name}")
        sys.exit(1)
    _print_results(client, response)
# [START handle_partial_failure]
def _create_ad_groups(client, customer_id, campaign_id):
    """Creates three Ad Groups, two of which intentionally generate errors.

    Args:
        client: An initialized GoogleAdsClient instance.
        customer_id: A valid customer account ID.
        campaign_id: The ID for a campaign to create Ad Groups under.

    Returns: A MutateAdGroupsResponse message instance.
    """
    ad_group_service = client.get_service("AdGroupService")
    campaign_service = client.get_service("CampaignService")
    valid_campaign = campaign_service.campaign_path(customer_id, campaign_id)
    # Campaign ID 0 never exists, so this resource name is always invalid.
    bogus_campaign = campaign_service.campaign_path(customer_id, 0)

    def _new_operation(name, campaign):
        # Build one AdGroupOperation creating `name` under `campaign`.
        operation = client.get_type("AdGroupOperation")
        operation.create.name = name
        operation.create.campaign = campaign
        return operation

    valid_name = f"Valid AdGroup: {uuid.uuid4()}"
    ad_group_operations = [
        # Should succeed, assuming the campaign in the params exists.
        _new_operation(valid_name, valid_campaign),
        # Always fails: campaign ID 0 in resource names is never valid.
        _new_operation(f"Broken AdGroup: {uuid.uuid4()}", bogus_campaign),
        # Always fails: duplicate ad group names are not allowed.
        _new_operation(valid_name, valid_campaign),
    ]

    # Issue a single mutate request with partial_failure enabled so valid
    # operations are applied even though the others fail.
    request = client.get_type("MutateAdGroupsRequest")
    request.customer_id = customer_id
    request.operations = ad_group_operations
    request.partial_failure = True
    return ad_group_service.mutate_ad_groups(request=request)
# [END handle_partial_failure]
# [START handle_partial_failure_1]
def _is_partial_failure_error_present(response):
"""Checks whether a response message has a partial failure error.
In Python the partial_failure_error attr is always present on a response
message and is represented by a google.rpc.Status message. So we can't
simply check whether the field is present, we must check that the code is
non-zero. Error codes are represented by the google.rpc.Code proto Enum:
https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
Args:
response: A MutateAdGroupsResponse message instance.
Returns: A boolean, whether or not the response message has a partial
failure error.
"""
partial_failure = getattr(response, "partial_failure_error", None)
code = getattr(partial_failure, "code", None)
return code != 0
# [END handle_partial_failure_1]
# [START handle_partial_failure_2]
def _print_results(client, response):
    """Prints partial failure errors and success messages from a response.

    This function shows how to retrieve partial_failure errors from a response
    message (in the case of this example the message will be of type
    MutateAdGroupsResponse) and how to unpack those errors to GoogleAdsFailure
    instances. It also shows that a response with partial failures may still
    contain successful requests, and that those messages should be parsed
    separately. As an example, a GoogleAdsFailure object from this example will
    be structured similar to:

    error_code {
        range_error: TOO_LOW
    }
    message: "Too low."
    trigger {
        string_value: ""
    }
    location {
        field_path_elements {
            field_name: "operations"
            index {
                value: 1
            }
        }
        field_path_elements {
            field_name: "create"
        }
        field_path_elements {
            field_name: "campaign"
        }
    }

    Args:
        client: an initialized GoogleAdsClient.
        response: a MutateAdGroupsResponse instance.
    """
    # Check for existence of any partial failures in the response.
    if _is_partial_failure_error_present(response):
        print("Partial failures occurred. Details will be shown below.\n")
        # Prints the details of the partial failure errors.
        partial_failure = getattr(response, "partial_failure_error", None)
        # partial_failure_error.details is a repeated field and iterable
        error_details = getattr(partial_failure, "details", [])
        for error_detail in error_details:
            # Retrieve an instance of the GoogleAdsFailure class from the client
            failure_message = client.get_type("GoogleAdsFailure")
            # Parse the string into a GoogleAdsFailure message instance.
            # To access class-only methods on the message we retrieve its type.
            GoogleAdsFailure = type(failure_message)
            # error_detail.value holds the serialized GoogleAdsFailure bytes.
            failure_object = GoogleAdsFailure.deserialize(error_detail.value)
            for error in failure_object.errors:
                # Construct and print a string that details which element in
                # the above ad_group_operations list failed (by index number)
                # as well as the error message and error code.
                print(
                    "A partial failure at index "
                    f"{error.location.field_path_elements[0].index} occurred "
                    f"\nError message: {error.message}\nError code: "
                    f"{error.error_code}"
                )
    else:
        print(
            "All operations completed successfully. No partial failure "
            "to show."
        )
    # In the list of results, operations from the ad_group_operation list
    # that failed will be represented as empty messages. This loop detects
    # such empty messages and ignores them, while printing information about
    # successful operations.
    for message in response.results:
        if not message:
            continue
        print(f"Created ad group with resource_name: {message.resource_name}.")
# [END handle_partial_failure_2]
if __name__ == "__main__":
    # Entry point: build the API client, parse CLI arguments, and run the
    # partial-failure example via main().
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    googleads_client = GoogleAdsClient.load_from_storage(version="v10")
    parser = argparse.ArgumentParser(
        description="Adds an ad group for specified customer and campaign id."
    )
    # The following argument(s) should be provided to run the example.
    parser.add_argument(
        "-c",
        "--customer_id",
        type=str,
        required=True,
        help="The Google Ads customer ID.",
    )
    parser.add_argument(
        "-i", "--campaign_id", type=str, required=True, help="The campaign ID."
    )
    args = parser.parse_args()
    main(googleads_client, args.customer_id, args.campaign_id)
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
c8d6371959d4baa088bbc0db5d8d44ff22799d38
|
06b105d66dabb1a4f6a7520c1d154f18b6dccfc7
|
/coding challenge 3.py
|
4ae128fd5bc545d7ffb5bc46b64fdcad5f455e51
|
[] |
no_license
|
vampirepapi/PythonBasicPrograms
|
c2cf8873af46852c7f78195f9035fd9cac991585
|
8693c0b67d3e44031954841bc4738fae332c9536
|
refs/heads/master
| 2020-12-23T09:03:26.482161
| 2020-01-29T23:55:11
| 2020-01-29T23:55:11
| 237,105,773
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
def bmi(w, h):
    """Return the body mass index for weight *w* (kg) and height *h* (m).

    BMI = weight / height**2.
    """
    return w / h ** 2


if __name__ == "__main__":
    # Moved the interactive prompts under a __main__ guard so the module can
    # be imported without blocking on stdin; removed the commented-out
    # earlier attempts. Prompts and output are unchanged.
    w = float(input("enter weight"))
    h = float(input("enter height"))
    result = bmi(w, h)
    print(result)
|
[
"noreply@github.com"
] |
vampirepapi.noreply@github.com
|
90c38a6e71a1e5eed24bfcd11bdacc8b6c43f208
|
fbd2d335c9ee3bb17e13cbcb38f11a5d1e06b517
|
/gbpservice/neutron/extensions/apic_segmentation_label.py
|
8ede42c8a2683aa35cf6feeb72e9ec55cf7a1372
|
[
"Apache-2.0"
] |
permissive
|
baodongli/group-based-policy
|
50b13937686b3fac38f16a7a10fd34853eb4b1e6
|
f3b892ecdc1051b204376e18679f73bf457ce7dc
|
refs/heads/master
| 2020-05-21T05:10:36.616122
| 2017-03-10T15:51:59
| 2017-03-10T15:51:59
| 84,575,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from gbpservice.neutron.extensions import group_policy as gp
# Alias string under which this API extension is registered.
CISCO_APIC_GBP_SEGMENTATION_LABEL_EXT = 'cisco_apic_gbp_label_segmentation'

# Attributes this extension grafts onto the Policy Target resource:
# an optional list of unique segmentation-label strings (None is converted
# to an empty list on input).
EXTENDED_ATTRIBUTES_2_0 = {
    gp.POLICY_TARGETS: {
        'segmentation_labels': {
            'allow_post': True, 'allow_put': True, 'default': None,
            'validate': {'type:list_of_unique_strings': None},
            'convert_to': attr.convert_none_to_empty_list,
            'is_visible': True},
    },
}
class Apic_segmentation_label(extensions.ExtensionDescriptor):
    """Neutron API extension descriptor for APIC GBP segmentation labels."""

    @classmethod
    def get_name(cls):
        # Human-readable extension name.
        return "APIC GBP Segmentation Extension"

    @classmethod
    def get_alias(cls):
        # Short alias used to identify/request the extension.
        return CISCO_APIC_GBP_SEGMENTATION_LABEL_EXT

    @classmethod
    def get_description(cls):
        # NOTE(review): "_" is presumably the gettext translation helper
        # installed as a builtin by the surrounding project -- confirm.
        return _("This extension supports a list of (micro)segmentation "
                 "labels that can be applied to the Policy Target resource.")

    @classmethod
    def get_updated(cls):
        # Timestamp of the last change to the extension definition.
        return "2016-08-03T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only API version 2.0 receives the extra attributes.
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}
|
[
"sumitnaiksatam@gmail.com"
] |
sumitnaiksatam@gmail.com
|
a8300b195a4efcc78bc6ec54e8b44b3a74b8d9a9
|
8af9f1a36301e264817edc315634b2422c351331
|
/aoc/2021/day8/day8.py
|
aa4fed65b714a1198db2267c8da9b005231c6de8
|
[] |
no_license
|
mo-morgan/CPSolutions
|
af04a7e252198a249977cd6698a3f588331c9e6f
|
2f1d84610370abb79eacbcf3c231f5dff1c060f4
|
refs/heads/master
| 2022-01-25T18:54:36.653881
| 2021-12-31T02:21:51
| 2021-12-31T02:21:51
| 157,975,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
if __name__ == "__main__":
    # Reads puzzle lines from stdin; for each line, counts the tokens after
    # the " | " separator whose length is 2, 3, 4 or 7.
    unique_lengths = (2, 3, 4, 7)
    total = 0
    while True:
        try:
            line = input()
        except EOFError:
            break
        outputs = line.split(" | ")[1].split()
        total += sum(1 for token in outputs if len(token) in unique_lengths)
    print(total)
|
[
"momorgan@student.ubc.ca"
] |
momorgan@student.ubc.ca
|
6c48e77b59d7cfa27fb25143be0313c51be85a23
|
90cbf7093998b1cdceb270e3828a076a468d2d63
|
/main.py
|
9a1f4409a289a1934744e96b5c9bb32534c50acb
|
[] |
no_license
|
llwasampijja/andela35-databases
|
e2852445f7fef085d78d4d72f11f3ffcdb218bcc
|
89a1ae35f479ba90066d2645946779755304dadd
|
refs/heads/feature
| 2020-04-12T01:15:28.038141
| 2018-12-18T13:36:44
| 2018-12-18T13:36:44
| 162,226,635
| 0
| 0
| null | 2018-12-18T13:46:23
| 2018-12-18T03:48:53
|
Python
|
UTF-8
|
Python
| false
| false
| 852
|
py
|
from simcard_controller import SimCardController
from human_controller import HumanController
from database_migrations import DatabaseMigration
databaseMigration = DatabaseMigration()
def run_humans():
    """Exercise the full CRUD cycle on human records."""
    controller = HumanController()
    controller.add_humans()
    controller.get_human_by_id()
    controller.update_human()
    controller.delete_human()
def run_simcards():
    """Exercise the full CRUD cycle on simcard records."""
    controller = SimCardController()
    controller.add_simcards()
    controller.get_simcard_by_id()
    controller.update_simcard()
    controller.delete_simcard()
if __name__ == "__main__":
    # Run the schema migrations first so the tables and extra columns exist
    # before the CRUD demos below touch them.
    databaseMigration.create_db_tables()
    databaseMigration.add_profession_column()
    databaseMigration.add_nationality_column()
    databaseMigration.change_phone_datatype()
    run_humans()
    run_simcards()
|
[
"llwasampijja@gmail.com"
] |
llwasampijja@gmail.com
|
fdc14ec3b246a7885740217270aa1a0f12e1c377
|
a9143ce455988523a4cbae465755c12577ea4af7
|
/deals/serializers.py
|
cb8580f8d0ff74c51abbbc6693c329d522e50924
|
[] |
no_license
|
sudhirmishra/treebodeals
|
06eeaa5bc63294132df50b581b0a4a8032573f70
|
1a6950250ecf5a2a8575b69576536f6dd0bac84b
|
refs/heads/master
| 2021-01-19T11:52:21.940420
| 2016-10-02T06:40:01
| 2016-10-02T06:40:01
| 69,770,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from rest_framework import serializers
from deals import models
class DealSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the listed Deal model fields.

    The order of `fields` defines the field order in the serialized output.
    """

    class Meta:
        model = models.Deal
        fields = ('id', 'name', 'image', 'rating', 'link', 'actual_price','discount','location')
|
[
"sudhirxps@gmail.com"
] |
sudhirxps@gmail.com
|
0a38096e3f9c40e21e1465a6e917c1e8e01d841f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/139/usersdata/189/61633/submittedfiles/diagonaldominante.py
|
b939318ccc98950ec57977b3f08ee5c530bf1988
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
import numpy as np
def soma(a):
    """Return a list with, for each row of `a`, the sum of its
    off-diagonal entries (every a[i, l] with l != i)."""
    row_sums = []
    for row in range(a.shape[0]):
        off_diagonal = sum(
            a[row, col] for col in range(a.shape[1]) if col != row
        )
        row_sums.append(off_diagonal)
    return row_sums
def diagonal(a, b):
    """Return True when every diagonal entry a[i, i] is >= b[i].

    Only the diagonal element of each row matters; the inner scan in the
    original merely located column i, so a direct lookup is equivalent.
    """
    rows, cols = a.shape
    for i in range(rows):
        # Guard keeps behavior identical for non-square input, where the
        # original skipped rows without a diagonal entry.
        if i < cols and a[i, i] < b[i]:
            return False
    return True
# Read the matrix order, then fill an n x n matrix one value at a time
# from stdin.
n=int(input('Matriz:'))
a=np.zeros((n,n))
for i in range (0,a.shape[0],1):
    for l in range(0,a.shape[1],1):
        a[i,l]=int(input('valor:'))
# b[i] = sum of row i's off-diagonal entries.
b=soma(a)
# Diagonally dominant per this exercise: a[i,i] >= b[i] for every row
# (no absolute values are taken).
if diagonal(a,b):
    print('SIM')
else:
    print('NÃO')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
d88217fda390ffb9747b444e050ab4566c0fe5e0
|
ae3911ef8fb5ec0d1a7ed4a9dfc400957e6f7e98
|
/nli/models/__init__.py
|
e620d6df2929ab4c93504e02bacd54e975211b0c
|
[
"MIT"
] |
permissive
|
CeliaYao329/latent-treelstm
|
8495cf08cd7b91ffd63c5f4f582b0740f39d9562
|
8850aadc67fea1ff5e2545f628cd26359df1a92c
|
refs/heads/master
| 2020-08-22T02:15:01.557943
| 2019-10-26T15:32:54
| 2019-10-26T15:32:54
| 216,297,652
| 0
| 0
|
MIT
| 2019-10-20T02:38:26
| 2019-10-20T02:38:26
| null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from nli.models.ReinforceModel import ReinforceModel
from nli.models.PpoModel import PpoModel
|
[
"ajoulin@fb.com"
] |
ajoulin@fb.com
|
da1d35c100ab64c6c74ffbc7ea9b8d5e4a4c798a
|
358f6b9ac21f2c0f64acc132197a57310bbcede8
|
/flack/tests/selenium/test_channel_view.py
|
499afbdfb9201dc161d837ea0b064f876eaabef5
|
[] |
no_license
|
thenakedthunder/flack
|
b43bda331772dd79f50d9016befa792a234376ff
|
a2285c75d2f9ca3568380b3868da0b35b10188cd
|
refs/heads/master
| 2021-06-23T23:12:06.209614
| 2020-03-11T17:31:48
| 2020-03-11T17:31:48
| 187,687,357
| 0
| 0
| null | 2021-01-05T20:58:35
| 2019-05-20T17:44:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
#FILE :selenium/test_channel_view.py
import unittest
import time
from test_helper import TestHelper
class Test_channel_view(unittest.TestCase):
    """Selenium UI tests for the channel view of the Flack app."""

    def __init__(self, methodName = 'runTest'):
        # Each test instance gets its own TestHelper, which wraps the
        # shared Selenium driver.
        self.test_helper = TestHelper(self)
        return super().__init__(methodName)

    @classmethod
    def setUpClass(cls):
        # Get the display name prompt out of the way
        # (it is tested in another class)
        TestHelper.setup_with_new_displayname()
        # and create new channels
        TestHelper.create_new_channel("Csepűrágó1")
        # NOTE(review): the sleep presumably lets the first channel register
        # before the second is created -- confirm.
        time.sleep(1)
        TestHelper.create_new_channel("Csepűrágó2")

    # ------------------ CHANNEL VIEW TESTS ------------------- #

    def test_channel_step1_channel_name_displayed(self):
        """Opening a channel link shows that channel's name in the header."""
        driver = self.test_helper.driver
        open_channel_link = driver.find_element_by_id("channel-2")
        open_channel_link.click()
        channel_name_text = driver.find_element_by_id("channel-name").text
        self.assertEqual(channel_name_text, "Csepűrágó2")
|
[
"balazs.radvanyi88@gmail.com"
] |
balazs.radvanyi88@gmail.com
|
41e5f935d6c9d79bc316b189a822aedc29cb0299
|
9bf3a3d2b9a4b64af4c7768242661664d1ea214f
|
/src/util/DataAccess.py
|
bc02fa4faaa6bbf7cc96e870c2969e678a71b36b
|
[] |
no_license
|
foreversand/quantstrategy
|
9c13cb3b4f7a93e0ad2ff06cefd005882dd67199
|
50143fe3d19868ae05e0bbb50d08188b0d2f8f39
|
refs/heads/master
| 2021-01-10T11:46:17.909556
| 2015-11-12T09:12:08
| 2015-11-12T09:12:08
| 44,369,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,606
|
py
|
'''
Created on Nov 11, 2015
@author: sandfan
'''
import numpy as np
import pandas as pa
import os
import re
import csv
import pickle as pkl
import time
import datetime as dt
import dircache
import tempfile
import copy
class Exchange (object):
    """Numeric identifiers for the supported market venues."""

    # Shanghai Stock Exchange (cf. the /CN/SHSE/ data folder below).
    SHSE = 1
    # Shenzhen Stock Exchange (cf. the /CN/SZSE/ data folder below).
    SZSE = 2
    # Futures markets (cf. the /CN/FUTURE/ data folder below).
    FUTURE = 3
class DataItem (object):
    """Column-name constants for the per-symbol time-series data files."""

    # Daily price/volume column labels.
    OPEN = "open"
    HIGH = "high"
    LOW = "low"
    CLOSE = "close"
    VOL = "volume"
    AMU = "amount"
    ACTUAL_CLOSE = "actual_close"
    ADJUSTED_CLOSE = "adj_close"
# fundamental data
FUNDDATA = ['MRQ','PB', 'EPS', 'PE', 'PS', 'ROE', 'GROWTH', 'PEG', 'CAP']
# Compustat label list pulled from _analyze() in compustat_csv_to_pkl.py
COMPUSTAT = ['gvkey', 'fyearq', 'fqtr', 'fyr', 'ACCTSTDQ', 'ADRRQ', 'AJEXQ', 'AJPQ', 'CURRTRQ', 'CURUSCNQ', 'PDQ', 'PDSA', 'PDYTD', 'SCFQ', 'SRCQ', 'UPDQ', 'ACCDQ', 'ACCHGQ', 'ACCOQ', 'ACOMINCQ', 'ACOQ', 'ACOXQ', 'ACTQ', 'ADPACQ', 'ALTOQ', 'AMQ', 'ANCQ', 'ANOQ', 'AOCIDERGLQ', 'AOCIOTHERQ', 'AOCIPENQ', 'AOCISECGLQ', 'AOL2Q', 'AOQ', 'AOTQ', 'APOQ', 'APQ', 'AQAQ', 'AQDQ', 'AQEPSQ', 'AQPL1Q', 'AQPQ', 'ARCED12', 'ARCEDQ', 'ARCEEPS12', 'ARCEEPSQ', 'ARCEQ', 'ARTFSQ', 'ATQ', 'AUL3Q', 'AUTXRQ', 'BCEFQ', 'BCTQ', 'BDIQ', 'CAPCSTQ', 'CAPR1Q', 'CAPR2Q', 'CAPR3Q', 'CAPRTQ', 'CAPSQ', 'CAQ', 'CEQQ', 'CFBDQ', 'CFEREQ', 'CFOQ', 'CFPDOQ', 'CHEQ', 'CHQ', 'CHSQ', 'CIBEGNIQ', 'CICURRQ', 'CIDERGLQ', 'CIMIIQ', 'CIOTHERQ', 'CIPENQ', 'CIQ', 'CISECGLQ', 'CITOTALQ', 'CLTQ', 'COGSQ', 'CSH12Q', 'CSHFDQ', 'CSHIQ', 'CSHOPQ', 'CSHOQ', 'CSHPRQ', 'CSTKEQ', 'CSTKQ', 'DCOMQ', 'DFPACQ', 'DFXAQ', 'DILADQ', 'DILAVQ', 'DITQ', 'DLCQ', 'DLTTQ', 'DOQ', 'DPACREQ', 'DPACTQ', 'DPQ', 'DPRETQ', 'DPTBQ', 'DPTCQ', 'DRCQ', 'DRLTQ', 'DTEAQ', 'DTEDQ', 'DTEEPSQ', 'DTEPQ', 'DVPDPQ', 'DVPQ', 'DVRREQ', 'DVTQ', 'EPSF12', 'EPSFIQ', 'EPSFXQ', 'EPSPIQ', 'EPSPXQ', 'EPSX12', 'EQRTQ', 'EROQ', 'ESOPCTQ', 'ESOPNRQ', 'ESOPRQ', 'ESOPTQ', 'ESUBQ', 'FCAQ', 'FEAQ', 'FELQ', 'FFOQ', 'GDWLAMQ', 'GDWLIA12', 'GDWLIAQ', 'GDWLID12', 'GDWLIDQ', 'GDWLIEPS12', 'GDWLIEPSQ', 'GDWLIPQ', 'GDWLQ', 'GLAQ', 'GLCEA12', 'GLCEAQ', 'GLCED12', 'GLCEDQ', 'GLCEEPS12', 'GLCEEPSQ', 'GLCEPQ', 'GLDQ', 'GLEPSQ', 'GLPQ', 'GPQ', 'HEDGEGLQ', 'IATIQ', 'IBADJ12', 'IBADJQ', 'IBCOMQ', 'IBKIQ', 'IBMIIQ', 'IBQ', 'ICAPTQ', 'IDITQ', 'IIREQ', 'IITQ', 'INTACCQ', 'INTANOQ', 'INTANQ', 'INTCQ', 'INVFGQ', 'INVOQ', 'INVRMQ', 'INVTQ', 'INVWIPQ', 'IOBDQ', 'IOIQ', 'IOREQ', 'IPQ', 'IPTIQ', 'ISGTQ', 'ISTQ', 'IVAEQQ', 'IVAOQ', 'IVIQ', 'IVLTQ', 'IVPTQ', 'IVSTQ', 'IVTFSQ', 'LCABGQ', 'LCACUQ', 'LCOQ', 'LCOXQ', 'LCTQ', 'LLTQ', 'LNOQ', 'LOL2Q', 'LOQ', 'LOXDRQ', 'LQPL1Q', 'LSEQ', 'LSQ', 'LTMIBQ', 'LTQ', 'LUL3Q', 'MIBNQ', 'MIBQ', 'MIBTQ', 'MIIQ', 'MSAQ', 'MTLQ', 'NCOQ', 'NIITQ', 'NIMQ', 'NIQ', 
'NITQ', 'NOPIOQ', 'NOPIQ', 'NPATQ', 'NRTXTDQ', 'NRTXTEPSQ', 'NRTXTQ', 'OEPF12', 'OEPS12', 'OEPSXQ', 'OIADPQ', 'OIBDPQ', 'OPEPSQ', 'OPROQ', 'OPTDRQ', 'OPTFVGRQ', 'OPTLIFEQ', 'OPTRFRQ', 'OPTVOLQ', 'PCLQ', 'PIQ', 'PLLQ', 'PNC12', 'PNCD12', 'PNCDQ', 'PNCEPS12', 'PNCEPSQ', 'PNCIAPQ', 'PNCIAQ', 'PNCIDPQ', 'PNCIDQ', 'PNCIEPSPQ', 'PNCIEPSQ', 'PNCIPPQ', 'PNCIPQ', 'PNCPD12', 'PNCPDQ', 'PNCPEPS12', 'PNCPEPSQ', 'PNCPQ', 'PNCQ', 'PNCWIAPQ', 'PNCWIAQ', 'PNCWIDPQ', 'PNCWIDQ', 'PNCWIEPQ', 'PNCWIEPSQ', 'PNCWIPPQ', 'PNCWIPQ', 'PNRSHOQ', 'PPEGTQ', 'PPENTQ', 'PRCAQ', 'PRCD12', 'PRCDQ', 'PRCE12', 'PRCEPS12', 'PRCEPSQ', 'PRCPD12', 'PRCPDQ', 'PRCPEPS12', 'PRCPEPSQ', 'PRCPQ', 'PRCQ', 'PRCRAQ', 'PRSHOQ', 'PSTKNQ', 'PSTKQ', 'PSTKRQ', 'PTRANQ', 'PVOQ', 'PVTQ', 'RATIQ', 'RAWMSMQ', 'RCAQ', 'RCDQ', 'RCEPSQ', 'RCPQ', 'RDIPAQ', 'RDIPDQ', 'RDIPEPSQ', 'RDIPQ', 'RECCOQ', 'RECDQ', 'RECTAQ', 'RECTOQ', 'RECTQ', 'RECTRQ', 'RECUBQ', 'REITQ', 'REQ', 'RETQ', 'REUNAQ', 'REVTQ', 'RISQ', 'RLLQ', 'RLTQ', 'RRA12', 'RRAQ', 'RRD12', 'RRDQ', 'RREPS12', 'RREPSQ', 'RRPQ', 'RVLRVQ', 'RVTIQ', 'RVUTXQ', 'SAAQ', 'SALEQ', 'SALQ', 'SBDCQ', 'SCOQ', 'SCQ', 'SCTQ', 'SEQOQ', 'SEQQ', 'SETA12', 'SETAQ', 'SETD12', 'SETDQ', 'SETEPS12', 'SETEPSQ', 'SETPQ', 'SPCE12', 'SPCED12', 'SPCEDPQ', 'SPCEDQ', 'SPCEEPS12', 'SPCEEPSP12', 'SPCEEPSPQ', 'SPCEEPSQ', 'SPCEP12', 'SPCEPD12', 'SPCEPQ', 'SPCEQ', 'SPIDQ', 'SPIEPSQ', 'SPIOAQ', 'SPIOPQ', 'SPIQ', 'SRETQ', 'SSNPQ', 'STKCHQ', 'STKCOQ', 'STKCPAQ', 'TDSGQ', 'TDSTQ', 'TEQQ', 'TFVAQ', 'TFVCEQ', 'TFVLQ', 'TIEQ', 'TIIQ', 'TRANSAQ', 'TSTKNQ', 'TSTKQ', 'TXDBAQ', 'TXDBQ', 'TXDIQ', 'TXDITCQ', 'TXPQ', 'TXTQ', 'TXWQ', 'UACOQ', 'UAOQ', 'UAPTQ', 'UCAPSQ', 'UCCONSQ', 'UCEQQ', 'UDDQ', 'UDMBQ', 'UDOLTQ', 'UDPCOQ', 'UDVPQ', 'UGIQ', 'UINVQ', 'ULCOQ', 'UNIAMIQ', 'UNNPQ', 'UNOPINCQ', 'UOPIQ', 'UPDVPQ', 'UPMCSTKQ', 'UPMPFQ', 'UPMPFSQ', 'UPMSUBPQ', 'UPSTKCQ', 'UPSTKQ', 'URECTQ', 'USPIQ', 'USUBDVPQ', 'USUBPCVQ', 'UTEMQ', 'WCAPQ', 'WDAQ', 'WDDQ', 'WDEPSQ', 'WDPQ', 'XAGTQ', 'XBDTQ', 'XCOMIQ', 'XCOMQ', 'XDVREQ', 
'XIDOQ', 'XINTQ', 'XIOQ', 'XIQ', 'XIVIQ', 'XIVREQ', 'XOBDQ', 'XOIQ', 'XOPROQ', 'XOPRQ', 'XOPT12', 'XOPTD12', 'XOPTD12P', 'XOPTDQ', 'XOPTDQP', 'XOPTEPS12', 'XOPTEPSP12', 'XOPTEPSQ', 'XOPTEPSQP', 'XOPTQ', 'XOPTQP', 'XOREQ', 'XPPQ', 'XRDQ', 'XRETQ', 'XSGAQ', 'XSQ', 'XSTOQ', 'XSTQ', 'XTQ', 'ACCHGY', 'ACCLIY', 'ACQDISNY', 'ACQDISOY', 'ADPACY', 'AFUDCCY', 'AFUDCIY', 'AMCY', 'AMY', 'AOLOCHY', 'APALCHY', 'APCHY', 'AQAY', 'AQCY', 'AQDY', 'AQEPSY', 'AQPY', 'ARCEDY', 'ARCEEPSY', 'ARCEY', 'ASDISY', 'ASINVY', 'ATOCHY', 'AUTXRY', 'BCEFY', 'BCTY', 'BDIY', 'CAPCSTY', 'CAPFLY', 'CAPXFIY', 'CAPXY', 'CDVCY', 'CFBDY', 'CFEREY', 'CFLAOTHY', 'CFOY', 'CFPDOY', 'CHECHY', 'CHENFDY', 'CIBEGNIY', 'CICURRY', 'CIDERGLY', 'CIMIIY', 'CIOTHERY', 'CIPENY', 'CISECGLY', 'CITOTALY', 'CIY', 'COGSY', 'CSHFDY', 'CSHPRY', 'CSTKEY', 'DCSFDY', 'DCUFDY', 'DEPCY', 'DFXAY', 'DILADY', 'DILAVY', 'DISPOCHY', 'DITY', 'DLCCHY', 'DLTISY', 'DLTRY', 'DOCY', 'DOY', 'DPCY', 'DPRETY', 'DPY', 'DTEAY', 'DTEDY', 'DTEEPSY', 'DTEPY', 'DVPDPY', 'DVPY', 'DVRECY', 'DVRREY', 'DVTY', 'DVY', 'EIEACY', 'EPSFIY', 'EPSFXY', 'EPSPIY', 'EPSPXY', 'EQDIVPY', 'ESUBCY', 'ESUBY', 'EXRESY', 'EXREUY', 'EXREY', 'FCAY', 'FFOY', 'FIAOY', 'FINCFY', 'FININCY', 'FINLEY', 'FINREY', 'FINVAOY', 'FOPOXY', 'FOPOY', 'FOPTY', 'FSRCOPOY', 'FSRCOPTY', 'FSRCOY', 'FSRCTY', 'FUSEOY', 'FUSETY', 'GDWLAMY', 'GDWLIAY', 'GDWLIDY', 'GDWLIEPSY', 'GDWLIPY', 'GLAY', 'GLCEAY', 'GLCEDY', 'GLCEEPSY', 'GLCEPY', 'GLDY', 'GLEPSY', 'GLPY', 'GPY', 'HEDGEGLY', 'IBADJY', 'IBCOMY', 'IBCY', 'IBKIY', 'IBMIIY', 'IBY', 'IDITY', 'IIREY', 'IITY', 'INTANDY', 'INTANPY', 'INTCY', 'INTFACTY', 'INTFLY', 'INTIACTY', 'INTOACTY', 'INTPDY', 'INTPNY', 'INTRCY', 'INVCHY', 'INVDSPY', 'INVSVCY', 'IOBDY', 'IOIY', 'IOREY', 'IPTIY', 'ISGTY', 'ITCCY', 'IVACOY', 'IVCHY', 'IVIY', 'IVNCFY', 'IVSTCHY', 'LIQRESNY', 'LIQRESOY', 'LNDEPY', 'LNINCY', 'LNMDY', 'LNREPY', 'LTDCHY', 'LTDLCHY', 'LTLOY', 'MICY', 'MIIY', 'MISEQY', 'NCFLIQY', 'NCOY', 'NEQMIY', 'NIITY', 'NIMY', 'NITY', 'NIY', 'NOASUBY', 'NOPIOY', 
'NOPIY', 'NRTXTDY', 'NRTXTEPSY', 'NRTXTY', 'OANCFCY', 'OANCFDY', 'OANCFY', 'OEPSXY', 'OIADPY', 'OIBDPY', 'OPEPSY', 'OPPRFTY', 'OPROY', 'OPTDRY', 'OPTFVGRY', 'OPTLIFEY', 'OPTRFRY', 'OPTVOLY', 'PCLY', 'PDVCY', 'PIY', 'PLIACHY', 'PLLY', 'PNCDY', 'PNCEPSY', 'PNCIAPY', 'PNCIAY', 'PNCIDPY', 'PNCIDY', 'PNCIEPSPY', 'PNCIEPSY', 'PNCIPPY', 'PNCIPY', 'PNCPDY', 'PNCPEPSY', 'PNCPY', 'PNCWIAPY', 'PNCWIAY', 'PNCWIDPY', 'PNCWIDY', 'PNCWIEPSY', 'PNCWIEPY', 'PNCWIPPY', 'PNCWIPY', 'PNCY', 'PRCAY', 'PRCDY', 'PRCEPSY', 'PRCPDY', 'PRCPEPSY', 'PRCPY', 'PROSAIY', 'PRSTKCCY', 'PRSTKCY', 'PRSTKPCY', 'PRVY', 'PSFIXY', 'PTRANY', 'PURTSHRY', 'PVOY', 'RAWMSMY', 'RCAY', 'RCDY', 'RCEPSY', 'RCPY', 'RDIPAY', 'RDIPDY', 'RDIPEPSY', 'RDIPY', 'RECCHY', 'REITY', 'REVTY', 'RISY', 'RRAY', 'RRDY', 'RREPSY', 'RRPY', 'RVY', 'SALEY', 'SCSTKCY', 'SETAY', 'SETDY', 'SETEPSY', 'SETPY', 'SHRCAPY', 'SIVY', 'SPCEDPY', 'SPCEDY', 'SPCEEPSPY', 'SPCEEPSY', 'SPCEPY', 'SPCEY', 'SPIDY', 'SPIEPSY', 'SPIOAY', 'SPIOPY', 'SPIY', 'SPPCHY', 'SPPEY', 'SPPIVY', 'SPSTKCY', 'SRETY', 'SSTKY', 'STFIXAY', 'STINVY', 'STKCHY', 'STKCOY', 'STKCPAY', 'SUBDISY', 'SUBPURY', 'TDCY', 'TDSGY', 'TFVCEY', 'TIEY', 'TIIY', 'TSAFCY', 'TXACHY', 'TXBCOFY', 'TXBCOY', 'TXDCY', 'TXDIY', 'TXOPY', 'TXPDY', 'TXTY', 'TXWY', 'TXY', 'UAOLOCHY', 'UDFCCY', 'UDVPY', 'UFRETSDY', 'UGIY', 'UNIAMIY', 'UNOPINCY', 'UNWCCY', 'UOISY', 'UPDVPY', 'UPTACY', 'USPIY', 'USTDNCY', 'USUBDVPY', 'UTFDOCY', 'UTFOSCY', 'UTMEY', 'UWKCAPCY', 'WCAPCHCY', 'WCAPCHY', 'WCAPCY', 'WCAPOPCY', 'WCAPSAY', 'WCAPSUY', 'WCAPSY', 'WCAPTY', 'WCAPUY', 'WDAY', 'WDDY', 'WDEPSY', 'WDPY', 'XAGTY', 'XBDTY', 'XCOMIY', 'XCOMY', 'XDVREY', 'XIDOCY', 'XIDOY', 'XINTY', 'XIOY', 'XIVIY', 'XIVREY', 'XIY', 'XOBDY', 'XOIY', 'XOPROY', 'XOPRY', 'XOPTDQPY', 'XOPTDY', 'XOPTEPSQPY', 'XOPTEPSY', 'XOPTQPY', 'XOPTY', 'XOREY', 'XRDY', 'XRETY', 'XSGAY', 'XSTOY', 'XSTY', 'XSY', 'XTY', 'DLRSN', 'FYRC', 'GGROUP', 'GIND', 'GSECTOR', 'GSUBIND', 'NAICS', 'PRIUSA', 'SIC', 'SPCINDCD', 'SPCSECCD', 'STKO']
class DataSource(object):
    """String identifiers for the supported market-data providers.

    Used by DataAccess to select the on-disk folder layout and file format.
    """

    NORGATE = "Norgate"
    YAHOO = "Yahoo"
    YAHOOold = "YahooOld"
    COMPUSTAT = "Compustat"
    CUSTOM = "Custom"
    MLT = "ML4Trading"
#class DataSource ends
class DataAccess(object):
'''
@summary: This class is used to access all the symbol data. It readin in pickled numpy arrays converts them into appropriate pandas objects
and returns that object. The {main} function currently demonstrates use.
@note: The earliest time for which this works is platform dependent because the python date functionality is platform dependent.
'''
    def __init__(self, sourcein=DataSource.YAHOO, s_datapath=None,
                 s_scratchpath=None, cachestalltime=12, verbose=False):
        '''
        @param sourcein: Specifies the source of the data. Initializes paths based on source.
        @param s_datapath: Optional explicit data directory (used only when the MYDATA env var is unset).
        @param s_scratchpath: Optional explicit scratch/cache directory.
        @param cachestalltime: Cache staleness threshold (hours) used by get_data.
        @param verbose: When True, print the resolved directories.
        @note: No data is actually read in the constructor. Only paths for the source are initialized
        @note: Scratch defaults to a directory in /tmp/MyScratch
        @note: Python 2 code (uses print statements).
        '''
        # Absolute folders that may contain symbol data files for the source.
        self.folderList = list()
        # Source-relative sub-folders, appended per data source below.
        self.folderSubList = list()
        self.cachestalltime = cachestalltime
        self.fileExtensionToRemove = ".pkl"
        # Resolve data/scratch dirs: environment variables win, then explicit
        # arguments, then package-relative and tempdir defaults.
        try:
            self.rootdir = os.environ['MYDATA']
            try:
                self.scratchdir = os.environ['MYSCRATCH']
            except:
                self.scratchdir = os.path.join(tempfile.gettempdir(), 'MYScratch')
        except:
            if s_datapath != None:
                self.rootdir = s_datapath
                if s_scratchpath != None:
                    self.scratchdir = s_scratchpath
                else:
                    self.scratchdir = os.path.join(tempfile.gettempdir(), 'MyScratch')
            else:
                self.rootdir = os.path.join(os.path.dirname(__file__), '..', 'MyData')
                self.scratchdir = os.path.join(tempfile.gettempdir(), 'MyScratch')
        if verbose:
            print "Scratch Directory: ", self.scratchdir
            print "Data Directory: ", self.rootdir
        if not os.path.isdir(self.rootdir):
            print "Data path provided is invalid"
            # NOTE(review): bare `raise` with no active exception raises
            # TypeError/RuntimeError rather than a meaningful error -- confirm
            # intent.
            raise
        if not os.path.exists(self.scratchdir):
            os.mkdir(self.scratchdir)
        # Build the per-source folder list. Norgate data is organized by
        # US exchange under /Processed/Norgate/Stocks/.
        if (sourcein == DataSource.NORGATE):
            self.source = DataSource.NORGATE
            self.midPath = "/Processed/Norgate/Stocks/"
            self.folderSubList.append("/US/AMEX/")
            self.folderSubList.append("/US/NASDAQ/")
            self.folderSubList.append("/US/NYSE/")
            self.folderSubList.append("/US/NYSE Arca/")
            self.folderSubList.append("/US/OTC/")
            self.folderSubList.append("/US/Delisted Securities/")
            self.folderSubList.append("/US/Indices/")
            for i in self.folderSubList:
                self.folderList.append(self.rootdir + self.midPath + i)
        elif (sourcein == DataSource.CUSTOM):
            self.source = DataSource.CUSTOM
            self.folderList.append(self.rootdir + "/Processed/Custom/")
        elif (sourcein == DataSource.MLT):
            self.source = DataSource.MLT
            self.folderList.append(self.rootdir + "/ML4Trading/")
        elif (sourcein == DataSource.YAHOO):
            self.source = DataSource.YAHOO
            self.folderList.append(self.rootdir + "/Yahoo/")
            # Yahoo data is stored as CSV rather than pickles.
            self.fileExtensionToRemove = ".csv"
        elif (sourcein == DataSource.COMPUSTAT):
            self.source = DataSource.COMPUSTAT
            self.midPath = "/Processed/Compustat"
            #What if these paths don't exist?
            self.folderSubList.append("/CN/SHSE/")
            self.folderSubList.append("/CN/SZSE/")
            self.folderSubList.append("/CN/FUTURE/")
            for i in self.folderSubList:
                self.folderList.append(self.rootdir + self.midPath + i)
        #if DataSource.Compustat ends
        else:
            raise ValueError("Incorrect data source requested.")
    #__init__ ends
def get_data_hardread(self, ts_list, symbol_list, data_item, verbose=False, bIncDelist=False):
'''
Read data into a DataFrame no matter what.
@param ts_list: List of timestamps for which the data values are needed. Timestamps must be sorted.
@param symbol_list: The list of symbols for which the data values are needed
@param data_item: The data_item needed. Like open, close, volume etc. May be a list, in which case a list of DataFrame is returned.
@param bIncDelist: If true, delisted securities will be included.
@note: If a symbol is not found then a message is printed. All the values in the column for that stock will be NaN. Execution then
continues as usual. No errors are raised at the moment.
'''
''' Now support lists of items, still support old string behaviour '''
bStr = False
if( isinstance( data_item, str) ):
data_item = [data_item]
bStr = True
# init data struct - list of arrays, each member is an array corresponding do a different data type
# arrays contain n rows for the timestamps and m columns for each stock
all_stocks_data = []
for i in range( len(data_item) ):
all_stocks_data.append( np.zeros ((len(ts_list), len(symbol_list))) );
all_stocks_data[i][:][:] = np.NAN
list_index= []
''' For each item in the list, add to list_index (later used to delete non-used items) '''
for sItem in data_item:
if( self.source == DataSource.CUSTOM ) :
''' If custom just load what you can '''
if (sItem == DataItem.CLOSE):
list_index.append(1)
elif (sItem == DataItem.ACTUAL_CLOSE):
list_index.append(2)
if( self.source == DataSource.COMPUSTAT ):
''' If compustat, look through list of features '''
for i, sLabel in enumerate(DataItem.COMPUSTAT):
if sItem == sLabel:
''' First item is date index, labels start at 1 index '''
list_index.append(i+1)
break
else:
raise ValueError ("Incorrect value for data_item %s"%sItem)
if( self.source == DataSource.NORGATE ):
if (sItem == DataItem.OPEN):
list_index.append(1)
elif (sItem == DataItem.HIGH):
list_index.append (2)
elif (sItem ==DataItem.LOW):
list_index.append(3)
elif (sItem == DataItem.CLOSE):
list_index.append(4)
elif(sItem == DataItem.VOL):
list_index.append(5)
elif (sItem == DataItem.ACTUAL_CLOSE):
list_index.append(6)
else:
#incorrect value
raise ValueError ("Incorrect value for data_item %s"%sItem)
if( self.source == DataSource.MLT or self.source == DataSource.YAHOO):
if (sItem == DataItem.OPEN):
list_index.append(1)
elif (sItem == DataItem.HIGH):
list_index.append (2)
elif (sItem ==DataItem.LOW):
list_index.append(3)
elif (sItem == DataItem.ACTUAL_CLOSE):
list_index.append(4)
elif(sItem == DataItem.VOL):
list_index.append(5)
elif (sItem == DataItem.CLOSE):
list_index.append(6)
else:
#incorrect value
raise ValueError ("Incorrect value for data_item %s"%sItem)
#end elif
#end data_item loop
#read in data for a stock
symbol_ctr=-1
for symbol in symbol_list:
_file = None
symbol_ctr = symbol_ctr + 1
#print self.getPathOfFile(symbol)
try:
if (self.source == DataSource.CUSTOM) or (self.source == DataSource.MLT)or (self.source == DataSource.YAHOO):
file_path= self.getPathOfCSVFile(symbol);
else:
file_path= self.getPathOfFile(symbol);
''' Get list of other files if we also want to include delisted '''
if bIncDelist:
lsDelPaths = self.getPathOfFile( symbol, True )
if file_path == None and len(lsDelPaths) > 0:
print 'Found delisted paths:', lsDelPaths
''' If we don't have a file path continue... unless we have delisted paths '''
if (type (file_path) != type ("random string")):
if bIncDelist == False or len(lsDelPaths) == 0:
continue; #File not found
if not file_path == None:
_file = open(file_path, "rb")
except IOError:
# If unable to read then continue. The value for this stock will be nan
print _file
continue;
assert( not _file == None or bIncDelist == True )
''' Open the file only if we have a valid name, otherwise we need delisted data '''
if _file != None:
if (self.source==DataSource.CUSTOM) or (self.source==DataSource.YAHOO)or (self.source==DataSource.MLT):
creader = csv.reader(_file)
row=creader.next()
row=creader.next()
#row.pop(0)
for i, item in enumerate(row):
if i==0:
try:
date = dt.datetime.strptime(item, '%Y-%m-%d')
date = date.strftime('%Y%m%d')
row[i] = float(date)
except:
date = dt.datetime.strptime(item, '%m/%d/%y')
date = date.strftime('%Y%m%d')
row[i] = float(date)
else:
row[i]=float(item)
naData=np.array(row)
for row in creader:
for i, item in enumerate(row):
if i==0:
try:
date = dt.datetime.strptime(item, '%Y-%m-%d')
date = date.strftime('%Y%m%d')
row[i] = float(date)
except:
date = dt.datetime.strptime(item, '%m/%d/%y')
date = date.strftime('%Y%m%d')
row[i] = float(date)
else:
row[i]=float(item)
naData=np.vstack([np.array(row),naData])
else:
naData = pkl.load (_file)
_file.close()
else:
naData = None
''' If we have delisted data, prepend to the current data '''
if bIncDelist == True and len(lsDelPaths) > 0 and naData == None:
for sFile in lsDelPaths[-1:]:
''' Changed to only use NEWEST data since sometimes there is overlap (JAVA) '''
inFile = open( sFile, "rb" )
naPrepend = pkl.load( inFile )
inFile.close()
if naData == None:
naData = naPrepend
else:
naData = np.vstack( (naPrepend, naData) )
#now remove all the columns except the timestamps and one data column
if verbose:
print self.getPathOfFile(symbol)
''' Fix 1 row case by reshaping '''
if( naData.ndim == 1 ):
naData = naData.reshape(1,-1)
#print naData
#print list_index
''' We open the file once, for each data item we need, fill out the array in all_stocks_data '''
for lLabelNum, lLabelIndex in enumerate(list_index):
ts_ctr = 0
b_skip = True
''' select timestamps and the data column we want '''
temp_np = naData[:,(0,lLabelIndex)]
#print temp_np
num_rows= temp_np.shape[0]
symbol_ts_list = range(num_rows) # preallocate
for i in range (0, num_rows):
timebase = temp_np[i][0]
timeyear = int(timebase/10000)
# Quick hack to skip most of the data
# Note if we skip ALL the data, we still need to calculate
# last time, so we know nothing is valid later in the code
if timeyear < ts_list[0].year and i != num_rows - 1:
continue
elif b_skip == True:
ts_ctr = i
b_skip = False
timemonth = int((timebase-timeyear*10000)/100)
timeday = int((timebase-timeyear*10000-timemonth*100))
timehour = 16
#The earliest time it can generate a time for is platform dependent
symbol_ts_list[i]=dt.datetime(timeyear,timemonth,timeday,timehour) # To make the time 1600 hrs on the day previous to this midnight
#for ends
#now we have only timestamps and one data column
#Skip data from file which is before the first timestamp in ts_list
while (ts_ctr < temp_np.shape[0]) and (symbol_ts_list[ts_ctr] < ts_list[0]):
ts_ctr= ts_ctr+1
#print "skipping initial data"
#while ends
for time_stamp in ts_list:
if (symbol_ts_list[-1] < time_stamp):
#The timestamp is after the last timestamp for which we have data. So we give up. Note that we don't have to fill in NaNs because that is
#the default value.
break;
else:
while ((ts_ctr < temp_np.shape[0]) and (symbol_ts_list[ts_ctr]< time_stamp)):
ts_ctr = ts_ctr+1
#while ends
#else ends
#print "at time_stamp: " + str(time_stamp) + " and symbol_ts " + str(symbol_ts_list[ts_ctr])
if (time_stamp == symbol_ts_list[ts_ctr]):
#Data is present for this timestamp. So add to numpy array.
#print " adding to numpy array"
if (temp_np.ndim > 1): #This if is needed because if a stock has data for 1 day only then the numpy array is 1-D rather than 2-D
all_stocks_data[lLabelNum][ts_list.index(time_stamp)][symbol_ctr] = temp_np [ts_ctr][1]
else:
all_stocks_data[lLabelNum][ts_list.index(time_stamp)][symbol_ctr] = temp_np [1]
#if ends
ts_ctr = ts_ctr +1
#inner for ends
#outer for ends
#print all_stocks_data
ldmReturn = [] # List of data matrixes to return
for naDataLabel in all_stocks_data:
ldmReturn.append( pa.DataFrame( naDataLabel, ts_list, symbol_list) )
''' Contine to support single return type as a non-list '''
if bStr:
return ldmReturn[0]
else:
return ldmReturn
#get_data_hardread ends
def get_data (self, ts_list, symbol_list, data_item, verbose=False, bIncDelist=False):
    '''
    Read data into a DataFrame, but check to see if it is in a cache first.
    @param ts_list: List of timestamps for which the data values are needed. Timestamps must be sorted.
    @param symbol_list: The list of symbols for which the data values are needed
    @param data_item: The data_item needed. Like open, close, volume etc. May be a list, in which case a list of DataFrame is returned.
    @param verbose: If True, progress and cache-diagnostic messages are printed.
    @param bIncDelist: If true, delisted securities will be included.
    @note: If a symbol is not found then a message is printed. All the values in the column for that stock will be NaN. Execution then
    continues as usual. No errors are raised at the moment.
    '''
    # Construct hash -- filename where data may be already
    #
    # The idea here is to create a filename from the arguments provided.
    # We then check to see if the filename exists already, meaning that
    # the data has already been created and we can just read that file.
    # Keep the caller's symbol order; the cached frame is reindexed to it below.
    ls_syms_copy = copy.deepcopy(symbol_list)
    # Create the hash for the symbols
    hashsyms = 0
    for i in symbol_list:
        hashsyms = (hashsyms + hash(i)) % 10000000
    # Create the hash for the timestamps
    hashts = 0
    # print "test point 1: " + str(len(ts_list))
    # spyfile=os.environ['QSDATA'] + '/Processed/Norgate/Stocks/US/NYSE Arca/SPY.pkl'
    for i in ts_list:
        hashts = (hashts + hash(i)) % 10000000
    hashstr = 'qstk-' + str (self.source)+'-' +str(abs(hashsyms)) + '-' + str(abs(hashts)) \
        + '-' + str(hash(str(data_item)))  # + '-' + str(hash(str(os.path.getctime(spyfile))))
    # get the directory for scratch files from environment
    # try:
    #     scratchdir = os.environ['QSSCRATCH']
    # except KeyError:
    #     #self.rootdir = "/hzr71/research/QSData"
    #     raise KeyError("Please be sure to set the value for QSSCRATCH in config.sh or local.sh")
    # final complete filename
    cachefilename = self.scratchdir + '/' + hashstr + '.pkl'
    if verbose:
        print "cachefilename is: " + cachefilename
    # now eather read the pkl file, or do a hardread
    readfile = False  # indicate that we have not yet read the file
    #check if the cachestall variable is defined.
    # try:
    #     catchstall=dt.timedelta(hours=int(os.environ['CACHESTALLTIME']))
    # except:
    #     catchstall=dt.timedelta(hours=1)
    cachestall = dt.timedelta(hours=self.cachestalltime)
    # Check if the file is older than the cachestalltime
    if os.path.exists(cachefilename):
        if ((dt.datetime.now() - dt.datetime.fromtimestamp(os.path.getmtime(cachefilename))) < cachestall):
            if verbose:
                print "cache hit"
            # A corrupt/truncated cache file is treated as a miss and recovered
            # from by falling through to the hard read below.
            try:
                cachefile = open(cachefilename, "rb")
                start = time.time()  # start timer
                retval = pkl.load(cachefile)
                elapsed = time.time() - start  # end timer
                readfile = True  # remember success
                cachefile.close()
            except IOError:
                if verbose:
                    print "error reading cache: " + cachefilename
                    print "recovering..."
            except EOFError:
                if verbose:
                    print "error reading cache: " + cachefilename
                    print "recovering..."
    if (readfile!=True):
        # Cache miss (or stale/corrupt cache): read from the raw data files,
        # then best-effort re-populate the cache.
        if verbose:
            print "cache miss"
            print "beginning hardread"
        start = time.time()  # start timer
        if verbose:
            print "data_item(s): " + str(data_item)
            print "symbols to read: " + str(symbol_list)
        retval = self.get_data_hardread(ts_list,
            symbol_list, data_item, verbose, bIncDelist)
        elapsed = time.time() - start  # end timer
        if verbose:
            print "end hardread"
            print "saving to cache"
        try:
            cachefile = open(cachefilename,"wb")
            pkl.dump(retval, cachefile, -1)
            os.chmod(cachefilename,0666)
        except IOError:
            print "error writing cache: " + cachefilename
        if verbose:
            print "end saving to cache"
    if verbose:
        print "reading took " + str(elapsed) + " seconds"
    # Restore the caller's requested column ordering (hash order may differ).
    if type(retval) == type([]):
        for i, df_single in enumerate(retval):
            retval[i] = df_single.reindex(columns=ls_syms_copy)
    else:
        retval = retval.reindex(columns=ls_syms_copy)
    return retval
def getPathOfFile(self, symbol_name, bDelisted=False):
    '''
    @summary: Since a given pkl file can exist in any of the folders- we need to look for it in each one until we find it. Thats what this function does.
    @param symbol_name: Ticker symbol whose data file is wanted.
    @param bDelisted: When True, search only the 'Delisted Securities' folders.
    @return: Complete path to the pkl file including the file name and extension.
             For the delisted case a sorted list of matching paths is returned.
             Implicitly returns None when nothing is found (a message is printed).
    '''
    if not bDelisted:
        for path1 in self.folderList:
            if (os.path.exists(str(path1) + str(symbol_name + ".pkl"))):
                # Yay! We found it!
                return (str(str(path1) + str(symbol_name) + ".pkl"))
            #if ends
            elif (os.path.exists(str(path1) + str(symbol_name + ".csv"))):
                # Yay! We found it!
                return (str(str(path1) + str(symbol_name) + ".csv"))
        #for ends
    else:
        ''' Special case for delisted securities '''
        # Delisted files are named SYMBOL-<digits>.pkl; collect every match
        # across all 'Delisted Securities' folders.
        lsPaths = []
        for sPath in self.folderList:
            if re.search('Delisted Securities', sPath) == None:
                continue
            for sFile in dircache.listdir(sPath):
                if not re.match( '%s-\d*.pkl'%symbol_name, sFile ) == None:
                    lsPaths.append(sPath + sFile)
        lsPaths.sort()
        return lsPaths
    # Reached only when bDelisted is False and no file matched; the function
    # then returns None implicitly.
    print "Did not find path to " + str(symbol_name) + ". Looks like this file is missing"
def getPathOfCSVFile(self, symbol_name):
    '''
    @summary: Look for <symbol_name>.csv in each configured data folder.
    @param symbol_name: Ticker symbol whose csv file is wanted.
    @return: Full path of the first matching csv file; implicitly None
             (with a printed message) when no folder contains it.
    '''
    for path1 in self.folderList:
        if (os.path.exists(str(path1)+str(symbol_name+".csv"))):
            # Yay! We found it!
            return (str(str(path1)+str(symbol_name)+".csv"))
        #if ends
    #for ends
    print "Did not find path to " + str (symbol_name)+". Looks like this file is missing"
def get_all_symbols (self):
    '''
    @summary: Returns a list of all the symbols located at any of the paths for this source. @see: {__init__}
    @attention: This will discard all files that are not of type pkl. ie. Only the files with an extension pkl will be reported.
    @raise ValueError: if no data folders are configured for this source.
    '''
    listOfStocks = list()
    #Path does not exist
    if (len(self.folderList) == 0):
        raise ValueError("DataAccess source not set")
    for path in self.folderList:
        stocksAtThisPath = list()
        #print str(path)
        stocksAtThisPath = dircache.listdir(str(path))
        #Next, throw away everything that is not a .pkl And these are our stocks!
        # NOTE(review): find() > -1 keeps any name *containing* the extension,
        # not only names ending with it -- confirm that is intended.
        stocksAtThisPath = filter (lambda x:(str(x).find(str(self.fileExtensionToRemove)) > -1), stocksAtThisPath)
        #Now, we remove the .pkl to get the name of the stock
        stocksAtThisPath = map(lambda x:(x.partition(str(self.fileExtensionToRemove))[0]),stocksAtThisPath)
        listOfStocks.extend(stocksAtThisPath)
        #for stock in stocksAtThisPath:
        #listOfStocks.append(stock)
    return listOfStocks
#get_all_symbols ends
def check_symbol(self, symbol, s_list=None):
    '''
    @summary: Tell whether a symbol is known to this data source.
    @param symbol: Ticker symbol to look up.
    @param s_list: Optional name of a symbol sub-list to restrict the search;
                   when omitted, every symbol of the source is searched.
    @return: True if the symbol is present in the chosen listing, else False.
    '''
    # Pick the universe to search: a named sub-list or everything.
    if s_list is not None:
        known_symbols = self.get_symbols_from_list(s_list)
    else:
        known_symbols = self.get_all_symbols()
    return symbol in known_symbols
def get_symbols_from_list(self, s_list):
    ''' Reads all symbols from a list.

    @param s_list: Name of the list; each data folder is expected to hold
                   a file Lists/<s_list>.txt with one symbol per line.
    @return: List of symbols aggregated across all data folders.
    @raise ValueError: if no data folders are configured for this source.
    '''
    ls_symbols = []
    if (len(self.folderList) == 0):
        raise ValueError("DataAccess source not set")
    for path in self.folderList:
        path_to_look = path + 'Lists/' + s_list + '.txt'
        # 'with' guarantees the handle is closed even if reading raises.
        with open(path_to_look, 'r') as ffile:
            for f in ffile:
                # Strip only the trailing newline. The old f[:-1] chopped the
                # last character of the final symbol whenever the file did
                # not end with a newline.
                ls_symbols.append(f.rstrip('\n'))
    return ls_symbols
def get_symbols_in_sublist (self, subdir):
    '''
    @summary: Returns all the symbols belonging to that subdir of the data store.
    @param subdir: Specifies which subdir you want.
    @return: A list of symbols belonging to that subdir (empty on error).
    '''
    pathtolook = self.rootdir + self.midPath + subdir
    stocksAtThisPath = dircache.listdir(pathtolook)
    #Next, throw away everything that is not a .pkl And these are our stocks!
    # NOTE(review): the bare except below swallows every error (including
    # programming mistakes); consider narrowing it. Also note that the
    # dircache.listdir call above is *outside* the try and is not protected.
    try:
        stocksAtThisPath = filter (lambda x:(str(x).find(str(self.fileExtensionToRemove)) > -1), stocksAtThisPath)
        #Now, we remove the .pkl to get the name of the stock
        stocksAtThisPath = map(lambda x:(x.partition(str(self.fileExtensionToRemove))[0]),stocksAtThisPath)
    except:
        print "error: no path to " + subdir
        stocksAtThisPath = list()
    return stocksAtThisPath
#get_all_symbols_on_exchange ends
def get_sublists(self):
    '''
    @summary: List every sublist available in this data store.
    @return: The data store's sublist names (the stored list object itself).
    '''
    return self.folderSubList
def get_data_labels(self):
    '''
    @summary: Returns a list of all the data labels available for this type of data access object.
    @return: A list of label strings; an empty list (with a printed warning)
             when the source is not Compustat.
    '''
    if (self.source != DataSource.COMPUSTAT):
        print 'Function only valid for Compustat objects!'
        return []
    return DataItem.COMPUSTAT
#get_data_labels
def get_info(self):
    '''
    @summary: Returns and prints a string that describes the datastore.
    @return: A string.
    '''
    # Build a human-readable description for whichever DataSource this
    # object was constructed with; unknown sources yield an error string.
    if (self.source == DataSource.NORGATE):
        retstr = "Norgate:\n"
        retstr = retstr + "Daily price and volume data from Norgate (premiumdata.net)\n"
        retstr = retstr + "that is valid at the time of NYSE close each trading day.\n"
        retstr = retstr + "\n"
        retstr = retstr + "Valid data items include: \n"
        retstr = retstr + "\topen, high, low, close, volume, actual_close\n"
        retstr = retstr + "\n"
        retstr = retstr + "Valid subdirs include: \n"
        for i in self.folderSubList:
            retstr = retstr + "\t" + i + "\n"
    elif (self.source == DataSource.YAHOO):
        retstr = "Yahoo:\n"
        retstr = retstr + "Attempts to load a custom data set, assuming each stock has\n"
        retstr = retstr + "a csv file with the name and first column as the stock ticker,\ date in second column, and data in following columns.\n"
        retstr = retstr + "everything should be located in QSDATA/Yahoo\n"
        for i in self.folderSubList:
            retstr = retstr + "\t" + i + "\n"
    elif (self.source == DataSource.COMPUSTAT):
        retstr = "Compustat:\n"
        retstr = retstr + "Compilation of (almost) all data items provided by Compustat\n"
        retstr = retstr + "Valid data items can be retrieved by calling get_data_labels(): \n"
        retstr = retstr + "\n"
        retstr = retstr + "Valid subdirs include: \n"
        for i in self.folderSubList:
            retstr = retstr + "\t" + i + "\n"
    elif (self.source == DataSource.CUSTOM):
        retstr = "Custom:\n"
        retstr = retstr + "Attempts to load a custom data set, assuming each stock has\n"
        retstr = retstr + "a csv file with the name and first column as the stock ticker, date in second column, and data in following columns.\n"
        retstr = retstr + "everything should be located in QSDATA/Processed/Custom\n"
    elif (self.source == DataSource.MLT):
        retstr = "ML4Trading:\n"
        retstr = retstr + "Attempts to load a custom data set, assuming each stock has\n"
        retstr = retstr + "a csv file with the name and first column as the stock ticker,\ date in second column, and data in following columns.\n"
        retstr = retstr + "everything should be located in QSDATA/Processed/ML4Trading\n"
    else:
        retstr = "DataAccess internal error\n"
    print retstr
    return retstr
#get_sublists
#class DataAccess ends
if __name__ == '__main__':
    # Smoke-test / demo: exercise check_symbol against a Yahoo data store.
    # Setup DataAccess object
    c_dataobj = DataAccess('Yahoo')
    # Check if GOOG is a valid symbol.
    val = c_dataobj.check_symbol('GOOG')
    print "Is GOOG a valid symbol? :" , val
    # Check if QWERTY is a valid symbol.
    val = c_dataobj.check_symbol('QWERTY')
    print "Is QWERTY a valid symbol? :" , val
    # Check if EBAY is part of SP5002012 list.
    val = c_dataobj.check_symbol('EBAY', s_list='sp5002012')
    print "Is EBAY a valid symbol in SP5002012 list? :", val
    # Check if GLD is part of SP5002012 after checking if GLD is a valid symbol.
    val = c_dataobj.check_symbol('GLD')
    print "Is GLD a valid symbol? : ", val
    val = c_dataobj.check_symbol('GLD', 'sp5002012')
    print "Is GLD a valid symbol in sp5002012 list? :", val
|
[
"sandfan@gmail.com"
] |
sandfan@gmail.com
|
475526c501952e86158ad5a7f12ac7b9992e3633
|
71aac7fa568091c55b3b97b1ec8c00f5145781ef
|
/backend/insurances/tests/test_domain_model.py
|
4ad384de588a04e3385a93acac9407d95ade5295
|
[
"MIT"
] |
permissive
|
ptrojanowski/python-architecture-examples
|
510f2ac5f533e6300b442336b12141cb9479a915
|
826f6fef7ff5eb683dca4509addcf7c3c74db6b8
|
refs/heads/master
| 2022-04-13T07:37:35.113357
| 2020-03-31T15:48:27
| 2020-03-31T15:48:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
import pytest
from insurances.domain_model import InsuranceStatus, MAX_AVAILABLE_PAUSES, PauseLimitExceeded, \
WrongStateForAction, CarLocationNotAllowed, PauseAlreadyFinished
def test_hold_insurance(insurance):
    # Holding an insurance puts it into ON_HOLD.
    insurance.hold()
    assert insurance.status is InsuranceStatus.ON_HOLD
def test_unable_to_hold_when_exceeded_pause_limit(insurance, allowed_location):
    # After MAX_AVAILABLE_PAUSES hold/resume cycles, one more hold() is rejected.
    for _ in range(MAX_AVAILABLE_PAUSES):
        insurance.hold()
        insurance.resume(allowed_location)
    with pytest.raises(PauseLimitExceeded):
        insurance.hold()
def test_unable_to_hold_when_already_on_hold(insurance):
    # hold() on an already-held insurance raises.
    insurance.hold()
    with pytest.raises(WrongStateForAction):
        insurance.hold()
def test_unable_to_hold_when_in_inactive_status(insurance):
    # An INACTIVE insurance cannot be put on hold.
    insurance.status = InsuranceStatus.INACTIVE
    with pytest.raises(WrongStateForAction):
        insurance.hold()
def test_resume_insurance(insurance, allowed_location):
    # Resuming from an allowed location brings the insurance back to ACTIVE.
    insurance.hold()
    insurance.resume(allowed_location)
    assert insurance.status is InsuranceStatus.ACTIVE
def test_unable_to_resume_when_not_on_hold(insurance, allowed_location):
    # resume() is only valid while the insurance is ON_HOLD.
    with pytest.raises(WrongStateForAction):
        insurance.resume(allowed_location)
def test_unable_to_resume_when_outside_the_allowed_location(insurance, not_allowed_location):
    # Resuming from a disallowed location raises.
    insurance.hold()
    with pytest.raises(CarLocationNotAllowed):
        insurance.resume(not_allowed_location)
def test_pause_for_constant_period_when_exceeded_pause_limit(insurance, allowed_location):
    # Even after the free-pause limit is exhausted, pause() still works
    # and puts the insurance ON_HOLD.
    for _ in range(MAX_AVAILABLE_PAUSES):
        insurance.hold()
        insurance.resume(allowed_location)
    insurance.pause()
    assert insurance.status is InsuranceStatus.ON_HOLD
def test_unable_to_resume_pause(insurance, allowed_location):
    # A pause (fixed period) cannot be resumed manually.
    insurance.pause()
    with pytest.raises(PauseAlreadyFinished):
        insurance.resume(allowed_location)
def test_unable_to_pause_when_in_grey_period(insurance):
    # pause() is rejected while the insurance is in its grey period.
    insurance.status = InsuranceStatus.IN_GREY_PERIOD
    with pytest.raises(WrongStateForAction):
        insurance.pause()
def test_pause_not_overlap_on_grey_period(insurance, after_10_min):
    # A pause must end no later than protection_end.
    insurance.protection_end = after_10_min
    insurance.pause()
    assert insurance.pauses[-1].end_at == after_10_min
|
[
"mikolevy1@gmail.com"
] |
mikolevy1@gmail.com
|
ec2395de54f8f6eff180dd1b5393f67b9a97fe37
|
59fbeea017110472a788218db3c6459e9130c7fe
|
/[307]Range Sum Query - Mutable.py
|
542727ace3973eed2f81b283f59eda981a3f5f47
|
[] |
no_license
|
niufenjujuexianhua/Leetcode
|
82b55d9382bc9f63f4d9da9431194e20a4d299f1
|
542c99e038d21429853515f62af51a77deaa4d9c
|
refs/heads/master
| 2022-04-27T16:55:00.035969
| 2022-03-10T01:10:04
| 2022-03-10T01:10:04
| 79,742,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,387
|
py
|
# Given an array nums and two types of queries where you should update the value
# of an index in the array, and retrieve the sum of a range in the array.
#
# Implement the NumArray class:
#
#
# NumArray(int[] nums) initializes the object with the integer array nums.
# void update(int index, int val) updates the value of nums[index] to be val.
# int sumRange(int left, int right) returns the sum of the subarray nums[left,
# right] (i.e., nums[left] + nums[left + 1], ..., nums[right]).
#
#
#
# Example 1:
#
#
# Input
# ["NumArray", "sumRange", "update", "sumRange"]
# [[[1, 3, 5]], [0, 2], [1, 2], [0, 2]]
# Output
# [null, 9, null, 8]
#
# Explanation
# NumArray numArray = new NumArray([1, 3, 5]);
# numArray.sumRange(0, 2); // return 9 = sum([1,3,5])
# numArray.update(1, 2); // nums = [1,2,5]
# numArray.sumRange(0, 2); // return 8 = sum([1,2,5])
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 3 * 104
# -100 <= nums[i] <= 100
# 0 <= index < nums.length
# -100 <= val <= 100
# 0 <= left <= right < nums.length
# At most 3 * 104 calls will be made to update and sumRange.
#
# Related Topics Binary Indexed Tree Segment Tree
# 👍 1779 👎 106
# leetcode submit region begin(Prohibit modification and deletion)
class Node():
    """A segment-tree node covering the inclusive index range [s, e]."""

    def __init__(self, s, e, val):
        # Range covered by this node and the aggregated sum over it.
        self.s = s
        self.e = e
        self.val = val
        # Children are attached by the tree builder; leaves keep None.
        self.left = None
        self.right = None


class NumArray(object):
    """Mutable array with O(log n) point updates and range-sum queries,
    backed by a segment tree built over the initial values."""

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        # Root of the segment tree over indices [0, len(nums) - 1].
        self.tree = self.buildTree(nums, 0, len(nums) - 1)

    def buildTree(self, nums, s, e):
        """Recursively build the subtree for the inclusive range [s, e]."""
        if s > e:
            return None
        if s == e:
            # Leaf: holds a single array element.
            return Node(s, e, nums[s])
        mid = (s + e) // 2
        node = Node(s, e, 0)
        node.left = self.buildTree(nums, s, mid)
        node.right = self.buildTree(nums, mid + 1, e)
        node.val = node.left.val + node.right.val
        return node

    def updateTree(self, root, index, val):
        """Set position *index* to *val* and refresh sums along the path."""
        if root.s == root.e:
            root.val = val
            return
        mid = (root.s + root.e) // 2
        child = root.left if index <= mid else root.right
        self.updateTree(child, index, val)
        root.val = root.left.val + root.right.val

    def query(self, root, lt, rt):
        """Sum of elements in [lt, rt] inside this subtree (0 when disjoint)."""
        if rt < root.s or lt > root.e:
            return 0
        if lt == root.s and rt == root.e:
            # Node range matches the query exactly: use the cached sum.
            return root.val
        mid = (root.s + root.e) // 2
        if rt <= mid:
            return self.query(root.left, lt, rt)
        if lt > mid:
            return self.query(root.right, lt, rt)
        # Query straddles the midpoint: split it across both children.
        return self.query(root.left, lt, mid) + self.query(root.right, mid + 1, rt)

    def update(self, index, val):
        """
        :type index: int
        :type val: int
        :rtype: None
        """
        self.updateTree(self.tree, index, val)

    def sumRange(self, left, right):
        """
        :type left: int
        :type right: int
        :rtype: int
        """
        return self.query(self.tree, left, right)
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# obj.update(index,val)
# param_2 = obj.sumRange(left,right)
# leetcode submit region end(Prohibit modification and deletion)
|
[
"noreply@github.com"
] |
niufenjujuexianhua.noreply@github.com
|
ba774a6e7d2faf7bfaf1354713517da4179bb404
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/scatterpolargl/unselected/marker/_size.py
|
e99c33326e072d1205a0a668171001f0bb3029fb
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``size`` property of
    ``scatterpolargl.unselected.marker``.

    Delegates to Plotly's NumberValidator with a lower bound of 0
    and the 'style' edit type.
    """

    def __init__(
        self,
        plotly_name='size',
        parent_name='scatterpolargl.unselected.marker',
        **kwargs
    ):
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='style',
            min=0,  # marker sizes cannot be negative
            role='style',
            **kwargs
        )
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
d46746ab1195244a9e028992e5fc1bb7d369725d
|
1ba085b962452dd6c305803ad709f2b00aa305d1
|
/education/boards/tests/test_views.py
|
3689a3485f8d385e1eb2e22a8045c0a932170134
|
[] |
no_license
|
tongri/lab_test
|
2b9aea40ad72a0316238174b0c89e78779c47bf7
|
e73ec5b60dfb8bde06302fce07dd15780e6a766c
|
refs/heads/master
| 2023-04-15T18:56:45.950878
| 2021-04-19T10:41:44
| 2021-04-19T10:41:44
| 355,573,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,221
|
py
|
from django.test import TestCase
from django.urls import reverse, resolve
from boards import views
from boards.models import Board, Topic, Post
from django.contrib.auth.models import User
from ..forms import NewTopicForm
# Create your tests here.
class HomeTest(TestCase):
    # NOTE(review): this class overlaps with HomeTests below (same URL, a
    # different expected view); consider consolidating -- confirm which
    # assertion matches the current urlconf.

    def test_home_view_status_code(self):
        # The home page should render successfully.
        url = reverse('home')
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)

    def test_home_url_resolves_home_view(self):
        # NOTE(review): if BoardListView is a class-based view, the usual
        # comparison is view.func.view_class, not view.func -- confirm.
        view = resolve('/')
        self.assertEquals(view.func, views.BoardListView)
class BoardTopicsTests(TestCase):
    def setUp(self):
        # One board (first pk) shared by every test in this class.
        Board.objects.create(name='Django', description='testing Django')

    def test_board_topics_view_success_status_code(self):
        url = reverse('board_topics', kwargs={'pk': 1})
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)

    def test_board_topics_view_not_found_status_code(self):
        # An unknown board pk should produce a 404.
        url = reverse('board_topics', kwargs={'pk': 99})
        response = self.client.get(url)
        self.assertEquals(response.status_code, 404)

    def test_board_topics_url_resolves_board_topics_view(self):
        view = resolve('/boards/1/')
        self.assertEquals(view.func, views.board_topics)

    def test_board_topics_view_contains_navigation_links(self):
        # The topics page must link back home and to the new-topic form.
        url = reverse('board_topics', kwargs={'pk': 1})
        homepage = reverse("home")
        new_topic_url = reverse('new_topic', kwargs={'pk': 1})
        response = self.client.get(url)
        self.assertContains(response, 'href="{0}"'.format(homepage))
        self.assertContains(response, 'href="{0}"'.format(new_topic_url))
class HomeTests(TestCase):
    def setUp(self):
        # A board plus a cached GET of the home page shared by the tests below.
        self.board = Board.objects.create(name='Django', description='Django board.')
        url = reverse('home')
        self.response = self.client.get(url)

    def test_home_view_status_code(self):
        self.assertEquals(self.response.status_code, 200)

    def test_home_url_resolves_home_view(self):
        # NOTE(review): HomeTest above expects views.BoardListView for the
        # same URL; one of the two assertions is stale -- confirm which.
        view = resolve('/')
        self.assertEquals(view.func, views.home)

    def test_home_view_contains_link_to_topics_page(self):
        board_topics_url = reverse('board_topics', kwargs={'pk': self.board.pk})
        self.assertContains(self.response, 'href="{0}"'.format(board_topics_url))

    def test_board_topics_view_contains_link_back_to_homepage(self):
        board_topics_url = reverse('board_topics', kwargs={'pk': 1})
        response = self.client.get(board_topics_url)
        homepage_url = reverse("home")
        self.assertContains(response, 'href="{0}"'.format(homepage_url))
class NewTopicTests(TestCase):
    def setUp(self):
        # A board (first pk) and a test user for the new-topic view.
        self.board = Board.objects.create(name='Django', description='Django board.')
        User.objects.create_user(username='anton', password='1')

    def test_new_topic_view_success_status_code(self):
        url = reverse('new_topic', kwargs={'pk': 1})
        response = self.client.get(url)
        self.assertEquals(response.status_code, 200)

    def test_new_topic_view_not_found_status_code(self):
        # Unknown board pk: the form page should 404.
        url = reverse('new_topic', kwargs={'pk': 99})
        response = self.client.get(url)
        self.assertEquals(response.status_code, 404)

    def test_new_topic_url_resolves_new_topic_view(self):
        view = resolve('/boards/1/new/')
        self.assertEquals(view.func, views.new_topic)

    def test_new_topic_view_contains_link_back_to_board_topics_view(self):
        new_topic_url = reverse("new_topic", kwargs={'pk': 1})
        board_topics_url = reverse("board_topics", kwargs={'pk': 1})
        response = self.client.get(new_topic_url)
        # NOTE(review): leftover debug print -- remove once stable.
        print(response)
        self.assertContains(response, 'href="{0}"'.format(board_topics_url))

    def test_csrf(self):
        # The rendered form must carry Django's CSRF token.
        url = reverse('new_topic', kwargs={'pk': 1})
        response = self.client.get(url)
        self.assertContains(response, 'csrfmiddlewaretoken')

    def test_new_topic_valid_post_data(self):
        url = reverse('new_topic', kwargs={'pk': 1})
        data = {
            'subject': 'Test title',
            'message': 'Lorem ipsum dolor sit amet'
        }
        # NOTE(review): 'response' is unused; consider also asserting the
        # redirect status after a successful post.
        response = self.client.post(url, data)
        self.assertTrue(Topic.objects.exists())
        self.assertTrue(Post.objects.exists())

    def test_new_topic_invalid_post_data(self):
        # An empty POST should re-render the form with errors (HTTP 200).
        url = reverse('new_topic', kwargs={'pk': 1})
        response = self.client.post(url, {})
        form = response.context.get('form')
        self.assertEquals(response.status_code, 200)
        self.assertTrue(form.errors)

    def test_new_topic_invalid_post_data_empty_fields(self):
        # Blank fields: form redisplayed, nothing persisted.
        url = reverse('new_topic', kwargs={'pk': 1})
        data = {
            'subject': '',
            'message': ''
        }
        response = self.client.post(url, data)
        self.assertEquals(response.status_code, 200)
        self.assertFalse(Topic.objects.exists())
        self.assertFalse(Post.objects.exists())

    def test_contains_form(self):
        # The GET page exposes a NewTopicForm in the template context.
        url = reverse('new_topic', kwargs={'pk': 1})
        response = self.client.get(url)
        form = response.context.get('form')
        self.assertIsInstance(form, NewTopicForm)
|
[
"alexander.ksenzov@gmail.com"
] |
alexander.ksenzov@gmail.com
|
b859f547cfcafe75851f7afcceebf9844e632a84
|
a2a1be37f50c0a23ffca0f10fe5559af543fa3e5
|
/scripts/calculations.py
|
9328337e9dbe15423b2985f0970d83f9a3cdaeb8
|
[] |
no_license
|
quentinprieels/LEPL1501
|
f9e9419f9d0a2b657a85c6b82dd0578bd431e4a1
|
9c530befc5e69e3ef5427414100a39f3562b75fe
|
refs/heads/master
| 2023-01-29T13:30:16.088570
| 2020-12-13T09:46:29
| 2020-12-13T09:46:29
| 312,902,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,297
|
py
|
import numpy as np
from scripts.variables import *
from math import sin, cos, tan, atan, pi
"""COORDINATE SYSTEM
The following code takes place in a 3-dimensional coordinate system. However, some dimensions will be regularly ignored
(especially the Y component). A tuple with 2 coordinates is thus composed of the x-y coordinates.
The X axis is horizontal (length)
The Y-axis is horizontal (width)
he Z axis is vertical (height)
The origin is positioned in the middle of the barge along the X and Y axis and at water level along the Z axis.
"""
# --- Simulation parameters ---
step = 0.01  # [s] steps (dt)
end = 90  # [s] duration
theta_0 = 0  # [rad] angle of inclination at t == 0
omega_0 = 0  # [rad / s] angular velocity at t == 0
begin_motion = 20  # [%] begin of motion (elapsed time)
end_motion = 60  # [%] end of motion (elapsed time)

# Lists with numpy
# (pre-allocated once; simulation() and its helpers fill them in place)
t = np.arange(0, end, step)  # [s] list of all times
theta = np.empty_like(t)  # [rad] list of all values of theta
omega = np.empty_like(t)  # [rad / s] list of all values of omega : the angular velocity
a = np.empty_like(t)  # [rad / s**2] list of all values of a : the angular acceleration
couples = np.empty_like(t)  # [N * m] list of all sum of torques
crane_cg_x = np.empty_like(t)  # [m] crane centre of gravity, x position over time
crane_cg_z = np.empty_like(t)  # [m] crane centre of gravity, z position over time
cg_x = np.empty_like(t)  # [m] All the position along the x-axis of the center of gravity
cg_z = np.empty_like(t)  # [m] All the position along the z-axis of the center of gravity
cp_x = np.empty_like(t)  # [m] All the position along the x-axis of the center of thrust
cp_z = np.empty_like(t)  # [m] All the position along the z-axis of the center of thrust
immersed_mass_values = np.empty_like(t)  # [kg] displaced water mass over time
# --- Moving of the crane ---
def motion():
    """
    Fill in the (movement) lists of the center of gravity of the crane
    (crane_cg_x / crane_cg_z) according to time and percentage of duration.

    The crane moves linearly between begin_motion and end_motion (expressed
    as a percentage of the simulation) and stays put otherwise.

    :return: True if there are no problem, otherwise False
    """
    try:
        # Starting position of the crane c.g.
        crane_cg_x[0] = crane_cg_x_values[0]
        crane_cg_z[0] = crane_cg_z_values[0]
        # Indices into t where the crane starts and stops moving
        motion_time = [int((len(t) / 100) * begin_motion), int((len(t) / 100) * end_motion)]
        # Per-step displacement of the crane c.g. while it moves
        step_crane_cg_x = (crane_cg_x_values[1] - crane_cg_x_values[0]) / (motion_time[1] - motion_time[0])
        step_crane_cg_z = (crane_cg_z_values[1] - crane_cg_z_values[0]) / (motion_time[1] - motion_time[0])
        # Fill lists
        for i in range(len(t) - 1):
            if motion_time[0] < i < motion_time[1]:
                crane_cg_x[i + 1] = crane_cg_x[i] + step_crane_cg_x
                crane_cg_z[i + 1] = crane_cg_z[i] + step_crane_cg_z
            else:
                crane_cg_x[i + 1] = crane_cg_x[i]
                crane_cg_z[i + 1] = crane_cg_z[i]
        return True
    except Exception:
        # Was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit
        # still propagate while keeping the original False-on-failure contract.
        return False
# --- Calculations ---
def rotate_coord(coord, angle):
    """
    Rotate a 2-D point around the origin (counter-clockwise).

    :type coord: tuple
    :type angle: float
    :param coord: (x, y) coordinates of the point in R ** 2
    :param angle: rotation angle IN RADIANS
    :return: a tuple with the coordinates of the rotated point
    """
    x, y = coord
    c, s = cos(angle), sin(angle)
    # Standard 2-D rotation matrix applied to (x, y).
    return (c * x - s * y, s * x + c * y)
# --- Initial situation ---
def height_submersion():
    """ IT'S THE PARAMETER hc
    Compute the submerged height of the barge from the Archimedes balance.

    :return: hc [m] when the barge floats (0 < hc < barge_z); otherwise a
             warning is printed and None is returned (the barge sinks).
    """
    # Displaced water column: total mass / (water density * deck area)
    hc = sum_mass / (1000 * barge_x * barge_y)  # [m]
    if not (0 < hc < barge_z):
        print("WARNING : hc >= barge z : the barge sinks")
        return None
    return hc
def angle_submersion():
    """
    Angle of submersion of the barge: the inclination (around the y-axis)
    beyond which water starts to come onto the deck.

    :return: the angle in radians.
    :raises ValueError: if the angle falls outside ]-pi/2, pi/2[.
    """
    angle = -atan((barge_z - height_submersion()) / (barge_x / 2))  # [rad]
    if not (-pi / 2 < angle < pi / 2):
        raise ValueError("WARNING : The angle of submersion is not between -pi / 2 and pi / 2")
    return angle
def angle_elevation():
    """
    Angle of elevation of the barge: the inclination (around the y-axis) at
    which a normally-submerged corner of the barge comes out of the water.

    :return: the angle in radians.
    :raises ValueError: if the angle falls outside ]-pi/2, pi/2[.
    """
    angle = -atan(height_submersion() / (barge_x / 2))  # [rad]
    if not (-pi / 2 < angle < pi / 2):
        raise ValueError("WARNING : The angle of elevation is not between -pi / 2 and pi / 2")
    return angle
def center_gravity():
    """
    Fill cg_x / cg_z with the centre of gravity of the whole system over time.

    FORMULA cg = sum of (mass_i * distance_(origin,point_i)) / sum of mass_i

    :return: True once both lists are filled.
    :raises ZeroDivisionError: re-raised with context if a division fails.
    :raises RuntimeError: for any other failure while filling the lists.
    """
    counter_problem = 0
    try:
        hc = height_submersion()
        hb = barge_z - hc  # emerged height of the barge
        for i in range(len(t)):
            # Sub-system c.g. positions relative to the water line
            barge_cg = (barge_cg_values[0], barge_cg_values[1] - hc)
            crane_cg = (crane_cg_x[i], hb + 0.075 + crane_cg_z[i])
            counterweight_cg = (counterweight_cg_x, hb + counterweight_cg_z)
            # NOTE(review): the barge term is weighted by crane_mass here;
            # it looks like it should be the barge's own mass -- confirm
            # against scripts.variables before changing the physics.
            cg_x[i] = ((crane_mass * barge_cg[0]) +
                       (crane_mass * crane_cg[0]) +
                       (counterweight_mass * counterweight_cg[0])) / sum_mass
            cg_z[i] = ((crane_mass * barge_cg[1]) +
                       (crane_mass * crane_cg[1]) +
                       (counterweight_mass * counterweight_cg[1])) / sum_mass
            counter_problem += 1
        return True
    except ZeroDivisionError as exc:
        # The original built the exception object without raising it,
        # silently swallowing the error; re-raise with context instead.
        raise ZeroDivisionError(
            "WARNING : Problem when calculating the center of gravity = ZeroDivision Value of i {}"
            .format(counter_problem)) from exc
    except Exception as exc:
        # The original 'raise ("...")' raised a TypeError because a string
        # is not an exception; raise a real exception type instead.
        raise RuntimeError(
            "WARNING : Problem when calculating the center of gravity. Value of i {}"
            .format(counter_problem)) from exc
def center_thrust(angle):
    """
    Calculate the coordinate of the center of trust of the barge
    :type angle: float
    :param angle: The angle of inclination that the barge undergoes, changing the coordinate system and causing the
    submerged shape change. IN RADIANS
    :return: A tuple with the coordinate along X- and Z-axis of the center of trust
    """
    hc = height_submersion()
    # Slides S8 - Page 'Flotteur' where parallel_long = h1 and parallel_short = h2
    # The submerged cross-section is a trapezoid; its centroid is computed in
    # the barge frame and rotated back to the world frame at the end.
    if angle < 0:
        # tan(angle) is < 0 and parallel_right > parallel_left
        parallel_right = hc + abs((tan(angle) * (barge_x / 2)))
        parallel_left = hc + (tan(angle) * (barge_x / 2))
        # Coordinate for x component
        lctx = (barge_x * (parallel_right + (2 * parallel_left))) / \
               (3 * (parallel_right + parallel_left))
        ctx_rotate = (barge_x / 2) - lctx
    else:
        # tan(angle) is >= 0 and parallel_right <= parallel_left
        parallel_right = hc - (tan(angle) * (barge_x / 2))
        parallel_left = hc + (tan(angle) * (barge_x / 2))
        # Coordinate for x component
        lctx = (barge_x * (parallel_left + (2 * parallel_right))) / \
               (3 * (parallel_left + parallel_right))
        ctx_rotate = -(barge_x / 2) + lctx
    # Coordinate for z component
    # (centroid height of the trapezoid, measured from the keel)
    lctz = ((parallel_right ** 2) + (parallel_right * parallel_left) + (parallel_left ** 2)) / \
           (3 * (parallel_right + parallel_left))
    ctz_rotate = -hc + lctz  # It is underwater thus negative
    # Rotation of the system
    return rotate_coord((ctx_rotate, ctz_rotate), angle)
def immersed_mass(angle):
    """
    Mass of the water displaced by the tilted barge.

    The submerged cross-section is a trapezoid whose two parallel sides are the
    submerged depths at the right and left edges of the hull.
    :type angle: float
    :param angle: the angle of inclination of the barge IN RADIANS
    :return: float, displaced mass in kg (fresh water, 1000 kg/m^3)
    """
    depth = height_submersion()
    tilt_rise = tan(angle) * (barge_x / 2)
    if angle < 0:
        # tan(angle) is negative: the right side sits deeper than the left
        side_right = depth + abs(tilt_rise)
        side_left = depth + tilt_rise
    else:
        # tan(angle) is non-negative: the left side sits deeper than the right
        side_right = depth - tilt_rise
        side_left = depth + tilt_rise
    cross_section = ((side_right + side_left) * barge_x) / 2  # trapezoid area
    displaced_volume = cross_section * barge_y
    return displaced_volume * 1000
# --- Simulation ---
def simulation():
    """
    Explicit-Euler time integration of the barge's rotation.

    Fills omega (angular velocity), theta (angle), a (angular acceleration),
    couples (torques), immersed_mass_values, cp_x/cp_z (center of thrust) and
    rotates cg_x/cg_z in place, one entry per time step in t.
    :return: None - results are written into the module-level lists/arrays.
    """
    motion()
    center_gravity()
    # Initial conditions
    dt = step
    omega[0] = omega_0
    theta[0] = theta_0
    for i in range(len(t) - 1):
        # Rotate the center of gravity into the current inclined frame
        cg_x_current = cg_x[i]
        cg_x[i] = rotate_coord((cg_x[i], cg_z[i]), theta[i])[0]
        cg_z[i] = rotate_coord((cg_x_current, cg_z[i]), theta[i])[1]
        # Torques: gravity, buoyancy (thrust) and viscous damping
        couple_g = -sum_mass * g * cg_x[i]
        couple_p = immersed_mass(theta[i]) * g * center_thrust(theta[i])[0]
        couple_d = - damping * omega[i]
        couples[i] = couple_g + couple_p + couple_d
        # Angle, velocity and acceleration (explicit Euler step)
        a[i] = couples[i] / inertia
        omega[i + 1] = omega[i] + a[i] * dt
        theta[i + 1] = theta[i] + omega[i] * dt
        # Record derived quantities for this step
        immersed_mass_values[i] = immersed_mass(theta[i])
        cp_x[i] = center_thrust(theta[i])[0]
        cp_z[i] = center_thrust(theta[i])[1]
    # Pad the last element of the lists (loop stops one step early)
    immersed_mass_values[-1] = immersed_mass_values[-2]
    # NOTE(review): the two lines below overwrite index 1 with index 2 -
    # possibly intended to fix index 0 instead; confirm against the model.
    cp_x[1] = cp_x[2]
    cp_z[1] = cp_z[2]
    cp_x[-1] = cp_x[-2]
    cp_z[-1] = cp_z[-2]
    cg_x[-1] = cg_x[-2]
    cg_z[-1] = cg_z[-2]
# --- Launch program ---
simulation()
# --- Create energy's lists (all relative to the initial state) ---
E_g = sum_mass * g * (cg_x - cg_x[0])  # gravity term from center-of-gravity displacement
E_p = - immersed_mass_values * g * (cp_x - cp_x[0])  # buoyancy term from center-of-thrust displacement
E_k = (inertia * omega ** 2) / 2  # rotational kinetic energy
E_tot = E_g + E_p + E_k  # total mechanical energy estimate
|
[
"45366083+quentinprieels@users.noreply.github.com"
] |
45366083+quentinprieels@users.noreply.github.com
|
17cd7afe71eef27cae287ac6a7950dadb0958958
|
86dc22e140785fdb7f4b9b7f6eac0dcc719bb42e
|
/plonetheme/jquerymobile/testing.py
|
9a3bfc98c43c4017e5e64ca655399323cac06f6a
|
[] |
no_license
|
toutpt/plonetheme.jquerymobile
|
545121aa42ca7ac06852bb7402216c730fa91dc5
|
d09a57ced100327934c2637676264f0ec43356d2
|
refs/heads/master
| 2021-01-22T06:48:35.002427
| 2013-11-19T17:58:07
| 2013-11-19T17:58:07
| 1,245,664
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import IntegrationTesting, FunctionalTesting
from plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE
from plone.testing import z2
class Layer(PloneSandboxLayer):
    """Test layer: loads the theme's ZCML and installs its GenericSetup profiles."""

    defaultBases = (PLONE_FIXTURE,)

    def setUpZope(self, app, configurationContext):
        """Load ZCML of this theme and its dependencies into the test Zope."""
        # Load ZCML
        import collective.js.jquerymobile
        import plonetheme.jquerymobile
        import plonetheme.classic
        self.loadZCML(package=collective.js.jquerymobile)
        self.loadZCML(package=plonetheme.classic)
        self.loadZCML(package=plonetheme.jquerymobile)

    def setUpPloneSite(self, portal):
        """Apply the install profiles so the theme is active in the test site."""
        self.applyProfile(portal, 'plonetheme.classic:default')
        self.applyProfile(portal, 'plonetheme.jquerymobile:default')
# Shared fixture layer plus ready-made integration / functional / robot test layers.
FIXTURE = Layer()
INTEGRATION = IntegrationTesting(bases=(FIXTURE,),
    name="plonetheme.jquerymobile:Integration")
FUNCTIONAL = FunctionalTesting(bases=(FIXTURE,),
    name="plonetheme.jquerymobile:Functional")
ROBOT = FunctionalTesting(
    bases=(FIXTURE, AUTOLOGIN_LIBRARY_FIXTURE, z2.ZSERVER),
    name="plonetheme.jquerymobile:Robot")
|
[
"toutpt@gmail.com"
] |
toutpt@gmail.com
|
af3322783318e9078310bd6a24f050bbb8ffa0b1
|
2ac0fdf2b8a8f4b43f745f8585b375c3a71b9977
|
/checkout/migrations/0007_remove_order_postcode.py
|
8bcf16bab7192aa246270f8632a9de02544f2b95
|
[] |
no_license
|
philip-hughes/fourth-milestone-project
|
c13909dbe6724a26152e11adf1f913014ca7cab3
|
e029a7255268251b85bc6ae4241196b11e28e3fc
|
refs/heads/master
| 2023-08-02T16:20:17.174776
| 2020-10-08T11:41:51
| 2020-10-08T11:41:51
| 280,018,228
| 0
| 0
| null | 2021-09-22T19:27:57
| 2020-07-16T01:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Generated by Django 3.1 on 2020-09-15 01:32
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the unused `postcode` field from Order."""

    dependencies = [
        ('checkout', '0006_auto_20200907_1423'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='order',
            name='postcode',
        ),
    ]
|
[
"phugh80@hotmail.com"
] |
phugh80@hotmail.com
|
dfb3b32c69b22a73cd4d769542368aab9f78e516
|
6484f860e13c8083f80e9b7896ce7c1c533a3982
|
/gen_inventory.py
|
d84ddc3632f8b850e047b082cbd294ff434c2cfa
|
[] |
no_license
|
asmtal/cd-iac
|
4e839922dc8578dc81f3e71ed056c2863ca083e7
|
d89856113c0476916305a4996d1bf373be6b91f1
|
refs/heads/master
| 2023-03-17T04:48:04.749059
| 2015-09-11T22:24:35
| 2015-09-11T22:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
import sys
def main():
    """Build ansible/inventory from stdin lines of the form "group: host1,host2"."""
    with open('ansible/inventory', 'w') as inventory_file:
        for raw_line in sys.stdin:
            pieces = raw_line.split(': ')
            # One INI-style section per input line; commas become one host per line.
            section = '[{}]\n{}\n'.format(pieces[0], pieces[1].replace(',', '\n'))
            inventory_file.write(section)
main()  # runs unconditionally on import/execution (no __main__ guard)
|
[
"mikael.gibert@numergy.com"
] |
mikael.gibert@numergy.com
|
8138643e307317670ef9aeb852bd57bfebb122ff
|
7fee528e7d91acd06dcb0949c33669a564b59ae3
|
/catkin_ws/build/ros_python/function_ws/srv_sub_pub/catkin_generated/pkg.installspace.context.pc.py
|
b019d28797956f27003859e602b8442b5974e88e
|
[
"MIT"
] |
permissive
|
min-chuir-Park/ROS_Tutorials
|
ef5122e1a94efbe4634c144152963d17709cac67
|
4c19e7673ec7098019c747833c45f0d32b85dab4
|
refs/heads/master
| 2020-05-20T09:33:23.811622
| 2019-07-07T04:26:51
| 2019-07-07T04:26:51
| 185,502,100
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (pkg-config style values); do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/nvidia/ROS_Tutorials/catkin_ws/install/include".split(';') if "/home/nvidia/ROS_Tutorials/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rospy;std_msgs;message_runtime".replace(';', ' ')  # space-separated run dependencies
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # package exports no libraries
PROJECT_NAME = "srv_sub_pub"
PROJECT_SPACE_DIR = "/home/nvidia/ROS_Tutorials/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"dhrmach45679@naver.com"
] |
dhrmach45679@naver.com
|
241659b021a634ec979c352169c3c7ae6f91610c
|
f288f3e0f66da2bc9d7216f2c605a2895f32a9ee
|
/online_tetris/migrations/0002_auto_20171201_1853.py
|
01a6ab9d9d19e21278d8fe4c843d50fe7d97e8e1
|
[
"Apache-2.0"
] |
permissive
|
JonatanRoig/django-online-tetris
|
1a80eb60ccdfafdf21a67927ab4ecc6d7f0b32db
|
ad6f4045ea50c1b49c11aaca10460e76a1720679
|
refs/heads/master
| 2018-07-14T12:33:24.588948
| 2018-06-01T13:18:02
| 2018-06-01T13:18:02
| 115,737,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-01 18:53
from __future__ import unicode_literals
from django.db import migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Auto-generated migration: make Sesion.slug an AutoSlugField fed by `nombre`."""

    dependencies = [
        ('online_tetris', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sesion',
            name='slug',
            field=django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='nombre'),
        ),
    ]
|
[
"jona.roig@gmail.com"
] |
jona.roig@gmail.com
|
6710267d658eacafcab3064266de6c55bc18d338
|
4b01a53d6b43d97f6fc15c94985baac3933109c7
|
/cam_stream/camproject/manage.py
|
8dd7f7b4775cc5852e6f353e4ae7ddcb5b3df921
|
[] |
no_license
|
are1224/drow_project
|
001a53c55f2d49982d90964df8281cae5917e13b
|
b79c6a14c9165486f09dfe55168ed4917d1db626
|
refs/heads/master
| 2022-12-14T04:25:59.072928
| 2020-09-15T17:08:07
| 2020-09-15T17:08:07
| 295,796,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default settings module for this project; the environment may override it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'camproject.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the subcommand named on the command line (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"passtel62@gmail.com"
] |
passtel62@gmail.com
|
ac8ae0bf0b3ac6e860e0f89ed17e2c4b40a9eca3
|
5e02039d9ec8d8d878383ed478360d9c35f57a11
|
/machinelearninginaction-master/Ch02/notebook/knn.py
|
9b59b0477cc488f01781414eed763543d27e6cb1
|
[] |
no_license
|
yoseham/notebook
|
97f01b92591107b8e623e4009657fad79b777ab4
|
4c7360aeaed54cfd63af475455c60dc4e18840f1
|
refs/heads/master
| 2022-03-01T22:40:43.058046
| 2019-09-19T01:51:31
| 2019-09-19T01:51:31
| 209,438,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
import numpy as np
import operator
def createDataset():
    """Return 4 random 2-D integer feature rows plus their fixed class labels."""
    class_labels = ['A', 'A', 'B', 'B']
    feature_rows = np.random.randint(0, 10, (4, 2))
    return feature_rows, class_labels
def classify0(inx, dataset, labels, k):
    """Classic kNN: return the majority label among the k rows of `dataset`
    closest (Euclidean distance) to the query point `inx`."""
    deltas = dataset - inx                      # broadcasting replaces np.tile
    distances = np.sqrt((deltas ** 2).sum(axis=1))
    nearest_first = distances.argsort()
    votes = {}
    for rank in range(k):
        label = labels[nearest_first[rank]]
        votes[label] = votes.get(label, 0) + 1
    # First-inserted label wins ties, matching a stable descending sort.
    return max(votes, key=votes.get)
|
[
"1416708654@qq.com"
] |
1416708654@qq.com
|
3e765d97ff441c4aa4c853ebe0d92889eba3289e
|
52be02c1ddf1610356d4818a5fd06e8d9ee98a73
|
/algorithms/a2c/utils.py
|
ad08e8afd773c2aec8aace55e20d74c1f7992e50
|
[] |
no_license
|
DengYuelin/multiple_assembly_old
|
f32e34a2e6633c99639489fb02b2e28edb1db180
|
696c55de23bb217b4471324bf3c3246a1bfcd5d8
|
refs/heads/master
| 2020-12-22T07:12:06.749844
| 2020-02-03T18:27:39
| 2020-02-03T18:27:39
| 236,664,915
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,342
|
py
|
import os
import numpy as np
import tensorflow as tf
from collections import deque
def sample(logits):
    """Sample action indices from `logits` via the Gumbel-max trick:
    argmax(logits + Gumbel noise) draws from the softmax distribution."""
    noise = tf.random_uniform(tf.shape(logits))
    return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
def cat_entropy(logits):
    """Entropy of the categorical distribution defined by `logits` (per row)."""
    a0 = logits - tf.reduce_max(logits, 1, keepdims=True)  # shift for numerical stability
    ea0 = tf.exp(a0)
    z0 = tf.reduce_sum(ea0, 1, keepdims=True)  # softmax normalizer
    p0 = ea0 / z0
    return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
def cat_entropy_softmax(p0):
    """Entropy of already-normalized probabilities `p0`; 1e-6 guards log(0)."""
    return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def ortho_init(scale=1.0):
    """Return a TF-style initializer producing (scaled) orthogonal weight matrices."""
    def _ortho_init(shape, dtype, partition_info=None):
        # Lasagne-style orthogonal init adapted for TF variable initializers.
        shape = tuple(shape)
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4:
            # Conv kernel (assumes NHWC): collapse all but the output channels.
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        gaussian = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # SVD returns two orthogonal factors; keep the one matching flat_shape.
        q = u if u.shape == flat_shape else v
        q = q.reshape(shape)
        return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
    """2-D conv layer: nf filters of size rf x rf with orthogonal init;
    supports NHWC and NCHW layouts."""
    if data_format == 'NHWC':
        channel_ax = 3
        strides = [1, stride, stride, 1]
        bshape = [1, 1, 1, nf]
    elif data_format == 'NCHW':
        channel_ax = 1
        strides = [1, 1, stride, stride]
        bshape = [1, nf, 1, 1]
    else:
        raise NotImplementedError
    bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
    nin = x.get_shape()[channel_ax].value  # input channels from static shape
    wshape = [rf, rf, nin, nf]
    with tf.variable_scope(scope):
        w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
        b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
        if not one_dim_bias and data_format == 'NHWC':
            b = tf.reshape(b, bshape)  # make bias broadcastable over NHWC
        return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
    """Fully connected layer: x @ w + b with orthogonal weight init."""
    with tf.variable_scope(scope):
        nin = x.get_shape()[1]
        w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
        return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
    """Split a flat [nbatch*nsteps, ...] tensor into a list of nsteps per-step tensors."""
    if flat:
        h = tf.reshape(h, [nbatch, nsteps])
    else:
        h = tf.reshape(h, [nbatch, nsteps, -1])
    return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
    """Inverse of batch_to_seq: concatenate per-step tensors back into one batch."""
    shape = h[0].get_shape().as_list()
    if not flat:
        assert(len(shape) > 1)
        nh = h[0].get_shape()[-1].value
        return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
    else:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    """Unroll an LSTM over the step list `xs`; `ms` are episode-reset masks,
    `s` is the packed [cell | hidden] state. Returns (outputs, new state)."""
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)  # zero the state where the mask marks an episode start
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)  # input gate
        f = tf.nn.sigmoid(f)  # forget gate
        o = tf.nn.sigmoid(o)  # output gate
        u = tf.tanh(u)        # candidate cell update
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h  # NOTE: overwrites the input list in place
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
    """Layer normalization over `axes` with gain `g` and bias `b`.

    NOTE(review): mutable default `axes=[1]` is shared across calls; harmless
    only because no caller mutates it.
    """
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
    x = (x-u)/tf.sqrt(s+e)  # e avoids division by zero for constant inputs
    x = x*g+b
    return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
    """LSTM unroll (see `lstm`) with layer normalization on the input path,
    the recurrent path and the cell output."""
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))

        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
        bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))

        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

        gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
        bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)  # reset state at episode boundaries
        h = h*(1-m)
        z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(_ln(c, gc, bc))  # normalize the cell before the output gate
        xs[idx] = h  # NOTE: mutates the input list in place
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
def conv_to_fc(x):
    """Flatten a conv feature map to [batch, features] for a dense layer."""
    nh = np.prod([v.value for v in x.get_shape()[1:]])
    x = tf.reshape(x, [-1, nh])
    return x
def discount_with_dones(rewards, dones, gamma):
    """Discounted returns computed right-to-left; a `done` of 1 cuts the
    bootstrap so episodes do not leak value into each other."""
    returns = []
    running = 0
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = reward + gamma * running * (1. - done)
        returns.append(running)
    returns.reverse()
    return returns
def find_trainable_variables(key):
    """Return the trainable variables under the given scope/collection key."""
    return tf.trainable_variables(key)
def make_path(f):
    """Ensure directory `f` exists (creates parents; no error if already present)."""
    os.makedirs(f, exist_ok=True)
def constant(p):
    """Schedule: no decay - full value regardless of progress `p`."""
    return 1
def linear(p):
    """Schedule: linear decay from 1 to 0 as progress `p` goes 0 -> 1."""
    return 1-p
def middle_drop(p):
    """Linear decay until only 75% of the value remains, then a constant 0.075 floor."""
    floor_trigger = 0.75
    remaining = 1 - p
    if remaining < floor_trigger:
        return floor_trigger * 0.1
    return remaining
def double_linear_con(p):
    """Linear decay at double speed, clamped below at 0.125."""
    fast_remaining = 1 - p * 2
    if fast_remaining < 0.125:
        return 0.125
    return fast_remaining
def double_middle_drop(p):
    """Linear decay with two successive floors: 0.075 once below 75% remaining,
    then 0.125 once below 25% remaining."""
    remaining = 1 - p
    if remaining >= 0.75:
        return remaining
    if remaining < 0.25:
        return 0.25 * 0.5
    return 0.75 * 0.1
# Registry mapping schedule names to decay functions (consumed by Scheduler).
schedules = {
    'linear':linear,
    'constant':constant,
    'double_linear_con': double_linear_con,
    'middle_drop': middle_drop,
    'double_middle_drop': double_middle_drop
}
class Scheduler(object):
    """Emit base value `v` scaled by a named schedule at the current step fraction."""

    def __init__(self, v, nvalues, schedule):
        self.v = v                            # base value
        self.nvalues = nvalues                # total steps spanned by the schedule
        self.schedule = schedules[schedule]   # resolve name to a decay function
        self.n = 0.                           # internal step counter

    def value(self):
        """Value at the current step; advances the internal counter."""
        scaled = self.v * self.schedule(self.n / self.nvalues)
        self.n += 1.
        return scaled

    def value_steps(self, steps):
        """Value at an arbitrary step, without touching the counter."""
        return self.v * self.schedule(steps / self.nvalues)
class EpisodeStats:
    """Track rolling episode lengths and rewards across vectorized environments."""

    def __init__(self, nsteps, nenvs):
        self.nsteps = nsteps
        self.nenvs = nenvs
        self.episode_rewards = [[] for _ in range(nenvs)]  # in-progress episode per env
        self.lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
        self.rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards

    def feed(self, rewards, masks):
        """Accumulate a [nenvs, nsteps] batch; a truthy mask closes an episode."""
        rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
        masks = np.reshape(masks, [self.nenvs, self.nsteps])
        for env, env_rewards in enumerate(rewards):
            for step, reward in enumerate(env_rewards):
                self.episode_rewards[env].append(reward)
                if masks[env][step]:
                    finished = self.episode_rewards[env]
                    self.lenbuffer.append(len(finished))
                    self.rewbuffer.append(sum(finished))
                    self.episode_rewards[env] = []

    def mean_length(self):
        """Mean recent episode length; 0 before any episode finishes."""
        return np.mean(self.lenbuffer) if self.lenbuffer else 0

    def mean_reward(self):
        """Mean recent episode reward; 0 before any episode finishes."""
        return np.mean(self.rewbuffer) if self.rewbuffer else 0
# For ACER
def get_by_index(x, idx):
    """Gather x[i, idx[i]] for every row i of a 2-D tensor (per-row indexing)."""
    assert(len(x.get_shape()) == 2)
    assert(len(idx.get_shape()) == 1)
    # Convert (row, col) pairs into indices of the flattened tensor.
    idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx
    y = tf.gather(tf.reshape(x, [-1]),  # flatten input
                  idx_flattened)  # use flattened indices
    return y
def check_shape(ts,shapes):
    """Assert that each tensor in `ts` has the corresponding static shape in `shapes`."""
    i = 0
    for (t,shape) in zip(ts,shapes):
        assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
        i += 1
def avg_norm(t):
    """Mean L2 norm of `t` taken over its last axis."""
    return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))
def gradient_add(g1, g2, param):
    """Combine two (possibly None) gradients for `param`.

    At most one of g1/g2 may be None (missing gradient); both None indicates a
    parameter no loss touches and trips the assertion with the parameter name.
    """
    # Removed: unconditional debug print([g1, g2, param.name]) that spammed
    # stdout on every gradient merge.
    assert (not (g1 is None and g2 is None)), param.name
    if g1 is None:
        return g2
    elif g2 is None:
        return g1
    else:
        return g1 + g2
def q_explained_variance(qpred, q):
    """Explained variance of `qpred` w.r.t. `q`: 1 - Var(q - qpred) / Var(q)."""
    _, vary = tf.nn.moments(q, axes=[0, 1])
    _, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
    check_shape([vary, varpred], [[]] * 2)  # both moments must be scalars
    return 1.0 - (varpred / vary)
|
[
"2539722953@qq.com"
] |
2539722953@qq.com
|
d01432fe7d7d0bb7a25b6a0bdcfdfecac11cb16e
|
12f7c22513e5edf7654fd689457a2530053d7a62
|
/env/bin/pip3.5
|
8a5d1c57010b3a317f425fd3078dc7d856f570c0
|
[] |
no_license
|
sts-sadr/machine_learning
|
b9ed61a6c67e5c25f0f8ff26a6262f0b0e46b845
|
b89f5a85de8233230def2315b6304fd42b678b5d
|
refs/heads/master
| 2021-01-07T08:23:43.532310
| 2019-03-18T02:24:24
| 2019-03-18T02:24:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
5
|
#!/home/priyaroy/machine_learning/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Normalize argv[0]: strip setuptools wrapper suffixes (-script.py / .exe) before dispatch.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"roy.priyashree@gmail.com"
] |
roy.priyashree@gmail.com
|
d89e780fbbba8bb5e258c75db43588de7c3a2856
|
9a9e739dcc559476ba796510182374ad460f2f8b
|
/PA1/PA1 2013/PA1/diamond printer.py
|
bafd1a7ac6da4700b8981078d1e22c9781475ae9
|
[] |
no_license
|
Divisekara/Python-Codes-First-sem
|
542e8c0d4a62b0f66c598ff68a5c1c37c20e484d
|
e4ca28f07ecf96181af3c528d74377ab02d83353
|
refs/heads/master
| 2022-11-28T01:12:51.283260
| 2020-08-01T08:55:53
| 2020-08-01T08:55:53
| 284,220,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
# NOTE: Python 2 script (print statements, raw_input); will not run under Python 3.
letter=raw_input("Enter lowercase letter:")
i=ord(letter)-96  # 1-based alphabet position ('a' -> 1)
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Top half of the diamond: rows grow from 'A' up to the chosen letter.
for n in range(1,i+1):
    for x in range(i-n):
        print "",  # leading spaces centre each row
    if n==1:
        print alphabet[n-1]  # single 'A' at the apex
        continue
    print "%s%s"%(alphabet[:n],alphabet[n-2::-1])  # ascending run + mirrored descent
# Bottom half: rows shrink back down to 'A'.
for n in range(1,i):
    for x in range(n):
        print "",
    if n==i-1:
        print alphabet[i-n-1]  # single 'A' at the bottom tip
        continue
    print "%s%s"%(alphabet[:i-n],alphabet[i-n-2::-1])
print "\n"
|
[
"9asitha7@gmail.com"
] |
9asitha7@gmail.com
|
20f70af8e5b6e0808ea692d2dbe330944499941a
|
f331bebb696790a767a8302453ee00b644025f2c
|
/App/setup.py
|
a3a35327f31b11cbbe6ad13951a74735b948d152
|
[] |
no_license
|
ErickSancho/Proyecto_Plataformas
|
b56655e4bb787fd19368a4af813bde0df2f6360e
|
a48adad41e5be2a316b59bdd96fef5026c539949
|
refs/heads/master
| 2023-01-31T16:07:53.089313
| 2020-12-11T19:17:25
| 2020-12-11T19:17:25
| 307,012,505
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
import sys
def setup():
    """Generate a .desktop launcher for each installation path read from stdin."""
    for line in sys.stdin:
        write_file(line)
def write_file(path):
    """Write a freedesktop .desktop launcher pointing at the app installed under `path`.

    `path` is one line read from stdin; any trailing newline is stripped.
    """
    # Fix: the original `path[:-1]` chopped a real character whenever the line
    # had no trailing newline (e.g. the last line of piped input).
    path = path.rstrip('\n')
    with open("./Aplicacion_financiera.desktop", "w") as file:
        file.write("[Desktop Entry]\n")
        file.write("Version=1.0\n")  # was writelines(); write() is the correct call for a single string
        file.write("Name=Aplicacion Financiera\n")
        file.write("Exec=" + path + "/main.py\n")
        file.write("Icon=" + path + "/img/dollar.ico\n")
        file.write("Type=Application")
if __name__ == "__main__":
    # Entry point: only run when executed directly, not when imported.
    setup()
|
[
"alonso.sancho99@gmail.com"
] |
alonso.sancho99@gmail.com
|
648fa15399a4eeb4e666ff3242a0a14daea4be75
|
2a1d87d1cd963ffe98a925d80cb8f4a3f4b47729
|
/profileapp/decorators.py
|
6c5e7aa23de4fdda005324f8d2b1530eecc5ed2d
|
[] |
no_license
|
seunghyeon-shin/pragmatic
|
f53df68aff452baf1cdffd2e11826cb8cff02082
|
a556de4dc8e4af71fe8828d0332bbf54a4fa7e89
|
refs/heads/main
| 2023-07-08T02:27:20.466759
| 2021-08-08T05:08:24
| 2021-08-08T05:08:24
| 393,553,209
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from django.http import HttpResponseForbidden
from .models import Profile
def profile_ownership_required(func):
    """View decorator: only the owner of the Profile addressed by kwargs['pk']
    may proceed; any other authenticated user receives HTTP 403.
    """
    from functools import wraps  # stdlib; local import keeps the module's import block untouched

    @wraps(func)  # fix: preserve the wrapped view's __name__/__doc__ for introspection and debugging
    def decorated(request, *args, **kwargs):
        profile = Profile.objects.get(pk=kwargs['pk'])
        if not profile.user == request.user:
            return HttpResponseForbidden()
        return func(request, *args, **kwargs)
    return decorated
|
[
"seunghyeon.shin@vntgcorp.com"
] |
seunghyeon.shin@vntgcorp.com
|
0e245fe8458c419cb06d2702c8ea72766051afab
|
23c0de84ef45a3d2c851029349f04413b4fa6715
|
/mysite/myapp/api.py
|
2926982808931320604b65627da0d44eb7acf931
|
[] |
no_license
|
Arnoaili/vue-django
|
06ad800f0211111df4f835f02c46b19730630af2
|
8e6c44249e85c4f82883aef0900a78eecaa91148
|
refs/heads/master
| 2021-01-20T22:55:08.461682
| 2017-08-30T03:34:09
| 2017-08-30T03:34:09
| 101,828,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,824
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import MySQLdb
from django.http import JsonResponse, StreamingHttpResponse, HttpResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login, logout, models #登入和登出
from django.conf import settings
def operator_db(request, sql, is_search=False):
    """Open a MySQL connection, execute `sql`, and fetch all rows (is_search=True)
    or commit (writes). Python 2 code.

    NOTE(review): callers build `sql` by string interpolation - SQL injection
    risk; use parameterized queries. `request` is accepted but never used.
    """
    db = MySQLdb.connect(host="localhost",user="root",passwd="admin1",db="myaoo",charset="utf8")
    cursor = db.cursor()
    print sql
    cursor.execute(sql)
    if is_search:
        data = cursor.fetchall()
        db.close()
        return data
    db.commit()
    db.close()
def resource(request):
    """Return every blog_user row as a JSON list of
    {customerId, companyName, contactName, phone} dicts."""
    sql = "select *from blog_user"
    data = operator_db(request, sql, True)
    aa = []
    for one in data:
        info = dict()
        info['customerId'] = one[0]
        info['companyName'] = one[1]
        info['contactName'] = one[2]
        info['phone'] = one[3]
        aa.append(info)
    # safe=False allows serializing non-dict objects (here: a list);
    # with safe=True, JsonResponse only accepts dict instances.
    return JsonResponse(aa, safe=False)
def add(request):
    """Insert one blog_user row from the JSON POST body.

    NOTE(review): sys.exit() on malformed input kills the whole server process -
    should return an error response; SQL is string-built (injection risk); the
    bare except hides the real failure.
    """
    if request.method == 'POST':
        try:
            user_obj=json.loads(request.body)
            print "import data: ", user_obj
            id = int(user_obj["customerId"])
            name = user_obj["companyName"]
            password = user_obj["contactName"]
            message_id = int(user_obj["phone"])
        except:
            print "Input error!"
            sys.exit()
        sql = "insert into blog_user(id,name,password,message_id) values('%d','%s','%s','%d')" % (id,name,password,message_id)
        operator_db(request, sql)
        print "Import data done!"
    return JsonResponse({})
def login_api(request):
    """Authenticate the user from the JSON POST body and open a Django session."""
    if request.method == "POST":
        print request.body
        user_obj=json.loads(request.body)
        username = user_obj["username"]
        password = user_obj["password"]
        print user_obj["username"]
        user = authenticate(username=username,password=password)  # django.contrib.auth.models.User, or None on failure
        # print(type(models.Customer.objects.get(name="赵凡")))
        print(user,type(user))
        if user:
            login(request, user)  # log in once authentication succeeded
            # return redirect('/myapp/post/')
            return JsonResponse({'id':1, 'name':username})
        else:
            return JsonResponse({'status':'error','message':'用户或密码错误!'})
    return JsonResponse({})
def register_api(request):
    """Create a Django auth user from the JSON POST body (email is hard-coded)."""
    if request.method == "POST":
        user_obj = json.loads(request.body)
        print "**************", user_obj
        username = user_obj["username"]
        email = "person@126.com"#user_obj["email"]
        password = user_obj["password"]
        print username
        user = models.User.objects.create_user(username, email, password)
        user.save()
        print "Register Success!!!"
    return JsonResponse({})
def file_upload(request):  # upload endpoint: stores the posted file under FILE_ROOT
    """Save an uploaded file below settings.FILE_ROOT/file_upload_preserve_dir."""
    if request.method == 'POST':
        path_root = settings.FILE_ROOT  # root directory where uploads are stored
        myFile =request.FILES.get("myfile", None)  # uploaded file, or None when absent
        # NOTE(review): myFile.name below raises AttributeError when no file was posted
        print "^^^^^^^^^^", myFile, type(myFile), myFile.name
        if not myFile:
            dstatus = "Please select the file to upload!"
        else:
            path_ostype = os.path.join(path_root, 'file_upload_preserve_dir')
            path_dst_file = os.path.join(path_ostype, myFile.name)
            # print path_dst_file
            if os.path.isfile(path_dst_file):
                dstatus = "%s already exist!"%(myFile.name)
            else:
                destination = open(path_dst_file,'wb+')  # open destination for binary writing
                for chunk in myFile.chunks():  # write the upload chunk by chunk
                    destination.write(chunk)
                destination.close()
                dstatus = "%s upload success!"%(myFile.name)
        return HttpResponse(dstatus)
    return JsonResponse({})
def file_download(request):
    """Serve the fixed demo file secret.txt via the streaming download helper."""
    response = file_download_api(fpath='file_download_source/files', fname='secret.txt')
    if response:
        return response
    else:
        return HttpResponse(u"Sorry!文件路径错误!")
def file_download_api(**kwarg):  # streams a file under FILE_ROOT back as an attachment
    """Stream FILE_ROOT/<fpath>/<fname> to the client as an octet-stream attachment."""
    # Generator yielding the file in fixed-size chunks.
    def file_iterator(file_name, chunk_size=512):
        with open(file_name,'rb') as f:  # binary mode: text mode can stop at special bytes and truncate the download
            while True:
                c = f.read(chunk_size)
                if c:
                    yield c
                else:
                    break
    fpath = kwarg["fpath"]
    fname = kwarg["fname"]
    # path_root = "C:\Users\111\Desktop\mysite\file_upload_dir"
    path_root = settings.FILE_ROOT  # root directory for served files
    if fpath is not None and fname is not None:
        file_fpath = os.path.join(path_root, fpath)  # fpath is the parent directory of the file
        file_dstpath = os.path.join(file_fpath, fname)  # fname is the file's name
        if not os.path.exists(file_dstpath):
            return HttpResponse(u"Sorry!没有找到该文件!")
        response = StreamingHttpResponse(file_iterator(file_dstpath))
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Disposition'] = 'attachment;filename="{0}"'.format(fname)  # fname is the name the client saves as
        # str.format replaces the older % formatting
        return response
    return {}
from src.QcloudApi.qcloudapi import QcloudApi
def qcloudapi(request):
    """Query Qcloud 'account/DescribeProject' and return project data as JSON;
    a POST filters by exact projectName.

    NOTE(review): API credentials are hard-coded below - move them to settings
    or environment variables; `result` is unbound if the API call raises.
    """
    module = 'account'
    action = 'DescribeProject'
    config = {
        'Region': 'tj',
        'secretId': 'AKIDnX6UjeBdjiXDh4TM9oIbigT6YoyzwtdM',
        'secretKey': 'mNm03vdBXihVDj4DC0c4LXYP4XvBHCyc',
        'method': 'get'
    }
    # params = {
    #     'entityFileName': '/test.txt',
    #     'entityFile': '/tmp/test.txt',
    #     'SignatureMethod':'HmacSHA256',# signature algorithm: HmacSHA256 or HmacSHA1 (default HmacSHA1)
    # }
    params = {}
    try:
        service = QcloudApi(module, config)
        print service.generateUrl(action, params)
        result = service.call(action, params)
        #service.setRequestMethod('get')
        #print service.call('DescribeCdnEntities', {})
    except Exception, e:
        print 'exception:', e
    # print "qqqqqqqqq", json.loads(result)["data"]
    if request.method == 'POST':
        print json.loads(request.body)
        for one in json.loads(result)["data"]:
            if json.loads(request.body)['name'].strip() == one["projectName"]:
                return JsonResponse([one], safe=False)
    return JsonResponse(json.loads(result)["data"], safe=False)
def get_booklist(request):
    """Paginated book listing; the JSON POST body may carry an exact-name filter
    plus `page` and `lines` (page size)."""
    if request.method == 'POST':
        request_data = json.loads(request.body)
        if request_data.has_key("name"):
            name = request_data["name"].strip()
            sql = "select *from books where name='%s'" % (name)  # NOTE(review): string-built SQL - injection risk
            data = operator_db(request, sql, True)
            total = len(data)
        else:
            sql = "select count(1) from books"
            total = int(operator_db(request, sql, True)[0][0])
        page = request_data["page"]
        lines = request_data["lines"]
        # NOTE(review): MySQL LIMIT takes (offset, count); the count should be
        # `lines`, not `page*lines` - this fetches progressively more rows.
        sql = "select *from books limit %d,%d" % ((page-1)*lines, page*lines)
        data = operator_db(request, sql, True)
        aa = []
        for one in data:
            info = dict()
            info['name'] = one[1]
            print info['name']
            info['author'] = one[2]
            info['date'] = one[3]
            info['description'] = one[4]
            aa.append(info)
        # safe=False allows serializing non-dict objects; with safe=True,
        # JsonResponse only accepts dict instances.
        return JsonResponse({"total":total, "data":aa}, safe=False)
    return JsonResponse({})
def delete_book_api(request):
    """Delete books by name; accepts a single 'name' or comma-separated 'names'.

    NOTE(review): SQL is string-built from request data - injection risk.
    """
    if request.method == 'POST':
        data = json.loads(request.body)
        names = []
        if data.has_key('name'):
            names.append(data["name"])
        elif data.has_key('names'):
            names = data["names"].split(',')
        for name in names:
            sql = "delete from books where name='%s'" % (name)
            operator_db(request, sql)
        print "Delete data!"
    return JsonResponse({})
def edit_book_api(request):
    """Update a book's date, addressed by exact name, from the JSON POST body.

    NOTE(review): SQL is string-built from request data - injection risk.
    """
    if request.method == 'POST':
        data = json.loads(request.body)
        date = data["date"]
        name = data["name"]
        sql = "update books set date='%s' where name='%s'" % (date, name)
        operator_db(request, sql)
        print "Edit data!"
    return JsonResponse({})
def add_book_api(request):
    """Insert one books row (name/author/date/description) from the JSON POST body.

    NOTE(review): SQL is string-built from request data - injection risk.
    """
    if request.method == 'POST':
        data = json.loads(request.body)
        name = data["name"]
        author = data["author"]
        date = data["date"]
        description = data["description"]
        sql = "insert into books values(NULL, '%s', '%s', '%s', '%s')" % (name,author,date,description)
        operator_db(request, sql)
        print "Add data!"
    return JsonResponse({})
|
[
"969814683@qq.com"
] |
969814683@qq.com
|
42bc82842d453fbfc42f8357a14f155068060efd
|
330fecf1a29109210739368a2b5a34a18379078a
|
/codespacedirstructure/T4. Asynchronous programming/Asyncio/async_python_precondition/asyncio_await_example.py
|
76f9a9b3f303cbd0d8639b20b289da8bf17b86a2
|
[] |
no_license
|
apavlyk/Advanced_Python
|
e1ab84887b3a83636b84641d492f8b9ec6b4c748
|
05b7694629ee3ad5f97fadb7a1236cbd08c16a4a
|
refs/heads/master
| 2020-09-13T13:17:36.680696
| 2019-11-20T21:35:39
| 2019-11-20T21:35:39
| 222,794,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
"""
Basically what is happening here is an async method,
when executed, returns a coroutine which can then be awaited.
"""
import asyncio
import aiohttp
import time
urls = ['http://www.google.com', 'http://devdocs.io', 'http://www.python.org']
async def call_url(url):
    """Fetch `url`, print its length and the first 30 characters, return the body."""
    print('Starting {}'.format(url))
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            data = await response.text()
            print('{}: {} bytes: {}'.format(url, len(data), data[:30]))
            return data
# Fix: time.clock() was removed in Python 3.8 - use perf_counter() for timing.
start = time.perf_counter()
futures = [call_url(url) for url in urls]
loop = asyncio.get_event_loop()
# gather() accepts raw coroutines on all Python 3 versions
# (asyncio.wait() rejects bare coroutines since Python 3.11).
loop.run_until_complete(asyncio.gather(*futures))
print('Done', time.perf_counter() - start)
|
[
"noreply@github.com"
] |
apavlyk.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.