| blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (list) | license_type (string) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (string) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (string) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string) | src_encoding (string) | language (string) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (string) | content (string) | authors (list) | author_id (string) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
96e752ef58371284632e6104607d5315508c9bb6
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc016/B/4834420.py
|
fab45b44e4df201a1ef33eeb57c143fffed010ec
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
A,B,C=map(int, input().split(' '))
if A+B==C:
if A-B!=C:
print('+')
else:
print('?')
elif A+B!=C:
if A-B==C:
print('-')
else:
print('!')
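For quick testing without stdin, the same ±-classification logic can be factored into a pure function (a sketch; the judge name is ours, not part of the submission):
def judge(a, b, c):
    add, sub = (a + b == c), (a - b == c)
    if add and sub:
        return '?'
    if add:
        return '+'
    if sub:
        return '-'
    return '!'

assert judge(3, 1, 4) == '+'  # only 3+1 == 4
assert judge(5, 3, 2) == '-'  # only 5-3 == 2
assert judge(1, 0, 1) == '?'  # both hold
assert judge(1, 2, 9) == '!'  # neither holds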
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
1f1b52c64bb4fa7cb05fa710e400b6eb82dfc8b4
|
02fd06563211f0098d5ae1acf8a28f8f80546d46
|
/CursoPython/practica_3.py
|
e8ad6f0cc576cda41011ecc28254ecd9887c3cb0
|
[] |
no_license
|
HRN88/python
|
9cd078f29cc212ee5d19e6debd05d1ce494c654d
|
b092844daf8e7c65a319f0431845dddae967bb62
|
refs/heads/master
| 2021-04-27T00:25:17.317818
| 2018-06-01T12:40:39
| 2018-06-01T12:40:39
| 123,810,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
'''
Write a program that follows this algorithm, using augmented assignment operators whenever possible:
Store the value 12345679 (without the 8) in a variable numero_magico
Read another number, numero_usuario, from input; require it to be between 1 and 9 (make sure it is a number)
Multiply numero_usuario by 9 in place
Multiply numero_magico by numero_usuario in place
Finally, print the final value of numero_magico'''
magic = 12345679
numero = int(input("Enter an integer between 1 and 9: "))
numero *= 9
magic *= numero
print(magic)
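Why the output looks magical: 12345679 * 9 equals 111111111, so multiplying by 9*n yields the chosen digit repeated nine times. A two-line sanity check (our addition, not part of the exercise):
assert 12345679 * 9 == 111111111
assert 12345679 * 9 * 7 == 777777777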
|
[
"gustavoheras.h@gmail.com"
] |
gustavoheras.h@gmail.com
|
444afc706986e82c2c7fe90bcf186d3199b09e84
|
377bf89b212a75076ed533c9e030d029170eb885
|
/build/my_turtle/catkin_generated/pkg.develspace.context.pc.py
|
62dd85df2ac7590e1931b185258e3c3ce25428bb
|
[] |
no_license
|
rezwanshubh/rez2-robtech
|
b6552d84ed9066f708dd8f58530bb18daeef97b2
|
1bcf5a9192c5b6332e215bafcbe4db95f7a6750f
|
refs/heads/master
| 2021-01-20T14:47:16.569917
| 2017-05-11T18:48:51
| 2017-05-11T18:48:51
| 90,658,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "my_turtle"
PROJECT_SPACE_DIR = "/home/rostest/rez2-robtech/devel"
PROJECT_VERSION = "0.0.0"
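For comparison, a sketch of how the same generated template looks for a package that exports headers and declares catkin dependencies (values invented for illustration; this package happens to have none):
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/rostest/rez2-robtech/devel/include".split(';') if "/home/rostest/rez2-robtech/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;std_msgs".replace(';', ' ')  # -> "rospy std_msgs"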
|
[
"shubhbd@yahoo.com"
] |
shubhbd@yahoo.com
|
26c20e4780e62be0b0ecab519d78604fcbefea32
|
bef17e897dfb88108eb41bad73de92d843f02de8
|
/telegram_token.py
|
f8bc2a1fe69a8fd85bf588954b82c90e3d8a4181
|
[] |
no_license
|
Bogoyavlenskaya/my_bot_classify
|
5fd449fa77696269929a04421012d6dac1558f83
|
b78925401a59b36a980ab9d7a81ec69692fd3f47
|
refs/heads/master
| 2020-06-09T01:24:09.389269
| 2019-06-27T19:22:04
| 2019-06-27T19:22:04
| 193,342,513
| 0
| 0
| null | null | null | null |
WINDOWS-1251
|
Python
| false
| false
| 27
|
py
|
token = 'your token'
|
[
"noreply@github.com"
] |
Bogoyavlenskaya.noreply@github.com
|
36811f8219dbaded0d61e71c73332056b64cba72
|
e4806fe953cbb76a6baf1f27ae40562561014f36
|
/python_base/2019.1.21/practise6.py
|
068ec312de0bd481be8f8b1c960c5ced08aab9fa
|
[] |
no_license
|
LaBravel/Tedu-code
|
701daa5f49ab42129db0a4684c8e7b3cbcbe1d65
|
9c93e9d88e940e627c7a3d1e8c2519035b462086
|
refs/heads/master
| 2020-04-27T16:33:55.535261
| 2019-03-08T07:27:29
| 2019-03-08T07:27:29
| 174,486,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
def mymap(fn,iterable) :
for i in iterable :
yield fn(i)
L = [i for i in map(lambda x : x * 2,range(10))]
print(L)
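The hand-written mymap generator is equivalent to the builtin map used above; a one-line check (our addition):
assert list(mymap(lambda x: x * 2, range(10))) == L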
|
[
"463662798@qq.com"
] |
463662798@qq.com
|
6e42bcd24052180e879951ee01f26b90c8bfc243
|
4f10dff7d20520dcff239372b29586a5e6dbd54b
|
/transcripts_crawler_v2.py
|
4a9ac0fe2dc4222a036ced1e709706355c168429
|
[] |
no_license
|
maxwelllee54/ProjectBigData
|
4f46cfa208a20940316aeac746e05393d6cdf49a
|
51b8d0283ac82a141f931164eeb1d531ab431798
|
refs/heads/master
| 2021-01-19T05:47:06.408015
| 2017-05-05T03:21:44
| 2017-05-05T03:21:44
| 87,446,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
############################################################################
# Instruction: transcripts crawler on SeekingAlpha.com #
# Version: 2.0 #
# Date: April 08 2017 #
############################################################################
from bs4 import BeautifulSoup
import requests
import time
import pandas as pd
class tscpCrawler():
def __init__(self, ticker, user_name=None, pwd=None):
self.ticker = ticker.upper()
self.headers = {'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
                        'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/39.0.2171.95 '
'Safari/537.36'}
self.user_name = user_name
self.pwd = pwd
def links(self):
allLinks = []
page = 1
flag = 2
while page < 10 and flag:
url = 'https://seekingalpha.com/symbol/' + self.ticker + '/earnings/more_transcripts?page=' + str(page)
wb = requests.get(url, headers=self.headers)
soup = BeautifulSoup(wb.text, 'lxml')
if not soup.select('div div a'):
break
else:
for i in soup.select('div div a'):
if i.get('href').strip('\"\\') == 'https://www.perimeterx.com':
time.sleep(600)
page = 1
flag -= 1
continue
elif i.get('href').strip('\"\\').find('transcript') > 0:
allLinks.append('https://seekingalpha.com' + i.get('href').strip('\"\\') + '?part=single')
page += 1
time.sleep(10)
return allLinks
def transcripts(self):
with requests.Session() as c:
requestUrl = 'http://seekingalpha.com/account/orthodox_login'
USERNAME = self.user_name
PASSWORD = self.pwd
userAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'
login_data = {
"slugs[]": None,
"rt": None,
"user[url_source]": None,
"user[location_source]": "orthodox_login",
"user[email]": USERNAME,
"user[password]": PASSWORD
}
c.post(requestUrl, data=login_data,
headers={"referer": "http://seekingalpha.com/account/login", 'user-agent': userAgent})
df = pd.DataFrame(columns=['Ticker', 'Date', 'Time', 'Links'])
for ind, link in enumerate(self.links()):
page = c.get(link, headers=self.headers)
soup = BeautifulSoup(page.text, 'lxml')
titleline = soup.select('#a-hd > h1')
try:
title = titleline[0].get_text()
file_name = self.ticker + '-' + title[title.find('Q'):title.find('Q') + 2] + '-' \
+ title[title.find('Q') + 3:title.find('Q') + 7] + '.txt'
contexts = '\n'.join([text.get_text() for text in soup.select('#a-body > p.p')])
df.loc[ind, 'Ticker'] = self.ticker
df.loc[ind, 'Links'] = link
df.loc[ind, 'Date'] = soup.select('#a-body > p.p.p1')[1].get_text()
df.loc[ind, 'Time'] = soup.select('#a-body > p.p.p1')[2].get_text()
with open(r'all_output/'+file_name, 'w') as f:
f.write(contexts)
except IndexError:
df.loc[ind, 'Ticker'] = self.ticker
df.loc[ind, 'Links'] = link
print(link)
continue
except FileNotFoundError as error:
print(error)
pass
time.sleep(60)
if not df.empty:
df.to_csv(r'all_output/'+self.ticker+'_data.csv')
if __name__ == '__main__':
user_info = {'user_name': 'maxwelllee54@gmail.com',
'pwd': 'lee890504'}
ticker_list = ['RIO', 'XOM', 'GE', 'F', 'MO', 'XRX', 'GS', 'HBC', 'JPM', 'LYG', 'MS', 'RF']
#ticker_list = pd.read_csv('ticker_list.txt', header=None).iloc[:,0]
for ticker in ticker_list:
print(ticker)
tscpCrawler(ticker, **user_info).transcripts()
time.sleep(60)
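A hedged single-ticker sketch of the same flow (credentials are placeholders; a real run needs a valid SeekingAlpha account):
crawler = tscpCrawler('XOM', user_name='user@example.com', pwd='password')
print(crawler.links())  # collect the transcript URLs first
crawler.transcripts()   # then log in, save each transcript, and write the CSV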
|
[
"maxwelllee54@gmail.com"
] |
maxwelllee54@gmail.com
|
a7ebe38383c9e72e6e66c61a2a2f22fceb3db32e
|
6d42732f98cbbc50f6a66e95370b163d4701bc60
|
/CCD/CCD/main/migrations/0007_privateannouncement.py
|
7fbd1f0735dd35c59dc321383947efdfd80f9382
|
[] |
no_license
|
DHAWAL812/CCD-Portal
|
07c7bf16268015f8ebe0dd6a77e9a81fa9871156
|
d304c8161ef69cbea069fb6cac85c0a1dbb146a9
|
refs/heads/master
| 2023-01-02T21:44:45.205186
| 2020-02-21T17:25:40
| 2020-02-21T17:25:40
| 286,717,458
| 0
| 1
| null | 2020-08-11T10:41:54
| 2020-08-11T10:41:53
| null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
# Generated by Django 2.2.5 on 2020-02-16 11:34
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('main', '0006_auto_20200215_0558'),
]
operations = [
migrations.CreateModel(
name='PrivateAnnouncement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('poc_company', models.CharField(default='', max_length=40)),
('Announcement', models.TextField()),
('AnnouncementTime', models.DateTimeField(default=django.utils.timezone.now)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.UserProfile')),
],
),
]
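For context, a sketch of the models.py definition that would generate this migration (reconstructed from the operations above; app wiring is assumed):
from django.db import models
from django.utils import timezone

class PrivateAnnouncement(models.Model):
    poc_company = models.CharField(default='', max_length=40)
    Announcement = models.TextField()
    AnnouncementTime = models.DateTimeField(default=timezone.now)
    user = models.ForeignKey('main.UserProfile', on_delete=models.CASCADE)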
|
[
"siddhart18a@iitg.ac.in"
] |
siddhart18a@iitg.ac.in
|
4341fe83287d4e4e25701edd5898fa04b419acde
|
d779f3deff16d1c1458740b42452aedccf8cb1f4
|
/multiclass_tracking/tracker.py
|
64a26730881b9b7e71d2105efc39d80eb32a6e80
|
[
"MIT"
] |
permissive
|
Pandinosaurus/multiclass-people-tracking
|
84f827e63ef1252e6cb2da93554bbca99d7586fc
|
0768745a66e392c96a3df17a4e78cb33b5d851a3
|
refs/heads/master
| 2022-01-12T16:39:01.238776
| 2019-02-18T15:11:19
| 2019-02-18T15:11:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,646
|
py
|
#!/usr/bin/python
import numpy as np
from EKF_with_HMM import EKF_with_HMM
from scipy.stats import multivariate_normal
from image_projection import ImageProjection
import logging
logger = logging.getLogger(__name__)
class Tracker:
def __init__(self, ekf_sensor_noise, hmm_observation_model,
use_hmm=True):
self.tracks = []
self.curr_id = 0
self.HMM_observation_model = hmm_observation_model
self.EKF_sensor_noise = ekf_sensor_noise
#standard tracking thresholds
pos_cov_limit = 4.0 #threshold for track pose covariance
chi2_thresh = 7.815 #threshold for Mahalanobis distance
        eucl_thresh = 1.0 #threshold for Euclidean distance
self.set_thresholds(pos_cov_limit, chi2_thresh, eucl_thresh)
#standard tracking config
accel_noise = 15.0
height_noise = 0.25
init_vel_sigma = 1.5
hmm_transition_prob = 2e-4
self.set_tracking_config(accel_noise, height_noise, init_vel_sigma,
hmm_transition_prob, use_hmm)
def set_thresholds(self, pos_cov_threshold, mahalanobis_max_dist,
euclidean_max_dist):
        self.pos_cov_limit = pos_cov_threshold
        self.chi2_thresh = mahalanobis_max_dist
        self.eucl_thresh = euclidean_max_dist
def set_tracking_config(self, accel_noise, height_noise,
init_vel_sigma, hmm_transition_prob,
use_hmm):
self.accel_noise = accel_noise
self.height_noise = height_noise
self.init_vel_sigma = init_vel_sigma
self.HMM_transition_prob = hmm_transition_prob
self.use_hmm = use_hmm
#reset tracks
self.tracks = []
def get_track_ids(self):
ids = []
for track in self.tracks:
ids.append(track.track_id)
return ids
def get_track_positions(self):
track_positions = []
for track in self.tracks:
track_pos = track.get_odom_position()
track_positions.append(track_pos)
return track_positions
def get_track_velocities(self):
track_velocities = []
for track in self.tracks:
track_vel = track.get_odom_velocity()
track_velocities.append(track_vel)
return track_velocities
def get_track_detections(self, trafo_odom_in_cam):
track_detections = []
for track in self.tracks:
track_det = track.get_track_detection(trafo_odom_in_cam)
track_detections.append(track_det)
return track_detections
def get_track_covariances(self):
track_covariances = []
for track in self.tracks:
track_cov = track.get_covariance()
track_covariances.append(track_cov)
return track_covariances
def predict(self, dt):
for track in self.tracks:
track.predict(dt)
def update(self, detections, trafo_odom_in_cam, camera_calibration):
#calculate pairwise mahalanobis distance
assignment_profit = np.zeros([len(self.tracks), len(detections)])
trafo_cam_in_odom = np.linalg.inv(trafo_odom_in_cam)
#sort detections with increasing confidence so the most confident
#detection determines the track bbox
detections = sorted(detections, key=lambda item: item['score'])
for i, detection in enumerate(detections):
for j,track in enumerate(self.tracks):
z_exp = track.get_z_exp(trafo_odom_in_cam)
H = track.get_H(trafo_odom_in_cam)
z = ImageProjection.get_measurement(detection)
v = z - z_exp
S = H.dot(track.sigma).dot(np.transpose(H)) + track.R
mahalanobis_d = np.transpose(v).dot(np.linalg.inv(S)).dot(v)
x = np.squeeze(v)
mu = np.array([0.0, 0.0, 0.0])
try:
pdf = multivariate_normal.pdf(x, mu, S)
assignment_profit[j,i] = pdf
except np.linalg.LinAlgError as e:
                    print(e)
assignment_profit[j,i] = -1
cam_det = ImageProjection.get_cart_detection(detection, camera_calibration)
odom_det = ImageProjection.transform_detection(cam_det, trafo_cam_in_odom)
eucl_distance = np.hypot(odom_det["x"] - track.mu[0], odom_det["y"] - track.mu[1])
if mahalanobis_d > self.chi2_thresh:
assignment_profit[j,i] = -1
if eucl_distance > self.eucl_thresh:
assignment_profit[j,i] = -1
        detection_assignments = -1 * np.ones(len(detections), int)
#pair each detection to the closest track
for i,odom_det in enumerate(detections):
max_profit = 0
for j,track in enumerate(self.tracks):
if assignment_profit[j,i] > max_profit:
detection_assignments[i] = j
max_profit = assignment_profit[j,i]
for i,detection in enumerate(detections):
#if detection was paired, update tracker
if detection_assignments[i] != -1:
#detection was paired, update tracker
track = self.tracks[detection_assignments[i]]
track.update(detection, trafo_odom_in_cam)
else:
#start new tracker
track = EKF_with_HMM(detection, trafo_odom_in_cam, camera_calibration,
self.accel_noise, self.height_noise,
self.init_vel_sigma, self.EKF_sensor_noise,
self.HMM_observation_model,
self.HMM_transition_prob,self.curr_id,
self.use_hmm)
self.curr_id += 1
self.tracks.append(track)
logger.debug("detection not matched, start new KF with id %d" % track.track_id)
for track in self.tracks:
#apply background detection if not detected
if not track.updated:
track.update_with_background()
#find position uncertainty at sigma interval
pos_uncertainty = 0
pos_cov = track.sigma[0:2,0:2]
try:
eigenvals, eigenvecs = np.linalg.eig(pos_cov)
            except np.linalg.LinAlgError as e:
                print(e)
                continue  # eigen decomposition failed; skip this track
#get largest eigenvalue
max_eigval = np.max(eigenvals)
            if max_eigval < 0.00001 or np.isnan(max_eigval):
                pos_uncertainty = np.inf
            else:
                #chi-square value for sigma confidence interval
                chisquare_scale = 2.2789
                #calculate width and height of confidence ellipse
                pos_uncertainty = 2 * np.sqrt(chisquare_scale*max_eigval)
#check if we need to delete the track
if pos_uncertainty > self.pos_cov_limit or track.get_class() == 0:
logger.debug("deleting track %d" % track.track_id)
self.tracks.remove(track)
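A minimal per-frame driving loop for the class above (a sketch; the noise matrices, detections, and transforms come from the surrounding perception stack and are assumed here):
tracker = Tracker(ekf_sensor_noise, hmm_observation_model)
for frame in frames:
    tracker.predict(frame.dt)  # propagate every track's EKF forward
    tracker.update(frame.detections, frame.trafo_odom_in_cam, camera_calibration)
    print(tracker.get_track_ids(), tracker.get_track_positions())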
|
[
"kollmitz@informatik.uni-freiburg.de"
] |
kollmitz@informatik.uni-freiburg.de
|
dea9c6913296fbbb51f69b686c834f4d74357686
|
56ca9cbd29bf0bbd545b5857530fbbe8c6ffff95
|
/docker_sdk_api/shared/helpers/get_model_zip.py
|
11a22b195401a97025bc1265b213cb97ff210032
|
[
"Apache-2.0"
] |
permissive
|
hadikoub/BMW-Semantic-Segmentation-Training-GUI
|
b34bf819942dbe20a3a6df2bc44b6435ca3e6754
|
902f35a7e367e635898f687b16a830db892fbaa5
|
refs/heads/main
| 2023-06-13T05:00:55.631277
| 2021-06-30T15:34:26
| 2021-06-30T15:34:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
import os
from typing import Dict
def get_downloadable_zip(folder_path: str) -> Dict[str, str]:
servable_models: Dict[str, str] = {}
for root, dirs, files in os.walk(folder_path):
for directory in dirs:
for f in os.listdir(os.path.join(root, directory)):
if f.endswith(".zip"):
servable_models[f] = directory
return servable_models
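A self-contained usage sketch (temporary folder and file names are ours):
import os, tempfile, zipfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'model_a'))
with zipfile.ZipFile(os.path.join(root, 'model_a', 'weights.zip'), 'w'):
    pass  # create an empty archive
print(get_downloadable_zip(root))  # -> {'weights.zip': 'model_a'}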
|
[
"123.hadikoubeissy@gmail.com"
] |
123.hadikoubeissy@gmail.com
|
e21a6e57ceb16bb0d8877544b56dca9af3292c5c
|
17c14b758959cdceec0dce8f783346fdeee8e111
|
/chap05_nlp/attention_seq2seq/tf1.1/j_min_test.py
|
4c8c39e7bcad969050811c59eff16ddf3db6c40f
|
[] |
no_license
|
yurimkoo/tensormsa_jupyter
|
b0a340119339936d347d12fbd88fb017599a0029
|
0e75784114ec6dc8ee7eff8094aef9cf37131a5c
|
refs/heads/master
| 2021-07-18T12:22:31.396433
| 2017-10-25T01:42:24
| 2017-10-25T01:42:24
| 109,469,220
| 1
| 0
| null | 2017-11-04T05:20:15
| 2017-11-04T05:20:15
| null |
UTF-8
|
Python
| false
| false
| 18,542
|
py
|
# To plot learning curve graph
#%matplotlib inline
import matplotlib.pyplot as plt
# for pretty print
from pprint import pprint
# for tokenizer
import re
# for word counter in vocabulary dictionary
from collections import Counter
# for checkpoint paths
import os
# for fancy progress bar
from tqdm import tqdm
# TensorFlow
import tensorflow as tf
# for output_projection
from tensorflow.python.layers.core import Dense
# for initial attention (not required ver1.2+)
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
# maximum length of input and target sentences including paddings
enc_sentence_length = 10
dec_sentence_length = 10
# Batch_size: 2
input_batches = [
['Hi What is your name?', 'Nice to meet you!'],
['Which programming language do you use?', 'See you later.'],
['Where do you live?', 'What is your major?'],
['What do you want to drink?', 'What is your favorite beer?']]
target_batches = [
['Hi this is Jaemin.', 'Nice to meet you too!'],
['I like Python.', 'Bye Bye.'],
['I live in Seoul, South Korea.', 'I study industrial engineering.'],
['Beer please!', 'Leffe brown!']]
all_input_sentences = []
for input_batch in input_batches:
all_input_sentences.extend(input_batch)
all_target_sentences = []
for target_batch in target_batches:
all_target_sentences.extend(target_batch)
# Example
all_input_sentences
def tokenizer(sentence):
tokens = re.findall(r"[\w]+|[^\s\w]", sentence)
return tokens
# Example
tokenizer('Hello world?? "sdfs%@#%')
def build_vocab(sentences, is_target=False, max_vocab_size=None):
word_counter = Counter()
vocab = dict()
reverse_vocab = dict()
for sentence in sentences:
tokens = tokenizer(sentence)
word_counter.update(tokens)
if max_vocab_size is None:
max_vocab_size = len(word_counter)
if is_target:
vocab['_GO'] = 0
vocab['_PAD'] = 1
vocab_idx = 2
for key, value in word_counter.most_common(max_vocab_size):
vocab[key] = vocab_idx
vocab_idx += 1
else:
vocab['_PAD'] = 0
vocab_idx = 1
for key, value in word_counter.most_common(max_vocab_size):
vocab[key] = vocab_idx
vocab_idx += 1
for key, value in vocab.items():
reverse_vocab[value] = key
return vocab, reverse_vocab, max_vocab_size
# Example
pprint(build_vocab(all_input_sentences))
print('\n')
pprint(build_vocab(all_target_sentences))
enc_vocab, enc_reverse_vocab, enc_vocab_size = build_vocab(all_input_sentences)
dec_vocab, dec_reverse_vocab, dec_vocab_size = build_vocab(all_target_sentences, is_target=True)
print('input vocabulary size:', enc_vocab_size)
print('target vocabulary size:', dec_vocab_size)
def token2idx(word, vocab):
return vocab[word]
for token in tokenizer('Nice to meet you!'):
print(token, token2idx(token, enc_vocab))
def sent2idx(sent, vocab=enc_vocab, max_sentence_length=enc_sentence_length, is_target=False):
tokens = tokenizer(sent)
current_length = len(tokens)
pad_length = max_sentence_length - current_length
if is_target:
return [0] + [token2idx(token, vocab) for token in tokens] + [1] * pad_length, current_length
else:
return [token2idx(token, vocab) for token in tokens] + [0] * pad_length, current_length
# Enc Example
print('Hi What is your name?')
print(sent2idx('Hi What is your name?'))
# Dec Example
print('Hi this is Jaemin.')
print(sent2idx('Hi this is Jaemin.', vocab=dec_vocab, max_sentence_length=dec_sentence_length, is_target=True))
def idx2token(idx, reverse_vocab):
return reverse_vocab[idx]
def idx2sent(indices, reverse_vocab=dec_reverse_vocab):
return " ".join([idx2token(idx, reverse_vocab) for idx in indices])
class DemoConfig:
# Model
hidden_size = 30
enc_emb_size = 30
dec_emb_size = 30
attn_size = 30
cell = tf.contrib.rnn.BasicLSTMCell
# Training
optimizer = tf.train.RMSPropOptimizer
n_epoch = 801
learning_rate = 0.001
# Tokens
start_token = 0 # GO
end_token = 1 # PAD
# Checkpoint Path
ckpt_dir = './ckpt_dir/'
class Seq2SeqModel(object):
def __init__(self, config, mode='training'):
assert mode in ['training', 'evaluation', 'inference']
self.mode = mode
# Model
self.hidden_size = config.hidden_size
self.enc_emb_size = config.enc_emb_size
self.dec_emb_size = config.dec_emb_size
self.attn_size = config.attn_size
self.cell = config.cell
# Training
self.optimizer = config.optimizer
self.n_epoch = config.n_epoch
self.learning_rate = config.learning_rate
# Tokens
self.start_token = config.start_token
self.end_token = config.end_token
# Checkpoint Path
self.ckpt_dir = config.ckpt_dir
def add_placeholders(self):
self.enc_inputs = tf.placeholder(
tf.int32,
shape=[None, enc_sentence_length],
name='input_sentences')
self.enc_sequence_length = tf.placeholder(
tf.int32,
shape=[None, ],
name='input_sequence_length')
if self.mode == 'training':
self.dec_inputs = tf.placeholder(
tf.int32,
shape=[None, dec_sentence_length + 1],
name='target_sentences')
self.dec_sequence_length = tf.placeholder(
tf.int32,
shape=[None, ],
name='target_sequence_length')
def add_encoder(self):
with tf.variable_scope('Encoder') as scope:
with tf.device('/cpu:0'):
self.enc_Wemb = tf.get_variable('embedding',
initializer=tf.random_uniform([enc_vocab_size + 1, self.enc_emb_size]),
dtype=tf.float32)
# [Batch_size x enc_sent_len x embedding_size]
self.enc_emb_inputs = tf.nn.embedding_lookup(
self.enc_Wemb, self.enc_inputs, name='emb_inputs')
enc_cell = self.cell(self.hidden_size)
# self.enc_inputs self.enc_Wemb
# enc_outputs: [batch_size x enc_sent_len x embedding_size]
# enc_last_state: [batch_size x embedding_size]
self.enc_outputs, self.enc_last_state = tf.nn.dynamic_rnn(
cell=enc_cell,
inputs=self.enc_emb_inputs,
sequence_length=self.enc_sequence_length,
time_major=False,
dtype=tf.float32)
def add_decoder(self):
with tf.variable_scope('Decoder') as scope:
with tf.device('/cpu:0'):
self.dec_Wemb = tf.get_variable('embedding',
initializer=tf.random_uniform([dec_vocab_size + 2, self.dec_emb_size]),
dtype=tf.float32)
# get dynamic batch_size
batch_size = tf.shape(self.enc_inputs)[0]
dec_cell = self.cell(self.hidden_size)
attn_mech = tf.contrib.seq2seq.LuongAttention(
num_units=self.attn_size,
memory=self.enc_outputs,
memory_sequence_length=self.enc_sequence_length,
normalize=False,
name='LuongAttention')
dec_cell = tf.contrib.seq2seq.DynamicAttentionWrapper(
cell=dec_cell,
attention_mechanism=attn_mech,
attention_size=self.attn_size,
# attention_history=False (in ver 1.2)
name='Attention_Wrapper')
initial_state = tf.contrib.seq2seq.DynamicAttentionWrapperState(
cell_state=self.enc_last_state,
attention=_zero_state_tensors(self.attn_size, batch_size, tf.float32))
# output projection (replacing `OutputProjectionWrapper`)
output_layer = Dense(dec_vocab_size + 2, name='output_projection')
if self.mode == 'training':
            # maximum unrollings in the current batch = max(dec_sent_len) + 1 (GO symbol)
self.max_dec_len = tf.reduce_max(self.dec_sequence_length + 1, name='max_dec_len')
self.dec_emb_inputs = tf.nn.embedding_lookup(
self.dec_Wemb, self.dec_inputs, name='emb_inputs')
training_helper = tf.contrib.seq2seq.TrainingHelper(
inputs=self.dec_emb_inputs,
sequence_length=self.dec_sequence_length + 1,
time_major=False,
name='training_helper')
training_decoder = tf.contrib.seq2seq.BasicDecoder(
cell=dec_cell,
helper=training_helper,
initial_state=initial_state,
output_layer=output_layer)
self.train_dec_outputs, train_dec_last_state = tf.contrib.seq2seq.dynamic_decode(
training_decoder,
output_time_major=False,
impute_finished=True,
maximum_iterations=self.max_dec_len)
# dec_outputs: collections.namedtuple(rnn_outputs, sample_id)
# dec_outputs.rnn_output: [batch_size x max(dec_sequence_len) x dec_vocab_size+2], tf.float32
# dec_outputs.sample_id [batch_size], tf.int32
# logits: [batch_size x max_dec_len x dec_vocab_size+2]
self.logits = tf.identity(self.train_dec_outputs.rnn_output, name='logits')
# targets: [batch_size x max_dec_len x dec_vocab_size+2]
self.targets = tf.slice(self.dec_inputs, [0, 0], [-1, self.max_dec_len], 'targets')
# masks: [batch_size x max_dec_len]
            # => ignore outputs after `dec_sequence_length+1` when calculating loss
self.masks = tf.sequence_mask(self.dec_sequence_length + 1, self.max_dec_len, dtype=tf.float32, name='masks')
# Control loss dimensions with `average_across_timesteps` and `average_across_batch`
# internal: `tf.nn.sparse_softmax_cross_entropy_with_logits`
self.batch_loss = tf.contrib.seq2seq.sequence_loss(
logits=self.logits,
targets=self.targets,
weights=self.masks,
name='batch_loss')
# prediction sample for validation
self.valid_predictions = tf.identity(self.train_dec_outputs.sample_id, name='valid_preds')
# List of training variables
# self.training_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
elif self.mode == 'inference':
start_tokens = tf.tile(tf.constant([self.start_token], dtype=tf.int32), [batch_size],
name='start_tokens')
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding=self.dec_Wemb,
start_tokens=start_tokens,
end_token=self.end_token)
inference_decoder = tf.contrib.seq2seq.BasicDecoder(
cell=dec_cell,
helper=inference_helper,
initial_state=initial_state,
output_layer=output_layer)
infer_dec_outputs, infer_dec_last_state = tf.contrib.seq2seq.dynamic_decode(
inference_decoder,
output_time_major=False,
impute_finished=True,
maximum_iterations=dec_sentence_length)
# [batch_size x dec_sentence_length], tf.int32
self.predictions = tf.identity(infer_dec_outputs.sample_id, name='predictions')
# equivalent to tf.argmax(infer_dec_outputs.rnn_output, axis=2, name='predictions')
# List of training variables
# self.training_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
def add_training_op(self):
self.training_op = self.optimizer(self.learning_rate, name='training_op').minimize(self.batch_loss)
def save(self, sess, var_list=None, save_path=None):
# print(f'Saving model at {save_path}')
if hasattr(self, 'training_variables'):
var_list = self.training_variables
saver = tf.train.Saver(var_list)
saver.save(sess, save_path, write_meta_graph=False)
def restore(self, sess, var_list=None, ckpt_path=None):
if hasattr(self, 'training_variables'):
var_list = self.training_variables
self.restorer = tf.train.Saver(var_list)
self.restorer.restore(sess, ckpt_path)
print('Restore Finished!')
def summary(self):
summary_writer = tf.summary.FileWriter(
logdir=self.ckpt_dir,
graph=tf.get_default_graph())
def build(self):
self.add_placeholders()
self.add_encoder()
self.add_decoder()
def train(self, sess, data, from_scratch=False,
load_ckpt=None, save_path=None):
# Restore Checkpoint
if from_scratch is False and os.path.isfile(load_ckpt):
            self.restore(sess, ckpt_path=load_ckpt)
# Add Optimizer to current graph
self.add_training_op()
sess.run(tf.global_variables_initializer())
input_batches, target_batches = data
loss_history = []
for epoch in tqdm(range(self.n_epoch)):
all_preds = []
epoch_loss = 0
for input_batch, target_batch in zip(input_batches, target_batches):
input_batch_tokens = []
target_batch_tokens = []
enc_sentence_lengths = []
dec_sentence_lengths = []
for input_sent in input_batch:
tokens, sent_len = sent2idx(input_sent)
input_batch_tokens.append(tokens)
enc_sentence_lengths.append(sent_len)
for target_sent in target_batch:
tokens, sent_len = sent2idx(target_sent,
vocab=dec_vocab,
max_sentence_length=dec_sentence_length,
is_target=True)
target_batch_tokens.append(tokens)
dec_sentence_lengths.append(sent_len)
# Evaluate 3 ops in the graph
                # => valid_predictions, loss, training_op (optimizer)
#
# sess.run(
# [self.dec_inputs, self.dec_sequence_length, self.max_dec_len, self.masks],
# feed_dict={
# self.enc_inputs: input_batch_tokens,
# self.enc_sequence_length: enc_sentence_lengths,
# self.dec_inputs: target_batch_tokens,
# self.dec_sequence_length: dec_sentence_lengths,
# })
sess.run(
[self.enc_outputs , self.enc_emb_inputs, self.enc_inputs , self.enc_sequence_length, self.enc_Wemb],
feed_dict={
self.enc_inputs: input_batch_tokens,
self.enc_sequence_length: enc_sentence_lengths,
self.dec_inputs: target_batch_tokens,
self.dec_sequence_length: dec_sentence_lengths,
})
batch_preds, batch_loss, _ = sess.run(
[self.valid_predictions, self.batch_loss, self.training_op],
feed_dict={
self.enc_inputs: input_batch_tokens,
self.enc_sequence_length: enc_sentence_lengths,
self.dec_inputs: target_batch_tokens,
self.dec_sequence_length: dec_sentence_lengths,
})
# loss_history.append(batch_loss)
epoch_loss += batch_loss
all_preds.append(batch_preds)
loss_history.append(epoch_loss)
# Logging every 400 epochs
if epoch % 400 == 0:
print('Epoch', epoch)
for input_batch, target_batch, batch_preds in zip(input_batches, target_batches, all_preds):
for input_sent, target_sent, pred in zip(input_batch, target_batch, batch_preds):
print("!!!!!")
# print(f'\tInput: {input_sent}')
# print(f'\tPrediction:', idx2sent(pred, reverse_vocab=dec_reverse_vocab))
# print(f'\tTarget:, {target_sent}')
# print(f'\tepoch loss: {epoch_loss:.2f}\n')
if save_path:
self.save(sess, save_path=save_path)
return loss_history
def inference(self, sess, data, load_ckpt):
self.restore(sess, ckpt_path=load_ckpt)
input_batch, target_batch = data
batch_preds = []
batch_tokens = []
batch_sent_lens = []
for input_sent in input_batch:
tokens, sent_len = sent2idx(input_sent)
batch_tokens.append(tokens)
batch_sent_lens.append(sent_len)
batch_preds = sess.run(
self.predictions,
feed_dict={
self.enc_inputs: batch_tokens,
self.enc_sequence_length: batch_sent_lens,
})
for input_sent, target_sent, pred in zip(input_batch, target_batch, batch_preds):
print('Input:', input_sent)
print('Prediction:', idx2sent(pred, reverse_vocab=dec_reverse_vocab))
print('Target:', target_sent, '\n')
tf.reset_default_graph()
config = DemoConfig()
model = Seq2SeqModel(config, mode='training')
model.build()
# model.summary()
print('Training model built!')
tf.reset_default_graph()
config = DemoConfig()
model = Seq2SeqModel(config, mode='inference')
model.build()
# model.summary()
print('Inference model built!')
tf.reset_default_graph()
with tf.Session() as sess:
config = DemoConfig()
model = Seq2SeqModel(config, mode='training')
model.build()
data = (input_batches, target_batches)
print(input_batches)
print(target_batches)
loss_history = model.train(sess, data, from_scratch=True, save_path=model.ckpt_dir)
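A follow-on inference sketch, assuming the training run above saved a checkpoint under config.ckpt_dir (the exact checkpoint path handling is an assumption):
tf.reset_default_graph()
with tf.Session() as sess:
    infer_model = Seq2SeqModel(config, mode='inference')
    infer_model.build()
    infer_model.inference(sess, (input_batches[0], target_batches[0]),
                          load_ckpt=infer_model.ckpt_dir)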
|
[
"tmddno1@naver.com"
] |
tmddno1@naver.com
|
5e51f31efe6e42031b193cc548e42e79af4e65f6
|
053cf1be6a56370de6e2a18147ed59401ec742b1
|
/kochira/db.py
|
18857ef71ce182b1bbde032907f32e56522cb121
|
[
"MS-PL"
] |
permissive
|
nolanlum/kochira
|
7b6023ecf283e4d20f6e1cafc4dc956c2df79d8d
|
0158a6877930f45ff6946770a3fb8a041117fe54
|
refs/heads/master
| 2022-08-06T09:28:50.956815
| 2022-05-25T23:20:04
| 2022-05-25T23:20:04
| 16,592,868
| 0
| 1
|
MS-PL
| 2019-05-13T22:02:05
| 2014-02-06T20:35:14
|
Python
|
UTF-8
|
Python
| false
| false
| 152
|
py
|
import threading
from peewee import Proxy, Model, SqliteDatabase
database = Proxy()
class Model(Model):
class Meta:
database = database
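The Proxy defers the database binding until runtime; a minimal sketch of how the application side would initialize it later (file name illustrative):
database.initialize(SqliteDatabase('kochira.db'))

class Quote(Model):
    pass  # concrete fields (peewee.CharField, etc.) would be declared here

database.connect()  # the proxy now forwards to the real SqliteDatabase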
|
[
"tony@rfw.name"
] |
tony@rfw.name
|
5565fd2ad6e0ac2e727b11714d0af064b4bca49c
|
39aced2a49c6e911692536f620c994f553caa1d0
|
/check_mk/openstack/common/rpc/__init__.py
|
619b15f158e640693eea7c8410a952aca2df6087
|
[] |
no_license
|
linvictor88/check_mk_server
|
ba9bd0e376fc825c9a3dbb045808ee1ce688af46
|
f279c8eb56795839872c7cfce60584d4f2650ba7
|
refs/heads/master
| 2021-01-01T06:38:20.120820
| 2014-01-16T04:00:58
| 2014-01-16T04:00:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,768
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
import inspect
from oslo.config import cfg
from check_mk.openstack.common.gettextutils import _
from check_mk.openstack.common import importutils
from check_mk.openstack.common import local
from check_mk.openstack.common import log as logging
LOG = logging.getLogger(__name__)
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['check_mk.openstack.common.exception',
'nova.exception',
'cinder.exception',
'exceptions',
],
                help='Modules of exceptions that are permitted to be recreated '
'upon receiving exception data from an rpc call.'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
cfg.StrOpt('control_exchange',
default='openstack',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
cfg.set_defaults(rpc_opts,
control_exchange=control_exchange)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(CONF, new=new)
def _check_for_lock():
if not CONF.debug:
return None
if ((hasattr(local.strong_store, 'locks_held')
and local.strong_store.locks_held)):
stack = ' :: '.join([frame[3] for frame in inspect.stack()])
        LOG.warn(_('An RPC is being made while holding a lock. The locks '
'currently held are %(locks)s. This is probably a bug. '
'Please report it. Include the following: [%(stack)s].'),
{'locks': local.strong_store.locks_held,
'stack': stack})
return True
return False
def call(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().call(CONF, context, topic, msg, timeout)
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:param check_for_lock: if True, a warning is emitted if a RPC call is made
with a lock held.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
if check_for_lock:
_check_for_lock()
return _get_impl().multicall(CONF, context, topic, msg, timeout)
def notify(context, topic, msg, envelope=False):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:param envelope: Set to True to enable message envelope for notifications.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host) if host else topic
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
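A hedged usage sketch of the two basic primitives (topic and method names invented for illustration; a configured rpc_backend is required for this to actually run):
ctxt = {}  # a real caller passes request-context information here
result = call(ctxt, 'compute', {'method': 'ping', 'args': {}}, timeout=10)
cast(ctxt, 'compute', {'method': 'reboot_instance', 'args': {'instance_id': 42}})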
|
[
"lin_victor@163.com"
] |
lin_victor@163.com
|
9fb6b68b1ee1e3a80cba15f99763810f7486deed
|
37d6ba9865ff154ffe337b73a2fd6824874385d5
|
/job_client/jobs/test.py
|
d9615293c3ecf6aa78997a39a27448b669453464
|
[
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
electronicarts/ava-capture
|
fc11291432153a58179c0eedce99f537e9c13bbf
|
fc6a1f89706309139f5ce0976e5a29969158a191
|
refs/heads/master
| 2023-07-05T20:31:03.083021
| 2022-12-07T22:19:16
| 2022-12-07T22:19:16
| 100,387,988
| 63
| 16
|
NOASSERTION
| 2023-09-04T23:33:41
| 2017-08-15T14:52:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,861
|
py
|
#
# Copyright (c) 2017 Electronic Arts Inc. All Rights Reserved
#
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
from .base import BaseJob
import time
import subprocess
import logging
import json
import os
class JobTestFixture(object):
def __init__(self):
FORMAT = "[MAIN] %(message)s"
logging.basicConfig(format=FORMAT)
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
class MockPipe(object):
def send(self,str):
print('PIPE> %s' % str)
self.pipe = MockPipe()
def __call__(self, job, parameters):
job(parameters, self.pipe, self.logger)
class DummyJob(BaseJob):
def __call__(self, parameters, pipe, log):
pipe.send('Progress 0%')
time.sleep(0.3)
pipe.send('Progress 40%')
time.sleep(0.3)
pipe.send('Progress 60%')
time.sleep(0.3)
pipe.send('Progress 100%')
class Meta(object):
description = 'This is a test Job'
class DummyJobLaunch(BaseJob):
def __call__(self, parameters, pipe, log):
pipe.send('Launch new job')
new_job_id = self.launch_job('jobs.test.DummyJob', tags=['alpha','bravo'])
pipe.send('Launched %d' % new_job_id)
class Meta(object):
description = 'This is a test Job'
class DummyJobWithImage(BaseJob):
def __call__(self, parameters, pipe, log):
import cv2
import numpy as np
height = 512
width = 512
img = np.zeros((height,width,3), np.uint8)
img[:,0:int(0.5*width)] = (255,0,0) # (B, G, R)
img[:,int(0.5*width):width] = (0,255,0)
pipe.send('Uploading JPG thumbnail...')
self.set_job_image(img)
class Meta(object):
description = 'This is a test Job'
class HttpTestJob(BaseJob):
def __call__(self, parameters, pipe, log):
r = self.server_get('/archive/archive_session/3/', json={'asdf':'asdf'})
print(r.status_code)
if not r.status_code==200:
raise Exception('Status:%d Content:%s' % (r.status_code, r.content))
class Meta(object):
description = 'This is a test Job'
class DummyJobWithChildren(BaseJob):
def __call__(self, parameters, pipe, log):
pipe.send('Begin DummyJobWithChildren')
child_launch_info = {}
child_launch_info['job_class'] = 'jobs.test.DummyJob'
child_launch_info['params'] = 'parameters'
child_launch_info['req_gpu'] = False
#child_launch_info['node_name'] = node_name
self.yieldToChildren([child_launch_info, child_launch_info, child_launch_info])
# anything after yieldToChildren will not be executed
class Meta(object):
description = 'This is a test Job With Children'
class DummyJobWithLog(BaseJob):
def __call__(self, parameters, pipe, log):
log.info('Begin DummyJobWithLog')
log.warning('DummyJobWithLog warning')
log.info('End DummyJobWithLog')
class Meta(object):
description = 'This is a test Job'
class DummyJobRaisingException(BaseJob):
def __call__(self, parameters, pipe, log):
pipe.send('Before Exception')
time.sleep(0.3)
raise Exception('This job is raising an exception')
pipe.send('After Exception')
class Meta(object):
description = 'This is a test Job, which raises an Exception'
class DummyJobSubprocess(BaseJob):
def __call__(self, parameters, pipe, log):
# Run a subprocess without capturing its output.
        # The preferred method is to use run_subprocess()
p = subprocess.Popen(["ipconfig", "/all"])
p.wait()
        # Run a subprocess without capturing its output; this raises an exception
        # if the process has a return code other than 0.
        # The preferred method is to use run_subprocess()
subprocess.check_output(["ipconfig", "/all"])
class Meta(object):
description = 'This is a test Job, launching a subprocess'
class DummyJobSubprocessFail(BaseJob):
def __call__(self, parameters, pipe, log):
        # Runs a subprocess that fails; the resulting exception will be logged, but not the process output.
        # The preferred method is to use run_subprocess()
subprocess.check_output("exit 1", shell=True)
class Meta(object):
description = 'This is a test Job, launching a subprocess'
class DummyJobSubprocessCaptureSmallOutput(BaseJob):
def __call__(self, parameters, pipe, log):
        # This will work for commands with a small output, but as mentioned in the Python documentation,
        # it may hang if the process outputs a lot of data.
        # Using self.run_subprocess should be the preferred method to launch a subprocess.
p = subprocess.Popen(["ipconfig", "/all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stdout:
log.info(stdout.decode())
if stderr:
log.error(stderr.decode())
class DummyJobSubprocessCaptureOutput(BaseJob):
def __call__(self, parameters, pipe, log):
(retcode, output) = self.run_subprocess(["ipconfig", "/all"], log)
if not retcode==0:
raise Exception('Expecting retcode=0')
class Meta(object):
description = 'This is a test Job, launching a subprocess'
class DummyJobSubprocessCaptureOutputFail(BaseJob):
def __call__(self, parameters, pipe, log):
self.run_subprocess(["ipkasdjhfgak"], log)
class Meta(object):
        description = "This is a test Job, launching a subprocess that doesn't exist"
class Meta(object):
description = 'Test transfer speed to a fileserver'
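The JobTestFixture at the top is presumably how these dummy jobs are exercised outside the job server; a sketch (BaseJob construction details are assumed):
if __name__ == '__main__':
    fixture = JobTestFixture()
    fixture(DummyJob(), parameters={})         # prints PIPE> progress lines
    fixture(DummyJobWithLog(), parameters={})  # logs through the fixture's logger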
|
[
"edanvoye@ea.com"
] |
edanvoye@ea.com
|
a66069a913da5d01f77e6db578d958cc20704f7b
|
2e336a1c8876f17588e8144114b2b1bacf4d4f66
|
/fde_cmp/old_src/old8_py_source/parse_xde.py
|
40b191f41d99678d2505d735b6a3994237d20757
|
[] |
no_license
|
darlyz/FelacGenerator-py-version
|
f67c7032f04d587e34f9e538d3dce4377fd2954b
|
bcd47ade1fa95182a0ff9363b6a49b45bc746d79
|
refs/heads/master
| 2020-05-29T14:30:44.451730
| 2019-08-30T07:37:59
| 2019-08-30T07:37:59
| 189,195,226
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,850
|
py
|
'''
Copyright: Copyright (c) 2019
Created: 2019-3-30
Author: Zhang_Licheng
Title: parse the xde file and check it
All rights reserved
'''
from colorama import init, Fore, Back, Style
init(autoreset=True)
Error_color = Fore.MAGENTA
Warnn_color = Fore.CYAN
Empha_color = Fore.GREEN
import re as regx
pre_check = 1
sec_check = 1
ifo_folder = '../4other_gen_file/'
def parse_xde(ges_info, xde_lists, list_addr, xdefile):
# 1 preliminary parse
pre_parse(ges_info, xde_lists, list_addr, xdefile)
# 2 checking
from check_xde import check_xde
error = check_xde(ges_info, xde_lists, list_addr)
if error : return error
# 3 secondary parse
sec_parse(ges_info, xde_lists, list_addr)
return False
def pre_parse(ges_info, xde_lists, list_addr, xdefile):
# all the xde keys
keyws_reg = r'ARRAY|COEF|COOR|COMMON|DAMP' \
+ r'|DEFI|DISP|DIST|END|USERC|' \
+ r'FUNC|FVECT|FMATR|GAUS|LOAD|' \
+ r'MATE|MASS|MATRIX|SHAP|STIF|VECT|' \
+ r'\$C[CPV6]|\$I|@[LAWSR]'
keywd_tag = {'bf_matrix' : '', \
'matr_dclr' : 0, \
'paragraph' : 'BFmate',\
}
line_i, stitchline = 0, ''
xde_lists['code'] = {}
list_addr['code'] = {}
    # 1 first-step parse: read the xde file into xde_lists
for line in xdefile.readlines():
line_i += 1
# 1.1.2 skip comment line and blank line
if regx.match(r'\s*(\$c[c6])?\s*((\\|//).*)?\s*\n',line,regx.I) != None:
continue
# 1.1 deal with valid sentence with comment
# 1.1.1 identify comment and stitch the next line begin with '\'
line = stitchline + line
if line.find('\\') != -1 :
stitchline = line.split('\\')[0]
continue
else: stitchline = ''
# 1.1.3 identify comment begin with '//'
if line.find('//') != -1:
line = line.split('//')[0]
# 1.1.4 pop the space from head and tail
line = line.strip()
# 1.2 retrieve the keywords
code_regx = regx.match(keyws_reg, line, regx.I)
# 1.2.1 find the keyword at the head
if code_regx != None:
key_lower = code_regx.group().lower()
# 1.2.1.1 match and pop to xde_lists
if key_lower in ['disp','coef','coor','gaus']:
push_key_declare(key_lower, line_i, line, xde_lists, list_addr)
elif key_lower == 'mate':
if keywd_tag['paragraph'] != 'BFmate':
print(f"{Error_color}Error NSN02, : if other " \
+ "paragraph is writed before definition, " \
+ "key word 'DEFI' should not be ommited before " \
+ "'MATE' line and C insertion for 'MATE'.\n")
push_key_declare('mate', line_i, line, xde_lists, list_addr)
keywd_tag['paragraph'] = 'AFmate'
elif key_lower == 'vect':
push_tonser_declare('vect', line_i, line, xde_lists, list_addr)
if line.find('|') != -1: xde_lists['cmplx_tag'] = 1
elif key_lower in ['fmatr','fvect']:
push_tonser_declare(key_lower, line_i, line, xde_lists, list_addr)
elif key_lower.find('$')!= -1 or key_lower.find('@')!= -1:
line = line \
.replace('%1',ges_info['shap_form']) \
.replace('%2',ges_info['shap_nodn'])
push_code_line (line_i, line, keywd_tag, xde_lists, list_addr)
if code_regx.group().lower() == '$cp':
xde_lists['cmplx_tag'] = 1
elif key_lower in ['common','array']:
line = line \
.replace('%1',ges_info['shap_form']) \
.replace('%2',ges_info['shap_nodn'])
push_code_line (line_i, line, keywd_tag, xde_lists, list_addr)
elif key_lower in ['mass','damp','stif']:
push_weak_declare(key_lower, line_i, line, keywd_tag, xde_lists, list_addr)
elif key_lower == 'shap':
if not 'shap' in xde_lists:
list_addr['shap'] = []
xde_lists['shap'] = []
list_addr['shap'].append(line_i)
xde_lists['shap'].append(line.split()[1:])
elif key_lower == 'dist':
if line.find('|') != -1:
xde_lists['cmplx_tag'] = 1
if keywd_tag['paragraph'] in ['mass','damp','stif']:
xde_lists[keywd_tag['paragraph']].append('dist')
xde_lists[keywd_tag['paragraph']].append(line.split('=')[1].strip())
list_addr[keywd_tag['paragraph']].append(line_i)
elif key_lower == 'load':
if line.find('|') != -1:
xde_lists['cmplx_tag'] = 1
if not 'load' in xde_lists:
xde_lists['load'] = []
list_addr['load'] = []
if line.find('=') != -1:
xde_lists['load'].append(line.split('=')[1].strip())
list_addr['load'].append(line_i)
keywd_tag['paragraph'] = 'load'
else:
load_list = line.rstrip().split()[1:]
xde_lists['load'].append(str(len(load_list)))
xde_lists['load'] += load_list
list_addr['load'].append(line_i)
elif key_lower == 'func':
wordlist = line.split()
if len(wordlist) != 1:
if not 'func' in xde_lists:
xde_lists['func'] = []
keywd_tag['func'] = 1
xde_lists['func'] += wordlist[1:]
else:
keywd_tag['paragraph'] = 'func'
elif key_lower == 'matrix':
push_tonser_declare('matrix', line_i, line, xde_lists, list_addr)
matrix_name = line.split()[1]
line_num = list_addr['matrix'][matrix_name]
list_addr['matrix'][matrix_name] = []
list_addr['matrix'][matrix_name].append(line_num)
            # handle code lines written between a matrix block and another paragraph
if keywd_tag['matr_dclr'] == 0:
keywd_tag['matr_dclr'] = 1
keywd_tag['bf_matrix'] = keywd_tag['paragraph']
keywd_tag['paragraph'] = 'matrix'
elif key_lower == 'defi':
keywd_tag['paragraph'] = 'BFmate'
elif key_lower == 'end':
pass
elif key_lower == 'userc':
pass
# 1.2.2 find the non-keyword-head line in 'func' 'stif' 'mass' and 'damp' paragraph
else:
# 1.2.2.1 find cmplx_tag tag
if line.find('|') != -1:
xde_lists['cmplx_tag'] = 1
key_words= keywd_tag['paragraph']
# 1.2.2.2 find weak form and disp var deform in non-keyword-head line
if key_words in ['mass','damp','stif','load'] \
and key_words in xde_lists:
if line.rstrip().lower() == 'null':
xde_lists[key_words].append(line.rstrip())
list_addr[key_words].append(line_i)
else:
xde_lists[key_words].append(line)
list_addr[key_words].append(line_i)
elif key_words == 'func':
xde_lists['code'][key_words].append(line)
list_addr['code'][key_words].append(line_i)
elif key_words == 'matrix' :
xde_lists['matrix'][matrix_name].append(line)
list_addr['matrix'][matrix_name].append(line_i)
else:
                print(f'{Warnn_color}Warn NSN03: redundant information ' \
                      + f'or wrong declaration, line {line_i}: ' + line)
if pre_check == 1:
import json
file = open(ifo_folder+'pre_check.json',mode='w')
file.write(json.dumps(xde_lists,indent=4))
file.close()
file = open(ifo_folder+'pre_addr.json',mode='w')
file.write(json.dumps(list_addr,indent=4))
file.close()
# end pre_parse()
# key declare type1: DISP, COEF, COOR, GAUS, MATE
def push_key_declare (strs, line_num, line, xde_lists, list_addr):
if strs in xde_lists:
print(f'{Warnn_color}Warn NSN04: line {Empha_color}' \
+f'{line_num}, {strs} {Warnn_color}has been declared ' \
+f'at line {Empha_color}{list_addr[strs]}\n')
else:
line = line.replace(',',' ').replace(';',' ')
list_addr[strs] = line_num
xde_lists[strs] = line.split()[1:]
# common declare type: VECT, FMATR
def push_tonser_declare (strs, line_num, line, xde_lists, list_addr):
if strs not in xde_lists:
xde_lists[strs] = {}
list_addr[strs] = {}
line = regx.sub(r'\s*=\s*',' ',line)
wordlist = line.split()
list_addr[strs][wordlist[1]] = line_num
xde_lists[strs][wordlist[1]] = wordlist[2:]
# common code line : @x, $Cx
def push_code_line (line_num, line, keywd_tag, xde_lists, list_addr):
code_find = 0
key_words = keywd_tag['paragraph']
    # handle code lines written between a matrix block and another paragraph
if key_words == 'matrix':
key_words = keywd_tag['bf_matrix']
if key_words in ['BFmate','AFmate','func','stif','mass','damp']:
code_find = 1
if key_words not in xde_lists['code']:
xde_lists['code'][key_words] = []
list_addr['code'][key_words] = []
xde_lists['code'][key_words].append(line)
list_addr['code'][key_words].append(line_num)
if code_find == 0:
print(f'{Error_color}Error NSN05: line {line_num}, ' \
+ 'wrong position inserted.\n')
# stif, mass, damp declare
def push_weak_declare (strs, line_num, line, keywd_tag, xde_lists, list_addr):
if strs in xde_lists:
print(f'{Error_color}Error NSN06: line {Empha_color}' \
+ f'{line_num}, {strs} {Error_color}has been declared ' \
+ f'at line {Empha_color}{list_addr[strs][0]}.\n')
else:
list_addr[strs], xde_lists[strs] = [], []
wordlist = line.split()
if len(wordlist) > 1:
list_addr[strs].append(line_num)
xde_lists[strs] = wordlist[1:]
else:
keywd_tag['paragraph'] = strs
def sec_parse(ges_info, xde_lists, list_addr):
# 3.0 parse disp and func for complex
if 'cmplx_tag' in xde_lists and xde_lists['cmplx_tag'] == 1:
if 'disp' in xde_lists:
xde_lists['cmplx_disp'] = xde_lists['disp'].copy()
xde_lists['disp'].clear()
for strs in xde_lists['cmplx_disp']:
xde_lists['disp'].append(strs+'r')
xde_lists['disp'].append(strs+'i')
if 'func' in xde_lists:
xde_lists['cmplx_func'] = xde_lists['func'].copy()
xde_lists['func'].clear()
for strs in xde_lists['cmplx_func']:
xde_lists['func'].append(strs+'r')
xde_lists['func'].append(strs+'i')
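    # Illustrative: with cmplx_tag set, disp ['u', 'p'] has just been expanded
    # to ['ur', 'ui', 'pr', 'pi'], so the real and imaginary parts travel as
    # separate scalar unknowns from here on (and likewise for func).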
# 3.1 parsing shap
if 'shap' in xde_lists:
parse_shap(ges_info, xde_lists)
# 3.2 parsing mate
if 'mate' in xde_lists:
parse_mate(xde_lists)
# 3.3 parsing gaus
if 'gaus' in xde_lists:
if xde_lists['gaus'][0] == '%3':
xde_lists['gaus'] = ges_info['gaus_type']
else:
xde_lists['gaus'] = xde_lists['gaus'][0]
# 3.4 parsing mass and damp
if 'mass' in xde_lists:
if xde_lists['mass'][0] == '%1':
xde_lists['mass'][0] = 'lump'
if len(xde_lists['mass']) == 1:
xde_lists['mass'].append('1.0')
if 'damp' in xde_lists:
if xde_lists['damp'][0] == '%1':
xde_lists['damp'][0] = 'lump'
if len(xde_lists['damp']) == 1:
            xde_lists['damp'].append('1.0')
# 3.5 parsing fvect, fmatr, vect, matrix
if 'fvect' in xde_lists:
for lists in xde_lists['fvect'].values():
if len(lists) == 0:
lists.append('1')
if len(lists) == 1:
lists += ['' for ii in range(int(lists[0]))]
if 'fmatr' in xde_lists:
for lists in xde_lists['fmatr'].values():
if len(lists) == 0:
lists += ['1','1']
if len(lists) == 2:
lists += [['' for ii in range(int(lists[1]))] \
for ii in range(int(lists[0]))]
if 'vect' in xde_lists:
for lists in xde_lists['vect'].values():
if not lists[0].isnumeric():
lists.insert(0,str(len(lists)))
if 'matrix' in xde_lists:
for lists in xde_lists['matrix'].values():
if not lists[0].isnumeric() \
and not lists[1].isnumeric() :
row = len(lists)
clm = len(lists[0].split())
lists.insert(0,str(clm))
lists.insert(0,str(row))
else:
row = int(lists[0])
for ii in range(row):
lists[ii+2] = lists[ii+2].split()
if 'load' in xde_lists:
if xde_lists['load'][0].isnumeric():
xde_lists['load'] \
= ['+[' + xde_lists['disp'][i] + ']*' + xde_lists['load'][i+1] \
for i in range(int(xde_lists['load'][0]))]
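        # Illustrative: load ['2', 'fx', 'fy'] with disp ['u', 'v'] becomes
        # ['+[u]*fx', '+[v]*fy'].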
# 3.6 parsing code
parse_code(xde_lists)
if sec_check == 1:
import json
file = open(ifo_folder+'sec_check.json',mode='w')
file.write(json.dumps(xde_lists,indent=4))
file.close()
# end sec_parse()
def parse_shap(ges_info, xde_lists):
shap_dict = {}
    # 3.1.1 common shap (the user may declare it more than once; the last declaration stays active)
for shap_list in xde_lists['shap']:
if len(shap_list) == 2:
if shap_list[0] == '%1':
shap_list[0] = ges_info['shap_form']
if shap_list[1] == '%2':
shap_list[1] = ges_info['shap_nodn']
base_shap_type = shap_list[0] + shap_list[1]
shap_dict[base_shap_type] = xde_lists['disp'].copy()
if 'coef' in xde_lists:
xde_lists['coef_shap'] = {}
xde_lists['coef_shap'][base_shap_type] = xde_lists['coef'].copy()
# 3.1.2 penalty or mix shap
for shap_list in xde_lists['shap']:
if len(shap_list) >= 3:
if shap_list[0] == '%1':
shap_list[0] = ges_info['shap_form']
if shap_list[1] == '%4' \
or shap_list[1].isnumeric():
var_list = shap_list[2:]
disp_find_n = len(set(var_list)&set(xde_lists['disp']))
coef_find_n = 0
if 'coef' in xde_lists:
coef_find_n = len(set(var_list)&set(xde_lists['coef']))
if (disp_find_n > 0 or coef_find_n > 0) \
and shap_list[1] == '%4':
                # corner-node count of the matching low-order sub-element
                subs_nodn = {'t6': '3', 'q9': '4', 'w10': '4', 'c27': '8'}
                shap_list[1] = subs_nodn.get(base_shap_type, shap_list[1])
subs_shap_type = shap_list[0] + shap_list[1]
if disp_find_n > 0:
if subs_shap_type not in shap_dict:
shap_dict[subs_shap_type] = []
if coef_find_n > 0:
if subs_shap_type not in xde_lists['coef_shap']:
xde_lists['coef_shap'][subs_shap_type] = []
for var_name in var_list:
if var_name.isnumeric():
continue
if 'coef' not in xde_lists:
if var_name not in xde_lists['disp'] :
continue
else:
if var_name not in xde_lists['disp'] \
and var_name not in xde_lists['coef'] :
continue
if var_name in shap_dict[base_shap_type]:
shap_dict[base_shap_type].remove(var_name)
shap_dict[subs_shap_type].append(var_name)
if 'coef_shap' in xde_lists:
if var_name in xde_lists['coef_shap'][base_shap_type]:
xde_lists['coef_shap'][base_shap_type].remove(var_name)
xde_lists['coef_shap'][subs_shap_type].append(var_name)
elif shap_list[1] == '%2c' \
or (shap_list[1][-1].lower() == 'c' \
              and shap_list[1][:-1].isnumeric()) :
var_list = shap_list[2:]
pena_vars = {}
for var_name in var_list:
if var_name.isnumeric():
continue
                if var_name.find('_') != -1:
var_name, pan_name = var_name.split('_')[:2]
pena_vars[var_name] = pan_name
shap_list[1] = shap_list[1].replace('%2',ges_info['shap_nodn'])
subs_shap_type = shap_list[0] + shap_list[1]
if subs_shap_type not in shap_dict:
shap_dict[subs_shap_type] = pena_vars
for pena_var in pena_vars.keys() :
shap_dict[base_shap_type].remove(pena_var)
xde_lists['shap'] = shap_dict
# end parse_shap()
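# Illustrative: disp ['u', 'v', 'p'] with shap lines [['t', '6'], ['t', '%4', 'p']]
# produces xde_lists['shap'] = {'t6': ['u', 'v'], 't3': ['p']} -- the pressure
# variable is moved from the quadratic t6 base onto its linear t3 sub-element.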
def parse_mate(xde_lists):
mate_dict = {}
mate_dict['default'] = {}
mate_var = []
mate_val = []
for strs in xde_lists['mate']:
        if regx.match(r'[a-z]\w*', strs, regx.I) is None:
mate_val.append(strs)
else:
mate_var.append(strs)
for var_i, var in enumerate(mate_var):
if var_i < len(mate_val):
mate_dict['default'][var] = mate_val[var_i]
else:
mate_dict['default'][var] = '0.0'
xde_lists['mate'] = mate_dict
# end parse_mate()
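# Illustrative: mate ['E', '2.1e5', 'nu', '0.3'] yields
# xde_lists['mate'] = {'default': {'E': '2.1e5', 'nu': '0.3'}};
# any name left without a value is padded with '0.0'.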
def parse_code(xde_lists):
regx_key = r'\$C[CPV6]|@[LAWSR]|ARRAY'
for code_place in xde_lists['code'].keys():
for code_i, code_line in enumerate(xde_lists['code'][code_place]):
code_regx = regx.match(regx_key,code_line,regx.I)
            if code_regx is None:
                # not a recognized code key; leave the line untouched
continue
code_key = code_regx.group()
if code_key.lower() == '$cc' \
or code_key.lower() == '$c6':
xde_lists['code'][code_place][code_i] \
= 'Insr_Code: ' + code_line.replace(code_key,'').lstrip()
elif code_key.lower() == '$cv':
xde_lists['code'][code_place][code_i] \
= 'Tnsr_Asgn: ' + code_line.replace(code_key,'').lstrip()
elif code_key.lower() == '$cp':
xde_lists['code'][code_place][code_i] \
= 'Cplx_Asgn: ' + code_line.replace(code_key,'').lstrip()
# 3.6.2 parsing operator
elif code_key.lower() == '@l':
opr_list = code_line.replace(code_key,'').lstrip().split()
opr_expr = opr_list[0]
opr_name = opr_expr.split('.')[0]
asgn_type = opr_list[1].lower()
var_prefxs = ['', '', '', '[']
var_posfxs = ['', '_i', '_i_j', ']']
asgn_types = ['c', 'v', 'm', 'f']
if asgn_type == 'n':
if opr_name.lower() == 'singular':
xde_lists['code'][code_place][code_i] \
= 'Oprt_Asgn: '+opr_expr
elif opr_name.lower() == 'vol':
xde_lists['code'][code_place][code_i] \
= 'Oprt_Asgn: '+opr_expr
elif asgn_type in asgn_types:
type_indx = asgn_types.index(asgn_type)
prefx_str = var_prefxs[type_indx]
posfx_str = var_posfxs[type_indx]
if asgn_type == 'f':
if 'fvect' in xde_lists \
and opr_list[2] in xde_lists['fvect']:
posfx_str = '_i' + posfx_str
elif 'fmatr' in xde_lists \
and opr_list[2] in xde_lists['fmatr']:
posfx_str = '_i_j' + posfx_str
temp_str = 'Oprt_Asgn: ' \
+ prefx_str + opr_list[2] + posfx_str \
+ '=' + opr_expr + '(' \
+ ','.join(opr_list[3:]) + ')'
xde_lists['code'][code_place][code_i] = temp_str
# 3.6.3 parsing assignment
elif code_key.lower() == '@a':
expr = code_line.replace(code_key,'').lstrip().split('=')
xde_lists['code'][code_place][code_i] \
= 'Func_Asgn: [' + expr[0].rstrip() \
+ ']=' + expr[1].lstrip()
elif code_key.lower() == '@r':
expr = code_line.replace(code_key,'').lstrip().split('=')
xde_lists['code'][code_place][code_i] \
= 'Func_Asgn: [' + expr[0].rstrip() \
+ ']=' + expr[1].lstrip().replace('[','').replace(']','')
elif code_key.lower() in ['@w','@s']:
opr_list = code_line.replace(code_key,'').lstrip().split()
temp_str = 'Func_Asgn: '
for strs, prefx, posfx \
in zip(['vect', 'matrix', 'fvect', 'fmatr' ], \
['', '', '[', '[' ],
['_i=', '_i_j=', '_i]=', '_i_j]=']) :
if strs in xde_lists \
and opr_list[0] in xde_lists[strs]:
prefx_str, posfx_str = prefx, posfx
temp_str += prefx_str + opr_list[0] + posfx_str
temp_str += opr_list[1] + '[' + ','.join(opr_list[2:]) + ']'
xde_lists['code'][code_place][code_i] = temp_str
elif code_key.lower() == 'array':
var_list = code_line.replace(code_key,'').strip().split(',')
temp_str = 'Insr_Code: double '
for var_strs in var_list:
var_name = var_strs.strip().split('[')[0]
idx_list = regx.findall(r'\[\d+\]',var_strs,regx.I)
if len(idx_list) == 1:
vect_len = idx_list[0].lstrip('[').rstrip(']')
if 'vect' not in xde_lists :
xde_lists['vect'] = {}
xde_lists['vect'][var_name] = [vect_len] \
+ [var_name + '[' + str(ii+1) + ']' \
for ii in range(int(vect_len))]
var_strs = var_name + '[' + str(int(vect_len)+1) +']'
elif len(idx_list) == 2:
matr_row = idx_list[0].lstrip('[').rstrip(']')
matr_clm = idx_list[1].lstrip('[').rstrip(']')
if 'matrix' not in xde_lists :
xde_lists['matrix'] = {}
xde_lists['matrix'][var_name] = [matr_row, matr_clm] \
+ [[var_name+'['+str(ii+1)+']['+str(jj+1)+']' \
for jj in range(int(matr_clm))] \
for ii in range(int(matr_row))]
temp_str += var_strs + ','
xde_lists['code'][code_place][code_i] = temp_str.rstrip(',') +';'
# end parse_code()
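# Illustrative rewrites performed by parse_code() (hypothetical inputs):
#   '$cc printf("hi");' -> 'Insr_Code: printf("hi");'
#   '@a u = x + y'      -> 'Func_Asgn: [u]=x + y'
#   '@l vol.1 n'        -> 'Oprt_Asgn: vol.1'
#   'ARRAY w[3]'        -> 'Insr_Code: double w[4];' plus a generated
#                          xde_lists['vect']['w'] entry of length 3.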
|
[
"darlyz@163.com"
] |
darlyz@163.com
|
3addcd5596e8abe50095bbc5edb726f467a0da2a
|
1cf12fe24ea501f5b028f76e103235e6c9bb61d1
|
/Stage3/stage3_hw/conf/settings.py
|
bbe2917e6c5014d7f488240c2335a3be08e9b52a
|
[] |
no_license
|
MichelleYang2017/python_camp_note
|
5961d895251fc9569f29fb35f36fba9c76808ee0
|
0f68a1c4a2c10a29be5741974f9225c9032f69c1
|
refs/heads/master
| 2020-06-30T21:19:13.791402
| 2019-09-16T13:29:27
| 2019-09-16T13:29:27
| 200,954,366
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time :2019/8/27 13:49
# @Author:Michelle Yang
# @File :settings.py
import logging
class Configuration():
def __init__(self):
self.log_file_path = '../log/access.log'
self.user_filepath = '../db/user_info.txt'
self.school_file = '../db/school_info.txt'
def Log():
config = Configuration()
logging.basicConfig(
filename=config.log_file_path,
        format='%(asctime)s - %(name)s -%(levelname)s - %(module)s-%(funcName)s : %(message)s', # log record format
        datefmt='%Y-%m-%d %H:%M:%S %p', # timestamp format
        level=20, # 20 == logging.INFO
)
return logging
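# Illustrative usage (hypothetical caller module):
#   from conf.settings import Log
#   logger = Log()
#   logger.info('user login ok')  # appended to ../log/access.log at INFO level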
|
[
"michelleyang2017a@gmail.com"
] |
michelleyang2017a@gmail.com
|
8c02828757de239f0ead038ffaf9d6b377d4e6a5
|
9b9c23561583e5f7ead97be5f0771bfbd82f3111
|
/takeout/test_Case/testLogin.py
|
c05379fc00724e45103b00caf100253218c3c68f
|
[] |
no_license
|
chensiting/Takeout-
|
3260b6f4bab1bb5a35f79b8572d6e5bfc887b070
|
f2f87d6de6ce7efd0ae71e87ab690aedd1c6b442
|
refs/heads/master
| 2023-03-07T17:58:31.723561
| 2021-02-25T19:51:50
| 2021-02-25T19:51:50
| 341,072,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
#-*- coding: utf-8 -*-
#@File : testLogin.py
#@Time : 2021-02-25 1:52
#@Author : chensiting
#@Email : 475707078@qq.com
#@Software: PyCharm
import json
#1- read the test data
from takeout.tool.getExcelData import get_excelData,set_excelData # import the Excel sheet helpers
workBookNew,workSheetNew = set_excelData() # returns a tuple
#2- build and send the requests
dataList = get_excelData("登录模块",2,7) # [(request body, expected response), (), ()] -- tuples wrapped in an outer list
from takeout.lib.apiLib.login import Login
# for one in range(0,len(get_excelData("登录模块",2,7))):
# Login().login(get_excelData("登录模块",2,7))
for one in range(0,len(dataList)):
print(one)
    res = Login().login(dataList[one][0], False) # index into the list, then take element 0 (the request body)
    if res['msg'] == json.loads(dataList[one][1])['msg']: # msg of the actual response == expected msg from the sheet
        # one is a row index, not a value
        print('---pass---')
        workSheetNew.write(one +1,12,'pass') # (row, column, cell content)
else:
print('---fail---')
workSheetNew.write(one +1, 12, 'fail')
#3- write the results
workBookNew.save('../data/res.xls')
# looking rows up with index() had a bug: for duplicate values it always returns the first index, so the loop above was switched to range(len(...))
# for one in dataList: # one -- a tuple of (request body, expected response)
#     #print('expected result from the sheet',one)
#     res=Login().login(one[0],False) # actual response; login() returns two things, pass False to get the json
#     #print('actual response',res)
#     #print(one[1])
#     # compare the expected and actual response data
#     if res['msg'] == json.loads(one[1])['msg']: # msg of the actual response == expected msg from the sheet
#         print('---pass---')
#         # list.index(element) returns the index of that element
#         workSheetNew.write(dataList.index(one)+1,12,'pass') # (row, column, cell content)
#         # the row number must be recovered with dataList.index(one) because one is a value, not an index; +1 skips the header row 0
#     else:
#         print('---fail---')
#         workSheetNew.write(dataList.index(one) + 1, 12, 'fail')
|
[
"475707078@qq.com"
] |
475707078@qq.com
|
936522237d18094ee546cb518d80f91f14a54951
|
8d55c88625b838c46381fee6f0b96de009c9d127
|
/creon_api/scripts/crawl_daily_minute_price.py
|
e99e0972f37b04764ef73cccde9030f7d3e47ea4
|
[] |
no_license
|
woojae-jang/creon-api
|
fac544ab871ef54e66a2d2ab76bda96fbac64030
|
b424766a157e9ab118a9d77e61881540b312d93d
|
refs/heads/main
| 2023-04-09T02:51:09.940878
| 2021-04-22T22:04:07
| 2021-04-22T22:04:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
from datetime import datetime
import pandas as pd
from pandas.errors import EmptyDataError
from creon_api.cybos import Cybos
from .. import utils
from ..config import MINUTE_DATA_PATH
from ..logger import logger
def get_not_available_code_list(path: str) -> list:
try:
code_df = pd.read_csv(f"{path}.csv", header=None, dtype=object)
code_list = code_df.iloc[:, 0].astype(str).tolist()
print(code_list)
    except (FileNotFoundError, EmptyDataError):
        code_list = []
return code_list
def save_not_available_code_list(code_list: list, path: str) -> None:
not_found_code_series = pd.Series(code_list)
not_found_code_series.to_csv(f"{path}.csv", index=False, header=False)
def save_daily_minute_price_file(stock_code_list: list, date: datetime = None, data_folder_path: str = MINUTE_DATA_PATH):
if date is None:
date = datetime.today()
utils.make_dir(data_folder_path)
cybos_api = Cybos()
date_for_file_name = date.strftime("%Y-%m-%d")
not_available_code_list = get_not_available_code_list(f"{data_folder_path}/not_available_{date_for_file_name}")
for i, code in enumerate(stock_code_list):
print(f"{date_for_file_name} {code} {i + 1}/{len(stock_code_list)}")
utils.make_dir(f"{data_folder_path}/{code}")
if code in not_available_code_list:
continue
if utils.is_exist(f"{data_folder_path}/{code}/{date_for_file_name}.csv"):
print("이미있음.")
continue
try:
res = cybos_api.get_minutely_price(code, date.strftime("%Y%m%d"))
except Exception as e:
logger.error(e)
not_available_code_list.append(code)
continue
res.to_csv(f"{data_folder_path}/{code}/{date_for_file_name}.csv", encoding='utf-8-sig')
save_not_available_code_list(not_available_code_list, f"{data_folder_path}/not_available_{date_for_file_name}")
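# Illustrative usage (hypothetical stock code and date):
#   from datetime import datetime
#   save_daily_minute_price_file(['005930'], date=datetime(2021, 4, 22))
# Minute bars land in MINUTE_DATA_PATH/<code>/<YYYY-MM-DD>.csv, and codes that
# raise are recorded in not_available_<YYYY-MM-DD>.csv so later runs skip them.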
|
[
"dnwogo@naver.com"
] |
dnwogo@naver.com
|
c155897cccd8137d1996ec92abda932c6816020d
|
3dcdc2c1b59fb790be09689e70677ce0f48046e5
|
/nacc/ftld/ivp/forms.py
|
adf9791f341c1daa5cd8ee84ff328e2bcb70b73f
|
[
"BSD-2-Clause"
] |
permissive
|
utsw-bicf/nacculator
|
d517d1ae3257291ea954c7839c44645a64787866
|
1e8eb9b4029c7c52b242c76f941a1572577d300e
|
refs/heads/master
| 2021-01-06T16:50:32.602080
| 2020-07-28T16:10:14
| 2020-07-28T16:10:14
| 241,405,424
| 0
| 0
|
BSD-2-Clause
| 2020-07-28T16:10:16
| 2020-02-18T16:06:04
|
Python
|
UTF-8
|
Python
| false
| false
| 84,782
|
py
|
###############################################################################
# Copyright 2015-2019 University of Florida. All rights reserved.
# This file is part of UF CTS-IT's NACCulator project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
import nacc.uds3
### BEGIN non-generated code
# WARNING: When generating new forms, do not overwrite this section
from datetime import date
# WARNING: When generating new forms, use CURRENT_YEAR instead of "2014"
# WARNING: When generating new forms, use CURRENT_YEAR-15 instead of "1999"
CURRENT_YEAR = date.today().year
### END non-generated code
# This form is for FTLD IVP
def header_fields():
fields = {}
fields['PACKET'] = nacc.uds3.Field(name='PACKET', typename='Char', position=(1, 2), length=2, inclusive_range=None, allowable_values=[], blanks=[])
fields['FORMID'] = nacc.uds3.Field(name='FORMID', typename='Char', position=(4, 6), length=3, inclusive_range=None, allowable_values=[], blanks=[])
fields['FORMVER'] = nacc.uds3.Field(name='FORMVER', typename='Num', position=(8, 10), length=3, inclusive_range=None, allowable_values=['3'], blanks=[])
fields['ADCID'] = nacc.uds3.Field(name='ADCID', typename='Num', position=(12, 13), length=2, inclusive_range=(2, 43), allowable_values=[], blanks=[])
fields['PTID'] = nacc.uds3.Field(name='PTID', typename='Char', position=(15, 24), length=10, inclusive_range=None, allowable_values=[], blanks=[])
fields['VISITMO'] = nacc.uds3.Field(name='VISITMO', typename='Num', position=(26, 27), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=[])
fields['VISITDAY'] = nacc.uds3.Field(name='VISITDAY', typename='Num', position=(29, 30), length=2, inclusive_range=(1, 31), allowable_values=[], blanks=[])
fields['VISITYR'] = nacc.uds3.Field(name='VISITYR', typename='Num', position=(32, 35), length=4, inclusive_range=(2015, CURRENT_YEAR), allowable_values=[], blanks=[])
fields['VISITNUM'] = nacc.uds3.Field(name='VISITNUM', typename='Char', position=(37, 39), length=3, inclusive_range=None, allowable_values=[], blanks=[])
fields['INITIALS'] = nacc.uds3.Field(name='INITIALS', typename='Char', position=(41, 43), length=3, inclusive_range=None, allowable_values=[], blanks=[])
return fields
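# Illustrative: each Field maps one fixed-width span of the flat record, e.g.
# PACKET occupies characters 1-2 and FORMID characters 4-6, with a one-column
# gap between consecutive fields.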
class FormZ1X(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['LANGA1'] = nacc.uds3.Field(name='LANGA1', typename='Num', position=(45, 45), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGA2'] = nacc.uds3.Field(name='LANGA2', typename='Num', position=(47, 47), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 2b A2SUB = 0 (No)'])
self.fields['A2SUB'] = nacc.uds3.Field(name='A2SUB', typename='Num', position=(49, 49), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['A2NOT'] = nacc.uds3.Field(name='A2NOT', typename='Num', position=(51, 52), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if Question 2b A2SUB = 1 (Yes)'])
self.fields['LANGA3'] = nacc.uds3.Field(name='LANGA3', typename='Num', position=(54, 54), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 3b A3SUB = 0 (No)'])
self.fields['A3SUB'] = nacc.uds3.Field(name='A3SUB', typename='Num', position=(56, 56), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['A3NOT'] = nacc.uds3.Field(name='A3NOT', typename='Num', position=(58, 59), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if Question 3b A3SUB = 1 (Yes)'])
self.fields['LANGA4'] = nacc.uds3.Field(name='LANGA4', typename='Num', position=(61, 61), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 4b A4SUB = 0 (No)'])
self.fields['A4SUB'] = nacc.uds3.Field(name='A4SUB', typename='Num', position=(63, 63), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['A4NOT'] = nacc.uds3.Field(name='A4NOT', typename='Num', position=(65, 66), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if Question 4b A4SUB = 1 (Yes)'])
self.fields['LANGA5'] = nacc.uds3.Field(name='LANGA5', typename='Num', position=(68, 68), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGB1'] = nacc.uds3.Field(name='LANGB1', typename='Num', position=(70, 70), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 6b B1SUB = 0 (No)'])
self.fields['B1SUB'] = nacc.uds3.Field(name='B1SUB', typename='Num', position=(72, 72), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['B1NOT'] = nacc.uds3.Field(name='B1NOT', typename='Num', position=(74, 75), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if Question 6b B1SUB = 1 (Yes)'])
self.fields['LANGB4'] = nacc.uds3.Field(name='LANGB4', typename='Num', position=(77, 77), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGB5'] = nacc.uds3.Field(name='LANGB5', typename='Num', position=(79, 79), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 8b B5SUB = 0 (No)'])
self.fields['B5SUB'] = nacc.uds3.Field(name='B5SUB', typename='Num', position=(81, 81), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['B5NOT'] = nacc.uds3.Field(name='B5NOT', typename='Num', position=(83, 84), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if Question 8b B5SUB = 1 (Yes)'])
self.fields['LANGB6'] = nacc.uds3.Field(name='LANGB6', typename='Num', position=(86, 86), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 9b B6SUB = 0 (No)'])
self.fields['B6SUB'] = nacc.uds3.Field(name='B6SUB', typename='Num', position=(88, 88), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['B6NOT'] = nacc.uds3.Field(name='B6NOT', typename='Num', position=(90, 91), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if Question 9b B6SUB = 1 (Yes)'])
self.fields['LANGB7'] = nacc.uds3.Field(name='LANGB7', typename='Num', position=(93, 93), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 10b B7SUB = 0 (No)'])
self.fields['B7SUB'] = nacc.uds3.Field(name='B7SUB', typename='Num', position=(95, 95), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['B7NOT'] = nacc.uds3.Field(name='B7NOT', typename='Num', position=(97, 98), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if Question 10b B7SUB = 1 (Yes)'])
self.fields['LANGB8'] = nacc.uds3.Field(name='LANGB8', typename='Num', position=(100, 100), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGB9'] = nacc.uds3.Field(name='LANGB9', typename='Num', position=(102, 102), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGC2'] = nacc.uds3.Field(name='LANGC2', typename='Num', position=(104, 104), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGD1'] = nacc.uds3.Field(name='LANGD1', typename='Num', position=(106, 106), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGD2'] = nacc.uds3.Field(name='LANGD2', typename='Num', position=(108, 108), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGA3A'] = nacc.uds3.Field(name='LANGA3A', typename='Num', position=(110, 110), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 16b FTDA3AFS = 0 (No)'])
self.fields['FTDA3AFS'] = nacc.uds3.Field(name='FTDA3AFS', typename='Num', position=(112, 112), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDA3AFR'] = nacc.uds3.Field(name='FTDA3AFR', typename='Num', position=(114, 115), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98', '99'], blanks=['Blank if Question 16b FTDA3AFS = 1 (Yes)'])
self.fields['LANGB3F'] = nacc.uds3.Field(name='LANGB3F', typename='Num', position=(117, 117), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGB9F'] = nacc.uds3.Field(name='LANGB9F', typename='Num', position=(119, 119), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGC1F'] = nacc.uds3.Field(name='LANGC1F', typename='Num', position=(121, 121), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGC2F'] = nacc.uds3.Field(name='LANGC2F', typename='Num', position=(123, 123), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGC3F'] = nacc.uds3.Field(name='LANGC3F', typename='Num', position=(125, 125), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGC4F'] = nacc.uds3.Field(name='LANGC4F', typename='Num', position=(127, 127), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 22b FTDC4FS = 0 (No)'])
self.fields['FTDC4FS'] = nacc.uds3.Field(name='FTDC4FS', typename='Num', position=(129, 129), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDC4FR'] = nacc.uds3.Field(name='FTDC4FR', typename='Num', position=(131, 132), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98', '99'], blanks=['Blank if Question 22b FTDC4FS = 1 (Yes)'])
self.fields['LANGC5F'] = nacc.uds3.Field(name='LANGC5F', typename='Num', position=(134, 134), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 23b FTDC5FS = 0 (No)'])
self.fields['FTDC5FS'] = nacc.uds3.Field(name='FTDC5FS', typename='Num', position=(136, 136), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDC5FR'] = nacc.uds3.Field(name='FTDC5FR', typename='Num', position=(138, 139), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98', '99'], blanks=['Blank if Question 23b FTDC5FS = 1 (Yes)'])
self.fields['LANGC6F'] = nacc.uds3.Field(name='LANGC6F', typename='Num', position=(141, 141), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 24b FTDC6FS = 0 (No)'])
self.fields['FTDC6FS'] = nacc.uds3.Field(name='FTDC6FS', typename='Num', position=(143, 143), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDC6FR'] = nacc.uds3.Field(name='FTDC6FR', typename='Num', position=(145, 146), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98', '99'], blanks=['Blank if Question 24b FTDC6FS= 1 (Yes)'])
self.fields['LANGE2F'] = nacc.uds3.Field(name='LANGE2F', typename='Num', position=(148, 148), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGE3F'] = nacc.uds3.Field(name='LANGE3F', typename='Num', position=(150, 150), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['LANGCLS'] = nacc.uds3.Field(name='LANGCLS', typename='Num', position=(152, 152), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=['Blank if Question 27b CLSSUB = 0 (No)'])
self.fields['CLSSUB'] = nacc.uds3.Field(name='CLSSUB', typename='Num', position=(154, 154), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
class FormA3a(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDRELCO'] = nacc.uds3.Field(name='FTDRELCO', typename='Num', position=(45, 45), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=[])
self.fields['FTDSIBBY'] = nacc.uds3.Field(name='FTDSIBBY', typename='Num', position=(47, 50), length=4, inclusive_range=(1885, 2000), allowable_values=[], blanks=['Blank if Question 1 FTDRELCO ne 3 (Sibling)'])
self.fields['FTDChDBY'] = nacc.uds3.Field(name='FTDChDBY', typename='Num', position=(52, 55), length=4, inclusive_range=(1920, 2000), allowable_values=[], blanks=['Blank if Question 1 FTDRELCO ne 4 (Child)'])
self.fields['FTDSTORE'] = nacc.uds3.Field(name='FTDSTORE', typename='Num', position=(57, 57), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDSLEAR'] = nacc.uds3.Field(name='FTDSLEAR', typename='Num', position=(59, 59), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDCOMME'] = nacc.uds3.Field(name='FTDCOMME', typename='Num', position=(61, 61), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
class FormB3F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDLTFAS'] = nacc.uds3.Field(name='FTDLTFAS', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDLIMB'] = nacc.uds3.Field(name='FTDLIMB', typename='Num', position=(47, 47), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDBULB'] = nacc.uds3.Field(name='FTDBULB', typename='Num', position=(49, 49), length=1, inclusive_range=None, allowable_values=['0', '3'], blanks=[])
self.fields['FTDGSEV'] = nacc.uds3.Field(name='FTDGSEV', typename='Num', position=(51, 51), length=1, inclusive_range=(0, 4), allowable_values=['0', '1', '2', '3', '4', '8'], blanks=[])
self.fields['FTDGSEVX'] = nacc.uds3.Field(name='FTDGSEVX', typename='Char', position=(53, 112), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question B1 FTDGSEV ne 8 (Untestable)'])
self.fields['FTDGTYP'] = nacc.uds3.Field(name='FTDGTYP', typename='Num', position=(114, 114), length=1, inclusive_range=(0, 7), allowable_values=['0', '1', '2', '3', '4', '5', '6', '7', '8'], blanks=[])
self.fields['FTDGTYPG'] = nacc.uds3.Field(name='FTDGTYPG', typename='Char', position=(116, 175), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question B2 FTDGTYP ne 7 (Other gait disorder not listed above)'])
self.fields['FTDGTYPX'] = nacc.uds3.Field(name='FTDGTYPX', typename='Char', position=(177, 236), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question B2 FTDGTYP ne 8 (Untestable)'])
class FormB9F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDPPASL'] = nacc.uds3.Field(name='FTDPPASL', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDPPAPO'] = nacc.uds3.Field(name='FTDPPAPO', typename='Num', position=(47, 47), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPAIW'] = nacc.uds3.Field(name='FTDPPAIW', typename='Num', position=(49, 49), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPASW'] = nacc.uds3.Field(name='FTDPPASW', typename='Num', position=(51, 51), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPAPK'] = nacc.uds3.Field(name='FTDPPAPK', typename='Num', position=(53, 53), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPAGS'] = nacc.uds3.Field(name='FTDPPAGS', typename='Num', position=(55, 55), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPAEh'] = nacc.uds3.Field(name='FTDPPAEh', typename='Num', position=(57, 57), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPACS'] = nacc.uds3.Field(name='FTDPPACS', typename='Num', position=(59, 59), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPASS'] = nacc.uds3.Field(name='FTDPPASS', typename='Num', position=(61, 61), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPASR'] = nacc.uds3.Field(name='FTDPPASR', typename='Num', position=(63, 63), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDPPASD'] = nacc.uds3.Field(name='FTDPPASD', typename='Num', position=(65, 65), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDCPPA'] = nacc.uds3.Field(name='FTDCPPA', typename='Num', position=(67, 67), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)'])
self.fields['FTDCPPAS'] = nacc.uds3.Field(name='FTDCPPAS', typename='Num', position=(69, 69), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if Question 1 FTDPPASL = 0 (No)', 'Blank if Question 12 FTDCPPA = 0 (No)'])
self.fields['FTDBVCLN'] = nacc.uds3.Field(name='FTDBVCLN', typename='Num', position=(71, 71), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDBVDIS'] = nacc.uds3.Field(name='FTDBVDIS', typename='Num', position=(73, 73), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDBVAPA'] = nacc.uds3.Field(name='FTDBVAPA', typename='Num', position=(75, 75), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDBVLOS'] = nacc.uds3.Field(name='FTDBVLOS', typename='Num', position=(77, 77), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDBVRIT'] = nacc.uds3.Field(name='FTDBVRIT', typename='Num', position=(79, 79), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDBVhYP'] = nacc.uds3.Field(name='FTDBVhYP', typename='Num', position=(81, 81), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDBVNEU'] = nacc.uds3.Field(name='FTDBVNEU', typename='Num', position=(83, 83), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDBVIDL'] = nacc.uds3.Field(name='FTDBVIDL', typename='Num', position=(85, 85), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2', '9'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDBVFT'] = nacc.uds3.Field(name='FTDBVFT', typename='Num', position=(87, 87), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=['Blank if Question 14 FTDBVCLN = 0 (No)'])
self.fields['FTDEMGPV'] = nacc.uds3.Field(name='FTDEMGPV', typename='Num', position=(89, 89), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDEMGPY'] = nacc.uds3.Field(name='FTDEMGPY', typename='Num', position=(91, 91), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 23 FTDEMGPV = 1 (Yes)'])
self.fields['FTDEMGMN'] = nacc.uds3.Field(name='FTDEMGMN', typename='Num', position=(93, 93), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 24 FTDEMGPY = 0 (No)'])
self.fields['FTDPABVF'] = nacc.uds3.Field(name='FTDPABVF', typename='Num', position=(95, 95), length=1, inclusive_range=(1, 5), allowable_values=['1', '2', '3', '4', '5', '9'], blanks=['Blank if Question 12 FTDCPPA = 0 (No) ', 'Blank if Question 12 FTDCPPA = blank', 'Blank if Question 22 FTDBVFT = 0 (Does not meet criteria)', 'Blank if Question 22 FTDBVFT = blank'])
class FormC1F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDWORRC'] = nacc.uds3.Field(name='FTDWORRC', typename='Num', position=(45, 46), length=2, inclusive_range=(0, 15), allowable_values=['95', '96', '97', '98'], blanks=[])
self.fields['FTDWORRS'] = nacc.uds3.Field(name='FTDWORRS', typename='Num', position=(48, 49), length=2, inclusive_range=(0, 15), allowable_values=[], blanks=['Blank if Question 1a FTDWORRC = 95', 'Blank if Question 1a FTDWORRC = 96', 'Blank if Question 1a FTDWORRC = 97', 'Blank if Question 1a FTDWORRC = 98'])
self.fields['FTDWORRR'] = nacc.uds3.Field(name='FTDWORRR', typename='Num', position=(51, 52), length=2, inclusive_range=(0, 15), allowable_values=[], blanks=['Blank if Question 1a FTDWORRC = 95', 'Blank if Question 1a FTDWORRC = 96', 'Blank if Question 1a FTDWORRC = 97', 'Blank if Question 1a FTDWORRC = 98'])
self.fields['FTDWORIC'] = nacc.uds3.Field(name='FTDWORIC', typename='Num', position=(54, 55), length=2, inclusive_range=(0, 15), allowable_values=[], blanks=['Blank if Question 1a FTDWORRC = 95', 'Blank if Question 1a FTDWORRC = 96', 'Blank if Question 1a FTDWORRC = 97', 'Blank if Question 1a FTDWORRC = 98'])
self.fields['FTDWORIS'] = nacc.uds3.Field(name='FTDWORIS', typename='Num', position=(57, 58), length=2, inclusive_range=(0, 15), allowable_values=[], blanks=['Blank if Question 1a FTDWORRC = 95', 'Blank if Question 1a FTDWORRC = 96', 'Blank if Question 1a FTDWORRC = 97', 'Blank if Question 1a FTDWORRC = 98'])
self.fields['FTDWORIR'] = nacc.uds3.Field(name='FTDWORIR', typename='Num', position=(60, 61), length=2, inclusive_range=(0, 15), allowable_values=[], blanks=['Blank if Question 1a FTDWORRC = 95', 'Blank if Question 1a FTDWORRC = 96', 'Blank if Question 1a FTDWORRC = 97', 'Blank if Question 1a FTDWORRC = 98'])
self.fields['FTDWORIP'] = nacc.uds3.Field(name='FTDWORIP', typename='Num', position=(63, 64), length=2, inclusive_range=(0, 15), allowable_values=[], blanks=['Blank if Question 1a FTDWORRC = 95', 'Blank if Question 1a FTDWORRC = 96', 'Blank if Question 1a FTDWORRC = 97', 'Blank if Question 1a FTDWORRC = 98'])
self.fields['FTDSEMMT'] = nacc.uds3.Field(name='FTDSEMMT', typename='Num', position=(66, 67), length=2, inclusive_range=(0, 20), allowable_values=['95', '96', '97', '98'], blanks=[])
self.fields['FTDSEMAA'] = nacc.uds3.Field(name='FTDSEMAA', typename='Num', position=(69, 70), length=2, inclusive_range=(0, 8), allowable_values=['95', '96', '97', '98'], blanks=[])
self.fields['FTDSEMTA'] = nacc.uds3.Field(name='FTDSEMTA', typename='Num', position=(72, 73), length=2, inclusive_range=(0, 8), allowable_values=[], blanks=['Blank if Question 3a FTDSEMAA = 95-98'])
self.fields['FTDSEMSU'] = nacc.uds3.Field(name='FTDSEMSU', typename='Num', position=(75, 76), length=2, inclusive_range=(0, 16), allowable_values=[], blanks=['Blank if Question 3a FTDSEMAA = 95-98'])
self.fields['FTDANASW'] = nacc.uds3.Field(name='FTDANASW', typename='Num', position=(78, 79), length=2, inclusive_range=(0, 5), allowable_values=['95', '96', '97', '98'], blanks=[])
self.fields['FTDANAOW'] = nacc.uds3.Field(name='FTDANAOW', typename='Num', position=(81, 82), length=2, inclusive_range=(0, 5), allowable_values=[], blanks=['Blank if Question 4a FTDANASW = 95-98'])
self.fields['FTDANATS'] = nacc.uds3.Field(name='FTDANATS', typename='Num', position=(84, 85), length=2, inclusive_range=(0, 10), allowable_values=[], blanks=['Blank if Question 4a FTDANASW = 95-98'])
self.fields['FTDSENAS'] = nacc.uds3.Field(name='FTDSENAS', typename='Num', position=(87, 88), length=2, inclusive_range=(0, 5), allowable_values=['95', '96', '97', '98'], blanks=[])
self.fields['FTDSENOS'] = nacc.uds3.Field(name='FTDSENOS', typename='Num', position=(90, 91), length=2, inclusive_range=(0, 37), allowable_values=[], blanks=['Blank if Question 5a FTDSENAS = 95-98'])
self.fields['FTDSENSR'] = nacc.uds3.Field(name='FTDSENSR', typename='Num', position=(93, 94), length=2, inclusive_range=(0, 20), allowable_values=[], blanks=['Blank if Question 5a FTDSENAS = 95-98'])
self.fields['FTDSENPR'] = nacc.uds3.Field(name='FTDSENPR', typename='Num', position=(96, 97), length=2, inclusive_range=(0, 20), allowable_values=[], blanks=['Blank if Question 5a FTDSENAS = 95-98'])
self.fields['FTDNOUNC'] = nacc.uds3.Field(name='FTDNOUNC', typename='Num', position=(99, 100), length=2, inclusive_range=(0, 16), allowable_values=['95', '96', '97', '98'], blanks=[])
self.fields['FTDVERBC'] = nacc.uds3.Field(name='FTDVERBC', typename='Num', position=(102, 103), length=2, inclusive_range=(0, 16), allowable_values=[], blanks=['Blank if Question 6a FTDNOUNC = 95-98'])
self.fields['FTDRATIO'] = nacc.uds3.Field(name='FTDRATIO', typename='Num', position=(105, 109), length=5, inclusive_range=(0,16), allowable_values=[], blanks=['Blank if Question 6a FTDNOUNC = 95-98'])
self.fields['FTDREAAS'] = nacc.uds3.Field(name='FTDREAAS', typename='Num', position=(111, 112), length=2, inclusive_range=(0, 5), allowable_values=['95', '96', '97', '98'], blanks=[])
self.fields['FTDREAOS'] = nacc.uds3.Field(name='FTDREAOS', typename='Num', position=(114, 115), length=2, inclusive_range=(0, 37), allowable_values=[], blanks=['Blank if Question 7a FTDREAAS = 95-98'])
self.fields['FTDREASR'] = nacc.uds3.Field(name='FTDREASR', typename='Num', position=(117, 118), length=2, inclusive_range=(0, 20), allowable_values=[], blanks=['Blank if Question 7a FTDREAAS = 95-98'])
self.fields['FTDREAPR'] = nacc.uds3.Field(name='FTDREAPR', typename='Num', position=(120, 121), length=2, inclusive_range=(0, 20), allowable_values=[], blanks=['Blank if Question 7a FTDREAAS = 95-98'])
class FormC2F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDCPC2F'] = nacc.uds3.Field(name='FTDCPC2F', typename='Num', position=(45, 46), length=2, inclusive_range=None, allowable_values=['95', '96', '97', '98'], blanks=['Blank if form completed'])
self.fields['FTDhAIRD'] = nacc.uds3.Field(name='FTDhAIRD', typename='Num', position=(48, 48), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDSPIT'] = nacc.uds3.Field(name='FTDSPIT', typename='Num', position=(50, 50), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDNOSE'] = nacc.uds3.Field(name='FTDNOSE', typename='Num', position=(52, 52), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDCOAGE'] = nacc.uds3.Field(name='FTDCOAGE', typename='Num', position=(54, 54), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDCRY'] = nacc.uds3.Field(name='FTDCRY', typename='Num', position=(56, 56), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDCUT'] = nacc.uds3.Field(name='FTDCUT', typename='Num', position=(58, 58), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDYTRIP'] = nacc.uds3.Field(name='FTDYTRIP', typename='Num', position=(60, 60), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDEATP'] = nacc.uds3.Field(name='FTDEATP', typename='Num', position=(62, 62), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDTELLA'] = nacc.uds3.Field(name='FTDTELLA', typename='Num', position=(64, 64), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDOPIN'] = nacc.uds3.Field(name='FTDOPIN', typename='Num', position=(66, 66), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDLAUGh'] = nacc.uds3.Field(name='FTDLAUGh', typename='Num', position=(68, 68), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDShIRT'] = nacc.uds3.Field(name='FTDShIRT', typename='Num', position=(70, 70), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDKEEPM'] = nacc.uds3.Field(name='FTDKEEPM', typename='Num', position=(72, 72), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDPICKN'] = nacc.uds3.Field(name='FTDPICKN', typename='Num', position=(74, 74), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDOVER'] = nacc.uds3.Field(name='FTDOVER', typename='Num', position=(76, 76), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDEATR'] = nacc.uds3.Field(name='FTDEATR', typename='Num', position=(78, 78), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDhAIRL'] = nacc.uds3.Field(name='FTDhAIRL', typename='Num', position=(80, 80), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDShIRW'] = nacc.uds3.Field(name='FTDShIRW', typename='Num', position=(82, 82), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDMOVE'] = nacc.uds3.Field(name='FTDMOVE', typename='Num', position=(84, 84), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDhUGS'] = nacc.uds3.Field(name='FTDhUGS', typename='Num', position=(86, 86), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDLOUD'] = nacc.uds3.Field(name='FTDLOUD', typename='Num', position=(88, 88), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDLOST'] = nacc.uds3.Field(name='FTDLOST', typename='Num', position=(90, 90), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98', 'Blank if question not answered'])
self.fields['FTDSNTOT'] = nacc.uds3.Field(name='FTDSNTOT', typename='Num', position=(92, 93), length=2, inclusive_range=(0, 22), allowable_values=['88'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98'])
self.fields['FTDSNTBS'] = nacc.uds3.Field(name='FTDSNTBS', typename='Num', position=(95, 96), length=2, inclusive_range=(0, 12), allowable_values=['88'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98'])
self.fields['FTDSNTOS'] = nacc.uds3.Field(name='FTDSNTOS', typename='Num', position=(98, 99), length=2, inclusive_range=(0, 10), allowable_values=['88'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98'])
self.fields['FTDSNRAT'] = nacc.uds3.Field(name='FTDSNRAT', typename='Num', position=(101, 105), length=5, inclusive_range=(0, 22), allowable_values=['88.88'], blanks=['Blank if Question 0 FTDCPC2F = 95', 'Blank if Question 0 FTDCPC2F = 96', 'Blank if Question 0 FTDCPC2F = 97', 'Blank if Question 0 FTDCPC2F = 98'])
class FormC3F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDSELF'] = nacc.uds3.Field(name='FTDSELF', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDBADLY'] = nacc.uds3.Field(name='FTDBADLY', typename='Num', position=(47, 47), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDDEPR'] = nacc.uds3.Field(name='FTDDEPR', typename='Num', position=(49, 49), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDEMOTD'] = nacc.uds3.Field(name='FTDEMOTD', typename='Num', position=(51, 51), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDLSELF'] = nacc.uds3.Field(name='FTDLSELF', typename='Num', position=(53, 53), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDDISR'] = nacc.uds3.Field(name='FTDDISR', typename='Num', position=(55, 55), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDBELCh'] = nacc.uds3.Field(name='FTDBELCh', typename='Num', position=(57, 57), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDGIGG'] = nacc.uds3.Field(name='FTDGIGG', typename='Num', position=(59, 59), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDPRIV'] = nacc.uds3.Field(name='FTDPRIV', typename='Num', position=(61, 61), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDNEGAT'] = nacc.uds3.Field(name='FTDNEGAT', typename='Num', position=(63, 63), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDECOMM'] = nacc.uds3.Field(name='FTDECOMM', typename='Num', position=(65, 65), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDINAPJ'] = nacc.uds3.Field(name='FTDINAPJ', typename='Num', position=(67, 67), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDFAILA'] = nacc.uds3.Field(name='FTDFAILA', typename='Num', position=(69, 69), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDRESIS'] = nacc.uds3.Field(name='FTDRESIS', typename='Num', position=(71, 71), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDINTER'] = nacc.uds3.Field(name='FTDINTER', typename='Num', position=(73, 73), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDVERBA'] = nacc.uds3.Field(name='FTDVERBA', typename='Num', position=(75, 75), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDPhYSI'] = nacc.uds3.Field(name='FTDPhYSI', typename='Num', position=(77, 77), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDTOPIC'] = nacc.uds3.Field(name='FTDTOPIC', typename='Num', position=(79, 79), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDPROTO'] = nacc.uds3.Field(name='FTDPROTO', typename='Num', position=(81, 81), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDPREO'] = nacc.uds3.Field(name='FTDPREO', typename='Num', position=(83, 83), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDFINI'] = nacc.uds3.Field(name='FTDFINI', typename='Num', position=(85, 85), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDACTED'] = nacc.uds3.Field(name='FTDACTED', typename='Num', position=(87, 87), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDABS'] = nacc.uds3.Field(name='FTDABS', typename='Num', position=(89, 89), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDFEEDB'] = nacc.uds3.Field(name='FTDFEEDB', typename='Num', position=(91, 91), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDFRUST'] = nacc.uds3.Field(name='FTDFRUST', typename='Num', position=(93, 93), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDANXI'] = nacc.uds3.Field(name='FTDANXI', typename='Num', position=(95, 95), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDNERVO'] = nacc.uds3.Field(name='FTDNERVO', typename='Num', position=(97, 97), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDNDIAG'] = nacc.uds3.Field(name='FTDNDIAG', typename='Num', position=(99, 99), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDSTIMB'] = nacc.uds3.Field(name='FTDSTIMB', typename='Num', position=(101, 101), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDSTIME'] = nacc.uds3.Field(name='FTDSTIME', typename='Num', position=(103, 103), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDOBJEC'] = nacc.uds3.Field(name='FTDOBJEC', typename='Num', position=(105, 105), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDCIRCU'] = nacc.uds3.Field(name='FTDCIRCU', typename='Num', position=(107, 107), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDPERSE'] = nacc.uds3.Field(name='FTDPERSE', typename='Num', position=(109, 109), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDREPEA'] = nacc.uds3.Field(name='FTDREPEA', typename='Num', position=(111, 111), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDANECD'] = nacc.uds3.Field(name='FTDANECD', typename='Num', position=(113, 113), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDDINIT'] = nacc.uds3.Field(name='FTDDINIT', typename='Num', position=(115, 115), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDDELAY'] = nacc.uds3.Field(name='FTDDELAY', typename='Num', position=(117, 117), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDADDVE'] = nacc.uds3.Field(name='FTDADDVE', typename='Num', position=(119, 119), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDFLUCT'] = nacc.uds3.Field(name='FTDFLUCT', typename='Num', position=(121, 121), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDLOSTT'] = nacc.uds3.Field(name='FTDLOSTT', typename='Num', position=(123, 123), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDREPRU'] = nacc.uds3.Field(name='FTDREPRU', typename='Num', position=(125, 125), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDTRAIN'] = nacc.uds3.Field(name='FTDTRAIN', typename='Num', position=(127, 127), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDDISCL'] = nacc.uds3.Field(name='FTDDISCL', typename='Num', position=(129, 129), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDSPONT'] = nacc.uds3.Field(name='FTDSPONT', typename='Num', position=(131, 131), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDSPONR'] = nacc.uds3.Field(name='FTDSPONR', typename='Num', position=(133, 133), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDSTOOD'] = nacc.uds3.Field(name='FTDSTOOD', typename='Num', position=(135, 135), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
        self.fields['FTDTOUCH'] = nacc.uds3.Field(name='FTDTOUCH', typename='Num', position=(137, 137), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDDSOCI'] = nacc.uds3.Field(name='FTDDSOCI', typename='Num', position=(139, 139), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDEXAGG'] = nacc.uds3.Field(name='FTDEXAGG', typename='Num', position=(141, 141), length=1, inclusive_range=(0, 3), allowable_values=['0', '1', '2', '3'], blanks=[])
self.fields['FTDSBTOT'] = nacc.uds3.Field(name='FTDSBTOT', typename='Num', position=(143, 144), length=2, inclusive_range=(0, 42), allowable_values=[], blanks=[])
self.fields['FTDSBCTO'] = nacc.uds3.Field(name='FTDSBCTO', typename='Num', position=(146, 148), length=3, inclusive_range=(0, 105), allowable_values=[], blanks=[])
self.fields['FTDLENGT'] = nacc.uds3.Field(name='FTDLENGT', typename='Num', position=(150, 152), length=3, inclusive_range=(20, 240), allowable_values=[], blanks=[])
class FormC4F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDCPC4F'] = nacc.uds3.Field(name='FTDCPC4F', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2'], blanks=[])
self.fields['FTDWORKU'] = nacc.uds3.Field(name='FTDWORKU', typename='Num', position=(47, 47), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if question not answered'])
self.fields['FTDMIST'] = nacc.uds3.Field(name='FTDMIST', typename='Num', position=(49, 49), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if question not answered'])
self.fields['FTDCRIT'] = nacc.uds3.Field(name='FTDCRIT', typename='Num', position=(51, 51), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if question not answered'])
self.fields['FTDWORR'] = nacc.uds3.Field(name='FTDWORR', typename='Num', position=(53, 53), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if question not answered'])
self.fields['FTDBAD'] = nacc.uds3.Field(name='FTDBAD', typename='Num', position=(55, 55), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if question not answered'])
self.fields['FTDPOOR'] = nacc.uds3.Field(name='FTDPOOR', typename='Num', position=(57, 57), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if question not answered'])
self.fields['FTDFFEAR'] = nacc.uds3.Field(name='FTDFFEAR', typename='Num', position=(59, 59), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4'], blanks=['Blank if question not answered'])
self.fields['FTDBIST'] = nacc.uds3.Field(name='FTDBIST', typename='Num', position=(61, 62), length=2, inclusive_range=(7, 28), allowable_values=['88'], blanks=[])
class FormC5F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDCPC5F'] = nacc.uds3.Field(name='FTDCPC5F', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2'], blanks=[])
self.fields['FTDINSEX'] = nacc.uds3.Field(name='FTDINSEX', typename='Num', position=(47, 47), length=1, inclusive_range=(1, 2), allowable_values=['1', '2'], blanks=[])
self.fields['FTDINFMO'] = nacc.uds3.Field(name='FTDINFMO', typename='Num', position=(49, 50), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=[])
self.fields['FTDINFYR'] = nacc.uds3.Field(name='FTDINFYR', typename='Num', position=(52, 55), length=4, inclusive_range=(1900, 1990), allowable_values=[], blanks=[])
self.fields['FTDINFRE'] = nacc.uds3.Field(name='FTDINFRE', typename='Num', position=(57, 57), length=1, inclusive_range=(1, 6), allowable_values=['1', '2', '3', '4', '5', '6'], blanks=[])
self.fields['FTDFEEL'] = nacc.uds3.Field(name='FTDFEEL', typename='Num', position=(59, 59), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDDIFF'] = nacc.uds3.Field(name='FTDDIFF', typename='Num', position=(61, 61), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDSORR'] = nacc.uds3.Field(name='FTDSORR', typename='Num', position=(63, 63), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDSIDE'] = nacc.uds3.Field(name='FTDSIDE', typename='Num', position=(65, 65), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDADVAN'] = nacc.uds3.Field(name='FTDADVAN', typename='Num', position=(67, 67), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDIMAG'] = nacc.uds3.Field(name='FTDIMAG', typename='Num', position=(69, 69), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDMISF'] = nacc.uds3.Field(name='FTDMISF', typename='Num', position=(71, 71), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDWASTE'] = nacc.uds3.Field(name='FTDWASTE', typename='Num', position=(73, 73), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDPITY'] = nacc.uds3.Field(name='FTDPITY', typename='Num', position=(75, 75), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDQTOUC'] = nacc.uds3.Field(name='FTDQTOUC', typename='Num', position=(77, 77), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDSIDES'] = nacc.uds3.Field(name='FTDSIDES', typename='Num', position=(79, 79), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
        self.fields['FTDSOFTH'] = nacc.uds3.Field(name='FTDSOFTH', typename='Num', position=(81, 81), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDUPSET'] = nacc.uds3.Field(name='FTDUPSET', typename='Num', position=(83, 83), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDCRITI'] = nacc.uds3.Field(name='FTDCRITI', typename='Num', position=(85, 85), length=1, inclusive_range=(1, 5), allowable_values=['1', '5'], blanks=['Blank if question not answered'])
self.fields['FTDIRIEC'] = nacc.uds3.Field(name='FTDIRIEC', typename='Num', position=(87, 88), length=2, inclusive_range=(7, 35), allowable_values=['88'], blanks=[])
self.fields['FTDIRIPT'] = nacc.uds3.Field(name='FTDIRIPT', typename='Num', position=(90, 91), length=2, inclusive_range=(7, 35), allowable_values=['88'], blanks=[])
class FormC6F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDCPC6F'] = nacc.uds3.Field(name='FTDCPC6F', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 2), allowable_values=['0', '1', '2'], blanks=[])
self.fields['FTDALTER'] = nacc.uds3.Field(name='FTDALTER', typename='Num', position=(47, 47), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDEMOT'] = nacc.uds3.Field(name='FTDEMOT', typename='Num', position=(49, 49), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDACROS'] = nacc.uds3.Field(name='FTDACROS', typename='Num', position=(51, 51), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDCONV'] = nacc.uds3.Field(name='FTDCONV', typename='Num', position=(53, 53), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDINTUI'] = nacc.uds3.Field(name='FTDINTUI', typename='Num', position=(55, 55), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDJOKE'] = nacc.uds3.Field(name='FTDJOKE', typename='Num', position=(57, 57), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDIMAGP'] = nacc.uds3.Field(name='FTDIMAGP', typename='Num', position=(59, 59), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDINAPP'] = nacc.uds3.Field(name='FTDINAPP', typename='Num', position=(61, 61), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
        self.fields['FTDCHBEH'] = nacc.uds3.Field(name='FTDCHBEH', typename='Num', position=(63, 63), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
        self.fields['FTDADBEH'] = nacc.uds3.Field(name='FTDADBEH', typename='Num', position=(65, 65), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDLYING'] = nacc.uds3.Field(name='FTDLYING', typename='Num', position=(67, 67), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDGOODF'] = nacc.uds3.Field(name='FTDGOODF', typename='Num', position=(69, 69), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDREGUL'] = nacc.uds3.Field(name='FTDREGUL', typename='Num', position=(71, 71), length=1, inclusive_range=(0, 5), allowable_values=['0', '1', '2', '3', '4', '5'], blanks=['Blank if question not answered'])
self.fields['FTDSMSCR'] = nacc.uds3.Field(name='FTDSMSCR', typename='Num', position=(73, 74), length=2, inclusive_range=(0, 30), allowable_values=['88'], blanks=[])
self.fields['FTDSPSCR'] = nacc.uds3.Field(name='FTDSPSCR', typename='Num', position=(76, 77), length=2, inclusive_range=(0, 35), allowable_values=['88'], blanks=[])
self.fields['FTDRSMST'] = nacc.uds3.Field(name='FTDRSMST', typename='Num', position=(79, 80), length=2, inclusive_range=(0, 65), allowable_values=['88'], blanks=[])
class FormE2F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDSMRI'] = nacc.uds3.Field(name='FTDSMRI', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDSMMO'] = nacc.uds3.Field(name='FTDSMMO', typename='Num', position=(47, 48), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
        self.fields['FTDSMDY'] = nacc.uds3.Field(name='FTDSMDY', typename='Num', position=(50, 51), length=2, inclusive_range=(1, 31), allowable_values=['99'], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
self.fields['FTDSMYR'] = nacc.uds3.Field(name='FTDSMYR', typename='Num', position=(53, 56), length=4, inclusive_range=(2000, CURRENT_YEAR), allowable_values=[], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
self.fields['FTDSMDIC'] = nacc.uds3.Field(name='FTDSMDIC', typename='Num', position=(58, 58), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
self.fields['FTDSMDIS'] = nacc.uds3.Field(name='FTDSMDIS', typename='Char', position=(60, 119), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDSMRI = 0 (No)', 'Blank if Question 1b FTDSMDIC ne 1 (Yes)'])
self.fields['FTDSMADN'] = nacc.uds3.Field(name='FTDSMADN', typename='Num', position=(121, 121), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
self.fields['FTDSMADV'] = nacc.uds3.Field(name='FTDSMADV', typename='Char', position=(123, 182), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDSMRI = 0 (No)', 'Blank if Question 1c FTDSMADN ne 1 (Yes)'])
self.fields['FTDSMMAN'] = nacc.uds3.Field(name='FTDSMMAN', typename='Num', position=(184, 184), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4', '9'], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
self.fields['FTDSMMAO'] = nacc.uds3.Field(name='FTDSMMAO', typename='Char', position=(186, 245), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDSMRI = 0 (No)', 'Blank if Question 1d FTDSMMAN ne 4 (Other)'])
self.fields['FTDSMMAM'] = nacc.uds3.Field(name='FTDSMMAM', typename='Char', position=(247, 306), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDSMRI = 0 (No)', 'Blank if Question 1d FTDSMMAN = 9 (Unknown)'])
self.fields['FTDSMFS'] = nacc.uds3.Field(name='FTDSMFS', typename='Num', position=(308, 308), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4', '9'], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
self.fields['FTDSMFSO'] = nacc.uds3.Field(name='FTDSMFSO', typename='Char', position=(310, 369), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDSMRI = 0 (No)', 'Blank if Question 1e FTDSMFS ne 4 (Other)'])
self.fields['FTDSMQU'] = nacc.uds3.Field(name='FTDSMQU', typename='Num', position=(371, 371), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 1 FTDSMRI = 0 (No)'])
self.fields['FTDFDGPT'] = nacc.uds3.Field(name='FTDFDGPT', typename='Num', position=(373, 373), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDFPMO'] = nacc.uds3.Field(name='FTDFPMO', typename='Num', position=(375, 376), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)'])
self.fields['FTDFPDY'] = nacc.uds3.Field(name='FTDFPDY', typename='Num', position=(378, 379), length=2, inclusive_range=(1, 31), allowable_values=['99'], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)'])
self.fields['FTDFPYR'] = nacc.uds3.Field(name='FTDFPYR', typename='Num', position=(381, 384), length=4, inclusive_range=(2000, CURRENT_YEAR), allowable_values=[], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)'])
self.fields['FTDFDDIC'] = nacc.uds3.Field(name='FTDFDDIC', typename='Num', position=(386, 386), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)'])
self.fields['FTDFDDID'] = nacc.uds3.Field(name='FTDFDDID', typename='Char', position=(388, 447), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)', 'Blank if Question 2b FTDFDDIC ne 1 (Yes)'])
self.fields['FTDFDADN'] = nacc.uds3.Field(name='FTDFDADN', typename='Num', position=(449, 449), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)'])
self.fields['FTDFDADV'] = nacc.uds3.Field(name='FTDFDADV', typename='Char', position=(451, 510), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)', 'Blank if Question 2c FTDFDADN ne 1 (Yes)'])
self.fields['FTDFDMAN'] = nacc.uds3.Field(name='FTDFDMAN', typename='Num', position=(512, 512), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4', '9'], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)'])
        self.fields['FTDFDMAO'] = nacc.uds3.Field(name='FTDFDMAO', typename='Char', position=(514, 573), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)', 'Blank if Question 2d, FTDFDMAN, ne 4 (Other)'])
        self.fields['FTDFDMAM'] = nacc.uds3.Field(name='FTDFDMAM', typename='Char', position=(575, 634), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)', 'Blank if Question 2d, FTDFDMAN = 9 (Unknown)'])
self.fields['FTDFDQU'] = nacc.uds3.Field(name='FTDFDQU', typename='Num', position=(636, 636), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 2 FTDFDGPT = 0 (No)'])
self.fields['FTDAMYPT'] = nacc.uds3.Field(name='FTDAMYPT', typename='Num', position=(638, 638), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDAMMO'] = nacc.uds3.Field(name='FTDAMMO', typename='Num', position=(640, 641), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
self.fields['FTDAMDY'] = nacc.uds3.Field(name='FTDAMDY', typename='Num', position=(643, 644), length=2, inclusive_range=(1, 31), allowable_values=['99'], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
self.fields['FTDAMYR'] = nacc.uds3.Field(name='FTDAMYR', typename='Num', position=(646, 649), length=4, inclusive_range=(2000, CURRENT_YEAR), allowable_values=[], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
self.fields['FTDAMDIC'] = nacc.uds3.Field(name='FTDAMDIC', typename='Num', position=(651, 651), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
self.fields['FTDAMDID'] = nacc.uds3.Field(name='FTDAMDID', typename='Char', position=(653, 712), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)', 'Blank if Question 3b, FTDAMDIC, ne 1 (Yes)'])
self.fields['FTDAMLIG'] = nacc.uds3.Field(name='FTDAMLIG', typename='Num', position=(714, 714), length=1, inclusive_range=(1, 3), allowable_values=['1', '2', '3', '9'], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
self.fields['FTDAMLIO'] = nacc.uds3.Field(name='FTDAMLIO', typename='Char', position=(716, 775), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)', 'Blank if Question 3c, FTDAMLIG, ne 3 (Other)'])
self.fields['FTDAMADN'] = nacc.uds3.Field(name='FTDAMADN', typename='Num', position=(777, 777), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
self.fields['FTDAMADV'] = nacc.uds3.Field(name='FTDAMADV', typename='Char', position=(779, 838), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)', 'Blank if Question 3d, FTDAMADN, ne 1 (Yes)'])
self.fields['FTDAMMAN'] = nacc.uds3.Field(name='FTDAMMAN', typename='Num', position=(840, 840), length=1, inclusive_range=(1, 4), allowable_values=['1', '2', '3', '4', '9'], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
self.fields['FTDAMMAO'] = nacc.uds3.Field(name='FTDAMMAO', typename='Char', position=(842, 901), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)', 'Blank if Question 3e FTDAMMAN ne 4 (Other)'])
self.fields['FTDAMMAM'] = nacc.uds3.Field(name='FTDAMMAM', typename='Char', position=(903, 962), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)', 'Blank if Question 3e FTDAMMAN = 9 (Unknown)'])
self.fields['FTDAMQU'] = nacc.uds3.Field(name='FTDAMQU', typename='Num', position=(964, 964), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 3 FTDAMYPT = 0 (No)'])
        self.fields['FTDOTHER'] = nacc.uds3.Field(name='FTDOTHER', typename='Num', position=(966, 966), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
        self.fields['FTDOTDOP'] = nacc.uds3.Field(name='FTDOTDOP', typename='Num', position=(968, 968), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 4 FTDOTHER = 0 (No)'])
        self.fields['FTDOTSER'] = nacc.uds3.Field(name='FTDOTSER', typename='Num', position=(970, 970), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 4 FTDOTHER = 0 (No)'])
        self.fields['FTDOTCHO'] = nacc.uds3.Field(name='FTDOTCHO', typename='Num', position=(972, 972), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 4 FTDOTHER = 0 (No)'])
        self.fields['FTDOTANO'] = nacc.uds3.Field(name='FTDOTANO', typename='Num', position=(974, 974), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 4 FTDOTHER = 0 (No)'])
        self.fields['FTDOTANS'] = nacc.uds3.Field(name='FTDOTANS', typename='Char', position=(976, 1035), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 4 FTDOTHER = 0 (No)', 'Blank if Question 4d FTDOTANO ne 1 (Yes)'])
class FormE3F(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['FTDIDIAG'] = nacc.uds3.Field(name='FTDIDIAG', typename='Num', position=(45, 45), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=[])
self.fields['FTDSMRIO'] = nacc.uds3.Field(name='FTDSMRIO', typename='Num', position=(47, 47), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)'])
self.fields['FTDMRIFA'] = nacc.uds3.Field(name='FTDMRIFA', typename='Num', position=(49, 49), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)'])
self.fields['FTDMRIRF'] = nacc.uds3.Field(name='FTDMRIRF', typename='Num', position=(51, 51), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRILF'] = nacc.uds3.Field(name='FTDMRILF', typename='Num', position=(53, 53), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRIRT'] = nacc.uds3.Field(name='FTDMRIRT', typename='Num', position=(55, 55), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRILT'] = nacc.uds3.Field(name='FTDMRILT', typename='Num', position=(57, 57), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRIRM'] = nacc.uds3.Field(name='FTDMRIRM', typename='Num', position=(59, 59), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRILM'] = nacc.uds3.Field(name='FTDMRILM', typename='Num', position=(61, 61), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRIRP'] = nacc.uds3.Field(name='FTDMRIRP', typename='Num', position=(63, 63), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRILP'] = nacc.uds3.Field(name='FTDMRILP', typename='Num', position=(65, 65), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRIRB'] = nacc.uds3.Field(name='FTDMRIRB', typename='Num', position=(67, 67), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRILB'] = nacc.uds3.Field(name='FTDMRILB', typename='Num', position=(69, 69), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRIOB'] = nacc.uds3.Field(name='FTDMRIOB', typename='Num', position=(71, 71), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)'])
self.fields['FTDMRIOS'] = nacc.uds3.Field(name='FTDMRIOS', typename='Char', position=(73, 132), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 2 FTDSMRIO = 0 (No)', 'Blank if Question 2a, FTDMRIFA, = 0 (No) or 9 (Unknown)', 'Blank if Question 2a11 FTDMRIOB ne 1 (Yes)'])
self.fields['FTDFDGPE'] = nacc.uds3.Field(name='FTDFDGPE', typename='Num', position=(134, 134), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)'])
        self.fields['FTDFDGFH'] = nacc.uds3.Field(name='FTDFDGFH', typename='Num', position=(136, 136), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)'])
        self.fields['FTDFDGRF'] = nacc.uds3.Field(name='FTDFDGRF', typename='Num', position=(138, 138), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGLF'] = nacc.uds3.Field(name='FTDFDGLF', typename='Num', position=(140, 140), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGRT'] = nacc.uds3.Field(name='FTDFDGRT', typename='Num', position=(142, 142), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGLT'] = nacc.uds3.Field(name='FTDFDGLT', typename='Num', position=(144, 144), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGRM'] = nacc.uds3.Field(name='FTDFDGRM', typename='Num', position=(146, 146), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGLM'] = nacc.uds3.Field(name='FTDFDGLM', typename='Num', position=(148, 148), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGRP'] = nacc.uds3.Field(name='FTDFDGRP', typename='Num', position=(150, 150), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGLP'] = nacc.uds3.Field(name='FTDFDGLP', typename='Num', position=(152, 152), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGRB'] = nacc.uds3.Field(name='FTDFDGRB', typename='Num', position=(154, 154), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGLB'] = nacc.uds3.Field(name='FTDFDGLB', typename='Num', position=(156, 156), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGOA'] = nacc.uds3.Field(name='FTDFDGOA', typename='Num', position=(158, 158), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)'])
        self.fields['FTDFDGOS'] = nacc.uds3.Field(name='FTDFDGOS', typename='Char', position=(160, 219), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 3 FTDFDGPE = 0 (No)', 'Blank if Question 3a FTDFDGFH = 0 (No) or 9 (Unknown)', 'Blank if Question 3a11, FTDFDGOA, ne 1 (Yes)'])
self.fields['FTDAMYP'] = nacc.uds3.Field(name='FTDAMYP', typename='Num', position=(221, 221), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)'])
self.fields['FTDAMYVI'] = nacc.uds3.Field(name='FTDAMYVI', typename='Num', position=(223, 223), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)'])
self.fields['FTDAMYRF'] = nacc.uds3.Field(name='FTDAMYRF', typename='Num', position=(225, 225), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYLF'] = nacc.uds3.Field(name='FTDAMYLF', typename='Num', position=(227, 227), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYRT'] = nacc.uds3.Field(name='FTDAMYRT', typename='Num', position=(229, 229), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYLT'] = nacc.uds3.Field(name='FTDAMYLT', typename='Num', position=(231, 231), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYRM'] = nacc.uds3.Field(name='FTDAMYRM', typename='Num', position=(233, 233), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYLM'] = nacc.uds3.Field(name='FTDAMYLM', typename='Num', position=(235, 235), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYRP'] = nacc.uds3.Field(name='FTDAMYRP', typename='Num', position=(237, 237), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYLP'] = nacc.uds3.Field(name='FTDAMYLP', typename='Num', position=(239, 239), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYRB'] = nacc.uds3.Field(name='FTDAMYRB', typename='Num', position=(241, 241), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYLB'] = nacc.uds3.Field(name='FTDAMYLB', typename='Num', position=(243, 243), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYOA'] = nacc.uds3.Field(name='FTDAMYOA', typename='Num', position=(245, 245), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDAMYOS'] = nacc.uds3.Field(name='FTDAMYOS', typename='Char', position=(247, 306), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 4 FTDAMYP = 0 (No)', 'Blank if Question 4a FTDAMYVI = 0 (No) or 9 (Unknown)', 'Blank if Question 4a11, FTDAMYOA, ne 1 (Yes)'])
self.fields['FTDCBFSP'] = nacc.uds3.Field(name='FTDCBFSP', typename='Num', position=(308, 308), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)'])
self.fields['FTDCBFVI'] = nacc.uds3.Field(name='FTDCBFVI', typename='Num', position=(310, 310), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)'])
self.fields['FTDCBFRF'] = nacc.uds3.Field(name='FTDCBFRF', typename='Num', position=(312, 312), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFLF'] = nacc.uds3.Field(name='FTDCBFLF', typename='Num', position=(314, 314), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFRT'] = nacc.uds3.Field(name='FTDCBFRT', typename='Num', position=(316, 316), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFLT'] = nacc.uds3.Field(name='FTDCBFLT', typename='Num', position=(318, 318), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFRM'] = nacc.uds3.Field(name='FTDCBFRM', typename='Num', position=(320, 320), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFLM'] = nacc.uds3.Field(name='FTDCBFLM', typename='Num', position=(322, 322), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFRP'] = nacc.uds3.Field(name='FTDCBFRP', typename='Num', position=(324, 324), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFLP'] = nacc.uds3.Field(name='FTDCBFLP', typename='Num', position=(326, 326), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFRB'] = nacc.uds3.Field(name='FTDCBFRB', typename='Num', position=(328, 328), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFLB'] = nacc.uds3.Field(name='FTDCBFLB', typename='Num', position=(330, 330), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFOA'] = nacc.uds3.Field(name='FTDCBFOA', typename='Num', position=(332, 332), length=1, inclusive_range=(0, 1), allowable_values=['0', '1', '9'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)'])
self.fields['FTDCBFOS'] = nacc.uds3.Field(name='FTDCBFOS', typename='Char', position=(334, 393), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 5 FTDCBFSP = 0 (No)', 'Blank if Question 5a FTDCBFVI = 0 (No) or 9 (Unknown)', 'Blank if Question 5a11, FTDCBFOA, ne 1 (Yes)'])
        self.fields['FTDOTHI'] = nacc.uds3.Field(name='FTDOTHI', typename='Num', position=(395, 395), length=1, inclusive_range=(0, 1), allowable_values=['0', '1'], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)'])
        self.fields['FTDOTHIS'] = nacc.uds3.Field(name='FTDOTHIS', typename='Char', position=(397, 456), length=60, inclusive_range=None, allowable_values=[], blanks=['Blank if Question 1 FTDIDIAG = 0 (No)', 'Blank if Question 6, FTDOTHI, = 0 (No)'])
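# --- Editor's sketch, not part of the original module ---
# The (position, length) pairs above look 1-indexed and column-inclusive
# (e.g. position=(160, 219) with length=60), so a fixed-width record line
# would be sliced as shown below. `extract_field` is a hypothetical helper
# added for illustration only, and it assumes a Field instance keeps its
# position tuple as an attribute; the real nacc.uds3 package may expose
# its own accessor instead.
def extract_field(line, field):
    """Return the raw text a Field occupies within a fixed-width line."""
    start, end = field.position
    return line[start - 1:end]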
|
[
"s.emerson@ufl.edu"
] |
s.emerson@ufl.edu
|
888533314b0d894a570b7d8f182cb6048b8f53d6
|
161fa0c3cec32aa48d11a3fdc15f78228465d1e4
|
/twitter_listening/urls.py
|
98987a1340691664e30531fda31574136f86e6c8
|
[] |
no_license
|
diogoeverson/Data-Sci-NLP-Python-TwitterListening
|
9248ecb62d45c5493b47b31e61ad794a853590f2
|
3d8e2438b9f039d4c8db5a76125812b46b04ddc9
|
refs/heads/master
| 2020-07-14T05:00:19.691106
| 2020-01-09T18:39:18
| 2020-01-09T18:39:18
| 205,244,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from django.urls import path
from . import views
urlpatterns = [
# ex: /polls/
path('', views.index, name='index'),
# ex: /polls/5/
# path('<str:id_str>/', views.detail, name='detail'),
]
|
[
"diogoe@sebraemg.com.br"
] |
diogoe@sebraemg.com.br
|
df86c139af6fab5c6c3400d6c1cd3ef1195631bb
|
1cbb97508b0658f57f99a4fd77c79150d2ef5282
|
/MFNotif/MFNotif/urls.py
|
51e8305d8c8edc35cb692c6ca9d1a0d3bd08ea9c
|
[] |
no_license
|
muralinair/MFTnotif
|
b669be717f42a488586eebce3c22805059576fac
|
f95960ca78df985ba489df8221304e81e4109ebe
|
refs/heads/master
| 2020-08-15T05:01:28.300162
| 2019-10-15T11:46:48
| 2019-10-15T11:49:08
| 215,284,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
"""MFNotif URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
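# --- Editor's sketch, not part of the original file ---
# The docstring above lists three routing styles; the commented lines below
# illustrate each one. `notifs` is a hypothetical app name used purely for
# illustration (this project's actual view modules are not shown here).
#
# from django.urls import include
# from notifs import views
# from notifs.views import Home
#
# urlpatterns += [
#     path('', views.home, name='home'),           # function view
#     path('home/', Home.as_view(), name='home'),  # class-based view
#     path('notifs/', include('notifs.urls')),     # nested URLconf
# ]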
|
[
"mura.lee@hotmail.com"
] |
mura.lee@hotmail.com
|
1a9df49ffd28b6f3b6895a4964d4bd70eb2b8d61
|
f5fb5e7c5916dee71691f84a48c4349f6135fb2f
|
/wqxwq/serializers.py
|
ad5fdea4cb3fb834badd6b645eb4eafad2e5c91c
|
[] |
no_license
|
heigeo/wqxwq
|
6ce925680e0997abfcb86a2da5e80e78f301b668
|
f4e224420befbea02c6eea89ffcac76c94c562b7
|
refs/heads/master
| 2021-01-19T23:44:05.759060
| 2017-04-21T20:38:33
| 2017-04-21T20:38:33
| 89,023,536
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,075
|
py
|
from wq.db.rest.serializers import LookupRelatedField
from wq.db.patterns import serializers as patterns
from vera.results import serializers as vera
from vera.series.serializers import EventSerializer, ReportSerializer
from rest_framework import serializers
from .models import Characteristic, ProjectParameter, Result
from django.db.models import Count
class WqxDomainField(LookupRelatedField):
default_error_messages = {
'does_not_exist': 'Domain value {value} not found.',
'invalid': 'Invalid value',
}
def to_internal_value(self, data):
try:
return self.model.objects.get_by_identifier(data)
except self.model.DoesNotExist:
self.fail('does_not_exist', value=data)
except (TypeError, ValueError):
self.fail('invalid')
class WqxDomainSerializer(patterns.IdentifiedModelSerializer):
pass
class WaterbodySerializer(patterns.IdentifiedModelSerializer):
icon = serializers.ReadOnlyField()
icon_url = serializers.ReadOnlyField()
class SiteSerializer(patterns.IdentifiedModelSerializer):
icon = serializers.ReadOnlyField()
icon_url = serializers.ReadOnlyField()
parameters = serializers.SerializerMethodField()
event_count = serializers.SerializerMethodField()
    def get_parameters(self, instance):
        # Skip the per-parameter summary for GeoJSON renderings, which only
        # need site geometry; the counts computed below are expensive.
        request = self.context.get('request')
        if request and request.path.endswith('geojson'):
            return []
type_ids = instance.eventresult_set.order_by(
'result_type_id'
).distinct(
'result_type_id'
).values_list('result_type_id', flat=True)
params = []
for param in Characteristic.objects.filter(pk__in=type_ids):
results = instance.eventresult_set.filter(
result_type=param
).order_by('event_date')
params.append({
'parameter_label': str(param),
'parameter_id': param.slug,
'count': results.count(),
'first': results.first().event_date,
'last': results.last().event_date,
})
return params
def get_event_count(self, instance):
return instance.event_set.count()
class Meta(patterns.IdentifiedModelSerializer.Meta):
html_list_exclude = ('identifiers', 'parameters')
class EventSerializer(EventSerializer):
comment = serializers.SerializerMethodField()
reports = serializers.SerializerMethodField()
def get_comment(self, instance):
comments = set(
report.comment
for report in instance.valid_reports
if report.comment
)
return "\n".join(sorted(comments))
def get_reports(self, instance):
return [{
'id': report.id,
'label': report.activity_id or report.id
} for report in instance.report_set.all()]
class ResultSerializer(vera.ResultSerializer):
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(
ResultSerializer, self
).build_relational_field(field_name, relation_info)
if field_class == LookupRelatedField:
field_class = WqxDomainField
return field_class, field_kwargs
class ReportSerializer(ReportSerializer):
results = ResultSerializer(many=True)
def get_wq_config(self):
conf = super().get_wq_config()
for field in conf['form']:
if field['name'] == 'results':
field['initial']['filter'] = {'projects': '{{project_id}}'}
return conf
class EventResultSerializer(vera.EventResultSerializer):
type = serializers.ReadOnlyField(source='event_type.slug')
tb = serializers.SerializerMethodField()
depth = serializers.ReadOnlyField(source='event_depth')
def get_tb(self, instance):
if instance.event_depth:
if instance.event_depth < 5:
return "top"
else:
return "bottom"
class Meta:
pandas_index = ['date', 'type', 'depth']
pandas_unstacked_header = ['site', 'tb', 'parameter', 'units']
pandas_scatter_coord = ['units', 'parameter']
pandas_scatter_header = ['site']
pandas_boxplot_group = 'site'
pandas_boxplot_date = 'date'
pandas_boxplot_header = ['units', 'parameter', 'tb', 'depth', 'type']
class ProjectParameterSerializer(patterns.AttachmentSerializer):
class Meta(patterns.AttachmentSerializer.Meta):
model = ProjectParameter
exclude = ('project',)
object_field = 'project'
wq_config = {
'initial': 3,
}
class ProjectSerializer(patterns.IdentifiedModelSerializer):
parameters = ProjectParameterSerializer(many=True, required=False)
_characteristic_defaults = {}
class CharacteristicSerializer(patterns.IdentifiedModelSerializer):
projects = serializers.SerializerMethodField()
default_speciations = serializers.SerializerMethodField()
default_units = serializers.SerializerMethodField()
default_methods = serializers.SerializerMethodField()
def get_default_speciations(self, instance):
return self.get_default_choices('speciation', instance)
def get_default_units(self, instance):
return self.get_default_choices('unit', instance)
def get_default_methods(self, instance):
return self.get_default_choices('method', instance)
    def get_default_choices(self, field, instance):
        # Cache, per characteristic, the values most commonly recorded for
        # the given field (speciation/unit/method), ordered by frequency,
        # in the module-level _characteristic_defaults dictionary.
        _characteristic_defaults.setdefault(field, {})
        defaults = _characteristic_defaults[field]
        if instance.pk not in defaults:
            ids = Result.objects.filter(
                type=instance
            ).values_list(field + '__slug').annotate(
                Count('id')
            ).order_by('-id__count')
            defaults[instance.pk] = [row[0] for row in ids]
        return defaults[instance.pk]
def get_projects(self, instance):
return [project.project.slug for project in instance.projects.all()]
|
[
"andrew@wq.io"
] |
andrew@wq.io
|
0a51ec040e936c8867f8cd6b3c33a3b9715b14b5
|
8e4a062696df5b34e366f8e5fc9b8855cfe20fad
|
/basic_part16_PriorityQueues.py
|
49c0019548a6a35f10ae388b78384d1b9ff39c9a
|
[] |
no_license
|
joon2332/algorithms
|
4714202d0b63b278998b7b9ac5ad07309ef7de0d
|
958d4db5ed335fcd6bbcecae8a27a63321d8785c
|
refs/heads/master
| 2022-01-19T06:44:24.608699
| 2019-07-29T16:19:50
| 2019-07-29T16:19:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,070
|
py
|
"""(16) Implementing the enqueue operation of a priority queue
Using the class DoublyLinkedList, the implementation of the doubly linked
list abstract data structure introduced in an earlier lecture, complete the
implementation of class PriorityQueue, the abstract data structure for a
priority queue.
The upper part of the code is the implementation of the classes Node and
DoublyLinkedList needed to use the doubly linked list.
Leave that part as is; only the blank inside the enqueue() method of class
PriorityQueue below needs to be filled in.
[Note] The function solution() has nothing to do with the implementation of
this class; it is included only because the problem needs it to run
correctly, so you may ignore it. Likewise, the message printed when you
press Run saying that the sample test case passed means nothing.
"""
class Node:
def __init__(self, item):
self.data = item
self.prev = None
self.next = None
class DoublyLinkedList:
def __init__(self):
self.nodeCount = 0
self.head = Node(None)
self.tail = Node(None)
self.head.prev = None
self.head.next = self.tail
self.tail.prev = self.head
self.tail.next = None
def __repr__(self):
if self.nodeCount == 0:
return 'LinkedList: empty'
s = ''
curr = self.head
while curr.next.next:
curr = curr.next
s += repr(curr.data)
if curr.next.next is not None:
s += ' -> '
return s
def getLength(self):
return self.nodeCount
def traverse(self):
result = []
curr = self.head
while curr.next.next:
curr = curr.next
result.append(curr.data)
return result
def reverse(self):
result = []
curr = self.tail
while curr.prev.prev:
curr = curr.prev
result.append(curr.data)
return result
def getAt(self, pos):
if pos < 0 or pos > self.nodeCount:
return None
if pos > self.nodeCount // 2:
i = 0
curr = self.tail
while i < self.nodeCount - pos + 1:
curr = curr.prev
i += 1
else:
i = 0
curr = self.head
while i < pos:
curr = curr.next
i += 1
return curr
def insertAfter(self, prev, newNode):
next = prev.next
newNode.prev = prev
newNode.next = next
prev.next = newNode
next.prev = newNode
self.nodeCount += 1
return True
def insertAt(self, pos, newNode):
if pos < 1 or pos > self.nodeCount + 1:
return False
prev = self.getAt(pos - 1)
return self.insertAfter(prev, newNode)
def popAfter(self, prev):
curr = prev.next
next = curr.next
prev.next = next
next.prev = prev
self.nodeCount -= 1
return curr.data
def popAt(self, pos):
if pos < 1 or pos > self.nodeCount:
return None
prev = self.getAt(pos - 1)
return self.popAfter(prev)
def concat(self, L):
self.tail.prev.next = L.head.next
L.head.next.prev = self.tail.prev
self.tail = L.tail
self.nodeCount += L.nodeCount
class PriorityQueue:
def __init__(self):
self.queue = DoublyLinkedList()
def size(self):
return self.queue.getLength()
def isEmpty(self):
return self.size() == 0
    def enqueue(self, x):
        newNode = Node(x)
        curr = self.queue.head
        # Walk past every element greater than x so the list stays sorted
        # in descending order from the head; dequeue() then pops the
        # smallest element from the tail.
        while curr.next.data is not None and x < curr.next.data:
            curr = curr.next
        self.queue.insertAfter(curr, newNode)
def dequeue(self):
return self.queue.popAt(self.queue.getLength())
def peek(self):
return self.queue.getAt(self.queue.getLength()).data
def solution(x):
return 0
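# --- Editor's sketch, not part of the original exercise ---
# Minimal self-check of the completed enqueue(): items are kept in
# descending order from the head, and dequeue() pops from the tail, so
# values come back smallest first.
if __name__ == '__main__':
    pq = PriorityQueue()
    for v in (6, 3, 9, 1):
        pq.enqueue(v)
    print([pq.dequeue() for _ in range(pq.size())])  # expected: [1, 3, 6, 9]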
|
[
"noreply@github.com"
] |
joon2332.noreply@github.com
|
59138a452aa6f52dddc71276fa460e4d147e0f67
|
528eeeca0c60cbcd03d1a436558b493319c5e5b3
|
/guias_cacao/forms.py
|
a38258a41fd8fa6095ad70945b3294ef780c900b
|
[
"MIT"
] |
permissive
|
CARocha/ciat_plataforma
|
9ce40d95425a6e64a79c6f39ff6c016563c93761
|
76c11b835aae3d0c8153d97e31b7672f8ac78582
|
refs/heads/master
| 2021-01-09T07:03:57.853079
| 2018-11-20T04:48:04
| 2018-11-20T04:48:04
| 28,819,215
| 0
| 0
| null | 2015-01-05T15:48:01
| 2015-01-05T15:48:01
| null |
UTF-8
|
Python
| false
| false
| 6,346
|
py
|
# -*- coding: utf-8 -*-
from django import forms
from lookups import ProductorLookup, TecnicoLookup
import selectable.forms as selectable
from .models import FichaSombra, FichaPoda, FichaPlaga, FichaPiso, FichaSuelo, FichaVivero, FichaCosecha, FichaSaf, FichaCierre, Ciclos
from mapeo.models import Persona, Organizaciones
from comunicacion.lugar.models import Pais, Departamento, Municipio, Comunidad
class ProductorSombraAdminForm(forms.ModelForm):
class Meta(object):
model = FichaSombra
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class TecnicoAdminForm(forms.ModelForm):
class Meta(object):
model = FichaSombra
widgets = {
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorPodaAdminForm(forms.ModelForm):
class Meta(object):
model = FichaPoda
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorPlagaAdminForm(forms.ModelForm):
class Meta(object):
model = FichaPlaga
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorPisoAdminForm(forms.ModelForm):
class Meta(object):
model = FichaPiso
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorSueloAdminForm(forms.ModelForm):
class Meta(object):
model = FichaSuelo
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorViveroAdminForm(forms.ModelForm):
class Meta(object):
model = FichaVivero
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorCosechaAdminForm(forms.ModelForm):
class Meta(object):
model = FichaCosecha
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorSafAdminForm(forms.ModelForm):
class Meta(object):
model = FichaSaf
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
class ProductorCierreAdminForm(forms.ModelForm):
class Meta(object):
model = FichaCierre
widgets = {
'productor': selectable.AutoCompleteSelectWidget(lookup_class=ProductorLookup),
'tecnico': selectable.AutoCompleteSelectWidget(lookup_class=TecnicoLookup),
}
def fecha_choice():
    # Collect the distinct years in which any visit ficha was recorded.
    years = []
    ficha_models = (FichaSombra, FichaPoda, FichaPlaga, FichaPiso,
                    FichaCosecha, FichaCierre, FichaSaf, FichaVivero)
    for model in ficha_models:
        for en in model.objects.order_by('fecha_visita').values_list('fecha_visita', flat=True):
            years.append((en.year, en.year))
    return list(sorted(set(years)))
def ciclo_choice():
ciclos = []
for en in Ciclos.objects.all():
ciclos.append((en.ciclo,en.ciclo))
return list(sorted(set(ciclos)))
CHOICE_SEXO1 = (
('', '-------'),
(1, 'Hombre'),
(2, 'Mujer')
)
CHOICE_TIPOLOGIA1 = (('', '-------------------'),
(1, 'Pequeño campesino de montaña'),
(2, 'Pequeño campesino diversificado'),
(3, 'Finquero cacaotero'),
(4, 'Finquero ganadero cacaotero'),
(5, 'Finquero cafetalero'),
(6, 'Finquero ganadero cafetalero'),
(7, 'Finquero ganadero'),
)
class ConsultaSombraForm(forms.Form):
#fecha = forms.MultipleChoiceField(choices=fecha_choice(), label="Años", required=False)
ciclo = forms.MultipleChoiceField(choices=ciclo_choice(), required=False)
productor = forms.CharField(max_length=250, required=False)
organizacion = forms.ModelChoiceField(queryset=Organizaciones.objects.all(), required=False)
pais = forms.ModelChoiceField(queryset=Pais.objects.all(), required=False)
departamento = forms.ModelChoiceField(queryset=Departamento.objects.all(), required=False)
municipio = forms.ModelChoiceField(queryset=Municipio.objects.all(), required=False)
comunidad = forms.ModelChoiceField(queryset=Comunidad.objects.all(), required=False)
sexo = forms.ChoiceField(choices=CHOICE_SEXO1, required=False)
tipologia = forms.ChoiceField(choices=CHOICE_TIPOLOGIA1, required=False)
class FormularioColabora(forms.Form):
nombre = forms.CharField(max_length=250,required=True)
correo = forms.EmailField(required=True)
asunto = forms.CharField(required=True,widget=forms.Textarea)
|
[
"crocha09.09@gmail.com"
] |
crocha09.09@gmail.com
|
6d7e1783aa3b37265fac8b94136080d6dbc363c1
|
67c630edd5818ed46df1571d6a9cf842ebec6d52
|
/programming_1/test.py
|
d8530753fc7759b049c84aeba8e65118077dfd0e
|
[] |
no_license
|
TanHaus/Shopee-CodeLeague-2020
|
134af398391dd5149ee3531e0672c6d0539695d3
|
e0c624cc4a1d024f973a85691df77d451c1d2af2
|
refs/heads/master
| 2022-11-24T04:00:35.668725
| 2020-08-09T05:34:38
| 2020-08-09T05:34:38
| 271,984,151
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
from script import something
## Test cases
|
[
"gau.nernst@yahoo.com.sg"
] |
gau.nernst@yahoo.com.sg
|
ef2ae58e1ca06fb83af4b8bff9f86384c38c4d9d
|
c581df02f90070fca266034cc33606fc72a899d8
|
/setup_pyi.py
|
b6ce4efd1da4718f495cc7c13a11b403d49c9f1f
|
[
"Apache-2.0"
] |
permissive
|
elnino217/lightnovel-crawler
|
cf9e3da411b1fd86c60a3758feff5cde000d1ae5
|
aac875912b63940c40bedabba2702382726dd1fb
|
refs/heads/master
| 2020-07-06T14:19:28.292565
| 2019-08-16T16:21:46
| 2019-08-16T16:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,653
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import shlex
import shutil
import sys
import platform
from PyInstaller import __main__ as pyi
from setuptools.config import read_configuration
dir_name = os.path.abspath(os.path.dirname(__file__))
output = os.path.join(dir_name, 'windows')
def setup_command():
cur_dir = '/'.join(dir_name.split(os.sep))
command = 'pyinstaller -y '
command += '--clean '
command += '-F ' # onefile
command += '-n "lncrawl" '
command += '-i "%s/res/lncrawl.ico" ' % cur_dir
config = read_configuration('setup.cfg')
sep = ';' if platform.system() == 'Windows' else ':'
for k, paths in config['options']['package_data'].items():
for v in paths:
src = os.path.normpath('/'.join([cur_dir, k, v]))
src = '/'.join(src.split(os.sep))
dst = os.path.normpath('/'.join([k, v]))
dst = os.path.dirname(dst)
dst = '/'.join(dst.split(os.sep))
dst = (dst + '/') if dst else '.'
command += '--add-data "%s%s%s" ' % (src, sep, dst)
# end for
# end for
command += '"%s/__main__.py" ' % cur_dir
print(command)
print()
extra = ['--distpath', os.path.join(dir_name, 'dist')]
extra += ['--workpath', os.path.join(output, 'build')]
extra += ['--specpath', output]
sys.argv = shlex.split(command) + extra
# end def
def package():
shutil.rmtree(output, ignore_errors=True)
os.makedirs(output, exist_ok=True)
setup_command()
pyi.run()
shutil.rmtree(output, ignore_errors=True)
# end def
if __name__ == '__main__':
package()
# end if
|
[
"dipu.sudipta@gmail.com"
] |
dipu.sudipta@gmail.com
|
3d33041c42490353364b13e0157be71f63979875
|
9d70af19ad421d2c2377b9c200ac3001095d90c6
|
/modoboa/limits/handlers.py
|
29bb1b7769434aaae8d83d57782916eefed9dbcc
|
[
"ISC"
] |
permissive
|
maniacs-oss/modoboa
|
8ca503a9d8733cac9e402efd36def530f2a85ea9
|
ed676850d2cf13729e197fc81d04748c2ed2c452
|
refs/heads/master
| 2020-12-30T13:45:51.418420
| 2017-05-05T11:50:46
| 2017-05-05T11:50:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,589
|
py
|
"""Django signal handlers for limits."""
from django.db.models import signals
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from modoboa.admin import models as admin_models
from modoboa.admin import signals as admin_signals
from modoboa.core import models as core_models
from modoboa.core import signals as core_signals
from modoboa.lib import signals as lib_signals
from modoboa.lib import permissions
from modoboa.parameters import tools as param_tools
from . import forms
from . import lib
from . import models
from . import utils
@receiver(core_signals.can_create_object)
def check_object_limit(sender, context, **kwargs):
"""Check if user can create a new object."""
if context.__class__.__name__ == "User":
if not param_tools.get_global_parameter("enable_admin_limits"):
return
if context.is_superuser:
return True
ct = ContentType.objects.get_for_model(kwargs.get("klass"))
limits = context.userobjectlimit_set.filter(content_type=ct)
elif context.__class__.__name__ == "Domain":
if not param_tools.get_global_parameter("enable_domain_limits"):
return
object_type = kwargs.get("object_type")
limits = context.domainobjectlimit_set.filter(name=object_type)
else:
raise NotImplementedError
for limit in limits:
if limit.is_exceeded(kwargs.get("count", 1), kwargs.get("instance")):
raise lib.LimitReached(limit)
@receiver(signals.post_save, sender=core_models.User)
def create_user_limits(sender, instance, **kwargs):
"""Create limits for new user."""
if not kwargs.get("created"):
return
request = lib_signals.get_request()
creator = request.user if request else None
global_params = dict(param_tools.get_global_parameters("limits"))
for name, definition in utils.get_user_limit_templates():
ct = ContentType.objects.get_by_natural_key(
*definition["content_type"].split("."))
max_value = 0
# creator can be None if user was created by a factory
if not creator or creator.is_superuser:
max_value = global_params["deflt_user_{0}_limit".format(name)]
models.UserObjectLimit.objects.create(
user=instance, name=name, content_type=ct, max_value=max_value)
@receiver(signals.post_save, sender=admin_models.Domain)
def create_domain_limits(sender, instance, **kwargs):
"""Create limits for new domain."""
if not kwargs.get("created"):
return
global_params = dict(param_tools.get_global_parameters("limits"))
for name, definition in utils.get_domain_limit_templates():
max_value = global_params["deflt_domain_{0}_limit".format(name)]
models.DomainObjectLimit.objects.create(
domain=instance, name=name, max_value=max_value)
@receiver(admin_signals.extra_domain_dashboard_widgets)
def display_domain_limits(sender, user, domain, **kwargs):
"""Display resources usage for domain."""
if not param_tools.get_global_parameter("enable_domain_limits"):
return []
return [{
"column": "right",
"template": "limits/resources_widget.html",
"context": {
"limits": domain.domainobjectlimit_set.all()
}
}]
@receiver(admin_signals.extra_account_dashboard_widgets)
def display_admin_limits(sender, user, account, **kwargs):
"""Display resources usage for admin."""
condition = (
param_tools.get_global_parameter("enable_admin_limits") and
account.role in ["DomainAdmins", "Resellers"]
)
if not condition:
return []
return [{
"column": "right",
"template": "limits/resources_widget.html",
"context": {
"limits": (
account.userobjectlimit_set.select_related("content_type"))
}
}]
@receiver(admin_signals.extra_account_forms)
def extra_account_form(sender, user, account=None, **kwargs):
"""Add limits form."""
if not param_tools.get_global_parameter("enable_admin_limits"):
return []
if user.role not in ["SuperAdmins", "Resellers"]:
return []
condition = (
(account is not None and
account.role not in ["Resellers", "DomainAdmins"]) or
user == account
)
if condition:
return []
return [{
"id": "resources", "title": _("Resources"),
"cls": forms.ResourcePoolForm
}]
@receiver(admin_signals.check_extra_account_form)
def check_form_access(sender, account, form, **kwargs):
"""Check if form must be used for account."""
if form["id"] != "resources":
return True
if account.role not in ["Resellers", "DomainAdmins"]:
return False
return True
@receiver(admin_signals.get_account_form_instances)
def fill_account_instances(sender, user, account, **kwargs):
"""Set account instance for resources form."""
condition = (
not param_tools.get_global_parameter("enable_admin_limits") or
(not user.is_superuser and user.role != "Resellers") or
account.role not in ["Resellers", "DomainAdmins"]
)
if condition:
return {}
return {"resources": account}
@receiver(admin_signals.extra_domain_forms)
def extra_domain_form(sender, user, domain, **kwargs):
"""Include domain limits form."""
if not param_tools.get_global_parameter("enable_domain_limits"):
return []
if not user.has_perm("admin.change_domain"):
return []
return [{
"id": "resources", "title": _("Resources"),
"cls": forms.DomainLimitsForm
}]
@receiver(admin_signals.get_domain_form_instances)
def fill_domain_instances(sender, user, domain, **kwargs):
"""Set domain instance for resources form."""
condition = (
not param_tools.get_global_parameter("enable_domain_limits") or
not user.has_perm("admin.change_domain")
)
if condition:
return {}
return {"resources": domain}
@receiver(core_signals.account_deleted)
def move_resource(sender, user, **kwargs):
"""Move remaining resource to another user."""
owner = permissions.get_object_owner(user)
if owner.is_superuser or owner.role != "Resellers":
return
utils.move_pool_resource(owner, user)
@receiver(core_signals.user_can_set_role)
def user_can_set_role(sender, user, role, account=None, **kwargs):
"""Check if the user can still set this role.
The only interesting case concerns resellers defining new domain
administrators. We want to check if they are allowed to do this
operation before any modification is made to :keyword:`account`.
:param ``User`` user: connected user
:param str role: role to check
:param ``User`` account: account modified (None on creation)
"""
condition = (
not param_tools.get_global_parameter("enable_admin_limits") or
role != "DomainAdmins")
if condition:
return True
lname = "domain_admins"
condition = (
user.is_superuser or
not user.userobjectlimit_set.get(name=lname).is_exceeded()
)
if condition:
return True
if account is not None and account.role == role:
return True
return False
@receiver(core_signals.extra_static_content)
def get_static_content(sender, caller, st_type, user, **kwargs):
"""Add extra static content."""
condition = (
not param_tools.get_global_parameter("enable_admin_limits") or
caller not in ["domains", "identities"] or
user.role in ["SuperAdmins", "SimpleUsers"]
)
if condition:
return ""
if st_type == "css":
return """<style>
.resource {
padding: 10px 15px;
}
.resource .progress {
margin-bottom: 0px;
}
.resource .progress .bar {
color: #000000;
}
</style>
"""
return """
<script type="text/javascript">
$(document).ready(function() {
$(".progress").tooltip();
});
</script>
"""
@receiver(admin_signals.extra_admin_content)
def display_pool_usage(sender, user, location, currentpage, **kwargs):
"""Display current usage."""
condition = (
not param_tools.get_global_parameter("enable_admin_limits") or
location != "leftcol" or user.is_superuser)
if condition:
return []
if currentpage == "identities":
names = ["mailboxes", "mailbox_aliases"]
if user.has_perm("admin.add_domain"):
names += ["domain_admins"]
else:
exceptions = ["domain_admins", "mailboxes", "mailbox_aliases"]
names = [
name for name, tpl in utils.get_user_limit_templates()
if name not in exceptions and
("required_role" not in tpl or
tpl["required_role"] == user.role)
]
limits = user.userobjectlimit_set.filter(name__in=names, max_value__gt=0)
if len(limits) == 0:
return []
return [
render_to_string("limits/poolusage.html",
dict(limits=limits))
]
@receiver(core_signals.account_role_changed)
def move_pool_resource(sender, account, role, **kwargs):
"""Move remaining resource to owner if needed."""
owner = permissions.get_object_owner(account)
if not owner or owner.is_superuser or owner.role != "Resellers":
# Domain admins can't change the role so nothing to check.
return
if role not in ["DomainAdmins", "Resellers"]:
utils.move_pool_resource(owner, account)
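
# Hedged usage sketch (not part of the original module): firing the
# can_create_object signal that check_object_limit above receives. Using
# Mailbox as the klass and "mailboxes" as the object_type is an
# illustrative assumption.
def create_mailbox_checked(user, domain):
    try:
        # Per-admin quota: the receiver reads `context` and `klass`.
        core_signals.can_create_object.send(
            sender="limits", context=user, klass=admin_models.Mailbox)
        # Per-domain quota: the receiver reads `context` and `object_type`.
        core_signals.can_create_object.send(
            sender="limits", context=domain, object_type="mailboxes")
    except lib.LimitReached as exc:
        return str(exc)
    return None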
|
[
"tonio@ngyn.org"
] |
tonio@ngyn.org
|
5094283d1fca877bed8c4de315752f0d0b712ca8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_reviling.py
|
522a3e3cd1f0f8ada6c930e73f4dea4c008b3e8c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
# class header
class _REVILING():
    def __init__(self,):
        self.name = "REVILING"
        # Assumed: the original assigned the bare (undefined) name `revile`;
        # a list holding the base word mirrors self.basic below.
        self.definitions = ['revile']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['revile']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
6be66384b3f460b01a19e0d74416fecba6aa10c7
|
0b01e2d73c4bc986a61b4db73d94130f587f34ca
|
/Task-2.py
|
045b6cae1d13823c048adffa220229f8cbbeae02
|
[] |
no_license
|
fanashole/prebootcamp-coding-exercises
|
cc6b0ac2ada78f4fb7ba3e749a6bb2047e49b85e
|
f2973495ed864e5a130eb13e3879ef3deab58fd0
|
refs/heads/master
| 2023-01-19T11:34:47.837304
| 2020-11-19T10:53:12
| 2020-11-19T10:53:12
| 312,293,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
#Task two
x = 1 + 1 * 2
y = (1 + 1) * 2
z = 1 + (1 * 2)
a = 1 + 1 * 2 / 2
b = (1 + 1 * 2) / 2
#Printing out the above variables
print(x)
print(y)
print(z)
print(a)
print(b)
'''
Output:
3
4
3
2.0
1.5
'''
|
[
"fanashole@gmail.com"
] |
fanashole@gmail.com
|
0d10323679ade904d20729007426576d64a8f32b
|
9f140439c874ae78e981a1e36231d2e9d238c55e
|
/airi_pic/airi_pic/pipelines.py
|
53238e942e82123a4d6ca8f75a608257bc8abe4e
|
[] |
no_license
|
HenryHYH/PythonDemo
|
13bf3dde41bd04f30234ef3f88a670c87570c80d
|
a24870ffcd627b3ae6ee810c7430f88603eef974
|
refs/heads/master
| 2020-12-02T09:59:18.185097
| 2017-08-16T03:20:07
| 2017-08-16T03:20:07
| 96,671,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy import Request
class AiriPicPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for image_url in item['airi_image_url']:
yield Request(image_url)
    def item_completed(self, results, item, info):
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem('Images failed to download: %s' % image_paths)
        # Pipelines must return the item so later pipeline stages receive it.
        return item
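
# Hedged settings sketch (not part of this file): the pipeline only runs once
# it is registered, as the header comment notes. The module path matches this
# project's layout; the storage directory is an assumption.
#
#   # settings.py
#   ITEM_PIPELINES = {'airi_pic.pipelines.AiriPicPipeline': 300}
#   IMAGES_STORE = './downloaded_images'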
|
[
"henry.hyh@gmail.com"
] |
henry.hyh@gmail.com
|
4e2346f05a47a524aa6a9565cdfcc94b4a206031
|
0a60d0db697b5b4ae7f4e49a96488d45b4c24ee4
|
/lab3/lab3_1.py
|
df50ce1e0fa6a3b1b66e7f365e31e7adf558ed20
|
[] |
no_license
|
ilya89099/NumericalMethods
|
2eb31000074271cb795757a785ca018c2b165304
|
1f0ceb434b8a85822dd51a0b8fcbe858106e058d
|
refs/heads/master
| 2023-06-14T19:19:15.169671
| 2021-07-06T23:12:44
| 2021-07-06T23:12:44
| 383,618,915
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,313
|
py
|
import numpy as np
import math
import matplotlib.pyplot as plt
def f(x):
return np.arccos(x) + x
points1 = np.array([-0.4, -0.1, 0.2, 0.5])
points2 = np.array([-0.4, 0, 0.2, 0.5])
def Lagrange(points, values=None, function=None):
if values is None and function is None:
raise ValueError("function needs function values or function itself")
if values is None:
values = function(points)
if len(values) != len(points):
raise ValueError("length is wrong")
values = values[:]
points = points[:]
n = len(values)
return lambda x: np.sum(
[values[i] * np.prod([1 if j == i else ((x - points[j]) / (points[i] - points[j])) for j in range(0, n)]) for i
in range(0, n)], axis = 0)
def divided_diff(points, function):
if (len(points) == 1):
return function(points[0])
if (len(points) == 2):
return (function(points[0]) - function(points[1])) / (points[0] - points[1])
n = len(points)
return (divided_diff(points[:n-1], function) - divided_diff(points[1:], function)) / (points[0] - points[n - 1])
def Newton(points, function):
points = points[:]
n = len(points)
cur_arr = []
divided_sums_counted = []
for i in range(0, n):
cur_arr.append(points[i])
divided_sums_counted.append(divided_diff(cur_arr, function))
def result(x):
cur_prod = 1
res_sum = 0
for i in range(0, n):
res_sum += divided_sums_counted[i] * cur_prod
cur_prod *= (x - points[i])
return res_sum
return result
interval = np.arange(-1, 1, 0.1)
plt.figure()
plt.subplot(221)
plt.plot(interval, [Lagrange(points1, function=f)(x) for x in interval])
plt.plot(interval, f(interval))
plt.title("Lagrange 1")
plt.subplot(222)
plt.plot(interval, [Lagrange(points2, function=f)(x) for x in interval])
plt.plot(interval, f(interval))
plt.title("Lagrange 2")
plt.subplot(223)
plt.plot(interval, [Newton(points1, function=f)(x) for x in interval])
plt.plot(interval, f(interval))
plt.title("Newton 1")
plt.subplot(224)
plt.plot(interval, [Newton(points2, function=f)(x) for x in interval])
plt.plot(interval, f(interval))
plt.title("Newton 2")
plt.show()
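
# Hedged sanity check (not in the original script): both constructions build
# the same cubic interpolant, so they should agree away from the nodes too.
x0 = 0.35  # arbitrary point inside [-0.4, 0.5]
lag = Lagrange(points1, function=f)(x0)
newt = Newton(points1, function=f)(x0)
print("Lagrange:", lag, "Newton:", newt, "diff:", abs(lag - newt))
assert abs(lag - newt) < 1e-9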
|
[
"ilya.semenov89099@yandex.ru"
] |
ilya.semenov89099@yandex.ru
|
dc89e9589026fb903b3ee0b26aaab55ee565f472
|
cad0ccba8d368819deeb9387966a74c4e67eb523
|
/movies/migrations/0001_initial.py
|
9c46fc8289d4aafd64dce6cc12a769d9008eff28
|
[] |
no_license
|
hellogautam/moviedb-graphql
|
5295e3b6313bbec8535f3f04b2e62b374e3addfc
|
82368c2423e33d3735112b6c75087b5a11a449a7
|
refs/heads/master
| 2023-01-05T10:29:05.680780
| 2020-10-04T17:24:23
| 2020-10-04T17:24:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
# Generated by Django 3.1 on 2020-08-16 16:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, verbose_name='Movie Id')),
('name', models.CharField(max_length=100, verbose_name='Movie Name')),
('avg_rating', models.FloatField(verbose_name='Average Rating')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50, verbose_name='First Name')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='updated at')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
],
),
migrations.CreateModel(
name='UserList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codename', models.CharField(max_length=50, unique=True, verbose_name='Code Name')),
('movie_list', models.ManyToManyField(to='movies.Movie')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.user')),
],
),
]
|
[
"gautamchawla1999@gmail.com"
] |
gautamchawla1999@gmail.com
|
15c561a0d78f1f0e2c901b0e5aff9b97dc36dc26
|
82c1327f68180975b76238127edf40bb47410c40
|
/tomo_challenge/classifiers/GPz_classifier.py
|
47d370a75e4bdb42eeda7ea4fabf78aa971e54a5
|
[] |
no_license
|
pwhatfield/tomo_challenge
|
ff3e251d083d19f0632ff233ca2c158d46befd03
|
09a16fa8ff0826b74302c0c40855aa29b4b00646
|
refs/heads/master
| 2022-12-09T18:40:42.430509
| 2020-08-31T21:53:41
| 2020-08-31T21:53:41
| 281,980,556
| 0
| 0
| null | 2020-07-23T14:53:46
| 2020-07-23T14:53:45
| null |
UTF-8
|
Python
| false
| false
| 5,102
|
py
|
""" This is an example tomographic bin generator using the code GPz
Every classifier module needs to:
- have construction of the type
__init__ (self, bands, options) (see examples below)
- implement two functions:
train (self, training_data,training_z)
apply (self, data).
- define valid_options class varible.
See Classifier Documentation below.
"""
# The only extra code it needs is GPz, which can be accessed at
#pip3 install --upgrade 'git+https://github.com/OxfordML/GPz#egg=GPz'
# This is unfortunately only in python2.7 at the moment...
# It also calls two python2 scripts (GPz is in python2), classifier_train_GPz.py and classifier_predict_GPz.py
# Train requires file_prefix to tell it where you put these files
## Options:
# bins - number of bins
# edge_strictness - how close to the edges of the redshift bins relative to the uncertainty on the redshift permitted (higher is stricter)
# extrapolate_threshold - how much extrapolation is permitted (lower is stricter). This is probably not hugely valuable here, but might be if the test and training data were different.
from .base import Tomographer
import numpy as np
import subprocess
class GPzBinning(Tomographer):
""" GPz Classifier """
# valid parameter -- see below
valid_options = ['bins','edge_strictness','extrapolate_threshold']
# this settings means arrays will be sent to train and apply instead
# of dictionaries
wants_arrays = True
def __init__ (self, bands, options):
"""Constructor
Parameters:
-----------
bands: str
            string containing valid bands, like 'riz' or 'griz'
options: dict
options come through here. Valid keys are listed as valid_options
class variable.
Note:
-----
        Valid options are:
        'bins' - number of tomographic bins
        'edge_strictness' [default=0.0] - essentially how big error bars can be compared to the bin edges
        'extrapolate_threshold' [default=1.0] - essentially how much extrapolation should be allowed
"""
self.bands = bands
self.opt = options
def train (self, training_data, training_z,file_prefix):
X_train = training_data
n_train,d = X_train.shape
np.savetxt('train_data.csv',training_data)
np.savetxt('training_z.csv',training_z)
subprocess.run(["python2", file_prefix+"classifier_train_GPz.py"])
# Sort out bin edges
n_bin = self.opt['bins']
print("Finding bins for training data")
# Now put the training data into redshift bins.
# Use zero so that the one object with minimum
# z in the whole survey will be in the lowest bin
training_bin = np.zeros(training_z.size)
# Find the edges that split the redshifts into n_z bins of
# equal number counts in each
p = np.linspace(0, 100, n_bin + 1)
z_edges = np.percentile(training_z, p)
# Now find all the objects in each of these bins
for i in range(n_bin):
z_low = z_edges[i]
z_high = z_edges[i + 1]
training_bin[(training_z > z_low) & (training_z < z_high)] = i
# for speed, cut down to 5% of original size
cut = np.random.uniform(0, 1, training_z.size) < 0.05
training_bin = training_bin[cut]
training_data = training_data[cut]
#self.photoz_predictor = model
self.z_edges = z_edges
def apply (self, testing_data,file_prefix):
"""Applies training to the data.
Parameters:
-----------
Data: numpy array, size Ngalaxes x Nbands
testing data, each row is a galaxy, each column is a band as per
band defined above
Returns:
tomographic_selections: numpy array, int, size Ngalaxies
tomographic selection for galaxies return as bin number for
each galaxy.
"""
# Save data
np.savetxt('test_data.csv',testing_data)
# Run GPz for predictions
subprocess.run(["python2", file_prefix+"classifier_predict_GPz.py"])
data= np.genfromtxt('prediction_data.csv')
mu=data[:,0]
sigma=data[:,1]
modelV=data[:,2]
noiseV=data[:,3]
z_edges=self.z_edges
n_bin = self.opt['bins']
edge_strictness=self.opt['edge_strictness']
extrapolate_threshold=self.opt['extrapolate_threshold']
tomo_bin = 0*mu
for i in range(len(mu)):
tomo_bin[i]=-1
for j in range(n_bin):
if mu[i]-edge_strictness*sigma[i]**0.5>z_edges[j] and mu[i]+edge_strictness*sigma[i]**0.5<z_edges[j+1]:
tomo_bin[i]=j
if modelV[i]>extrapolate_threshold*sigma[i]:
tomo_bin[i]=-1
return tomo_bin
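
# Hedged usage sketch (not part of the original module): wiring the options
# documented above. The band string and file_prefix are illustrative, and
# actually running this still requires the python2 GPz helper scripts named
# in the header comment.
def _example_run(training_data, training_z, testing_data, file_prefix='./'):
    binner = GPzBinning('griz', {'bins': 4,
                                 'edge_strictness': 0.0,
                                 'extrapolate_threshold': 1.0})
    binner.train(training_data, training_z, file_prefix)
    return binner.apply(testing_data, file_prefix)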
|
[
"noreply@github.com"
] |
pwhatfield.noreply@github.com
|
aa09097d0e6f875e6dda633ffd0e23f331e9947a
|
65d1845bd0d675d2d03362475f5c3aafb0711ebc
|
/coursemate/forms.py
|
9b4191bf2c4ba9f1747ab55e9ca401a08302bf8d
|
[] |
no_license
|
Joshua-Ramos/groupProject
|
94b77926e2f61f62a6cddd61e0a58ee6a65ee0cc
|
cf53f77bcb6f7e913990a74faa70d55ebcdf13cb
|
refs/heads/master
| 2021-01-22T08:28:34.462430
| 2017-05-07T23:52:25
| 2017-05-07T23:52:25
| 81,901,127
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
from flask_wtf import Form
from flask_wtf.file import FileField
class UploadForm(Form):
example = FileField()
|
[
"eeshugerman@gmail.com"
] |
eeshugerman@gmail.com
|
0f8b5221c0d02b5eb1e407936ab54922dd259ecc
|
c0f808504dd3d7fd27c39f1503fbc14c1d37bf9f
|
/sources/scipy-scipy-414c1ab/scipy/optimize/setup.py
|
3beb5fb651785d6c52dbc725e68648b10b74fa55
|
[] |
no_license
|
georgiee/lip-sync-lpc
|
7662102d4715e4985c693b316a02d11026ffb117
|
e931cc14fe4e741edabd12471713bf84d53a4250
|
refs/heads/master
| 2018-09-16T08:47:26.368491
| 2018-06-05T17:01:08
| 2018-06-05T17:01:08
| 5,779,592
| 17
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
#!/usr/bin/env python
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('optimize',parent_package, top_path)
config.add_library('minpack',sources=[join('minpack','*f')])
config.add_extension('_minpack',
sources=['_minpackmodule.c'],
libraries=['minpack'],
depends=["minpack.h","__minpack.h"])
config.add_library('rootfind',
sources=[join('Zeros','*.c')],
headers=[join('Zeros','zeros.h')])
config.add_extension('_zeros',
sources=['zeros.c'],
libraries=['rootfind'])
lapack = get_info('lapack_opt')
sources=['lbfgsb.pyf', 'lbfgsb.f', 'linpack.f', 'timer.f']
config.add_extension('_lbfgsb',
sources=[join('lbfgsb',x) for x in sources],
**lapack)
sources=['moduleTNC.c','tnc.c']
config.add_extension('moduleTNC',
sources=[join('tnc',x) for x in sources],
depends=[join('tnc','tnc.h')])
config.add_extension('_cobyla',
sources=[join('cobyla',x) for x in ['cobyla.pyf',
'cobyla2.f',
'trstlp.f']])
sources = ['minpack2.pyf', 'dcsrch.f', 'dcstep.f']
config.add_extension('minpack2',
sources=[join('minpack2',x) for x in sources])
sources = ['slsqp.pyf', 'slsqp_optmz.f']
config.add_extension('_slsqp', sources=[join('slsqp', x) for x in sources])
config.add_extension('_nnls', sources=[join('nnls', x) \
for x in ["nnls.f","nnls.pyf"]])
config.add_data_dir('tests')
config.add_data_dir('benchmarks')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
[
"georgios@kaleadis.de"
] |
georgios@kaleadis.de
|
0f5c9a62e00a7cf653f23eb1865af53bf80ea4ca
|
0b6140d8ea5312cda9ed2412ac01cc36503ff2ba
|
/incrementality_sandbox/sandbox_paircounter.py
|
e83c64f4288a190e0af3628dda340b7bc18ea0a2
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
SimonPreissner/fruitfly
|
841b48eb82e2357e3ae8366a384f1b5a95960ff1
|
99dffa7c1ed31da39513eda1ddacc4f9b968a6df
|
refs/heads/master
| 2021-06-16T19:18:08.017369
| 2019-10-27T16:39:35
| 2019-10-27T16:39:35
| 154,472,679
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
"""
This script was used to select 20 MEN-data set pairs as test set for
a sandbox (to explore incrementality)
"""
menfile = "../data/MEN_dataset_natural_form_full"
wordlist = []
pairs = []
with open(menfile, "r") as f:
for line in f:
words = line.rstrip().split()[:2]
pairs.append(words)
wordlist.extend(words)
freq = {}
for w in wordlist:
if w in freq:
freq[w] += 1
else:
freq[w] = 1
most_frequent = sorted(freq, key=freq.get, reverse=True)[:20]
print("number of words:",len(freq))
print("most frequent words:")
for w in most_frequent:
print(freq[w],"\t",w)
# do a cross-search for the most frequent words: do they form pairs?
most_frequent_pairs = []
for p in pairs:
if p[0] in most_frequent and p[1] in most_frequent:
most_frequent_pairs.append(p)
mfp_sorted = sorted(most_frequent_pairs)
print("\npairs with these words:")
for p in mfp_sorted:
print(p[0],"\t",p[1])
|
[
"simon.preissner@gmx.de"
] |
simon.preissner@gmx.de
|
b33e1f63cc4ffa6ab033238f4fef32cccf323192
|
aede805ce0d8b0f0f214cc92fa402ebc88467224
|
/firnmodel/CFM_main/dipTrend.py
|
8ad6501c98153a588de89fa534c84ac4fd45b3a4
|
[] |
no_license
|
gwib/CommunityFirnModel
|
8597c3c2e40e83c8dc56f0319e49e999d967ccf2
|
f18f1ecc8e1a6813e11d395489eef5d09bf5d785
|
refs/heads/master
| 2021-06-01T13:51:17.589807
| 2020-07-13T10:45:56
| 2020-07-13T10:45:56
| 131,051,482
| 0
| 0
| null | 2018-04-25T19:03:27
| 2018-04-25T19:03:26
| null |
UTF-8
|
Python
| false
| false
| 2,858
|
py
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import h5py as h5
import os
import numpy as np
import scipy as sp
import pickle
import csv
import matplotlib.gridspec as gridspec
import pandas as pd
from sklearn import linear_model
def dipTrendToFile(site, coords):
# always there: models
models = ["HLdynamic","HLSigfus","Li2011","Helsen2008","Arthern2010S","Goujon2003","Barnola1991","KuipersMunneke2015",'Simonsen2013', 'Crocus', 'Arthern2010T']
# create hdf5 - File
with h5.File('./CFMauto/diptrends'+str(site)+'.hdf5', 'w') as hf:
try:
hf["coords"].resize((hf["coords"].shape[0] + coords.shape[0]), axis = 0)
hf["coords"][-coords.shape[0]:] = coords
except:
hf.create_dataset('coords',data = coords) #TODO: maxshape
for m in models:
rfolder = 'CFMexperimentsInput2'
rfile='CFM_'+site+'_results_'+m+'.hdf5'
fn = os.path.join(rfolder,rfile)
try:
f = h5.File(fn,'r')
except:
continue
time = f['DIP'][1:,0]
dip = f['DIP'][1:,1]
diptrend = [findDIPtrend(time, dip)]
try:
hf[m].resize((hf[m].shape[0] + 1), axis = 0)
hf[m][-1:] = diptrend
except:
hf.create_dataset(m, data = diptrend)
            # Create the shared time axis only once; the `with` block closes the file.
            if 'time' not in hf:
                hf.create_dataset('time', data=time)
def dipTrendToCSV(site, coords,csvName='diptrendsAuto', rfolder = 'CFMauto'):
# always there: models
models = ["HLdynamic","HLSigfus","Li2011","Helsen2008","Arthern2010S","Goujon2003","Barnola1991","KuipersMunneke2015","Simonsen2013", "Crocus","Arthern2010T"]
#csvName = csvName+'_'+site
diptrendModels = [np.nan]*(len(models))
for m in models:
rfile='CFM_'+site+'_results_'+m+'.hdf5'
fn = os.path.join(rfolder,rfile)
i=models.index(m)
try:
f = h5.File(fn,'r')
time = f['DIP'][1:,0]
dip = f['DIP'][1:,1]
diptrendModels[i] = findDIPtrend(time, dip)
except:
diptrendModels[i] = np.nan
coords.extend(diptrendModels)
with open(os.path.join(rfolder, csvName+'.csv'), 'a') as f:
writer = csv.writer(f)
writer.writerow(coords)
def findDIPtrend(time, dip):
t = time.reshape(-1,1)
dip_norm = [((d - dip.mean()) / (dip.max() - dip.min())) for d in dip]
regr = linear_model.LinearRegression()
try:
regr.fit(t, dip_norm)
diptrend = regr.coef_[0]
except:
diptrend = np.nan
return diptrend
|
[
"gwib@users.noreply.github.com"
] |
gwib@users.noreply.github.com
|
61fae23f954a13c058736ea35789dfa0adccb52c
|
5a72f4ad3dee9c93e907e5db6ae073a0f6173557
|
/api/models/log.py
|
7ab6756ef27a249f3dc859536ce22b9775c28e9f
|
[
"Apache-2.0"
] |
permissive
|
avikowy/machinaris
|
170117fa8857942d90b33b15a727674924da1d66
|
23eead3c30e5d4a75b13c142638c61bcd0af4bfe
|
refs/heads/main
| 2023-06-17T15:54:08.795622
| 2021-07-16T04:19:37
| 2021-07-16T04:19:37
| 386,497,979
| 0
| 0
|
Apache-2.0
| 2021-07-16T03:35:17
| 2021-07-16T03:35:17
| null |
UTF-8
|
Python
| false
| false
| 1,338
|
py
|
#
# Data from log reading and parsing
#
import re
import traceback
from api import app
from common.utils import converters
class Challenges:
# Parse the provided most recent 5 lines of grepped output for challenges
def __init__(self, cli_stdout):
self.columns = [ 'challenge_id', 'plots_past_filter', 'proofs_found', 'time_taken', 'created_at']
self.rows = []
for line in cli_stdout:
#app.logger.info(line)
try:
self.rows.append({
                    'challenge_id': re.search(r'eligible for farming (\w+)', line, re.IGNORECASE).group(1) + '...',
                    'plots_past_filter': str(re.search(r'INFO\s*(\d+) plots were eligible', line, re.IGNORECASE).group(1)) + \
                        '/' + str(re.search(r'Total (\d+) plots', line, re.IGNORECASE).group(1)),
                    'proofs_found': int(re.search(r'Found (\d+) proofs', line, re.IGNORECASE).group(1)),
                    'time_taken': str(re.search(r'Time: (\d+\.?\d*) s\.', line, re.IGNORECASE).group(1)) + ' secs',
                    'created_at': line.split()[0].replace('T', ' ')
})
except:
app.logger.info("Failed to parse challenge line: {0}".format(line))
app.logger.info(traceback.format_exc())
self.rows.reverse()
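
# Hedged usage sketch (not part of the original module): a synthetic line
# shaped only to satisfy the regexes above, not a real chia log excerpt.
if __name__ == "__main__":
    sample = ("2021-07-15T21:10:05.000 harvester chia.harvester.harvester: "
              "INFO 2 plots were eligible for farming abc123... Found 0 "
              "proofs. Time: 0.53125 s. Total 120 plots")
    print(Challenges([sample]).rows)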
|
[
"guydavis.ca@gmail.com"
] |
guydavis.ca@gmail.com
|
b4d78ebef0927d8ff4cb93c72ad59d307bd693d5
|
6f72c675695e1ead2c47eb30597105ce9536251a
|
/main.py
|
9164d0856e561737fc5d6f8ccc51ad9ea1fceca4
|
[
"MIT"
] |
permissive
|
Fedioun/ter_victooor
|
28058d563ad700fe10a10df3dbb7a2df4f95da52
|
7bbe3141d8afbf24ed5e9170a29d22cc88d04f5c
|
refs/heads/main
| 2023-02-20T03:34:34.455846
| 2021-01-23T16:44:12
| 2021-01-23T16:44:12
| 330,169,886
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
import zipfile
import nibabel as nib
import time, os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import csv
def main():
matplotlib.use('agg')
input_folder = os.path.join(".", "data", "MICCAI_BraTS_2018_Data_Training")
data_type = "HGG"
dataset_folder = os.path.join(input_folder, data_type)
patients = os.listdir(dataset_folder)
    csv_path = os.path.join(input_folder, "survival_data.csv")
    patients_dict = get_metadata(csv_path)
x1, y1 = load_patient(os.path.join(dataset_folder, patients[0]))
x2, y2 = load_patient(os.path.join(dataset_folder, patients[1]))
print(get_metrics(y1[:, :, 100], y1[:, :, 99], 1) )
plt.imshow(y1[:, :, 100])
plt.colorbar()
plt.savefig('foo.png')
plt.imshow(y1[:, :, 99])
plt.colorbar()
plt.savefig('bar.png')
time.sleep(3)
def load_patient(path):
y = nib.load(os.path.join(path, os.path.basename(path) + "_seg.nii.gz")).get_fdata()
channels = ["t1", "t1ce", "t2", "flair"]
tmp = []
for c in channels:
tmp.append(
nib.load(os.path.join(path, os.path.basename(path) + "_" + c + ".nii.gz")).get_fdata()
)
x = np.stack(tmp, axis=3)
patches = []
for h in np.array_split(x, 48):
for w in np.array_split(h, 48, 1):
patches.extend(np.array_split(w, 31, 2))
    # Callers unpack two values and index the second as a 3-D segmentation
    # volume, so return the stacked channels and the labels; the patch list
    # above is computed but not returned here.
    return x, y
def get_metadata(csv_path):
patients_dict = {}
with open(csv_path) as csv_file:
lines = [line for line in csv.reader(csv_file, delimiter=',', quotechar='|')][1:]
for line in lines:
dt = {
'Age' : line[1],
'Survival' : line[2],
'ResectionStatus' : line[3]
}
patients_dict[line[0]] = dt
return patients_dict
def get_metrics(x, y, label):
x = np.where(x == label, True, False)
y = np.where(y == label, True, False)
tn = (np.logical_not(x) * np.logical_not(y)).sum()
tp = (x * y).sum()
p = y.sum()
n = np.logical_not(y).sum()
fp = (x * np.logical_not(y)).sum()
fn = (np.logical_not(x) * y).sum()
union = (x + y).sum() # logical or
iou = tp / union
dsc = 2 * tp / (x.sum() + p)
acc = (tp + tn) / (p + n)
f1 = (2 * tp) / (2 * tp +fp + fn)
sensitivity = tp/p
ppv = tp / x.sum()
return iou, dsc, acc, f1, sensitivity, ppv
main()
|
[
"fedioun.achille@gmail.com"
] |
fedioun.achille@gmail.com
|
8ab7a338025f5fccb607dc5913ca4639bed4948f
|
b361ddcec5288f3b2f02abb5b82ca70005e16a82
|
/WEEK2/computeCost.py
|
6816e13414eb561efca8796e5d0f5d9099790435
|
[] |
no_license
|
ananyaiyer/C4ML
|
646797ab87a20554ac9c8903a07c322849cecad8
|
3f3ac6ad2c4d6eb80a7906414bbdec0f7626d8c3
|
refs/heads/master
| 2022-07-18T19:49:36.245722
| 2020-05-25T08:03:22
| 2020-05-25T08:03:22
| 256,193,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
import numpy as np
def computeCost(X, y, theta):
    """
    Take in numpy arrays X, y, theta and compute the cost of using theta as
    the parameter vector of a linear regression model on X, y.
    """
    # np.dot(A, B)   => dot product of A and B
    # np.power(A, n) => array with each element raised to the power n
    # np.sum(A)      => scalar sum of every element in A
    m = len(y)
    predictions = np.dot(X, theta)
    square_err = (predictions - y) ** 2
    return 1 / (2 * m) * np.sum(square_err)
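
# Hedged usage sketch (not part of the original file): a perfect-fit case,
# so the cost should be exactly 0.
X = np.array([[1, 1], [1, 2], [1, 3]])  # bias column plus one feature
y = np.array([1, 2, 3])
theta = np.array([0, 1])                # hypothesis y = 0 + 1*x
print(computeCost(X, y, theta))         # -> 0.0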
|
[
"noreply@github.com"
] |
ananyaiyer.noreply@github.com
|
dd075cc5af9ef7cfdf5555cac46d756075a245a1
|
de1d70b113bc13cd602148d86a18a2ee2bd20886
|
/challenge/migrations/0009_auto_20160703_1017.py
|
1f0bd903332080dc3a96df3cc9c33ffc293c5c81
|
[] |
no_license
|
npmcdn-to-unpkg-bot/innovationmap
|
6044a75cd69127f65dfd998edfbb911640a308c9
|
29e6ae1d83e1be7ba14bd87af791c3c7d41778a3
|
refs/heads/master
| 2021-01-15T20:43:40.301530
| 2016-07-27T18:34:03
| 2016-07-27T18:34:03
| 67,419,728
| 0
| 0
| null | 2016-09-05T12:39:41
| 2016-09-05T12:39:40
| null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('challenge', '0008_auto_20160702_2005'),
]
operations = [
migrations.AddField(
model_name='department',
name='info',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='member',
name='bio',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='tag',
name='image',
field=models.ImageField(upload_to='images/tag_images/%Y/%m/%d', blank=True, null=True),
),
]
|
[
"cgeist7@gmail.com"
] |
cgeist7@gmail.com
|
238d6bfbb83c1c16e846a3e8637ee4cc40c1ae95
|
6199676f32ee3e2313a6c16acb458b00eae38ed5
|
/test/functional/feature_shutdown.py
|
c47f8466955e8ece0d7f4e3a835b731dd4795327
|
[
"MIT"
] |
permissive
|
Darrenshome40/shitecoin
|
bd54de78abbba5e922f659f0ca43db3d295e44d5
|
a2535c8fc5a43ee21ec818d5367439f6302cd084
|
refs/heads/master
| 2023-02-21T08:45:30.848297
| 2021-01-19T13:00:14
| 2021-01-19T13:00:14
| 330,753,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,401
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The shitecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test shitecoind shutdown."""
from test_framework.test_framework import shitecoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(shitecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
[
"darren.conroy@ickonic.com"
] |
darren.conroy@ickonic.com
|
3d3a1c105d55eb418574c8bcc662dd40234f3272
|
764b62c9d855c6580db915538c71d1d1ad4a6ba0
|
/config/default.py
|
c3a4481373a676a5ee4cca674dc2ebac6a4eaf93
|
[] |
no_license
|
sorz/EnvSensingServer
|
ed1d549a305ba999d4d7fb3c98088d7c98cdcecb
|
948e5cafa6ad8f2368121845936ad64113e954ef
|
refs/heads/master
| 2020-04-09T16:53:10.066431
| 2016-03-28T12:19:45
| 2016-03-28T12:19:45
| 42,407,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
"""
Default configurations.
"""
DEBUG = False
# Default is 12.
# Take around 300ms on Vultr 768MiB VPS.
BCRYPT_LOG_ROUNDS = 12
# Expire in 30 days.
AUTH_TOKEN_EXPIRATION = 30 * 24 * 3600
# Selectively call csrf.protect() on non-basic-auth requests.
WTF_CSRF_CHECK_DEFAULT = False
|
[
"orz@sorz.org"
] |
orz@sorz.org
|
3caf74d2ebe90822fbb87fa0c0510a0ee00f902d
|
143c4bc2173f610775ad043e5f187e2b9c89d5fe
|
/addict.py
|
aa58ea8a1457379332af23a33a1ef8bb957fa64b
|
[] |
no_license
|
potykion/addiction
|
67dc7d50587467332c0bf414a5d0fe7fb415661b
|
dba2f377938f5d152cfcfcdb3414f16f72f64de2
|
refs/heads/master
| 2020-04-22T01:16:25.874708
| 2019-02-17T16:23:48
| 2019-02-17T16:23:48
| 170,009,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import fire
from addiction.api import PackageDependencyIndicator
if __name__ == '__main__':
fire.Fire(PackageDependencyIndicator)
|
[
"potykion@gmail.com"
] |
potykion@gmail.com
|
ee81d1a33ff82d93c4a0f8c87e2b96e77c4a4eb8
|
5e02ebcd576e4c9fb5b99e96eca480a760113e0a
|
/iotbot/typing.py
|
086e2efeecbad422c2c5a502fcfc6b2278fe5156
|
[
"MIT"
] |
permissive
|
yuban10703/python--iotbot
|
372468112eef9db45e5431ad27e715fe35316dda
|
acffc679eba6347b25f2826f1f48678ac1343fff
|
refs/heads/master
| 2022-12-01T12:55:32.126548
| 2020-08-12T09:03:13
| 2020-08-12T09:03:13
| 286,969,845
| 2
| 0
| null | 2020-08-12T09:16:38
| 2020-08-12T09:16:38
| null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from typing import Any
from typing import Callable
from .model import EventMsg
from .model import FriendMsg
from .model import GroupMsg
GroupMsgReceiver = Callable[[GroupMsg], Any]
FriendMsgReceiver = Callable[[FriendMsg], Any]
EventMsgReceiver = Callable[[EventMsg], Any]
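
# Hedged usage sketch (not part of the original module): any callable taking
# a GroupMsg satisfies the GroupMsgReceiver alias.
def print_group_msg(ctx: GroupMsg) -> None:
    print(ctx)

receiver: GroupMsgReceiver = print_group_msg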
|
[
"xiyao.wong@foxmail.com"
] |
xiyao.wong@foxmail.com
|
09bc7cc43022fb44101bffd1e0b981e0d2fbcbb5
|
955ee13f0ed39a3d0b997636207884f887eae4e1
|
/Gui/Tour/menu_frm-multi.py
|
960bde194c280433526d820b921ea73e65839ac0
|
[] |
no_license
|
sguberman/PP4E
|
1d4ca7c3dced798dcab7cfd746e061f8c8ef23d9
|
ea4f713daf94f6663314bfad0a7de1e8809f0248
|
refs/heads/master
| 2020-07-17T00:28:12.555340
| 2017-01-17T15:26:48
| 2017-01-17T15:26:48
| 73,938,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
from tkinter import *
from menu_frm import makemenu
root = Tk()
for i in range(2):
mnu = makemenu(root)
mnu.config(bd=2, relief=RAISED)
Label(root, bg='black', height=5, width=25).pack(expand=YES, fill=BOTH)
Button(root, text='Bye', command=root.quit).pack()
root.mainloop()
|
[
"sguberman@gmail.com"
] |
sguberman@gmail.com
|
44dc671e94cd34f4680015d003f9041c3691bfab
|
3c921f816f7336a446eb114229d5cf19aa211541
|
/src/app/__init__.py
|
f08d5dbb350c02ca28728ff246adbbbcba9fc830
|
[] |
no_license
|
mgolvers/flask_test
|
fc57141ad546744b261588f21652ce2251098e70
|
ea0707b528337ba3c10dedc928384316edf25369
|
refs/heads/main
| 2023-08-21T13:36:22.574042
| 2023-06-08T13:42:20
| 2023-06-08T13:42:20
| 77,781,532
| 0
| 0
| null | 2023-09-06T18:25:55
| 2017-01-01T14:14:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
# app/__init__.py
# imports
import logging
import os
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask import abort
from flask import render_template
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
# local import
from config import app_config
db = SQLAlchemy()
login_manager = LoginManager()
def create_app(config_name):
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[config_name])
app.config.from_pyfile("config.py")
Bootstrap(app)
db.init_app(app)
login_manager.init_app(app)
login_manager.login_message = "You must be logged in to access this page!"
login_manager.login_view = "auth.login"
if not os.path.exists("log"):
os.mkdir("log")
file_handler = RotatingFileHandler("log/microblog.log", maxBytes=10240, backupCount=10)
file_handler.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]")
)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info("Microblog startup")
migrate = Migrate(app, db)
from app import models
from .admin import admin as admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix="/admin")
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
from .home import home as home_blueprint
app.register_blueprint(home_blueprint)
@app.errorhandler(403)
def forbidden(error):
return render_template("errors/403.html", title="Forbidden"), 403
@app.errorhandler(404)
def page_not_found(error):
return render_template("errors/404.html", title="Page Not Found"), 404
@app.errorhandler(500)
def internal_server_error(error):
return render_template("errors/500.html", title="Server Error"), 500
@app.route("/500")
def error():
abort(500)
return app
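
# Hedged usage sketch (not part of the original file): booting the factory.
# The "development" key is an assumption about the app_config mapping
# defined in config.py.
if __name__ == "__main__":
    create_app("development").run()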
|
[
"martins.golvers@gmail.com"
] |
martins.golvers@gmail.com
|
e3cbf265d41a241a11b58037c63c5ff1e842e962
|
e16dcc4c0718b09b85e3ad8468621f12208ce80a
|
/backend/apps/register/migrations/0002_registration_user.py
|
9500c90383b1a990da2246d6f60044a7fa44c85e
|
[] |
no_license
|
timonritzi/motion
|
c0372bc57c0e44053a9914a6e08c32b20a4686b8
|
d6218813a038a3f2eaeaa38bed3b689cb8dbb213
|
refs/heads/main
| 2023-02-24T19:31:58.688960
| 2021-01-24T11:28:34
| 2021-01-24T11:28:34
| 332,424,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
# Generated by Django 3.1.2 on 2020-10-21 10:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('register', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='registration',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"timon.pascal@gmail.com"
] |
timon.pascal@gmail.com
|
7a12790c4b3f7d01888e390be88b4bcab7322ba1
|
1293c0c6ec4899c7f522d0d3ae86edbab20d27dd
|
/POM_version3/tests/fill_form_test.py
|
2f9ee27286674e830128f561cee53837812c869c
|
[] |
no_license
|
salmanahmad21/Automation_Training
|
163cc278d80da1b63dfc6cd76b68738c21af5fdd
|
2b931ec497f6b9cecfea4960b8f924a64c667fff
|
refs/heads/main
| 2023-09-03T00:25:17.995490
| 2021-10-20T09:44:49
| 2021-10-20T09:44:49
| 394,978,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
from pages.fill_form_page import *
import unittest
class FillForm(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome(executable_path="/Users/salmanahmad/Downloads/chromedriver")

    def tearDown(self):
        # Close the browser even when the assertion fails.
        self.driver.quit()
def test_fill_form(self):
self.driver.get("https://demoqa.com/text-box")
obj = FillFormMethods(self.driver)
obj.form_fill(form_input_keys, form_input_values)
obj.click(form_submit_btn_id)
obj.output_result(output_text_ids)
print(clean_values)
self.assertEqual(form_input_values, clean_values, "Values are not matching - Failed")
if __name__ == "__main__":
unittest.main()
|
[
"salman.ahmad@arbisoft.com"
] |
salman.ahmad@arbisoft.com
|
0e8bb5be96cd1a23424987043e44c50d1d9af9a0
|
790b35cf27579bc8fde7e71077a83feab8d29471
|
/steamworks/exceptions.py
|
c730b290d6c3ab33c57959a2b0d2b3874150913a
|
[
"MIT"
] |
permissive
|
philippj/SteamworksPy
|
f094742966054ce2106dc03876fff074319abbfb
|
9496d308cff71a1bed9e21940245424a244432ca
|
refs/heads/master
| 2023-05-31T12:58:41.135249
| 2023-02-25T21:05:06
| 2023-02-25T21:05:06
| 39,316,769
| 123
| 25
|
MIT
| 2023-05-25T14:25:49
| 2015-07-19T00:11:28
|
Python
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
class SteamException(Exception):
pass
class GenericSteamException(SteamException):
pass
class UnsupportedPlatformException(SteamException):
pass
class UnsupportedArchitectureException(SteamException):
pass
class MissingSteamworksLibraryException(SteamException):
pass
class SteamNotLoadedException(SteamException):
pass
class SteamNotRunningException(SteamException):
pass
class SteamConnectionException(SteamException):
pass
class UnsupportedSteamStatValue(SteamException):
pass
class SetupRequired(SteamException):
pass
|
[
"philipp98.joos@gmail.com"
] |
philipp98.joos@gmail.com
|
c8a2e5359fa4a0b99ed1fc3ca77706e69cdbb71f
|
2e0d2ea4044c782dc9ee6e22b5fc90f3998cdf94
|
/InheritanceExample.py
|
7e4a96fe3fca27b408b5dd350181e7116c4034d8
|
[] |
no_license
|
padmini06/pythonExamples
|
ce342123bbcb6377f52be6773f12c8008ed79687
|
7bcc32d2be1cf1abd902cd5405d6f651096c8bb3
|
refs/heads/master
| 2020-04-18T02:08:19.393519
| 2019-01-23T08:51:07
| 2019-01-23T08:51:07
| 167,149,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
#!/usr/bin/env python3
class Vehicle:
def __init__(self, brand = " ", color = " "):
self.__brand = brand
self.__color = color
def setColor(self, color):
self.__color = color
def setBrand(self, brand):
self.__brand = brand
def getColor(self):
return (self.__color)
def getBrand(self):
return (self.__brand)
    def showDetials(self):
        # Use the accessors: bare __brand/__color here would raise NameError.
        print("Car brand is :", self.getBrand(), " Color is :", self.getColor())
class Car(Vehicle):
def __init__(self, brand = " ", color = " ", price = 0):
super().__init__(brand,color)
self.__price = price
def setPrice(self, price):
self.__price = price
def getPrice(self) :
return(self.__price)
def showDetials(self) :
print("Car brand is :", self.getBrand()," Color is :", self.getColor()," and cost is : ", self.__price)
def __str__(self):
return "Car Model is :" +self.getBrand() + "Color is :" + self.getColor()+" and cost is : "+ str(self.__price)
objCar = Car("MARUTI","RED",800000)
objCar1 = Car()
objCar1.setPrice(800)
objCar.showDetials();
print(objCar1)
|
[
"noreply@github.com"
] |
padmini06.noreply@github.com
|
a7c1e0e5318f11b3918bd5d4ae1038b1103052aa
|
dd80a584130ef1a0333429ba76c1cee0eb40df73
|
/external/chromium_org/build/android/pylib/perf/thermal_throttle.py
|
6a47319f9838a3c2f8ce5527980b250cc74a659c
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
karunmatharu/Android-4.4-Pay-by-Data
|
466f4e169ede13c5835424c78e8c30ce58f885c1
|
fcb778e92d4aad525ef7a995660580f948d40bc9
|
refs/heads/master
| 2021-03-24T13:33:01.721868
| 2017-02-18T17:48:49
| 2017-02-18T17:48:49
| 81,847,777
| 0
| 2
|
MIT
| 2020-03-09T00:02:12
| 2017-02-13T16:47:00
| null |
UTF-8
|
Python
| false
| false
| 4,197
|
py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
class OmapThrottlingDetector(object):
"""Class to detect and track thermal throttling on an OMAP 4."""
OMAP_TEMP_FILE = ('/sys/devices/platform/omap/omap_temp_sensor.0/'
'temperature')
@staticmethod
def IsSupported(adb):
return adb.FileExistsOnDevice(OmapThrottlingDetector.OMAP_TEMP_FILE)
def __init__(self, adb):
self._adb = adb
def BecameThrottled(self, log_line):
return 'omap_thermal_throttle' in log_line
def BecameUnthrottled(self, log_line):
return 'omap_thermal_unthrottle' in log_line
def GetThrottlingTemperature(self, log_line):
if 'throttle_delayed_work_fn' in log_line:
return float([s for s in log_line.split() if s.isdigit()][0]) / 1000.0
def GetCurrentTemperature(self):
tempdata = self._adb.GetFileContents(OmapThrottlingDetector.OMAP_TEMP_FILE)
return float(tempdata[0]) / 1000.0
class ExynosThrottlingDetector(object):
"""Class to detect and track thermal throttling on an Exynos 5."""
@staticmethod
def IsSupported(adb):
return adb.FileExistsOnDevice('/sys/bus/exynos5-core')
def __init__(self, adb):
pass
def BecameThrottled(self, log_line):
return 'exynos_tmu: Throttling interrupt' in log_line
def BecameUnthrottled(self, log_line):
return 'exynos_thermal_unthrottle: not throttling' in log_line
def GetThrottlingTemperature(self, log_line):
return None
def GetCurrentTemperature(self):
return None
class ThermalThrottle(object):
"""Class to detect and track thermal throttling.
Usage:
Wait for IsThrottled() to be False before running test
After running test call HasBeenThrottled() to find out if the
test run was affected by thermal throttling.
"""
def __init__(self, adb):
self._adb = adb
self._throttled = False
self._detector = None
if OmapThrottlingDetector.IsSupported(adb):
self._detector = OmapThrottlingDetector(adb)
elif ExynosThrottlingDetector.IsSupported(adb):
self._detector = ExynosThrottlingDetector(adb)
def HasBeenThrottled(self):
"""True if there has been any throttling since the last call to
HasBeenThrottled or IsThrottled.
"""
return self._ReadLog()
def IsThrottled(self):
"""True if currently throttled."""
self._ReadLog()
return self._throttled
def _ReadLog(self):
if not self._detector:
return False
has_been_throttled = False
serial_number = self._adb.Adb().GetSerialNumber()
log = self._adb.RunShellCommand('dmesg -c')
degree_symbol = unichr(0x00B0)
for line in log:
if self._detector.BecameThrottled(line):
if not self._throttled:
logging.warning('>>> Device %s thermally throttled', serial_number)
self._throttled = True
has_been_throttled = True
elif self._detector.BecameUnthrottled(line):
if self._throttled:
logging.warning('>>> Device %s thermally unthrottled', serial_number)
self._throttled = False
has_been_throttled = True
temperature = self._detector.GetThrottlingTemperature(line)
if temperature is not None:
logging.info(u'Device %s thermally throttled at %3.1f%sC',
serial_number, temperature, degree_symbol)
if logging.getLogger().isEnabledFor(logging.DEBUG):
# Print current temperature of CPU SoC.
temperature = self._detector.GetCurrentTemperature()
if temperature is not None:
logging.debug(u'Current SoC temperature of %s = %3.1f%sC',
serial_number, temperature, degree_symbol)
# Print temperature of battery, to give a system temperature
dumpsys_log = self._adb.RunShellCommand('dumpsys battery')
for line in dumpsys_log:
if 'temperature' in line:
btemp = float([s for s in line.split() if s.isdigit()][0]) / 10.0
logging.debug(u'Current battery temperature of %s = %3.1f%sC',
serial_number, btemp, degree_symbol)
return has_been_throttled
|
[
"karun.matharu@gmail.com"
] |
karun.matharu@gmail.com
|
10acc59c4f9ce2be125aeb5c7ada5b1219d8ae66
|
6b84414c0fe7ab68603fd740cc10260dac0cc7ac
|
/labs/ball.py
|
19633b23f9d1c61569c2a400447ee0c2e90bf197
|
[] |
no_license
|
Master1612/MFTI
|
d3883a0866859abd975a94b42a229164fc7732ad
|
007691fa25dc7026f993cce9101f74d935476064
|
refs/heads/master
| 2023-03-18T04:53:30.214733
| 2021-03-04T13:12:49
| 2021-03-04T13:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import tkinter as tk
from random import choice
import time
root = tk.Tk()
root.title('BALL')
frame = tk.Frame(root)
root.geometry('800x600')
canvas = tk.Canvas(root, bg='white')
canvas.pack(fill=tk.BOTH, expand=1)
class Ball:
def __init__(self, x=40, y=450):
""" Конструктор класса Ball
Args:
x - начальное положение мяча по горизонтали
y - начальное положение мяча по вертикали
"""
self.x = x
self.y = y
self.r = 10
self.vx = 3
self.vy = 0
self.color = choice(['blue', 'green', 'red', 'brown'])
self.id = canvas.create_oval(
self.x - self.r,
self.y - self.r,
self.x + self.r,
self.y + self.r,
fill=self.color)
def move(self):
self.x += self.vx
self.y += self.vy
        # canvas.move() takes the item id and a relative offset, not the Ball
        # object and absolute coordinates.
        canvas.move(self.id, self.vx, self.vy)
# print(self.x, self.y)
ball = Ball()
def moving_ball():
    ball.move()
    # Pass the callback itself; ball.move() would run once and schedule None.
    root.after(20, moving_ball)
moving_ball()
tk.mainloop()
|
[
"ed.suzi@gmal.com"
] |
ed.suzi@gmal.com
|
f17a2176d3762d84686e96c261f2988bcefcad0a
|
41892fbab75f9434ef5ef2fd7f8d132c5d71c1bf
|
/fastreid/solver/lr_scheduler.py
|
17ce2b9966459db9f451484f84fb177ecbde5776
|
[
"Apache-2.0"
] |
permissive
|
lxc86739795/fast-reid
|
1c2884767bc54ff655c4d15ead362c8b2826dd2e
|
29178d70c591ef64021f10767eb606f3053156b9
|
refs/heads/master
| 2023-04-12T09:44:32.520350
| 2021-01-25T08:09:08
| 2021-01-25T08:09:08
| 288,689,076
| 1
| 0
|
Apache-2.0
| 2021-01-25T08:09:10
| 2020-08-19T09:26:11
| null |
UTF-8
|
Python
| false
| false
| 2,067
|
py
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from typing import List
import torch
from torch.optim.lr_scheduler import *
class WarmupLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
warmup_factor: float = 0.1,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_epoch(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [
base_lr * warmup_factor for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
def _get_warmup_factor_at_epoch(
method: str, iter: int, warmup_iters: int, warmup_factor: float
) -> float:
"""
Return the learning rate warmup factor at a specific iteration.
See https://arxiv.org/abs/1706.02677 for more details.
Args:
method (str): warmup method; either "constant" or "linear".
iter (int): iter at which to calculate the warmup factor.
warmup_iters (int): the number of warmup epochs.
warmup_factor (float): the base warmup factor (the meaning changes according
to the method used).
Returns:
float: the effective warmup factor at the given iteration.
"""
if iter >= warmup_iters:
return 1.0
if method == "constant":
return warmup_factor
elif method == "linear":
alpha = iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
elif method == "exp":
return warmup_factor ** (1 - iter / warmup_iters)
else:
raise ValueError("Unknown warmup method: {}".format(method))
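
# Hedged usage sketch (not part of the original module): a linear warmup over
# the first 5 steps; the toy model and step count are illustrative.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    sched = WarmupLR(opt, warmup_factor=0.1, warmup_iters=5)
    for it in range(8):
        print(it, sched.get_last_lr())  # ramps 0.01 -> 0.1, then stays at 0.1
        opt.step()
        sched.step()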
|
[
"sherlockliao01@gmail.com"
] |
sherlockliao01@gmail.com
|
629762fa79f7efd3f78f52c19acd71240f4ff740
|
193a99e11038536b65ad82ceb06a2f58f3ca46a5
|
/DjangoAPI/DjangoAPI/urls.py
|
e5b2ca7c20de5b896bd867c4cbf091cbfadc6e2f
|
[
"MIT"
] |
permissive
|
DiegoALCEB/DjangoAPI
|
1c03cb36e68e46cb4c3a255b09113e61924a90fb
|
957b008d494400ed9bc7f069e03c8e8be83c7d4d
|
refs/heads/main
| 2023-02-09T06:10:20.336786
| 2021-01-08T17:49:34
| 2021-01-08T17:49:34
| 327,447,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
"""DjangoAPI URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include("API_App.urls"))
]
|
[
""
] | |
7c4a39f417fba599388c40237c759c87119cd22d
|
20b2af5e275469261d95d4441303d567b5c03bba
|
/scripts/getNaoNumbers.py
|
0a56a825924d53155e5fe2205d2792e8994d6b96
|
[
"BSD-2-Clause"
] |
permissive
|
humanoid-robotics-htl-leonding/robo-ducks-core
|
efd513dedf58377dadc6a3094dd5c01f13c32eb1
|
1644b8180214b95ad9ce8fa97318a51748b5fe3f
|
refs/heads/master
| 2022-04-26T17:19:00.073468
| 2020-04-23T07:05:25
| 2020-04-23T07:05:25
| 181,146,731
| 7
| 0
|
NOASSERTION
| 2022-04-08T13:25:14
| 2019-04-13T09:07:29
|
C++
|
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
def getNaoIdList(location = "default"):
'''
Read id_map.json and extract nao Id list.
@param location, default value is "default"
@return array of strings
'''
script_dir = os.path.dirname(os.path.realpath(__file__))
REL_CONF_LOCATION = os.path.join('..', 'home', 'configuration', 'location')
FILE_NAME = 'id_map.json'
CONF_LOCATION = os.path.abspath(os.path.join(script_dir, REL_CONF_LOCATION))
try:
file_path = os.path.join(CONF_LOCATION, location, FILE_NAME)
if not os.path.isfile(file_path):
if location == 'default':
raise ValueError() # this will divert to the except handling
else:
print("Could not find id_map.json at location \"" + location + "\", Fallback to \"default\"")
location = 'default'
file_path = os.path.join(CONF_LOCATION, location, FILE_NAME)
with open(file_path) as f:
return json.load(f).get("idmap.nao", [])
except Exception:
print("ERROR: " + FILE_NAME + " not found at the default location!")
def getNaoNames(location = "default"):
'''
Return nao names as an array of strings. ie: [tuhhnao01]
@param location, default value is "default"
@return array of strings
'''
data = getNaoIdList(location)
return list(map(lambda n: str(n["name"]), data))
def getNaoNumbers(location = "default"):
'''
Return nao numbers as an array of ints.
@param location, default value is "default"
@return array of ints
'''
names = getNaoNames(location)
NAO_NAME_PREFIX_LEN = len('tuhhnao')
return list(map(lambda n: int(n[NAO_NAME_PREFIX_LEN:]), names))
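# e.g. names ["tuhhnao07", "tuhhnao21"] map to the numbers [7, 21]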
if __name__ == '__main__':
'''
Standalone mode. Probably useful to get available nao's for a given location
'''
import argparse
parser = argparse.ArgumentParser(description='Return Nao names or numbers.')
parser.add_argument('--location', dest='location', default='default',
help='Location, ie: smd. Default = "default".')
parser.add_argument('--names', action='store_true', help = "if set, return Nao names else return Nao Numbers.")
args = parser.parse_args()
if args.names:
print(getNaoNames(args.location))
else:
print(getNaoNumbers(args.location))
|
[
"rene.kost.951@gmail.com"
] |
rene.kost.951@gmail.com
|
62300dc47d43f84cf7fc15c6ea0fe32169815414
|
e5db7ff3498a6e51a478d10db87a69b5c0a05a37
|
/81.py
|
0da24817a35d402adde5568815263ad1f60c44c1
|
[] |
no_license
|
x89/euler
|
b0ee4574b0d1f9e9637cb35a7d7c48dc2e395ea7
|
5c9bf81ed703d7b285a7bdd7c454910d5ea3298b
|
refs/heads/master
| 2020-05-17T22:01:45.962618
| 2015-03-20T19:23:36
| 2015-03-20T19:23:36
| 23,315,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
# Project Euler 81. Shortest weighted path through a directed graph.
from pprint import pprint
#lines = open('matrix.5x5.txt', 'r').readlines()
lines = open('matrix.80x80.txt', 'r').readlines()
size = len(lines) # NxN array
adj_values = []
for line in lines:
line = line.split('\n')[0]
adj_values.append([int(_) for _ in line.split(',')])
inf = 100000000 # Set to higher than anything in our matrix
adj_matrix = [[inf for _ in range(size**2)] for _ in range(size**2)]
for i in range(size ** 2):
if i % size != size - 1:
adj_matrix[i][i+1] = \
int(adj_values[i//size%size][i%size+1])
if i // size < size - 1:
adj_matrix[i][i+size] = \
int(adj_values[i//size%size+1][i%size])
for i in range(size ** 2):
if i + size < size ** 2 - 1:
assert adj_matrix[i][i+size] < inf, "%d, %d" % (i, i+size)
if not (i + 1) % size == 0:
assert adj_matrix[i][i+1] < inf, "%d, %d" % (i, i+1)
assert adj_matrix[0][1] == int(adj_values[0][1])
assert adj_matrix[0][size] == int(adj_values[1][0])
s = set(adj_matrix[0]); s -= {adj_values[0][1]}; s -= {adj_values[1][0]}
assert s == {inf}
assert set(adj_matrix[size**2-1]) == {inf}
shortest = [inf for _ in range(size**2)] # Shortest path v0 -> vN
shortest[0] = int(adj_values[0][0])
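# Every edge goes from a lower to a higher vertex index, so the graph is a DAG
# already in topological order and one relaxation pass over the vertices suffices.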
for v in range(len(adj_matrix)):
row = adj_matrix[v]
i = 0
for c in row:
if c < inf:
if c + shortest[v] < shortest[i]:
shortest[i] = c + shortest[v]
i += 1
print(shortest[-1])
|
[
"napalm10@gmail.com"
] |
napalm10@gmail.com
|
335e3ef863880c095f306960e8e88088b59c542a
|
7f55030f3b9fa11d31b13a5eca790b3f046fa9ce
|
/source2.py
|
c54c92958fd36412ed46fbe50bac80df825b74d1
|
[] |
no_license
|
rodrigocarlos2/Text-Mining
|
1d3fbee3f5f8819caeb3ba1cf26cfa5ae08264ed
|
54ab0d4f1ee68ece395a568de96a186f726cb1d8
|
refs/heads/master
| 2020-06-26T11:55:03.522141
| 2017-07-13T10:31:49
| 2017-07-13T10:31:49
| 97,018,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
from PyPDF2 import PdfFileWriter, PdfFileReader
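# Stamps the first page of pdf1.pdf onto every page of the same file; this is
# the usual PyPDF2 watermarking pattern (normally the watermark page would
# come from a separate PDF).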
output = PdfFileWriter()
ipdf = PdfFileReader(open('pdf1.pdf', 'rb'))
wpdf = PdfFileReader(open('pdf1.pdf', 'rb'))
watermark = wpdf.getPage(0)
for i in range(ipdf.getNumPages()):
page = ipdf.getPage(i)
page.mergePage(watermark)
output.addPage(page)
with open('newfile.pdf', 'wb') as f:
output.write(f)
|
[
"rodrigo19962010@live.com"
] |
rodrigo19962010@live.com
|
5d4a27344244873ea34f6a6b4bbff785b0952da1
|
8a4063720bc783aabc9aab54bf4a4509bf3fe280
|
/myproject/settings.py
|
417511e8e8257e67191c1e3471e5962c9e806d38
|
[] |
no_license
|
Key-pi/Blog
|
5b68bf6b5ac00352b9a6396b24f8036e63214955
|
849630d089880f955cce8e96afd7269056bbe44d
|
refs/heads/main
| 2023-07-08T07:12:32.119632
| 2021-08-18T15:52:52
| 2021-08-18T15:52:52
| 394,607,271
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,282
|
py
|
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'django-insecure-&cxd9+%fml#v6y)pa16a#!nc*kt^fhuf5__okl5+9h5+-euehm')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
'*',
]
# INTERNAL_IPS = [
# '127.0.0.1',
# ]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'social_django',
'django.contrib.sites',
'django.contrib.flatpages',
'accounts.apps.AccountsConfig',
'boards.apps.BoardsConfig',
'widget_tweaks',
'django_extensions',
'simple_history',
'crispy_forms',
]
# if DEBUG:
# INSTALLED_APPS += [
# 'debug_toolbar',
# ]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
]
# if DEBUG:
# MIDDLEWARE += [
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
# ]
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.mail.mail_validation',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'boards.utils.create_profile',
)
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
'myproject.context_processors.get_avatar',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# STATIC_ROOT = BASE_DIR / 'staticfiles'
#
# STATIC_URL = '/static/'
#
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
#
# MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
MEDIAFILES_DIRS = [
os.path.join(os.path.dirname(BASE_DIR), "static", "media"),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
# AUTH_USER_MODEL = 'boards.User'
SOCIAL_AUTH_URL_NAMESPACE = 'social'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGOUT_REDIRECT_URL = 'boards:home'
LOGIN_REDIRECT_URL = 'boards:home'
LOGIN_URL = 'accounts:login'
SITE_ID = 1
GOOGLE_RECAPTCHA_SECRET_KEY = '6Ld1p5UbAAAAAP1uvus5T20W_fxJTVeGwaS-KDmz'
SOCIAL_AUTH_GITHUB_KEY = '3ff60ec40066c2ef3073'
SOCIAL_AUTH_GITHUB_SECRET = '070da7820431cb8b3eb55b89fb64ca4380728c03'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '1090285993143-bd2ksutc1lnf44kvclaq8oiauiogscpi.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'NVy8QZl2LqmT4eSBVqP-dS8p'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'social_core.backends.github.GithubOAuth2',
'social_core.backends.google.GoogleOAuth2',
]
CELERY_BROKER_URL = 'amqp://localhost'
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'kd0996253125@gmail.com'
EMAIL_HOST_PASSWORD = '1973s1975o2001d'
EMAIL_PORT = 587
# EMAIL_HOST_PASSWORD = 'uibxxgweocxnwlld'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
|
[
"Terrible_dev_4.20@protonmail.com"
] |
Terrible_dev_4.20@protonmail.com
|
cfa27dfc1c34c482c8c4cbbc55e7d5b20e92e89a
|
f26a873f5943de750b2aee52b6a73802c6ba35d3
|
/Code Academy/4. cas/zadatak9.py
|
61a834ca83de9141b6b30b315c8464951ff841e6
|
[] |
no_license
|
kolavcic/Basic-Python_Code
|
b5ebea1b82df4ca1302888e448e0b9a22bf4f82e
|
59577d6e8384b20dd4c8fc7d2362435a0a186a92
|
refs/heads/master
| 2023-05-29T10:15:24.565219
| 2021-06-15T11:13:18
| 2021-06-15T11:13:18
| 372,963,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# Write a program that, for a given positive integer n, prints asterisks to draw the corresponding picture.
# The picture is a right triangle made of asterisks.
# The legs of the triangle have length n, and the right angle is in the top-left corner of the picture.
n = int(input("Enter a number: "))
for red in reversed(range(0, n)):
for kolona in reversed(range(0, red+1)):
print("*",end="")
print()
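# Sample run for n = 4:
# ****
# ***
# **
# *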
|
[
"mladen@tyk.io"
] |
mladen@tyk.io
|
0025cf27aa6a64251c5adf910bfc6c1e91d5d0d8
|
ef632f07c76a78e0b2c02588bdd50745b3e839b3
|
/app/httputils.py
|
ac9dc1d701aa60ab3799c2b510aa2e86d81767ee
|
[] |
no_license
|
aceofbitcoin/cow-app
|
7bfc16505106e6bf1c666fe369852c87f9f11440
|
05df4bd6ffede830ee9619d83c61af1db65295c0
|
refs/heads/master
| 2023-03-16T13:05:32.361802
| 2021-03-01T13:38:23
| 2021-03-01T13:38:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
# -*- coding: utf-8 -*-
# Ahmet Küçük <ahmetkucuk4@gmail.com>
# Zekeriya Akgül <zkry.akgul@gmail.com>
from flask import jsonify
# NOT_FOUND = jsonify({"status": False})
# AUTHORIZATION_ERROR = jsonify({"status": False, "code": "MA-100", "content": "Could not verify authorization credentials"})
# MISSING_REQUEST = jsonify({"status": False, "content": "Missing JSON in request"})
# CREDENTIALS_MISSING = jsonify({"status": False, "code": "AA-100", "content": "User credentials are missing"})
def response(content, code = 200):
resp = jsonify(content)
resp.status_code = code
return resp
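# Example usage inside a Flask view (a sketch):
# return response({"status": True, "content": "ok"})
# return response({"status": False, "code": "MA-100"}, 401)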
|
[
"ahmetkucuk4@gmail.com"
] |
ahmetkucuk4@gmail.com
|
f069307b715c205d5cc16fdc630e90dd100341ae
|
0265260f132194a0087e807dcd00803ed27f936b
|
/do/action.py
|
81fe6c121694d7d7279626149704f9b5a376c50f
|
[] |
no_license
|
fornof/pychatbot
|
9d025287efd0b444e1f251c464ac241d861f5fd5
|
8561a16a78e4d39269510379e02dc255b02b4ba7
|
refs/heads/master
| 2021-01-19T18:21:00.758326
| 2017-08-23T22:28:13
| 2017-08-23T22:28:13
| 101,125,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
import sqlite3
class Action():
id= ''
begin= ''
end= ''
def __init__(self):
pass
def loadAllActions(self):
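# Build and return a list of Action objects, one per row of the 'action' table.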
conn = sqlite3.connect('data/sqlite_file.db')
c = conn.cursor()
where = ''
result = []
actions = []
c.execute('SELECT * FROM action ' + where + 'ORDER BY act_id desc')
dictionary = c.fetchall()
names = [description[0] for description in c.description]
for j in range(0, len(dictionary)):
row ={}
action = Action()
for i in range(0,len(c.description)):
row[c.description[i][0]] = dictionary[j][i]
result.append(row)
action.parseRowtoAction(row)
actions.append(action)
conn.commit()
#conn.close()
return actions
def parseRowtoAction(self,row):
self.id= row[u'act_id']
self.begin= row[u'act_begin']
self.end= row[u'act_end']
|
[
"robert4nof@gmail.com"
] |
robert4nof@gmail.com
|
c6c199c182816290391ca080eacf9cdff5a6f866
|
72949c29b005ca3a35fe8bbf4835615bdddd1869
|
/matplotlib/figure-aesthetics/grid-tick-axes/set_axes_constrained_layout.py
|
3e937290240d434ab4c4d0955b5d8f5ade2d9141
|
[] |
no_license
|
JagritiG/Data-Visualization-vol2
|
0b93de14e2c2afd2e7e2c8a599e7241d5dfd9c1f
|
7d3bdf5821b9d0de09ff666ee17ebf728acd6531
|
refs/heads/master
| 2021-06-24T00:26:15.355557
| 2021-02-20T03:30:49
| 2021-02-20T03:30:49
| 204,554,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
# Resizing axes with constrained layout
# Resize subplots in a figure so that there are no overlaps between axes objects and labels on the axes.
import matplotlib.pyplot as plt
import numpy as np
# Generate mock data
# Fixing random state for reproducibility
np.random.seed(10000)
# Make up some data in the interval (0, 1)
# Draw random samples from a normal (Gaussian) distribution.
y = np.random.normal(loc=0.5, scale=0.4, size=1000)
y = y[(y > 0) & (y < 1)]
y.sort()
x = np.arange(len(y))
def linear_plot(ax):
ax.plot(x, y)
ax.set_yscale('linear')
ax.set_xlabel('x-label', fontsize=10)
ax.set_ylabel('y-label', fontsize=10)
ax.set_title('Linear', fontsize=12)
# Plot without using constrained_layout; the labels overlap the axes
fig, axs = plt.subplots(nrows=2, ncols=2, constrained_layout=False)
for ax in axs.flat:
linear_plot(ax)
plt.savefig('set_axes_without_constrained_layout.pdf')
# Plot using constrained_layout=True
fig, axs = plt.subplots(nrows=2, ncols=2, constrained_layout=True)
for ax in axs.flat:
linear_plot(ax)
plt.savefig('set_axes_with_constrained_layout.pdf')
plt.show()
|
[
"jagritigoswami84@gmail.com"
] |
jagritigoswami84@gmail.com
|
37e3ea1edcd06e0e353ce476647ce327dd7f257a
|
e031364f372a13f061fd44ccd5595b9588c63a2b
|
/__init__.py
|
1af3568b63f1d25b96be9e3f4d1dfdfb603962b3
|
[] |
no_license
|
CudaText-addons/cuda_fmt_ruby
|
bde3f33b779533d89d5ab1d94e1aa2eb73890ca9
|
f2c10dc9d55eac3f74c6d74057764852f81522a8
|
refs/heads/master
| 2020-06-18T04:03:49.545718
| 2019-07-14T12:31:17
| 2019-07-14T12:31:17
| 196,158,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from . import rubybeautifier
def options():
opt = rubybeautifier.default_options()
return opt
def do_format(text):
return rubybeautifier.beautify(text, options())
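# Minimal sketch of driving the entry point by hand (the hosting CudaText
# plugin presumably supplies the editor text):
# formatted = do_format("def foo;1+1;end")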
|
[
"support@uvviewsoft.com"
] |
support@uvviewsoft.com
|
99cf45f7793af8aade027f216d95d2a1b5eeb246
|
73a30a064d9167d658fc36f1d70fdeab8f647805
|
/stock1/serializer.py
|
2076f31d719d959c1d11043ebe2fef58210a64fe
|
[] |
no_license
|
sanskar-agrawal1903/stock
|
a21957036c4d8a98bcb6c464afda9fbb5cca0a16
|
dca075176f185c7086f0834717d27c1d8bb8a905
|
refs/heads/master
| 2022-12-11T03:50:07.702039
| 2020-09-13T08:12:00
| 2020-09-13T08:12:00
| 295,104,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
from rest_framework import serializers
from .models import Stock
class StockSerializer(serializers.Serializer):
name=serializers.CharField(max_length=100)
price=serializers.IntegerField()
def create(self,validated_data):
return Stock.objects.create(**validated_data)
def update(self,instance,validated_data):
instance.name=validated_data.get('name',instance.name)
instance.price=validated_data.get('price',instance.price)
instance.save()
return instance
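# Example usage (a sketch; assumes a Stock model with name and price fields):
# serializer = StockSerializer(data={'name': 'ACME', 'price': 10})
# if serializer.is_valid():
# stock = serializer.save()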
|
[
"sanskar.agrawal2018@gmail.com"
] |
sanskar.agrawal2018@gmail.com
|
2cc188cf7fe8e47d4373672755648ae86cd35ec3
|
45d76197a4a9c0b540c447a353c372989140c0f1
|
/04_06_2021_quick_fix/receiver_standard_04_06_quick_fix_anja.py
|
a01eabc642d35f0b0bb5926cf54db3a13f198eb0
|
[] |
no_license
|
ogustafsson/Yooodeeel
|
b684940bf4ba26218c001af92ad5508eabfee693
|
227fad095af3388696012ed2acb5cd5d70d11c54
|
refs/heads/main
| 2023-05-14T11:08:44.070745
| 2021-06-10T14:58:39
| 2021-06-10T14:58:39
| 367,883,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,488
|
py
|
import numpy as np
import csv
import pandas as pd
from scipy.linalg import dft
from scipy.fft import fft
from scipy.fft import ifft
import matplotlib.pyplot as plt
import bitarray
import pdb
from scipy.io.wavfile import write # For writing to a .wav file
from scipy.io.wavfile import read # For reading from a .wav file to an np array.
from scipy import signal # Module for generating chirp.
from sklearn.linear_model import LinearRegression # Module for performing linear regression.
import math
import ldpc
from bitarray import bitarray
from PIL import Image
import binascii
# Define Constants.
N = 2048 #OFDM symbol length
block_size = N//2 - 1 # Frequency-domain block size: half the OFDM symbol length minus the zeroed first frequency bin
L = 256 #Cyclic prefix length
n = 10 # Number of repetitions of known OFDM symbol for transmission
sample_rate = 44100 # Sample rate is 44.1 kHz
FILE_NAME = 'output.wav'
# Define the QPSK constellation and its power
A = np.sqrt(2) # Radius of the QPSK constellation circle
chirp_constants = {"startFrequency": 100, "endFrequency": 10000, "duration": 1, "method": "logarithmic"}
#Defining constants and mapping/demapping tables
bit_mapping = {(0,0): 0, (0,1): 1, (1,1): 2, (1,0): 3}# Use Gray coding to convert bit stream to constellation indices
mapping_table = {(0,0): (1+1j), (0,1): (-1+1j), (1,1): (-1-1j), (1,0): (1-1j)}
constellation_mapping = A/np.sqrt(2) * np.array([(1+1j), (-1+1j), (-1-1j), (1-1j)]) # Define the mapping of indices to QPSK constellation in anti-clockwise sense
noise_variance = 1
c = ldpc.code(z = 81)
k = c.K #data bits need to be a multiple of k in order to do LDPC encoding/Decoding
# TO-DO: New Constants.
pilot_tones_start_index = 0
pilot_tones_end_index = 1018
pilot_tones_step = 8
first_frequency_bin = 50
last_frequency_bin = 700
# Useful helper functions for converting between np arrays and wav files.
def convert_array_to_wav(array, sample_rate, FILE_NAME):
write(FILE_NAME, sample_rate, array)
def convert_wav_to_array(wav_filename):
rate, array = read(wav_filename)
return array
# Functions performing the DFT and iDFT.
def DFT(signal):
"""Compute the discrete Fourier transform of a signal"""
return fft(signal)
def iDFT(signal):
return ifft(signal)
# Returns only the real part of the iDFT.
def iDFT_real(signal):
inverse_transform = ifft(signal)
return inverse_transform.real
# Generate the chirp signal.
def generate_chirp():
"""Produces exponential chirp with exponential envelope"""
fs = sample_rate
chirp_length = chirp_constants['duration']
f1 = chirp_constants['startFrequency']
f2 = chirp_constants['endFrequency']
window_strength = 50
T = chirp_length
t = np.linspace(0, T, T * fs)
r = f2/f1
# Calculate Sine Sweep time domain values
profile = np.sin(2*np.pi*T*f1*((r**(t/T)-1)/(np.log(r))))*(1-np.e**(-window_strength*t))*(1-np.e**(window_strength*(t-T)))
return profile
# Matched filter returns index of highest correlation between a noisy signal and the desired signal.
# For the chirp, this index will be located halfway along the chirp.
def matched_filter(noisy_signal, desired_signal):
desired_signal = desired_signal.flatten()
correlation = signal.correlate(noisy_signal, desired_signal, mode='same')
max_correlation = np.amax(correlation)
max_correlation_index = np.argmax(correlation)
return max_correlation_index
# Remove cyclic prefix
def removeCP(signal):
"""Remove the cyclic prefix of a signal"""
return signal[L:(L+N)]
# Function for averaging two-dimensional arrays, element-wise.
def compute_average(two_dimensional_array, N, n):
"""
Computes the average of a two-dimensional array, element-wise.
Needs the length of the 'block' N and the number of blocks n.
For example, inputting np.array([[0,1,2],[2,3,4]]) would return np.array([1,2,3]).
Note: we call each individual array a 'block', so we are simply averaging over the blocks.
(There is probably a nicer vectorised numpy way of doing this; revisit later.)
"""
sum_of_blocks = np.zeros(N) # Initialise as zeros array for summing each block of two-dimensional array
for block in two_dimensional_array:
sum_of_blocks = sum_of_blocks + block # Compute sum of each block all together
average_block = sum_of_blocks / n # Take average by dividing by number of blocks
return average_block
def generate_random_bit_sequence(bit_sequence_length, random_seed):
#random seed is the number corresponding to the random seed, aka 2020, 2021, 2022, etc
rng = np.random.default_rng(random_seed)
random_bit_sequence = rng.integers(low=0, high=2, size = bit_sequence_length)
return random_bit_sequence
# Note that bits should be in a numpy array like [0,0,0,1,1,0,…]
def map_bits_to_constellation_indices(bit_stream):
"""Takes a stream of bits and maps them to constellation indices from range {0,1,2,3} using Gray coding"""
bit_stream_length=len(bit_stream) # Compute length of bit stream
# If there is an odd number of bits, add a zero bit on the end to make it even
if bit_stream_length % 2:
bit_stream = np.append(bit_stream, np.array([0]))
bit_pairs = np.split(bit_stream, len(bit_stream)//2) # Split bit stream array into sub-arrays of bit pairs like [[0,0], [0,1], …]
constellation_indices = np.array([]) # Set up empty array for the loop
# Map each bit pair to its corresponding constellation index
for bit_pair in bit_pairs:
constellation_indices = np.append(constellation_indices, bit_mapping[tuple(bit_pair)]) # TODO: make this loop more efficient
constellation_indices = constellation_indices.astype(int) # Ensure that constellation indices are integers
return constellation_indices
def map_indices_to_constellation(constellation_indices):
"""Takes sequence of constellation indices from range {0,1,2,3} and maps them to a sequence of QPSK constellation symbols"""
# UNIT TEST: check constellation indices belong to range {0,1,2,3}
assert not np.isin([False], np.isin(constellation_indices, [0,1,2,3])), "Indices do not belong to range {0,1,2,3}"
constellation_sequence = np.array([]) # Set up empty array for the loop
# Map each random index to its corresponding constellation symbol
for index in constellation_indices:
constellation_sequence = np.append(constellation_sequence, constellation_mapping[index]) # TODO: make this loop more efficient
return constellation_sequence
# Generate random (known) OFDM symbols in the frequency domain.
def create_random_OFDM_symbols_frequency(number_of_blocks, random_seed):
"""
Input: number of blocks of length N = 2048 we want to generate all at once (number_of_blocks = 1 for seed_2020, number_of_blocks = 5 for seed_2021, number_of_blocks = 1 for seed_2022)
Output: number_of_blocks*(N+L) time domain long sequence
"""
bit_sequence_length = number_of_blocks*(N-2)
random_bit_sequence = generate_random_bit_sequence(bit_sequence_length, random_seed)
# TO-DO: I have commented out the old code, and replaced with what I think it should be.
#blocks = map_bits_to_constellation_indices(random_bit_sequence)
constellation_indices = map_bits_to_constellation_indices(random_bit_sequence)
blocks = map_indices_to_constellation(constellation_indices)
block_size = N//2 - 1
blocks = np.split(blocks, len(blocks)/block_size)
return blocks
# Returns H_est and h_est by performing channel estimation using known OFDM symbols.
def estimate_channel(known_OFDM_symbol_block_start_location, known_OFDM_symbol_block_end_location, received_signal, N, L, n):
# Generate the known OFDM symbol.
# TO-DO: Needs to be updated to generate the new version of the known OFDM symbol. Or a list of known OFDM symbols if we use several.
known_OFDM_symbols = create_random_OFDM_symbols_frequency(5, 2021)
# TO-DO: Be careful!!!!!!
# Isolate the received known OFDM symbol block in the time domain signal.
received_known_OFDM_symbol_block_time_domain = received_signal[known_OFDM_symbol_block_start_location:known_OFDM_symbol_block_end_location]
# Loop through the received known OFDM symbols to get an estimate of the channel frequency response for each symbol.
# Split into arrays containing each OFDM symbol in the time domain in its own array.
array_of_individual_known_OFDM_symbols_time_domain = np.split(received_known_OFDM_symbol_block_time_domain, n)
# Remove the cyclic prefixes.
array_of_individual_known_OFDM_symbols_time_domain_noCP = [removeCP(OFDM_symbol_time_domain_with_CP) for OFDM_symbol_time_domain_with_CP in array_of_individual_known_OFDM_symbols_time_domain]
# Take the DFT of each known OFDM symbol in the time domain. i.e. demodulate.
array_of_individual_known_OFDM_symbols_frequency_domain = [DFT(OFDM_symbol_time_domain) for OFDM_symbol_time_domain in array_of_individual_known_OFDM_symbols_time_domain_noCP]
# Keep only the relevant frequency bins. i.e. 1 to 1023.
array_of_individual_known_OFDM_symbols_frequency_domain_relevant_half = [OFDM_symbol_frequency_domain[1:int(N/2)] for OFDM_symbol_frequency_domain in array_of_individual_known_OFDM_symbols_frequency_domain]
# Divide by the known transmitted symbol to obtain the frequency response.
# Here we loop through all of the demodulated frequency domain arrays and divide by the appropriate known OFDM symbol.
# TO-DO: Needs to be updated if several known OFDM symbols are used instead of 1. Would need to use a slightly more elaborate loop.
known_OFDM_symbol_index = 0
array_of_frequency_response = np.array([])
for OFDM_demod in array_of_individual_known_OFDM_symbols_frequency_domain_relevant_half:
current_frequency_response = OFDM_demod / known_OFDM_symbols[known_OFDM_symbol_index]
array_of_frequency_response = np.append(array_of_frequency_response, current_frequency_response)
if (known_OFDM_symbol_index == 4):
known_OFDM_symbol_index = 0
else:
known_OFDM_symbol_index += 1
# Split the array so that the average can be computed.
array_of_frequency_response = np.split(array_of_frequency_response, n)
# Compute the averaged estimated frequency response.
# TO-DO: Check if this averaging works!!! Be careful!!!
H_est = compute_average(array_of_frequency_response, int(N/2 - 1), n)
# To get the time domain impulse response (to be real), we need to have conjugate symmetry in the frequency domain. With zeros in the right places.
reversed_H_est = np.conj(np.append(np.array([0]), np.flip(H_est)))
H_est_symmetric = np.append(H_est, reversed_H_est)
H_est_symmetric = np.append(np.array([0]), H_est_symmetric)
# Compute the impulse response.
h_est = iDFT_real(H_est_symmetric)
return H_est, h_est
def demodulate_pilot_tones(H, OFDM_data_symbol_block_start_location, number_of_OFDM_data_blocks, received_signal, N, L, pilot_tones_start_index, pilot_tones_end_index, pilot_tones_step, first_frequency_bin, last_frequency_bin, update_sync_using_pilots=True):
estimated_received_data_constellation_symbols = np.array([])
estimated_received_pilot_constellation_symbols = np.array([])
pilot_indices_overall = np.array([])
current_symbol_start_location = OFDM_data_symbol_block_start_location
timing_offsets = np.array([])
# Perform demodulation on each symbol
for i in range(0, number_of_OFDM_data_blocks):
# Consider the current symbol.
current_symbol_time_domain = received_signal[current_symbol_start_location:current_symbol_start_location+L+N]
# Remove cyclic prefix.
current_symbol_time_domain_noCP = removeCP(current_symbol_time_domain)
# Take the DFT.
current_symbol_frequency_domain = DFT(current_symbol_time_domain_noCP)
# Consider the non-repeated part of the current symbol. i.e. indices 1 to 1023.
current_symbol_frequency_domain_relevant_part = current_symbol_frequency_domain[1:int(N/2)]
# Obtain the estimate of the transmitted constellation symbols by dividing by the frequency response.
estimated_received_symbol = current_symbol_frequency_domain_relevant_part / H
# TO-DO: Modify the pilot tone indices and the data indices to be such that they correspond to the
# frequency bins with pilot tones and data are located due to the standard.
# Create arrays containing the pilot tone indices and the data tone indices.
all_indices = np.arange(N//2-1)
pilot_indices = np.arange(pilot_tones_start_index, pilot_tones_end_index, pilot_tones_step)
# Delete the pilot tone indices.
data_indices = np.delete(all_indices, pilot_indices)
# Remove the lower and upper frequency bins from the data indices.
data_indices_truncated = []
for index in data_indices:
if (index >= first_frequency_bin and index<last_frequency_bin):
data_indices_truncated.append(index)
data_indices_truncated = np.array(data_indices_truncated)
# Extract the pilot tones and data tones from the estimated received symbol by taking the values at the appropriate indices.
estimated_received_pilot_tones = np.take(estimated_received_symbol, pilot_indices)
estimated_received_data_tones = np.take(estimated_received_symbol, data_indices_truncated)
# If this parameter is true, then the synchronisation for each OFDM data symbol is update using the information from the pilot tones.
if (update_sync_using_pilots == True):
# Update the synchronisation of each OFDM data symbol here by rotating each constellation symbol by the appropriate amount.
pilot_tone = A/np.sqrt(2)*(1+1j)
phase_shifts = np.angle(estimated_received_pilot_tones / pilot_tone)
# TO-DO: Modify which frequency bins to consider for this part.
phase_shifts_unwrapped = np.unwrap(phase_shifts)[10:60] # Here we take a section of the unwrapped phases. Investigate further.
adjusted_pilot_indices = (2*np.pi/N)*pilot_indices[10:60] # Here we take a section of the unwrapped phases. Investigate further.
# Fit linear regression to the unwrapped phase shifts, to determine the offset in number of samples.
model = LinearRegression().fit(adjusted_pilot_indices[:, np.newaxis], phase_shifts_unwrapped)
slope = model.coef_[0]
# Round the offset to two decimal places and add it to a list.
offset = np.round(slope, 2)
timing_offsets = np.append(timing_offsets, offset)
# Correct for the timing offset by rotating the estimated received constellation symbols.
# This is done by multiplying the received data constellation symbols by a complex exponential.
adjusted_data_indices_truncated = (2*np.pi/N)*data_indices_truncated
resynchronisation_multiplier = np.exp((-1j)*(offset)*adjusted_data_indices_truncated)
estimated_received_data_tones = estimated_received_data_tones*resynchronisation_multiplier
# Append estimated received symbol to array of all received constellation symbols.
estimated_received_data_constellation_symbols = np.append(estimated_received_data_constellation_symbols, estimated_received_data_tones)
estimated_received_pilot_constellation_symbols = np.append(estimated_received_pilot_constellation_symbols, estimated_received_pilot_tones)
pilot_indices_overall = np.append(pilot_indices_overall, pilot_indices)
current_symbol_start_location += (L+N)
return estimated_received_data_constellation_symbols, estimated_received_pilot_constellation_symbols, pilot_indices_overall
# Functions for LDPC decoding and handling output bits.
# # Decoding using LDPC.
# Defining all the neccessary functions for LDPC decoding.
def rand_bin_array(K, N):
arr = np.zeros(N)
arr[:K] = 1
np.random.shuffle(arr)
return arr
def bits2bytes(x):
n = len(x)+3
r = (8 - n % 8) % 8
prefix = format(r, '03b')
x = ''.join(str(a) for a in x)
suffix = '0'*r
x = prefix + x + suffix
x = [x[k:k+8] for k in range(0,len(x),8)]
y = []
for a in x:
y.append(int(a,2))
return y
def bytes2bits(y):
x = [format(a, '08b') for a in y]
r = int(x[0][0:3],2)
x = ''.join(x)
x = [int(a) for a in x]
for k in range(3):
x.pop(0)
for k in range(r):
x.pop()
return x
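# Round trip: bytes2bits(bits2bytes(x)) == x for any list of bits x
# (the three-bit prefix records how many padding zeros were appended).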
def mapping(bits):
return np.array([mapping_table[tuple(b)] for b in bits])
def SP(bits):
return bits.reshape((int(len(bits)/2), 2))
def encode_source_bits(source_bits):
tem = []
for i in range(int(len(source_bits)//c.K)):
tem.extend(c.encode(source_bits[i*c.K:(i+1)*c.K]))
return np.array(list(tem))
def LLR_calculation(output, channel_frequency, noise_variance):
#output = np.array(list(output))
#channel_frequency = [i[1+drop_front:N//2-drop_back] for i in channel_frequency]
#channel_frequency = np.concatenate(channel_frequency, axis=None)[:len(output)]
LLR = []
for i in range(len(output)):
first = channel_frequency[i]*np.conjugate(channel_frequency[i])*np.sqrt(2)*np.imag(output[i])/noise_variance
second = channel_frequency[i]*np.conjugate(channel_frequency[i])*np.sqrt(2)*np.real(output[i])/noise_variance
LLR.append(np.real(first))
LLR.append(np.real(second))
return np.array(LLR)
def BER(input_bits, output_bits):
added = (input_bits + output_bits) % 2
return np.sum(added)/len(input_bits)
def tobits(s):
result = []
for c in s:
bits = bin(ord(c))[2:]
bits = '00000000'[len(bits):] + bits
result.extend([int(b) for b in bits])
return result
def frombits(bits):
chars = []
for b in range(int(len(bits) / 8)):
byte = bits[b*8:(b+1)*8]
chars.append(chr(int(''.join([str(bit) for bit in byte]), 2)))
return ''.join(chars)
def s_to_bitlist(s):
ords = (ord(c) for c in s)
shifts = (7, 6, 5, 4, 3, 2, 1, 0)
return [(o >> shift) & 1 for o in ords for shift in shifts]
def bitlist_to_chars(bl):
bi = iter(bl)
bytes = zip(*(bi,) * 8)
shifts = (7, 6, 5, 4, 3, 2, 1, 0)
for byte in bytes:
yield chr(sum(bit << s for bit, s in zip(byte, shifts)))
def bitlist_to_s(bl):
return ''.join(bitlist_to_chars(bl))
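# Round trip: bitlist_to_s(s_to_bitlist("ab")) == "ab" (8 bits per character)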
def convert_file_to_bits(file_name):
with open(file_name, 'rb') as file:
file_bytes = np.array([byte for byte in file.read()], dtype=np.uint8)
file_bits = np.unpackbits(file_bytes)
file_bits = np.array(file_bits, dtype=int)
return file_bits
def LDPC_decoding(data_constellation_estimated, H_est):
output = data_constellation_estimated
# Decoder
channel_frequency = np.repeat(H_est, len(output)//len(H_est) + 1)
output = LLR_calculation(output,channel_frequency,noise_variance)
tem = []
for i in range(int(len(output)//(c.K*2))):
app,it = c.decode(output[c.K*2*i:c.K*2*i+2*c.K])
#tem.extend(app[::2])
# app = output[c.K*2*i:c.K*2*i+2*c.K]
tem.extend(app[:int(len(app)/2)])
output = []
for i in range(len(tem)):
if tem[i]<=0:
output.append(1)
elif tem[i]>0:
output.append(0)
output_bits = output
output = bitarray(output)
return output_bits
# Demapping functions using maximum likelihood
def demapping_symbol_ML(symbol):
first_bit = 0 if symbol.imag >= 0 else 1
second_bit = 0 if symbol.real >= 0 else 1
return first_bit, second_bit
def demapping_ML(sequence):
output = []
for f in sequence:
first_bit, second_bit = demapping_symbol_ML(f)
output.append(first_bit)
output.append(second_bit)
return output
def calculate_amount_of_padding_for_ldpc(source_bits_length):
additional_bits = 0
if (source_bits_length % k != 0):
additional_bits = k - source_bits_length % k
return additional_bits
# Calculate the file length from the repeated received 32 file_length_bits.
def calculate_file_length(file_length_bits):
file_length_blocks = np.split(file_length_bits, 5)
# Sum the blocks to perform majority vote.
vote_array = np.zeros(32)
for block in file_length_blocks:
vote_array += block
# Now perform the majority vote to return the estimate of the transmitted bits.
corrected_file_length_bits = []
for element in vote_array:
if element > 2:
corrected_file_length_bits.append(1)
else:
corrected_file_length_bits.append(0)
# Convert the file length from binary to an integer.
bits = bitarray(corrected_file_length_bits)
i = 0
for bit in bits:
i = (i << 1) | bit
file_length = i
return file_length
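# Example: with 5 repeats of the 32-bit length, the transmitted value is still
# recovered exactly as long as each bit position is flipped in at most 2 copies.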
# Obtain the file type by performing a majority vote and then finding the minimum Hamming distance codeword.
# Returns 0 if .tif, 1 if .txt, and 2 if .wav.
def obtain_file_type(file_type_bits):
file_type_blocks = np.split(file_type_bits, 5)
# Sum the blocks to perform majority vote.
vote_array = np.zeros(8)
for block in file_type_blocks:
vote_array += block
# Now perform the majority vote to return the estimate of the transmitted bits.
voted_file_type_bits = []
for element in vote_array:
if element > 2:
voted_file_type_bits.append(1)
else:
voted_file_type_bits.append(0)
estimated_file_type_bits = np.array(voted_file_type_bits)
# Now perform Hamming distance calculations to determine the transmitted codeword.
# TO-DO: These should probably be global variables.
tif_codeword = np.array([0,0,1,1,0,0,1,0]) # Index 0
txt_codeword = np.array([0,1,0,0,1,0,0,0]) # Index 1
wav_codeword = np.array([1,0,1,0,0,1,0,1]) # Index 2
tif_hamming_distance = np.sum((estimated_file_type_bits + tif_codeword) % 2)
txt_hamming_distance = np.sum((estimated_file_type_bits + txt_codeword) % 2)
wav_hamming_distance = np.sum((estimated_file_type_bits + wav_codeword) % 2)
hamming_distances = np.array([tif_hamming_distance, txt_hamming_distance, wav_hamming_distance])
min_index = np.argmin(hamming_distances)
return min_index
def simulate_received_signal_pilot_tones():
transmitted_signal = convert_wav_to_array('audio_to_transmit.wav')
channelResponse = pd.read_csv('channel.csv', header = None)
channelResponse = np.array(channelResponse)
channelResponse = channelResponse.flatten()
zero_array = np.zeros(len(transmitted_signal) - len(channelResponse))
channelResponse = np.append(channelResponse, zero_array)
received_signal = signal.convolve(channelResponse, transmitted_signal, mode='full')
return received_signal
# Load the received signal.
#received_signal = convert_wav_to_array('text_with_metadata+ldpc.wav') # TO-DO: Scan in the received signal.
#received_signal = convert_wav_to_array('latinshort_qpsk_standard_txt.wav')
#received_signal = convert_wav_to_array('latinlong_qpsk_standard_txt.wav')
#received_signal = convert_wav_to_array('audio_to_transmit.wav')
received_signal = convert_wav_to_array('inputs.wav')
#received_signal = simulate_received_signal_pilot_tones()
# Perform chirp synchronisation.
# TO-DO: Update the function generating the chirp to take the analytical method of chirp generation.
# TO-DO: Take into account the chirp at the end of the transmission.
chirp_signal = generate_chirp()
# Matched filtering returns the center index of the chirp in the received signal.
# TO-DO: Think about chirp. This should no longer be an issue now that the chirp at the end of the signal is reversed.
chirp_center_index_in_received_signal = matched_filter(received_signal, chirp_signal)
# Now compute the start and end positions of the chirp signal.
chirp_length = len(chirp_signal)
chirp_start_index = chirp_center_index_in_received_signal - int((chirp_length / 2))
chirp_end_index = chirp_center_index_in_received_signal + int((chirp_length / 2))
# Compute the start and end locations of the known OFDM symbol block.
known_OFDM_symbol_block_start_location = chirp_end_index + (L+N)
known_OFDM_symbol_block_end_location = known_OFDM_symbol_block_start_location + (n*(L+N))
# TO-DO: implement the modified Schmidl and Cox synchronisation.
#Plot the received signal.
x = list(range(0, len(received_signal)))
x = np.array(x)
fig, ax = plt.subplots()
plt.title("Received Signal")
plt.xlabel("Sample")
plt.ylabel("Signal Magnitude")
plt.axvline(x=chirp_start_index, color='green') #Should be at beginning of chirp
plt.axvline(x=chirp_end_index, color='red') #Should be at end of chirp which is at chirp beginning + chirp_duration*44100
#plt.xlim(0,4000 )
# plt.ylim(-0.2, 0.2)
ax.plot(x, received_signal)
plt.legend(['Estimated Chirp Start', 'Estimated Chirp End', 'Signal'])
plt.show()
# Perform the initial channel estimation based on this synchonisation point.
# To consider: should we repeat the same OFDM symbol 10 times, or use 10 different symbols?
H_est, h_est = estimate_channel(known_OFDM_symbol_block_start_location, known_OFDM_symbol_block_end_location, received_signal, N, L, n)
# Plot the estimated frequency response and impulse response of the channel.
# Magnitude Frequency Response.
f = list(range(1, len(H_est)+1))
frequencies = np.array(f)
frequencies = (sample_rate / N) * frequencies
x = frequencies
fig, ax = plt.subplots()
plt.title('Estimated Magnitude Frequency Response (Known OFDM symbols)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Relative Magnitude')
plt.yscale('log')
ax.plot(x, abs(H_est))
plt.show()
# Phase Frequency Response
f = list(range(1, len(H_est)+1))
frequencies = np.array(f)
frequencies = (sample_rate / N) * frequencies
x = frequencies
fig, ax = plt.subplots()
plt.title('Estimated Phase Frequency Response (Known OFDM symbols)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
#plt.yscale('log')
ax.plot(x, np.angle(H_est))
plt.show()
# Impulse Response
t = list(range(1, len(h_est)+1))
times = np.array(t)
#times = times / sample_rate
x = times
fig, ax = plt.subplots()
plt.title("Estimated Channel Impulse Response")
plt.xlabel("Sample")
plt.ylabel("Signal Magnitude")
ax.plot(x, h_est)
plt.show()
# Perform dynamic phase adjustment to update the initial synchronisation point, and update the channel estimates.
# Get the phase response and unwrap it.
phase_response = np.angle(H_est)
phase_response_unwrapped = np.unwrap(phase_response)
frequency_bin_indices = np.array(list(range(1, len(H_est)+1)))
adjusted_frequency_bin_indices = (2*np.pi/N)*frequency_bin_indices
# Look only at the middle part of the phase frequency response, as this normally seems to be the linear part.
# TO-DO: Consider more carefully which part of the phase response should be considered.
phase_response_unwrapped_truncated = phase_response_unwrapped[300:800]
adjusted_frequency_bin_indices_truncated = adjusted_frequency_bin_indices[300:800]
# Fit a linear regression to determine how much off synchronisation we are.
model_phase_response = LinearRegression().fit(adjusted_frequency_bin_indices_truncated[:, np.newaxis], phase_response_unwrapped_truncated)
slope_phase_response = model_phase_response.coef_[0]
# Round the slope to the nearest integer.
sync_offset = int(np.round(slope_phase_response))
# Reset the synchronisation point based on this new information.
known_OFDM_symbol_block_start_location_updated = known_OFDM_symbol_block_start_location - sync_offset
known_OFDM_symbol_block_end_location_updated = known_OFDM_symbol_block_end_location - sync_offset
# Now repeat channel estimation with this updated synchronisation point.
H_est_updated, h_est_updated = estimate_channel(known_OFDM_symbol_block_start_location_updated, known_OFDM_symbol_block_end_location_updated, received_signal, N, L, n)
f = list(range(1, len(H_est_updated)+1))
frequencies = np.array(f)
frequencies = (sample_rate / N) * frequencies
x = frequencies
fig, ax = plt.subplots()
plt.title('Updated Estimated Magnitude Frequency Response (Known OFDM symbols)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Relative Magnitude')
plt.yscale('log')
ax.plot(x, abs(H_est_updated))
plt.show()
# Phase Frequency Response
f = list(range(1, len(H_est_updated)+1))
frequencies = np.array(f)
frequencies = (sample_rate / N) * frequencies
x = frequencies
fig, ax = plt.subplots()
plt.title('Updated Estimated Phase Frequency Response (Known OFDM symbols)')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
#plt.yscale('log')
ax.plot(x, np.angle(H_est_updated))
plt.show()
# Impulse Response
t = list(range(1, len(h_est_updated)+1))
times = np.array(t)
#times = times / sample_rate
x = times
fig, ax = plt.subplots()
plt.title("Updated Estimated Channel Impulse Response")
plt.xlabel("Sample")
plt.ylabel("Signal Magnitude")
ax.plot(x, h_est_updated)
plt.show()
# Create an array to hold the all the estimated data constellation symbols.
data_constellation_estimated = np.array([])
# Demodulate the first payload block to obtain the header metadata.
# TO-DO: For now, we are assuming that at least 1 complete payload data block is transmitted.
first_payload_block_start_location = known_OFDM_symbol_block_end_location_updated + (L+N)
first_payload_block_demodulated_data_symbols, first_payload_block_demodulated_pilot_symbols, first_payload_block_pilot_indices_overall = demodulate_pilot_tones(H_est_updated, first_payload_block_start_location, 10, received_signal, N, L, pilot_tones_start_index, pilot_tones_end_index, pilot_tones_step, first_frequency_bin, last_frequency_bin, update_sync_using_pilots=True)
data_constellation_estimated = np.append(data_constellation_estimated, first_payload_block_demodulated_data_symbols)
# Obtain the header metadata from the first payload block.
# Demodulate and decode the constellation symbols corresponding to the first LDPC block (k=972 and n=1944 for LDPC code since rate is 1/2).
first_ldpc_block_constellation_symbols = data_constellation_estimated[:972]
# Quick-fix: look into this!!!
first_ldpc_block_constellation_symbols = np.append(np.array([1+1j]), first_ldpc_block_constellation_symbols)
first_ldpc_block_decoded = LDPC_decoding(first_ldpc_block_constellation_symbols, H_est_updated)
# Extract the metadata bits.
metadata_bits = np.array(first_ldpc_block_decoded[:200])
#metadata_bits = np.append(np.array([0,0]), metadata_bits)
#print(metadata_bits)
file_type_bits = metadata_bits[:40]
print(file_type_bits[:8])
print(file_type_bits[8:16])
print(file_type_bits[16:24])
print(file_type_bits[24:32])
print(file_type_bits[32:40])
file_length_bits = metadata_bits[40:]
print(file_length_bits[:32])
print(file_length_bits[32:64])
print(file_length_bits[64:96])
print(file_length_bits[96:128])
print(file_length_bits[128:160])
# Obtain the file size from the repeated file length bits.
file_size = calculate_file_length(file_length_bits)
# Obtain the file type.
# 0: tif, 1: txt, 2: wav
file_type = obtain_file_type(file_type_bits)
print("File size in bits: " + str(file_size))
print("File type as index: " + str(file_type))
# Calculate the total number of OFDM data symbols.
# Could use the second chirp to verify this calculation.
# TO-DO: Consider the effect of padding on the total number of constellation symbols.
all_indices = np.arange(N//2-1)
pilot_indices = np.arange(pilot_tones_start_index, pilot_tones_end_index, pilot_tones_step)
# Delete the pilot tone indices.
data_indices = np.delete(all_indices, pilot_indices)
# Remove the lower and upper frequency bins from the data indices.
data_indices_truncated = []
for index in data_indices:
if (index >= first_frequency_bin and index<last_frequency_bin):
data_indices_truncated.append(index)
data_indices_truncated = np.array(data_indices_truncated)
print("The number of data indices is: " + str(len(data_indices_truncated)))
padding = calculate_amount_of_padding_for_ldpc(file_size)
total_number_of_data_constellation_symbols = file_size + padding # TO-DO: Need to consider LDPC rate and 2 bits per constellation symbol. Consider padding.
total_number_of_OFDM_data_symbols = int(np.ceil(total_number_of_data_constellation_symbols / len(data_indices_truncated))) # TO-DO: Think about how to do this calculation.
total_number_of_complete_payload_blocks = total_number_of_OFDM_data_symbols // 10 # TO-DO: Check this.
number_of_OFDM_symbols_in_incomplete_payload_block = total_number_of_OFDM_data_symbols % 10
print("total number of OFDM data symbols is: " + str(total_number_of_OFDM_data_symbols))
print("Number of complete payload blocks is: " + str(total_number_of_complete_payload_blocks))
print("number of OFDM symbols in incomplete_payload_block is: " + str(number_of_OFDM_symbols_in_incomplete_payload_block))
# Demodulate the data to produce an array of data constellation symbols.
# Need to loop over the repeating structure.
# Starting from the second payload block.
# The payload block length is 11.
# First we demodulate all the completely filled payload blocks.
# Test this part by decoding payload block by payload block.
# TO-DO: Check that the correct number of OFDM symbols is actually getting decoded.
current_payload_block_data_start_location = first_payload_block_start_location + (11*(L+N))
for i in range(total_number_of_complete_payload_blocks):
current_payload_block_demodulated_data_symbols, current_payload_block_demodulated_pilot_symbols, current_payload_block_pilot_indices_overall = demodulate_pilot_tones(H_est_updated, current_payload_block_data_start_location, 10, received_signal, N, L, pilot_tones_start_index, pilot_tones_end_index, pilot_tones_step, first_frequency_bin, last_frequency_bin, update_sync_using_pilots=True)
data_constellation_estimated = np.append(data_constellation_estimated, current_payload_block_demodulated_data_symbols)
current_payload_block_data_start_location += (11*(L+N))
if (number_of_OFDM_symbols_in_incomplete_payload_block != 0):
current_payload_block_demodulated_data_symbols, current_payload_block_demodulated_pilot_symbols, current_payload_block_pilot_indices_overall = demodulate_pilot_tones(H_est_updated, current_payload_block_data_start_location, number_of_OFDM_symbols_in_incomplete_payload_block, received_signal, N, L, pilot_tones_start_index, pilot_tones_end_index, pilot_tones_step, first_frequency_bin, last_frequency_bin, update_sync_using_pilots=True)
data_constellation_estimated = np.append(data_constellation_estimated, current_payload_block_demodulated_data_symbols)
# Quick-fix: look into this!!!
data_constellation_estimated = np.append(np.array([1+1j]), data_constellation_estimated)
# Recover the output bits through LDPC decoding.
output_bits = LDPC_decoding(data_constellation_estimated, H_est_updated)
# TO-DO: Keep only the output bits that correspond to the actual transmitted data, and not the other random symbols.
# TO-DO: Remove header metadata from beginning of output bits.
output_bits = np.array(output_bits)
#output_bits = np.append(np.array([0,0]), output_bits)
output_bits = np.array(output_bits[200:file_size+200])
# Compare output and input bits, and view the output.
input_bits = convert_file_to_bits('data_text.txt')
# see how this function might be used
def write_file(binary_data, file_type):
data_bytes = np.packbits(binary_data)
data_bytes = bytearray(data_bytes)
with open('output' + file_type, 'wb') as file:
file.write(data_bytes)
print(bitlist_to_s(output_bits)) # print the decoded data as text
print(BER(input_bits, output_bits))
# TO-DO: Need to add code for dealing with other file types except txt.
|
[
"noreply@github.com"
] |
ogustafsson.noreply@github.com
|
62447df8dc2125cde4dc03ae1d92c6565d69b64d
|
e218e695bc1b46ece75c81002fc1189496c177a0
|
/benchmarks/thread_benches/plot.py
|
1c0d93c947bfcc0d15446c5cdf74d0b83babe1d2
|
[
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
BLAKE3-team/BLAKE3-specs
|
6547dc4705a49249b8e4cc293ad69b3da8d4c732
|
ea51a3ac997288bf690ee82ac9cfc8b3e0e60f2a
|
refs/heads/master
| 2022-07-08T12:06:53.259543
| 2022-06-27T20:51:14
| 2022-06-27T20:52:53
| 222,069,092
| 165
| 8
|
NOASSERTION
| 2022-06-27T08:17:21
| 2019-11-16T08:25:32
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,168
|
py
|
#! /usr/bin/env python3
import json
from matplotlib import pyplot
from pathlib import Path
import pandas
import seaborn
import sys
BENCH_NAMES = [
("threads_48", "48 threads"),
("threads_32", "32 threads"),
("threads_16", "16 threads"),
("threads_08", "8 threads"),
("threads_04", "4 threads"),
("threads_02", "2 threads"),
("threads_01", "1 thread"),
]
SIZES = [
(2**15, "32 KiB"),
(2**16, "64 KiB"),
(2**17, "128 KiB"),
(2**18, "256 KiB"),
(2**19, "512 KiB"),
(2**20, "1 MiB"),
(2**21, "2 MiB"),
(2**22, "4 MiB"),
(2**23, "8 MiB"),
(2**24, "16 MiB"),
(2**25, "32 MiB"),
(2**26, "64 MiB"),
(2**27, "128 MiB"),
(2**28, "256 MiB"),
(2**29, "512 MiB"),
(2**30, "1 GiB"),
]
def main():
target = Path(sys.argv[1])
sizes_map = dict(SIZES)
bench_names = []
sizes = []
ticks = []
tick_names = []
throughputs = []
for bench_name, bench_name_pretty in BENCH_NAMES:
bench_names.append(bench_name_pretty)
hash_dir = target / "bench_group" / bench_name
for size_i, size in enumerate(size[0] for size in SIZES):
estimates_path = hash_dir / str(size) / "new/estimates.json"
try:
estimates = json.load(estimates_path.open())
except FileNotFoundError:
# Some benchmark runs use longer inputs than others, so we
# ignore missing sizes here.
continue
slope = estimates["Slope"]
point = slope["point_estimate"]
# upper = slope["confidence_interval"]["upper_bound"]
# lower = slope["confidence_interval"]["lower_bound"]
seconds = point / 1e9
bps_throughput = size / seconds
gibps_throughput = bps_throughput / (2 ** 30)
if len(throughputs) == size_i:
throughputs.append([])
sizes.append(size)
if size in sizes_map:
ticks.append(size)
tick_names.append(sizes_map[size])
throughputs[size_i].append(gibps_throughput)
dataframe = pandas.DataFrame(throughputs, sizes, bench_names)
seaborn.set()
# pyplot.rcParams["axes.labelsize"] = 20
# pyplot.rcParams["pgf.rcfonts"] = False
# pyplot.rcParams["font.family"] = "serif"
# pyplot.figure(figsize=[20, 10])
seaborn.set_context("paper")
# seaborn.set_context("talk")
dash_styles = [
"", (4, 1.5), (1, 1), (3, 1, 1.5, 1), (5, 1, 1, 1), (5, 1, 2, 1, 2, 1),
(2, 2, 3, 1.5), (1, 2.5, 3, 1.2), (2, 2)
]
plot = seaborn.lineplot(
data=dataframe,
sort=False,
dashes=dash_styles,
)
plot.set(ylabel="Throughput (GB/s)\n")
pyplot.ylim(0, 1.1 * max(max(col) for col in throughputs))
plot.set(xscale="log")
pyplot.legend(loc="best", framealpha=1)
# pyplot.legend(loc="lower right", framealpha=1)
plot.set(xticks=ticks)
plot.set_xticklabels(tick_names, rotation=270)
# pyplot.savefig(target.with_suffix(".pgf"), bbox_inches="tight")
pyplot.show()
if __name__ == "__main__":
main()
|
[
"oconnor663@gmail.com"
] |
oconnor663@gmail.com
|
9ec976cf04573860a80eed9f017b40de425e1130
|
c014e149a620cc2e785b3dfba2d35417eb86b07c
|
/Dammen/definities.py
|
64b970f23e04bf55e832b4d1c87f9641a6303aae
|
[] |
no_license
|
Kinggoid/Dammen
|
419997333dbd164b999becd974e1a4dc366231a1
|
d46ebc181deac48e1676a3b7b19e9fd120694a88
|
refs/heads/master
| 2022-11-09T19:51:14.700457
| 2020-06-27T08:46:34
| 2020-06-27T08:46:34
| 271,027,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,085
|
py
|
import pygame
from Dammen.Damsteen import Damsteen
def setup():
"""
In this function we create the board. I make an 8 x 8 board because that is the most popular variant.
"""
spelbord = []
for row in range(8):
spelbord.append([0] * 8)
return spelbord
def stukken(spelbord):
"""
In this function we create all the draughts pieces and then place them on the board.
"""
w1 = Damsteen(1, 0, 'wit')
w2 = Damsteen(3, 0, 'wit')
w3 = Damsteen(5, 0, 'wit')
w4 = Damsteen(7, 0, 'wit')
w5 = Damsteen(0, 1, 'wit')
w6 = Damsteen(2, 1, 'wit')
w7 = Damsteen(4, 1, 'wit')
w8 = Damsteen(6, 1, 'wit')
w9 = Damsteen(1, 2, 'wit')
w10 = Damsteen(3, 2, 'wit')
w11 = Damsteen(5, 2, 'wit')
w12 = Damsteen(7, 2, 'wit')
    z1 = Damsteen(0, 7, 'zwart')
    z2 = Damsteen(2, 7, 'zwart')
    z3 = Damsteen(4, 7, 'zwart')
    z4 = Damsteen(6, 7, 'zwart')
    z5 = Damsteen(1, 6, 'zwart')
    z6 = Damsteen(3, 6, 'zwart')
    z7 = Damsteen(5, 6, 'zwart')
    z8 = Damsteen(7, 6, 'zwart')
    z9 = Damsteen(0, 5, 'zwart')
    z10 = Damsteen(2, 5, 'zwart')
    z11 = Damsteen(4, 5, 'zwart')
    z12 = Damsteen(6, 5, 'zwart')
alle_stenen = [w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, z1, z2, z3, z4, z5, z6, z7, z8, z9, z10, z11, z12]
posities = []
    for steen in alle_stenen: # Take each piece's position
posities.append(steen.positie)
    for i in range(0, len(posities)): # Place all pieces on the board
spelbord[posities[i][1]][posities[i][0]] = alle_stenen[i]
return alle_stenen
def draw_board(board, scherm, lengte_vakje, hoogte_vakje):
"""
    In this function we draw the board and the remaining pieces
"""
zwart = (0, 0, 0)
wit = (220, 220, 220)
for i in range(0, 8):
for j in range(0, 8):
            if (i + j) % 2 == 0: # Formula to decide whether we want a black or a white square
kleur_van_vakje = wit
else:
kleur_van_vakje = zwart
vakje = pygame.draw.rect(scherm, kleur_van_vakje, [lengte_vakje * j, hoogte_vakje * i, lengte_vakje,
                                                               hoogte_vakje]) # Draw the square
stuk = board[i][j]
            if stuk != 0: # If a piece is on this square we draw it as well
if stuk.king:
stuk.draw_king(scherm, vakje.center)
else:
stuk.draw_dam(scherm, vakje.center)
def checkIfFriendly(board, x, y):
"""Kijkt of je gekozen vak wel binnen het bord valt en of je een leeg vak hebt geselecteerd"""
if len(board) >= y + 1:
if len(board[y]) >= x + 1:
if board[y][x] == 0:
return True
return False
def einde(wit, zwart):
""" In deze definitie kijken we of een kleur al zijn stukken is verloren. In dat geval heeft die kleur verloren"""
if wit == 0 or zwart == 0:
if wit == 0:
return [True, False]
else:
return [True, True]
else:
return [False, False]
def juisteStukken(stukken, beurt):
""" Deze definitie haalt alle stukken van dezelfde kleur uit de lijst en geeft dat terug."""
juiste_kleur_stukken = []
for i in stukken:
if i.team == beurt:
juiste_kleur_stukken.append(i)
return juiste_kleur_stukken
def promoveer(stuk):
""" Met deze definitie promoveren we een stuk."""
positie = stuk.positie
if stuk.team:
if positie[1] == 7:
stuk.promoveren()
else:
if positie[1] == 0:
stuk.promoveren()
def koningStappen(board, stuk):
""" Hier kijken we van een bepaalde koning of hij iemand kan pakken (dan krijg je het hele diagonaal daarachter mee.
en anders waar hij met normale stappen naartoe kan gaan."""
team = stuk.team
alle_mogelijke_posities = []
niet_springen = []
wel_springen = []
    for i in [-1, 1]: # These for-loops are used to look along every diagonal
for j in [-1, 1]:
            een_diagonaal = [] # Which squares we can land on along this diagonal
x = stuk.positie[0]
y = stuk.positie[1]
stukken = 0
while True:
x += i
y += j
                # Each round of the while loop we look one step further along the diagonal
                if 0 <= x <= 7 and 0 <= y <= 7: # If we are still inside the board
vak = board[y][x]
if vak != 0:
                        if vak.team == team: # If we run into our own team we cannot continue
break
                        else: # The first time we meet a piece of the other team we record it. If we find two on the same diagonal we stop with this diagonal
stukken += 1
if stukken == 2:
break
een_diagonaal.append([y, x])
                    else: # If it is an empty square we record it
een_diagonaal.append([y, x])
else:
break
            alle_mogelijke_posities.append(een_diagonaal) # We store every diagonal
for diagonaal in alle_mogelijke_posities:
        # If we meet a piece of the other team on this diagonal, we only keep the positions after it.
sprong_mogelijk = 0
diagonal = []
for positie in diagonaal:
vak = board[positie[0]][positie[1]]
if vak != 0:
sprong_mogelijk += 1
if sprong_mogelijk == 0:
niet_springen.append(positie)
elif sprong_mogelijk == 1:
diagonal.append(positie)
else:
break
if diagonal and len(diagonal) > 1:
wel_springen.append(diagonal)
    if len(wel_springen) != 0: # If we can capture a piece and have at least one square to land on
return [stuk, wel_springen]
else:
        return [0, niet_springen] # If we cannot capture anything we simply get back a list of possible positions
def diagonaalKoningSpringen(board, posities):
"""In deze definitie kijken we of onze koning nog een keer kan slaan en op welke positie op zijn diagonaal hij kan
landen om dit te doen."""
stuk = posities[0]
mogelijke_posities = [stuk, [], 0]
for i in posities[1]:
mogelijke_posities[1].append([i[0]])
coordinaten = []
kan_stuk_slaan = False
    for i in stuk.positie: # We save the piece's position
coordinaten.append(i)
for i in range(0, len(mogelijke_posities[1])):
diagonaal = posities[1][i]
y = board[diagonaal[0][0]][diagonaal[0][1]]
board[diagonaal[0][0]][diagonaal[0][1]] = 0
for j in range(1, len(
                diagonaal)): # For every position we can reach, we check whether we can capture someone from there
            # According to the rules this would force us to land on that square
board[diagonaal[j][0]][diagonaal[j][1]] = stuk
stuk.positie[0] = diagonaal[j][1]
stuk.positie[1] = diagonaal[j][0]
x = koningStappen(board, stuk)
if x[0] != 0:
kan_stuk_slaan = True
mogelijke_posities[1][i].append(posities[1][i][j])
board[diagonaal[j][0]][diagonaal[j][1]] = 0
        # Now we put everything back to normal
stuk.positie = coordinaten
board[diagonaal[0][0]][diagonaal[0][1]] = y
    if kan_stuk_slaan: # List of positions if the piece can capture once more
return mogelijke_posities
    return posities # Where it can go otherwise
def stukkenBijhouden(wit, zwart, stuk):
"""Als een stuk wordt gepakt gaat het aantal zwarte of witte stukken omlaag met 1"""
if stuk.team:
return [wit, zwart - 1]
else:
return [wit - 1, zwart]
def damZetten(board, stuk):
""" Hier kijken we of een man een ander stuk kan pakken (en waar hij landt) en anders krijgen we een lijst met
andere mogelijke zetten terug"""
positie = stuk.positie
team = stuk.team
stap_vooruit_posities = []
sprong_posities = []
lst = [1, -1]
    for i in lst: # With these little for-loops we look in every (diagonal) direction from the piece
for j in lst:
y = positie[1] + i
x = positie[0] + j
            if 0 <= x <= 7 and 0 <= y <= 7: # If it is still inside the board
position = board[y][x]
if position == 0:
                    if team and y > positie[1] or not team and y < positie[1]: # White men must move diagonally down and black men must go the other way
stap_vooruit_posities.append([y, x])
                elif position.team != team: # If a man of the other team lies next to our man, we check whether the square after it is empty.
y2 = y + i
x2 = x + j
if 0 <= x2 <= 7 and 0 <= y2 <= 7:
if checkIfFriendly(board, x2, y2):
sprong_posities.append([[y, x], [y2, x2]])
    if sprong_posities: # If we can capture something we get back something like: [[position of the piece you capture, position where you land]]
return sprong_posities
    return stap_vooruit_posities # Otherwise a list of where they can walk to
def herhaling(zetten):
    if zetten == 15: # Under some rule sets it is a draw when only kings have been moved 15 times in a row.
        print('Too many king moves have been made in a row. It is a draw')
return True
return False
def watKanJeZetten(board, stukken, beurt, kan_je_pakken):
kan_je_nog_iets = 0
alleen_sprong = 0
stukken_die_iets_kunnen = []
stukken_die_iets_kunnen_pakken = []
alle_zetten = []
zetten_van_stukken_die_kunnen_pakken = []
    for dam in stukken: # Here we check whether the player still has moves. If not, the other one wins
if dam.team == beurt:
if dam.king:
koningZet = koningStappen(board, dam)
if len(koningZet[1]) != 0:
kan_je_nog_iets = 1
if not alleen_sprong:
stukken_die_iets_kunnen.append(dam)
alle_zetten.append(koningZet)
if type(koningZet[0]) != int:
alleen_sprong = True
if kan_je_pakken:
return alleen_sprong
stukken_die_iets_kunnen_pakken.append(dam)
zetten_van_stukken_die_kunnen_pakken.append(diagonaalKoningSpringen(board, koningZet))
else:
mogelijke_zetten = damZetten(board, dam)
if mogelijke_zetten:
kan_je_nog_iets = 1
if not alleen_sprong:
stukken_die_iets_kunnen.append(dam)
alle_zetten.append(mogelijke_zetten)
                    if type(mogelijke_zetten[0][0]) == list: # If a normal man can jump then 'alleen_sprong' is switched on
alleen_sprong = True
if kan_je_pakken:
return alleen_sprong
stukken_die_iets_kunnen_pakken.append(dam)
zetten_van_stukken_die_kunnen_pakken.append(mogelijke_zetten)
if kan_je_pakken:
return False
if kan_je_nog_iets == 0:
if beurt:
return [False, True, 0, 0]
else:
return [True, True, 0, 0]
if alleen_sprong:
return [alleen_sprong, False, stukken_die_iets_kunnen_pakken, zetten_van_stukken_die_kunnen_pakken]
return [alleen_sprong, False, stukken_die_iets_kunnen, alle_zetten]
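# Minimal usage sketch (editor's illustration, not part of the original module;
# assumes the imported Damsteen class exposes .positie and .team as used above):
if __name__ == '__main__':
    demo_bord = setup()
    demo_stenen = stukken(demo_bord)
    for demo_steen in juisteStukken(demo_stenen, 'wit'):
        # print each white piece's position and its plain moves or captures
        print(demo_steen.positie, damZetten(demo_bord, demo_steen))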
|
[
"thijmespambox@gmail.com"
] |
thijmespambox@gmail.com
|
082d3355655379f5521c05f9d72b0a60863be729
|
6452520a8befcdcf3c4ed9e8170efaeee49ba549
|
/3_datalake.py
|
89de18adb872f67411e091235ac12a2871f409e7
|
[] |
no_license
|
jonghyunChae/DataEngineerSample
|
dfeb651d71c33f3f690ef39efcc8a134ad9f00f4
|
7f5305e7b0c7993cc889eaad245b169746913cc7
|
refs/heads/master
| 2023-03-04T14:30:33.767088
| 2021-02-12T03:25:40
| 2021-02-12T03:25:40
| 320,414,036
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,522
|
py
|
import sys
import os
import logging
import time
import boto3
import requests
import base64
import json
import pymysql
from datetime import datetime
import pandas as pd
import jsonpath
client_id = "74cbd487458843f1ad3f5fa1e914c02f"
client_secret = "752e4ed11062473f9da9076c4499d51b"
headers = None
host = "jongs.cli2moxtbkcd.ap-northeast-2.rds.amazonaws.com"
port = 3306
database = "production"
username = "admin"
password = "01045819402"
def main():
global headers
conn, cursor = mysql_connect()
headers = get_headers()
    # RDS - fetch the artist IDs
cursor.execute("SELECT id FROM artists LIMIT 10")
top_track_keys = {
"id" : "id",
"name" : "name",
"popularity" : "popularity",
"external_url" : "external_urls.spotify"
}
    # Fetch each artist's top tracks from Spotify
top_tracks=[]
for (id, ) in cursor.fetchall():
r = get_top_tracks(id)
raw = json.loads(r.text)
for track in raw['tracks'] :
top_track = {}
for k, v in top_track_keys.items() :
                #find the value matching each key and append it
top_track.update({k: jsonpath.jsonpath(track, v)})
top_track.update({'artist_id' : id})
top_tracks.append(top_track)
#top_tracks.extend(raw['tracks'])
# track_ids
track_ids = [i['id'][0] for i in top_tracks]
# list of dictionaries
top_tracks = pd.DataFrame(top_tracks)
dt = datetime.utcnow().strftime('%Y-%m-%d')
print(dt)
s3_upload_with_make_parquet(dataframe=top_tracks, name='top_tracks', key=dt)
    # If we had written it out as .json instead:
"""
with open('top_tracks.json', 'w') as f:
for i in top_tracks :
json.dump(i, f)
f.write(os.linesep)
    data = open('top_tracks.json', 'rb')
object.put(Body=data)
"""
# S3 import
tracks_batch = [track_ids[i: i + 100] for i in range(0, len(track_ids), 100)]
audio_features = []
for i in tracks_batch :
ids = ','.join(i)
URL = "https://api.spotify.com/v1/audio-features/?ids={}".format(ids)
r = requests.get(URL, headers=headers)
raw = json.loads(r.text)
audio_features.extend(raw['audio_features'])
audio_features = pd.DataFrame(audio_features)
s3_upload_with_make_parquet(dataframe=audio_features, name='audio_features', key=dt)
# Needs to be stored in partitions that Spark can read
def s3_upload_with_make_parquet(dataframe, name, key):
parquet_path = '{}.parquet'.format(name)
dataframe.to_parquet(parquet_path, engine='pyarrow', compression='snappy')
s3 = boto3.resource('s3')
upload_path = '{}/dt={}/{}'.format(name, key, parquet_path)
print(upload_path)
object = s3.Object('jongs-spotify-artists', upload_path)
data = open(parquet_path, 'rb')
object.put(Body=data)
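# Illustrative resulting S3 key for the upload above:
#   top_tracks/dt=2021-02-12/top_tracks.parquet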
def mysql_connect():
try:
conn = pymysql.connect(host, user=username, passwd=password, db=database, port=port, use_unicode=True)
cursor = conn.cursor()
except:
logging.error("could not connect to RDS")
sys.exit(1)
return conn, cursor
def get_headers() :
endpoints = 'https://accounts.spotify.com/api/token'
    # From Python 3 onwards this has to go through an explicit encoding step
encoded = base64.b64encode("{}:{}".format(client_id, client_secret).encode('utf-8')).decode('ascii')
headers = {
"Authorization": "Basic {}".format(encoded)
}
payload = {
"grant_type": "client_credentials"
}
r = None
try:
r = requests.post(endpoints, data=payload, headers=headers)
except :
logging.error(r.text)
sys.exit(1)
error_handle(r)
access_token = json.loads(r.text)['access_token']
headers = {
'Authorization': 'Bearer {}'.format(access_token)
}
return headers
def get_top_tracks(id):
global headers
URL = "https://api.spotify.com/v1/artists/{}/top-tracks".format(id)
params = {
'country' : 'US'
}
r = requests.get(URL, headers=headers, params=params)
return r
def error_handle(r) :
    global headers
    if r.status_code != 200 :
        logging.error(r.text)
        if r.status_code == 429 :
            # Retry-After is a plain response header, not a JSON body
            retry_after = r.headers['Retry-After']
            time.sleep(int(retry_after))
        # access token expired
        elif r.status_code == 401 :
            # assign the refreshed token to the module-level headers
            headers = get_headers()
        else :
            sys.exit(1)
if __name__ == "__main__":
main()
|
[
"jongsys21@naver.com"
] |
jongsys21@naver.com
|
a9ed1e80222e3fda8f5ecfe8ed6ca6b8ea77ed22
|
f7f5ad4ae539fa1060c7bc3bc3fe0fb4945fb83d
|
/APAC2015/Round_B/psd_attacker/solution.py
|
8c34531d200369e4bb9e5d378adc364e56715e75
|
[] |
no_license
|
royxue/Codejam
|
467f1d39dec063795ce58e9f582c769aaac06df8
|
8336cd5154ec662270ebdf78528ffd503c807e22
|
refs/heads/master
| 2021-01-10T20:21:59.885216
| 2014-11-29T13:10:28
| 2014-11-29T13:10:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
from cj_input import cj_inputs
import math
total, data = cj_inputs("A-small-attempt3.in")
def psd_attacker(data):
answer_list = []
for i in data:
answer = 0
val_M = int(i[0])
val_N = int(i[1])
if val_M == val_N:
answer = math.factorial(val_N)
else:
diff = (val_N - val_M)
multi_1 = math.factorial(val_M)
answer = (multi_1 * val_M * val_N)/(diff+1)+(multi_1) * (pow(val_M,diff)-val_M) * val_N/ math.factorial(diff)
answer = answer%(1000000007)
answer_list.append(answer)
return answer_list
output_idx = 1
for i in psd_attacker(data):
print "Case #%d: %d"%(output_idx, i)
output_idx += 1
|
[
"xljroy@gmail.com"
] |
xljroy@gmail.com
|
459d195a39c7dd0c6c1f2ba9ef3103e5820db56e
|
23cb672f5f13af759601d042e32cf0e4c903ede9
|
/ProxyArduinoPi.py
|
e94b2c95e4087ada73f8949b2abe48241718fa60
|
[] |
no_license
|
casang/homebridge
|
77dcca5f27f2f5becad987fb29459dd1dee2f09a
|
7b4e263116e2a0656622514959ca250a47430300
|
refs/heads/master
| 2020-12-15T17:36:53.711366
| 2020-02-15T20:07:01
| 2020-02-15T20:07:01
| 235,196,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,215
|
py
|
import http.server
#import BaseHTTPRequestHandler
import threading
import time
import socket
import sys
import traceback
luzOn = [0,0,0,0,0,0]
luzOnSet = [0,0,0,0,0,0]
luzBrightness = [0,0,0,0,0,0]
luzBrightnessSet = [0,0,0,0,0,0]
message = str()
def get_status(num):
# Create a TCP/IP socket
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
sock.connect(('arduino1', 8088))
# Send data
message = 'GET /?C0'
#C1=xxx&C2=xxx&C3=xxx&C4=xxx&C5=xxx
#0123456789012345678901234567890123
print ('sending "%s"' % message, file = sys.stderr)
sock.sendall(bytes(message,"utf-8") + b"\n\n")
# Look for the response
data = sock.recv(1024)
message = str (data)
print ('received "%s"' % message[5 : 8], file = sys.stderr)
for i in range (0, 5):
luzBrightness[i + 1] = int(message[i * 7 + 5 : i * 7 + 8])
sock.close()
message = ''
for i in range (1, 6):
if (luzBrightnessSet[i] != -1) and (luzBrightnessSet[i] != luzBrightness[i]):
valor = str(luzBrightnessSet[i])
luzBrightnessSet[i] = -1
if message != "":
message = message + "&"
message = message + "A" + str(i) + "=" + valor
if message != "":
message = "GET /?" + message
sockCmd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockCmd.connect (('arduino1', 8088))
print ('sending "%s"' % message, file = sys.stderr)
sockCmd.sendall(bytes(message,"utf-8") + b"\n\n")
# Look for the response
data = sockCmd.recv(1024)
print ('received "%s"' % data, file = sys.stderr)
sockCmd.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
sock.connect(('arduino1', 8088))
# Send data
message = 'GET /?D0'
#D1=x&D2=x&D3=x&D4=x&D5=x
#012345678901234567890123
print ('sending "%s"' % message, file = sys.stderr)
sock.sendall(bytes(message,"utf-8") + b"\n\n")
# Look for the response
data = sock.recv(1024)
message = str (data)
print ('received "%s"' % message[5 : 8], file = sys.stderr)
for i in range (0, 5):
luzOn[i + 1] = int(message[i * 5 + 5 : i * 5 + 6])
sock.close()
message = ''
for i in range (1, 6):
if (luzOnSet[i] != -1) and (luzOnSet[i] != luzOn[i]):
valor = str(luzOnSet[i])
luzOnSet[i] = -1
if message != "":
message = message + "&"
message = message + "B" + str(i) + "=" + valor
if message != "":
message = "GET /?" + message
sockCmd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockCmd.connect (('arduino1', 8088))
print ('sending "%s"' % message, file = sys.stderr)
sockCmd.sendall(bytes(message,"utf-8") + b"\n\n")
# Look for the response
data = sockCmd.recv(1024)
print ('received "%s"' % data, file = sys.stderr)
sockCmd.close()
except Exception as e:
print("type error: " + str(e))
print(traceback.format_exc())
time.sleep (num)
#finally:
# print ('closing socket', file = sys.stderr)
# sock.close()
class http_server:
def __init__(self):
server = http.server.HTTPServer(('', 8088), myHandler)
server.serve_forever()
class myHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
b = False
for i in range (0, 6):
b = self.path.endswith("/?C" + str (i))
if (b):
break
if b == True:
self.wfile.write(bytes(str(luzBrightness[i]),"utf-8"))
#self.wfile.write(str(luzBrightness[i]))
#self.wfile.write(str(luzBrightness[i]))
return
b = False
for i in range (0, 6):
b = self.path.endswith("/?D" + str (i))
if b:
break
if b == True:
self.wfile.write(bytes(str(luzOn[i]),"utf-8"))
#self.wfile.write(str(luzOn[i]))
return
pos = -1
for i in range (0, 6):
pos = self.path.find("/?A" + str (i) + "=")
if (pos >= 0):
break
if pos >= 0 :
s = int(self.path[pos + 5 : ])
if i > 0:
luzBrightnessSet[i] = s
else:
for j in range (0, 6):
luzBrightnessSet[j] = s
self.wfile.write(b'1')
return
pos = -1
for i in range (0, 6):
pos = self.path.find("/?B" + str (i) + "=")
if (pos >= 0):
break
if pos >= 0 :
print (self.path[pos + 5 : pos + 7])
if (self.path[pos + 5 : pos + 7] == "1") :
s = 1
else :
s = 0
if i > 0:
luzOnSet[i] = s
else:
for j in range (0, 6):
luzOnSet[j] = s
self.wfile.write(b'1')
return
self.wfile.write(b"x")
return
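# Query protocol handled above (summarised from the handler code, for reference):
#   GET /?C<n>     -> cached brightness of light n
#   GET /?D<n>     -> cached on/off state of light n
#   GET /?A<n>=<v> -> request brightness v for light n (n = 0 targets all lights)
#   GET /?B<n>=<v> -> request on/off state v (1/0) for light n (n = 0 targets all lights)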
class main:
t1 = threading.Thread(target=get_status, args=(0,))
t1.start()
def __init__(self):
self.server = http_server()
if __name__ == '__main__':
m = main()
|
[
"noreply@github.com"
] |
casang.noreply@github.com
|
d0c68dac18ea56ed9715e2bd54bb1788a6588293
|
2b640b7eaec1c703abd5c5da978d98363e9a5e7c
|
/c45.py
|
0f0b387cb96fb89c07e92776a02bdd805a337eab
|
[] |
no_license
|
karans785/CED-37-Machine-Learning-Tutorial-Assignments
|
17c0bda0b7561e849b5efaae66ef362b2c5e59ad
|
1a745b6a6c26fa59729fadb1743379ca12b3f0cd
|
refs/heads/master
| 2020-04-17T22:36:10.045061
| 2019-05-14T13:06:54
| 2019-05-14T13:06:54
| 167,000,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,967
|
py
|
import numpy
import pandas as pd
import math
dataset=pd.read_csv('playtennisC4.csv')
outlook = list(dataset['outlook'])
temperatureC = list(dataset['temperature'])
humidityC = list(dataset['humidity'])
wind = list(dataset['wind'])
answer = list(dataset['answer'])
temperature = []
humidity = []
def calculate_entropy(answer):
entropy = 0.0
count = 0
for i in range(len(answer)):
if answer[i] == "yes":
count = count + 1
size = len(answer)
p1 = count/size
p2 = (size-count)/size
if p1==0 and p2==0:
entropy=0
elif p1==0:
entropy=p2*math.log(p2,2)
elif p2==0:
entropy=p1*math.log(p1,2)
else:
entropy = p1*math.log(p1,2) + p2*math.log(p2,2)
entropy = entropy * -1
return entropy
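# Worked example (illustrative): for the classic 14-example play-tennis data with
# 9 "yes" and 5 "no" labels, calculate_entropy returns
#   -((9/14)*log2(9/14) + (5/14)*log2(5/14)) ~= 0.940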
def calculate_gain(column,answer,entropy_decision):
gain = 0.0
values={}
for i in range(len(column)):
type=column[i]
if type in values:
values[type].append(answer[i])
else:
values[type]=[]
values[type].append(answer[i])
temp=0.0
for i in values:
p=len(values[i])/len(column)
e=calculate_entropy(values[i])
temp=temp+p*e
gain=entropy_decision-temp
return gain
def calculate_splitinfo(column):
splitinfo = 0.0
values={}
for i in range(len(column)):
type=column[i]
if type in values:
t = values[type]
values[type] = t+1
else:
values[type]=1
for i in values:
p=(values[i])/len(column)
l=math.log(p,2)
splitinfo = splitinfo + (p*l)
splitinfo = -1 * splitinfo
return splitinfo
def calculate_gainratio(column,answer,entropy_decision):
gain = calculate_gain(column,answer,entropy_decision)
splitinfo = calculate_splitinfo(column)
if splitinfo == 0:
return 0
return (gain/splitinfo)
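# Gain ratio as in Quinlan's C4.5: GainRatio(A) = Gain(A) / SplitInfo(A); dividing by
# the split information penalises attributes that fragment the data into many small subsets.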
def conTOdis(entropy_decision):
    # converts the continuous temperature and humidity columns into discrete
    # high/low values, choosing the threshold that maximises information gain
global temperature,humidity
maxgain = 0
value = 0
for i in range(len(humidityC)):
threshold = humidityC[i]
temp = []
for j in range(len(humidityC)):
if humidityC[j]>threshold:
temp.append("high")
else:
temp.append("low")
currgain = calculate_gain(temp,answer,entropy_decision)
if currgain>maxgain:
maxgain = currgain
value = threshold
humidity = temp
print(humidityC)
print(value)
print(humidity,"\n")
maxgain = 0
value = 0
for i in range(len(temperatureC)):
threshold = temperatureC[i]
        if threshold != 83 and threshold != 85: # skip these two hard-coded temperature thresholds
temp = []
for j in range(len(temperatureC)):
if temperatureC[j]>threshold:
temp.append("high")
else:
temp.append("low")
currgain = calculate_gain(temp,answer,entropy_decision)
if currgain>maxgain:
maxgain = currgain
value = threshold
temperature = temp
print(temperatureC)
print(value)
print(temperature,"\n")
def get_max_attribute(data):
ans = data[len(data)-1]
entropy_decision = calculate_entropy(ans)
maxgain = 0
index = -1
for i in range(len(data)-1):
gain = calculate_gainratio(data[i],ans,entropy_decision)
if gain > maxgain:
maxgain = gain
index = i
return index,entropy_decision
ed = calculate_entropy(answer)
conTOdis(ed)
table = {'outlook':outlook,'temperature':temperature,'humidity':humidity,'wind':wind,'answer':answer}
name=['outlook','temperature','humidity','wind','answer']
branch=[]
def id3(temp,name,branch):
table=[]
for key in temp:
table.append(temp[key])
index,entropy_decision = get_max_attribute(table)
if entropy_decision == 0:
print(branch, "->",table[len(table)-1][0])
print()
return
column = table[index]
branch.append(name[index])
values={}
tempname = []
for i in range(len(name)):
if i != index:
tempname.append(name[i])
for i in range(len(column)):
type=column[i]
if type in values:
for j in range(len(table)):
if j != index:
values[type][name[j]].append(table[j][i])
else:
values[type]={}
for j in range(len(table)):
if j !=index:
values[type][name[j]]=[]
values[type][name[j]].append(table[j][i])
for i in values:
branch.append(i)
id3(values[i],tempname,branch)
branch.pop()
branch.pop()
# main function call
id3(table, name, branch)
|
[
"noreply@github.com"
] |
karans785.noreply@github.com
|
976cf2b2718c6129645a37ef6a08791aff86481f
|
7ccc166a1844568bb90b7eb73e80dcc504f49ab1
|
/chat.py
|
c6fa364f6861593d6963381dc0471ebeef279af9
|
[] |
no_license
|
ivfreire/chat
|
2de56bbed65977d7c6979fa09ef2e407a03f0d68
|
3bee200ed6aa7a3b7a433e95926bef150c1714a0
|
refs/heads/master
| 2022-11-25T16:18:07.249822
| 2020-07-29T09:28:12
| 2020-07-29T09:28:12
| 283,432,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
from flask import Flask, session, redirect, render_template, request, url_for
from flask_socketio import SocketIO, emit
app = Flask(__name__)
app.secret_key = 'secret cat'
io = SocketIO(app)
@app.route('/')
def index():
if 'username' in session:
return render_template('index.html', username=session['username'])
else:
return render_template('login.html')
@app.route('/login', methods=['POST'])
def login():
session['username'] = request.form['username']
return redirect('/')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
if 'username' in session:
session.pop('username', None)
return redirect('/')
@io.on('connect')
def connect():
if 'username' in session:
emit('new-user', { 'username': session['username'] }, broadcast=True)
@io.on('new-message')
def receive(data):
if 'message' in data:
emit('new-message', {
'username': session['username'],
'message': data['message']
}, broadcast=True)
@io.on('disconnect')
def disconnect():
	if 'username' in session:
		emit('remove-user', { 'username': session['username'] }, broadcast=True)
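# Entry point for running the app directly (editor's sketch; flask_socketio's
# SocketIO.run wraps Flask's development server, default host/port assumed):
if __name__ == '__main__':
	io.run(app)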
|
[
"icarovf@usp.br"
] |
icarovf@usp.br
|
506cf1fc554480a3254d76511897634dc404de9f
|
8bd531be4cb12e5ac8009c2c4957cd26aaff8312
|
/paralog_gene_cluster5.py
|
6451f16c34793cc40a75cf34303ced117474f8f4
|
[] |
no_license
|
zhexia/lncRNA-project-script
|
7b584544f45d023b37124a4b8d40c64bdf5f5127
|
8565c65a9de2d604195b25d971460ec1b4a770f9
|
refs/heads/master
| 2020-08-03T01:59:38.278236
| 2019-09-29T02:33:02
| 2019-09-29T02:33:02
| 211,590,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,025
|
py
|
# I wrote this myself; it still has bugs, which turned out to be big ones
import sys
import copy
genepairs_list = []
with open(sys.argv[1],'r') as f:
for line in f:
info = line.strip().split()
gene1 = info[0]
gene2 = info[1]
gene_pair = {gene1, gene2}
if (gene1 != gene2) and (gene_pair not in genepairs_list):
genepairs_list.append(gene_pair)
genepairs_list_copy = copy.deepcopy(genepairs_list)
for genepair_copy in genepairs_list_copy:
geneA, geneB = genepair_copy
geneA_fmy_index = -1
geneB_fmy_index = -1
i = 0
for genepair in genepairs_list:
if genepair_copy != genepair:
if geneA in genepair:
geneA_fmy_index = i
if geneB in genepair:
geneB_fmy_index = i
i = i + 1
if ((geneA_fmy_index > -1) and (geneB_fmy_index > -1)) and (geneA_fmy_index != geneB_fmy_index):
genepairs_list[geneA_fmy_index].update(genepairs_list[geneB_fmy_index])
del genepairs_list[geneB_fmy_index]
if genepair_copy in genepairs_list:
genepairs_list.remove(genepair_copy)
elif (geneA_fmy_index > -1) and (geneB_fmy_index == -1):
genepairs_list[geneA_fmy_index].add(geneB)
if genepair_copy in genepairs_list:
genepairs_list.remove(genepair_copy)
elif (geneB_fmy_index > -1) and (geneA_fmy_index == -1):
genepairs_list[geneB_fmy_index].add(geneA)
if genepair_copy in genepairs_list:
genepairs_list.remove(genepair_copy)
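# A minimal union-find sketch of the same grouping (editor's illustration of a
# bug-free alternative; merges every connected gene pair into one family):
def merge_families(pairs):
    parent = {}
    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x
    for a, b in pairs:
        parent[find(a)] = find(b)
    families = {}
    for g in list(parent):
        families.setdefault(find(g), set()).add(g)
    return list(families.values())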
lncrna_num = {}
with open(sys.argv[2],'r') as f:
for line in f:
info = line.split()
num = int(info[1])
key = info[0]
lncrna_num[key] = num
family_ave_lncrna = lncrna_num[sys.argv[1].split('.')[0].split('/')[1]]/len(genepairs_list)
print('{} have {} genefamilys, family_ave_lncrna is {}'.format(sys.argv[1].split('.')[0].split('/')[1], len(genepairs_list),str('%.2f'%family_ave_lncrna)))
|
[
"noreply@github.com"
] |
zhexia.noreply@github.com
|
553f81640f79e77db5d8068c2908c0fb597ce6f9
|
530d812906e622aba690a27ce6e7f1576e892c70
|
/prpe9/prpe_ne.py
|
eae9c9227165b5897c4267749ec9ec702a768ebd
|
[
"MIT"
] |
permissive
|
zuters/prpene
|
4b992d1037f8e603838b9a3a11e1cd1cde152328
|
d76d6203e366c91efc1d1ae7ecee4c73e80e38d2
|
refs/heads/master
| 2020-03-29T09:49:47.409122
| 2019-07-29T14:25:39
| 2019-07-29T14:25:39
| 149,775,680
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77,476
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Jānis Zuters
# PRPE technical version 9
#=============================================================================
import codecs
import sys
from collections import Counter
from copy import deepcopy
from numpy import argmin
goodroots = Counter()
badroots = {}
goodprefixes = Counter()
badprefixes = {}
goodpostfixes = Counter()
badpostfixes = {}
premaxlen = 8
postmaxlen = 7
minrootlen = 2
minpreflen = 2
def isUlower(word):
return len(word)>=2 and word[0:1].isupper() and word[1:].islower() and word.isalpha()
def isUlower2(word):
return len(word)>=2 and word[0:1].isupper() and word[1:2].islower() and word.isalpha()
def processUlower(word):
if isUlower(word):
return word.lower()
else: return word
def processUlower2(word):
if isUlower2(word):
return word[0].lower()+word[1:]
else: return word
def spos(pos,L):
return (pos+0.5)/L
def sposback(sps,L):
return round(sps*L-0.5)
def words_match(w1,w2):
if len(w1)<len(w2):
ww = w1
w1 = w2
w2 = ww
L1 = len(w1)
L2 = len(w2)
i2 = 0
ret = 0
for i1 in range(L1):
sps1 = spos(i1,L1)
for ii2 in range(i2,min(sposback(sps1,L2)+3,L2)):
if w1[i1]==w2[ii2]:
i2 = ii2+1
ret+=1
break
return ret / L1
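# Illustration: the return value is the fraction of the longer word's letters matched
# in order, e.g. words_match("abc", "abc") == 1.0, while a mismatched ending lowers
# the score (words_match("paris", "pariz") == 0.8).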
def read_parallel_lines(finname1,finname2,fin1,fin2,startpos,lmaxcount=None,stopatend=True):
lcount = 0
repo1 = []
repo2 = []
stop = False
eof = False
while not stop:
if startpos[0]==0:
fin1 = codecs.open(finname1, 'r', encoding='utf-8')
fin2 = codecs.open(finname2, 'r', encoding='utf-8')
# print(startpos)
fin1.seek(startpos[0])
fin2.seek(startpos[1])
cline = 0
line1 = fin1.readline()
while line1 != '':
line2 = fin2.readline()
            # store every line except the extra one, which is read only to check for eof
if lmaxcount is None or lcount<lmaxcount:
repo1.append(line1)
repo2.append(line2)
lcount+=1
if lmaxcount is not None and lcount<=lmaxcount:
startpos[0]+=len(line1.encode('utf-8'))
startpos[1]+=len(line2.encode('utf-8'))
# lmaxcount+1: to check for eof
if lmaxcount is not None and lcount>=lmaxcount+1:
stop = True
break
cline += 1
line1 = fin1.readline()
if lmaxcount is None:
stop = True
if line1=="": # eof check
fin1.close()
fin2.close()
eof = True
startpos[0] = 0
startpos[1] = 0
if stopatend:
stop = True
return repo1,repo2,eof,fin1,fin2
def preprocess_sentence_alpha_pairs(sentence1,tolower):
if tolower:
sentence1 = sentence1.lower()
s1 = [word1 for word1 in sentence1.split()[1:] if word1.isalpha()]
return s1
def collect_ne_pairs(cname1,cname2,fnename1,fnename2,fnename="",alphaonly=1,tolower=False):
eof = False
maxdnum = None
dnum = 0
fin1 = None
fin2 = None
startline = [0,0]
fne1 = codecs.open(fnename1, 'w', encoding='utf-8')
fne2 = codecs.open(fnename2, 'w', encoding='utf-8')
if fnename != "":
fne = codecs.open(fnename, 'w', encoding='utf-8')
ii = 1
while not eof and (maxdnum is None or dnum < maxdnum):
dnum += 1
print('Data:',dnum)
repo1,repo2,eof,fin1,fin2 = read_parallel_lines(
cname1,cname2,fin1,fin2,startline,lmaxcount=5000,stopatend=True)
i = 0
for sentence1 in repo1:
sentence2 = repo2[i]
s1 = preprocess_sentence_alpha_pairs(sentence1,tolower)
s2 = preprocess_sentence_alpha_pairs(sentence2,tolower)
# if ii==1335:
# print(sentence1)
# print(sentence2)
# print(s1)
# print(s2)
ul1 = 0
ul2 = 0
ww1=""
ww2=""
for w1 in s1:
if isUlower2(w1):
ul1 += 1
ww1 = w1
for w2 in s2:
if isUlower2(w2):
ul2 += 1
ww2 = w2
if ul1==1 and ul2==1:
if words_match(ww1,ww2)<0.3:
pass
else:
print (ii,ww1,file=fne1)
print (ii,ww2,file=fne2)
if fnename != "":
print (ii,ww1,ww2,file=fne)
i+=1
ii+=1
# print("Pairs ne 1", pairs_ne1)
fin1.close()
fin2.close()
fne1.close()
fne2.close()
if fnename != "":
fne.close()
def search_codetree(tword,codetree):
""" Stored in codetree with non-zero value in the terminal node
"""
pos = 0
while True:
s = tword[pos]
if s not in codetree:
return 0
elif pos==len(tword)-1:
return codetree[s][0]
else:
pos += 1
codetree = codetree[s][1]
def search_codetree_hasleftsub(tword,codetree):
""" Stored in codetree with non-zero value in any except the terminal node
"""
pos = 0
while True:
s = tword[pos]
if s not in codetree:
return 0
elif codetree[s][0]>0:
return codetree[s][0]
elif pos==len(tword)-1:
return 0
else:
pos += 1
codetree = codetree[s][1]
def search_codetree_isleftsub(tword,codetree):
""" Stored in codetree having any value terminal node (i.e., reaching terminal node)
"""
pos = 0
while True:
s = tword[pos]
if s not in codetree:
return 0
elif pos==len(tword)-1:
return 1
else:
pos += 1
codetree = codetree[s][1]
def add_to_codetree(tword,codetree,freq=1):
""" Adds word to tree structure - one node per symbol
"""
unique=0
for pos in range(len(tword)):
s = tword[pos]
if s not in codetree:
codetree[s] = [0,{}]
unique+=1
codetree[s][0] += freq
codetree = codetree[s][1]
return unique
def add_to_vocab_multi(word,vocab,freq):
for pos in range(len(word)):
if not word[pos].isalpha(): return
vocab[word[:pos+1]] += freq
def add_to_vocab_multi_reverse(word,vocab,postmaxlen,minrootlen,freq):
""" Adds one tuple-word to tree structure - one node per symbol
word end in the tree characterized by node[0]>0
"""
pos = 0
while pos<len(word)-minrootlen and pos<postmaxlen:
vocab[word[:pos+1]] += freq
pos+=1
def add_to_codetree_terminal(tword,codetree,freq=1):
""" Adds word to tree structure - one node per symbol
word end in the tree characterized by node[0]>0
"""
for pos in range(len(tword)):
s = tword[pos]
if s not in codetree:
codetree[s] = [0,{}]
if pos==len(tword)-1:
codetree[s][0] = freq
else:
codetree = codetree[s][1]
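# Shape illustration: add_to_codetree_terminal("cat", tree, 3) on an empty tree yields
#   {'c': [0, {'a': [0, {'t': [3, {}]}]}]}
# i.e. one node per symbol, with the frequency stored only on the terminal node.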
def read_codetree(datafile,reverse=False):
codetree = {}
for line in datafile:
item = line.split()
word = item[0]
if reverse: word=word[::-1]
if len(item)>1:
num = int(item[1])
else:
num = 1
add_to_codetree_terminal(word,codetree,num)
return codetree
def read_vocabulary(vocabfile,reverse=False):
vocab = Counter()
rcounter = 999999999
for line in vocabfile:
item = line.split()
word = item[0]
if reverse: word=word[::-1]
if len(item)>1:
num = int(item[1])
else:
num = rcounter
rcounter-=1
vocab[word] = num
return vocab
def read_nent(vocabfile):
vocab = {}
for line in vocabfile:
item = line.split()
num = int(item[0])
word = item[1]
vocab[num] = word
return vocab
def read_nent_int(vocabfile):
vocab = {}
for line in vocabfile:
item = line.split()
num = int(item[0])
word = int(item[1])
vocab[num] = word
return vocab
def extract_vocabulary(infile):
vocab = Counter()
for line in infile:
for word in line.split():
# word = processMeta(word)
word = processUlower(word)
vocab[word] += 1
return vocab
def save_vocabulary(vocabfile,vocab,order=False,reverseorder=True,alphaonly=False,maxcount=None,vocabout=None):
cnt = 0
if order:
for item in sorted(vocab.items(),key=lambda x: x[1],reverse=reverseorder):
if maxcount is not None and cnt==maxcount: return
if not alphaonly or item[0].isalpha():
vocabfile.write(u"{0} {1}\n".format(item[0],item[1]))
if vocabout is not None: vocabout[item[0]]=item[1]
cnt+=1
else:
for item in vocab.items():
if maxcount is not None and cnt==maxcount: return
if not alphaonly or item[0].isalpha():
vocabfile.write(u"{0} {1}\n".format(item[0],item[1]))
if vocabout is not None: vocabout[item[0]]=item[1]
cnt+=1
def register_subwords(infile,premaxlen,postmaxlen,minrootlen,isvocabin=False,vocabout=None,rawprefixfile=None,rawpostfixfile=None,loadrawfile=False,freqnotrank=False):
rawprecodetree = {}
rawpostcodetree = {}
if isvocabin:
vocab = read_vocabulary(infile)
else:
vocab = extract_vocabulary(infile)
if loadrawfile:
rawprevocab = read_vocabulary(rawprefixfile)
rawpostvocab = read_vocabulary(rawpostfixfile)
else:
rawprevocab = Counter()
rawpostvocab = Counter()
for item in vocab.items():
word = item[0]
freq = item[1]
preword =word[:premaxlen]
add_to_vocab_multi(preword,rawprevocab,freq)
add_to_vocab_multi_reverse(word[::-1],rawpostvocab,postmaxlen,minrootlen,freq)
# funique = len(rawprevocab)
# runique = len(rawpostvocab)
prevfreq = -1
num = 0
for item in sorted(rawprevocab.items(),key=lambda x: x[1],reverse=True):
word = item[0]
freq = item[1]
if freqnotrank:
num = freq
else:
if freq!=prevfreq: num+=1
add_to_codetree_terminal(word,rawprecodetree,num)
if not loadrawfile and rawprefixfile:
rawprefixfile.write(" {0} {1}\n".format(word,num))
prevfreq = freq
prevfreq = -1
num = 0
for item in sorted(rawpostvocab.items(),key=lambda x: x[1],reverse=True):
word = item[0]
freq = item[1]
if freqnotrank:
num = freq
else:
if freq!=prevfreq: num+=1
# if freq!=prevfreq: num+=1 # tmp not used
add_to_codetree_terminal(word,rawpostcodetree,num)
if not loadrawfile and rawpostfixfile:
rawpostfixfile.write(" {0} {1}\n".format(word,num))
prevfreq = freq
# print("vocab",len(vocab))
# print("funique",funique)
# print("runique",runique)
if vocabout is not None:
save_vocabulary(vocabout,vocab,True)
return rawprecodetree,rawpostcodetree,vocab,rawprevocab
def print_subwords(infile,codefile,n,reverse=False):
ngrams = Counter()
vocab = extract_vocabulary(infile)
# register (left or right) n-grams
for word in vocab.keys():
if reverse:
if len(word)>=n+1: # right without first
ngrams[word[-n:]] += 1
else:
if len(word)>=n: # left
ngrams[word[:n]] += 1
# count and print (left or right) n-grams
print(len(ngrams))
for item in sorted(ngrams.items(),key=lambda x: x[1],reverse=True):
codefile.write("{0} {1}\n".format(item[0],item[1]))
def add_subwords(codetree,tword,pos,subgraph):
pos0 = pos
while pos < len(tword):
s = tword[pos]
if s not in codetree:
return
else:
if codetree[s][0]>0:
posnext = pos + 1
if posnext not in subgraph[pos0]:
subgraph[pos0][posnext] = 0
subgraph[pos0][posnext] = max(subgraph[pos0][posnext],codetree[s][0])
pos += 1
codetree = codetree[s][1]
def add_subwords_reverse(codetree,tword,pos,subgraph):
posright = pos
while pos >= 2:
s = tword[pos-1]
if s not in codetree:
return
else:
if codetree[s][0]>0:
posleft = pos - 1
if posright not in subgraph[posleft]:
subgraph[posleft][posright] = 0
subgraph[posleft][posright] = max(subgraph[posleft][posright],codetree[s][0])
pos -= 1
codetree = codetree[s][1]
def create_subgraph(precodetree,postcodetree,tword):
subgraph = [{} for i in range(len(tword))]
for pos in range(0,len(subgraph)-1):
add_subwords(precodetree,tword,pos,subgraph)
# for pos in range(len(subgraph),0,-1):
# add_subwords_reverse(postcodetree,tword,pos,subgraph)
add_subwords_reverse(postcodetree,tword,len(subgraph),subgraph)
return subgraph
def analyze_subgraph(subgraph,word,track="",pos=0,freq="",leng=0):
if pos==len(word):
if leng<=3:
print(track,freq)
else:
if len(track)>0:
track += "-"
freq+=" "
for nextpos in subgraph[pos]:
nextfreq = subgraph[pos][nextpos]
analyze_subgraph(subgraph,word,track+word[pos:nextpos],nextpos,freq+str(nextfreq),leng+1)
# === Generic heuristics BEGIN
nonprefixes_dict = {}
vowels=u"aāeēiīoōuūy";
vowdict={}
for v in vowels:
vowdict[v]=1
def containsvowel(word):
for s in word:
if s in vowdict: return True
return False
def is_good_part_generic(part,word=''):
return (
part.isalpha()
and part.islower()
and containsvowel(part)
)
# === Generic heuristics END
# === English specific heuristics BEGIN
nonprefixes_en = ["non","un","im"]
nonprefixes_dict_en={}
for v in nonprefixes_en:
nonprefixes_dict_en[v]=1
def is_good_root_en(part,word):
return len(part)>2 and is_good_part_generic(part)
def is_good_postfix_en(part):
if len(part)<=2:
return is_good_ending_en(part) or part in ["ly"]
elif len(part)>5:
return False
else:
if part in ["ment","ling","ness"]: return True
if not is_good_part_generic(part):
return False
if part[0] not in vowdict:
return False
return True
def is_good_ending_en(part):
return part in ["s","ed","e","y","es","er","ies"]
def is_good_ending_ne_en(part):
return False
def is_good_prefix_en(part):
return is_good_part_generic(part)
# === English specific heuristics END
# === Latvian specific heuristics BEGIN
nonprefixes_lv = ["ne"]
nonprefixes_dict_lv={}
for v in nonprefixes_lv:
nonprefixes_dict_lv[v]=1
vowels_not_o = u"aāeēiīōuūy"
vowdict_not_o={}
for v in vowels_not_o:
vowdict_not_o[v]=1
badrootstart_lv = "cčjlļmnņr"
badrootstart_dict_lv={}
for v in badrootstart_lv:
badrootstart_dict_lv[v]=1
badrootend_lv = ["šs"]
badrootend_dict_lv={}
for v in badrootend_lv:
badrootend_dict_lv[v]=1
def is_good_root_lv(root,word):
# if len(root)<=2: return False
if root[-1] in vowdict_not_o: return False
if root[-1] == "o" and len(root)<4: return False
if root[-2] in ['p','t'] and root[-1] not in ['l','r','j','n','t','s','o']: return False
if len(root)==len(word) and len(root)<4: return False
if root[1] not in vowdict and root[0] in badrootstart_dict_lv: return False
if root[-2:] in badrootend_dict_lv: return False
return is_good_part_generic(root)
def is_good_postfix_lv(part):
if len(part)==1:
if part in vowdict: return True
elif part in ["t","s","š"]: return True
else: return False
else:
if not is_good_part_generic(part):
return False
if part[-1] not in vowdict and part[-1] not in ["m","s","š","t"]: return False
if len(part)==2:
# postfixes of length 2 should contain vowel at position 0 (LATVIAN?)
if part[0] not in vowdict or part[-1]=="o":
return False
else: # postfix length 3 or more
if part=="sies": return True
if part=="ties": return True
if part[:3]=="šan": return True
if part[:3]=="nīc": return True
if part[:4]=="niek": return True
if part[:4]=="niec": return True
if part[:4]=="nieč": return True
if not containsvowel(part[0]):
return False
return True
def is_good_ending_lv(part):
""" Is ending in Latvian, assuming it is good postfix
"""
if len(part)>4: return False
elif len(part)==4:
if part in ["sies","ties"]: return True
elif len(part)==3:
if part in ["iem","ies","ais"]: return True
elif len(part)==2:
if part[-1]=="š": return False
elif part[0] in vowdict and part[1] in vowdict:
if part in ["ai","ie","ei"]: return True
else: return False
elif part in ["om","ūs","et","ut","ūt"]: return False
else: return True
else: # length = 1
return True
return False
def is_good_ending_ne_lv(part):
if len(part)>3: return False
elif len(part)==3:
if part in ["iem","ais"]: return True
        else: return False
elif len(part)==2:
if part[-1] in["š","t"]: return False
elif part[0] in vowdict and part[1] in vowdict:
if part in ["ai","ie","ei"]: return True
else: return False
elif part in ["om","ūs"]: return False
else: return True
else: # length = 1
if part in ["t","o","y"]: return False
else: return True
def is_good_prefix_lv(part):
return is_good_part_generic(part)
# === Latvian specific heuristics END
def add_heuristics(lang=''):
lang = lang.lower()
global is_good_prefix
global is_good_root
global is_good_postfix
global is_good_ending
global is_good_ending_ne
global nonprefixes_dict
if lang=='lv':
is_good_prefix = is_good_prefix_lv
is_good_root = is_good_root_lv
is_good_postfix = is_good_postfix_lv
is_good_ending = is_good_ending_lv
is_good_ending_ne = is_good_ending_ne_lv
nonprefixes_dict = nonprefixes_dict_lv
elif lang=='en':
is_good_prefix = is_good_prefix_en
is_good_root = is_good_root_en
is_good_postfix = is_good_postfix_en
is_good_ending = is_good_ending_en
is_good_ending_ne = is_good_ending_ne_en
nonprefixes_dict = nonprefixes_dict_en
else:
lang = 'unspecified'
is_good_prefix = is_good_prefix_en
is_good_root = is_good_root_en
is_good_postfix = is_good_postfix_en
is_good_ending = is_good_ending_en
is_good_ending_ne = is_good_ending_ne_en
nonprefixes_dict = nonprefixes_dict_en
sys.stderr.write('Heuristics: {0}\n'.format(lang))
def analyze_prefixes(prefsource,rootsource,vocab,rawprevocab,preffile=None,loadfile=False):
""" Collect candidate prefixes
"""
prefixes = Counter()
if loadfile:
if preffile is not None:
for line in preffile:
entry = line.split()
prefixes[entry[0]] = int(entry[1])
else:
# TEST=0
# CNT=0
for prefix in goodprefixes:
prefixes[prefix] = goodprefixes[prefix]
preflen1 = minpreflen
preflen2 = 4
rootlen1 = 4
rootlen2 = 7
for item in vocab.items():
word = item[0]
# freq = item[1]
preftree = prefsource
for p in range(1,preflen2+1):
if p+rootlen1>len(word): break
ps = word[p-1]
if ps not in preftree: break
elif preftree[ps][0]>0 and p>=preflen1:
prefix = word[:p]
if is_good_prefix(prefix) and search_codetree(prefix,badprefixes)==0:
roottree = rootsource
for r in range(1,rootlen2+1):
pr = p+r
if pr>len(word): break
prs = word[pr-1]
if prs not in roottree: break
# elif not freqnotrank: # ranking
# if prefixes[prefix]==0 or roottree[prs][0]<prefixes[prefix]:
# prefixes[prefix]=roottree[prs][0]
# elif roottree[prs][0]>0 and r>=rootlen1 and is_good_root(word[p:pr],word): # frequence
# prefixes[prefix]+=roottree[prs][0]
root=word[p:pr]
if r>=rootlen1 and roottree[prs][0]>0 and is_good_root(root,word):
prefixes[prefix]+=rawprevocab[root]
roottree = roottree[prs][1]
preftree = preftree[ps][1]
if preffile is not None:
for item in sorted(prefixes.items(),key=lambda x: x[1],reverse=True):
preffile.write(" {0} {1}\n".format(item[0],item[1]))
# print("CNT",CNT,TEST)
return prefixes
longenoughpplen = 5
ppregbase = 3
def analyze_postfixes(rootsource,postsource,vocab,rawprevocab,postfile=None,sufffile=None,endfile=None,loadfile=False):
""" Collect candidate postfixes, suffixes, endings
"""
postfixes = Counter()
suffixes = Counter()
endings = Counter()
if loadfile:
if postfile is not None:
for line in postfile:
entry = line.split()
postfixes[entry[0]] = int(entry[1])
if sufffile is not None:
for line in sufffile:
entry = line.split()
suffixes[entry[0]] = int(entry[1])
if endfile is not None:
for line in endfile:
entry = line.split()
endings[entry[0]] = int(entry[1])
else:
for postfix in goodpostfixes:
postfixes[postfix] = goodpostfixes[postfix]
postlen2 = 7
rootlen1 = 4
rootlen2 = 7
for item in vocab.items():
word = item[0]
# freq = item[1]
posttree = postsource
for p in range(1,postlen2+1):
if p+rootlen1>len(word): break
ps = word[-p]
if ps not in posttree: break
elif posttree[ps][0]>0:
postfix = word[-p:]
if is_good_postfix(postfix) and search_codetree(postfix,badpostfixes)==0:
for rootlen in range(rootlen1,1+min(rootlen2,len(word)-p)):
roottree = rootsource
for r in range(rootlen,0,-1):
pr = p+r
prs = word[-pr]
if prs not in roottree: break
# elif not freqnotrank: # ranking
# if postfixes[postfix]==0 or roottree[prs][0]<postfixes[postfix]:
# postfixes[postfix]=roottree[prs][0]
# if is_good_ending(postfix):
# if endings[postfix]==0 or roottree[prs][0]<endings[postfix]:
# endings[postfix]+=roottree[prs][0]
# elif roottree[prs][0]>0 and r==1 and is_good_root(word[-p-rootlen:-p],word): # frequence
# postfixes[postfix]+=roottree[prs][0]
# if is_good_ending(postfix):
# endings[postfix]+=roottree[prs][0]
root=word[-p-rootlen:-p]
if r==1 and roottree[prs][0]>0 and is_good_root(root,word): # frequence
postfixes[postfix]+=rawprevocab[root]
if is_good_ending(postfix):
endings[postfix]+=rawprevocab[root]
roottree = roottree[prs][1]
posttree = posttree[ps][1]
minsufflen = 1
# extract suffixes
for postfix in postfixes:
for pos in range(minsufflen,len(postfix)-1):
suffix=postfix[:pos]
ending=postfix[pos:]
if endings[ending]>0:
suffixes[suffix]+=postfixes[postfix]
# regularize weight
for postfix in postfixes:
if len(postfix)<longenoughpplen: # longer ppfixes are better
expo = longenoughpplen - len(postfix)
postfixes[postfix] = postfixes[postfix] // round(ppregbase**expo)
for suffix in suffixes:
if len(suffix)<longenoughpplen: # longer ppfixes are better
expo = longenoughpplen - len(suffix)
suffixes[suffix] = suffixes[suffix] // round(ppregbase**expo)
# print to files
if postfile is not None:
for item in sorted(postfixes.items(),key=lambda x: x[1],reverse=True):
postfile.write(" {0} {1}\n".format(item[0],item[1]))
if sufffile is not None:
for item in sorted(suffixes.items(),key=lambda x: x[1],reverse=True):
sufffile.write(" {0} {1}\n".format(item[0],item[1]))
if endfile is not None:
for item in sorted(endings.items(),key=lambda x: x[1],reverse=True):
endfile.write(" {0} {1}\n".format(item[0],item[1]))
return postfixes,suffixes,endings
def explore_codetree_plus(codetree,tword,wordpos0=0,emptysubword=False):
store={}
if emptysubword:
store[0]=0 # for empty subword
wlen = len(tword)
for wordpos in range(wordpos0,wlen):
s = tword[wordpos]
if s not in codetree:
break
val = codetree[s][0]
if val>0:
pos = wordpos-wordpos0+1
store[pos]=val
codetree = codetree[s][1]
return store
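# Illustration: the returned dict maps matched subword length to its stored rank; e.g.
# if the tree holds "dar" and "darb", explore_codetree_plus(tree, "darbiem") returns
#   {3: rank("dar"), 4: rank("darb")}   (plus {0: 0} when emptysubword=True)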
def extend_subword_matrix(dest,src,addempty=False,dstartpos=0):
for dpos in range(len(dest)):
if dpos>=dstartpos:
ddict=deepcopy(dest[dpos])
for ditem in ddict.items():
dlen=ditem[0]
drank=ditem[1]
spos = dlen+dpos
if spos<len(src):
for sitem in src[spos].items():
slen=sitem[0]
srank=sitem[1]
rank=max(drank,srank)
dslen=dlen+slen
if dslen not in dest[dpos]:
dest[dpos][dslen]=rank
elif rank<dest[dpos][dslen]:
dest[dpos][dslen]=rank
if addempty:
dest[dpos][0]=0
def merge_subword_matrix(dest,src,addempty=False,dstartpos=0):
for dpos in range(len(dest)):
if dpos>=dstartpos:
for sitem in src[dpos].items():
slen=sitem[0]
srank=sitem[1]
if slen not in dest[dpos]:
dest[dpos][slen]=srank
elif srank<dest[dpos][slen]:
dest[dpos][slen]=srank
if addempty:
dest[dpos][0]=0
def reverse_subword_matrix(mx,emptysubword=True):
mlen = len(mx)
if emptysubword:
rmx = [{0:0} for i in range(mlen)]
else:
rmx = [{} for i in range(mlen)]
for pos in range(mlen):
seq = mx[pos]
for seqel in seq.items():
sublen=seqel[0]
subrate=seqel[1]
lastpos = pos+sublen-1
pos2 = mlen-lastpos-1
rmx[pos2][sublen]=subrate
# rmx[pos2].append((sublen,subrate))
return rmx
def build_codetree_best(ppvocab,rate,reverse,datafile=None,loadfile=False):
""" Collect best prefixes (reverse=False) or postfixes (reverse=True)
"""
if loadfile:
codetree = read_codetree(datafile,reverse)
else:
codetree = {}
icount = len(ppvocab.items())
if rate>1.0: bestcount=int(rate)
else: bestcount = int(round(icount * rate))
prevfreq = 0
numx = 0
num = 0
for item in sorted(ppvocab.items(),key=lambda x: x[1],reverse=True):
if numx>=bestcount: break
freq = int(item[1])
if freq!=prevfreq: num+=1
word = item[0]
numout = num
if datafile is not None:
datafile.write(u" {0} {1}\n".format(word,numout))
if reverse: word=word[::-1]
add_to_codetree_terminal(word,codetree,numout)
prevfreq = freq
numx += 1
return codetree
def extract_root(precodetree,bestprecodetree,bestpostcodetree,word,minrootlen,bestcount):
# create candidate list of prefixes, with a prefix denoted as its length
prestore = explore_codetree_plus(bestprecodetree,word,0,True)
# create candidate list of postfixes, with a postfix denoted as its length
poststore = explore_codetree_plus(bestpostcodetree,word[::-1],0,True)
roots = Counter()
wlen = len(word)
for xpos in prestore.items(): # all available prefixes
pos = xpos[0]
for ypos in poststore.items(): # all available postfixes
pos2rev = ypos[0]
if pos+pos2rev+minrootlen<=wlen:
pos2 = wlen-pos2rev
root=word[pos:pos2]
# postfix = word[pos2:]
if (search_codetree(root,precodetree)>0
and is_good_root(root,word)
and search_codetree(root,badroots)==0
):
prerank=xpos[1]
# if pos>0:
# if verbose:
# print("{1}({0})".format(prerank,word[:pos]),end=" ")
rootrank = search_codetree(root,precodetree)
# if verbose:
# print("{1}[{0}]".format(rootrank,root),end=" ")
postrank=ypos[1]
# if pos2rev>0:
# if verbose:
# print("{1}({0})".format(postrank,postfix),end=" ")
# if freqnotrank:
# rootrate = rootrank-prerank-postrank
# else:
rootrate = prerank+rootrank+postrank
# rootrate = rootrank #+prerank+postrank
roots[root]=rootrate
# if verbose:
# print("+{0}".format(rootrate))
minroots=[]
if len(roots)>0:
cnt=0
for root in sorted(roots,key=lambda x: roots[x]):
# if root=="eirop":
# print("rootx",roots[root])
minroots.append(root)
cnt+=1
if cnt>=bestcount: break
# minroot=min(roots,key=lambda x: roots[x])
return minroots #,roots[minroot]
longenoughrootlen = 5
rootregbase = 4
minrootfreq = 2
def collect_roots(vocab,rawprecodetree,bestprecodetree,bestpostcodetree,datafile=None,loadfile=False,bestcount=1):
if loadfile:
roottree = read_codetree(datafile)
else:
roottree = {}
roots = Counter()
for root in goodroots:
roots[root] = goodroots[root]
for word in vocab:
minroots = extract_root(rawprecodetree,bestprecodetree,bestpostcodetree,word,minrootlen,bestcount)
cnt=0
for root in minroots:
freq = search_codetree(word,rawprecodetree)
if freq>0:
roots[root] += vocab[word]
cnt+=1
for root in roots:
if len(root)<longenoughrootlen: # longer roots are better
expo = longenoughrootlen - len(root)
roots[root] = roots[root] // round(rootregbase**expo)
prevfreq = 0
numx = 0
num = 0
for root in sorted(roots,key=lambda x: roots[x], reverse=True):
freq = roots[root]
if freq<minrootfreq: break
if freq!=prevfreq: num+=1
if datafile:
datafile.write(u" {0} {1}\n".format(root,num))
add_to_codetree_terminal(root,roottree,num)
numx += 1
prevfreq = freq
# print("roots",numx)
return roottree
rootfactor = 1
rootblockweight = 1000000
# status: 0-prefix, 1-root, 2-postfix, 3-endings
def segment_word0(mxx,word,pos,step,status,track,trackweight,trackstore,trackweightstore):
if pos>=len(mxx[status]): return # finished after prefix
# print(pos,step,status,len(track))
# print("SPV",status,pos,mxx[status][pos])
wordlen=len(word)
for candidate in mxx[status][pos].items():
pos2 = pos + candidate[0]
if status==1: # root
trackweight2 = trackweight + candidate[1] * rootfactor
else:
trackweight2 = trackweight + candidate[1]
if step==len(track):
track.append([pos2,candidate[1]])
else:
track[step] = [pos2,candidate[1]]
if status<=1: # prefix or root
segment_word0(mxx,word,pos2,step+1,(status+1)%3,track,trackweight2,trackstore,trackweightstore)
else: # post
if pos2==wordlen:
tracktostore = list(track[:step+1])
trackstore.append(tracktostore)
trackweightstore.append(trackweight2+len(tracktostore)*rootblockweight//3)
# print("added",tracktostore)
else:
segment_word0(mxx,word,pos2,step+1,(status+1)%3,track,trackweight2,trackstore,trackweightstore)
maxgerootlen = 9
nent_placeholder_marker = "@###"
maxnenums = 1
def obtain_segment_track(bestprecodetree,roottree,
bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,word,
generateroots=True,extramode=0,verbose=False):
""" Collect list of segmentation tracks, each in form (prefix, root, postfix)+ and compute weights (the less, the better)
and return the best one
"""
if extramode!=1 and extramode!=4 and word in bestvocab or len (word)>20:
return None # None indicates word from bestvocab later (in segment_word)
prematrix = []
for pos in range(len(word)):
prematrix.append(
explore_codetree_plus(bestprecodetree,word,pos)
)
pre2 = deepcopy(prematrix)
extend_subword_matrix(prematrix,pre2,True)
if verbose:
sys.stdout.write("PRE\n")
for pos in range(len(prematrix)):
sys.stdout.write("{0} {1} {2}\n".format(pos,word[pos],prematrix[pos]))
# print(pos,word[pos],prematrix[pos])
rootmatrix = []
for pos in range(len(word)):
rootmatrix.append(
explore_codetree_plus(roottree,word,pos)
)
if verbose:
sys.stdout.write("ROOT\n")
for pos in range(len(rootmatrix)):
sys.stdout.write("{0} {1} {2}\n".format(pos,word[pos],rootmatrix[pos]))
endmatrix0 = []
postmatrix0 = []
suffmatrix0 = []
for pos in range(len(word)):
endmatrix0.append(
explore_codetree_plus(bestendcodetree,word[::-1],pos)
)
suffmatrix0.append(
explore_codetree_plus(bestsuffcodetree,word[::-1],pos)
)
if verbose:
sys.stdout.write("POST00\n")
for pos in range(len(postmatrix0)):
sys.stdout.write("{0} {1} {2}\n".format(pos,word[::-1][pos],postmatrix0[pos]))
sys.stdout.write("SUFF00\n")
for pos in range(len(suffmatrix0)):
sys.stdout.write("{0} {1} {2}\n".format(pos,word[::-1][pos],suffmatrix0[pos]))
if extramode == 1 or extramode == 2 or extramode == 3 or extramode == 4:
lastxend0 = deepcopy(endmatrix0[0])
# print('lastxend',lastxend,word)
# if extramode == 1 or extramode == 2 or extramode == 3 or extramode == 4:
# extend_subword_matrix(endmatrix0,suffmatrix0,False,1)
# merge_subword_matrix(endmatrix0,suffmatrix0,False,1)
# else:
# extend_subword_matrix(endmatrix0,suffmatrix0,False)
# merge_subword_matrix(endmatrix0,suffmatrix0,False)
extend_subword_matrix(endmatrix0,suffmatrix0,False)
merge_subword_matrix(endmatrix0,suffmatrix0,False)
postmatrix0 = endmatrix0
# print("POSTM",postmatrix0)
if verbose:
sys.stdout.write("POST0\n")
for pos in range(len(postmatrix0)):
sys.stdout.write("{0} {1} {2}\n".format(pos,word[::-1][pos],postmatrix0[pos]))
postmatrix = reverse_subword_matrix(postmatrix0,True)
postmatrix.append({0:0})
if verbose:
sys.stdout.write("POST\n")
for pos in range(len(postmatrix)):
wordplus=word+" "
sys.stdout.write("{0} {1} {2}\n".format(pos,wordplus[pos],postmatrix[pos]))
track = [[0,0] for i in range(len(word)*2)]
trackweight = 0
trackstore = []
trackweightstore = []
# if extramode != 2:
mxx = [prematrix,rootmatrix,postmatrix]
segment_word0(mxx,word,pos=0,step=0,status=0,track=track,
trackweight=trackweight,trackstore=trackstore,trackweightstore=trackweightstore)
# extramode=1: named-entity processing in training phase - stem except for best words
# extramode=2: named-entity processing in translation phase - stem
# extramode=3: named-entity processing in translation phase - placeholders
# extramode=4: named-entity processing in training phase - inserting placeholders (except for best words)
if extramode == 1 or extramode == 2 or extramode == 3 or extramode == 4:
lastx = postmatrix0[0]
lastxend = {}
if len(lastxend0) > 0:
for ee in lastxend0:
# print(ee,word[-ee:],is_good_ending_ne(word[-ee:]))
if is_good_ending_ne(word[-ee:]):
lastxend[ee] = lastxend0[ee]
# print("LASTX",lastx,extramode)
# print('tracktore',trackstore)
# print('trackweightstore',trackweightstore,word)
if len(lastx)>0:
bestlist = sorted(lastx.keys(),key=lambda x: lastx[x])
for i in range(len(bestlist)):
best=bestlist[i]
bestweight=lastx[best]
if len(word[:-best])>=minrootlen and is_good_root(word[:-best],word):
track=[[0,0],[len(word)-best,bestweight],[len(word),0]]
trackstore.append(track)
trackweightstore.append(bestweight)
break
# print('len(trackweightstore)',len(trackweightstore))
if extramode == 1 or extramode == 2: # stem
if len(trackweightstore)==0 or len(lastxend)==0:
# print('nostore',word)
besttrack2 = [[(i+2)//3,0] for i in range(len(word)*3)]
else:
# print("lastxend",lastxend,word)
besttrack = trackstore[argmin(trackweightstore)]
rootendpos = besttrack[1][0]
besttrack2 = [[(i+2)//3,0] for i in range(rootendpos*3)]
elif extramode == 3 or extramode == 4: # ending
phlen = len(nent_placeholder_marker)
if maxnenums == 1: phlenplus = phlen
else: phlenplus = phlen + 1
if len(trackweightstore)==0 or len(lastxend)==0:
besttrack2 = [[0,0],[phlen,0],[phlen,0],[phlen,0],[phlenplus,0],[phlenplus,0]]
else:
besttrack = trackstore[argmin(trackweightstore)]
rootendpos = besttrack[1][0]
postfixlen = len(word)-rootendpos
if postfixlen==0:
besttrack2 = [[0,0],[phlen,0],[phlen,0],[phlen,0],[phlenplus,0],[phlenplus,0]]
else:
besttrack2 = [[0,0],[phlen,0],[phlen,0],[phlen,0],[phlenplus,0],[phlenplus,0],
[phlenplus,0],[phlenplus+postfixlen,0],[phlenplus+postfixlen,0]]
# print("##Besttrack3",word,besttrack2)
return besttrack2
# unable to find track from available roots
elif len(trackweightstore)==0:
if generateroots:
for pos in range(len(word)):
for candidatelen in range(2,min(maxgerootlen,len(word)-pos)+1):
candidateroot=word[pos:pos+candidatelen]
if is_good_root(candidateroot,word):
rootrank = search_codetree(candidateroot,roottree)
if rootrank>0:
rootmatrix[pos][candidatelen]=rootrank
else:
                            # rootblockweight*(candidatelen+1): generated roots with more letters get a worse rank
                            # candidatelen+1: having more generated roots gets a worse rank
                            # (one generated root of length 2n is better than two of length n each)
rootmatrix[pos][candidatelen]=rootblockweight*(candidatelen+1)
if verbose:
sys.stdout.write("ROOT2\n")
for pos in range(len(rootmatrix)):
sys.stdout.write("{0} {1} {2}\n".format(pos,word[pos],rootmatrix[pos]))
segment_word0(mxx,word,pos=0,step=0,status=0,track=track,
trackweight=trackweight,trackstore=trackstore,trackweightstore=trackweightstore)
else: # do not generate roots, take only postfix
lastx = postmatrix0[0]
if len(lastx)>0:
bestlist = sorted(lastx.keys(),key=lambda x: lastx[x])
for i in range(len(bestlist)):
best=bestlist[i]
bestweight=lastx[best]
if len(word[:-best])>=minrootlen and is_good_root(word[:-best],word):
track=[[0,0],[len(word)-best,bestweight],[len(word),0]]
trackstore.append(track)
trackweightstore.append(bestweight)
break
if verbose:
num=0
for t in trackstore:
sys.stdout.write("{0} {1} {2}\n".format(num,trackweightstore[num],t))
num+=1
sys.stdout.write("{0}\n".format(trackweightstore))
if len(trackweightstore)==0: return []
else:
if verbose:
sys.stdout.write("{0}\n".format(argmin(trackweightstore)))
sys.stdout.write("{0}\n".format(trackstore[argmin(trackweightstore)]))
return trackstore[argmin(trackweightstore)]
def mark_alpha_segmentation(roottree,bestvocab,track,word,marker1,mode,optmode=1):
""" Generate segmented word given track
"""
if len(track)==0: # no track built
if mode==3:
segmentlist=[word+marker1]
else:
segmentlist=[word]
else:
wordpos=0
segmentlist = []
status = 0 # 0-prefix, 1-root, 2-postfix
t = 0
while t < len(track):
# trackpos = track[t]
wordposx = track[t][0]
if status==0: # PRP optimization
wordposy = track[t+1][0]
wordposz = track[t+2][0]
if optmode==0:
pass
elif optmode==1:
segmenty = word[wordpos:wordposy] # prefix+root
segmentxyz = word[wordpos:wordposz] # p+r+p
segmentyz = word[wordposx:wordposz] # r+p
if segmentxyz in bestvocab and word[wordpos:wordposx] not in nonprefixes_dict:
# concatenate prefix+root+postfix, reduces amount of segments
if t+3<len(track) or mode>0:
track[t][0]=wordpos
track[t+1][0]=wordposz
track[t+2][0]=wordposz
elif wordposz>wordposy and segmentyz in bestvocab:
# concatenate root+postfix, reduces amount of segments
if t+3<len(track) or mode>0:
track[t+1][0]=wordposz
elif wordposx>wordpos and search_codetree(segmenty,roottree)>0: # prefix+root is among roots
track[t][0]=wordpos
# track[t+1][0]=wordposy # prefix added to root
elif track[t+1][1]>rootblockweight and t+3<len(track):
# generated roots (not in last position) concatenated
# with its prefix and postfix, reduces amount of segments
if word[wordpos:wordposx] not in nonprefixes_dict:
track[t][0]=wordpos
track[t+1][0]=wordposz
# track[t+2][0]=wordposz
elif optmode==2:
if t+3<len(track):
# roots (NOT in last prp position) concatenated
# with its prefix and postfix, reduces amount of segments
if word[wordpos:wordposx] not in nonprefixes_dict:
track[t][0]=wordpos
track[t+1][0]=wordposz
# track[t+2][0]=wordposz
else:
# roots (in last prp position) concatenated
# with its prefix (not postfix), reduces amount of segments
# wordposz = track[t+2][0]
if word[wordpos:wordposx] not in nonprefixes_dict:
track[t][0]=wordpos
# track[t+1][0]=wordposy
# track[t+2][0]=wordposz
wordpos2 = track[t][0]
if wordpos2>wordpos:
segment = word[wordpos:wordpos2]
if mode==0:
segmentlist.append(segment)
elif mode==1:
if status==0: #prefix marked
segmentlist.append(segment+marker1)
elif status==1:
segmentlist.append(segment)
wordpos3 = track[t+1][0]
isprelast = (t==len(track)-2)
# if the root is not the last root and no postfix follows
if not isprelast and wordpos3-wordpos2==0:
segmentlist.append(marker1)
elif status==2:
segmentlist.append(marker1+segment)
islast = (t==len(track)-1)
if not islast:
segmentlist.append(marker1)
elif mode==2:
if status==0: #prefix marked
segmentlist.append(segment+marker1)
elif status==1:
segmentlist.append(segment)
elif status==2:
segmentlist.append(marker1+segment)
elif mode==3:
segmentlist.append(segment+marker1)
wordpos = wordpos2
status = (status+1)%3
t+=1
return segmentlist
def create_nent_placeholder(nenum):
if maxnenums == 1:
return nent_placeholder_marker
else:
return nent_placeholder_marker + str(nenum)
def segment_word(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
word,marker1,marker2,mode,generateroots=False,optmode=1,extramode=0,nenum=0,verbose=False):
""" Preprocess string before segmentation into list of alpha(letters) and non-alpha parts,
alpha parts are then prp segmented, and then segmented word put together with segmentation marks,
considering also uppercase/lowercase processing
mode=0: marker1 denotes end of word
mode=1: marker1 denotes prefix/postfix and link
mode=2: marker1 denotes prefix/postfix and begin/end
mode=3: marker1 denotes link to next segment (like in BPE)
mode+100: no uppercase processing (marker2 denotes uppercase for the first letter of the following segment)
optmode: optimization mode used in 'mark_alpha_segmentation'
extramode=0: no named-entity processing
extramode=1: named-entity processing in training phase - stem except for best words
extramode=2: named-entity processing in translation phase - stem
extramode=3: named-entity processing in translation phase - placeholders
extramode=4: named-entity processing in training phase - inserting placeholders (except for best words)
"""
segmentlist = []
pos0 = 0
alpha = False
prevalpha = False
endsbyroot = False
AlphaProcessed = True
uppercasemode = mode<100
mode %= 100
tracknone=False
if extramode!=1 and extramode!=4 and word in bestvocab:
segmentlist.append(word)
else:
# if extramode==4:
# word = "abc."
for pos in range(len(word)+1): # symbol by symbol processing; takes one position after end to process last segment
if pos<len(word):
alpha = word[pos].isalpha()
if pos==len(word) or pos > 0 and alpha != prevalpha: # boundary of alpha/nonalpha parts
subword=word[pos0:pos] # original case
subwordplus=subword # case optionally set to lower
if prevalpha: # process alpha part
if pos0>0: # if alpha part follows non-alpha part, the non-alpha part marked
if mode==0: segmentlist[-1] += marker1
elif mode==1: segmentlist.append(marker1)
AlphaProcessed = False
if isUlower2(subword):
subwordplus = subwordplus[0].lower() + subwordplus[1:]
if uppercasemode:
segmentlist.append(marker2)
subword = subwordplus
track = obtain_segment_track(bestprecodetree,roottree,
bestsuffcodetree,bestpostcodetree,bestendcodetree,
bestvocab,subwordplus,generateroots,extramode,verbose)
if track is None:
track = []
tracknone = True
if verbose:
sys.stdout.write("TRACK {0}\n".format(track))
if mode==0:
if len(track)==0: endsbyroot=False
elif track[-1][0]==track[-2][0]: # empty postfixpart
# only root of length <=3 (thus considered to be small word without marker as separate segment)
# (empty prefix and root length <=5)
if len(track)==3 and track[-3][0]==0 and track[-2][0]-track[-3][0]<=5:
endsbyroot=False
else:
endsbyroot=True
else: endsbyroot=False
if extramode==3 or extramode==4:
if len(track)<=6:
subword = create_nent_placeholder(nenum)
else: #if len(track)==9:
postlen = track[-2][0] - track[-5][0]
subword = create_nent_placeholder(nenum) + subword[-postlen:]
elif extramode==2 or extramode==1:
pass
segmentlist+=mark_alpha_segmentation(roottree,bestvocab,track,subword,marker1,mode,optmode)
islast = (pos==len(word))
if islast and mode==0:
if endsbyroot: # marker set after word as separate segment
segmentlist += marker1
AlphaProcessed = True
else:
segmentlist[-1] = marker1 + segmentlist[-1] # postfix or small word marked (marker before it as part of it)
AlphaProcessed = True
else: # process non alpha part -- no segmentation performed -- forwarded to BPE
if not AlphaProcessed:
AlphaProcessed = True
if mode==0: subword = marker1 + subword
elif mode==1: segmentlist.append(marker1)
if mode==3: subword += marker1
segmentlist.append(subword)
endsbyroot = False
pos0 = pos
prevalpha = alpha
if mode==2: # postprocessing with begin/end
len1=len(marker1)
if len(segmentlist)==1: # single segment: do nothing
pass
    elif len(segmentlist)==2 and segmentlist[0]==marker2: # single segment preceded by uppercase mark: do nothing
pass
elif len(segmentlist)==2 and segmentlist[-1][:len1]==marker1: # two segments ended by postfix
segmentlist[-1] = marker1 + segmentlist[-1]
# the same with uppercase mark
elif len(segmentlist)==3 and segmentlist[0]==marker2 and segmentlist[-1][:len1]==marker1:
segmentlist[-1] = marker1 + segmentlist[-1]
else: # to put begin and end marks
# BEGINNING
# if uppercase mark or prefix in the beginning: add marker to it
if segmentlist[0]==marker2 or segmentlist[0][-len1:]==marker1:
segmentlist[0]+=marker1
else: # otherwise add extra marker
segmentlist.insert(0,marker1)
# END
# if postfix in the end: add marker to it
if segmentlist[-1][:len1]==marker1:
segmentlist[-1] = marker1 + segmentlist[-1]
else: # otherwise add extra marker
segmentlist.append(marker1+marker1)
elif mode==3:
len1=len(marker1)
if segmentlist[-1][-len1:]==marker1:
segmentlist[-1] = segmentlist[-1][:-len1]
return segmentlist,tracknone
def segment_sentence_preprocess_ne(sentence,verbose=False):
# EXTRACT named entities simple
wnum = 0
nentnums = []
for word in sentence.split():
if isUlower2(word):
# print(word)
nentnums.append(wnum)
wnum += 1
# print(nentnums)
nentnumsplus = []
prev = -9
i = 0
count = 0
for num in nentnums:
if num-prev>1:
count = 1
else:
count +=1
nentnumsplus.append(count)
prev = num
i += 1
i = len(nentnumsplus)-1
while i>=0:
if nentnumsplus[i]<=2: i-=1
else:
for k in range(nentnumsplus[i]):
nentnumsplus[i]=0
i-=1
nentnums2 = []
for i in range(len(nentnums)):
if nentnumsplus[i]>0:
nentnums2.append(nentnums[i])
return nentnums2
#nent_marker = "@$$$"
#
#def create_nent_line(word):
# return nent_marker + " " + word + "."
def segment_sentence(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1,marker2,mode=0,generateroots=False,optmode=1,extramode=0,nentnums=[],nentsegs=[],verbose=False):
""" Segment line of words (whitespace-tokenized string) with PRP encoding
"""
output = []
i = 0
nnum = 0
for word in sentence.split():
if i in nentnums:
segmented = nentsegs[nnum]
nnum = (nnum + 1) % 10
else:
segmented,tracklen = segment_word(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
word,marker1,marker2,mode,generateroots,optmode,extramode,0,verbose)
if mode%100==2: # optimizing usage of begin/end markers (omitting end marker, if begin marker follows)
if len(output)>0 and output[-1]==marker1+marker1: # previous word ended by separate endmarker
if segmented[0]==marker1: # current word starts by marker
del output[-1]
if segmented[0][-len(marker1)*2:]==marker1+marker1: # current word starts by prefix marked as beginning
del output[-1]
output += segmented
i += 1
return ' '.join(output)
def segment_sentence_nents(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1,marker2,mode=0,generateroots=False,optmode=1,extramode=2,nentnums=[],nentsegs=[],verbose=False):
""" Extract named-entities from sentence and prepare them for output
"""
output = []
nentnumspost = []
i = 0
nnum = 0
for word in sentence.split():
if i>0 and i in nentnums:
sword,tracknone = segment_word(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
word+'.',marker1,marker2,mode,generateroots,optmode,extramode,0,verbose)
if tracknone==False:
if mode%100==2: # optimizing usage of begin/end markers (omitting end marker, if begin marker follows)
if len(output)>0 and output[-1]==marker1+marker1: # previous word ended by separate endmarker
if sword[0]==marker1: # current word starts by marker
del output[-1]
if sword[0][-len(marker1)*2:]==marker1+marker1: # current word starts by prefix marked as beginning
del output[-1]
word2 = word + '.'
word3 = segment_sentence(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,
bestvocab,word2,marker1=marker1,marker2=marker2,mode=mode,
generateroots=generateroots,optmode=optmode,extramode=extramode)
output.append(word3)
nentnumspost.append(i)
nnum += 1
if nnum==10: break
i += 1
return output,nentnumspost
def segment_sentence_ne_placeholder(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1,marker2,mode=0,generateroots=False,optmode=1,extramode=0,nentnums=[],nentsegs=[0],verbose=False):
""" segment line of words (whitespace-tokenized string) with PRP encoding
"""
output = []
nentnumspost = []
i = 0
nnum = nentsegs[0]
for word in sentence.split():
if i>0 and i in nentnums:
sword,tracknone = segment_word(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
word+".",marker1,marker2,mode,generateroots,optmode,extramode,nnum,verbose)
if tracknone==False:
if mode%100==2: # optimizing usage of begin/end markers (omitting end marker, if begin marker follows)
if len(output)>0 and output[-1]==marker1+marker1: # previous word ended by separate endmarker
if sword[0]==marker1: # current word starts by marker
del output[-1]
if sword[0][-len(marker1)*2:]==marker1+marker1: # current word starts by prefix marked as beginning
del output[-1]
output.append(sword)
nentnumspost.append(i)
nnum = (nnum+1)%10
i += 1
nentsegs[0]=nnum
return output
# code 9474: '│'; code 9553: '║'
def apply_prpe(infile,outfile,infilepref,infileroot,infilesuff,infilepost,infileend,infilebestvocab,marker1="9474",marker2="9553",bigmode=1,generateroots=False,lang='lv'):
"""segment input stream with PRP encoding
"""
if marker1.isdigit(): marker1 = chr(int(marker1))
if marker2.isdigit(): marker2 = chr(int(marker2))
add_heuristics(lang)
mode = bigmode % 1000
optmode = bigmode // 1000 % 10
# optmode = optmode0 % 10
# extramode = optmode0 // 10
bestprecodetree = read_codetree(infilepref,reverse=False)
bestsuffcodetree = read_codetree(infilesuff,reverse=True)
bestpostcodetree = read_codetree(infilepost,reverse=True)
bestendcodetree = read_codetree(infileend,reverse=True)
roottree = read_codetree(infileroot)
bestvocab = read_vocabulary(infilebestvocab,reverse=False)
for sentence in infile:
outfile.write(segment_sentence(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1=marker1,marker2=marker2,mode=mode,generateroots=generateroots,
optmode=optmode,extramode=0).strip())
outfile.write(' \n')
# code 9474: '│'; code 9553: '║'
def apply_prpe_ne_train(infile,outfile,infilepref,infileroot,infilesuff,infilepost,infileend,infilebestvocab,
infilenent,marker1="9474",marker2="9553",bigmode=1,generateroots=False,lang='lv'):
"""segment input stream with PRP encoding with named-entity proessing for training phase
"""
if marker1.isdigit(): marker1 = chr(int(marker1))
if marker2.isdigit(): marker2 = chr(int(marker2))
add_heuristics(lang)
mode = bigmode % 1000
optmode = bigmode // 1000 % 10
bestprecodetree = read_codetree(infilepref,reverse=False)
bestsuffcodetree = read_codetree(infilesuff,reverse=True)
bestpostcodetree = read_codetree(infilepost,reverse=True)
bestendcodetree = read_codetree(infileend,reverse=True)
roottree = read_codetree(infileroot)
bestvocab = read_vocabulary(infilebestvocab,reverse=False)
nent = read_nent(infilenent)
lnum = 0
if outfile is None:
lnum = 22
# lnum2 = 0
phnum = 0
nnums = [0]
for sentence in infile:
lnum+=1
nentnumspost = []
nentsegspost = []
if lnum in nent:
nentnums = segment_sentence_preprocess_ne(sentence)
segmented_ne,nentnumspost = segment_sentence_nents(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1,marker2,mode,generateroots,optmode=optmode,extramode=1,
nentnums=nentnums,nentsegs=[])
segmented_ne = segmented_ne[:1]
nentnumspost = nentnumspost[:1]
if outfile is None:
print('#segmented_ne',segmented_ne,nentnumspost)
if len(segmented_ne)>0:
for ne in segmented_ne:
if outfile is not None:
outfile.write(ne)
outfile.write(' \n')
else:
print('#ne',ne)
nentsegspost = segment_sentence_ne_placeholder(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1,marker2,mode,generateroots,optmode=optmode,extramode=4,
nentnums=nentnumspost,nentsegs=nnums)
phnum = (phnum+1)%10
sgm = segment_sentence(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1=marker1,marker2=marker2,mode=mode,generateroots=generateroots,optmode=optmode,
extramode=0,nentnums=nentnumspost,nentsegs=nentsegspost).strip()
if outfile is not None:
outfile.write(sgm)
outfile.write(' \n')
else:
print("#apply_train",sgm)
# code 9474: '│'; code 9553: '║'
def apply_prpe_ne_translate(infile,outfile,infilepref,infileroot,infilesuff,infilepost,infileend,infilebestvocab,
outfilenent,marker1="9474",marker2="9553",bigmode=1,generateroots=False,lang='lv'):
"""segment input stream with PRP encoding with named-entity proessing for translation phase
"""
if marker1.isdigit(): marker1 = chr(int(marker1))
if marker2.isdigit(): marker2 = chr(int(marker2))
add_heuristics(lang)
mode = bigmode % 1000
optmode = bigmode // 1000 % 10
bestprecodetree = read_codetree(infilepref,reverse=False)
bestsuffcodetree = read_codetree(infilesuff,reverse=True)
bestpostcodetree = read_codetree(infilepost,reverse=True)
bestendcodetree = read_codetree(infileend,reverse=True)
roottree = read_codetree(infileroot)
bestvocab = read_vocabulary(infilebestvocab,reverse=False)
lnum = 1
for sentence in infile:
nentnums = segment_sentence_preprocess_ne(sentence)[:1]
nentnumspost = []
nentsegspost = []
if len(nentnums)>0:
segmented_ne,nentnumspost = segment_sentence_nents(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1,marker2,mode,generateroots,optmode=optmode,extramode=2,nentnums=nentnums,nentsegs=[])
segmented_ne = segmented_ne[:maxnenums]
nentnumspost = nentnumspost[:maxnenums]
if len(segmented_ne)>0:
for ne in segmented_ne:
if outfile is not None:
outfile.write(ne)
outfile.write(' \n')
else:
print(ne)
if outfilenent is not None:
outfilenent.write('{0} {1}\n'.format(lnum,len(segmented_ne)))
nentsegspost = segment_sentence_ne_placeholder(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1,marker2,mode,generateroots,optmode=optmode,extramode=3,
nentnums=nentnumspost,nentsegs=[0])
sgm = segment_sentence(bestprecodetree,roottree,bestsuffcodetree,bestpostcodetree,bestendcodetree,bestvocab,
sentence,marker1=marker1,marker2=marker2,mode=mode,generateroots=generateroots,optmode=optmode,
extramode=0,nentnums=nentnumspost,nentsegs=nentsegspost).strip()
if outfile is not None:
outfile.write(sgm)
outfile.write(' \n')
else:
print("apply_translate",sgm)
lnum += 1
def learn_prpe(infile,outfilepref,outfileroot,outfilesuff,outfilepost,outfileend,outfilebestvocab,ratepref=20,ratesuff=400,ratepost=0.1,ratevocab=10000,
ingoodpref=None,inbadpref=None,ingoodroot=None,inbadroot=None,ingoodpost=None,inbadpost=None,iterations=1,lang='lv'):
"""learn PRP encoding - raw prefixes, prefixes, roots, suffixes, postfixes, endings
"""
global goodprefixes
global badprefixes
global goodroots
global badroots
global goodpostfixes
global badpostfixes
if ingoodpref is not None: goodprefixes = read_vocabulary(ingoodpref)
if inbadpref is not None: badprefixes = read_codetree(inbadpref)
if ingoodroot is not None: goodroots = read_vocabulary(ingoodroot)
if inbadroot is not None: badroots = read_codetree(inbadroot)
if ingoodpost is not None: goodpostfixes = read_vocabulary(ingoodpost)
if inbadpost is not None: badpostfixes = read_codetree(inbadpost)
bestprecodetree = None
roottree = None
bestpostcodetree = None
add_heuristics(lang)
rawprecodetree,rawpostcodetree,vocab,rawprevocab=register_subwords(infile,premaxlen,postmaxlen,minrootlen)
# PRP extraction
mainprefrate = 0.05
mainpostrate = 0.05
suffrate = ratesuff
lastpostrate = ratepost
lastprefrate = ratepref
iters = iterations
save_vocabulary(outfilebestvocab,vocab,order=True,reverseorder=True,alphaonly=False,
maxcount=ratevocab)
# save_vocabulary(outfilebestvocab,vocab,True,True,True,ratevocab)
for it in range(iters):
# first processing
if it==0: # first
prefsource = rawprecodetree
rootsource = rawprecodetree
postsource = rawpostcodetree
else: # not first
prefsource = bestprecodetree
rootsource = roottree
postsource = bestpostcodetree
# postrate = mainpostrate
if it<iters-1: # not last
prefout = None
postout = None
suffout= None
endout = None
prefrate = mainprefrate**(1/(iters-it))
postrate = mainpostrate**(1/(iters-it))
bestprefout = None
bestsuffout = None
bestendout = None
bestpostout = None
rootout = None
else: # last
prefout = None
postout = None
suffout= None
endout = None
prefrate = lastprefrate
postrate = lastpostrate
bestprefout = outfilepref
rootout = outfileroot
bestsuffout = outfilesuff
bestendout = outfileend
bestpostout = outfilepost
# second processing
prevocab = analyze_prefixes(prefsource,rootsource,vocab,rawprevocab,prefout,loadfile=False)
postvocab,suffvocab,endvocab = analyze_postfixes(rootsource,postsource,vocab,rawprevocab,postout,suffout,endout,loadfile=False)
bestprecodetree = build_codetree_best(prevocab,rate=prefrate,reverse=False,datafile=bestprefout,loadfile=False)
bestpostcodetree = build_codetree_best(postvocab,rate=postrate,reverse=True,datafile=bestpostout,loadfile=False)
roottree=collect_roots(vocab,rawprecodetree,bestprecodetree,bestpostcodetree,rootout,loadfile=False,bestcount=1)
build_codetree_best(suffvocab,rate=suffrate,reverse=True,datafile=bestsuffout,loadfile=False)
build_codetree_best(endvocab,rate=1.0,reverse=True,datafile=bestendout,loadfile=False)
def unprocess_line_prpe(sentence,marker1,marker2,mode):
output = []
len1 = len(marker1)
len2 = len(marker2)
upper = False
marked = False
markednext = False
wordstarted = False
mode %= 100
for word in sentence.split():
# uppercase marking
if word==marker2:
upper=True
continue
elif mode==2 and word[:len2]==marker2: # in mode=2
upper=True
word = word[len2:]
elif upper:
word = word[0].upper() + word[1:]
upper = False
if mode==0:
# determine connection to previous segment
if word==marker1:
marked = False
continue
elif word[:len1]==marker1 and not word[len1].isalpha():
word = word[len1:]
marked = True
elif word[:len1]==marker1:
word = word[len1:]
markednext = False
elif word.isalpha():
markednext = True
if word[-len1:]==marker1 and not word[len1].isalpha():
word = word[:-len1]
markednext = True
# add segment
if marked:
output[-1]+=word
marked = False
else:
output.append(word)
# determine connection to next segment
if markednext:
markednext = False
marked = True
elif mode==1:
# determine connection to previous segment
if word==marker1:
marked = True
continue
elif word[:len1]==marker1:
marked = True
word = word[len1:]
# add segment
if marked:
output[-1]+=word
marked = False
else:
output.append(word)
# determine connection to next segment
if output[-1][-len1:]==marker1:
marked = True
output[-1] = output[-1][:-len1]
elif mode==2:
if word==marker1: # beginning as separate marker
wordstarted = True
output.append('')
elif word==marker1+marker1: # end as separate marker
wordstarted = False
elif word[-len1*2:]==marker1+marker1: # beginning as prefix
wordstarted = True
output.append(word[:-len1*2])
elif word[:len1*2]==marker1+marker1: # end as postfix
wordstarted = False
output[-1]+=word[len1*2:]
else:
if word[-len1:]==marker1: # prefix
word = word[:-len1]
elif word[:len1]==marker1: # postfix
word = word[len1:]
if wordstarted:
output[-1]+=word
else:
output.append(word)
elif mode==3:
if marked:
output[-1]+=word
else:
output.append(word)
marked = False
if output[-1][-len1:]==marker1:
output[-1]=output[-1][:-len1]
marked = True
return ' '.join(output)
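# Illustrative round trip (assumed markers '|' and '^'):
#   unprocess_line_prpe("^ un| believ |able", '|', '^', mode=1) -> "Unbelievable"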
# code 9474: '│'; code 9553: '║'
def unprocess_prpe(infile,outfile,marker1="9474",marker2="9553",mode=1):
if marker1.isdigit(): marker1 = chr(int(marker1))
if marker2.isdigit(): marker2 = chr(int(marker2))
for line in infile:
outfile.write(unprocess_line_prpe(line,marker1,marker2,mode).strip())
outfile.write(' \n')
def unprocess_prpe_ne_train(infile,outfile,infilenent,marker1="9474",marker2="9553",mode=1):
if marker1.isdigit(): marker1 = chr(int(marker1))
if marker2.isdigit(): marker2 = chr(int(marker2))
nnum = 0
lnum = 1
linesleft = -1
nentlist = []
lnents = read_nent(infilenent)
for line in infile:
pline = unprocess_line_prpe(line,marker1,marker2,mode).strip()
# lft1 = pline[:len(nent_marker)]
if lnum in lnents and linesleft == -1: # nent line
nent = pline.strip('.')
if outfile is None:
print("NENT",nent)
nent = nent[0].upper()+nent[1:]
nentlist.append(nent)
linesleft = 0
else: # regular line
plist = []
for p in pline.split():
if p[:len(nent_placeholder_marker)]==nent_placeholder_marker:
# print("!!!",p)
p = p[:len(p)-1]
plist.append(p)
pline = " ".join(plist)
if outfile is None:
print(pline)
for nent in nentlist:
pline = pline.replace(create_nent_placeholder(nnum),nent)
nnum = (nnum+1)%10
if outfile is None:
print(nentlist)
print(pline)
else:
outfile.write(pline)
outfile.write(' \n')
nentlist = []
linesleft = -1
lnum += 1
def unprocess_prpe_ne_translate(infile,outfile,infilenent,marker1="9474",marker2="9553",mode=1):
if marker1.isdigit(): marker1 = chr(int(marker1))
if marker2.isdigit(): marker2 = chr(int(marker2))
lnum = 1
linesleft = -1
nentlist = []
lnents = read_nent_int(infilenent)
for line in infile:
pline = unprocess_line_prpe(line,marker1,marker2,mode).strip()
# lft1 = pline[:len(nent_marker)]
if lnum in lnents and linesleft == -1: # first nent line
linesleft = lnents[lnum]
if linesleft > 0: # nent line
# nent = pline[len(nent_marker)+1:].strip('.')
nent = pline.strip('.')
if outfile is None:
print("NENT",nent)
nent = nent[0].upper()+nent[1:]
nentlist.append(nent)
linesleft -= 1
else: # regular line
plist = []
for p in pline.split():
if p[:len(nent_placeholder_marker)]==nent_placeholder_marker:
# print("!!!",p)
p = p[:len(p)-1]
plist.append(p)
pline = " ".join(plist)
if outfile is None:
print(pline)
nnum = 0
for nent in nentlist:
pline = pline.replace(create_nent_placeholder(nnum),nent)
nnum = (nnum+1)%10
if outfile is None:
print(nentlist)
print(pline)
else:
outfile.write(pline)
outfile.write(' \n')
nentlist = []
linesleft = -1
lnum += 1
|
[
"janis.zuters@lu.lv"
] |
janis.zuters@lu.lv
|
a640aa21e8d9d8de370e1401500dbff55628def6
|
73ebf027a2b8679fd153f982d98c8d60e9b1d894
|
/har_hw4.py
|
c5a10477e51e4cc42fe7335a40496ebc9cf93012
|
[] |
no_license
|
ejconlon/knowledge_games
|
05d8496935291f009c2431e1b7d3173f594a0279
|
f751dbfba468950e81625cce39725ec7c6358b64
|
refs/heads/master
| 2020-05-26T19:02:44.831211
| 2011-05-31T15:22:40
| 2011-05-31T15:22:40
| 1,560,992
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
#!/usr/bin/env python
from sat import *
class Problem(object):
def __init__(self, name=""):
self.name = name
def __enter__(self):
print "*** START "+self.name
def __exit__(self, a, b, c):
print "*** END "+self.name+"\n"
with Problem("3.1.1") as X:
print sub(['*','*','*'], ['1', '*', '1'])
print sub(['1','0','*'], ['0', '1', '*'])
print sub(['*','*','*'], ['1', '*', '*'])
with Problem("3.2.1") as X:
g = [
['*', '*', '1', '1'],
['0', '*', '*', '*'],
['0', '1', '*', '*'],
['0', '1', '1', '*'],
['1', '*', '0', '*']
]
s = [['*', '*', '*', '*']]
for row in g:
s = suball(s, row)
print s
print s
sm = SatMat(g).to_equiv_mat()
print list(sm.sat_by_counting())
with Problem("3.3.5") as X:
g = [
['*', '*', '1', '*', '0'],
['1', '1', '*', '*', '*']
]
print sub(g[0], g[1])
"""
s = [['*', '*', '*', '*','*']]
for row in g:
s = suball(s, row)
print s
print s
sm = SatMat(g).to_equiv_mat()
print list(sm.sat_by_counting())
"""
with Problem("3.4") as X:
g = [
['1', '*', '*', '1'],
['0', '1', '*', '*'],
['1', '*', '0', '*'],
['*', '*', '0', '1']
]
s = [['*', '*', '*', '*']]
for row in g:
s = suball(s, row)
print s
print s
sm = SatMat(g).to_equiv_mat()
print list(sm.sat_by_counting())
with Problem('3.5.5') as X:
print sub(['*', '0', '*', '1'], ['0', '*', '1', '1'])
print sub(['1','*', '*', ], ['1','1','0'])
print sub(['*', '1', '*', '1'], ['1', '*', '1', '*'])
with Problem("3.6") as X:
g = [
['0', '*', '1'],
['*', '1', '0'],
['*', '0', '0'],
['1', '*', '1']
]
s = [['*', '*', '*']]
for row in g:
s = suball(s, row)
print s
print s
sm = SatMat(g).to_equiv_mat()
print list(sm.sat_by_counting())
with Problem('4.1') as X:
print sub(['*', '*', '*', '*'], ['1', '*', '*', '*'])
|
[
"ejconlon@gmail.com"
] |
ejconlon@gmail.com
|
65b09789b7bb81368ae2669dcdf3688e2acf76b7
|
c87cb0333e1676b7cfd0e6d43fb7769c94d645d9
|
/data_structure/selection_sort.py
|
63e6e9f2f17efa878126c7743152a79d52f8cdc0
|
[] |
no_license
|
tanalam2411/python
|
4e4ee594d712d9650eddede91f0dfb992887392b
|
b51a02c4560c86e8797f373af2e1e03e0d0ba674
|
refs/heads/master
| 2020-12-25T14:33:12.371882
| 2019-07-27T18:25:55
| 2019-07-27T18:25:55
| 67,584,152
| 0
| 0
| null | 2016-10-09T19:03:35
| 2016-09-07T07:44:42
|
Python
|
UTF-8
|
Python
| false
| false
| 314
|
py
|
#! /usr/bin/env python
"""
Bubble sort implementation in Python.
Input array = [5, 4, 3, 2, 1]
In every iteration the largest remaining number is moved to its sorted position.
Total iterations = N - 1 (worst case), as every iteration sorts one largest number.
Time complexity:
    Best Case - O(n) (with an early-exit check when a pass makes no swaps)
    Avg Case - O(n^2)
    Worst Case - O(n^2)
"""
|
[
"onlinejob.2411@gmail.com"
] |
onlinejob.2411@gmail.com
|
185c117a2b259e5d78936d721c33968d73ebe809
|
e6bd38983a05e7eca9281072f24443a1de348cd2
|
/dags/libs/check_and_update_record.py
|
08f086e435fce5140d1797c0bb82c67dc697744b
|
[] |
no_license
|
Chestermozhao/airflow_practice
|
f9bd1c285135bce7f3f5d0868c7055f4abc187d5
|
319afb5d6fc031b4f4a2724877ce0075d81a9c09
|
refs/heads/master
| 2022-12-10T09:55:37.729724
| 2019-12-21T18:05:05
| 2019-12-21T18:09:23
| 229,432,497
| 0
| 0
| null | 2022-12-08T07:01:23
| 2019-12-21T13:38:38
|
Python
|
UTF-8
|
Python
| false
| false
| 885
|
py
|
from .mongo import collection_sub_channel
def check_and_update_record(mode, **context):
if mode == "check":
sub_channels = collection_sub_channel.find()
sub_channels = list(sub_channels) if sub_channels else ""
return sub_channels
elif mode == "update":
print("Saving latest youtube information..")
_, channel_update_info = context["task_instance"].xcom_pull(
task_ids="check_channel_update_info"
)
# update latest channel videos
for channel_name, channel_info in dict(channel_update_info).items():
query_filter = {"channel_name": channel_name}
update_data = {
"previous_title": channel_info["titles"][0],
"previous_link": channel_info["links"][0],
}
collection_sub_channel.update(query_filter, {"$set": update_data})
|
[
"b02310043@ntu.edu.tw"
] |
b02310043@ntu.edu.tw
|
be3a66601cfbca1c03d9f07d90a3f927da3df864
|
c3c1e47981daeed5d1d8023edb6ea84ed65c2859
|
/Python/Generate_Test_data.py
|
e2e6286c695b3be8b63f0bd2a6301e2b0aadfbf5
|
[] |
no_license
|
sappyh/AEP
|
dd6738897328244f143fb4b59990782c9d76c807
|
9985949b4ced64d7f9cc74909cdbf53d21c4a0a1
|
refs/heads/master
| 2021-01-20T08:29:43.474502
| 2017-07-24T16:38:32
| 2017-07-24T16:38:32
| 90,152,569
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
#!/usr/bin/python
import cv2
import numpy as np
from PIL import Image
img=cv2.imread('/home/ubuntu-admin/horse.png')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray=cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)[1]
np.savetxt('/home/ubuntu-admin/test',gray,fmt='%d')
mat=np.loadtxt('/home/ubuntu-admin/test',dtype=int)
mat=np.uint8(mat)
c=(gray==mat)
print c
print gray[0][0].dtype
print mat[0][0].dtype
# file_sample = open('/home/ubuntu-admin/test')
#mat =[255,255,0,0,0,255,255,0,0,0,255,255,0,0,0,255,255,0,0,0,255,255,0,0,0,255,255,0,0,0]
# mat=[];
# for i in range(100):
# s=str(file_sample.readlines())
# s=s.split(" ")
# mat.append(s)
# #mat = np.random.random((100,100))
# with open('/home/ubuntu-admin/test') as file:
# mat= [[float(digit) for digit in line.split()] for line in file]
# Creates PIL image
# mat=np.asarray(mat)
# print mat.size
# mat=np.reshape(mat,(100,100))
img = Image.fromarray(mat,'L')
img.show()
|
[
"akurude@berlinux-solutions.de"
] |
akurude@berlinux-solutions.de
|
e731bb87fa8a77a28c0423228f7ea27d5769df6a
|
cf02cd226853e89a0e35bd7623658a21e64cf80d
|
/class_person_repr.py
|
561b48f130eca6b77aab49cd54982cb6503f930a
|
[] |
no_license
|
bterwijn/python
|
ba9485aaf0808dc38e899e8ab662dc94cf6e4ac8
|
dd8fec6f71b18aaa4a78e26a4afefc8c70327093
|
refs/heads/master
| 2023-09-06T04:19:27.258438
| 2021-10-21T12:06:39
| 2021-10-21T12:06:39
| 385,696,358
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
def main():
person = Person("Carl", "Clarkson", 82.1, 1.89)
print(person)
person_list = [Person("James", "Taylor", 70.5, 1.71), Person("Jack", "Smith", 67.0, 1.65)]
person_list.append(Person("Emma", "Williams", 65.0, 1.69))
print(person_list)
class Person:
def __init__(self, first_name, last_name, weight, height):
self.name = first_name
self.surname = last_name
self.weight = weight
self.height = height
def get_full_name(self): # member function, method
return self.name + ' ' + self.surname
def get_body_mass_index(self):
return self.weight / self.height**2
def __repr__(self): # dunder/magic method
return "name:" + self.name + " surname:" + self.surname + \
" weight:" + str(self.weight) + " height:" + str(self.height)
main()
|
[
"bterwijn@gmail.com"
] |
bterwijn@gmail.com
|
012710c17a6a74ccfecf923085f21c731c346456
|
c7713ed30e6edd751ccb811ad3fd48de30f94e33
|
/WprimeToENu_M_3200_TuneCUETP8M1_13TeV_pythia8_cfi.py
|
5f38893712654099933a15ee871c0c0fc16230e5
|
[] |
no_license
|
bdelacruz/usercode
|
3be9fa8d3c761754c95a5c891c691dfd4baaa38d
|
76cb706731cde5a4cfb0dec68c628ef39dc1408f
|
refs/heads/master
| 2016-09-05T23:59:26.566827
| 2015-03-17T12:22:14
| 2015-03-17T12:22:14
| 32,382,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(0.009),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Main:timesAllowErrors = 10000',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tauMax = 10',
'Tune:ee 3',
'Tune:pp 5',
'NewGaugeBoson:ffbar2Wprime = on',
'34:m0 = 3200',
'34:onMode = off',
'34:onIfAny = 11,12',
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"begona.delacruz@ciemat.es"
] |
begona.delacruz@ciemat.es
|
77a5ecaa8ee26e0b6feca2c3e33edd7756af7e9e
|
7274ff5e36a6868578791c7d8bc52b5deec2270f
|
/api/server.py
|
f33d266698ad0881d23f8b0c905adbea9aee0653
|
[] |
no_license
|
hieubkvn123/VirtualInterviewPlatform
|
dfd4af25545e512c0eae12b47b49b45e1cc1874b
|
eea84a37ad2f4a94b2b20ef0a3a00e6ffa3b26b7
|
refs/heads/main
| 2023-01-19T21:19:22.340051
| 2020-11-27T02:14:01
| 2020-11-27T02:14:01
| 314,131,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
import threading
from flask import Flask
from flask import request
from flask_cors import CORS
from flask_mail import Mail, Message
### Import all blueprints ###
from mail_server import app as mail_app
from auth import auth
from vip import vip
from vip import UPLOAD_FOLDER
from config import system_mail_config as mail_conf
from config import ssl_config
mail = Mail()
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = mail_conf['email_address']
app.config['MAIL_PASSWORD'] = mail_conf['password']
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail.init_app(app)
app.register_blueprint(auth, url_prefix='/auth')
app.register_blueprint(vip, url_prefix='/vip')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
CORS(app)
@app.route('/', methods=['GET', 'POST'])
def home():
return 'Hello world'
@app.route('/send_mail', methods=['POST'])
def send_mail():
if(request.method == 'POST'):
message = request.form['message']
sender = request.form['sender']
recipient = request.form['recipient']
msg = Message('[Virtual Interview System] Password confirmation',
sender = sender,
recipients=[recipient])
msg.body = message
mail.send(msg)
return 'success'
def runMailServer():
mail_app.run(host='0.0.0.0', port=8081)
if __name__ == '__main__':
mail_thread = threading.Thread(target=runMailServer, args=())
mail_thread.daemon = True
mail_thread.start()
app.run(host='0.0.0.0', port=8080, ssl_context=(ssl_config['cert'], ssl_config['key']))
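# Example request (illustrative values only; -k skips verification of the local
# self-signed certificate):
#   curl -k -X POST https://localhost:8080/send_mail \
#        -d "message=Your code is 1234" \
#        -d "sender=noreply@example.com" -d "recipient=user@example.com"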
|
[
"hieubkvn123@gmail.com"
] |
hieubkvn123@gmail.com
|
cdde25e6308c46332743cc718554a484f4e7908b
|
8f47ef999c6a5f00dd4716eb338606a53f452fe2
|
/commonUtilities/customLogger.py
|
741eef222e3e6677565935b5b7920d9c216e8993
|
[] |
no_license
|
arjyagarai/eCommerceFrameworkBatch4
|
f885d299ed14fff652cf7a9c2eb94816d0697c59
|
369b8e80075568116f4d031368fc0cfcfcf3c786
|
refs/heads/master
| 2023-01-03T21:44:20.400766
| 2020-10-31T07:12:04
| 2020-10-31T07:12:04
| 308,822,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
import logging
class LogHelper:
@staticmethod
def loggen():
logging.basicConfig(filename=".\\Logs\\automation.log",
format='%(asctime)s: %(levelname)s: %(message)s',
datefmt='%d/%m/%Y %H:%M:%S %p'
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
return logger
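# Example usage (illustrative; note logging.basicConfig will not create the
# ".\Logs" directory -- it must exist before loggen() is first called):
#     logger = LogHelper.loggen()
#     logger.info("Test step passed")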
|
[
"garai.arjya@gmail.com"
] |
garai.arjya@gmail.com
|
5b1f244fd827d81662aeaa209fc61a75efcbd9dd
|
227f625b992b6afc9d27a989a0e5535a2c20aa90
|
/graph/test_graph.py
|
950c86058c963d35266b183d9c3c09df85fa055e
|
[] |
no_license
|
usrnm242/algorithms
|
90a2f62c561b843f4140cc6af20aa10f52a7865d
|
06f5d1c36cfaa6e2d744fbdc4e6881ffccd671ee
|
refs/heads/master
| 2021-01-26T07:09:00.855482
| 2020-04-17T19:29:04
| 2020-04-17T19:29:04
| 243,358,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,937
|
py
|
#!/usr/bin/env python3
from bfs import bfs
from dfs import dfs
from dijkstra import DijkstraGraph
from kruskal_min_tree import KruskalTree
from prim_min_tree import PrimTree
from floyd_warshall import floyd_warshall
graph = {0: [(1, 4), (7, 8)],
1: [(2, 8), (7, 11)],
2: [(3, 7), (8, 2), (5, 4)],
3: [(4, 9), (5, 14)],
4: [(5, 10)],
5: [(6, 2)],
6: [(7, 1), (8, 6)],
7: [(8, 7)]}
unweighted_graph = {
0: [1, 2],
1: [0, 3, 4],
2: [0, 5],
3: [1],
4: [1, 5],
5: [2, 4],
}
# source: [(dest, weight), (dest1, weight1)]
undirected_graph = {1: [(2, 5), (3, 3)],
2: [(3, 1)],
3: [(4, 5)],
4: [(1, 2), (2, 2)]}
directed_graph = {1: [(2, 3), (3, 4)],
2: [(3, 5)],
3: [(1, 10)],
4: [(1, 4), (3, 2), (2, 3)]}
def print_delimiter(func):
def printing():
func()
print('_' * 50, end="\n\n")
return printing
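# NOTE: print_delimiter only supports zero-argument functions (the wrapper takes
# no *args/**kwargs), which is sufficient for the test functions below.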
@print_delimiter
def test_bfs():
start = 0
print("BFS")
print("Graph:", unweighted_graph)
print(f"start is '{start}'")
print(bfs(unweighted_graph, start))
@print_delimiter
def test_dfs():
start = 0
print("DFS")
print("Graph:", unweighted_graph)
print(f"start is '{start}'")
print(dfs(unweighted_graph, start))
@print_delimiter
def test_dijkstra():
print("Dijkstra")
print("Graph:", graph)
dijkstra = DijkstraGraph(graph, is_undirected=True)
source = 0
destination = 4
print(f"source is {source}, destination is {destination}")
dijkstra.explore(source)
path = dijkstra.get_path(source, destination)
print(path, f"is the shortest path from {source} to {destination}")
cost = dijkstra.get_cost(source, destination)
print(cost, f"is cost of the path from {source} to {destination}")
@print_delimiter
def test_kruskal():
print("Kruskal")
print("graph:", undirected_graph)
tree = KruskalTree(undirected_graph).explore_tree()
print("minimal spanning tree:", tree)
@print_delimiter
def test_prim():
start = 0
print("Prim's alg")
print("graph:", undirected_graph)
print(f"start is {start}")
prim = PrimTree(undirected_graph)
tree = prim.prim(start)
print("minimal spanning tree:", tree)
prim = PrimTree(undirected_graph)
tree = prim.prim_heap(start)
print("Using min heap:")
print("minimal spanning tree:", tree)
@print_delimiter
def test_floyd_warshall():
print("Floyd-Warshall alg")
print("Graph:", directed_graph)
print("Table with distances:")
dist_table = floyd_warshall(directed_graph)
for row in dist_table:
for val in row:
print ('{:5}'.format(val), end="")
print()
if __name__ == "__main__":
test_bfs()
test_dfs()
test_dijkstra()
test_kruskal()
test_prim()
test_floyd_warshall()
|
[
"k3nt242@gmail.com"
] |
k3nt242@gmail.com
|
eabdc6c3f6effdedd91a8277b7862b4840198430
|
6b48594ee09fe40c412a2b0cabdf09e2635bb7c7
|
/server.py
|
22e79bfd45c218dcbceff1b8807b60bfef1d8319
|
[
"MIT"
] |
permissive
|
TAINGL/api-simplon
|
7a78ae33d8bf6baffee44f31e4dcbdab734cd9a6
|
bd063d9d4d1120de22fb8ecccc5f35440903c1fa
|
refs/heads/master
| 2022-12-18T01:58:15.900285
| 2020-09-17T13:55:59
| 2020-09-17T13:55:59
| 296,291,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
from flask import Flask, jsonify, request
from flask_restful import Resource, Api, reqparse
import os
app = Flask(__name__)
api = Api(app)
port = int(os.environ.get("PORT", 5000))
parser = reqparse.RequestParser()
parser.add_argument('number', type=float, required=True)
class HelloWorld(Resource):
def get(self):
return {'hello':'world'}
class Square(Resource):
def post(self):
args = parser.parse_args()
number = args['number']
res = number * number
return {'square': res}, 200
api.add_resource(HelloWorld, '/hello')
api.add_resource(Square, '/square')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0',port=port)
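# Example request (assuming the default local port 5000):
#   curl -X POST -d "number=3" http://localhost:5000/square
#   -> {"square": 9.0}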
|
[
"laura.taing@hotmail.com"
] |
laura.taing@hotmail.com
|
e4528bf1d132b678fc9ee8da7dcbc7fbb8fc43da
|
039cad89ebc88828219f1d353034fabf4d83ec9c
|
/YanxuanCrawler/Yanxuan/items.py
|
2b12facc7270c607b05423c675355cecad02c455
|
[] |
no_license
|
VeeDou/YanxuanCrawler
|
568be57ccd782af1471c527b06c4158f9911aa57
|
48addaf8d66735e8f39c166ab27cf32c6e74ef99
|
refs/heads/master
| 2020-03-13T02:59:52.306521
| 2018-04-25T01:54:54
| 2018-04-25T01:54:54
| 130,935,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class YanxuanItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
attrs_dict = scrapy.Field()
comments_dict = scrapy.Field()
itemId = scrapy.Field()
comments_num = scrapy.Field()
    comments_tags = scrapy.Field() # comment tags
    say_good_pct = scrapy.Field() # positive-review ratio (4 stars and above)
    seem_good_tag = scrapy.Field() # best-seller tag (999+ comments)
    seem_cheap_tag = scrapy.Field() # discount tag
    itemid_typeA = scrapy.Field() # major category ID
    itemid_typeB = scrapy.Field() # minor category ID
price = scrapy.Field()
|
[
"noreply@github.com"
] |
VeeDou.noreply@github.com
|
ca4c643a49cf4166f5a1b83530fb556e446a1ddd
|
dc8aca825f1ed1d31db0dcb8d97e3049338c856c
|
/globals/dto/current_user_dto.py
|
73cc4c5f60991072391f2a2fea87b159561f8870
|
[] |
no_license
|
Avocado-Inc/orhana_api
|
1039cb573a69b8161cb4a1f6d87c24edf0aa1d3e
|
41372d1416839eb5fbfdeafffc71add459d31a80
|
refs/heads/main
| 2023-09-03T17:47:12.322288
| 2021-09-19T17:52:29
| 2021-09-19T17:52:29
| 360,650,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from globals.rest import BaseDto
class CurrentUser(BaseDto):
user_id: str
role: str
|
[
"rajat310198@outlook.com"
] |
rajat310198@outlook.com
|
abb682055e931eb51fec2f2e65a33658b4faf877
|
704fda0d0e05f66f0c4f3c17cc4b39e2b0bc6220
|
/homework4/task1.py
|
1cdd658eec40714e7e52a76d55539e2a7dafaa47
|
[] |
no_license
|
humantom88/geekbrains-python-basics
|
71eae1be4f8b1d757db17200d64d2b14ea51d83f
|
ebde8e0a9b92386f2a5b994a880184adde8c7454
|
refs/heads/master
| 2021-05-17T10:48:56.788147
| 2020-04-23T09:51:10
| 2020-04-23T09:51:10
| 250,743,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
# -*- coding:utf-8 -*-
# 1. Implement a script that provides a function for calculating an employee's salary.
# The calculation must use the formula: (hours worked * hourly rate) + bonus.
# To run the calculation for specific values, the script must be launched with parameters.
import sys
if __name__ == "__main__" :
arguments = sys.argv
arguments.pop(0)
if arguments[0] == "help" or arguments[0] == "--help" or arguments[0] == "-h":
print("""
Скрипт выполняет расчет зарплаты сотрудника.
Параметры вводятся через пробел в следующем порядке:
1. выработка в часах
2. ставка в час
3. премия
Example:
python 40 200 10000
""")
elif len(arguments) < 3:
        print('You did not enter enough parameters')
else:
try:
workout_in_hours = int(arguments[0])
hour_cost = int(arguments[1])
award = int(arguments[2])
salary = workout_in_hours * hour_cost + award
            print(f"Employee's salary: {salary}")
except ValueError:
print("В одном из параметров введено не число")
|
[
"Belov.A.Andr@sberbank.ru"
] |
Belov.A.Andr@sberbank.ru
|
a84eef80c1386bd0ec750fb2b2d81e11630bf092
|
c4d3fe6c76e9e0fad14b0cd4a65bf3f12b444710
|
/Python/Examples/pyex_73.py
|
706be7887d88eb5dea640970ace0b4d8b63f3d34
|
[] |
no_license
|
MartinChan3/CyComputerNote
|
af12e508c0364e6e8381eefb510cfd265a9895a1
|
45765aed28929106f9bd9bb7d7336203de4b34cc
|
refs/heads/master
| 2023-05-13T01:44:50.704024
| 2023-04-28T08:49:19
| 2023-04-28T08:49:19
| 142,123,859
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
#coding:utf-8
if __name__ == '__main__':
N = 3
arr = []
for i in range(N):
arr.append(raw_input('Please input a number:\n'))
for i in arr[::-1]:
print i
|
[
"bycy0801@126.com"
] |
bycy0801@126.com
|
cc4786754a4df73a20af92b1a9ab2262190d77c8
|
1f64ee3fd873de6d3f5510e2e24f5a55993fc112
|
/img.py
|
54ab9cba6efa01d6496fce7c204e9471f672059d
|
[] |
no_license
|
Myxg/capture
|
687167ccf6cb392f4d645332dde25d49efa255a8
|
044b81a4c3a8c3aa223bb075a8c565c18c1e351f
|
refs/heads/master
| 2022-12-14T15:57:41.634687
| 2020-09-07T10:24:10
| 2020-09-07T10:24:10
| 290,379,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
import os
import shutil
import time
from multiprocessing import Process
# tt = time.time()
# def run(count_num):
n = 0
while True:
if n == 1:
break
for i in range(1, 9):
print(i, n)
i = str(i)
path_xml = "D:\\img_" + i
filelist = os.listdir(path_xml)
path = "D:\\img_" + i + "\\"
img = "D:\\openpose\\img_" + i + "\\"
# tt = time.time()
# s = time.localtime(tt)
# t = str(s.tm_year) + str(s.tm_mon).zfill(2) + str(s.tm_mday) + str(s.tm_hour) + str(s.tm_min)
# t = '202007201105'
for files in filelist[:300]:
name = os.path.splitext(files)[0]
hz = os.path.splitext(files)[1]
# print(name[0:14],type(name))
if hz == '.jpg':
copy_img = path + name + '.jpg'
new_img = img + name + '.jpg'
shutil.move(copy_img, new_img)
os.chdir("D:\\openpose")
a1 = 'openposedemo.exe --render_pose 0 --display 0 --image_dir img_' + i + ' --write_json out_' + i
# a2 = 'bin\openposedemo.exe --render_pose 0 --display 0 --image_dir img_8 --write_json out_8'
# openposedemo.exe --image_dir 1 --write_json 1_out --display 0 --render_pose 0
os.system(a1)
dir = "D:\\openpose\\img_" + i
shutil.rmtree(dir)
os.mkdir(dir)
n += 1
# if __name__ == '__main__':
# p1 = Process(target=run, args=(1,))
# # p2 = Process(target=run, args=(3,))
# p3 = Process(target=run, args=(5,))
# # p4 = Process(target=run, args=(7,))
# s = time.time()
# p1.start()
# # p2.start()
# p3.start()
# # p4.start()
# p1.join()
# # p2.join()
# p3.join()
# # p4.join()
# print('~~~~~~~~~~~~~~~~')
# e = time.time()
# print(e-s)
# ss = time.time()
# print(ss-tt)
|
[
"15234407153@163.com"
] |
15234407153@163.com
|
2cfabaac17e91a80ec12fe78412c6f43917058e7
|
889ae46c25608a549f14d3c1c8ff3328eb1c8ba5
|
/microdollars/apps.py
|
e6dfb7d7fd8403e8b8274305f7a3984050e93eb5
|
[] |
no_license
|
vasilzhigilei/MicroDollars
|
8d74b634dd36a4d1a816a3580bf96f7878cca5b3
|
c554c329225762e02ade85d7a9744fecda57b06b
|
refs/heads/master
| 2023-02-10T11:14:51.062557
| 2021-01-02T15:52:17
| 2021-01-02T15:52:17
| 318,037,005
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from django.apps import AppConfig
class MicrodollarsConfig(AppConfig):
name = 'microdollars'
|
[
"vasil.zhigilei@gmail.com"
] |
vasil.zhigilei@gmail.com
|
b1ff6cd4754f8fcfa1cd850e1666eedd48399014
|
ec9aeb155366571d58c4098176d723f7ec31ff1c
|
/pilotlog/asgi.py
|
b3f209c127d214deffe4ac6caed37807ad39fda4
|
[] |
no_license
|
LukeZvada/pilotlogcapstone-server
|
88808b1c253edc38baaac2a66e579ed883a51b22
|
5627945436550114b75f0f2677e7a2f7803bb026
|
refs/heads/main
| 2023-02-14T00:07:57.493319
| 2021-01-10T22:11:32
| 2021-01-10T22:11:32
| 320,682,550
| 2
| 0
| null | 2020-12-14T21:59:37
| 2020-12-11T20:58:03
|
Python
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
ASGI config for pilotlog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pilotlog.settings')
application = get_asgi_application()
|
[
"zvadamusic@gmail.com"
] |
zvadamusic@gmail.com
|
70238f11cb7faa876c603467cf7d095936153a53
|
a3ffcac33cf0a6f5b7c004028a7250c0e60de077
|
/python/dockeregistryfunc.py
|
dcbc38210533c16d346c1ea21eadb7568a3dd0e4
|
[] |
no_license
|
jils2013/scripts
|
af2092d8141b4ecfa2c5ce095e69aff64c69e9d3
|
79a597b45fb1d76beccfbfb686ed7e13d45bef44
|
refs/heads/master
| 2021-06-25T19:38:27.857832
| 2019-06-17T15:37:10
| 2019-06-17T15:37:10
| 106,628,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,460
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json,urllib2,urllib,re,base64,threading,time
# Bearer realm...
user = {'http://youregistry.com/v2/token': ['user', 'password']}
# get running images from kubernetes deployments
def getrunimgs(url):
images=[]
for i in json.loads(urllib2.urlopen('http://%s/apis/extensions/v1beta1/deployments'%url).read())['items']:
for c in i['spec']['template']['spec']['containers']:
if c['image'] not in images:
images.append(c['image'])
return images
# get token from docker registry by scode/service/user
def getoken(authenticate):
auth={}
re.sub('([^,=]+)="([^"]+)"',lambda m:auth.update({m.group(1):m.group(2)}),authenticate)
authurl=auth.pop('Bearer realm')
return json.loads(urllib2.urlopen(urllib2.Request(authurl+'?'+urllib.urlencode(auth),None,{'Authorization':'Basic '+base64.urlsafe_b64encode(':'.join(user[authurl]))})).read())['token']
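# e.g. getoken() parses a WWW-Authenticate header such as (illustrative value):
#   Bearer realm="http://youregistry.com/v2/token",service="registry",scope="repository:foo/bar:pull"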
# parse response from docker registry
def urlopenrel(opener,request,response):
method=request.get_method()
if method=="DELETE":
return {'result':response.code,'detail':response.msg}
if method=="HEAD" or response.headers.get('Content-Length',1)=='0':
return response.headers
if not response.headers.has_key('link'):
res=response.read()
if response.headers['Content-Type']=='application/octet-stream':
return res
return json.loads(res)
url=response.url
while response.headers.has_key('link'):
#request=urllib2.Request(request.type+'://'+request.host+response.headers['link'][1:-13])
request=urllib2.Request(url+'?n='+urllib2.urlparse.parse_qs(response.headers['link'][1:-13])['n'][0]+'0')
request.get_method=lambda: method
response=opener.open(request)
return json.loads(response.read())
# create request to docker registry
def apirequest(url,method,**reqdata):
opener=urllib2.build_opener()
opener.addheaders=[('Accept','application/vnd.docker.distribution.manifest.v2+json')]
request=urllib2.Request(url)
if reqdata:
request=urllib2.Request(url=url,data=reqdata.get('data',None),headers=reqdata.get('headers',{}))
request.get_method=lambda: method or 'GET'
try:
response=opener.open(request)
return urlopenrel(opener,request,response)
except urllib2.URLError as err:
if not hasattr(err, 'code'):
return {'result':-1,'detail':err.reason}
if err.code!=401:
return {'result':err.code,'detail':err.msg}
try:
opener.addheaders.append(('Authorization','Bearer '+getoken(err.headers.getheader('www-authenticate'))))
response=opener.open(request)
return urlopenrel(opener,request,response)
except urllib2.HTTPError as httperror:
return {'result':httperror.code,'detail':httperror.msg}
# get all repositories from docker registry
def getrepositories(host):
return apirequest('http://%s/v2/_catalog'%host,'')['repositories']
# get tags by repositorie
def getags(repo):
r=apirequest('http://%s/v2/%s/tags/list'%tuple(repo),'')
return sorted(r.get('tags',None) or [])
# delete images with image tag
def deleteimagewithtag(img):
digest=apirequest('http://%s/v2/%s/manifests/%s'%tuple(img),'HEAD')
if not digest.has_key('Docker-Content-Digest'):
return False
delete=apirequest('http://%s/v2/%s/manifests/%s'%tuple(img[0:2]+[digest['Docker-Content-Digest']]),'DELETE')
    if delete.get('result','')==202:
return False
return True
def parallelrun(func,arg,n):
ret=[]
while arg:
if threading.activeCount()>n:
time.sleep(0.1)
continue
thread=threading.Thread(target=lambda r:ret.append([r,func(r)]),args=(arg.pop(),))
thread.start()
while threading.activeCount()-1:
# print threading.activeCount(),len(ret)
time.sleep(1)
return ret
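# NOTE: parallelrun busy-waits on threading.activeCount() and collects results
# in completion order (not submission order) via the appends in the worker lambda.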
#upload blob/layer
def uplayer(layerupload):
#HEAD
img,tgt,digest=layerupload[0][0],layerupload[0][1],layerupload[1]
uploaded=apirequest('http://%s/v2/%s/blobs/%s'%(tgt,img[1],digest),'HEAD')
if uploaded.has_key('Docker-Content-Digest'):
return True
#PUSH
layer=apirequest('http://%s/v2/%s/blobs/%s'%(img[0],img[1],digest),'')
if type(layer)!=type(''):
return False
#POST
post=apirequest('http://%s/v2/%s/blobs/uploads/?%s'%(tgt,img[1],urllib.urlencode({'digest':digest})),'POST')
if not post.has_key("Location"):
return False
#PATCH
patch=apirequest(post["Location"],'PATCH',data=layer,headers={"Content-Type":"application/octet-stream"})
if not post.has_key("Location"):
return False
#PUT
put=apirequest(post["Location"]+'&'+urllib.urlencode({'digest':digest}),'PUT')
if not put.has_key('Docker-Content-Digest'):
return False
return True
#move a image
def mvimg(imgpush):
img=imgpush[0]
manifests=apirequest('http://%s/v2/%s/manifests/%s'%(imgpush[1],img[1],img[2]),'HEAD')
if manifests.has_key('Docker-Content-Digest'):
print 'R:',img,manifests.get('Docker-Content-Digest','')
return manifests['Docker-Content-Digest']
manifests=apirequest('http://%s/v2/%s/manifests/%s'%tuple(img),'')
if not manifests.has_key('layers'):
return False
for i in manifests['layers']:
_res=uplayer([imgpush,i['digest']])
if not _res:
return False
#for i in parallelrun(uplayer,[[imgpush,i['digest']]for i in manifests['layers']],10):
# if not i[1]:
# return False
uplayer([imgpush,manifests['config']['digest']])
put=apirequest('http://%s/v2/%s/manifests/%s'%(imgpush[1],img[1],img[2]),'PUT',data=json.dumps(manifests),headers={"Content-Type":"application/vnd.docker.distribution.manifest.v2+json"})
print 'P:',img,put.get('Docker-Content-Digest','')
return put.get('Docker-Content-Digest',False)
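# Illustrative sketch (not part of the original file): with the helpers above,
# mirroring every repository and tag from one registry to another could be
# wired together like this. The host names below are hypothetical placeholders,
# and this assumes both registries accept the same auth flow as apirequest().
def mirror_registry(src, dst):
    for repo in getrepositories(src):
        for tag in getags([src, repo]):
            mvimg([[src, repo, tag], dst])
# mirror_registry('registry-a.example.com:5000', 'registry-b.example.com:5000')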
|
[
"noreply@github.com"
] |
jils2013.noreply@github.com
|
caf75fde8a29322b0f3f71e4117d53e8ddb58efe
|
305460b9313b627e17b43c9dcc598713637c4019
|
/7-7/productSum.py
|
5d1afcfcdf9cb1e8dc18a8df22b366e3239c6f8e
|
[] |
no_license
|
atanuc073/algoexpert-codes
|
5f57c9e39b076c1f736d2f0f4c6c768eac95a821
|
4f9c6c52e547812204bfa4976ad239ea83ba1475
|
refs/heads/master
| 2023-07-03T17:24:20.438480
| 2021-08-07T14:32:24
| 2021-08-07T14:32:24
| 382,352,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# time O(n) | space O(d), where d is the maximum depth of nesting
def productSum(array, multiplier=1):
    total = 0
    for element in array:
        if isinstance(element, list):
            total += productSum(element, multiplier + 1)
        else:
            total += element
    return total * multiplier
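# Quick check (not part of the original file): the outer list has depth 1,
# [7, -1] depth 2, and [-13, 8] depth 3, so the result is
# 5 + 2 + 3 + 2*(7 - 1) + 2*(6 + 4 + 3*(-13 + 8)) = 12.
print(productSum([5, 2, [7, -1], 3, [6, [-13, 8], 4]]))  # 12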
|
[
"atanuc073@gmail.com"
] |
atanuc073@gmail.com
|
256b7375e1b67dbef659b2a71081c151ec165e31
|
ae2374e9b3defcb5c5ac7a87a6fed8755670edb9
|
/ad_buy/ad_buy/urls.py
|
5979c0e413188ff03723f48c22621a31a4180703
|
[] |
no_license
|
antipetrov/rtb-simulation
|
c0aa51a3d29f93d8789356cddb4d59d165149055
|
9e51ca401f76bd2ee814cf716f059fccb76ba6d4
|
refs/heads/master
| 2023-05-31T12:51:22.733422
| 2019-03-31T17:36:06
| 2019-03-31T17:36:06
| 266,450,762
| 0
| 0
| null | 2021-06-10T22:56:58
| 2020-05-24T01:45:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
"""ad_buy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from ad_app import views
urlpatterns = [
path('', views.index, name='index'),
path('ad/<int:id>/', views.ad_view, name='ad_view'),
path('ad/<int:id>/preview_json', views.ad_timetable_preview_json, name='ad_timetable_preview'),
path('ad/<int:id>/report', views.ad_report, name='ad_report'),
path('ad/<int:id>/timetable/<int:timetable_id>/delete', views.ad_timetable_delete, name='ad_timetable_delete'),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
# For django versions before 2.0:
# url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
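# Illustrative sketch (not part of the original file): because every route
# above is named, views and templates can build URLs with reverse() instead of
# hard-coding paths. Shown as comments since reverse() cannot run while this
# URLconf is still being imported.
#
#   from django.urls import reverse
#   reverse('ad_view', kwargs={'id': 3})  # -> '/ad/3/'
#   reverse('ad_timetable_delete', kwargs={'id': 3, 'timetable_id': 7})
#   # -> '/ad/3/timetable/7/delete'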
|
[
"petr.matukhov@gmail.com"
] |
petr.matukhov@gmail.com
|
89a0ad29603302ad1b9ef508389b6a8c00d6c5d8
|
32598b094763e77be276dc31a3662c1ab181a8bf
|
/python_basic_webscraping/16_selenium_movies_scroll.py
|
db2110e9eb0b7f9e7f9c4b8370231b666a9e4a74
|
[] |
no_license
|
rssungjae1/Python-Lounge
|
30bc3b4cdf7f44dedc4a72258260a843381cd00e
|
3a69c1a146618fb994ceb3e84f8ace76ae642b72
|
refs/heads/main
| 2023-03-21T01:31:00.057595
| 2021-03-19T14:56:50
| 2021-03-19T14:56:50
| 349,454,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
from selenium import webdriver
import time
browser = webdriver.Chrome() # Chrome("path/to/chromedriver")
browser.maximize_window() # maximize the window
# navigate to the page
url = "https://play.google.com/store/movies/top"
browser.get(url)
# scrolling down
# browser.execute_script("window.scrollTo(0, 1080)") # scroll down 1080px once
interval = 2 # scroll once every 2 seconds
# fetch and store the current document height
prev_height = browser.execute_script("return document.body.scrollHeight")
# repeat until the page stops growing
while True:
    # scroll to the very bottom of the page
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight)")
    # wait for the page to load
    time.sleep(interval)
    # fetch and store the current document height
curr_height = browser.execute_script("return document.body.scrollHeight")
if curr_height == prev_height:
break
prev_height = curr_height
print("<scroll complete>")
##################################################################################
import requests
from bs4 import BeautifulSoup
soup = BeautifulSoup(browser.page_source, "lxml")
movies = soup.find_all("div", attrs={"class":"Vpfmgd"})
for movie in movies:
title = movie.find("div", attrs ={"class" : "WsMG1c nnK0zc"}).get_text()
    # price before the discount
    original_price = movie.find("span", attrs={"class" : "SUZt4c djCuy"})
    if original_price:
        original_price = original_price.get_text()
    else:
        # print(title, "<skipping movies that are not discounted>")
        continue
    # discounted price
    price = movie.find("span", attrs={"class" : "VfPpfd ZdBevf i5DZme"}).get_text()
    # link
    link = movie.find("a", attrs={"class":"JC71ub"})["href"]
    # full link: https://play.google.com + link
    print(f"Title : {title}")
    print(f"Original price : {original_price}")
    print(f"Discounted price : {price}")
    print("Link : ", "https://play.google.com" + link)
print("-"*120)
browser.quit()
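# Illustrative sketch (not part of the original file): the scroll loop above
# generalizes to any infinitely-scrolling page, so factoring it into a helper
# keeps scraping scripts short. The interval default is an assumption.
def scroll_to_bottom(driver, interval=2):
    prev_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
        time.sleep(interval)
        curr_height = driver.execute_script("return document.body.scrollHeight")
        if curr_height == prev_height:
            return
        prev_height = curr_height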
|
[
"rssungjae1@gmail.com"
] |
rssungjae1@gmail.com
|
2762cc43f2307609ed231fa2ed5a2e6ec2df2b9f
|
38a972a3cd1fc303b5f877e24d65118912d85d1c
|
/path/to/virtualenv/project/Lib/site-packages/tensorflow/python/framework/test_util.py
|
8ae37bff8635ae3b4b9e75826560b3fa7867f5aa
|
[] |
no_license
|
ZulfikarAkbar/YOLO_ObjectDetection
|
0c1015aa987d03329eae48a2053a07dda05d96c0
|
3517d0592a269f79df9afd82e0b1b0123bbe0473
|
refs/heads/master
| 2022-10-27T05:08:26.734173
| 2019-02-07T17:35:22
| 2019-02-07T17:35:22
| 169,613,306
| 0
| 1
| null | 2022-10-18T02:18:17
| 2019-02-07T17:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 67,074
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import itertools
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape # pylint: disable=unused-import
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
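# Illustrative sketch (not part of the original file): assert_ops_in_graph
# maps node names to expected op types, so a single placeholder named "x"
# can be checked like this.
def _example_assert_ops_in_graph():
  g = ops.Graph()
  with g.as_default():
    array_ops.placeholder(dtypes.float32, name="x")
  # Returns {"x": <NodeDef>}, or raises ValueError on a type mismatch.
  return assert_ops_in_graph({"x": "Placeholder"}, g)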
@tf_export("test.assert_equal_graph_def")
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
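# Illustrative sketch (not part of the original file): the layout helpers
# above also accept plain shape lists, which makes their behavior easy to see
# without building any tensors.
def _example_layout_conversions():
  nchw = NHWCToNCHW([1, 224, 224, 3])       # -> [1, 3, 224, 224]
  nhwc = NCHWToNHWC(nchw)                   # -> [1, 224, 224, 3]
  # Last dim 8 is split into 2 groups of 4: [N, C/4, H, W, 4].
  vect_c = NHWCToNCHW_VECT_C([1, 4, 4, 8])  # -> [1, 2, 4, 4, 4]
  return nchw, nhwc, vect_c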
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
# Reset the default graph so it has the C API enabled. We call
# reset_default_graph() instead of creating a new default Graph context to
# make this robust to tests that call reset_default_graph(), which requires
# that the current default graph isn't nested.
ops.reset_default_graph()
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# Make sure default graph reflects prev_value in case next test doesn't call
# reset_default_graph().
ops.reset_default_graph()
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
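# Illustrative sketch (not part of the original file): skip_if accepts either
# a plain boolean or a zero-argument callable evaluated at call time. Note the
# wrapper runs the body only for its side effects and always returns None.
def _example_skip_if_usage():
  @skip_if(lambda: not is_gpu_available(cuda_only=True))
  def _cuda_only_check():
    print("running the CUDA-only check")
  _cuda_only_check()  # Silently does nothing on machines without a CUDA GPU.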
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, False, *args, **kwargs)
return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, True, *args, **kwargs)
return wrapper
def enable_c_shapes(fn):
"""Decorator for enabling C shapes on a test.
Note this enables the C shapes after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
prev_value = ops._USE_C_SHAPES
# Only use C shapes if the C API is already enabled.
ops._USE_C_SHAPES = ops._USE_C_API
try:
fn(*args, **kwargs)
finally:
ops._USE_C_SHAPES = prev_value
return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
"""Adds methods that call original methods but with C API enabled.
Note this enables the C API in new methods after running the test class's
setup method. This can be a problem if some objects are created in it
before the C API is enabled.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
# If the C API is already enabled, don't do anything. Some tests break if the
# same test is run twice, so this allows us to turn on the C API by default
# without breaking these tests.
if ops._USE_C_API:
return cls
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCApi", enable_c_api(value))
return cls
def with_c_shapes(cls):
"""Adds methods that call original methods but with C API shapes enabled.
Note this enables C shapes in new methods after running the test class's
setup method.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
# If C shapes are already enabled, don't do anything. Some tests break if the
# same test is run twice, so this allows us to turn on the C shapes by default
# without breaking these tests.
if ops._USE_C_SHAPES:
return cls
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCShapes", enable_c_shapes(value))
return cls
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then
several times to let objects accumulate. The warmup helps ignore caches which
do not grow as the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
for _ in range(3):
f(self, **kwargs)
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
      # In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" % (
new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
if context.executing_eagerly():
f(self, **kwargs)
ops.reset_default_graph()
else:
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
backprop._zeros_cache.flush()
context.get_default_context().ones_rank_cache().flush()
context.get_default_context().scalar_cache().clear()
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
if len(gc.garbage) > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception:
logging.error("(Exception while printing object)")
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
def run_all_in_graph_and_eager_modes(cls):
base_decorator = run_in_graph_and_eager_modes()
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name, base_decorator(value))
return cls
def run_in_graph_and_eager_modes(__unused__=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a @{tf.test.TestCase} class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see @{tf.enable_eager_execution}).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes()
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
Args:
__unused__: Prevents silently skipping tests.
config: An optional config_pb2.ConfigProto to use to configure the
session when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
def decorated(self, **kwargs):
with context.graph_mode():
with self.test_session(use_gpu=use_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
self.setUp()
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/cpu:0"):
f(self, **kwargs)
else:
f(self, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
with context.eager_mode():
run_eagerly(self, **kwargs)
return decorated
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all([x in str(e) for x in ["CUDA", "not find"]]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
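# Illustrative sketch (not part of the original file): the device() helper
# above pins ops to GPU:0 only when one is actually available, and falls back
# to CPU:0 otherwise.
def _example_device_scope():
  with device(use_gpu=True):
    return array_ops.zeros([2, 2])  # Placed on GPU:0 if one is detected.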
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
    able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
      self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
elif isinstance(tensor, resource_variable_ops.ResourceVariable):
return tensor.read_value().numpy()
elif callable(tensor):
return self._eval_helper(tensor())
else:
raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently than session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif force_gpu and config.allow_soft_placement:
        # ConfigProto.CopyFrom() returns None, so copy into a fresh proto and
        # mutate that, rather than assigning the call's result.
        new_config = config_pb2.ConfigProto()
        new_config.CopyFrom(config)
        new_config.allow_soft_placement = False
        config = new_config
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if context.executing_eagerly():
yield None
elif graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
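  # Illustrative sketch (not part of the original file): a thread created via
  # checkedThread re-raises assertion failures from its body when join() is
  # called, instead of failing silently.
  def _exampleCheckedThreadUsage(self):
    t = self.checkedThread(target=lambda: self.assertEqual(1 + 1, 2))
    t.start()
    t.join()  # Would fail this test if the thread body had raised.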
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
return a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg=msg, equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, dict)
if a_is_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that fails, fall back to
      # traversing the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s." % (path_str,
path_str))
except TypeError as e:
msg = "Error: a%s has %s, but b%s has %s" % (path_str, type(a),
path_str, type(b))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
          numpy `ndarray` (including Tensor), or any arbitrarily nested
          structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
          numpy `ndarray` (including Tensor), or any arbitrarily nested
          structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
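  # Illustrative sketch (not part of the original file): assertAllClose
  # compares arbitrarily nested dict/list/tuple structures leaf by leaf, so a
  # tiny perturbation inside a nested value still passes the default tolerance.
  def _exampleAssertAllCloseNested(self):
    self.assertAllClose({"w": [1.0, 2.0]}, {"w": [1.0, 2.0 + 1e-7]})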
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b, err_msg=msg)
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
def assertAllLess(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if
    the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.float) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound)
if open_lower_bound else np.less(target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
    arrays = target if isinstance(target, list) else [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
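  # A hedged usage sketch (names below are illustrative): the predicate form
  # inspects the raised exception instead of matching a regular expression.
  #
  #   def _is_resource_exhausted(e):
  #     return isinstance(e, errors.ResourceExhaustedError)
  #
  #   with self.assertRaisesWithPredicateMatch(errors.OpError,
  #                                            _is_resource_exhausted):
  #     sess.run(some_op)  # `sess` and `some_op` are placeholders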
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2, "Devices %s and %s are not equal. %s" %
(device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
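# Illustrative sketch (names are hypothetical): look up a node in a GraphDef
# and read its fields only if it was found.
#
#   graph_def = some_graph.as_graph_def()
#   node = get_node_def_from_graph("MatMul_1", graph_def)
#   if node is not None:
#     print(node.op, node.input)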
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
|
[
"zulfikar.78.akbar@gmail.com"
] |
zulfikar.78.akbar@gmail.com
|
3a333d219c27873867459b78e83cc27ca760db9f
|
6a6fc4376d9e546b31f9bb7843d4b15e3d983e77
|
/app/views/__init__.py
|
51e469d6580cbc0db322256a831ce3eae19da80d
|
[] |
no_license
|
bluefinch83/FE_690_Homework_01
|
232140e12baf18db96494fe135481c95e2726383
|
9e5d6a103ee896215ca5e3b1b9d3d720f610dc92
|
refs/heads/master
| 2020-08-01T12:10:54.927256
| 2019-09-26T03:41:06
| 2019-09-26T03:41:06
| 210,992,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
from app import flask_app as app
import json
from datetime import datetime
import numpy as np
@app.route("/heartbeat")
def heartbeat():
return json.dumps(
{
"status": True,
"service": "Homework_Template",
"datetime": f"{datetime.now()}"
}
)
@app.route("/sum")
def sum(x, y):
s = x + y
return json.dump(
{
"sum": f"{s}"
}
)
@app.route("/minimum")
def minimum(x):
s = min(x)
return json.dump(
{
"minimum": f"{s}"
}
)
@app.route("/product")
def product(x):
a = np.array(x)
s = np.prod(a)
return json.dump(
{
"product": f"{s}"
}
)
@app.before_first_request
def load_app():
print("Loading App Before First Request")
|
[
"37155961+bluefinch83@users.noreply.github.com"
] |
37155961+bluefinch83@users.noreply.github.com
|
774241e41748b036d43c2408a8b464a1feba11db
|
743eb56cf4828193155b03df9695edea989ff0a0
|
/formateo_xml.py
|
7aed7b170ea42e9bbdedd1f1b73b7b05f04a882f
|
[] |
no_license
|
javicb55/pythonConsultasXML
|
a56f079cbdc4bff7640884d941a264c0f789cf33
|
d5d8fe65070049c618a189bcb347e34d0d1c257e
|
refs/heads/master
| 2020-04-02T17:24:26.428616
| 2018-10-26T09:34:09
| 2018-10-26T09:34:09
| 154,656,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
from arbolDirectorios import arbol_directorios
import crearJson
from bs4 import BeautifulSoup
import os
def formatearXML():
tree = ET.parse('NEWSELUJA_queries_spanish.xml')
root = tree.getroot()
listaConsultas = []
for child in root:
#print(child.tag)
query = child
dic = {}
for neighbor in query:
            #print(neighbor.tag, neighbor.text)  # iterate over the grandchildren of root
dic[neighbor.tag] = neighbor.text
listaConsultas.append(dic)
#print(dic)
#print(listaConsultas)
    # build the list of query numbers
lista_num = []
for reg in listaConsultas:
lista_num.append(reg['num'])
    # directory tree that will hold the files
    """
    if os.path.exists('dataFormat'):
        print("The directory tree already exists...")
    else:
        print("Directory tree created")
        os.mkdir('dataFormat')
    for reg in listaConsultas:
        if os.path.exists('dataFormat/' + reg['num']):
            print("The user directory already exists...")
        else:
            os.makedirs('dataFormat/' + reg['num'])
    """
arbol_directorios(listaConsultas)
return listaConsultas
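if __name__ == '__main__':
    # Minimal smoke test, assuming 'NEWSELUJA_queries_spanish.xml' sits next
    # to this script; prints how many queries were parsed.
    consultas = formatearXML()
    print("Parsed %d queries" % len(consultas))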
|
[
"javicb55@gmail.com"
] |
javicb55@gmail.com
|
0ab098c47e1a35fcbc9f8ca6c55addc6785d5d3c
|
c47ae2b4849310186509370f09dbe2116c9601ca
|
/scripts/Extract.16S.MG.py
|
d433be58ee284256a50028abcd3b417477335b46
|
[] |
no_license
|
caozhichongchong/traits_finder
|
33f68d4dee85486842a1887fe1e9079cedf43d17
|
467f0f4c64bf27d9c67f05721ae08a16f82661b5
|
refs/heads/master
| 2021-07-10T05:27:30.901151
| 2019-03-24T17:24:32
| 2019-03-24T17:24:32
| 177,446,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,283
|
py
|
import argparse
from Bio import SeqIO
import os
############################################ Arguments and declarations ##############################################
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i",
help="input dir", type=str, default='.',metavar='current dir (.)')
parser.add_argument("-f",
help="input filename", type=str, default='input.faa',metavar='input.faa')
parser.add_argument("-n",
help="prefix name for usearch result", type=str, default='.usearch.txt',metavar='.usearch.txt')
parser.add_argument("-r",
help="output dir", type=str, default='.',metavar='current dir (.)')
################################################## Definition ########################################################
args = parser.parse_args()
################################################### Function #######################################################
def Extract16S(root, searchfile, seqfile, resultdir):
# extract the query aa sequences according to a usearch or diamond alignment output
# generate a smaller data of potential intI1 or sul1 for blastp search
# input the query ORF sequences
Seq_16S = dict()
try:
f1 = open(os.path.join(resultdir, searchfile + '.fasta'), 'w')
for line in open(os.path.join(resultdir, searchfile), 'r'):
Seq = str(line).split('\t')[0].split(' ')[0]
if Seq not in Seq_16S:
Seq_16S.setdefault(Seq, [[int(str(line).split('\t')[6]) - 1, int(str(line).split('\t')[7]) - 1]])
            else:
                start = int(str(line).split('\t')[6]) - 1
                end = int(str(line).split('\t')[7]) - 1
                merged = False
                for locus in Seq_16S[Seq]:
                    if max(end, locus[1]) - min(start, locus[0]) <= 2000:
                        # same 16S on the contig: extend the existing locus
                        locus[0] = min(start, locus[0])
                        locus[1] = max(end, locus[1])
                        merged = True
                        break
                if not merged:
                    # another 16S on the same contig: record it separately
                    # (the original appended once per non-overlapping locus
                    # while mutating the list it was iterating over)
                    Seq_16S[Seq].append([start, end])
# screening out the longest 16S
for record in SeqIO.parse(open(os.path.join(root, seqfile), 'r'), 'fasta'):
if str(record.id) in Seq_16S:
for locus in Seq_16S[str(record.id)]:
f1.write('>' + seqfile.split('.')[0] + '\t' + str(record.id) + '\n' +
str(str(record.seq)[locus[0] : locus[1]]) + '\n')
#flog.write(seqfile+'\toutput\n')
#else:
# flog.write(seqfile + '\ttoo_long\n')
#else:
# flog.write(seqfile + '\ttoo_short\n')
f1.close()
    except IOError:
flog.write(seqfile + '\tfile_missing\n')
################################################### Programme #######################################################
flog = open(os.path.join(args.r, '16S_output.log'), 'a')
Extract16S( args.i, args.f+args.n, args.f, args.r)
flog.close()
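# Example invocation (file names are hypothetical):
#   python Extract.16S.MG.py -i genomes -f sample1.fa -n .usearch.txt -r results
# This expects the alignment table 'results/sample1.fa.usearch.txt' to exist
# and writes the extracted 16S regions to 'results/sample1.fa.usearch.txt.fasta'.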
|
[
"caozhichongchong@gmail.com"
] |
caozhichongchong@gmail.com
|
7bcf211fc8557fbd64cf3a049b6d6d044738fa41
|
311fbbff6bc0b52ba2662462c798a14719cbec7c
|
/FPG_AR.py
|
e92e1db1bb09c8bcc4cd5f381eac5ba091db6313
|
[] |
no_license
|
orliz/FindRules
|
3ccb38617b1c8a052af23e39e4765bf1407f8f1c
|
79548637a925d5cf37046f6b85d134fe91315f78
|
refs/heads/master
| 2020-03-19T09:36:00.436226
| 2018-06-06T09:09:50
| 2018-06-06T09:09:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,407
|
py
|
import scipy.io
import numpy as np
# import downsample
# def loadSimpDat():
# # Train_sorted = scipy.io.loadmat('Train_sorted.mat')
# # # print(Train_sorted['tempdescr'])
# # simDat = Train_sorted['tempdescr']
# simpDat = [['r', 'z', 'h', 'j', 'p'],
# ['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],
# ['z'],
# ['r', 'x', 'n', 'o', 's'],
# ['y', 'r', 'x', 'z', 'q', 't', 'p'],
# ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]
# return simpDat
# Converts the list of transactions into a dictionary keyed by frozenset.
def createInitSet(dataSet):  # each transaction becomes a frozenset key whose value is its occurrence count
retDict = {}
for trans in dataSet:
retDict[frozenset(trans)] = retDict.get(frozenset(trans), 0) + 1
# retDict[trans] = retDict.get(trans, 0) + 1
return retDict
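# Illustrative example of the conversion performed by createInitSet:
#   createInitSet([['a', 'b'], ['a', 'b'], ['c']])
#   -> {frozenset({'a', 'b'}): 2, frozenset({'c'}): 1}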
# Class definition for the nodes of the FP-tree
class treeNode:
def __init__(self, nameValue, numOccur, parentNode):
self.name = nameValue
self.count = numOccur
        self.nodeLink = None  # used to link nodes that hold the same item
        self.parent = parentNode  # needs to be updated; points to this node's parent
self.children = {}
def inc(self, numOccur):
self.count += numOccur
    def disp(self, ind=1):  # display the tree as indented text
print(' ' * ind, self.name, ' ', self.count)
for child in self.children.values():
            child.disp(ind + 1)  # recursive call to disp()
def createTree(dataSet, minSup): #create FP-tree from dataset but don't mine
headerTable = {}
#go over dataSet twice
    for trans in dataSet:  # first pass counts frequency of occurrence
for item in trans:
headerTable[item] = headerTable.get(item, 0) + dataSet[trans]
    # print('headerTable!:', headerTable)  # first pass: raw counts
for k in list(headerTable.keys()): #remove items not meeting minSup
if headerTable[k] < minSup:
del(headerTable[k])
    # print('headerTable2:', headerTable)  # second pass: counts filtered by support
    freqItemSet = set(headerTable.keys())  # frequent 1-itemsets
# print('freqItemSet: ',freqItemSet)
# print('-------------------------------------')
if len(freqItemSet) == 0: return None, None #if no items meet min support -->get out
# print('headerTable: ',headerTable)
# print('-------------------------------------')
for k in headerTable:
headerTable[k] = [headerTable[k], None] #reformat headerTable to use Node link
# print('headerTable: ',headerTable)
retTree = treeNode('Null Set', 1, None) #create tree
for tranSet, count in dataSet.items(): #go through dataset 2nd time
localD = {}
for item in tranSet: #put transaction items in order
if item in freqItemSet:
localD[item] = headerTable[item][0]
if len(localD) > 0:
            # sort this transaction's items by descending frequency
orderedItems = [v[0] for v in sorted(localD.items(), key=lambda p: p[1], reverse=True)]
updateTree(orderedItems, retTree, headerTable, count)#populate tree with ordered freq itemset
return retTree, headerTable #return tree and header table
def updateTree(items, inTree, headerTable, count):
if items[0] in inTree.children:#check if orderedItems[0] in retTree.children
        inTree.children[items[0]].inc(count)  # increment count
else: #add items[0] to inTree.children
inTree.children[items[0]] = treeNode(items[0], count, inTree)
if headerTable[items[0]][1] == None: #update header table
headerTable[items[0]][1] = inTree.children[items[0]]
else:
updateHeader(headerTable[items[0]][1], inTree.children[items[0]])
if len(items) > 1:#call updateTree() with remaining ordered items
updateTree(items[1::], inTree.children[items[0]], headerTable, count)
def updateHeader(nodeToTest, targetNode): #this version does not use recursion
while (nodeToTest.nodeLink != None): #Do not use recursion to traverse a linked list!
nodeToTest = nodeToTest.nodeLink
nodeToTest.nodeLink = targetNode
def ascendTree(leafNode, prefixPath): #ascends from leaf node to root
if leafNode.parent != None:
prefixPath.append(leafNode.name)
ascendTree(leafNode.parent, prefixPath)
def findPrefixPath(basePat, treeNode): #treeNode comes from header table
condPats = {}
while treeNode != None:
prefixPath = []
ascendTree(treeNode, prefixPath)
if len(prefixPath) > 1:
condPats[frozenset(prefixPath[1:])] = treeNode.count
treeNode = treeNode.nodeLink
return condPats
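# Hypothetical usage (assumes a header table built by createTree):
#   condPats = findPrefixPath('x', myHeaderTab['x'][1])
#   # -> e.g. {frozenset({'z'}): 3}: 'z' preceded 'x' on 3 paths to the root
#   # (the example values are illustrative only)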
def mineTree(inTree, headerTable, minSup, preFix, freqItemList):
bigL = [v[0] for v in sorted(headerTable.items(), key=lambda p: p[1][0])] #(sort header table by support)
for basePat in bigL: #start from bottom of header table
newFreqSet = preFix.copy()
############
# newFreqSet.add((basePat,headerTable[basePat][0]))
newFreqSet.add(basePat)
############
#print 'finalFrequent Item: ',newFreqSet #append to set
freqItemList.append([newFreqSet,headerTable[basePat][0]])
condPattBases = findPrefixPath(basePat, headerTable[basePat][1])
#print 'condPattBases :',basePat, condPattBases
#2. construct cond FP-tree from cond. pattern base
myCondTree, myHead = createTree(condPattBases, minSup)#cond. FP tree
#print 'head from conditional tree: ', myHead
if myHead != None: #3. mine cond. FP-tree
#print 'conditional tree for: ',newFreqSet
#myCondTree.disp(1)
mineTree(myCondTree, myHead, minSup, newFreqSet, freqItemList)
return freqItemList, minSup
# def loadData(i):
# return transaction_list[i]
# def createInitSet(dataSet):
# retDict = {}
# for trans in dataSet:
# retDict[frozenset(trans)] = retDict.get(frozenset(trans), 0) + 1
# return retDict
#
def FP_Growth(data, minSup):
initSet = createInitSet(data)
# initSet= loadSimpDat(data)
myFPtree, myHeaderTab = createTree(initSet, minSup)
#myFPtree.disp()
myFreqList = []
myFreqList, tminSup = mineTree(myFPtree, myHeaderTab, minSup, set([]), myFreqList)
# rules = findRules(myFreqList,min_conf)
# return myFreqList
return myFreqList, tminSup
def findRules(myFreqList,min_conf, minsup):
freq_item_set_dict={}
freq_item_set=[]
max_len=0
for i in range(len(myFreqList)):#for each [frequentSet,support] in myFreqList
freq_item_set_dict[frozenset(myFreqList[i][0])]=myFreqList[i][1]
# freq_item_set_dict[myFreqList[i][0]] = myFreqList[i][1]
freq_item_set.append(frozenset(myFreqList[i][0]))
# freq_item_set.append(myFreqList[i][0])
if len(frozenset(myFreqList[i][0]))>max_len:
# if len(myFreqList[i][0])>max_len:
max_len = len(frozenset(myFreqList[i][0]))
# max_len = len(myFreqList[i][0])
    freq_item_set_list = [[] for i in range(max_len)]  # index 0: frequent 1-itemsets; index 1: frequent 2-itemsets; and so on
for s in freq_item_set:
size = len(s)
freq_item_set_list[size-1].append(s)
#print(len(myFreqList))
rules=[]
for i in range(max_len-1):
slist1 = freq_item_set_list[i]
# print slist1
for j in range(i+1,max_len):
slist2 = freq_item_set_list[j]
# print slist2
for s1 in slist1:
for s2 in slist2:
# print s1, s2
#conf = freq_item_set_dict[s2]/float(freq_item_set_dict[s1])
if s1.issubset(s2):
conf = freq_item_set_dict[s2]/float(freq_item_set_dict[s1])
if conf >= min_conf:
rules.append([[s1,s2-s1],minsup,conf*100])
# FreqItemsets = FP_Growth(myFreqList,minSup)
return rules
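if __name__ == '__main__':
    # Minimal end-to-end sketch on toy transactions (data is illustrative):
    # mine frequent itemsets with FP-Growth, then derive association rules
    # at 60% confidence.
    toy_data = [['a', 'b', 'c'],
                ['a', 'b'],
                ['a', 'c'],
                ['b', 'c'],
                ['a', 'b', 'c']]
    freq_list, sup = FP_Growth(toy_data, 2)
    print('frequent itemsets:', freq_list)
    print('rules:', findRules(freq_list, 0.6, sup))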
# dataFile = 'Train_sorted.mat'
# data = scio.loadmat(dataFile)
# print(data)
######Train_sorted = scipy.io.loadmat('Train_sorted.mat')
# print(Train_sorted)
# Train_sorted['descr'] = np.array(Train_sorted['tempdescr']).T
# Train_sorted['label'] = np.array(Train_sorted['templabel']).T
# Train_sorted.descr = Train_sorted.get('descr')
######print(Train_sorted['tempdescr'])
# #test
# # print(FP_Growth(simDat))
# simDat = downsample.loadSimpDat()
# initSet = createInitSet(simDat)
# print('iniset:',initSet)
# myFPtree,myHeaderTab = createTree(initSet,7)
# myFPtree.disp()
# print('myHeaderTab:',myHeaderTab)
# freqItems=[]
# mineTree(myFPtree, myHeaderTab, 7, set([]), freqItems)
# print('freqItems:',freqItems)
# myFreList = FP_Growth(simDat,7)
# print('myFreList:',myFreList)
# # for fit_conf in np.arange(0.1,1.1,0.1):
# print("当置信度为:",fit_conf)
# rules = findRules(myFreList,fit_conf)
# print('rules:',rules)
#
# data = loadSimpDat()
# print(createInitSet(data))
# print(FP_Growth(data,2))
# myFrelist, minsup = FP_Growth(data,2)
# print(myFrelist)
# rules = findRules(myFrelist,0.4, minsup)
# print(rules)
# #
#
# # Train_sorted_descr = np.loadtxt('Train2_X.txt')
# print(Train_sorted_descr)
# # data = loadSimpDat()
# # print(data)
# result = createInitSet(data)
# print(data)
# data = loadSimpDat()
# fre,s = FP_Growth(data,2)
|
[
"527946771@qq.com"
] |
527946771@qq.com
|