repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
rushkii/maid_manga_id | maid_manga_id/types/chapter.py | from typing import List, Dict
from dataclasses import dataclass
from bs4 import BeautifulSoup as bsoup
from datetime import datetime
from humanfriendly import format_size
import re, img2pdf, os
from maid_manga_id.object import Object
from maid_manga_id import http
@dataclass
class Chapters(Object):
    """A manga chapter entry scraped from maid.my.id.

    Attributes:
        title: mapping with 'japanese'/'english' titles, or a plain string.
        chapter: chapter number (normalized to an int by _parse).
        released: release timestamp/date value as scraped.
        url: reader URL for this chapter.
    """
    title: Dict[str, str] = None
    chapter: List[dict] = None
    released: int = None
    url: str = None

    async def _parse(self, **kwargs):
        """Build a Chapters object, normalizing 'chapter' to its leading integer."""
        kwargs['chapter'] = int(re.search(r'\d+', str(kwargs['chapter']))[0])
        return Chapters(**kwargs)

    async def download(self):
        """Download every page image of this chapter and bundle them into a PDF.

        Returns:
            str: path of the written PDF file under downloads/.
        """
        import asyncio  # Local import: only needed for the concurrent fetch below.
        if not os.path.exists('downloads/'):
            os.mkdir('downloads/')
        if isinstance(self.title, dict):
            title = f"{self.title['japanese']}-{self.title['english']}"
        else:
            title = self.title
        manga: bsoup = await http.scrape(self.url)
        reader = manga.find_all('img', alt='image host')
        img = [im['src'] for im in reader]
        print(f"Downloading {title}-chapter-{self.chapter} into PDF file...")
        # Fetch all page images concurrently instead of awaiting them one by
        # one -- this fixes the previously slow sequential request loop.
        # asyncio.gather preserves order, so the PDF page order is unchanged.
        responses = await asyncio.gather(*(http.new('GET', src) for src in img))
        _bytes = []
        for i, req in enumerate(responses, start=1):
            if req.status_code == 200:
                _bytes.append(req.content)
                print(f"{i}. Image size: {format_size(len(req.content))}")
        a4inpt = (img2pdf.mm_to_pt(200), img2pdf.mm_to_pt(300))
        layout_fun = img2pdf.get_layout_fun(a4inpt)
        path = f"downloads/{title}-chapter-{self.chapter}.pdf"
        with open(path, "wb") as f:
            f.write(img2pdf.convert(_bytes, layout_fun=layout_fun))
        print(f"Download complete, path: {path}")
        return path

    async def read(self):
        # TKinter-based reader UI, not implemented yet.
        pass
rushkii/maid_manga_id | old_version/maid_manga/api.py | from bs4 import BeautifulSoup as bSoup
from urllib.parse import quote
import requests, lxml, re
class MaidMangaID(object):
    """Synchronous scraper client for https://maid.my.id (old API)."""

    # Compiled once; pulls the chapter count out of a listing card.
    _CHAPTER_RE = re.compile("Ch. ([0-9]+)")

    def __init__(self):
        self.base_url = "https://maid.my.id"

    @staticmethod
    def _pad_chapter(chapter):
        """Normalize a chapter string: '0' becomes '01', single digits are zero-padded."""
        if chapter == '0':
            chapter = '01'
        if len(chapter) == 1:
            chapter = '0' + chapter
        return chapter

    def ch_html_parse(self, title='', chapter='01'):
        """Fetch and parse the reader page for one chapter of *title*."""
        chapter = self._pad_chapter(chapter)
        if title == '':
            raise Exception('Invalid Title!')
        r = requests.get(
            f"{self.base_url}/{title.replace(' ', '-')}-chapter-{chapter}-bahasa-indonesia")
        return bSoup(r.text, 'lxml')

    def manga_html_parse(self, title=''):
        """Fetch and parse the series overview page for *title*.

        Bug fix: the old body referenced an undefined `chapter` variable
        (copy-pasted from ch_html_parse), so every call raised NameError;
        those stray lines are removed.
        """
        if title == '':
            raise Exception('Invalid Title!')
        r = requests.get(f"{self.base_url}/manga/{title.replace(' ', '-')}")
        return bSoup(r.text, 'lxml')

    def _parse_manga_card(self, card):
        """Parse one 'flexbox2-item' listing card into a result dict.

        Shared by search() and top_manga(), which previously duplicated
        this parsing inline.
        """
        season = card.find('div', class_='season')
        return {
            'thumbnail': card.find('div', class_='flexbox2-thumb').img['src'],
            'title': {
                'japanese': card.find('span', class_='title').text
            },
            'genres': [a.text for a in card.find_all('a', rel='tag')],
            'synopsis': card.find('div', class_='synops').text,
            'chapters': self._CHAPTER_RE.search(season.text).group(1).strip()
                        if season is not None else "",
            'author': card.find('span', class_='studio').text,
            'rating': card.find('div', class_='score').text
        }

    def search(self, query=''):
        """Search the site and return a list of parsed result dicts."""
        r = requests.get(f"{self.base_url}/?s={quote(query)}")
        s = bSoup(r.text, 'lxml')
        return [self._parse_manga_card(card)
                for card in s.find_all('div', class_='flexbox2-item')]

    def top_manga(self, genre=''):
        """Return the top-30 listing for *genre* (Romance, Comedy or Harem)."""
        if genre == '':
            raise Exception('Top genre not found, available top genre: Romance, Comedy, Harem.')
        r = requests.get(f"{self.base_url}/top-30-manga-{genre.lower()}")
        s = bSoup(r.text, 'lxml')
        return [self._parse_manga_card(card)
                for card in s.find_all('div', class_='flexbox2-item')]

    def get_manga_thumbnail(self, title=''):
        """Return the series thumbnail image URL."""
        s = self.manga_html_parse(title)
        return s.find('div', class_='series-thumb').img['src']

    def get_manga_title(self, title=''):
        """Return the main series title."""
        s = self.manga_html_parse(title)
        return s.find('div', class_='series-title').h2.text

    def get_manga_subtitle(self, title=''):
        """Return the alternative (subtitle) series title."""
        s = self.manga_html_parse(title)
        return s.find('div', class_='series-title').span.text

    def get_manga_genres(self, title=''):
        """Return the genre strings of the series.

        NOTE(review): this collects the text of 'series-genres' container
        divs, not the individual <a> tags inside them -- verify whether
        per-genre anchors were intended.
        """
        s = self.manga_html_parse(title)
        return [a.text for a in s.find_all('div', class_='series-genres')]

    def get_manga_synopsis(self, title=''):
        """Return the synopsis text of the series."""
        s = self.manga_html_parse(title)
        return s.find('div', class_='series-synops').text

    def get_manga_rating(self, title=''):
        """Return the score/rating text of the series."""
        s = self.manga_html_parse(title)
        return s.find('div', class_='series-infoz score').text

    def get_manga_release(self, title=''):
        """Return the publish-date text of the series."""
        s = self.manga_html_parse(title)
        return s.find('span', class_='published').text

    def get_manga_author(self, title=''):
        """Return the author text of the series."""
        s = self.manga_html_parse(title)
        return s.find('span', class_='author').text

    def get_images_by_chapter(self, title='', chapter='01'):
        """Return the page image URLs of one chapter."""
        s = self.ch_html_parse(title, chapter)
        reader = s.find_all('img', alt='image host')
        return [img['src'] for img in reader]

    @staticmethod
    def _chapter_entries(soup):
        """Build {chapter: {'release-date', 'read-url'}} entries from a series page."""
        return [
            {a.find("span", class_="ch").text: {
                'release-date': a.find("span", class_="date").text,
                'read-url': a.a['href']}}
            for a in soup.find_all('div', class_='flexch-infoz')
        ]

    def get_manga_extended(self, title=''):
        """Return a combined dict of series metadata, chapter list and the
        page images of chapter 01."""
        s_manga = self.manga_html_parse(title)
        s_ch = self.ch_html_parse(title, chapter='01')
        return {
            'title': {
                'japanese': s_manga.find('div', class_='series-title').h2.text,
                'english': s_manga.find('div', class_='series-title').span.text
            },
            'genres': [a.text for a in s_manga.find_all('div', class_='series-genres')],
            'synopsis': s_manga.find('div', class_='series-synops').text,
            'chapters': self._chapter_entries(s_manga),
            'images': {
                'thumbnail': s_manga.find('div', class_='series-thumb').img['src'],
                'pages': [img['src'] for img in s_ch.find_all('img', alt='image host')]
            },
            'publish-date': s_manga.find('span', class_='published').text,
            'author': s_manga.find('span', class_='author').text,
            'rating': s_manga.find('div', class_='series-infoz score').text
        }

    def get_chapter_info(self, title=''):
        """Return per-chapter release dates and reader URLs."""
        s = self.manga_html_parse(title=title)
        return self._chapter_entries(s)

    def get_chapter_list(self, title=''):
        """Return the number of chapters listed on the series page."""
        s = self.manga_html_parse(title=title)
        return len(s.find_all('span', class_='ch'))

    def get_all_chapter_release(self, title=''):
        """Return {chapter: release-date} dicts for every chapter."""
        s = self.manga_html_parse(title=title)
        return [{a.find("span", class_="ch").text: a.find("span", class_="date").text}
                for a in s.find_all('div', class_='flexch-infoz')]
rushkii/maid_manga_id | maid_manga_id/scaffold.py | <gh_stars>1-10
class Maid(object):
    """Placeholder scaffold for the future Maid client class (empty by design)."""
    pass
rushkii/maid_manga_id | old_version/setup.py | <reponame>rushkii/maid_manga_id
from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()

setup(
    name='maid_manga_id',
    version='1.0',
    description='Maid Manga Indonesia API Using Python Web Scraper.',
    long_description=readme(),
    long_description_content_type='text/markdown',
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    url='https://github.com/rushkii/maid_manga_id',
    author='Kee',
    author_email='<EMAIL>',
    keywords='manga maid_manga maid_manga_id manga_id',
    license='MIT',
    # Fix: 'urllib' and 're' are standard-library modules, not PyPI
    # distributions, and listing them breaks installation; 'bs4' is only a
    # shim for the real distribution name 'beautifulsoup4'.
    install_requires=['requests', 'beautifulsoup4', 'lxml'],
    include_package_data=True,
    zip_safe=False
)
rushkii/maid_manga_id | maid_manga_id/__init__.py | <gh_stars>1-10
__version__ = '1.1'
__author__ = 'Kee'
__copyright__ = "Copyright (C) 2021 Kee <https://github.com/rushkii/maid_manga_id>"
from .maid_client import MaidManga |
sarvex/tpu | models/official/unet3d/data_preprocess/convert_lits.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts raw LiTS numpy data to TFRecord."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
from scipy import ndimage
import tensorflow.compat.v1 as tf
flags.DEFINE_string("image_file_pattern", None,
"path pattern to an input image npy file.")
flags.DEFINE_string("label_file_pattern", None,
"path pattern to an input label npy file.")
flags.DEFINE_string("output_path", None, "path to output TFRecords.")
flags.DEFINE_boolean("crop_liver_region", True,
"whether to crop liver region out.")
flags.DEFINE_boolean("apply_data_aug", False,
"whether to apply data augmentation.")
flags.DEFINE_integer("shard_start", 0,
"start with volume-${shard_start}.npy.")
flags.DEFINE_integer("shard_stride", 1,
"this process will convert "
"volume-${shard_start + n * shard_stride}.npy for all n.")
flags.DEFINE_integer("output_size", 128,
"output, cropped size along x, y, and z.")
flags.DEFINE_integer("resize_size", 192,
"size along x, y, and z before cropping.")
FLAGS = flags.FLAGS
def to_1hot(label):
  """Return *label* one-hot encoded over 3 classes along a new last axis."""
  planes = [(label == cls)[..., np.newaxis] for cls in range(3)]
  return np.concatenate(planes, axis=-1).astype(label.dtype)
def save_to_tfrecord(image, label, idx, im_id, output_path,
                     convert_label_to_1hot):
  """Serialize one (image, label) crop into a GZip-compressed TFRecord file.

  Args:
    image: numpy array holding the CT crop.
    label: numpy array holding the segmentation crop; expanded to 3 one-hot
      channels here when convert_label_to_1hot is True.
    idx: index of the crop within this volume (used in the file name).
    im_id: id of the source volume (used in the file name).
    output_path: directory receiving "instance-{im_id}-{idx}.tfrecords".
    convert_label_to_1hot: whether to one-hot encode the label.
  """
  if convert_label_to_1hot:
    label = to_1hot(label)
  d_feature = {}
  # Stored as raw bytes of the flattened arrays; the reader must know the
  # shape and dtype out-of-band.
  d_feature["image/ct_image"] = tf.train.Feature(
      bytes_list=tf.train.BytesList(value=[image.reshape([-1]).tobytes()]))
  d_feature["image/label"] = tf.train.Feature(
      bytes_list=tf.train.BytesList(value=[label.reshape([-1]).tobytes()]))
  example = tf.train.Example(features=tf.train.Features(feature=d_feature))
  serialized = example.SerializeToString()
  result_file = os.path.join(
      output_path, "instance-{}-{}.tfrecords".format(im_id, idx))
  # One example per file, GZip-compressed (matches 'compressed_input' in the
  # training config).
  options = tf.python_io.TFRecordOptions(
      tf.python_io.TFRecordCompressionType.GZIP)
  with tf.python_io.TFRecordWriter(result_file, options=options) as w:
    w.write(serialized)
def intensity_change(im):
  """Apply a random intensity scaling to *im* (modified in place).

  Roughly one call in ten returns the image unchanged; otherwise every
  element is multiplied by a single factor drawn from N(1.0, 0.05) and
  clipped to [0.9, 1.1].
  """
  if np.random.rand() < 0.1:
    # Skip augmentation for ~10% of samples.
    return im
  sigma, truncate_rad = 0.05, 0.1
  factor = np.clip(np.random.normal(1.0, sigma),
                   1.0 - truncate_rad, 1.0 + truncate_rad)
  im *= factor
  return im
def rand_crop_liver(image, label, res_s, out_s,
                    apply_data_aug, augment_times=54):
  """Crop image and label; Randomly change image intensity.

  Randomly crop image and label around the liver bounding box.

  Args:
    image: 3D numpy array.
    label: 3D numpy array.
    res_s: resized size of image and label.
    out_s: output size of random crops.
    apply_data_aug: whether to apply data augmentation.
    augment_times: the number of times to randomly crop and augment data.

  Yields:
    cropped and augmented image and label.
  """
  # Yield nothing for volumes that were not resized to the expected cube.
  if image.shape != (res_s, res_s, res_s) or \
      label.shape != (res_s, res_s, res_s):
    logging.info("Unexpected shapes. "
                 "image.shape: %s, label.shape: %s",
                 image.shape, label.shape)
    return
  # Label value 1 is treated as the (rough) liver region.
  rough_liver_label = 1
  x, y, z = np.where(label == rough_liver_label)
  # Center of the liver's axis-aligned bounding box.
  bbox_center = [(x.min() + x.max()) // 2,
                 (y.min() + y.max()) // 2,
                 (z.min() + z.max()) // 2]
  def in_range_check(c):
    # Clamp a crop center so the out_s-sized crop stays inside the volume.
    c = max(c, out_s // 2)
    c = min(c, res_s - out_s // 2)
    return c
  for _ in range(augment_times):
    rand_c = []
    for c in bbox_center:
      # Jitter each center coordinate with truncated Gaussian noise.
      sigma = out_s // 6
      truncate_rad = out_s // 4
      c += np.clip(np.random.randn() * sigma, -truncate_rad, truncate_rad)
      rand_c.append(int(in_range_check(c)))
    image_aug = image[rand_c[0] - out_s // 2:rand_c[0] + out_s // 2,
                      rand_c[1] - out_s // 2:rand_c[1] + out_s // 2,
                      rand_c[2] - out_s // 2:rand_c[2] + out_s // 2].copy()
    label_aug = label[rand_c[0] - out_s // 2:rand_c[0] + out_s // 2,
                      rand_c[1] - out_s // 2:rand_c[1] + out_s // 2,
                      rand_c[2] - out_s // 2:rand_c[2] + out_s // 2].copy()
    if apply_data_aug:
      image_aug = intensity_change(image_aug)
    yield image_aug, label_aug
def rand_crop_whole_ct(image, label, res_s, out_s,
                       apply_data_aug, augment_times=2):
  """Crop image and label; Randomly change image intensity.

  Randomly crop image and label from the whole CT volume.

  Args:
    image: 3D numpy array.
    label: 3D numpy array.
    res_s: resized size of image and label.
    out_s: output size of random crops.
    apply_data_aug: whether to apply data augmentation.
    augment_times: the number of times to randomly crop and augment data.

  Yields:
    cropped and augmented image and label.
  """
  # Yield nothing for volumes that were not resized to the expected cube.
  if image.shape != (res_s, res_s, res_s) or \
      label.shape != (res_s, res_s, res_s):
    logging.info("Unexpected shapes. "
                 "image.shape: %s, label.shape: %s",
                 image.shape, label.shape)
    return
  if not apply_data_aug:
    # No augmentation: yield the single deterministic center crop.
    idx = (res_s - out_s) // 2
    image = image[idx:idx + out_s, idx:idx + out_s, idx:idx + out_s]
    label = label[idx:idx + out_s, idx:idx + out_s, idx:idx + out_s]
    yield image, label
  else:
    # Augmentation: a 3x3x3 grid of offsets (start / middle / end) along each
    # axis, repeated augment_times times, each crop intensity-augmented.
    cut = res_s - out_s
    for _ in range(augment_times):
      for i in [0, cut // 2, cut]:
        for j in [0, cut // 2, cut]:
          for k in [0, cut // 2, cut]:
            image_aug = image[i:i + out_s, j:j + out_s, k:k + out_s].copy()
            label_aug = label[i:i + out_s, j:j + out_s, k:k + out_s].copy()
            image_aug = intensity_change(image_aug)
            yield image_aug, label_aug
def resize_3d_image_nearest_interpolation(im, res_s):
  """Resize a 3D array to (res_s, res_s, res_s) with nearest interpolation.

  Done as two passes of 2D PIL resizes: first axis 0 (one resize per
  axis-2 slice), then axes 1 and 2 (one resize per resized axis-0 slice).
  Nearest-neighbor keeps label values integral (no interpolated classes).
  """
  # Pass 1: resize axis 0 only; PIL's size argument is (width, height),
  # so (im.shape[1], res_s) maps an (s0, s1) slice to (res_s, s1).
  new_shape = [res_s, im.shape[1], im.shape[2]]
  ret0 = np.zeros(new_shape, dtype=im.dtype)
  for i in range(im.shape[2]):
    im_slice = np.array(Image.fromarray(im[..., i]).resize(
        (im.shape[1], res_s), resample=Image.NEAREST))
    ret0[..., i] = im_slice
  # Pass 2: resize axes 1 and 2 of each axis-0 slice to res_s x res_s.
  new_shape = [res_s, res_s, res_s]
  ret = np.zeros(new_shape, dtype=im.dtype)
  for i in range(res_s):
    im_slice = np.array(Image.fromarray(ret0[i, ...]).resize(
        (res_s, res_s), resample=Image.NEAREST))
    ret[i, ...] = im_slice
  return ret
def process_one_file(image_path, label_path, im_id,
                     output_path, res_s, out_s,
                     crop_liver_region, apply_data_aug):
  """Convert one (volume, segmentation) npy pair into TFRecord crops.

  Loads the arrays (tf.gfile supports GCS paths), resizes both to a
  res_s^3 cube, then writes out_s^3 crops via save_to_tfrecord.
  """
  with tf.gfile.Open(image_path, "rb") as f:
    image = np.load(f)
  with tf.gfile.Open(label_path, "rb") as f:
    label = np.load(f)
  # Image: smooth spline interpolation is fine for intensities.
  image = ndimage.zoom(image, [float(res_s) / image.shape[0],
                               float(res_s) / image.shape[1],
                               float(res_s) / image.shape[2]])
  # Label: nearest-neighbor resize so class ids stay integral.
  label = resize_3d_image_nearest_interpolation(label.astype(np.uint8),
                                                res_s).astype(np.float32)
  if crop_liver_region:
    for idx, (image_aug, label_aug) in enumerate(rand_crop_liver(
        image, label, res_s, out_s, apply_data_aug)):
      save_to_tfrecord(image_aug, label_aug, idx, im_id, output_path,
                       convert_label_to_1hot=True)
  else:  # not crop_liver_region
    # If we output the entire CT scan (crop_liver_region=False),
    # do not convert_label_to_1hot to save storage.
    for idx, (image_aug, label_aug) in enumerate(rand_crop_whole_ct(
        image, label, res_s, out_s, apply_data_aug)):
      save_to_tfrecord(image_aug, label_aug, idx, im_id, output_path,
                       convert_label_to_1hot=False)
def main(argv):
  """Convert every volume assigned to this shard until a file is missing."""
  del argv  # Unused.
  output_path = FLAGS.output_path
  res_s = FLAGS.resize_size
  out_s = FLAGS.output_size
  crop_liver_region = FLAGS.crop_liver_region
  apply_data_aug = FLAGS.apply_data_aug
  # Walk ids shard_start, shard_start + shard_stride, ...; the first missing
  # image file marks the end of the dataset for this shard.
  for im_id in range(FLAGS.shard_start, 1000000, FLAGS.shard_stride):
    image_path = FLAGS.image_file_pattern.format(im_id)
    label_path = FLAGS.label_file_pattern.format(im_id)
    if not tf.gfile.Exists(image_path):
      logging.info("Reached the end. Image does not exist: %s. "
                   "Process finish.", image_path)
      break
    process_one_file(image_path, label_path, im_id,
                     output_path, res_s, out_s,
                     crop_liver_region, apply_data_aug)
if __name__ == "__main__":
app.run(main)
|
sarvex/tpu | models/official/unet3d/unet_config.py | <filename>models/official/unet3d/unet_config.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config to train UNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Default hyperparameters for 3D UNet training; consumed as a params dict.
UNET_CONFIG = {
    # Place holder for tpu configs.
    'tpu_config': {},
    'model_dir': '',
    'training_file_pattern': '',
    'eval_file_pattern': '',
    # The input files are GZip compressed and need decompression.
    'compressed_input': True,
    'use_bfloat16': True,
    'label_dtype': 'float32',
    'train_batch_size': 8,
    'eval_batch_size': 8,
    'predict_batch_size': 8,
    'train_epochs': 10,
    'train_item_count': 1000,
    'eval_item_count': 100,
    'train_steps': 100000,
    'eval_steps': 10,
    'num_steps_per_eval': 100,
    'min_eval_interval': 180,
    'eval_timeout': None,
    'optimizer': 'adam',
    'momentum': 0.9,
    # Spatial dimension of input image.
    'input_image_size': [128, 128, 128],
    # Number of channels of the input image.
    'num_channels': 1,
    # Spatial partition dimensions.
    'input_partition_dims': None,
    # Use deconvolution (transposed convolution) to upsample; otherwise
    # plain upsampling is used.
    'deconvolution': True,
    # Number of output segmentation classes.
    'num_classes': 3,
    # Number of filters used by the architecture.
    'num_base_filters': 32,
    # Depth of the network.
    'depth': 4,
    # Dropout rate used across the network.
    'dropout_rate': 0.5,
    # Number of levels that contribute to the output.
    'num_segmentation_levels': 2,
    # Use batch norm.
    'use_batch_norm': True,
    'init_learning_rate': 0.00005,
    # Learning rate decay steps.
    'lr_decay_steps': 100000,
    # Learning rate decay rate.
    'lr_decay_rate': 0.5,
    # Data format, 'channels_last' or 'channels_first'.
    'data_format': 'channels_last',
    # Use class index for training. Otherwise, use one-hot encoding.
    'use_index_label_in_train': False,
    # Loss name, e.g. softmax cross entropy, adaptive_dice32.
    'loss': 'adaptive_dice32',
}
# No parameter restrictions for this config.
UNET_RESTRICTIONS = []
|
sarvex/tpu | models/official/detection/configs/classification_config.py | <reponame>sarvex/tpu
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config template to train classification models."""
import sys
sys.path.insert(0, 'tpu/models')
from configs import base_config
from hyperparameters import params_dict
# pylint: disable=line-too-long
# Classification config: overrides applied on top of the shared detection
# BASE_CFG params dict.
CLASSIFICATION_CFG = params_dict.ParamsDict(base_config.BASE_CFG)
CLASSIFICATION_CFG.override({
    'type': 'classification',
    'architecture': {
        'parser': 'classification_parser',
        'backbone': 'resnet',
        # Note that `num_classes` is the total number of classes including one
        # background class whose index is 0.
        'num_classes': 1001,
    },
    'train': {
        'iterations_per_loop': 1000,
        'train_batch_size': 1024,  # 2x2.
        'total_steps': 112603,  # total images 1281167, so ~90 epochs.
        'learning_rate': {
            'type': 'cosine',
            'warmup_learning_rate': 0.0,
            'warmup_steps': 6255,  # ~5 epochs.
            'init_learning_rate': 0.4,  # linear scaling based on batch size.
            'learning_rate_levels': [0.04, 0.004, 0.0004],  # for type `step`.
            'learning_rate_steps': [37534, 75069, 100091],
        },
        'frozen_variable_prefix': None,
        'l2_weight_decay': 0.0001,
        'label_smoothing': 0.0,
    },
    'eval': {
        'eval_batch_size': 1024,
        'eval_samples': 50000,
        'num_steps_per_eval': 1000,
        'type': 'customized',
    },
    'classification_parser': {
        'output_size': [224, 224],
        'aug_rand_hflip': True,
    },
    'batch_norm_activation': {
        'batch_norm_momentum': 0.9,
        'batch_norm_epsilon': 1e-5,
        'batch_norm_trainable': True,
        'use_sync_bn': False,
        'activation': 'relu',
    },
    'resnet': {
        'resnet_depth': 50,
    },
    'spinenet': {
        'init_drop_connect_rate': None,
    },
    'classification_head': {
        'endpoints_num_filters': 0,
        'aggregation': 'top',  # `top` or `all`.
        'dropout_rate': 0.0,
    },
}, is_strict=False)
# No parameter restrictions for this config.
CLASSIFICATION_RESTRICTIONS = [
]
# pylint: enable=line-too-long
|
sarvex/tpu | models/official/efficientnet/lite/efficientnet_lite_model_qat_test.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for efficientnet_lite_model_qat."""
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_model_optimization as tfmot
from lite import efficientnet_lite_builder
from lite import efficientnet_lite_model_qat
class EfficientnetLiteModelQatTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for the QAT (quantization-aware training) EfficientNet-Lite models."""

  # NOTE(review): the 'efficientnet-lite1' entries below lack a trailing
  # comma, so they are passed as bare strings rather than 1-tuples;
  # parameterized accepts both forms, but the inconsistency is worth fixing.
  @parameterized.parameters(('efficientnet-lite0',), ('efficientnet-lite1'),
                            ('efficientnet-lite2',), ('efficientnet-lite3',),
                            ('efficientnet-lite4',))
  def test_values_match(self, model_name):
    # Building the base model and its '-qat' variant from the same random
    # seed must produce numerically matching outputs.
    images = tf.random.uniform((1, 224, 224, 3))
    tf.random.set_seed(0)
    outputs, _ = efficientnet_lite_builder.build_model(
        images,
        model_name=model_name,
        override_params=None,
        training=False,
        features_only=False,
        pooled_features_only=False)
    tf.random.set_seed(0)
    outputs_qat, _ = efficientnet_lite_builder.build_model(
        images,
        model_name=model_name + '-qat',
        override_params=None,
        training=False,
        features_only=False,
        pooled_features_only=False)
    self.assertAllClose(tf.reduce_sum(outputs), tf.reduce_sum(outputs_qat))

  @parameterized.parameters(('efficientnet-lite0',), ('efficientnet-lite1'),
                            ('efficientnet-lite2',), ('efficientnet-lite3',),
                            ('efficientnet-lite4',))
  def test_model_quantizable(self, model_name):
    # tfmot's quantize_model must accept the functional QAT model without
    # raising, i.e. every layer must be quantizable.
    images = tf.random.uniform((1, 224, 224, 3))
    override_params = {}
    # Swap in plain Keras BatchNormalization -- presumably so tfmot can
    # wrap it; confirm against efficientnet_lite_builder defaults.
    override_params['batch_norm'] = tf.keras.layers.BatchNormalization
    blocks_args, global_params = efficientnet_lite_builder.get_model_params(
        model_name, override_params=override_params)
    model_qat = efficientnet_lite_model_qat.FunctionalModel(
        model_name=model_name,
        blocks_args=blocks_args,
        global_params=global_params,
        features_only=False,
        pooled_features_only=False).get_functional_model(
            training=True, input_shape=images.shape)
    try:
      tfmot.quantization.keras.quantize_model(model_qat)
    except Exception as e:  # pylint: disable=broad-except
      self.fail('Exception raised: %s' % str(e))
if __name__ == '__main__':
tf.test.main()
|
sarvex/tpu | models/official/unet3d/data_preprocess/convert_lits_nii_to_npy.py | <filename>models/official/unet3d/data_preprocess/convert_lits_nii_to_npy.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts .nii files in LiTS dataset to .npy files.
This script should be run just once before running convert_lits.{py,borg}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import multiprocessing
import os
# Standard Imports
import nibabel as nib
import numpy as np
num_processes = 2
input_path = "Downloads/LiTS/Train/" # where the .nii files are.
output_path = "Downloads/LiTS/Train_np/" # where you want to put the npy files.
def process_one_file(image_path):
  """Convert one LiTS volume-<id>.nii (and its segmentation) to .npy files."""
  # "volume-<id>.nii" -> "<id>"; the matching label file in the same
  # directory is "segmentation-<id>.nii".
  im_id = os.path.basename(image_path).split("volume-")[1].split(".nii")[0]
  label_path = image_path.replace("volume-", "segmentation-")
  # NOTE(review): nibabel's get_data() is deprecated (removed in newer
  # releases); get_fdata() is the replacement -- confirm the pinned version.
  image = nib.load(image_path).get_data().astype(np.float32)
  label = nib.load(label_path).get_data().astype(np.float32)
  print("image shape: {}, dtype: {}".format(image.shape, image.dtype))
  print("label shape: {}, dtype: {}".format(label.shape, label.dtype))
  np.save(os.path.join(output_path, "volume-{}.npy".format(im_id)), image)
  np.save(os.path.join(output_path, "segmentation-{}.npy".format(im_id)), label)
if __name__ == "__main__":
  # The guard is required: with the 'spawn' start method every worker
  # re-imports this module, and unguarded module-level Pool creation would
  # recursively spawn workers instead of just running process_one_file.
  nii_dir = os.path.join(input_path, "volume-*")
  # Context manager terminates the pool and releases its workers on exit.
  with multiprocessing.Pool(num_processes) as p:
    p.map(process_one_file, glob.glob(nii_dir))
|
sarvex/tpu | models/official/detection/projects/vild/modeling/vild_head.py | <filename>models/official/detection/projects/vild/modeling/vild_head.py<gh_stars>10-100
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes to build various prediction heads in all supported models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_ops
def _divide_no_nan(x, y, epsilon=1e-8):
  """Equivalent to tf.math.divide_no_nan but supports bfloat16."""
  # A denominator counts as "zero" when it lies within [-epsilon, epsilon].
  near_zero = tf.logical_and(tf.greater_equal(y, -epsilon),
                             tf.less_equal(y, epsilon))
  # Replace near-zero denominators by 1 so the division itself never blows up.
  safe_y = tf.where(near_zero, tf.ones_like(y), y)
  # y may broadcast against x; recompute the mask at x's full static shape
  # so exactly the affected quotient entries are zeroed out.
  y_full = tf.broadcast_to(y, x.get_shape())
  near_zero_full = tf.logical_and(tf.greater_equal(y_full, -epsilon),
                                  tf.less_equal(y_full, epsilon))
  return tf.where(near_zero_full, tf.zeros_like(x), x / safe_y)
class ViLDFastrcnnHead(object):
"""Fast R-CNN box head."""
  def __init__(
      self,
      num_classes,
      num_convs=0,
      num_filters=256,
      use_separable_conv=False,
      num_fcs=2,
      fc_dims=1024,
      # for vild classifier: start
      clip_dim=512,
      classifier_weight_path=None,
      normalize_classifier=False,
      normalize_visual=False,
      temperature=1.0,
      # feature distillation
      visual_feature_distill=None,
      max_distill_rois=300,
      # for vild classifier: end
      activation='relu',
      use_batch_norm=True,
      batch_norm_activation=nn_ops.BatchNormActivation(activation='relu'),
      class_agnostic_bbox_pred=False):
    """Initialize params to build Fast R-CNN box head.

    Args:
      num_classes: an integer for the number of classes.
      num_convs: `int` number that represents the number of the intermediate
        conv layers before the FC layers.
      num_filters: `int` number that represents the number of filters of the
        intermediate conv layers.
      use_separable_conv: `bool`, indicating whether the separable conv layers
        is used.
      num_fcs: `int` number that represents the number of FC layers before the
        predictions.
      fc_dims: `int` number that represents the number of dimension of the FC
        layers.
      clip_dim: `int` number that represents the number of dimension of the CLIP
        text embeddings.
      classifier_weight_path: `str` for the text embeddings used as classifier.
      normalize_classifier: `bool`, indicating whether to normalize the
        classifier.
      normalize_visual: indication whether to normalize the visual features used
        for classification.
      temperature: `float`, temperature applied to the logits.
      visual_feature_distill: None or `str` in ['vanilla', 'double_branch'] to
        specify the type of visual feature distillation.
      max_distill_rois: `int`, specify the number of precomputed rois used for
        distillation.
      activation: activation function. Support 'relu' and 'swish'.
      use_batch_norm: 'bool', indicating whether batchnorm layers are added.
      batch_norm_activation: an operation that includes a batch normalization
        layer followed by an optional activation layer.
      class_agnostic_bbox_pred: `bool`, indicating whether bboxes should be
        predicted for every class or not.
    """
    self._num_classes = num_classes
    self._num_convs = num_convs
    self._num_filters = num_filters
    if use_separable_conv:
      self._conv2d_op = functools.partial(
          tf.layers.separable_conv2d,
          depth_multiplier=1,
          bias_initializer=tf.zeros_initializer())
    else:
      self._conv2d_op = functools.partial(
          tf.layers.conv2d,
          kernel_initializer=tf.keras.initializers.VarianceScaling(
              scale=2, mode='fan_out', distribution='untruncated_normal'),
          bias_initializer=tf.zeros_initializer())
    self._num_fcs = num_fcs
    self._fc_dims = fc_dims
    if activation == 'relu':
      self._activation = tf.nn.relu
    elif activation == 'swish':
      self._activation = tf.nn.swish
    else:
      raise ValueError('Activation {} not implemented.'.format(activation))
    self._use_batch_norm = use_batch_norm
    self._batch_norm_activation = batch_norm_activation
    self._class_agnostic_bbox_pred = class_agnostic_bbox_pred
    # clip classifier related
    self._clip_dim = clip_dim
    self._classifier_weight_path = classifier_weight_path
    # Fail fast if the precomputed text-embedding classifier file is missing.
    assert tf.gfile.Exists(self._classifier_weight_path)
    self._normalize_classifier = normalize_classifier
    self._normalize_visual = normalize_visual
    self._temperature = temperature
    # feature distill
    self._feat_distill = visual_feature_distill
    self._max_distill_rois = max_distill_rois
    # Only the fully-normalized configuration is currently supported, despite
    # the False defaults above -- callers must pass both flags as True.
    assert self._normalize_classifier and self._normalize_visual
  def __call__(self, roi_features, is_training=False):
    """Box and class branches for the Mask-RCNN model (ViLD variant).

    Runs the shared conv/FC trunk over the ROI features, projects the result
    into the CLIP embedding space, and classifies against a frozen CLIP text
    classifier loaded from `classifier_weight_path`. Depending on
    `visual_feature_distill` it also splits off ('vanilla') or re-computes
    ('double_branch') features used as distillation targets.

    Args:
      roi_features: A ROI feature tensor of shape [batch_size, num_rois,
        height_l, width_l, num_filters].
      is_training: `boolean`, if True if model is in training mode.

    Returns:
      class_outputs: a tensor with a shape of
        [batch_size, num_rois, num_classes], representing the class
        predictions.
      box_outputs: a tensor with a shape of
        [batch_size, num_rois, num_classes * 4], representing the box
        predictions.
      distill_feat_outputs: projected visual features used as distillation
        targets, or None when feature distillation is disabled.
      distill_class_outputs: class logits from the distillation branch (only
        produced at inference with 'double_branch'), else None.
    """
    distill_feat_outputs = None
    distill_class_outputs = None
    # AUTO_REUSE lets the distill branch below share the 'class-predict'
    # kernel created for the main classifier.
    with tf.variable_scope('frcnn_layer_0/fast_rcnn_head', reuse=tf.AUTO_REUSE):
      # ---------------- RESHAPE & SPLIT ----------------
      _, num_rois, height, width, filters = roi_features.get_shape().as_list()
      net = tf.reshape(roi_features, [-1, height, width, filters])
      if self._feat_distill == 'double_branch':
        distill_net = net
        if is_training:
          all_roi_features = roi_features
          # split the rois for supervised learning and distillation
          # (the last `max_distill_rois` entries are the distill rois)
          roi_features, distill_roi_features = tf.split(
              all_roi_features,
              [num_rois - self._max_distill_rois, self._max_distill_rois],
              axis=1)
          _, num_rois, height, width, filters = roi_features.get_shape(
          ).as_list()
          net = tf.reshape(roi_features, [-1, height, width, filters])
          distill_net = tf.reshape(distill_roi_features,
                                   [-1, height, width, filters])
      # ---------------- BUILD COMMON OUTPUTS ----------------
      for i in range(self._num_convs):
        net = self._conv2d_op(
            net,
            self._num_filters,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            dilation_rate=(1, 1),
            # When batch norm is on, activation is applied inside
            # `_batch_norm_activation` instead.
            activation=(None if self._use_batch_norm else self._activation),
            name='conv_{}'.format(i))
        if self._use_batch_norm:
          net = self._batch_norm_activation(net, is_training=is_training)
      filters = self._num_filters if self._num_convs > 0 else filters
      net = tf.reshape(net, [-1, num_rois, height * width * filters])
      for i in range(self._num_fcs):
        net = tf.layers.dense(
            net,
            units=self._fc_dims,
            activation=(None if self._use_batch_norm else self._activation),
            # fc6/fc7 naming matches the classic Fast-RCNN layer names.
            name='fc{}'.format(i + 6))
        if self._use_batch_norm:
          net = self._batch_norm_activation(net, is_training=is_training)
      net = tf.cast(net, tf.float32)
      # ---------------- BUILD DISTILL OUTPUTS for ViLD-ensemble ---------------
      # Separate trunk ('distill_conv_*' / 'distill_fc*') for 'double_branch'.
      if self._feat_distill == 'double_branch':
        for i in range(self._num_convs):
          distill_net = self._conv2d_op(
              distill_net,
              self._num_filters,
              kernel_size=(3, 3),
              strides=(1, 1),
              padding='same',
              dilation_rate=(1, 1),
              activation=(None if self._use_batch_norm else self._activation),
              name='distill_conv_{}'.format(i))
          if self._use_batch_norm:
            distill_net = self._batch_norm_activation(distill_net,
                                                      is_training=is_training)
        filters = self._num_filters if self._num_convs > 0 else filters
        # At inference the distill branch sees all rois, not just the split.
        distill_net = tf.reshape(
            distill_net, [-1,
                          self._max_distill_rois if is_training else num_rois,
                          height * width * filters])
        for i in range(self._num_fcs):
          distill_net = tf.layers.dense(
              distill_net,
              units=self._fc_dims,
              activation=(None if self._use_batch_norm else self._activation),
              name='distill_fc{}'.format(i + 6))
          if self._use_batch_norm:
            distill_net = self._batch_norm_activation(distill_net,
                                                      is_training=is_training)
        distill_net = tf.cast(distill_net, tf.float32)
      # ---------------- VILD PROJ & NORM ----------------
      # Project visual features to the CLIP embedding dimension and
      # L2-normalize so logits below are cosine similarities.
      projected_net = tf.layers.dense(
          net, units=self._clip_dim, activation=None, name='project-to-clip')
      if self._normalize_visual:
        tf.logging.info(f'visual: {projected_net}')  # (B, num_rois, 512)
        visual_norm = tf.norm(
            projected_net, ord=2, axis=-1, keepdims=True, name='visual_norm')
        tf.logging.info(f'visual_norm: {visual_norm}')  # (B, num_rois, 1)
        projected_net = _divide_no_nan(projected_net, visual_norm)
      if self._feat_distill == 'double_branch':
        tf.logging.info(f'distill_net before projection: {distill_net}')
        projected_distill_net = tf.layers.dense(
            distill_net,
            units=self._clip_dim,
            activation=None,
            name='distill-project-to-clip',
        )
        if self._normalize_visual:
          tf.logging.info(f'distilled visual: {projected_distill_net}')
          # (B, num_all_rois, 512)
          distill_visual_norm = tf.norm(
              projected_distill_net,
              ord=2,
              axis=-1,
              keepdims=True,
              name='distill_visual_norm')
          tf.logging.info(f'distill_visual_norm: {distill_visual_norm}')
          # (B, num_all_rois, 1)
          projected_distill_net = _divide_no_nan(projected_distill_net,
                                                 distill_visual_norm)
      classifier_input = projected_net
      if self._feat_distill == 'vanilla' and is_training:
        # during inference, no need to split as there are no distill rois
        # [batch_size, num_rois, some feat dim]
        tf.logging.info(f'before split, classifier_input: {classifier_input}')
        classifier_input, distill_feat_outputs = tf.split(
            classifier_input,
            [num_rois - self._max_distill_rois, self._max_distill_rois],
            axis=1)
        tf.logging.info(f'after split, classifier_input: {classifier_input}, '
                        f'distill_feat_outputs: {distill_feat_outputs}')
      if self._feat_distill == 'double_branch':
        distill_feat_outputs = projected_distill_net
        if not is_training:
          distill_classifier_input = projected_distill_net
      # ---------------- CLASSIFICATION LAYER ----------------
      # Frozen CLIP text embeddings become the classifier kernel
      # (no bias, initialized from disk; one column per foreground class).
      with tf.gfile.GFile(self._classifier_weight_path, 'rb') as fp:
        loaded_numpy = np.load(fp)
      # the shape of current version of CLIP text feature
      tf.logging.info(f'loaded_numpy.shape: {loaded_numpy.shape};'
                      f' clip dim: {self._clip_dim};'
                      f' num_classes: {self._num_classes}')
      assert loaded_numpy.shape == (self._clip_dim, self._num_classes - 1)
      kernel_initializer = tf.initializers.constant(loaded_numpy)
      class_outputs = tf.layers.dense(
          classifier_input,
          self._num_classes - 1,
          use_bias=False,
          kernel_initializer=kernel_initializer,
          bias_initializer=tf.zeros_initializer(),
          name='class-predict')
      if self._normalize_classifier:
        classifier = tf.get_variable(name='class-predict/kernel')
        # [D, num_classes]
        classifier_norm = tf.norm(classifier, ord=2, axis=0)  # [num_classes,]
        tf.logging.info(f'classifier_norm: {classifier_norm}')
        assert class_outputs.dtype == classifier_norm.dtype
        class_outputs = _divide_no_nan(class_outputs, classifier_norm[None,
                                                                      None, :])
      # background classifier layer and normalization
      # (background is learned, unlike the frozen text embeddings above)
      background_output = tf.layers.dense(
          classifier_input,
          1,
          use_bias=False,
          kernel_initializer=tf.random_normal_initializer(stddev=0.01),
          name='background-class-predict')
      if self._normalize_classifier:
        bg_classifier = tf.get_variable(name='background-class-predict/kernel')
        tf.logging.info(f'bg_classifier: {bg_classifier}')
        bg_classifier_norm = tf.norm(bg_classifier, ord=2, axis=0)  # [1,]
        tf.logging.info(f'bg_classifier_norm: {bg_classifier_norm}')
        assert background_output.dtype == bg_classifier_norm.dtype
        background_output = _divide_no_nan(background_output,
                                           bg_classifier_norm[None, None, :])
      # Background logit goes first: class id 0 is background.
      class_outputs = tf.concat((background_output, class_outputs),
                                axis=-1,
                                name='concat_classifier')
      class_outputs *= self._temperature
      if (not is_training) and self._feat_distill == 'double_branch':
        # Same layer name + AUTO_REUSE => shares the frozen CLIP kernel above.
        distill_class_outputs = tf.layers.dense(
            distill_classifier_input,
            self._num_classes - 1,
            use_bias=False,
            kernel_initializer=kernel_initializer,
            name='class-predict')
        distill_class_outputs = _divide_no_nan(distill_class_outputs,
                                               classifier_norm[None, None, :])
        distill_class_outputs *= self._temperature
      # ---------------- BOX PREDICTION LAYER ----------------
      if is_training and self._feat_distill == 'vanilla':
        # split net for box prediction
        tf.logging.info(f'before split, net: {net}')
        # [batch_size, num_rois, self._fc_dim]
        net, distilled_net_not_used = tf.split(
            net, [num_rois - self._max_distill_rois, self._max_distill_rois],
            axis=1)
        tf.logging.info(f'after split, net: {net}, '
                        f'distilled_net_not_used: {distilled_net_not_used}')
      num_box_outputs = (4 if self._class_agnostic_bbox_pred else 4 *
                         self._num_classes)
      box_outputs = tf.layers.dense(
          net,
          num_box_outputs,
          kernel_initializer=tf.random_normal_initializer(stddev=0.001),
          bias_initializer=tf.zeros_initializer(),
          name='box-predict')
    return class_outputs, box_outputs, distill_feat_outputs, distill_class_outputs
|
sarvex/tpu | models/official/detection/projects/vild/preprocessing/create_lvis_tf_record.py | <gh_stars>10-100
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw LVIS dataset to TFRecord for object_detection.
VAL:
DATA_DIR=[DATA_DIR]
DEST_DIR=[DEST_DIR]
VAL_JSON="${DATA_DIR}/lvis_v1_val.json"
python3 preprocessing/create_lvis_tf_record.py --logtostderr \
--image_dir="${DATA_DIR}" \
--json_path="${VAL_JSON}" \
--dest_dir=${DEST_DIR} \
--include_mask=True \
--split='val' \
--debug=False \
--num_parts=100
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import io
import json
import multiprocessing
import os
import os.path as osp
from absl import app
from absl import flags
import dataset_util
import numpy as np
import PIL.Image
from pycocotools import mask
import tensorflow.compat.v1 as tf
flags.DEFINE_boolean('include_mask', True,
'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: True.')
flags.DEFINE_string('image_dir', '', 'Directory containing images.')
flags.DEFINE_string('json_path', '', 'File containing object '
'annotations - boxes and instance masks.')
flags.DEFINE_string('dest_dir', '/tmp', 'Path to output file')
flags.DEFINE_enum('split', default='val', enum_values=['train', 'val'],
help='Split to preprocess')
flags.DEFINE_integer('num_parts', default=100,
help='how many tfrecords do you want to create')
flags.DEFINE_integer('max_num_processes', default=100,
help='max number of processes, '
'adjust if needed!')
flags.DEFINE_boolean('debug', default=False, help='')
FLAGS = flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def create_tf_example(image,
                      image_dir,
                      bbox_annotations=None,
                      category_index=None,
                      include_mask=False):
  """Converts image and annotations to a tf.Example proto.

  Args:
    image: dict with keys:
      [u'license', u'file_name', u'coco_url', u'height', u'width',
      u'date_captured', u'flickr_url', u'id', u'not_exhaustive_category_ids',
      u'neg_category_ids']
    image_dir: directory containing the image files.
    bbox_annotations:
      list of dicts with keys:
      [u'segmentation', u'area', u'image_id', u'bbox', u'category_id', u'id']
      Notice that bounding box coordinates in the official LVIS dataset are
      given as [x, y, width, height] tuples using absolute coordinates where
      x, y represent the top-left (0-indexed) corner. This function converts
      to the format expected by the Tensorflow Object Detection API (which is
      [ymin, xmin, ymax, xmax] with coordinates normalized relative
      to image size).
    category_index: a dict containing LVIS category information keyed
      by the 'id' field of each category. See the
      label_map_util.create_category_index function.
    include_mask: Whether to include instance segmentations masks
      (PNG encoded) in the result. default: False.

  Returns:
    success: whether the conversion is successful
    filename: image filename
    example: The converted tf.Example

  Raises:
    ValueError: if the image pointed to by data['filename'] is not a valid JPEG
  """
  image_height = image['height']
  image_width = image['width']
  filename = image['coco_url']
  # LVIS references images by URL; keep only the last two path components
  # (e.g. 'train2017/000000123456.jpg') as the on-disk relative path.
  filename = osp.join(*filename.split('/')[-2:])
  image_id = image['id']
  image_not_exhaustive_category_ids = image['not_exhaustive_category_ids']
  image_neg_category_ids = image['neg_category_ids']
  full_path = os.path.join(image_dir, filename)
  # Missing images are skipped (success=False) rather than failing the shard.
  if not tf.gfile.Exists(full_path):
    tf.logging.warn(f'image {full_path} not exists! skip')
    return False, None, None
  with tf.gfile.GFile(full_path, 'rb') as fid:
    encoded_jpg = fid.read()
  # SHA256 of the raw bytes acts as a stable per-image key.
  key = hashlib.sha256(encoded_jpg).hexdigest()
  feature_dict = {
      'image/height':
          dataset_util.int64_feature(image_height),
      'image/width':
          dataset_util.int64_feature(image_width),
      'image/filename':
          dataset_util.bytes_feature(filename.encode('utf8')),
      'image/source_id':
          dataset_util.bytes_feature(str(image_id).encode('utf8')),
      'image/key/sha256':
          dataset_util.bytes_feature(key.encode('utf8')),
      'image/encoded':
          dataset_util.bytes_feature(encoded_jpg),
      'image/format':
          dataset_util.bytes_feature('jpeg'.encode('utf8')),
      'image/not_exhaustive_category_ids':
          dataset_util.int64_list_feature(image_not_exhaustive_category_ids),
      'image/image_neg_category_ids':
          dataset_util.int64_list_feature(image_neg_category_ids),
  }
  if bbox_annotations:
    xmin = []
    xmax = []
    ymin = []
    ymax = []
    is_crowd = []
    category_names = []
    category_ids = []
    area = []
    encoded_mask_png = []
    for object_annotations in bbox_annotations:
      (x, y, width, height) = tuple(object_annotations['bbox'])
      # Normalize to [0, 1] and clamp boxes that spill past the image border.
      xmin_single = max(float(x) / image_width, 0.0)
      xmax_single = min(float(x + width) / image_width, 1.0)
      ymin_single = max(float(y) / image_height, 0.0)
      ymax_single = min(float(y + height) / image_height, 1.0)
      # Drop degenerate boxes (zero/negative extent after clamping).
      if xmax_single <= xmin_single or ymax_single <= ymin_single:
        continue
      xmin.append(xmin_single)
      xmax.append(xmax_single)
      ymin.append(ymin_single)
      ymax.append(ymax_single)
      # LVIS has no crowd annotations; keep the field for COCO compatibility.
      is_crowd.append(0)
      category_id = int(object_annotations['category_id'])
      category_ids.append(category_id)
      category_names.append(category_index[category_id]['name'].encode('utf8'))
      area.append(object_annotations['area'])
      if include_mask:
        # Convert polygon/RLE segmentation to a binary mask, collapse any
        # multi-part masks into one channel, then PNG-encode.
        run_len_encoding = mask.frPyObjects(object_annotations['segmentation'],
                                            image_height, image_width)
        binary_mask = mask.decode(run_len_encoding)
        binary_mask = np.amax(binary_mask, axis=2)
        pil_image = PIL.Image.fromarray(binary_mask)
        output_io = io.BytesIO()
        pil_image.save(output_io, format='PNG')
        encoded_mask_png.append(output_io.getvalue())
    feature_dict.update({
        'image/object/bbox/xmin':
            dataset_util.float_list_feature(xmin),
        'image/object/bbox/xmax':
            dataset_util.float_list_feature(xmax),
        'image/object/bbox/ymin':
            dataset_util.float_list_feature(ymin),
        'image/object/bbox/ymax':
            dataset_util.float_list_feature(ymax),
        'image/object/class/text':
            dataset_util.bytes_list_feature(category_names),
        'image/object/class/label':
            dataset_util.int64_list_feature(category_ids),
        'image/object/is_crowd':
            dataset_util.int64_list_feature(is_crowd),
        'image/object/area':
            dataset_util.float_list_feature(area),
    })
    if include_mask:
      feature_dict['image/object/mask'] = (
          dataset_util.bytes_list_feature(encoded_mask_png))
  example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
  return True, filename, example
def create_single_record(record_filename, part_image_ids, image_index,
                         img_anno_map, cat_index, include_mask):
  """Writes one TFRecord shard containing the given image ids.

  Args:
    record_filename: output path of the shard. The caller (`main`) already
      joins it with `FLAGS.dest_dir`, so it is used as-is here. (The previous
      version joined `FLAGS.dest_dir` a second time, which double-prefixed
      the path whenever `dest_dir` was relative.)
    part_image_ids: list of image ids belonging to this shard.
    image_index: dict mapping image id -> image info dict.
    img_anno_map: dict mapping image id -> list of annotation dicts.
    cat_index: dict mapping category id -> category info dict.
    include_mask: `bool`, whether to encode instance masks in the examples.
  """
  # Context manager guarantees the record file is flushed and closed even if
  # an example fails to convert mid-shard.
  with tf.python_io.TFRecordWriter(record_filename) as writer:
    for idx, image_id in enumerate(part_image_ids):
      success, filename, example = create_tf_example(
          image_index[image_id],
          FLAGS.image_dir,
          bbox_annotations=img_anno_map[image_id],
          category_index=cat_index,
          include_mask=include_mask,
      )
      if success:
        writer.write(example.SerializeToString())
        # Log every example in debug mode, otherwise every 100th.
        if FLAGS.debug or idx % 100 == 0:
          tf.logging.info(
              f'Finish writing idx {idx} image_id {image_id} img {filename}')
def main(_):
  """Loads the LVIS json, shards the image ids, and writes TFRecords in parallel."""
  # ==================== prepare ====================
  assert FLAGS.image_dir, '`image_dir` missing.'
  assert FLAGS.json_path, 'annotation file is missing.'
  if FLAGS.debug:
    FLAGS.dest_dir += '_debug'
  if not tf.gfile.Exists(FLAGS.dest_dir):
    tf.gfile.MakeDirs(FLAGS.dest_dir)
  # ==================== load json & build index ====================
  with tf.gfile.Open(FLAGS.json_path, 'r') as f:
    json_file = json.load(f)
  image_ids = [image['id'] for image in json_file['images']]
  if FLAGS.debug:
    # Debug runs only process the first 10 images.
    image_ids = image_ids[:10]
  tf.logging.info(f'num of images: {len(image_ids)}')
  image_index = {image['id']: image for image in json_file['images']}
  cat_index = {cat['id']: cat for cat in json_file['categories']}
  # Group annotations by owning image so each worker can look them up in O(1).
  img_anno_map = collections.defaultdict(list)
  for anno in json_file['annotations']:
    img_anno_map[anno['image_id']].append(anno)
  tf.logging.info('indices built')
  # ==================== write tf records ====================
  total_len = len(image_ids)
  # Ceiling division: every image id lands in exactly one of num_parts shards.
  part_len = (total_len + FLAGS.num_parts - 1) // FLAGS.num_parts
  all_filenames = [
      osp.join(FLAGS.dest_dir,
               f'{FLAGS.split}-{part_idx:05}-of-{FLAGS.num_parts:05}.tfrecord')
      for part_idx in range(FLAGS.num_parts)
  ]
  # Pre-slice per-shard inputs so each worker process receives only the
  # (picklable) subset of data it needs.
  image_ids_parts = []
  image_index_parts = []
  img_anno_map_parts = []
  for part_idx in range(FLAGS.num_parts):
    start_idx = part_len * part_idx
    end_idx = min(start_idx + part_len, total_len)
    image_ids_parts.append(image_ids[start_idx:end_idx])
    image_index_parts.append(
        {image_id: image_index[image_id] for image_id in image_ids_parts[-1]})
    img_anno_map_parts.append(
        {image_id: img_anno_map[image_id] for image_id in image_ids_parts[-1]})
  if FLAGS.debug:
    tf.logging.info(f'all_filenames: {all_filenames}')
    tf.logging.info(f'image_ids_parts: {image_ids_parts}')
    tf.logging.info(f'image_ids: {image_ids}')
  # One worker per shard, capped by max_num_processes.
  with multiprocessing.Pool(
      processes=min(FLAGS.num_parts, FLAGS.max_num_processes)) as pool:
    pool.starmap(
        create_single_record,
        zip(all_filenames, image_ids_parts, image_index_parts,
            img_anno_map_parts, [cat_index] * FLAGS.num_parts,
            [FLAGS.include_mask] * FLAGS.num_parts))
    pool.close()
    pool.join()
if __name__ == '__main__':
  # Script entry point: configure logging, then hand flag parsing to absl.
  tf.logging.set_verbosity(tf.logging.INFO)
  app.run(main)
|
sarvex/tpu | tools/data_converter/image_classification/simple_example.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example implementation of ImageClassificationBuilder.
This shows an example of generating a fake dataset with the following directory
structure:
- train
- class-0
- class-0-ex-0.jpg
- class-0-ex-1.jpg
- ...
- class-1
- class-1-ex-0.jpg
- class-1-ex-1.jpg
- ...
- ...
- validation
- etc.
- testing
- etc.
This example also includes an implementation of ImageClassificationConfig
which is used in conjunction with ImageClassificationBuilder to generate
TFRecords.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import tempfile
from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets.public_api as tfds
from image_classification.image_classification_data import ImageClassificationBuilder
from image_classification.image_classification_data import ImageClassificationConfig
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'num_classes', default=10,
help='The number of classes to use in this example.')
flags.DEFINE_integer(
'image_size', default=224,
help='The width and the height of the generated images.')
flags.DEFINE_string(
'data_path', default=None,
help='The root path of where to save the generated images or where the '
'images are saved, if `generate`=False.')
flags.DEFINE_bool(
'generate', default=True,
help='Whether or not to use generated data.')
flags.DEFINE_integer(
'num_examples_per_class_low', default=10,
help='The low end of the range of number of examples per class.')
flags.DEFINE_integer(
'num_examples_per_class_high', default=20,
help='The high end of the range of number of examples per class.')
flags.DEFINE_string(
'save_dir', default=None,
help='The location of where to save the converted TFRecords.')
_WORKER_COUNT = 100
def create_random_image(path, shape):
  """Generate an image of random pixels with `shape` and write it to `path`."""
  pixels = np.uint8(np.random.random(shape) * 255)
  image = Image.fromarray(pixels)
  image.save(path)
def create_random_image_zip(zipped):
  """Unpacks a (path, shape) pair for create_random_image (pool.map adapter)."""
  path, shape = zipped
  return create_random_image(path, shape)
def create_sample_dataset(root_path,
                          range_of_examples_per_class,
                          num_classes,
                          img_extension='jpg',
                          img_shape=(224, 224),
                          modes=('train', 'validation', 'test')):
  """Create raw sample training, validation, test data.

  Args:
    root_path: `str`, the root path of where to store the data. This should
      already be created.
    range_of_examples_per_class: `tuple`, (low, high) range of the number of
      examples per class.
    num_classes: `int`, the number of classes
    img_extension: `str or list of str, optional`, the extension of each
      image. If a list is provided, each image's extension is chosen at
      random from the list.
    img_shape: `tuple of int or list of tuple of int, optional`, the shape or
      shapes of the images to be generated. Defaults to (224, 224).
    modes: `iterative, optional` the list of modes to generate. Defaults to
      ('train', 'validation', 'test')
  """
  # Images are written concurrently by a thread pool once all paths/shapes
  # have been collected below.
  pool = multiprocessing.pool.ThreadPool(_WORKER_COUNT)
  def num_examples_fn():
    # Draw the per-class example count uniformly from [low, high).
    range_low = range_of_examples_per_class[0]
    range_high = range_of_examples_per_class[1]
    return np.random.randint(range_low, range_high, 1)[0]
  example_paths = []
  img_shapes = []
  # Fixed extension vs. random choice from a list.
  if isinstance(img_extension, six.string_types):
    img_extension_fn = lambda: img_extension
  else:
    img_extension_fn = lambda: np.random.choice(img_extension, size=1)[0]
  # A list of shapes (list whose first element is not an int) => random pick;
  # otherwise every image uses the single given shape.
  if isinstance(img_shape, list) and not isinstance(img_shape[0], int):
    def img_shape_fn():
      return img_shape[np.random.choice(len(img_shape), size=1)[0]]
  else:
    img_shape_fn = lambda: img_shape
  if '~' in root_path:
    root_path = os.path.expanduser(root_path)
  if not os.path.exists(root_path):
    os.mkdir(root_path)
  # Build the <root>/<mode>/class-<i>/ directory tree and collect the
  # (path, shape) work items.
  for mode in modes:
    mode_path = os.path.join(root_path, mode)
    if not os.path.exists(mode_path):
      os.mkdir(mode_path)
    for class_index in range(num_classes):
      class_path = os.path.join(mode_path, 'class-{}'.format(class_index))
      if not os.path.exists(class_path):
        os.mkdir(class_path)
      for example_index in range(num_examples_fn()):
        fname = 'class-{}-ex-{}.{}'.format(class_index,
                                           example_index,
                                           img_extension_fn())
        example_paths.append(os.path.join(class_path, fname))
        img_shapes.append(img_shape_fn())
  logging.info('Generating images for modes: %s.', ', '.join(modes))
  logging.info('Generating %d classes.', num_classes)
  logging.info('Generating between %d and %d images per class.',
               *range_of_examples_per_class)
  pool.map(create_random_image_zip, zip(example_paths, img_shapes))
  pool.close()
  pool.join()
class SimpleDatasetConfig(ImageClassificationConfig):
  """A configuration to be used with ImageClassificationBuilder."""

  def __init__(self,
               num_classes,
               root_path,
               **kwargs):
    """Initializes the configuration.

    Args:
      num_classes: `int` the number of classes in the dataset.
      root_path: `str` the root path to where the data is stored. A leading
        '~' is expanded to the user's home directory.
      **kwargs: Extra args forwarded to the base configuration.
    """
    super(SimpleDatasetConfig, self).__init__(
        version=tfds.core.Version('0.1.0'),
        supported_versions=[],
        **kwargs)
    self.num_classes = num_classes
    self.root_path = (
        os.path.expanduser(root_path) if '~' in root_path else root_path)

  @property
  def supported_modes(self):
    """The list of supported modes in this dataset."""
    return ['train', 'test', 'validation']

  @property
  def num_labels(self):
    """Returns the number of classes."""
    return self.num_classes

  def download_path(self, mode):
    """The dataset is generated locally, so nothing needs downloading."""
    return None

  def example_generator(self, mode):
    """Yields one example dict per image stored under `root_path`/`mode`.

    Each example is a `dict` whose 'image_fobj' value is an open binary file
    object for the image and whose 'label' value is the class directory name.

    Args:
      mode: `str` one of 'train', 'test', 'validation'

    Yields:
      `dict` with the keys of 'image_fobj' and 'label'.
    """
    split_dir = os.path.join(self.root_path, mode)
    for label_name in os.listdir(split_dir):
      label_dir = os.path.join(split_dir, label_name)
      for image_name in os.listdir(label_dir):
        full_path = os.path.abspath(os.path.join(label_dir, image_name))
        yield {
            'image_fobj': tf.io.gfile.GFile(full_path, 'rb'),
            'label': label_name,
        }
def main(argv):
  """Optionally generates a fake image dataset and converts it to TFRecords.

  Args:
    argv: unused positional command-line arguments.
  """
  del argv  # unused
  data_path = FLAGS.data_path
  ex_range_low = FLAGS.num_examples_per_class_low
  ex_range_high = FLAGS.num_examples_per_class_high
  num_classes = FLAGS.num_classes
  save_dir = FLAGS.save_dir
  img_shape = (FLAGS.image_size, FLAGS.image_size)
  assert ex_range_low < ex_range_high
  ex_range = (ex_range_low, ex_range_high)
  if not data_path:
    # Bug fix: the implicit string concatenation was missing a trailing
    # space, so the log read 'provided.Saving ...'.
    logging.info('No data path was provided. '
                 'Saving to a temporary directory.')
    data_path = tempfile.mkdtemp()
  if not save_dir:
    logging.info('No save dir was provided. '
                 'Saving tfrecords to a temporary directory.')
    save_dir = tempfile.mkdtemp()
  if FLAGS.generate:
    create_sample_dataset(root_path=data_path,
                          range_of_examples_per_class=ex_range,
                          num_classes=num_classes,
                          img_extension=['jpg', 'png'],
                          img_shape=img_shape)
  config = SimpleDatasetConfig(name='Simple',
                               description='A simple fake dataset',
                               num_classes=num_classes,
                               root_path=data_path)
  dataset = ImageClassificationBuilder(data_dir=save_dir,
                                       config=config)
  dataset.download_and_prepare()
  logging.info('Saved tfrecords to %s', save_dir)
if __name__ == '__main__':
  # Script entry point: configure logging, then hand flag parsing to absl.
  logging.set_verbosity(logging.INFO)
  app.run(main)
|
sarvex/tpu | models/official/efficientnet/lite/efficientnet_lite_model_qat.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements EfficientNet Lite model for Quantization Aware Training.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
import functools
import tensorflow.compat.v1 as tf
import efficientnet_model
class FunctionalModelBuilder:
  """A class that builds functional api keras models.

  Subclasses override `build` (layer creation) and `call` (forward pass);
  `__call__` lazily triggers `build` on first use, mimicking
  tf.keras.layers.Layer's build-on-first-call behavior.
  """

  def __init__(self, name='FunctionalModel'):
    self.name = name
    # Flipped to True once `build` has run, so layers are created only once.
    self.built = False

  def build(self, input_shape: tf.TensorShape):
    """Creates layers; subclasses override this. Base class only sets `built`."""
    del input_shape  # Only used by subclasses.
    self.built = True

  def call(self, inputs, training):
    """Forward pass; must be implemented by subclasses."""
    raise NotImplementedError('This function is implemented in subclasses.')

  def get_functional_model(self, input_shape, training):
    """Wraps this builder into a tf.keras.Model fed by a concrete Input.

    `input_shape` includes the batch dimension at index 0.
    """
    functional_inputs = tf.keras.Input(
        shape=input_shape[1:], batch_size=input_shape[0])
    functional_outputs = self(functional_inputs, training)
    return tf.keras.Model(inputs=functional_inputs, outputs=functional_outputs)

  def __call__(self, inputs, training):
    if not self.built:
      if tf.nest.is_nested(inputs):
        input_shapes = [
            input_tensor.shape for input_tensor in tf.nest.flatten(inputs)
        ]
      else:
        input_shapes = inputs.shape
      # NOTE(review): `input_shapes[1:]` drops the batch dim for a single
      # tensor, but for nested inputs it slices the *list* (dropping the
      # first tensor's shape entirely). The asymmetry looks unintended --
      # confirm before relying on `build`'s argument for nested inputs.
      # Also note subclass `build`s in this file never set `self.built`
      # (nor call super().build), so `build` runs on every call -- benign
      # when the builder is invoked once via get_functional_model; verify.
      self.build(input_shapes[1:])
    return self.call(inputs, training)
class FunctionalMBConvBlock(FunctionalModelBuilder):
  """A class of MBConv: Mobile Inverted Residual Bottleneck.

  Expand (optional 1x1 conv) -> depthwise conv -> project (1x1 conv), with a
  residual connection when the spatial/channel dimensions allow it.

  Attributes:
    endpoints: dict. A list of internal tensors.
  """

  def __init__(self, block_args, global_params, dtype, name, **kwargs):
    """Initializes a MBConv block.

    Args:
      block_args: BlockArgs, arguments to create a Block.
      global_params: GlobalParams, a set of global parameters.
      dtype: Layer type.
      name: Layer name.
      **kwargs: Keyword arguments.
    """
    super().__init__(**kwargs)
    self._block_args = block_args
    self._dtype = dtype
    self._name = name
    self._batch_norm_momentum = global_params.batch_norm_momentum
    self._batch_norm_epsilon = global_params.batch_norm_epsilon
    self._batch_norm = global_params.batch_norm
    self._data_format = global_params.data_format
    self._conv_kernel_initializer = tf.compat.v2.keras.initializers.VarianceScaling(
        scale=2.0, mode='fan_out', distribution='untruncated_normal')
    if self._data_format == 'channels_first':
      self._channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      self._channel_axis = -1
      self._spatial_dims = [1, 2]
    # EfficientNet-Lite uses ReLU6 (quantization-friendly) as the activation.
    self._relu_fn = functools.partial(tf.keras.layers.ReLU, 6.0)
    self._survival_prob = global_params.survival_prob
    self.endpoints = None

  def block_args(self):
    # Accessor for the block's construction arguments.
    return self._block_args

  def build(self, input_shape):
    """Builds block according to the arguments."""
    # conv2d_id / batch_norm_id generate the '', '_1', '_2' name suffixes so
    # that layer/variable names match the original checkpoint layout.
    conv2d_id = 0
    batch_norm_id = 0
    if self._block_args.expand_ratio != 1:
      # Expansion phase: 1x1 conv widens the channels by expand_ratio.
      self._expand_conv = tf.keras.layers.Conv2D(
          filters=(self._block_args.input_filters *
                   self._block_args.expand_ratio),
          kernel_size=[1, 1],
          strides=[1, 1],
          kernel_initializer=self._conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=False,
          dtype=self._dtype,
          name=f'{self._name}/conv2d')
      conv2d_id += 1
      self._bn0 = self._batch_norm(
          axis=self._channel_axis,
          momentum=self._batch_norm_momentum,
          epsilon=self._batch_norm_epsilon,
          dtype=self._dtype,
          name=f'{self._name}/tpu_batch_normalization')
      batch_norm_id += 1
    # Depthwise convolution phase (may downsample via block strides).
    self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
        kernel_size=[
            self._block_args.kernel_size, self._block_args.kernel_size
        ],
        strides=self._block_args.strides,
        depthwise_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._name}/depthwise_conv2d')
    batch_norm_name_suffix = f'_{batch_norm_id}' if batch_norm_id else ''
    self._bn1 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon,
        dtype=self._dtype,
        name=f'{self._name}/tpu_batch_normalization{batch_norm_name_suffix}')
    batch_norm_id += 1
    # Output phase.
    conv2d_name_suffix = f'_{conv2d_id}' if conv2d_id else ''
    self._project_conv = tf.keras.layers.Conv2D(
        filters=self._block_args.output_filters,
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._name}/conv2d{conv2d_name_suffix}')
    batch_norm_name_suffix = f'_{batch_norm_id}' if batch_norm_id else ''
    self._bn2 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon,
        dtype=self._dtype,
        name=f'{self._name}/tpu_batch_normalization{batch_norm_name_suffix}')
    self._spartial_dropout_2d = tf.keras.layers.SpatialDropout2D(
        rate=1 - self._survival_prob, dtype=self._dtype)

  def call(self, inputs, training):
    """Implementation of call().

    Args:
      inputs: the inputs tensor.
      training: boolean, whether the model is constructed for training.

    Returns:
      A output tensor.
    """
    x = inputs
    if self._block_args.expand_ratio != 1:
      x = self._relu_fn()(self._bn0(self._expand_conv(x), training=training))
    x = self._relu_fn()(self._bn1(self._depthwise_conv(x), training=training))
    self.endpoints = {'expansion_output': x}
    # Projection output has no activation (linear bottleneck).
    x = self._bn2(self._project_conv(x), training=training)
    # Residual path only when stride is 1 and channel counts match.
    if (all(s == 1 for s in self._block_args.strides) and
        inputs.get_shape().as_list()[-1] == x.get_shape().as_list()[-1]):
      # Apply only if skip connection presents.
      if self._survival_prob:
        # NOTE(review): SpatialDropout2D is called without
        # `training=training`, so it falls back to the Keras learning phase;
        # confirm this is the intended stochastic-depth behavior.
        x = self._spartial_dropout_2d(x)
      x = tf.keras.layers.Add(dtype=self._dtype)([x, inputs])
    return x
class FunctionalModel(FunctionalModelBuilder):
"""A class implements tf.keras.Model for MNAS-like model.
Reference: https://arxiv.org/abs/1807.11626
"""
  def __init__(self,
               model_name,
               blocks_args=None,
               global_params=None,
               features_only=None,
               pooled_features_only=False,
               **kwargs):
    """Initializes an `Model` instance.
    Args:
      model_name: Name of the model.
      blocks_args: A list of BlockArgs to construct block modules.
      global_params: GlobalParams, a set of global parameters.
      features_only: build the base feature network only.
      pooled_features_only: build the base network for features extraction
        (after 1x1 conv layer and global pooling, but before dropout and fc
        head).
      **kwargs: Keyword arguments.
    Raises:
      ValueError: when blocks_args is not specified as a list.
    """
    super().__init__(**kwargs)
    if not isinstance(blocks_args, list):
      raise ValueError('blocks_args should be a list.')
    self._model_name = model_name
    self._global_params = global_params
    self._blocks_args = blocks_args
    # Compute dtype: bfloat16 mixed precision when requested, else float32.
    self._dtype = 'float32'
    if self._global_params.use_bfloat16:
      self._dtype = 'mixed_bfloat16'
    self._features_only = features_only
    self._pooled_features_only = pooled_features_only
    # ReLU6 activation factory (a fresh layer instance per call site).
    self._relu_fn = functools.partial(tf.keras.layers.ReLU, 6.0)
    self._batch_norm = global_params.batch_norm
    self._fix_head_stem = global_params.fix_head_stem
    # NOTE(review): conv initializer uses tf.compat.v2.keras while the dense
    # initializer uses tf.keras -- equivalent under TF2, but inconsistent.
    self._conv_kernel_initializer = tf.compat.v2.keras.initializers.VarianceScaling(
        scale=2.0, mode='fan_out', distribution='untruncated_normal')
    self._dense_kernel_initializer = tf.keras.initializers.VarianceScaling(
        scale=1.0 / 3.0, mode='fan_out', distribution='uniform')
    # Populated during call() with named intermediate tensors.
    self.endpoints = None
  def build(self, input_shape):
    """Builds a model: stem conv, the stack of MBConv blocks, and the head."""
    del input_shape  # Unused.
    self._blocks = []
    batch_norm_momentum = self._global_params.batch_norm_momentum
    batch_norm_epsilon = self._global_params.batch_norm_epsilon
    # Channel axis / spatial dims depend on the data layout (NCHW vs NHWC).
    if self._global_params.data_format == 'channels_first':
      channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      channel_axis = -1
      self._spatial_dims = [1, 2]
    # Stem part.
    self._conv_stem = tf.keras.layers.Conv2D(
        filters=efficientnet_model.round_filters(32, self._global_params,
                                                 self._fix_head_stem),
        kernel_size=[3, 3],
        strides=[2, 2],
        kernel_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._global_params.data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._model_name}/stem/conv2d')
    self._bn0 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon,
        name=f'{self._model_name}/stem/tpu_batch_normalization')
    # Builds blocks.
    for i, block_args in enumerate(self._blocks_args):
      assert block_args.num_repeat > 0
      assert block_args.space2depth in [0, 1, 2]
      # Update block input and output filters based on depth multiplier.
      input_filters = efficientnet_model.round_filters(block_args.input_filters,
                                                       self._global_params)
      output_filters = efficientnet_model.round_filters(
          block_args.output_filters, self._global_params)
      # With fix_head_stem, the first and last stages keep their repeat count.
      if self._fix_head_stem and (i == 0 or i == len(self._blocks_args) - 1):
        repeats = block_args.num_repeat
      else:
        repeats = efficientnet_model.round_repeats(block_args.num_repeat,
                                                   self._global_params)
      block_args = block_args._replace(
          input_filters=input_filters,
          output_filters=output_filters,
          num_repeat=repeats)
      # The first block needs to take care of stride and filter size increase.
      self._blocks.append(
          FunctionalMBConvBlock(
              block_args=block_args,
              global_params=self._global_params,
              dtype=self._dtype,
              name=f'{self._model_name}/blocks_{len(self._blocks)}'))
      if block_args.num_repeat > 1:  # rest of blocks with the same block_arg
        # pylint: disable=protected-access
        block_args = block_args._replace(
            input_filters=block_args.output_filters, strides=[1, 1])
        # pylint: enable=protected-access
      for _ in range(block_args.num_repeat - 1):
        self._blocks.append(
            FunctionalMBConvBlock(
                block_args,
                self._global_params,
                dtype=self._dtype,
                name=f'{self._model_name}/blocks_{len(self._blocks)}'))
    # Head part.
    self._conv_head = tf.keras.layers.Conv2D(
        filters=efficientnet_model.round_filters(1280, self._global_params,
                                                 self._fix_head_stem),
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._global_params.data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._model_name}/head/conv2d')
    self._bn1 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon,
        dtype=self._dtype,
        name=f'{self._model_name}/head/tpu_batch_normalization')
    # Classification head is optional (num_classes may be falsy).
    if self._global_params.num_classes:
      self._fc = tf.keras.layers.Dense(
          self._global_params.num_classes,
          kernel_initializer=self._dense_kernel_initializer,
          dtype=self._dtype,
          name=f'{self._model_name}/head/dense')
    else:
      self._fc = None
    if self._global_params.dropout_rate > 0:
      self._dropout = tf.keras.layers.Dropout(
          self._global_params.dropout_rate, dtype=self._dtype)
    else:
      self._dropout = None
  def call(self, inputs, training):
    """Implementation of call().
    Runs stem -> blocks -> (optional) head, recording named endpoints along
    the way in self.endpoints.
    Args:
      inputs: input tensors.
      training: boolean, whether the model is constructed for training.
    Returns:
      output tensors.
    """
    outputs = None
    self.endpoints = {}
    reduction_idx = 0
    # Calls Stem layers
    outputs = self._relu_fn()(
        self._bn0(self._conv_stem(inputs), training=training))
    self.endpoints['stem'] = outputs
    # Calls blocks.
    for idx, block in enumerate(self._blocks):
      is_reduction = False  # reduction flag for blocks after the stem layer
      # A block is a "reduction" point if it is the last one or the next
      # block downsamples (stride > 1).
      if ((idx == len(self._blocks) - 1) or
          self._blocks[idx + 1].block_args().strides[0] > 1):
        is_reduction = True
        reduction_idx += 1
      survival_prob = self._global_params.survival_prob
      if survival_prob:
        # Linearly decay survival probability with block depth.
        # NOTE(review): this local survival_prob is computed but never used
        # here (the block's drop rate is fixed at construction) -- confirm.
        drop_rate = 1.0 - survival_prob
        survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
      outputs = block(outputs, training)
      self.endpoints['block_%s' % idx] = outputs
      if is_reduction:
        self.endpoints['reduction_%s' % reduction_idx] = outputs
      if block.endpoints:
        for k, v in block.endpoints.items():
          self.endpoints['block_%s/%s' % (idx, k)] = v
          if is_reduction:
            self.endpoints['reduction_%s/%s' % (reduction_idx, k)] = v
    self.endpoints['features'] = outputs
    if not self._features_only:
      # Head: 1x1 conv + BN + ReLU, then global average pooling.
      outputs = self._relu_fn()(
          self._bn1(self._conv_head(outputs), training=training))
      self.endpoints['head_1x1'] = outputs
      shape = outputs.get_shape().as_list()
      outputs = tf.keras.layers.AveragePooling2D(
          pool_size=(shape[self._spatial_dims[0]],
                     shape[self._spatial_dims[1]]),
          strides=[1, 1],
          padding='valid',
          dtype=self._dtype)(
              outputs)
      self.endpoints['pooled_features'] = outputs
      if not self._pooled_features_only:
        if self._dropout:
          outputs = self._dropout(outputs)
        self.endpoints['global_pool'] = outputs
        if self._fc:
          outputs = tf.keras.layers.Flatten(dtype=self._dtype)(outputs)
          outputs = self._fc(outputs)
        self.endpoints['head'] = outputs
    return outputs
|
sarvex/tpu | tools/data_converter/image_utils.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for TPU Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets.public_api as tfds
def _decode_image(fobj):
  """Read and decode an image from a file object as a Numpy array.
  The data converter may encounter images in several formats, e.g.:
    - BMP (RGB)
    - PNG (grayscale, RGBA, RGB interlaced)
    - JPEG (RGB)
    - GIF (1-frame RGB)
  TFDS assumes all images have the same number of channels so these
  must be converted to RGB.
  Args:
    fobj: `tf.io.gfile.GFile` or `file` of the loaded image.
  Returns:
    Numpy array with shape (height, width, channels).
  Raises:
    `tf.errors.InvalidArgumentError`: If the image could not be decoded.
  """
  buf = fobj.read()
  # Convert to RGB. flags=3 (cv2.IMREAD_COLOR) forces a 3-channel result.
  # np.frombuffer replaces the deprecated np.fromstring for binary data.
  image = tfds.core.lazy_imports.cv2.imdecode(
      np.frombuffer(buf, dtype=np.uint8), flags=3)
  if image is None:
    logging.warning('Image %s could not be decoded by OpenCV. '
                    'Falling back to TF.', fobj.name)
    try:
      image = tfds.core.utils.image_utils.decode_image(buf)
    except tf.errors.InvalidArgumentError as err:
      # Bug fix: tf.errors.InvalidArgumentError takes (node_def, op, message);
      # the previous single-argument call raised a TypeError instead of the
      # intended error. Chain the original cause for debuggability.
      raise tf.errors.InvalidArgumentError(
          None, None,
          'Image {} could not be decoded by Tensorflow'.format(
              fobj.name)) from err
  # GIF images contain a frame dimension. Select the first frame.
  if len(image.shape) == 4:  # rank=4 -> rank=3
    image = image.reshape(image.shape[1:])
  return image
def _encode_jpeg(image, quality=None):
  """Encode an image (Numpy array) to an in-memory JPEG.
  Args:
    image: Numpy array holding the decoded image.
    quality: optional `int` JPEG quality passed as cv2.IMWRITE_JPEG_QUALITY.
  Returns:
    `io.BytesIO` containing the JPEG-encoded bytes.
  """
  cv2 = tfds.core.lazy_imports.cv2
  extra_args = [[int(cv2.IMWRITE_JPEG_QUALITY), quality]] if quality else []
  _, buff = cv2.imencode('.jpg', image, *extra_args)
  # tobytes() replaces the deprecated ndarray.tostring().
  return io.BytesIO(buff.tobytes())
def image_to_jpeg(fobj, filename, quality=None, target_pixels=None):
  """Converts image files to JPEG and returns the bytes and shape.
  All images are normalized to JPEG so the downstream pipeline sees a single
  consistent format, regardless of the source encoding.
  Args:
    fobj: `tf.io.gfile.GFile` or `file` of the loaded image.
    filename: `str` the filename of the original image.
    quality: `int` target JPEG quality (cv2.IMWRITE_JPEG_QUALITY).
    target_pixels: `int` desired number of pixels; images larger than this
      are downscaled proportionally.
  Returns:
    `io.BytesIO` holding the JPEG bytes, and a `tuple` with the image shape.
  Raises:
    `tf.errors.InvalidArgumentError`: If the image could not be decoded.
    `ValueError` if fobj or filename was None.
  """
  if not fobj or not filename:
    raise ValueError('fobj or filename was None.')
  image = _decode_image(fobj)
  height, width, _ = image.shape
  num_pixels = height * width
  # Downscale only when a positive pixel budget is given and exceeded.
  if target_pixels and num_pixels > target_pixels:
    scale = np.sqrt(target_pixels / num_pixels)
    image = tfds.core.lazy_imports.cv2.resize(
        image, dsize=None, fx=scale, fy=scale)
  return _encode_jpeg(image, quality=quality), image.shape
def validate_essential_inputs(example, essential_inputs):
  """Check that every required key is present in the yielded example.
  Raises:
    AssertionError: on the first required key missing from `example`.
  """
  for required_key in essential_inputs:
    if required_key in example:
      continue
    raise AssertionError(
        '{} was not included in the yielded example.'.format(required_key))
|
sarvex/tpu | models/official/detection/projects/vild/modeling/vild_losses.py | <gh_stars>10-100
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses used for ViLD."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
class FastrcnnClassLoss(object):
  """Fast R-CNN classification loss function."""
  def __init__(self, params=None):
    # params: ParamsDict-like with `mask_rare` and `rare_mask_path`; when
    # masking is enabled, a per-class float mask is loaded from disk once.
    if params:
      self._mask_rare = params.mask_rare
      if self._mask_rare:
        with tf.gfile.GFile(params.rare_mask_path, 'rb') as f:
          self._rare_mask = np.array(np.load(f), dtype=np.float32)
    else:
      self._mask_rare = False
  def __call__(self, class_outputs, class_targets):
    """Computes the class loss (Fast-RCNN branch) of Mask-RCNN.
    This function implements the classification loss of the Fast-RCNN.
    The classification loss is softmax on all RoIs.
    Reference:
    https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py
    # pylint: disable=line-too-long
    Args:
      class_outputs: a float tensor representing the class prediction for each
        box with a shape of [batch_size, num_boxes, num_classes].
      class_targets: a float tensor representing the class label for each box
        with a shape of [batch_size, num_boxes].
    Returns:
      a scalar tensor representing total class loss.
    """
    with tf.name_scope('fast_rcnn_loss'):
      _, _, num_classes = class_outputs.get_shape().as_list()
      # Targets arrive as floats; one-hot encode them for the softmax loss.
      class_targets = tf.to_int32(class_targets)
      class_targets_one_hot = tf.one_hot(class_targets, num_classes)
      return self._fast_rcnn_class_loss(class_outputs, class_targets_one_hot)
  def _fast_rcnn_class_loss(self,
                            class_outputs,
                            class_targets_one_hot,
                            normalizer=1.0):
    """Computes classification loss."""
    with tf.name_scope('fast_rcnn_class_loss'):
      # Zero out the logits of rare classes so they never win the softmax.
      if self._mask_rare:
        class_outputs = class_outputs * self._rare_mask[None, None, :]
      # The loss is normalized by the sum of non-zero weights before additional
      # normalizer provided by the function caller.
      class_loss = tf.losses.softmax_cross_entropy(
          class_targets_one_hot,
          class_outputs,
          reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
      class_loss /= normalizer
      return class_loss
|
sarvex/tpu | models/official/detection/projects/vild/configs/vild_config.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config template to train Mask R-CNN."""
from configs import detection_config
import sys
sys.path.insert(0, 'tpu/models')
from hyperparameters import params_dict
# pylint: disable=line-too-long
VILD_CFG = params_dict.ParamsDict(detection_config.DETECTION_CFG)
VILD_CFG.override({
'type': 'vild',
'eval': {
'type': 'lvis_box_and_mask',
'eval_samples': 19809,
'min_eval_interval': 5,
},
'architecture': {
'space_to_depth_block_size': 1,
'parser': 'vild_parser',
'backbone': 'resnet',
'min_level': 2,
'max_level': 6,
'multilevel_features': 'fpn',
'include_mask': True,
'mask_target_size': 28,
'num_classes': 1204,
# FEATURE DISTILL
'visual_feature_distill': 'vanilla', # None, 'vanilla', 'double_branch'
'visual_feature_dim': 512,
'max_num_rois': 300,
'feat_distill_weight': 0.5,
'filter_distill_boxes_size': 0,
'normalize_feat_during_training': True,
},
'vild_parser': {
'output_size': [1024, 1024],
'rpn_match_threshold': 0.7,
'rpn_unmatched_threshold': 0.3,
'rpn_batch_size_per_im': 256,
'rpn_fg_fraction': 0.5,
'aug_rand_hflip': True,
'aug_scale_min': 0.1,
'aug_scale_max': 2.0,
'skip_crowd_during_training': True,
'max_num_instances': 300,
'mask_crop_size': 112,
'regenerate_source_id': False,
'copy_paste': False,
},
'anchor': {
'num_scales': 1,
'anchor_size': 8,
},
'rpn_head': {
'anchors_per_location': None, # Param no longer used.
'num_convs': 2,
'num_filters': 256,
'use_separable_conv': False,
'use_batch_norm': True,
'cast_to_float32': True,
},
'frcnn_head': {
'num_convs': 4,
'num_filters': 256,
'use_separable_conv': False,
'num_fcs': 2,
'fc_dims': 1024,
'use_batch_norm': True,
# If True only one box will be predicted instead of num_classes boxes.
'class_agnostic_bbox_pred': True,
# for vild classifier: start
'clip_dim': 512,
'classifier_weight_path': '',
'normalize_classifier': True,
'normalize_visual': True,
'temperature': 100.0,
# for vild classifier: end
},
'mrcnn_head': {
'num_convs': 4,
'num_filters': 256,
'use_separable_conv': False,
'use_batch_norm': True,
'class_agnostic_mask_pred': True,
},
'rpn_score_loss': {
'rpn_batch_size_per_im': 256,
},
'rpn_box_loss': {
'huber_loss_delta': 1.0 / 9.0,
},
'frcnn_box_loss': {
'huber_loss_delta': 1.0,
},
'frcnn_class_loss': {
'mask_rare': True,
'rare_mask_path': '',
},
'roi_proposal': {
'rpn_pre_nms_top_k': 2000,
'rpn_post_nms_top_k': 1000,
'rpn_nms_threshold': 0.7,
'rpn_score_threshold': 0.0,
'rpn_min_size_threshold': 0.0,
'test_rpn_pre_nms_top_k': 1000,
'test_rpn_post_nms_top_k': 1000,
'test_rpn_nms_threshold': 0.7,
'test_rpn_score_threshold': 0.0,
'test_rpn_min_size_threshold': 0.0,
'use_batched_nms': False,
},
'roi_sampling': {
# IoU thresholds for additional FRCNN heads in Cascade mode. e.g.
# [0.7, 0.8]
# 'fg_iou_thresh' is used as the first threshold.
'cascade_iou_thresholds': None,
'num_samples_per_image': 512,
'fg_fraction': 0.25,
'fg_iou_thresh': 0.5,
'bg_iou_thresh_hi': 0.5,
'bg_iou_thresh_lo': 0.0,
'mix_gt_boxes': True,
},
'mask_sampling': {
'num_mask_samples_per_image': 128, # Typically = `num_samples_per_image` * `fg_fraction`.
},
'postprocess': {
'max_total_size': 300,
'score_threshold': 0.0,
'pre_nms_num_boxes': 1000,
'rare_mask_path': '',
'apply_sigmoid': False, # Not used, but misleading.
# whether to remove background before softmax
'discard_background': False,
},
'batch_norm_activation': {
'use_sync_bn': True,
},
'train': {
'space_to_depth_block_size': 1,
'frozen_variable_prefix': 'frcnn_layer_0/fast_rcnn_head/class-predict',
'losses': 'all',
'l2_weight_decay': 4e-5,
},
'enable_summary': True,
}, is_strict=False)
VILD_RESTRICTIONS = [
]
# pylint: enable=line-too-long
|
sarvex/tpu | models/official/unet3d/input_reader.py | <reponame>sarvex/tpu
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Defines input_fn of UNet-3D for TF Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import logging
import tensorflow.compat.v1 as tf
class InputFn(object):
  """Input function for tf.Estimator."""
  def __init__(self, file_pattern, params, mode):
    # file_pattern: glob of TFRecord files; mode: tf.estimator.ModeKeys value.
    self._file_pattern = file_pattern
    self._mode = mode
    self._is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    self._parser_fn = self.create_parser_fn(params)
    # Optionally read GZIP-compressed TFRecords.
    if params.compressed_input:
      self._dataset_fn = functools.partial(
          tf.data.TFRecordDataset, compression_type='GZIP')
    else:
      self._dataset_fn = tf.data.TFRecordDataset
  def create_parser_fn(self, params):
    """Create parse fn to extract tensors from tf.Example."""
    def _parser(serialized_example):
      """Parses a single tf.Example into image and label tensors."""
      features = tf.parse_example(
          [serialized_example],
          features={
              'image/encoded': tf.VarLenFeature(dtype=tf.float32),
              'image/segmentation/mask': tf.VarLenFeature(dtype=tf.float32),
          })
      image = features['image/encoded']
      if isinstance(image, tf.SparseTensor):
        image = tf.sparse_tensor_to_dense(image)
      gt_mask = features['image/segmentation/mask']
      if isinstance(gt_mask, tf.SparseTensor):
        gt_mask = tf.sparse_tensor_to_dense(gt_mask)
      image_size, label_size = self.get_input_shapes(params)
      image = tf.reshape(image, image_size)
      gt_mask = tf.reshape(gt_mask, label_size)
      if params.use_bfloat16:
        image = tf.cast(image, dtype=tf.bfloat16)
        gt_mask = tf.cast(gt_mask, dtype=tf.bfloat16)
      logging.info('debug input %s %s', image, gt_mask)
      return image, gt_mask
    return _parser
  def get_input_shapes(self, params):
    # Image: spatial dims + channels; label: spatial dims + one-hot classes.
    image_size = params.input_image_size + [params.num_channels]
    label_size = params.input_image_size + [params.num_classes]
    return image_size, label_size
  def __call__(self, params):
    """Generates features and labels for training or evaluation.
    This uses the input pipeline based approach using file name queue
    to read data so that entire data is not loaded in memory.
    Args:
      params: model parameters in ParamsDict like object.
    Returns:
      tf.data.Dataset
    """
    batch_size = params['batch_size']
    dataset = tf.data.Dataset.list_files(
        self._file_pattern, shuffle=self._is_training)
    if self._is_training:
      dataset = dataset.repeat()
    # Interleave reads across files for throughput; only sloppy in training.
    dataset = dataset.apply(
        tf.data.experimental.parallel_interleave(
            lambda file_name: self._dataset_fn(file_name).prefetch(1),
            cycle_length=32,
            sloppy=self._is_training))
    if self._is_training:
      dataset = dataset.shuffle(64)
    # Parses the fetched records to input tensors for model function.
    dataset = dataset.map(self._parser_fn, num_parallel_calls=64)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
class LiverInputFn(InputFn):
  """Input function of Liver Segmentation data set."""
  def create_parser_fn(self, params):
    """Create parse fn to extract tensors from tf.Example."""
    def _decode_liver_example(serialized_example):
      """Parses a single tf.Example into image and label tensors."""
      features = {}
      features['image/ct_image'] = tf.FixedLenFeature([], tf.string)
      features['image/label'] = tf.FixedLenFeature([], tf.string)
      parsed = tf.parse_single_example(serialized_example, features=features)
      # Here, assumes the `image` is normalized to [0, 1] of type float32 and
      # the `label` is a binary matrix, whose last dimension is one_hot encoded
      # labels.
      # The dtype of `label` can be either float32 or int64.
      image = tf.decode_raw(parsed['image/ct_image'],
                            tf.as_dtype(tf.float32))
      label = tf.decode_raw(parsed['image/label'],
                            tf.as_dtype(params.label_dtype))
      image_size = params.input_image_size + [params.num_channels]
      image = tf.reshape(image, image_size)
      label_size = params.input_image_size + [params.num_classes]
      label = tf.reshape(label, label_size)
      if self._is_training and params.use_index_label_in_train:
        # Use class index for labels and remove the channel dim (#channels=1).
        channel_dim = -1
        label = tf.argmax(label, axis=channel_dim, output_type=tf.int32)
      if params.use_bfloat16:
        image = tf.cast(image, dtype=tf.bfloat16)
        if label.dtype == tf.float32:
          label = tf.cast(label, dtype=tf.bfloat16)
      # TPU doesn't support tf.int64 well, use tf.int32 directly.
      if label.dtype == tf.int64:
        label = tf.cast(label, dtype=tf.int32)
      return image, label
    return _decode_liver_example
  def get_input_shapes(self, params):
    # Mirrors the parser: index labels drop the trailing class dimension.
    image_size = params.input_image_size + [params.num_channels]
    if self._is_training and params.use_index_label_in_train:
      label_size = params.input_image_size
    else:
      label_size = params.input_image_size + [params.num_classes]
    return image_size, label_size
|
crowdbotics-apps/nftlink-33254 | backend/modules/privacy_policy/serializers.py | <gh_stars>0
from rest_framework import serializers
from .models import PrivacyPolicy
class PrivacyPolicySerializer(serializers.ModelSerializer):
    """Serializes PrivacyPolicy model instances for the REST API."""
    class Meta:
        model = PrivacyPolicy
        fields = [
            "id",
            "body",
            "author",
            "is_active",
            "created_at",
            "updated_at",
        ]
        # `id` is server-assigned and must not be writable by clients.
        read_only_fields = ["id"]
|
crowdbotics-apps/nftlink-33254 | backend/modules/privacy_policy/apps.py | <reponame>crowdbotics-apps/nftlink-33254<filename>backend/modules/privacy_policy/apps.py
from django.apps import AppConfig
class PrivacyPolicyConfig(AppConfig):
    """Django app configuration for the privacy_policy module."""
    name = "modules.privacy_policy"
    verbose_name = "Privacy Policy"
|
crowdbotics-apps/nftlink-33254 | backend/modules/payments/admin.py | from django.contrib import admin
from .models import StripeUserProfile
admin.site.register(StripeUserProfile) |
crowdbotics-apps/nftlink-33254 | backend/modules/terms_and_conditions/urls.py | <reponame>crowdbotics-apps/nftlink-33254<filename>backend/modules/terms_and_conditions/urls.py
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import TermAndConditionViewSet
router = DefaultRouter()
# because we are using a custom queryset for our viewset, the basename
# must be specified explicitly here. See: https://www.django-rest-framework.org/api-guide/routers/#Usage
# Your terms will be available at : /modules/terms-and-conditions/
router.register("", TermAndConditionViewSet, basename="terms-and-conditions")
urlpatterns = [
path("", include(router.urls)),
]
|
crowdbotics-apps/nftlink-33254 | backend/modules/privacy_policy/__init__.py | default_app_config = "modules.privacy_policy.apps.PrivacyPolicyConfig"
|
crowdbotics-apps/nftlink-33254 | backend/modules/terms_and_conditions/models.py | <reponame>crowdbotics-apps/nftlink-33254
from django.db import models
from django.conf import settings
# Create your models here.
class TermAndCondition(models.Model):
    """A versioned terms-and-conditions document authored by a user."""
    # Full document text.
    body = models.TextField()
    # PROTECT: an author with published terms cannot be deleted.
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.PROTECT,
    )
    is_active = models.BooleanField(
        default=True
    )
    created_at = models.DateTimeField(
        auto_now_add=True,
    )
    updated_at = models.DateTimeField(
        auto_now=True,
    )
|
crowdbotics-apps/nftlink-33254 | backend/modules/payments/services/StripeService.py | <filename>backend/modules/payments/services/StripeService.py<gh_stars>0
import stripe
import environ
env = environ.Env()
class StripeService:
    """Thin wrapper around the Stripe SDK for payment sheets and listings."""
    # NOTE: set at class-definition (import) time; requires STRIPE_SECRET_KEY
    # to be present in the environment when the module is imported.
    stripe.api_key = env.str("STRIPE_SECRET_KEY")
    @classmethod
    def create_payment_intent_sheet(cls, cus_id, cents):
        """Create a PaymentIntent + ephemeral key for a mobile payment sheet.
        Args:
            cus_id: Stripe customer id (``cus_...``).
            cents: amount in the currency's smallest unit.
        Returns:
            dict with the client secret, ephemeral key secret and customer id.
        """
        ephemeralKey = stripe.EphemeralKey.create(
            customer=cus_id,
            stripe_version=env.str("STRIPE_VERSION", '2020-08-27'),
        )
        paymentIntent = stripe.PaymentIntent.create(
            amount=cents,
            currency=env.str("STRIPE_CURRENCY", 'usd'),
            customer=cus_id
        )
        return {
            "paymentIntent": paymentIntent.client_secret,
            "ephemeralKey": ephemeralKey.secret,
            "customer": cus_id
        }
    @classmethod
    def get_payments_history(cls, cus_id, limit=100, offset=0):
        """Return up to `limit` PaymentIntents for the customer.
        NOTE(review): Stripe list endpoints paginate with `starting_after`,
        not `offset` -- verify that `offset` has any effect here.
        """
        return stripe.PaymentIntent.list(
            customer=cus_id, limit=limit, offset=offset,
        ).get('data', [])
    @classmethod
    def get_payments_methods(cls, cus_id, type='card', limit=100, offset=0):
        """Return the customer's saved payment methods of the given type."""
        return stripe.PaymentMethod.list(customer=cus_id, type=type, limit=limit, offset=offset).get('data', [])
crowdbotics-apps/nftlink-33254 | backend/modules/terms_and_conditions/apps.py | from django.apps import AppConfig
class TermsAndConditionsConfig(AppConfig):
    """Django app configuration for the terms_and_conditions module."""
    name = "modules.terms_and_conditions"
    verbose_name = "Terms and Conditions"
|
crowdbotics-apps/nftlink-33254 | backend/modules/payments/urls.py | from django.urls import re_path
from .viewsets import PaymentSheetView, GetStripePaymentsView, GetPaymentMethodsView
urlpatterns = [
re_path(r'payment_sheet/?', PaymentSheetView.as_view()),
re_path(r'get_payments_history/?', GetStripePaymentsView.as_view()),
re_path(r'get_payments_methods/?', GetPaymentMethodsView.as_view()),
] |
crowdbotics-apps/nftlink-33254 | backend/modules/terms_and_conditions/__init__.py | default_app_config = "modules.terms_and_conditions.apps.TermsAndConditionsConfig"
|
crowdbotics-apps/nftlink-33254 | backend/modules/payments/models.py | <gh_stars>0
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _
class StripeUserProfile(models.Model):
    """One-to-one extension of the auth user holding their Stripe customer id."""
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='stripe_profile'
    )
    # Nullable: the Stripe customer is created lazily on first payment.
    stripe_cus_id = models.CharField(
        max_length=120,
        null=True, blank=True,
        help_text="Stripe Customer ID"
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
def create_stripe_profile(sender, instance, created, **kwargs):
    """post_save receiver: auto-create a StripeUserProfile for new users."""
    if created:
        StripeUserProfile.objects.create(user=instance)
# dispatch_uid guards against duplicate registration on repeated imports.
post_save.connect(create_stripe_profile, sender=settings.AUTH_USER_MODEL, dispatch_uid="create_user_profile")
|
crowdbotics-apps/nftlink-33254 | backend/modules/payments/viewsets.py | <filename>backend/modules/payments/viewsets.py
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions, status
from django.contrib.auth.models import User
import stripe
from .services.StripeService import StripeService
class PaymentSheetView(APIView):
    """POST endpoint that returns Stripe payment-sheet credentials.
    Lazily creates the Stripe customer on first use and persists its id
    on the user's StripeUserProfile.
    """
    authentication_classes = [authentication.TokenAuthentication]
    permission_classes = [permissions.IsAuthenticated]
    def post(self, request, *args, **kwargs):
        user = request.user
        stripe_profile = user.stripe_profile
        if not stripe_profile.stripe_cus_id:
            # First payment for this user: create and remember the customer.
            customer = stripe.Customer.create(email=user.email)
            stripe_cus_id = customer['id']
            stripe_profile.stripe_cus_id = stripe_cus_id
            stripe_profile.save()
        else:
            stripe_cus_id = stripe_profile.stripe_cus_id
        # Amount in cents; defaults to 100 (i.e. 1.00 in the configured currency).
        cents = request.data.get('cents', 100)
        response = StripeService.create_payment_intent_sheet(stripe_cus_id, cents)
        return Response(response, status=status.HTTP_200_OK)
class GetStripePaymentsView(APIView):
    """GET endpoint returning the authenticated user's Stripe payment history."""
    authentication_classes = [authentication.TokenAuthentication]
    permission_classes = [permissions.IsAuthenticated]
    def get(self, request, *args, **kwargs):
        profile = request.user.stripe_profile
        # Users without a Stripe customer yet resolve to None.
        customer_id = profile.stripe_cus_id or None
        history = StripeService.get_payments_history(customer_id)
        payload = {"success": True, "data": history}
        return Response(payload, status=status.HTTP_200_OK)
class GetPaymentMethodsView(APIView):
    """GET endpoint returning the authenticated user's saved payment methods."""
    authentication_classes = [authentication.TokenAuthentication]
    permission_classes = [permissions.IsAuthenticated]
    def get(self, request, *args, **kwargs):
        profile = request.user.stripe_profile
        # Users without a Stripe customer yet resolve to None.
        customer_id = profile.stripe_cus_id or None
        methods = StripeService.get_payments_methods(customer_id)
        payload = {"success": True, "data": methods}
        return Response(payload, status=status.HTTP_200_OK)
|
crowdbotics-apps/nftlink-33254 | backend/modules/terms_and_conditions/admin.py | <reponame>crowdbotics-apps/nftlink-33254<gh_stars>1-10
from django.contrib import admin
from .models import TermAndCondition
admin.site.register(TermAndCondition)
|
hiro877/SparseSincnet | speaker_id_evaluate.py | <filename>speaker_id_evaluate.py
# speaker_id.py
# <NAME>
# Mila - University of Montreal
# July 2018
# Description:
# This code performs a speaker_id experiments with SincNet.
# How to run it:
# python speaker_id.py --cfg=cfg/SincNet_TIMIT.cfg
import os
#import scipy.io.wavfile
import soundfile as sf
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import sys
import numpy as np
from dnn_models import MLP,flip
from dnn_models import SincNet as CNN
from data_io import ReadList,read_conf,str_to_bool
# diff from the original
import matplotlib.pyplot as plt
from nupic.torch.modules import rezero_weights, update_boost_strength
class AddNoise(object):
    """Blend random uniform noise into the sample.
    The output is A' = A * (1 - alpha) + alpha * noise, where noise is drawn
    uniformly from [-max_val, max_val] with one value per element of A.
    alpha=0 returns the input unchanged (scaled by 1.0).
    """
    def __init__(self, alpha=0.0, max_val=1.0):
        # alpha: blend factor in [0, 1]; max_val: noise amplitude bound.
        self.alpha = alpha
        self.max_val = max_val
    def __call__(self, data):
        bound = self.max_val
        noise = np.random.uniform(-bound, bound, data.size)
        return (1 - self.alpha) * data + self.alpha * noise
def plot_filter(cnn_net):
    """Plot the average single-sided FFT amplitude spectrum of the SincNet filters.
    Args:
        cnn_net: CNN whose first conv layer (cnn_net.conv[0]) is the SincNet
            layer exposing a `filters` tensor of shape (N_filt, 1, filt_len).
    NOTE: shows the plot and then terminates the process via sys.exit();
    this is a one-off inspection utility, not for use inside a training loop.
    """
    sincnet = cnn_net.conv[0]
    num_filters = 0
    amplitude_sum = None
    for filt in sincnet.filters:
        x = filt[0].to('cpu').detach().numpy().copy()
        n = x.shape[0]
        # Fast Fourier Transform and amplitude spectrum.
        amplitude = np.abs(np.fft.fft(x))
        # Scale to a single-sided amplitude spectrum. Bug fix: divide by the
        # scalar length n instead of the tuple x.shape.
        amplitude = amplitude / n * 2
        amplitude[0] /= 2  # The DC bin is not doubled.
        half = amplitude[:n // 2 + 1]
        # Size the accumulator from the first filter instead of the
        # hard-coded 126 bins (which assumed 251-tap filters).
        amplitude_sum = half if amplitude_sum is None else amplitude_sum + half
        num_filters += 1
    # Bug fix: the running count previously started at 1, so the "average"
    # was divided by (number of filters + 1).
    frequency_axis = np.linspace(0, 4000, n // 2 + 1)  # assumes fs = 8 kHz -- TODO confirm
    plt.plot(frequency_axis, amplitude_sum / num_filters)
    plt.show()
    sys.exit()
def create_batches_rnd(batch_size,data_folder,wav_lst,N_snt,wlen,lab_dict,fact_amp):
    """Build one random training minibatch of raw waveform chunks.

    batch_size: number of chunks per batch.
    data_folder: root folder containing the wav files.
    wav_lst: wav file paths relative to data_folder.
    N_snt: number of sentences (prefix of wav_lst) to sample from.
    wlen: chunk length in samples.
    lab_dict: maps wav path -> speaker label.
    fact_amp: half-width of the random amplitude-scaling range.

    Returns (inp, lab): CUDA float tensors of shape (batch_size, wlen)
    and (batch_size,) respectively.
    """
    # Initialization of the minibatch (batch_size,[0=>x_t,1=>x_t+N,1=>random_samp])
    sig_batch=np.zeros([batch_size,wlen])
    lab_batch=np.zeros(batch_size)
    # One random sentence index and amplitude factor per batch element.
    snt_id_arr=np.random.randint(N_snt, size=batch_size)
    rand_amp_arr = np.random.uniform(1.0-fact_amp,1+fact_amp,batch_size)
    for i in range(batch_size):
        # select a random sentence from the list
        #[fs,signal]=scipy.io.wavfile.read(data_folder+wav_lst[snt_id_arr[i]])
        #signal=signal.astype(float)/32768
        [signal, fs] = sf.read(data_folder+wav_lst[snt_id_arr[i]])
        # accesing to a random chunk
        snt_len=signal.shape[0]
        snt_beg=np.random.randint(snt_len-wlen-1) #randint(0, snt_len-2*wlen-1)
        snt_end=snt_beg+wlen
        channels = len(signal.shape)
        if channels == 2:
            # Keep only the first channel of stereo recordings.
            print('WARNING: stereo to mono: '+data_folder+wav_lst[snt_id_arr[i]])
            signal = signal[:,0]
        # Random amplitude scaling acts as data augmentation.
        sig_batch[i,:]=signal[snt_beg:snt_end]*rand_amp_arr[i]
        lab_batch[i]=lab_dict[wav_lst[snt_id_arr[i]]]
    inp=Variable(torch.from_numpy(sig_batch).float().cuda().contiguous())
    lab=Variable(torch.from_numpy(lab_batch).float().cuda().contiguous())
    return inp,lab
# Reading cfg file
# read_conf() returns the parsed cfg file; every field comes back as a
# string and is converted to its proper type in the sections below.
options=read_conf()
#[data]
tr_lst=options.tr_lst              # list file of training wavs
te_lst=options.te_lst              # list file of test wavs
pt_file=options.pt_file            # pre-trained checkpoint ('none' = from scratch)
class_dict_file=options.lab_dict   # .npy dict: wav path -> speaker label
data_folder=options.data_folder+'/'
output_folder=options.output_folder
#[windowing]
fs=int(options.fs)                 # sampling rate (Hz)
cw_len=int(options.cw_len)         # analysis window length (ms)
cw_shift=int(options.cw_shift)     # window shift (ms)
#[cnn]
# Per-layer CNN/SincNet settings arrive as comma-separated strings.
cnn_N_filt=list(map(int, options.cnn_N_filt.split(',')))
cnn_len_filt=list(map(int, options.cnn_len_filt.split(',')))
cnn_max_pool_len=list(map(int, options.cnn_max_pool_len.split(',')))
cnn_use_laynorm_inp=str_to_bool(options.cnn_use_laynorm_inp)
cnn_use_batchnorm_inp=str_to_bool(options.cnn_use_batchnorm_inp)
cnn_use_laynorm=list(map(str_to_bool, options.cnn_use_laynorm.split(',')))
cnn_use_batchnorm=list(map(str_to_bool, options.cnn_use_batchnorm.split(',')))
cnn_act=list(map(str, options.cnn_act.split(',')))
cnn_drop=list(map(float, options.cnn_drop.split(',')))
#[dnn]
fc_lay=list(map(int, options.fc_lay.split(',')))
fc_drop=list(map(float, options.fc_drop.split(',')))
fc_use_laynorm_inp=str_to_bool(options.fc_use_laynorm_inp)
fc_use_batchnorm_inp=str_to_bool(options.fc_use_batchnorm_inp)
fc_use_batchnorm=list(map(str_to_bool, options.fc_use_batchnorm.split(',')))
fc_use_laynorm=list(map(str_to_bool, options.fc_use_laynorm.split(',')))
fc_act=list(map(str, options.fc_act.split(',')))
#[class]
class_lay=list(map(int, options.class_lay.split(',')))
class_drop=list(map(float, options.class_drop.split(',')))
class_use_laynorm_inp=str_to_bool(options.class_use_laynorm_inp)
class_use_batchnorm_inp=str_to_bool(options.class_use_batchnorm_inp)
class_use_batchnorm=list(map(str_to_bool, options.class_use_batchnorm.split(',')))
class_use_laynorm=list(map(str_to_bool, options.class_use_laynorm.split(',')))
class_act=list(map(str, options.class_act.split(',')))
#[optimization]
lr=float(options.lr)
batch_size=int(options.batch_size)
N_epochs=int(options.N_epochs)
N_batches=int(options.N_batches)
N_eval_epoch=int(options.N_eval_epoch)
seed=int(options.seed)
use_kwinners=str_to_bool(options.use_kwinners)
# training list
wav_lst_tr=ReadList(tr_lst)
snt_tr=len(wav_lst_tr)
# test list
wav_lst_te=ReadList(te_lst)
snt_te=len(wav_lst_te)
# Folder creation
# Create the output folder if it does not already exist. makedirs with
# exist_ok replaces the original stat/except/mkdir sequence, which used a
# bare `except:` and was racy between the check and the mkdir.
os.makedirs(output_folder, exist_ok=True)
# setting seed
# Fix both RNGs so the evaluation run is reproducible.
torch.manual_seed(seed)
np.random.seed(seed)
# loss function
cost = nn.NLLLoss()
# Converting context and shift in samples
wlen=int(fs*cw_len/1000.00)
wshift=int(fs*cw_shift/1000.00)
# Batch_dev
# Number of windowed chunks scored per forward pass at evaluation time.
Batch_dev=128
# Feature extractor CNN
CNN_arch = {'input_dim': wlen,
            'fs': fs,
            'cnn_N_filt': cnn_N_filt,
            'cnn_len_filt': cnn_len_filt,
            'cnn_max_pool_len':cnn_max_pool_len,
            'cnn_use_laynorm_inp': cnn_use_laynorm_inp,
            'cnn_use_batchnorm_inp': cnn_use_batchnorm_inp,
            'cnn_use_laynorm':cnn_use_laynorm,
            'cnn_use_batchnorm':cnn_use_batchnorm,
            'cnn_act': cnn_act,
            'cnn_drop':cnn_drop,
            'use_kwinners':use_kwinners,
            }
CNN_net=CNN(CNN_arch)
CNN_net.cuda()
# Loading label dictionary
# Maps wav file path -> integer speaker label.
lab_dict=np.load(class_dict_file, allow_pickle=True).item()
# Classifier head part 1: fully-connected layers on top of the CNN features.
DNN1_arch = {'input_dim': CNN_net.out_dim,
             'fc_lay': fc_lay,
             'fc_drop': fc_drop,
             'fc_use_batchnorm': fc_use_batchnorm,
             'fc_use_laynorm': fc_use_laynorm,
             'fc_use_laynorm_inp': fc_use_laynorm_inp,
             'fc_use_batchnorm_inp':fc_use_batchnorm_inp,
             'fc_act': fc_act,
             'use_kwinners': use_kwinners,
             'sparsity': 0.8,
             'percent_on': 0.6,
             # 'boost_strength': 1.0,
             # 'boost_strength_factor': 0.9,
             # 'k_inference_factor': 1.0,
             # 'duty_cycle_period': 1000,
             }
DNN1_net=MLP(DNN1_arch)
DNN1_net.cuda()
# Classifier head part 2: output layer over the speaker classes.
DNN2_arch = {'input_dim':fc_lay[-1] ,
             'fc_lay': class_lay,
             'fc_drop': class_drop,
             'fc_use_batchnorm': class_use_batchnorm,
             'fc_use_laynorm': class_use_laynorm,
             'fc_use_laynorm_inp': class_use_laynorm_inp,
             'fc_use_batchnorm_inp':class_use_batchnorm_inp,
             'fc_act': class_act,
             'use_kwinners': use_kwinners,
             'sparsity': 0.8, #"0.3"
             'percent_on': 0.6,
             # 'boost_strength': 1.0,
             # 'boost_strength_factor': 0.9,
             # 'k_inference_factor': 1.0,
             # 'duty_cycle_period': 250,
             }
DNN2_net=MLP(DNN2_arch)
DNN2_net.cuda()
# Warm-start all three sub-networks from a saved checkpoint when given.
if pt_file!='none':
    checkpoint_load = torch.load(pt_file)
    CNN_net.load_state_dict(checkpoint_load['CNN_model_par'])
    DNN1_net.load_state_dict(checkpoint_load['DNN1_model_par'])
    DNN2_net.load_state_dict(checkpoint_load['DNN2_model_par'])
# Optimizers (not used by the evaluation loop below; kept for parity with
# the training script this file was derived from).
optimizer_CNN = optim.RMSprop(CNN_net.parameters(), lr=lr,alpha=0.95, eps=1e-8)
optimizer_DNN1 = optim.RMSprop(DNN1_net.parameters(), lr=lr,alpha=0.95, eps=1e-8)
optimizer_DNN2 = optim.RMSprop(DNN2_net.parameters(), lr=lr,alpha=0.95, eps=1e-8)
# HTM Param
FIRST_EPOCH_BATCH_SIZE = 4
os.makedirs(output_folder + '/checkpoint', exist_ok=True)
checkpoint_num = 1
# Noise sweep: ten levels in steps of 0.001, starting at 0.001.
# noise_list = [0.0, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01]
# noise_list = [0.011, 0.012, 0.013, 0.014, 0.015, 0.016, 0.017, 0.018, 0.019, 0.02]
s = 0.00
s = s+0.001
noise_list = [round(s+i*0.001,3) for i in range(0, 10)]
print(noise_list)
# For each noise level, run a full evaluation pass over the test set with
# that amount of uniform noise blended into the input signals.
for noise in noise_list:
    # AddNoiseInstance = AddNoise(noise)
    # Full Validation new
    CNN_net.eval()
    DNN1_net.eval()
    DNN2_net.eval()
    test_flag=1
    loss_sum=0        # accumulated NLL loss over sentences
    err_sum=0         # accumulated frame-level error rate
    err_sum_snt=0     # accumulated sentence-level error count
    with torch.no_grad():
        for i in range(snt_te):
            #[fs,signal]=scipy.io.wavfile.read(data_folder+wav_lst_te[i])
            #signal=signal.astype(float)/32768
            [signal, fs] = sf.read(data_folder+wav_lst_te[i])
            # Noise amplitude is scaled to the signal's peak value.
            AddNoiseInstance = AddNoise(noise,signal.max())
            signal=AddNoiseInstance(signal)
            signal=torch.from_numpy(signal).float().cuda().contiguous()
            lab_batch=lab_dict[wav_lst_te[i]]
            # split signals into chunks
            beg_samp=0
            end_samp=wlen
            N_fr=int((signal.shape[0]-wlen)/(wshift))
            sig_arr=torch.zeros([Batch_dev,wlen]).float().cuda().contiguous()
            lab= Variable((torch.zeros(N_fr+1)+lab_batch).cuda().contiguous().long())
            pout=Variable(torch.zeros(N_fr+1,class_lay[-1]).float().cuda().contiguous())
            count_fr=0       # frames currently buffered in sig_arr
            count_fr_tot=0   # frames processed so far for this sentence
            while end_samp<signal.shape[0]:
                # Slide a window of wlen samples with hop wshift.
                sig_arr[count_fr,:]=signal[beg_samp:end_samp]
                beg_samp=beg_samp+wshift
                end_samp=beg_samp+wlen
                count_fr=count_fr+1
                count_fr_tot=count_fr_tot+1
                if count_fr==Batch_dev:
                    # Buffer full: score Batch_dev windows in one pass.
                    inp=Variable(sig_arr)
                    pout[count_fr_tot-Batch_dev:count_fr_tot,:]=DNN2_net(DNN1_net(CNN_net(inp)))
                    count_fr=0
                    sig_arr=torch.zeros([Batch_dev,wlen]).float().cuda().contiguous()
            if count_fr>0:
                # Score the remaining, partially filled buffer.
                inp=Variable(sig_arr[0:count_fr])
                pout[count_fr_tot-count_fr:count_fr_tot,:]=DNN2_net(DNN1_net(CNN_net(inp)))
            # Frame-level prediction/loss/error for this sentence.
            pred=torch.max(pout,dim=1)[1]
            loss = cost(pout, lab.long())
            # print("pred: {}, lab.long(): {}".format(pred, lab.long()))
            # print((pred!=lab.long()).float())
            err = torch.mean((pred!=lab.long()).float())
            # Sentence-level decision: sum per-frame posteriors, take argmax.
            # print("pout: {}, torch.sum(pout,dim=0): {}".format(pout, torch.sum(pout,dim=0)))
            pout0 = torch.sum(pout,dim=0)
            [val,best_class]=torch.max(torch.sum(pout,dim=0),0)
            # print("best_class: {}, lab[0]: {}".format(best_class, lab[0]))
            err_sum_snt=err_sum_snt+(best_class!=lab[0]).float()
            loss_sum=loss_sum+loss.detach()
            err_sum=err_sum+err.detach()
        # Average metrics over all test sentences.
        err_tot_dev_snt=err_sum_snt/snt_te
        loss_tot_dev=loss_sum/snt_te
        err_tot_dev=err_sum/snt_te
    print("noise %f, loss_te=%f err_te=%f err_te_snt=%f" % (noise, loss_tot_dev,err_tot_dev,err_tot_dev_snt))
pranshu28/cnn-viz | utils.py | <gh_stars>1-10
import numpy as np
import cv2
from keras import backend as K
# Normalize the input and clip between (0,1)
def normalize_clip(x):
    """Standardize *x*, squash around 0.5, and clip into [0, 1].

    Uses in-place augmented assignments, so a float array passed in is
    modified by the standardization steps (matching existing callers).
    """
    # Zero-mean, (near-)unit-std; epsilon guards a constant input.
    x -= x.mean()
    x /= x.std() + 1e-5
    # Shrink to +-0.1 around a 0.5 midpoint, then clip to the unit range.
    x *= 0.1
    x += 0.5
    return np.clip(x, 0, 1)
# Normalize and Convert to RGB image
def deprocess_image(x):
    """Turn an arbitrary float array into a displayable uint8 RGB image.

    Standardizes via normalize_clip, scales to the 8-bit range, and moves
    channels-first input to channels-last when needed.
    """
    scaled = normalize_clip(x) * 255
    if scaled.shape[2] != 3:
        # (C, H, W) -> (H, W, C) for image libraries.
        scaled = scaled.transpose((1, 2, 0))
    return np.clip(scaled, 0, 255).astype('uint8')
# L2 normalization of gradients
def l2_normalize(x):
    """Divide tensor *x* by its root-mean-square magnitude (plus epsilon)."""
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
# Gaussian Blur Regularization
def blur_regularization(img, size=(3, 3)):
    """Smooth *img* with an OpenCV blur of the given kernel size.

    NOTE(review): cv2.blur is a normalized box filter, not a Gaussian,
    despite the section comment above — confirm which was intended.
    """
    return cv2.blur(img, size)
# L2 decay regularization
def decay_regularization(img, decay=0.8):
    """Shrink every pixel toward zero by multiplying with *decay*."""
    decayed = img * decay
    return decayed
# Clipping pixels with small norm
def clip_weak_pixel_regularization(img, percentile=1):
    """Zero out the weakest pixels of *img* by absolute value.

    Pixels whose magnitude falls below the given *percentile* of |img| are
    set to zero. Works on a copy: the original version aliased the input
    (`clipped = img`) and silently mutated the caller's array.
    """
    clipped = img.copy()
    threshold = np.percentile(np.abs(clipped), percentile)
    clipped[np.abs(clipped) < threshold] = 0
    return clipped
# Save list of images in one figure
def all_imgs_in_one(img_list, img_width, img_height):
    """Tile images into one square grid canvas with 5-pixel margins.

    The grid side is floor(sqrt(len(img_list))); any images beyond the
    first grid*grid entries are not placed. Returns a float canvas of
    shape (grid*w + margins, grid*h + margins, 3) initialized to zeros.
    """
    margin = 5
    grid = int(len(img_list) ** 0.5)
    canvas_w = grid * img_width + (grid - 1) * margin
    canvas_h = grid * img_height + (grid - 1) * margin
    canvas = np.zeros((canvas_w, canvas_h, 3))
    for idx, tile in enumerate(img_list[:grid * grid]):
        row, col = divmod(idx, grid)
        top = (img_width + margin) * row
        left = (img_height + margin) * col
        canvas[top:top + img_width, left:left + img_height, :] = tile
    return canvas
def save_occs(occ_img_list, img_width, img_height, img_path):
    """Tile all occluded images into one figure and save it under occ_exp/."""
    occ_img = all_imgs_in_one(occ_img_list, img_width, img_height)
    cv2.imwrite('occ_exp/occs_' + img_path, occ_img)
def save_filters(filters, img_width, img_height, layer, name):
    """Tile all filter visualizations into one figure and save it under cnn_filters/."""
    stitched_filters = all_imgs_in_one(filters, img_width, img_height)
    cv2.imwrite('cnn_filters/filters_pro_' + layer + '_' + name, stitched_filters)
|
pranshu28/cnn-viz | viz_occlusion.py | <filename>viz_occlusion.py
import argparse
import math
import time
import matplotlib.pylab as plt
import seaborn as sns
from model import *
from utils import *
def get_occ_imgs(img, img_size, occ_size, occ_pixel, occ_stride, classes):
    """Slide an occluding window over the image and record class probabilities.

    img: path of the input image.
    img_size: side length the image is resized to (square).
    occ_size: side length of the occluding window.
    occ_pixel: pixel value the window is filled with.
    occ_stride: window step in both dimensions.
    classes: probability vector for the unoccluded image.

    Saves the per-position probability matrix to occ_exp/probs_<name>.npy
    plus one annotated occluded frame per position, and returns the matrix.
    NOTE(review): relies on the module-level `model` and `img_name` that
    the __main__ block sets up.
    """
    # Get original image
    image = cv2.imread(img)
    image = cv2.resize(image, (img_size, img_size)).astype(np.float32)
    # Index of class with highest probability
    class_index = np.argmax(classes)
    print('True class index:', class_index)
    # Define number of occlusions in both dimensions
    output_height = int(math.ceil((img_size - occ_size) / occ_stride + 1))
    output_width = int(math.ceil((img_size - occ_size) / occ_stride + 1))
    print('Total iterations:', output_height, '*', output_width, '=', output_height * output_width)
    # Initialize probability heatmap and occluded images
    temp_img_list = []
    prob_matrix = np.zeros((output_height, output_width))
    start = time.time()
    for h in range(output_height):
        for w in range(output_width):
            # Occluder window (clipped at the image border):
            h_start = h * occ_stride
            w_start = w * occ_stride
            h_end = min(img_size, h_start + occ_size)
            w_end = min(img_size, w_start + occ_size)
            # Getting the image copy, applying the occluding window and classifying it:
            occ_image = image.copy()
            occ_image[h_start:h_end, w_start:w_end, :] = occ_pixel
            predictions = pred_prob_list(model, occ_image.copy())[0]
            prob = predictions[class_index]
            # Collect the probability value in a matrix
            prob_matrix[h, w] = prob
            # Paint the window with the probability (bright = high prob)
            # and annotate the numeric value, then save the frame.
            occ_image[h_start:h_end, w_start:w_end, :] = prob*255
            cv2.putText(img=occ_image, text=str(round(prob,4)), org=(w_start, int(h_start + (h_end - h_start) / 2)),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.3, color=(255*(1-prob),255*(1-prob),255*(1-prob)), thickness=1)
            cv2.imwrite('occ_exp/video/'+img_name+str(h*output_width+w+1).zfill(6)+'.png',occ_image)
            # To save occluded images as a video, run the following shell command
            """ffmpeg -framerate 5 -i occ_exp/video/<img_name>%06d.jpg -c:v libx264 -profile:v high -crf 20 -pix_fmt yuv420p occ_exp/<img_name>.mp4"""
            temp_img_list.append(occ_image)
        print('Percentage done :', round(((h + 1) * output_width) * 100 / (output_height * output_width), 2), '%')
    end = time.time()
    elapsed = end - start
    print('Total time taken:', elapsed, 'sec\tAverage:', elapsed / (output_height * output_width), 'sec')
    # Save probabilities and all occluded images in one
    np.save('occ_exp/probs_' + img_name + '.npy', prob_matrix)
    # save_occs(temp_img_list, img_size, img_size, img_path.split('/')[-1])
    return prob_matrix
def regularize(prob, norm, percentile):
    """Post-process the occlusion probability matrix into a heat-map.

    prob: 2-D probability matrix from the occlusion sweep.
    norm: truthy -> standardize/clip prob into [0, 1] first.
    percentile: weakest-pixel percentile to zero out before blurring.

    Saves the raw and the regularized heat-map figures (named after the
    module-level img_path) and returns the regularized heat-map.
    """
    # First save the original prob matrix as heat-map
    f = plt.figure(1)
    sns.heatmap(prob, xticklabels=False, yticklabels=False)
    f.savefig('occ_exp/heatmap_' + img_path.split('/')[-1])
    # Apply Regularization
    prob = normalize_clip(prob) if norm else prob
    clipped = clip_weak_pixel_regularization(prob, percentile=percentile)
    # Invert (1 - clipped) so low-probability regions become bright.
    reg_heat = blur_regularization(1 - clipped, size=(3, 3))
    # Save regularized heat-map
    f2 = plt.figure(2)
    sns.heatmap(reg_heat, xticklabels=False, yticklabels=False)
    f2.savefig('occ_exp/heatmap_reg_' + img_path.split('/')[-1])
    return reg_heat
def join(heat_reg, img, img_size, occ_size):
    """Project the regularized heat-map back onto the original image.

    heat_reg: regularized heat-map (approximately in [0, 1]).
    img: path of the original image; img_size: working resolution.
    occ_size: occluder size (used only by the commented-out border variants).

    Saves the shaded image to occ_exp/final_<name> and returns it at the
    image's original resolution.
    """
    # Get original image
    image = cv2.imread(img, 1)
    inp_img = cv2.resize(image, (img_size, img_size))
    H, W = image.shape[0], image.shape[1]
    bord = int(occ_size / 2)
    # Define heat-map to be projected on original image
    heat_map = cv2.resize(heat_reg,(img_size, img_size)).astype(np.float32)
    # Second way to define heat-map - manually set border values
    # heat_map = np.zeros((img_size, img_size))
    # heat_map[bord:img_size - bord, bord:img_size - bord] = cv2.resize(heat_reg,
    #                                                                   (img_size - occ_size, img_size - occ_size)).astype(np.float32)
    # np.place(heat_map, heat_map == 0.0, np.median(heat_map))
    # Third way to define heat-map - replicate border values
    # heatmap = cv2.resize(heat, (img_size-occ_size, img_size-occ_size)).astype(np.float32)
    # heatmap = cv2.copyMakeBorder(heat-map,bord,bord,bord,bord,cv2.BORDER_REPLICATE)
    # Original image * heat-map: darkens regions the heat-map marks weak.
    for i in range(3):
        inp_img[:, :, i] = heat_map * inp_img[:, :, i]
    inp_viz = cv2.resize(inp_img, (W, H))
    # Save the final output
    cv2.imwrite('occ_exp/final_' + img.split('/')[-1], inp_viz)
    return inp_viz
def get_args():
    """Parse command-line arguments for the occlusion experiment.

    Returns an argparse.Namespace with the same fields and defaults as
    before; numeric defaults are now real ints instead of strings that
    argparse had to re-parse through `type`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--img', type=str)                                       # path of the input image
    parser.add_argument('--weights_path', type=str, default='vgg16_weights.h5')  # saved pre-trained model
    parser.add_argument('--size', type=int, default=224)                         # input size (width == height)
    parser.add_argument('--occ_size', type=int, default=40)                      # size of occluding window
    parser.add_argument('--pixel', type=int, default=0)                          # occluding window pixel value
    parser.add_argument('--stride', type=int, default=5)                         # occlusion stride
    parser.add_argument('--norm', type=int, default=1)                           # normalize probabilities first
    parser.add_argument('--percentile', type=int, default=25)                    # regularization percentile for heatmap
    return parser.parse_args()
if __name__ == '__main__':
    args = get_args()
    print('\n', args)
    img_path, img_size = args.img, args.size
    # Base name (no directory, no extension) used for all output files.
    img_name = img_path.split('/')[-1].split('.')[0]
    occ_size, occ_pixel, occ_stride = args.occ_size, args.pixel, args.stride
    # Input pre-trained model, defined in model.py
    model = load_trained_model(args.weights_path)
    # Get original image
    input_image = cv2.imread(img_path)
    input_image = cv2.resize(input_image, (img_size, img_size)).astype(np.float32)
    # Get probability list and print top 5 classes
    result = pred_prob_list(model, input_image)
    de_result = decode_predictions(result)[0]
    print('\nPredicted: ', de_result)
    # Start occlusion experiment and store predicted probabilities in a file
    print('Running occlusion iterations (Class:', de_result[0][1], ') ...\n')
    probs = get_occ_imgs(img_path, img_size, occ_size, occ_pixel, occ_stride, result)
    # Get probabilities and apply regularization
    # (reloaded from the .npy file get_occ_imgs just wrote)
    print('\nGetting probability heat-map and regularizing...')
    probs = np.load('occ_exp/probs_' + img_name + '.npy')
    heat = regularize(probs, args.norm, args.percentile)
    # Project heatmap on original image
    print('\nProject the heat-map to original image...')
    aug = join(heat, img_path, img_size, occ_size)
    print('\nDone')
|
pranshu28/cnn-viz | viz_gradient_ascent.py | import argparse
import random
from model import *
from utils import *
def gradient_ascent_iteration(loss_function, syn_img):
    """One gradient-ascent step on the synthesized image, with regularization.

    loss_function: Keras function mapping [image batch] -> [loss, grads].
    syn_img: current synthesized image batch of shape (1, H, W, C).
    Returns the regularized ascent image (transposed back to batch layout)
    minus syn_img; visualize_filter assigns this return value directly as
    the next syn_img.
    """
    # Update image with the gradient
    loss_value, grads_value = loss_function([syn_img])
    gradient_ascent_step = syn_img + grads_value * 0.9
    # grads_row_major = np.transpose(grads_value[0, :], (1, 2, 0))
    img_row_major = np.transpose(gradient_ascent_step[0, :], (1, 2, 0))
    # Define weights for individual regularization
    reg_functions = [blur_regularization, decay_regularization, clip_weak_pixel_regularization]
    weights = np.float32([3, 3, 1])
    weights /= np.sum(weights)
    # Apply each regularizer to the stepped image and blend the results.
    images = [reg_func(img_row_major) for reg_func in reg_functions]
    weighted_images = np.float32([w * image for w, image in zip(weights, images)])
    act = np.sum(weighted_images, axis=0)
    # Difference has been taken to visualize activated part clearly in the filter
    act = np.float32([np.transpose(act, (2, 0, 1))]) - syn_img
    return act
def visualize_filter(input_img, filter_index, img_placeholder, number_of_iterations=20):
    """Synthesize an image that activates one filter of the target layer.

    input_img: starting image batch (list/array holding one H,W,C image).
    filter_index: filter index within the module-level `layer` tensor.
    img_placeholder: Keras input placeholder of the model.
    number_of_iterations: gradient-ascent steps to run.
    Returns the synthesized image as a displayable uint8 array.
    """
    # a loss function to maximize the activation of the filter
    loss = K.mean(layer[:, :, :, filter_index])
    # compute the gradient of the input picture wrt loss and normalize it
    grads = K.gradients(loss, img_placeholder)[0]
    grads = l2_normalize(grads)  # (utils.py)
    # function to return loss and gradient for given image
    iterate = K.function([img_placeholder], [loss, grads])
    syn_img = input_img * 1
    for iter in range(number_of_iterations):
        syn_img = gradient_ascent_iteration(iterate, syn_img)
    # function to convert it into a valid image (utils.py)
    syn_img = deprocess_image(syn_img[0])
    print("Done with filter", filter_index)
    return syn_img
def get_args():
    """Parse command-line options for the gradient-ascent visualization."""
    parser = argparse.ArgumentParser()
    option_specs = (
        ("--iterations", dict(type=int, default=20)),                     # gradient-ascent steps
        ("--img", dict(type=str)),                                        # path of the input image
        ("--weights_path", dict(type=str, default='vgg16_weights.h5')),   # saved pre-trained model
        ("--layer", dict(type=str, default='conv1_1')),                   # layer to visualize
        ("--num_filters", dict(type=int, default=64)),                    # number of filters to sample
        ("--size", dict(type=int, default=128)),                          # image width/height
    )
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == "__main__":
    args = get_args()
    print(args)
    img_width, img_height = args.size, args.size
    # define input placeholder and get model and weights, functions defined in model.py
    first_layer = ZeroPadding2D((1, 1), input_shape=(img_width, img_height, 3))
    model = get_model(first_layer)
    # input_placeholder = K.placeholder((1, img_width, img_height, 3))
    input_placeholder = model.input
    model = load_model_weights(model, args.weights_path)
    # Output of the specified layer
    layer = get_output_layer(model, args.layer)
    # Initialize input image: random noise when no image is supplied.
    if args.img is None:
        init_img = np.random.random((1, img_width, img_height, 3)) * 20 + 128.
        cv2.imwrite('random.png', cv2.resize(init_img[0], (img_width * 2, img_height * 2)))
    else:
        img = cv2.imread(args.img, 1)
        img = cv2.resize(img, (img_width, img_height))
        init_img = [img]  # [np.transpose(img, (2, 0, 1))]
    # Choose a random subset of filters when the layer has more than requested.
    if layer.shape[3] > args.num_filters:
        filter_indexes = [random.randint(0, layer.shape[3] - 1) for i in range(0, args.num_filters)]
        filter_indexes.sort()
    else:
        filter_indexes = range(0, layer.shape[3])
    # Iterate for all filters
    filters_viz = [None] * len(filter_indexes)
    for i, index in enumerate(filter_indexes):
        filters_viz[i] = visualize_filter(init_img, index, input_placeholder, args.iterations)
    # NOTE(review): args.img is None when --img is omitted, so this .split()
    # would raise AttributeError — confirm --img is meant to be required here.
    save_filters(filters_viz, img_width, img_height, args.layer, args.img.split("/")[-1])
|
pranshu28/cnn-viz | model.py | from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D
from keras.layers.core import Flatten, Dense, Dropout
from keras.applications.mobilenet import preprocess_input, decode_predictions
from keras import backend as K
from keras.utils.conv_utils import convert_kernel
import tensorflow as tf
import numpy as np
# Model definition
def get_model(first_layer):
    """Build the VGG16-style convolutional feature extractor.

    first_layer: the input layer (a ZeroPadding2D carrying input_shape).
    Returns a Sequential model of 13 conv layers in 5 blocks, each block
    followed by 2x2 max pooling. No fully-connected/output layers.

    NOTE(review): Conv2D(64, 3, 3, ...) is Keras-1-style positional
    arguments; under Keras 2 the second/third positionals mean
    kernel_size and strides — confirm the installed Keras version.
    """
    # from keras import applications
    # model = applications.VGG16(include_top=False, weights='imagenet')
    model = Sequential()
    model.add(first_layer)
    model.add(Conv2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPool2D((2, 2), strides=(2, 2)))
    # model.summary()
    return model
def load_model_weights(model, weights_path):
    """Load pre-trained weights into *model* and convert kernel layout.

    weights_path: path of the HDF5 weights file (matched by layer name).
    After loading, every Conv2D kernel is run through convert_kernel and
    re-assigned, translating Theano-format kernels for TensorFlow use
    (whether this is needed depends on how the weights were saved).
    Returns the same model instance.
    """
    print('\nLoading model.')
    # Load pre-trained model
    model.load_weights(weights_path, by_name=True)
    # Theano to Tensoflow - depends on the version
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Conv2D']:  # Layers with pre-trained weights
            original_w = K.get_value(layer.kernel)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.kernel, converted_w).op)
    # Run all the assignment ops in the active TF session at once.
    K.get_session().run(ops)
    # Prev code
    # f = h5py.File(weights_path)
    # for k in range(f.attrs['nb_layers']):
    #     if k >= len(model.layers):
    #         # we don't look at the last (fully-connected) layers in the savefile
    #         break
    #     g = f['layer_{}'.format(k)]
    #     weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    #     model.layers[k].set_weights(weights)
    # f.close()
    # model.save_weights(weights_path)
    print('\nModel loaded.')
    return model
# Return output of specified layer
def get_output_layer(model, layer_name):
    """Return the output tensor of the model layer named *layer_name*.

    Raises KeyError when no layer carries that name.
    """
    layers_by_name = {layer.name: layer for layer in model.layers}
    return layers_by_name[layer_name].output
# Load trained model - for occlusion experiment
def load_trained_model(weights_path):
    """Return a classifier with an output layer for the occlusion experiment.

    NOTE(review): despite the name, *weights_path* is currently ignored —
    the function returns an ImageNet-pretrained MobileNet instead of the
    VGG16 built by get_model (see the commented-out lines).
    """
    # first_layer = ZeroPadding2D((1, 1), input_shape=(img_width, img_height, 3))
    # model = get_model(first_layer) # must have FC and output layer for class prediction
    # model.load_weights(weights_path, by_name=True)
    from keras.applications.mobilenet import MobileNet
    model = MobileNet(weights='imagenet')
    return model
# Predict probabilities for given test image using trained model
def pred_prob_list(model, test_image):
    """Run a single image through *model* and return class probabilities.

    test_image: one H x W x C image array (no batch dimension).
    Returns the model's prediction array of shape (1, num_classes).
    """
    # Add the batch dimension, then apply the network's preprocessing.
    test_image = np.expand_dims(test_image, axis=0)
    test_image = preprocess_input(test_image)
    predictions = model.predict(test_image)
    return predictions
|
adulbrich/arousal-dynamics-model | models.py |
from numpy import sqrt, exp, pi, power, tanh, vectorize
# Model constants, shared by all functions below. Sources: Postnova et al.
# 2018 (Table 1) and Tekieh et al. 2020, as annotated per group.
# time constants for model: Postnova et al. 2018 - Table 1
tau_v = 50.0 #s
tau_m = tau_v
tau_H = 59.0*3600.0 #s
tau_X = (24.0*3600.0) / (2.0*pi) #s
tau_Y = tau_X
tau_C = 24.2*3600.0 #s
tau_A = 1.5*3600.0 #s # 1.5 hours # Tekieh et al. 2020 - Section 2.3.2, after Equation 9
tau_L = 24.0*60.0 #s # 24 min # Tekieh et al. 2020 - Section 3.3
# coupling strengths constants: Postnova et al. 2018 - Table 1
nu_vm = -2.1 #mV
nu_mv = -1.8 #mV
nu_Hm = 4.57 #s
nu_Xp = 37.0*60.0 #s
nu_Xn = 0.032
nu_YY = (1.0/3.0)*nu_Xp
nu_YX = 0.55*nu_Xp
nu_vH = 1.0
nu_vC = -0.5 #mV
nu_LA = -0.11 # Tekieh et al. 2020 - Section 3.3
# nu_LA = -0.4 # testing: good results with -0.4
# circadian constants: Postnova et al. 2018 - Table 1
gamma = 0.13
delta = 24.0*3600.0/0.99729 #s
beta = 0.007/60.0 #sˆ-1
#mV # external neuronal drives constants: Postnova et al. 2018 - Table 1
D_m = 1.3
def wake_effort(Q_v, forced = 0):
    """Wake-effort drive W (Postnova et al. 2018, Table 1, Eq. 8).

    Q_v: mean population firing rate of the VLPO sleep-active neurons.
    forced: 1 during forced wakefulness, 0 otherwise (default).
    Returns the wake effort W; zero unless wake is being forced.
    """
    V_WE = -0.07  # mV, wake-effort threshold constant (Table 1)
    # Effort is never negative and only exerted while wake is forced.
    effort = max(0, V_WE - nu_mv * Q_v - D_m)
    return forced * effort
wake_effort_v = vectorize(wake_effort)
def total_sleep_drive(H,C):
    """Total sleep drive D_v to the VLPO (Postnova 2018, Table 1, Eq. 9).

    H: homeostatic drive; C: circadian drive (sleep-propensity model).
    Combines both weighted drives with a constant background drive A_v.
    """
    A_v = -10.3  # mV, constant background drive (Table 1)
    return nu_vH * H + nu_vC * C + A_v
total_sleep_drive_v = vectorize(total_sleep_drive)
def nonphotic_drive(X, S):
    """Nonphotic drive D_n to the circadian oscillator (Eq. 11).

    X: circadian variable; S: 1 = awake, 0 = asleep.
    Positive while awake, negative while asleep, gated by tanh(r*X).
    """
    r = 10.0  # steepness constant (Postnova et al. 2018, Table 1)
    wake_bias = S - (2.0 / 3.0)
    return wake_bias * (1 - tanh(r * X))
nonphotic_drive_v = vectorize(nonphotic_drive)
def photoreceptor_conversion_rate(IE, S, version = '2020'):
    """Photoreceptor conversion rate alpha.

    IE: illuminance in lux ('2018') or melanopic irradiance in W/m^2 ('2020').
    S: 1 while awake, 0 while asleep (no light input while asleep).
    version: '2018' -> Postnova et al. 2018, Eq. 13;
             '2020' -> Tekieh et al. 2020, Eq. 7 (default).
    Returns the conversion rate alpha (s^-1).
    """
    gated = IE * S  # Postnova et al. 2018, Eq. 14: eyes closed -> no light
    # Photic-drive constants (Postnova et al. 2018, Table 1).
    I_0 = 100            # lx
    I_1 = 9500           # lx
    alpha_0 = 0.1/60.0   # s^-1
    if version == '2018':
        alpha = ((alpha_0 * gated) / (gated + I_1)) * sqrt(gated / I_0)
    if version == '2020':
        # F_4100K rescales the lux constants to melanopic-irradiance units.
        F_4100K = 8.19e-4  # Tekieh et al. 2020, Eq. 5
        alpha = ((alpha_0 * gated) / (gated + I_1 * F_4100K)) * sqrt(gated / (I_0 * F_4100K))
    return alpha
photoreceptor_conversion_rate_v = vectorize(photoreceptor_conversion_rate)
def photic_drive(X, Y, P, alpha):
    """Photic drive D_p to the circadian oscillator (Eq. 12).

    X, Y: circadian variables; P: photoreceptor activity in [0, 1];
    alpha: photoreceptor conversion rate.
    The drive acts on the non-activated receptor pool (1 - P), modulated
    by the circadian state.
    """
    epsilon = 0.4  # circadian modulation strength (Table 1)
    available_pool = alpha * (1 - P)
    return available_pool * (1 - epsilon * X) * (1 - epsilon * Y)
photic_drive_v = vectorize(photic_drive)
def mean_population_firing_rate(V_i):
    """Sigmoidal voltage-to-rate conversion (Eq. 7).

    V_i: mean voltage (mV) of the VLPO or MA neuronal population.
    Returns the mean population firing rate Q in s^-1, saturating at Q_max.
    """
    # Firing-rate constants (Postnova et al. 2018, Table 1).
    Q_max = 100.0        # s^-1, saturation rate
    theta = 10.0         # mV, half-activation voltage
    sigma_prime = 3.0    # mV, slope parameter
    denominator = 1 + exp((theta - V_i) / sigma_prime)
    return Q_max / denominator
mean_population_firing_rate_v = vectorize(mean_population_firing_rate)
def state(V_m):
    """Binary wake/sleep state from MA voltage (Eq. 15).

    V_m: mean voltage (mV) of the monoaminergic wake-active population.
    Returns 1 (awake) when V_m exceeds the threshold V_th, else 0 (asleep).
    """
    V_th = -2.0  # mV, wake threshold (Table 1)
    return 1 if V_m > V_th else 0
state_v = vectorize(state)
def sigmoid(E_emel):
    """Alerting-effect sigmoid of melanopic irradiance (Tekieh 2020, Eq. 14).

    E_emel: melanopic irradiance (W/m^2).
    Returns a value in (0, 1). S_b is the irradiance of half-maximal
    alerting effect; S_c sets the steepness (the paper states
    S_c^-1 = 223.5 m^2/W; its printed "S_c" value is a typo).
    Although the sigmoid was first defined for illuminance, its parameters
    were fitted for irradiance, so irradiance is used directly here.
    """
    S_b = 0.05       # W/m^2, half-maximal alerting irradiance
    S_c = 1 / 223.5  # m^2/W, steepness parameter
    return 1 / (1 + exp((S_b - E_emel) / S_c))
sigmoid_v = vectorize(sigmoid)
def alertness_measure(C, H, Theta_L = 0):
    """Predicted sleepiness on the Karolinska Sleepiness Scale (KSS).

    Postnova et al. 2018, Eq. 23 / Tekieh et al. 2020, Eq. 12.
    C: circadian drive; H: homeostatic drive.
    Theta_L: light-dependent modulation of the homeostatic weight (default 0).
    KSS ranges from 1 = "extremely alert" to 9 = "extremely sleepy,
    fighting sleep".
    """
    # KSS regression coefficients (Postnova et al. 2018, Table 3).
    Theta_0 = -24.34
    Theta_H = 2.28
    Theta_C = -1.74
    return Theta_0 + (Theta_H + Theta_L) * H + Theta_C * C
alertness_measure_v = vectorize(alertness_measure)
def circadian_drive(X,Y):
    """Circadian sleep-propensity drive C (Postnova et al. 2016, Eqs. 1-3).

    X, Y: circadian oscillator variables.
    Sum of a small linear term in X and a squared rational shaping term.
    """
    linear_part = 0.1 * ((1.0 + X) / 2.0)
    shaped = (3.1 * X - 2.5 * Y + 4.2) / (3.7 * (X + 2))
    return linear_part + power(shaped, 2)
circadian_drive_v = vectorize(circadian_drive)
def melatonin_suppression(E_emel):
    """Fraction of melatonin remaining under light (Tekieh 2020, Eq. 9).

    E_emel: melanopic irradiance (W/m^2).
    Returns r in [0, 1]: 1 = no suppression (darkness), approaching 0 as
    irradiance grows.
    """
    # Sigmoid parameters (Tekieh et al. 2020, Sec. 2.3.2, after Eq. 9).
    r_a = 1
    r_b = 0.031  # W/m^2
    r_c = 0.82
    suppressed_fraction = r_a / (1 + power(E_emel / r_b, -r_c))
    return 1 - suppressed_fraction
melatonin_suppression_v = vectorize(melatonin_suppression)
def model(y, t, input_function, forced_wake, minE, maxE, version = '2020'):
    """Right-hand side of the arousal-dynamics ODE system (for odeint).

    y: state vector [V_v, V_m, H, X, Y, P, Theta_L] — VLPO voltage, MA
       voltage, homeostatic drive, circadian variables X/Y, photoreceptor
       activity, and the light modulation of the homeostatic weight.
    t: time (s).
    input_function: t -> light input (lux for version '2018', melanopic
       irradiance for '2020').
    forced_wake: t -> 0/1 forced-wake flag.
    minE, maxE: light-input range used to rescale the alerting sigmoid
       into [0, 1].
    version: photoreceptor model variant, '2018' or '2020'.
    Returns the list of time-derivatives of y.
    """
    V_v, V_m, H, X, Y, P, Theta_L = y
    IE = input_function(t)
    S = state(V_m)
    # so many things can go wrong with this sigmoid definition
    # what's the threshold irradiance that creates a locally measurable impact on the KSS?
    Sigmoid = ( sigmoid(IE) - sigmoid(minE) ) / ( sigmoid(maxE) - sigmoid(minE) )  # Tekieh et al. 2020 - Section 2.3.3: scaling to [0,1]
    alpha = photoreceptor_conversion_rate(IE, S, version)
    Q_m = mean_population_firing_rate(V_m)
    Q_v = mean_population_firing_rate(V_v)
    C = circadian_drive(X,Y)
    D_v = total_sleep_drive(H,C)
    D_n = nonphotic_drive(X, S)
    D_p = photic_drive(X, Y, P, alpha)
    F_w = forced_wake(t)
    W = wake_effort(Q_v, F_w)
    gradient_y = [(nu_vm*Q_m - V_v + D_v)/tau_v,  # V_v, Postnova et al. 2018 - Table 1, Equation 1
                  (nu_mv*Q_v - V_m + D_m + W)/tau_m,  # V_m, Postnova et al. 2018 - Table 1, Equation 2
                  (nu_Hm*Q_m - H)/tau_H,  # H, Postnova et al. 2018 - Table 1, Equation 3
                  (Y + gamma*(X/3.0 + power(X,3)*4.0/3.0 - power(X,7)*256.0/105.0) + nu_Xp*D_p + nu_Xn*D_n)/tau_X,  # X, Postnova et al. 2018 - Table 1, Equation 4
                  (D_p*(nu_YY*Y - nu_YX*X) - power((delta/tau_C),2)*X)/tau_Y,  # Y, Postnova et al. 2018 - Table 1, Equation 5
                  alpha*(1-P)-(beta*P),  # P, Postnova et al. 2018 - Table 1, Equation 6, revised
                  (-Theta_L + nu_LA*Sigmoid)/tau_L  # Tekieh et al. 2020 - Equation 13
                  ]
    return gradient_y
rekmarks/solidity-metadata | src/get_metadata.py | '''
Author: <NAME> (github.com/rekmarks)
Script for retrieving entity (contract, library, interface) names and
dependencies from the OpenZeppelin Solidity library.
'''
from os import walk, remove, path, makedirs
import json
import sys
def main():
    '''
    Read three command-line arguments -- a Solidity source directory and two
    output file paths -- collect metadata for every Solidity entity found
    under the directory, and dump the results to the output paths as JSON.
    '''
    if len(sys.argv) != 4:
        raise ValueError(
            'Expected 3 parameters but received: ' + str(len(sys.argv) - 1)
        )
    # Unpack argv: script name, source root, metadata output, filepaths output.
    _, solidity_root_path, metadata_path, filepaths_path = sys.argv
    metadata, filepaths = getMetadata(solidity_root_path)
    # Persist both mappings as JSON files.
    writeFile(filepaths_path, filepaths)
    writeFile(metadata_path, metadata)
def writeFile(filepath, data):
    '''
    Serialize `data` to `filepath` as pretty-printed, key-sorted JSON,
    creating the parent directory if it does not exist and overwriting
    any existing file.
    '''
    directory = path.dirname(filepath)
    # BUGFIX: when filepath has no directory component, dirname returns '' and
    # makedirs('') raises FileNotFoundError -- only create a real directory.
    if directory and not path.exists(directory):
        makedirs(directory)
    # create file and write data to it as JSON, deleting what's already there
    with open(filepath, 'w') as data_file:
        json.dump(data, data_file, indent=2, sort_keys=True)
# parse a contract, interface, or library from Solidity source lines
def parseEntity(lines, entity_type, compiler_version):
    '''
    Parse a single Solidity entity (contract, interface, or library).

    lines[0] must be the declaration line (e.g. "contract ERC20 is BasicToken {");
    the remaining lines are the entity body.  Dependencies are gathered from
    the "is" inheritance list on the declaration line and from "using X ..."
    directives in the body.

    Returns a dict with keys: type, compiler, name, dependencies (a set).
    Raises RuntimeError if an import name is not alphanumeric after stripping
    one trailing punctuation character.
    '''
    entity_data = {
        'type': entity_type,
        'compiler': compiler_version,
        'dependencies': set(),
    }
    # BUGFIX: distinct loop variables -- the original reused `i` for both the
    # line loop and the imports loop, shadowing the outer index.
    for line_number, raw_line in enumerate(lines):
        line = raw_line.split()
        imports = []
        if line_number == 0: # e.g. "contract ERC20 is BasicToken {"
            entity_data['name'] = line[1]
            if 'is' in line: # inherited entities are dependencies
                imports += line[line.index('is') + 1 : line.index('{')]
        # the other import keyword
        if line and line[0] == 'using':
            imports.append(line[1])
        if len(imports) == 0: continue
        # iterate over imports to add dependencies
        for current_import in imports:
            # strip one trailing punctuation char (',' from an "is" list,
            # ';' from a "using" directive)
            if not current_import[-1].isalnum():
                current_import = current_import[:-1]
            # defensive programming; BUGFIX: the original referenced an
            # undefined `filename` here, turning this into a NameError
            if not current_import.isalnum():
                raise RuntimeError('non-alphanumeric import '
                    + current_import + ' for ' + entity_data['name'])
            # add dependency
            entity_data['dependencies'].add(current_import)
    return entity_data
# Walk a directory tree and collect metadata for every Solidity entity found.
def getMetadata(root_path):
    '''
    Walk root_path, parse every Solidity (.sol) file (skipping mocks/,
    examples/, and files whose name contains "Deprecated"), and return:
      - metadata:  entity name -> {type, compiler, name, dependencies (sorted list)}
      - filepaths: entity name -> path of the declaring file
    '''
    metadata, filepaths = {}, {}
    # walk through openzeppelin directory
    for (dirpath, dirnames, filenames) in walk(root_path):
        # ignore mocks and examples, if using OpenZeppelin repo instead of npm dist
        if dirpath.endswith('mocks') or dirpath.endswith('examples'):
            continue
        # for filename in current directory
        for filename in filenames:
            # only check Solidity files
            if len(filename) < 5 or not filename.endswith('.sol'):
                continue
            # say no to deprecated contracts
            if filename.find('Deprecated') != -1:
                continue
            current_path = dirpath + '/' + filename
            # open Solidity file
            with open(current_path, 'r') as file:
                # from '../' to './' -- NOTE(review): assumes root_path starts
                # with '..'; with any other prefix this silently mangles the
                # stored path.  Confirm against how the script is invoked.
                current_path = current_path[1:]
                # file-level import statements are collected
                imports = []
                # flag for lines belonging to entity declarations
                lineIsEntity = False
                compiler_version, entity_type = '', ''
                # Line-by-line state machine: accumulate entity lines until a
                # line starting with '}' closes the declaration.
                for line in file:
                    if lineIsEntity:
                        entityLines.append(line)
                        # if end of entity declaration (closing brace in column 0)
                        if line.find('}') == 0:
                            # parse entity
                            entity_data = parseEntity(
                                entityLines,
                                entity_type,
                                compiler_version
                            )
                            # file-level imports count as dependencies too
                            entity_data['dependencies'].update(imports)
                            entity_data['dependencies'] = list(
                                entity_data['dependencies']
                            )
                            entity_data['dependencies'].sort()
                            # store entity metadata
                            metadata[entity_data['name']] = entity_data
                            filepaths[entity_data['name']] = current_path
                            # reset variables
                            lineIsEntity, entity_type = False, ''
                        continue
                    if line.find('library') == 0: entity_type = 'library'
                    elif line.find('contract') == 0: entity_type = 'contract'
                    elif line.find('interface') == 0: entity_type = 'interface'
                    # get solidity compiler version
                    elif line.find('pragma') == 0:
                        compiler_version = line.split()[-1]
                        compiler_version = compiler_version.split(';')[0]
                    # parse imports (keep just the imported file's base name)
                    elif line.find('import') == 0:
                        split_line = line.split('/')
                        current_import = split_line[-1]
                        current_import = current_import.split('.sol')[0]
                        if current_import in imports:
                            print('duplicate import:', current_import)
                            continue
                        imports.append(current_import)
                    # if line is an entity declaration, start collecting entity
                    # lines
                    if entity_type:
                        lineIsEntity = True
                        entityLines = [line]
                        continue
    return metadata, filepaths
# Guarded entry point: running `main()` unconditionally made importing this
# module execute the whole script as a side effect.
if __name__ == '__main__':
    main()
|
IgorPetra/pyspeckit | pyspeckit/spectrum/models/formaldehyde.py | <gh_stars>0
"""
===========================
Formaldehyde cm-line fitter
===========================
This is a formaldehyde 1_11-1_10 / 2_12-2_11 fitter. It includes hyperfine
components of the formaldehyde lines and has both LTE and RADEX LVG based
models
Module API
^^^^^^^^^^
"""
from __future__ import print_function
import numpy as np
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
import matplotlib.cbook as mpcb
import copy
from . import hyperfine
from ...specwarnings import warn
from six.moves import xrange
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
# Hyperfine component names for the 1_11-1_10 ("oneone") and 2_12-2_11
# ("twotwo") cm transitions.  (An earlier assignment listing just
# ['oneone','twotwo','threethree'] was dead code -- immediately overwritten by
# this list -- and has been removed.)
line_names = ['oneone_f10', 'oneone_f01', 'oneone_f22', 'oneone_f21',
              'oneone_f12', 'oneone_f11', 'twotwo_f11', 'twotwo_f12',
              'twotwo_f21', 'twotwo_f32', 'twotwo_f33', 'twotwo_f22',
              'twotwo_f23']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
# Rest frequency (Hz) of each transition centroid.
central_freq_dict = {
    'oneone': 4.82965996e9,
    'twotwo': 14.48847881e9,
    'threethree': 28.97480e9,
}
# Relative strengths of the hyperfine components.
line_strength_dict={
    'oneone_f10': 4.,
    'oneone_f01': 4.,
    'oneone_f22': 15.,
    'oneone_f21': 5.,
    'oneone_f12': 5.,
    'oneone_f11': 3.,
    'twotwo_f11': 15.,
    'twotwo_f12': 5.,
    'twotwo_f21': 5.,
    'twotwo_f32': 5.19,
    'twotwo_f33': 41.48,
    'twotwo_f22': 23.15,
    'twotwo_f23': 5.19,
    'threethree_f22':1,
    'threethree_f44':1,
    'threethree_f33':1,
}
# Per-component normalisation (total degeneracy of the parent transition).
relative_strength_total_degeneracy={
    'oneone_f10': 36.,
    'oneone_f01': 36.,
    'oneone_f22': 36.,
    'oneone_f21': 36.,
    'oneone_f12': 36.,
    'oneone_f11': 36.,
    'twotwo_f11': 100.01,
    'twotwo_f12': 100.01,
    'twotwo_f21': 100.01,
    'twotwo_f32': 100.01,
    'twotwo_f33': 100.01,
    'twotwo_f22': 100.01,
    'twotwo_f23': 100.01,
    'threethree_f22':3.0,
    'threethree_f44':3.0,
    'threethree_f33':3.0,
}
# Absolute hyperfine component frequencies (Hz): centroid plus measured offset.
hf_freq_dict={
    'oneone_f10':4.82965996e9 - 18.53e3,
    'oneone_f01':4.82965996e9 - 1.34e3,
    'oneone_f22':4.82965996e9 - 0.35e3,
    'oneone_f21':4.82965996e9 + 4.05e3,
    'oneone_f12':4.82965996e9 + 6.48e3,
    'oneone_f11':4.82965996e9 + 11.08e3,
    'twotwo_f11':14.48847881e9 - 19.97e3,
    'twotwo_f12':14.48847881e9 - 7.03e3,
    'twotwo_f21':14.48847881e9 - 2.20e3,
    'twotwo_f32':14.48847881e9 + 0.12e3,
    'twotwo_f33':14.48847881e9 + 0.89e3,
    'twotwo_f22':14.48847881e9 + 10.74e3,
    'twotwo_f23':14.48847881e9 + 11.51e3,
    'threethree_f22':28.97478e9,
    'threethree_f44':28.97480e9,
    'threethree_f33':28.97481e9,
}
# Combined lookup: hyperfine components plus transition centroids.
freq_dict = copy.copy(hf_freq_dict)
freq_dict.update(central_freq_dict)
# Einstein A coefficients per transition...
aval_dict = {
    'oneone': 10**-8.44801, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
    'twotwo': 10**-7.49373, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
    'threethree': 10**-6.89179, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
}
# ...and per hyperfine component.
hf_aval_dict={
    'oneone_f10':10**-8.92509,
    'oneone_f01':10**-8.44797,
    'oneone_f22':10**-8.57294,
    'oneone_f21':10**-9.05004,
    'oneone_f12':10**-8.82819,
    'oneone_f11':10**-9.05009,
    'twotwo_f11':10**-7.61876,
    'twotwo_f12':10**-8.09586,
    'twotwo_f21':10**-8.31771,
    'twotwo_f32':10**-8.44804,
    'twotwo_f33':10**-7.54494,
    'twotwo_f22':10**-7.65221,
    'twotwo_f23':10**-8.30191,
    'threethree_f22':10**-6.94294,
    'threethree_f44':10**-6.91981,
    'threethree_f33':10**-6.96736,
}
# None of these K-doublet transitions are ortho-H2CO.
ortho_dict = {
    'oneone': False,
    'twotwo': False,
    'threethree': False,
}
# Ka quantum numbers belonging to ortho (multiples of 3) vs. para species.
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
# Velocity offsets (km/s) of each hyperfine component from its transition
# centroid; sign is opposite the frequency offset.  (A frequency-derived first
# definition of this dict was dead code -- immediately overwritten -- and has
# been removed.)
# NOTE(review): the 'threethree_*' entries are absolute frequencies in Hz, not
# km/s offsets like the rest.  They are unused (absent from line_names) but
# look like placeholders -- confirm before ever enabling threethree lines.
voff_lines_dict={ # opposite signs of freq offset
    'oneone_f10': + 18.53e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f01': + 1.34e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f22': + 0.35e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f21': - 4.05e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f12': - 6.48e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f11': - 11.08e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'twotwo_f11': + 19.97e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f12': + 7.03e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f21': + 2.20e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f32': - 0.12e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f33': - 0.89e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f22': - 10.74e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f23': - 11.51e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'threethree_f22':28.97478e9,
    'threethree_f44':28.97480e9,
    'threethree_f33':28.97481e9,
}
# Build the shared hyperfine line model from the tables above; every model
# function in this module delegates its spectral shape to this object.
formaldehyde_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
                                             freq_dict, line_strength_dict,
                                             relative_strength_total_degeneracy)
# Convenience fitter aliases: plain fitter, fitter with a constant baseline
# (vheight), and fitter with an explicit background-temperature term.
formaldehyde_vtau_fitter = formaldehyde_vtau.fitter
formaldehyde_vtau_vheight_fitter = formaldehyde_vtau.vheight_fitter
formaldehyde_vtau_tbg_fitter = formaldehyde_vtau.background_fitter
def formaldehyde_radex(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
                       grid_vwidth=1.0, grid_vwidth_scale=False, texgrid=None,
                       taugrid=None, hdr=None, path_to_texgrid='',
                       path_to_taugrid='', temperature_gridnumber=3,
                       debug=False, verbose=False, **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    grid_vwidth_scale is True or False: False for LVG, True for Sphere
    """
    if texgrid is None and taugrid is None:
        if path_to_texgrid == '' or path_to_taugrid == '':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            yinds,xinds = np.indices(taugrid[0].shape[1:])
            densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
            columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
            minfreq = (4.8,)
            maxfreq = (5.0,)
    elif len(taugrid) == len(texgrid) and hdr is not None:
        minfreq,maxfreq,texgrid = zip(*texgrid)
        minfreq,maxfreq,taugrid = zip(*taugrid)
        yinds,xinds = np.indices(taugrid[0].shape[1:])
        densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
        columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
    else:
        # Previously a bare `raise Exception` -- ValueError keeps compatibility
        # with any caller catching Exception while naming the problem.
        raise ValueError("Must pass either paths to grids, or matched "
                         "(minfreq, maxfreq, grid) tuples plus a header.")
    # Convert X-units to frequency in GHz
    xarr = xarr.as_unit('Hz', quiet=True)
    # Fractional grid indices of the requested density/column.
    gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
    gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
    if np.isnan(gridval1) or np.isnan(gridval2):
        raise ValueError("Invalid column/density")
    if scipyOK:
        # BUGFIX: slice bounds must be ints (np.floor returns float) and the
        # multi-axis index must be a tuple -- indexing with a list of slices
        # is an error in modern numpy.
        slices = (temperature_gridnumber,) + tuple(
            slice(int(np.floor(gv)), int(np.floor(gv)) + 2)
            for gv in (gridval2, gridval1))
        # Bilinear interpolation within the extracted 2x2 sub-grid.
        tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
        tex = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in texgrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")
    if verbose:
        # BUGFIX: tau/tex are lists (one entry per grid); formatting them with
        # %g raised TypeError, so they are printed with %s.
        print("density %20.12g column %20.12g: tau %s tex %s" % (density, column, tau, tex))
    if debug:
        import pdb; pdb.set_trace()
    # Sum the per-transition spectra, each masked to its own frequency window.
    spec = np.sum([(formaldehyde_vtau(xarr,Tex=float(tex[ii]),tau=float(tau[ii]),xoff_v=xoff_v,width=width, **kwargs)
                    * (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
                  axis=0)
    return spec
def formaldehyde_radex_orthopara_temp(xarr, density=4, column=13,
                                      orthopara=1.0, temperature=15.0,
                                      xoff_v=0.0, width=1.0,
                                      Tbackground1=2.73,
                                      Tbackground2=2.73,
                                      grid_vwidth=1.0,
                                      grid_vwidth_scale=False, texgrid=None,
                                      taugrid=None, hdr=None,
                                      path_to_texgrid='', path_to_taugrid='',
                                      debug=False, verbose=False,
                                      getpars=False, **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    grid_vwidth_scale is True or False: False for LVG, True for Sphere

    If getpars is True, return the interpolated (tau, tex) instead of a
    spectrum.
    """
    if texgrid is None and taugrid is None:
        if path_to_texgrid == '' or path_to_taugrid=='':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            minfreq = (4.8,)
            maxfreq = (5.0,)
    elif len(taugrid)==len(texgrid) and hdr is not None:
        minfreq,maxfreq,texgrid = zip(*texgrid)
        minfreq,maxfreq,taugrid = zip(*taugrid)
    else:
        # Previously a bare `raise Exception`.
        raise ValueError("Must pass either paths to grids, or matched "
                         "(minfreq, maxfreq, grid) tuples plus a header.")
    # Reconstruct the four grid axes (log density, log column, temperature,
    # log ortho/para ratio) from the WCS-style header keywords.
    densityarr = (np.arange(taugrid[0].shape[3])+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
    columnarr = (np.arange(taugrid[0].shape[2])+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
    temparr = (np.arange(taugrid[0].shape[1])+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # temperature
    oprarr = (np.arange(taugrid[0].shape[0])+hdr['CRPIX4']-1)*hdr['CDELT4']+hdr['CRVAL4'] # log ortho/para ratio
    # Fractional indices of the requested parameters along each axis.
    gridval1 = np.interp(density, densityarr, np.arange(len(densityarr)))
    gridval2 = np.interp(column, columnarr, np.arange(len(columnarr)))
    gridval3 = np.interp(temperature, temparr, np.arange(len(temparr)))
    gridval4 = np.interp(orthopara, oprarr, np.arange(len(oprarr)))
    if np.isnan(gridval1) or np.isnan(gridval2):
        raise ValueError("Invalid column/density")
    if scipyOK:
        # BUGFIX: index with a *tuple* of slices -- indexing an ndarray with a
        # list of slices is an error in modern numpy.
        slices = tuple(slice(int(np.floor(gv)), int(np.floor(gv)) + 2)
                       for gv in (gridval4, gridval3, gridval2, gridval1))
        # Quadrilinear interpolation inside the extracted 2^4 hypercube.
        tau = [scipy.ndimage.map_coordinates(tg[slices],
                                             np.array([[gridval4 % 1],
                                                       [gridval3 % 1],
                                                       [gridval2 % 1],
                                                       [gridval1 % 1]]),
                                             order=1, prefilter=False)
               for tg in taugrid]
        tex = [scipy.ndimage.map_coordinates(tg[slices],
                                             np.array([[gridval4 % 1],
                                                       [gridval3 % 1],
                                                       [gridval2 % 1],
                                                       [gridval1 % 1]]),
                                             order=1, prefilter=False)
               for tg in texgrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")
    # there can be different background temperatures at each frequency
    tbg = [Tbackground1,Tbackground2]
    if verbose:
        print("density %20.12g column: %20.12g temperature: %20.12g opr: %20.12g xoff_v: %20.12g width: %20.12g" % (density, column, temperature, orthopara, xoff_v, width))
        print("tau: ",tau," tex: ",tex)
        print("minfreq: ",minfreq," maxfreq: ",maxfreq)
        print("tbg: ",tbg)
    if debug > 1:
        import pdb; pdb.set_trace()
    if getpars:
        return tau,tex
    # Sum the per-transition spectra, each masked to its own frequency window.
    spec = np.sum([(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
                                      Tex=float(tex[ii]), tau=float(tau[ii]),
                                      Tbackground=tbg[ii], xoff_v=xoff_v,
                                      width=width, **kwargs)
                    * (xarr.as_unit('GHz')>minfreq[ii])
                    * (xarr.as_unit('GHz')<maxfreq[ii]))
                   for ii in xrange(len(tex))],
                  axis=0)
    return spec
def formaldehyde(xarr, amp=1.0, xoff_v=0.0, width=1.0,
        return_hyperfine_components=False, texscale=0.01, tau=0.01, **kwargs):
    """
    Model Formaldehyde spectrum from simple gaussian-style parameters.

    `amp` is essentially arbitrary: it is interpreted as Tex (scaled by
    `texscale`) at the fixed default tau, and the resulting spectrum is then
    rescaled so its peak equals `amp`.
    """
    model_spectrum = formaldehyde_vtau(xarr, Tex=amp*texscale, tau=tau,
                                       xoff_v=xoff_v, width=width,
                                       return_tau=True,
                                       return_hyperfine_components=return_hyperfine_components,
                                       **kwargs)
    magnitudes = np.abs(model_spectrum)
    # Peak over the summed components when they are returned separately,
    # otherwise over the combined spectrum.
    peak = (magnitudes.squeeze().sum(axis=0).max()
            if return_hyperfine_components
            else magnitudes.max())
    if peak > 0:
        model_spectrum *= amp / peak
    return model_spectrum
def formaldehyde_pyradex(xarr, density=4, column=13, temperature=20,
                         xoff_v=0.0, opr=1.0, width=1.0, tbackground=2.73,
                         grid_vwidth=1.0, debug=False, verbose=False,
                         **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)

    .. warning:: Not implemented; always raises NotImplementedError.
    """
    # The draft that used to follow the raise referenced undefined names
    # (tex, tau, minfreq, maxfreq) and could never run; it has been removed.
    # A working implementation would build a pyradex.Radex model
    # (molecule='oh2co-h2') and sum per-transition spectra the same way
    # formaldehyde_radex does.
    raise NotImplementedError("Not done yet.")
class formaldehyde_model(model.SpectralModel):
    """SpectralModel specialisation carrying a (stub) line-integral helper."""
    def formaldehyde_integral(self, modelpars, linename='oneone'):
        """
        Return the integral of the individual components (ignoring height)
        """
        raise NotImplementedError("Not implemented, but the integral is just amplitude * width * sqrt(2*pi)")
        # NOTE(review): everything below the raise is unreachable dead code,
        # preserved as a sketch of the intended implementation.
        # produced by directly computing the integral of gaussians and formaldehydeians as a function of
        # line width and then fitting that with a broken logarithmic power law
        # The errors are <0.5% for all widths
        formaldehyde_to_gaussian_ratio_coefs = {
            'lt0.1_oneone': np.array([ -5.784020,-40.058798,-111.172706,-154.256411,-106.593122,-28.933119]),
            'gt0.1_oneone': np.array([ 0.038548, -0.071162, -0.045710, 0.183828, -0.145429, 0.040039]),
            'lt0.1_twotwo': np.array([ 1.156561, 6.638570, 11.782065, -0.429536,-24.860297,-27.902274, -9.510288]),
            'gt0.1_twotwo': np.array([ -0.090646, 0.078204, 0.123181, -0.175590, 0.089506, -0.034687, 0.008676]),
        }
        integ = 0
        if len(modelpars) % 3 == 0:
            # NOTE(review): len(modelpars)/3 yields a float on Python 3; were
            # this code re-enabled it would need len(modelpars)//3.
            for amp,cen,width in np.reshape(modelpars,[len(modelpars)/3,3]):
                gaussint = amp*width*np.sqrt(2.0*np.pi)
                cftype = "gt0.1_"+linename if width > 0.1 else "lt0.1_"+linename
                correction_factor = 10**np.polyval(formaldehyde_to_gaussian_ratio_coefs[cftype], np.log10(width) )
                # debug statement print("Two components of the integral: amp %g, width %g, gaussint %g, correction_factor %g " % (amp,width,gaussint,correction_factor))
                integ += gaussint*correction_factor
        return integ
# 3-parameter (amplitude, center, width) fitter built on the simple
# gaussian-parameter `formaldehyde` model; width is constrained positive.
formaldehyde_fitter = formaldehyde_model(formaldehyde, 3,
        parnames=['amp','center','width'],
        parlimited=[(False,False),(False,False), (True,False)],
        parlimits=[(0,0), (0,0), (0,0)],
        shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
        fitunit='Hz' )
# Same model with an additional constant-baseline ("height") parameter.
formaldehyde_vheight_fitter = formaldehyde_model(fitter.vheightmodel(formaldehyde), 4,
        parnames=['height','amp','center','width'],
        parlimited=[(False,False),(False,False),(False,False), (True,False)],
        parlimits=[(0,0), (0,0), (0,0), (0,0)],
        shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
        fitunit='Hz' )
# Create a tau-only fit:
def formaldehyde_radex_tau(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
                           grid_vwidth=1.0, grid_vwidth_scale=False,
                           taugrid=None, hdr=None, path_to_taugrid='',
                           temperature_gridnumber=3, debug=False,
                           verbose=False, return_hyperfine_components=False,
                           **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    * uses hyperfine components
    * assumes *tau* varies but *tex* does not!

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    grid_vwidth_scale is True or False: False for LVG, True for Sphere
    """
    if verbose:
        print("Parameters: dens=%f, column=%f, xoff=%f, width=%f" % (density, column, xoff_v, width))
    if taugrid is None:
        if path_to_taugrid=='':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            yinds,xinds = np.indices(taugrid[0].shape[1:])
            densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
            columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
            minfreq = (4.8,)
            maxfreq = (5.0,)
    elif hdr is not None:
        minfreq,maxfreq,taugrid = zip(*taugrid)
        yinds,xinds = np.indices(taugrid[0].shape[1:])
        densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
        columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
    else:
        # Previously a bare `raise Exception`.
        raise ValueError("Must pass either a path to a tau grid, or "
                         "(minfreq, maxfreq, grid) tuples plus a header.")
    # Convert X-units to frequency in GHz
    xarr = xarr.as_unit('Hz', quiet=True)
    # Fractional grid indices of the requested density/column.
    gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
    gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
    if np.isnan(gridval1) or np.isnan(gridval2):
        raise ValueError("Invalid column/density")
    if scipyOK:
        # BUGFIX: slice bounds must be ints (np.floor returns float) and the
        # multi-axis index must be a tuple -- indexing with a list of slices
        # is an error in modern numpy.
        slices = (temperature_gridnumber,) + tuple(
            slice(int(np.floor(gv)), int(np.floor(gv)) + 2)
            for gv in (gridval2, gridval1))
        tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")
    # let the hyperfine module determine the hyperfine components, and pass all of them here
    spec_components = [(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
                                          tau=float(tau[ii]), xoff_v=xoff_v, width=width,
                                          return_tau=True, return_hyperfine_components=True, **kwargs) *
                        (xarr.as_unit('GHz')>minfreq[ii]) *
                        (xarr.as_unit('GHz')<maxfreq[ii]))
                       for ii in xrange(len(tau))]
    # get an array of [n_lines, n_hyperfine, len(xarr)]
    if return_hyperfine_components:
        return np.array(spec_components).sum(axis=0)
    else:
        return np.sum(spec_components, axis=0).sum(axis=0)
# Optional pymodelfit adapters: defined only when pymodelfit is installed.
try:
    import pymodelfit

    class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
        # Thin wrapper exposing the gaussian-parameter model to pymodelfit.
        def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
            return formaldehyde(x,
                                amp=amp0,
                                xoff_v=xoff_v0,width=width0)

    class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
        # Wrapper for the Tex/tau parameterisation.
        def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
            return formaldehyde_vtau(x,
                                     Tex=Tex0, tau=tau0,
                                     xoff_v=xoff_v0,width=width0)
except ImportError:
    pass
|
leontrolski/immerframe | immerframe/__init__.py | from copy import copy
from dataclasses import dataclass, fields, is_dataclass
from typing import TypeVar, Any, Generic, List, Optional, Tuple, Union, cast
T = TypeVar("T")
class Empty:
    """Sentinel type; its single instance `empty` marks "no value supplied"."""
    def __repr__(self) -> str:
        return "<empty>"
# Module-wide sentinel instance, compared by identity throughout.
empty = Empty()
class ImmerframeError(RuntimeError):
    """Base class for all errors raised by immerframe."""
    pass
class NoAttributeToCallError(ImmerframeError):
    """Raised when a Proxy is called without a preceding attribute access."""
    pass
class ProduceError(ImmerframeError):
    """Raised by `produce` when a recorded path ends without an effect."""
    pass
class HandleTypeError(ImmerframeError):
    """Raised when a value's type cannot be handled."""
    pass
@dataclass(frozen=True)
class El:
    """One recorded step of a Proxy access path."""
    type: str           # getattr|getitem|setattr|setitem|call
    key: Any = empty    # attribute name / subscript key / called method name
    value: Any = empty  # value assigned by a setattr/setitem step
    args: Any = empty   # positional args of a recorded method call
    kwargs: Any = empty # keyword args of a recorded method call
class Path(List[El]):
    """A list of El steps describing one mutation, plus an optional pending
    binary operation (e.g. ``__add__``) to apply to the addressed value."""
    def __init__(self) -> None:
        # Dunder name recorded by Proxy.__add__ etc., or the empty sentinel.
        self.op: Union[str, Empty] = empty
        # Right-hand operand of that operator, or the empty sentinel.
        self.other: Any = empty
        super().__init__()
class Proxy(Generic[T]):
    """
    Records every attribute/item access, assignment, method call and
    arithmetic operation performed on it as `Path`s of `El` steps; `produce`
    later replays those paths against a real object copy-on-write style.

    Can be used explicitly (`produce(proxy, obj)`) or as a context manager,
    in which case the second yielded value is mutated in place on exit to
    hold the produced result.
    """

    def __init__(self, value: T = None) -> None:
        self._value = value
        # Only populated when a concrete value is supplied; this is the object
        # mutated in place by the context-manager protocol.
        self._return_value: T = empty
        if value is not None:
            self._return_value = copy(self._value)
        self._paths: List[Path] = []
        self._current_path = Path()

    def __repr__(self) -> str:
        return f"<Proxy of: {self._value}>"

    def __enter__(self) -> Tuple[T, T]:  # the typing here is a lie on-purpose
        return cast(T, self), self._return_value

    def __exit__(self, type, value, tb):
        final_value = produce(self)
        v = self._return_value
        # Mutate the yielded container/object in place so references held by
        # the caller see the produced result.
        if isinstance(v, list):
            v.clear()
            v.extend(final_value)
        elif isinstance(v, (dict, set)):
            v.clear()
            v.update(final_value)
        elif is_dataclass(v):
            for field in fields(v):
                value = getattr(final_value, field.name)
                setattr(v, field.name, value)
        else:  # assume an attrs-decorated class
            # BUGFIX: `attr` was referenced here but never imported at module
            # level, so this branch always raised NameError.  Import lazily to
            # keep attrs an optional dependency.
            import attr
            for field in attr.fields(v.__class__):
                value = getattr(final_value, field.name)
                setattr(v, field.name, value)

    def _terminate_current_path(self) -> None:
        # A complete mutation has been recorded; archive it and start afresh.
        self._paths.append(self._current_path)
        self._current_path = Path()

    def __getattr__(self, key: str) -> "Proxy":
        self._current_path.append(El(type="getattr", key=key))
        return self

    def __getitem__(self, key: Any) -> "Proxy":
        self._current_path.append(El(type="getitem", key=key))
        return self

    def __setattr__(self, key: str, value: Any) -> None:
        # Internal bookkeeping attributes bypass path recording.
        if key in {
            "_value",
            "_return_value",
            "_paths",
            "_current_path",
            "_terminate_current_path",
        }:
            self.__dict__[key] = value
            return
        self._current_path.append(El(type="getattr", key=key))
        self._current_path.append(El(type="setattr", value=value))
        self._terminate_current_path()

    def __setitem__(self, key: Any, value: Any) -> None:
        self._current_path.append(El(type="getitem", key=key))
        self._current_path.append(El(type="setitem", value=value))
        self._terminate_current_path()

    def __call__(self, *args: Any, **kwargs: Any) -> None:
        if not self._current_path:
            raise NoAttributeToCallError("cannot call an unmodified Proxy object")
        prev_path = self._current_path.pop()
        if prev_path.type != "getattr":
            raise NoAttributeToCallError("can only call methods on known attributes")
        el = El(type="call", key=prev_path.key, args=args, kwargs=kwargs)
        self._current_path.append(el)
        self._terminate_current_path()

    # TODO: fill in all the magic methods
    def _record_augmented_op(self, op: str, other: Any) -> None:
        # Shared implementation of the arithmetic dunders (previously four
        # copy-pasted bodies): drop the access El just recorded for the left
        # operand, then remember the operator/operand for produce() to apply.
        self._current_path.pop()
        self._current_path.op = op
        self._current_path.other = other

    def __add__(self, other: Any) -> None:
        self._record_augmented_op("__add__", other)

    def __sub__(self, other: Any) -> None:
        self._record_augmented_op("__sub__", other)

    def __mul__(self, other: Any) -> None:
        self._record_augmented_op("__mul__", other)

    def __truediv__(self, other: Any) -> None:
        self._record_augmented_op("__truediv__", other)
def _safe_getitem(obj: Any, key: Any) -> Any:
try:
return obj[key]
except (KeyError, IndexError):
return empty
def _get(obj: Any, el: El) -> Any:
    """Apply a single path element (attribute or item access) to `obj`."""
    if el.type == "getattr":
        accessor = getattr
    elif el.type == "getitem":
        accessor = _safe_getitem
    else:
        # Preserve the original dict-dispatch failure mode.
        raise KeyError(el.type)
    return accessor(obj, el.key)
def produce(proxy: Proxy, obj: Optional[T] = None) -> T:
    """
    Replay every mutation recorded on `proxy` against `obj` (defaulting to the
    proxy's wrapped value), returning a new object and leaving `obj` untouched.
    Each path is walked to its tip, the final effect is applied to a shallow
    copy, and the change is propagated back up via copy-and-set at each level.
    """
    if obj is None:
        obj = proxy._value
    for path_ in proxy._paths:
        op, other = path_.op, path_.other
        # Split off the effect step; `path` is the pure access prefix.
        *path, final = path_
        # `chain` holds every intermediate object reached along the prefix,
        # starting from the root.
        chain = [obj]
        for el in path:
            *_, tip = chain
            chain.append(_get(tip, el))
        tip = chain.pop()
        if final.type in {"setattr", "setitem"}:
            if op is empty:
                value = final.value
            else:
                # Augmented assignment (e.g. +=): apply the recorded operator
                # to the current value.
                value = getattr(tip, op)(other)
        elif final.type == "call":
            # shallow copy, then run whatever mutatey function
            value = copy(tip)
            getattr(value, final.key)(*final.args, **final.kwargs)
        else:
            raise ProduceError("final path appears no have no effect")
        # Bubble the new value back to the root, shallow-copying each ancestor
        # so untouched siblings keep sharing structure with the original.
        for inner_obj, el in reversed(list(zip(chain, path))):
            value = _copy_and_set(inner_obj, el, value)
        obj = value
    return obj
def _copy_and_set(obj: T, el: El, value: Any) -> T:
    """Return a shallow copy of `obj` with the slot named by `el.key` replaced."""
    duplicate = copy(obj)
    if not isinstance(obj, (dict, list)):
        setattr(duplicate, el.key, value)
    else:
        duplicate[el.key] = value
    return duplicate
|
leontrolski/immerframe | setup.py | <reponame>leontrolski/immerframe<filename>setup.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Trove classifiers describing maturity, audience, license and supported
# Python versions (see https://pypi.org/classifiers/).
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3.2",
    "Programming Language :: Python :: 3.3",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Topic :: Software Development",
]
# Standard setuptools metadata; the "testing" extra (pip install
# immerframe[testing]) pulls in pytest.
setup(
    name="immerframe",
    version="0.1.0",
    description="creates the next immutable object by simply "
    "modifying the current one",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    classifiers=classifiers,
    keywords=[],
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/leontrolski/immerframe",
    license="MIT License",
    packages=find_packages(),
    extras_require=dict(testing=["pytest",],),
    install_requires=[],
    test_suite="pytest",
)
leontrolski/immerframe | tests/test_immerframe.py | from dataclasses import dataclass
import attr
from immerframe import Proxy, produce
# Explicit produce(proxy, value): item assignment plus a mutating call on a list.
def test_list():
    l = [1, 2, 3, 4]
    proxy = Proxy()
    proxy[1] = "foo"
    proxy.pop()
    new_l = produce(proxy, l)
    assert new_l == [1, "foo", 3]
    assert l == [1, 2, 3, 4]
# Mutating method call (add) on a set; the original set is untouched.
def test_set():
    l = {1, 2, 3}
    proxy = Proxy()
    proxy.add("foo")
    new_l = produce(proxy, l)
    assert new_l == {1, 2, 3, "foo"}
    assert l == {1, 2, 3}
# Plain and augmented (+=) item assignment on a dict.
def test_dict():
    d = {"foo": 1, "bar": 2}
    proxy = Proxy()
    proxy["foo"] = 100
    proxy["bar"] += 1
    new_d = produce(proxy, d)
    assert new_d == {"foo": 100, "bar": 3}
    assert d == {"foo": 1, "bar": 2}
# Attribute assignment on a dataclass instance.
def test_dataclass():
    @dataclass
    class Cat:
        name: str
    cat = Cat(name="Mary")
    proxy = Proxy()
    proxy.name = "Sam"
    new_cat = produce(proxy, cat)
    assert new_cat == Cat(name="Sam")
# Attribute assignment on an attrs instance; the original is untouched.
def test_attr():
    @attr.s(auto_attribs=True)
    class Dog:
        bark: str
    dog = Dog(bark="woof")
    proxy = Proxy()
    proxy.bark = "ruff"
    new_dog = produce(proxy, dog)
    assert new_dog == Dog(bark="ruff")
    assert dog == Dog(bark="woof")
# Mixed nested mutations: augmented assign deep in a structure, pop, new key.
def test_nested():
    @dataclass
    class Ant:
        age: int
    nested = {
        "foo": [Ant(age=2), "bar",],
    }
    proxy = Proxy()
    proxy["foo"][0].age += 1
    proxy["foo"].pop()
    proxy["qux"] = 99
    new_nested = produce(proxy, nested)
    assert new_nested == {
        "foo": [Ant(age=3),],
        "qux": 99,
    }
    assert nested == {
        "foo": [Ant(age=2), "bar",],
    }
# Untouched members of the produced structure share identity with the original.
def test_sharing():
    d = {"foo": 1}
    l = [d]
    proxy = Proxy()
    proxy.append(100)
    new_l = produce(proxy, l)
    assert new_l == [d, 100]
    assert new_l[0] is d
    assert l == [d]
# A value created via the proxy can itself receive further recorded mutations.
def test_can_operate_on_proxy_made_objects():
    l = [1, 2, 3]
    proxy = Proxy()
    proxy[1] = []
    proxy[1].append(4)
    proxy[1].append(5)
    new_l = produce(proxy, l)
    assert new_l == [1, [4, 5], 3]
# Replaying the same proxy twice yields the same result (paths are not consumed).
def test_use_proxy_twice():
    l = [1, 2, 3]
    proxy = Proxy()
    proxy[1] = "foo"
    new_l = produce(proxy, l)
    assert new_l == [1, "foo", 3]
    new_l = produce(proxy, l)
    assert new_l == [1, "foo", 3]
# Wrapping the value in the Proxy constructor instead of passing it to produce.
def test_use_value_arg():
    l = [1, 2, 3, 4]
    proxy = Proxy(l)
    proxy[1] = "foo"
    proxy.pop()
    new_l = produce(proxy)
    assert new_l == [1, "foo", 3]
    assert l == [1, 2, 3, 4]
def test_context_list():
l = [1, 2, 3, 4]
with Proxy(l) as (_, new_l):
_[1] = "foo"
_.pop()
assert new_l == [1, "foo", 3]
assert l == [1, 2, 3, 4]
def test_context_set():
l = {1, 2}
with Proxy(l) as (_, new_l):
_.add("foo")
_.remove(2)
assert new_l == {1, "foo"}
assert l == {1, 2}
def test_context_dict():
l = {1: 2}
with Proxy(l) as (_, new_l):
_[3] = 4
assert l == {1: 2}
assert new_l == {1: 2, 3: 4}
def test_context_attr():
@attr.s(auto_attribs=True)
class Dog:
bark: str
dog = Dog(bark="woof")
with Proxy(dog) as (_, new_dog):
_.bark = "baa"
assert new_dog == Dog("baa")
assert dog == Dog("woof")
def test_context_dataclass():
@dataclass
class Dog:
bark: str
dog = Dog(bark="woof")
with Proxy(dog) as (_, new_dog):
_.bark = "baa"
assert new_dog == Dog("baa")
assert dog == Dog("woof")
def test_context_nested() -> None:
    """Nested drafting: changed members are copied, unchanged members are
    shared by identity with the source."""

    @dataclass
    class Ant:
        age: int

    ant_10 = Ant(age=10)
    ant_20 = Ant(age=20)
    source = {
        "ants": [ant_10, ant_20, None],
    }
    with Proxy(source) as (draft, result):
        draft["ants"][0].age += 1
        draft["ants"].pop()
        draft["foo"] = 99
    assert source == {"ants": [ant_10, ant_20, None]}
    assert result == {"ants": [Ant(age=11), ant_20], "foo": 99}
    assert result["ants"][1] is ant_20
def test_context_nested_and_loopy():
    """Chain three produce passes; each `with` drafts on the previous result.

    Note the deliberate rebinding of ``(p, new_nested)`` on every ``with``:
    the loops below iterate the *newly yielded* value while mutating the
    draft of the same pass.
    """
    @dataclass
    class Ant:
        age: int
        is_young: bool = False
    ant_10 = Ant(age=10)
    ant_20 = Ant(age=20)
    nested = {
        "ants": [ant_10, ant_20, None],
    }
    # Pass 1: bump first ant's age, drop the trailing None, add "foo".
    with Proxy(nested) as (p, new_nested):
        p["ants"][0].age += 1
        p["ants"].pop()
        p["foo"] = 99
    # Pass 2: increment every top-level int value (only "foo": 99 -> 100).
    with Proxy(new_nested) as (p, new_nested):
        for k in new_nested:
            if isinstance(new_nested[k], int):
                # note about setting values over and over
                p[k] += 1
    # Pass 3: flag ants younger than 15 (skipping any None placeholders).
    with Proxy(new_nested) as (p, new_nested):
        for i, n in enumerate(new_nested["ants"]):
            if n is None:
                continue
            if n.age < 15:
                p["ants"][i].is_young = True
    # The original input is never mutated by any pass.
    assert nested == {
        "ants": [ant_10, ant_20, None],
    }
    assert new_nested == {
        "ants": [Ant(age=11, is_young=True), ant_20],
        "foo": 100,
    }
    # The untouched ant is still the very same object as in the input.
    assert new_nested["ants"][1] is ant_20
|
leontrolski/immerframe | tests/test_core.py | import pytest
from immerframe import El, Path, Proxy, produce, NoAttributeToCallError
# TODO: split up these tests
def test_current_path():
    """Exercise Proxy's internal operation recording.

    Pending (uncommitted) accesses accumulate in ``_current_path``;
    assignments and calls close the pending path out into ``_paths`` as a
    list of El records.
    """
    # Bare attribute access: recorded but not yet committed to _paths.
    proxy = Proxy()
    proxy.foo
    assert proxy._current_path == [
        El(type="getattr", key="foo"),
    ]
    assert proxy._paths == []
    # Bare item access behaves the same way.
    proxy = Proxy()
    proxy[0]
    assert proxy._current_path == [
        El(type="getitem", key=0),
    ]
    assert proxy._paths == []
    # Attribute assignment commits a getattr/setattr pair.
    proxy = Proxy()
    proxy.foo = 42
    assert proxy._current_path == []
    assert proxy._paths == [
        [El(type="getattr", key="foo"), El(type="setattr", value=42),]
    ]
    # Item assignment commits a getitem/setitem pair.
    proxy = Proxy()
    proxy[0] = 42
    assert proxy._current_path == []
    assert proxy._paths == [[El(type="getitem", key=0), El(type="setitem", value=42),]]
    # Calling the proxy itself (no attribute to call) is rejected.
    proxy = Proxy()
    with pytest.raises(NoAttributeToCallError):
        proxy()
    proxy = Proxy()
    with pytest.raises(NoAttributeToCallError):
        proxy[0]()
    # Method calls record a single "call" El with args and kwargs.
    proxy = Proxy()
    proxy.pop(1, 2, c=3)
    assert proxy._current_path == []
    assert proxy._paths == [[El(type="call", key="pop", args=(1, 2), kwargs={"c": 3}),]]
    # Augmented assignment records setattr with value=None plus the operator
    # metadata on the committed path.
    proxy = Proxy()
    proxy.foo += 42
    assert proxy._current_path == []
    assert proxy._paths == [
        [El(type="getattr", key="foo"), El(type="setattr", value=None),]
    ]
    # NOTE(review): _paths[0] compares equal to a plain list above yet also
    # carries .op/.other attributes -- presumably a list subclass; confirm in
    # immerframe's implementation.
    assert proxy._paths[0].op == "__add__"
    assert proxy._paths[0].other == 42
|
5voltsgc/EOLT_R6 | realTimePlotting.py | <filename>realTimePlotting.py
import sys
import time
import serial
import matplotlib.pyplot as plt
def getdata():
    """Request one reading from the Arduino and return it as a decoded string.

    Uses the module-level ``arduino`` serial connection.  The trailing two
    bytes ("\\r\\n") are stripped before decoding.
    """
    arduino.write(str.encode("getdata?\n"))
    raw = arduino.readline()
    return raw[0:len(raw) - 2].decode("utf-8")
# Live plot: poll the Arduino every `intervalo_tempo` seconds for
# `tempo_total` seconds and plot temperature against elapsed time.
plt.ion()
plt.xlabel('Time (sec)')
plt.ylabel('Temperature (deg C)')
arduino = serial.Serial('/dev/ttyUSB0', 9600, timeout=2)
tempo_total = 100        # total run time, seconds
intervalo_tempo = 3      # sampling interval, seconds
relogio_start = time.time()
relogio_final = relogio_start + tempo_total
now = time.time()
i = 0
while now < relogio_final:
    # Take a sample once the next interval boundary has passed.
    if now > relogio_start + (intervalo_tempo * i):
        data_collected = getdata()
        tempo_now = time.time() - relogio_start
        data_to_save = str(tempo_now) + "," + data_collected
        # print(data_to_save)
        data = data_to_save.split(',')
        plt.plot(float(data[0]), float(data[1]), 'og')
        # Bug fix: the original had a bare `plt.show` (attribute access, no
        # call), which silently did nothing.
        plt.show()
        plt.pause(0.0001)
        i = i + 1
    now = time.time()
5voltsgc/EOLT_R6 | listSerialPorts.py | import serial.tools.list_ports
import serial
# import time
# Fallback port used when no Arduino is detected below.
windowsPort = "COM1"
# Scan all serial ports and keep the last one whose USB description
# mentions "Arduino".
ports = list(serial.tools.list_ports.comports())
for port in ports:
    # print(p)
    if "Arduino" in port.description:
        # print("This is an Arduino!")
        windowsPort = port.name
# NOTE(review): if no Arduino is present this silently opens COM1 -- confirm
# that fallback is intended rather than failing loudly.
arduino = serial.Serial(windowsPort, baudrate=9600, timeout=.1)
# Forward user-typed numbers to the board forever (Ctrl-C to stop).
while True:
    num = input("Enter a number: ") # Taking input from user
    arduino.write(bytes(num, 'utf-8'))
|
5voltsgc/EOLT_R6 | arduino_serial.py | <gh_stars>0
# https://gist.github.com/Marzogh/723c137a402be7f06dfc1ba0b8517d09
import serial
import csv
import re
import matplotlib.pyplot as plt
import pandas as pd
# Serial/capture configuration for the acquisition pipeline below.
portPath = "/dev/ttyACM0" # Must match value shown on Arduino IDE
baud = 115200 # Must match Arduino baud rate
timeout = 5 # Seconds
filename = "data.csv" # Output CSV for the cleaned readings
max_num_readings = 16000 # Hard cap on lines read in one session
num_signals = 1 # Number of signal columns after the Time column
def create_serial_obj(portPath, baud_rate, tout):
    """Open and return a pyserial connection for the given port, baud rate,
    and timeout (seconds)."""
    return serial.Serial(portPath, baud_rate, timeout=tout)
def read_serial_data(serial):
    """Read lines from an open pyserial object until timeout or the cap.

    Given a pyserial object (serial), returns the list of raw lines read
    (at most the module-level ``max_num_readings``).

    Bug fix: pyserial's ``readline`` returns ``b''`` on timeout under
    Python 3, but the original compared against the str ``''`` -- which
    never matched, so the loop spun on collecting empty lines until the
    cap.  An empty read (falsy for both str and bytes) now ends the loop.
    """
    serial.flushInput()
    serial_data = []
    while len(serial_data) < max_num_readings:
        serial_line = serial.readline()
        if not serial_line:  # b'' / '' => timeout reached
            break
        serial_data.append(serial_line)
    return serial_data
def is_number(string):
    """Return True when *string* parses as a float, False otherwise."""
    try:
        float(string)
    except ValueError:
        return False
    return True
def clean_serial_data(data):
    """Extract the numeric fields from each raw serial line.

    Given something like: ['0.5000,33\\r\\n', '1.0000,283\\r\\n']
    Returns: [[0.5, 33.0], [1.0, 283.0]]

    Improvements over the original:
    - raw-string regex (the old "\\d..." literal is an invalid escape
      sequence, a SyntaxWarning on modern Python);
    - accepts ``bytes`` lines too, which is what ``read_serial_data``
      actually yields under Python 3;
    - the numeric filter is inlined (EAFP) so the function is
      self-contained.
    Lines with fewer than two numeric fields are dropped.
    """
    clean_data = []
    for line in data:
        if isinstance(line, bytes):
            line = line.decode("utf-8", errors="replace")
        numbers = []
        for token in re.findall(r"\d*\.\d*|\d*", line):
            try:
                numbers.append(float(token))
            except ValueError:
                pass  # token was '' or otherwise non-numeric
        if len(numbers) >= 2:
            clean_data.append(numbers)
    return clean_data
def save_to_csv(data, filename):
    """Save a list of rows (list of lists) to *filename* as CSV.

    Bug fix: the original opened the file in ``'wb'`` mode, which raises
    ``TypeError`` under Python 3 because the csv module writes str, not
    bytes.  The csv docs require text mode with ``newline=''``.
    """
    with open(filename, 'w', newline='') as csvfile:
        csvwrite = csv.writer(csvfile)
        csvwrite.writerows(data)
def gen_col_list(num_signals):
    """Build the column-name list for the data.

    E.g. 3 signals returns: ['Time', 'Signal1', 'Signal2', 'Signal3'].
    """
    return ['Time'] + ['Signal%d' % n for n in range(1, num_signals + 1)]
def map_value(x, in_min, in_max, out_min, out_max):
    """Linearly rescale *x* from [in_min, in_max] onto [out_min, out_max]."""
    scaled = (x - in_min) * (out_max - out_min)
    return scaled / (in_max - in_min) + out_min
def simple_plot(csv_file, columns, headers):
    """Plot selected columns of *csv_file* in a fresh figure.

    NOTE(review): ``plt.plotfile`` was deprecated in Matplotlib 3.1 and
    removed in 3.3 -- confirm the pinned Matplotlib version before relying
    on this helper.
    """
    plt.clf()
    plt.close()
    plt.plotfile(csv_file, columns, names=headers, newfig=True)
    plt.show()
def plot_csv(csv_file, cols):
    """Load *csv_file*, map raw ADC counts (0-1023) to volts (0-5), and plot
    each signal column against the Time column (cols[0])."""
    # Create Pandas DataFrame from csv data
    data_frame = pd.read_csv(csv_file)
    # Set the names of the columns
    data_frame.columns = cols
    # Set the first column (Time) as the index
    data_frame = data_frame.set_index(cols[0])
    # Map the voltage values from 0-1023 to 0-5
    data_frame = data_frame.apply(lambda x: map_value(x, 0., 1023, 0, 5))
    # Bring back the Time column
    data_frame = data_frame.reset_index()
    plt.clf()
    plt.close()
    # Plot the data
    data_frame.plot(x=cols[0], y=cols[1:])
    plt.show()
# Acquisition pipeline: open the port, read raw lines, clean them into
# numeric rows, persist to CSV, and plot the result.
print("Creating serial object...")
serial_obj = create_serial_obj(portPath, baud, timeout)
print("Reading serial data...")
serial_data = read_serial_data(serial_obj)
print(len(serial_data))
print("Cleaning data...")
clean_data = clean_serial_data(serial_data)
print("Saving to csv...")
save_to_csv(clean_data, filename)
print("Plotting data...")
# simple_plot(filename, (0,1,2), ['time (s)', 'voltage1', 'voltage2'])
# simple_plot(filename, (0,1), ['time (s)', 'voltage1'])
plot_csv(filename, gen_col_list(num_signals))
|
5voltsgc/EOLT_R6 | readSerial.py | <gh_stars>0
import serial
import time
import datetime
import csv
# Log every raw serial line to a timestamped CSV until Ctrl-C.
ser = serial.Serial('COM27')  # NOTE(review): hard-coded port -- confirm
fileName = ("EOLT-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") +
            ".csv")
ser.flushInput()
while True:
    try:
        ser_bytes = ser.readline()
        print(ser_bytes)
        # Re-open per line so each row lands on disk even on abrupt exit.
        with open(fileName, "a", newline='') as f:
            writer = csv.writer(f, delimiter=",")
            writer.writerow([time.time(), ser_bytes])
    except KeyboardInterrupt:
        print("Keyboard interrupt exception caught")
        break
|
5voltsgc/EOLT_R6 | map.py | # https://github.com/marceloprates/prettymaps
|
5voltsgc/EOLT_R6 | gz_gui_EOLT.py | <filename>gz_gui_EOLT.py
from guizero import App, Text, TextBox, Combo, PushButton, Box, Picture
import numpy as np
import csv
from time import sleep, strftime
import RPi.GPIO as GPIO
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
from fpdf import FPDF
import matplotlib.pyplot as plt
import pandas as pd
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
ads = ADS.ADS1015(i2c)
# Stepper Motor and Sensor head GPIO addresses
DIR = 27 # Direction GPIO Pin
STEP = 17 # Step GPIO Pin
CW = 1 # Clockwise Rotation Plate towards home
CCW = 0 # Counterclockwise Rotation Plate away from home
ENB = 22 # the enable pin - this pin is inverted
# Select lines driven per-hall in addressed_read_all_halls().
head_select_0 = 23
head_select_1 = 24
head_select_2 = 25
head_select_3 = 12
#Set up the analog to digital pins
ads0 = AnalogIn(ads, ADS.P0)
ads1 = AnalogIn(ads, ADS.P1)
ads2 = AnalogIn(ads, ADS.P2)
ads3 = AnalogIn(ads, ADS.P3)
GPIO.setmode(GPIO.BCM)
GPIO.setup(head_select_0, GPIO.OUT)
GPIO.setup(head_select_1, GPIO.OUT)
GPIO.setup(head_select_2, GPIO.OUT)
GPIO.setup(head_select_3, GPIO.OUT)
GPIO.setup(STEP, GPIO.OUT)
GPIO.setup(DIR, GPIO.OUT)
GPIO.output(DIR, CW)
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set pin 5 to be an input pin and set initial value to be pulled low (off)
GPIO.setup(21, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set pin 21 to be an input pin and set initial value to be pulled low (off)
# Initiallizing the Home Switch on pin 5
pin5 = GPIO.input(5)
# pin21 = GPIO.input(21) # to be used for the power down Raspberry Pi
GPIO.setup(ENB, GPIO.OUT)
# Defaults; begin_test() overwrites these from the selected item number.
HALLS = 6
HEADS = 3
delay = .015
readings_table = []
plotlegend = []
users=[] # list to hold users - purhapse will change to Dictionary to make easier to add
item_num_indx = 0 # used as a global index for which test
noise_readings = 100 # how many readings for the noise check
test_parmaters = ["0-item Number", "1-Serial number", "2-HALLS", "3-HEADS", "4-Adrs/selected", ]
# Item number column headers
# 0-Part Numbers,
# 1-Count Halls,
# 2-halls/head,
# 3-# heads,
# 4-Selected,
# 5-addressed,
# 6-highMax,
# 7-highMin,
# 8-lowMax,
# 9-lowMin,
# 10-diffMax,
# 11-diffLow
# 12-Harness item number
# 13-Fixture item number
# 14-Noise Threshold
item_numbers = np.array([[107287,8,2,4,1,0,1000,700,-600,-900,1000,-50,204109,124458,200],
                         [107297,8,2,4,1,0,1000,700,-600,-900,818,560,204109,124458,200],
                         [108144,8,2,4,1,0,1000,700,-600,-900,1000,-50,204109,124458,200],
                         [108150,8,2,4,1,0,1000,700,-600,-900,885,654,204109,124458,200],
                         [112497,6,2,3,1,0,1000,700,-600,-900,1000,-50,301404,124734,200],
                         [121248,12,3,4,1,0,1000,700,-600,-900,728,460,301393,124393,200],
                         [121250,18,6,3,0,1,1000,700,-600,-900,609,423,301400,124394,200],
                         [121334,15,5,3,0,1,1000,700,-600,-900,1000,-50,301401,124742,200],
                         [121335,15,5,3,0,1,1000,700,-600,-900,1000,-50,301401,124740,200],
                         [121791,12,6,2,0,1,1000,700,-600,-900,1000,-50,301400,124394,200]])
print(item_numbers)
# Create the item numbers for the Combobox
items =[]
for column in item_numbers:
    items.append(column[0])
print(items)
def flatten_list(_2d_list):
    """Flatten one level of nesting: list elements are expanded in place,
    non-list elements are kept as-is."""
    flattened = []
    for element in _2d_list:
        # Exact `type(...) is list` check preserved from the original:
        # list subclasses are treated as scalars.
        if type(element) is list:
            flattened.extend(element)
        else:
            flattened.append(element)
    return flattened
# Load the operator names for the user dropdown.  If the file is missing this
# raises FileNotFoundError: [Errno 2] No such file or directory: 'users.csv'
with open('Users.csv', newline='') as FR:
    reader = csv.reader(FR, delimiter =',')
    # csv.reader yields a list of lists of names, e.g. [["John"], ["Julie"]]
    for row in reader:
        users.append(row)
# ...but only a flat list is wanted, e.g. ["John", "Julie"],
# so flatten it one level.
print(f"unsorted:{users}")
users = flatten_list(users)
# sort the list of names alphabetically with sorted()
users=sorted(users)
print(users)
def addressed_read_all_halls():
    """Sample every hall sensor once and return the raw ADC counts.

    The four head_select GPIO lines form a binary address selecting hall j
    on each head: select line n carries bit n of j, with halls past 5
    sharing address 5 (exactly what the original 80-line if/elif chain
    encoded).  Head i is read from ADC channel i, heads past 3 sharing
    channel 3.

    Returns:
        list: HEADS * HALLS raw readings in head-major order.
    """
    select_pins = (head_select_0, head_select_1, head_select_2, head_select_3)
    adc_channels = (ads0, ads1, ads2, ads3)
    hall_readings = []
    for i in range(HEADS):
        channel = adc_channels[min(i, 3)]
        for j in range(HALLS):
            # Drive the select lines with the bits of the hall address.
            address = min(j, 5)
            for bit, pin in enumerate(select_pins):
                GPIO.output(pin, GPIO.HIGH if (address >> bit) & 1 else GPIO.LOW)
            try:
                hall_readings.append(channel.value)
            except OSError as e:
                # Occasional I2C hiccup from the Adafruit ADS1x15 driver;
                # retry once and let a second failure propagate.  An
                # alternative library lives at http://abyz.me.uk/lg/examples.html
                print("OSError at hall_readings.append(ads%d.value)" % min(i, 3))
                print(e)
                hall_readings.append(channel.value)
    return hall_readings
def double_click():
    """Double-click handler for the user label.

    TODO: re-enable the add-user dialog (prompt for a name, append it to the
    users list, and persist it back to Users.csv).
    """
    print("Double Clicked")
def update_harnes_fixture_lbl():
    """Combo/entry callback: look up the chosen item number, show which
    harness and fixture to use, and enable the test/save buttons.
    """
    global item_num_indx
    print(selected_item.value)
    item_num_indx = items.index(int(selected_item.value))
    # Bug fix: per the item_numbers column map above, column 12 is the
    # Harness item number and column 13 the Fixture item number.  The
    # original read columns 6/7, which are highMax/highMin test limits.
    use_harness.value = str(item_numbers[item_num_indx][12])
    use_fixture.value = str(item_numbers[item_num_indx][13])
    save_btn.enabled = True
    tst_btn.enabled = True
def save_test():
    """Stub handler for the Save Test button; just logs the click for now."""
    print("Save Test")
def begin_test():
    """Run the full EOLT cycle for the selected item number.

    Homes the plate, samples a noise baseline, sweeps the plate while
    recording every hall sensor, writes raw and noise CSVs, posts the noise
    summary to the results textbox, renders Counts/Noise charts, and emits
    a PDF report.  Reads the GUI selection through module globals.
    """
    # find a better way for globals here maybe the mutable list that is global?
    global item_num_indx
    global HEADS
    global HALLS
    report_txt = "" #This will hold all the report test and post it to the test report textbox see how it works
    UUT = item_numbers[item_num_indx][0]
    report_txt += f"Testing a {UUT} \n"
    HALLS = item_numbers[item_num_indx][2]
    report_txt += f"Using {HALLS} Halls per Head\n"
    HEADS = item_numbers[item_num_indx][3]
    report_txt += f"With {HEADS} Heads\n"
    # Add the test max min and threshold
    # Obtain test configuration
    for h in range(HALLS * HEADS):
        plotlegend.append("hall: "+ str(h))
    GPIO.output(ENB, True)
    # Homing: step toward home until the pin-5 switch reads high.
    global pin5
    while pin5 == 0:
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay) # need pulse for stepper motor controller to move
        GPIO.output(STEP, GPIO.LOW)
        pin5 = GPIO.input(5)
    # rapid move to starting
    GPIO.output(DIR, CCW)
    for s in range(500):
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay/10)
        GPIO.output(STEP, GPIO.LOW)
    # Read sesnors once to prime, or noise reduction, of the ADS1X15 sensor
    addressed_read_all_halls()
    # read the hall again and do the quick check to make sure the setup is correct
    #Alert the user if the paramters are not correct give notice in a message box exit by using return
    # Noise Check: static readings with the plate parked.
    for n in range(noise_readings):
        readings_table.append(addressed_read_all_halls())
    # testing steps: sweep the plate, sampling all halls at every step.
    for s in range(1100-500):
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay/10)
        GPIO.output(STEP, GPIO.LOW)
        step = int(round(s/18,0))  # NOTE(review): computed but never used
        # Write the test readings to the table
        readings_table.append(addressed_read_all_halls())
    df = pd.DataFrame(readings_table)
    timestr = strftime("%Y%m%d-%H%M%S")
    filename = "readings-" + timestr +".csv"
    df.to_csv(filename, sep=',')
    # Calculate noise - 1st derivitive (max absolute step-to-step change
    # over the static baseline rows).
    noise_results = df.iloc[:noise_readings, 0:((HALLS * HEADS))].diff(axis=0, periods = 1).abs().max().to_frame()
    noise_results.columns = ['Noise']
    noise_results["Halls"] = plotlegend
    # Dump noise result to a file?
    filename = "noise_readings-" + timestr +".csv"
    noise_results.to_csv(filename, sep=',')
    report_txt += str(noise_results) + "\n"
    result_txtbox.value = report_txt
    # pass fail results of pandas dataframe between 9 Something like max - min between(max-dif, min-diff15, inclusive = True)
    # return the plate back home
    GPIO.output(DIR, CW)
    for r in range(1100):
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay/10)
        GPIO.output(STEP, GPIO.LOW)
        step = int(60-round(r/18,0))
    # Turn on the stepper motor controller
    GPIO.output(ENB, False)
    GPIO.cleanup()
    # Make the Counts chart
    plt.plot(readings_table)
    plt.rcParams["figure.figsize"] = [7.50, 3.50]
    plt.xlabel('Steps')
    plt.ylabel('Counts')
    plt.savefig('Counts.png')
    # plt.show() # if you would like a window pop up
    # Make Noise Chart
    #https://www.tutorialspoint.com/how-to-create-a-matplotlib-bar-chart-with-a-threshold-line
    plt.rcParams["figure.figsize"] = [7.50, 3.50]
    # plt.rcParams["figure.autolayout"] = True
    threshold = item_numbers[item_num_indx][14] # 14 is the threshold
    a_threshold = np.maximum(noise_results["Noise"] - threshold, 0)
    b_threshold = np.minimum(noise_results["Noise"], threshold)
    x = range(HALLS * HEADS)
    fig, ax = plt.subplots()
    # Green up to the threshold, red stacked above it.
    ax.bar(x, b_threshold, 0.35, color="green")
    ax.bar(x, a_threshold, 0.35, color="red", bottom=b_threshold)
    plt.axhline(threshold, color='red', ls='dotted')
    plt.savefig('Noise.png')
    # Now resize saved figures for the GUI
    # Create PDF report
    pdf = FPDF('P', 'mm', 'letter')
    pdf.add_page()
    pdf.image('EOLT_report.png', x = 0, y = 0, w = 203, h = 279, type = 'PNG')
    pdf.image('Noise.png', x = 0, y = 55, w = 97, h = 82, type = 'PNG')
    pdf.image('Counts.png', x = 100, y = 55, w = 97, h = 82, type = 'PNG')
    pdf.set_font('helvetica', 'B', 16)
    # NOTE(review): hard-coded sample values -- presumably should come from
    # UUT / serial_num_txtbox / the current date; confirm.
    pdf.text(23, 40, '121250')
    pdf.text(23, 51.5, 'January 3, 2022')
    pdf.text(127, 40, 'B12345')
    pdf.text(127, 51.5, '01032022r12m3')
    #set font for results
    pdf.set_font('helvetica', size=10)
    # Creating an empty list
    rows = []
    # Iterating through the columns of
    # dataframe
    results = noise_results[['Halls', 'Noise']]
    for column in results.columns:
        # Storing the rows of a column
        # into a temporary list
        li = results[column].tolist()
        # appending the temporary list
        rows.append(li)
    print(f"Rows {rows}")
    print("plot legend")
    print(plotlegend)
    print(f"second column in rows {rows[1]}")
    row_pos=200 #counter for which row in loop
    for row in (rows):
        pdf.text(10, row_pos,str(row))
        print(row)
        row_pos += 5
    # pdf.write(5, str(results))
    pdf.output('tuto1.pdf', 'F')
def plot_noise():
    """Stub handler for the noise-chart button; just logs the click for now."""
    print("Noise Plot")
def plot_counts():
    """Stub handler for the counts-chart button; just logs the click for now."""
    print("plot_counts")
# ---- GUI layout (guizero): inputs on the left, charts right, results below.
app = App(layout="grid", title = "EOLT", width = 1500, height = 650)
button_box=Box(app, layout="grid", grid=[0, 0, 3, 6], border=3)
# Item Numbers
itm_num_lbl = Text(button_box, text="1. Choose - Item Number:", size=20, grid=[0,1], align="left")
selected_item = Combo(button_box, grid=[1, 1, 2, 1], width=15, options=items, command=update_harnes_fixture_lbl)
selected_item.text_size=20
# Serial Numbers
serial_label = Text(button_box, text="2. Enter - Serial Number:", size=20, grid=[0,2], align="left")
serial_num_txtbox = TextBox(button_box, grid=[1, 2, 2, 1], width=17, command=update_harnes_fixture_lbl)
serial_num_txtbox.text_size = 20
# User (double-click the label to reach the add-user handler)
user_lbl = Text(button_box,text="3. Select - User:", size=20, grid=[0, 3], align="left" )
user_lbl.when_double_clicked = double_click
user_name_cmb = Combo(button_box,options=users, grid=[1,3, 2, 1], align="left", width=15)
user_name_cmb.text_size=20
# Use fixture and harness (read-only display, filled by the combo callback)
harness_lbl = Text(button_box, text="4. Use Harness:", size=20, grid=[0, 4], align="left")
use_harness = TextBox(button_box, enabled=True, width=17, grid=[1, 4, 2, 1])
use_harness.text_size=20
use_harness.bg = "#999999"
fixture_lbl = Text(button_box, text="5. Use Fixture:", size=20, grid=[0,5], align="left")
use_fixture = TextBox(button_box, enabled=True, width=17, grid=[1, 5, 2, 1])
use_fixture.bg = "#999999"
use_fixture.text_size=20
# Buttons (disabled until an item number is chosen)
tst_btn = PushButton(button_box, command=begin_test, text = "6. Begin Test", grid=[0,6])
tst_btn.text_size = 30
tst_btn.enabled = False
save_btn = PushButton(button_box, command=save_test, text = "7. Save Test", grid=[1,6])
save_btn.text_size = 30
save_btn.enabled = False
graph_box = Box(app, layout="grid", grid=[6,0,6,6], border=3)
noise_btn = PushButton(graph_box, command=plot_noise, image="Noise_400x300.png", grid=[0,0])
counts_btn = PushButton(graph_box, command=plot_counts, image="Counts_400x300.png", grid=[1,0])
results_box = Box(app, layout='grid', grid=[0,6, 7,6], border=3)
results_lbl = Text(results_box, grid=[0,0], text="Test Results: Passed")
results_lbl.text_size = 30
results_lbl.bg='green'
result_txtbox=TextBox(results_box, grid=[0,1], text="", align="left", multiline=True, scrollbar=True)
result_txtbox.height = 15
result_txtbox.width = 175
# for l in item_numbers:
# result_txtbox.append(l)
app.display()
|
5voltsgc/EOLT_R6 | testCSV.py |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Scratch script: plot an example capture and estimate its noise.
# NOTE(review): absolute Windows path -- will only run on that machine.
df = pd.read_csv('C:\\Users\\a6384\\Dropbox\\python\\EOLT\\121250_example.CSV')
# plt.figure()
df.plot()
plt.legend(loc='best')
plt.savefig('121250_example.png')
plt.show()
print(df.describe(percentiles=None,))
# df.info()
# print(df.shape)
# Noise estimate: max absolute first difference over the first 50 rows.
count = df.iloc[:50, 1:19].diff(axis=0, periods=1).abs().max().to_frame().T
print(count)
# count.info()
# noise = count.diff(axis=0, periods=1).abs().max().to_frame().T
# print(noise)
# print(noise.max().to_frame().T)
# noise.to_csv('noise.csv', sep=',')
|
5voltsgc/EOLT_R6 | icecream_tutorial.py | <reponame>5voltsgc/EOLT_R6<filename>icecream_tutorial.py
# https://towardsdatascience.com/do-not-use-print-for-debugging-in-python-anymore-6767b6f1866d
from icecream import ic
from datetime import datetime
# Demo of toggling icecream output globally (net effect: enabled).
ic.disable()
ic.enable()
def now():
    """Prefix for ic output: the current timestamp wrapped in brackets."""
    return '[' + str(datetime.now()) + '] '
# Every ic() line is now prefixed with the timestamp from now().
ic.configureOutput(prefix=now)
# ic('test')
# Sample data used by the ic demos below.
my_dict = {
    'name': 'Chris',
    'age': 33
}
def square_of(num):
    """Return *num* squared."""
    return num ** 2
# Same values logged three ways: bare print, manually labeled print, and
# ic() -- which labels the expression automatically.
print(square_of(2))
print(square_of(3))
print(square_of(4))
print('square of 2:', square_of(2))
print('square of 3:', square_of(3))
print('square of 4:', square_of(4))
ic(square_of(2))
ic(square_of(3))
ic(square_of(4))
ic(my_dict['name'])
class Dog():
    """Tiny demo class for inspecting attributes with ic()."""
    # Class-level attributes shared by every Dog instance.
    num_legs = 4
    tail = True
    nose = "cold"
# ic() on attributes prints both the expression and its value.
dog = Dog()
ic(dog.num_legs)
ic(dog.tail)
ic(dog.nose)
print(dog.tail)
# Bare ic() with no arguments logs the file/line/function of the call site,
# which shows which branch executed.
user_name = "Chris"
if user_name == "Chris":
    ic()
else:
    ic()
def check_user(username):
    """Trace which branch a username takes using bare ic() call-site logging."""
    if username == 'Chris':
        # known-user path
        ic()
    else:
        # unknown-user path
        ic()
check_user('Chris')
check_user('Jade')
# ic() returns its argument, so it can be dropped into the middle of an
# expression without changing the computation.
num = 2
square_of_num = square_of(ic(num))
if ic(square_of_num) == pow(num, 2):
    ic('Correct!')
|
5voltsgc/EOLT_R6 | directory.py | <gh_stars>0
# https://stackoverflow.com/questions/51877124/how-to-select-a-directory-and-store-it-into-a-variable-in-tkinter/51877299
# from tkinter import*
from tkinter import StringVar
from tkinter import Label
from tkinter import Entry
from tkinter import ttk
from tkinter import Tk
from tkinter import filedialog
# Root window setup.
gui = Tk()
gui.geometry("400x400")
gui.title("FC")
def getFolderPath():
    """Open a directory-picker dialog and store the chosen path in the
    shared folderPath StringVar."""
    chosen = filedialog.askdirectory()
    folderPath.set(chosen)
def doStuff():
    """Placeholder action: report which folder is currently selected."""
    print("Doing stuff with folder", folderPath.get())
# Shared variable holding the chosen directory; getFolderPath() writes it
# and the Entry below displays it.
folderPath = StringVar()
a = Label(gui, text="Enter name")
a.grid(row=0, column=0)
E = Entry(gui, textvariable=folderPath)
E.grid(row=0, column=1)
btnFind = ttk.Button(gui, text="Browse Folder", command=getFolderPath)
btnFind.grid(row=0, column=2)
c = ttk.Button(gui, text="find", command=doStuff)
c.grid(row=4, column=0)
gui.mainloop()
|
5voltsgc/EOLT_R6 | combined.py | <reponame>5voltsgc/EOLT_R6
import tkinter as tk
from tkinter.ttk import Combobox
from PIL import Image, ImageTk
import numpy as np
from tkinter.font import Font
from time import sleep
import RPi.GPIO as GPIO
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
from fpdf import FPDF
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
window = tk.Tk()
window.title('EOLT')
# window.iconbitmap('TDWicon2.ico')
window.geometry('1400x600')
myFont = Font(family="Times New Roman", size=12)
# test Varibles held in a list
UUT_config = []
HALLS = 6
HEADS = 3
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
ads = ADS.ADS1015(i2c)
DIR = 27 # Direction GPIO Pin
STEP = 17 # Step GPIO Pin
CW = 1 # Clockwise Rotation Plate towards home
CCW = 0 # Counterclockwise Rotation Plate away from home
ENB = 22 # the enable pin - this pin is inverted
# Select lines driven per-hall in addressed_read_all_halls().
head_select_0 = 23
head_select_1 = 24
head_select_2 = 25
head_select_3 = 12
ads0 = AnalogIn(ads, ADS.P0)
ads1 = AnalogIn(ads, ADS.P1)
ads2 = AnalogIn(ads, ADS.P2)
ads3 = AnalogIn(ads, ADS.P3)
GPIO.setmode(GPIO.BCM)
GPIO.setup(head_select_0, GPIO.OUT)
GPIO.setup(head_select_1, GPIO.OUT)
GPIO.setup(head_select_2, GPIO.OUT)
GPIO.setup(head_select_3, GPIO.OUT)
GPIO.setup(STEP, GPIO.OUT)
GPIO.setup(DIR, GPIO.OUT)
GPIO.output(DIR, CW)
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set pin 5 to be an input pin and set initial value to be pulled low (off)
GPIO.setup(21, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Set pin 21 to be an input pin and set initial value to be pulled low (off)
pin5 = GPIO.input(5)
pin21 = GPIO.input(21)
GPIO.setup(ENB, GPIO.OUT)
# NOTE(review): HALLS/HEADS are assigned twice in this setup (also above) --
# redundant but harmless.
HALLS = 6
HEADS = 3
delay = .015
readings_table = []
plotlegend = []
# ##################################################################
# ###################TESTING########################################
# ##################################################################
def addressed_read_all_halls():
    """Sample every hall sensor once and return the raw ADC counts.

    The four head_select GPIO lines form a binary address selecting hall j
    on each head: select line n carries bit n of j, with halls past 5
    sharing address 5 (exactly what the original if/elif chain encoded).
    Head i is read from ADC channel i, heads past 3 sharing channel 3.

    Returns:
        list: HEADS * HALLS raw readings in head-major order.
    """
    select_pins = (head_select_0, head_select_1, head_select_2, head_select_3)
    adc_channels = (ads0, ads1, ads2, ads3)
    hall_readings = []
    for i in range(HEADS):
        channel = adc_channels[min(i, 3)]
        for j in range(HALLS):
            # Drive the select lines with the bits of the hall address.
            address = min(j, 5)
            for bit, pin in enumerate(select_pins):
                GPIO.output(pin, GPIO.HIGH if (address >> bit) & 1 else GPIO.LOW)
            hall_readings.append(channel.value)
    return hall_readings
def testing():
    """Run one full measurement cycle.

    Homes the plate, samples a noise baseline, sweeps the plate while
    recording every hall, writes the noise CSV, renders the Counts/Noise
    charts, and emits a PDF report.  Talks to the stepper driver and the
    ADS1015 directly via GPIO/I2C and mutates the module-level
    readings_table / plotlegend lists.
    """
    for h in range(HALLS * HEADS):
        plotlegend.append("hall: "+ str(h))
    noise_readings = 100 # how many readings for the noise check
    #Move plate to Home position while the button is not pressed
    GPIO.output(ENB, True)
    pin5 = 0
    while pin5 == 0:
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay)
        GPIO.output(STEP, GPIO.LOW)
        pin5 = GPIO.input(5)
    # rapid move to starting
    GPIO.output(DIR, CCW)
    for s in range(500):
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay/10)
        GPIO.output(STEP, GPIO.LOW)
    #Read sesnors once to prime, or noise reduction, of the ADS1X15 sensor
    addressed_read_all_halls()
    # Noise Check: static readings with the plate parked.
    for n in range(noise_readings):
        readings_table.append(addressed_read_all_halls())
    # testing steps: sweep the plate, sampling all halls at every step.
    for s in range(1100-500):
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay/10)
        GPIO.output(STEP, GPIO.LOW)
        # sleep(delay/5)
        step = int(round(s/18,0))  # NOTE(review): computed but never used
        # print(s)
        readings_table.append(addressed_read_all_halls())
    # print(readings_table)
    df = pd.DataFrame(readings_table)
    # Noise = max absolute step-to-step change over the static baseline rows.
    noise_results = df.iloc[:noise_readings, 0:((HALLS * HEADS))].diff(axis=0, periods = 1).abs().max().to_frame()
    noise_results.columns = ['Noise']
    noise_results["Halls"] = plotlegend
    noise_results.to_csv('noise_results.csv', sep=',')
    # Return the plate home.
    GPIO.output(DIR, CW)
    for r in range(1100):
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay/10)
        GPIO.output(STEP, GPIO.LOW)
        step = int(60-round(r/18,0))
    GPIO.output(ENB, False)
    GPIO.cleanup()
    # Counts chart (sized for the GUI buttons).
    plt.plot(readings_table)
    plt.rcParams["figure.figsize"] = [4.00, 3.00]
    plt.xlabel('Steps')
    plt.ylabel('Counts')
    plt.savefig('Counts_400x300.png')
    # Noise chart: green up to the threshold, red stacked above it.
    #https://www.tutorialspoint.com/how-to-create-a-matplotlib-bar-chart-with-a-threshold-line
    plt.rcParams["figure.figsize"] = [4.00, 3.00]
    threshold = 200
    a_threshold = np.maximum(noise_results["Noise"] - threshold, 0)
    b_threshold = np.minimum(noise_results["Noise"], threshold)
    x = range(HALLS * HEADS)
    print(x)
    fig, ax = plt.subplots()
    ax.bar(x, b_threshold, 0.35, color="green")
    ax.bar(x, a_threshold, 0.35, color="red", bottom=b_threshold)
    plt.axhline(threshold, color='red', ls='dotted')
    plt.savefig('Noise_400x300.png')
    pdf = FPDF('P', 'mm', 'letter')
    pdf.add_page()
    pdf.image('EOLT_report.png', x = 0, y = 0, w = 203, h = 279, type = 'PNG')
    # NOTE(review): the charts above were saved as 'Noise_400x300.png' /
    # 'Counts_400x300.png', but the report embeds 'Noise.png' / 'Counts.png';
    # unless those files exist from elsewhere this raises at runtime.
    pdf.image('Noise.png', x = 0, y = 55, w = 97, h = 82, type = 'PNG')
    pdf.image('Counts.png', x = 100, y = 55, w = 97, h = 82, type = 'PNG')
    pdf.set_font('helvetica', 'B', 16)
    # NOTE(review): hard-coded sample values -- presumably should come from
    # UUT_config / the serial-number entry; confirm.
    pdf.text(23, 40, '121250')
    pdf.text(23, 51.5, 'January 3, 2022')
    pdf.text(127, 40, 'B12345')
    pdf.text(127, 51.5, '01032022r12m3')
    results = noise_results[['Halls', 'Noise']]
    print("The results were:")
    print(type(results))
    print(results)
    pdf.ln(10)
    pdf.write(5, str(results))
    pdf.output('tuto1.pdf', 'F')
def comboclick(event):
    """Item-number combobox handler: cache the selected row in the global
    UUT_config, update the harness/fixture labels, and enable the command
    buttons."""
    index = items.index(int(drop_item_number.get()))
    global UUT_config
    UUT_config = item_numbers[index]
    print(UUT_config)
    # NOTE(review): both labels below read column 6 -- the fixture and the
    # harness display the same value; confirm which columns are intended.
    lbl_fixtures.config(text=UUT_config[6])
    txt_harn = "f-" + str(UUT_config[6])
    lbl_harness.config(text=txt_harn)
    # The command buttons activate only after choosing an Item Number.
    btn_begin.config(state=tk.NORMAL)
    btn_save.config(state=tk.NORMAL)
    btn_print.config(state=tk.NORMAL)
def test():
    """Run the measurement cycle, then refresh the chart buttons with the
    freshly saved plots."""
    print("Begin testing loop")
    print(UUT_config)
    testing()
    # Bug fix: Tkinter does not keep its own reference to a PhotoImage, so a
    # purely local one is garbage-collected when this function returns and
    # the button goes blank.  Park a reference on each widget.
    counts_chart_large = ImageTk.PhotoImage(Image.open('Counts_400x300.png'))
    counts_btn.config(image=counts_chart_large)
    counts_btn.image = counts_chart_large
    # update charts on buttons
    noise_chart_large = ImageTk.PhotoImage(Image.open('Noise_400x300.png'))
    noise_btn.config(image=noise_chart_large)
    noise_btn.image = noise_chart_large
def save_results():
    """Debug stub for the Save button: log the current selection and the
    known part numbers."""
    print(drop_item_number.get())
    print(item_numbers[:, 0])
def print_results():
    """Placeholder handler for the 'Print Results' button."""
    message = 'Print results was clicked'
    print(message)
# Table of unit configurations, one row per orderable item number.
# Column 0 is the item number shown in the Combobox; column 6 is the
# fixture/harness id displayed by comboclick().
# NOTE(review): the meaning of columns 1-5 is not evident from this file —
# confirm against the test procedure before relying on them.
item_numbers = np.array([[107287, 8, 2, 4, 1, 0, 12345],
                         [107297, 8, 2, 4, 1, 0, 23456],
                         [108144, 8, 2, 4, 1, 0, 34567],
                         [108150, 8, 2, 4, 1, 0, 45677],
                         [108283, 18, 6, 3, 0, 1, 76543],
                         [112497, 6, 2, 3, 1, 0, 865324],
                         [121248, 12, 3, 4, 1, 0, 7654367],
                         [121250, 18, 6, 3, 0, 1, 6543456],
                         [121334, 15, 5, 3, 0, 1, 6543234],
                         [121335, 15, 5, 3, 0, 1, 876765],
                         [121791, 12, 6, 2, 0, 1, 5643322]])
# Item numbers offered by the Combobox: the first column of each row.
# (Comprehension replaces the original append loop; same values and order.)
items = [row[0] for row in item_numbers]
# Default selection shown in the serial-number entry.
selected_item_number = tk.IntVar()
selected_item_number.set(121250)
# input frame: item-number selector, serial entry, harness/fixture labels, buttons
input_frame = tk.Frame(window, width=100, highlightbackground='blue', highlightthickness=3)
# input_frame=Frame(window, width=100, highlightbackground='blue', highlightthickness=3)
input_frame.grid(row=0, column=0, padx=20, pady=20, ipadx=20, ipady=20)
# ItemNumbers
tk.Label(input_frame, text='Item Number #:', fg='blue', font=myFont, foreground="black").grid(row=0, column=0, sticky=tk.E)
# drop_item_number = tk.OptionMenu(input_frame, selected_item_number, * item_numbers)
# ComboBox
drop_item_number = Combobox(input_frame,
                            values=items,
                            state='readonly',
                            width=8,
                            font=myFont,
                            foreground="black"
                            )
# Selecting an item populates the labels and enables the buttons (see comboclick).
drop_item_number.bind("<<ComboboxSelected>>", comboclick)
drop_item_number.grid(row=0, column=1, columnspan=2, sticky=tk.W)
# Serial Number
tk.Label(input_frame, text='Serial #:', fg='black', font=myFont).grid(row=1, column=0, sticky=tk.E)
ent_serial_num = tk.Entry(input_frame, textvariable=selected_item_number, fg='black', font=myFont, width=10)
ent_serial_num.grid(row=1, column=1, columnspan=2, sticky=tk.W)
# Harness and Fixture (label text is replaced by comboclick on selection)
tk.Label(input_frame, text='Use Harness:', fg='black', font=myFont).grid(row=2, column=0, sticky=tk.E)
lbl_harness = tk.Label(input_frame, text=str(selected_item_number.get()), fg='black', font=myFont)
lbl_harness.grid(row=2, column=1, sticky=tk.E)
tk.Label(input_frame, text='Use Fixture:', fg='black', font=myFont).grid(row=3, column=0, sticky=tk.E)
lbl_fixtures = tk.Label(input_frame, text=str(selected_item_number.get()), fg='black', font=myFont)
lbl_fixtures.grid(row=3, column=1, sticky=tk.E)
# Buttons — created disabled; comboclick enables them once an item is chosen.
btn_begin = tk.Button(input_frame, command=test, text='Begin\nTest', fg='blue', font=myFont, state=tk.DISABLED)
btn_begin.grid(row=4, column=0, sticky=tk.E)
btn_save = tk.Button(input_frame, command=save_results, text='Save\nresults', fg='blue', font=myFont, state=tk.DISABLED)
btn_save.grid(row=4, column=1, sticky=tk.W)
btn_print = tk.Button(input_frame, command=print_results, text='Print\nResults', fg='blue', font=myFont, state=tk.DISABLED)
btn_print.grid(row=4, column=2, sticky=tk.W)
# Chart frame: two image buttons showing the noise and counts charts.
noise_frame = tk.Frame(window, width=100, highlightbackground='blue', highlightthickness=3)
noise_frame.grid(row=0, column=1, padx=20, pady=20, ipadx=20, ipady=20)
# Keep module-level references to the PhotoImages so Tk does not lose them.
noise_chart_large = ImageTk.PhotoImage(Image.open('Noise_400x300.png'))
counts_chart = ImageTk.PhotoImage(Image.open('Counts_400x300.png'))
# BUG FIX: the original assigned `tk.Button(...).pack(...)`, and pack() returns
# None — so noise_btn/counts_btn were None and test() crashed calling .config()
# on them.  Create the widget first, keep the reference, then pack it.
noise_btn = tk.Button(noise_frame, text="open plot", image=noise_chart_large)
noise_btn.pack(side=tk.LEFT)
counts_btn = tk.Button(noise_frame, text="open plot", image=counts_chart)
counts_btn.pack(side=tk.RIGHT)
window.mainloop()
|
ilexistools/Kitconc-examples | 08_ngrams.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus stored in the workspace (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Extract 3-grams whose tokens match the POS sequence noun / preposition / noun.
ngrams = corpus.ngrams(size=3,pos='NN IN NN',show_progress=True)
# Show the top 10 rows of the resulting table.
print(ngrams.df.head(10))
ngrams.save_excel(corpus.output_path + 'ngrams.xlsx') |
ilexistools/Kitconc-examples | 02_wordlist.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# reference to the corpus ('ads' corpus in the example workspace, English models)
corpus = Corpus('kitconc-examples/workspace','ads','english')
# make wordlist (word frequency table for the whole corpus)
wordlist = corpus.wordlist(show_progress=True)
# print the top 10 rows
print(wordlist.df.head(10))
# save Excel file into the corpus output folder
wordlist.save_excel(corpus.output_path + 'wordlist.xlsx')
|
ilexistools/Kitconc-examples | 04_kwic.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Keyword-in-context lines for the node word 'experience'.
kwic = corpus.kwic('experience',show_progress=True)
# Sort by the first three words to the right of the node.
kwic.sort('R1','R2','R3')
print(kwic.df.head(10))
kwic.save_excel(corpus.output_path + 'kwic.xlsx',highlight='R1 R2 R3') |
ilexistools/Kitconc-examples | 12_collgraph.py | <reponame>ilexistools/Kitconc-examples
# -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Collocates of 'skills' within a 3-word span on each side,
# restricted to nouns and adjectives.
collocates = corpus.collocates('skills',left_span=3,right_span=3,coll_pos='NN JJ',show_progress=True)
print(collocates.df.head(10))
collocates.save_excel(corpus.output_path + 'collocates.xlsx')
# plot collocates
collocates.plot_collgraph(node='skills') |
ilexistools/Kitconc-examples | 10_keywords_dispersion.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Keywords are computed from the corpus wordlist; the dispersion table then
# reports how keywords distribute across the corpus texts.
wordlist = corpus.wordlist(show_progress=True)
keywords = corpus.keywords(wordlist,show_progress=True)
keywords_dispersion = corpus.keywords_dispersion(keywords,show_progress=True)
print(keywords_dispersion.df.head(10))
keywords_dispersion.save_excel(corpus.output_path+'keywords_dispersion.xlsx') |
ilexistools/Kitconc-examples | 06_collocates.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Collocates of 'experience' in a 2-word span on each side, filtered by POS tags.
collocates = corpus.collocates('experience',left_span=2,right_span=2,coll_pos='IN NN JJ VBN VBD',show_progress=True)
print(collocates.df.head(10))
collocates.save_excel(corpus.output_path + 'collocates.xlsx') |
ilexistools/Kitconc-examples | 11_collocations.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Collocation statistics are derived from the KWIC lines for 'skills'.
kwic = corpus.kwic('skills',show_progress=True)
collocations = corpus.collocations(kwic,show_progress=True)
print(collocations.df.head(10))
collocations.save_excel(corpus.output_path+'collocations.xlsx')
# plot a collocate distribution
collocations.plot_colldist('strong') |
ilexistools/Kitconc-examples | 05_concordances.py | <reponame>ilexistools/Kitconc-examples<filename>05_concordances.py
# -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Concordance lines for the node word 'experience'.
concordances = corpus.concordance('experience',show_progress=True)
print(concordances.df.head(10))
concordances.save_excel(corpus.output_path + 'concordances.xlsx',highlight='R1 R2 R3') |
ilexistools/Kitconc-examples | 03_keywords.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Keywords are computed from the corpus wordlist.
wordlist = corpus.wordlist(show_progress=True)
keywords = corpus.keywords(wordlist,show_progress=True)
print(keywords.df.head(10))
keywords.save_excel(corpus.output_path + 'keywords.xlsx') |
ilexistools/Kitconc-examples | 07_clusters.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# 3-word clusters containing 'experience'.
clusters = corpus.clusters('experience',size=3,show_progress=True)
print(clusters.df.head(10))
clusters.save_excel(corpus.output_path + 'clusters.xlsx') |
ilexistools/Kitconc-examples | 00_download_examples.py | <filename>00_download_examples.py
# -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.core import Examples
Examples().download() |
ilexistools/Kitconc-examples | 09_dispersion.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# Open the example 'ads' corpus (English models).
corpus = Corpus('kitconc-examples/workspace','ads','english')
# Dispersion of 'salary' across the corpus texts.
dispersion = corpus.dispersion('salary')
print(dispersion.df.head(10))
dispersion.save_excel(corpus.output_path + 'dispersion.xlsx') |
ilexistools/Kitconc-examples | 01_create_corpus.py | # -*- coding: utf-8 -*-
"""
Kitconc examples
@author: <EMAIL>
"""
from kitconc.kit_corpus import Corpus
# reference to the corpus ('ads' in the example workspace, English models)
corpus = Corpus('kitconc-examples/workspace','ads','english')
# add texts from source folder
corpus.add_texts('kitconc-examples/ads',show_progress=True) |
Hiroaki-Tanaka-0606/Photoelectron_refraction | Python/Config_example.py | # Photoelectron_refraction
# Config
# For Gui
## Font families tried in order when rendering widget text.
fontFamilies=["Segoe UI", "Yu Gothic UI"]
## Default font size in pixels.
fontSize_normal=16
## Layout margins passed to setContentsMargins (left, top, right, bottom).
ContentsMargins=[5,5,5,5]
# For graph
## Color of horizontal and vertical guide lines
pen1=(0, 255, 0)
## length of ticks (negative value — pyqtgraph draws ticks inward)
tickLength=-30
# Physical constants
## 1 Bohr (Angstrom)
Bohr_ang=0.529177
## 1 Hartree (eV)
Eh_eV=27.2114
# Calculation configuration
## Calculation range of the Gauss function (presumably in units of sigma — confirm in lib)
sigmaMax=5.0
Hiroaki-Tanaka-0606/Photoelectron_refraction | Python/Calculation_GUI.py | # Photoelectron_refraction
# GUI
# libraries from pip
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
import numpy as np
import math
import h5py
from datetime import datetime
# libraries in this package
import lib
import Config
# MainWindow: a class for the window
class MainWindow(QtGui.QMainWindow):
    """Main window of the photoelectron-refraction GUI.

    Builds all input rows (potentials, initial state, k-plane, surface,
    grid, broadening), the command buttons, the index spin boxes, and the
    three pyqtgraph cross-section plots with guide lines.  All widgets are
    exposed as attributes; module-level functions (startCalc, plotDisp,
    importDisp, exportDisp) read and write them.
    """
    def __init__(self, *args, **kwargs):
        """Construct the whole widget tree; no calculation happens here."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setWindowTitle("Photoelectron refraction")
        # Font (normal)
        font=QtGui.QFont()
        font.setFamilies(Config.fontFamilies)
        font.setPixelSize(Config.fontSize_normal)
        # Font (bold)
        bFont=QtGui.QFont(font)
        bFont.setBold(True)
        # Frame (Vertical box layout)
        vbox=QtGui.QVBoxLayout()
        vbox.setContentsMargins(*Config.ContentsMargins)
        vbox.setAlignment(QtCore.Qt.AlignTop)
        mainWidget=QtGui.QWidget()
        mainWidget.setLayout(vbox)
        self.setCentralWidget(mainWidget)
        # Row 1: potential
        row1=QtGui.QHBoxLayout()
        row1.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row1)
        label1A=QtGui.QLabel("Work function (eV)")
        label1A.setFont(bFont)
        row1.addWidget(label1A)
        self.Wtext=QtGui.QLineEdit("5")
        row1.addWidget(self.Wtext)
        label1B=QtGui.QLabel("Inner potential (eV)")
        label1B.setFont(bFont)
        row1.addWidget(label1B)
        self.V0text=QtGui.QLineEdit("12")
        row1.addWidget(self.V0text)
        # Row 2: initial state
        row2=QtGui.QHBoxLayout()
        row2.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row2)
        label2A=QtGui.QLabel("Parabola paramter")
        label2A.setFont(bFont)
        row2.addWidget(label2A)
        self.atext=QtGui.QLineEdit("-1")
        row2.addWidget(self.atext)
        label2B=QtGui.QLabel("Energy of the top from EF (eV)")
        label2B.setFont(bFont)
        row2.addWidget(label2B)
        self.V1text=QtGui.QLineEdit("0")
        row2.addWidget(self.V1text)
        label2C=QtGui.QLabel("Coordinate of the top (Ang^-1)")
        label2C.setFont(bFont)
        row2.addWidget(label2C)
        self.k0xtext=QtGui.QLineEdit("0")
        row2.addWidget(self.k0xtext)
        self.k0ytext=QtGui.QLineEdit("0")
        row2.addWidget(self.k0ytext)
        self.k0ztext=QtGui.QLineEdit("10")
        row2.addWidget(self.k0ztext)
        # Row 3: reciprocal space (flat vs curved k plane, mutually exclusive)
        row3=QtGui.QHBoxLayout()
        row3.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row3)
        self.kPlaneChoice=QtGui.QButtonGroup()
        self.kFlat=QtGui.QRadioButton("Flat k plane")
        self.kFlat.setChecked(True)
        self.kFlat.setFont(bFont)
        row3.addWidget(self.kFlat)
        self.kPlaneChoice.addButton(self.kFlat)
        label3A=QtGui.QLabel("( kz = ")
        row3.addWidget(label3A)
        self.kFlat_kz=QtGui.QLineEdit("10")
        row3.addWidget(self.kFlat_kz)
        label3B=QtGui.QLabel(" Ang^-1)")
        row3.addWidget(label3B)
        self.kCurved=QtGui.QRadioButton("Curved k plane")
        self.kCurved.setFont(bFont)
        row3.addWidget(self.kCurved)
        self.kPlaneChoice.addButton(self.kCurved)
        label3C=QtGui.QLabel("( |k| = ")
        row3.addWidget(label3C)
        self.kCurved_k=QtGui.QLineEdit()
        row3.addWidget(self.kCurved_k)
        label3D=QtGui.QLabel(" Ang^-1)")
        row3.addWidget(label3D)
        # Row 4: surface (constant orientation vs random sampling, mutually exclusive)
        row4=QtGui.QHBoxLayout()
        row4.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row4)
        self.surfaceChoice=QtGui.QButtonGroup()
        self.surfaceConst=QtGui.QRadioButton("Constant surface")
        self.surfaceConst.setFont(bFont)
        self.surfaceConst.setChecked(True)
        row4.addWidget(self.surfaceConst)
        self.surfaceChoice.addButton(self.surfaceConst)
        label4A=QtGui.QLabel("( theta, phi = ")
        row4.addWidget(label4A)
        self.surfaceConst_theta=QtGui.QLineEdit("0")
        row4.addWidget(self.surfaceConst_theta)
        self.surfaceConst_phi=QtGui.QLineEdit("0")
        row4.addWidget(self.surfaceConst_phi)
        label4B=QtGui.QLabel(" (deg))")
        row4.addWidget(label4B)
        self.surfaceRandom=QtGui.QRadioButton("Random surface")
        self.surfaceRandom.setFont(bFont)
        row4.addWidget(self.surfaceRandom)
        self.surfaceChoice.addButton(self.surfaceRandom)
        label4C=QtGui.QLabel("( samples = ")
        row4.addWidget(label4C)
        self.surfaceRandom_samples=QtGui.QLineEdit()
        row4.addWidget(self.surfaceRandom_samples)
        label4D=QtGui.QLabel(")")
        row4.addWidget(label4D)
        # Row 5: grid (kx, ky, E ranges and point counts)
        row5=QtGui.QHBoxLayout()
        row5.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row5)
        label5A=QtGui.QLabel("kx range")
        label5A.setFont(bFont)
        row5.addWidget(label5A)
        label5B=QtGui.QLabel("Min and Max (Ang^-1)")
        row5.addWidget(label5B)
        self.kxMintext=QtGui.QLineEdit("-0.5")
        row5.addWidget(self.kxMintext)
        self.kxMaxtext=QtGui.QLineEdit("0.5")
        row5.addWidget(self.kxMaxtext)
        label5C=QtGui.QLabel("Count")
        row5.addWidget(label5C)
        self.kxCounttext=QtGui.QLineEdit("51")
        row5.addWidget(self.kxCounttext)
        label5D=QtGui.QLabel("ky range")
        label5D.setFont(bFont)
        row5.addWidget(label5D)
        label5E=QtGui.QLabel("Min and Max (Ang^-1)")
        row5.addWidget(label5E)
        self.kyMintext=QtGui.QLineEdit("-0.5")
        row5.addWidget(self.kyMintext)
        self.kyMaxtext=QtGui.QLineEdit("0.5")
        row5.addWidget(self.kyMaxtext)
        label5F=QtGui.QLabel("Count")
        row5.addWidget(label5F)
        self.kyCounttext=QtGui.QLineEdit("51")
        row5.addWidget(self.kyCounttext)
        label5G=QtGui.QLabel("E range")
        label5G.setFont(bFont)
        row5.addWidget(label5G)
        label5H=QtGui.QLabel("Min and Max from EF (eV)")
        row5.addWidget(label5H)
        self.eMintext=QtGui.QLineEdit("-2")
        row5.addWidget(self.eMintext)
        self.eMaxtext=QtGui.QLineEdit("0.5")
        row5.addWidget(self.eMaxtext)
        label5I=QtGui.QLabel("Count")
        row5.addWidget(label5I)
        self.eCounttext=QtGui.QLineEdit("51")
        row5.addWidget(self.eCounttext)
        # Row 6: broadening (Gaussian widths in k and E)
        row6=QtGui.QHBoxLayout()
        row6.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row6)
        label6A=QtGui.QLabel("k broadening (Ang^-1)")
        label6A.setFont(bFont)
        row6.addWidget(label6A)
        self.sigmaktext=QtGui.QLineEdit("0.01")
        row6.addWidget(self.sigmaktext)
        label6B=QtGui.QLabel("E broadening (eV)")
        label6B.setFont(bFont)
        row6.addWidget(label6B)
        self.sigmaetext=QtGui.QLineEdit("0.05")
        row6.addWidget(self.sigmaetext)
        # Row 7: start calculation, plot, export, import buttons
        row7=QtGui.QHBoxLayout()
        row7.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row7)
        self.startCalc=QtGui.QPushButton("Start calculation")
        row7.addWidget(self.startCalc)
        self.plotChoice=QtGui.QButtonGroup()
        self.plotDisp1=QtGui.QRadioButton("Original")
        self.plotDisp1.setFont(bFont)
        self.plotChoice.addButton(self.plotDisp1)
        row7.addWidget(self.plotDisp1)
        self.plotDisp2=QtGui.QRadioButton("Refracted")
        self.plotDisp2.setFont(bFont)
        self.plotChoice.addButton(self.plotDisp2)
        row7.addWidget(self.plotDisp2)
        self.export=QtGui.QPushButton("Export")
        row7.addWidget(self.export)
        self.importH5=QtGui.QPushButton("Import")
        row7.addWidget(self.importH5)
        # Row 8: kx, ky, and e index of guide lines
        row8=QtGui.QHBoxLayout()
        row8.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row8)
        label8A=QtGui.QLabel("kx index")
        row8.addWidget(label8A)
        self.kxIndex=QtGui.QSpinBox()
        self.kxIndex.setSingleStep(1)
        self.kxIndex.setMinimum(0)
        row8.addWidget(self.kxIndex)
        self.kxValue=QtGui.QLabel()
        row8.addWidget(self.kxValue)
        label8B=QtGui.QLabel("ky index")
        row8.addWidget(label8B)
        self.kyIndex=QtGui.QSpinBox()
        self.kyIndex.setSingleStep(1)
        self.kyIndex.setMinimum(0)
        row8.addWidget(self.kyIndex)
        self.kyValue=QtGui.QLabel()
        row8.addWidget(self.kyValue)
        label8C=QtGui.QLabel("Energy index")
        row8.addWidget(label8C)
        self.eIndex=QtGui.QSpinBox()
        self.eIndex.setSingleStep(1)
        self.eIndex.setMinimum(0)
        row8.addWidget(self.eIndex)
        self.eValue=QtGui.QLabel()
        row8.addWidget(self.eValue)
        # Row 9: dispersion graph (E-ky | kx-ky on top row, E-kx below)
        row9=QtGui.QHBoxLayout()
        row9.setAlignment(QtCore.Qt.AlignLeft)
        vbox.addLayout(row9)
        self.plot3=pg.GraphicsLayoutWidget()
        self.plot3.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        row9.addWidget(self.plot3)
        cmap=pg.colormap.get("CET-L9")
        labelStyle={"font-size":str(Config.fontSize_normal)+"px", "color": "white"}
        self.plotEy=self.plot3.addPlot()
        self.plotxy=self.plot3.addPlot()
        self.plot3.nextRow()
        self.plot3.nextColumn()
        self.plotEx=self.plot3.addPlot()
        self.imgEx=pg.ImageItem()
        self.imgEy=pg.ImageItem()
        self.imgxy=pg.ImageItem()
        self.plotEx.addItem(self.imgEx)
        self.plotEy.addItem(self.imgEy)
        self.plotxy.addItem(self.imgxy)
        # One color bar per image, fixed 0..1 range.
        self.barEx=pg.ColorBarItem(colorMap=cmap, values=(0,1))
        self.barEy=pg.ColorBarItem(colorMap=cmap, values=(0,1))
        self.barxy=pg.ColorBarItem(colorMap=cmap, values=(0,1))
        self.barEx.setImageItem(self.imgEx)
        self.barEy.setImageItem(self.imgEy)
        self.barxy.setImageItem(self.imgxy)
        # Identical white axis/tick/grid styling for the three plots.
        self.plotEx.getAxis("bottom").setStyle(tickFont=font,tickLength=Config.tickLength)
        self.plotEx.getAxis("left").setStyle(tickFont=font,tickLength=Config.tickLength)
        self.plotEx.getAxis("bottom").setPen((255,255,255))
        self.plotEx.getAxis("left").setPen((255,255,255))
        self.plotEx.getAxis("bottom").setTextPen((255,255,255))
        self.plotEx.getAxis("left").setTextPen((255,255,255))
        self.plotEx.getAxis("bottom").setLabel(**labelStyle)
        self.plotEx.getAxis("left").setLabel(**labelStyle)
        self.plotEx.showGrid(x=True, y=True, alpha=1.0)
        self.plotEx.getAxis("bottom").setZValue(1)
        self.plotEx.getAxis("left").setZValue(1)
        self.plotEy.getAxis("bottom").setStyle(tickFont=font,tickLength=Config.tickLength)
        self.plotEy.getAxis("left").setStyle(tickFont=font,tickLength=Config.tickLength)
        self.plotEy.getAxis("bottom").setPen((255,255,255))
        self.plotEy.getAxis("left").setPen((255,255,255))
        self.plotEy.getAxis("bottom").setTextPen((255,255,255))
        self.plotEy.getAxis("left").setTextPen((255,255,255))
        self.plotEy.getAxis("bottom").setLabel(**labelStyle)
        self.plotEy.getAxis("left").setLabel(**labelStyle)
        self.plotEy.showGrid(x=True, y=True, alpha=1.0)
        self.plotEy.getAxis("bottom").setZValue(1)
        self.plotEy.getAxis("left").setZValue(1)
        self.plotxy.getAxis("bottom").setStyle(tickFont=font,tickLength=Config.tickLength)
        self.plotxy.getAxis("left").setStyle(tickFont=font,tickLength=Config.tickLength)
        self.plotxy.getAxis("bottom").setPen((255,255,255))
        self.plotxy.getAxis("left").setPen((255,255,255))
        self.plotxy.getAxis("bottom").setTextPen((255,255,255))
        self.plotxy.getAxis("left").setTextPen((255,255,255))
        self.plotxy.getAxis("bottom").setLabel(**labelStyle)
        self.plotxy.getAxis("left").setLabel(**labelStyle)
        self.plotxy.showGrid(x=True, y=True, alpha=1.0)
        self.plotxy.getAxis("bottom").setZValue(1)
        self.plotxy.getAxis("left").setZValue(1)
        # Axis labels for the three cross sections.
        self.plotEx.setLabel(axis="left", text="E-EF (eV)")
        self.plotEx.setLabel(axis="bottom", text="kx (ang^-1)")
        self.plotEy.setLabel(axis="bottom", text="E-EF (eV)")
        self.plotEy.setLabel(axis="left", text="ky (ang^-1)")
        self.plotxy.setLabel(axis="left", text="ky (ang^-1)")
        self.plotxy.setLabel(axis="bottom", text="kx (ang^-1)")
        # Guide lines marking the current kx/ky/E indices (positions set in plotDisp).
        self.vLineEx=pg.InfiniteLine(angle=90, movable=False, pen=Config.pen1)
        self.plotEx.addItem(self.vLineEx, ignoreBounds=True)
        self.hLineEx=pg.InfiniteLine(angle=0, movable=False, pen=Config.pen1)
        self.plotEx.addItem(self.hLineEx, ignoreBounds=True)
        self.vLineEy=pg.InfiniteLine(angle=90, movable=False, pen=Config.pen1)
        self.plotEy.addItem(self.vLineEy, ignoreBounds=True)
        self.hLineEy=pg.InfiniteLine(angle=0, movable=False, pen=Config.pen1)
        self.plotEy.addItem(self.hLineEy, ignoreBounds=True)
        self.vLinexy=pg.InfiniteLine(angle=90, movable=False, pen=Config.pen1)
        self.plotxy.addItem(self.vLinexy, ignoreBounds=True)
        self.hLinexy=pg.InfiniteLine(angle=0, movable=False, pen=Config.pen1)
        self.plotxy.addItem(self.hLinexy, ignoreBounds=True)
        # Key event to move guide lines and plots
        def changeKXYIndices(e):
            # Arrow keys move kx/ky, PageUp/PageDown move the energy index;
            # the spin boxes' valueChanged handlers (connected elsewhere) redraw.
            if e.key()==QtCore.Qt.Key_Down:
                self.kyIndex.setValue(self.kyIndex.value()-1)
            elif e.key()==QtCore.Qt.Key_Up:
                self.kyIndex.setValue(self.kyIndex.value()+1)
            elif e.key()==QtCore.Qt.Key_Right:
                self.kxIndex.setValue(self.kxIndex.value()+1)
            elif e.key()==QtCore.Qt.Key_Left:
                self.kxIndex.setValue(self.kxIndex.value()-1)
            elif e.key()==QtCore.Qt.Key_PageUp:
                self.eIndex.setValue(self.eIndex.value()+1)
            elif e.key()==QtCore.Qt.Key_PageDown:
                self.eIndex.setValue(self.eIndex.value()-1)
            else:
                return
        self.plot3.keyPressEvent=changeKXYIndices
# Create the Qt application and the main window, then apply the default font.
app=QtGui.QApplication([])
win=MainWindow()
font=QtGui.QFont()
font.setPixelSize(Config.fontSize_normal)
font.setFamilies(Config.fontFamilies)
win.setFont(font)
# Original dispersion (numpy array [kx][ky][E]); filled by startCalc()/importDisp()
dispCube1=None
# Refracted dispersion (numpy array [kx][ky][E]); filled by startCalc()/importDisp()
dispCube2=None
# Execute calculation ("Start calculation" button)
def startCalc():
    """Read all GUI parameters, convert them to atomic units, and compute
    the original (dispCube1) and refracted (dispCube2) dispersion cubes
    via lib.calc1 / lib.calc2.  Invalid input aborts with a printed error.
    """
    print("----")
    print("Check input parameters...")
    # Load parameters from GUI, convert to the atomic unit
    try:
        # W: work function
        W_eV=float(win.Wtext.text())
        W=W_eV/Config.Eh_eV
        print(("{0:32s} = {1:.2f} eV = {2:.2f} Eh").format("Work function", W_eV, W))
        # V0: inner potential
        V0_eV=float(win.V0text.text())
        V0=V0_eV/Config.Eh_eV
        print(("{0:32s} = {1:.2f} eV = {2:.2f} Eh").format("Inner potential", V0_eV, V0))
        # a: parabola parameter
        a=float(win.atext.text())
        # V1: Energy of parabola top
        V1_eV=float(win.V1text.text())
        V1=V1_eV/Config.Eh_eV
        # k0: Origin of the parabola dispersion in the reciprocal space
        k0_ang=np.zeros((3))
        k0_ang[0]=float(win.k0xtext.text())
        k0_ang[1]=float(win.k0ytext.text())
        k0_ang[2]=float(win.k0ztext.text())
        # NOTE(review): Ang^-1 -> Bohr^-1 conversion multiplies by Bohr_ang
        # (k scales inversely to length), consistent with the other k inputs.
        k0=k0_ang*Config.Bohr_ang
        print(("{0:32s} = (k - ({1:.2f}, {2:.2f}, {3:.2f}))^2 * {4:.2f} /2 - {5:.2f} + {6:.2f} (Eh)").format("Initial state dispersion", k0[0], k0[1], k0[2], a, W, V1))
        # Flatness of the plane in the reciprocal space
        kFlat=True
        if win.kFlat.isChecked()==True:
            pass
        elif win.kCurved.isChecked()==True:
            kFlat=False
        else:
            print("Error: kFlat or kCurved should be checked")
            return
        # Parameters to specify the plane
        kFlat_kz=0
        kCurved_k=0
        if kFlat==True:
            kFlat_kz_ang=float(win.kFlat_kz.text())
            kFlat_kz=kFlat_kz_ang*Config.Bohr_ang
            print(("{0:32s} = flat, kz = {1:.2f} Ang^-1 = {2:.2f} Bohr^-1").format("k plane", kFlat_kz_ang, kFlat_kz))
        else:
            kCurved_k_ang=float(win.kCurved_k.text())
            kCurved_k=kCurved_k_ang*Config.Bohr_ang
            print(("{0:32s} = curved, |k| = {1:.2f} Ang^-1 = {2:.2f} Bohr^-1").format("k plane", kCurved_k_ang, kCurved_k))
        # Random or constant crystal surface, where the refraction happens
        surfaceConst=True
        if win.surfaceConst.isChecked()==True:
            pass
        elif win.surfaceRandom.isChecked()==True:
            surfaceConst=False
        else:
            print("Error: surfaceConst or surfaceRandom should be checked")
            return
        # Parameters to specify the crystal surface
        surfaceConst_theta=0
        surfaceConst_phi=0
        surfaceRandom_samples=0
        if surfaceConst==True:
            surfaceConst_theta_deg=float(win.surfaceConst_theta.text())
            surfaceConst_theta=math.radians(surfaceConst_theta_deg)
            surfaceConst_phi_deg=float(win.surfaceConst_phi.text())
            surfaceConst_phi=math.radians(surfaceConst_phi_deg)
            print(("{0:32s} = constant, theta = {1:.1f} deg = {2:.2f} rad, phi = {3:.1f} deg = {4:.2f} rad").format("Surface orientation", surfaceConst_theta_deg, surfaceConst_theta, surfaceConst_phi_deg, surfaceConst_phi))
        else:
            surfaceRandom_samples=int(win.surfaceRandom_samples.text())
            print(("{0:32s} = random, samples = {1:d}").format("Surface orientation", surfaceRandom_samples))
        # Calculation range of kx
        kxMin_ang=float(win.kxMintext.text())
        kxMin=kxMin_ang*Config.Bohr_ang
        kxMax_ang=float(win.kxMaxtext.text())
        kxMax=kxMax_ang*Config.Bohr_ang
        kxCount=int(win.kxCounttext.text())
        print(("{0:32s} = {1:.2f} to {2:.2f}, {3:d} points").format("kx range (Ang^-1)", kxMin_ang, kxMax_ang, kxCount))
        print(("{0:32s} = {1:.2f} to {2:.2f}, {3:d} points").format("kx range (Bohr^-1)", kxMin, kxMax, kxCount))
        # Calculation range of ky
        kyMin_ang=float(win.kyMintext.text())
        kyMin=kyMin_ang*Config.Bohr_ang
        kyMax_ang=float(win.kyMaxtext.text())
        kyMax=kyMax_ang*Config.Bohr_ang
        kyCount=int(win.kyCounttext.text())
        print(("{0:32s} = {1:.2f} to {2:.2f}, {3:d} points").format("ky range (Ang^-1)", kyMin_ang, kyMax_ang, kyCount))
        print(("{0:32s} = {1:.2f} to {2:.2f}, {3:d} points").format("ky range (Bohr^-1)", kyMin, kyMax, kyCount))
        # Calculation range of energy
        ## Please be careful that the inputs are the binding energies
        eMin_eV=float(win.eMintext.text())
        eMin=eMin_eV/Config.Eh_eV-W
        eMax_eV=float(win.eMaxtext.text())
        eMax=eMax_eV/Config.Eh_eV-W
        eCount=int(win.eCounttext.text())
        print(("{0:32s} = {1:.2f} to {2:.2f}, {3:d} points").format("E range from EF (eV)", eMin_eV, eMax_eV, eCount))
        print(("{0:32s} = {1:.2f} to {2:.2f}, {3:d} points").format("E range from Vacuum (eV)", eMin*Config.Eh_eV, eMax*Config.Eh_eV, eCount))
        print(("{0:32s} = {1:.2f} to {2:.2f}, {3:d} points").format("E range from Vacuum (Eh)", eMin, eMax, eCount))
        # Broadening parameters
        sigmak_ang=float(win.sigmaktext.text())
        sigmak=sigmak_ang*Config.Bohr_ang
        sigmae_eV=float(win.sigmaetext.text())
        sigmae=sigmae_eV/Config.Eh_eV
        print(("{0:32s} = {1:.2f} Ang^-1 = {2:.2f} Bohr^-1").format("k broadening", sigmak_ang, sigmak))
        print(("{0:32s} = {1:.2f} eV = {2:.3f} Eh").format("E broadening", sigmae_eV, sigmae))
        # size of 1 pixel, calculated from minimum, maximum, and number of points
        dkx=(kxMax-kxMin)/(kxCount-1)
        dky=(kyMax-kyMin)/(kyCount-1)
        de=(eMax-eMin)/(eCount-1)
        print(("{0:32s} = {1:.3f} Ang^-1, {2:.3f} Ang^-1, {3:.3f} eV").format("dkx, dky, de", dkx/Config.Bohr_ang, dky/Config.Bohr_ang, de*Config.Eh_eV))
        print(("{0:32s} = {1:.3f} Bohr^-1, {2:.3f} Bohr^-1, {3:.3f} Eh").format("dkx, dky, de", dkx, dky, de))
    except Exception as e:
        # Any malformed field aborts the whole calculation.
        print(e)
        return
    # Calculate original dispersion
    global dispCube1
    dispCube1=np.zeros((kxCount, kyCount, eCount))
    lib.calc1(W, V0, k0, a, V1, kFlat, kFlat_kz, kCurved_k, kxMin, kxMax, kxCount, dkx, kyMin, kyMax, kyCount, dky, eMin, eMax, eCount, de, sigmak, sigmae, dispCube1)
    # Calculate refracted dispersion
    global dispCube2
    dispCube2=np.zeros(dispCube1.shape)
    lib.calc2(W, V0, k0, a, V1, kFlat, kFlat_kz, kCurved_k, surfaceConst, surfaceConst_theta, surfaceConst_phi, surfaceRandom_samples, kxMin, kxMax, kxCount, dkx, kyMin, kyMax, kyCount, dky, eMin, eMax, eCount, de, sigmak, sigmae, dispCube2)
    # Clamp the index spin boxes to the new cube size, then redraw.
    win.kxIndex.setMaximum(kxCount-1)
    win.kyIndex.setMaximum(kyCount-1)
    win.eIndex.setMaximum(eCount-1)
    print("Calculation finished")
    plotDisp()
# Plot dispersion
def plotDisp():
global dispCube1
global dispCube2
if dispCube1 is None or dispCube2 is None:
return
# Parameters to specify plots (cross sections of a cube)
kxIndex=win.kxIndex.value()
kyIndex=win.kyIndex.value()
eIndex=win.eIndex.value()
# Load parameters
try:
kxMin=float(win.kxMintext.text())
kxMax=float(win.kxMaxtext.text())
kxCount=int(win.kxCounttext.text())
kyMin=float(win.kyMintext.text())
kyMax=float(win.kyMaxtext.text())
kyCount=int(win.kyCounttext.text())
eMin=float(win.eMintext.text())
eMax=float(win.eMaxtext.text())
eCount=int(win.eCounttext.text())
dkx=(kxMax-kxMin)/(kxCount-1)
dky=(kyMax-kyMin)/(kyCount-1)
de=(eMax-eMin)/(eCount-1)
except Exception as e:
print(e)
return
if dispCube1.shape[0]!=kxCount or dispCube1.shape[1]!=kyCount or dispCube1.shape[2]!=eCount or dispCube2.shape[0]!=kxCount or dispCube2.shape[1]!=kyCount or dispCube2.shape[2]!=eCount:
print("Plot error: size mismatch")
return
kxValue=kxMin+dkx*kxIndex
kyValue=kyMin+dky*kyIndex
eValue=eMin+de*eIndex
# Display kx, ky, and e values calculated from indices
win.kxValue.setText(("({0:.3f})").format(kxValue))
win.kyValue.setText(("({0:.3f})").format(kyValue))
win.eValue.setText(("({0:.3f})").format(eValue))
# Draw guide lines
win.vLineEx.setPos(kxValue)
win.hLineEx.setPos(eValue)
win.vLineEy.setPos(eValue)
win.hLineEy.setPos(kyValue)
win.vLinexy.setPos(kxValue)
win.hLinexy.setPos(kyValue)
# Obtain plots (cross sections)
if win.plotDisp1.isChecked()==True:
Ex=dispCube1[:,kyIndex,:]
Ey=dispCube1[kxIndex,:,:]
xy=dispCube1[:,:,eIndex]
elif win.plotDisp2.isChecked()==True:
Ex=dispCube2[:,kyIndex,:]
Ey=dispCube2[kxIndex,:,:]
xy=dispCube2[:,:,eIndex]
else:
return
# Set plot data
tr_Ex=QtGui.QTransform()
tr_Ex.translate(kxMin-dkx/2,eMin-de/2)
tr_Ex.scale(dkx, de)
win.imgEx.setTransform(tr_Ex)
tr_Ey=QtGui.QTransform()
tr_Ey.translate(eMin-de/2, kyMin-dky/2)
tr_Ey.rotate(-90)
tr_Ey.scale(-dky, de)
win.imgEy.setTransform(tr_Ey)
tr_xy=QtGui.QTransform()
tr_xy.translate(kxMin-dkx/2, kyMin-dky/2)
tr_xy.scale(dky, dky)
win.imgxy.setTransform(tr_xy)
win.imgEx.setImage(Ex)
win.imgEy.setImage(Ey)
win.imgxy.setImage(xy)
# import dispersion from a HDF5 file
# This function is also available for HDF5 files output from the C++ version
def importDisp():
    """Load both dispersion cubes and all parameters from an HDF5 file
    chosen in a file dialog, push them back into the GUI, and redraw.
    """
    global dispCube1
    global dispCube2
    # A dialog window to select a file
    selectedFile, _filter=QtGui.QFileDialog.getOpenFileName(caption="Open file")
    if selectedFile!="":
        # load data
        with h5py.File(selectedFile, "r") as f:
            dispCube1=np.array(f["Original"])
            dispCube2=np.array(f["Refracted"])
            # Grid definition: [kx, ky, E] origin, step, and point count.
            offset=np.array(f.attrs["Offset"])
            delta=np.array(f.attrs["Delta"])
            size=np.array(f.attrs["Size"])
            win.kxMintext.setText(("{0:f}").format(offset[0]))
            win.kyMintext.setText(("{0:f}").format(offset[1]))
            win.eMintext.setText(("{0:f}").format(offset[2]))
            # Max is reconstructed from offset + delta * (count - 1).
            win.kxMaxtext.setText(("{0:f}").format(offset[0]+delta[0]*(size[0]-1)))
            win.kyMaxtext.setText(("{0:f}").format(offset[1]+delta[1]*(size[1]-1)))
            win.eMaxtext.setText(("{0:f}").format(offset[2]+delta[2]*(size[2]-1)))
            win.kxCounttext.setText(("{0:d}").format(size[0]))
            win.kyCounttext.setText(("{0:d}").format(size[1]))
            win.eCounttext.setText(("{0:d}").format(size[2]))
            win.kxIndex.setMaximum(size[0]-1)
            win.kyIndex.setMaximum(size[1]-1)
            win.eIndex.setMaximum(size[2]-1)
            # Physics parameters back into the input fields.
            win.Wtext.setText(("{0:f}").format(f.attrs["W"]))
            win.V0text.setText(("{0:f}").format(f.attrs["V0"]))
            win.V1text.setText(("{0:f}").format(f.attrs["V1"]))
            win.atext.setText(("{0:f}").format(f.attrs["a"]))
            k0=np.array(f.attrs["k0"])
            win.k0xtext.setText(("{0:f}").format(k0[0]))
            win.k0ytext.setText(("{0:f}").format(k0[1]))
            win.k0ztext.setText(("{0:f}").format(k0[2]))
            win.sigmaktext.setText(("{0:f}").format(f.attrs["sigmak"]))
            win.sigmaetext.setText(("{0:f}").format(f.attrs["sigmae"]))
            # Restore the k-plane radio buttons and their parameter field.
            kPlane=f.attrs["kPlane"]
            if kPlane=="Flat":
                win.kFlat.setChecked(True)
                win.kCurved.setChecked(False)
                win.kFlat_kz.setText(("{0:f}").format(f.attrs["kPlane_kz"]))
            elif kPlane=="Curved":
                win.kFlat.setChecked(False)
                win.kCurved.setChecked(True)
                win.kCurved_k.setText(("{0:f}").format(f.attrs["kPlane_k"]))
            else:
                print(("Error: invalid kPlane {0:s}").format(kPlane))
            # Restore the surface radio buttons and their parameter fields.
            surfaceConst=f.attrs["Surface"]
            if surfaceConst=="Constant":
                win.surfaceConst.setChecked(True)
                win.surfaceRandom.setChecked(False)
                win.surfaceConst_theta.setText(("{0:f}").format(f.attrs["Surface_theta"]))
                win.surfaceConst_phi.setText(("{0:f}").format(f.attrs["Surface_phi"]))
            elif surfaceConst=="Random":
                win.surfaceConst.setChecked(False)
                win.surfaceRandom.setChecked(True)
                win.surfaceRandom_samples.setText(("{0:d}").format(f.attrs["Surface_samples"]))
        print("Import finished")
        plotDisp()
# Export dispersion to a HDF5 file
def exportDisp():
    """Write both dispersion cubes and every GUI parameter to an HDF5 file.

    Inverse of importDisp: reads all parameters back out of the GUI widgets,
    asks the user for a target path, then stores the cubes as datasets and
    the parameters as root attributes.
    """
    global dispCube1
    global dispCube2
    # Load parameters from GUI; any parse/widget error aborts the export.
    try:
        kxMin=float(win.kxMintext.text())
        kxMax=float(win.kxMaxtext.text())
        kxCount=int(win.kxCounttext.text())
        kyMin=float(win.kyMintext.text())
        kyMax=float(win.kyMaxtext.text())
        kyCount=int(win.kyCounttext.text())
        eMin=float(win.eMintext.text())
        eMax=float(win.eMaxtext.text())
        eCount=int(win.eCounttext.text())
        # Grid spacings; NOTE(review): a count of 1 divides by zero here —
        # presumably the GUI guarantees counts > 1; confirm.
        dkx=(kxMax-kxMin)/(kxCount-1)
        dky=(kyMax-kyMin)/(kyCount-1)
        de=(eMax-eMin)/(eCount-1)
        W=float(win.Wtext.text())
        V0=float(win.V0text.text())
        a=float(win.atext.text())
        V1=float(win.V1text.text())
        k0=np.zeros((3))
        k0[0]=float(win.k0xtext.text())
        k0[1]=float(win.k0ytext.text())
        k0[2]=float(win.k0ztext.text())
        # Which k-plane mode is selected; neither checked means an invalid
        # GUI state, so bail out silently.
        kFlat=True
        if win.kFlat.isChecked()==True:
            pass
        elif win.kCurved.isChecked()==True:
            kFlat=False
        else:
            return
        kFlat_kz=0
        kCurved_k=0
        if kFlat==True:
            kFlat_kz=float(win.kFlat_kz.text())
        else:
            kCurved_k=float(win.kCurved_k.text())
        # Constant vs. random surface normal selection.
        surfaceConst=True
        if win.surfaceConst.isChecked()==True:
            pass
        elif win.surfaceRandom.isChecked()==True:
            surfaceConst=False
        else:
            return
        surfaceConst_theta=0
        surfaceConst_phi=0
        surfaceRandom_samples=0
        if surfaceConst==True:
            surfaceConst_theta=float(win.surfaceConst_theta.text())
            surfaceConst_phi=float(win.surfaceConst_phi.text())
        else:
            surfaceRandom_samples=int(win.surfaceRandom_samples.text())
        sigmak=float(win.sigmaktext.text())
        sigmae=float(win.sigmaetext.text())
    except Exception as e:
        print(e)
        return
    # A dialog window to select a file.
    # NOTE(review): QtGui.QFileDialog implies a Qt4-style binding; confirm it
    # matches whatever Qt binding this file imports.
    selectedFile, _filter=QtGui.QFileDialog.getSaveFileName(caption="Open file")
    if selectedFile!="":
        # Write data: two datasets plus the parameter set as attributes,
        # mirroring exactly what importDisp reads back.
        with h5py.File(selectedFile, "w") as f:
            f.attrs.create("Datetime", datetime.now().isoformat(" "))
            f.create_dataset("Original", data=dispCube1)
            f.create_dataset("Refracted", data=dispCube2)
            f.attrs.create("Offset", [kxMin, kyMin, eMin])
            f.attrs.create("Delta", [dkx, dky, de])
            f.attrs.create("Size", [kxCount, kyCount, eCount])
            f.attrs.create("W", W)
            f.attrs.create("V0", V0)
            f.attrs.create("V1", V1)
            f.attrs.create("a", a)
            f.attrs.create("k0", k0)
            f.attrs.create("sigmak", sigmak)
            f.attrs.create("sigmae", sigmae)
            if kFlat==True:
                f.attrs.create("kPlane", "Flat")
                f.attrs.create("kPlane_kz", kFlat_kz)
            else:
                f.attrs.create("kPlane", "Curved")
                f.attrs.create("kPlane_k", kCurved_k)
            if surfaceConst==True:
                f.attrs.create("Surface", "Constant")
                f.attrs.create("Surface_theta", surfaceConst_theta)
                f.attrs.create("Surface_phi", surfaceConst_phi)
            else:
                f.attrs.create("Surface", "Random")
                f.attrs.create("Surface_samples", surfaceRandom_samples)
# Connect buttons and event functions
win.startCalc.clicked.connect(startCalc)
win.plotDisp1.clicked.connect(plotDisp)
win.plotDisp2.clicked.connect(plotDisp)
win.export.clicked.connect(exportDisp)
win.importH5.clicked.connect(importDisp)
win.kxIndex.valueChanged.connect(plotDisp)
win.kyIndex.valueChanged.connect(plotDisp)
win.eIndex.valueChanged.connect(plotDisp)
pg.setConfigOptions(antialias=True)
win.show()
app.exec_()
|
Hiroaki-Tanaka-0606/Photoelectron_refraction | Python/lib.py | <filename>Python/lib.py<gh_stars>0
# Photoelectron_refraction
# Calculation library
import math
import numpy as np
import Config
import random
# gauss function
def gauss(x, s):
    """Value at x of a normal probability density with mean 0 and std s."""
    norm_factor = 1.0 / (math.sqrt(2 * math.pi) * s)
    return norm_factor * math.exp(-x * x / (s * s * 2))
# broadening profile
def profileCube(dkx, dky, de, sigmak, sigmae, sigmaMax):
    """Build the Gaussian broadening kernel sampled on the (kx, ky, E) grid.

    The kernel is truncated at sigmaMax standard deviations along each axis
    and normalized by the cell volume dkx*dky*de, so its sum approximates 1.
    Returns (cube, kxLast, kyLast, eLast) where the *Last values are the
    half-widths, i.e. the kernel center sits at those indices.
    """
    kxLast = math.ceil(sigmak * sigmaMax / dkx)
    kyLast = math.ceil(sigmak * sigmaMax / dky)
    eLast = math.ceil(sigmae * sigmaMax / de)
    cube = np.zeros((2 * kxLast + 1, 2 * kyLast + 1, 2 * eLast + 1))
    # The Gaussian is even, so iterate the full symmetric index ranges
    # directly instead of mirroring one octant into the other seven.
    for di in range(-kxLast, kxLast + 1):
        for dj in range(-kyLast, kyLast + 1):
            for dk in range(-eLast, eLast + 1):
                cube[kxLast + di][kyLast + dj][eLast + dk] = gauss(abs(di) * dkx, sigmak) * gauss(abs(dj) * dky, sigmak) * gauss(abs(dk) * de, sigmae) * dkx * dky * de
    return cube, kxLast, kyLast, eLast
# Original dispersion
def calc1(W, V0, k0, a, V1, kFlat, kFlat_kz, kCurved_k, kxMin, kxMax, kxCount, dkx, kyMin, kyMax, kyCount, dky, eMin, eMax, eCount, de, sigmak, sigmae, dispCube1):
    """Accumulate the original (unrefracted) dispersion into dispCube1.

    For every (kx, ky) grid point the band energy esk is evaluated from the
    quadratic model (a/2)|k - k0|^2 - W + V1 and the Gaussian broadening
    profile is added into dispCube1 centered at that grid point and energy.
    dispCube1 is modified in place; nothing is returned.
    """
    profile, kxCenter, kyCenter, eCenter=profileCube(dkx, dky, de, sigmak, sigmae, Config.sigmaMax)
    k=np.zeros((3))
    for i in range(kxCount):
        k[0]=kxMin+dkx*i
        for j in range(kyCount):
            k[1]=kyMin+dky*j
            # kz is either fixed (flat k-plane) or taken on a sphere of
            # radius kCurved_k.
            if kFlat==True:
                k[2]=kFlat_kz
            if kFlat==False:
                # NOTE(review): raises ValueError when kx^2+ky^2 exceeds
                # kCurved_k^2 — presumably the grid is chosen to avoid this.
                k[2]=math.sqrt(kCurved_k*kCurved_k-k[0]*k[0]-k[1]*k[1])
            kdiff=k-k0
            # Model band energy and its nearest energy-grid index.
            esk=np.inner(kdiff, kdiff)*a/2.0-W+V1
            esk_index=round((esk-eMin)/de)
            # Stamp the broadening profile, clipped to the cube bounds.
            for i2 in range(-kxCenter, kxCenter+1):
                i3=i+i2
                for j2 in range(-kyCenter, kyCenter+1):
                    j3=j+j2
                    for k2 in range(-eCenter, eCenter+1):
                        k3=esk_index+k2
                        if 0<=i3 and i3<kxCount and 0<=j3 and j3<kyCount and 0<=k3 and k3<eCount:
                            dispCube1[i3][j3][k3]+=profile[i2+kxCenter][j2+kyCenter][k2+eCenter]
# Refracted dispersion
def _accumulate(disp, profile, i0, j0, e0, kxCenter, kyCenter, eCenter, kxCount, kyCount, eCount, divisor=1.0):
    """Add profile/divisor into disp centered at grid indices (i0, j0, e0).

    Indices that fall outside the cube are skipped. Dividing (rather than
    pre-multiplying a scale) keeps the arithmetic bit-identical to the
    previous inlined loops, where profile values were divided per sample.
    """
    for i2 in range(-kxCenter, kxCenter+1):
        i3 = i0 + i2
        if i3 < 0 or i3 >= kxCount:
            continue
        for j2 in range(-kyCenter, kyCenter+1):
            j3 = j0 + j2
            if j3 < 0 or j3 >= kyCount:
                continue
            for k2 in range(-eCenter, eCenter+1):
                k3 = e0 + k2
                if 0 <= k3 and k3 < eCount:
                    disp[i3][j3][k3] += profile[i2+kxCenter][j2+kyCenter][k2+eCenter]/divisor

def calc2(W, V0, k0, a, V1, kFlat, kFlat_kz, kCurved_k, surfaceConst, surfaceConst_theta, surfaceConst_phi, surfaceRandom_samples, kxMin, kxMax, kxCount, dkx, kyMin, kyMax, kyCount, dky, eMin, eMax, eCount, de, sigmak, sigmae, dispCube2):
    """Accumulate the refracted dispersion into dispCube2 (in place).

    Same band model as calc1, but each emission wavevector k is refracted
    through the surface (constant normal, or an average over
    surfaceRandom_samples random normals) via calcK before the broadening
    profile is stamped at the refracted in-plane position. Fully reflected
    states (calcK returns None) are dropped.
    """
    profile, kxCenter, kyCenter, eCenter=profileCube(dkx, dky, de, sigmak, sigmae, Config.sigmaMax)
    k=np.zeros((3))
    n=np.zeros((3))
    # Constant surface normal from polar angles (theta, phi).
    if surfaceConst==True:
        n[0]=math.sin(surfaceConst_theta)*math.cos(surfaceConst_phi)
        n[1]=math.sin(surfaceConst_theta)*math.sin(surfaceConst_phi)
        n[2]=math.cos(surfaceConst_theta)
    # Generate the random surface normals once, up front.
    if surfaceConst==False:
        nList=np.zeros((surfaceRandom_samples, 3))
        for i in range(surfaceRandom_samples):
            nList[i]=genSurface()
    for i in range(kxCount):
        print(i)  # progress indicator — this loop is slow
        k[0]=kxMin+dkx*i
        for j in range(kyCount):
            k[1]=kyMin+dky*j
            if kFlat==True:
                k[2]=kFlat_kz
            if kFlat==False:
                k[2]=math.sqrt(kCurved_k*kCurved_k-k[0]*k[0]-k[1]*k[1])
            kdiff=k-k0
            esk=np.inner(kdiff, kdiff)*a/2.0-W+V1
            # Kinetic energy outside the solid (inner potential V0).
            epk=np.inner(k, k)/2.0-V0
            eK=round((esk-eMin)/de)
            if surfaceConst==True:
                # Constant surface: one refraction per k point.
                K=calcK(k, epk, n)
                if K is None:
                    continue  # full reflection — nothing escapes
                _accumulate(dispCube2, profile, round((K[0]-kxMin)/dkx), round((K[1]-kyMin)/dky), eK,
                            kxCenter, kyCenter, eCenter, kxCount, kyCount, eCount)
            else:
                # Random surfaces: average the contribution over all samples.
                for t in range(surfaceRandom_samples):
                    n=nList[t]
                    K=calcK(k, epk, n)
                    if K is None:
                        continue
                    _accumulate(dispCube2, profile, round((K[0]-kxMin)/dkx), round((K[1]-kyMin)/dky), eK,
                                kxCenter, kyCenter, eCenter, kxCount, kyCount, eCount,
                                divisor=surfaceRandom_samples)
# Calculate refracted wavevector K
def calcK(k, epk, n):
    """Refract wavevector k through a surface with unit normal n.

    epk is the electron kinetic energy outside the solid; the in-plane
    component of k is conserved while the normal component is rescaled so
    that |K|^2 = 2*epk. Returns None for full internal reflection (not
    enough energy for a real normal component) or for electrons travelling
    into the bulk (k·n < 0).
    """
    K_len = math.sqrt(2 * epk)          # |K| outside the solid
    k_perp = np.inner(k, n)             # normal component of k
    k_para = k - k_perp * n             # in-plane component, conserved
    K_perp_sq = K_len * K_len - np.inner(k_para, k_para)
    if K_perp_sq < 0 or k_perp < 0:
        return None
    return k_para + math.sqrt(K_perp_sq) * n
# Generate random surface
def genSurface():
    """Draw a random upward-facing unit surface normal.

    Rejection-samples a vector from the half-box [-1,1] x [-1,1] x [0,1],
    keeping only lengths in (0.1, 1) so the normalized direction is uniform
    over the upper hemisphere, then returns the unit vector.
    """
    candidate = np.zeros((3))
    while True:
        candidate[0] = random.uniform(-1, 1)
        candidate[1] = random.uniform(-1, 1)
        candidate[2] = random.uniform(0, 1)
        length = math.sqrt(np.inner(candidate, candidate))
        # Reject near-zero lengths (numerical safety) and points outside
        # the unit ball (direction uniformity).
        if 0.1 < length < 1:
            return candidate / length
|
efflicto/DomainListToHohser | hohser.py | <filename>hohser.py
#!/usr/bin/env python3
import json
import urllib.request
def get_urls(url):
    """Fetch a newline-separated domain blocklist and return its entries.

    Lines starting with '#' (comments) and blank lines are skipped.
    """
    urls = []
    # Context manager guarantees the HTTP response is closed (the original
    # leaked it), and the loop variable no longer shadows the parameter.
    with urllib.request.urlopen(url) as response:
        for raw_line in response.readlines():
            # rstrip('\r\n') also drops the '\r' left behind by CRLF line
            # endings, which replace('\n', '') missed.
            line = raw_line.decode('utf-8').rstrip('\r\n')
            # Skip comments and empty lines so no '' entries are emitted.
            if line and not line.startswith('#'):
                urls.append(line)
    return urls
def blocklist_to_hohser_json(blocklist, method):
    """Convert a list of domains into HOHSER entry dicts.

    Each entry pairs the domain name with the given display mode (e.g.
    'HIGHLIGHT', 'PARTIAL_HIDE', 'FULL_HIDE').
    """
    return [{'domainName': domain, 'display': method} for domain in blocklist]
def main():
    """Interactively fetch a blocklist URL and write a HOHSER JSON file."""
    source = input('URL: ')
    domains = get_urls(source)
    choice = input('Highlight, Set Back, Hide, (Default: Hide)').lower()
    print('Creating list...')
    # Map the user's answer to HOHSER's display constant; anything
    # unrecognized falls back to hiding the domain completely.
    display_modes = {'highlight': 'HIGHLIGHT', 'set back': 'PARTIAL_HIDE'}
    entries = blocklist_to_hohser_json(blocklist=domains, method=display_modes.get(choice, 'FULL_HIDE'))
    with open('hohser_blocklist.json', 'w') as outfile:
        json.dump(entries, outfile)
    print('Finished.')


if __name__ == '__main__':
    main()
|
junqili259/DatabaseDemo | main.py | import os
import logging
from flask import Flask, render_template, url_for, redirect, request, Response
from forms import PersonForm, SearchForm
from datetime import date,timedelta
import requests, random
import sqlalchemy
# Cloud SQL connection settings, injected via environment variables by the
# App Engine deployment.
db_user = os.environ.get("CLOUD_SQL_USERNAME")
db_pass = os.environ.get("CLOUD_SQL_PASSWORD")
db_name = os.environ.get("CLOUD_SQL_DATABASE_NAME")
cloud_sql_connection_name = os.environ.get("CLOUD_SQL_CONNECTION_NAME")
app = Flask(__name__)
app.config.from_object('config.Config')
# Connect to database through the Cloud SQL unix socket.
db = sqlalchemy.create_engine(
    sqlalchemy.engine.url.URL(
        drivername="mysql+pymysql",
        username=db_user,
        # NOTE(review): '<PASSWORD>' is a secret-redaction placeholder and is
        # not valid Python — this was presumably password=db_pass; restore it.
        password=<PASSWORD>,
        database=db_name,
        query={"unix_socket": "/cloudsql/{}".format(cloud_sql_connection_name)},
    ),
    pool_size=5,
    max_overflow=2,
    pool_timeout=30,
    pool_recycle=1800,
)
# Module-level logger used by the route handlers for exception reporting.
logger = logging.getLogger()
# General entry form for user information
@app.route('/', methods=['GET','POST'])
def home():
    """Render the intake form; on a valid POST, persist the person record.

    Inserts the person, admission, optional verifier/medical/shelter rows,
    and either a free-trial or subscription row, then redirects to /success.
    """
    form = PersonForm()
    if form.validate_on_submit():
        # Get User input
        fname = request.form.get('fname')
        lname = request.form.get('lname')
        id_num = request.form.get('id_num')
        sex = request.form.get('sex')
        dob = request.form.get('dob')
        phone = request.form.get('phone')
        ethnicity = request.form.get('ethnicity')
        marital_status = request.form.get('marital_status')
        medical = request.form.get('medical')
        # Verifier info
        v_fname = request.form.get('v_fname')
        v_lname = request.form.get('v_lname')
        v_id = request.form.get('v_id')
        v_phone = request.form.get('v_phone')
        v_address = request.form.get('v_address')
        v_city = request.form.get('v_city')
        v_state = request.form.get('v_state')
        v_zipcode = request.form.get('v_zipcode')
        # Shelter History Info
        shelter = request.form.get('shelter')
        date_in = request.form.get('date_in')
        date_out = request.form.get('date_out')
        plan = request.form.get('plan')
        # Parameterized SQL statements (never string-built).
        # Person info
        stmt = sqlalchemy.text("INSERT INTO Person(Fname,Lname,id,sex,dob,phone,ethnicity,admission_id,marital_status,med_id,verifier_id,history_id)" "VALUES(:Fname,:Lname,:id,:sex,:dob,:phone,:ethnicity,:admission_id,:marital_status,:med_id,:verifier_id,:history_id)")
        # Verifier Info
        stmt2 = sqlalchemy.text("INSERT INTO Verifier(verifier_id,Fname,Lname,phone,Vaddress_id)" "VALUES(:verifier_id,:Fname,:Lname,:phone,:Vaddress_id)")
        # Verifier Address
        stmt3 = sqlalchemy.text("INSERT INTO VerifierAddress(Vaddress_id, address, state, city, zipcode)" "VALUES(:Vaddress_id, :address, :state, :city, :zipcode)")
        # Shelter History for Person
        stmt4 = sqlalchemy.text("INSERT INTO ShelterHistory(history_id,shelterID,date_in,date_out)" "VALUES(:history_id,:shelterID,:date_in,:date_out)")
        # Person's medical details
        stmt5 = sqlalchemy.text("INSERT INTO Medical(med_id,allergy)" "VALUES(:med_id,:allergy)")
        # Free trial or subscription plan
        stmt6 = sqlalchemy.text("INSERT INTO FreeTrial(trial_id,trial_start,trial_end)" "VALUES(:trial_id,:trial_start,:trial_end)")
        stmt7 = sqlalchemy.text("INSERT INTO Subscription(subscription_id, start, end)" "VALUES(:subscription_id, :start, :end)")
        stmt8 = sqlalchemy.text("INSERT INTO Admission(admission_id,trial_id,subscription_id)" "VALUES(:admission_id,:trial_id,:subscription_id)")
        # To check if verifier data already exists or not
        stmt9 = sqlalchemy.text("SELECT verifier_id FROM Verifier where verifier_id=:verifier_id")
        try:
            with db.connect() as conn:
                conn.execute(stmt,Fname=fname,Lname=lname,id=id_num,sex=sex,dob=dob,phone=phone,ethnicity=ethnicity,admission_id=id_num,marital_status=marital_status,med_id=id_num,verifier_id=v_id,history_id=id_num)
                conn.execute(stmt8,admission_id=id_num,trial_id=id_num,subscription_id=id_num)
                # Fix: conn.execute() returns a result proxy, which is not
                # subscriptable (the old `verifier_check[0]` raised TypeError).
                # Fetch one row and insert the verifier only when no row
                # exists yet, matching the stated intent.
                verifier_row = conn.execute(stmt9, verifier_id=v_id).fetchone()
                if verifier_row is None:
                    conn.execute(stmt2,verifier_id=v_id, Fname=v_fname, Lname=v_lname, phone=v_phone, Vaddress_id=v_id)
                    conn.execute(stmt3, Vaddress_id=v_id, address=v_address, state=v_state, city=v_city, zipcode=v_zipcode)
                # If there are medical details, insert them.
                if medical != '':
                    conn.execute(stmt5, med_id=id_num,allergy=medical)
                # If there is shelter history ('0' means none), insert it.
                if shelter != '0':
                    conn.execute(stmt4, history_id=id_num, shelterID=shelter, date_in=date_in, date_out=date_out)
                # Free trial: one week starting today.
                if plan == 'F':
                    start_date = date.today()
                    end_date = date.today() + timedelta(7)
                    conn.execute(stmt6,trial_id=id_num, trial_start=start_date, trial_end=end_date)
                # Subscription: thirty days starting today.
                if plan == 'S':
                    start_date = date.today()
                    end_date = date.today() + timedelta(30)
                    conn.execute(stmt7,subscription_id=id_num, start=start_date, end=end_date)
        except Exception as e:
            logger.exception(e)
        # NOTE(review): this redirects to /success even when the transaction
        # above failed — consider an error page on exception.
        return redirect(url_for('success'))
    return render_template('home.html',form=form)
@app.route('/success',methods=['GET'])
def success():
    """Plain-text confirmation page shown after a successful form submit."""
    return Response(response="Successfully added to database", status=200)
# Get information about the person with a given id.
@app.route('/searchid',methods=['GET','POST'])
def search():
    """Render the search form; on POST, look a person up by id and show it."""
    form=SearchForm()
    if form.validate_on_submit():
        # Get User input
        id_num = request.form.get('id_num')
        stmt_person = sqlalchemy.text("SELECT Fname,Lname,sex,dob,ethnicity,marital_status FROM Person where id=:id")
        # NOTE(review): the two statements below are prepared but never
        # executed — presumably a planned extension; confirm before removing.
        stmt_medical = sqlalchemy.text("SELECT allergy from Medical where med_id=:med_id")
        stmt_verifier = sqlalchemy.text("SELECT Verifier.Fname, Verifier.Lname, Verifier.phone FROM Verifier join Person on Verifier.verifier_id=Person.verifier_id WHERE Person.id=:id")
        # Fix: initialize result so a query failure no longer raises
        # UnboundLocalError at render time; also dropped the unused `med`.
        result = None
        try:
            with db.connect() as conn:
                # Get Person details (single row or None).
                result = conn.execute(stmt_person, id=id_num).fetchone()
        except Exception as e:
            logger.exception(e)
        return render_template('result.html', len_result=len(result) if result is not None else 0, result=result)
    return render_template('searchid.html',form=form)
junqili259/DatabaseDemo | forms.py | <gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, RadioField, DateField, SelectField
from wtforms.validators import DataRequired, Length, EqualTo, Optional
# (value, label) choices for the verifier-state dropdown: all US state and
# territory abbreviations.
states = [('AL','AL'),('AK','AK'),('AZ','AZ'),('AR','AR'),('CA','CA'),('CO','CO'),('CT','CT'),
('DE','DE'),('DC','DC'),('FL','FL'),('GA','GA'),('HI','HI'),('ID','ID'),('IL','IL'),('IN','IN'),
('IA','IA'),('KS','KS'),('KY','KY'),('LA','LA'),('ME','ME'),('MD','MD'),('MA','MA'),('MI','MI'),
('MN','MN'),('MS','MS'),('MO','MO'),('MT','MT'),('NE','NE'),('NV','NV'),('NH','NH'),('NJ','NJ'),
('NM','NM'),('NY','NY'),('NC','NC'),('ND','ND'),('OH','OH'),('OK','OK'),('OR','OR'),('PA','PA'),
('RI','RI'),('SC','SC'),('SD','SD'),('TN','TN'),('TX','TX'),('UT','UT'),('VT','VT'),('VA','VA'),
('WA','WA'),('WV','WV'),('WI','WI'),('WY','WY'),('PR','PR')]
# (id, label) choices for the shelter dropdown; id '0' means no shelter
# history and is special-cased by the submit handler.
shelter_choices = [('0','None'),('1','Shelter One'),('2','Shelter Two'),('3','Shelter Three'),('4','Shelter Four'),('5','Shelter Five'),('6','Shelter Six'),('7','Shelter Seven'),('8','Shelter Eight'),('9','Shelter Nine'),('10','Shelter Ten'),
('11','Shelter Eleven'),('12','Shelter Twelve'),('13','Shelter Thirteen'),('14','Shelter Fourteen'),('15','Shelter Fifteen'),('16','Shelter Sixteen'),('17','Shelter Seventeen'),('18','Shelter Eighteen'),('19','Shelter Nineteen'),('20','Shelter Twenty')]
class PersonForm(FlaskForm):
    """Intake form: personal details, medical notes, verifier contact,
    shelter history, and the free-trial/subscription plan choice."""
    # Personal Information
    fname = StringField('First Name', validators=[DataRequired()])
    lname = StringField('Last Name', validators=[DataRequired()])
    id_num = IntegerField('ID number', validators=[DataRequired()])
    sex = RadioField('Sex', choices = [('M','Male'),('F','Female')], validators=[DataRequired()])
    dob = DateField('Date of Birth, format: yyyy-mm-dd, Example: 2000-01-01',format='%Y-%m-%d',validators=[DataRequired()])
    phone = StringField('Phone Number',validators=[Optional()])
    ethnicity = RadioField('Ethnicity',choices=[('American Indian/Alaskan Native','American Indian/Alaskan Native'),('Asian/Pacific Islander','Asian/Pacific Islander'),('Black/African American','Black/African American'),('Hispanic','Hispanic'),('White','White')], validators=[DataRequired()])
    marital_status = RadioField('Marital Status',choices = [('Single','Single'),('Married','Married'),('Widowed','Widowed'),('Divorced','Divorced')], validators=[DataRequired()])
    # Medical Details (free text, optional)
    medical = StringField('Medical Details', validators=[Optional()])
    # Verifier Information
    v_fname = StringField('Verifier First Name', validators=[DataRequired()])
    v_lname = StringField('Verifier Last Name', validators=[DataRequired()])
    v_id = IntegerField('Verifier ID number', validators=[DataRequired()])
    v_phone = StringField('Verifier Phone Number', validators=[DataRequired()])
    v_address = StringField('Verifier Address', validators=[DataRequired()])
    v_city = StringField('Verifier City', validators=[DataRequired()])
    v_state = SelectField('Verifier State', choices=states, validators=[DataRequired()])
    v_zipcode = IntegerField('Verifier Zipcode', validators=[DataRequired()])
    # Shelter History (dates optional because '0' / no shelter is allowed)
    shelter = SelectField('Shelter Name', choices=shelter_choices,validators=[DataRequired()])
    date_in = DateField('Date In, format: yyyy-mm-dd, Example: 2000-01-01',format='%Y-%m-%d',validators=[Optional()])
    date_out = DateField('Date Out, format: yyyy-mm-dd, Example: 2000-01-01', format='%Y-%m-%d',validators=[Optional()])
    # Free trial or Subscription
    plan = RadioField('Choose', choices=[('F','Free Trial'),('S','Subscription')], validators=[DataRequired()])
    submit = SubmitField('submit')
# Search data by ID
class SearchForm(FlaskForm):
    """Single-field form for looking a person up by their id number."""
    id_num = IntegerField('Enter ID', validators=[DataRequired()])
    submit = SubmitField('submit')
|
krishnakesari/Graph-and-Network-Analysis | test.py | a = 2
b = a ** 5
print(b)
|
nilaya123/interview-scheduler-app-automation | page_objects/index_object.py | """
This class models the form on the Selenium tutorial page
The form consists of some input fields, a dropdown, a checkbox and a button
"""
from .Base_Page import Base_Page
import conf.locators_conf as locators
from utils.Wrapit import Wrapit
class Index_Object:
    "Page object for the index page: navigation links and the page heading."
    #locators
    candidates_page = locators.candidates_page
    interviewers_page = locators.interviewers_page
    jobs_page = locators.jobs_page
    heading = locators.heading

    @Wrapit._exceptionHandler
    def check_heading(self):
        "Check if the heading exists; returns the result flag."
        result_flag = self.check_element_present(self.heading)
        self.conditional_write(result_flag,
            positive='Correct heading present on index page',
            negative='Heading on index page is INCORRECT!!',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def click_ok(self):
        "Accept the terms and conditions"
        # NOTE(review): self.name is not defined on this class, and
        # result_flag below is used without ever being assigned — this
        # method will raise NameError at runtime; confirm intended logic.
        alert = self.switch_window(self.name)
        alert.accept()
        self.conditional_write(result_flag,
            positive='Accepted the terms and conditions',
            negative='Failed to accept the terms and conditions',
            level='debug')
        return result_flag

    # NOTE(review): the block below is dead code preserved inside a string
    # literal (copied from the login form page object); consider deleting it.
    '''
    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def open_candidate(self,username):
        "Set the name on the form"
        result_flag = self.set_text(self.username_field,username)
        self.conditional_write(result_flag,
            positive='Set the name to: %s'% username,
            negative='Failed to set the name in the form',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def set_password(self,password):
        "Set the email on the form"
        result_flag = self.set_text(self.password_field,password)
        self.conditional_write(result_flag,
            positive='Set the email to: %s'%password,
            negative='Failed to set the email in the form',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def login(self):
        "Click on 'Click Me' button"
        result_flag = self.click_element(self.login_button)
        self.conditional_write(result_flag,
            positive='Clicked on the "Login" button',
            negative='Failed to click on "Login" button',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def accept_terms(self):
        "Accept the terms and conditions"
        result_flag = self.select_checkbox(self.tac_checkbox)
        self.conditional_write(result_flag,
            positive='Accepted the terms and conditions',
            negative='Failed to accept the terms and conditions',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def check_redirect(self):
        "Check if we have been redirected to the redirect page"
        result_flag = False
        if self.redirect_title in self.driver.title:
            result_flag = True
            self.switch_page("redirect")
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def submit_form(self,username,password):
        "Submit the form"
        result_flag = self.set_user(username)
        result_flag &= self.set_password(password)
        result_flag &= self.accept_terms()
        result_flag &= self.login()
        result_flag &= self.check_redirect()
        return result_flag
    '''
nilaya123/interview-scheduler-app-automation | tests/test_index_page.py | """
This is an example automated test to help you learn Qxf2's framework
Our automated test will do the following:
#Open Qxf2 selenium-tutorial-main page.
#Fill the example form.
#Click on Click me! button and check if its working fine.
"""
import os,sys,time,pytest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from page_objects.PageFactory import PageFactory
from utils.Option_Parser import Option_Parser
import conf.login_conf as conf
import conf.testrail_caseid_conf as testrail_file
import pytest
@pytest.mark.GUI
def test_login_page(test_obj):
    "Index-page smoke test: open the page, accept the alert, check results."
    try:
        #Initalize flags for tests summary
        expected_pass = 0
        actual_pass = -1
        #1. Create a test object and fill the example form.
        # NOTE(review): this reassignment shadows the test_obj fixture/arg.
        test_obj = PageFactory.get_page_object("index page")
        #Set start_time with current time
        start_time = int(time.time())
        # Turn on the highlighting feature
        test_obj.turn_on_highlight()
        #Turn off the highlighting feature
        #test_obj.turn_off_highlight()
        # Accepting alert
        # NOTE(review): 'driver' is undefined in this scope — this raises
        # NameError (swallowed by the except below); likely meant
        # test_obj.driver.
        alert = driver.switch_to.alert
        print(alert.text)
        #13. Print out the result
        test_obj.write_test_summary()
        expected_pass = test_obj.result_counter
        actual_pass = test_obj.pass_counter
    except Exception as e:
        print("Exception when trying to run test: %s"%__file__)
        print("Python says:%s"%str(e))
    assert expected_pass == actual_pass, "Test failed: %s"%__file__
#---START OF SCRIPT
# Script entry point: parse CLI options, register a driver, run the test,
# then tear the driver down.
if __name__=='__main__':
    print("Start of %s"%__file__)
    #Creating an instance of the option parser
    options_obj = Option_Parser()
    options = options_obj.get_options()
    #Run the test only if the options provided are valid
    if options_obj.check_options(options):
        test_obj = PageFactory.get_page_object("Zero",base_url=options.url)
        #Setup and register a driver
        test_obj.register_driver(options.remote_flag,options.os_name,options.os_version,options.browser,options.browser_version,options.remote_project_name,options.remote_build_name)
        test_login_page(test_obj)
        #teardown
        test_obj.wait(3)
        test_obj.teardown()
    else:
        print('ERROR: Received incorrect comand line input arguments')
        # Fix: the parser instance is named options_obj; the previous
        # 'option_obj' raised NameError on the error path.
        print(options_obj.print_usage())
|
nilaya123/interview-scheduler-app-automation | conf/login_conf.py | <filename>conf/login_conf.py<gh_stars>0
# Credentials used by the login test.
user_name = "nilaya"
# NOTE(review): '<PASSWORD>' is a secret-redaction placeholder, not a real
# value — restore the actual password (preferably from an env variable).
password = "<PASSWORD>"
nilaya123/interview-scheduler-app-automation | conf/base_url_conf.py | """
Conf file for base_url
"""
base_url = "http://3.219.215.68/"
|
nilaya123/interview-scheduler-app-automation | tests/test_login_page.py | <reponame>nilaya123/interview-scheduler-app-automation
"""
This is an example automated test to help you learn Qxf2's framework
Our automated test will do the following:
#Open Qxf2 selenium-tutorial-main page.
#Fill the example form.
#Click on Click me! button and check if its working fine.
"""
import os,sys,time,pytest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from page_objects.PageFactory import PageFactory
from utils.Option_Parser import Option_Parser
import conf.login_conf as conf
import conf.testrail_caseid_conf as testrail_file
import pytest
@pytest.mark.GUI
def test_login_page(test_obj):
    "Login-page test: fill username/password, submit, accept the alert."
    try:
        #Initalize flags for tests summary
        expected_pass = 0
        actual_pass = -1
        #1. Create a test object and fill the example form.
        # NOTE(review): this reassignment shadows the test_obj fixture/arg.
        test_obj = PageFactory.get_page_object("login page")
        #Set start_time with current time
        start_time = int(time.time())
        # Turn on the highlighting feature
        test_obj.turn_on_highlight()
        #4. Get the test details from the conf file
        username = conf.user_name
        # NOTE(review): '<PASSWORD>' is a redaction placeholder and is not
        # valid Python — restore the original assignment (likely
        # conf.password).
        password = <PASSWORD>
        #5. Set name in form
        print(username)
        result_flag = test_obj.set_user(username)
        test_obj.log_result(result_flag,
            positive="Name was successfully set to: %s\n" % username,
            negative="Failed to set name: %s \nOn url: %s\n" % (username, test_obj.get_current_url()))
        test_obj.write('Script duration: %d seconds\n' %
            (int(time.time()-start_time)))
        #6. Set Password in form
        result_flag = test_obj.set_password(password)
        test_obj.log_result(result_flag,
            positive="Password was successfully set to: %s\n" % password,
            negative="Failed to set password: %s \nOn url: %s\n" % (password, test_obj.get_current_url()))
        test_obj.write('Script duration: %d seconds\n' %
            (int(time.time()-start_time)))
        #10. Set and submit the form in one go
        result_flag = test_obj.login()
        test_obj.log_result(result_flag,
            positive="Successfully logged in the page\n",
            negative="Failed to login the page \nOn url: %s" % test_obj.get_current_url(),
            level="critical")
        #11. Click ok on alert window
        #test_obj.alert_window()
        result_flag = test_obj.alert_accept()
        test_obj.log_result(result_flag,
            positive="Successfully logged in the page\n",
            negative="Failed to login the page \nOn url: %s" % test_obj.get_current_url(),
            level="critical")
        #Turn off the highlighting feature
        #test_obj.turn_off_highlight()
        #13. Print out the result
        test_obj.write_test_summary()
        expected_pass = test_obj.result_counter
        actual_pass = test_obj.pass_counter
    except Exception as e:
        print("Exception when trying to run test: %s"%__file__)
        print("Python says:%s"%str(e))
    assert expected_pass == actual_pass, "Test failed: %s"%__file__
#---START OF SCRIPT
# Script entry point: parse CLI options, register a driver, run the test,
# then tear the driver down.
if __name__=='__main__':
    print("Start of %s"%__file__)
    #Creating an instance of the option parser
    options_obj = Option_Parser()
    options = options_obj.get_options()
    #Run the test only if the options provided are valid
    if options_obj.check_options(options):
        test_obj = PageFactory.get_page_object("Zero",base_url=options.url)
        #Setup and register a driver
        test_obj.register_driver(options.remote_flag,options.os_name,options.os_version,options.browser,options.browser_version,options.remote_project_name,options.remote_build_name)
        test_login_page(test_obj)
        #teardown
        test_obj.wait(3)
        test_obj.teardown()
    else:
        print('ERROR: Received incorrect comand line input arguments')
        # Fix: the parser instance is named options_obj; the previous
        # 'option_obj' raised NameError on the error path.
        print(options_obj.print_usage())
|
nilaya123/interview-scheduler-app-automation | page_objects/form_object.py | <gh_stars>0
"""
This class models the form on the Selenium tutorial page
The form consists of some input fields, a dropdown, a checkbox and a button
"""
from .Base_Page import Base_Page
import conf.locators_conf as locators
from utils.Wrapit import Wrapit
class Form_Object:
    "Page object for the login form: username/password fields and buttons."
    #locators
    username_field = locators.username_field
    password_field = locators.password_field
    login_button = locators.login_button
    signup_button = locators.signup_button
    # Page-title fragment used to detect a successful redirect.
    redirect_title = "redirect"

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def set_user(self,username):
        "Type the username into the form; returns the result flag."
        result_flag = self.set_text(self.username_field,username)
        self.conditional_write(result_flag,
            positive='Set the name to: %s'% username,
            negative='Failed to set the name in the form',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def set_password(self,password):
        "Type the password into the form; returns the result flag."
        result_flag = self.set_text(self.password_field,password)
        self.conditional_write(result_flag,
            positive='Set the email to: %s'%password,
            negative='Failed to set the email in the form',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def login(self):
        "Click on 'Login' button"
        result_flag = self.click_element(self.login_button)
        self.conditional_write(result_flag,
            positive='Clicked on the "Login" button',
            negative='Failed to click on "Login" button',
            level='debug')
        # NOTE(review): the click result is overwritten here — only the
        # alert-accept outcome is returned to the caller.
        result_flag = self.alert_accept()
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def alert_accept(self):
        "Accept the alert dialog shown after login; returns the result flag."
        result_flag = self.alert_window()
        self.conditional_write(result_flag,
            positive='Clicked on the OK',
            negative='Failed to click on OK',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def accept_terms(self):
        "Accept the terms and conditions"
        # NOTE(review): self.tac_checkbox is not declared in the locators
        # above — confirm the base page/locators module provides it.
        result_flag = self.select_checkbox(self.tac_checkbox)
        self.conditional_write(result_flag,
            positive='Accepted the terms and conditions',
            negative='Failed to accept the terms and conditions',
            level='debug')
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def check_redirect(self):
        "Check if we have been redirected to the redirect page"
        result_flag = False
        if self.redirect_title in self.driver.title:
            result_flag = True
            self.switch_page("redirect")
        return result_flag

    @Wrapit._exceptionHandler
    @Wrapit._screenshot
    def submit_form(self,username,password):
        "Fill every field, submit, and AND together all step results."
        result_flag = self.set_user(username)
        result_flag &= self.set_password(password)
        result_flag &= self.accept_terms()
        result_flag &= self.login()
        result_flag &= self.check_redirect()
        return result_flag
quang2705/arc_project | populate_users.py | <filename>populate_users.py
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','arc_project.settings')
import django
django.setup()
from arc_app.models import UserProfile, Contract, Session, ContractMeeting, Subject
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
#Generate user based on a list of first_name and last_name
#specify whether this list of users is tutor or tutee
def generate_user_info(first_name, last_name, is_tutor=False, is_tutee=False):
    """Build a dict of synthetic account fields from parallel name lists.

    first_name and last_name are index-aligned lists; the returned dict has
    per-user lists for username/password/email plus shared flags and
    placeholder phone/D-number values, matching what create_user_db expects.
    """
    username = []
    password = []
    email = []
    for i in range(len(first_name)):
        username.append(first_name[i] + last_name[i]+ "Clone")
        # NOTE(review): the line below is a secret-redaction placeholder and
        # is not valid Python — restore the original password expression.
        password.append(<PASSWORD>] +"<PASSWORD>")
        email.append(last_name[i]+"_"+first_name[i][0] +"1_clone" +"@<EMAIL>")
    user_db = {"first_name": first_name,
        "last_name": last_name,
        "username": username,
        "password": password,
        "email": email,
        "is_tutor": is_tutor,
        "is_tutee": is_tutee,
        "phone": ["7404019934"]*len(first_name),
        "d_number": ["D19283013"]*len(first_name)
        }
    return user_db
#Create a User and UserProfile models based on a user info and
#save User and UserProfile into the database.
def create_user_db(user_db, size):
    """Create and save Django User and UserProfile rows for the first
    `size` entries of the user_db dict produced by generate_user_info."""
    print("Creating User and UserProfile... ")
    for i in range(size):
        username = user_db['username'][i]
        first_name = user_db['first_name'][i]
        last_name = user_db['last_name'][i]
        password = user_db['password'][i]
        # Emails are normalized to lowercase before storing.
        email = user_db['email'][i].lower()
        phone = user_db['phone'][i]
        d_number = user_db['d_number'][i]
        # Flags are shared across the whole batch, not per-user.
        is_tutor = user_db['is_tutor']
        is_tutee = user_db['is_tutee']
        user = User.objects.create_user(username=username,
            first_name=first_name,
            last_name=last_name,
            password=password,
            email=email)
        user_profile = UserProfile(user=user,
            first_name=first_name,
            last_name=last_name,
            email=email,
            is_tutor=is_tutor,
            is_tutee=is_tutee,
            phone=phone,
            d_number=d_number)
        print("Creating user {0} {1}".format(username, email))
        user.save()
        user_profile.save()
#Create a contract for each tutor and tutee
#
def create_contract_db(tutor_fn, tutor_ln, tutee_fn, tutee_ln):
    """Pair tutors and tutees index-wise and persist one Contract per pair."""
    print("Create Contract... ")
    class_name_prefix = "CS"
    subject_name = "Computer Science"
    professor_prefix = "Dr. "
    for idx in range(len(tutor_fn)):
        tutor_profile = UserProfile.objects.get(first_name=tutor_fn[idx],
                                                last_name=tutor_ln[idx])
        tutee_profile = UserProfile.objects.get(first_name=tutee_fn[idx],
                                                last_name=tutee_ln[idx])
        course_subject = Subject.objects.get(subject_name=subject_name)
        course = class_name_prefix + str(idx)
        # professor names cycle through Dr. A, Dr. B, ...
        prof = professor_prefix + chr(ord('A') + idx)
        pairing = Contract(tutor=tutor_profile, tutee=tutee_profile,
                           class_name=course,
                           subject=course_subject,
                           professor_name=prof)
        print("Create a Contract between {0} {1} for class: {2}".format(tutor_fn[idx],
                                                                        tutee_fn[idx],
                                                                        course))
        pairing.save()
#Generate the contract meeting for each Contract
def create_contract_meeting():
    """Attach a fixed number of placeholder meetings to every Contract."""
    print("Create Contract Meeting...")
    no_meeting_per_contract = 2
    for idx, agreement in enumerate(Contract.objects.all()):
        for _ in range(no_meeting_per_contract):
            day = 'Monday'
            begin = datetime.now()
            finish = datetime.now()
            room = "Olin " + str(idx)
            meeting = ContractMeeting(contract=agreement,
                                      date=day,
                                      start=begin,
                                      end=finish,
                                      location=room)
            print("Create a Contract Meeting at {0} on {1} from {2} to {3}".format(room, day, begin, finish))
            meeting.save()
#Generate the Session for each Contract
def create_session():
    """Attach a fixed number of placeholder sessions to every Contract."""
    print("Create Session...")
    no_session_per_contract = 2
    for agreement in Contract.objects.all():
        for _ in range(no_session_per_contract):
            when = datetime.now()
            begin = datetime.now()
            finish = datetime.now()
            note = "Today we learn about something"
            record = Session(contract=agreement,
                             date=when,
                             start=begin,
                             end=finish,
                             summary=note)
            print("Create a Session on {0} from {1} to {2}".format(when, begin, finish))
            record.save()
def create_subject():
    """Seed the Subject table with the ARC department list."""
    print("Creating subject...")
    subject_name_list = ['Astronomy', 'Biology', 'Chemistry', 'Communication',
                         'Computer Science', 'Data Analytics', 'Economics',
                         'French', 'German', 'Global Commerce', 'Health Education and Sport Studies',
                         'Japanese', 'Music', 'Philosophy', 'Physics', 'Psychology',
                         'Queer Studies', 'Spanish']
    for name in subject_name_list:
        Subject(subject_name=name).save()
        print("Create a Subject name : ", name)
#Populate the database with our 5 tutor and 5 tutee
def populate():
    """Seed the database with demo subjects, users, contracts, meetings and sessions.

    Each stage is wrapped individually so a rerun over an already-populated
    database skips the duplicate stage instead of aborting the whole script.
    The excepts were narrowed from bare ``except:`` so KeyboardInterrupt /
    SystemExit still escape, and the underlying error is now printed
    instead of being silently discarded.
    """
    create_subject()
    print("already have these subjects")
    tutor_first_name = ["Hiep", "Khanh", "Khue", "Meg", "Quang"]
    tutor_last_name = ["Phan", "Tran", "Le", "Jaffy", "Nguyen"]
    size = len(tutor_first_name)
    tutee_first_name = ["John", "Yuri", "Wang", "Sam", "Jake"]
    tutee_last_name = ["Doe", "Kuro", "Shei", "Smith", "Perata"]
    try:
        tutor_db = generate_user_info(tutor_first_name, tutor_last_name, True, False)
        create_user_db(tutor_db, size)
    except Exception as e:
        print("already have tutor with similar name", e)
    try:
        tutee_db = generate_user_info(tutee_first_name, tutee_last_name, False, True)
        create_user_db(tutee_db, size)
    except Exception as e:
        print("already have tutee with similar name", e)
    print("================================================")
    try:
        # three rounds of contracts: original pairing, rotated tutees,
        # then tutors paired with other tutors acting as tutees
        create_contract_db(tutor_first_name, tutor_last_name,
                           tutee_first_name, tutee_last_name)
        tutee_first_name = ["Yuri", "Wang", "Sam", "Jake", "John"]
        tutee_last_name = ["Kuro", "Shei", "Smith", "Perata", "Doe"]
        create_contract_db(tutor_first_name, tutor_last_name,
                           tutee_first_name, tutee_last_name)
        tutee_first_name = ["Khanh", "Khue", "Meg", "Quang", "Hiep"]
        tutee_last_name = ["Tran", "Le", "Jaffy", "Nguyen", "Phan"]
        create_contract_db(tutor_first_name, tutor_last_name,
                           tutee_first_name, tutee_last_name)
    except Exception as e:
        print("already have contract with this tutor and tutee", e)
    print("================================================")
    try:
        create_contract_meeting()
    except Exception as e:
        print("already have the contract meeting ", e)
    print("================================================")
    try:
        create_session()
    except Exception as e:
        print("already have the session", e)
# Script entry point: populate the database when run directly
# (DJANGO_SETTINGS_MODULE is configured at the top of this file).
if __name__=='__main__':
    print("Starting to populate")
    populate()
|
quang2705/arc_project | arc_app/views.py | <gh_stars>1-10
#Django class
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render
#custom Database, Serializer Permission and Utilities class
from arc_app.models import UserProfile, Contract, Session, ContractMeeting, Subject
from arc_app.serializers import UserSerializer, UserProfileSerializer, ContractSerializer
from arc_app.serializers import SessionSerializer, ContractMeetingSerializer, SubjectSerializer
from arc_app.permissions import IsTutorOrIsAdminAndHeadTutorReadOnly
from arc_app.utils import create_userprofile, check_for_key, setup_query, encode_val, decode_val
#Rest framework class
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.decorators import action, authentication_classes, permission_classes, api_view
from rest_framework.response import Response
from rest_framework import viewsets
class UserProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for UserProfile.

    Admins see (and may filter) every profile; everyone else only sees
    their own profile.  Provides 'list', 'create', 'retrieve', 'update'
    and 'destroy' automatically, plus the custom actions below.
    """
    queryset = UserProfile.objects.all()
    serializer_class = UserProfileSerializer
    # Authenticated users only; per-object scoping happens in get_queryset.
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        user = self.request.user
        create_userprofile(user)  # lazily ensure a profile row exists
        if user.userprofiles.is_admin:
            userprofiles = UserProfile.objects.all()
            query = setup_query(self.request.query_params, ['first_name', 'last_name', 'email', 'is_tutor'])
            if query is not None:
                userprofiles = userprofiles.filter(query)
            return userprofiles
        else:
            return UserProfile.objects.filter(user=user)

    @action(methods=['get'], detail=True)
    def get_sessions(self, request, pk=None):
        """List sessions of every contract this profile tutors.

        Supports date[lte|lt|gte|gt]=YYYY-MM-DD query filters.
        """
        userprofile = UserProfile.objects.get(pk=pk)
        if not (userprofile.is_tutor or userprofile.is_headtutor):
            # FIX: previously fell off the end and returned None, which
            # crashes DRF; non-tutors now get an empty list.
            return Response([])
        contracts = userprofile.tutor_contracts.all()
        query = Q(id=-1)  # always-false seed so empty contract sets match nothing
        for contract in contracts:
            for session in contract.sessions.all():
                query |= Q(id=session.id)
        sessions = Session.objects.filter(query)
        # filter date by lte: less than or equal, gte: greater than or equal,
        # lt: less than, gt: greater than
        operators = ['lte', 'lt', 'gte', 'gt']
        for operator in operators:
            date = self.request.query_params.get('date[{}]'.format(operator), None)
            if date is not None:
                sessions = sessions.filter(**{'date__{}'.format(operator): date})
        return Response(SessionSerializer(sessions, many=True, context={'request': request}).data)

    @action(methods=['get'], detail=True)
    def get_contracts(self, request, pk=None):
        """List contracts this profile tutors; empty list for non-tutors."""
        userprofile = UserProfile.objects.get(pk=pk)
        if not (userprofile.is_tutor or userprofile.is_headtutor):
            # FIX: same missing-return bug as get_sessions.
            return Response([])
        contracts = userprofile.tutor_contracts.all()
        return Response(ContractSerializer(contracts, many=True, context={'request': request}).data)

    @action(methods=['get'], detail=False)
    def get_current_userprofile(self, request):
        """Return the profile of the requesting user."""
        user = self.request.user
        create_userprofile(user)
        return Response(UserProfileSerializer(user.userprofiles, context={'request': request}).data)

    @action(methods=['get'], detail=False)
    def current_position(self, request):
        """Return the list of role names the requesting user holds."""
        user = self.request.user
        create_userprofile(user)
        position = []
        if user.userprofiles.is_tutee:
            position.append('tutee')
        if user.userprofiles.is_tutor:
            position.append('tutor')
        if user.userprofiles.is_headtutor:
            position.append('headtutor')
        if user.userprofiles.is_admin:
            position.append('admin')
        return Response(position)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only 'list' and 'detail' endpoints for Django auth users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        requester = self.request.user
        create_userprofile(requester)
        if not requester.userprofiles.is_admin:
            # non-admins only ever see themselves
            return User.objects.filter(username=requester.username)
        # admins may narrow the listing via URL query parameters
        matched = self.queryset
        filters = setup_query(self.request.query_params, ['first_name', 'last_name', 'email'])
        if filters is not None:
            matched = matched.filter(filters)
        return matched
#/contracts/
class ContractViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Contract plus nested session/meeting listings.

    Tutors manage their own contracts; admins and head tutors get
    read-only access (enforced by IsTutorOrIsAdminAndHeadTutorReadOnly).
    """
    #This viewset automatically provides 'list', 'create', 'retrieve',
    #'update', and 'destroy' actions
    queryset = Contract.objects.all()
    serializer_class = ContractSerializer
    permission_classes = [IsAuthenticated, IsTutorOrIsAdminAndHeadTutorReadOnly]

    def destroy(self, request, pk=None):
        # Wraps the stock destroy so the client gets back the deleted id.
        super().destroy(request, pk)
        return Response({'status': 200, 'id': pk})

    def create(self, request):
        """Create a contract, creating the tutee profile on the fly if needed."""
        key_list = ['tutor_email', 'tutee_first_name', 'tutee_last_name', 'tutee_email', \
                    'tutee_phone', 'tutee_dnumber', 'class_name', 'professor_name', 'subject']
        check_for_key(request.data, key_list)
        try:
            tutor = UserProfile.objects.get(email=request.data['tutor_email'])
            #TODO: check to see if the tutor is the user sending
            #the request and if he is a tutor
            try:
                tutee = UserProfile.objects.get(email=request.data['tutee_email'])
            except:
                #incase there is not tutee with this name in the database
                tutee = UserProfile(first_name=request.data['tutee_first_name'],
                                    last_name=request.data['tutee_last_name'],
                                    email=request.data['tutee_email'],
                                    phone=request.data['tutee_phone'],
                                    d_number=request.data['tutee_dnumber'],
                                    is_tutee=True)
                tutee.save()
        except Exception as e:
            # NOTE(review): returns HTTP 200 with an error string rather than
            # a 4xx status -- confirm clients rely on this before changing.
            return Response("Your information about tutor or tutee is not correct, check the parameter again")
        subject = Subject.objects.get(subject_name=request.data['subject'])
        contract = Contract(tutor=tutor, tutee=tutee,
                            class_name=request.data['class_name'],
                            subject=subject,
                            professor_name=request.data['professor_name'])
        contract.save()
        contract_serializer = ContractSerializer(contract, many=False, context={'request':request})
        return Response(contract_serializer.data)

    #query contract based on the tutor of the contract and
    #any parameter that is added onto the url
    def get_queryset(self):
        """Scope contracts by role, then apply URL query-parameter filters."""
        user = self.request.user
        create_userprofile(user)
        userprofile = user.userprofiles
        # NOTE(review): a user who is neither admin nor tutor/headtutor (e.g.
        # a plain tutee) leaves `contracts` unbound and raises NameError
        # below -- confirm such users can never reach this viewset.
        if userprofile.is_admin:
            contracts = self.queryset
        elif userprofile.is_tutor or userprofile.is_headtutor:
            position = self.request.query_params.get('position', None)
            #query the contracts based on the user loging in
            #show the contracts belong to tutor if 'position' is None or is 'tutor'
            #show all contracts if position is 'headtutor'
            if (userprofile.is_tutor and (position is None or position == 'tutor')):
                contracts = userprofile.tutor_contracts.all()
            elif userprofile.is_headtutor and position == 'headtutor':
                #only get the contracts from department of the headtutor
                contracts = self.queryset
                subjects = userprofile.subjects.all()
                query = Q(subject= -1)  # always-false seed for the OR chain
                for subject in subjects:
                    query |= Q(subject = subject)
                contracts = contracts.filter(query)
        #get all the params from the url
        subject = self.request.query_params.get('subject', None)
        tutee_email = self.request.query_params.get('tutee_email', None)
        #if the params is not Null, query it
        if subject is not None:
            subject = Subject.objects.get(subject_name=subject)
            contracts = contracts.filter(subject = subject)
        if tutee_email is not None:
            tutee = UserProfile.objects.get(email=tutee_email)
            contracts = contracts.filter(tutee = tutee)
        query = setup_query(self.request.query_params, ['class_name', 'professor_name'])
        if query is not None :
            contracts = contracts.filter(query)
        return contracts

    def update(self, request, pk=None):
        """Update a contract and its tutee's contact fields in one call."""
        key_list = ['tutee_first_name', 'tutee_last_name', 'tutee_email', \
                    'tutee_phone', 'tutee_dnumber', 'class_name', 'professor_name', 'subject']
        check_for_key(request.data, key_list)
        contract = Contract.objects.get(pk=pk);
        contract.tutee.first_name = request.data['tutee_first_name']
        contract.tutee.last_name = request.data['tutee_last_name']
        contract.tutee.email = request.data['tutee_email']
        contract.tutee.phone = request.data['tutee_phone']
        contract.tutee.d_number = request.data['tutee_dnumber']
        contract.class_name = request.data['class_name']
        contract.professor_name = request.data['professor_name']
        newSubject = Subject.objects.get(subject_name=request.data['subject'])
        contract.subject = newSubject
        contract.save()
        contract.tutee.save()  # persist the nested tutee edits too
        return Response(ContractSerializer(contract, context={ 'request': request }).data)

    #Return all the sessions of the current contract
    #/contracts/<pk>/get_sesions/
    @action(methods=['get'], detail=True)
    def get_sessions(self, request, pk=None):
        #get the contract and the sessions that belong to this contract
        contract = self.get_object()
        serializer = ContractSerializer(contract, many=False, context={'request':request})
        sessions = serializer.data['sessions']
        sessions_all = Session.objects.all()
        #query the sessions based on the id
        # NOTE(review): sessions[0] raises IndexError when the contract has
        # no sessions yet -- confirm callers guard against empty contracts.
        query = Q(id = sessions[0]['id'])
        for i in range (1, len(sessions)):
            session_id = sessions[i]['id']
            query.add(Q(id=session_id), Q.OR)
        #return the query of the sessions
        contract_sessions = sessions_all.filter(query)
        contract_sessions_serializer = SessionSerializer(contract_sessions, many=True , context={'request':request})
        return Response(contract_sessions_serializer.data)

    #Return all the contracts meetings of the current contract
    @action(methods=['get'], detail=True)
    def get_contractmeetings(self, request, pk=None):
        #get the contract and the contract meetings belong to this contract
        contract = self.get_object()
        serializer = ContractSerializer(contract, many=False, context={'request':request})
        cmeetings = serializer.data['contract_meetings']
        cmeetings_all = ContractMeeting.objects.all()
        #query the contract meetings based on the id
        # NOTE(review): same empty-list IndexError risk as get_sessions above.
        query = Q(id = cmeetings[0]['id'])
        for i in range (1, len(cmeetings)):
            session_id = cmeetings[i]['id']
            query.add(Q(id=session_id), Q.OR)
        #return the query of the contract meetings
        contract_cmeetings = cmeetings_all.filter(query)
        contract_cmeetings_serializer = ContractMeetingSerializer(contract_cmeetings, many=True , context={'request':request})
        return Response(contract_cmeetings_serializer.data)
#/contractmeetings/
class ContractMeetingViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for recurring weekly ContractMeeting slots."""
    queryset = ContractMeeting.objects.all()
    serializer_class = ContractMeetingSerializer
    permission_classes = [IsAuthenticated, IsTutorOrIsAdminAndHeadTutorReadOnly]

    def create(self, request):
        """Create a meeting attached to an existing contract."""
        #Get all the required parameters for the POST request
        #contract_id, date, start, end, location
        key_list = ['contract_id', 'week_day', 'start', 'end', 'location']
        check_for_key(request.data, key_list)
        #get the contract that this meeting is associated to
        try:
            contract = Contract.objects.get(pk=request.data['contract_id'])
        except:
            # NOTE(review): responds HTTP 200 with a plain string on a bad
            # id -- confirm before tightening to a 404.
            return Response('You dont have contract with this id')
        #create a new contract and save to the database
        contract_meeting = ContractMeeting(contract=contract,
                                           date=request.data['week_day'],
                                           start=request.data['start'],
                                           end=request.data['end'],
                                           location=request.data['location'])
        contract_meeting.save()
        return Response(ContractMeetingSerializer(contract_meeting, context={'request': request}).data)

    #filter contract meeting based on the user
    def get_queryset(self):
        """Scope meetings by the requester's role and optional ?location=."""
        user = self.request.user
        create_userprofile(user)
        userprofile = user.userprofiles
        # NOTE(review): a plain tutee leaves `contracts` unbound and raises
        # NameError below -- same pattern as ContractViewSet.get_queryset.
        if userprofile.is_admin:
            contracts = Contract.objects.all()
        elif userprofile.is_tutor or userprofile.is_headtutor:
            position = self.request.query_params.get('position', None)
            if (userprofile.is_tutor and (position is None or position == 'tutor')):
                contracts = userprofile.tutor_contracts.all()
            elif (userprofile.is_headtutor and position == 'headtutor'):
                #only get the contracts from department of the headtutor
                contracts = Contract.objects.all()
                subjects = userprofile.subjects.all()
                query = Q(subject= -1)  # always-false seed for the OR chain
                for subject in subjects:
                    query |= Q(subject = subject)
                contracts = contracts.filter(query)
        #get all contracts if the user request as a headtutor
        # otherwise get only the owned contracts if the user request as a tutor
        contract_meetings = [cmeeting for contract in contracts for cmeeting in contract.contract_meetings.all()]
        location = self.request.query_params.get('location', None)
        query = Q(id = -1)  # always-false seed
        for cmeeting in contract_meetings:
            query |= Q(id = cmeeting.id)
        if location is not None:
            # NOTE(review): OR-ing the location widens the result to every
            # meeting at that location, even from other tutors' contracts --
            # confirm whether an AND filter was intended.
            query |= Q(location = location)
        return ContractMeeting.objects.filter(query)
#/sessions/
class SessionViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for tutoring Session records."""
    queryset = Session.objects.all()
    serializer_class = SessionSerializer
    permission_classes = [IsAuthenticated, IsTutorOrIsAdminAndHeadTutorReadOnly]

    def destroy(self, request, pk=None):
        # Wraps the stock destroy so the client gets back the deleted id.
        super().destroy(request, pk)
        return Response({'status': 200, 'id': pk})

    #create our own action in handling post request
    #handling POST request /sessions/
    def create(self, request):
        """Create a session attached to an existing contract."""
        #Get all the required parameter for the POST request
        #contract_id, date, start, end, summary
        key_list = ['contract_id', 'date', 'start', 'end', 'summary']
        check_for_key(request.data, key_list)
        #get the contract that this session is associated to
        try:
            contract = Contract.objects.get(pk=request.data['contract_id'])
        except:
            # NOTE(review): responds HTTP 200 with a plain string on a bad
            # id -- confirm before tightening to a 404.
            return Response("you dont have a contract with this id")
        #create and save the session into the database
        session = Session(contract=contract,
                          date=request.data['date'],
                          start=request.data['start'],
                          end=request.data['end'],
                          summary=request.data['summary'])
        session.save()
        return Response(SessionSerializer(session, context={'request': request}).data)

    #filter session based on users
    #Note: get_queryset should return a queryset
    #/sessions/
    def get_queryset(self):
        """Scope sessions by role, then apply date[lte|lt|gte|gt] filters."""
        user = self.request.user
        create_userprofile(user)
        userprofile = user.userprofiles
        # NOTE(review): a plain tutee leaves `contracts` unbound and raises
        # NameError below -- same pattern as the other viewsets.
        if userprofile.is_admin:
            contracts = Contract.objects.all()
        elif userprofile.is_tutor or userprofile.is_headtutor:
            position = self.request.query_params.get('position', None)
            if (userprofile.is_tutor and (position is None or position == 'tutor')):
                contracts = userprofile.tutor_contracts.all()
            elif userprofile.is_headtutor and position == 'headtutor':
                #only get the contracts from department of the headtutor
                contracts = Contract.objects.all()
                subjects = userprofile.subjects.all()
                query = Q(subject= -1)  # always-false seed for the OR chain
                for subject in subjects:
                    query |= Q(subject = subject)
                contracts = contracts.filter(query)
        query = Q(id = -1)  # always-false seed
        for contract in contracts:
            for session in contract.sessions.all():
                query |= Q(id= session.id)
        sessions = Session.objects.filter(query)
        #filter date by lte: less than or equal, gte: greater than or equal
        #lt: less than, gt: greater than
        operators = ['lte', 'lt', 'gte', 'gt']
        for operator in operators:
            date = self.request.query_params.get('date[{}]'.format(operator), None)
            if date is not None:
                sessions = sessions.filter(**{'date__{}'.format(operator):date})
        return sessions;

    @action(methods=['put'], detail=True)
    def verify(self, request, pk=None):
        """Mark a session verified; returns {"status": 400} on any failure."""
        try:
            session = Session.objects.get(pk=pk)
            session.is_verified = True
            session.save()
            return Response({"status": 200})
        except:
            return Response({"status": 400})
class SubjectViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only Subject endpoints plus a per-subject session listing."""
    queryset = Subject.objects.all()
    serializer_class = SubjectSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        # Subjects are visible to any authenticated user; the requester
        # does not affect the queryset.  (Removed an unused local binding
        # of self.request.user.)
        return self.queryset

    @action(methods=['get'], detail=True)
    def get_sessions(self, request, pk=None):
        """All sessions under this subject's contracts.

        Supports date[lte|lt|gte|gt]=YYYY-MM-DD query filters.
        """
        subject = Subject.objects.get(pk=pk)
        #get all the sessions of this subject
        query = Q(id=-1)  # always-false seed so empty subjects match nothing
        for contract in subject.contracts.all():
            for session in contract.sessions.all():
                query |= Q(id=session.id)
        sessions = Session.objects.filter(query)
        #filter date by lte: less than or equal, gte: greater than or equal
        #lt: less than, gt: greater than
        operators = ['lte', 'lt', 'gte', 'gt']
        for operator in operators:
            date = request.query_params.get('date[{}]'.format(operator), None)
            if date is not None:
                sessions = sessions.filter(**{'date__{}'.format(operator): date})
        return Response(SessionSerializer(sessions, many=True, context={'request': request}).data)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def encode(request):
    """Encrypt ?encode_string=... with the shared key and return it as JSON.

    Returns {"status": 400} when the query parameter is missing.
    """
    # SECURITY: hard-coded key kept for compatibility; it should be moved
    # into settings/environment configuration.
    MASTER_KEY="Th1s-1is-@-R3lly-L0ng-M@ster_key-used-to-de%code$ stu##"
    try:
        my_string = request.GET['encode_string']
    except KeyError:  # narrowed from a bare except: only a missing param is expected
        return JsonResponse({"status": 400})
    cipher_text = encode_val(my_string, MASTER_KEY)
    return JsonResponse({"encrypted_string": cipher_text})
def verify(request):
    """Mark a Session verified via the emailed ?secret=... token.

    Renders the tutee_verify template with status 400 when the token is
    missing, 200 on success.
    """
    try:
        value = request.GET['secret']
    except KeyError:  # narrowed from a bare except: only a missing param is expected
        return render(request, 'frontend/tutee_verify.html', context = {"status":400})
    # SECURITY NOTE(review): this endpoint is unauthenticated and trusts the
    # decrypted pk entirely; a bad/forged token also raises uncaught here --
    # confirm this is intended.
    MASTER_KEY="Th1s-1is-@-R3lly-L0ng-M@ster_key-used-to-de%code$ stu##"
    pk = int(decode_val(value, MASTER_KEY))
    session = Session.objects.get(pk=pk)
    session.is_verified = True
    session.save()
    return render(request, 'frontend/tutee_verify.html', context = {"status":200})
|
quang2705/arc_project | arc_app/models.py | <gh_stars>1-10
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.models import BaseUserManager
#TODO: Gives contrains for all the CharField and IntegerField
#TODO: Write test for database on contrain and relationship
class UserProfile(models.Model):
    """Profile row extending the auth User with ARC-specific fields.

    `user` is nullable because tutee profiles can be created (e.g. by a
    tutor submitting a contract) before the tutee ever logs in.
    """
    user = models.OneToOneField(User,
                                unique=True,
                                on_delete=models.CASCADE,
                                related_name='userprofiles',
                                null=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    email = models.EmailField(unique=True)
    d_number = models.CharField(max_length=100)  # campus ID number
    phone = models.CharField(max_length=100)
    # role flags; a profile may hold several roles at once
    is_tutor = models.BooleanField(default=False)
    is_tutee = models.BooleanField(default=False)
    is_admin = models.BooleanField(default=False)
    is_headtutor = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['-id']  # newest profiles first

    def __str__(self):
        try:
            return (self.first_name + self.last_name)
        except TypeError:  # narrowed from a bare except: only None name fields are expected
            return "Anonymous"
class Subject(models.Model):
    """An academic subject/department; optionally owned by a head tutor."""
    subject_name = models.CharField(max_length=100)
    # nullable: a subject may exist before a head tutor is assigned
    headtutor = models.ForeignKey(UserProfile, on_delete=models.CASCADE,
                                  related_name='subjects',
                                  null=True)

    def __str__(self):
        return ("Subject " + self.subject_name)
class Contract(models.Model):
    """Tutoring agreement pairing one tutor with one tutee for a class."""
    #Contract has a many to one relation with tutor
    #Contract has a many to one relation with tutee
    #on_delete=models.CASCADE is cascade delete, if user is delete,
    #contract will delete automatically
    tutor = models.ForeignKey(UserProfile,
                              on_delete=models.CASCADE,
                              related_name='tutor_contracts')
    tutee = models.ForeignKey(UserProfile,
                              on_delete=models.CASCADE,
                              related_name='tutee_contracts')
    subject = models.ForeignKey(Subject,
                                on_delete=models.CASCADE,
                                related_name='contracts')
    class_name = models.CharField(max_length=100)
    professor_name = models.CharField(max_length=100)
    # approval-workflow flags
    is_verified = models.BooleanField(default=False)
    is_waiting = models.BooleanField(default=False)

    def __str__(self):
        return ("Contract "+ str(self.id))
class Session(models.Model):
    """One logged tutoring meeting under a Contract."""
    #Session has a many to one relationship with Contract
    #on_delete=models.CASCADE is cascade delete, if Contract is delete,
    #Session will delete automatically
    contract = models.ForeignKey(Contract,
                                 on_delete=models.CASCADE,
                                 related_name='sessions')
    date = models.DateField()
    start = models.TimeField()
    end = models.TimeField()
    summary = models.TextField()  # tutor's write-up of what was covered
    # verification-workflow flags (see the /verify/ endpoints in views.py)
    is_verified = models.BooleanField(default=False)
    is_waiting = models.BooleanField(default=False)

    def __str__(self):
        return ("Session "+ str(self.id))
class ContractMeeting(models.Model):
    """A recurring weekly meeting slot agreed in a Contract."""
    #ContractMeeting has a many to one relationship with Contract
    #on_delete=models.CASCADE is cascade delete, if Contract is delete,
    #ContractMeeting will delete automatically
    contract = models.ForeignKey(Contract,
                                 on_delete=models.CASCADE,
                                 related_name='contract_meetings')
    # stored as a weekday name (e.g. 'Monday'), not a calendar date
    date = models.CharField(max_length=100)
    start = models.TimeField()
    end = models.TimeField()
    location = models.CharField(max_length=100)

    def __str__(self):
        return ("Contract Meeting "+ str(self.id))
|
quang2705/arc_project | arc_app/permissions.py | from rest_framework import permissions
#This permission checks whether the user is a tutor, an admin, or a head tutor:
# if the user is a tutor: give them full access
# if the user is an admin or head tutor: give them read-only access: GET, OPTIONS and HEAD
class IsTutorOrIsAdminAndHeadTutorReadOnly(permissions.BasePermission):
    """Tutors get full access; admins and head tutors are read-only."""

    def has_permission(self, request, view):
        profile = request.user.userprofiles
        if profile.is_tutor:
            return True
        if profile.is_admin or profile.is_headtutor:
            # read-only access: GET, OPTIONS and HEAD
            return request.method in permissions.SAFE_METHODS
        return False
|
quang2705/arc_project | populate_contracts_sessions.py | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','arc_project.settings')
import django
django.setup()
from arc_app.models import UserProfile, Contract, Session, ContractMeeting, Subject
import datetime
def create_subject():
    """Seed the Subject table, skipping names that already exist."""
    print("Creating subject...")
    subject_name_list = ['Astronomy', 'Biology', 'Chemistry', 'Communication',
                         'Computer Science', 'Data Analytics', 'Economics',
                         'French', 'German', 'Global Commerce', 'Health Education and Sport Studies',
                         'Japanese', 'Music', 'Philosophy', 'Physics', 'Psychology',
                         'Queer Studies', 'Spanish']
    for subject_name in subject_name_list:
        # get_or_create replaces the previous bare try/except existence
        # probe; only freshly created subjects are announced.
        subject, created = Subject.objects.get_or_create(subject_name=subject_name)
        if created:
            print("Create a Subject name : ", subject_name)
def create_contract(userprofile):
    """Create demo contracts (self-paired) plus weekly meetings for a user."""
    print("Creating contract for user: ", userprofile.email)
    num_contracts = 2
    num_meetings_per_contract = 2
    class_name_prefix = "CS"
    subject_name = "Computer Science"
    professor_prefix = "Dr. "
    for c_idx in range(num_contracts):
        course = class_name_prefix + str(c_idx)
        prof = professor_prefix + chr(ord('A') + c_idx)
        cs_subject = Subject.objects.get(subject_name=subject_name)
        # self-paired on purpose: the same profile plays tutor and tutee
        agreement = Contract(tutor=userprofile, tutee=userprofile,
                             class_name=course,
                             subject=cs_subject,
                             professor_name=prof)
        agreement.save()
        print("Create a Contract between {0} {1} for class: {2} of prof {3}".format(userprofile.email,
                                                                                    userprofile.email,
                                                                                    course,
                                                                                    prof))
        for m_idx in range(num_meetings_per_contract):
            print("Creating a contract meeting for user: ", userprofile.email)
            day = 'Monday'
            begin = datetime.time((m_idx + 1) * 4, 30)
            finish = datetime.time((m_idx + 1) * 6, 0)
            room = 'Olin' + str(m_idx)
            meeting = ContractMeeting(contract=agreement,
                                      date=day,
                                      start=begin,
                                      end=finish,
                                      location=room)
            meeting.save()
            print("Create a Contract Meeting at {0} on {1} from {2} to {3}".format(room, day, begin, finish))
def create_session(userprofile):
    """Add demo sessions to every contract the given user tutors."""
    print("Creating Session for user: ", userprofile.email)
    no_session_per_contract = 2
    for agreement in Contract.objects.filter(tutor=userprofile):
        for s_idx in range(no_session_per_contract):
            when = datetime.date.today()
            begin = datetime.time((s_idx + 1) * 4, 30)
            finish = datetime.time((s_idx + 1) * 6, 0)
            note = "Today we learn how to do something else"
            record = Session(contract=agreement,
                             date=when,
                             start=begin,
                             end=finish,
                             summary=note)
            record.save()
            print("Create a Session on {0} from {1} to {2}".format(when, begin, finish))
def main():
    """Interactive driver: mark the given user a tutor and seed demo data."""
    user_email = input("Type your Denison email here: ")
    print(user_email)
    create_subject()
    userprofile = UserProfile.objects.get(email = user_email)
    userprofile.is_tutor = True
    # FIX: persist the flag -- it was previously set but never saved, so
    # the tutor role was silently lost once this script exited.
    userprofile.save()
    create_contract(userprofile)
    create_session(userprofile)
    return 0

# Runs on import by design: this file is a one-shot population script.
main()
|
quang2705/arc_project | arc_app/urls.py | <filename>arc_app/urls.py
from arc_app import views
from rest_framework import renderers
from rest_framework.routers import DefaultRouter
from django.urls import path, include
from django.conf.urls import url
#Create a router and register our viewsets
# Each register() call exposes the viewset's routes under /api/<prefix>/.
router = DefaultRouter()
router.register(r'userprofiles', views.UserProfileViewSet)
router.register(r'users', views.UserViewSet)
router.register(r'contracts', views.ContractViewSet)
router.register(r'sessions', views.SessionViewSet)
router.register(r'contractmeetings', views.ContractMeetingViewSet)
router.register(r'subjects', views.SubjectViewSet)

urlpatterns = [
    # router-generated REST API routes
    path(r'api/', include(router.urls)),
    # e-mail verification landing page for sessions
    path(r'verify/sessions/', views.verify, name='verify'),
    # helper endpoint that encrypts a string with the shared key
    path(r'encode/', views.encode, name='encode')
]
|
quang2705/arc_project | arc_app/tests.py | from django.test import TestCase
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from rest_framework.test import force_authenticate
from django.contrib.auth.models import User
from arc_app.models import UserProfile, Contract, Session, ContractMeeting, Subject
from arc_app.views import UserProfileViewSet
# Create your tests here.
class UserProfileMethodTests(APITestCase):
    """Model-level sanity checks for UserProfile."""

    def _make_profile(self):
        """Build and save the shared tutor-profile fixture.

        NOTE(review): the original fixture e-mail was a redacted
        placeholder; restored to a plausible value.
        """
        profile = UserProfile(first_name="Quang",
                              last_name="Nguyen",
                              email="nguyen_q1@denison.edu",
                              d_number="D01760117",
                              phone="7404050190",
                              is_tutor=True,
                              is_tutee=False,
                              is_admin=False)
        profile.save()
        return profile

    #test to make sure that userprofile email must not be left empty
    def test_email_is_not_empty(self):
        profile = self._make_profile()
        # assertNotEqual replaces the awkward assertEqual(x != "", True)
        self.assertNotEqual(profile.email, "")

    #test to make sure that userprofile identity is either tutor, tutee or
    #admin
    def test_userprofile_identity(self):
        profile = self._make_profile()
        self.assertTrue(profile.is_tutor or profile.is_tutee or profile.is_admin)
class UserProfileViewSetTest(APITestCase):
    """Request-level tests for UserProfileViewSet."""

    def setUp(self):
        self.factory = APIRequestFactory()
        # NOTE(review): the original fixture e-mail was a redacted
        # placeholder; restored to a plausible value.
        self.user = User.objects.create_user(
            username='nguyen_q1',
            email='nguyen_q1@denison.edu')

    #test get from api/userprofile
    def test_userprofiles_get(self):
        request = self.factory.get('./api/userprofiles/')
        force_authenticate(request, user=self.user)
        request.user = self.user
        response = UserProfileViewSet.as_view({'get': 'list'})(request)
        response.render()
        # FIX: the test previously only printed the payload and asserted
        # nothing; now it checks the endpoint actually succeeds.
        self.assertEqual(response.status_code, 200)
|
quang2705/arc_project | arc_app/serializers.py | <filename>arc_app/serializers.py<gh_stars>1-10
from rest_framework import serializers
from arc_app.models import UserProfile, Contract
from arc_app.models import Session, ContractMeeting, Subject
from django.contrib.auth.models import User
#TODO: need testing
# "Mini" serializers expose only identifying fields; they are nested
# inside the full serializers below to keep payloads small and to break
# the mutual references between related models.
class MiniSessionSerializer(serializers.ModelSerializer):
    class Meta:
        model = Session
        fields = ('id', 'url')


class MiniContractMeetingSerializer(serializers.ModelSerializer):
    class Meta:
        model = ContractMeeting
        fields = ('id', 'url')


class MiniUserProfileSerializer(serializers.ModelSerializer):
    class Meta:
        model = UserProfile
        fields = ('id', 'url', 'email', 'first_name', 'last_name', 'phone', 'd_number')


class MiniContractSerializer(serializers.ModelSerializer):
    # embed the tutee's identity so contract listings are self-describing
    tutee = MiniUserProfileSerializer(many=False, read_only=True)

    class Meta:
        model = Contract
        fields = ('id', 'url', 'class_name', 'tutee')


class MiniSubjectSerializer(serializers.ModelSerializer):
    headtutor = MiniUserProfileSerializer(many=False, read_only=True)

    class Meta:
        model = Subject
        fields = ('id', 'subject_name', 'headtutor')
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
tutor_contracts = MiniContractSerializer(many=True, read_only=True)
tutee_contracts = MiniContractSerializer(many=True, read_only=True)
class Meta:
model = UserProfile
fields = ('url','id', 'user', 'tutor_contracts',
'tutee_contracts','first_name', 'last_name',
'email','d_number','phone', 'is_tutor',
'is_tutee', 'is_admin', 'is_headtutor')
class ContractSerializer(serializers.HyperlinkedModelSerializer):
    """Full Contract representation with nested sessions, meetings, parties and subject."""
    sessions = MiniSessionSerializer(many=True, read_only=True)
    contract_meetings = MiniContractMeetingSerializer(many=True, read_only=True)
    tutor = MiniUserProfileSerializer(many=False, read_only=True)
    tutee = MiniUserProfileSerializer(many=False, read_only=True)
    subject = MiniSubjectSerializer(many=False, read_only=True)
    class Meta:
        model = Contract
        fields = ('url', 'id', 'tutor', 'tutee','sessions',
                  'contract_meetings', 'class_name', 'subject', 'professor_name',
                  'is_verified', 'is_waiting')
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Django auth User with its one-to-one UserProfile inlined (read-only)."""
    userprofiles = MiniUserProfileSerializer(many=False, read_only=True)
    class Meta:
        model = User
        fields = ('url', 'id', 'userprofiles', 'first_name', 'last_name', 'email')
class SessionSerializer(serializers.HyperlinkedModelSerializer):
    """Full Session representation with its parent contract inlined (read-only)."""
    contract = MiniContractSerializer(many=False, read_only=True)
    class Meta:
        model = Session
        fields = ('url', 'id', 'contract', 'date', 'start', 'end', 'summary', 'is_verified', 'is_waiting')
class ContractMeetingSerializer(serializers.HyperlinkedModelSerializer):
    """Full ContractMeeting representation with its parent contract inlined."""
    contract = MiniContractSerializer(many=False, read_only=True)
    class Meta:
        model = ContractMeeting
        fields = ('url','id', 'contract', 'date', 'start', 'end', 'location')
class SubjectSerializer(serializers.ModelSerializer):
    """Full Subject representation with all contracts for the subject (read-only)."""
    contracts = MiniContractSerializer(many=True, read_only=True)
    class Meta:
        model = Subject
        fields = ('id', 'subject_name', 'contracts', 'headtutor')
|
quang2705/arc_project | arc_app/apps.py | <reponame>quang2705/arc_project
from django.apps import AppConfig
class ArcAppConfig(AppConfig):
    """Django application configuration for the arc_app package."""
    name = 'arc_app'
|
quang2705/arc_project | arc_app/admin.py | <reponame>quang2705/arc_project
from django.contrib import admin
from arc_app.models import UserProfile, Contract, Session, ContractMeeting, Subject
# Register your models here.
# Expose every arc_app model in the Django admin with the default ModelAdmin.
admin.site.register(UserProfile)
admin.site.register(Contract)
admin.site.register(Session)
admin.site.register(ContractMeeting)
admin.site.register(Subject)
|
quang2705/arc_project | arc_app/utils.py | <reponame>quang2705/arc_project<filename>arc_app/utils.py
from arc_app.models import UserProfile
from django.db.models import Q
from rest_framework.response import Response
#python class
import base64
#create a user profile for user if user doesn't have any user profile
def create_userprofile(user):
    """Ensure *user* has an associated UserProfile, creating one if missing.

    The new profile is initialised from the user's basic auth fields and
    attached to ``user.userprofiles`` so later attribute access is cached.
    Returns None (mutates *user* in place).
    """
    try:
        user.userprofiles
    except Exception:
        # Reverse one-to-one access raises RelatedObjectDoesNotExist when no
        # profile exists yet.  (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        userprofile = UserProfile(user=user,
                                  first_name=user.first_name,
                                  last_name=user.last_name,
                                  email=user.email)
        userprofile.save()
        user.userprofiles = userprofile
#check to see if there is a required parameters for creating
#a database object
def check_for_key(request_data, key_list):
    """Verify every key in *key_list* is present in *request_data*.

    Returns a Response describing the first missing key, or None (implicitly)
    when all required parameters are present — same contract as before, but
    using a membership test instead of catching KeyError on a throwaway value.
    """
    for key in key_list:
        if key not in request_data:
            return Response('You dont have the params `{0}`'.format(key))
#This function return a Q objects that represents the exact filtering query
#of the query parameters
def setup_query(request_params, key_list):
    """Build a Q object AND-ing `<key>__exact` filters for the given params.

    Only keys present in *request_params* contribute; the string values
    'true'/'false' are normalised to 'True'/'False' to match BooleanField
    storage.  Returns None when no listed key appears in the params.
    """
    query = None
    for key in key_list:
        val = request_params.get(key)
        if val is None:  # was `val != None`; identity check is the correct idiom
            continue
        if val == 'true':
            val = 'True'
        elif val == 'false':
            val = 'False'
        q_object = Q(**{"%s__exact" % key: val})
        if query:
            query = query & q_object
        else:
            query = q_object
    return query
def encode_val(text, MASTER_KEY):
    """Return *text* encoded as a base64 string.

    NOTE: ``MASTER_KEY`` is ignored — this is plain base64 *encoding*, not
    encryption, and offers no confidentiality.
    """
    raw = text.encode('utf-8')
    return base64.b64encode(raw).decode('utf-8')
def decode_val(cipher_text, MASTER_KEY):
    """Return the UTF-8 string held in base64 *cipher_text*.

    NOTE: ``MASTER_KEY`` is ignored — counterpart of :func:`encode_val`,
    which performs plain base64, not encryption.
    """
    raw = cipher_text.encode('utf-8')
    return base64.b64decode(raw).decode('utf-8')
|
quang2705/arc_project | arc_app/migrations/0001_initial.py | # Generated by Django 2.2.10 on 2020-05-07 01:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Contract, UserProfile, Subject, Session, ContractMeeting.

    Auto-generated by Django 2.2.10 ``makemigrations``; do not edit by hand —
    create a follow-up migration for any schema change instead.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Contract',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('class_name', models.CharField(max_length=100)),
                ('professor_name', models.CharField(max_length=100)),
                ('is_verified', models.BooleanField(default=False)),
                ('is_waiting', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('d_number', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('is_tutor', models.BooleanField(default=False)),
                ('is_tutee', models.BooleanField(default=False)),
                ('is_admin', models.BooleanField(default=False)),
                ('is_headtutor', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userprofiles', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-id'],
            },
        ),
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject_name', models.CharField(max_length=100)),
                ('headtutor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subjects', to='arc_app.UserProfile')),
            ],
        ),
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('start', models.TimeField()),
                ('end', models.TimeField()),
                ('summary', models.TextField()),
                ('is_verified', models.BooleanField(default=False)),
                ('is_waiting', models.BooleanField(default=False)),
                ('contract', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sessions', to='arc_app.Contract')),
            ],
        ),
        migrations.CreateModel(
            name='ContractMeeting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.CharField(max_length=100)),
                ('start', models.TimeField()),
                ('end', models.TimeField()),
                ('location', models.CharField(max_length=100)),
                ('contract', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contract_meetings', to='arc_app.Contract')),
            ],
        ),
        migrations.AddField(
            model_name='contract',
            name='subject',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contracts', to='arc_app.Subject'),
        ),
        migrations.AddField(
            model_name='contract',
            name='tutee',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tutee_contracts', to='arc_app.UserProfile'),
        ),
        migrations.AddField(
            model_name='contract',
            name='tutor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tutor_contracts', to='arc_app.UserProfile'),
        ),
    ]
|
phaticusthiccy/Carbon-API | app.py | # Author: Phaticusthiccy
# Telegram: t.me/phaticusthiccy
from flask import Flask, send_file, jsonify, request
import utility, carbon
from flask_cors import CORS
import asyncio
import os
import json
app = Flask(__name__)
app.secret_key = '<KEY>'  # NOTE(review): placeholder — load the real secret from config/env, never commit it
loop = asyncio.get_event_loop()  # one event loop reused for every request's carbon fetch
CORS(app)
@app.route('/', methods=['GET', 'POST'])
def home():
    """Generate a carbon.now.sh screenshot of the code supplied by the client.

    Parameters arrive either as a JSON body (POST) or as query-string
    arguments (GET); the only required parameter is ``code``.  On success the
    rendered PNG is returned; on failure a JSON error object is returned.
    """
    if request.method == "POST":
        # BUG FIX: the original did `data = jsonify(**request.json)`, which
        # wraps the body in a flask.Response — not subscriptable, so every
        # POST crashed with TypeError.  Use the parsed dict directly.
        data = request.json or {}
        if 'code' not in data:
            return jsonify({"error": "You need to write any code to generate carbon!"})
    else:
        data = request.args
        if data.get('code') is None:
            return jsonify({"error": "You need to write any code to generate carbon!"})
    try:
        validatedBody = utility.validateBody(data)
        carbonURL = utility.createURLString(validatedBody)
        path = os.getcwd() + '/carbon_screenshot.png'
        loop.run_until_complete(carbon.get_response(carbonURL, path))
        return send_file(path, mimetype='image/png')
    except Exception as e:
        # BUG FIX: exception objects are not JSON serializable; stringify.
        return jsonify({"error": str(e)})
if __name__ == '__main__':
    # Development server only — debug=True and the reloader must not be used in production.
    app.debug = True
    app.run(host='0.0.0.0', port=5000, use_reloader=True, threaded=True)
|
david-gpu/deep-makeover | dm_flags.py |
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def define_flags():
    """Registers every command-line flag used by the project (tf.app.flags).

    Call once at startup before reading FLAGS.  Flags are listed
    alphabetically; each help string documents its meaning.
    """
    # Configuration (alphabetically)
    tf.app.flags.DEFINE_integer('annealing_half_life', 10000,
                                "Number of batches until annealing temperature is halved")
    tf.app.flags.DEFINE_string('attribute_file', 'list_attr_celeba.txt',
                               "Celeb-A dataset attribute file")
    tf.app.flags.DEFINE_integer('batch_size', 16,
                                "Number of samples per batch.")
    tf.app.flags.DEFINE_string('checkpoint_dir', 'checkpoint',
                               "Output folder where checkpoints are dumped.")
    tf.app.flags.DEFINE_string('dataset', 'dataset',
                               "Path to the dataset directory.")
    tf.app.flags.DEFINE_float('disc_loss_threshold', 0.1,
                              "If the discriminator's loss is above this threshold then only the discriminator will train in during the next step")
    tf.app.flags.DEFINE_float('disc_weights_threshold', 0.01,
                              "Maximum absolute value allowed for weights in the discriminator")
    tf.app.flags.DEFINE_float('epsilon', 1e-8,
                              "Fuzz term to avoid numerical instability")
    tf.app.flags.DEFINE_string('infile', None,
                               "Inference input file. See also `outfile`")
    tf.app.flags.DEFINE_float('instance_noise', 0.5,
                              "Standard deviation (amplitude) of instance noise")
    tf.app.flags.DEFINE_float('learning_rate_start', 0.000100,
                              "Starting learning rate used for AdamOptimizer")
    tf.app.flags.DEFINE_float('learning_rate_end', 0.000001,
                              "Ending learning rate used for AdamOptimizer")
    tf.app.flags.DEFINE_string('outfile', 'inference_out.png',
                               "Inference output file. See also `infile`")
    tf.app.flags.DEFINE_float('pixel_loss_max', 0.95,
                              "Initial pixel loss relative weight")
    tf.app.flags.DEFINE_float('pixel_loss_min', 0.70,
                              "Asymptotic pixel loss relative weight")
    tf.app.flags.DEFINE_string('run', None,
                               "Which operation to run. [train|inference]")
    tf.app.flags.DEFINE_integer('summary_period', 20,
                                "Number of batches between summary data dumps")
    tf.app.flags.DEFINE_integer('random_seed', 10,
                                "Seed used to initialize rng.")
    tf.app.flags.DEFINE_integer('test_vectors', 16,
                                """Number of features to use for testing""")
    tf.app.flags.DEFINE_string('train_dir', 'train',
                               "Output folder where training logs are dumped.")
    tf.app.flags.DEFINE_string('train_mode', 'mtf',
                               "Training mode. Can be male-to-female (`mtf`), female-to-male (`ftm`), male-to-male (`mtm`) or female-to-female (`ftf`)")
    tf.app.flags.DEFINE_integer('train_time', 180,
                                "Time in minutes to train the model")
|
david-gpu/deep-makeover | dm_show.py | import numpy as np
import os.path
import scipy.misc
import tensorflow as tf
import time
import dm_arch
import dm_input
import dm_utils
FLAGS = tf.app.flags.FLAGS
|
david-gpu/deep-makeover | dm_arch.py | <filename>dm_arch.py
import math
import numpy as np
import tensorflow as tf
import dm_utils
FLAGS = tf.app.flags.FLAGS
# Global switch to enable/disable training of variables
_glbl_is_training = tf.Variable(initial_value=True, trainable=False, name='glbl_is_training')
# Global variable dictionary. This is how we can share variables across models.
# Keys are full TF variable names ('MODEL/Lnnn/var'), values are tf.Variable objects.
_glbl_variables = {_glbl_is_training.name : _glbl_is_training}
def initialize_variables(sess):
    """Run this function only once and before the model begins to train.

    Args:
        sess: the tf.Session that owns all model variables.
    """
    # First initialize all variables
    sess.run(tf.global_variables_initializer())
    # Now freeze the graph to prevent new operations from being added
    #tf.get_default_graph().finalize()
def enable_training(onoff):
    """Switches training on or off globally (all models are affected).
    It is expected that dropout will be enabled during training and disabled afterwards. Batch normalization also affected.
    """
    # NOTE(review): tf.assign only *builds* an assign op; nothing runs it in a
    # session here, so the variable's value may never actually change — verify
    # whether callers depend on this taking effect.
    tf.assign(_glbl_is_training, bool(onoff))
# TBD: Add "All you need is a good init"
class Model:
    """A neural network model.
    Currently only supports a feedforward architecture."""
    def __init__(self, name, features, enable_batch_norm=True):
        """Create a model named *name* whose input layer is the tensor *features*."""
        self.name = name
        # Variables owned by this model (possibly shared via _glbl_variables)
        self.locals = set()
        # outputs[i] is the tensor produced by layer i; outputs[-1] is the model output
        self.outputs = [features]
        self.enable_batch_norm = enable_batch_norm
    def _get_variable(self, name, initializer=None):
        """Create, or reuse from _glbl_variables, the variable for the *next* layer."""
        # Variables are uniquely identified by a triplet: model name, layer number, and variable name
        layer = 'L%03d' % (self.get_num_layers()+1,)
        full_name = '/'.join([self.name, layer, name])
        if full_name in _glbl_variables:
            # Reuse existing variable
            #print("Reusing variable %s" % full_name)
            var = _glbl_variables[full_name]
            assert var.get_shape() == initializer.get_shape()
        elif initializer is not None:
            # Create new variable
            var = tf.Variable(initializer, name=full_name)
            _glbl_variables[full_name] = var
        else:
            raise ValueError("Initializer must be provided if variable is new")
        self.locals.add(var)
        return var
    def _get_num_inputs(self):
        # Channel count (last dimension) of the current topmost output
        return int(self.get_output().get_shape()[-1])
    def _variable_initializer(self, prev_units, num_units, stddev_factor=1.0):
        """Initialization in the style of Glorot 2010.
        stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""
        assert prev_units > 0 and num_units > 0
        stddev = np.sqrt(float(stddev_factor) / np.sqrt(prev_units*num_units))
        return tf.truncated_normal([prev_units, num_units],
                                    mean=0.0, stddev=stddev)
    def _variable_initializer_conv2d(self, prev_units, num_units, mapsize, is_residual):
        """Initialization in the style of Glorot 2010.
        stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""
        assert prev_units > 0 and num_units > 0
        size = [mapsize, mapsize, prev_units, num_units]
        stddev_factor = 1e-1 / (mapsize * mapsize * prev_units * num_units)
        result = stddev_factor * np.random.uniform(low=-1, high=1, size=size)
        if not is_residual:
            # Focus nearly all the weight on the center
            for i in range(min(prev_units, num_units)):
                result[mapsize//2, mapsize//2, i, i] += 1.0
        # else leaving all parameters near zero is the right thing to do
        result = tf.constant(result.astype(np.float32))
        return result
    def get_num_layers(self):
        # One entry per layer (entry 0 is the input features)
        return len(self.outputs)
    def add_batch_norm(self, scale=False):
        """Adds a batch normalization layer to this model.
        See ArXiv 1502.03167v3 for details."""
        # No-op when batch norm was disabled at construction time
        if not self.enable_batch_norm:
            return self
        out = tf.contrib.layers.batch_norm(self.get_output(), scale=scale, is_training=_glbl_is_training)
        self.outputs.append(out)
        return self
    def add_dropout(self, keep_prob=.5):
        """Applies dropout to output of this model"""
        # keep_prob collapses to 1.0 (no dropout) when not training
        is_training = tf.to_float(_glbl_is_training)
        keep_prob = is_training * keep_prob + (1.0 - is_training)
        out = tf.nn.dropout(self.get_output(), keep_prob=keep_prob)
        self.outputs.append(out)
        return self
    def add_flatten(self):
        """Transforms the output of this network to a 1D tensor"""
        batch_size = int(self.get_output().get_shape()[0])
        out = tf.reshape(self.get_output(), [batch_size, -1])
        self.outputs.append(out)
        return self
    def add_reshape(self, shape):
        """Reshapes the output of this network"""
        out = tf.reshape(self.get_output(), shape)
        self.outputs.append(out)
        return self
    def add_dense(self, num_units, stddev_factor=1.0):
        """Adds a dense linear layer to this model.
        Uses Glorot 2010 initialization assuming linear activation."""
        assert len(self.get_output().get_shape()) == 2, "Previous layer must be 2-dimensional (batch, channels)"
        prev_units = self._get_num_inputs()
        # Weight term
        initw = self._variable_initializer(prev_units, num_units,
                                            stddev_factor=stddev_factor)
        weight = self._get_variable('weight', initw)
        # Bias term
        initb = tf.constant(0.0, shape=[num_units])
        bias = self._get_variable('bias', initb)
        # Output of this layer
        out = tf.matmul(self.get_output(), weight) + bias
        self.outputs.append(out)
        return self
    def add_sigmoid(self, rnge=1.0):
        """Adds a sigmoid activation layer; rnge widens the output interval
        symmetrically around 0.5 (rnge=1.0 gives the usual (0,1))."""
        prev_units = self._get_num_inputs()
        out = 0.5 + rnge * (tf.nn.sigmoid(self.get_output()) - 0.5)
        self.outputs.append(out)
        return self
    def add_tanh(self):
        """Adds a tanh (-1,+1) activation function layer to this model."""
        prev_units = self._get_num_inputs()
        out = tf.nn.tanh(self.get_output())
        self.outputs.append(out)
        return self
    def add_softmax(self):
        """Adds a softmax operation to this model"""
        # Note: squares the inputs rather than exponentiating them
        this_input = tf.square(self.get_output())
        reduction_indices = list(range(1, len(this_input.get_shape())))
        acc = tf.reduce_sum(this_input, reduction_indices=reduction_indices, keep_dims=True)
        out = this_input / (acc+FLAGS.epsilon)
        #out = tf.verify_tensor_all_finite(out, "add_softmax failed; is sum equal to zero?")
        self.outputs.append(out)
        return self
    def add_relu(self):
        """Adds a ReLU activation function to this model"""
        out = tf.nn.relu(self.get_output())
        self.outputs.append(out)
        return self
    def add_elu(self):
        """Adds a ELU activation function to this model"""
        out = tf.nn.elu(self.get_output())
        self.outputs.append(out)
        return self
    def add_lrelu(self, leak=.2):
        """Adds a leaky ReLU (LReLU) activation function to this model"""
        # t1*x + t2*|x| == x for x>0 and leak*x for x<0
        t1 = .5 * (1 + leak)
        t2 = .5 * (1 - leak)
        out = t1 * self.get_output() + \
              t2 * tf.abs(self.get_output())
        self.outputs.append(out)
        return self
    def add_conv2d(self, num_units, mapsize=1, stride=1, is_residual = False):
        """Adds a 2D convolutional layer."""
        # NOTE(review): `assert cond and "msg"` behaves like `assert cond` —
        # the message is never shown.  The comma form would be `assert cond, "msg"`.
        assert len(self.get_output().get_shape()) == 4 and "Previous layer must be 4-dimensional (batch, width, height, channels)"
        prev_units = self._get_num_inputs()
        # Weight term and convolution
        initw = self._variable_initializer_conv2d(prev_units, num_units, mapsize, is_residual=is_residual)
        weight = self._get_variable('weight', initw)
        out = tf.nn.conv2d(self.get_output(), weight,
                            strides=[1, stride, stride, 1],
                            padding='SAME')
        # Bias term
        initb = tf.constant(0.0, shape=[num_units])
        bias = self._get_variable('bias', initb)
        out = tf.nn.bias_add(out, bias)
        self.outputs.append(out)
        return self
    def add_conv2d_transpose(self, num_units, mapsize=1, stride=1, is_residual = False):
        """Adds a transposed 2D convolutional layer"""
        # This assert fires unconditionally (asserting a falsy value) — deliberate.
        assert not "This function is broken right now due to how _variable_initializer_conv2d is built. Use a regular convolution instead"
        assert len(self.get_output().get_shape()) == 4 and "Previous layer must be 4-dimensional (batch, width, height, channels)"
        prev_units = self._get_num_inputs()
        # Weight term and convolution
        initw = self._variable_initializer_conv2d(prev_units, num_units, mapsize, is_residual=is_residual)
        weight = self._get_variable('weight', initw)
        weight = tf.transpose(weight, perm=[0, 1, 3, 2])
        prev_output = self.get_output()
        output_shape = [FLAGS.batch_size,
                        int(prev_output.get_shape()[1]) * stride,
                        int(prev_output.get_shape()[2]) * stride,
                        num_units]
        out = tf.nn.conv2d_transpose(self.get_output(), weight,
                                      output_shape=output_shape,
                                      strides=[1, stride, stride, 1],
                                      padding='SAME')
        # Bias term
        initb = tf.constant(0.0, shape=[num_units])
        bias = self._get_variable('bias', initb)
        out = tf.nn.bias_add(out, bias)
        self.outputs.append(out)
        return self
    def add_concat(self, terms):
        """Adds a concatenation layer"""
        if len(terms) > 0:
            axis = len(self.get_output().get_shape()) - 1
            terms = terms + [self.get_output()]
            out = tf.concat(axis, terms)
            self.outputs.append(out)
        return self
    def add_sum(self, term):
        """Adds a layer that sums the top layer with the given term"""
        prev_shape = self.get_output().get_shape()
        term_shape = term.get_shape()
        #print("%s %s" % (prev_shape, term_shape))
        assert prev_shape[1:] == term_shape[1:] and "Can't sum terms with a different size"
        out = tf.add(self.get_output(), term)
        self.outputs.append(out)
        return self
    def add_mean(self):
        """Adds a layer that averages the inputs from the previous layer"""
        prev_shape = self.get_output().get_shape()
        reduction_indices = list(range(len(prev_shape)))
        assert len(reduction_indices) > 2 and "Can't average a (batch, activation) tensor"
        # Average over all but the batch and channel dimensions
        reduction_indices = reduction_indices[1:-1]
        out = tf.reduce_mean(self.get_output(), reduction_indices=reduction_indices)
        self.outputs.append(out)
        return self
    def add_avg_pool(self, height=2, width=2):
        """Adds a layer that performs average pooling of the given size"""
        ksize = [1, height, width, 1]
        strides = [1, height, width, 1]
        out = tf.nn.avg_pool(self.get_output(), ksize, strides, 'VALID')
        self.outputs.append(out)
        return self
    def add_upscale(self, factor=2):
        """Adds a layer that upscales the output by 2x through nearest neighbor interpolation.
        See http://distill.pub/2016/deconv-checkerboard/"""
        out = dm_utils.upscale(self.get_output(), factor)
        self.outputs.append(out)
        return self
    def get_output(self):
        """Returns the output from the topmost layer of the network"""
        return self.outputs[-1]
    def get_num_parameters(self):
        """Return the number of parameters in this model"""
        num_params = 0
        for var in self.locals:
            size = 1
            for dim in var.get_shape():
                size *= int(dim)
            num_params += size
        return num_params
    def get_all_variables(self):
        """Returns all variables used in this model"""
        return list(self.locals)
|
david-gpu/deep-makeover | dm_train.py | import numpy as np
import os.path
import tensorflow as tf
import time
import dm_arch
import dm_input
import dm_utils
FLAGS = tf.app.flags.FLAGS
def _save_image(train_data, feature, gene_output, batch, suffix, max_samples=None):
    """Saves a picture showing the current progress of the model.

    Source features and (clipped) generator outputs are placed side by side,
    tiled into `num_cols` columns, and written to FLAGS.train_dir as
    'batch<NNNNNN>_<suffix>.png'.  `train_data` is kept in the signature for
    caller compatibility (the unused local alias was removed).
    """
    if max_samples is None:
        max_samples = int(feature.shape[0])
    # Put each input image next to its generated counterpart
    clipped = np.clip(gene_output, 0, 1)
    image = np.concatenate([feature, clipped], 2)
    image = image[:max_samples,:,:,:]
    cols = []
    num_cols = 4
    samples_per_col = max_samples//num_cols
    for c in range(num_cols):
        col = np.concatenate([image[samples_per_col*c + i,:,:,:] for i in range(samples_per_col)], 0)
        cols.append(col)
    image = np.concatenate(cols, 1)
    filename = 'batch%06d_%s.png' % (batch, suffix)
    filename = os.path.join(FLAGS.train_dir, filename)
    dm_utils.save_image(image, filename)
def _save_checkpoint(train_data, batch):
    """Saves a checkpoint of the model which can later be restored.

    Keeps a two-deep rotation: the previous 'checkpoint_new' is renamed to
    'checkpoint_old' (whose predecessor is deleted) before saving.
    """
    td = train_data
    oldname = 'checkpoint_old.txt'
    newname = 'checkpoint_new.txt'
    oldname = os.path.join(FLAGS.checkpoint_dir, oldname)
    newname = os.path.join(FLAGS.checkpoint_dir, newname)
    # Delete oldest checkpoint (best effort: it may not exist yet).
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    try:
        tf.gfile.Remove(oldname)
        tf.gfile.Remove(oldname + '.meta')
    except Exception:
        pass
    # Rename old checkpoint (best effort, same reasoning)
    try:
        tf.gfile.Rename(newname, oldname)
        tf.gfile.Rename(newname + '.meta', oldname + '.meta')
    except Exception:
        pass
    # Generate new checkpoint
    saver = tf.train.Saver()
    saver.save(td.sess, newname)
    print("    Checkpoint saved")
def train_model(train_data):
    """Trains the given model with the given dataset.

    Runs until FLAGS.train_time minutes have elapsed, alternating between
    discriminator-only warm-up (first 200 steps) and combined
    generator/discriminator updates, dumping progress images every
    FLAGS.summary_period steps and a checkpoint at the end.
    """
    td = train_data
    tda = td.train_model
    tde = td.test_model
    dm_arch.enable_training(True)
    dm_arch.initialize_variables(td.sess)
    # Train the model
    minimize_ops = [tda.gene_minimize, tda.disc_minimize]
    show_ops = [td.annealing, tda.gene_loss, tda.disc_loss, tda.disc_real_loss, tda.disc_fake_loss]
    start_time = time.time()
    step = 0
    done = False
    gene_decor = " "
    print('\nModel training...')
    step = 0
    while not done:
        # Show progress with test features
        if step % FLAGS.summary_period == 0:
            feature, gene_mout = td.sess.run([tde.source_images, tde.gene_out])
            _save_image(td, feature, gene_mout, step, 'out')
            # Compute losses and show that we are alive
            annealing, gene_loss, disc_loss, disc_real_loss, disc_fake_loss = td.sess.run(show_ops)
            # NOTE(review): `elapsed` is only refreshed here, so the stop
            # condition below is evaluated on a value up to summary_period
            # steps stale (the loop only terminates after a summary step).
            elapsed = int(time.time() - start_time)/60
            print('  Progress[%3d%%], ETA[%4dm], Step [%5d], temp[%3.3f], %sgene[%-3.3f], *disc[%-3.3f] real[%-3.3f] fake[%-3.3f]' %
                  (int(100*elapsed/FLAGS.train_time), FLAGS.train_time - elapsed, step,
                   annealing, gene_decor, gene_loss, disc_loss, disc_real_loss, disc_fake_loss))
        # Tight loop to maximize GPU utilization
        # TBD: Is there any way to make Tensorflow repeat multiple times an operation with a single sess.run call?
        if step < 200:
            # Discriminator doing poorly --> train discriminator only
            gene_decor = " "
            for _ in range(10):
                td.sess.run(tda.disc_minimize)
        else:
            # Discriminator doing well --> train both generator and discriminator, but mostly discriminator
            gene_decor = "*"
            for _ in range(2):
                td.sess.run(minimize_ops)
                td.sess.run(tda.disc_minimize)
                td.sess.run(tda.disc_minimize)
                td.sess.run(tda.disc_minimize)
        step += 1
        # Finished?
        current_progress = elapsed / FLAGS.train_time
        if current_progress >= 1.0:
            done = True
        # Decrease annealing temperature exponentially
        if step % FLAGS.annealing_half_life == 0:
            td.sess.run(td.halve_annealing)
        # Save checkpoint
        #if step % FLAGS.checkpoint_period == 0:
        #    _save_checkpoint(td, step)
    _save_checkpoint(td, step)
    print('Finished training!')
|
david-gpu/deep-makeover | dm_input.py | <gh_stars>100-1000
import tensorflow as tf
import dm_celeba
FLAGS = tf.app.flags.FLAGS
def input_data(sess, mode, filenames, capacity_factor=3):
    """Builds the input pipeline and returns a batched image tensor in [0,1].

    Args:
        sess: tf.Session used to start the queue runners.
        mode: 'train', 'test' or 'inference' — selects the slice of
            *filenames*, the batch size, and the augmentation/cropping.
        filenames: list of JPEG file paths (Celeb-A, 178x218 each).
        capacity_factor: queue capacity as a multiple of the batch size.
    Returns:
        A float32 tensor of shape (batch, 100, 80, 3).
    Raises:
        ValueError: when *mode* is not one of the three accepted values.
    """
    # Separate training and test sets
    # TBD: Use partition given by dataset creators
    assert mode == 'inference' or len(filenames) >= FLAGS.test_vectors
    if mode == 'train':
        filenames = filenames[FLAGS.test_vectors:]
        batch_size = FLAGS.batch_size
    elif mode == 'test':
        filenames = filenames[:FLAGS.test_vectors]
        batch_size = FLAGS.batch_size
    elif mode == 'inference':
        filenames = filenames[:]
        batch_size = 1
    else:
        raise ValueError('Unknown mode `%s`' % (mode,))
    # Read each JPEG file
    reader = tf.WholeFileReader()
    filename_queue = tf.train.string_input_producer(filenames)
    key, value = reader.read(filename_queue)
    channels = 3
    image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")
    image.set_shape([None, None, channels])
    # Crop and other random augmentations
    if mode == 'train':
        image = tf.image.random_flip_left_right(image)
        #image = tf.image.random_saturation(image, .95, 1.05)
        #image = tf.image.random_brightness(image, .05)
        #image = tf.image.random_contrast(image, .95, 1.05)
    size_x, size_y = 80, 100
    if mode == 'inference':
        # TBD: What does the 'align_corners' parameter do? Stretch blit?
        image = tf.image.resize_images(image, (size_y, size_x), method=tf.image.ResizeMethod.AREA)
    else:
        # Dataset samples are 178x218 pixels
        # Select face only without hair
        off_x, off_y = 49, 90
        image = tf.image.crop_to_bounding_box(image, off_y, off_x, size_y, size_x)
    # Scale 8-bit pixel values into [0,1]
    feature = tf.cast(image, tf.float32)/255.0
    # Using asynchronous queues
    features = tf.train.batch([feature],
                              batch_size=batch_size,
                              num_threads=4,
                              capacity = capacity_factor*batch_size,
                              name='features')
    tf.train.start_queue_runners(sess=sess)
    return features
|
david-gpu/deep-makeover | dm_model.py | <gh_stars>100-1000
import math
import numpy as np
import tensorflow as tf
import dm_arch
import dm_utils
FLAGS = tf.app.flags.FLAGS
def _residual_block(model, num_units, mapsize, nlayers=2):
    """Adds a residual block similar to Arxiv 1512.03385, Figure 3.

    Projects to *num_units* channels if needed, then stacks *nlayers*
    relu+conv layers, each with its own bypass connection.  Returns *model*
    to allow chaining.
    """
    # TBD: Try pyramidal block as per arXiv 1610.02915.
    # Note Figure 6d (the extra BN compared to 6b seems to help as per Table 2)
    # Also note Figure 5b.
    # BUG FIX: was `assert cond and "msg"`, which silently drops the message;
    # the comma form actually reports it on failure (same pass/fail behavior).
    assert len(model.get_output().get_shape()) == 4, "Previous layer must be 4-dimensional (batch, width, height, channels)"
    # Add *linear* projection in series if needed prior to shortcut
    if num_units != int(model.get_output().get_shape()[3]):
        model.add_conv2d(num_units, mapsize=1, stride=1)
    if nlayers > 0:
        # Batch norm not needed for every conv layer
        # and it slows down training substantially
        model.add_batch_norm()
    for _ in range(nlayers):
        # Bypassing on every conv layer, as implied by Arxiv 1612.07771
        # Experimental results particularly favor one (Arxiv 1512.03385) or the other (this)
        bypass = model.get_output()
        model.add_relu()
        model.add_conv2d(num_units, mapsize=mapsize, is_residual=True)
        model.add_sum(bypass)
    return model
def _generator_model(sess, features):
    """Builds the encoder/decoder generator network (see Arxiv 1603.05027).

    *features* is expected in [0,1]; it is rescaled to [-1,1] on input, and
    the final widened sigmoid (rnge=1.1) maps the output to roughly
    (-0.05, 1.05) so it can saturate 0/1 pixel values.
    """
    # See Arxiv 1603.05027
    model = dm_arch.Model('GENE', 2 * features - 1)
    mapsize = 3
    # Encoder: residual blocks with 2x downsampling between them
    layers  = [24, 48]
    for nunits in layers:
        _residual_block(model, nunits, mapsize)
        model.add_avg_pool()
    # Decoder: residual blocks with 2x upsampling between them
    layers = [96, 64]
    for nunits in layers:
        _residual_block(model, nunits, mapsize)
        _residual_block(model, nunits, mapsize)
        model.add_upscale()
    nunits = 48
    _residual_block(model, nunits, mapsize)
    _residual_block(model, nunits, mapsize)
    # Project down to RGB
    model.add_conv2d(3, mapsize=1)
    model.add_sigmoid(1.1)
    return model
def _discriminator_model(sess, image):
    """Builds the discriminator (critic) network.

    *image* is expected in [0,1] and rescaled to [-1,1]; the network
    downsamples through conv+avg-pool stages and ends with a spatial mean,
    producing one unbounded score per sample (WGAN-style critic).
    """
    model = dm_arch.Model('DISC', 2 * image - 1.0)
    mapsize = 3
    layers  = [64, 96, 128, 192] #[32, 48, 96, 128]
    for nunits in layers:
        model.add_batch_norm()
        model.add_lrelu()
        model.add_conv2d(nunits, mapsize=mapsize)
        model.add_avg_pool()
    nunits = layers[-1]
    model.add_batch_norm()
    model.add_lrelu()
    model.add_conv2d(nunits, mapsize=mapsize)
    #model.add_batch_norm()
    model.add_lrelu()
    model.add_conv2d(1, mapsize=mapsize)
    model.add_mean()
    return model
def _generator_loss(features, gene_output, disc_fake_output, annealing):
    """Generator loss: mean of -D(fake), i.e. how well the generator fools the critic.

    NOTE(review): `features`, `gene_output` and `annealing` are currently
    unused here — no pixel-level loss term is applied despite the
    pixel_loss_* flags existing.
    """
    # I.e. did we fool the discriminator?
    gene_adversarial_loss = tf.reduce_mean(-disc_fake_output, name='gene_adversarial_loss')
    return gene_adversarial_loss # gene_loss
def _discriminator_loss(disc_real_output, disc_fake_output):
    """Critic loss: mean(D(fake)) - mean(D(real)), returned with both components."""
    # I.e. did we correctly identify the input as real or not?
    disc_real_loss = -disc_real_output
    disc_fake_loss = disc_fake_output
    disc_real_loss = tf.reduce_mean(disc_real_loss, name='disc_real_loss')
    disc_fake_loss = tf.reduce_mean(disc_fake_loss, name='disc_fake_loss')
    # NOTE(review): op name 'dics_loss' is a typo for 'disc_loss'; left as-is
    # because renaming graph ops can break existing checkpoints/summaries.
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='dics_loss')
    return disc_loss, disc_real_loss, disc_fake_loss
def _clip_weights(var_list, weights_threshold):
    """Clip every variable in *var_list* into [-weights_threshold, +weights_threshold].

    Returns a single grouped op that performs all the clipping assignments
    (WGAN-style weight clipping).
    """
    assign_ops = [
        tf.assign(var, tf.clip_by_value(var, -weights_threshold, weights_threshold))
        for var in var_list
    ]
    return tf.group(*assign_ops, name='clip_weights')
def create_model(sess, source_images, target_images=None, annealing=None, verbose=False):
    """Assembles the generator (and, when training, the discriminator pair).

    Args:
        sess: tf.Session the graph lives in.
        source_images: (batch, rows, cols, 3) input tensor in [0,1].
        target_images: real samples for the discriminator; None for inference.
        annealing: scalar tensor driving learning-rate/noise decay (required
            when target_images is given).
        verbose: print parameter counts.
    Returns:
        A dm_utils.Container exposing every local of this function
        (gene_out, losses, minimize ops, variable lists, ...).
    """
    rows = int(source_images.get_shape()[1])
    cols = int(source_images.get_shape()[2])
    depth = int(source_images.get_shape()[3])
    #
    # Generator
    #
    gene = _generator_model(sess, source_images)
    gene_out = gene.get_output()
    gene_var_list = gene.get_all_variables()
    if verbose:
        print("Generator input (feature) size is %d x %d x %d = %d" %
              (rows, cols, depth, rows*cols*depth))
        print("Generator has %4.2fM parameters" % (gene.get_num_parameters()/1e6,))
        print()
    if target_images is not None:
        learning_rate = tf.maximum(FLAGS.learning_rate_start * annealing, FLAGS.learning_rate_end, name='learning_rate')
        # Instance noise used to aid convergence.
        # See http://www.inference.vc/instance-noise-a-trick-for-stabilising-gan-training/
        noise_shape = [FLAGS.batch_size, rows, cols, depth]
        noise = tf.truncated_normal(noise_shape, mean=0.0, stddev=FLAGS.instance_noise*annealing, name='instance_noise')
        noise = tf.reshape(noise, noise_shape) # TBD: Why is this even necessary? I don't get it.
        # NOTE(review): this overwrite disables the instance-noise trick built
        # just above — presumably an experiment left in; verify intent.
        noise = 0.0
        #
        # Discriminator: one takes real inputs, another takes fake (generated) inputs
        #
        disc_real = _discriminator_model(sess, target_images + noise)
        disc_real_out = disc_real.get_output()
        disc_var_list = disc_real.get_all_variables()
        disc_fake = _discriminator_model(sess, gene_out + noise)
        disc_fake_out = disc_fake.get_output()
        if verbose:
            print("Discriminator input (feature) size is %d x %d x %d = %d" %
                  (rows, cols, depth, rows*cols*depth))
            print("Discriminator has %4.2fM parameters" % (disc_real.get_num_parameters()/1e6,))
            print()
        #
        # Losses and optimizers
        #
        gene_loss = _generator_loss(source_images, gene_out, disc_fake_out, annealing)
        disc_loss, disc_real_loss, disc_fake_loss = _discriminator_loss(disc_real_out, disc_fake_out)
        gene_opti = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                           name='gene_optimizer')
        # Note WGAN doesn't work well with Adam or any other optimizer that relies on momentum
        disc_opti = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.0,
                                              name='disc_optimizer')
        gene_minimize = gene_opti.minimize(gene_loss, var_list=gene_var_list, name='gene_loss_minimize')
        disc_minimize = disc_opti.minimize(disc_loss, var_list=disc_var_list, name='disc_loss_minimize')
        # Weight clipping a la WGAN (arXiv 1701.07875)
        # TBD: We shouldn't be clipping all variables (incl biases), just the weights
        disc_clip_weights = _clip_weights(disc_var_list, FLAGS.disc_weights_threshold)
        disc_minimize = tf.group(disc_minimize, disc_clip_weights)
    # Package everything into an dumb object
    model = dm_utils.Container(locals())
    return model
|
david-gpu/deep-makeover | dm_celeba.py | <reponame>david-gpu/deep-makeover<gh_stars>100-1000
import numpy as np
import os.path
import random
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# For convenience, here are the available attributes in the dataset:
# 5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs Big_Lips \
# Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows Chubby Double_Chin \
# Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache \
# Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns \
# Smiling Straight_Hair Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace
# Wearing_Necktie Young
def _read_attributes(attrfile):
"""Parses attributes file from Celeb-A dataset and returns"""
# The first line is the number of images in the dataset. Ignore.
f = open(attrfile, 'r')
f.readline()
# The second line contains the names of the boolean attributes
names = f.readline().strip().split()
attr_names = {}
for i in range(len(names)):
attr_names[names[i]] = i
# The remaining lines contain file name and a list of boolean attributes
attr_values = []
for _, line in enumerate(f):
fields = line.strip().split()
img_name = fields[0]
assert img_name[-4:] == '.jpg'
attr_bitfield = [field == '1' for field in fields[1:]]
attr_bitfield = np.array(attr_bitfield, dtype=np.bool)
attr_values.append((img_name, attr_bitfield))
return attr_names, attr_values
def _filter_attributes(attr_names, attr_values, sel):
"""Returns the filenames that match the attributes given by 'dic'"""
# Then select those files whose attributes all match the selection
filenames = []
for filename, attrs in attr_values:
all_match = True
for name, value in sel.items():
column = attr_names[name]
#print("name=%s, value=%s, column=%s, attrs[column]=%s" % (name, value, column, attrs[column]))
if attrs[column] != value:
all_match = False
break
if all_match:
filenames.append(filename)
return filenames
def select_samples(selection=None):
    """Select Celeb-A images whose attributes match the given constraints.

    Args:
        selection: Dict mapping attribute name -> required boolean value;
            ``None`` (the default) means no constraints, i.e. all images.
            The ``None`` sentinel replaces the original mutable default
            argument ``{}``.

    Returns:
        List of full image paths (rooted at ``FLAGS.dataset``), shuffled
        into random order.
    """
    if selection is None:
        selection = {}
    attrfile = os.path.join(FLAGS.dataset, FLAGS.attribute_file)
    names, attributes = _read_attributes(attrfile)
    filenames = _filter_attributes(names, attributes, selection)
    # Sort before shuffling so the result is reproducible under a fixed
    # random seed regardless of the order the attribute file listed them.
    filenames = sorted(filenames)
    random.shuffle(filenames)
    return [os.path.join(FLAGS.dataset, file) for file in filenames]
|
david-gpu/deep-makeover | dm_infer.py | import numpy as np
import tensorflow as tf
import dm_utils
FLAGS = tf.app.flags.FLAGS
def inference(infer_data):
    """Run the generator once and write the resulting image to disk.

    Args:
        infer_data: Container exposing a TensorFlow session (``sess``)
            and the inference model (``infer_model``) whose ``gene_out``
            tensor evaluates to a single-image batch.
    """
    session = infer_data.sess
    model = infer_data.infer_model
    # Evaluate the generator output, then drop the leading batch axis
    # before saving the single image to FLAGS.outfile.
    batch = session.run(model.gene_out)
    dm_utils.save_image(np.squeeze(batch, axis=0), FLAGS.outfile)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.