| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e244afe21842d52ced891cd2c82f5a5dc61e1701 | 658e2e3cb8a4d5343a125f7deed19c9ebf06fa68 | /course_DE/Udacity-Data-Engineering-master/Data Pipeline with Airflow/Production Data Pipelines - Exercise 1.py | 2189c509168783ee7e6770e7df5d77f68ffca7c2 | [] | no_license | yennanliu/analysis | 3f0018809cdc2403f4fbfe4b245df1ad73fa08a5 | 643ad3fed41961cddd006fadceb0e927f1db1f23 | refs/heads/master | 2021-01-23T21:48:58.572269 | 2020-10-13T22:47:12 | 2020-10-13T22:47:12 | 57,648,676 | 11 | 9 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | #Instructions
#In this exercise, we’ll consolidate repeated code into Operator Plugins
#1 - Move the data quality check logic into a custom operator
#2 - Replace the data quality check PythonOperators with our new custom operator
#3 - Consolidate both the S3 to RedShift functions into a custom operator
#4 - Replace the S3 to RedShift PythonOperators with our new custom operator
#5 - Execute the DAG
import datetime
import logging
from airflow import DAG
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators import (
HasRowsOperator,
PostgresOperator,
PythonOperator,
S3ToRedshiftOperator
)
import sql_statements
#
# TODO: Replace the data quality checks with the HasRowsOperator
#
dag = DAG(
"lesson3.exercise1",
start_date=datetime.datetime(2018, 1, 1, 0, 0, 0, 0),
end_date=datetime.datetime(2018, 12, 1, 0, 0, 0, 0),
schedule_interval="@monthly",
max_active_runs=1
)
create_trips_table = PostgresOperator(
task_id="create_trips_table",
dag=dag,
postgres_conn_id="redshift",
sql=sql_statements.CREATE_TRIPS_TABLE_SQL
)
copy_trips_task = S3ToRedshiftOperator(
task_id="load_trips_from_s3_to_redshift",
dag=dag,
table="trips",
redshift_conn_id="redshift",
aws_credentials_id="aws_credentials",
s3_bucket="udac-data-pipelines",
s3_key="divvy/partitioned/{execution_date.year}/{execution_date.month}/divvy_trips.csv"
)
#
# TODO: Replace this data quality check with the HasRowsOperator
#
check_trips = HasRowsOperator(
task_id='check_trips_data',
dag=dag,
redshift_conn_id="redshift",
table="trips"
)
create_stations_table = PostgresOperator(
task_id="create_stations_table",
dag=dag,
postgres_conn_id="redshift",
sql=sql_statements.CREATE_STATIONS_TABLE_SQL,
)
copy_stations_task = S3ToRedshiftOperator(
task_id="load_stations_from_s3_to_redshift",
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_credentials",
s3_bucket="udac-data-pipelines",
s3_key="divvy/unpartitioned/divvy_stations_2017.csv",
table="stations"
)
#
# TODO: Replace this data quality check with the HasRowsOperator
#
check_stations = HasRowsOperator(
task_id='check_stations_data',
dag=dag,
redshift_conn_id="redshift",
table="stations"
)
create_trips_table >> copy_trips_task
create_stations_table >> copy_stations_task
copy_stations_task >> check_stations
copy_trips_task >> check_trips | [
"f339339@gmail.com"
] | f339339@gmail.com |
bf0ed9214a5348f09e8400bef9fa1eaedeb2800b | a2db83bda49ef23a49949d4730ea6d8b85ea7790 | /tgenv/bin/rst2html.py | e38129ccaeb851156432365cff1535757f8428d8 | [] | no_license | garrettmc/TGWikiTutorial | 18d0577845ec8c22245b5fa23f9e72b7ddf0c5cf | fcf1955cafc34c25986a14b841a9f05d11dd86d0 | refs/heads/master | 2021-01-10T18:37:36.724065 | 2014-12-20T00:46:00 | 2014-12-20T00:46:00 | 28,248,656 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | #!/Users/garrett/Documents/InfoPogo/GitHub/TGWikiTutorial/tgenv/bin/python2.6
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| [
"garrett@Garretts-Mac-Pro.local"
] | garrett@Garretts-Mac-Pro.local |
616872fb1ce3082ace8c174bd99afa40e1fef1d8 | eb34fba79aceb38feaa78e148e3051603ae82474 | /Sanity/Chapter1.py | a985aa0e12c281bb2f08a7a4d0e3b6e40a53d004 | [] | no_license | RakeshDevopsPy/com.theautomatedtester | a5202c58db4451ca0fd607bec01cfecc482b77d9 | acc21172236c141b4030ba6c25d296772b21ae25 | refs/heads/master | 2021-03-29T00:03:01.335392 | 2020-03-17T07:40:09 | 2020-03-17T07:40:09 | 247,907,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from selenium import webdriver
import time
driver=webdriver.Chrome()
driver.get("http://book.theautomatedtester.co.uk/")
driver.maximize_window()
time.sleep(2)
driver.refresh() | [
"Tejasree.Krishnammagari@landisgyr.com"
] | Tejasree.Krishnammagari@landisgyr.com |
0fe346359edc276de2c737c0eb967f27d570aafe | 6ac77834909c485686638d27c0bf41e6d1765cf7 | /src/mapping/writer/mysql_hbase_hawq_writer.py | 79911c5ab6d7c61ed2a50ecbcdabf8ecb5943d18 | [] | no_license | YangXinNewlife/gears | 4144e451861efb0f3ae1d738eb5fcd6cec46a833 | 486b1ce5a7b8d8682bb1394be8f5dd6ae0fca837 | refs/heads/master | 2021-01-20T01:41:30.074696 | 2017-05-26T08:17:45 | 2017-05-26T08:17:45 | 89,316,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | # -*- coding:utf-8 -*-
__author__ = 'yx'
from src.mapping.writer.writer import Writer
class MysqlHBaseHawqWriter(Writer):
def __init__(self):
pass
def convert_data_type(self, data_type):
pass
| [
"yangxin@zetyun.com"
] | yangxin@zetyun.com |
107ba4ec948047de9c4b2d500a7aba9b07f2cc54 | 780011f6f6f8bfea1073b2e2c9762891776f552a | /2. face training.py | 0753972434297414b9de6bf7d9cf237d33dcc17e | [] | no_license | cuongnv70/FaceRecognition | a5f4fcd3ebc3f76a32ced73e55b6d7241060440b | 9777adb9f8a18ffd154f26695f8a3e5c6b832008 | refs/heads/main | 2023-01-09T23:33:51.873197 | 2020-10-31T07:46:44 | 2020-10-31T07:46:44 | 308,830,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | ''''
Đào tạo nhiều khuôn mặt có trong dataset
==> Mỗi khuôn mặt phải có một ID số nguyên bằng số duy nhất là 1, 2, 3, v.v.
'''
import cv2
import numpy as np
from PIL import Image
import os
# Path to the face image dataset
path = 'dataset'
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Function to read the face images and their label data
def getImagesAndLabels(path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faceSamples=[]
ids = []
for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L') # Convert the captured image to grayscale
img_numpy = np.array(PIL_img,'uint8')
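        # The filename is expected to look like "User.<id>.<count>.jpg",
        # so the numeric ID is the second dot-separated field parsed below.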
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x,y,w,h) in faces:
faceSamples.append(img_numpy[y:y+h,x:x+w])
ids.append(id)
return faceSamples,ids
print ("\n [!] Training faces. Please wait ...")
faces, ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))
# Save the model to trainer/trainer.yml
recognizer.write('trainer/trainer.yml')
# Print the number of trained faces and exit the program
print("\n [!] {0} faces trained. Exiting program".format(len(np.unique(ids))))
| [
"noreply@github.com"
] | noreply@github.com |
7bba5d8fd1bd54efec43b3b29ca9afb5a43a8368 | e4a0402114702d8e8b5f4b6f8585200358d4ccf9 | /training/config.py | f54f7fb5c39a88ea8cd47fcfa810de26aaa4b213 | [] | no_license | jbdatascience/self-attention-music-tagging | 2cbd6beb3e357b3d7aefc908cf50a995bd439163 | 2122c265d8c66ade43cff70fc8b585f21756d497 | refs/heads/master | 2023-04-09T05:19:04.936812 | 2020-10-30T03:01:15 | 2020-10-30T03:01:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | class Config(object):
def __init__(self,
back_end,
conv_channels,
attention_channels,
attention_layers,
attention_heads,
attention_length,
num_class,
batch_size,
attention_dropout,
fc_dropout,
is_cuda):
self.back_end = back_end
self.conv_channels = conv_channels
self.attention_channels = attention_channels
self.attention_layers = attention_layers
self.attention_heads = attention_heads
self.attention_length = attention_length
self.num_class = num_class
self.batch_size = batch_size
self.attention_dropout = attention_dropout
self.fc_dropout = fc_dropout
self.is_cuda = is_cuda
| [
"sanghyuk.chun@gmail.com"
] | sanghyuk.chun@gmail.com |
c8bb8be2b7b980a1af94cd61d72a7b573d2fad49 | 608052e735474dc4dffc129f80dc70407a1cf7db | /make_xml_from_feature.py | e748988a1e63a1f0da4168780757cee470db706e | [] | no_license | 646677064/tools | 389595517271c6d4a407848dc6572082b6d59826 | 8e857e7634b5690430111446bdd4fcd76850fa43 | refs/heads/master | 2020-03-11T18:24:29.754483 | 2018-04-19T07:33:54 | 2018-04-19T07:33:54 | 130,176,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,278 | py | import numpy as np
import os
import sys
import argparse
import glob
import time
#import _init_paths
from units import SClassifier, AverageMeter, convert_secs2time
import caffe
import scipy.io as sio
import sys,os,subprocess,commands
from subprocess import Popen,PIPE
import random
import math
# from fast_rcnn.config import cfg
# from fast_rcnn.test import im_detect
# from fast_rcnn.nms_wrapper import nms
#from utils.timer import Timer
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ElementTree,Element
from xml.etree.ElementTree import SubElement
# import matplotlib.pyplot as plt
# import numpy as np
#import scipy.io as sio
import cv2
import skimage
import skimage.io  # required by skloadimage() below
def read_xml(in_path):
tree = ElementTree()
tree.parse(in_path)
return tree
def write_xml(tree, out_path):
tree.write(out_path, encoding="utf-8",xml_declaration=True)
def if_match(node, kv_map):
for key in kv_map:
if node.get(key) != kv_map.get(key):
return False
return True
#---------------search -----
def find_nodes(tree, path):
return tree.findall(path)
def get_node_by_keyvalue(nodelist, kv_map):
result_nodes = []
for node in nodelist:
if if_match(node, kv_map):
result_nodes.append(node)
return result_nodes
#---------------change -----
def change_node_properties(nodelist, kv_map, is_delete=False):
for node in nodelist:
for key in kv_map:
if is_delete:
if key in node.attrib:
del node.attrib[key]
else:
node.set(key, kv_map.get(key))
def change_node_text(nodelist, text, is_add=False, is_delete=False):
for node in nodelist:
if is_add:
node.text += text
elif is_delete:
node.text = ""
else:
node.text = text
def create_node(tag, property_map, content):
element = Element(tag, property_map)
element.text = content
return element
def add_child_node(nodelist, element):
for node in nodelist:
node.append(element)
def del_node_by_tagkeyvalue(nodelist, tag, kv_map):
for parent_node in nodelist:
children = parent_node.getchildren()
for child in children:
if child.tag == tag and if_match(child, kv_map):
parent_node.remove(child)
def parse_xml_WH(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
obj_size = tree.find('size')
obj_struct = {}
obj_struct['width'] = (obj_size.find('width').text)
obj_struct['height'] = (obj_size.find('height').text)
    obj_struct['depth'] = (obj_size.find('depth').text)
    return obj_struct
def parse_xml(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
# tree=ElementTree()
# tree.parse(filename)
baseInfo={}
#baseInfo['folder'] = tree.find('folder').text
baseInfo['filename'] = tree.find('filename').text
baseInfo['path'] = tree.find('path').text
baseInfo['source/database'] = tree.find('source/database').text
#tree.find('database')
baseInfo['size/width'] = tree.find('size/width').text
baseInfo['size/height'] = tree.find('size/height').text
baseInfo['size/depth'] = tree.find('size/depth').text
baseInfo['segmented'] = tree.find('segmented').text
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['score'] = obj.find('score').text
obj_struct['region'] = obj.find('region').text
obj_struct['imageptr'] = obj.find('imageptr').text
if obj.find('label_des') is None:
obj_struct['label_des']=""
else:
obj_struct['label_des'] = obj.find('label_des').text
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = obj.find('truncated').text #remove int()
obj_struct['difficult'] = obj.find('difficult').text #remove int()
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return baseInfo,objects
def parse_xml1(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
# tree=ElementTree()
# tree.parse(filename)
baseInfo={}
baseInfo['foder'] = tree.find('foder').text
baseInfo['filename'] = tree.find('filename').text
baseInfo['path'] = tree.find('path').text
baseInfo['source/database'] = tree.find('source/database').text
#tree.find('database')
baseInfo['size/width'] = tree.find('size/width').text
baseInfo['size/height'] = tree.find('size/height').text
baseInfo['size/depth'] = tree.find('size/depth').text
baseInfo['segmented'] = tree.find('segmented').text
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['score'] = obj.find('score').text
obj_struct['region'] = obj.find('region').text
obj_struct['imageptr'] = obj.find('imageptr').text
if obj.find('label_des') is None:
obj_struct['label_des']=""
else:
obj_struct['label_des'] = obj.find('label_des').text
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = obj.find('truncated').text #remove int()
obj_struct['difficult'] = obj.find('difficult').text #remove int()
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return baseInfo,objects
def load_txt(xfile):
img_files = []
labels = []
for line in open(xfile):
line = line.strip('\n').split(' ')
assert(len(line) == 2)
img_files.append(line[0])
labels.append(int(float(line[1])))
return img_files, labels
def comp_feature(feature_1,feature_2):
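    # Both vectors are L2-normalized below, so this returns
    # ||f1||^2 + ||f2||^2 - 2*(f1 . f2) = 2 - 2*cos(f1, f2):
    # the squared Euclidean distance, where smaller means more similar.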
feature_1=feature_1.reshape(-1)
feature_2=feature_2.reshape(-1)
feature_1_mult = feature_1*feature_1
feature_2_mult = feature_2*feature_2
sum1=np.sqrt(sum(feature_1_mult))
feature_1=feature_1/sum1
sum2=np.sqrt(sum(feature_2_mult))
feature_2=feature_2/sum2
mult=feature_1*feature_2
feature_1_mult = feature_1*feature_1
feature_2_mult = feature_2*feature_2
# print feature_1.shape
# print feature_1_mult
# print sum1
# print feature_1
ret = sum(feature_1_mult)+sum(feature_2_mult)-2*sum(mult)
return ret
def Popen_do(pp_string,b_pip_stdout=True):
#print pp_string
if b_pip_stdout==True:
p = Popen(pp_string, shell=True, stdout=PIPE, stderr=PIPE)#,close_fds=True)
else:
p = Popen(pp_string, shell=True, stderr=PIPE)#,close_fds=True)
out, err = p.communicate()
#p.wait()
print pp_string
if p.returncode != 0:
print err
#return 0
return 1
def demo(net, image_name,num_class,save_ff):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
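    # NOTE: demo() depends on cfg, im_detect, nms and Timer from
    # py-faster-rcnn, whose imports are commented out at the top of this
    # file; restore those imports before calling it.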
#im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im_file=image_name
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
#for zzz in range(100):
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.35
NMS_THRESH = 0.3
thresh=CONF_THRESH
for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]
cls_ind += 1 # because we skipped background
# cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
# cls_scores = scores[:, cls_ind]
# dets = np.hstack((cls_boxes,
# cls_scores[:, np.newaxis])).astype(np.float32)
inds = np.where(scores[:, cls_ind] > thresh)[0]
cls_scores = scores[inds, cls_ind]
if cfg.TEST.AGNOSTIC:
cls_boxes = boxes[inds, 4:8]
else:
cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]
dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
#vis_detections(im, cls, dets, thresh=CONF_THRESH)
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
continue
im_tmp = im#im[:, :, (2, 1, 0)]
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
print bbox,score,cls
cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)
#save_ff="/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg"
im_tmp = im#im[:, :, (2, 1, 0)]
cv2.imwrite(save_ff,im_tmp)
#save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)
class Classifier(caffe.Net):
"""
Classifier extends Net for image class prediction
by scaling, center cropping, or oversampling.
Parameters
----------
image_dims : dimensions to scale input for cropping/sampling.
Default is to scale to net input size for whole-image crop.
mean, input_scale, raw_scale, channel_swap: params for
preprocessing options.
"""
def __init__(self, model_file, pretrained_file, image_dims=None,
mean=None, input_scale=None, raw_scale=None,
channel_swap=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
if not image_dims:
image_dims = self.crop_dims
self.image_dims = image_dims
def predict(self, inputs, oversample=True):
"""
Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes.
"""
# Scale to standardize input dimensions.
input_ = np.zeros((len(inputs),
self.image_dims[0],
self.image_dims[1],
inputs[0].shape[2]),
dtype=np.float32)
print inputs[0].shape
print input_.shape
for ix, in_ in enumerate(inputs):
input_[ix] = caffe.io.resize_image(in_, self.image_dims)
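        # NOTE: the oversampling/center-crop branches below are commented out,
        # so `oversample` is effectively ignored and inputs are only resized
        # above to self.image_dims before the forward pass.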
# if oversample:
# # Generate center, corner, and mirrored crops.
# input_ = caffe.io.oversample(input_, self.crop_dims)
# else:
# # Take center crop.
# center = np.array(self.image_dims) / 2.0
# crop = np.tile(center, (1, 2))[0] + np.concatenate([
# -self.crop_dims / 2.0,
# self.crop_dims / 2.0
# ])
# crop = crop.astype(int)
# input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
# Classify
caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
dtype=np.float32)
for ix, in_ in enumerate(input_):
caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
# # For oversampling, average predictions across crops.
# if oversample:
# predictions = predictions.reshape((len(predictions) / 10, 10, -1))
# predictions = predictions.mean(1)
return predictions
def get_blob_data(self, blob_name):
return self.blobs[blob_name].data
def skloadimage(path_1, color=True):
im_1 = skimage.img_as_float(skimage.io.imread(path_1, as_grey=not color)).astype(np.float32)
print im_1.ndim
print im_1.shape[2]
if im_1.ndim == 2:
im_1 = im_1[:, :, np.newaxis]
if color:
im_1 = np.tile(im_1, (1, 1, 3))
elif im_1.shape[2] == 4:
im_1 = im_1[:, :, :3]
return im_1
def main(argv):
parser = argparse.ArgumentParser()
# Required arguments: input and output files.
parser.add_argument(
"input_file",
help="Input image, directory"
)
parser.add_argument(
"feature_file",
help="Feature mat filename."
)
parser.add_argument(
"score_file",
help="Score Output mat filename."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(
"./models/market1501/caffenet/feature.proto"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(
"./models/market1501/caffenet/caffenet_iter_17000.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--gpu",
type=int,
default=-1,
help="Switch for gpu computation."
)
parser.add_argument(
"--center_only",
action='store_true',
help="Switch for prediction from center crop alone instead of " +
"averaging predictions across crops (default)."
)
parser.add_argument(
"--images_dim",
default='256,256',
help="Canonical 'height,width' dimensions of input images."
)
parser.add_argument(
"--mean_value",
default=os.path.join(
'examples/market1501/market1501_mean.binaryproto'),
help="Data set image mean of [Channels x Height x Width] dimensions " +
"(numpy array). Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--ext",
default='jpg',
help="Image file extension to take as input when a directory " +
"is given as the input file."
)
parser.add_argument(
"--feature_name",
default="fc7",
help="feature blob name."
)
parser.add_argument(
"--score_name",
default="prediction",
help="prediction score blob name."
)
args = parser.parse_args()
#======================================================================================================
# cfg.TEST.HAS_RPN = True # Use RPN for proposals
# cfg.TEST.RPN_PRE_NMS_TOP_N = 6000
# ## Number of top scoring boxes to keep after applying NMS to RPN proposals
# cfg.TEST.RPN_POST_NMS_TOP_N = 2000 #lius 300
# #cfg.TEST.RPN_POST_NMS_TOP_N = 2000 #lius 300
# cfg.TEST.AGNOSTIC=True
# #cfg.TEST.AGNOSTIC=False
# cfg.TEST.RPN_MIN_SIZE=10
# prototxt = "/home/liushuai/tiannuocaffe/py-rfcn-gpu/models/shape/ResNet-101_2/rfcn_end2end/s16_14/b14_test_16_s_4_8_16_32_agnostic.prototxt"
# caffemodel = "/home/liushuai/tiannuocaffe/py-rfcn-gpu/output/goodsType/shapeproj2_trainval/ResNet-101_2_b14_16_s_4_8_16_32_shape_rfcn_ohem_iter_210000.caffemodel"
# save_ff="/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_resultvgg.jpg"
# im_name="/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641//cat.jpg"
# im_name="/storage2/tiannuodata/work/projdata/baiwei/baiweiproj1/JPEGImages/budweiser08782.jpg"#budweiser15059.jpg"
# num_class=2-1#1360-1 #341
# if not os.path.isfile(caffemodel):
# raise IOError(('{:s} not found.\nDid you run ./data/script/'
# 'fetch_faster_rcnn_models.sh?').format(caffemodel))
# caffe.set_mode_gpu()
# caffe.set_device(5)
# cfg.GPU_ID = 5
# net = caffe.Net(prototxt, caffemodel, caffe.TEST)
# print '\n\nLoaded network {:s}'.format(caffemodel)
# # Warmup on a dummy image
# im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
# # for i in xrange(2):
# # _, _= im_detect(net, im)
# im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
# '001763.jpg', '004545.jpg']
# # for im_name in im_names:
# # print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
# # print 'Demo for data/demo/{}'.format(im_name)
# demo(net, im_name,num_class,save_ff)
#======================================================================================================
#args.images_dim="224,224"
image_dims = [int(s) for s in args.images_dim.split(',')]
channel_swap = None
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
mean_value = None
if args.mean_value:
mean_value = [float(s) for s in args.mean_value.split(',')]
mean_value = np.array(mean_value)
if args.gpu >= 0:
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
print("GPU mode, device : {}".format(args.gpu))
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make classifier
classifier = SClassifier(args.model_def, args.pretrained_model,
image_dims=image_dims, mean_value=mean_value,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap)
# classifier = Classifier(args.model_def, args.pretrained_model,
# image_dims=image_dims, mean=mean_value,
# input_scale=args.input_scale, raw_scale=args.raw_scale,
# channel_swap=channel_swap)
dir_1="/storage2/liushuai/gs6_env/market1501_extract_freature/test/OK/"
dir_2="/storage2/liushuai/gs6_env/market1501_extract_freature/test/NG/"
dir_out="/storage2/liushuai/gs6_env/market1501_extract_freature/test/out/"
save_feature_all=None
labels_all=[]
list_1 = os.listdir(dir_1)
for file_1 in list_1:
if os.path.splitext(file_1)[1] !=".xml":
basename = os.path.splitext(file_1)[0]
jpgname = dir_1+file_1
xmlname= dir_1+basename+".xml"
im = caffe.io.load_image(jpgname)#cv2.imread(jpgname)
baseInfo,objects = parse_xml(xmlname)
save_feature=None
#labels=None
for idx_f,oject_1 in enumerate(objects):
cropImg = im[oject_1["bbox"][1]:oject_1["bbox"][3], oject_1["bbox"][0]:oject_1["bbox"][2],:]
_ = classifier.predict([cropImg], not args.center_only)
feature = classifier.get_blob_data(args.feature_name)
assert (feature.shape[0] == 1 )
feature_shape = feature.shape
if save_feature is None:
print('feature : {} : {}'.format(args.feature_name, feature_shape))
save_feature = np.zeros((len(objects), feature.size),dtype=np.float32)
feature = feature.reshape(1, feature.size)
save_feature[idx_f, :] = feature.copy()
labels_all.append(oject_1['name'])
#tmp_file_name=os.path.basename(file_list[idx_f])
#sio.savemat(dir_1+'/'+basename+".feature", {'feature':feature})
            if save_feature_all is None:
save_feature_all=save_feature
else:
save_feature_all=np.concatenate((save_feature_all,save_feature),axis=0)
print len(labels_all),len(save_feature)
print labels_all
list_2 = os.listdir(dir_2)
for file_2 in list_2:
if os.path.splitext(file_2)[1] !=".xml":
basename_2 = os.path.splitext(file_2)[0]
jpgname_2 = dir_2+file_2
xmlname_2= dir_2+basename_2+".xml"
print xmlname_2
            im = caffe.io.load_image(jpgname_2)#cv2.imread(jpgname_2)
baseInfo_2,objects_2 = parse_xml(xmlname_2)
save_feature=None
labels=[]
for idx_f_2,oject_2 in enumerate(objects_2):
if oject_2['name']=="origin":
labels.append("origin")
continue
if oject_2['name']=="miss":
labels.append("miss")
continue
cropImg = im[oject_2["bbox"][1]:oject_2["bbox"][3], oject_2["bbox"][0]:oject_2["bbox"][2],:]
_ = classifier.predict([cropImg], not args.center_only)
feature = classifier.get_blob_data(args.feature_name)
assert (feature.shape[0] == 1 )
feature_shape = feature.shape
# if save_feature is None:
# print('feature : {} : {}'.format(args.feature_name, feature_shape))
# save_feature = np.zeros((len(objects), feature.size),dtype=np.float32)
feature_here = feature.reshape(1, feature.size)
# save_feature[idx_f, :] = feature.copy()
b_same_class=False
for bb_fea in range(0,len(save_feature_all)):
#print aa_fea," ",bb_fea," ",same_file_list[bb_fea][0]
ret = comp_feature(save_feature_all[bb_fea],feature_here)
print labels_all[bb_fea],ret,oject_2['name']
if ret <0.2:
print " ",labels_all[bb_fea],ret,oject_2['name']," ok"
b_same_class=True
#print type(bb_fea)
labels.append(labels_all[bb_fea])
oject_2['name']=labels_all[bb_fea]
break
if b_same_class==False:
print " ",oject_2['name']," background"
labels.append("background")
oject_2['name']="background"
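            # Build a Pascal VOC-style annotation tree for this image and write
            # every matched (non-background) object back out with its new label.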
four_root = ElementTree()
A1 = create_node('annotation',{},"")
four_root._setroot(A1)
B1 = create_node('foder',{},"2")
B2 = create_node('filename',{},jpgname_2)
B3 = create_node('path',{},"2")
A1.append(B1)
A1.append(B2)
A1.append(B3)
B4 = create_node('source',{},"")
A1.append(B4)
C1 = create_node('database',{},"Unknown")
B4.append(C1)
B5 = create_node('size',{},"")
SubElement(B5,"width").text=str(im.shape[1])
SubElement(B5,"height").text=str(im.shape[0])
SubElement(B5,"depth").text="3"
A1.append(B5)
# SubElement(A1,"folder").text=str(width[i])
# SubElement(A1,"filename").text=str(height[i])
# SubElement(A1,"path").text="3"
for idx_f_2,oject_2 in enumerate(objects_2):
if oject_2['name']=="background":
continue
BBobj = create_node('object',{},"")
SubElement(BBobj,"name").text=oject_2['name']
SubElement(BBobj,"pose").text='Unspecified'
SubElement(BBobj,"truncated").text='0'
SubElement(BBobj,"difficult").text='0'
SubElement(BBobj,"score").text=oject_2['score']
SubElement(BBobj,"region").text=oject_2['region']
SubElement(BBobj,"label_des").text=oject_2['label_des']
SubElement(BBobj,"imageptr").text=oject_2['imageptr']
child5 = SubElement(BBobj,"bndbox")
# child1= create_node('name',{},obj['name'])
SubElement(child5,"xmin").text=str(oject_2["bbox"][0])
SubElement(child5,"ymin").text=str(oject_2["bbox"][1])
SubElement(child5,"xmax").text=str(oject_2["bbox"][2])
SubElement(child5,"ymax").text=str(oject_2["bbox"][3])
A1.append(BBobj)
print dir_out+"/"+basename_2+".xml"
four_root.write(dir_out+"/"+basename_2+".xml", encoding="utf-8",xml_declaration=False)
# args.input_file = os.path.expanduser(args.input_file)
# if os.path.isdir(args.input_file):
# list_dir = os.listdir(args.input_file)
# for idx_dir in list_dir:
# print idx_dir
# start_time = time.time()
# epoch_time = AverageMeter()
# if os.path.isdir(args.input_file +"/"+idx_dir):
# #print idx_dir
# file_list=glob.glob(args.input_file +"/"+idx_dir+ '/*.' + args.ext)
# labels = [-1 for _ in xrange(len(file_list))]
# if not os.path.exists(args.feature_file+"/"+idx_dir+'/'):
# os.mkdir(args.feature_file+"/"+idx_dir+'/')
# with open(args.feature_file+"/"+idx_dir+"/list_file.txt","w") as z_f:
# tmp_file_list = [line+"\n" for line in file_list]
# z_f.writelines(tmp_file_list)
# save_feature = None
# size = len(file_list)
# for idx_f, _file_i in enumerate(file_list):
# _input=caffe.io.load_image(_file_i)
# _ = classifier.predict([_input], not args.center_only)
# feature = classifier.get_blob_data(args.feature_name)
# assert (feature.shape[0] == 1 )
# #assert (feature.shape[0] == 1 and score.shape[0] == 1)
# feature_shape = feature.shape
# #score = classifier.get_blob_data(args.score_name)
# # score_shape = score.shape
# if save_feature is None:
# print('feature : {} : {}'.format(args.feature_name, feature_shape))
# save_feature = np.zeros((len(file_list), feature.size),dtype=np.float32)
# save_feature[idx_f, :] = feature.reshape(1, feature.size)
# tmp_file_name=os.path.basename(file_list[idx_f])
# #sio.savemat(args.feature_file+"/"+idx_dir+'/'+os.path.splitext(tmp_file_name)[0]+".feature", {'feature':feature})
# same_file_list=[]
# if len(same_file_list) == 0:
# tmp_list=[0]
# same_file_list.append(tmp_list)
# print size
# for aa_fea in range(1,size):
# #print len(same_file_list)
# b_same_class=False
# for bb_fea in range(0,len(same_file_list)):
# #print aa_fea," ",bb_fea," ",same_file_list[bb_fea][0]
# ret = comp_feature(save_feature[aa_fea],save_feature[same_file_list[bb_fea][0]])
# if ret <0.2:
# b_same_class=True
# same_file_list[bb_fea].append(aa_fea)
# break
# if b_same_class==False:
# tmp_list_in=[aa_fea]
# same_file_list.append(tmp_list_in)
# one_file_list=[ file_list[same_file_list[ss][0]] for ss in range(0,len(same_file_list))]
# with open(args.feature_file+"/"+idx_dir+"/everyclass_one_list_file.txt","w") as one_f:
# tmp_file_one = [line+"\n" for line in one_file_list]
# one_f.writelines(tmp_file_one)
# for cp_file in one_file_list:
# ppsring= "cp "+cp_file+" "+args.feature_file+"/"+idx_dir+"/"
# assert Popen_do(ppsring),ppsring+" error!"
# print idx_dir," different pic :",len(one_file_list)
# epoch_time.update(time.time() - start_time)
# start_time = time.time()
# need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (len(file_list)-1))
# need_time = '{:02d}:{:02d}:{:02d}'.format(need_hour, need_mins, need_secs)
# print need_time
if __name__ == '__main__':
main(sys.argv)
| [
"liushuai@tunicorn.cn"
] | liushuai@tunicorn.cn |
e9f8df1e669df7bb971e196bef4e8f0b517d633e | ca17bd80ac1d02c711423ac4093330172002a513 | /goodyhandy/FirstMissingPositive.py | 9988bcba209286d3584cc6e41ed5e95b6469f9f4 | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | class Solution(object):
def firstMissingPositive(self, A):
"""
        :type A: List[int]
        :rtype: int
"""
length = len(A)
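        # Index-as-hash / cyclic-sort idea: swap each value v in 1..n into
        # slot v-1; afterwards the first index i with A[i] != i+1 gives the
        # answer, in O(n) time and O(1) extra space.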
for i, num in enumerate(A):
if A[i] != i + 1:
while A[i] != i + 1:
                    if A[i] <= 0 or A[i] > length or A[A[i] - 1] == A[i]:
                        break
                    t = A[A[i] - 1]
                    A[A[i] - 1] = A[i]
                    A[i] = t
for i, num in enumerate(A):
if num != i + 1:
return i + 1
return length + 1
| [
"zhao_j1@denison.edu"
] | zhao_j1@denison.edu |
12a195aa8d4e62c4e95cde855a819abf7d531f49 | f73a20969a41cc5ff51c449a5c8bc9f066251fe7 | /train.py | 6483ec54b5e49fa549ee98a7a97999939f7ffb24 | [
"MIT"
] | permissive | eungbean/knowledge-distillation-cifar10 | af092d08df3a289835b6868c0dc6fddccf936581 | 683379804c8724d097a845cee85f130b6767dbd7 | refs/heads/master | 2021-12-07T02:25:12.813212 | 2019-10-30T01:21:26 | 2019-10-30T01:21:26 | 218,184,257 | 1 | 0 | MIT | 2021-09-08T01:23:11 | 2019-10-29T02:07:34 | Python | UTF-8 | Python | false | false | 17,514 | py | """Main entrance for train/eval with/without KD on CIFAR-10"""
import argparse
import logging
import os
import time
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.autograd import Variable
from tqdm import tqdm
import utils
import model.data_loader as data_loader
import model.alexnet as alexnet
import model.studentA as studentA
import model.studentB as studentB
from evaluate import evaluate, evaluate_kd
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
parser = argparse.ArgumentParser()
# parser.add_argument('--data_dir', default='data/64x64_SIGNS', help="Directory for the dataset")
parser.add_argument('--model_dir', default='experiments/alexnet',
help="Directory containing params.json")
parser.add_argument('--restore_file', default=None,
help="Optional, name of the file in --model_dir \
containing weights to reload before training") # 'best' or 'train'
def train(model, optimizer, loss_fn, dataloader, metrics, params, epoch):
"""Train the model on `num_steps` batches
    Args:
        model: (torch.nn.Module) the neural network
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn: a function that takes model outputs and labels and computes
            the loss for the batch
        dataloader: (torch.utils.data.DataLoader) fetches training data
        metrics: (dict) of functions that compute a metric using the output
            and labels of each batch
        params: (Params) hyperparameters
    """
# set model to training mode
model.train()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
acc_avg = utils.RunningAverage()
# Use tqdm for progress bar
with tqdm(total=len(dataloader)) as t:
for i, (train_batch, labels_batch) in enumerate(dataloader):
# move to GPU if available
if params.cuda:
                train_batch, labels_batch = train_batch.cuda(non_blocking=True), \
                                            labels_batch.cuda(non_blocking=True)
# convert to torch Variables
train_batch, labels_batch = Variable(train_batch), Variable(labels_batch)
# compute model output and loss
output_batch = model(train_batch)
loss = loss_fn(output_batch, labels_batch)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
prediction = torch.max(output_batch, 1)
acc_avg.update(np.sum(prediction[1].cpu().numpy() == labels_batch.cpu().numpy()))
# update the average loss
loss_avg.update(loss.item())
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# extract data from torch Variable, move to cpu, convert to numpy arrays
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
# compute all metrics on this batch
summary_batch = {metric:metrics[metric](output_batch, labels_batch)
for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
t.update()
# compute mean of all metrics in summary
metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
#logging on Tensorboard
for k, v in metrics_mean.items():
board_logger.add_scalars(k, {'train':v}, epoch )
def train_and_evaluate(model, train_dataloader, val_dataloader, optimizer,
loss_fn, metrics, params, model_dir, restore_file, board_logger):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
params: (Params) hyperparameters
model_dir: (string) directory containing config, weights and log
restore_file: (string) - name of file to restore from (without its extension .pth.tar)
"""
# reload weights from restore_file if specified
if restore_file is not None:
restore_path = os.path.join(args.model_dir, args.restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
utils.load_checkpoint(restore_path, model, optimizer)
best_val_acc = 0.0
# for cnn models, num_epoch is always < 100, so it's intentionally not using scheduler here
scheduler = StepLR(optimizer, step_size=100, gamma=0.2)
for epoch in range(params.num_epochs):
scheduler.step()
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# compute number of batches in one epoch (one full pass over the training set)
train(model, optimizer, loss_fn, train_dataloader, metrics, params, epoch)
# Evaluate for one epoch on validation set
val_metrics = evaluate(model, loss_fn, val_dataloader, metrics, params)
#logging on Tensorboard
for k, v in val_metrics.items():
board_logger.add_scalars(k, {'test':v}, epoch)
val_acc = val_metrics['accuracy']
is_best = val_acc>=best_val_acc
# Save weights
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict' : optimizer.state_dict()},
is_best=is_best,
checkpoint=model_dir)
# If best_eval, best_save_path
if is_best:
logging.info("- Found new best accuracy")
best_val_acc = val_acc
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
utils.save_dict_to_json(val_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(model_dir, "metrics_val_last_weights.json")
utils.save_dict_to_json(val_metrics, last_json_path)
# Helper function: get [batch_idx, teacher_outputs] list by running teacher model once
def fetch_teacher_outputs(teacher_model, dataloader, params):
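    # Runs the teacher once and caches its logits per batch; train_kd later
    # pairs teacher_outputs[i] with batch i, so the dataloader must yield
    # batches in the same order on every pass (no reshuffling between epochs).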
# set teacher_model to evaluation mode
teacher_model.eval()
teacher_outputs = []
for i, (data_batch, labels_batch) in enumerate(dataloader):
if params.cuda:
            data_batch, labels_batch = data_batch.cuda(non_blocking=True), \
                                       labels_batch.cuda(non_blocking=True)
data_batch, labels_batch = Variable(data_batch), Variable(labels_batch)
output_teacher_batch = teacher_model(data_batch).data.cpu().numpy()
teacher_outputs.append(output_teacher_batch)
return teacher_outputs
# Defining train_kd & train_and_evaluate_kd functions
def train_kd(model, teacher_outputs, optimizer, loss_fn_kd, dataloader, metrics, params, epoch):
"""Train the model on `num_steps` batches
    Args:
        model: (torch.nn.Module) the student network
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn_kd: a function that computes the distillation loss from the
            student outputs, labels, teacher outputs, and params
        dataloader: (torch.utils.data.DataLoader) fetches training data
        metrics: (dict) of functions that compute a metric using the output
            and labels of each batch
        params: (Params) hyperparameters
    """
# set model to training mode
model.train()
# teacher_model.eval()
# summary for current training loop and a running average object for loss
summ = []
loss_avg = utils.RunningAverage()
# Use tqdm for progress bar
with tqdm(total=len(dataloader)) as t:
for i, (train_batch, labels_batch) in enumerate(dataloader):
# move to GPU if available
if params.cuda:
                train_batch, labels_batch = train_batch.cuda(non_blocking=True), \
                                            labels_batch.cuda(non_blocking=True)
# convert to torch Variables
train_batch, labels_batch = Variable(train_batch), Variable(labels_batch)
# compute model output, fetch teacher output, and compute KD loss
output_batch = model(train_batch) #torch.Size([128, 10])
# get one batch output from teacher_outputs list
output_teacher_batch = torch.from_numpy(teacher_outputs[i])
if params.cuda:
                output_teacher_batch = output_teacher_batch.cuda(non_blocking=True)
output_teacher_batch = Variable(output_teacher_batch, requires_grad=False)
# print("output shape: ",output_teacher_batch.shape,"; labels shape: ",labels_batch.shape)
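            # loss_fn_kd is expected to blend a temperature-softened KL term
            # against the teacher logits with a cross-entropy term on the hard
            # labels, weighted by hyperparameters such as params.alpha and
            # params.temperature (see the loss_fn_kd definition in the model file).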
loss = loss_fn_kd(output_batch, labels_batch, output_teacher_batch, params)
# clear previous gradients, compute gradients of all variables wrt loss
optimizer.zero_grad()
loss.backward()
# performs updates using calculated gradients
optimizer.step()
# Evaluate summaries only once in a while
if i % params.save_summary_steps == 0:
# extract data from torch Variable, move to cpu, convert to numpy arrays
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
# compute all metrics on this batch
summary_batch = {metric:metrics[metric](output_batch, labels_batch)
for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# update the average loss
loss_avg.update(loss.item())
t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
t.update()
# compute mean of all metrics in summary
metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
logging.info("- Train metrics: " + metrics_string)
#logging on Tensorboard
for k, v in metrics_mean.items():
board_logger.add_scalars(k, {'train':v}, epoch)
def train_and_evaluate_kd(model, teacher_model, train_dataloader, val_dataloader, optimizer,
loss_fn_kd, metrics, params, model_dir, restore_file=None):
"""Train the model and evaluate every epoch.
Args:
model: (torch.nn.Module) the neural network
params: (Params) hyperparameters
model_dir: (string) directory containing config, weights and log
restore_file: (string) - file to restore (without its extension .pth.tar)
"""
# reload weights from restore_file if specified
if restore_file is not None:
restore_path = os.path.join(args.model_dir, args.restore_file + '.pth.tar')
logging.info("Restoring parameters from {}".format(restore_path))
utils.load_checkpoint(restore_path, model, optimizer)
best_val_acc = 0.0
# fetch teacher outputs using teacher_model under eval() mode
loading_start = time.time()
teacher_model.eval()
teacher_outputs = fetch_teacher_outputs(teacher_model, train_dataloader, params)
teacher_outputs_val = fetch_teacher_outputs(teacher_model, val_dataloader, params)
elapsed_time = math.ceil(time.time() - loading_start)
logging.info("- Finished computing teacher outputs after {} secs..".format(elapsed_time))
# for cnn models, num_epoch is always < 100, so it's intentionally not using scheduler here
scheduler = StepLR(optimizer, step_size=100, gamma=0.2)
for epoch in range(params.num_epochs):
scheduler.step()
# Run one epoch
logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))
# compute number of batches in one epoch (one full pass over the training set)
train_kd(model, teacher_outputs, optimizer, loss_fn_kd, train_dataloader,
metrics, params, epoch)
# Evaluate for one epoch on validation set
val_metrics = evaluate_kd(model, val_dataloader, metrics, params, teacher_outputs_val, loss_fn_kd)
#logging on Tensorboard
for k, v in val_metrics.items():
board_logger.add_scalars(k, {'test':v}, epoch)
val_acc = val_metrics['accuracy']
is_best = val_acc>=best_val_acc
# Save weights
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optim_dict' : optimizer.state_dict()},
is_best=is_best,
checkpoint=model_dir)
# If best_eval, best_save_path
if is_best:
logging.info("- Found new best accuracy")
best_val_acc = val_acc
# Save best val metrics in a json file in the model directory
best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
utils.save_dict_to_json(val_metrics, best_json_path)
# Save latest val metrics in a json file in the model directory
last_json_path = os.path.join(model_dir, "metrics_val_last_weights.json")
utils.save_dict_to_json(val_metrics, last_json_path)
if __name__ == '__main__':
# Load the parameters from json file
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# use GPU if available
params.cuda = torch.cuda.is_available()
# Set the random seed for reproducible experiments
random.seed(230)
torch.manual_seed(230)
if params.cuda: torch.cuda.manual_seed(230)
# Set the logger
utils.set_logger(os.path.join(args.model_dir, 'train.log'))
board_logger = SummaryWriter(os.path.join(args.model_dir,'runs',time.strftime("%Y%m%d-%H%M%S")))
# Create the input data pipeline
logging.info("Loading the datasets...")
# fetch dataloaders, considering full-set vs. sub-set scenarios
if params.subset_percent < 1.0:
train_dl = data_loader.fetch_subset_dataloader('train', params)
else:
train_dl = data_loader.fetch_dataloader('train', params)
dev_dl = data_loader.fetch_dataloader('dev', params)
logging.info("- done.")
"""Based on the model_version, determine model/optimizer and KD training mode
alexnet was trained on multi-GPU; need to specify a dummy
nn.DataParallel module to correctly load the model parameters
"""
if "student" in params.model_version:
if "studentA" in params.model_version:
# train a studentA with knowledge distillation
model = studentA.studentA(params).cuda() if params.cuda else studentA.studentA(params)
# fetch loss function and metrics definition in model files
loss_fn_kd = studentA.loss_fn_kd
metrics = studentA.metrics
elif "studentB" in params.model_version:
# train a studentA with knowledge distillation
model = studentB.studentB(params).cuda() if params.cuda else studentB.studentB(params)
# fetch loss function and metrics definition in model files
loss_fn_kd = studentB.loss_fn_kd
metrics = studentB.metrics
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
summary(model, input_size=(3,32,32))
"""
Specify the pre-trained teacher models for knowledge distillation
Important note: alexnet was pre-trained models using multi-GPU,
therefore need to call "nn.DaraParallel" to correctly load the model weights
Trying to run on CPU will then trigger errors (too time-consuming anyway)!
"""
teacher_model = alexnet.AlexNet(params)
teacher_checkpoint = 'experiments/alexnet/best.pth.tar'
teacher_model = teacher_model.cuda() if params.cuda else teacher_model
utils.load_checkpoint(teacher_checkpoint, teacher_model)
# Train the model with KD
logging.info("Experiment - model version: {}".format(params.model_version))
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
logging.info("First, loading the teacher model and computing its outputs...")
train_and_evaluate_kd(model, teacher_model, train_dl, dev_dl, optimizer, loss_fn_kd,
metrics, params, args.model_dir, args.restore_file)
# non-KD mode: regular training of the baseline AlexNet
else:
model = alexnet.AlexNet(params).cuda() if params.cuda else alexnet.AlexNet(params)
optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
# fetch loss function and metrics
loss_fn = alexnet.loss_fn
metrics = alexnet.metrics
summary(model, input_size=(3,32,32))
# Train the model
logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
train_and_evaluate(model, train_dl, dev_dl, optimizer, loss_fn, metrics, params,
args.model_dir, args.restore_file, board_logger) | [
"eungbean@yonsei.ac.kr"
] | eungbean@yonsei.ac.kr |
2224c4722a23ff2f4f9c86984146a37d9ca3749e | e76ea38dbe5774fccaf14e1a0090d9275cdaee08 | /src/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp | ade15eebff42e9f6af9baf7ca1709eba30e3b3e3 | [
"BSD-3-Clause"
] | permissive | eurogiciel-oss/Tizen_Crosswalk | efc424807a5434df1d5c9e8ed51364974643707d | a68aed6e29bd157c95564e7af2e3a26191813e51 | refs/heads/master | 2021-01-18T19:19:04.527505 | 2014-02-06T13:43:21 | 2014-02-06T13:43:21 | 16,070,101 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 569 | gyp | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'cast_rtp_parser',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
'<(DEPTH)/third_party/',
],
'sources': [
'rtp_parser.cc',
'rtp_parser.h',
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
}
| [
"ronan@fridu.net"
] | ronan@fridu.net |
8ef80e1dd722b7f1fd3ae65c0ac896ff6de6dc4c | f83729a558721ff17dfc91767e80a689808597cc | /viraloverlay/viraloverlay.py | f0f033d6b9240cfcba7ff7eb7cac860e832cbf6c | [] | no_license | zevaverbach/viraloverlay | 703364c6675a8a54cc696030ebd92fbeb63b3391 | a4a61437fa92ccd2aa7ecaa9c52e1f1a17cb92f1 | refs/heads/master | 2020-04-20T21:50:45.333683 | 2019-03-09T03:09:52 | 2019-03-09T03:09:52 | 169,120,955 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,027 | py | import json
import math
import os
from pprint import pprint
import sys
from subprocess import check_output
from .config import (
APPEND_TO_OVERLAID_VIDS,
FONT_COLOR,
FONT_SIZE,
TEXT_POSITION_X,
TEXT_POSITION_Y,
MAX_ARG_CHARS,
)
from .custom_types import Numeric
from .helpers import (
shell_call,
get_platform_font_path,
append_string_to_filepath,
)
from .exceptions import (
UnsupportedSystem,
NoFont,
NoOverlays,
MissingArgument,
LengthError,
)
class Overlay:
def __init__(
self,
text: str,
start: Numeric,
stop: Numeric,
font_path: str,
font_size: int,
font_color: int,
text_position_x,
text_position_y,
):
self.text = text
self.start = start
self.stop = stop
self.font_path = font_path
self.font_size = font_size
self.font_color = font_color
self.text_position_x = text_position_x
if isinstance(text_position_x, str):
if text_position_x == 'center':
self.text_position_x = 'x=(main_w/2-text_w/2)'
self.text_position_y = text_position_y
if isinstance(text_position_y, str):
if text_position_y == 'bottom':
self.text_position_y = 'y=main_h-(text_h*2)'
def __str__(self):
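        # Render this overlay as an ffmpeg drawtext filter string, enabled
        # only between `start` and `stop` seconds of the video.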
text = self.text.replace("'", "\\\\\\'")
text = text.replace("[", "\\\\\[")
text = text.replace("]", "\\\\\]")
return (
f"drawtext=enable='between(t,{self.start},{self.stop})':"
f"fontfile={self.font_path}:"
f"fontcolor={self.font_color}:"
f'text="{text}":'
f"fontsize={self.font_size}:"
f"{self.text_position_x}:"
f"{self.text_position_y}"
)
class ViralOverlay:
def __init__(
self,
filepath,
font_path=None,
font_color=None,
font_size=None,
text_position_x=None,
text_position_y=None,
overlays=None):
"""
        Each overlay should be a dict with the required keys
        'text', 'start', and 'stop', plus optional keys 'font_path',
        'font_size', 'font_color', 'text_position_x', and 'text_position_y',
        e.g. {'text': 'hello', 'start': 1.0, 'stop': 3.5}
"""
if font_path is None:
try:
font_path = get_platform_font_path()
except UnsupportedSystem:
raise UnsupportedSystem(
'Please provide a path to the font you\'d like to use')
try:
assert os.path.exists(filepath)
except AssertionError:
raise FileNotFoundError
self.filepath = filepath
validate_font_path(font_path)
self.font_path = font_path
self.font_size = font_size or FONT_SIZE
self.font_color = font_color or FONT_COLOR
self.text_position_x = text_position_x or TEXT_POSITION_X
self.text_position_y = text_position_y or TEXT_POSITION_Y
self.overlays = []
if overlays is not None:
self.add(overlays)
def go(self):
return self.export()
def gif(self):
self._prepare_command(output_filetype='gif')
return self._make()
def export(self):
self._prepare_command()
return self._make()
def _prepare_command(self, output_filetype=None):
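        # Assemble a single ffmpeg invocation that chains every overlay's
        # drawtext filter via -vf and stream-copies the audio (-acodec copy).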
if not self.overlays:
raise NoOverlays(
'Please add at least one overlay tuple via `ViralOverlay.add`.')
new_filepath = append_string_to_filepath(
self.filepath,
APPEND_TO_OVERLAID_VIDS)
if output_filetype:
new_filepath = '.'.join(new_filepath.split('.')[:-1]) + '.' + output_filetype
overlay_args = ','.join(str(o) for o in self.overlays)
if len(overlay_args) > MAX_ARG_CHARS:
raise LengthError(
f'Your system only allows {MAX_ARG_CHARS} characters in a command,'
f' and the one generated here is {len(overlay_args)}!')
self.command = (
f'ffmpeg -y -i {self.filepath} -vf "{overlay_args}" -acodec '
f'copy {new_filepath}')
self.new_filepath = new_filepath
def _make(self):
shell_call(self.command)
return self.new_filepath
def add(self, overlay_or_overlays):
if isinstance(overlay_or_overlays, tuple):
overlays = overlay_or_overlays
elif isinstance(overlay_or_overlays, list):
overlays = tuple(overlay_or_overlays)
else:
if isinstance(overlay_or_overlays, str):
overlays_path = overlay_or_overlays
with open(overlays_path) as fin:
overlays = json.load(fin)
else:
overlays = [overlay_or_overlays]
for overlay in overlays:
overlay = self.validate_and_fortify_overlay(overlay)
self.overlays.append(Overlay(**overlay))
def validate_and_fortify_overlay(self, overlay):
if any(key not in overlay for key in ['text', 'start', 'stop']):
pprint(overlay)
raise MissingArgument
overlay['font_path'] = overlay.get('font_path') or self.font_path
overlay['font_size'] = overlay.get('font_size') or self.font_size
overlay['font_color'] = overlay.get('font_color') or self.font_color
overlay['text_position_x'] = (overlay.get('text_position_x')
or self.text_position_x)
overlay['text_position_y'] = (overlay.get('text_position_y')
or self.text_position_y)
return overlay
def validate_font_path(font_path):
assert os.path.exists(font_path)
assert any(
font_path.endswith(file_extension)
for file_extension in ['ttf', 'otf'])
| [
"zev@averba.ch"
] | zev@averba.ch |
d5a6333eedb40b22f342c751d3803cda984846d0 | 33cbc171f4f2f6e1493eed6da6f2b8e80d284747 | /respuestas_encuestas/admin.py | a696dbb5c6d3c67e9f906dd285e71199994bddb0 | [] | no_license | mauronet/crm | 963aeddd4cdfa1d44eb2add3e6d0cfaa9f72cd8f | 972306a89b3c8978975408d0b27269c8f87639d3 | refs/heads/master | 2020-05-21T00:26:49.812126 | 2014-09-07T13:12:09 | 2014-09-07T13:12:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from django.contrib import admin
from .models import RespuestaEncuesta
class RespuestaEncuestaAdmin(admin.ModelAdmin):
list_display = ('respuesta', 'votos')
admin.site.register(RespuestaEncuesta, RespuestaEncuestaAdmin) | [
"mrodrigg@gmail.com"
] | mrodrigg@gmail.com |
47a1085793c09d8ff86cf8e73980e0bcd9595eeb | 43461f999228079c9bfee03f0e4043f08426051f | /python爬虫开发与项目实战笔记/通用爬虫/day10/code/SNBook/items.py | cc4533585eccbe86d3f6186bcea51a5c1d717dbc | [] | no_license | MapleStoryBoy/spider | f9af844ae9812fe21141060213ac2677e719ac73 | b014d81d52805f9317e85b66024d047e73d59053 | refs/heads/master | 2020-05-21T18:27:50.585790 | 2019-07-12T10:11:58 | 2019-07-12T10:11:58 | 186,132,575 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SnbookItem(scrapy.Item):
# define the fields for your item here like:
parent_type = scrapy.Field()
parent_href = scrapy.Field()
pagecount = scrapy.Field()
son_type = scrapy.Field()
son_href = scrapy.Field()
belong_son_tyoe = scrapy.Field()
book_href = scrapy.Field()
book_name = scrapy.Field()
book_img = scrapy.Field()
book_author = scrapy.Field()
book_descrip = scrapy.Field()
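    # Hypothetical usage in a spider callback (a sketch; the selector is an
    # assumption, not taken from this project):
    # item = SnbookItem()
    # item['book_name'] = response.css('.book-title::text').get()
    # yield item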
| [
"MapleStoryBoy@163.com"
] | MapleStoryBoy@163.com |
229d541d8fb546628d890430fc9fbd401743dc1c | 8043ec8cc28b69f1850d5d5f0cafcd2a43ce0eab | /docs/conf.py | e8d5c7666af5c51922fdb76f65a66713d6631143 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | ach0o/boilerplate_flask_sqlalchemy | d0ab9b8ae80617de1c3e7b39525f70e4a39bd18b | 9a6a89e359b446651dff5064e4b37b8bc70fdcc1 | refs/heads/master | 2023-02-10T15:46:17.576226 | 2019-07-28T10:32:01 | 2019-07-28T10:32:01 | 165,043,068 | 0 | 0 | MIT | 2023-02-02T06:32:16 | 2019-01-10T10:52:41 | Python | UTF-8 | Python | false | false | 5,656 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'boilerplate_flask_sqlalchemy'
copyright = '2019, achooan'
author = 'achooan'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'boilerplate_flask_sqlalchemydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'boilerplate_flask_sqlalchemy.tex',
'boilerplate\\_flask\\_sqlalchemy Documentation',
'achooan', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'boilerplate_flask_sqlalchemy',
'boilerplate_flask_sqlalchemy Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'boilerplate_flask_sqlalchemy',
'boilerplate_flask_sqlalchemy Documentation',
author, 'boilerplate_flask_sqlalchemy',
'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"88soldieron@gmail.com"
] | 88soldieron@gmail.com |
3efa87d0b1435b7ff792fe5481378d70cc3f6640 | 8649511435eb30d692471088a1b7c820f42343fc | /textbox/data/dataset/multi_sent_dataset.py | 683841c9fdb90cebe60c3397819ed9d08e527c31 | [
"MIT"
] | permissive | Richar-Du/TextBox | 3858cf0e43fcce0d879c8485df9fed6eda3879c6 | bb828118665e5a1183318ea5ba9368db052fb540 | refs/heads/main | 2023-03-24T15:35:05.791439 | 2021-02-24T01:55:00 | 2021-02-24T01:55:00 | 343,679,215 | 0 | 0 | MIT | 2021-03-02T07:11:17 | 2021-03-02T07:11:16 | null | UTF-8 | Python | false | false | 7,476 | py | # @Time : 2021/2/3
# @Author : Tianyi Tang
# @Email : steven_tang@ruc.edu.cn
"""
textbox.data.dataset.multi_sent_dataset
########################################
"""
import os
from textbox.data.dataset import AbstractDataset
from textbox.data.utils import tokenize, split_data, build_vocab, detect_restored, dump_data, load_restored
class MultipleSentenceDataset(AbstractDataset):
def __init__(self, config):
self.language = config['language'].lower()
self.max_vocab_size = config['max_vocab_size']
self.group_split_token = config['group_split_token']
self.sentence_split_token = config['sentence_split_token']
self._build_data_format(config)
super().__init__(config)
def _build_data_format(self, config):
for group in ['knowledge', 'source', 'target']:
format_name = group + '_format'
format = config[format_name] if format_name in config else 'none'
setattr(self, format_name, format)
if format != 'none':
max_length_name = 'max_' + group + '_length'
if max_length_name in config:
setattr(self, max_length_name, config[max_length_name])
else:
setattr(self, max_length_name, config['max_seq_length'])
if format == 'multiple':
max_num_name = 'max_' + group + '_num'
if max_num_name in config:
setattr(self, max_num_name, config[max_num_name])
else:
setattr(self, max_num_name, config['max_sentence_num'])
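    # For example, with group='source' and source_format='multiple', the loop
    # above defines self.source_format, self.max_source_length and
    # self.max_source_num (falling back to max_seq_length / max_sentence_num).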
def _get_preset(self):
self.token2idx = {}
self.idx2token = {}
self.group_text_data = [[], [], []]
def _load_multi_data(self, dataset_path):
if not os.path.isfile(dataset_path):
            raise ValueError('File {} does not exist'.format(dataset_path))
        group_text = [[], [], []]
        with open(dataset_path, "r") as fin:
            for line in fin:
                groups = line.strip().lower().split(self.group_split_token)
                drop_flag = False
                for i, (group, data) in enumerate(zip(['target', 'source', 'knowledge'], groups[::-1])):
                    max_length = getattr(self, 'max_' + group + '_length')
                    if getattr(self, group + '_format') == 'single':
                        text = tokenize(data, self.tokenize_strategy, self.language)
                        drop_flag |= (len(text) > max_length)
                        text = text[:max_length]
                        group_text[i].append(text)
                    else:
                        max_num = getattr(self, 'max_' + group + '_num')
                        texts = [
                            tokenize(text, self.tokenize_strategy, self.language)
                            for text in data.split(self.sentence_split_token)
                        ]
                        drop_flag |= any(len(text) > max_length for text in texts)
                        drop_flag |= (len(texts) > max_num)
                        texts = [text[:max_length] for text in texts[-max_num:]]
                        group_text[i].append(texts)
                if drop_flag and self.overlength_strategy == 'drop':
                    # 'drop' discards the whole example when any field exceeds its limit.
                    group_text = [group[:-1] for group in group_text]
        # Built as [target, source, knowledge]; callers expect the reverse order.
        return group_text[::-1]
def _load_split_data(self, dataset_path):
"""Load dataset from split (train, dev, test).
This is designed for single sentence format, unconditional task.
Args:
dataset_path (str): path of dataset dir.
"""
        for prefix in ['train', 'dev', 'test']:
filename = os.path.join(dataset_path, '{}.txt'.format(prefix))
knowledge, src, tgt = self._load_multi_data(filename)
self.group_text_data[0].append(knowledge)
self.group_text_data[1].append(src)
self.group_text_data[2].append(tgt)
def _load_single_data(self, dataset_path):
"""Load full corpus.
This is designed for single sentence format, unconditional task.
Args:
dataset_path (str): path of dataset dir.
"""
dataset_file = os.path.join(dataset_path, 'corpus.txt')
group_text_data = self._load_multi_data(dataset_file)
        self.group_text_data = split_data(list(group_text_data), self.split_ratio)
def _load_data(self, dataset_path):
if self.split_strategy == "load_split":
self._load_split_data(dataset_path)
elif self.split_strategy == "by_ratio":
self._load_single_data(dataset_path)
else:
raise NotImplementedError("{} split strategy not implemented".format(self.split_strategy))
for i, group in enumerate(['knowledge', 'source', 'target']):
if getattr(self, group + '_format') != 'none':
setattr(self, group + '_text_data', self.group_text_data[i])
def _build_vocab(self):
text_data = self.group_text_data[0] + self.group_text_data[1] + self.group_text_data[2]
self.idx2token, self.token2idx, self.max_vocab_size = build_vocab(
text_data, self.max_vocab_size, self.special_token_list
)
def _detect_restored(self, dataset_path):
restored_flag = True
for group in ['knowledge', 'source', 'target']:
if getattr(self, group + '_format') != 'none':
restored_flag &= detect_restored(dataset_path, group + '.', ignore_file='vocab')
return restored_flag & detect_restored(dataset_path, ignore_file='data')
def _dump_data(self, dataset_path):
for group in ['knowledge', 'source', 'target']:
if getattr(self, group + '_format') != 'none':
dump_data(dataset_path, getattr(self, group + '_text_data'), suffix=group + '.')
dump_data(dataset_path, idx2token=self.idx2token, token2idx=self.token2idx)
self.logger.info("Dump finished!")
def _load_restored(self, dataset_path):
"""Load dataset from restored binary files (train, dev, test).
Args:
dataset_path (str): path of dataset dir.
"""
for group in ['knowledge', 'source', 'target']:
if getattr(self, group + '_format') != 'none':
text_data = load_restored(dataset_path, group + '.', ignore_file='vocab')[0]
setattr(self, group + '_text_data', text_data)
idx2token, token2idx = load_restored(dataset_path, ignore_file='data')
setattr(self, 'idx2token', idx2token)
setattr(self, 'token2idx', token2idx)
self.max_vocab_size = len(self.idx2token)
self.logger.info("Restore finished!")
def build(self):
info_str = ''
corpus_list = []
self.logger.info("Vocab size: {}".format(self.max_vocab_size))
for i, prefix in enumerate(['train', 'dev', 'test']):
tp_data = {
'idx2token': self.idx2token,
'token2idx': self.token2idx,
}
for group in ['knowledge', 'source', 'target']:
if getattr(self, group + '_format') != 'none':
text_data = getattr(self, group + '_text_data')[i]
tp_data[group + '_text_data'] = text_data
corpus_list.append(tp_data)
info_str += '{}: {} cases, '.format(prefix, len(tp_data['target_text_data']))
self.logger.info(info_str[:-2] + '\n')
return corpus_list
| [
"1020139164@qq.com"
] | 1020139164@qq.com |
d92ae14ec4a5f7f378a2afd59049a7861ff896ad | cf58c2c216f6c76c71b5a04f72d79fb1d58e4b64 | /tests/components/modbus/test_init.py | 90fc0c086e8b3925d523bda0e1e2adfab5a83adb | [
"Apache-2.0"
] | permissive | whtsky/home-assistant | c301a7a0c2f8e94806d411b705c8f7b5939355d2 | 2ea5811e3a34e228908802e18c29af1c2fc249c5 | refs/heads/dev | 2023-08-19T07:37:29.365289 | 2023-02-17T22:21:28 | 2023-02-17T22:21:28 | 204,410,639 | 1 | 0 | Apache-2.0 | 2023-02-22T06:14:25 | 2019-08-26T06:30:12 | Python | UTF-8 | Python | false | false | 27,086 | py | """The tests for the Modbus init.
This file is responsible for testing:
- pymodbus API
- Functionality of class ModbusHub
- Coverage 100%:
__init__.py
const.py
modbus.py
validators.py
baseplatform.py (only BasePlatform)
It uses binary_sensors/sensors to do black box testing of the read calls.
"""
from datetime import timedelta
import logging
from unittest import mock
from freezegun.api import FrozenDateTimeFactory
from pymodbus.exceptions import ModbusException
from pymodbus.pdu import ExceptionResponse, IllegalFunctionRequest
import pytest
import voluptuous as vol
from homeassistant import config as hass_config
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.modbus.const import (
ATTR_ADDRESS,
ATTR_HUB,
ATTR_SLAVE,
ATTR_UNIT,
ATTR_VALUE,
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CALL_TYPE_WRITE_COIL,
CALL_TYPE_WRITE_COILS,
CALL_TYPE_WRITE_REGISTER,
CALL_TYPE_WRITE_REGISTERS,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_DATA_TYPE,
CONF_INPUT_TYPE,
CONF_MSG_WAIT,
CONF_PARITY,
CONF_SLAVE_COUNT,
CONF_STOPBITS,
CONF_SWAP,
CONF_SWAP_BYTE,
CONF_SWAP_WORD,
DEFAULT_SCAN_INTERVAL,
MODBUS_DOMAIN as DOMAIN,
RTUOVERTCP,
SERIAL,
SERVICE_RESTART,
SERVICE_STOP,
SERVICE_WRITE_COIL,
SERVICE_WRITE_REGISTER,
TCP,
UDP,
DataType,
)
from homeassistant.components.modbus.validators import (
duplicate_entity_validator,
duplicate_modbus_validator,
number_validator,
struct_validator,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
ATTR_STATE,
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_COUNT,
CONF_DELAY,
CONF_HOST,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_TIMEOUT,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
SERVICE_RELOAD,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .conftest import (
TEST_ENTITY_NAME,
TEST_MODBUS_HOST,
TEST_MODBUS_NAME,
TEST_PORT_SERIAL,
TEST_PORT_TCP,
ReadResult,
)
from tests.common import async_fire_time_changed, get_fixture_path
@pytest.fixture(name="mock_modbus_with_pymodbus")
async def mock_modbus_with_pymodbus_fixture(hass, caplog, do_config, mock_pymodbus):
"""Load integration modbus using mocked pymodbus."""
caplog.clear()
caplog.set_level(logging.ERROR)
config = {DOMAIN: do_config}
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert DOMAIN in hass.config.components
assert caplog.text == ""
return mock_pymodbus
async def test_number_validator() -> None:
"""Test number validator."""
for value, value_type in (
(15, int),
(15.1, float),
("15", int),
("15.1", float),
(-15, int),
(-15.1, float),
("-15", int),
("-15.1", float),
):
assert isinstance(number_validator(value), value_type)
try:
number_validator("x15.1")
except vol.Invalid:
return
pytest.fail("Number_validator not throwing exception")
@pytest.mark.parametrize(
"do_config",
[
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 2,
CONF_DATA_TYPE: DataType.STRING,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_DATA_TYPE: DataType.INT32,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_DATA_TYPE: DataType.INT32,
CONF_SWAP: CONF_SWAP_BYTE,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 2,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: ">i",
CONF_SWAP: CONF_SWAP_BYTE,
},
],
)
async def test_ok_struct_validator(do_config) -> None:
"""Test struct validator."""
try:
struct_validator(do_config)
except vol.Invalid:
pytest.fail("struct_validator unexpected exception")
@pytest.mark.parametrize(
"do_config",
[
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 8,
CONF_DATA_TYPE: "int",
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 8,
CONF_DATA_TYPE: DataType.CUSTOM,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 8,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: "no good",
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 20,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: ">f",
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 1,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: ">f",
CONF_SWAP: CONF_SWAP_WORD,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 1,
CONF_DATA_TYPE: DataType.STRING,
CONF_STRUCTURE: ">f",
CONF_SWAP: CONF_SWAP_WORD,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_COUNT: 2,
CONF_DATA_TYPE: DataType.CUSTOM,
CONF_STRUCTURE: ">f",
CONF_SLAVE_COUNT: 5,
},
],
)
async def test_exception_struct_validator(do_config) -> None:
"""Test struct validator."""
try:
struct_validator(do_config)
except vol.Invalid:
return
pytest.fail("struct_validator missing exception")
@pytest.mark.parametrize(
"do_config",
[
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST + " 2",
CONF_PORT: TEST_PORT_TCP,
},
],
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_NAME: TEST_MODBUS_NAME + " 2",
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
],
],
)
async def test_duplicate_modbus_validator(do_config) -> None:
"""Test duplicate modbus validator."""
duplicate_modbus_validator(do_config)
assert len(do_config) == 1
@pytest.mark.parametrize(
"do_config",
[
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
CONF_SLAVE: 0,
},
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 119,
CONF_SLAVE: 0,
},
],
}
],
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
CONF_SLAVE: 0,
},
{
CONF_NAME: TEST_ENTITY_NAME + " 2",
CONF_ADDRESS: 117,
CONF_SLAVE: 0,
},
],
}
],
],
)
async def test_duplicate_entity_validator(do_config) -> None:
"""Test duplicate entity validator."""
duplicate_entity_validator(do_config)
assert len(do_config[0][CONF_SENSORS]) == 1
@pytest.mark.parametrize(
"do_config",
[
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: UDP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_TYPE: UDP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: RTUOVERTCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
},
{
CONF_TYPE: RTUOVERTCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_MSG_WAIT: 100,
},
{
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_NAME: TEST_MODBUS_NAME,
CONF_TIMEOUT: 30,
CONF_DELAY: 10,
},
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_DELAY: 5,
},
[
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
},
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: f"{TEST_MODBUS_NAME} 2",
},
{
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
CONF_NAME: f"{TEST_MODBUS_NAME} 3",
},
],
{
# Special test for scan_interval validator with scan_interval: 0
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
CONF_SLAVE: 0,
CONF_SCAN_INTERVAL: 0,
}
],
},
],
)
async def test_config_modbus(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture, mock_modbus_with_pymodbus
) -> None:
"""Run configuration test for modbus."""
VALUE = "value"
FUNC = "func"
DATA = "data"
SERVICE = "service"
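# These keys index the parametrized dicts below: DATA names the service-call
# field being written, VALUE its payload, SERVICE the Home Assistant service
# invoked, and FUNC the pymodbus write function expected to receive the call.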
@pytest.mark.parametrize(
"do_config",
[
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: SERIAL,
CONF_BAUDRATE: 9600,
CONF_BYTESIZE: 8,
CONF_METHOD: "rtu",
CONF_PORT: TEST_PORT_SERIAL,
CONF_PARITY: "E",
CONF_STOPBITS: 1,
},
],
)
@pytest.mark.parametrize(
"do_write",
[
{
DATA: ATTR_VALUE,
VALUE: 15,
SERVICE: SERVICE_WRITE_REGISTER,
FUNC: CALL_TYPE_WRITE_REGISTER,
},
{
DATA: ATTR_VALUE,
VALUE: [1, 2, 3],
SERVICE: SERVICE_WRITE_REGISTER,
FUNC: CALL_TYPE_WRITE_REGISTERS,
},
{
DATA: ATTR_STATE,
VALUE: False,
SERVICE: SERVICE_WRITE_COIL,
FUNC: CALL_TYPE_WRITE_COIL,
},
{
DATA: ATTR_STATE,
VALUE: [True, False, True],
SERVICE: SERVICE_WRITE_COIL,
FUNC: CALL_TYPE_WRITE_COILS,
},
],
)
@pytest.mark.parametrize(
"do_return",
[
{VALUE: ReadResult([0x0001]), DATA: ""},
{VALUE: ExceptionResponse(0x06), DATA: "Pymodbus:"},
{VALUE: IllegalFunctionRequest(0x06), DATA: "Pymodbus:"},
{VALUE: ModbusException("fail write_"), DATA: "Pymodbus:"},
],
)
@pytest.mark.parametrize(
"do_unit",
[
ATTR_UNIT,
ATTR_SLAVE,
],
)
async def test_pb_service_write(
hass: HomeAssistant,
do_write,
do_return,
do_unit,
caplog: pytest.LogCaptureFixture,
mock_modbus_with_pymodbus,
) -> None:
"""Run test for service write_register."""
func_name = {
CALL_TYPE_WRITE_COIL: mock_modbus_with_pymodbus.write_coil,
CALL_TYPE_WRITE_COILS: mock_modbus_with_pymodbus.write_coils,
CALL_TYPE_WRITE_REGISTER: mock_modbus_with_pymodbus.write_register,
CALL_TYPE_WRITE_REGISTERS: mock_modbus_with_pymodbus.write_registers,
}
data = {
ATTR_HUB: TEST_MODBUS_NAME,
do_unit: 17,
ATTR_ADDRESS: 16,
do_write[DATA]: do_write[VALUE],
}
mock_modbus_with_pymodbus.reset_mock()
caplog.clear()
caplog.set_level(logging.DEBUG)
func_name[do_write[FUNC]].return_value = do_return[VALUE]
await hass.services.async_call(DOMAIN, do_write[SERVICE], data, blocking=True)
assert func_name[do_write[FUNC]].called
assert func_name[do_write[FUNC]].call_args[0] == (
data[ATTR_ADDRESS],
data[do_write[DATA]],
)
if do_return[DATA]:
assert any(message.startswith("Pymodbus:") for message in caplog.messages)
@pytest.fixture(name="mock_modbus_read_pymodbus")
async def mock_modbus_read_pymodbus_fixture(
hass,
do_group,
do_type,
do_scan_interval,
do_return,
do_exception,
caplog,
mock_pymodbus,
freezer: FrozenDateTimeFactory,
):
"""Load integration modbus using mocked pymodbus."""
caplog.clear()
caplog.set_level(logging.ERROR)
mock_pymodbus.read_coils.side_effect = do_exception
mock_pymodbus.read_discrete_inputs.side_effect = do_exception
mock_pymodbus.read_input_registers.side_effect = do_exception
mock_pymodbus.read_holding_registers.side_effect = do_exception
mock_pymodbus.read_coils.return_value = do_return
mock_pymodbus.read_discrete_inputs.return_value = do_return
mock_pymodbus.read_input_registers.return_value = do_return
mock_pymodbus.read_holding_registers.return_value = do_return
config = {
DOMAIN: [
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
do_group: [
{
CONF_INPUT_TYPE: do_type,
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_SLAVE: 0,
CONF_SCAN_INTERVAL: do_scan_interval,
}
],
}
],
}
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert DOMAIN in hass.config.components
assert caplog.text == ""
freezer.tick(timedelta(seconds=DEFAULT_SCAN_INTERVAL + 60))
async_fire_time_changed(hass)
await hass.async_block_till_done()
return mock_pymodbus
@pytest.mark.parametrize(
("do_domain", "do_group", "do_type", "do_scan_interval"),
[
[SENSOR_DOMAIN, CONF_SENSORS, CALL_TYPE_REGISTER_HOLDING, 10],
[SENSOR_DOMAIN, CONF_SENSORS, CALL_TYPE_REGISTER_INPUT, 10],
[BINARY_SENSOR_DOMAIN, CONF_BINARY_SENSORS, CALL_TYPE_DISCRETE, 10],
[BINARY_SENSOR_DOMAIN, CONF_BINARY_SENSORS, CALL_TYPE_COIL, 1],
],
)
@pytest.mark.parametrize(
("do_return", "do_exception", "do_expect_state", "do_expect_value"),
[
[ReadResult([1]), None, STATE_ON, "1"],
[IllegalFunctionRequest(0x99), None, STATE_UNAVAILABLE, STATE_UNAVAILABLE],
[ExceptionResponse(0x99), None, STATE_UNAVAILABLE, STATE_UNAVAILABLE],
[
ReadResult([1]),
ModbusException("fail read_"),
STATE_UNAVAILABLE,
STATE_UNAVAILABLE,
],
],
)
async def test_pb_read(
hass: HomeAssistant,
do_domain,
do_expect_state,
do_expect_value,
caplog: pytest.LogCaptureFixture,
mock_modbus_read_pymodbus,
) -> None:
"""Run test for different read."""
# Check state
entity_id = f"{do_domain}.{TEST_ENTITY_NAME}".replace(" ", "_")
    state = hass.states.get(entity_id).state
    assert state
    # Sensors report the raw value string while binary sensors report an
    # on/off state, so pick the matching expectation for this domain.
    if do_domain == SENSOR_DOMAIN:
        do_expect = do_expect_value
    else:
        do_expect = do_expect_state
assert state == do_expect
async def test_pymodbus_constructor_fail(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
"""Run test for failing pymodbus constructor."""
config = {
DOMAIN: [
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
}
]
}
with mock.patch(
"homeassistant.components.modbus.modbus.ModbusTcpClient", autospec=True
) as mock_pb:
caplog.set_level(logging.ERROR)
mock_pb.side_effect = ModbusException("test no class")
assert await async_setup_component(hass, DOMAIN, config) is False
await hass.async_block_till_done()
message = f"Pymodbus: {TEST_MODBUS_NAME}: Modbus Error: test"
assert caplog.messages[0].startswith(message)
assert caplog.records[0].levelname == "ERROR"
assert mock_pb.called
async def test_pymodbus_close_fail(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture, mock_pymodbus
) -> None:
"""Run test for failing pymodbus close."""
config = {
DOMAIN: [
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
}
]
}
caplog.set_level(logging.ERROR)
mock_pymodbus.connect.return_value = True
mock_pymodbus.close.side_effect = ModbusException("close fail")
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
# Close() is called as part of teardown
async def test_pymodbus_connect_fail(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture, mock_pymodbus
) -> None:
"""Run test for failing pymodbus constructor."""
config = {
DOMAIN: [
{
CONF_NAME: TEST_MODBUS_NAME,
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
}
]
}
caplog.set_level(logging.WARNING)
ExceptionMessage = "test connect exception"
mock_pymodbus.connect.side_effect = ModbusException(ExceptionMessage)
assert await async_setup_component(hass, DOMAIN, config) is False
assert ExceptionMessage in caplog.text
async def test_delay(
hass: HomeAssistant, mock_pymodbus, freezer: FrozenDateTimeFactory
) -> None:
"""Run test for startup delay."""
    # The purpose of this test is to exercise the startup delay.
    # We "hijack" a binary_sensor to make a proper black-box test.
set_delay = 15
set_scan_interval = 5
entity_id = f"{BINARY_SENSOR_DOMAIN}.{TEST_ENTITY_NAME}".replace(" ", "_")
config = {
DOMAIN: [
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
CONF_DELAY: set_delay,
CONF_BINARY_SENSORS: [
{
CONF_INPUT_TYPE: CALL_TYPE_COIL,
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 52,
CONF_SLAVE: 0,
CONF_SCAN_INTERVAL: set_scan_interval,
},
],
}
]
}
mock_pymodbus.read_coils.return_value = ReadResult([0x01])
start_time = dt_util.utcnow()
assert await async_setup_component(hass, DOMAIN, config) is True
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNKNOWN
time_sensor_active = start_time + timedelta(seconds=2)
time_after_delay = start_time + timedelta(seconds=(set_delay))
time_after_scan = start_time + timedelta(seconds=(set_delay + set_scan_interval))
time_stop = time_after_scan + timedelta(seconds=10)
now = start_time
while now < time_stop:
        # This test assumes listeners are always fired at 0 microseconds,
        # which is impossible in production, so we use 999999 microseconds
        # to simulate the real world.
freezer.tick(timedelta(seconds=1, microseconds=999999))
now = dt_util.utcnow()
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
if now > time_sensor_active:
if now <= time_after_delay:
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
elif now > time_after_scan:
assert hass.states.get(entity_id).state == STATE_ON
@pytest.mark.parametrize(
"do_config",
[
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 117,
CONF_SLAVE: 0,
CONF_SCAN_INTERVAL: 0,
}
],
},
],
)
async def test_shutdown(
hass: HomeAssistant,
caplog: pytest.LogCaptureFixture,
mock_pymodbus,
mock_modbus_with_pymodbus,
) -> None:
"""Run test for shutdown."""
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert mock_pymodbus.close.called
assert caplog.text == ""
@pytest.mark.parametrize(
"do_config",
[
{
CONF_SENSORS: [
{
CONF_NAME: TEST_ENTITY_NAME,
CONF_ADDRESS: 51,
CONF_SLAVE: 0,
}
]
},
],
)
async def test_stop_restart(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture, mock_modbus
) -> None:
"""Run test for service stop."""
caplog.set_level(logging.INFO)
entity_id = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}".replace(" ", "_")
assert hass.states.get(entity_id).state == STATE_UNKNOWN
hass.states.async_set(entity_id, 17)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "17"
mock_modbus.reset_mock()
caplog.clear()
data = {
ATTR_HUB: TEST_MODBUS_NAME,
}
await hass.services.async_call(DOMAIN, SERVICE_STOP, data, blocking=True)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
assert mock_modbus.close.called
assert f"modbus {TEST_MODBUS_NAME} communication closed" in caplog.text
mock_modbus.reset_mock()
caplog.clear()
await hass.services.async_call(DOMAIN, SERVICE_RESTART, data, blocking=True)
await hass.async_block_till_done()
assert not mock_modbus.close.called
assert mock_modbus.connect.called
assert f"modbus {TEST_MODBUS_NAME} communication open" in caplog.text
mock_modbus.reset_mock()
caplog.clear()
await hass.services.async_call(DOMAIN, SERVICE_RESTART, data, blocking=True)
await hass.async_block_till_done()
assert mock_modbus.close.called
assert mock_modbus.connect.called
assert f"modbus {TEST_MODBUS_NAME} communication closed" in caplog.text
assert f"modbus {TEST_MODBUS_NAME} communication open" in caplog.text
@pytest.mark.parametrize("do_config", [{}])
async def test_write_no_client(hass: HomeAssistant, mock_modbus) -> None:
"""Run test for service stop and write without client."""
mock_modbus.reset()
data = {
ATTR_HUB: TEST_MODBUS_NAME,
}
await hass.services.async_call(DOMAIN, SERVICE_STOP, data, blocking=True)
await hass.async_block_till_done()
assert mock_modbus.close.called
data = {
ATTR_HUB: TEST_MODBUS_NAME,
ATTR_UNIT: 17,
ATTR_ADDRESS: 16,
ATTR_STATE: True,
}
await hass.services.async_call(DOMAIN, SERVICE_WRITE_COIL, data, blocking=True)
@pytest.mark.parametrize("do_config", [{}])
async def test_integration_reload(
hass: HomeAssistant,
caplog: pytest.LogCaptureFixture,
mock_modbus,
freezer: FrozenDateTimeFactory,
) -> None:
"""Run test for integration reload."""
caplog.set_level(logging.INFO)
caplog.clear()
yaml_path = get_fixture_path("configuration.yaml", "modbus")
with mock.patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
await hass.async_block_till_done()
        for _ in range(4):
freezer.tick(timedelta(seconds=1))
async_fire_time_changed(hass)
await hass.async_block_till_done()
assert "Modbus reloading" in caplog.text
@pytest.mark.parametrize("do_config", [{}])
async def test_integration_reload_failed(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture, mock_modbus
) -> None:
"""Run test for integration connect failure on reload."""
caplog.set_level(logging.INFO)
caplog.clear()
yaml_path = get_fixture_path("configuration.yaml", "modbus")
with mock.patch.object(
hass_config, "YAML_CONFIG_FILE", yaml_path
), mock.patch.object(mock_modbus, "connect", side_effect=ModbusException("error")):
await hass.services.async_call(DOMAIN, SERVICE_RELOAD, blocking=True)
await hass.async_block_till_done()
assert "Modbus reloading" in caplog.text
assert "connect failed, retry in pymodbus" in caplog.text
| [
"noreply@github.com"
] | noreply@github.com |
b313f11a5449630aeac14c8c12fc7b7444fdc85a | 2cef313487b916ad27ee668e5d34da6670c0c27a | /parsers.py | 2e530a1b4ac6524047ce1ec3241c98fdb2ed418c | [
"MIT"
] | permissive | hgkahng/pysc2-defogging | 52b43d38dd197315da4858fe8029c79d6c11d20b | dcb622c874fcc8f004ad0cab7ca94f33d44d5893 | refs/heads/master | 2023-01-08T13:46:11.588111 | 2020-11-01T13:50:25 | 2020-11-01T13:50:25 | 309,101,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,404 | py | # -*- coding: utf-8 -*-
"""
1. ParserBase
2. ScreenFeatParser
3. MinimapFeatParser
4. SpatialFeatParser
"""
import os
import shutil
import collections
import numpy as np
import scipy.sparse as sp
from absl import flags
FLAGS = flags.FLAGS
class ParserBase(object):
"""Abstract class for replay parsers."""
def __init__(self):
pass
def parse(self, timestep):
"""Must override."""
raise NotImplementedError
def save(self):
"""Must override."""
raise NotImplementedError
class ScreenFeatParser(ParserBase):
"""Parse 'feature_screen' from timestep observation."""
def __init__(self, sparse: bool = True):
super(ScreenFeatParser, self).__init__()
self.screen_features = collections.defaultdict(list)
self.sparse = sparse
self.write_file = None
def parse(self, timestep):
if self.sparse:
raise NotImplementedError
self._append_screen_features(timestep)
def save(self, path: str):
if self.sparse:
raise NotImplementedError
self._save_screen_features(path)
def _append_screen_features(self, timestep):
screen = timestep.observation['feature_screen']
name2idx = screen._index_names[0] # pylint: disable=protected-access
for name, _ in name2idx.items():
self.screen_features[name].append(screen[name])
def _save_screen_features(self, path: str):
assert isinstance(self.screen_features, dict)
        np.savez_compressed(file=path, **self.screen_features)
print('{} | Saved screen features to: {}'.format(self.__class__.__name__, path))
class MinimapFeatParser(ParserBase):
"""Parse 'feature_minimap' from timestep observation."""
def __init__(self, sparse: bool = True):
super(MinimapFeatParser, self).__init__()
self.minimap_features = collections.defaultdict(list)
self.sparse = sparse
self.write_file = None
def parse(self, timestep):
if self.sparse:
raise NotImplementedError
self._append_minimap_features(timestep)
def save(self, path: str):
if self.sparse:
raise NotImplementedError
self._save_minimap_features(path)
def _append_minimap_features(self, timestep):
minimap = timestep.observation['feature_minimap']
name2idx = minimap._index_names[0] # pylint: disable=protected-access
for name, _ in name2idx.items():
self.minimap_features[name].append(minimap[name])
def _save_minimap_features(self, path: str):
"""Save minimap to .npz format."""
assert isinstance(self.minimap_features, dict)
np.savez_compressed(file=path, **self.minimap_features)
print('{} | Saved minimap features to: {}'.format(self.__class__.__name__, path))
class SpatialFeatParser(ParserBase):
"""
Parse 'feature_spatial' from timestep observation.
Note that 'feature_spatial' is a customly implemented feature.
"""
def __init__(self, sparse: bool = True):
super(SpatialFeatParser, self).__init__()
self.spatial_features = collections.defaultdict(list)
self.sparse = sparse # bool
self.write_file = None
def parse(self, timestep):
if self.sparse:
self._append_spatial_features_sparse(timestep)
else:
self._append_spatial_features(timestep)
def save(self, path: str = None, override: bool = False):
if path is None:
if self.write_file is None:
raise AttributeError("File to write has not been specified")
path = self.write_file
if not override:
if os.path.exists(path):
raise FileExistsError
if self.sparse:
self._save_spatial_features_sparse(path)
else:
self._save_spatial_features(path)
def _append_spatial_features(self, timestep):
"""..."""
spatial = timestep.observation['feature_spatial']
name2idx = spatial._index_names[0] # pylint: disable=protected-access
for name, _ in name2idx.items():
self.spatial_features[name] += [spatial[name]]
def _append_spatial_features_sparse(self, timestep):
"""..."""
spatial = timestep.observation['feature_spatial']
name2idx = spatial._index_names[0] # pylint: disable=protected-access
for name, _ in name2idx.items():
coo = sp.coo_matrix(spatial[name])
self.spatial_features[name] += [coo]
def _save_spatial_features(self, path: str = None):
"""..."""
assert isinstance(self.spatial_features, dict)
        np.savez_compressed(path, **self.spatial_features)
print('{} | Saved Spatial features to: {}'.format(self.__class__.__name__, path))
def _save_spatial_features_sparse(self, path: str):
"""Save 'spatial' to .npz format."""
assert isinstance(self.spatial_features, dict)
path = os.path.abspath(path)
temp_dir = os.path.join(os.path.dirname(path), 'temp') # /dir/to/path/temp
if os.path.isdir(temp_dir):
raise FileExistsError(f"'{temp_dir}' already exists.")
os.makedirs(temp_dir, exist_ok=False)
        for name, spatial in self.spatial_features.items():
            write_dir = os.path.join(temp_dir, str(name))  # /dir/to/path/temp/name/
            os.makedirs(write_dir, exist_ok=True)
            for i, spt in enumerate(spatial):
                sp.save_npz(os.path.join(write_dir, f"{i}.npz"), spt)
self.make_archive(temp_dir, path)
shutil.rmtree(temp_dir) # remove temporaries
@staticmethod
def make_archive(source: str, destination: str):
base = os.path.basename(destination)
name, ext = os.path.splitext(base) # name.zip -> name, .zip
ext = ext.strip('.') # .zip -> zip
archive_from = os.path.dirname(source)
archive_to = os.path.basename(source.strip(os.sep))
shutil.make_archive(base_name=name, format=ext, root_dir=archive_from, base_dir=archive_to)
shutil.move(f"{name}.{ext}", destination)
| [
"hgkahng@korea.ac.kr"
] | hgkahng@korea.ac.kr |
313272182e4a107138a4a0aeb3fd35388773dbb4 | 3fb93b805cd9c6d19f21a443001fec4597f9105f | /scripts/uncertainty_scripts/train_pfrot.py | e11123f11ef5312baed60581c97980407a5f8a7e | [
"Apache-2.0"
] | permissive | neuroailab/curiosity_deprecated | c3a5a28d93b5bb0d923719c8fe289cfc9a9feec3 | 65f7cde13b07cdac52eed39535a94e7544c396b8 | refs/heads/master | 2021-08-15T08:07:34.845737 | 2017-11-15T19:35:45 | 2017-11-15T19:35:45 | 69,374,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,621 | py | '''
Random actions, after index mismatch bug.
'''
import sys
sys.path.append('curiosity')
sys.path.append('tfutils')
import tensorflow as tf
from curiosity.interaction import train, environment, data, cfg_generation, update_step
import curiosity.interaction.models as models
from tfutils import base, optimizer
import numpy as np
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu', default = '0', type = str)
parser.add_argument('-wmea', '--wmencarchitecture', default = 2, type = int)
parser.add_argument('-wmfca', '--wmfcarchitecture', default = 4, type = int)
parser.add_argument('-wmmbca', '--wmmbcarchitecture', default = -1, type = int)
parser.add_argument('-umea', '--umencarchitecture', default = 0, type = int)
parser.add_argument('-umfca', '--umfcarchitecture', default = 2, type = int)
parser.add_argument('-ummbaa', '--ummbaarchitecture', default = 1, type = int)
parser.add_argument('--umlr', default = 1e-3, type = float)
parser.add_argument('--actlr', default = 1e-4, type = float)
#parser.add_argument('--loss', default = 0, type = int)
parser.add_argument('--tiedencoding', default = False, type = bool)
parser.add_argument('--heat', default = 1., type = float)
parser.add_argument('--egoonly', default = False, type = bool)
parser.add_argument('--zeroedforce', default = False, type = bool)
parser.add_argument('--optimizer', default = 'adam', type = str)
parser.add_argument('--batching', default = 'uniform', type = str)
parser.add_argument('--batchsize', default = 32, type = int)
parser.add_argument('--numperbatch', default = 8, type = int)
parser.add_argument('--historylen', default = 1000, type = int)
parser.add_argument('--ratio', default = 2 / .17, type = float)
parser.add_argument('--objsize', default = .4, type = float)
parser.add_argument('--lossfac', default = 1., type = float)
parser.add_argument('--nclasses', default = 4, type = int)
#parser.add_argument('--t1', default = .05, type = float)
#parser.add_argument('--t2', default = .3, type = float)
#parser.add_argument('--t3', default = .6, type = float)
parser.add_argument('-at', '--actionthreshold', default = .1, type = float)
parser.add_argument('-ut', '--uncertaintythreshold', default = .1, type = float)
parser.add_argument('--modelseed', default = 0, type = int)
N_ACTION_SAMPLES = 1000
EXP_ID_PREFIX = 'pfr'
NUM_BATCHES_PER_EPOCH = 1e8
IMAGE_SCALE = (128, 170)
ACTION_DIM = 5
NUM_TIMESTEPS = 3
T_PER_STATE = 2
RENDER1_HOST_ADDRESS = '10.102.2.161'
STATE_STEPS = [-1, 0]
STATES_GIVEN = [-2, -1, 0, 1]
ACTIONS_GIVEN = [-2, -1, 1]
s_back = - (min(STATES_GIVEN) + min(STATE_STEPS))
s_forward = max(STATES_GIVEN) + max(STATE_STEPS)
a_back = - min(ACTIONS_GIVEN)
a_forward = max(ACTIONS_GIVEN)
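# With the constants above this works out to: s_back = -((-2) + (-1)) = 3,
# s_forward = 1 + 0 = 1, a_back = -(-2) = 2, a_forward = 1.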
args = vars(parser.parse_args())
act_thresholds = [-args['actionthreshold'], args['actionthreshold']]
n_classes_wm = len(act_thresholds) + 1
um_thresholds = [args['uncertaintythreshold']]
n_classes_um = len(um_thresholds) + 1
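# With the default CLI thresholds: act_thresholds = [-0.1, 0.1] -> n_classes_wm = 3
# action bins, and um_thresholds = [0.1] -> n_classes_um = 2 uncertainty bins.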
wm_encoding_choices = [
{
'sizes' : [3, 3, 3, 3],
'strides' : [2, 2, 2, 2],
'num_filters' : [32, 32, 32, 32],
'bypass' : [None, None, None, None]
},
{
'sizes' : [7, 3, 3, 3],
'strides' : [3, 2, 2, 2],
'num_filters' : [32, 32, 32, 32],
'bypass' : [0, 0, 0, 0]
},
{
'sizes' : [7, 3, 3, 3, 3],
'strides' : [3, 2, 2, 2, 2],
'num_filters' : [32, 32, 32, 32, 32],
'bypass' : [0, 0, 0, 0, 0]
},
{
'sizes' : [7, 3, 3, 3, 3],
'strides' : [2, 2, 1, 1, 1],
'num_filters' : [4, 4, 4, 4, 4],
'bypass' : [0, 0, 0, 0, 0]
}
]
wm_mlp_before_concat_choices = [
{
'num_features' : [500, 10],
'nonlinearities' : ['relu', 'relu']
},
{
'num_features' : [500, 50],
'nonlinearities' : ['relu', 'relu']
}
]
wm_mlp_choices = [
{
'num_features' : [256, ACTION_DIM * n_classes_wm],
'nonlinearities' : ['relu', 'identity'],
'dropout' : [None, None]
},
{
'num_features' : [50, 50, ACTION_DIM * n_classes_wm],
'nonlinearities' : ['relu', 'relu', 'identity'],
'dropout' : [None, None, None]
},
{
'num_features' : [50, 50, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None]
},
{
'num_features' : [100, 100, 100, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None, None]
},
{
'num_features' : [500, 500, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None]
},
{
'num_features' : [1000, 1000, 500, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None, None]
}
]
wm_encoding_choice = wm_encoding_choices[args['wmencarchitecture']]
wm_mlp_choice = wm_mlp_choices[args['wmfcarchitecture']]
wm_cfg = {
'num_timesteps' : NUM_TIMESTEPS,
'state_steps' : [-1, 0],
'image_shape' : list(IMAGE_SCALE) + [3],
'states_given' : [-2, -1, 0, 1],
'actions_given' : [-2, -1, 1],
'act_dim' : ACTION_DIM,
'encode' : cfg_generation.generate_conv_architecture_cfg(**wm_encoding_choice),
'action_model' : {
'loss_func' : models.binned_softmax_loss_per_example,
'thresholds': act_thresholds,
'loss_factor' : 1.,
'mlp' : cfg_generation.generate_mlp_architecture_cfg(**wm_mlp_choice)
}
}
mbc_idx = args['wmmbcarchitecture']
if mbc_idx != -1:
wm_mbc_choice = wm_mlp_before_concat_choices[mbc_idx]
wm_cfg['action_model']['mlp_before_concat'] = cfg_generation.generate_mlp_architecture_cfg(**wm_mbc_choice)
um_encoding_choices = [
{
'sizes' : [7, 3, 3, 3],
'strides' : [3, 2, 2, 2],
'num_filters' : [32, 32, 32, 32],
'bypass' : [0, 0, 0, 0]
},
{
'sizes' : [7, 3],
'strides' : [3, 2],
'num_filters' : [16, 2],
'bypass' : [0, 0]
},
{
'sizes' : [7, 3, 3, 3, 3],
'strides' : [3, 2, 2, 2, 2],
'num_filters' : [32, 32, 32, 32, 32],
'bypass' : [0, 0, 0, 0, 0]
}
]
shared_mlp_choices = [
{
'num_features' : [100, 100],
'nonlinearities' : ['relu', 'relu'],
'dropout' : [None, None]
},
{
'num_features' : [50, 50],
'nonlinearities' : ['relu', 'relu'],
'dropout' : [None, None]
},
{
'num_features' : [500],
'nonlinearities' : ['relu'],
'dropout' : [None]
},
{
'num_features' : [50, 50],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu']],
'dropout' : [None, None]
}
]
separate_mlp_choices_proto = {
'num_features' : [n_classes_um],
'nonlinearities' : ['identity'],
'dropout' : [None]
}
separate_mlp_choice = dict((t, separate_mlp_choices_proto) for t in range(NUM_TIMESTEPS))
mlp_before_action_choices = [
{
'num_features' : [500, 10],
'nonlinearities' : ['relu', 'relu']
},
{
'num_features' : [500, 50],
'nonlinearities' : ['relu', 'relu']
},
{
'num_features' : [300, 100],
'nonlinearities' : ['relu', 'relu']
}
]
um_encoding_args = um_encoding_choices[args['umencarchitecture']]
um_mlp_before_act_args = mlp_before_action_choices[args['ummbaarchitecture']]
um_mlp_args = shared_mlp_choices[args['umfcarchitecture']]
um_cfg = {
'shared_encode' : cfg_generation.generate_conv_architecture_cfg(desc = 'encode', **um_encoding_args),
'shared_mlp_before_action' : cfg_generation.generate_mlp_architecture_cfg(**um_mlp_before_act_args),
'shared_mlp' : cfg_generation.generate_mlp_architecture_cfg(**um_mlp_args),
    'mlp' : dict((t, cfg_generation.generate_mlp_architecture_cfg(**choice_args)) for t, choice_args in separate_mlp_choice.items()),
'loss_func' : models.ms_sum_binned_softmax_loss,
'thresholds' : um_thresholds,
'loss_factor' : args['lossfac'],
'n_action_samples' : N_ACTION_SAMPLES,
'heat' : args['heat'],
'just_random' : 1
}
model_cfg = {
'world_model' : wm_cfg,
'uncertainty_model' : um_cfg,
'seed' : args['modelseed']
}
lr_params = {
'world_model' : {
'act_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['actlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
},
'fut_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['actlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
},
'uncertainty_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['umlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
}
if args['optimizer'] == 'adam':
optimizer_class = tf.train.AdamOptimizer
optimizer_params = {
'world_model' : {
'act_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
},
'fut_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
}
},
'uncertainty_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
}
}
elif args['optimizer'] == 'momentum':
optimizer_class = tf.train.MomentumOptimizer
optimizer_params = {
'world_model' : {
'act_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
},
'fut_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
}
},
'uncertainty_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
}
}
train_params = {
'updater_func' : update_step.ActionUncertaintyUpdater,
'updater_kwargs' : {
'state_desc' : 'depths1'
}
}
def get_ms_models(cfg):
world_model = models.MoreInfoActionWorldModel(cfg['world_model'])
uncertainty_model = models.MSExpectedUncertaintyModel(cfg['uncertainty_model'], world_model)
return {'world_model' : world_model, 'uncertainty_model' : uncertainty_model}
model_params = {
'func' : get_ms_models,
'cfg' : model_cfg,
'action_model_desc' : 'uncertainty_model'
}
one_obj_scene_info = [
{
'type' : 'SHAPENET',
'scale' : args['objsize'],
'mass' : 1.,
'scale_var' : .01,
'num_items' : 1,
}
]
force_scaling = 200.
room_dims = (5, 5)
my_rng = np.random.RandomState(0)
history_len = args['historylen']
batch_size = args['batchsize']
data_lengths = {
'obs' : {'depths1' : s_back + s_forward + NUM_TIMESTEPS},
'action' : a_back + a_forward + NUM_TIMESTEPS,
'action_post' : a_back + a_forward + NUM_TIMESTEPS}
dp_config = {
'func' : train.get_batching_data_provider,
'action_limits' : np.array([1., 1.] + [force_scaling for _ in range(ACTION_DIM - 2)]),
'environment_params' : {
'random_seed' : 1,
'unity_seed' : 1,
'room_dims' : room_dims,
'state_memory_len' : {
'depths1' : history_len + s_back + s_forward + NUM_TIMESTEPS
},
'action_memory_len' : history_len + a_back + a_forward + NUM_TIMESTEPS,
'message_memory_len' : history_len + a_back + a_forward + NUM_TIMESTEPS,
'other_data_memory_length' : 32,
'rescale_dict' : {
'depths1' : IMAGE_SCALE
},
'USE_TDW' : True,
'host_address' : RENDER1_HOST_ADDRESS,
'rng_periodicity' : 1,
'termination_condition' : environment.obj_not_present_termination_condition
},
'provider_params' : {
'batching_fn' : lambda hist : data.uniform_experience_replay(hist, history_len, my_rng = my_rng, batch_size = batch_size,
get_object_there_binary = False, data_lengths = data_lengths, which_matters_for_freq = -2),
'capacity' : 5,
'gather_per_batch' : batch_size / 4,
'gather_at_beginning' : history_len + T_PER_STATE + NUM_TIMESTEPS
},
'scene_list' : [one_obj_scene_info],
'scene_lengths' : [1024 * 32],
'do_torque' : False,
'use_absolute_coordinates' : False
}
load_and_save_params = cfg_generation.query_gen_latent_save_params(location = 'freud', prefix = EXP_ID_PREFIX, state_desc = 'depths1', portnum = cfg_generation.NODE_5_PORT)
postprocessor_params = {
'func' : train.get_experience_replay_postprocessor
}
params = {
'model_params' : model_params,
'data_params' : dp_config,
'postprocessor_params' : postprocessor_params,
'optimizer_params' : optimizer_params,
'learning_rate_params' : lr_params,
'train_params' : train_params
}
params.update(load_and_save_params)
params['allow_growth'] = True
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = args['gpu']
train.train_from_params(**params)
| [
"nhaber@stanford.edu"
] | nhaber@stanford.edu |
e82ef1fcf015a4fc0e894feb02466424a8fb1b62 | d21bcfd92d8bbc4f214af29e801b9929d3ab11ef | /winningstrat_2016.py | fbc47a816d33c84e16cee2293c3226539e648143 | [] | no_license | rvictordelta/practiceproblems | 47dfd46ac1eda11769aab10067be3ce6ab012435 | 362b374b3c275870e909c6c2e6624585250ca825 | refs/heads/master | 2020-08-28T15:41:28.691037 | 2019-11-04T15:02:00 | 2019-11-04T15:02:00 | 217,743,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | t1= """100
5
4
0 1 0 0"""
t2= """140
20
8
0 0 0 1 0 1 1 1"""
t3 = """100
5
4
1 1 1 1"""
ts = [t1, t2, t3]
def martingaleyeah(s):
l = s.split('\n')
m = int(l[0]) # starting amount
b = int(l[1]) # first bet
flips = [int(x) for x in l[3].split(" ")]
    for flip in flips:
        if m < b:  # can't cover the next stake -> bust before flipping
            return "BROKE"
        if flip == 1:  # win: collect the stake and reset to the base bet
            m += b
            b = int(l[1])
        else:  # loss: pay the stake and double the next bet (martingale)
            m -= b
            b = b * 2
    return m
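# Worked trace for t1 (m=100, b=5, flips 0 1 0 0), to sanity-check the rules:
# lose -> m=95, b=10; win -> m=105, b=5; lose -> m=100, b=10; lose -> m=90, b=20.
# So t1 prints 90, t2 goes broke on its fourth flip (m=0 < b=160), and t3 prints 120.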
for t in ts:
print(martingaleyeah(t))
| [
"rvandusen@4170trading.com"
] | rvandusen@4170trading.com |
1607cc2e583da0dc2499c9cb382f3d5e35ea2501 | 80bb17c3d9ccece1319b991f4f26740c460ad5f5 | /hw06/emacs/emacs.py | 0adaf3267b8c80d29b2eb3369766c677c4198753 | [] | no_license | jozdashh/agra | 4754a7c2eaff0374622a36fbf721a0950cac1644 | aa862971f133d03b30a7d37df421d778e56de813 | refs/heads/main | 2023-05-29T12:30:55.235743 | 2023-04-27T19:54:21 | 2023-04-27T19:54:21 | 350,473,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # Estudiante: Josue Peña Atencio
# Código: 8935601
# Fecha: 04/11/2018
from sys import stdin
def kmp_prefix(s):
n = len(s) ; P = [None]*n
i = 0 ; j = P[0] = -1
while i < n-1:
while j> -1 and s[i] != s[j]: j = P[j]
i += 1 ; j += 1
if s[i] == s[j]: P[i] = P[j]
else: P[i] = j
return P
def kmp_search(x, y, j):
i = 0
P = kmp_prefix(x)
while j < len(y):
while i > -1 and x[i] != y[j]: i = P[i]
i += 1 ; j += 1
if i >= len(x):
i = P[i-1]
return (True, j)
return (False, -1)
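# Quick hand-checked example: kmp_search('aba', 'xabay', 0) returns (True, 4);
# the second element is the index just past the match, which solve() uses to
# keep successive chunk searches moving forward through t.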
def solve(t, p):
    # Wildcard match: every literal chunk of p (split on '*') must occur in t, in order.
    ans = True
    q = p.strip('*').split('*')
    if len(q) == 0 and len(p) != len(t): ans = False
    i, j = 0, 0
    while i < len(q) and ans:
        if len(q[i]) != 0:
            aux = kmp_search(q[i], t, j)
            ans = ans and aux[0]
            j = max(aux[1], j)
        i += 1
    return ans
def main():
line = stdin.readline()
while len(line)!=0:
n = int(line)
        t = stdin.readline().strip()
for i in range(n):
w = stdin.readline().strip()
ans = solve(t, w)
if ans: print('yes')
else: print('no')
line = stdin.readline()
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
0650b52e5ba0f028a8b022e6f1bd71a79ff90fd2 | 7b14ef3fa1890c89a9f63825f2fdaaf022161c8d | /rialtows/dto.py | 021508dd4282b79e41bb5d6aa17c77e3dfbc260f | [] | no_license | heid9001/okexbinance_ws_api | ad24ac32996b1fc42fb764886d95fabfbc5627fc | 26f315d9580b24aa8c4a81ce5c6ee54a43062512 | refs/heads/master | 2020-08-12T17:06:34.435805 | 2019-10-13T11:14:24 | 2019-10-13T11:14:24 | 214,806,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | from .core import BaseDto, Mapping
import arrow
class ResourceDto(BaseDto):
def __init__(self, data, name):
super().__init__(data)
self.name = name
def __str__(self):
return "%s [ask=%.2f, bid=%.2f]" % (self.name, self.ask, self.bid)
def __repr__(self):
return str(self)
def to_timestamp(date):
return arrow.get(date).timestamp
class BinanceDto(ResourceDto):
date = Mapping("E", converter=int)
ask = Mapping("a", converter=float)
bid = Mapping("b", converter=float)
def __init__(self, data):
super().__init__(data, "binance")
class OkexDto(ResourceDto):
date = Mapping("timestamp", converter=to_timestamp)
ask = Mapping("best_ask", converter=float)
bid = Mapping("best_bid", converter=float)
def __init__(self, data):
super().__init__(data, "okex")
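# Hypothetical usage sketch -- the payload keys follow the Mapping declarations
# above, but the exact exchange message shapes are assumptions, not from this repo:
# BinanceDto({"E": 1570000000000, "a": "8200.10", "b": "8199.95"})
# OkexDto({"timestamp": "2019-10-13T11:00:00Z", "best_ask": "8201.0", "best_bid": "8200.5"})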
| [
"heid9001@gmail.com"
] | heid9001@gmail.com |
2d858cf00a434b0b52a700a9a1d5e9e02287d8f5 | 51de952d3bfdadaef0ea9ea72d995fb44322f094 | /py_codes/1_Two_Layer_MLP/MLP.py | e44d47ea2766a329ba25a7fec7c5e422fcf6e3a1 | [] | no_license | 1437363934/MNITS-Classification-with-Neural-Networks | a72075396123ca52e71d1e4ea6a6cb91a04887c1 | 700d7a952a92f4e1e15039fdca8b809d1584d98c | refs/heads/master | 2022-12-23T19:29:52.543426 | 2020-09-28T15:39:17 | 2020-09-28T15:39:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor and normalize with the MNIST mean/std
normalize = transforms.Normalize((0.1307,), (0.3081,)) # MNIST
transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
# choose the training and test datasets
train_data = datasets.MNIST(root='./data', train=True, download=False, transform=transform)
test_data = datasets.MNIST(root='./data', train=False, download=False, transform=transform)
# prepare data loaders
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(0.2 * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size, sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size, sampler=valid_sampler)
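# Note: with num_train = 60000 and split = 12000, the loaders above give an
# 80/20 split (48,000 training / 12,000 validation images), and
# SubsetRandomSampler reshuffles within each subset on every pass.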
import matplotlib.pyplot as plt
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)  # dataiter.next() was removed from newer PyTorch loader iterators
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, idx + 1, xticks=[], yticks=[])  # subplot counts must be integers
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
0024cfc6a2431b74278efd18cc57d34ba22a2b61 | 414c5a2d46ad90b6d7102fd7a4339b086678a3c2 | /truthyvalues.py | 807c5754a5a9e62837a64ed0dd9f66ddcee3f8b3 | [] | no_license | paulmcaruana/ProgramFlowControlInPython | 565446f836fa2f93a29e12f15b4cb78a7ae00105 | aefc544e6fc48fc0d7bb82fc3ee46fa0f3f80c33 | refs/heads/master | 2022-04-21T06:32:06.737509 | 2020-04-25T13:53:27 | 2020-04-25T13:53:27 | 258,787,775 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | if 0:
print("True") # 0 always evaluates to false as you will see in the output
else:
print("False")
name = input("Please enter your name: ")
# 'if name:' would also work here; the explicit comparison below is clearer until you are comfortable with truthiness
if name != "":
print("Hello, {}".format(name))
else:
print("Are you the man with no name?") | [
"paul@beambient.co.uk"
] | paul@beambient.co.uk |
016d2f4b0007f8a40384dcd7a57e8d67f5a5f01f | 7708c2526947a86d064fc8b07a579baa332c5575 | /Database/build_db_datasets.py | b0b7c3d3ff443564267cc2ad0962d02df56a6c71 | [] | no_license | shunsunsun/Cell_BLAST-notebooks | d622aea190015e8b76207866889dddbd4dd333a8 | 9baebb4311eaf71670f4852238db7b91157e71b1 | refs/heads/master | 2022-01-19T05:05:30.269257 | 2019-04-21T13:30:42 | 2019-04-21T13:30:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,024 | py | #!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import mysql.connector
from utils import nan_safe
def generate_datasets_meta():
dataset_dict = {
item: [
file for file in os.listdir(item)
if file.endswith(".pdf") and file != "peek.pdf"
] for item in os.listdir(".") if item not in (
"__pycache__", ".ipynb_checkpoints"
) and os.path.isdir(item)
}
used_columns = (
"dataset_name", "organism", "organ", "platform",
"cell_number", "publication", "pmid", "remark"
)
single = pd.read_csv(
"../../Datasets/ACA_datasets.csv",
comment="#", skip_blank_lines=True
).loc[:, used_columns]
additional = pd.read_csv(
"../../Datasets/additional_datasets.csv",
comment="#", skip_blank_lines=True
).loc[:, used_columns]
single = pd.concat([single, additional], axis=0, ignore_index=True)
aligned = pd.read_csv(
"../../Datasets/aligned_datasets.csv",
comment="#", skip_blank_lines=True
).loc[:, used_columns]
for idx, row in aligned.iterrows():
aligned.loc[idx, "cell_number"] = single.loc[np.in1d(
single["dataset_name"], row["remark"].split(", ")
), "cell_number"].sum()
combined = pd.concat([single, aligned], axis=0, ignore_index=True)
combined["display"] = np.in1d(
combined["dataset_name"], list(dataset_dict.keys()))
# combined = combined.loc[np.in1d(
# combined["dataset_name"], list(dataset_dict.keys())
# ), :]
# combined["cell_number"] = combined["cell_number"].astype(np.int)
combined["self-projection coverage"] = np.nan
combined["self-projection accuracy"] = np.nan
for idx, row in combined.iterrows():
spf_path = os.path.join(row["dataset_name"], "self_projection.txt")
if not os.path.exists(spf_path):
if row["dataset_name"] in dataset_dict:
print("Missing: " + spf_path)
else:
with open(spf_path, "r") as spf:
lines = spf.readlines()
k1, v1 = lines[0].split()
k2, v2 = lines[1].split()
assert k1 == "coverage" and k2 == "accuracy"
v1, v2 = float(v1.strip()), float(v2.strip())
combined.loc[idx, "self-projection coverage"] = v1
combined.loc[idx, "self-projection accuracy"] = v2
combined["visualization"] = [
(", ".join(dataset_dict[item]) if item in dataset_dict else np.nan)
for item in combined["dataset_name"]
]
# combined.to_csv("./datasets_meta.csv", index=False)
# combined.to_json("./datasets_meta.json", orient="records", double_precision=3)
return combined
def create_table(cnx, cursor):
cursor.execute("DROP TABLE IF EXISTS `datasets`;")
cursor.execute(
"CREATE TABLE `datasets` ("
" `dataset_name` CHAR(50) NOT NULL UNIQUE,"
" `organism` char(50) NOT NULL,"
" `organ` char(100) NOT NULL,"
" `platform` char(50),"
" `cell_number` INT CHECK(`cell_number` > 0),"
" `publication` VARCHAR(300),"
" `pmid` CHAR(8),"
" `remark` VARCHAR(200),"
" `self-projection coverage` FLOAT CHECK(`self-projection coverage` BETWEEN 0 AND 1),"
" `self-projection accuracy` FLOAT CHECK(`self-projection accuracy` BETWEEN 0 AND 1),"
" `visualization` VARCHAR(200),"
" `display` BOOL NOT NULL,"
" PRIMARY KEY USING HASH(`dataset_name`)"
");"
)
def insert_data(cnx, cursor, data):
insert_sql = (
"INSERT INTO `datasets` ("
" `dataset_name`, `organism`, `organ`, `platform`,"
" `cell_number`, `publication`, `pmid`, `remark`,"
" `self-projection coverage`, `self-projection accuracy`,"
" `visualization`, `display`"
") VALUES ("
" %s, %s, %s, %s,"
" %s, %s, %s, %s,"
" %s, %s, %s, %s"
");"
)
for idx, row in data.iterrows():
cursor.execute(insert_sql, (
nan_safe(row["dataset_name"]), nan_safe(row["organism"]),
nan_safe(row["organ"]), nan_safe(row["platform"]),
nan_safe(row["cell_number"], int), nan_safe(row["publication"]),
nan_safe(row["pmid"], lambda x: str(int(x))), nan_safe(row["remark"]),
nan_safe(row["self-projection coverage"], lambda x: float(np.round(x, 3))),
nan_safe(row["self-projection accuracy"], lambda x: float(np.round(x, 3))),
nan_safe(row["visualization"]), nan_safe(row["display"])
))
def main():
cnx = mysql.connector.connect(
user=input("Please enter username: "), password=input("Please enter password: "),
host="127.0.0.1", database="aca"
)
cursor = cnx.cursor()
create_table(cnx, cursor)
insert_data(cnx, cursor, generate_datasets_meta())
cnx.commit()
cursor.close()
cnx.close()
if __name__ == "__main__":
main()
| [
"caozj@mail.cbi.pku.edu.cn"
] | caozj@mail.cbi.pku.edu.cn |
8ead2cc76bbb2d6fc24ed27579b4c369e17df37c | af6e4fbc7ccf3a19322d0ba8bda05ece43ea7205 | /sample.py | 8cda191161fa4974ce01ad25b6ea18d6f513d0c7 | [] | no_license | senaprojects/YOUTUBE_VIDEO | bc3d0782693c9589054955629fb075f6c10855c8 | 3d2b4d8ae44a5e35a927b165ed25ee6c4aaa8fc1 | refs/heads/main | 2023-08-19T04:39:11.820006 | 2021-08-23T04:23:42 | 2021-08-23T04:23:42 | 398,971,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | import pywhatkit as kit
y=input("Enter the playlist :")
kit.playonyt(y)
| [
"noreply@github.com"
] | noreply@github.com |
8bc523a3d7f94a6554ba21f8ade1ce2b4de1c047 | 33643fa32b68acb68154d6e76f79fca436381482 | /resources/examples/test.py | 68453ef707231ed6571de35fd3c211b4c1b5ceb4 | [
"Apache-2.0"
] | permissive | R0X4R/turbo-intruder | 3f562ad2e271d27a471f1113c019a37d58c4acb6 | 8535b2e2edee5f99a64526df3fc1e8466bcb04b6 | refs/heads/master | 2020-07-23T23:58:05.326742 | 2019-09-03T08:48:37 | 2019-09-03T08:48:37 | 207,743,824 | 1 | 2 | Apache-2.0 | 2019-09-11T06:54:31 | 2019-09-11T06:54:27 | null | UTF-8 | Python | false | false | 933 | py | # This is just for making sure the engine works during development
# Launch with java -jar build/libs/turbo-intruder-all.jar resources/examples/test.py /dev/null z z
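# Note: RequestEngine and table are not imported anywhere because Turbo
# Intruder injects them into the script's namespace at runtime.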
def queueRequests(target, wordlists):
engine = RequestEngine(endpoint='https://hackxor.net:443',
concurrentConnections=1,
requestsPerConnection=10,
pipeline=False
)
engine.start()
noPayload = '''GET /static/404 HTTP/1.1
Host: hackxor.net
Connection: close
'''
engine.queue(noPayload)
onePayload = '''GET /static/404?q=%s HTTP/1.1
Host: hackxor.net
Connection: close
'''
engine.queue(onePayload, 'one payload')
    # a template queued with two payloads needs two %s markers
    twoPayloads = '''GET /static/404?q=%s&z=%s HTTP/1.1
Host: hackxor.net
Connection: close
'''
engine.queue(twoPayloads, ['first payload', 'second payload'])
def handleResponse(req, interesting):
table.add(req)
| [
"albinowax@gmail.com"
] | albinowax@gmail.com |
5821bf541b8ad5152c0a740e1115539500cdd973 | fd4a26ef38bf4fa91af57884fbcea7a0230c097a | /CrowdSupplyDotComScraper/CrowdSupplyDotComScraper/settings.py | dc4ae09126cbdcc3fe52e76d304a732e12c04d2f | [] | no_license | kelvingakuo/Crowd-Funding-Bots | 260cbf89cdb5d1a9c129a0089ba4c6738c32b992 | 00ba5a6fa5f8b2378cd5e27336a2df82902bed43 | refs/heads/master | 2020-03-14T08:38:55.588095 | 2018-04-30T22:41:25 | 2018-04-30T22:41:25 | 131,529,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,306 | py | # -*- coding: utf-8 -*-
# Scrapy settings for CrowdSupplyDotComScraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'CrowdSupplyDotComScraper'
SPIDER_MODULES = ['CrowdSupplyDotComScraper.spiders']
NEWSPIDER_MODULE = 'CrowdSupplyDotComScraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'CrowdSupplyDotComScraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'CrowdSupplyDotComScraper.middlewares.CrowdsupplydotcomscraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'CrowdSupplyDotComScraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'CrowdSupplyDotComScraper.pipelines.CrowdsupplydotcomscraperPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"kelvingakuo@gmail.com"
] | kelvingakuo@gmail.com |
b84c3ca4482e26c4f3ab2a79107e873f9b1656c5 | b3879bc761ac38dab903da57c4061ad79fd70c6d | /курсы пайтон модуль 3/задание 23.py | 14e81b84571b42392d7be1c1572eee18530c2954 | [] | no_license | Ruslan5252/all-of-my-projects-byPyCharm | 4df70cc3a31c4a5d97560fa858a706edcc856299 | 817d5f711408590ea141590ae52c6d888dfa2015 | refs/heads/master | 2023-05-03T01:06:30.156731 | 2021-05-29T13:51:16 | 2021-05-29T13:51:16 | 371,970,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | a=1
largest = 0  # avoid shadowing the built-in max()
while a != 0:
    a = int(input("enter a number (0 to stop): "))
    if a > largest:
        largest = a
print("maximum value:", largest)
| [
"r.u.s_2000@mail.ru"
] | r.u.s_2000@mail.ru |
5b9bfc13c5a323f114e11e4cc09c5ee13fb5f784 | 2128204364199ce5a0b8f090615c75af541666ef | /Attendance.py | 3fd550bc925001aa74423444243d32945ebdc48a | [] | no_license | k4kunalll/Attendance-System | 5094abb1b5b62cc8263c90a10b2672ae98620ea0 | 85c5d64ef3a3b8c8849516e291ff6981fdb7c3e7 | refs/heads/main | 2023-01-24T18:55:43.501871 | 2020-12-05T10:31:57 | 2020-12-05T10:31:57 | 318,759,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,376 | py | import os
from PIL import ImageTk, Image
import csv
from functools import partial
from tkinter import *
import numpy as np
import pandas as pd
import paramiko
from scp import SCPClient
import time
from sklearn.preprocessing import Normalizer
import mtcnn
from PIL import Image
from os import listdir
from os.path import isdir, isfile
from keras.models import load_model
from sklearn.externals import joblib
from datetime import date
from tkinter import messagebox
model = load_model("C:/Users/Kunal/Desktop/model/facenet_keras.h5")  # load the saved FaceNet model; it is used below in Detect_face()
print("model loaded")
in_encoder = Normalizer(norm='l2')
def Detect_face(): # extraction and recognition of face
def Copy_Image(server, port, user, password):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, port, user, password)
scp = SCPClient(client.get_transport())
scp.get("/home/pi/cam.jpg", local_path="C:/Users/Kunal/Desktop/image")
scp.close()
def Capture_Image(server, port, user, password):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, port, user, password)
stdin, cpu, stderr = client.exec_command("raspistill -o cam.jpg")
def extract_face(filename, required_size=(160, 160)): # extraction of face
image = Image.open(filename) # loading image from directory
image = image.convert("RGB")
pixels = np.asarray(image)
detector = mtcnn.mtcnn.MTCNN()
results = detector.detect_faces(pixels) # detecting face using MTCNN Model
x1, y1, width, height = results[0]['box'] # getting box coordinates
x1, y1 = abs(x1), abs(y1)
x2, y2 = x1 + width, y1 + height
face = pixels[y1:y2, x1:x2] # extracting face using those coordinates
image = Image.fromarray(face)
image = image.resize(required_size)
face_array = np.asarray(image) # converting image back to numpy array
return face_array
def get_face_embedding(face_pixels, model): # getting the face embeddings using facenet model
face_pixels = face_pixels.astype('float32')
mean, std = face_pixels.mean(), face_pixels.std() # normalizing the face pixels
face_pixels = (face_pixels - mean) / std
samples = np.expand_dims(face_pixels, axis=0)
yhat = model.predict(samples)
return yhat
def face_predict(face_array, model_file_path):
model_learn = joblib.load(model_file_path)
y_predict_class = model_learn.predict(face_array)
y_predict_prob = model_learn.predict_proba(face_array)
return y_predict_class[0]
def Red_led(server, port, user, password):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, port, user, password)
stdin, cpu, stderr = client.exec_command("python RED.py")
def Green_led(server, port, user, password):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, port, user, password)
stdin, cpu, stderr = client.exec_command("python Green.py")
Capture_Image("192.168.43.248", 22, "pi", "4489Dma3")
time.sleep(7)
Copy_Image("192.168.43.248", 22, "pi", "4489Dma3")
time.sleep(3)
print('image copied')
face = extract_face("C:/Users/Kunal/Desktop/image/cam.jpg") # calling extract_face()
face_embedding = get_face_embedding(face, model) # calling get_face_embedding()
face_arr = np.asarray(face_embedding)
face_array = in_encoder.transform(face_arr)
name = face_predict(face_array, 'C:/Users/Kunal/Desktop/model/Attendance_model.pkl')
today = date.today()
if not isfile(r'C:\Users\Kunal\Desktop\file' + r'\y' + str(today.year) + r'\at' + str(today.month) + '_' + str(
today.year) + '.csv'):
f = open(r'C:\Users\Kunal\Desktop\file' + r'\y' + str(today.year) + r'\at' + str(today.month) + '_' + str(
today.year) + '.csv', "w")
writer = csv.DictWriter(
f, fieldnames=["AdmissionNo", "Name", "Date"])
writer.writeheader()
f.close()
if name < 4:
data = pd.read_csv(r'C:\Users\Kunal\Desktop\file\y' + str(today.year) + r'\StudentDetails.csv')
fieldnames = ['AdmissionNo', 'Name', 'Date']
info = {'AdmissionNo': data.loc[name].AdmissionNo, 'Name': data.loc[name].Name, 'Date': today}
with open(r'C:\Users\Kunal\Desktop\file\y' + str(today.year) + r'\at' + str(today.month) + '_' + str(
today.year) + '.csv', 'a', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(info)
Green_led("192.168.43.248", 22, "pi", "4489Dma3")
messagebox.showinfo("Attendance System", "Attendance of " + data['Name'][name] + " taken")
else:
Red_led("192.168.43.248", 22, "pi", "4489Dma3")
messagebox.showinfo("Attendance System", "Another Face Detected")
# opens csv file
def open_file():
os.startfile(r"C:\Users\Kunal\Desktop\file\at.csv")
# opens capture window
def popup_window():
window = Toplevel()
window.title("Attendance Taking System")
window.geometry("400x200")
window.configure(background="grey")
label = Label(window, text="Take Attendance", relief="solid", fg="white", bg="red",
font=("ariel", 16, "bold")).pack()
button1 = Button(window, text="Capture", command=Detect_face, fg="white", bg="brown", relief=GROOVE,
font=("ariel", 12, "bold"))
button1.place(x=160, y=100)
exit_b = Button(window, text="Exit", command=window.destroy, fg="white", bg="brown", relief=GROOVE,
font=("ariel", 12, "bold"))
exit_b.place(x=174, y=150)
# opens manually attendance window
def manually_window():
def Red_led(server, port, user, password):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, port, user, password)
stdin, cpu, stderr = client.exec_command("python RED.py")
def Green_led(server, port, user, password):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(server, port, user, password)
stdin, cpu, stderr = client.exec_command("python Green.py")
def call_result(n1, n2):
today = date.today()
name = (n1.get())
roll = (n2.get())
name = str(name)
roll = str(roll)
if not isfile(r'C:\Users\Kunal\Desktop\file\y' + str(today.year) + r'\at' + str(today.month) + '_' + str(
today.year) + '.csv'):
f = open(r'C:\Users\Kunal\Desktop\file\y' + str(today.year) + r'\at' + str(today.month) + '_' + str(
today.year) + '.csv', "w")
writer = csv.DictWriter(
f, fieldnames=["AdmissionNo", "Name", "Date"])
writer.writeheader()
f.close()
data = pd.read_csv(r'C:\Users\Kunal\Desktop\file\y' + str(today.year) + '\StudentDetails.csv')
if roll in list(data['AdmissionNo']):
fieldnames = ['AdmissionNo', 'Name', 'Date']
info = {'AdmissionNo': roll, 'Name': data['Name'][data['AdmissionNo'] == roll][
list(data['SNo.'][data['AdmissionNo'] == roll])[0]], 'Date': today}
with open(r'C:\Users\Kunal\Desktop\file' + r'\y' + str(today.year) + r'\at' + str(today.month) + '_' + str(
today.year) + '.csv', 'a', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(info)
Green_led("192.168.43.248", 22, "pi", "4489Dma3")
messagebox.showinfo("Attendance System", "Attendance of " + data['Name'][data['AdmissionNo'] == roll][
list(data['SNo.'][data['AdmissionNo'] == roll])[0]] + " taken")
else:
Red_led("192.168.43.248", 22, "pi", "4489Dma3")
messagebox.showinfo("Attendance System", "Wrong Admission No.!!!")
return
window = Toplevel()
window.title("Attendance Taking System")
window.geometry("600x450")
window.configure(background="grey")
number1 = StringVar()
number2 = StringVar()
label1 = Label(window, text="Manually Fill Attendance", relief="solid", fg="white", bg="red",
font=("ariel", 16, "bold")).pack()
label2 = Label(window, text="Enter Name", width=15, height=1, fg="white", bg="blue2",
font=('times', 15, ' bold '))
label2.place(x=70, y=100)
label3 = Label(window, text="Enter Enrollment", width=15, height=1, fg="white", bg="blue2",
font=('times', 15, ' bold '))
label3.place(x=70, y=200)
entry_name = Entry(window, textvar=number1, width=15, fg="black", font=('times', 13, ' bold '))
entry_name.place(x=350, y=105)
entry_rollno = Entry(window, textvar=number2, width=15, fg="black", font=('times', 13, ' bold '))
entry_rollno.place(x=350, y=200)
call_result = partial(call_result, number1, number2)
b1 = Button(window, text="Enter", command=call_result, width=10, height=1, fg="white", bg="blue2",
font=('times', 12, ' bold '))
b1.place(x=250, y=280)
b2 = Button(window, text="Exit", command=window.destroy, width=10, height=1, fg="white", bg="blue2",
font=('times', 12, ' bold '))
b2.place(x=250, y=350)
def monthly_stats():
today = date.today()
def monthly(roll,m):
if not isfile(r'C:\Users\Kunal\Desktop\file' + r'\y' + str(today.year) + r'\at' + str(m) + '_' + str(
today.year) + '.csv'):
messagebox.showinfo("Attendance System", "File Not Found")
else:
data = pd.read_csv(r'C:\Users\Kunal\Desktop\file\y' + str(today.year) + r'\at' + str(m) + '_' + str(today.year) + '.csv')
admin_no = roll.get()
admin = list(data['AdmissionNo'])
cnt = admin.count(admin_no)
label2 = Label(window, text="Total Attendance", width=15, height=1, fg="white", bg="blue2",
font=('times', 15, ' bold '))
label2.place(x=20, y=220)
label = Label(window,relief="solid", fg="black", bg="white",font=("ariel", 16, "bold"),
text = str(cnt))
label.place(x = 250, y = 220)
window = Toplevel()
window.title("Attendance Taking System")
window.geometry("400x400")
window.configure(background="grey")
var_month = StringVar()
enroll = StringVar()
label1 = Label(window, text="Monthly Enrollment", relief="solid", fg="white", bg="red",
font=("ariel", 16, "bold")).pack()
label2 = Label(window, text="Choose Month", width=15, height=1, fg="white", bg="blue2",
font=('times', 15, ' bold '))
label2.place(x=20, y=150)
label3 = Label(window, text="Enter Enrollment", width=15, height=1, fg="white", bg="blue2",
font=('times', 15, ' bold '))
label3.place(x=20, y=100)
entry_enroll = Entry(window, textvar=enroll, width=15, fg="black", font=('times', 13, ' bold '))
entry_enroll.place(x=250, y=100)
months = ["January", "February", "March", "April", "May", "June", "July", "August", "September",
"October", "November", "December"]
droplist = OptionMenu(window, var_month, *months)
var_month.set("Select Month")
droplist.config(width=15)
droplist.place(x=250, y=150)
def button():
mon = var_month.get()
ind = months.index(mon)
monthly(enroll, ind+1)
b2 = Button(window, text="Enter", command=button, width=9, height=1, fg="white", bg="blue2",
font=('times', 11, ' bold '))
b2.place(x=160, y=280)
b3 = Button(window, text="Exit", command=window.destroy, width=9, height=1, fg="white", bg="blue2",
font=('times', 11, ' bold '))
b3.place(x=160, y=320)
# exit gui
def exitcode():
exit()
| [
"kunalsahuvic@gmail.com"
] | kunalsahuvic@gmail.com |
febfe65ae8c61e9e2ee00a30f5a65ef5d45eb9df | 6b8c3974d3ce5f7841e51dcb406666c0c5d92155 | /heat/heat/tests/test_sahara_templates.py | 4a887b85c2db0dce48627fc26ea234c8235c9a1b | [
"Apache-2.0"
] | permissive | swjang/cloudexchange | bbbf78a2e7444c1070a55378092c17e8ecb27059 | c06ed54f38daeff23166fb0940b27df74c70fc3e | refs/heads/master | 2020-12-29T03:18:43.076887 | 2015-09-21T07:13:22 | 2015-09-21T07:13:22 | 42,845,532 | 1 | 1 | null | 2015-09-21T07:13:22 | 2015-09-21T05:19:35 | C++ | UTF-8 | Python | false | false | 13,120 | py | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import sahara_templates as st
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
node_group_template = """
heat_template_version: 2013-05-23
description: Sahara Node Group Template
resources:
node-group:
type: OS::Sahara::NodeGroupTemplate
properties:
name: node-group-template
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
volume_type: lvm
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
"""
cluster_template = """
heat_template_version: 2013-05-23
description: Sahara Cluster Template
resources:
cluster-template:
type: OS::Sahara::ClusterTemplate
properties:
name: test-cluster-template
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
"""
cluster_template_without_name = """
heat_template_version: 2013-05-23
resources:
cluster_template!:
type: OS::Sahara::ClusterTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
"""
node_group_template_without_name = """
heat_template_version: 2013-05-23
resources:
node_group!:
type: OS::Sahara::NodeGroupTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
"""
class FakeNodeGroupTemplate(object):
def __init__(self):
self.id = "some_ng_id"
self.name = "test-cluster-template"
class FakeClusterTemplate(object):
def __init__(self):
self.id = "some_ct_id"
self.name = "node-group-template"
class SaharaNodeGroupTemplateTest(common.HeatTestCase):
def setUp(self):
super(SaharaNodeGroupTemplateTest, self).setUp()
self.patchobject(st.constraints.CustomConstraint,
'_is_valid').return_value = True
self.patchobject(nova.NovaClientPlugin, 'get_flavor_id'
).return_value = 'someflavorid'
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin, 'find_neutron_resource'
).return_value = 'some_pool_id'
sahara_mock = mock.MagicMock()
self.ngt_mgr = sahara_mock.node_group_templates
self.patchobject(sahara.SaharaClientPlugin,
'_create').return_value = sahara_mock
self.fake_ngt = FakeNodeGroupTemplate()
self.t = template_format.parse(node_group_template)
def _init_ngt(self, template):
self.stack = utils.parse_stack(template)
return self.stack['node-group']
def test_ngt_resource_mapping(self):
ngt = self._init_ngt(self.t)
mapping = st.resource_mapping()
self.assertEqual(st.SaharaNodeGroupTemplate,
mapping['OS::Sahara::NodeGroupTemplate'])
self.assertIsInstance(ngt,
st.SaharaNodeGroupTemplate)
def _create_ngt(self, template):
ngt = self._init_ngt(template)
self.ngt_mgr.create.return_value = self.fake_ngt
scheduler.TaskRunner(ngt.create)()
self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
self.assertEqual(self.fake_ngt.id, ngt.resource_id)
return ngt
def test_ngt_create(self):
self._create_ngt(self.t)
expected_args = ('node-group-template', 'vanilla',
'2.3.0', 'someflavorid')
expected_kwargs = {'description': "",
'volumes_per_node': None,
'volumes_size': None,
'volume_type': 'lvm',
'security_groups': None,
'auto_security_group': None,
'availability_zone': None,
'volumes_availability_zone': None,
'node_processes': ['namenode', 'jobtracker'],
'floating_ip_pool': 'some_pool_id',
'node_configs': None,
'image_id': None,
}
self.ngt_mgr.create.assert_called_once_with(*expected_args,
**expected_kwargs)
def test_ngt_delete(self):
ngt = self._create_ngt(self.t)
scheduler.TaskRunner(ngt.delete)()
self.ngt_mgr.delete.assert_called_once_with(self.fake_ngt.id)
self.assertEqual((ngt.DELETE, ngt.COMPLETE), ngt.state)
def test_ngt_delete_ignores_not_found(self):
ngt = self._create_ngt(self.t)
self.ngt_mgr.delete.side_effect = sahara.sahara_base.APIException(
error_code=404)
scheduler.TaskRunner(ngt.delete)()
self.ngt_mgr.delete.assert_called_once_with(self.fake_ngt.id)
def test_ngt_delete_fails(self):
ngt = self._create_ngt(self.t)
self.ngt_mgr.delete.side_effect = sahara.sahara_base.APIException()
delete_task = scheduler.TaskRunner(ngt.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = "APIException: resources.node-group: None"
self.assertEqual(expected, six.text_type(ex))
self.ngt_mgr.delete.assert_called_once_with(self.fake_ngt.id)
def test_validate_floatingippool_on_neutron_fails(self):
ngt = self._init_ngt(self.t)
self.patchobject(ngt, 'is_using_neutron').return_value = True
self.patchobject(
neutron.NeutronClientPlugin, 'find_neutron_resource'
).side_effect = [
neutron.exceptions.NeutronClientNoUniqueMatch(message='Too many'),
neutron.exceptions.NeutronClientException(message='Not found',
status_code=404)
]
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Too many',
six.text_type(ex))
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Not found',
six.text_type(ex))
def test_validate_floatingippool_on_novanetwork_fails(self):
ngt = self._init_ngt(self.t)
self.patchobject(ngt, 'is_using_neutron').return_value = False
nova_mock = mock.MagicMock()
nova_mock.floating_ip_pools.find.side_effect = (
nova.exceptions.NotFound(404, message='Not found'))
self.patchobject(nova.NovaClientPlugin,
'_create').return_value = nova_mock
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Not found', six.text_type(ex))
def test_validate_flavor_constraint_return_false(self):
self.t['resources']['node-group']['properties'].pop('floating_ip_pool')
self.t['resources']['node-group']['properties'].pop('volume_type')
ngt = self._init_ngt(self.t)
self.patchobject(st.constraints.CustomConstraint, '_is_valid'
).return_value = False
self.patchobject(ngt, 'is_using_neutron').return_value = False
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual(u"Property error: "
u"resources.node-group.properties.flavor: "
u"Error validating value 'm1.large'",
six.text_type(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(node_group_template_without_name)
stack = utils.parse_stack(tmpl)
ngt = stack['node_group!']
self.ngt_mgr.create.return_value = self.fake_ngt
scheduler.TaskRunner(ngt.create)()
self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
self.assertEqual(self.fake_ngt.id, ngt.resource_id)
name = self.ngt_mgr.create.call_args[0][0]
self.assertIn('-nodegroup-', name)
class SaharaClusterTemplateTest(common.HeatTestCase):
def setUp(self):
super(SaharaClusterTemplateTest, self).setUp()
self.patchobject(st.constraints.CustomConstraint, '_is_valid'
).return_value = True
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin, 'find_neutron_resource'
).return_value = 'some_network_id'
sahara_mock = mock.MagicMock()
self.ct_mgr = sahara_mock.cluster_templates
self.patchobject(sahara.SaharaClientPlugin,
'_create').return_value = sahara_mock
self.fake_ct = FakeClusterTemplate()
self.t = template_format.parse(cluster_template)
def _init_ct(self, template):
self.stack = utils.parse_stack(template)
return self.stack['cluster-template']
def test_ct_resource_mapping(self):
ct = self._init_ct(self.t)
mapping = st.resource_mapping()
self.assertEqual(st.SaharaClusterTemplate,
mapping['OS::Sahara::ClusterTemplate'])
self.assertIsInstance(ct,
st.SaharaClusterTemplate)
def _create_ct(self, template):
ct = self._init_ct(template)
self.ct_mgr.create.return_value = self.fake_ct
scheduler.TaskRunner(ct.create)()
self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
self.assertEqual(self.fake_ct.id, ct.resource_id)
return ct
def test_ct_create(self):
self._create_ct(self.t)
expected_args = ('test-cluster-template', 'vanilla',
'2.3.0')
expected_kwargs = {'description': '',
'default_image_id': None,
'net_id': 'some_network_id',
'anti_affinity': None,
'node_groups': None,
'cluster_configs': None
}
self.ct_mgr.create.assert_called_once_with(*expected_args,
**expected_kwargs)
def test_ct_delete(self):
ct = self._create_ct(self.t)
scheduler.TaskRunner(ct.delete)()
self.ct_mgr.delete.assert_called_once_with(self.fake_ct.id)
self.assertEqual((ct.DELETE, ct.COMPLETE), ct.state)
def test_ngt_delete_ignores_not_found(self):
ct = self._create_ct(self.t)
self.ct_mgr.delete.side_effect = sahara.sahara_base.APIException(
error_code=404)
scheduler.TaskRunner(ct.delete)()
self.ct_mgr.delete.assert_called_once_with(self.fake_ct.id)
def test_ngt_delete_fails(self):
ct = self._create_ct(self.t)
self.ct_mgr.delete.side_effect = sahara.sahara_base.APIException()
delete_task = scheduler.TaskRunner(ct.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = "APIException: resources.cluster-template: None"
self.assertEqual(expected, six.text_type(ex))
self.ct_mgr.delete.assert_called_once_with(self.fake_ct.id)
def test_ct_validate_no_network_on_neutron_fails(self):
self.t['resources']['cluster-template']['properties'].pop(
'neutron_management_network')
ct = self._init_ct(self.t)
self.patchobject(ct, 'is_using_neutron', return_value=True)
ex = self.assertRaises(exception.StackValidationFailed,
ct.validate)
self.assertEqual("neutron_management_network must be provided",
six.text_type(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(cluster_template_without_name)
stack = utils.parse_stack(tmpl)
ct = stack['cluster_template!']
self.ct_mgr.create.return_value = self.fake_ct
scheduler.TaskRunner(ct.create)()
self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
self.assertEqual(self.fake_ct.id, ct.resource_id)
name = self.ct_mgr.create.call_args[0][0]
self.assertIn('-clustertemplate-', name)
| [
"kiku4@kinx.net"
] | kiku4@kinx.net |
45f0d40c005fd82f923f1c618aed8a16151bee5d | 4efa04e98563e08dd1618525003e5022220780ec | /pypolybuilder/Exclusions.py | d1e4177a838d5d2b1bc3ac4101d208dbcb41e686 | [
"MIT"
] | permissive | lgjun/pypolybuilder | 0bf9c6cde1cbc709f41e943c29e23d04390caec7 | af0e1801305080501b18e45d2383e3466865f19d | refs/heads/main | 2023-03-15T06:38:50.499010 | 2021-03-03T21:57:39 | 2021-03-03T21:57:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | '''
Created on Apr 20, 2015
@author: vitor
'''
class Exclusions(object):
'''
    Container for an atom's exclusion list, plus the extra exclusions that are
    only needed when writing the GROMACS output.
'''
__exclusion_list = None
__exclusion_extra = None
def __init__(self, exclusion_list, exclusion_extra):
exclusion_list.sort()
self.__exclusion_list = exclusion_list
self.__exclusion_extra = exclusion_extra
def print_exclusions(self):
for exc in self.get_exclusion_list():
print(exc)
def get_exclusion_list(self):
return self.__exclusion_list
def get_exclusion_extra(self): #This is used for the Gromacs output only since GROMOS will handle explicitly all the exclusions
return self.__exclusion_extra
def set_exclusion_list(self, value):
self.__exclusion_list = value
def del_exclusion_list(self):
del self.__exclusion_list
exclusion_list = property(get_exclusion_list, set_exclusion_list, del_exclusion_list, "exclusion_list's docstring")
| [
"bruno.horta@gmail.com"
] | bruno.horta@gmail.com |
623a1d02618a98b846a644d65873a91f693fb8d4 | 21901fb376d67172a9f5dfcddb58ba5dcb83864e | /estadísticas/tamaño_muestras.py | 17a747ab2f39c3ed19e8029681fd2066dc3bf1bf | [] | no_license | LaManoServida/TFM-Biocruces | 977a9f15bbf96d61022df7ba0f491c4169a4cf9c | b126070c415aa4184765f7474a82953dbf1c54ce | refs/heads/master | 2023-01-03T11:13:08.717203 | 2020-10-13T05:09:17 | 2020-10-13T05:09:17 | 262,435,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from preprocesamiento.funciones import buscar_csv
''' Computes the sample sizes of the files in a folder '''
# PARAMETERS
ruta_carpeta = 'D:/Dropbox/UNI/TFM/datos/3 - Fecha a timestamp'
ruta_grafico = 'Tamaño de muestras - diagrama de barras.pdf'
clave_principal = 'PATNO'
# read the tables
tablas = [pd.read_csv(ruta_arch, sep=',', float_precision='round_trip') for ruta_arch in buscar_csv(ruta_carpeta)]
# keep only the number of unique patients in each file
nums_pacientes = [len(np.unique(tabla[clave_principal].values)) for tabla in tablas]
print(nums_pacientes)
# generate the bar chart
etiquetas = [os.path.splitext(os.path.basename(r))[0] for r in buscar_csv(ruta_carpeta)]
print(etiquetas)
plt.figure(figsize=(6, 7))
plt.bar(etiquetas, nums_pacientes, color='forestgreen')
# horizontal gridlines
axes = plt.gca()
axes.yaxis.grid()
# x-axis labels
plt.xticks(rotation=45, ha='right', rotation_mode='anchor')
# title
plt.title('Number of unique patients per file')
# save
plt.tight_layout()
plt.savefig(ruta_grafico)
plt.show()
| [
"andoni7@outlook.com"
] | andoni7@outlook.com |
1bb80b25bf87d695dd5433efee4ab2a9b1aa572c | 483508a4e002bcd734b8729459d3e5d5e02aae70 | /number_frequency.py | 27b6776f20516181ec134ca21ebb9c493c09bc5c | [] | no_license | jdavid54/benford_law | 9d54cd539130bc3665080ca801d1bb4db96a18a9 | 3ff9d8358f59fef60f401c290ceb94701613e1b2 | refs/heads/main | 2023-07-18T03:56:18.685081 | 2021-08-25T10:44:37 | 2021-08-25T10:44:37 | 399,751,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | import numpy as np
import random
import matplotlib.pyplot as plt
# benford's law
# value
l1 = 10000
# size
l2 = 100
freq=[0]*10
x = np.arange(1,10)
'''
a = np.random.randint(1,l1,(1,l2))
print(a)
for i in np.array(*a):
n = int(str(i)[0])
#print(n)
freq[n] = freq[n]+1
print(freq)
plt.bar(x,freq[1:])
#plt.show()
for i in range(100):
n = int(str(a[0][np.random.randint(0,l2)])[0])
#print(n)
freq[n] = freq[n]+1
print(freq)
plt.bar(x,freq[1:])
#plt.show()
'''
# Benford's law: expected first-digit probabilities log10(1 + 1/k)
log_array=[]
for k in x:
print((1+1/k, np.log10(1+1/k)))
log_array.append(np.log10(1+1/k))
#print('sum',sum(log_array)) # sum=1
#plt.bar(x, np.log10(1+1/x)*100)
#plt.title('Loi Benford')
#plt.show()
# https://fr.wikipedia.org/wiki/Loi_de_Benford
# On the other hand, in a list of 100 numbers obtained as products of two or more
# numbers drawn at random between 1 and 10,000, the frequencies of the digits 1-9
# in the leading position roughly follow the values given by Benford's law.
val = 10000
numbers=[]
m = 5
kmin = 2
kmax = 5
klist = []
benford=[np.log10(1+1/x) for x in range(1,10)]
print(benford)
benford_cumsum = np.cumsum(benford)
print(benford_cumsum)
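# For reference, the expected Benford first-digit shares log10(1 + 1/d) are:
#   d :   1     2     3     4     5     6     7     8     9
#   % : 30.1  17.6  12.5   9.7   7.9   6.7   5.8   5.1   4.6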
# build m*100 numbers, each the product of k random integers between 1 and val
for i in range(m*100):
    p = 1
    k = random.randint(kmin,kmax)
    if k not in klist:
        klist.append(k)
    for _ in range(k):  # '_' so the outer loop variable i is not shadowed
p *= np.random.randint(1,val)
p0 = int(str(p)[0])
numbers.append((k,p0,p))
freq[p0] = freq[p0]+1
freq=[f/m for f in freq]
freq_cumul = np.cumsum(freq)
print(freq[1:])
print(klist)
print(numbers)
plt.bar(x-0.2,np.log10(1+1/x)*100,0.4, label='Benford\'s law')
plt.bar(x+0.2,freq[1:],0.4, label='Product of k random numbers')
plt.title(', '.join([str(round(s,1)) for s in freq[1:]]))
plt.legend()
plt.show()
plt.bar(x-0.2, benford_cumsum*100,0.4, label='Benford\'s law, cumulative sum')
plt.bar(x+0.2,freq_cumul[1:],0.4, label='Product of k random numbers, cumulative sum')
#plt.bar(x,freq_cumul[1:])
plt.title('Cumulative frequencies')
plt.legend()
plt.show() | [
"noreply@github.com"
] | noreply@github.com |
9b84a3e64e7ca9d613cb149edf544b6042f3e0ce | 6d061520f6d2507d772ddd4cb2f9d39a8963a092 | /Mao/88.py | 3e8943a4af5635f1f5c92403eb8b709ab65a4837 | [] | no_license | SeanXiaohengMao/Leetcode | 54db3d9de024c111e28175ec896dba15aa63b5da | aab53ac5f93c7c8109468c919ec2f5a0de7a1b0f | refs/heads/master | 2020-03-11T04:50:07.518665 | 2018-05-13T21:08:37 | 2018-05-13T21:08:37 | 129,786,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
a = m+n-1
while m>0 and n>0:
if nums1[m-1]>nums2[n-1]:
nums1[a] = nums1[m-1]
m-=1
a-=1
else:
nums1[a] = nums2[n-1]
n-=1
a-=1
if n>0:
nums1[:n] = nums2[:n]
return nums1
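# Filling nums1 from the back means the merge runs in O(m + n) time and O(1)
# extra space: the largest remaining element always lands in an unused slot.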
print(Solution().merge([1,2,7,0,0,0], 3, [3,5,6], 3))  # expected: [1, 2, 3, 5, 6, 7] | [
"Mstarfish@163.com"
] | Mstarfish@163.com |
a964f2d2d1b7c201506d4e805323c0516c2c46f3 | 47be39fab9443d66a26228fd6966ee67f802da8d | /models/user.py | d91a269e00716b49096cdf3324a048a971b28b1f | [] | no_license | JosueLC/trinos-api | 4689cd739d802e0026b36a99cfd4ea5b3ae5af3b | 5fe043e52428d2288128ff2557f5a74cb5788fcb | refs/heads/main | 2023-08-28T04:44:12.917318 | 2021-10-24T22:50:39 | 2021-10-24T22:50:39 | 420,283,726 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | #Python packages
from typing import Optional
from uuid import UUID
from datetime import date
#Pydantic packages
from pydantic import BaseModel
from pydantic import Field,EmailStr
#Classes for User
class UserBase(BaseModel):
id: UUID = Field(
...,
title="User ID"
)
username: str = Field(
...,
title="Username"
)
email: EmailStr = Field(
...,
title="Email"
)
class UserLogin(UserBase):
password: str = Field(
...,
title="Password",
min_length=8,
max_length=64
)
class User(UserBase):
first_name: str = Field(
...,
title="First Name",
max_length=50,
min_length=2
)
last_name: str = Field(
...,
title="Last Name",
max_length=50,
min_length=2
)
birth_date: Optional[date] = Field(
default=None,
title="Birth Date"
)
class UserFull(User, UserLogin):
pass
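# Illustrative smoke test; every field value below is made up, not taken from
# any real API:
if __name__ == "__main__":
    from uuid import uuid4
    demo = UserFull(
        id=uuid4(),
        username="jdoe",
        email="jdoe@example.com",
        password="s3cretpass",
        first_name="John",
        last_name="Doe",
    )
    print(demo.json())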
| [
"josueflopez@msn.com"
] | josueflopez@msn.com |
6ad1d197d98aacc51bf406e2a0f6e4a567c7be4c | 0c96fa82fed9f5c11deb18c9c13225f9debb2134 | /env/Scripts/django-admin.py | 9e3ccf83f5865ffddd389d1100daec6c34a80d80 | [] | no_license | Amoney22/my-first-website | 71405be5b07e6ef2d905a1b3c472728b9f753a52 | 74e57142cc89f10b922af89dc6cfea6f1b95d491 | refs/heads/master | 2021-01-15T10:57:50.344939 | 2017-08-10T21:15:57 | 2017-08-10T21:15:57 | 99,602,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!c:\django girls tutorials\env\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"seriesofillusions@outlook.com"
] | seriesofillusions@outlook.com |
1fceeeb430f33f411c419f486b38d0774cab3965 | 838965f73c5ae0d47a68512e431e18200a21097d | /curriculum_vitae/models/project_experiences.py | ae796f8be1d10b7ebb30b58e1759424416b2ee48 | [] | no_license | whitestar592020/Odoo13Development | 61851bc0afd41aef4f06975f477d69a6e6e5d706 | f96745dad88484567511ed07e9f4bd07b6b3afd6 | refs/heads/master | 2023-01-16T02:27:21.814952 | 2020-11-30T06:47:32 | 2020-11-30T06:47:32 | 298,996,876 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from odoo import models, fields
class ProjectExperiences(models.Model):
_name = 'project.experiences'
image = fields.Binary(string="Image")
name = fields.Char(string="Project Name", translate=True)
position = fields.Char(string="Position", translate=True)
responsibilities = fields.Char(string="Responsibilities", translate=True)
programming_languages = fields.Char(string="Programming Languages", translate=True)
period_from = fields.Date(string="Period From")
period_to = fields.Date(string="Period To")
description = fields.Text(string="Description", translate=True)
curriculum_vitae_id = fields.Many2one('curriculum.vitae')
| [
"noreply@github.com"
] | noreply@github.com |
7772681d2d62d42390bb457998a5fe0649424b02 | d1247c530074efccc62f7538eb8cc354f61a8d38 | /genotype.py | 542e097f76579c61f8a425068c989c854ba3de59 | [] | no_license | Gudzy/ga_segmentation | 9b1c4ef8a0bac57806c7934dc35e1e36db16fa67 | 993dfdfc0543b28a13994c6da022c0ad337b150c | refs/heads/master | 2021-03-14T17:38:11.439099 | 2020-03-12T08:43:31 | 2020-03-12T08:43:31 | 246,780,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | import numpy as np
from numba import jit
from skimage import io, color
import queue
from tqdm import tqdm
NEIGHBORS = np.array([(0, 0), (0, -1), (0, 1), (-1, 0), (1, 0)], dtype=int)  # np.int was removed in NumPy >= 1.24
def _graph_to_genotype(image, graph):
que = queue.PriorityQueue()
    genotype = np.zeros(len(graph.keys()), dtype=int)
mst = []
visited = set()
starting_vertex_idx = np.random.randint(len(graph.keys()))
starting_vertex = list(graph.keys())[starting_vertex_idx]
visited.add(starting_vertex)
for next_to, weight in graph[starting_vertex].items():
que.put((weight, starting_vertex, next_to))
while que.empty() is False:
edge = que.get()
weight, frm, to = edge
if to in visited:
continue
visited.add(to)
_, node_u, node_v = edge
raveled_index = np.ravel_multi_index(node_v, image.shape[:-1])
genotype[raveled_index] = calculate_direction(node_v, node_u)
mst.append(edge[1:])
for next_to, weight in graph[to].items():
if next_to not in visited:
que.put((weight, to, next_to))
return genotype
def read_image(image_path, use_lab=False):
image = io.imread(image_path)[..., 0:3]
if use_lab:
return color.rgb2lab(image)
else:
return image / 255
@jit(nopython=True)
def _calculate_dist(image, p1, p2):
return np.linalg.norm(image[p1] - image[p2])
@jit(nopython=True)
def _is_valid_vertex(image, vertex):
shape = image.shape[0:2]
return 0 <= vertex[0] < shape[0] and 0 <= vertex[1] < shape[1]
@jit(nopython=True)
def calculate_direction(u, v):
direction = np.array([v[i] - u[i] for i in range(2)])
curr_direction = 0
for i in range(NEIGHBORS.shape[0]):
if np.all(NEIGHBORS[i] == direction):
return curr_direction
curr_direction += 1
def _image_to_graph(image):
valid_vertices = np.argwhere(np.ones_like(image[..., 0]))
graph = dict.fromkeys([tuple(vertex) for vertex in valid_vertices])
# iterate over all vertices
for vertex in valid_vertices:
vertex = tuple(vertex)
adj = {}
# iterate over all neighbors
for neighbor in vertex + NEIGHBORS[1:]:
neighbor = tuple(neighbor)
if _is_valid_vertex(image, neighbor):
adj[neighbor] = _calculate_dist(image, vertex, neighbor)
graph[vertex] = adj
return graph
def create_population(image_path, population_size, use_lab=False):
image = read_image(image_path, use_lab=use_lab)
graph = _image_to_graph(image)
return [_graph_to_genotype(image, graph) for _ in tqdm(range(population_size))]
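# Illustrative usage (the file name and population size are made up):
#   population = create_population("segment_me.png", population_size=20, use_lab=True)
# Each genotype is a flat int array: genotype[i] is an index into NEIGHBORS
# telling pixel i which neighbour it points to (0 means it points to itself).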
| [
"gustdyn@gmail.com"
] | gustdyn@gmail.com |
701ad972f588196ecf39150a4070c732b52007fe | 0badae2ddd1fe30db5ccf1b9cbbc0fdbd7df42e4 | /leetcode/containsDuplicates.py | a0db15741676bb3ee080fcf6b034556a2ce93def | [] | no_license | danny128373/LeetCode-Challenges | 17fe336a1076914e72d2ce9b77aa2f2b24305ad4 | 8d332d473e7231cf1dff9faebe766f7779b61fdd | refs/heads/master | 2023-01-08T23:45:06.149917 | 2020-11-20T17:00:44 | 2020-11-20T17:00:44 | 306,706,338 | 0 | 0 | null | 2020-11-20T17:00:46 | 2020-10-23T17:40:48 | Python | UTF-8 | Python | false | false | 241 | py | class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
        # a set drops duplicates, so the lengths differ iff nums contains one
        return len(nums) != len(set(nums))
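# Quick check (illustrative):
#   Solution().containsDuplicate([1, 2, 3, 1])  # -> True
#   Solution().containsDuplicate([1, 2, 3, 4])  # -> False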
| [
"daniel.meza29@yahoo.com"
] | daniel.meza29@yahoo.com |
d6ad3b3fcb33608098e33669ace7929712eed35e | ab817e17b79835bdee1e6985520daaf43ded4f85 | /pluginlib.py | f500e09d5e3e6c5d1ba0c39f0a7ba6369a03eb3b | [
"MIT"
] | permissive | thodnev/pluginlib | c36e1b14b6b74c0ca6cf5b283d92f326b00e20c0 | cfeefc5e9bebccef3c22e6f9dfc5583afe0ed559 | refs/heads/master | 2020-06-13T04:02:13.308062 | 2016-12-03T04:46:19 | 2016-12-03T04:46:19 | 75,450,437 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,250 | py | '''
pluginlib: use dynamic python plugins with natural inheritance model
Usage:
1. Define plugin root class by subclassing PluginBase:
class SomeCls(PluginBase):
... some code here ...
2. Write some plugins, that inherit from plugin root:
class MyPlugin(SomeCls):
... extend/add/overwrite methods ...
3. Get a mixed subclass with plugins as <Root>.PluginExtended:
WithPlugins = SomeCls.PluginExtended
or
class WithPlugins(SomeCls.PluginExtended):
... so could be used during further subclassing ...
* The advantage is that SomeCls doesn't even need to know which plugins would
be used in future. And it's still natural inheritance scheme for plugins.
* Plugins must be imported before <Root>.PluginExtended is referenced,
otherwise need to reference it again.
* The order of plugins in resulting class is defined by order of set
iteration, so don't rely on any ordering.
* Attribute __name__ of resulting class will be <Root_name>PluginExtended,
use subclassing scheme if crucial.
* Plugin branches could be disabled/enabled/changed/etc by manipulating
<Root>.__pluginextensions__ set and re-referencing <Root>.PluginExtended
'''
# TODO:
# ? order when ext-subclassing is arbitrary
# ? ext-subclass is derived from PluginBaseMeta
# - 'del' hack used to create PluginBase
# ? do we need to keep full list of plugins?
# + done: need to implement caching in PluginExtended property
__all__ = ['PluginBase']
class PluginBaseMeta(type):
def __new__(mcls, name, bases, namespace):
cls = super(PluginBaseMeta, mcls).__new__(mcls, name, bases, namespace)
if not hasattr(cls, '__pluginextensions__'): # parent class
cls.__pluginextensions__ = {cls} # set reflects lowest plugins
cls.__pluginroot__ = cls
cls.__pluginiscachevalid__ = False
else: # subclass
assert not set(namespace) & {'__pluginextensions__',
'__pluginroot__'} # only in parent
exts = cls.__pluginextensions__
exts.difference_update(set(bases)) # remove parents
exts.add(cls) # and add current
cls.__pluginroot__.__pluginiscachevalid__ = False
return cls
@property
def PluginExtended(cls):
# After PluginExtended creation we'll have only 1 item in set
# so this is used for caching, mainly not to create same PluginExtended
if cls.__pluginroot__.__pluginiscachevalid__:
return next(iter(cls.__pluginextensions__)) # only 1 item in set
else:
name = cls.__pluginroot__.__name__ + 'PluginExtended'
extended = type(name, tuple(cls.__pluginextensions__), {})
cls.__pluginroot__.__pluginiscachevalid__ = True
return extended
# 2.x compatible creation of class from metaclass
# Needed to make it simply inheritable, with no need to specify meta each time
PluginBase = PluginBaseMeta(
'PluginBase', (object,), {'__metaclass__': PluginBaseMeta})
del PluginBase.__pluginextensions__ # dirty hack to avoid being plugin root
# Test below
if __name__ == '__main__':
def ext(cls):
e = cls.__pluginextensions__
res = '{}: {}'.format(cls.__pluginroot__.__name__,
sorted(e, key=lambda c: c.__name__))
print(res)
tree = '''\tPLUGIN TREE
R o o t OtherRoot
/|\ |
/ | \ |
A B C OtherSub
| /|
|/ |
D E
|
|
F
'''
print(tree)
class Root(PluginBase):
pass
ext(Root)
class OtherRoot(PluginBase):
pass
ext(Root)
ext(OtherRoot)
class OtherSub(OtherRoot):
pass
ext(OtherSub)
class A(Root):
pass
class B(Root):
pass
class C(Root):
pass
ext(C)
class D(A, B):
pass
ext(D)
class E(B):
pass
ext(E)
class F(D):
pass
ext(F)
XT = F.PluginExtended
XT2 = E.PluginExtended
print('Same? (T)', XT is XT2)
class ETC(F):
pass
XT3 = E.PluginExtended
print('Now cache should be rebuilt. Same? (F)', XT3 is XT2)
| [
"noreply@github.com"
] | noreply@github.com |
ddc61bcfaaa6888c845392caf8c5235ba528dfa8 | 0f5ce0c0c49b71ce5866cffee93b2a8537da005c | /app/app/settings.py | 2ff7e7ec1622687a5c70f08bdd81294163ef8965 | [
"MIT"
] | permissive | fr0gz/recipe-app-api | 1fb909dbf2c845238249aa195fba3ff79b830a1e | 68fe19aa2e1caafc9d658a0cdd06323f00b08f09 | refs/heads/main | 2023-01-13T13:25:37.450959 | 2020-11-24T20:41:37 | 2020-11-24T20:41:37 | 315,443,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&t@94gzwsbjzf_&(@x=&js2hu3o$6tkoz^!7h@1oa@tny$%d97'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Local
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| [
"cguevarak@fen.uchile.cl"
] | cguevarak@fen.uchile.cl |
62ceb76be57c25a2e488327b27172132967004f9 | 3a13430d694f7c159f73ff91f2cc1d6e46cec5ca | /season/welcome/migrations/0003_remove_order_date.py | 73e1f1c834023be39d7eaa932c004508fcf4f0ea | [] | no_license | drand23andrey/season-shop | 4c236bbd0ea7588db5fcf7de48623fb46d050131 | cb99280e6361a97a8c31485efe44007310131688 | refs/heads/master | 2022-12-01T00:05:57.943736 | 2020-07-09T15:31:35 | 2020-07-09T15:31:35 | 175,046,691 | 1 | 0 | null | 2022-11-22T05:55:51 | 2019-03-11T17:04:25 | JavaScript | UTF-8 | Python | false | false | 329 | py | # Generated by Django 3.0.5 on 2020-04-22 14:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('welcome', '0002_remove_order_last_name'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='date',
),
]
| [
"dushkakruf@mail.ru"
] | dushkakruf@mail.ru |
e54737a669c120405ba1ae40ca1f029499c044b7 | 5660cc796ba00f5090c7de6208c2e0a77c2328ba | /utils/models/__init__.py | dfb823114abe17847e769d9c4a1db4047f2d0b29 | [] | no_license | OMalenfantThuot/scripts_raman | ac65fec1c5ac97a13b8c222e02241f02393f7709 | 2fd42a4b09d33fcf96da4b4d3340f67d21428b18 | refs/heads/master | 2023-07-19T20:11:10.457974 | 2023-07-18T18:15:26 | 2023-07-18T18:15:26 | 193,724,148 | 1 | 1 | null | 2023-08-28T20:00:38 | 2019-06-25T14:29:42 | Python | UTF-8 | Python | false | false | 297 | py | from .model import *
from .dropout_schnet import DropoutSchNet, DropoutAtomwise
from .patches_schnet import PatchesAtomisticModel, PatchesAtomwise
from .smoothtrainer import SmoothTrainer
from .memory_estimation import (
schnet_memory_estimation_for_graphene,
get_graphene_patches_grid,
)
| [
"malenfantthuotolivier@gmail.com"
] | malenfantthuotolivier@gmail.com |
7bf8d2a366551d6774730e60de1d62b78af16d52 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_108/125.py | c96c54037ce532c493eb7d9d77e0a2a5ad1f93b3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | #!/usr/bin/env python
import bisect
import sys
from collections import defaultdict
def main(args):
finname = '%s.in' % args[1]
foutname = '%s.out' % args[1]
with open(finname, 'r') as fin, open(foutname, 'w') as fout:
T = int(fin.readline().strip())
for i in xrange(1, T+1):
num_vines = int(fin.readline().strip())
vinestats = []
for j in xrange(num_vines):
d, l = [int(_) for _ in fin.readline().strip().split()]
vinestats.append((d, l))
D = int(fin.readline().strip())
memo = dict()
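            # ok(v, s): True iff the monkey can reach distance D starting on
            # vine v with a swing of length s; memoised on (vine, swing length).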
def ok(start_vine, swing_length):
if (start_vine, swing_length) in memo:
return memo[(start_vine, swing_length)]
vine_d, vine_l = vinestats[start_vine]
if vine_l < swing_length:
swing_length = vine_l
if vine_d + swing_length >= D:
memo[(start_vine, swing_length)] = True
return True
last_vine = bisect.bisect(vinestats, (vine_d+swing_length+1, 0), start_vine)
i = start_vine+1
result = False
while i < last_vine:
if ok(i, vinestats[i][0]-vine_d):
memo[(start_vine, swing_length)] = True
return True
i+=1
memo[(start_vine, swing_length)] = False
return False
result = 'YES' if ok(0, vinestats[0][0]) else 'NO'
result_str = 'Case #%s: %s\n' % (i, result)
# print result_str,
fout.write(result_str)
if __name__ == '__main__':
status = main(sys.argv)
sys.exit(status)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d0e69f8ba2907a96de17eac4ef173bcbad3780a0 | 16a536eacd586f5a87934fcc999adf551edfe259 | /experiments/common/helper_fxns.py | bbcebf952d32e4350a3e0d3a5a8ff867246d9170 | [] | no_license | teganmaharaj/climate-dl | 5dad87aec737294c100800748fde61ace295d9aa | dd08fb6a85e8635ba86c06e19b4d487f76b45941 | refs/heads/master | 2021-01-11T02:07:09.911603 | 2016-10-10T16:57:04 | 2016-10-10T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,340 | py | from lasagne.layers import *
import theano
from theano import tensor as T
import lasagne
from lasagne.objectives import *
from lasagne.nonlinearities import *
from lasagne.updates import *
from lasagne.utils import *
import sys
import logging
def get_logger(out_folder):
logger=logging.getLogger('trainlog')
if len(logger.handlers) < 1:
logger = logging.getLogger('log_train')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('%s/results.txt'%(out_folder))
fh.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
logger.addHandler(fh)
return logger
def get_net(net_cfg, args):
l_out, ladder = net_cfg(args)
if args["mode"] == "2d":
X = T.tensor4('X')
elif args["mode"] == "3d":
X = T.tensor5('X')
ladder_output = get_output([l_out] + ladder, X)
net_out = ladder_output[0]
# squared error loss is between the first two terms
loss = squared_error(net_out, X).mean()
ladder_output = ladder_output[1::]
if "ladder" in args:
sys.stderr.write("using ladder connections for conv\n")
for i in range(0, len(ladder_output), 2):
sys.stderr.write("ladder connection between %s and %s\n" % (str(ladder[i].output_shape), str(ladder[i+1].output_shape)) )
assert ladder[i].output_shape == ladder[i+1].output_shape
loss += args["ladder"]*squared_error(ladder_output[i], ladder_output[i+1]).mean()
net_out_det = get_output(l_out, X, deterministic=True)
# this is deterministic + doesn't have the reg params added to the end
loss_det = squared_error(net_out_det, X).mean()
params = get_all_params(l_out, trainable=True)
lr = theano.shared(floatX(args["learning_rate"]))
if "optim" not in args:
updates = nesterov_momentum(loss, params, learning_rate=lr, momentum=0.9)
else:
if args["optim"] == "rmsprop":
updates = rmsprop(loss, params, learning_rate=lr)
elif args["optim"] == "adam":
updates = adam(loss, params, learning_rate=lr)
train_fn = theano.function([X], [loss, loss_det], updates=updates)
loss_fn = theano.function([X], loss)
out_fn = theano.function([X], net_out_det)
return {
"train_fn": train_fn,
"loss_fn": loss_fn,
"out_fn": out_fn,
"lr": lr,
"l_out": l_out
}
def make_dense_block(inp, args, conv_kwargs=dict(filter_size=3, pad=1)):
conc = inp
for i in range(args['L']):
# if "bn_relu" in args:
# bn = BatchNormLayer(conc)
# bn_relu = NonlinearityLayer(bn, nonlinearity=args['nonlinearity'])
# else:
conv = Conv2DLayer(conc, **conv_kwargs)
conc = ConcatLayer([conc, conv], axis=1)
return conc
def make_inverse_dense_block(inp, layer, args):
conc = inp
inv_layers = get_all_layers(layer)[::-1]
#3 layers per comp unit and args['L'] units per block
first_concat_lay = inv_layers[2*args['L']-2]
print first_concat_lay
conc = make_concat_inverse(conc,first_concat_lay)
return conc, inv_layers[2*args['L']]
def make_concat_inverse(inp, concat_layer):
first_input_shape = concat_layer.input_shapes[0][concat_layer.axis]
return SliceLayer(inp,indices=slice(0,first_input_shape), axis=concat_layer.axis)
def print_network(l_out):
for layer in get_all_layers(l_out):
print layer, layer.output_shape
print count_params(layer)
def create_run_dir(custom_rc=False):
results_dir = os.getcwd() + '/results'
run_num_file = os.path.join(results_dir, "run_num.txt")
if not os.path.exists(results_dir):
print "making results dir"
os.mkdir(results_dir)
if not os.path.exists(run_num_file):
print "making run num file...."
f = open(run_num_file,'w')
f.write('0')
f.close()
f = open(run_num_file,'r+')
run_num = int(f.readline()) + 1
f.seek(0)
f.write(str(run_num))
run_dir = os.path.join(results_dir,'run%i'%(run_num))
os.mkdir(run_dir)
if custom_rc:
make_custom_config_file(run_dir)
return run_dir
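# Rough usage sketch (names my_net_cfg and X_batch are illustrative; assumes
# a net_cfg function returning (l_out, ladder) and an args dict with the keys
# read in get_net, e.g. "mode" and "learning_rate"):
#   run_dir = create_run_dir()
#   logger = get_logger(run_dir)
#   net = get_net(my_net_cfg, {"mode": "2d", "learning_rate": 1e-3})
#   loss, loss_det = net["train_fn"](X_batch)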
| [
"ejracah@gmail.com"
] | ejracah@gmail.com |
b754ef64cb90d4767c378a2cfddb81f139ca8828 | ebcfc261ba65b2296e3e3ee409cc6061afaada13 | /psi/jlab/jlab/bin/ipython3 | 15703db575e30ec225c8512cfe0564a7c31c620a | [] | no_license | sosnus/psi | d281f74b9b6f85446ea77ceafd28b73a85567ba9 | cfeb574cbd36c2d233772eaa3221a08b0d40bd56 | refs/heads/master | 2020-04-29T00:45:59.712159 | 2019-03-18T01:46:20 | 2019-03-18T01:46:20 | 175,706,747 | 0 | 0 | null | 2019-03-18T01:46:21 | 2019-03-14T22:02:44 | Python | UTF-8 | Python | false | false | 256 | #!/home/sosnus/python/psi/jlab/jlab/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from IPython import start_ipython
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(start_ipython())
| [
"sosnus15@gmail.com"
] | sosnus15@gmail.com | |
a7db53021d314e8a8940afd0b9d509d6c3431464 | eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd | /homeassistant/components/wilight/light.py | 3236b3b3851a234fc1d369afef91f7753338940f | [
"Apache-2.0"
] | permissive | JeffLIrion/home-assistant | 53966b81b5d5816679f12fc761f79e8777c738d6 | 8f4ec89be6c2505d8a59eee44de335abe308ac9f | refs/heads/dev | 2023-08-22T09:42:02.399277 | 2022-02-16T01:26:13 | 2022-02-16T01:26:13 | 136,679,169 | 5 | 2 | Apache-2.0 | 2023-09-13T06:59:25 | 2018-06-09T00:58:35 | Python | UTF-8 | Python | false | false | 5,995 | py | """Support for WiLight lights."""
from pywilight.const import (
ITEM_LIGHT,
LIGHT_COLOR,
LIGHT_DIMMER,
LIGHT_ON_OFF,
SUPPORT_NONE,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN, WiLightDevice
def entities_from_discovered_wilight(hass, api_device):
"""Parse configuration and add WiLight light entities."""
entities = []
for item in api_device.items:
if item["type"] != ITEM_LIGHT:
continue
index = item["index"]
item_name = item["name"]
if item["sub_type"] == LIGHT_ON_OFF:
entity = WiLightLightOnOff(api_device, index, item_name)
elif item["sub_type"] == LIGHT_DIMMER:
entity = WiLightLightDimmer(api_device, index, item_name)
elif item["sub_type"] == LIGHT_COLOR:
entity = WiLightLightColor(api_device, index, item_name)
else:
continue
entities.append(entity)
return entities
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up WiLight lights from a config entry."""
parent = hass.data[DOMAIN][entry.entry_id]
# Handle a discovered WiLight device.
entities = entities_from_discovered_wilight(hass, parent.api)
async_add_entities(entities)
class WiLightLightOnOff(WiLightDevice, LightEntity):
"""Representation of a WiLights light on-off."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_NONE
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
class WiLightLightDimmer(WiLightDevice, LightEntity):
"""Representation of a WiLights light dimmer."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(self._status.get("brightness", 0))
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on,set brightness if needed."""
# Dimmer switches use a range of [0, 255] to control
# brightness. Level 255 might mean to set it to previous value
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
await self._client.set_brightness(self._index, brightness)
else:
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
def wilight_to_hass_hue(value):
"""Convert wilight hue 1..255 to hass 0..360 scale."""
return min(360, round((value * 360) / 255, 3))
def hass_to_wilight_hue(value):
"""Convert hass hue 0..360 to wilight 1..255 scale."""
return min(255, round((value * 255) / 360))
def wilight_to_hass_saturation(value):
"""Convert wilight saturation 1..255 to hass 0..100 scale."""
return min(100, round((value * 100) / 255, 3))
def hass_to_wilight_saturation(value):
"""Convert hass saturation 0..100 to wilight 1..255 scale."""
return min(255, round((value * 255) / 100))
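# Example: hass_to_wilight_hue(180) == 128, and wilight_to_hass_hue(128)
# gives back ~180.706 -- the two conversions are approximate inverses
# because of the integer rounding on the WiLight side.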
class WiLightLightColor(WiLightDevice, LightEntity):
"""Representation of a WiLights light rgb."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(self._status.get("brightness", 0))
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return [
wilight_to_hass_hue(int(self._status.get("hue", 0))),
wilight_to_hass_saturation(int(self._status.get("saturation", 0))),
]
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on,set brightness if needed."""
# Brightness use a range of [0, 255] to control
# Hue use a range of [0, 360] to control
# Saturation use a range of [0, 100] to control
if ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
hue = hass_to_wilight_hue(kwargs[ATTR_HS_COLOR][0])
saturation = hass_to_wilight_saturation(kwargs[ATTR_HS_COLOR][1])
await self._client.set_hsb_color(self._index, hue, saturation, brightness)
elif ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR not in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
await self._client.set_brightness(self._index, brightness)
elif ATTR_BRIGHTNESS not in kwargs and ATTR_HS_COLOR in kwargs:
hue = hass_to_wilight_hue(kwargs[ATTR_HS_COLOR][0])
saturation = hass_to_wilight_saturation(kwargs[ATTR_HS_COLOR][1])
await self._client.set_hs_color(self._index, hue, saturation)
else:
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
| [
"noreply@github.com"
] | noreply@github.com |
79a4bb8bec0d2d35bfcfb2c239be6aee46b0fd66 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_373/ch4_2020_04_12_18_58_48_907546.py | cde9dac5e0e2b7c03893f3ea611cee967836abd9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | def classifica_idade (idade):
    if idade <= 11:
        print('criança')
    elif idade >= 12 and idade <= 17:
        print('adolescente')
    else:
        print('adulto')
| [
"you@example.com"
] | you@example.com |
4f6c2ddefe3cb20c6fbca82335e3c70b1d4c0626 | bf2ea61883b8dd0074710ac532df35413720e5ec | /UserTask/migrations/0001_initial.py | 8d997be8d514af2cb779c7d6edb3424a0ebe6abc | [] | no_license | Jigar2998/Exam | f068a14d9f54ea4a84da4c2916d83dc28ac98389 | fa6795249961019facf3bd7bf0eab116d161bb2d | refs/heads/master | 2023-03-28T05:13:40.554728 | 2021-03-24T14:57:41 | 2021-03-24T14:57:41 | 350,630,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | # Generated by Django 3.1.5 on 2021-03-23 14:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25)),
('email', models.EmailField(max_length=254)),
('message', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='user',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fname', models.CharField(max_length=25)),
('lname', models.CharField(max_length=25)),
('email', models.EmailField(max_length=254)),
('mobile', models.CharField(max_length=10)),
('gender', models.CharField(max_length=10)),
('birth_date', models.DateField()),
('address', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='')),
('passeword', models.CharField(max_length=20)),
],
),
]
| [
"jigarkumarramani1998@gmail.com"
] | jigarkumarramani1998@gmail.com |
6405b2626aba482937b14dfeafe8be7ddfd5657d | 6392354e74cce4a303a544c53e13d0a7b87978ee | /m4/socket_correlation/company_review/lock_test.py | 154a5366cb5434bb78837c326d9e8b9c99355720 | [] | no_license | music51555/wxPythonCode | dc35e42e55d11850d7714a413da3dde51ccdd37e | f77b71ed67d926fbafd1cfec89de8987d9832016 | refs/heads/master | 2020-04-11T20:20:38.136446 | 2019-04-01T09:17:34 | 2019-04-01T09:17:34 | 162,067,449 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | import time
from threading import Thread,RLock
mutexA = mutexB = RLock()
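# Because mutexA and mutexB are the *same* RLock object, a thread that
# already holds it can re-acquire it; the classic A/B lock-ordering
# deadlock that two plain Lock objects would produce in f1/f2 cannot occur.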
class MyThread(Thread):
def __init__(self,name):
super(MyThread,self).__init__()
self.name = name
def run(self):
self.f1()
self.f2()
    def f1(self):
        mutexA.acquire()
        print('%s acquired lock A' % self.name)
        mutexB.acquire()
        print('%s acquired lock B' % self.name)
        mutexA.release()
        print('%s released lock A' % self.name)
        mutexB.release()
        print('%s released lock B' % self.name)
    def f2(self):
        mutexB.acquire()
        print('%s acquired lock B' % self.name)
        time.sleep(0.1)
        mutexA.acquire()
        print('%s acquired lock A' % self.name)
        mutexB.release()
        print('%s released lock B' % self.name)
        mutexA.release()
        print('%s released lock A' % self.name)
if __name__ == '__main__':
for i in range(3):
        m = MyThread('worker-%s' % i)
m.start() | [
"music51555@163.com"
] | music51555@163.com |
4bbf48067b37dfa9b7a43b74bd31d72cf471611d | 8f8e378c0ce4224244582c506c268edda3cc3b30 | /Common/OpenCV/Day1/open2.py | 6683662896f77c68b9e3b75157a97e725953ee7e | [] | no_license | srsapireddy/Diploma-in-AI_NIELIT_Files | 223318319b2d4b8647d77b99d1ba03f0d6e15cf6 | 9e2ed78fbe03369ebef1aa81f3417fc21bdd4107 | refs/heads/master | 2021-05-17T14:28:00.059617 | 2020-03-29T09:28:04 | 2020-03-29T09:28:04 | 250,820,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | import cv2 as cv
img=cv.imread('walking.jpg',0)
cv.imshow('img1',img)
print(img)
print(img.shape)
cv.waitKey(0)
| [
"sapireddyrahul@gmail.com"
] | sapireddyrahul@gmail.com |
cfd2f3a4ff73b738da8ead70acf5ea441bdb0daa | ade929eeed0a13bee8e05619536639a32c60c22e | /Task0.py | a6a1e4b1a5b8d19cec83d7a73659f58ce07a3784 | [] | no_license | liuyuan512/Udacity-Python-Project | b2022b1632d73d43de03f1fc8f15029eabc2f0e5 | aca7e65b4bae7f0a6e41ede6180045ca34bf56fd | refs/heads/master | 2021-05-14T05:41:04.135673 | 2018-01-05T05:01:42 | 2018-01-05T05:01:42 | 116,226,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | """
The file below reads the text-message and call records from csv files.
You will learn more about reading files in later lessons.
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
"""
Task 0:
What is the first record in the text-message log? What is the last record in the call log?
Output format:
"First record of texts, <incoming number> texts <answering number> at time <time>"
"Last record of calls, <incoming number> calls <answering number> at time <time>, lasting <during> seconds"
"""
print("First record of texts, {} texts {} at time {}".format(texts[0][0],texts[0][1],texts[0][2]))
print("Last record of calls, {} calls {} at time {}, lasting {} seconds".format(calls[-1][0],calls[-1][1],calls[-1][2],calls[-1][3]))
| [
"liuyuan@liuyuandeMacBook-Pro.local"
] | liuyuan@liuyuandeMacBook-Pro.local |
c517166cc1f3487953498d678a31ad7fb19e3f58 | 11aaa9f4fccab3f05deddb092fc87fd24f08d643 | /music/views.py | fd6f24bc78f369241153d5583e244dbaaacd3efe | [] | no_license | prorammerarc/Django-MediaPalyer | 3282499c739670e74b742932ba133db086e97dcb | b97bedcd662ceff83eb645507cc04deb4956748f | refs/heads/master | 2021-05-11T21:05:12.025427 | 2018-01-14T19:36:04 | 2018-01-14T19:36:04 | 117,460,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | from django.shortcuts import render,get_object_or_404
from django.http import HttpResponse,Http404
from .models import Album,Song
from django.template import loader
def index(request):
all_albums = Album.objects.all()
template = loader.get_template("music/index.html")
context = {
'all_album':all_albums,
}
return HttpResponse(template.render(context,request))
def detail(request, album_id):
    album = get_object_or_404(Album, pk=album_id)
    context = {
        'album': album,
    }
    template = loader.get_template("music/detail.html")
    return HttpResponse(template.render(context, request))
def favourite(request,album_id):
album = get_object_or_404(Album, pk=album_id)
try:
selected_song = album.song_set.get(pk=request.POST['song'])
except (KeyError,Song.DoesNotExist):
return render(request,"music/detail.html",{
'album': album,
'error_message':"you did't selected any Songs"
})
else:
if selected_song.is_favourite is False:
selected_song.is_favourite = True
else:
selected_song.is_favourite = False
selected_song.save()
return render(request,"music/detail.html",{"album": album}) | [
"32127426+prorammerarc@users.noreply.github.com"
] | 32127426+prorammerarc@users.noreply.github.com |
b71b434ca0c4d33ae478c5de3fe560b4df6a19b4 | f84dbb215fa3c8917444d69d8a626523ded8ea01 | /utils.py | 7bec8f638356dce37131e8c58611384dc39a198d | [] | no_license | amirunpri2018/style_transfer | 3086963a6315d838db7272b18456d4518f16e103 | f613655a5964f78ed4a81e6051f4a598baf5f7cd | refs/heads/master | 2020-04-11T14:36:55.749802 | 2017-11-20T11:29:44 | 2017-11-20T19:00:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import imageio
# if Keras model has one output, it returns a tensor, otherwise a list of tensors
def make_list(x):
if isinstance(x, list):
return x
return [x]
# auxiliary function to inverse ImageNet preprocessing
def postprocess(x):
x[..., 0] += 103.939
x[..., 1] += 116.779
x[..., 2] += 123.68
x = x[..., ::-1]
return x
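# 103.939 / 116.779 / 123.68 are the per-channel (BGR) ImageNet means that
# Keras' caffe-style preprocessing subtracts; adding them back and reversing
# the last axis restores an RGB image suitable for display.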
def create_gif(images, output_file, duration=0.1):
imageio.mimsave(output_file, images, duration=duration)
| [
"emzajac@gmail.com"
] | emzajac@gmail.com |
a44f361047b27f3505d603357681d2fca47f37b6 | bad686ba27539a3d3286418cc3ebf2aa80ae4958 | /src/pong/full-game.py | 383a097d39786a83f75f9eefa942508b67aa3626 | [] | no_license | AaryaBatchu/micropython | f0a31b579b3a998586f26b92036875c93588eca7 | aef7d33937352e9ab6f9615bfc5bf9aa1a9bee57 | refs/heads/main | 2023-08-19T13:33:15.006432 | 2021-10-23T19:06:26 | 2021-10-23T19:06:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,452 | py | # Pong game on Raspberry Pi Pico with a OLED and two Potentimeters
import machine
from machine import Pin, PWM, SPI
import ssd1306
from utime import sleep
import random # random direction for new ball
sda = machine.Pin(0)
scl = machine.Pin(1)
pot_pin = machine.ADC(26)
WIDTH = 128
HEIGHT = 64
i2c = machine.I2C(0, sda=sda, scl=scl)
oled = ssd1306.SSD1306_I2C(WIDTH, HEIGHT, i2c)
# connect the center tops of the potentiometers to ADC0 and ADC1
pot_pin_1 = machine.ADC(27)
pot_pin_2 = machine.ADC(26) # make them the same for testing
# lower right corner with USB connector on top
SPEAKER_PIN = 16
# create a Pulse Width Modulation Object on this pin
speaker = PWM(Pin(SPEAKER_PIN))
# globals variables
# static variables are constants are uppercase variable names
HALF_WIDTH = int(WIDTH / 2)
HALF_HEIGHT = int(HEIGHT / 2)
BALL_SIZE = 3 # 2X2 pixels
PAD_WIDTH = 2
PAD_HEIGHT = 8
HALF_PAD_WIDTH = int(PAD_WIDTH / 2)
HALF_PAD_HEIGHT = int(PAD_HEIGHT / 2)
POT_MIN = 3000
POT_MAX = 65534
MAX_ADC_VALUE = 65534 # Maximum value from the Analog to Digital Converter is 2^16 - 1
# dynamic global variables use lowercase
paddle1_vel = 0
paddle2_vel = 0
l_score = 0
r_score = 0
# continiuous update of the paddle and ball
# play_startup_sound()
# start with the ball in the center
ball_x = int(WIDTH / 2)
ball_y = int(HEIGHT / 2)
# set the initial directinon to down to the right
ball_x_dir = 1
ball_y_dir = 1
def play_startup_sound():
speaker.duty_u16(1000)
speaker.freq(600)
sleep(.25)
speaker.freq(800)
sleep(.25)
speaker.freq(1200)
sleep(.25)
speaker.duty_u16(0)
def play_bounce_sound():
speaker.duty_u16(1000)
speaker.freq(900)
sleep(.25)
speaker.duty_u16(0)
def play_score_sound():
speaker.duty_u16(1000)
speaker.freq(600)
sleep(.25)
speaker.freq(800)
sleep(.25)
speaker.duty_u16(0)
# note that OLEDs have problems with screen burn it - don't leave this on too long!
def border(WIDTH, HEIGHT):
oled.rect(0, 0, WIDTH, HEIGHT, 1)
# Takes an input number vale and a range between high-and-low and returns it scaled to the new range
# This is similar to the Arduino map() function
def valmap(value, istart, istop, ostart, ostop):
return int(ostart + (ostop - ostart) * ((value - istart) / (istop - istart)))
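# e.g. valmap(32768, 0, 65535, 0, 63) ~= 31: scales a 16-bit ADC reading
# onto the 64-pixel screen height.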
# draw a vertical bar
def draw_paddle(paddle_no, paddle_center):
if paddle_no == 1:
x = 0
else:
x = WIDTH - 2
y = paddle_center - HALF_PAD_HEIGHT
oled.fill_rect(x, y, PAD_WIDTH, PAD_HEIGHT, 1) # fill with 1s
def draw_ball():
oled.fill_rect(ball_x, ball_y, BALL_SIZE, BALL_SIZE, 1) # square balls for now
# The main event loop
while True:
oled.fill(0) # clear screen
oled.vline(int(WIDTH / 2), 0, HEIGHT, 1)
# border(WIDTH, HEIGHT)
# read both the pot values
pot_val_1 = pot_pin_1.read_u16()
pot_val_2 = pot_pin_2.read_u16()
# print(pot_val_1)
# scale the values from the max value of the input is a 2^16 or 65536 to 0 to HEIGHT - PAD_HEIGHT
# ideally, it should range from 5 to 58
pot_val_1 = valmap(pot_val_1, POT_MIN, POT_MAX, HALF_PAD_HEIGHT, HEIGHT - HALF_PAD_HEIGHT - 2)
pot_val_2 = valmap(pot_val_2, POT_MIN, POT_MAX, HALF_PAD_HEIGHT, HEIGHT - HALF_PAD_HEIGHT - 2)
# print(pot_val, pot_scaled)
draw_paddle(1, pot_val_1 + HALF_PAD_HEIGHT)
draw_paddle(2, pot_val_2 + HALF_PAD_HEIGHT)
draw_ball()
#update ball position with the current directions
ball_x = ball_x + ball_x_dir
ball_y = ball_y + ball_y_dir
# update the ball direction if we are at the top or bottom edge
if ball_y < 0:
ball_y_dir = 1
#play_bounce_sound()
if ball_y > HEIGHT - 3:
ball_y_dir = -1
#play_bounce_sound()
# if it hits the paddle bounce else score
if ball_x < 1:
top_paddle = pot_val_1 - HALF_PAD_HEIGHT
bottom_paddle = pot_val_1 + HALF_PAD_HEIGHT
if ball_y > top_paddle and ball_y < bottom_paddle:
# we have a hit
ball_x_dir = 1
ball_x = 2
play_bounce_sound()
print('paddle hit on left edge', pot_val_1, top_paddle, bottom_paddle)
else:
# we have a score for the right player
play_score_sound()
r_score += 1
ball_x = int(WIDTH / 2)
ball_y = int(HEIGHT / 2)
ball_x_dir = random.randint(-1, 2)
if ball_x_dir == 0:
ball_x_dir = 1
ball_y_dir = random.randint(-1, 2)
print('score on left edge', pot_val_1, top_paddle, bottom_paddle)
sleep(.25)
if ball_x > WIDTH - 3:
ball_x = WIDTH - 4
top_paddle = pot_val_2 - HALF_PAD_HEIGHT
bottom_paddle = pot_val_2 + HALF_PAD_HEIGHT
if ball_y > top_paddle and ball_y < bottom_paddle:
ball_x_dir = -1
print('bounce on right paddle', pot_val_1, top_paddle, bottom_paddle)
else:
l_score += 1
play_score_sound()
ball_x = int(WIDTH / 2)
ball_y = int(HEIGHT / 2)
ball_x_dir = random.randint(-1, 2)
if ball_x_dir == 0:
ball_x_dir = 1
ball_y_dir = random.randint(-1, 2)
play_bounce_sound()
print('score on right edge', pot_val_1, top_paddle, bottom_paddle)
sleep(.25)
oled.text(str(l_score), HALF_WIDTH - 20, 5, 1)
oled.text(str(r_score), HALF_WIDTH + 5, 5, 1)
oled.show() | [
"dan.mccreary@gmail.com"
] | dan.mccreary@gmail.com |
e94621edc11fd90840ea6da3902f469a1c7f89ca | ef83f030d562bbb0e88bc07f49474c4768a9ad9f | /test.py | 4706e5d9d10a221b6c40765e05b18a2ae417d9ee | [] | no_license | FinnAllen/Machine-Learning | 7eea1e8565645b3377f56a1e02ad9752939d2d19 | e6ad9761928570ce01dbad7b0e78173add975cfc | refs/heads/master | 2021-08-22T14:21:42.555197 | 2020-08-08T00:02:24 | 2020-08-08T00:02:24 | 213,487,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | import tensorflow as tf
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0)
print(node1,node2) | [
"noreply@github.com"
] | noreply@github.com |
3bbf306660eebd6bf7546e4f3ed1d19ed77db877 | ce981ff3ffc0fe301f0a1a6092dafb491fb67e7d | /comments/api/urls.py | 43b29fc1302df28d37deba22fbf83d54c396db5f | [
"MIT"
] | permissive | videetssinghai/Blog-Rest-Api | 06a630b13a579c49f41cad6e1604dc36704ab32f | e81e0c1969b170cb482d2fdf7f2883efc42c69db | refs/heads/master | 2021-01-12T00:48:55.630129 | 2017-01-11T15:02:25 | 2017-01-11T15:02:25 | 78,300,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.conf.urls import url
from django.contrib import admin
from .views import (
CommentListAPIView,
CommentDetailAPIView,
)
urlpatterns = [
url(r'^$', CommentListAPIView.as_view(), name='list'),
url(r'^(?P<pk>\d+)/$', CommentDetailAPIView.as_view(), name='detail'),
]
| [
"videetssinghai@gmail.com"
] | videetssinghai@gmail.com |
d104bf3d62088709c04abe85b3ea2c295ccf4b59 | b38666d5710b241930e6ee6ec71c66a14a219d46 | /problem028.py | 355ebc184a773fd9d8ed389ec4fa1096e2e3bc61 | [] | no_license | yosuketanaka/project-euler | 3508bfbf3cdbc60f14d111c95b5c44f69cf85930 | 2cb747fb755684cd574590f168412e9e30e34a97 | refs/heads/master | 2016-09-05T11:12:13.555956 | 2011-12-20T00:50:37 | 2011-12-20T00:50:37 | 2,613,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | a=[2*n for n in range(1,501)]
y=1
sum=1
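# Ring with step x = 2n has corners y+x, y+2x, y+3x, y+4x, where y is the
# previous ring's top-right corner, so each ring adds 4y + 10x to the
# diagonal sum of the 1001x1001 spiral.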
for x in a:
sum+=y+x+y+2*x+y+3*x+y+4*x
y+=4*x
print sum
| [
"lazysuits@gmail.com"
] | lazysuits@gmail.com |
10670058ee66bbaa0c344330a9c20c5c2b7523ec | 4e14c0561e539faaf5e081b0d5eb7dda627c97f5 | /time.py | f7b4b67726552a96079cd32d63facdf909470a1e | [] | no_license | amruthaarun/Think-python | 44e7cd7978e3212c676cdf5eb3001aa78d11c76f | 08d7d6b803c2b1f58b11a0024a59fa2b5d77045f | refs/heads/master | 2021-01-21T13:03:59.660906 | 2017-06-04T14:07:08 | 2017-06-04T14:07:08 | 91,806,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | class Time(object):
def time_to_int(time):
minutes = time.hour * 60 + time.minute
seconds = minutes * 60 + time.second
return seconds
def print_time(t):
print '%.2d : %.2d : %.2d'%(t.hour,t.minute,t.second)
time1 = Time()
time1.hour = 11
time1.minute = 59
time1.second = 30
time1.print_time()
print time1.time_to_int()
| [
"noreply@github.com"
] | noreply@github.com |
bfb1b0feeb739779d22fd03dc6c1baf01c5a00e2 | 425badd89420156a586421bf26119193eea06f0a | /linkedlists/insert-in-sorted-dll/insert_in_sorted_dll.py | d69fbba02bbf51bef58fc49eb49827425ea40ed6 | [] | no_license | mich-chen/code-challenges | 66c82be05fdc8e9655525649283e2cea9e854916 | 286677fc7be84f8f8a8754c785a39d07a2c5604a | refs/heads/master | 2023-04-19T16:52:51.352926 | 2021-05-04T07:05:32 | 2021-05-04T07:05:32 | 301,622,163 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | class DoublyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
self.prev = None
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = DoublyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
node.prev = self.tail
self.tail = node
def print_doubly_linked_list(node):
while node:
print(node.data)
node = node.next
def sortedInsert(head, data):
new = DoublyLinkedListNode(data)
# insert in empty list
if head == None:
return new
# insert at beginning of list
if head.data > data:
new.next = head
head.prev = new
return new # new head pointer of ll
current = head
while current:
# if at tail of ll
if current.next == None and current.data <= data:
current.next = new
new.prev = current
break
# if inserting in middle of ll
# if current node is greater than data, then update current's prev's next as new
if current.data >= data:
prev = current.prev
prev.next = current.prev = new
new.prev = prev
new.next = current
break
current = current.next
return head
if __name__ == '__main__':
# ***** test 1 *****
for t_itr in range(1):
llist_count = [1, 3, 4, 10]
llist = DoublyLinkedList()
for item in llist_count:
llist.insert_node(item)
data = 5
llist1 = sortedInsert(llist.head, data)
print_doubly_linked_list(llist1) # 1 3 4 5 10
| [
"mich.chen.94@gmail.com"
] | mich.chen.94@gmail.com |
23d2bade3de1ab0aea32326d54633952e7129674 | 2f6a9629aa7497a033ae2836621f65e92529c28d | /homework 5/P6.py | 549f546e17e6f72daaec2c3efbbd0c8b5765292c | [] | no_license | H602602/AI-Python- | 9a4136d989c8f62def423b957c476cb1531dae38 | bcdd4727bf78ca7673d195a603e2a2da9ad06270 | refs/heads/main | 2023-07-27T12:24:31.895467 | 2021-09-15T12:38:37 | 2021-09-15T12:38:37 | 398,769,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | a=int(input("Enter a number you want to know the table of "))
i=1
while i<11:
print(a*i)
i=i+1 | [
"noreply@github.com"
] | noreply@github.com |
60d41043584f6d063c4c43e77e9af68f68c35b35 | 54fead852941ba66783fca735dcc9d6f84c1d40b | /eshop_account/migrations/0005_auto_20201229_0922.py | 7c0b14f18d483eaa0d43da3f8bf0d40ea75d1cb3 | [] | no_license | saeide-tohidi/django-ecommerce | 36369686f4c8904fd7be7241a2e356582938cd6f | 9ab524d396b3bada4d326ca4ea416d164b3c8c64 | refs/heads/master | 2023-03-08T13:51:11.424289 | 2021-02-27T13:59:35 | 2021-02-27T13:59:35 | 342,868,421 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # Generated by Django 3.1 on 2020-12-29 05:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('eshop_account', '0004_auto_20201215_1643'),
]
operations = [
migrations.AlterField(
model_name='history',
name='content_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype'),
),
]
| [
"s.tohidi22@gmail.com"
] | s.tohidi22@gmail.com |
91b04f73233aff30c6eca08c2f7dc761d779237c | f497cacd115a08f3901d50362a7412755783c3d9 | /events/forms.py | f8ee7abf416df218903763f37935e0a2e52efe13 | [] | no_license | snregales/UWS | aafa9a9ce3ad06f1b04d0da224d114142d55c16a | 712042d446c06ba42699e26a9aceb7dd77006881 | refs/heads/master | 2020-03-26T15:28:20.178895 | 2018-08-18T22:09:53 | 2018-08-18T22:09:53 | 129,656,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,936 | py | from django import forms
from .models import Event, EventMeta
from utils.utils import get_timestamp
from utils.validations import (
validate_date,
validate_event_name,
validate_participants,
)
class EventForm(forms.ModelForm):
current_year = get_timestamp().year
name = forms.CharField(
max_length=255,
widget=forms.TextInput(attrs={'placeholder': 'Aquamen Annual Underwater Mayhem'}))
date = forms.DateTimeField(
help_text='Date must be at least two weeks from now',
widget=forms.SelectDateWidget(
empty_label=('Year', 'Month', 'Day'),
years=list(range(current_year, current_year + 10))))
class Meta:
model = EventMeta
exclude = ('event', 'teams')
field_order = ['name', 'date', 'type', 'grade', 'age_group', 'gender']
def clean_date(self):
return validate_date(self.cleaned_data.get('date'))
def clean_name(self):
return validate_event_name(self.cleaned_data.get('name'), Event)
def save(self, commit=True):
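        # Creation is delegated to the EventMeta manager's create_event()
        # rather than the default ModelForm.save(), since name and date
        # belong to the related Event rather than to EventMeta itself.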
data = self.cleaned_data.copy()
del data['name']
del data['date']
return EventMeta.objects.create_event(self.cleaned_data['name'], self.cleaned_data['date'], **data)
class AddTeamForm(forms.Form):
def __init__(self, *args, **kwargs):
from teams.models import Team
if 'slug' not in kwargs:
raise ValueError('slug is missing')
self.slug = kwargs['slug']
del kwargs['slug']
super(AddTeamForm, self).__init__(*args, **kwargs)
self.fields['teams'] = forms.ModelChoiceField(
Team.objects.all(),
label='Teams in System',
empty_label='Pick a Team'
)
def clean_teams(self):
return validate_participants(
self.slug,
self.cleaned_data.get('teams')
)
| [
"sharlonregales@gmail.com"
] | sharlonregales@gmail.com |
291d92c0795dd0b5a8b22701412af4dad5921440 | 680c2cc9a733716938f5dc2b8b5469a25f8a16c8 | 123.py | 565ba3fef6d1e12807bcd4fed6c20bbd31a24986 | [] | no_license | e5a27123/hi | c897c6068a6ac5efe66c08d90cdc507092fb8a8a | a7ec9a34d7148c36173187fb794e476725514bf6 | refs/heads/master | 2021-05-23T18:56:45.866291 | 2020-04-06T08:04:52 | 2020-04-06T08:04:52 | 253,426,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | high = input('Enter height: ')
weight = input('Enter weight: ')
print('Your height/weight:', high, '/', weight) | [
"e5a27123@gmail.com"
] | e5a27123@gmail.com |
c42d06f160e74bd4ea815479182bf3aa58950f95 | 78652c075802f77888712447f609ef33012acb12 | /GraphMatchingByConvexPolygon/src/Match_byCrossPoints.py | 6c1c5065fc461e49db0874d2150ef69d89a2b5dd | [
"MIT"
] | permissive | Anonymous772066235/GraduationDesignProgram | 5cfb18e730f284ffce16bbd9291a558541d2a678 | 3337c48005def8515a4660fcaa004dcfbe4766be | refs/heads/main | 2023-03-22T12:11:50.641790 | 2021-03-19T06:44:28 | 2021-03-19T06:44:28 | 336,760,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | # File :Match_byCrossPoints.py
# Author :WJ
# Function :
# Time :2021/02/06
# Version :
# Amend :
import LineProcess as lp
import numpy as np
import LaplacianMatrice as laplcian
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
import Visualization as Vs
import time
start = time.time()
doppLines = np.loadtxt('Lines_dopp15_ok13.txt', delimiter=' ')
dlgLines = np.loadtxt('Lines_dlg16_OK_13.txt', delimiter=' ')
doppPoints = lp.GetIntersectPointofLines(doppLines)
print(len(doppPoints))
dlgPoints = lp.GetIntersectPointofLines(dlgLines)
print(len(dlgPoints))
doppL = laplcian.LaplacianMatrice(doppPoints,sigma=500)
dlgL = laplcian.LaplacianMatrice(dlgPoints,sigma=500)
print('Spectral decomposition of the Laplacian matrices:')
U1, Lambda1 = laplcian.LaplacianMatrice_decomposed(doppL)
U2, Lambda2 = laplcian.LaplacianMatrice_decomposed(dlgL)
print('Computing histogram similarity to build matrix A:')
k = min(len(doppPoints), len(dlgPoints))
A = laplcian.corrlation(U1, U2, k)
row_ind, col_ind = linear_sum_assignment(A)
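# The Hungarian algorithm finds the minimum-cost one-to-one assignment
# over the dissimilarity matrix A; pairs costlier than 0.7 are pruned below.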
print(row_ind)  # row indices into the cost matrix
print(col_ind)  # columns of the optimal assignment for those row indices
print('Dissimilarity:')
print(A[row_ind, col_ind])
row, col = laplcian.DeleteLargeValue(A, row_ind, col_ind, 0.7)
print(row)
print(col)
doppPoints_new = []
for i in range(len(col)):
doppPoints_new.append(doppPoints[col, :])
doppPoints_new = np.resize(doppPoints_new, (len(col), 2))
np.savetxt('CrossPoints_DOPP_OK.txt', doppPoints_new, delimiter=' ')
np.savetxt('CrossPoints_DLG_OK.txt', dlgPoints, delimiter=' ')
print(A[row, col])
plt.figure(figsize=(12, 9))
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
Vs.VisualizePoints(doppPoints, color='red', label='points1')
Vs.VisualizePoints(dlgPoints, color='green', label='points2')
Vs.VisualizeMacth(doppPoints, dlgPoints, row, col)
plt.axis('equal')
plt.xlabel('X')
plt.ylabel('Y')
name = 'line-intersection matching'  # rotation (30)
plt.title(name)
plt.legend(loc='best')
plt.savefig('Match07_' + name + '.png', dpi=300)
TIME = time.time() - start
hours = TIME // 3600
minutes = TIME % 3600 // 60
seconds = TIME % 3600 % 60
print('Total elapsed time ------------------------------------------------')
print('Runningtime:{:.0f} hours {:.0f} minutes {:.0f} seconds'.format(hours, minutes, seconds))
| [
"noreply@github.com"
] | noreply@github.com |
4970c6284aaecea5775d6a3a9f128000451133f6 | cc54a6aba8ffaf7cc2c8a7e71a9332c6f4e26507 | /flask_app.py | 00cda83bc7ac474971c9e33c64b3d364a751f64d | [] | no_license | ayc3ue/flask_app | 4c77ed60f53bec0ad70052954dc3a2162d038793 | e567cdbdd915af11b82a16ea4e8801fd8327816f | refs/heads/master | 2020-05-05T05:08:28.980926 | 2019-04-17T02:12:39 | 2019-04-17T02:12:39 | 179,740,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | import os
from flask import Flask, flash, request, redirect, url_for, send_from_directory, render_template
from werkzeug.utils import secure_filename
import exiftool
import json
UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'dll', 'exe'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = os.urandom(16)  # flash() requires a session secret; placeholder value
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('uploaded_file',
filename=filename))
return render_template('./upload.html')
@app.route('/uploads/<filename>')
def uploaded_file(filename):
file = os.path.join(app.config['UPLOAD_FOLDER'], filename)
#access file
#exif
with exiftool.ExifTool() as et:
metadata = json.dumps(et.get_metadata_batch([file]))
print(metadata)
os.remove(file)
    return render_template('./result.html', metadata=metadata)
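    # get_metadata_batch() returns one metadata dict per input file;
    # json.dumps serialises the list for result.html, and the uploaded
    # temp file is removed once its metadata has been extracted.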
#send_from_directory(app.config['UPLOAD_FOLDER'], filename) | [
"bwz3kt@virginia.edu"
] | bwz3kt@virginia.edu |
3ef70815071e67e660069013519d9ec8cf185aae | 2e011c9f2f4c43a4fca21a2072977fa820e78fe4 | /mysite/settings.py | d86a691a465a40f0a8c0280590a3a71b99097230 | [] | no_license | Lurey-Roshan/Basic_course_django | 7f3210a767889d0cdd0f3a180bb842aa8d26a4ac | 7410d5c6f23fec481fa7a15a1a1f0cacf26c349b | refs/heads/master | 2023-05-06T18:33:05.445232 | 2021-05-27T10:30:58 | 2021-05-27T10:30:58 | 326,722,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,847 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR=os.path.join(BASE_DIR, 'templates')
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
STATIC_DIR=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g=jgr$z4)xo8b2o#gd0@dqx33ilr7$%sucwgq^!754io5gp@42'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'target',
'imagehandle',
'member',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# SMTP: Simple Mail Transfer Protocol
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'Your gmail '
EMAIL_HOST_PASSWORD = 'yourgmail password'
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
DEFAULT_FROM_EMAIL="from learning <donot.reply.@mail.com>"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL= '/media/'
STATICFILES_DIRS = [
    STATIC_DIR,
]
LOGIN_REDIRECT_URL='home'
LOGOUT_REDIRECT_URL='home'
#EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
#EMAIL_FILE_PATH = str(BASE_DIR.joinpath('sent_emails')) | [
"everforgoodrs@gmail.com"
] | everforgoodrs@gmail.com |
e63b88ed084aef3af607dfd3983492929682e249 | bebacae90aa17ad2ab4c9111a2e5cfa0f8cf13a6 | /Python-3/basic_examples/raw_input_example.py | 98bdf626e06b78a68c079f81f91a901b1a33be39 | [
"MIT"
] | permissive | ayanakshi/journaldev | 5b0d73c53bc9a5292a8629c6c0320196abeab76e | a61cba22232e8cc9c40264c31aaba0bd17ff2522 | refs/heads/master | 2020-03-27T21:52:15.081736 | 2018-08-31T11:51:28 | 2018-08-31T11:51:28 | 147,182,378 | 1 | 0 | MIT | 2018-09-03T09:28:38 | 2018-09-03T09:28:38 | null | UTF-8 | Python | false | false | 61 | py | a = raw_input('What\'s your name : ')
print 'Username : ', a
| [
"pankaj.0323@gmail.com"
] | pankaj.0323@gmail.com |
ebf5338c9d16d52fb1f01ccc605998b512d9edf6 | c6ff2a4484c371efd97ce610832cd9772dd406e0 | /app10_udemy/app10_udemy/wsgi.py | bb40d92d717d10e2eaaa247e3e39c58b6fc183fe | [] | no_license | inderdevkumar/Upload-and-display | 66bbb808be27d47f3ff8d57e663b58b71f62ef71 | 668beb97392f12d4b545937c18f2723919264987 | refs/heads/master | 2022-10-10T01:19:02.044549 | 2020-06-09T12:56:22 | 2020-06-09T12:56:22 | 271,003,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for app10_udemy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app10_udemy.settings')
application = get_wsgi_application()
| [
"id0102yadav@gmail.com"
] | id0102yadav@gmail.com |
add3c5e3953f6df905c0de935b4270629b78a549 | a682abf407ed6563c8ae5ddd2983e388307dabcd | /termanim/__init__.py | 5a8274b6480fb27471874acc47d29aeaddfbf6bc | [] | no_license | sahasatvik/termanim | 838baf7e3388610cd6a33aac66c67d32d5e29ed5 | aa2239e206b7dceb5647a646fa5d4f4c76087b79 | refs/heads/master | 2023-04-30T03:44:06.549829 | 2021-05-13T17:49:56 | 2021-05-13T17:49:56 | 365,275,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | """
An interface for creating graphics and animation in the terminal.
"""
| [
"sahasatvik@gmail.com"
] | sahasatvik@gmail.com |
0fc3634719a0d6a84e782bf164e1e5bb7b18e2b5 | 895cf7d265a7976c9243894bccebf3daa645bc57 | /venv/bin/django-admin | ea08885278bada07ab1206a8cc6a4fc880b79d0b | [] | no_license | HenryM975/D_news | ebab9b443699e7b8925320d7235cf398d3a310c5 | 140be1f6385352265a6c7172261524ce5a302858 | refs/heads/master | 2023-08-05T02:33:20.483347 | 2020-08-01T09:31:18 | 2020-08-01T09:31:18 | 282,980,070 | 0 | 0 | null | 2021-09-22T19:42:46 | 2020-07-27T18:18:05 | Python | UTF-8 | Python | false | false | 289 | #!/root/PycharmProjects/news/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"hemry975@yandex.ru"
] | hemry975@yandex.ru | |
07dd881dfa838563a5ef9d22778cd9993402dd4c | 22ebcc842dbc933bfa8fdad89b8b8ef48ecc91c7 | /load/load_aes_hd.py | f70ad4aa349abe0942c49ece8386f2b88e6237e6 | [] | no_license | klikooo/thesis-src | 192651c18f243c59cfa588e7052dc1a96ab0a146 | 64f2ee824afdc2d3fd0f98c6d9fcfda597b9ad9f | refs/heads/master | 2020-04-16T18:16:20.638147 | 2019-08-20T14:59:52 | 2019-08-20T14:59:52 | 161,623,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,364 | py | from decimal import Decimal
import torch
import numpy as np
import matplotlib.pyplot as plt
from models.load_model import load_model
from test import test_with_key_guess
import util
import pdb
path = '/media/rico/Data/TU/thesis'
#####################################################################################
# Parameters
use_hw = False
n_classes = 9 if use_hw else 256
spread_factor = 1
runs = [x for x in range(5)]
train_size = 20000
epochs = 140
batch_size = 100
lr = 0.00075
sub_key_index = 2
attack_size = 100
rank_step = 1
type_network = 'HW' if use_hw else 'ID'
unmask = False if sub_key_index < 2 else True
# network_names = ['SpreadV2', 'SpreadNet']
network_names = ['ConvNetKernel']
kernel_sizes = [3, 5, 7, 9, 11, 13, 15]
# network_names = ['ConvNet', 'ConvNetDK']
plt_titles = ['$Spread_{V2}$', '$Spread_{PH}$', '$Dense_{RT}$', '$MLP_{best}$']
only_accuracy = False
data_set = util.DataSet.RANDOM_DELAY
raw_traces = True
validation_size = 1000
#####################################################################################
data_set_name = str(data_set)
if len(plt_titles) != len(network_names):
plt_titles = network_names
device = torch.device("cuda")
# Load Data
loader = util.load_data_set(data_set)
print('Loading data set')
total_x_attack, total_y_attack, plain = loader({'use_hw': use_hw,
'traces_path': '/media/rico/Data/TU/thesis/data',
'raw_traces': raw_traces,
'start': train_size + validation_size,
'size': attack_size,
'domain_knowledge': True})
print('Loading key guesses')
key_guesses = util.load_csv('/media/rico/Data/TU/thesis/data/{}/Value/key_guesses_ALL_transposed.csv'.format(
data_set_name),
delimiter=' ',
dtype=np.int,
start=train_size + validation_size,
size=attack_size)
real_key = util.load_csv('/media/rico/Data/TU/thesis/data/{}/secret_key.csv'.format(data_set_name), dtype=np.int)
x_attack = total_x_attack
y_attack = total_y_attack
def get_ranks(x_attack, y_attack, key_guesses, runs, train_size,
epochs, lr, sub_key_index, attack_size, rank_step, unmask, network_name, kernel_size_string=""):
ranks_x = []
ranks_y = []
for run in runs:
model_path = '/media/rico/Data/TU/thesis/runs2/' \
'{}/subkey_{}/{}_SF{}_E{}_BZ{}_LR{}/train{}/model_r{}_{}{}.pt'.format(
data_set_name,
sub_key_index,
type_network,
spread_factor,
epochs,
batch_size,
'%.2E' % Decimal(lr),
train_size,
run,
network_name,
kernel_size_string)
print('path={}'.format(model_path))
# Load the model
model = load_model(network_name=network_name, model_path=model_path)
model.eval()
print("Using {}".format(model))
model.to(device)
# Number of times we test a single model + shuffle the test traces
num_exps = 100
x, y = [], []
for exp_i in range(num_exps):
permutation = np.random.permutation(x_attack.shape[0])
# permutation = np.arange(0, x_attack.shape[0])
x_attack_shuffled = util.shuffle_permutation(permutation, np.array(x_attack))
y_attack_shuffled = util.shuffle_permutation(permutation, np.array(y_attack))
key_guesses_shuffled = util.shuffle_permutation(permutation, key_guesses)
# Check if we need domain knowledge
dk_plain = None
if network_name in util.req_dk:
dk_plain = plain
dk_plain = util.shuffle_permutation(permutation, dk_plain)
x_exp, y_exp = test_with_key_guess(x_attack_shuffled, y_attack_shuffled, key_guesses_shuffled, model,
attack_size=attack_size,
real_key=real_key,
use_hw=use_hw,
plain=dk_plain)
x = x_exp
y.append(y_exp)
# Take the mean of the different experiments
y = np.mean(y, axis=0)
# Add the ranks
ranks_x.append(x)
ranks_y.append(y)
return ranks_x, ranks_y
# Test the networks that were specified
ranks_x = []
ranks_y = []
rank_mean_y = []
name_models = []
for network_name in network_names:
if network_name in util.req_kernel_size:
for kernel_size in kernel_sizes:
kernel_string = "_k{}".format(kernel_size)
x, y = get_ranks(x_attack, y_attack, key_guesses, runs, train_size, epochs, lr, sub_key_index,
attack_size, rank_step, unmask, network_name, kernel_string)
mean_y = np.mean(y, axis=0)
ranks_x.append(x)
ranks_y.append(y)
rank_mean_y.append(mean_y)
name_models.append("{} K{}".format(network_name, kernel_size))
else:
x, y = get_ranks(x_attack, y_attack, key_guesses, runs, train_size, epochs, lr, sub_key_index,
attack_size, rank_step, unmask, network_name)
mean_y = np.mean(y, axis=0)
ranks_x.append(x)
ranks_y.append(y)
rank_mean_y.append(mean_y)
name_models.append(network_name)
for i in range(len(rank_mean_y)):
plt.title('Performance of {}'.format(name_models[i]))
plt.xlabel('number of traces')
plt.ylabel('rank')
plt.grid(True)
# Plot the results
for x, y in zip(ranks_x[i], ranks_y[i]):
plt.plot(x, y)
figure = plt.gcf()
plt.figure()
figure.savefig('/home/rico/Pictures/{}.png'.format(name_models[i]), dpi=100)
# plt.title('Comparison of networks')
plt.xlabel('Number of traces')
plt.ylabel('Mean rank')
plt.grid(True)
for i in range(len(rank_mean_y)):
plt.plot(ranks_x[i][0], rank_mean_y[i], label=name_models[i])
plt.legend()
# plt.figure()
figure = plt.gcf()
figure.savefig('/home/rico/Pictures/{}.png'.format('mean'), dpi=100)
plt.show()
| [
"rico12978@hotmail.com"
] | rico12978@hotmail.com |
8c11b619d8bcfc1b32ce88c5f2693cdc3fe40a4c | 33d870b95a1b4761b9912eeef973be8af619c8f0 | /feature_eng/id_ip_join.py | aa4d4191f873480c518aac3e4274f49f47180aaa | [] | no_license | sclau/Avazu-1 | 641857f1949f0c0a4ec556c570da57c2ff213492 | cf1ebdb2be8c37b7789781e16b2f2e7aaa4f4584 | refs/heads/master | 2021-01-17T06:11:25.365314 | 2014-12-06T05:19:43 | 2014-12-06T05:19:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,031 | py | #!/usr/bin/python
import sys
import os.path
sys.path.append(os.path.dirname(__file__))
def read_device():
device_counts = {}
with open("id_ip_count_sort.txt", "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
words = line.split('\t')
day, device_id = words[0], words[1]
ip_count, id_count = words[2], words[3]
key = "%s,%s" % (day, device_id)
device_counts[key] = (ip_count, id_count)
return device_counts
device_dict = read_device()
#device_dict = {}
"""
for line in sys.stdin:
line = line.strip()
words = line.split("\t")
if len(words) == 1:
words = line.split(",")
if words[0] != "id":
time = words[2]
day = time[4:6]
device_id = words[11]
if device_id != "a99f214a":
key = "%s,%s" % (day, device_id)
if key in device_dict:
print "%s,%s,%s" % (line, device_dict[key][0], device_dict[key][1])
elif key not in device_dict: #shouldn't be used at all
print "%s,%s,%s" % (line, "1", "1")
elif device_id == "a99f214a": #then the device_id is an unknown id
print "%s,%s,%s" % (line, "1", "1") #assume the counts are just 1 if unknown id
elif len(words) == 4:
day, device_id = words[0], words[1]
ip_count, id_count = words[2], words[3]
key = "%s,%s" % (day, device_id)
device_dict[key] = (ip_count, id_count)
"""
for line in sys.stdin:
line = line.strip()
words = line.split(",")
if len(words) == 24 and words[0] != "id":
time = words[2]
day = time[4:6]
device_id = words[11]
if device_id != "a99f214a" and device_id != "c357dbff":
key = "%s,%s" % (day, device_id)
if key in device_dict:
print "%s,%s,%s" % (line, device_dict[key][0], device_dict[key][1])
elif key not in device_dict: #shouldn't be used at all
print "%s,%s,%s" % (line, "1", "1")
elif device_id == "a99f214a" or device_id == "c357dbff": #then the device_id is an unknown id
print "%s,%s,%s" % (line, "1", "1") #assume the counts are just 1 if unknown id
| [
"ianchin93@berkeley.edu"
] | ianchin93@berkeley.edu |
97ed4b7b177f9bfd4dd65cf0fe4e612cec5f5ca7 | c68580258e9fbe64bbf232e781d75584691de4c4 | /tests/django_settings.py | 2af62bb352df44c8386b6fd77435541a4214c8d9 | [
"MIT"
] | permissive | KyleAMathews/graphene | 7e092e6e7d9575c1f736d834a2913a63bc753006 | 5738b69271fd245339f35640d375d6bc13092358 | refs/heads/master | 2023-08-31T21:12:22.927712 | 2015-11-30T18:08:12 | 2015-11-30T18:08:12 | 47,149,828 | 2 | 0 | null | 2015-11-30T22:24:27 | 2015-11-30T22:24:27 | null | UTF-8 | Python | false | false | 196 | py | SECRET_KEY = 1
INSTALLED_APPS = [
'examples.starwars_django',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tests/django.sqlite',
}
}
| [
"me@syrusakbary.com"
] | me@syrusakbary.com |
cd3e3d7ff3cc8d3bc80032f50d00ed482ccfbd91 | c9eb43714afc10d59d3a5c8dc9ac948bcce8da3a | /wordcount_project/urls.py | 1c828ec47b9a478e57a9b9d8a21e3bfce02893f5 | [] | no_license | hyo-ji/wordcount | e6c88ec0fd168c28f9bfa9293227bd17ffc4b039 | 1e32ecb87c5c651cf1b1ae8cddd6351076d6b9ff | refs/heads/master | 2022-11-11T12:12:18.089366 | 2020-07-06T12:27:16 | 2020-07-06T12:27:16 | 277,532,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | """wordcount_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import myapp.views
urlpatterns = [
path('admin/', admin.site.urls),
path('', myapp.views.main, name="main"),
path('result/', myapp.views.result, name="result"),
]
| [
"gywl21@naver.com"
] | gywl21@naver.com |
6e5f727a122ec1a1829134b85592fba4c0f04981 | 795c2505b1dd77e5e7ce230348dbc4558c11c74e | /week7/9-4.py | 3c295e5fd93cebbb1c6024d6d3e0e62b17fb397d | [] | no_license | mikephys8/The_Fundamentals_pythonBasic | 4326400e6308109606735f092f588bacbf61381b | a0f7386579deb8481f0632bb8c98e0d09392f7c7 | refs/heads/master | 2020-06-04T20:19:01.165552 | 2014-06-30T16:22:41 | 2014-06-30T16:22:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | __author__ = 'Administrator'
gradefile = open('student_grades.txt')
def read_grades(gradefile):
"""(file open for reading) -> list of float
Read the grades from gradefile and return a dictionary where
each key is a grade and each value is the list of ids of
students who earned that grade.
Precondition: gradefile starts with a header that contains
no blank lines, then has a blank line, and then lines
containing a student number and a grade.
"""
# skip over the header.
line = gradefile.readline()
while line != '\n':
line = gradefile.readline()
# Read the grades, accumulating them into a dict.
grade_to_ids = {}
line = gradefile.readline()
while line != '':
# Now we have a string containing the info for a
# single student.
student_id = line[:4]
# the strip() method removes all spaces from the string.
grade = float(line[4:].strip())
if grade not in grade_to_ids:
grade_to_ids[grade] = [student_id]
else:
grade_to_ids[grade].append(student_id)
line = gradefile.readline()
return grade_to_ids
# # Find the last space and take everything after
# # that space.
#
# # .rfind() method finds from the right most place
# # of the line the first! index number of the char.
# # that's why we add +1 for passing the space and
# # start from the number till the end of string.
#
# #this returns str
# grade = line[line.rfind(' ') + 1:]
# #but we want float
# grades.append(float(grade))
# line = gradefile.readline()
# return grades
classes = read_grades(gradefile)
print(classes)
print('--------------------------------------- \n')
# print the dictionary (classes) one grade per line, sorted by grade
for key, value in sorted(classes.items()):
print("{} : {}".format(key, value))
| [
"mikephys8@gmail.com"
] | mikephys8@gmail.com |
c788318f60d2098a2e34990bd3b98721e503be1f | be78f65e98c4a375666f0c22d18038c8ecef4519 | /QA_db.py | c883e669a4d52140c65d164b80da5cca1984ab8b | [
"BSD-3-Clause-Clear"
] | permissive | Rexmiao/QuickAnnotator | 5d9358d9fab04abfd10ac1950c62989c4d6715d2 | 8095bcf341ff0ec03590c23711fc1b228297ef82 | refs/heads/main | 2023-03-09T14:15:50.048688 | 2021-02-20T00:41:47 | 2021-02-20T00:41:47 | 337,521,794 | 0 | 1 | BSD-3-Clause-Clear | 2021-02-09T20:03:46 | 2021-02-09T20:03:45 | null | UTF-8 | Python | false | false | 4,391 | py | from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Text
import sqlalchemy
import logging
from QA_config import get_database_uri
jobs_logger = logging.getLogger('jobs')
_pool = []
db = SQLAlchemy()
# Create Flask-SQLAlchemy models
class Project(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(Text, nullable=False, unique=True)
description = db.Column(db.Text, default="")
date = db.Column(db.DateTime)
train_ae_time = db.Column(db.DateTime)
make_patches_time = db.Column(db.DateTime)
iteration = db.Column(db.Integer, default=-1)
embed_iteration = db.Column(db.Integer, default=-1)
images = db.relationship('Image', backref='project', lazy=True)
jobs = db.relationship('Job', backref='project', lazy=True)
class Image(db.Model):
id = db.Column(db.Integer, primary_key=True)
projId = db.Column(db.Integer, db.ForeignKey('project.id'), nullable=False)
name = db.Column(db.Text)
path = db.Column(db.Text, unique=True)
height = db.Column(db.Integer)
width = db.Column(db.Integer)
ppixel = db.Column(db.Integer, default=0)
npixel = db.Column(db.Integer, default=0)
nobjects = db.Column(db.Integer, default=0)
date = db.Column(db.DateTime)
rois = db.relationship('Roi', backref='image', lazy=True)
make_patches_time = db.Column(db.DateTime)
superpixel_time = db.Column(db.DateTime)
superpixel_modelid = db.Column(db.Integer, default=-1)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Roi(db.Model):
id = db.Column(db.Integer, primary_key=True)
imageId = db.Column(db.Integer, db.ForeignKey('image.id'), nullable=False)
name = db.Column(db.Text)
path = db.Column(db.Text)
testingROI = db.Column(db.Integer, default=-1)
height = db.Column(db.Integer)
width = db.Column(db.Integer)
x = db.Column(db.Integer)
y = db.Column(db.Integer)
nobjects = db.Column(db.Integer, default=0)
date = db.Column(db.DateTime)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Job(db.Model):
id = db.Column(db.Integer, primary_key=True)
projId = db.Column(db.Integer, db.ForeignKey('project.id'), nullable=False)
imageId = db.Column(db.Integer, db.ForeignKey('image.id'), nullable=True)
cmd = db.Column(db.Text)
params = db.Column(db.Text)
status = db.Column(db.Text)
retval = db.Column(db.Text)
start_date = db.Column(db.DateTime, server_default=db.func.now())
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class JobidBase(db.Model):
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.Text)
procout = db.Column(db.Text)
# Remove all queued and running jobs from the database
def clear_stale_jobs():
jobs_deleted = Job.query.filter_by(status='QUEUE').delete()
jobs_deleted += Job.query.filter_by(status='RUNNING').delete()
return jobs_deleted
def set_job_status(job_id, status, retval = ""):
if job_id:
engine = sqlalchemy.create_engine(get_database_uri())
engine.connect().execute(f"update job set status= :status, retval = :retval where id={job_id}", status=status, retval = retval)
engine.dispose()
jobs_logger.info(f'Job {job_id} set to status "{status}".')
# Output the project id from the database for a given name:
def get_project_id(project_name):
return Project.query.filter_by(name=project_name).first().id
# Output the index of the latest trained ai
def get_latest_modelid(project_name):
# pull the last training iteration from the database
selected_proj = db.session.query(Project).filter_by(name=project_name).first()
iteration = int(selected_proj.iteration)
# count backwards until we find a trained model for the given index
# --- AJ: Lets comment this out for now to see if/when it breaks. with the improved callback, we should
# never be in this situation now and this code is more of a hack than a solution
# for model_id in range(iteration, -2, -1):
# model_path = f"./projects/{project_name}/models/{model_id}/best_model.pth"
# model_exists = os.path.exists(model_path)
# if (model_exists):
# break
# output the id
return iteration
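# --- illustrative usage (sketch; assumes an active Flask app context and a
# hypothetical project name) ---
# clear_stale_jobs()                     # drop leftover QUEUE/RUNNING jobs
# model_id = get_latest_modelid("demo")  # -1 until an iteration completes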
| [
"choose.happy@gmail.com"
] | choose.happy@gmail.com |
83287219fc0ef5c36a89bcb50a247ae0f358e3f7 | 0b90c9bc3efd14cd2a869ab519f1dfe9645cefd6 | /hw03/main.py | a35d9b237aa7b247b282f0923017b48068ff95af | [] | no_license | 1510650114/exp | 2e0b6da6c5e17de48a060692d322cd77824b291b | c6a0da63b3922d4b513b56cec067749925af4815 | refs/heads/master | 2021-07-11T22:53:19.856300 | 2017-10-16T13:40:39 | 2017-10-16T13:40:39 | 107,132,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,958 | py | import turtle
import math
def draw_polygon(aTurtle, size=50, n=3):
    ## Draw a regular polygon.
    ##
    ## args:
    ##     aTurtle: turtle instance to draw with
    ##     size: int, side length of the polygon
    ##     n: int, number of sides
for i in range(n):
aTurtle.forward(size)
aTurtle.left(360.0/n)
def draw_n_angle(aTurtle, size=50, num=5, color=None):
    ## Draw a regular num-pointed star; filled only when a color is given.
    ##
    ## args:
    ##     aTurtle: turtle instance to draw with
    ##     size: int, edge length of the star
    ##     num: int, number of points
    ##     color: str, fill color; no fill by default
    ##
if color:
aTurtle.begin_fill()
aTurtle.fillcolor(color)
for i in range(num):
aTurtle.forward(size)
aTurtle.left(360.0/num)
aTurtle.forward(size)
aTurtle.right(2*360.0/num)
if color:
aTurtle.end_fill()
def draw_5_angle(aTurtle=None, start_pos=(0,0), end_pos=(0,10), radius=100, color=None):
    ## Draw a five-pointed star from a center position, a direction point and
    ## the circumradius.
    ##
    ## args:
    ##     aTurtle: turtle instance to draw with
    ##     start_pos: (int, int) tuple, center of the star's circumscribed circle
    ##     end_pos: (int, int) tuple, the point the center is oriented toward
    ##     radius: circumradius of the star
    ##     color: str, fill color; no fill by default
    ##
aTurtle = aTurtle or turtle.Turtle()
size = radius * math.sin(math.pi/5)/math.sin(math.pi*2/5)
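    # chord geometry: a 5-pointed star inscribed in a circle of radius R has
    # outer edges of length R*sin(pi/5)/sin(2*pi/5), roughly 0.618*R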
aTurtle.left(math.degrees(math.atan2(end_pos[1]-start_pos[1], end_pos[0]-start_pos[0])))
aTurtle.penup()
aTurtle.goto(start_pos)
aTurtle.fd(radius)
aTurtle.pendown()
aTurtle.right(math.degrees(math.pi*9/10))
draw_n_angle(aTurtle, size, 5, color)
def draw_5_star_flag(times=20.0):
    ## Draw the five-star red flag.
    ##
    ## args:
    ##     times: scale factor; the flag spec is 30*20, so the default of 20
    ##            gives a 600*400 drawing
width, height = 30*times, 20*times
    # initialize the screen and the turtle
window = turtle.Screen()
aTurtle = turtle.Turtle()
aTurtle.hideturtle()
aTurtle.speed(10)
    # draw the red field of the flag
aTurtle.penup()
aTurtle.goto(-width/2, height/2)
aTurtle.pendown()
aTurtle.begin_fill()
aTurtle.fillcolor('red')
aTurtle.fd(width)
aTurtle.right(90)
aTurtle.fd(height)
aTurtle.right(90)
aTurtle.fd(width)
aTurtle.right(90)
aTurtle.fd(height)
aTurtle.right(90)
aTurtle.end_fill()
    # draw the large star
draw_5_angle(aTurtle, start_pos=(-10*times, 5*times), end_pos=(-10*times, 8*times), radius=3*times, color='yellow')
    # draw the four small stars
stars_start_pos = [(-5, 8), (-3, 6), (-3, 3), (-5, 1)]
for pos in stars_start_pos:
draw_5_angle(aTurtle, start_pos=(pos[0]*times, pos[1]*times), end_pos=(-10*times, 5*times), radius=1*times, color='yellow')
    # close the window on click
window.exitonclick()
if __name__ == '__main__':
draw_5_star_flag()
| [
"noreply@github.com"
] | noreply@github.com |
d479a02c95eafbfa0243368ff72acbbb96176ad8 | 1e737d53085f4628af8330a8705862e8458354f4 | /Toets_2_ECTTP_Ronald_van_Egdom_3018119/Controls.py | fcb84f5c9e1e61e774cfeeb372f2919a877f553f | [
"MIT"
] | permissive | RunicPixels/HKU-ECTTP | 0551df865763024bebeb34c326e690442e06657a | 1bfeefbb6ed372128edd2e62602fe5c8d35bd0bd | refs/heads/master | 2022-05-18T09:46:45.448697 | 2016-12-14T18:07:18 | 2016-12-14T18:07:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | class Controls() :
#Right, Left, Down, Up, Special.
#Keyboard.
String_controls = ["D","A","S","W"," "]
String_altControls = ["d","a","s","w","c"] | [
"superronald@live.nl"
] | superronald@live.nl |
d3d76cb1c9935b3b8fa21acac1bc730454de8817 | e168546cd2cbd76d7d1c77d62c9889d67769309f | /PokerModel.py | e69d84cfa60f9521099521f881b36d2249b5051f | [] | no_license | guptaronav/poker | 5dc4e12c5cba0c8044f0526769b07a3adfd4b727 | 59a0838ca3bbe2f4834e3096223aeb321df6d74a | refs/heads/master | 2023-01-21T14:53:14.315532 | 2020-11-28T18:43:31 | 2020-11-28T18:43:31 | 316,791,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,866 | py | import operator
import random
import pygame
class Card:
def __init__(self, rank, suit):
self.rank = 0
self.suit = ''
self.image_path = ('img/'+str(rank) + str(suit) + '.png')
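        # note: Deck() passes numeric ranks, so face cards resolve to paths
        # like 'img/14H.png'; the image assets must follow that naming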
self.selected = False
#convert the rank to an integer so it's easier to compute the winner of a hand
if rank == 'A':
self.rank = 14
elif rank == 'K':
self.rank = 13
elif rank == 'Q':
self.rank = 12
elif rank == 'J':
self.rank = 11
elif rank == 'T':
self.rank = 10
else:
self.rank = int(rank)
self.suit = suit
def __str__(self):
out = ""
#convert rank back to a word so it's easier to read
if self.rank == 14:
out += "Ace"
elif self.rank == 13:
out += "King"
elif self.rank == 12:
out += "Queen"
elif self.rank == 11:
out += "Jack"
else:
out += str(self.rank)
out += ' of '
#convert the suit to a word so it's easier to read
if self.suit == 'H':
out += 'Hearts'
elif self.suit == 'S':
out += 'Spades'
elif self.suit == 'C':
out += 'Clubs'
else:
out += 'Diamonds'
return out
#only exists for the __str__ function
class Hand:
def __init__(self, hand):
self.hand = hand
def __str__(self):
out = ""
for card in self.hand:
out += str(card) + ", "
return out
def __getitem__(self, index):
return self.hand[index]
def __len__(self):
return len(self.hand)
class Deck:
def __init__(self):
self.deck = []
for suit in ['H','S','C','D']:
for rank in range(2,15):
self.deck.append(Card(rank, suit))
def __str__(self):
out = ""
for card in self.deck:
out += str(card) + "\n"
return out
def __getitem__(self, index):
return self.deck[index]
#return a list a cards taken from the deck
def deal(self, amount):
cards = []
#cap out the cards dealt
if amount > len(self.deck):
print("There are not enough cards! I can only deal " + str(len(self.deck)) + " cards.")
amount = len(self.deck)
#create and then return a list of cards taken randomly from the deck
for i in range(amount):
card = random.choice(self.deck)
self.deck.remove(card)
cards.append(card)
return cards
class Poker:
    def __init__(self, scores=None):
        # Python has no method overloading; a single __init__ with a default
        # replaces the two earlier definitions (the second shadowed the first)
        self.deck = Deck()
        self.scores = scores if scores is not None else [0, 0, 0, 0]
        self.playerHand = Hand(self.deck.deal(5))
        self.comp1Hand = Hand(self.deck.deal(5))
        self.comp2Hand = Hand(self.deck.deal(5))
        self.comp3Hand = Hand(self.deck.deal(5))
#make each computer take a turn
def computerReplace(self):
self.AI_replace(self.comp1Hand)
self.AI_replace(self.comp2Hand)
self.AI_replace(self.comp3Hand)
def get_most_suit(self, hand):
suits = {'H':0, 'C':0, 'S':0, 'D':0}
for card in hand:
suits[card.suit] += 1
return max(suits.items(), key=operator.itemgetter(1))[0]
def get_most_rank(self, hand):
ranks = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0, 13:0, 14:0}
for card in hand:
ranks[card.rank] += 1
return max(ranks.items(), key=operator.itemgetter(1))[0]
def replace_suit(self, hand):
suit = self.get_most_suit(hand)
for card in hand:
if card.suit != suit:
card.selected = True
self.replace(hand)
def replace_rank(self, hand):
rank = self.get_most_rank(hand)
for card in hand:
if card.rank != rank:
card.selected = True
self.replace(hand)
def AI_replace(self, hand):
score = self.get_score(hand)
#decide which cards not to toss away so as to keep the same score
if str(score)[0] == '1': #High card, try for flush
self.replace_suit(hand)
elif str(score)[0] == '2': #One pair, switch out cards not paired
self.replace_rank(hand)
elif str(score)[0] == '3': #Two pair, switch out card not paired
self.replace_rank(hand)
elif str(score)[0] == '4': #Three of a kind, switch out cards not paired
self.replace_rank(hand)
elif str(score)[0] == '8': #Four of a kind, switch out the not paired not
self.replace_rank(hand)
#all other cases are a pass
    # replaces the selected cards in the hand with the top cards of the deck
    def replace(self, hand):
        # filter out the selected cards instead of removing them while
        # iterating (the old triple pass worked around skipped neighbors)
        selected = [card for card in hand.hand if card.selected]
        hand.hand = [card for card in hand.hand if not card.selected]
        hand.hand.extend(self.deck.deal(len(selected)))
    # plays a round of poker with 4 hands: credits the winner in self.scores
    # and returns the list of hand scores [player, comp1, comp2, comp3]
def play_round(self):
score1 = self.get_score(self.playerHand)
score2 = self.get_score(self.comp1Hand)
score3 = self.get_score(self.comp2Hand)
score4 = self.get_score(self.comp3Hand)
winner = max(score1, max(score2, max(score3, score4)))
if winner == score1:
self.scores[0] += 1
elif winner == score2:
self.scores[1] += 1
elif winner == score3:
self.scores[2] += 1
elif winner == score4:
self.scores[3] += 1
return [score1, score2, score3, score4]
    # returns an integer score for the hand: the first digit encodes the hand
    # type (1 = high card ... 9 = straight flush) and the remaining digit
    # pairs encode the card ranks in order of significance
def get_score(self, hand):
#make a dictionary containing the count of each each
cardCount = {2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0, 13:0, 14:0}
for card in hand.hand:
cardCount[card.rank] += 1
#count number of unique cards
uniqueCount = 0
for rankCount in cardCount.values():
if rankCount > 0:
uniqueCount += 1
straight = self.is_straight(hand)
flush = self.is_flush(hand)
points = 0
if straight and flush:
points = max(points, 9) #straight flush
elif flush and not straight:
points = max(points, 6) #flush
elif not flush and straight:
points = max(points, 5) #straight
elif uniqueCount == 2:
if max(cardCount.values()) == 4:
points = 8 #four of a kind (2 uniques and 4 are the same)
elif max(cardCount.values()) == 3:
points = 7 #full house (2 unique and 3 are the same)
elif uniqueCount == 3:
if max(cardCount.values()) == 3:
points = 4 #three of a kind (3 unique and 3 are the same)
elif max(cardCount.values()) == 2:
points = 3 #two pair (3 uniques and 2 are the same)
elif uniqueCount == 4:
if max(cardCount.values()) == 2:
points = 2 #one pair (4 uniques and 2 are the same)
elif uniqueCount == 5:
points = 1 #high card
#print out the values of the cards in order from greatest to least with 2 digits for each card in order to generate a point value
sorted_cardCount = sorted(cardCount.items(), key=operator.itemgetter(1,0), reverse=True)
for keyval in sorted_cardCount:
if keyval[1] != 0:
points = int(str(points) + (keyval[1] * str(keyval[0]).zfill(2)))
return points
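        # worked example: a pair of kings with A/9/4 kickers is "one pair", so
        # points starts at 2 and the sorted (count, rank) pairs append
        # "1313" + "14" + "09" + "04", giving 21313140904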
#given an integer score, returns the poker term equivalent
def convert_score(self, score):
if str(score)[0] == '1':
return "High Card"
elif str(score)[0] == '2':
return "One Pair"
elif str(score)[0] == '3':
return "Two Pair"
elif str(score)[0] == '4':
return "Three of a Kind"
elif str(score)[0] == '5':
return "Straight"
elif str(score)[0] == '6':
return "Flush"
elif str(score)[0] == '7':
return "Full House"
elif str(score)[0] == '8':
return "Four of a Kind"
elif str(score)[0] == '9':
return "Straight Flush"
#a hand is a straight if, when sorted, the current card's rank + 1 is the same as the next card
def is_straight(self,hand):
values = []
for card in hand.hand:
values.append(card.rank)
values.sort()
for i in range(0,4):
if values[i] + 1 != values[i + 1]:
return False
return True
#a hand is a flush if all the cards are of the same suit
def is_flush(self,hand):
suit = hand.hand[0].suit
for card in hand.hand:
if card.suit != suit:
return False
return True | [
"guptaronav@icloud.com"
] | guptaronav@icloud.com |
2441d653960673ac4a1f42bc7f1c0bcecb8153ca | 94d1b45b6a7c25d6585a12b5c028244389c51d4f | /Recursion/venv/bin/python-config | 740ca13a09fcb2c2776e73499432ec41dccab360 | [] | no_license | Erockk/Recursion | 6f543b72474c219ef33807bff79a5f4e87d3495b | 4fe6760e76caee3161b95c20aa4ff19841f56d73 | refs/heads/master | 2021-05-01T12:25:21.040658 | 2018-02-11T20:06:51 | 2018-02-11T20:06:51 | 121,065,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | #!/home/erock/PycharmProjects/Recursion/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
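# Illustrative invocation (sketch; flags are those listed in valid_opts):
#   ./python-config --includes --libs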
| [
"dejaerik@gmail.com"
] | dejaerik@gmail.com | |
b975da264a632583abc06b064c0bdd3fa00f4ec9 | ec2ebf6b2c9e43857815ca857f4f8fb7580c9832 | /.vscode/1925.py | 510d6909161097110f651828f4ab032b59b8c360 | [] | no_license | eunchae2000/codeup | 7b9793b4570fee6cb0cb529ebf1ab33b871b165e | d6f1a2c32262a69bada3c093c87b16832b319471 | refs/heads/master | 2023-03-28T06:17:35.546759 | 2021-03-27T09:18:44 | 2021-03-27T09:18:44 | 328,110,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | n, r = map(int, input().split())
def num(n, r):
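    # binomial coefficient "n choose r" via Pascal's rule:
    # C(n, r) = C(n-1, r-1) + C(n-1, r), with C(n, n) = C(n, 0) = 1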
if(n==r or r==0):
return 1
elif(r==1):
return n
return num(n-1, r-1) + num(n-1, r)
print(num(n, r)) | [
"tldjs3651@naver.com"
] | tldjs3651@naver.com |
d18ef3231977f0c3dcc4fa4dd7781da7c8cafa55 | 5ec411a831b02791e983e2cf002aa99945a4de08 | /write_urease_html.py | c72358d34b367dc07480a5b72f19903cff1c5dc3 | [
"MIT"
] | permissive | gaughanc/whatbacteria-sigma | 26de9c870b560166218c711f3eae297ea6f858b9 | b273003419867bd3b7f0ed44079444e7f4c6feaf | refs/heads/master | 2021-01-21T13:29:36.973297 | 2017-09-01T20:44:00 | 2017-09-01T20:44:00 | 102,128,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,512 | py | import random
from create_lin_list import create_lin_list
from get_lists import get_genera, get_species, get_urease, get_catcount
#create or define all needed lists
lins = create_lin_list()
genera = get_genera()
species = get_species()
urease = get_urease()
count = get_catcount()
nodes = []
edges = []
#define colors
red = "#ec5148"
maroon = "#9b1b1b"
orange = "#ffce96"
grey = "#e0d8d5"
salmon = "#f28f8a"
blue = "#5094ce"
light_blue = "#c7e3f9"
green = "#82eda7"
edge_color = light_blue
def_node_color = blue
#for each genus
for i, genus in enumerate(genera):
for j, spec in enumerate(species[i]):
if urease[i][j] == "urease positive":
color = green
count[i][0] += 1
elif urease[i][j] == "urease negative":
color = red
count[i][1] += 1
else:
color = blue
#create a node for each of its species
nodes.append({"id":"sn"+str(i)+str(j), "label":spec, "x":10*i-10+j, "y":random.randint(850,1000), "size":5, "color":color})
#create an edge from the genus to each of its species
edges.append({"id":"se"+str(i)+str(j), "source":"gn"+str(i), "target":"sn"+str(i)+str(j),"size":20,"color":edge_color})
if count[i][1] < count[i][0]:
color = green
elif count[i][0] < count[i][1]:
color = red
else:
color = blue
#create a node for the genus
nodes.append({"id":"gn"+str(i), "label":genus, "x":10*i, "y":random.randint(600,800), "size":15, "color":color})
    #for each lineage
for lin in lins:
if len(lin) == 6:
#if the genus appears
if lin[5] == genus:
xs = [10*i+10, 10*i, 11*i, 17*i+25, 750]
ys = [random.randint(400,500), random.randint(200,300), random.randint(90,100), 0, -300]
sizes = [20, 25, 30, 35, 40]
for j, n in enumerate([4, 3, 2, 1, 0]):
#if one of its taxon levels does not have a node, create one
if not any(node["label"] == lin[n] for node in nodes):
nodes.append({"id":"tn"+str(n)+str(i), "label":lin[n], "x":xs[j], "y":ys[j], "size":sizes[j], "color":def_node_color})
#for each node
for node in nodes:
#if its label belongs to the genus' family, connect the node to the genus
if node["label"] == lin[4]:
edges.append({"id":"fe"+str(i), "source":"gn"+str(i), "target":node["id"],"size":20,"color":edge_color})
#mark the family node
source = node["id"]
#for each node
for node in nodes:
#if its label belongs to the genus' order, connect the node to the family
if node["label"] == lin[3]:
edges.append({"id":"oe"+str(i), "source":source, "target":node["id"],"size":20,"color":edge_color})
#mark the order node
source = node["id"]
#for each node
for node in nodes:
#if its label belongs to the genus' class, connect the node to the order
if node["label"] == lin[2]:
edges.append({"id":"ce"+str(i), "source":source, "target":node["id"],"size":20,"color":edge_color})
#mark the class node
source = node["id"]
#for each node
for node in nodes:
#if its label belongs to the genus' phylum, connect the node to the class
if node["label"] == lin[1]:
edges.append({"id":"pe"+str(i), "source":source, "target":node["id"],"size":20,"color":edge_color})
                            #mark the phylum node
source = node["id"]
#for each node
for node in nodes:
#if its label belongs to the genus' domain, connect the node to the phylum
if node["label"] == lin[0]:
edges.append({"id":"de"+str(i), "source":source, "target":node["id"],"size":20,"color":edge_color})
break
scripta = """<!-- START SIGMA IMPORTS -->
<script src="../src/sigma.core.js"></script>
<script src="../src/conrad.js"></script>
<script src="../src/utils/sigma.utils.js"></script>
<script src="../src/utils/sigma.polyfills.js"></script>
<script src="../src/sigma.settings.js"></script>
<script src="../src/classes/sigma.classes.dispatcher.js"></script>
<script src="../src/classes/sigma.classes.configurable.js"></script>
<script src="../src/classes/sigma.classes.graph.js"></script>
<script src="../src/classes/sigma.classes.camera.js"></script>
<script src="../src/classes/sigma.classes.quad.js"></script>
<script src="../src/classes/sigma.classes.edgequad.js"></script>
<script src="../src/captors/sigma.captors.mouse.js"></script>
<script src="../src/captors/sigma.captors.touch.js"></script>
<script src="../src/renderers/sigma.renderers.canvas.js"></script>
<script src="../src/renderers/sigma.renderers.webgl.js"></script
<script src="../src/renderers/sigma.renderers.svg.js"></script>
<script src="../src/renderers/sigma.renderers.def.js"></script>
<script src="../src/renderers/webgl/sigma.webgl.nodes.def.js"></script>
<script src="../src/renderers/webgl/sigma.webgl.nodes.fast.js"></script>
<script src="../src/renderers/webgl/sigma.webgl.edges.def.js"></script>
<script src="../src/renderers/webgl/sigma.webgl.edges.fast.js"></script>
<script src="../src/renderers/webgl/sigma.webgl.edges.arrow.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.labels.def.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.hovers.def.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.nodes.def.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edges.def.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edges.curve.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edges.arrow.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edges.curvedArrow.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edgehovers.def.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edgehovers.curve.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edgehovers.arrow.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.edgehovers.curvedArrow.js"></script>
<script src="../src/renderers/canvas/sigma.canvas.extremities.def.js"></script>
<script src="../src/renderers/svg/sigma.svg.utils.js"></script>
<script src="../src/renderers/svg/sigma.svg.nodes.def.js"></script>
<script src="../src/renderers/svg/sigma.svg.edges.def.js"></script>
<script src="../src/renderers/svg/sigma.svg.edges.curve.js"></script>
<script src="../src/renderers/svg/sigma.svg.labels.def.js"></script>
<script src="../src/renderers/svg/sigma.svg.hovers.def.js"></script>
<script src="../src/middlewares/sigma.middlewares.rescale.js"></script>
<script src="../src/middlewares/sigma.middlewares.copy.js"></script>
<script src="../src/misc/sigma.misc.animation.js"></script>
<script src="../src/misc/sigma.misc.bindEvents.js"></script>
<script src="../src/misc/sigma.misc.bindDOMEvents.js"></script>
<script src="../src/misc/sigma.misc.drawHovers.js"></script>
<!-- END SIGMA IMPORTS -->
<div id="container">
<style>
#graph-container {
top: 0;
bottom: 0;
left: 0;
right: 0;
position: absolute;
}
</style>
<div id="graph-container"></div>
</div>
<script>
var g = {
nodes: [],
edges: []
};
// Generate a graph:"""
scriptb = "g.nodes.push(" + str(nodes)[1:-1] + ")"
scriptc = ""
scriptd = "g.edges.push(" + str(edges)[1:-1] + ")"
scripte = """
// Instantiate sigma:
s = new sigma({
graph: g,
container: 'graph-container',
});
</script>"""
print(scripta, scriptb, scriptc, scriptd, scripte, sep="\n")
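# Usage sketch: the script writes a self-contained sigma.js page to stdout,
# e.g.  python write_urease_html.py > urease.html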
| [
"gaughanc@carleton.edu"
] | gaughanc@carleton.edu |
7b95333f1a3c4029e1d7f969049b5f60ff949f8c | 00cbe2c2bc49dd139bc9fff2b2d70f440824a102 | /ResourceManager.py | ba2fb6fc68b9769d1572aeac05e338683765c376 | [] | no_license | TCooper1996/Tank-Shooter-PyOpenGL | e10b7041cb6f80d9f3b14b5075fb4c2e9b32a895 | ce3ad3122d10af5b0240e89c3e731df7fa22c96f | refs/heads/master | 2020-06-17T06:06:01.047018 | 2019-08-01T03:10:07 | 2019-08-01T03:10:07 | 195,823,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | from OpenGL.GL import *
import Shader
class ResourceManager:
Shaders = {}
@staticmethod
def load_shader(v_shader_file, f_shader_file, s_name):
ResourceManager.Shaders[s_name] = ResourceManager.load_shader_from_file(v_shader_file, f_shader_file)
return ResourceManager.Shaders[s_name]
@staticmethod
def get_shader(s_name):
return ResourceManager.Shaders[s_name]
@staticmethod
def clear():
        for shader in ResourceManager.Shaders.values():
            glDeleteProgram(shader.ID)
@staticmethod
def load_shader_from_file(v_shader_file, f_shader_file):
try:
            with open(v_shader_file, 'r') as file:
                vertex_code = file.read()
            with open(f_shader_file, 'r') as file:
                fragment_code = file.read()
        except FileNotFoundError:
            print("ERROR: Failed to read vertex/fragment shader")
            raise  # re-raise so the lines below never run with undefined names
shader = Shader.Shader()
shader.compile(vertex_code, fragment_code)
return shader
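# --- illustrative usage (sketch; shader file names are hypothetical) ---
# ResourceManager.load_shader("sprite.vs", "sprite.fs", "sprite")
# sprite_shader = ResourceManager.get_shader("sprite")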
| [
"trcooper1996@gmail.com"
] | trcooper1996@gmail.com |
2af528c484536afbc1490119fcb2b8ed301d4ea5 | 1189fc1021cec3f5520c67b884ada83112434ec8 | /stars.py | e39ca40a7abea43f33319e217cc7c825f005e525 | [] | no_license | MrMil/code-or-die | ac7991040900d8837b38d950643863f1e8956a52 | cc3ecc2f1e782176268669e2b65000a1e03a6e31 | refs/heads/master | 2020-03-22T01:20:20.117611 | 2018-06-30T22:05:12 | 2018-06-30T22:05:12 | 139,298,978 | 0 | 0 | null | 2018-07-01T04:00:56 | 2018-07-01T04:00:56 | null | UTF-8 | Python | false | false | 7,396 | py | import random
NAMED_STARS = [
"Acamar",
"Achernar",
"Achird",
"Acrab",
"Acrux",
"Acubens",
"Adhafera",
"Adhara",
"Adhil",
"Ain",
"Ainalrami",
"Aladfar",
"Alamak",
"Alathfar",
"Albaldah",
"Albali",
"Albireo",
"Alchiba",
"Alcor",
"Alcyone",
"Aldebaran",
"Alderamin",
"Aldhanab",
"Aldhibah",
"Aldulfin",
"Alfirk",
"Algedi",
"Algenib",
"Algieba",
"Algol",
"Algorab",
"Alhena",
"Alioth",
"Aljanah",
"Alkaid",
"Al Kalb al Rai",
"Alkalurops",
"Alkaphrah",
"Alkarab",
"Alkes",
"Almaaz",
"Almach",
"Al Minliar al Asad",
"Alnair",
"Alnasl",
"Alnilam",
"Alnitak",
"Alniyat",
"Alphard",
"Alphecca",
"Alpheratz",
"Alpherg",
"Alrakis",
"Alrescha",
"Alruba",
"Alsafi",
"Alsciaukat",
"Alsephina",
"Alshain",
"Alshat",
"Altair",
"Altais",
"Alterf",
"Aludra",
"Alula Australis",
"Alula Borealis",
"Alya",
"Alzirr",
"Ancha",
"Angetenar",
"Ankaa",
"Anser",
"Antares",
"Arcturus",
"Arkab Posterior",
"Arkab Prior",
"Arneb",
"Ascella",
"Asellus Australis",
"Asellus Borealis",
"Ashlesha",
"Asellus Primus",
"Asellus Secundus",
"Asellus Thertius",
"Asmidiske",
"Aspidiske",
"Asterope",
"Athebyne",
"Atik",
"Atlas",
"Atria",
"Avior",
"Azelfafage",
"Azha",
"Azmidi",
"Barnard's Star",
"Baten Kaitos",
"Beemim",
"Beid",
"Bellatrix",
"Betelgeuse",
"Bharani",
"Biham",
"Botein",
"Brachium",
"Bunda",
"Canopus",
"Capella",
"Caph",
"Castor",
"Castula",
"Cebalrai",
"Celaeno",
"Cervantes",
"Chalawan",
"Chamukuy",
"Chara",
"Chertan",
"Copernicus",
"Cor Caroli",
"Cujam",
"Cursa",
"Dabih",
"Dalim",
"Deneb",
"Deneb Algedi",
"Denebola",
"Diadem",
"Diphda",
"Dschubba",
"Dubhe",
"Dziban",
"Edasich",
"Electra",
"Elgafar",
"Elkurud",
"Elnath",
"Eltanin",
"Enif",
"Errai",
"Fafnir",
"Fang",
"Fawaris",
"Felis",
"Fomalhaut",
"Fulu",
"Fumalsamakah",
"Furud",
"Fuyue",
"Gacrux",
"Garnet Star",
"Giausar",
"Gienah",
"Ginan",
"Gomeisa",
"Graffias",
"Grumium",
"Hadar",
"Haedus",
"Hamal",
"Hassaleh",
"Hatysa",
"Helvetios",
"Heze",
"Homam",
"Iklil",
"Intercrus",
"Izar",
"Jabbah",
"Jishui",
"Kaffaljidhma",
"Kang",
"Kaus Australis",
"Kaus Borealis",
"Kaus Media",
"Keid",
"Khambalia",
"Kitalpha",
"Kochab",
"Kornephoros",
"Kraz",
"Kuma",
"Kurhah",
"La Superba",
"Larawag",
"Lesath",
"Libertas",
"Lich",
"Lilii Borea",
"Maasym",
"Mahasim",
"Maia",
"Marfark",
"Marfik",
"Markab",
"Markeb",
"Marsic",
"Matar",
"Mebsuta",
"Megrez",
"Meissa",
"Mekbuda",
"Meleph",
"Menkalinan",
"Menkar",
"Menkent",
"Menkib",
"Merak",
"Merga",
"Meridiana",
"Merope",
"Mesarthim",
"Miaplacidus",
"Mimosa",
"Minchir",
"Minelauva",
"Mintaka",
"Mira",
"Mirach",
"Miram",
"Mirfak",
"Mirzam",
"Misam",
"Mizar",
"Mothallah",
"Muliphein",
"Muphrid",
"Muscida",
"Musica",
"Nahn",
"Naos",
"Nashira",
"Navi",
"Nekkar",
"Nembus",
"Nihal",
"Nunki",
"Nusakan",
"Ogma",
"Okab",
"Peacock",
"Phact",
"Phecda",
"Pherkad",
"Piautos",
"Pipirima",
"Pleione",
"Polaris",
"Polaris Australis",
"Polis",
"Pollux",
"Porrima",
"Praecipua",
"Prima Hyadum",
"Procyon",
"Propus",
"Proxima Centauri",
"Ran",
"Rana",
"Rasalas",
"Rasalgethi",
"Rasalhague",
"Rastaban",
"Regor",
"Regulus",
"Revati",
"Rigel",
"Rigil Kentaurus",
"Rotanev",
"Ruchbah",
"Rukbat",
"Sabik",
"Saclateni",
"Sadachbia",
"Sadalbari",
"Sadalmelik",
"Sadalsuud",
"Sadr",
"Saiph",
"Salm",
"Sargas",
"Sarin",
"Sarir",
"Sceptrum",
"Scheat",
"Schedar",
"Secunda Hyadum",
"Segin",
"Seginus",
"Sham",
"Shaula",
"Sheliak",
"Sheratan",
"Sirius",
"Situla",
"Skat",
"Spica",
"Sualocin",
"Subra",
"Suhail",
"Sulafat",
"Syrma",
"Tabit",
"Taiyangshou",
"Taiyi",
"Talitha",
"Tania Australis",
"Tania Borealis",
"Tarazed",
"Tarf",
"Taygeta",
"Tegmine",
"Tejat",
"Terebellum",
"Thabit",
"Theemin",
"Thuban",
"Tiaki",
"Tianguan",
"Tianyi",
"Titawin",
"Tonatiuh",
"Torcular",
"Tureis",
"Ukdah",
"Unukalhai",
"Unurgunite",
"Vega",
"Veritate",
"Vindemiatrix",
"Wasat",
"Wazn",
"Wezen",
"Wurren",
"Xamidimura",
"Xuange",
"Yed Posterior",
"Yed Prior",
"Yildun",
"Zaniah",
"Zaurak",
"Zavijava",
"Zhang",
"Zibal",
"Zosma",
"Zubenelgenubi",
"Zubenelhakrabi",
"Zubeneschamali",
"Pi Persei",
"Beta Magellan",
"Beta Renner",
"Dragon's Egg",
"Rao",
"LV-426",
"Perdide",
"Delta Pavonis",
"Omicron Persei",
"FI Virginis",
"V1216 Sagittarii",
"HH Andromedae",
"Gliese 876",
"Tau Ceti",
"Alpha Centauri",
"Wolf 359 ",
]
GREEK_LETTERS = [
"Alpha",
"Beta",
"Chi",
"Delta",
"Epsilon",
"Eta",
"Gamma",
"Iota",
"Kappa",
"Lambda",
"Mu",
"Nu",
"Omega",
"Omicron",
"Phi",
"Pi",
"Psi",
"Rho",
"Sigma",
"Tau",
"Theta",
"Upsilon",
"Xi",
"Zeta",
]
CONSTELLATIONS = [
"Andromedae",
"Aquarii",
"Aquilae",
"Arae",
"Arietis",
"Aurigae",
"Bootis",
"Cancri",
"Canis",
"Canum",
"Capricorni",
"Carinae",
"Cassiopeiae",
"Centauri",
"Cephei",
"Ceti",
"Columbae",
"Comae",
"Coronae",
"Corvi",
"Crateris",
"Crucis",
"Cygni",
"Delphini",
"Draconis",
"Equulei",
"Eridani",
"Fornacis",
"Geminorum",
"Gruis",
"Herculis",
"Hydrae",
"Leonis",
"Leporis",
"Librae",
"Lyncis",
"Lyrae",
"Octantis",
"Ophiuchi",
"Orionis",
"Pavonis",
"Pegasi",
"Persei",
"Phoenicis",
"Piscis",
"Piscium",
"Puppis",
"Sagittae",
"Sagittarii",
"Scorpii",
"Serpentis",
"Tauri",
"Trianguli",
"Ursae",
"Velorum",
"Virginis",
"Vulpeculae",
]
def random_new_star():
return " ".join([random.choice(GREEK_LETTERS), random.choice(CONSTELLATIONS)])
def random_new_stars(n: int):
"""return a shuffled list of `n` unique random star names."""
names = [random_new_star() for _ in range(n + 10)]
names = list(set(names))
return random.sample(names, n)
def random_star_names(n_total: int, min_random=0):
n_named = min(n_total - min_random, len(NAMED_STARS))
n_random = n_total - n_named
names = random.sample(NAMED_STARS, n_named) + random_new_stars(n_random)
random.shuffle(names)
return names
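# --- illustrative usage (sketch; not part of the original module) ---
# print(random_star_names(300, min_random=50)[:3])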
| [
"tsvikas@gmail.com"
] | tsvikas@gmail.com |
a612fd5ab164d6e0f144cbdf6d58cb2e12dbcb58 | c56221bfadb1536b707da284ffd56a00539c3384 | /python_notes/day11/test2.py | 0797a45403cc76a914803dd18fa1da6e9b6f276a | [] | no_license | sunsharing-note/python | 4f33fb657075780cd8f780eebeb0e197c386beee | f18ddad3e7555386045284ac98183efe4a2a82d3 | refs/heads/master | 2020-06-09T09:26:35.185019 | 2019-06-24T01:57:02 | 2019-06-24T01:57:02 | 144,146,370 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #! /usr/bin/env python
# -*- encoding: utf-8 -*-
def f(): # calling f() returns a generator object
print("test1")
yield 1
print("test2")
yield 2
g = f()
#next(g)
#next(g)
for i in g: # the for loop calls next() on the generator
    print(i) # the yielded values: first 1, then 2
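# Equivalent manual iteration (sketch):
#   g = f()
#   next(g)   # prints "test1", returns 1
#   next(g)   # prints "test2", returns 2
#   next(g)   # raises StopIteration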
| [
"hua.zhou@bqrzzl.com"
] | hua.zhou@bqrzzl.com |
411b685eee4c1a0073044de1835cf6c13078974d | efd3c596a878f3ec499e3fb35949c6d8926a4ca3 | /DataProcess/tf2_thread.py | 9488248a0864e5f47550cfe678d7e0da1386655c | [] | no_license | taoshiqian/ImageProcessTensorflow | b85b047f8b3556a86bea9ba258e7acad881b9bfc | 319b48bf50a55429d06b26f9056f6c53a6ef79ae | refs/heads/master | 2020-03-19T02:59:00.289334 | 2018-06-26T13:54:23 | 2018-06-26T13:54:23 | 135,682,424 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | import tensorflow as tf
import numpy as np
import threading
import time
# print this worker's ID once per second
def MyLoop(coord, worker_id):
    # keep running until a stop is requested
    while not coord.should_stop():
        # occasionally ask all threads to stop
        if np.random.rand() < 0.1:
            print("Stopping from id: %d" % worker_id)
            # notify the other threads to stop; their should_stop() flips to True
            coord.request_stop()
        else:
            # report this worker's ID
            print("Working on id: %d" % worker_id)
            # sleep for one second
            time.sleep(1)
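# Coordinator pattern: every worker polls coord.should_stop(), and any worker
# can end the whole group by calling coord.request_stop().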
if __name__ == '__main__':
    # Coordinator: used to coordinate multiple threads
coord = tf.train.Coordinator()
    # create 5 worker threads
threads = [
threading.Thread(target=MyLoop, args=(coord,i,)) for i in range(5)
]
    # start all threads
for t in threads:
t.start()
    # wait for all threads to exit; join(<list of threads>) blocks until the listed threads terminate.
coord.join(threads) | [
"17210240200@fudan.edu.cn"
] | 17210240200@fudan.edu.cn |
11e1b75a57e9bc7732942119e5dbf9bfc029fa0b | f62fd455e593a7ad203a5c268e23129473d968b6 | /senlin-3.0.1/senlin/tests/unit/engine/actions/test_action_base.py | 058e15a75749ccf0a3430225113222d3bedc14a0 | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 35,544 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from senlin.common import consts
from senlin.common import exception
from senlin.common import utils as common_utils
from senlin.engine.actions import base as ab
from senlin.engine import cluster as cluster_mod
from senlin.engine import environment
from senlin.engine import event as EVENT
from senlin.engine import node as node_mod
from senlin.objects import action as ao
from senlin.objects import cluster_policy as cpo
from senlin.objects import dependency as dobj
from senlin.policies import base as policy_mod
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
from senlin.tests.unit import fakes
CLUSTER_ID = 'e1cfd82b-dc95-46ad-86e8-37864d7be1cd'
OBJID = '571fffb8-f41c-4cbc-945c-cb2937d76f19'
OWNER_ID = 'c7114713-ee68-409d-ba5d-0560a72a386c'
ACTION_ID = '4c2cead2-fd74-418a-9d12-bd2d9bd7a812'
USER_ID = '3c4d64baadcd437d8dd49054899e73dd'
PROJECT_ID = 'cf7a6ae28dde4f46aa8fe55d318a608f'
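# Fixed UUID-style constants shared by the tests below (arbitrary but stable).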
class DummyAction(ab.Action):
def __init__(self, target, action, context, **kwargs):
super(DummyAction, self).__init__(target, action, context, **kwargs)
class ActionBaseTest(base.SenlinTestCase):
def setUp(self):
super(ActionBaseTest, self).setUp()
self.ctx = utils.dummy_context(project=PROJECT_ID, user_id=USER_ID)
self.action_values = {
'name': 'FAKE_NAME',
'cause': 'FAKE_CAUSE',
'owner': OWNER_ID,
'interval': 60,
'start_time': 0,
'end_time': 0,
'timeout': 120,
'status': 'FAKE_STATUS',
'status_reason': 'FAKE_STATUS_REASON',
'inputs': {'param': 'value'},
'outputs': {'key': 'output_value'},
'created_at': timeutils.utcnow(True),
'updated_at': None,
'data': {'data_key': 'data_value'},
'user': USER_ID,
'project': PROJECT_ID,
}
def _verify_new_action(self, obj, target, action):
self.assertIsNone(obj.id)
self.assertEqual('', obj.name)
self.assertEqual(target, obj.target)
self.assertEqual(action, obj.action)
self.assertEqual('', obj.cause)
self.assertIsNone(obj.owner)
self.assertEqual(-1, obj.interval)
self.assertIsNone(obj.start_time)
self.assertIsNone(obj.end_time)
self.assertEqual(cfg.CONF.default_action_timeout, obj.timeout)
self.assertEqual('INIT', obj.status)
self.assertEqual('', obj.status_reason)
self.assertEqual({}, obj.inputs)
self.assertEqual({}, obj.outputs)
self.assertIsNone(obj.created_at)
self.assertIsNone(obj.updated_at)
self.assertEqual({}, obj.data)
@mock.patch.object(node_mod.Node, 'load')
@mock.patch.object(cluster_mod.Cluster, 'load')
def test_action_new(self, mock_n_load, mock_c_load):
for action in ['CLUSTER_CREATE', 'NODE_CREATE', 'WHAT_EVER']:
obj = ab.Action(OBJID, action, self.ctx)
self._verify_new_action(obj, OBJID, action)
def test_action_init_with_values(self):
values = copy.deepcopy(self.action_values)
values['id'] = 'FAKE_ID'
values['created_at'] = 'FAKE_CREATED_TIME'
values['updated_at'] = 'FAKE_UPDATED_TIME'
obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
self.assertEqual('FAKE_ID', obj.id)
self.assertEqual('FAKE_NAME', obj.name)
self.assertEqual(OBJID, obj.target)
self.assertEqual('FAKE_CAUSE', obj.cause)
self.assertEqual(OWNER_ID, obj.owner)
self.assertEqual(60, obj.interval)
self.assertEqual(0, obj.start_time)
self.assertEqual(0, obj.end_time)
self.assertEqual(120, obj.timeout)
self.assertEqual('FAKE_STATUS', obj.status)
self.assertEqual('FAKE_STATUS_REASON', obj.status_reason)
self.assertEqual({'param': 'value'}, obj.inputs)
self.assertEqual({'key': 'output_value'}, obj.outputs)
self.assertEqual('FAKE_CREATED_TIME', obj.created_at)
self.assertEqual('FAKE_UPDATED_TIME', obj.updated_at)
self.assertEqual({'data_key': 'data_value'}, obj.data)
def test_action_store_for_create(self):
values = copy.deepcopy(self.action_values)
obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
self.assertEqual(common_utils.isotime(values['created_at']),
common_utils.isotime(obj.created_at))
self.assertIsNone(obj.updated_at)
# store for creation
res = obj.store(self.ctx)
self.assertIsNotNone(res)
self.assertEqual(obj.id, res)
self.assertIsNotNone(obj.created_at)
self.assertIsNone(obj.updated_at)
def test_action_store_for_update(self):
values = copy.deepcopy(self.action_values)
obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
obj_id = obj.store(self.ctx)
self.assertIsNotNone(obj_id)
self.assertIsNotNone(obj.created_at)
self.assertIsNone(obj.updated_at)
# store for creation
res = obj.store(self.ctx)
self.assertIsNotNone(res)
self.assertEqual(obj_id, res)
self.assertEqual(obj.id, res)
self.assertIsNotNone(obj.created_at)
self.assertIsNotNone(obj.updated_at)
def test_from_db_record(self):
values = copy.deepcopy(self.action_values)
obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
obj.store(self.ctx)
record = ao.Action.get(self.ctx, obj.id)
action_obj = ab.Action._from_object(record)
self.assertIsInstance(action_obj, ab.Action)
self.assertEqual(obj.id, action_obj.id)
self.assertEqual(obj.action, action_obj.action)
self.assertEqual(obj.name, action_obj.name)
self.assertEqual(obj.target, action_obj.target)
self.assertEqual(obj.cause, action_obj.cause)
self.assertEqual(obj.owner, action_obj.owner)
self.assertEqual(obj.interval, action_obj.interval)
self.assertEqual(obj.start_time, action_obj.start_time)
self.assertEqual(obj.end_time, action_obj.end_time)
self.assertEqual(obj.timeout, action_obj.timeout)
self.assertEqual(obj.status, action_obj.status)
self.assertEqual(obj.status_reason, action_obj.status_reason)
self.assertEqual(obj.inputs, action_obj.inputs)
self.assertEqual(obj.outputs, action_obj.outputs)
self.assertEqual(common_utils.isotime(obj.created_at),
common_utils.isotime(action_obj.created_at))
self.assertEqual(obj.updated_at, action_obj.updated_at)
self.assertEqual(obj.data, action_obj.data)
self.assertEqual(obj.user, action_obj.user)
self.assertEqual(obj.project, action_obj.project)
self.assertEqual(obj.domain, action_obj.domain)
def test_from_db_record_with_empty_fields(self):
values = copy.deepcopy(self.action_values)
del values['inputs']
del values['outputs']
obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
obj.store(self.ctx)
record = ao.Action.get(self.ctx, obj.id)
action_obj = ab.Action._from_object(record)
self.assertEqual({}, action_obj.inputs)
self.assertEqual({}, action_obj.outputs)
def test_load(self):
values = copy.deepcopy(self.action_values)
obj = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
obj.store(self.ctx)
result = ab.Action.load(self.ctx, obj.id, None)
# no need to do a thorough test here
self.assertEqual(obj.id, result.id)
self.assertEqual(obj.action, result.action)
db_action = ao.Action.get(self.ctx, obj.id)
result = ab.Action.load(self.ctx, None, db_action)
# no need to do a thorough test here
self.assertEqual(obj.id, result.id)
self.assertEqual(obj.action, result.action)
def test_load_not_found(self):
# not found due to bad identity
ex = self.assertRaises(exception.ResourceNotFound,
ab.Action.load,
self.ctx, 'non-existent', None)
self.assertEqual("The action 'non-existent' could not be "
"found.", six.text_type(ex))
# not found due to no object
self.patchobject(ao.Action, 'get', return_value=None)
ex = self.assertRaises(exception.ResourceNotFound,
ab.Action.load,
self.ctx, 'whatever', None)
self.assertEqual("The action 'whatever' could not be found.",
six.text_type(ex))
@mock.patch.object(ab.Action, 'store')
def test_action_create(self, mock_store):
mock_store.return_value = 'FAKE_ID'
result = ab.Action.create(self.ctx, OBJID, 'CLUSTER_DANCE',
name='test')
self.assertEqual('FAKE_ID', result)
mock_store.assert_called_once_with(self.ctx)
def test_action_delete(self):
result = ab.Action.delete(self.ctx, 'non-existent')
self.assertIsNone(result)
values = copy.deepcopy(self.action_values)
action1 = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
action1.store(self.ctx)
result = ab.Action.delete(self.ctx, action1.id)
self.assertIsNone(result)
@mock.patch.object(ao.Action, 'delete')
def test_action_delete_db_call(self, mock_call):
# test db api call
ab.Action.delete(self.ctx, 'FAKE_ID')
mock_call.assert_called_once_with(self.ctx, 'FAKE_ID')
@mock.patch.object(ao.Action, 'signal')
def test_action_signal_bad_command(self, mock_call):
values = copy.deepcopy(self.action_values)
action1 = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
action1.store(self.ctx)
result = action1.signal('BOGUS')
self.assertIsNone(result)
self.assertEqual(0, mock_call.call_count)
@mock.patch.object(ao.Action, 'signal')
def test_action_signal_cancel(self, mock_call):
values = copy.deepcopy(self.action_values)
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, **values)
action.store(self.ctx)
expected = [action.INIT, action.WAITING, action.READY, action.RUNNING]
for status in expected:
action.status = status
result = action.signal(action.SIG_CANCEL)
self.assertIsNone(result)
self.assertEqual(1, mock_call.call_count)
mock_call.reset_mock()
invalid = [action.SUSPENDED, action.SUCCEEDED, action.CANCELLED,
action.FAILED]
for status in invalid:
action.status = status
result = action.signal(action.SIG_CANCEL)
self.assertIsNone(result)
self.assertEqual(0, mock_call.call_count)
mock_call.reset_mock()
@mock.patch.object(ao.Action, 'signal')
def test_action_signal_suspend(self, mock_call):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID)
expected = [action.RUNNING]
for status in expected:
action.status = status
result = action.signal(action.SIG_SUSPEND)
self.assertIsNone(result)
self.assertEqual(1, mock_call.call_count)
mock_call.reset_mock()
invalid = [action.INIT, action.WAITING, action.READY, action.SUSPENDED,
action.SUCCEEDED, action.CANCELLED, action.FAILED]
for status in invalid:
action.status = status
result = action.signal(action.SIG_SUSPEND)
self.assertIsNone(result)
self.assertEqual(0, mock_call.call_count)
mock_call.reset_mock()
@mock.patch.object(ao.Action, 'signal')
def test_action_signal_resume(self, mock_call):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id=ACTION_ID)
expected = [action.SUSPENDED]
for status in expected:
action.status = status
result = action.signal(action.SIG_RESUME)
self.assertIsNone(result)
self.assertEqual(1, mock_call.call_count)
mock_call.reset_mock()
invalid = [action.INIT, action.WAITING, action.READY, action.RUNNING,
action.SUCCEEDED, action.CANCELLED, action.FAILED]
for status in invalid:
action.status = status
result = action.signal(action.SIG_RESUME)
self.assertIsNone(result)
self.assertEqual(0, mock_call.call_count)
mock_call.reset_mock()
def test_execute_default(self):
action = ab.Action.__new__(DummyAction, OBJID, 'BOOM', self.ctx)
self.assertRaises(NotImplementedError,
action.execute)
@mock.patch.object(EVENT, 'info')
@mock.patch.object(EVENT, 'error')
@mock.patch.object(EVENT, 'warning')
@mock.patch.object(ao.Action, 'mark_succeeded')
@mock.patch.object(ao.Action, 'mark_failed')
@mock.patch.object(ao.Action, 'mark_cancelled')
@mock.patch.object(ao.Action, 'abandon')
def test_set_status(self, mock_abandon, mark_cancel, mark_fail,
mark_succeed, mock_event, mock_error, mock_info):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID')
action.entity = mock.Mock()
action.set_status(action.RES_OK, 'FAKE_REASON')
self.assertEqual(action.SUCCEEDED, action.status)
self.assertEqual('FAKE_REASON', action.status_reason)
mark_succeed.assert_called_once_with(action.context, 'FAKE_ID',
mock.ANY)
action.set_status(action.RES_ERROR, 'FAKE_ERROR')
self.assertEqual(action.FAILED, action.status)
self.assertEqual('FAKE_ERROR', action.status_reason)
mark_fail.assert_called_once_with(action.context, 'FAKE_ID', mock.ANY,
'FAKE_ERROR')
mark_fail.reset_mock()
action.set_status(action.RES_TIMEOUT, 'TIMEOUT_ERROR')
self.assertEqual(action.FAILED, action.status)
self.assertEqual('TIMEOUT_ERROR', action.status_reason)
mark_fail.assert_called_once_with(action.context, 'FAKE_ID', mock.ANY,
'TIMEOUT_ERROR')
mark_fail.reset_mock()
action.set_status(action.RES_CANCEL, 'CANCELLED')
self.assertEqual(action.CANCELLED, action.status)
self.assertEqual('CANCELLED', action.status_reason)
mark_cancel.assert_called_once_with(action.context, 'FAKE_ID',
mock.ANY)
mark_fail.reset_mock()
action.set_status(action.RES_RETRY, 'BUSY')
self.assertEqual(action.READY, action.status)
self.assertEqual('BUSY', action.status_reason)
mock_abandon.assert_called_once_with(action.context, 'FAKE_ID')
@mock.patch.object(EVENT, 'info')
@mock.patch.object(EVENT, 'error')
@mock.patch.object(EVENT, 'warning')
@mock.patch.object(ao.Action, 'mark_succeeded')
@mock.patch.object(ao.Action, 'mark_failed')
@mock.patch.object(ao.Action, 'abandon')
def test_set_status_dump_event(self, mock_abandon, mark_fail,
mark_succeed, mock_warning, mock_error,
mock_info):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID')
action.entity = mock.Mock()
action.set_status(action.RES_OK, 'FAKE_SUCCEEDED')
mock_info.assert_called_once_with(action, consts.PHASE_END,
'FAKE_SUCCEEDED')
action.set_status(action.RES_ERROR, 'FAKE_ERROR')
mock_error.assert_called_once_with(action, consts.PHASE_ERROR,
'FAKE_ERROR')
action.set_status(action.RES_RETRY, 'FAKE_RETRY')
mock_warning.assert_called_once_with(action, consts.PHASE_ERROR,
'FAKE_RETRY')
@mock.patch.object(EVENT, 'info')
@mock.patch.object(EVENT, 'error')
@mock.patch.object(EVENT, 'warning')
@mock.patch.object(ao.Action, 'mark_succeeded')
@mock.patch.object(ao.Action, 'mark_failed')
@mock.patch.object(ao.Action, 'abandon')
def test_set_status_reason_is_none(self, mock_abandon, mark_fail,
mark_succeed, mock_warning, mock_error,
mock_info):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID')
action.entity = mock.Mock()
action.set_status(action.RES_OK)
mock_info.assert_called_once_with(action, consts.PHASE_END,
'SUCCEEDED')
action.set_status(action.RES_ERROR)
mock_error.assert_called_once_with(action, consts.PHASE_ERROR,
'ERROR')
action.set_status(action.RES_RETRY)
mock_warning.assert_called_once_with(action, consts.PHASE_ERROR,
'RETRY')
@mock.patch.object(ao.Action, 'check_status')
def test_get_status(self, mock_get):
mock_get.return_value = 'FAKE_STATUS'
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
action.id = 'FAKE_ID'
res = action.get_status()
self.assertEqual('FAKE_STATUS', res)
self.assertEqual('FAKE_STATUS', action.status)
mock_get.assert_called_once_with(action.context, 'FAKE_ID', mock.ANY)
@mock.patch.object(ab, 'wallclock')
def test_is_timeout(self, mock_time):
action = ab.Action.__new__(DummyAction, 'OBJ', 'BOOM', self.ctx)
action.start_time = 1
action.timeout = 10
mock_time.return_value = 9
self.assertFalse(action.is_timeout())
mock_time.return_value = 10
self.assertFalse(action.is_timeout())
mock_time.return_value = 11
self.assertFalse(action.is_timeout())
mock_time.return_value = 12
self.assertTrue(action.is_timeout())
@mock.patch.object(EVENT, 'debug')
def test_check_signal_timeout(self, mock_debug):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx, id='FAKE_ID',
timeout=10)
action.entity = mock.Mock()
self.patchobject(action, 'is_timeout', return_value=True)
res = action._check_signal()
self.assertEqual(action.RES_TIMEOUT, res)
@mock.patch.object(ao.Action, 'signal_query')
def test_check_signal_signals_caught(self, mock_query):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
action.id = 'FAKE_ID'
action.timeout = 100
self.patchobject(action, 'is_timeout', return_value=False)
sig_cmd = mock.Mock()
mock_query.return_value = sig_cmd
res = action._check_signal()
self.assertEqual(sig_cmd, res)
mock_query.assert_called_once_with(action.context, 'FAKE_ID')
@mock.patch.object(ao.Action, 'signal_query')
def test_is_cancelled(self, mock_query):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
action.id = 'FAKE_ID'
action.timeout = 100
self.patchobject(action, 'is_timeout', return_value=False)
mock_query.return_value = action.SIG_CANCEL
res = action.is_cancelled()
self.assertTrue(res)
mock_query.assert_called_once_with(action.context, 'FAKE_ID')
mock_query.reset_mock()
mock_query.return_value = None
res = action.is_cancelled()
self.assertFalse(res)
mock_query.assert_called_once_with(action.context, 'FAKE_ID')
@mock.patch.object(ao.Action, 'signal_query')
def test_is_suspended(self, mock_query):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
action.id = 'FAKE_ID'
action.timeout = 100
self.patchobject(action, 'is_timeout', return_value=False)
mock_query.return_value = action.SIG_SUSPEND
res = action.is_suspended()
self.assertTrue(res)
mock_query.assert_called_once_with(action.context, 'FAKE_ID')
mock_query.reset_mock()
mock_query.return_value = 'OTHERS'
res = action.is_suspended()
self.assertFalse(res)
mock_query.assert_called_once_with(action.context, 'FAKE_ID')
@mock.patch.object(ao.Action, 'signal_query')
def test_is_resumed(self, mock_query):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
action.id = 'FAKE_ID'
action.timeout = 100
self.patchobject(action, 'is_timeout', return_value=False)
mock_query.return_value = action.SIG_RESUME
res = action.is_resumed()
self.assertTrue(res)
mock_query.assert_called_once_with(action.context, 'FAKE_ID')
mock_query.reset_mock()
mock_query.return_value = 'OTHERS'
res = action.is_resumed()
self.assertFalse(res)
mock_query.assert_called_once_with(action.context, 'FAKE_ID')
@mock.patch.object(cpo.ClusterPolicy, 'get_all')
def test_policy_check_target_invalid(self, mock_load):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
res = action.policy_check('FAKE_CLUSTER', 'WHEN')
self.assertIsNone(res)
self.assertEqual(0, mock_load.call_count)
@mock.patch.object(cpo.ClusterPolicy, 'get_all')
def test_policy_check_no_bindings(self, mock_load):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
mock_load.return_value = []
res = action.policy_check('FAKE_CLUSTER', 'BEFORE')
self.assertIsNone(res)
self.assertEqual(policy_mod.CHECK_OK, action.data['status'])
mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER',
sort='priority',
filters={'enabled': True})
@mock.patch.object(dobj.Dependency, 'get_depended')
@mock.patch.object(dobj.Dependency, 'get_dependents')
def test_action_to_dict(self, mock_dep_by, mock_dep_on):
mock_dep_on.return_value = ['ACTION_1']
mock_dep_by.return_value = ['ACTION_2']
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx,
**self.action_values)
action.id = 'FAKE_ID'
ts = common_utils.isotime(self.action_values['created_at'])
expected = {
'id': 'FAKE_ID',
'name': 'FAKE_NAME',
'action': 'OBJECT_ACTION',
'target': OBJID,
'cause': 'FAKE_CAUSE',
'owner': OWNER_ID,
'interval': 60,
'start_time': 0,
'end_time': 0,
'timeout': 120,
'status': 'FAKE_STATUS',
'status_reason': 'FAKE_STATUS_REASON',
'inputs': {'param': 'value'},
'outputs': {'key': 'output_value'},
'depends_on': ['ACTION_1'],
'depended_by': ['ACTION_2'],
'created_at': ts,
'updated_at': None,
'data': {'data_key': 'data_value'},
'user': USER_ID,
'project': PROJECT_ID,
}
res = action.to_dict()
self.assertEqual(expected, res)
mock_dep_on.assert_called_once_with(action.context, 'FAKE_ID')
mock_dep_by.assert_called_once_with(action.context, 'FAKE_ID')
class ActionPolicyCheckTest(base.SenlinTestCase):
def setUp(self):
super(ActionPolicyCheckTest, self).setUp()
self.ctx = utils.dummy_context()
environment.global_env().register_policy('DummyPolicy',
fakes.TestPolicy)
def _create_policy(self):
values = {
'user': self.ctx.user,
'project': self.ctx.project,
}
policy = fakes.TestPolicy('DummyPolicy', 'test-policy', **values)
policy.store(self.ctx)
return policy
def _create_cp_binding(self, cluster_id, policy_id):
return cpo.ClusterPolicy(cluster_id=cluster_id, policy_id=policy_id,
enabled=True, id=uuidutils.generate_uuid(),
last_op=None)
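    # Note (added for clarity): the two helpers above build a stored test
    # policy and an in-memory, enabled cluster-policy binding; several tests
    # below assert on the binding's last_op to tell whether policy_check
    # refreshed the cooldown timestamp.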
@mock.patch.object(policy_mod.Policy, 'post_op')
@mock.patch.object(policy_mod.Policy, 'pre_op')
@mock.patch.object(cpo.ClusterPolicy, 'get_all')
@mock.patch.object(policy_mod.Policy, 'load')
def test_policy_check_missing_target(self, mock_load, mock_load_all,
mock_pre_op, mock_post_op):
cluster_id = CLUSTER_ID
# Note: policy is mocked
spec = {
'type': 'TestPolicy',
'version': '1.0',
'properties': {'KEY2': 5},
}
policy = fakes.TestPolicy('test-policy', spec)
policy.id = uuidutils.generate_uuid()
policy.TARGET = [('BEFORE', 'OBJECT_ACTION')]
# Note: policy binding is created but not stored
pb = self._create_cp_binding(cluster_id, policy.id)
self.assertIsNone(pb.last_op)
mock_load_all.return_value = [pb]
mock_load.return_value = policy
mock_pre_op.return_value = None
mock_post_op.return_value = None
action = ab.Action(cluster_id, 'OBJECT_ACTION_1', self.ctx)
res = action.policy_check(cluster_id, 'AFTER')
self.assertIsNone(res)
self.assertEqual(policy_mod.CHECK_OK, action.data['status'])
mock_load_all.assert_called_once_with(
action.context, cluster_id, sort='priority',
filters={'enabled': True})
mock_load.assert_called_once_with(action.context, policy.id)
# last_op was updated anyway
self.assertIsNotNone(pb.last_op)
        # neither pre_op nor post_op was called, because the target does not match
self.assertEqual(0, mock_pre_op.call_count)
self.assertEqual(0, mock_post_op.call_count)
def test__check_result_true(self):
cluster_id = CLUSTER_ID
action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx)
action.data['status'] = policy_mod.CHECK_OK
action.data['reason'] = "Completed policy checking."
res = action._check_result('FAKE_POLICY_NAME')
self.assertTrue(res)
def test__check_result_false(self):
cluster_id = CLUSTER_ID
action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx)
action.data['status'] = policy_mod.CHECK_ERROR
reason = ("Policy '%s' cooldown is still in progress." %
'FAKE_POLICY_2')
action.data['reason'] = reason
res = action._check_result('FAKE_POLICY_NAME')
reason = ("Failed policy '%(name)s': %(reason)s"
) % {'name': 'FAKE_POLICY_NAME', 'reason': reason}
self.assertFalse(res)
@mock.patch.object(cpo.ClusterPolicy, 'get_all')
@mock.patch.object(policy_mod.Policy, 'load')
def test_policy_check_pre_op(self, mock_load, mock_load_all):
cluster_id = CLUSTER_ID
# Note: policy is mocked
spec = {
'type': 'TestPolicy',
'version': '1.0',
'properties': {'KEY2': 5},
}
policy = fakes.TestPolicy('test-policy', spec)
policy.id = uuidutils.generate_uuid()
policy.TARGET = [('BEFORE', 'OBJECT_ACTION')]
# Note: policy binding is created but not stored
pb = self._create_cp_binding(cluster_id, policy.id)
self.assertIsNone(pb.last_op)
mock_load_all.return_value = [pb]
mock_load.return_value = policy
entity = mock.Mock()
action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx)
action.entity = entity
res = action.policy_check(cluster_id, 'BEFORE')
self.assertIsNone(res)
self.assertEqual(policy_mod.CHECK_OK, action.data['status'])
mock_load_all.assert_called_once_with(
action.context, cluster_id, sort='priority',
filters={'enabled': True})
mock_load.assert_called_once_with(action.context, policy.id)
# last_op was not updated
self.assertIsNone(pb.last_op)
@mock.patch.object(cpo.ClusterPolicy, 'get_all')
@mock.patch.object(policy_mod.Policy, 'load')
def test_policy_check_post_op(self, mock_load, mock_load_all):
cluster_id = CLUSTER_ID
# Note: policy is mocked
policy = mock.Mock(id=uuidutils.generate_uuid(), cooldown=0,
TARGET=[('AFTER', 'OBJECT_ACTION')])
# Note: policy binding is created but not stored
pb = self._create_cp_binding(cluster_id, policy.id)
self.assertIsNone(pb.last_op)
mock_load_all.return_value = [pb]
mock_load.return_value = policy
entity = mock.Mock()
action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx)
action.entity = entity
res = action.policy_check(CLUSTER_ID, 'AFTER')
self.assertIsNone(res)
self.assertEqual(policy_mod.CHECK_OK, action.data['status'])
mock_load_all.assert_called_once_with(
action.context, cluster_id, sort='priority',
filters={'enabled': True})
mock_load.assert_called_once_with(action.context, policy.id)
# last_op was updated for POST check
self.assertIsNotNone(pb.last_op)
        # post_op was called, but pre_op was not called
self.assertEqual(0, policy.pre_op.call_count)
policy.post_op.assert_called_once_with(cluster_id, action)
@mock.patch.object(cpo.ClusterPolicy, 'cooldown_inprogress')
@mock.patch.object(cpo.ClusterPolicy, 'get_all')
@mock.patch.object(policy_mod.Policy, 'load')
def test_policy_check_cooldown_inprogress(self, mock_load, mock_load_all,
mock_inprogress):
cluster_id = CLUSTER_ID
# Note: policy is mocked
policy_id = uuidutils.generate_uuid()
policy = mock.Mock(id=policy_id, TARGET=[('AFTER', 'OBJECT_ACTION')])
# Note: policy binding is created but not stored
pb = self._create_cp_binding(cluster_id, policy.id)
mock_inprogress.return_value = True
mock_load_all.return_value = [pb]
mock_load.return_value = policy
action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx)
# Do it
res = action.policy_check(CLUSTER_ID, 'AFTER')
self.assertIsNone(res)
self.assertEqual(policy_mod.CHECK_ERROR, action.data['status'])
self.assertEqual(
'Policy %s cooldown is still in progress.' % policy_id,
six.text_type(action.data['reason']))
mock_load_all.assert_called_once_with(
action.context, cluster_id, sort='priority',
filters={'enabled': True})
mock_load.assert_called_once_with(action.context, policy.id)
# last_op was updated for POST check
self.assertIsNotNone(pb.last_op)
        # neither pre_op nor post_op was called, due to cooldown
self.assertEqual(0, policy.pre_op.call_count)
self.assertEqual(0, policy.post_op.call_count)
@mock.patch.object(cpo.ClusterPolicy, 'get_all')
@mock.patch.object(policy_mod.Policy, 'load')
@mock.patch.object(ab.Action, '_check_result')
def test_policy_check_abort_in_middle(self, mock_check, mock_load,
mock_load_all):
cluster_id = CLUSTER_ID
# Note: both policies are mocked
policy1 = mock.Mock(id=uuidutils.generate_uuid(), cooldown=0,
TARGET=[('AFTER', 'OBJECT_ACTION')])
policy1.name = 'P1'
policy2 = mock.Mock(id=uuidutils.generate_uuid(), cooldown=0,
TARGET=[('AFTER', 'OBJECT_ACTION')])
policy2.name = 'P2'
action = ab.Action(cluster_id, 'OBJECT_ACTION', self.ctx)
# Note: policy binding is created but not stored
pb1 = self._create_cp_binding(cluster_id, policy1.id)
pb2 = self._create_cp_binding(cluster_id, policy2.id)
mock_load_all.return_value = [pb1, pb2]
# mock return value for two calls
mock_load.side_effect = [policy1, policy2]
mock_check.side_effect = [False, True]
res = action.policy_check(cluster_id, 'AFTER')
self.assertIsNone(res)
# post_op from policy1 was called, but post_op from policy2 was not
policy1.post_op.assert_called_once_with(cluster_id, action)
self.assertEqual(0, policy2.post_op.call_count)
mock_load_all.assert_called_once_with(
action.context, cluster_id, sort='priority',
filters={'enabled': True})
calls = [mock.call(action.context, policy1.id)]
mock_load.assert_has_calls(calls)
class ActionProcTest(base.SenlinTestCase):
def setUp(self):
super(ActionProcTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch.object(EVENT, 'info')
@mock.patch.object(ab.Action, 'load')
@mock.patch.object(ao.Action, 'mark_succeeded')
def test_action_proc_successful(self, mock_mark, mock_load,
mock_event_info):
action = ab.Action(OBJID, 'OBJECT_ACTION', self.ctx)
mock_obj = mock.Mock()
action.entity = mock_obj
self.patchobject(action, 'execute',
return_value=(action.RES_OK, 'BIG SUCCESS'))
mock_status = self.patchobject(action, 'set_status')
mock_load.return_value = action
res = ab.ActionProc(self.ctx, 'ACTION_ID')
self.assertTrue(res)
mock_load.assert_called_once_with(self.ctx, action_id='ACTION_ID',
project_safe=False)
mock_event_info.assert_called_once_with(action, 'start')
mock_status.assert_called_once_with(action.RES_OK, 'BIG SUCCESS')
@mock.patch.object(EVENT, 'info')
@mock.patch.object(ab.Action, 'load')
@mock.patch.object(ao.Action, 'mark_failed')
def test_action_proc_failed_error(self, mock_mark, mock_load, mock_info):
action = ab.Action(OBJID, 'CLUSTER_ACTION', self.ctx, id=ACTION_ID)
action.entity = mock.Mock(id=CLUSTER_ID, name='fake-cluster')
self.patchobject(action, 'execute', side_effect=Exception('Boom!'))
mock_status = self.patchobject(action, 'set_status')
mock_load.return_value = action
res = ab.ActionProc(self.ctx, 'ACTION')
self.assertFalse(res)
mock_load.assert_called_once_with(self.ctx, action_id='ACTION',
project_safe=False)
mock_info.assert_called_once_with(action, 'start')
mock_status.assert_called_once_with(action.RES_ERROR, 'Boom!')
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
01851c215882db7ea5489cb7d037bf4a475fc45d | 6e9ca564a0a806768636c787287607a090e90aed | /src/toolbox/data_converters.py | b0425a82890bc93e370daf8e101376049b2ae722 | [
"MIT"
] | permissive | ENJOY-Yin-jiong/hidden-challenges-MR | 798a513ad5c02df2d004076cd27f2f5c3c1b8e3e | e0c4546b5a614a20b680765fd4ad250564327b6a | refs/heads/master | 2023-03-20T02:14:22.583479 | 2020-09-07T05:29:07 | 2020-09-07T05:29:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from typing import Tuple, List, Dict, Callable
from pandas import DataFrame
Query = Tuple[str, str]
Location = Tuple[float, float, float] # start, end, length
Instance = Tuple[Query, Location]
Rating = List[float]
Prediction = Tuple[Query, List[Location], Rating]
Result = Tuple[Query, List[Location], Rating, dict]
def ActivityNetCap2Instances(raw_data: dict) -> List[Instance]:
instances: List[Instance] = []
for video_id, anno in raw_data.items():
for sentence, timestamp in zip(anno["sentences"], anno["timestamps"]):
query = (video_id, sentence)
location = (*timestamp, anno["duration"])
instance = (query, location)
instances.append(instance)
return instances
def CharadesSTA2Instances(raw_data: DataFrame) -> List[Instance]:
instances = []
for _, row in raw_data.iterrows():
query = (row["id"], row["description"])
location = tuple(row[["start (sec)", "end (sec)", "length"]].tolist())
instance = (query, location)
instances.append(instance)
return instances
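# Illustrative sketch (added for clarity; not part of the original module):
# feeding ActivityNetCap2Instances a minimal, hypothetical annotation dict.
if __name__ == "__main__":
    sample = {
        "v_001": {
            "duration": 120.0,
            "sentences": ["a person opens a door"],
            "timestamps": [[3.5, 9.0]],
        }
    }
    # Expected: [(('v_001', 'a person opens a door'), (3.5, 9.0, 120.0))]
    print(ActivityNetCap2Instances(sample))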
| [
"otani_mayu@cyberagent.co.jp"
] | otani_mayu@cyberagent.co.jp |
91eb9c99063fcaf5b821ef933527ce9a156928d1 | a37e93d23ea659bafaac8034dc7aeed65bc5eb38 | /shopee_api_client/controllers/controllers.py | 86c536dcab4725f74333bed6a851742075677bc7 | [] | no_license | kit9/sct-ecommerce | 71831541264809715b31d802d260474975d04536 | 6c440a514eac8101474b6720e409a81c58bd3b7c | refs/heads/master | 2022-10-03T23:35:31.563670 | 2020-06-09T12:35:06 | 2020-06-09T12:35:06 | 271,020,689 | 0 | 4 | null | 2020-06-09T14:10:49 | 2020-06-09T14:10:48 | null | UTF-8 | Python | false | false | 1,672 | py | # -*- coding: utf-8 -*-
from odoo import http
class ShopeeApiClient(http.Controller):
@http.route('/shopee_client/shop/<model("shopee_client.shop"):shop>/auth', methods=['POST'], auth='public',csrf=False)
def auth_push(self,shop, **kw):
if shop.sudo().write(kw):
return http.Response(status=200)
else:
return http.Response(status=500)
@http.route('/shopee_client/shop/<model("shopee_client.shop"):shop>/order_status', methods=['POST'], auth='public',csrf=False)
def order_status(self,shop, **kw):
if shop.sudo().order_status_push(kw.get('ordersn'), kw.get('status'),kw.get('update_time')):
return http.Response(status=200)
else:
return http.Response(status=500)
@http.route('/shopee_client/shop/<model("shopee_client.shop"):shop>/order_tracking_no', methods=['POST'], auth='public',csrf=False)
def order_tracking_no(self,shop, **kw):
return http.Response(status=200)
# def index(self, **kw):
# return "Hello, world"
# @http.route('/shopee_api_client/shopee_api_client/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('shopee_api_client.listing', {
# 'root': '/shopee_api_client/shopee_api_client',
# 'objects': http.request.env['shopee_api_client.shopee_api_client'].search([]),
# })
# @http.route('/shopee_api_client/shopee_api_client/objects/<model("shopee_api_client.shopee_api_client"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('shopee_api_client.object', {
# 'object': obj
# })
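# Hypothetical usage note (added; not part of the original file): Shopee's
# push mechanism would POST form-encoded fields to the routes above, e.g.
#   POST /shopee_client/shop/<shop_id>/order_status
#   body: ordersn=200609XXXXXXX&status=READY_TO_SHIP&update_time=1591700000
# which this controller forwards to the shop record's order_status_push().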
| [
"thanhquenguyenbkhn@gmail.com"
] | thanhquenguyenbkhn@gmail.com |
e664beb018a1c9ae9d3a87597696086278f40c0e | dbe012dbedc967332ae58414473185055136d189 | /maskrcnn_benchmark/data/transforms/transforms.py | 283a9d3055d8b0ed951c6e0ec938684bcaf74ce3 | [
"MIT"
] | permissive | kevincao91/maskrcnn | 87561a023939a71d624252dd44f4c882b2dfa2a6 | a55f6ab82219329e353a20dd53c3f25f4375f537 | refs/heads/master | 2020-09-24T18:41:36.565752 | 2020-05-07T05:45:39 | 2020-05-07T05:45:39 | 225,819,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,511 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image, target=None):
size = self.get_size(image.size)
image = F.resize(image, size)
if target is None:
return image
target = target.resize(image.size)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.hflip(image)
target = target.transpose(0)
return image, target
class RandomVerticalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.vflip(image)
target = target.transpose(1)
return image, target
class ColorJitter(object):
def __init__(self,
brightness=None,
contrast=None,
saturation=None,
hue=None,
):
self.color_jitter = torchvision.transforms.ColorJitter(
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,)
def __call__(self, image, target):
image = self.color_jitter(image)
return image, target
class ToTensor(object):
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, target=None):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image
return image, target
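# Illustrative usage sketch (added; not part of the original file). A real
# pipeline would pass a BoxList target; _DummyTarget below merely stands in
# for its resize()/transpose() interface so the snippet is self-contained.
if __name__ == "__main__":
    from PIL import Image
    class _DummyTarget(object):
        def resize(self, size):
            return self
        def transpose(self, method):
            return self
    transform = Compose([
        Resize(800, 1333),
        RandomHorizontalFlip(0.5),
        ToTensor(),
        Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0]),
    ])
    image, target = transform(Image.new("RGB", (640, 480)), _DummyTarget())
    print(image.shape)  # e.g. torch.Size([3, 800, 1066])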
| [
"kevin_cao_91@163.com"
] | kevin_cao_91@163.com |
59bebd47be55198c6ec48813b99966195120cdd5 | 3b3b9bbc39c50a270e96b4394024f1753e35aaec | /ncbly/spiders/spider.py | 30b8ffc51883646ef9f6e34a1fd77a8c78d021b7 | [] | no_license | hristo-grudev/ncbly | f94e2fdc8d556fba416d556cac5649b7f492c7c5 | 6b33ceb9b287ed0047f4676b3c036dc0b7c8e08a | refs/heads/main | 2023-04-11T02:09:07.152764 | 2021-04-15T06:30:42 | 2021-04-15T06:30:42 | 358,152,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | import scrapy
from scrapy import FormRequest
from scrapy.loader import ItemLoader
from ..items import NcblyItem
from itemloaders.processors import TakeFirst
class NcblySpider(scrapy.Spider):
name = 'ncbly'
start_urls = ['https://www.ncb.ly/en/media-center/news/']
def parse(self, response):
post_links = response.xpath('//h4/a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
next_page = response.xpath('//a[text()="Next"]/@href').getall()
if next_page:
yield FormRequest.from_response(response, formdata={
'__EVENTTARGET': 'ctl00$cph_body$pgrCustomRepeater$ctl02$ctl00'}, callback=self.parse)
def parse_post(self, response):
title = response.xpath('//h1[@class="new-mc-big-title"]/text()').get()
description = response.xpath('//div[@class="col col_8_of_12 mc-body"]//text()[normalize-space()]').getall()
description = [p.strip() for p in description if '{' not in p]
description = ' '.join(description).strip()
date = response.xpath('//div[@class="new-mc-big-date"]/text()').get()
item = ItemLoader(item=NcblyItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item()
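# Usage note (added; not part of the original file): run with the Scrapy CLI
# from the project root, e.g. `scrapy crawl ncbly -o ncbly.json`. Pagination
# goes through FormRequest.from_response because the site's "Next" link is an
# ASP.NET __EVENTTARGET postback rather than a plain href.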
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
fbb0f7310f7f72ed4f42979e7bdb21f2a6601ca5 | 1c99c687c696d780b3817f480129ebaa1fba109a | /codegate2017/pngparser/urllib2.py | 719e419cc28cff25cd16e85ce74590f06be5dcbb | [] | no_license | cExplr/ctf | 6be804d36922b08949154cfeba405c2aee0cb4e5 | 79f6485e6f1383fb382cd1a1453c2d25b0b5c518 | refs/heads/master | 2020-05-07T19:33:09.667499 | 2017-08-24T12:47:07 | 2017-08-24T12:47:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105,026 | py | """An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.
The OpenerDirector manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
urlopen(url, data=None) -- Basic usage is the same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
build_opener -- Function that creates a new OpenerDirector instance.
Will install the default handlers. Accepts one or more Handlers as
arguments, either instances or Handler classes that it will
instantiate. If one of the argument is a subclass of the default
handler, the argument will be installed instead of the default.
install_opener -- Installs a new opener as the default opener.
objects of interest:
OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
the Handler classes, while dealing with requests and responses.
Request -- An object that encapsulates the state of a request. The
state can be as simple as the URL. It can also include extra HTTP
headers, e.g. a User-Agent.
BaseHandler --
exceptions:
URLError -- A subclass of IOError, individual protocols have their own
specific subclass.
HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
as an exceptional event or valid response.
internals:
BaseHandler and parent
_call_chain conventions
Example usage:
import urllib2
# set up authentication info
authinfo = urllib2.HTTPBasicAuthHandler()
authinfo.add_password(realm='PDQ Application',
uri='https://mahler:8092/site-updates.py',
user='klem',
passwd='geheim$parole')
proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
# install it
urllib2.install_opener(opener)
f = urllib2.urlopen('http://www.python.org/')
"""
# XXX issues:
# If an authentication error handler that tries to perform
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
# the hash algorithm requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
import base64
import hashlib
import httplib
import mimetools
import os
import posixpath
import random
import re
import socket
import sys
import time
import urlparse
import bisect
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# check for SSL
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from urllib import (unwrap, unquote, splittype, splithost, quote,
addinfourl, splitport, splittag, toBytes,
splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
from urllib import localhost, url2pathname, getproxies, proxy_bypass
# used in User-Agent header sent
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
cafile=None, capath=None, cadefault=False, context=None):
global _opener
if cafile or capath or cadefault:
if context is not None:
raise ValueError(
"You can't pass both context and any of cafile, capath, and "
"cadefault"
)
if not _have_ssl:
raise ValueError('SSL support not available')
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=cafile,
capath=capath)
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif context:
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif _opener is None:
_opener = opener = build_opener()
else:
opener = _opener
return opener.open(url, data, timeout)
def install_opener(opener):
global _opener
_opener = opener
# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
# URLError is a sub-type of IOError, but it doesn't share any of
# the implementation. need to override __init__ and __str__.
# It sets self.args for compatibility with other EnvironmentError
# subclasses, but args doesn't have the typical format with errno in
# slot 0 and strerror in slot 1. This may be better than nothing.
def __init__(self, reason):
self.args = reason,
self.reason = reason
def __str__(self):
return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
"""Raised when HTTP error occurs, but also acts like non-error return"""
__super_init = addinfourl.__init__
def __init__(self, url, code, msg, hdrs, fp):
self.code = code
self.msg = msg
self.hdrs = hdrs
self.fp = fp
self.filename = url
# The addinfourl classes depend on fp being a valid file
# object. In some cases, the HTTPError may not have a valid
# file object. If this happens, the simplest workaround is to
# not initialize the base classes.
if fp is not None:
self.__super_init(fp, hdrs, url, code)
def __str__(self):
return 'HTTP Error %s: %s' % (self.code, self.msg)
# since URLError specifies a .reason attribute, HTTPError should also
    # provide this attribute. See issue13211 for discussion.
@property
def reason(self):
return self.msg
def info(self):
return self.hdrs
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = urlparse.urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = _cut_port_re.sub("", host, 1)
return host.lower()
class Request:
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
self.__original = unwrap(url)
self.__original, self.__fragment = splittag(self.__original)
self.type = None
# self.__r_type is what's left after doing the splittype
self.host = None
self.port = None
self._tunnel_host = None
self.data = data
self.headers = {}
for key, value in headers.items():
self.add_header(key, value)
self.unredirected_hdrs = {}
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
self.unverifiable = unverifiable
def __getattr__(self, attr):
# XXX this is a fallback mechanism to guard against these
# methods getting called in a non-standard order. this may be
# too complicated and/or unnecessary.
# XXX should the __r_XXX attributes be public?
if attr in ('_Request__r_type', '_Request__r_host'):
getattr(self, 'get_' + attr[12:])()
return self.__dict__[attr]
raise AttributeError, attr
def get_method(self):
if self.has_data():
return "POST"
else:
return "GET"
# XXX these helper methods are lame
def add_data(self, data):
self.data = data
def has_data(self):
return self.data is not None
def get_data(self):
return self.data
def get_full_url(self):
if self.__fragment:
return '%s#%s' % (self.__original, self.__fragment)
else:
return self.__original
def get_type(self):
if self.type is None:
self.type, self.__r_type = splittype(self.__original)
if self.type is None:
raise ValueError, "unknown url type: %s" % self.__original
return self.type
def get_host(self):
if self.host is None:
self.host, self.__r_host = splithost(self.__r_type)
if self.host:
self.host = unquote(self.host)
return self.host
def get_selector(self):
return self.__r_host
def set_proxy(self, host, type):
if self.type == 'https' and not self._tunnel_host:
self._tunnel_host = self.host
else:
self.type = type
self.__r_host = self.__original
self.host = host
def has_proxy(self):
return self.__r_host == self.__original
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_header(self, key, val):
# useful for something like authentication
self.headers[key.capitalize()] = val
def add_unredirected_header(self, key, val):
# will not be added to a redirected request
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
class OpenerDirector:
def __init__(self):
client_version = "Python-urllib/%s" % __version__
self.addheaders = [('User-agent', client_version)]
# self.handlers is retained only for backward compatibility
self.handlers = []
# manage the individual handlers
self.handle_open = {}
self.handle_error = {}
self.process_response = {}
self.process_request = {}
def add_handler(self, handler):
if not hasattr(handler, "add_parent"):
raise TypeError("expected BaseHandler instance, got %r" %
type(handler))
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
i = meth.find("_")
protocol = meth[:i]
condition = meth[i+1:]
if condition.startswith("error"):
j = condition.find("_") + i + 1
kind = meth[j+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = self.handle_error.get(protocol, {})
self.handle_error[protocol] = lookup
elif condition == "open":
kind = protocol
lookup = self.handle_open
elif condition == "response":
kind = protocol
lookup = self.process_response
elif condition == "request":
kind = protocol
lookup = self.process_request
else:
continue
handlers = lookup.setdefault(kind, [])
if handlers:
bisect.insort(handlers, handler)
else:
handlers.append(handler)
added = True
if added:
bisect.insort(self.handlers, handler)
handler.add_parent(self)
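    # Note (added for clarity): add_handler dispatches on method names. For
    # example, a handler defining http_open / http_request / http_response /
    # http_error_404 is registered under handle_open['http'],
    # process_request['http'], process_response['http'] and
    # handle_error['http'][404] respectively.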
def close(self):
# Only exists for backwards compatibility.
pass
def _call_chain(self, chain, kind, meth_name, *args):
# Handlers raise an exception if no one else should try to handle
# the request, or return None if they can't but another handler
# could. Otherwise, they return the response.
handlers = chain.get(kind, ())
for handler in handlers:
func = getattr(handler, meth_name)
result = func(*args)
if result is not None:
return result
def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
# accept a URL or a Request object
if isinstance(fullurl, basestring):
req = Request(fullurl, data)
else:
req = fullurl
if data is not None:
req.add_data(data)
req.timeout = timeout
protocol = req.get_type()
# pre-process request
meth_name = protocol+"_request"
for processor in self.process_request.get(protocol, []):
meth = getattr(processor, meth_name)
req = meth(req)
response = self._open(req, data)
# post-process response
meth_name = protocol+"_response"
for processor in self.process_response.get(protocol, []):
meth = getattr(processor, meth_name)
response = meth(req, response)
return response
def _open(self, req, data=None):
result = self._call_chain(self.handle_open, 'default',
'default_open', req)
if result:
return result
protocol = req.get_type()
result = self._call_chain(self.handle_open, protocol, protocol +
'_open', req)
if result:
return result
return self._call_chain(self.handle_open, 'unknown',
'unknown_open', req)
def error(self, proto, *args):
if proto in ('http', 'https'):
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is not different than http
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = self._call_chain(*args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both
def build_opener(*handlers):
"""Create an opener object from a list of handlers.
The opener will use several default handlers, including support
for HTTP, FTP and when applicable, HTTPS.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
import types
def isclass(obj):
return isinstance(obj, (types.ClassType, type))
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler, HTTPErrorProcessor]
if hasattr(httplib, 'HTTPS'):
default_classes.append(HTTPSHandler)
skip = set()
for klass in default_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
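# Hedged usage sketch (added; not part of the original module): passing a
# subclass displaces the matching default handler.
#
#     class VerboseHTTPHandler(HTTPHandler):
#         def http_open(self, req):
#             print "opening", req.get_full_url()
#             return HTTPHandler.http_open(self, req)
#
#     opener = build_opener(VerboseHTTPHandler)  # replaces default HTTPHandler
#     install_opener(opener)                     # urlopen() now uses it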
class BaseHandler:
handler_order = 500
def add_parent(self, parent):
self.parent = parent
def close(self):
# Only exists for backwards compatibility
pass
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# Try to preserve the old behavior of having custom classes
# inserted after default ones (works only for custom user
# classes which are not aware of handler_order).
return True
return self.handler_order < other.handler_order
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses."""
handler_order = 1000 # after all other processing
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if not (200 <= code < 300):
response = self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type")
)
return Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
# Implementation note: To avoid the server sending us into an
# infinite loop, the request object needs to track what URLs we
# have already seen. Do this by adding a handler-specific
# attribute to the Request object.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if 'location' in headers:
newurl = headers.getheaders('location')[0]
elif 'uri' in headers:
newurl = headers.getheaders('uri')[0]
else:
return
# fix a possible malformed URL
urlparts = urlparse.urlparse(newurl)
if not urlparts.path and urlparts.netloc:
urlparts = list(urlparts)
urlparts[2] = "/"
newurl = urlparse.urlunparse(urlparts)
newurl = urlparse.urljoin(req.get_full_url(), newurl)
# For security reasons we do not allow redirects to protocols
# other than HTTP, HTTPS or FTP.
newurl_lower = newurl.lower()
if not (newurl_lower.startswith('http://') or
newurl_lower.startswith('https://') or
newurl_lower.startswith('ftp://')):
raise HTTPError(newurl, code,
msg + " - Redirection to url '%s' is not allowed" %
newurl,
headers, fp)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(req, fp, code, msg, headers, newurl)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new, timeout=req.timeout)
http_error_301 = http_error_303 = http_error_307 = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
def _parse_proxy(proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
        # We have an authority, so for RFC 3986-compliant URLs (by ss. 3.2
        # and 3.3), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
# Proxies must be in front
handler_order = 100
def __init__(self, proxies=None):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
for type, url in proxies.items():
setattr(self, '%s_open' % type,
lambda r, proxy=url, type=type, meth=self.proxy_open: \
meth(r, proxy, type))
def proxy_open(self, req, proxy, type):
orig_type = req.get_type()
proxy_type, user, password, hostport = _parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if req.host and proxy_bypass(req.host):
return None
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
req.add_header('Proxy-authorization', 'Basic ' + creds)
hostport = unquote(hostport)
req.set_proxy(hostport, proxy_type)
if orig_type == proxy_type or orig_type == 'https':
# let other handlers take care of it
return None
else:
# need to start over, because the other handlers don't
# grok the proxy's URL type
# e.g. if we have a constructor arg proxies like so:
# {'http': 'ftp://proxy.example.com'}, we may end up turning
# a request for http://acme.example.com/a into one for
# ftp://proxy.example.com/a
return self.parent.open(req, timeout=req.timeout)
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if isinstance(uri, basestring):
uri = [uri]
if not realm in self.passwd:
self.passwd[realm] = {}
for default_port in True, False:
reduced_uri = tuple(
[self.reduce_uri(u, default_port) for u in uri])
self.passwd[realm][reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
domains = self.passwd.get(realm, {})
for default_port in True, False:
reduced_authuri = self.reduce_uri(authuri, default_port)
for uris, authinfo in domains.iteritems():
for uri in uris:
if self.is_suburi(uri, reduced_authuri):
return authinfo
return None, None
def reduce_uri(self, uri, default_port=True):
"""Accept authority or URI and extract only the authority and path."""
# note HTTP URLs do not have a userinfo component
parts = urlparse.urlsplit(uri)
if parts[1]:
# URI
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
# host or host:port
scheme = None
authority = uri
path = '/'
host, port = splitport(authority)
if default_port and port is None and scheme is not None:
dport = {"http": 80,
"https": 443,
}.get(scheme)
if dport is not None:
authority = "%s:%d" % (host, dport)
return authority, path
def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False
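# Illustrative example of the reduce_uri/is_suburi matching above (added for
# clarity; not part of the original module):
#
#     mgr = HTTPPasswordMgr()
#     mgr.add_password('Realm', 'http://example.com/api/', 'user', 'secret')
#     mgr.find_user_password('Realm', 'http://example.com/api/v1/data')
#     # -> ('user', 'secret'), since '/api/' is a path prefix of '/api/v1/data'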
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
# XXX this allows for multiple auth-schemes, but will stupidly pick
# the last one with a realm specified.
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
'realm=(["\']?)([^"\']*)\\2', re.I)
# XXX could pre-emptively send auth info already accepted (RFC 2617,
# end of section 2, and section 1.2 immediately after "credentials"
# production).
def __init__(self, password_mgr=None):
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
def http_error_auth_reqed(self, authreq, host, req, headers):
# host may be an authority (without userinfo) or a URL with an
# authority
# XXX could be multiple headers
authreq = headers.get(authreq, None)
if authreq:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, quote, realm = mo.groups()
if quote not in ['"', "'"]:
warnings.warn("Basic Auth Realm was unquoted",
UserWarning, 2)
if scheme.lower() == 'basic':
return self.retry_http_basic_auth(host, req, realm)
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = "%s:%s" % (user, pw)
auth = 'Basic %s' % base64.b64encode(raw).strip()
if req.get_header(self.auth_header, None) == auth:
return None
req.add_unredirected_header(self.auth_header, auth)
return self.parent.open(req, timeout=req.timeout)
else:
return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Authorization'
def http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
return response
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Proxy-authorization'
def http_error_407(self, req, fp, code, msg, headers):
# http_error_auth_reqed requires that there is no userinfo component in
# authority. Assume there isn't one, since urllib2 does not (and
# should not, RFC 3986 s. 3.2.1) support requests for URLs containing
# userinfo.
authority = req.get_host()
response = self.http_error_auth_reqed('proxy-authenticate',
authority, req, headers)
return response
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L)
class AbstractDigestAuthHandler:
# Digest authentication is specified in RFC 2617.
# XXX The client does not inspect the Authentication-Info header
# in a successful response.
# XXX It should be possible to test this implementation against
# a mock server that just generates a static set of challenges.
    # XXX qop="auth-int" support is shaky
def __init__(self, passwd=None):
if passwd is None:
passwd = HTTPPasswordMgr()
self.passwd = passwd
self.add_password = self.passwd.add_password
self.retried = 0
self.nonce_count = 0
self.last_nonce = None
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, auth_header, host, req, headers):
authreq = headers.get(auth_header, None)
if self.retried > 5:
# Don't fail endlessly - if we failed once, we'll probably
# fail a second time. Hm. Unless the Password Manager is
# prompting for the information. Crap. This isn't great
# but it's better than the current 'repeat until recursion
# depth exceeded' approach <wink>
raise HTTPError(req.get_full_url(), 401, "digest auth failed",
headers, None)
else:
self.retried += 1
if authreq:
scheme = authreq.split()[0]
if scheme.lower() == 'digest':
return self.retry_http_digest_auth(req, authreq)
def retry_http_digest_auth(self, req, auth):
token, challenge = auth.split(' ', 1)
chal = parse_keqv_list(parse_http_list(challenge))
auth = self.get_authorization(req, chal)
if auth:
auth_val = 'Digest %s' % auth
if req.headers.get(self.auth_header, None) == auth_val:
return None
req.add_unredirected_header(self.auth_header, auth_val)
resp = self.parent.open(req, timeout=req.timeout)
return resp
def get_cnonce(self, nonce):
# The cnonce-value is an opaque
# quoted string value provided by the client and used by both client
# and server to avoid chosen plaintext attacks, to provide mutual
# authentication, and to provide some message integrity protection.
# This isn't a fabulous effort, but it's probably Good Enough.
dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
randombytes(8))).hexdigest()
return dig[:16]
def get_authorization(self, req, chal):
try:
realm = chal['realm']
nonce = chal['nonce']
qop = chal.get('qop')
algorithm = chal.get('algorithm', 'MD5')
# mod_digest doesn't send an opaque, even though it isn't
# supposed to be optional
opaque = chal.get('opaque', None)
except KeyError:
return None
H, KD = self.get_algorithm_impls(algorithm)
if H is None:
return None
user, pw = self.passwd.find_user_password(realm, req.get_full_url())
if user is None:
return None
# XXX not implemented yet
if req.has_data():
entdig = self.get_entity_digest(req.get_data(), chal)
else:
entdig = None
A1 = "%s:%s:%s" % (user, realm, pw)
A2 = "%s:%s" % (req.get_method(),
# XXX selector: what about proxies and full urls
req.get_selector())
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
self.last_nonce = nonce
ncvalue = '%08x' % self.nonce_count
cnonce = self.get_cnonce(nonce)
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
else:
# XXX handle auth-int.
raise URLError("qop '%s' is not supported." % qop)
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (user, realm, nonce, req.get_selector(),
respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return base
def get_algorithm_impls(self, algorithm):
# algorithm should be case-insensitive according to RFC2617
algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: hashlib.md5(x).hexdigest()
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(x).hexdigest()
# XXX MD5-sess
else:
raise ValueError("Unsupported digest authentication "
"algorithm %r" % algorithm.lower())
KD = lambda s, d: H("%s:%s" % (s, d))
return H, KD
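    # Worked example (added for clarity) of the RFC 2617 arithmetic these
    # helpers feed into get_authorization():
    #     H, KD = self.get_algorithm_impls('MD5')
    #     A1 = 'user:realm:password'
    #     A2 = 'GET:/index.html'
    #     respdig = KD(H(A1), '%s:%s' % (nonce, H(A2)))   # the qop-less case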
def get_entity_digest(self, data, chal):
# XXX not implemented yet
return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
"""An authentication protocol defined by RFC 2069
Digest authentication improves on basic authentication because it
does not transmit passwords in the clear.
"""
auth_header = 'Authorization'
handler_order = 490 # before Basic auth
def http_error_401(self, req, fp, code, msg, headers):
host = urlparse.urlparse(req.get_full_url())[1]
retry = self.http_error_auth_reqed('www-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Proxy-Authorization'
handler_order = 490 # before Basic auth
def http_error_407(self, req, fp, code, msg, headers):
host = req.get_host()
retry = self.http_error_auth_reqed('proxy-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
if not request.has_header('Content-length'):
request.add_unredirected_header(
'Content-length', '%d' % len(data))
sel_host = host
if request.has_proxy():
scheme, sel = splittype(request.get_selector())
sel_host, sel_path = splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req, **http_conn_args):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
# will parse host:port
h = http_class(host, timeout=req.timeout, **http_conn_args)
h.set_debuglevel(self._debuglevel)
headers = dict(req.unredirected_hdrs)
headers.update(dict((k, v) for k, v in req.headers.items()
if k not in headers))
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
(name.title(), val) for name, val in headers.items())
if req._tunnel_host:
tunnel_headers = {}
proxy_auth_hdr = "Proxy-Authorization"
if proxy_auth_hdr in headers:
tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
# Proxy-Authorization should not be sent to origin
# server.
del headers[proxy_auth_hdr]
h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
except socket.error, err: # XXX what error?
h.close()
raise URLError(err)
else:
try:
r = h.getresponse(buffering=True)
except TypeError: # buffering kw not supported
r = h.getresponse()
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
fp = socket._fileobject(r, close=True)
resp = addinfourl(fp, r.msg, req.get_full_url())
resp.code = r.status
resp.msg = r.reason
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, debuglevel=0, context=None):
AbstractHTTPHandler.__init__(self, debuglevel)
self._context = context
def https_open(self, req):
return self.do_open(httplib.HTTPSConnection, req,
context=self._context)
https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
def __init__(self, cookiejar=None):
import cookielib
if cookiejar is None:
cookiejar = cookielib.CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
class UnknownHandler(BaseHandler):
def unknown_open(self, req):
type = req.get_type()
raise URLError('unknown url type: %s' % type)
def parse_keqv_list(l):
"""Parse list of key=value strings where keys are not duplicated."""
parsed = {}
for elt in l:
k, v = elt.split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
parsed[k] = v
return parsed
def parse_http_list(s):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Neither commas nor quotes count if they are escaped.
Only double-quotes count, not single-quotes.
"""
res = []
part = ''
escape = quote = False
for cur in s:
if escape:
part += cur
escape = False
continue
if quote:
if cur == '\\':
escape = True
continue
elif cur == '"':
quote = False
part += cur
continue
if cur == ',':
res.append(part)
part = ''
continue
if cur == '"':
quote = True
part += cur
# append last part
if part:
res.append(part)
return [part.strip() for part in res]
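# Illustrative sketch (added; not part of the original module): how the two
# parsers above cooperate on a Digest challenge.
#
#     >>> parse_http_list('realm="x@y", qop="auth,auth-int", nonce="abc"')
#     ['realm="x@y"', 'qop="auth,auth-int"', 'nonce="abc"']
#     >>> parse_keqv_list(['realm="x@y"', 'nonce="abc"'])['realm']
#     'x@y'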
def _safe_gethostbyname(host):
try:
return socket.gethostbyname(host)
except socket.gaierror:
return None
class FileHandler(BaseHandler):
# Use local file or FTP depending on form of URL
def file_open(self, req):
url = req.get_selector()
if url[:2] == '//' and url[2:3] != '/' and (req.host and
req.host != 'localhost'):
req.type = 'ftp'
return self.parent.open(req)
else:
return self.open_local_file(req)
# names for the localhost
names = None
def get_names(self):
if FileHandler.names is None:
try:
FileHandler.names = tuple(
socket.gethostbyname_ex('localhost')[2] +
socket.gethostbyname_ex(socket.gethostname())[2])
except socket.gaierror:
FileHandler.names = (socket.gethostbyname('localhost'),)
return FileHandler.names
# not entirely sure what the rules are here
def open_local_file(self, req):
import email.utils
import mimetypes
host = req.get_host()
filename = req.get_selector()
localfile = url2pathname(filename)
try:
stats = os.stat(localfile)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(filename)[0]
headers = mimetools.Message(StringIO(
'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
if host:
host, port = splitport(host)
if not host or \
(not port and _safe_gethostbyname(host) in self.get_names()):
if host:
origurl = 'file://' + host + filename
else:
origurl = 'file://' + filename
return addinfourl(open(localfile, 'rb'), headers, origurl)
except OSError, msg:
# urllib2 users shouldn't expect OSErrors coming from urlopen()
raise URLError(msg)
raise URLError('file not on local host')
class FTPHandler(BaseHandler):
def ftp_open(self, req):
import ftplib
import mimetypes
host = req.get_host()
if not host:
raise URLError('ftp error: no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
try:
host = socket.gethostbyname(host)
except socket.error, msg:
raise URLError(msg)
path, attrs = splitattr(req.get_selector())
dirs = path.split('/')
dirs = map(unquote, dirs)
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
fp, retrlen = fw.retrfile(file, type)
headers = ""
mtype = mimetypes.guess_type(req.get_full_url())[0]
if mtype:
headers += "Content-type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-length: %d\n" % retrlen
sf = StringIO(headers)
headers = mimetools.Message(sf)
return addinfourl(fp, headers, req.get_full_url())
except ftplib.all_errors, msg:
raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
fw = ftpwrapper(user, passwd, host, port, dirs, timeout,
persistent=False)
## fw.ftp.set_debuglevel(1)
return fw
class CacheFTPHandler(FTPHandler):
# XXX would be nice to have pluggable cache strategies
# XXX this stuff is definitely not thread safe
def __init__(self):
self.cache = {}
self.timeout = {}
self.soonest = 0
self.delay = 60
self.max_conns = 16
def setTimeout(self, t):
self.delay = t
def setMaxConns(self, m):
self.max_conns = m
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
key = user, host, port, '/'.join(dirs), timeout
if key in self.cache:
self.timeout[key] = time.time() + self.delay
else:
self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
self.timeout[key] = time.time() + self.delay
self.check_cache()
return self.cache[key]
def check_cache(self):
# first check for old ones
t = time.time()
if self.soonest <= t:
for k, v in self.timeout.items():
if v < t:
self.cache[k].close()
del self.cache[k]
del self.timeout[k]
self.soonest = min(self.timeout.values())
# then check the size
if len(self.cache) == self.max_conns:
for k, v in self.timeout.items():
if v == self.soonest:
del self.cache[k]
del self.timeout[k]
break
self.soonest = min(self.timeout.values())
def clear_cache(self):
for conn in self.cache.values():
conn.close()
self.cache.clear()
self.timeout.clear()
"""An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.
The OpenerDirector manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
urlopen(url, data=None) -- Basic usage is the same as original
urllib. Pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of a URL. Raises a URLError (a subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
build_opener -- Function that creates a new OpenerDirector instance.
Will install the default handlers. Accepts one or more Handlers as
arguments, either instances or Handler classes that it will
instantiate. If one of the arguments is a subclass of a default
handler, the argument will be installed instead of the default.
install_opener -- Installs a new opener as the default opener.
objects of interest:
OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
the Handler classes, while dealing with requests and responses.
Request -- An object that encapsulates the state of a request. The
state can be as simple as the URL. It can also include extra HTTP
headers, e.g. a User-Agent.
BaseHandler --
exceptions:
URLError -- A subclass of IOError, individual protocols have their own
specific subclass.
HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
as an exceptional event or valid response.
internals:
BaseHandler and parent
_call_chain conventions
Example usage:
import urllib2
# set up authentication info
authinfo = urllib2.HTTPBasicAuthHandler()
authinfo.add_password(realm='PDQ Application',
uri='https://mahler:8092/site-updates.py',
user='klem',
passwd='geheim$parole')
proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
# install it
urllib2.install_opener(opener)
f = urllib2.urlopen('http://www.python.org/')
"""
# XXX issues:
# If an authentication error handler tries to perform
# authentication but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
# the hash algorithm requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
import base64
import hashlib
import httplib
import mimetools
import os
import posixpath
import random
import re
import socket
import sys
import time
import urlparse
import bisect
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# check for SSL
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from urllib import (unwrap, unquote, splittype, splithost, quote,
addinfourl, splitport, splittag, toBytes,
splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
from urllib import localhost, url2pathname, getproxies, proxy_bypass
# used in User-Agent header sent
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
cafile=None, capath=None, cadefault=False, context=None):
global _opener
if cafile or capath or cadefault:
if context is not None:
raise ValueError(
"You can't pass both context and any of cafile, capath, and "
"cadefault"
)
if not _have_ssl:
raise ValueError('SSL support not available')
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=cafile,
capath=capath)
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif context:
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif _opener is None:
_opener = opener = build_opener()
else:
opener = _opener
return opener.open(url, data, timeout)
def install_opener(opener):
global _opener
_opener = opener
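# Illustrative usage sketch (added; not part of the original module). The
# module docstring notes that an HTTPError "can also be treated as a valid
# response"; this helper shows what that looks like. The URL is an assumption.
def _example_urlopen_usage():
    try:
        f = urlopen('http://www.example.com/')
        return f.read()
    except HTTPError as e:
        # HTTPError doubles as a response object: it carries code, msg,
        # headers and a readable body.
        return e.read()
    except URLError:
        # No response at all (e.g. DNS failure); re-raise for the caller.
        raise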
# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
# URLError is a sub-type of IOError, but it doesn't share any of
# the implementation. need to override __init__ and __str__.
# It sets self.args for compatibility with other EnvironmentError
# subclasses, but args doesn't have the typical format with errno in
# slot 0 and strerror in slot 1. This may be better than nothing.
def __init__(self, reason):
self.args = reason,
self.reason = reason
def __str__(self):
return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
"""Raised when HTTP error occurs, but also acts like non-error return"""
__super_init = addinfourl.__init__
def __init__(self, url, code, msg, hdrs, fp):
self.code = code
self.msg = msg
self.hdrs = hdrs
self.fp = fp
self.filename = url
# The addinfourl classes depend on fp being a valid file
# object. In some cases, the HTTPError may not have a valid
# file object. If this happens, the simplest workaround is to
# not initialize the base classes.
if fp is not None:
self.__super_init(fp, hdrs, url, code)
def __str__(self):
return 'HTTP Error %s: %s' % (self.code, self.msg)
# since URLError specifies a .reason attribute, HTTPError should also
    # provide this attribute. See issue13211 for discussion.
@property
def reason(self):
return self.msg
def info(self):
return self.hdrs
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = urlparse.urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = _cut_port_re.sub("", host, 1)
return host.lower()
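# Illustrative example (added comment): for a request whose full URL is
# 'http://Example.COM:8080/x', request_host() drops the port and lowercases,
# returning 'example.com'.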
class Request:
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
self.__original = unwrap(url)
self.__original, self.__fragment = splittag(self.__original)
self.type = None
# self.__r_type is what's left after doing the splittype
self.host = None
self.port = None
self._tunnel_host = None
self.data = data
self.headers = {}
for key, value in headers.items():
self.add_header(key, value)
self.unredirected_hdrs = {}
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
self.unverifiable = unverifiable
def __getattr__(self, attr):
# XXX this is a fallback mechanism to guard against these
# methods getting called in a non-standard order. this may be
# too complicated and/or unnecessary.
# XXX should the __r_XXX attributes be public?
if attr in ('_Request__r_type', '_Request__r_host'):
getattr(self, 'get_' + attr[12:])()
return self.__dict__[attr]
raise AttributeError, attr
def get_method(self):
if self.has_data():
return "POST"
else:
return "GET"
# XXX these helper methods are lame
def add_data(self, data):
self.data = data
def has_data(self):
return self.data is not None
def get_data(self):
return self.data
def get_full_url(self):
if self.__fragment:
return '%s#%s' % (self.__original, self.__fragment)
else:
return self.__original
def get_type(self):
if self.type is None:
self.type, self.__r_type = splittype(self.__original)
if self.type is None:
raise ValueError, "unknown url type: %s" % self.__original
return self.type
def get_host(self):
if self.host is None:
self.host, self.__r_host = splithost(self.__r_type)
if self.host:
self.host = unquote(self.host)
return self.host
def get_selector(self):
return self.__r_host
def set_proxy(self, host, type):
if self.type == 'https' and not self._tunnel_host:
self._tunnel_host = self.host
else:
self.type = type
self.__r_host = self.__original
self.host = host
def has_proxy(self):
return self.__r_host == self.__original
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_header(self, key, val):
# useful for something like authentication
self.headers[key.capitalize()] = val
def add_unredirected_header(self, key, val):
# will not be added to a redirected request
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
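# Illustrative sketch (added; not part of the original module): a Request
# carrying data is issued as a POST, one without data as a GET, and header
# keys are capitalized on the way in. The URL and header are assumptions.
def _example_request_usage():
    req = Request('http://www.example.com/', data='a=1',
                  headers={'x-custom': 'yes'})
    assert req.get_method() == 'POST'
    assert req.get_header('X-custom') == 'yes'
    return req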
class OpenerDirector:
def __init__(self):
client_version = "Python-urllib/%s" % __version__
self.addheaders = [('User-agent', client_version)]
# self.handlers is retained only for backward compatibility
self.handlers = []
# manage the individual handlers
self.handle_open = {}
self.handle_error = {}
self.process_response = {}
self.process_request = {}
def add_handler(self, handler):
if not hasattr(handler, "add_parent"):
raise TypeError("expected BaseHandler instance, got %r" %
type(handler))
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
i = meth.find("_")
protocol = meth[:i]
condition = meth[i+1:]
if condition.startswith("error"):
j = condition.find("_") + i + 1
kind = meth[j+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = self.handle_error.get(protocol, {})
self.handle_error[protocol] = lookup
elif condition == "open":
kind = protocol
lookup = self.handle_open
elif condition == "response":
kind = protocol
lookup = self.process_response
elif condition == "request":
kind = protocol
lookup = self.process_request
else:
continue
handlers = lookup.setdefault(kind, [])
if handlers:
bisect.insort(handlers, handler)
else:
handlers.append(handler)
added = True
if added:
bisect.insort(self.handlers, handler)
handler.add_parent(self)
def close(self):
# Only exists for backwards compatibility.
pass
def _call_chain(self, chain, kind, meth_name, *args):
# Handlers raise an exception if no one else should try to handle
# the request, or return None if they can't but another handler
# could. Otherwise, they return the response.
handlers = chain.get(kind, ())
for handler in handlers:
func = getattr(handler, meth_name)
result = func(*args)
if result is not None:
return result
def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
# accept a URL or a Request object
if isinstance(fullurl, basestring):
req = Request(fullurl, data)
else:
req = fullurl
if data is not None:
req.add_data(data)
req.timeout = timeout
protocol = req.get_type()
# pre-process request
meth_name = protocol+"_request"
for processor in self.process_request.get(protocol, []):
meth = getattr(processor, meth_name)
req = meth(req)
response = self._open(req, data)
# post-process response
meth_name = protocol+"_response"
for processor in self.process_response.get(protocol, []):
meth = getattr(processor, meth_name)
response = meth(req, response)
return response
def _open(self, req, data=None):
result = self._call_chain(self.handle_open, 'default',
'default_open', req)
if result:
return result
protocol = req.get_type()
result = self._call_chain(self.handle_open, protocol, protocol +
'_open', req)
if result:
return result
return self._call_chain(self.handle_open, 'unknown',
'unknown_open', req)
def error(self, proto, *args):
if proto in ('http', 'https'):
# XXX http[s] protocols are special-cased
            dict = self.handle_error['http'] # https is no different from http
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = self._call_chain(*args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both
def build_opener(*handlers):
"""Create an opener object from a list of handlers.
The opener will use several default handlers, including support
for HTTP, FTP and when applicable, HTTPS.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
import types
def isclass(obj):
return isinstance(obj, (types.ClassType, type))
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler, HTTPErrorProcessor]
if hasattr(httplib, 'HTTPS'):
default_classes.append(HTTPSHandler)
skip = set()
for klass in default_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
class BaseHandler:
handler_order = 500
def add_parent(self, parent):
self.parent = parent
def close(self):
# Only exists for backwards compatibility
pass
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# Try to preserve the old behavior of having custom classes
# inserted after default ones (works only for custom user
# classes which are not aware of handler_order).
return True
return self.handler_order < other.handler_order
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses."""
handler_order = 1000 # after all other processing
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if not (200 <= code < 300):
response = self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
            # be lenient with URIs containing a space
newurl = newurl.replace(' ', '%20')
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type")
)
return Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
# Implementation note: To avoid the server sending us into an
# infinite loop, the request object needs to track what URLs we
# have already seen. Do this by adding a handler-specific
# attribute to the Request object.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if 'location' in headers:
newurl = headers.getheaders('location')[0]
elif 'uri' in headers:
newurl = headers.getheaders('uri')[0]
else:
return
# fix a possible malformed URL
urlparts = urlparse.urlparse(newurl)
if not urlparts.path and urlparts.netloc:
urlparts = list(urlparts)
urlparts[2] = "/"
newurl = urlparse.urlunparse(urlparts)
newurl = urlparse.urljoin(req.get_full_url(), newurl)
# For security reasons we do not allow redirects to protocols
# other than HTTP, HTTPS or FTP.
newurl_lower = newurl.lower()
if not (newurl_lower.startswith('http://') or
newurl_lower.startswith('https://') or
newurl_lower.startswith('ftp://')):
raise HTTPError(newurl, code,
msg + " - Redirection to url '%s' is not allowed" %
newurl,
headers, fp)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(req, fp, code, msg, headers, newurl)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new, timeout=req.timeout)
http_error_301 = http_error_303 = http_error_307 = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
def _parse_proxy(proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
        # We have an authority, so for RFC 3986-compliant URLs (by ss 3.2
        # and 3.3), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
# Proxies must be in front
handler_order = 100
def __init__(self, proxies=None):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
for type, url in proxies.items():
setattr(self, '%s_open' % type,
lambda r, proxy=url, type=type, meth=self.proxy_open: \
meth(r, proxy, type))
def proxy_open(self, req, proxy, type):
orig_type = req.get_type()
proxy_type, user, password, hostport = _parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if req.host and proxy_bypass(req.host):
return None
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
req.add_header('Proxy-authorization', 'Basic ' + creds)
hostport = unquote(hostport)
req.set_proxy(hostport, proxy_type)
if orig_type == proxy_type or orig_type == 'https':
# let other handlers take care of it
return None
else:
# need to start over, because the other handlers don't
# grok the proxy's URL type
# e.g. if we have a constructor arg proxies like so:
# {'http': 'ftp://proxy.example.com'}, we may end up turning
# a request for http://acme.example.com/a into one for
# ftp://proxy.example.com/a
return self.parent.open(req, timeout=req.timeout)
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if isinstance(uri, basestring):
uri = [uri]
if not realm in self.passwd:
self.passwd[realm] = {}
for default_port in True, False:
reduced_uri = tuple(
[self.reduce_uri(u, default_port) for u in uri])
self.passwd[realm][reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
domains = self.passwd.get(realm, {})
for default_port in True, False:
reduced_authuri = self.reduce_uri(authuri, default_port)
for uris, authinfo in domains.iteritems():
for uri in uris:
if self.is_suburi(uri, reduced_authuri):
return authinfo
return None, None
def reduce_uri(self, uri, default_port=True):
"""Accept authority or URI and extract only the authority and path."""
# note HTTP URLs do not have a userinfo component
parts = urlparse.urlsplit(uri)
if parts[1]:
# URI
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
# host or host:port
scheme = None
authority = uri
path = '/'
host, port = splitport(authority)
if default_port and port is None and scheme is not None:
dport = {"http": 80,
"https": 443,
}.get(scheme)
if dport is not None:
authority = "%s:%d" % (host, dport)
return authority, path
def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False
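# Illustrative example (added comment): with default_port=True,
# reduce_uri('http://example.com/path') yields ('example.com:80', '/path'),
# so a password stored for that URI also matches the deeper URI
# 'http://example.com:80/path/sub' via is_suburi().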
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
# XXX this allows for multiple auth-schemes, but will stupidly pick
# the last one with a realm specified.
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
'realm=(["\']?)([^"\']*)\\2', re.I)
# XXX could pre-emptively send auth info already accepted (RFC 2617,
# end of section 2, and section 1.2 immediately after "credentials"
# production).
def __init__(self, password_mgr=None):
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
def http_error_auth_reqed(self, authreq, host, req, headers):
# host may be an authority (without userinfo) or a URL with an
# authority
# XXX could be multiple headers
authreq = headers.get(authreq, None)
if authreq:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, quote, realm = mo.groups()
if quote not in ['"', "'"]:
warnings.warn("Basic Auth Realm was unquoted",
UserWarning, 2)
if scheme.lower() == 'basic':
return self.retry_http_basic_auth(host, req, realm)
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = "%s:%s" % (user, pw)
auth = 'Basic %s' % base64.b64encode(raw).strip()
if req.get_header(self.auth_header, None) == auth:
return None
req.add_unredirected_header(self.auth_header, auth)
return self.parent.open(req, timeout=req.timeout)
else:
return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Authorization'
def http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
return response
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Proxy-authorization'
def http_error_407(self, req, fp, code, msg, headers):
# http_error_auth_reqed requires that there is no userinfo component in
# authority. Assume there isn't one, since urllib2 does not (and
# should not, RFC 3986 s. 3.2.1) support requests for URLs containing
# userinfo.
authority = req.get_host()
response = self.http_error_auth_reqed('proxy-authenticate',
authority, req, headers)
return response
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L)
class AbstractDigestAuthHandler:
# Digest authentication is specified in RFC 2617.
# XXX The client does not inspect the Authentication-Info header
# in a successful response.
# XXX It should be possible to test this implementation against
# a mock server that just generates a static set of challenges.
    # XXX qop="auth-int" support is shaky
def __init__(self, passwd=None):
if passwd is None:
passwd = HTTPPasswordMgr()
self.passwd = passwd
self.add_password = self.passwd.add_password
self.retried = 0
self.nonce_count = 0
self.last_nonce = None
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, auth_header, host, req, headers):
authreq = headers.get(auth_header, None)
if self.retried > 5:
# Don't fail endlessly - if we failed once, we'll probably
# fail a second time. Hm. Unless the Password Manager is
# prompting for the information. Crap. This isn't great
# but it's better than the current 'repeat until recursion
# depth exceeded' approach <wink>
raise HTTPError(req.get_full_url(), 401, "digest auth failed",
headers, None)
else:
self.retried += 1
if authreq:
scheme = authreq.split()[0]
if scheme.lower() == 'digest':
return self.retry_http_digest_auth(req, authreq)
def retry_http_digest_auth(self, req, auth):
token, challenge = auth.split(' ', 1)
chal = parse_keqv_list(parse_http_list(challenge))
auth = self.get_authorization(req, chal)
if auth:
auth_val = 'Digest %s' % auth
if req.headers.get(self.auth_header, None) == auth_val:
return None
req.add_unredirected_header(self.auth_header, auth_val)
resp = self.parent.open(req, timeout=req.timeout)
return resp
def get_cnonce(self, nonce):
# The cnonce-value is an opaque
# quoted string value provided by the client and used by both client
# and server to avoid chosen plaintext attacks, to provide mutual
# authentication, and to provide some message integrity protection.
# This isn't a fabulous effort, but it's probably Good Enough.
dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
randombytes(8))).hexdigest()
return dig[:16]
def get_authorization(self, req, chal):
try:
realm = chal['realm']
nonce = chal['nonce']
qop = chal.get('qop')
algorithm = chal.get('algorithm', 'MD5')
# mod_digest doesn't send an opaque, even though it isn't
# supposed to be optional
opaque = chal.get('opaque', None)
except KeyError:
return None
H, KD = self.get_algorithm_impls(algorithm)
if H is None:
return None
user, pw = self.passwd.find_user_password(realm, req.get_full_url())
if user is None:
return None
# XXX not implemented yet
if req.has_data():
entdig = self.get_entity_digest(req.get_data(), chal)
else:
entdig = None
A1 = "%s:%s:%s" % (user, realm, pw)
A2 = "%s:%s" % (req.get_method(),
# XXX selector: what about proxies and full urls
req.get_selector())
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
self.last_nonce = nonce
ncvalue = '%08x' % self.nonce_count
cnonce = self.get_cnonce(nonce)
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
else:
# XXX handle auth-int.
raise URLError("qop '%s' is not supported." % qop)
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (user, realm, nonce, req.get_selector(),
respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return base
def get_algorithm_impls(self, algorithm):
# algorithm should be case-insensitive according to RFC2617
algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: hashlib.md5(x).hexdigest()
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(x).hexdigest()
# XXX MD5-sess
else:
raise ValueError("Unsupported digest authentication "
"algorithm %r" % algorithm.lower())
KD = lambda s, d: H("%s:%s" % (s, d))
return H, KD
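    # Illustrative note (added comment): for algorithm 'MD5', H('abc') is the
    # hex MD5 digest of 'abc', and KD(secret, data) == H(secret + ':' + data),
    # matching the KD() construction described in RFC 2617.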
def get_entity_digest(self, data, chal):
# XXX not implemented yet
return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
"""An authentication protocol defined by RFC 2069
Digest authentication improves on basic authentication because it
does not transmit passwords in the clear.
"""
auth_header = 'Authorization'
handler_order = 490 # before Basic auth
def http_error_401(self, req, fp, code, msg, headers):
host = urlparse.urlparse(req.get_full_url())[1]
retry = self.http_error_auth_reqed('www-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Proxy-Authorization'
handler_order = 490 # before Basic auth
def http_error_407(self, req, fp, code, msg, headers):
host = req.get_host()
retry = self.http_error_auth_reqed('proxy-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
if not request.has_header('Content-length'):
request.add_unredirected_header(
'Content-length', '%d' % len(data))
sel_host = host
if request.has_proxy():
scheme, sel = splittype(request.get_selector())
sel_host, sel_path = splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req, **http_conn_args):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
# will parse host:port
h = http_class(host, timeout=req.timeout, **http_conn_args)
h.set_debuglevel(self._debuglevel)
headers = dict(req.unredirected_hdrs)
headers.update(dict((k, v) for k, v in req.headers.items()
if k not in headers))
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
(name.title(), val) for name, val in headers.items())
if req._tunnel_host:
tunnel_headers = {}
proxy_auth_hdr = "Proxy-Authorization"
if proxy_auth_hdr in headers:
tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
# Proxy-Authorization should not be sent to origin
# server.
del headers[proxy_auth_hdr]
h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
except socket.error, err: # XXX what error?
h.close()
raise URLError(err)
else:
try:
r = h.getresponse(buffering=True)
except TypeError: # buffering kw not supported
r = h.getresponse()
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
fp = socket._fileobject(r, close=True)
resp = addinfourl(fp, r.msg, req.get_full_url())
resp.code = r.status
resp.msg = r.reason
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, debuglevel=0, context=None):
AbstractHTTPHandler.__init__(self, debuglevel)
self._context = context
def https_open(self, req):
return self.do_open(httplib.HTTPSConnection, req,
context=self._context)
https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
def __init__(self, cookiejar=None):
import cookielib
if cookiejar is None:
cookiejar = cookielib.CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
class UnknownHandler(BaseHandler):
def unknown_open(self, req):
type = req.get_type()
raise URLError('unknown url type: %s' % type)
def parse_keqv_list(l):
"""Parse list of key=value strings where keys are not duplicated."""
parsed = {}
for elt in l:
k, v = elt.split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
parsed[k] = v
return parsed
def parse_http_list(s):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Neither commas nor quotes count if they are escaped.
Only double-quotes count, not single-quotes.
"""
res = []
part = ''
escape = quote = False
for cur in s:
if escape:
part += cur
escape = False
continue
if quote:
if cur == '\\':
escape = True
continue
elif cur == '"':
quote = False
part += cur
continue
if cur == ',':
res.append(part)
part = ''
continue
if cur == '"':
quote = True
part += cur
# append last part
if part:
res.append(part)
return [part.strip() for part in res]
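# Illustrative example (added comment): parsing a Digest challenge fragment.
#   parse_http_list('realm="test", qop="auth, auth-int"')
#       -> ['realm="test"', 'qop="auth, auth-int"']
#   parse_keqv_list(['realm="test"', 'qop="auth, auth-int"'])
#       -> {'realm': 'test', 'qop': 'auth, auth-int'}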
def _safe_gethostbyname(host):
try:
return socket.gethostbyname(host)
except socket.gaierror:
return None
class FileHandler(BaseHandler):
# Use local file or FTP depending on form of URL
def file_open(self, req):
url = req.get_selector()
if url[:2] == '//' and url[2:3] != '/' and (req.host and
req.host != 'localhost'):
req.type = 'ftp'
return self.parent.open(req)
else:
return self.open_local_file(req)
# names for the localhost
names = None
def get_names(self):
if FileHandler.names is None:
try:
FileHandler.names = tuple(
socket.gethostbyname_ex('localhost')[2] +
socket.gethostbyname_ex(socket.gethostname())[2])
except socket.gaierror:
FileHandler.names = (socket.gethostbyname('localhost'),)
return FileHandler.names
# not entirely sure what the rules are here
def open_local_file(self, req):
import email.utils
import mimetypes
host = req.get_host()
filename = req.get_selector()
localfile = url2pathname(filename)
try:
stats = os.stat(localfile)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(filename)[0]
headers = mimetools.Message(StringIO(
'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
if host:
host, port = splitport(host)
if not host or \
(not port and _safe_gethostbyname(host) in self.get_names()):
if host:
origurl = 'file://' + host + filename
else:
origurl = 'file://' + filename
return addinfourl(open(localfile, 'rb'), headers, origurl)
except OSError, msg:
# urllib2 users shouldn't expect OSErrors coming from urlopen()
raise URLError(msg)
raise URLError('file not on local host')
class FTPHandler(BaseHandler):
def ftp_open(self, req):
import ftplib
import mimetypes
host = req.get_host()
if not host:
raise URLError('ftp error: no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
try:
host = socket.gethostbyname(host)
except socket.error, msg:
raise URLError(msg)
path, attrs = splitattr(req.get_selector())
dirs = path.split('/')
dirs = map(unquote, dirs)
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
fp, retrlen = fw.retrfile(file, type)
headers = ""
mtype = mimetypes.guess_type(req.get_full_url())[0]
if mtype:
headers += "Content-type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-length: %d\n" % retrlen
sf = StringIO(headers)
headers = mimetools.Message(sf)
return addinfourl(fp, headers, req.get_full_url())
except ftplib.all_errors, msg:
raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
fw = ftpwrapper(user, passwd, host, port, dirs, timeout,
persistent=False)
## fw.ftp.set_debuglevel(1)
return fw
class CacheFTPHandler(FTPHandler):
# XXX would be nice to have pluggable cache strategies
# XXX this stuff is definitely not thread safe
def __init__(self):
self.cache = {}
self.timeout = {}
self.soonest = 0
self.delay = 60
self.max_conns = 16
def setTimeout(self, t):
self.delay = t
def setMaxConns(self, m):
self.max_conns = m
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
key = user, host, port, '/'.join(dirs), timeout
if key in self.cache:
self.timeout[key] = time.time() + self.delay
else:
self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
self.timeout[key] = time.time() + self.delay
self.check_cache()
return self.cache[key]
def check_cache(self):
# first check for old ones
t = time.time()
if self.soonest <= t:
for k, v in self.timeout.items():
if v < t:
self.cache[k].close()
del self.cache[k]
del self.timeout[k]
self.soonest = min(self.timeout.values())
# then check the size
if len(self.cache) == self.max_conns:
for k, v in self.timeout.items():
if v == self.soonest:
del self.cache[k]
del self.timeout[k]
break
self.soonest = min(self.timeout.values())
def clear_cache(self):
for conn in self.cache.values():
conn.close()
self.cache.clear()
self.timeout.clear()
| [
"kanghee4924@gmail.com"
] | kanghee4924@gmail.com |
d999c73ccdbfd6c1659a34e31175408be91b1250 | b2a2a2f7e19fc8e9c6f5d2dedb0b4b10d7c813ae | /backend/api/migrations/0022_barmeta_first_calll.py | 557b7fe99b2acf7c5983a53c569529ffb89f9489 | [] | no_license | glenstarchman/bar-rate | d7a6e6660bd3fafe7777d435d33334e2be4d0480 | 575e5f695650487a679ede04af6f62d464c53c18 | refs/heads/master | 2022-02-27T01:31:17.879000 | 2019-09-26T14:43:05 | 2019-09-26T14:43:05 | 191,376,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-06-02 14:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0021_barmeta_wifi'),
]
operations = [
migrations.AddField(
model_name='barmeta',
name='first_calll',
field=models.BooleanField(default=False),
),
]
| [
"glen@starchman.com"
] | glen@starchman.com |
dc5410da4cfff303d0f5fbc6d93fce02dc7cad1f | 79ed3f72555aad8548634f523f775f34cfe166e7 | /catch/datasets/guaroa.py | 5acd22fecfb7fdabe5e8b72c0eb5fa30d32a8df1 | [
"MIT"
] | permissive | John-Bioinfo/catch | a2ab188ed598767e7759f74227f24af2b284b379 | fe63b86bc41396c1da0b449ac440c6ae9e52b2c5 | refs/heads/master | 2020-03-18T09:29:10.315733 | 2018-04-17T18:36:47 | 2018-04-17T18:36:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | """Dataset with 'Guaroa orthobunyavirus' sequences.
A dataset with 25 'Guaroa orthobunyavirus' sequences. The virus is
segmented and has 3 segments. Based on their strain and/or isolate,
these sequences were able to be grouped into 16 genomes. Many genomes
may have fewer than 3 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
from os.path import dirname
from os.path import join
from os import listdir
import sys
from catch.datasets import GenomesDatasetMultiChrom
__author__ = 'Hayden Metsky <hayden@mit.edu>'
chrs = ["segment_" + seg for seg in ['L', 'M', 'S']]
def seq_header_to_chr(header):
import re
c = re.compile(r'\[segment (L|M|S)\]')
m = c.search(header)
if not m:
raise ValueError("Unknown segment in header %s" % header)
seg = m.group(1)
valid_segs = ['L', 'M', 'S']
if seg not in valid_segs:
raise ValueError("Unknown segment %s" % seg)
return "segment_" + seg
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
chrs, seq_header_to_chr)
for f in listdir(join(dirname(__file__), "data/guaroa/")):
ds.add_fasta_path("data/guaroa/" + f, relative=True)
sys.modules[__name__] = ds
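# Note (added comment): because sys.modules[__name__] is replaced above,
# `from catch.datasets import guaroa` yields this GenomesDatasetMultiChrom
# instance directly rather than a plain module object.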
| [
"hmetsky@gmail.com"
] | hmetsky@gmail.com |
9803489d673fad8d596b11ce793086bb11ba7af1 | 4ae5d091e35fad9acea8d5b4044639a72da4b6ab | /src/irobotframework/magic/robot.py | 440ac6611410b6bff29cfe1437ae7f356d2559af | [
"BSD-3-Clause"
] | permissive | nrbgt/irobotframework | 941d76326e2cd71ee77aef23f3a8e48f0483e649 | 865311e0f89e2418e253f60cd7ae50990d4d6e6a | refs/heads/master | 2020-04-30T17:10:32.593605 | 2019-01-15T14:50:53 | 2019-01-15T14:50:53 | 176,971,493 | 0 | 0 | null | 2019-03-21T15:17:52 | 2019-03-21T15:17:51 | null | UTF-8 | Python | false | false | 2,821 | py | # Copyright (c) 2018 Georgia Tech Research Corporation
# Distributed under the terms of the BSD-3-Clause License
import re
from typing import Text, Callable
from tornado.concurrent import Future
from IPython import get_ipython
from ipykernel.ipkernel import IPythonKernel
from ..irobot import StringTidy
from .. import patches
__all__ = ["register_robot_cell_magic", "default_robot_cell_magics"]
# pattern for matching the `%%python module` magic
PY_MAGIC_RE = re.compile(
r"^%%python\s*module\s*(?P<name>[a-zA-Z_][a-zA-Z_\d]*)\s*\n(?P<body>.*)$", re.S
)
# pattern for matching the `%%tidy` magic
TIDY_MAGIC_RE = re.compile(r"^%%tidy\s*\n(?P<body>.*)$", re.S)
# pattern for cleaning weird test/tasks
TIDY_WEIRD = re.compile(r"^\|?\s*\*+\s*(test case|task)s?\s*\*+\|?", re.I | re.M)
def default_robot_cell_magics():
""" The default cell magics
"""
return {
"python module": dict(pattern=PY_MAGIC_RE, func=cell_magic_python_module),
"tidy": dict(pattern=TIDY_MAGIC_RE, func=cell_magic_tidy),
}
def register_robot_cell_magic(name: Text, pattern: Text, func: Callable):
""" Add a robot cell magic. the func is a callable with a notional arg spec
of:
def magic_function(code,
silent=silent,
store_history=store_history,
user_expressions=user_expressions,
allow_stdin=allow_stdin,)
it may return:
- an execute_response dict
- a string of the robot code to be executed
"""
kernel = get_ipython().kernel
kernel.robot_magics["cell"][name] = {"pattern": pattern, "func": func}
def unregister_robot_cell_magic(name: Text):
""" Remove a robot cell magic
"""
kernel = get_ipython().kernel
kernel.robot_magics["cell"].pop(name, None)
def cell_magic_python_module(code, **kwargs):
""" the %%python module cell magic
"""
kernel = get_ipython().kernel
match = re.match(PY_MAGIC_RE, code.strip())
groups = match.groupdict()
with patches.ScopedCodeRunner(kernel.shell, groups["name"]):
result = IPythonKernel.do_execute(kernel, groups["body"], **kwargs)
if isinstance(result, Future):
result = result.result()
return result
def cell_magic_tidy(code, **kwargs):
""" Use robot's tidy to normalize cell content
"""
match = re.match(TIDY_MAGIC_RE, code.strip())
groups = match.groupdict()
tidied = StringTidy().file(groups["body"])
# seems to always inject an empty test case table
if re.match(TIDY_WEIRD, groups["body"]) is None:
tidied = re.sub(TIDY_WEIRD, "", tidied).strip()
return dict(
status="ok", payload=[dict(source="set_next_input", text=tidied, replace=True)]
)
| [
"todd.shayler@gtri.gatech.edu"
] | todd.shayler@gtri.gatech.edu |
bf5657467f3bc562c237bba7906c9b1146e9b92a | 18d7876af265ec974aa5ecf9142093d845b59020 | /module/Favourite.py | d373b337f2caccf5bd99c89382bc95c872888048 | [] | no_license | xiaojieluo/savemylink | b6b2b806b8a760369860e2ec83fd85dece9bfd9d | 7eb64a4742516486bebe8498374b94552c682cfe | refs/heads/master | 2021-06-13T16:49:13.795891 | 2017-03-23T03:48:39 | 2017-03-23T03:48:39 | 77,986,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | #!/usr/bin/env python
# coding=utf-8
from module.DB import db
from module.define import *
class Favourite(object):
db = db
link_list = []
value_dict = dict()
favourite_public = FAVOURITE_PUBLIC
favourite_count = FAVOURITE_COUNT
def __init__(self, fid = 0):
if not isinstance(fid, int):
raise TypeError('Bad operand type')
self.db = db
self.fid = fid
self.favourite_info = FAVOURITE_INFO.format(fid=self.fid)
self.favourite_count = FAVOURITE_COUNT
self.favourite = FAVOURITE.format(fid=self.fid)
self.favourite_public = FAVOURITE_PUBLIC
@classmethod
def create(cls,info):
#info = dict(
# name='name',
# created_at = 'created_at'
#)
favourite_count = FAVOURITE_COUNT
fid = cls.db.r.incr(cls.favourite_count)
favourite_info = FAVOURITE_INFO.format(fid=fid)
cls.db.r.hmset(favourite_info, info)
if info['public']:
cls.db.r.sadd(cls.favourite_public, fid)
# only return fid
        # if you want to add the fid to the account_favourite table,
        # you need to run code like the following:
# user = Account(id)
# account_favourite = ACCOUNT_FAVOURITE.format(uid=uid)
# cls.db.r.sadd(account_favourite, fid)
return fid
@classmethod
def public(cls):
"""
        Return all public favourites.
        """
        # pagination could be done here
pub = cls.db.smembers(cls.favourite_public)
result = []
if pub:
for k in pub:
result.append(Favourite(k))
return result
else:
return []
@property
def isPublic(self):
        public = self.db.r.sismember(self.favourite_public, self.fid)
return public
@property
def name(self):
#favourite_info = FAVOURITE_INFO.format(fid=self.fid)
result = self.db.r.hget(self.favourite_info, 'name')
return result
@property
def author(self):
user_id = int(self.db.hget(self.favourite_info, 'author'))
# print(self.db.r.hgetall(self.favourite_info))
# print(type(user_id))
if user_id:
from lib.Account import Account
return Account(user_id)
@name.setter
def name(self, value):
self.value_dict['name'] = value
@property
def created_at(self):
#favourite_info = FAVOURITE_INFO.format(fid=self.fid)
return self.db.r.hget(self.favourite_info, 'created_at')
@created_at.setter
def created_at(self, value):
self.value_dict['created_at'] = value
    # add link id(s) to the favourite; data stays buffered until save() is run
def addlink(self, lid):
if isinstance(lid, list):
for k in lid:
if k not in self.link_list:
                    self.link_list.append(k)
else:
lid = int(lid)
if lid not in self.link_list:
#self.linkid = []
self.link_list.append(lid)
return True
#print(self.link_list)
def save(self):
# save Favourite information
if len(self.value_dict) > 0:
self.db.r.hmset(self.favourite_info, self.value_dict)
# save link id into the favourite
if len(self.link_list) > 0:
for k in self.link_list:
self.db.r.sadd(self.favourite, k)
#del self.link_list[:]
self.link_list = []
self.value_dict = {}
return True
def links(self):
        # get all link ids in this favourite
        # (constructing Link objects from the ids is left to the handlers)
favourite_links = FAVOURITE.format(fid=self.fid)
tmp = self.db.smembers(favourite_links)
print(tmp)
# only return link id
# new class in Handler's
return tmp
#print(tmp)
#if len(tmp) > 0:
# result = []
# from lib.Link import Link
# for k in tmp:
# result.append(Link(k))
# return result
#else:
# return None
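# Illustrative usage sketch (added comment; assumes a reachable redis instance
# behind module.DB and hypothetical field values):
#
#   fid = Favourite.create({'name': 'reading list',
#                           'created_at': '2017-01-01',
#                           'public': True, 'author': 1})
#   fav = Favourite(fid)
#   fav.addlink(42)   # buffered in memory until save()
#   fav.save()        # flushes buffered info and link ids to redis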
| [
"xiaojieluoff@gmail.com"
] | xiaojieluoff@gmail.com |
24e05aac27a7eee6799ab5ec26fcb11af42151c3 | 147389cf28e0c92f0b2ef8000b0562a71d766880 | /pp.py | 764a1b7b187b101da8e094613efc9e68f4b889cc | [] | no_license | dohyekim/hello | 3821ca97079818c9938df33fc6d8d6ea9ca763a5 | 84f5704fe6cb6e5b63fb7903e311b650d65a394a | refs/heads/master | 2022-02-25T11:12:31.077335 | 2019-09-22T06:23:00 | 2019-09-22T06:23:00 | 156,352,382 | 1 | 0 | null | 2022-02-12T11:48:53 | 2018-11-06T08:39:18 | JavaScript | UTF-8 | Python | false | false | 707 | py | import requests
from bs4 import BeautifulSoup
import json
url = "https://www.melon.com/chart/index.htm"
headers = {
'Referer': 'https://www.melon.com/',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
html = requests.get(url, headers = headers).text
soup = BeautifulSoup(html, 'html.parser')
parameter = []
rank_50 = soup.select("table > tbody #lst50")
rank_100 = soup.select("table > tbody #lst100")
for i in rank_50:
a = i.attrs['data-song-no']
parameter.append(a)
for j in rank_100:
b = j.attrs['data-song-no']
parameter.append(b)
print(parameter)
param_ = ",".join(parameter)
print(param_) | [
"dhleen5@hanmail.net"
] | dhleen5@hanmail.net |
7a906032b07ddd8c57c95c5688999c4c40c6cdfd | 8973bc75cd166031df2906df5cb51c9d40d266d3 | /urls.py | 8d4f4452df67860548d021f319225d973277efee | [] | no_license | anujborah1/q | 53dcbbba6c54afb95451e64db4e22a560ad04d1e | ba21db8801efbd4aa27161bca5e48a3e4de68f31 | refs/heads/master | 2021-01-22T21:45:35.844543 | 2017-03-19T12:22:29 | 2017-03-19T12:22:29 | 85,472,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from django.conf.urls import include, url
from views import *
urlpatterns = [
url(r'^$',q, name='home'),
]
| [
"noreply@github.com"
] | noreply@github.com |
c32c95339c291c9e95d3a9a68086eadfbc24dc88 | 56d62ed2e8815b676743350e68f78902d5751dc0 | /helper-scripts/convert-log-to-json.py | 8441597c1042ccf242ce5eefee0256f3e32a5cc7 | [
"MIT"
] | permissive | ddsky/keras-machine-learning-framework | e6636dc9a9b806c49647bf317bceed670b0807f8 | 049ee78984f2f61165ff67506b0ced74b2928be9 | refs/heads/master | 2021-01-03T07:54:30.609349 | 2020-02-12T08:26:15 | 2020-02-12T08:26:15 | 239,989,480 | 1 | 0 | MIT | 2020-02-12T10:50:10 | 2020-02-12T10:50:10 | null | UTF-8 | Python | false | false | 16,925 | py | # Example:
#
# user$ python helper-scripts/convert-log-to-json.py processed/experiments/best_optimizer/inceptionv3/food-50-80-20-rmsprop-0.01-0.5-7-0.0-0.0
#
import re
import pprint
import json
import os
import sys
pp = pprint.PrettyPrinter(indent=4)
class ConverterLogJson:
path_root = 'F:/data'
path_experiment_relative = 'processed/different_models/all/food-50-80-20-densenet201-8'
file_command_log_name = 'command.txt'
file_data_json_name = 'data.json'
total_data_config = [
# general
{'search': 'render_device', 'field-name': 'render-device', 'type': 'string', 'section': 'general'},
# data
{'search': 'data_path', 'field-name': 'data-path', 'type': 'string', 'section': 'data'},
{'search': 'use_train_val', 'field-name': 'use-train-val', 'type': 'boolean', 'section': 'data'},
# transfer learning
{'search': 'number_trainable_layers', 'field-name': 'number-trainable-layers', 'type': 'string',
'section': 'transfer-learning'},
{'search': 'transfer_learning_model', 'field-name': 'transfer-learning-model', 'type': 'string',
'section': 'transfer-learning'},
{'search': 'input_dimension', 'field-name': 'input-dimension', 'type': 'string',
'section': 'transfer-learning'},
{'search': 'dense_size', 'field-name': 'dense-size', 'type': 'string', 'section': 'transfer-learning'},
{'search': 'weights', 'field-name': 'weights', 'type': 'string', 'section': 'transfer-learning'},
# machine learning
{'search': 'batch_size', 'field-name': 'batch-size', 'type': 'int', 'section': 'machine-learning'},
{'search': 'momentum', 'field-name': 'momentum', 'type': 'float', 'section': 'machine-learning'},
{'search': 'activation_function', 'field-name': 'activation-function', 'type': 'string',
'section': 'machine-learning'},
{'search': 'loss_function', 'field-name': 'loss-function', 'type': 'string', 'section': 'machine-learning'},
{'search': 'optimizer', 'field-name': 'optimizer', 'type': 'string', 'section': 'machine-learning'}
]
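    # Each entry above maps one "key:   value" line of command.txt into
    # self.config_data[section][field-name]; e.g. a line such as
    #   batch_size:            16
    # (shape assumed from the regex in parse_config_data) ends up as
    #   config_data['machine-learning']['batch-size'] = 16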
template_error_text_command_log_file = 'Something is wrong with the command log file "%s"'
epochs = 0
batches_total_learned = 0
batches_total_total = 0
duration_total_total = 0.0
duration_total_total_unit = 's'
duration_total_batch_average = 0.0
duration_total_batch_average_unit = 's'
config_data = {}
command = None
layers = 0
depth = 0
trainable = 0
best_train = None
experiment_data = None
    net_name = None
def __init__(self):
# read parameter
if len(sys.argv) > 1:
self.path_experiment_relative = sys.argv[1]
        self.path_experiment_absolute = '%s/%s' % (self.path_root, self.path_experiment_relative)
self.file_command_log_absolute = '%s/%s' % (self.path_experiment_absolute, self.file_command_log_name)
self.file_data_json_absolute = '%s/%s' % (self.path_experiment_absolute, self.file_data_json_name)
# check paths
if not os.path.exists(self.path_experiment_absolute):
raise AssertionError('Path "%s" does not exist.' % self.path_experiment_absolute)
if not os.path.exists(self.file_command_log_absolute):
raise AssertionError('Config file "%s" does not exists.' % self.file_command_log_absolute)
def parse_config_data(self, data):
self.experiment_data = {
'epochs': [],
'total': {
'config': {}
}
}
self.best_train = {
'val': {
'accuracy-top-1': 0.0
}
}
iter_data = iter(data)
for line in iter_data:
# search for command line
pattern = re.compile('.+(ml[ ]+train.+)')
matches = pattern.match(line)
if matches:
self.command = matches.group(1)
continue
            # search for model layer lines (trainable / not trainable)
pattern = re.compile('[0-9]+[ ]:[ ]+(.+)[ ]+\\((not )?trainable\\)')
matches = pattern.match(line)
if matches:
self.layers += 1
pattern_layer = re.compile('.*_conv')
matches_layer = pattern_layer.match(matches.group(1))
if matches_layer:
self.depth += 1
if matches.group(2) != 'not ':
self.trainable += 1
continue
# collect some total data
for config in self.total_data_config:
pattern = re.compile('%s:[ ]+(.+)' % config['search'])
matches = pattern.match(line)
if matches:
value = matches.group(1)
if not config['section'] in self.config_data:
self.config_data[config['section']] = {}
if config['type'] == 'int':
self.config_data[config['section']][config['field-name']] = int(value)
elif config['type'] == 'float':
self.config_data[config['section']][config['field-name']] = float(value)
elif config['type'] == 'boolean':
self.config_data[config['section']][config['field-name']] = True if value == 'True' else False
else:
if config['section'] == 'data':
value = value.replace('%s/' % self.path_root, '')
if config['section'] == 'general':
if value == 'GPU' or value == 'GPU1':
value = 'GTX 1060 (1)'
if value == 'GPU2':
value = 'GTX 1060 (2)'
self.config_data[config['section']][config['field-name']] = str(value)
continue
# collect all epochs
pattern_first = re.compile('Epoch ([0-9]+):[ ]+LearningRateScheduler setting learning rate to ([0-9]+\\.[0-9]+)')
matches_first = pattern_first.match(line)
if matches_first:
epoch = int(matches_first.group(1))
learning_rate = float(matches_first.group(2))
pattern_second = re.compile(
'^([0-9]+)/([0-9]+).+[ ]-[ ]([0-9]+[\\.]?[0-9]*)([m]?s) ([0-9]+[\\.]?[0-9]*)([m]?s).+' +
'loss:[ ]*([0-9]+[\\.]?[0-9]*).+' +
'acc:[ ]*([0-9]+[\\.]?[0-9]*).+' +
'top_k_categorical_accuracy:[ ]*([0-9]+[\\.]?[0-9]*).+' +
'val_loss:[ ]*([0-9]+[\\.]?[0-9]*).+' +
'val_acc:[ ]*([0-9]+[\\.]?[0-9]*).+' +
'val_top_k_categorical_accuracy:[ ]*([0-9]+[\\.]?[0-9]*).+'
)
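                # pattern_second targets a Keras progress summary line roughly of
                # the shape (example assumed, not taken from a real log):
                #   750/750 [====] - 123s 164ms/step - loss: 1.23 - acc: 0.65
                #   - top_k_categorical_accuracy: 0.90 - val_loss: 1.50
                #   - val_acc: 0.60 - val_top_k_categorical_accuracy: 0.88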
matches_second = pattern_second.match(next(iter_data))
if not matches_second:
raise AssertionError(self.template_error_text_command_log_file % self.file_command_log_absolute)
batches_epoch_learned = int(matches_second.group(1))
batches_epoch_total = int(matches_second.group(2))
                # the regex may capture fractional values; int() would raise on those
                duration_epoch_total = float(matches_second.group(3))
                duration_epoch_total_unit = matches_second.group(4)
                duration_epoch_batch_average = float(matches_second.group(5))
duration_epoch_batch_average_unit = matches_second.group(6)
loss_train = float(matches_second.group(7))
accuracy_top_1_train = float(matches_second.group(8))
accuracy_top_5_train = float(matches_second.group(9))
loss_val = float(matches_second.group(10))
accuracy_top_1_val = float(matches_second.group(11))
accuracy_top_5_val = float(matches_second.group(12))
# some assertions
if batches_epoch_learned != batches_epoch_total:
raise AssertionError(self.template_error_text_command_log_file % self.file_command_log_absolute)
if duration_epoch_total_unit == 'ms':
duration_epoch_total = duration_epoch_total / 1000
duration_epoch_total_unit = 's'
if duration_epoch_batch_average_unit == 'ms':
duration_epoch_batch_average = duration_epoch_batch_average / 1000
duration_epoch_batch_average_unit = 's'
# calculate total properties
self.epochs += 1
self.batches_total_learned += batches_epoch_learned
self.batches_total_total += batches_epoch_total
self.duration_total_total += duration_epoch_total
self.duration_total_batch_average += duration_epoch_batch_average
# get best val epoch
if accuracy_top_1_val > self.best_train['val']['accuracy-top-1']:
self.best_train = {
'epoch': epoch,
'train': {
'loss': loss_train,
'accuracy-top-1': accuracy_top_1_train,
'accuracy-top-5': accuracy_top_5_train
},
'val': {
'loss': loss_val,
'accuracy-top-1': accuracy_top_1_val,
'accuracy-top-5': accuracy_top_5_val
}
}
# add epoch to json data variable
self.experiment_data['epochs'].append(
{
'epoch': epoch,
'learning-rate': learning_rate,
'duration-total': duration_epoch_total,
'duration-total-unit': duration_epoch_total_unit,
'duration-batch-average': duration_epoch_batch_average,
'duration-batch-average-unit': duration_epoch_batch_average_unit,
'batches-learned': batches_epoch_learned,
'batches-total': batches_epoch_total,
'train': {
'loss': loss_train,
'accuracy-top-1': accuracy_top_1_train,
'accuracy-top-5': accuracy_top_5_train
},
'val': {
'loss': loss_val,
'accuracy-top-1': accuracy_top_1_val,
'accuracy-top-5': accuracy_top_5_val
}
}
)
@staticmethod
def get_number_of_folder(absolute_path):
return len(next(os.walk(absolute_path))[1])
@staticmethod
def get_number_of_files(absolute_path):
return len([name for name in os.listdir(absolute_path) if os.path.isfile(os.path.join(absolute_path, name))])
def get_classes_of_folder(self, absolute_path):
class_names = [dI for dI in os.listdir(absolute_path) if os.path.isdir(os.path.join(absolute_path, dI))]
folders = {}
for class_name in class_names:
folders[class_name] = self.get_number_of_files('%s/%s' % (absolute_path, class_name))
return folders
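    # get_classes_of_folder returns a mapping of class folder name to file
    # count, e.g. (class names assumed): {'pizza': 750, 'sushi': 748}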
def parse_json_from_command_log_file(self):
# read command log file
with open(self.file_command_log_absolute, 'r') as file:
data = file.readlines()
if data is None:
raise AssertionError('Could not read command log file')
# parse command file and write properties
self.parse_config_data(data)
self.write_net_name_and_depth()
data_path_absolute = None
if 'data' in self.config_data and 'data-path' in self.config_data['data']:
data_path_absolute = '%s/%s' % (self.path_root, self.config_data['data']['data-path'])
use_train_val = False
if 'data' in self.config_data and 'use-train-val' in self.config_data['data']:
use_train_val = self.config_data['data']['use-train-val']
if data_path_absolute is None or not os.path.exists(data_path_absolute):
raise AssertionError('Data path "%s" does not exist.' % data_path_absolute)
if not use_train_val:
            raise AssertionError('This script currently only supports the use-train-val layout.')
self.config_data['data']['data-path-train'] = '%s/%s' % (self.config_data['data']['data-path'], 'train')
self.config_data['data']['data-path-val'] = '%s/%s' % (self.config_data['data']['data-path'], 'val')
data_path_train_absolute = '%s/%s' % (self.path_root, self.config_data['data']['data-path-train'])
data_path_val_absolute = '%s/%s' % (self.path_root, self.config_data['data']['data-path-val'])
if not os.path.exists(data_path_train_absolute):
raise AssertionError('Data train path "%s" does not exist.' % data_path_train_absolute)
if not os.path.exists(data_path_val_absolute):
raise AssertionError('Data val path "%s" does not exist.' % data_path_val_absolute)
self.experiment_data['total'] = {
'command': self.command,
'config': self.config_data,
'epochs': self.epochs,
'learning-rates': [],
'duration-total': self.duration_total_total,
'duration-total-unit': self.duration_total_total_unit,
'duration-batch-average': round(self.duration_total_batch_average / self.epochs, 3),
'duration-batch-average-unit': self.duration_total_batch_average_unit,
'batches-learned': self.batches_total_learned,
'batches-total': self.batches_total_total,
'best-train': self.best_train,
'classes': {
'train': {
'count': self.get_number_of_folder(data_path_train_absolute),
'files': self.get_classes_of_folder(data_path_train_absolute)
},
'val': {
'count': self.get_number_of_folder(data_path_val_absolute),
'files': self.get_classes_of_folder(data_path_val_absolute)
}
},
'net': {
'name': self.net_name,
'layers': self.layers,
'depth': self.depth,
'trainable': self.trainable
}
}
return self.experiment_data
def build_json(self, print_data_json=False):
self.parse_json_from_command_log_file()
# print json
if print_data_json:
pp.pprint(self.experiment_data)
# write json file
print('')
print('Write json file to "%s"' % self.file_data_json_absolute)
with open(self.file_data_json_absolute, 'w') as outfile:
json.dump(self.experiment_data, outfile, indent=4)
print('Done...')
def write_net_name_and_depth(self):
self.net_name = None
if 'transfer-learning' in self.config_data and 'transfer-learning-model' in self.config_data['transfer-learning']:
self.net_name = self.config_data['transfer-learning']['transfer-learning-model']
if self.net_name == 'DenseNet201':
self.depth = 201
elif self.net_name == 'DenseNet169':
self.depth = 169
elif self.net_name == 'DenseNet121':
self.depth = 121
elif self.net_name == 'InceptionResNetV2':
self.depth = 164
elif self.net_name == 'InceptionV3':
self.depth = 48
#elif self.net_name == 'NASNet':
# self.depth = 0
#elif self.net_name == 'NASNetLarge':
# self.depth = 0
#elif self.net_name == 'NASNetMobile':
# self.depth = 0
#elif self.net_name == 'MobileNet':
# self.depth = 0
elif self.net_name == 'MobileNetV2':
self.depth = 53
elif self.net_name == 'ResNet50':
self.depth = 50
elif self.net_name == 'VGG19':
self.depth = 19
elif self.net_name == 'Xception':
self.depth = 71
# create converter
converter_log_json = ConverterLogJson()
# build json file
converter_log_json.build_json(True)
| [
"bjoern@hempel.li"
] | bjoern@hempel.li |