Dataset schema (min/max are lengths for string and list columns and values for numeric columns; ⌀ marks columns that may be null):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string | 3 | 972 |
| max_stars_repo_name | string | 6 | 130 |
| max_stars_repo_head_hexsha | string | 40 | 78 |
| max_stars_repo_licenses | list | 1 | 10 |
| max_stars_count | int64 ⌀ | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | 24 | 24 |
| max_issues_repo_path | string | 3 | 972 |
| max_issues_repo_name | string | 6 | 130 |
| max_issues_repo_head_hexsha | string | 40 | 78 |
| max_issues_repo_licenses | list | 1 | 10 |
| max_issues_count | int64 ⌀ | 1 | 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | 24 | 24 |
| max_forks_repo_path | string | 3 | 972 |
| max_forks_repo_name | string | 6 | 130 |
| max_forks_repo_head_hexsha | string | 40 | 78 |
| max_forks_repo_licenses | list | 1 | 10 |
| max_forks_count | int64 ⌀ | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | 24 | 24 |
| content | string | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |
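The records below follow this schema, with the raw file text in the `content` column. As a quick way to work with an export of these rows, here is a minimal sketch, assuming the rows have been dumped to a JSON Lines file (the `rows.jsonl` filename and the filter thresholds are assumptions, not part of the dataset):

```python
import json

# Load a hypothetical JSON Lines export of the rows above and keep only the
# Python files whose content is mostly alphanumeric.
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh if line.strip()]

python_rows = [
    r for r in rows
    if r.get("ext") == "py" and r.get("alphanum_fraction", 0) > 0.5
]

for r in python_rows[:3]:
    print(r["hexsha"], r["max_stars_repo_name"], r["size"], r["avg_line_length"])
```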
ed58b3e938f78591c8eb769c53b5c6fe7f3ad615
| 4,435
|
py
|
Python
|
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/show_device_detail_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/show_device_detail_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/show_device_detail_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class ShowDeviceDetailRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_request_id': 'str',
'accept_language': 'str',
'sn': 'str'
}
attribute_map = {
'x_request_id': 'X-Request-Id',
'accept_language': 'Accept-Language',
'sn': 'sn'
}
def __init__(self, x_request_id=None, accept_language=None, sn=None):
"""ShowDeviceDetailRequest - a model defined in huaweicloud sdk"""
self._x_request_id = None
self._accept_language = None
self._sn = None
self.discriminator = None
if x_request_id is not None:
self.x_request_id = x_request_id
if accept_language is not None:
self.accept_language = accept_language
self.sn = sn
@property
def x_request_id(self):
"""Gets the x_request_id of this ShowDeviceDetailRequest.
        Request ID used to identify a single request, for troubleshooting and tracing. A UUID is recommended; if omitted, the backend generates one automatically.
:return: The x_request_id of this ShowDeviceDetailRequest.
:rtype: str
"""
return self._x_request_id
@x_request_id.setter
def x_request_id(self, x_request_id):
"""Sets the x_request_id of this ShowDeviceDetailRequest.
        Request ID used to identify a single request, for troubleshooting and tracing. A UUID is recommended; if omitted, the backend generates one automatically.
:param x_request_id: The x_request_id of this ShowDeviceDetailRequest.
:type: str
"""
self._x_request_id = x_request_id
@property
def accept_language(self):
"""Gets the accept_language of this ShowDeviceDetailRequest.
        Language parameter. Defaults to Chinese (zh_CN); use en_US for English.
:return: The accept_language of this ShowDeviceDetailRequest.
:rtype: str
"""
return self._accept_language
@accept_language.setter
def accept_language(self, accept_language):
"""Sets the accept_language of this ShowDeviceDetailRequest.
        Language parameter. Defaults to Chinese (zh_CN); use en_US for English.
:param accept_language: The accept_language of this ShowDeviceDetailRequest.
:type: str
"""
self._accept_language = accept_language
@property
def sn(self):
"""Gets the sn of this ShowDeviceDetailRequest.
        Terminal SN. May contain only digits, letters, and underscores. maxLength: 30, minLength: 1
:return: The sn of this ShowDeviceDetailRequest.
:rtype: str
"""
return self._sn
@sn.setter
def sn(self, sn):
"""Sets the sn of this ShowDeviceDetailRequest.
        Terminal SN. May contain only digits, letters, and underscores. maxLength: 30, minLength: 1
:param sn: The sn of this ShowDeviceDetailRequest.
:type: str
"""
self._sn = sn
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowDeviceDetailRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
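# A minimal usage sketch of this request model (the SN and language values below
# are illustrative assumptions, not taken from the SDK documentation):
#
#   request = ShowDeviceDetailRequest(sn="ABC_123", accept_language="en_US")
#   request.to_dict()  # {'x_request_id': None, 'accept_language': 'en_US', 'sn': 'ABC_123'}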
| 27.042683
| 84
| 0.580834
|
ca7e01d52a54225465c4be0c0ccc45e69defff7b
| 2,145
|
py
|
Python
|
tests/handlers/test_widgets.py
|
pkuyangchao/redash
|
1640b1e927a4d10ce9ae5c24b2d015734c696b08
|
[
"BSD-2-Clause"
] | null | null | null |
tests/handlers/test_widgets.py
|
pkuyangchao/redash
|
1640b1e927a4d10ce9ae5c24b2d015734c696b08
|
[
"BSD-2-Clause"
] | 4
|
2020-06-18T15:31:02.000Z
|
2021-03-25T23:31:41.000Z
|
tests/handlers/test_widgets.py
|
pkuyangchao/redash
|
1640b1e927a4d10ce9ae5c24b2d015734c696b08
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tests import BaseTestCase
from redash import models
class WidgetAPITest(BaseTestCase):
def create_widget(self, dashboard, visualization, width=1):
data = {
'visualization_id': visualization.id,
'dashboard_id': dashboard.id,
'options': {},
'width': width
}
rv = self.make_request('post', '/api/widgets', data=data)
return rv
def test_create_widget(self):
dashboard = self.factory.create_dashboard()
vis = self.factory.create_visualization()
rv = self.create_widget(dashboard, vis)
        self.assertEqual(rv.status_code, 200)
def test_wont_create_widget_for_visualization_you_dont_have_access_to(self):
dashboard = self.factory.create_dashboard()
vis = self.factory.create_visualization()
ds = self.factory.create_data_source(group=self.factory.create_group())
vis.query_rel.data_source = ds
models.db.session.add(vis.query_rel)
data = {
'visualization_id': vis.id,
'dashboard_id': dashboard.id,
'options': {},
'width': 1
}
rv = self.make_request('post', '/api/widgets', data=data)
self.assertEqual(rv.status_code, 403)
def test_create_text_widget(self):
dashboard = self.factory.create_dashboard()
data = {
'visualization_id': None,
'text': 'Sample text.',
'dashboard_id': dashboard.id,
'options': {},
'width': 2
}
rv = self.make_request('post', '/api/widgets', data=data)
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.json['text'], 'Sample text.')
def test_delete_widget(self):
widget = self.factory.create_widget()
rv = self.make_request('delete', '/api/widgets/{0}'.format(widget.id))
        self.assertEqual(rv.status_code, 200)
dashboard = models.Dashboard.get_by_slug_and_org(widget.dashboard.slug, widget.dashboard.org)
        self.assertEqual(dashboard.widgets.count(), 0)
| 31.086957
| 101
| 0.616783
|
0bd5a33a4c97287b15afdb330c80677ee4aaee97
| 7,395
|
py
|
Python
|
ComputerVision/CrossSections/most_recent/Measusre.python3.py
|
DelinLi/Phenotyping
|
b4c13fbfcc363d06bc58c2483bd6aca979681cc3
|
[
"MIT"
] | null | null | null |
ComputerVision/CrossSections/most_recent/Measusre.python3.py
|
DelinLi/Phenotyping
|
b4c13fbfcc363d06bc58c2483bd6aca979681cc3
|
[
"MIT"
] | null | null | null |
ComputerVision/CrossSections/most_recent/Measusre.python3.py
|
DelinLi/Phenotyping
|
b4c13fbfcc363d06bc58c2483bd6aca979681cc3
|
[
"MIT"
] | null | null | null |
#using python3
#Written by Delin Li, Schnable Lab @ CAU
#delin.bio@gmail.com
#Start 8:00 PM Jan 05, 2018
#updated 9:30 PM Jan 18, 2018 get rid of effect of white starch
#updated 6:30 PM Jan 30, 2018 adjust unit
import matplotlib
matplotlib.use('Agg')
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
from matplotlib import pyplot as plt
#from pathlib import Path
import imutils
import numpy as np
import argparse
import cv2
import re
import os
'''functions'''
def midpoint(ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
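# For example (illustrative values), midpoint((0, 0), (4, 2)) returns (2.0, 1.0).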
def BigArea(contours):
Area=0
j=0
for i in range(0,len(contours)):
c=contours[i]
if cv2.contourArea(c)>Area:
j=i
Area=cv2.contourArea(contours[i])
return(j)
def Str(c,orig):
# compute the rotated bounding box of the contour
box = cv2.minAreaRect(c)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
# order the points in the contour such that they appear
# in top-left, top-right, bottom-right, and bottom-left
# order, then draw the outline of the rotated bounding
# box
box = perspective.order_points(box)
cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
# loop over the original points and draw them
for (x, y) in box:
cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
# unpack the ordered bounding box, then compute the midpoint
# between the top-left and top-right coordinates, followed by
# the midpoint between bottom-left and bottom-right coordinates
(tl, tr, br, bl) = box
(tltrX, tltrY) = midpoint(tl, tr)
(blbrX, blbrY) = midpoint(bl, br)
    # compute the midpoint between the top-left and bottom-left points,
    # followed by the midpoint between the top-right and bottom-right
(tlblX, tlblY) = midpoint(tl, bl)
(trbrX, trbrY) = midpoint(tr, br)
# draw the midpoints on the image
cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
# draw lines between the midpoints
cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),(255, 0, 255), 8)
cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),(255, 0, 255), 8)
# compute the Euclidean distance between the midpoints
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
# compute the size of the object
if dA>=dB:
dimA = dA * pixelsPerMetric /1000
dimB = dB * pixelsPerMetric /1000
else:
dimA = dB * pixelsPerMetric /1000
dimB = dA * pixelsPerMetric /1000
# draw the object sizes on the image
#cv2.putText(orig, "{:.1f}in".format(dimA),
#(int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
#1.65, (255, 0, 0), 2)
#cv2.putText(orig, "{:.1f}in".format(dimB),
# (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
# 1.65, (255, 0, 0), 2)
return(orig,dimA,dimB)
'''read in parameter'''
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to the input image")
ap.add_argument("-o", "--output", required=True,
help="output file")
args = vars(ap.parse_args())
'''read in '''
img=cv2.imread(args["image"])
#img=cv2.imread("CML191_1_4.tif")
row, col =img.shape[0:2]
original = img.copy()
'''The marker '''
#cropped = img[54:105,44:540,]
#cv2.imwrite("Marker_200um.png",cropped)
#marker=cv2.imread("Marker_200um.png")
#h1,w1=marker.shape[0:2]
#w1: 496 200/496
#res = cv2.matchTemplate(img,marker,cv2.TM_CCOEFF_NORMED)
#min_val,max_val,min_loc,max_loc = cv2.minMaxLoc(res)
pixelsPerMetric= 4.032258
for i in range(50,110):
for j in range(40,580):
img[i,j,]=(55,36,34)#(0,0,0)
'''The Seed'''
B,G,R= cv2.split(img)
gaussian = cv2.GaussianBlur(R.copy(), (7, 7), 1)
th, binary = cv2.threshold(gaussian.copy(), 70, 255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
dilated = cv2.dilate(binary, kernel)
ForCon=cv2.erode(dilated, None, iterations=1)
_,contours, _ = cv2.findContours(ForCon.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
seed_c=contours[BigArea(contours)]
#plt.imshow(binary)
'''used in early version -> backup
Seed = cv2.Canny(binary, 100, 200)
Seed = cv2.dilate(Seed, None, iterations=1) #8
Seed = cv2.erode(Seed, None, iterations=1) #5
contours, hierarchy = cv2.findContours(Seed.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
seed_c=contours[BigArea(contours)]
'''
Area_seed=cv2.contourArea(seed_c) * pixelsPerMetric * pixelsPerMetric /1000000
embryo=np.full(img.shape,255,dtype=np.uint8)
embryo=cv2.drawContours(embryo, [seed_c], 0, (0, 0, 0), -1)
#create a copy for output to show different area and size
Draw_out=embryo.copy()
Draw_out,l_S,s_S=Str(seed_c,Draw_out)
Mask=np.full(img.shape,255,dtype=np.uint8)
Mask=cv2.drawContours(Mask, [seed_c], 0, (0, 0, 0), -1)
Masked=np.maximum(Mask,original)
'''The germ'''
#removed the effect of white starch
B,G,R= cv2.split(Masked)
th,RBin=cv2.threshold(R, 200, 1,cv2.THRESH_BINARY_INV)
HSV=cv2.cvtColor(Masked.copy(), cv2.COLOR_BGR2HSV)
th,S=cv2.threshold(HSV[:,:,1].copy() , 100, 1,cv2.THRESH_BINARY)
gaussian=HSV[:,:,0].copy()
th, binary = cv2.threshold(gaussian.copy(), 150, 255,cv2.THRESH_BINARY)
Combine=binary * RBin * S
######
kernel = np.ones((7,7),np.uint8)
# sure background area
sure_bg = cv2.dilate(Combine,kernel,iterations=10)
# Finding sure foreground area
sure_fg = cv2.erode(sure_bg,kernel,iterations=10)
_,contours, _ = cv2.findContours(sure_fg.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
germ_c=contours[BigArea(contours)]
#mask the embryo as red
Draw_out=cv2.drawContours(Draw_out, [germ_c], 0, (0, 0, 255), -1)
######
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
Germ = cv2.dilate(Combine, kernel)
#edges = cv2.Canny(Combine.copy(),100,200)
#edges=cv2.dilate(edges, None, iterations=1)
#edges=cv2.erode(edges, None)
'''change'''
_,contours, _ = cv2.findContours(Germ.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
germ_c=contours[BigArea(contours)]
#mask the embryo as red
Draw_out=cv2.drawContours(Draw_out, [germ_c], 0, (0, 0, 255), -1)
Draw_out,l_G,s_G=Str(germ_c,Draw_out)
Area_germ= cv2.contourArea(germ_c) * pixelsPerMetric * pixelsPerMetric /1000000
if Area_germ > Area_seed*0.75:
    print("Possible unexpected error: germ accounts for more than 75% of the seed:", args["image"])
if Area_germ < Area_seed*0.15:
    print("Possible unexpected error: germ accounts for less than 15% of the seed:", args["image"])
out=[args["image"], row, col, Area_seed,l_S,s_S, Area_germ,l_G,s_G]
if os.path.exists(args["output"]) and os.path.getsize(args["output"]) > 0:
with open(args["output"], "a") as fh:
for item in out:
fh.write("%s\t" % item)
fh.write("\n")
    # the with-block closes the file automatically
else:
with open(args["output"], "a") as fh:
fh.write("file\trows\tcols\tSeedArea\tSeedLength\tSeedWidth\tEmbryoArea\tEmbryoLength\tEmbryoWidth\n")
for item in out:
fh.write("%s\t" % item)
fh.write("\n")
    # the with-block closes the file automatically
'''output the final seed, embryo and their size'''
'''Compare before and after'''
outfig= "%s_%s" % ("Masked",re.sub(r'.*\/',r'',args["image"]))
plt.subplot(2,1,1),plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.subplot(2,1,2),plt.imshow(cv2.cvtColor(Draw_out, cv2.COLOR_BGR2RGB))
plt.savefig(outfig)
| 32.721239
| 104
| 0.703989
|
935083c70402133eefecb16717f57860ea1ab87e
| 4,679
|
py
|
Python
|
alab_mutations.py
|
AlaaALatif/bjorn
|
3bc4c2b5b5f6b18c93513721f5df96c47ba68ec8
|
[
"MIT"
] | null | null | null |
alab_mutations.py
|
AlaaALatif/bjorn
|
3bc4c2b5b5f6b18c93513721f5df96c47ba68ec8
|
[
"MIT"
] | null | null | null |
alab_mutations.py
|
AlaaALatif/bjorn
|
3bc4c2b5b5f6b18c93513721f5df96c47ba68ec8
|
[
"MIT"
] | null | null | null |
from path import Path
import pandas as pd
import bjorn_support as bs
import mutations as bm
import subprocess
import shlex
date = '13-01-2021'
out_dir = Path(f'/home/al/analysis/alab_mutations_{date}')
if not Path.isdir(out_dir):
Path.mkdir(out_dir)
print(f"Created results directory: {out_dir}")
else:
print(f"Results directory {out_dir} already exists...Continuing...")
num_cpus = 25
in_alab_seqs = Path(f'/home/al/code/HCoV-19-Genomics/consensus_sequences/')
in_alab_meta = Path(f'/home/al/code/HCoV-19-Genomics/metadata.csv')
gisaid_seqs = out_dir/'sequences_2021-01-06_08-17_cleaned.fasta'
gisaid_meta = out_dir/'metadata_2021-01-06_18-26.tsv'
ref_fp = Path('/home/al/data/hcov19/NC045512.fasta')
patient_zero = 'NC_045512.2'
fa_fp = out_dir/'alab_seqs.fa'
if not Path.isfile(fa_fp):
fa_fp = bs.concat_fasta(in_alab_seqs, out_dir/'alab_seqs')
print(f"Concatenated all sequences and wrote to {fa_fp}")
msa_fp = Path(fa_fp.split('.')[0] + '_aligned.fa')
if not Path.isfile(msa_fp):
print(f"Aligning sequences with reference...")
msa_fp = bs.align_fasta_reference(fa_fp, msa_fp, ref_fp=ref_fp, num_cpus=num_cpus)
print(f"Multiple sequence alignment of A-lab samples with reference saved in {msa_fp}")
msa2_fp = Path(fa_fp.split('.')[0] + '_aligned_absolute.fa')
# if not Path.isfile(msa2_fp):
# print(f"Aligning sequences without reference...")
# msa2_fp = bs.align_fasta(fa_fp, msa2_fp, num_cpus=num_cpus)
print(f"Multiple sequence alignment of A-lab samples without reference saved in {msa2_fp}")
tree_fp = msa_fp + '.treefile'
if not Path.isfile(tree_fp):
print(f"Computing phylogenetic tree...")
tree_fp = bs.compute_tree(msa_fp, num_cpus=num_cpus)
print(f"Phylogenetic tree of A-lab samples saved in {tree_fp}")
subs_long_fp = out_dir/f'alab_substitutions_long_{date}.csv'
subs_long, _ = bm.identify_replacements_per_sample(msa_fp, in_alab_meta,
bm.GENE2POS, data_src='alab')
subs_long.to_csv(subs_long_fp, index=False)
subs_wide = bm.identify_replacements(msa_fp, in_alab_meta)
subs_wide_fp = out_dir/f'alab_substitutions_wide_{date}.csv'
subs_wide.sort_values('num_samples', ascending=False).to_csv(subs_wide_fp, index=False)
print(f"Substitution-based mutations of A-lab samples saved in {subs_wide_fp}")
dels_long_fp = out_dir/f'alab_deletions_long_{date}.csv'
dels_long, _ = bm.identify_deletions_per_sample(msa_fp, in_alab_meta, patient_zero, bm.GENE2POS)
dels_long.to_csv(dels_long_fp, index=False)
dels_wide = bm.identify_deletions(msa_fp, in_alab_meta)
dels_wide_fp = out_dir/f'alab_deletions_wide_{date}.csv'
dels_wide.sort_values('num_samples', ascending=False).to_csv(dels_wide_fp, index=False)
print(f"Deletion-based mutations of A-lab samples saved in {dels_wide_fp}")
print(f"Aligning GISAID Sequences from {gisaid_seqs}...")
# gisaid_msa_fp = Path(gisaid_seqs.split('.')[0] + '_aligned.fa')
# if not Path.isfile(gisaid_msa_fp):
# gisaid_msa_fp = bs.align_fasta_reference(gisaid_seqs, out_filepath=gisaid_msa_fp, num_cpus=25, ref_fp=ref_fp)
# print(f"Multiple sequence alignment of GISAID Sequences saved in {gisaid_msa_fp}")
# print("Analyzing Mutations...")
# gisaid_subs_wide_fp = out_dir/f'gisaid_substitutions_wide_{date}.csv'
# if not Path.isfile(gisaid_subs_wide_fp):
# print("Identifying substitution-based mutations - wide (aggregated)...")
# gisaid_subs = bm.identify_replacements(gisaid_msa_fp, gisaid_meta, data_src='gisaid')
# gisaid_subs.to_csv(gisaid_subs_wide_fp, index=False)
# gisaid_subs_long_fp = out_dir/f'gisaid_substitutions_long_{date}.csv'
# if not Path.isfile(gisaid_subs_long_fp):
# print("Identifying substitution-based mutations - long...")
# gisaid_subs_long, _ = bm.identify_replacements_per_sample(gisaid_msa_fp, gisaid_meta, bm.GENE2POS, data_src='gisaid')
# gisaid_subs_long.to_csv(gisaid_subs_long_fp, index=False)
# gisaid_dels_wide_fp = out_dir/f'gisaid_deletions_wide_{date}.csv'
# if not Path.isfile(gisaid_dels_wide_fp):
# print("Identifying deletion-based mutations - wide (aggregated)...")
# gisaid_dels = bm.identify_deletions(gisaid_msa_fp, gisaid_meta, data_src='gisaid')
# gisaid_dels.to_csv(gisaid_dels_wide_fp, index=False)
# gisaid_dels_long_fp = out_dir/f'gisaid_deletions_long_{date}.csv'
# if not Path.isfile(gisaid_subs_long_fp):
# print("Identifying deletion-based mutations - long...")
# gisaid_dels_long, _ = bm.identify_deletions_per_sample(gisaid_msa_fp, gisaid_meta, patient_zero, bm.GENE2POS, data_src='gisaid')
# gisaid_dels_long.to_csv(gisaid_dels_long_fp, index=False)
# print(f"Deletion-based mutations of GISAID data saved in {gisaid_subs_fp}")
| 55.047059
| 134
| 0.767258
|
3c464e25fb35f21f9c3c6110e1d400e81a5fdb08
| 1,467
|
py
|
Python
|
shlibvischeck/analysis/header.py
|
yugr/ShlibVisibilityChecker
|
da0c3ba6d67bbf67085205be76985efe0af19e38
|
[
"MIT"
] | 27
|
2018-05-15T11:55:44.000Z
|
2022-03-02T21:10:25.000Z
|
shlibvischeck/analysis/header.py
|
yugr/ShlibVisibilityChecker
|
da0c3ba6d67bbf67085205be76985efe0af19e38
|
[
"MIT"
] | 3
|
2018-05-21T10:49:28.000Z
|
2022-01-11T18:53:21.000Z
|
shlibvischeck/analysis/header.py
|
yugr/ShlibVisibilityChecker
|
da0c3ba6d67bbf67085205be76985efe0af19e38
|
[
"MIT"
] | 3
|
2018-05-21T10:31:46.000Z
|
2022-01-27T21:54:07.000Z
|
# The MIT License (MIT)
#
# Copyright 2020-2022 Yury Gribov
#
# Use of this source code is governed by MIT license that can be
# found in the LICENSE.txt file.
"""
APIs for analyzing C headers
"""
import os
import os.path
import re
from shlibvischeck.common.process import *
from shlibvischeck.common.error import error
__all__ = ['read_header_api']
def read_header_api(hdr, whitelist, cflags, v=0):
""" Returns functions declared in header
(and included headers from whitelist). """
if not cflags:
cflags = ['']
# Is this a helper header and so not intended for direct inclusion?
is_helper = 'private' in hdr # E.g. json_object_private.h
hdr_base = os.path.basename(hdr)
for filename in whitelist:
with open(filename) as f:
txt = f.read()
if re.search(fr'^\s*#\s*include\s+[<"].*{hdr_base}[>"]', txt, re.M):
is_helper = True
errors = []
syms = []
for f in cflags:
cmd = ['read_header_api', '--only', ' '.join(whitelist), '--cflags', f, hdr]
rc, out, err = run(cmd, fatal=False)
if rc == 0:
syms = out.split('\n')
break
errors.append((' '.join(cmd), out, err))
if not syms and not is_helper:
msgs = ["failed to parse:"]
for cmd, out, err in errors:
msgs.append(f"compiling '{cmd}':")
msgs.append(err)
error('\n'.join(msgs))
if v > 0 and syms:
print("Public functions in header %s:\n %s"
% (hdr, '\n '.join(syms)))
return syms
| 24.45
| 80
| 0.625085
|
2ec8136353ae8fd5d69657fcc24d471a76ff0602
| 4,359
|
py
|
Python
|
tests/test_configs.py
|
q0w/aiodocker
|
08e8cd97cbe0fb7e38c97ac91df189e99879f885
|
[
"Apache-2.0"
] | 330
|
2017-04-12T19:36:03.000Z
|
2022-03-29T09:24:53.000Z
|
tests/test_configs.py
|
q0w/aiodocker
|
08e8cd97cbe0fb7e38c97ac91df189e99879f885
|
[
"Apache-2.0"
] | 623
|
2017-04-13T02:49:16.000Z
|
2022-03-29T12:21:48.000Z
|
tests/test_configs.py
|
romasku/aiodocker
|
daf32e791efea97a87744e5e66a90e478e0a8c2f
|
[
"Apache-2.0"
] | 76
|
2017-04-22T08:00:18.000Z
|
2021-11-23T04:34:06.000Z
|
import pytest
from aiodocker.exceptions import DockerError
@pytest.fixture
def tmp_config(event_loop, swarm, random_name):
config = event_loop.run_until_complete(
swarm.configs.create(name="config-" + random_name(), data=random_name())
)
yield config["ID"]
event_loop.run_until_complete(swarm.configs.delete(config["ID"]))
@pytest.mark.asyncio
async def test_config_list_with_filter(swarm, tmp_config):
docker_config = await swarm.configs.inspect(config_id=tmp_config)
name = docker_config["Spec"]["Name"]
filters = {"name": name}
filtered_list = await swarm.configs.list(filters=filters)
assert len(filtered_list) == 1
@pytest.mark.asyncio
async def test_config_update(swarm, tmp_config):
config = await swarm.configs.inspect(config_id=tmp_config)
config_id = config["ID"]
config = await swarm.configs.inspect(config_id)
current_labels = config["Spec"]["Labels"]
assert current_labels == {}
version = config["Version"]["Index"]
# update the config labels
await swarm.configs.update(
config_id=config_id, version=version, labels={"label1": "value1"}
)
config = await swarm.configs.inspect(config_id)
current_labels = config["Spec"]["Labels"]
version = config["Version"]["Index"]
assert current_labels == {"label1": "value1"}
@pytest.mark.asyncio
async def test_config_labels(swarm, tmp_config):
config = await swarm.configs.inspect(config_id=tmp_config)
config_id1 = config["ID"]
config = await swarm.configs.inspect(config_id1)
version = config["Version"]["Index"]
await swarm.configs.update(
config_id=config_id1, version=version, labels={"label1": "value1"}
)
# create a config with labels
name = "test_config2"
config = await swarm.configs.create(
name=name, data="test config2", labels={"label2": "value2"}
)
config_id2 = config["ID"]
config = await swarm.configs.inspect(config_id2)
version = config["Version"]["Index"]
current_labels = config["Spec"]["Labels"]
assert current_labels == {"label2": "value2"}
# search config based on labels
filters = {"label": "label1=value1"}
filtered_list = await swarm.configs.list(filters=filters)
assert len(filtered_list) == 1
await swarm.configs.update(
config_id=config_id2, version=version, labels={"label1": "value1"}
)
config = await swarm.configs.inspect(config_id2)
version = config["Version"]["Index"]
current_labels = config["Spec"]["Labels"]
assert current_labels == {"label1": "value1"}
filters = {"label": "label1=value1"}
filtered_list = await swarm.configs.list(filters=filters)
assert len(filtered_list) == 2
await swarm.configs.delete(config_id2)
@pytest.mark.asyncio
async def test_config_update_error(swarm, tmp_config):
config = await swarm.configs.inspect(config_id=tmp_config)
config_id = config["ID"]
# await asyncio.sleep(1)
config = await swarm.configs.inspect(config_id)
version = config["Version"]["Index"]
with pytest.raises(DockerError) as error:
await swarm.configs.update(
config_id=config_id, version=version, name="new name"
)
assert (
error.value.message == "rpc error: "
"code = InvalidArgument "
"desc = only updates to Labels are allowed"
)
@pytest.mark.asyncio
async def test_config_create_nodata_error(swarm):
name = "test_config-create_nodata_error"
with pytest.raises(TypeError):
await swarm.configs.create(name=name)
@pytest.mark.asyncio
async def test_config_create_b64_error(swarm):
name = "test_config-create_b64_error"
not_b64 = "I'm not base64 encoded"
with pytest.raises(DockerError) as error:
await swarm.configs.create(name=name, data=not_b64, b64=True)
assert error.value.message == "illegal base64 data at input byte 1"
@pytest.mark.asyncio
async def test_config_create_duplicated_error(swarm, tmp_config):
config = await swarm.configs.inspect(config_id=tmp_config)
name = config["Spec"]["Name"]
with pytest.raises(DockerError) as error:
await swarm.configs.create(name=name, data="test config")
assert (
error.value.message == "rpc error: "
"code = AlreadyExists "
"desc = config " + name + " already exists"
)
| 31.817518
| 80
| 0.690525
|
e7bc87d179881312f29567b73095edc9ccbc392d
| 1,392
|
py
|
Python
|
jrecommengine/random_ratings.py
|
joesonitaly/jrecommengine
|
f4ff432ae41bee89ad63341be6100ca1dd7ba6b5
|
[
"BSD-3-Clause"
] | null | null | null |
jrecommengine/random_ratings.py
|
joesonitaly/jrecommengine
|
f4ff432ae41bee89ad63341be6100ca1dd7ba6b5
|
[
"BSD-3-Clause"
] | null | null | null |
jrecommengine/random_ratings.py
|
joesonitaly/jrecommengine
|
f4ff432ae41bee89ad63341be6100ca1dd7ba6b5
|
[
"BSD-3-Clause"
] | null | null | null |
from random import seed, randint
from .config import *
from .engine import *
from .models import Like, Dislike, Similarity, Suggestion
def setRandomRatings():
print("***** Starting *****\n")
items = Item.objects.all()
itemsLength = len(items)
users = User.objects.all()
engine = Engine()
Like.objects.all().delete()
Dislike.objects.all().delete()
Similarity.objects.all().delete()
seed()
for user in users:
selectedItems = []
for _ in range(0, randint(0, itemsLength)):
item = items[randint(0, itemsLength - 1)]
while item in selectedItems:
item = items[randint(0, itemsLength - 1)]
engine.likes.add(user=user, item=item)
selectedItems.append(item)
try:
print("<Like: " + str(Like.objects.get(user=user, item=item)) + ">")
except ObjectDoesNotExist:
pass
for _ in range(0, randint(0, itemsLength - len(selectedItems))):
item = items[randint(0, itemsLength - 1)]
while item in selectedItems:
item = items[randint(0, itemsLength - 1)]
engine.dislikes.add(user=user, item=item)
selectedItems.append(item)
try:
print("<Dislike: " + str(Dislike.objects.get(user=user, item=item)) + ">")
except ObjectDoesNotExist:
pass
print("\n***** Done *****")
| 24.421053
| 86
| 0.589799
|
27f67f66ff6ded739eb4bc0acc19efa43e7c9709
| 14,250
|
py
|
Python
|
src/utils.py
|
gtk-rs/release
|
7e7b65477145600b69c68557e9bf7827f46d3e2d
|
[
"MIT"
] | 1
|
2020-07-05T14:41:31.000Z
|
2020-07-05T14:41:31.000Z
|
src/utils.py
|
gtk-rs/release
|
7e7b65477145600b69c68557e9bf7827f46d3e2d
|
[
"MIT"
] | 80
|
2017-06-11T16:34:16.000Z
|
2022-02-11T09:02:12.000Z
|
src/utils.py
|
gtk-rs/release
|
7e7b65477145600b69c68557e9bf7827f46d3e2d
|
[
"MIT"
] | 6
|
2017-05-07T20:33:04.000Z
|
2022-03-25T10:26:42.000Z
|
from os.path import join
import json
import subprocess
import sys
import time
# pip3 install requests
import requests
# local import
import consts
from globals import PULL_REQUESTS
from my_toml import TomlHandler
def write_error(error_msg):
sys.stderr.write('{}\n'.format(error_msg))
def write_msg(msg):
sys.stdout.write('{}\n'.format(msg))
def convert_to_string(content):
if content.__class__.__name__ == 'bytes':
return content.decode('utf-8')
return content
def get_file_content(file_path):
try:
with open(file_path, 'r') as file:
return file.read()
except Exception as err:
write_error('get_file_content failed: "{}": {}'.format(file_path, err))
return None
def write_into_file(file_path, content):
try:
with open(file_path, 'w') as file:
file.write(content)
return True
except Exception as err:
write_error('write_into_file failed: "{}": {}'.format(file_path, err))
return False
def exec_command(command, timeout=None):
# pylint: disable=consider-using-with
child = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = child.communicate(timeout=timeout)
return (child.returncode == 0,
convert_to_string(stdout),
convert_to_string(stderr))
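# For example (illustrative command, assuming a Unix environment),
# exec_command(['echo', 'hi']) returns (True, 'hi\n', '').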
def exec_command_and_print_error(command, timeout=None):
ret, stdout, stderr = exec_command(command, timeout=timeout)
if not ret:
write_error('Command "{}" failed:'.format(' '.join(command)))
if len(stdout) > 0:
write_error('=== STDOUT ===\n{}\n'.format(stdout))
if len(stderr) > 0:
write_error('=== STDERR ===\n{}\n'.format(stderr))
return ret
def clone_repo(repo_name, temp_dir, depth=None):
repo_url = '{}/{}/{}.git'.format(consts.GIT_URL, consts.ORGANIZATION, repo_name)
target_dir = join(temp_dir, repo_name)
try:
write_msg('=> Cloning "{}" from "{}"'.format(repo_name, repo_url))
command = ['git', 'clone', repo_url, target_dir]
if depth is not None:
command = ['git', 'clone', '--depth', '{}'.format(depth), repo_url, target_dir]
ret, stdout, stderr = exec_command(command, timeout=300)
if not ret:
write_error('command "{}" failed: ===STDOUT===\n{}\n===STDERR===\n{}'.format(
' '.join(command),
stdout,
stderr))
return False
command = ['bash', '-c', 'cd {} && git submodule update --init'.format(target_dir)]
if not exec_command_and_print_error(command):
input('Failed to init submodule... Press ENTER to continue')
return True
except subprocess.TimeoutExpired:
write_error('command timed out: {}'.format(' '.join(command)))
except Exception as err:
write_error('command "{}" got an exception: {}'.format(' '.join(command), err))
return False
def create_headers(token):
headers = {
'User-Agent': 'gtk-rs',
'Accept': 'application/vnd.github.v3+json',
}
if token is not None:
# Authentication to github.
headers['Authorization'] = 'token {}'.format(token)
return headers
def post_content(url, token, details, method='post', header_extras=None):
if header_extras is None:
header_extras = {}
headers = create_headers(token)
for extra in header_extras:
headers[extra] = header_extras[extra]
try:
req = None
if method == 'post':
req = requests.post(url, data=json.dumps(details), headers=headers)
else:
req = requests.put(url, data=json.dumps(details), headers=headers)
try:
req.raise_for_status()
except Exception:
            write_msg('Sent by github api: {}'.format(req.json()))
req.raise_for_status()
return req.json()
except Exception as err:
write_error('post_content: An error occurred: {}'.format(err))
return None
def get_highest_feature_version(v1_feature, v2_feature):
t_v1 = v1_feature[1:].split('_')
t_v2 = v2_feature[1:].split('_')
i = 0
while i < len(t_v1) and i < len(t_v2):
try:
x1_version = int(t_v1[i])
x2_version = int(t_v2[i])
if x1_version > x2_version:
return v1_feature
if x1_version < x2_version:
return v2_feature
i += 1
except Exception:
write_error('get_highest_feature_version int conversion error: int("{}") vs int("{}")'
' from "{}" and "{}"'.format(t_v1[i], t_v2[i], v1_feature, v2_feature))
break
return v1_feature
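# For example (illustrative feature names), get_highest_feature_version('v3_16', 'v3_20')
# returns 'v3_20', because 16 < 20 in the second component.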
# This function does two things:
#
# 1. Check if dox feature is present or try getting the highest version feature
# 2. Getting all the other features (for cairo it's very important)
def get_features(path):
# pylint: disable=too-many-branches
features = []
highest_version = None
content = get_file_content(path)
if content is None:
return ''
toml = TomlHandler(content)
dox_present = False
for section in toml.sections:
if section.name == 'features':
for entry in section.entries:
if entry['key'] in ['purge-lgpl-docs', 'default']:
continue
if entry['key'] == 'dox':
dox_present = True
if entry['key'].startswith('v'):
if highest_version is None:
highest_version = entry['key']
else:
highest_version = get_highest_feature_version(highest_version, entry['key'])
else:
features.append(entry['key'])
if dox_present is True:
if 'dox' not in features:
features.append('dox')
elif highest_version is not None:
write_msg("/!\\ Seems there is no dox feature so let's just use the highest version "
"instead...")
features.append(highest_version)
else:
write_msg("/!\\ That's weird: no dox or version feature. Is everything fine with this one?")
return ' '.join(features)
# def compare_versions(v1, v2):
# v1 = v1.split('.')
# v2 = v2.split('.')
#
# for x in range(0, min(len(v1), len(v2))):
# try:
# entry1 = int(v1)
# entry2 = int(v2)
# except Exception:
# # If it cannot be converted into a number, better just compare strings then.
# entry1 = v1
# entry2 = v2
# if entry1 > entry2:
# return 1
# if entry1 < entry2:
# return -1
# # In here, "3.2" is considered littler than "3.2.0". That's how life goes.
# return len(v1) - len(v2)
def commit_and_push(repo_name, temp_dir, commit_msg, target_branch):
commit(repo_name, temp_dir, commit_msg)
push(repo_name, temp_dir, target_branch)
def commit(repo_name, temp_dir, commit_msg):
repo_path = join(temp_dir, repo_name)
command = ['bash', '-c', 'cd {} && git commit . -m "{}"'.format(repo_path, commit_msg)]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
def push(repo_name, temp_dir, target_branch):
repo_path = join(temp_dir, repo_name)
command = ['bash', '-c', 'cd {} && git push origin HEAD:{}'.format(repo_path, target_branch)]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
def add_to_commit(repo_name, temp_dir, files_to_add):
repo_path = join(temp_dir, repo_name)
command = ['bash', '-c', 'cd {} && git add {}'
.format(repo_path, ' '.join(['"{}"'.format(f) for f in files_to_add]))]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
def revert_changes(repo_name, temp_dir, files):
repo_path = join(temp_dir, repo_name)
command = ['bash', '-c',
'cd {0} && git rm -f {1} && git checkout -- {1}'.format(
repo_path,
' '.join(['"{}"'.format(f) for f in files]))]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
def checkout_target_branch(repo_name, temp_dir, target_branch):
repo_path = join(temp_dir, repo_name)
command = ['bash', '-c', 'cd {} && git checkout {}'.format(repo_path, target_branch)]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
def checkout_to_new_branch(repo_name, temp_dir, target_branch):
repo_path = join(temp_dir, repo_name)
command = ['bash', '-c', 'cd {} && git checkout -b {}'.format(repo_path, target_branch)]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
def get_last_commit_date(repo_name, temp_dir):
repo_path = join(temp_dir, repo_name)
success, out, err = exec_command(['bash', '-c',
'cd {} && git log --format=%at --no-merges -n 1'.format(
repo_path)
])
return (success, out, err)
def get_last_commit_hash(repo_path):
success, out, _ = exec_command(['bash', '-c',
'cd {} && git rev-parse HEAD'.format(repo_path)
])
if success is True:
return out.strip()
return ''
def get_repo_last_commit_hash(repo_url):
success, out, _ = exec_command(['bash', '-c',
'git ls-remote {} HEAD'.format(repo_url)
])
if success is True:
return out.split('\n')[0].strip().split('\t')[0].split(' ')[0]
return '<unknown>'
def merging_branches(repo_name, temp_dir, merge_branch):
repo_path = join(temp_dir, repo_name)
command = ['bash', '-c', 'cd {} && git merge "origin/{}"'.format(repo_path, merge_branch)]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
def publish_crate(repository, crate_dir_path, temp_dir, crate_name):
write_msg('=> publishing crate {}'.format(crate_name))
path = join(join(temp_dir, repository), crate_dir_path)
# In case we needed to fix bugs, we checkout to crate branch before publishing crate.
command = [
'bash',
'-c',
'cd {} && cargo publish --no-verify'.format(path)]
retry = 3
error_messages = []
final_success = False
wait_time = 30
while retry > 0:
ret, stdout, stderr = exec_command(command)
if not ret:
error_messages.append('Command "{}" failed:'.format(' '.join(command)))
if len(stdout) > 0:
error_messages[len(error_messages) - 1] += '\n=== STDOUT ===\n{}\n'.format(stdout)
if len(stderr) > 0:
error_messages[len(error_messages) - 1] += '\n=== STDERR ===\n{}\n'.format(stderr)
retry -= 1
if retry > 0:
write_msg("Let's sleep for {} seconds before retrying, {} retr{} remaining..."
.format(wait_time, retry + 1, 'ies' if retry > 0 else 'y'))
time.sleep(wait_time)
else:
final_success = True
break
if final_success is False:
errors = set(error_messages)
write_msg('== ERRORS ==\n{}'.format('====\n'.join(errors)))
input("Something bad happened! Try to fix it and then press ENTER to continue...")
write_msg('> crate {} has been published'.format(crate_name))
def create_tag_and_push(tag_name, repository, temp_dir):
path = join(temp_dir, repository)
command = ['bash', '-c', 'cd {0} && git tag "{1}" && git push origin "{1}"'
.format(path, tag_name)]
if not exec_command_and_print_error(command):
input("Something bad happened! Try to fix it and then press ENTER to continue...")
def create_pull_request(repo_name, from_branch, target_branch, token, add_to_list=True):
req = post_content('{}/repos/{}/{}/pulls'.format(consts.GH_API_URL, consts.ORGANIZATION,
repo_name),
token,
{'title': '[release] merging {} into {}'.format(from_branch, target_branch),
'body': 'cc @GuillaumeGomez @sdroege @bilelmoussaoui',
'base': target_branch,
'head': from_branch,
'maintainer_can_modify': True})
if req is None:
write_error("Pull request from {repo}/{from_b} to {repo}/{target} couldn't be created. You "
"need to do it yourself... (url provided at the end)"
.format(repo=repo_name,
from_b=from_branch,
target=target_branch))
input("Press ENTER once done to continue...")
PULL_REQUESTS.append('|=> "{}/{}/{}/compare/{}...{}?expand=1"'
.format(consts.GITHUB_URL,
consts.ORGANIZATION,
repo_name,
target_branch,
from_branch))
else:
write_msg("===> Pull request created: {}".format(req['html_url']))
if add_to_list is True:
PULL_REQUESTS.append('> {}'.format(req['html_url']))
def check_if_up_to_date():
remote_repo = "git://github.com/gtk-rs/release.git"
last_commit = get_last_commit_hash(".")
remote_last_commit = get_repo_last_commit_hash(remote_repo)
if last_commit != remote_last_commit:
write_msg("Remote repository `{}` has a different last commit than local: `{}` != `{}`"
.format(remote_repo, remote_last_commit, last_commit))
text = input("Do you want to continue anyway? [y/N] ").strip().lower()
if len(text) == 0 or text != 'y':
write_msg("Ok, leaving then. Don't forget to update!")
return False
return True
| 38
| 100
| 0.580982
|
51192872a8a5c546544ff76518e70c7314d90391
| 6,923
|
py
|
Python
|
Adversarial-Playground-Text-viz/virt/lib/python3.6/site-packages/ebcli/core/abstractcontroller.py
|
AnupKumarGupta/deepWordBug
|
c6513e6421fa5204b07652fc3f619b1018696df2
|
[
"Apache-2.0"
] | 72
|
2018-07-02T07:47:15.000Z
|
2022-03-29T10:02:14.000Z
|
Adversarial-Playground-Text-viz/virt/lib/python3.6/site-packages/ebcli/core/abstractcontroller.py
|
AnupKumarGupta/deepWordBug
|
c6513e6421fa5204b07652fc3f619b1018696df2
|
[
"Apache-2.0"
] | 5
|
2018-07-12T10:55:46.000Z
|
2018-10-25T20:33:13.000Z
|
virtual_env/Lib/site-packages/ebcli/core/abstractcontroller.py
|
laubnyc/DommesticWebsite
|
ea4f00ae6998fbe3a8a264d528ad13e59be1fc5b
|
[
"MIT"
] | 29
|
2018-09-17T06:10:32.000Z
|
2022-03-19T13:15:30.000Z
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
import json
import sys
from cement.core import controller
from ebcli import __version__
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, utils
from ebcli.core import io, fileoperations
from ebcli.objects.exceptions import (
NoEnvironmentForBranchError,
PlatformWorkspaceNotSupportedError,
ApplicationWorkspaceNotSupportedError,
EBCLIException,
NotInitializedError
)
from ebcli.resources.strings import strings, flag_text
from ebcli.objects import region
from ebcli.operations import commonops
class AbstractBaseController(controller.CementBaseController):
"""
This is an abstract base class that is useless on its own, but used
by other classes to sub-class from and to share common commands and
arguments.
"""
class Meta:
label = 'abstract'
stacked_on = 'base'
stacked_type = 'nested'
arguments = [
(['environment_name'], dict(action='store', nargs='?',
default=[],
help=flag_text['general.env'])),
]
epilog = ''
usage = 'eb {cmd} <environment_name> [options ...]'
def do_command(self):
pass
@classmethod
def validate_workspace(cls):
workspace_type = fileoperations.get_workspace_type(None)
is_platform_workspace_only_command = cls.Meta.__dict__.get(
'is_platform_workspace_only_command'
)
requires_directory_initialization = cls.Meta.__dict__.get(
'requires_directory_initialization'
)
if '--modules' in sys.argv:
pass
elif '--help' in sys.argv:
pass
elif requires_directory_initialization and not workspace_type:
raise NotInitializedError(strings['exit.notsetup'])
elif is_platform_workspace_only_command:
if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
raise ApplicationWorkspaceNotSupportedError(
strings['exit.applicationworkspacenotsupported']
)
@controller.expose(hide=True)
def default(self):
"""
This command will be shared within all controllers that sub-class
from here. It can also be overridden in the sub-class
"""
self.validate_workspace()
self.do_command()
self.check_for_cli_update(__version__)
def check_workspace_type(self, expected_type):
workspace_type = fileoperations.get_workspace_type()
if workspace_type != expected_type:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise PlatformWorkspaceNotSupportedError(
strings['exit.platformworkspacenotsupported']
)
if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
raise ApplicationWorkspaceNotSupportedError(
strings['exit.applicationworkspacenotsupported']
)
def check_for_cli_update(self, version):
label = self.Meta.label
if label in ('create', 'deploy', 'status', 'clone', 'config'):
if cli_update_exists(version):
io.log_alert(strings['base.update_available'])
def get_app_name(self):
app_name = fileoperations.get_application_name()
return app_name
def get_env_name(self, cmd_example=None, noerror=False, varname='environment_name'):
env_name = getattr(self.app.pargs, varname, None)
if not env_name:
env_name = commonops. \
get_current_branch_environment()
workspace_type = fileoperations.get_workspace_type(Constants.WorkSpaceTypes.APPLICATION)
if not env_name:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise EBCLIException(strings['platform.nobuilderenv'])
if noerror:
return None
if not cmd_example:
message = strings['branch.noenv'].replace('{cmd}',
self.Meta.label)
else:
message = strings['branch.noenv'].replace('eb {cmd}',
cmd_example)
io.log_error(message)
raise NoEnvironmentForBranchError()
return env_name
@classmethod
def _add_to_handler(cls, handler):
handler.register(cls)
@property
def _help_text(self):
"""
        Returns the help text displayed for commands of the type `eb <command> <subcommand>`
except where <command> is "platform".
"""
longest = 0
def pad(label):
padlength = longest - len(label) + 2
padding = ' '
if padlength < 0:
for x in range(0, longest):
padding += ' '
else:
for x in range(0, padlength):
padding += ' '
return padding
help_txt = ''
for label in self._visible_commands:
if len(label) > longest:
longest = len(label)
for label in self._visible_commands:
cmd = self._dispatch_map[label]
cmd_txt = ' '
cmd_name = label
cmd_aliases = cmd['aliases']
if len(cmd_aliases) > 0 and cmd['aliases_only']:
cmd_name = cmd_aliases.pop(0)
cmd_txt += '{}'.format(cmd_name)
if cmd['help']:
cmd_txt += '{}{}'.format(pad(cmd_txt), cmd['help'])
if len(cmd_aliases) > 0:
cmd_txt += '\n{}(alias: {})'.format(pad(''), ', '.join(cmd_aliases))
cmd_txt += '\n'
help_txt += cmd_txt
if len(help_txt) > 0:
txt = '''{}
commands:
{}
'''.format(self._meta.description, help_txt)
else:
txt = self._meta.description
return textwrap.dedent(txt)
def cli_update_exists(current_version):
try:
data = utils.get_data_from_url(
'https://pypi.python.org/pypi/awsebcli/json', timeout=5)
data = json.loads(data)
latest = data['info']['version']
return latest != current_version
    except Exception:
return False
| 32.65566
| 101
| 0.602484
|
f505d3da3ebdecc63e3c211dbf9479e1ae473853
| 1,060
|
py
|
Python
|
corehq/apps/export/migrations/0011_defaultexportsettings_usecouchfiletypes.py
|
omari-funzone/commcare-hq
|
5edb462c891fc08e51c4babd7acdf12c0006a602
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/apps/export/migrations/0011_defaultexportsettings_usecouchfiletypes.py
|
omari-funzone/commcare-hq
|
5edb462c891fc08e51c4babd7acdf12c0006a602
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/apps/export/migrations/0011_defaultexportsettings_usecouchfiletypes.py
|
omari-funzone/commcare-hq
|
5edb462c891fc08e51c4babd7acdf12c0006a602
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
# Generated by Django 2.2.16 on 2021-01-12 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('export', '0010_defaultexportsettings'),
]
operations = [
migrations.AlterField(
model_name='defaultexportsettings',
name='cases_filetype',
field=models.CharField(choices=[('csv', 'CSV (zip file)'),
('xlsx', 'Excel 2007+'),
('xls', 'Excel (older versions)')],
default='xlsx', max_length=25),
),
migrations.AlterField(
model_name='defaultexportsettings',
name='forms_filetype',
field=models.CharField(choices=[('csv', 'CSV (zip file)'),
('xlsx', 'Excel 2007+'),
('xls', 'Excel (older versions)')],
default='xlsx', max_length=25),
),
]
| 35.333333
| 79
| 0.460377
|
0b6c14457bdc9c0916932d5f0e984ddaca2588a8
| 2,320
|
py
|
Python
|
CI/CA/CA.py
|
PiscesDream/Ideas
|
9ba710e62472f183ae4525f35659cd265c71392e
|
[
"Apache-2.0"
] | null | null | null |
CI/CA/CA.py
|
PiscesDream/Ideas
|
9ba710e62472f183ae4525f35659cd265c71392e
|
[
"Apache-2.0"
] | null | null | null |
CI/CA/CA.py
|
PiscesDream/Ideas
|
9ba710e62472f183ae4525f35659cd265c71392e
|
[
"Apache-2.0"
] | null | null | null |
from copy import deepcopy
from numpy import ones_like, array, zeros
import matplotlib.pyplot as plt
class CA(object):
'''
Cellular Automata with:
Numerical grid
Circular/Null boundary
Moore neighbourhood
'''
def __init__(self, init, radius, rule, circular = False, Null = .0, global_update = False):
'''
init: initial grid
radius: sense radius
rule: update rule
-local update: arg is a list
-global update: cur grid, arg as a 3d array
circular: boundary rule
'''
self.grid = init
self.w, self.h = self.grid.shape
self.radius = radius
self.rule = rule
self.circular = circular
self.Null = Null
self.global_update = global_update
def update(self):
if self.global_update:
arg_grid = zeros((self.grid.shape+((2 * self.radius+1) ** 2,)))
else:
newgrid = ones_like(self.grid) * self.Null
for x in range(self.w):
for y in range(self.h):
args = []
for kx in range(-self.radius, self.radius+1):
for ky in range(-self.radius, self.radius+1):
if 0 <= x+kx < self.w and 0 <= y+ky < self.h:
args.append(self.grid[x+kx, y+ky])
else:
if self.circular:
args.append(
self.grid[(x+kx >= self.w and (x+kx - self.w)) or (x+kx < self.w and x+kx),
(y+ky >= self.h and (y+ky - self.h)) or (y+ky < self.h and y+ky)])
else:
args.append(self.Null)
if self.global_update:
arg_grid[x][y] = args
else:
newgrid[x][y] = self.rule(array(args))
if self.global_update:
self.grid = self.rule(self.grid, arg_grid)
else:
self.grid = newgrid
return self.grid
def get_grid(self):
return self.grid
def plot(self):
plt.imshow(self.grid, interpolation = 'none', cmap = 'binary')#or cmap = 'gray'
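# A minimal usage sketch (the random grid and the majority rule below are
# illustrative assumptions, not part of this module):
#
#   import numpy as np
#   grid = (np.random.rand(32, 32) > 0.5).astype(float)
#   ca = CA(grid, radius=1, rule=lambda args: float(args.sum() > 4))
#   for _ in range(10):
#       ca.update()
#   ca.plot()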
| 35.692308
| 112
| 0.474569
|
5450e632b49e22219f280e4ba7d22a7767c25d54
| 33,394
|
py
|
Python
|
storm_analysis/sa_library/dao_fit_c.py
|
KimLab-SNU/storm-analysis-kimlab
|
31944769cdaa82a9ea5779b66c75d1d651852dc7
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/sa_library/dao_fit_c.py
|
KimLab-SNU/storm-analysis-kimlab
|
31944769cdaa82a9ea5779b66c75d1d651852dc7
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/sa_library/dao_fit_c.py
|
KimLab-SNU/storm-analysis-kimlab
|
31944769cdaa82a9ea5779b66c75d1d651852dc7
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/env python
"""
Python interface to the dao_fit C library.
Hazen
"""
import ctypes
import math
import numpy
from numpy.ctypeslib import ndpointer
import os
import sys
import storm_analysis.sa_library.ia_utilities_c as utilC
import storm_analysis.sa_library.loadclib as loadclib
#
# The Python definitions of the C structures in sa_library/multi_fit.h
#
class fitData(ctypes.Structure):
_fields_ = [('n_dposv', ctypes.c_int),
('n_iterations', ctypes.c_int),
('n_lost', ctypes.c_int),
('n_margin', ctypes.c_int),
('n_neg_fi', ctypes.c_int),
('n_neg_height', ctypes.c_int),
('n_non_converged', ctypes.c_int),
('n_non_decr', ctypes.c_int),
('fit_size_x', ctypes.c_int),
('fit_size_y', ctypes.c_int),
('image_size_x', ctypes.c_int),
('image_size_y', ctypes.c_int),
('jac_size', ctypes.c_int),
('max_nfit', ctypes.c_int),
('nfit', ctypes.c_int),
('roi_n_index', ctypes.c_int),
('min_height', ctypes.c_double),
('xoff', ctypes.c_double),
('yoff', ctypes.c_double),
('zoff', ctypes.c_double),
('tolerance', ctypes.c_double),
('bg_counts', ctypes.POINTER(ctypes.c_int)),
('roi_x_index', ctypes.POINTER(ctypes.c_int)),
('roi_y_index', ctypes.POINTER(ctypes.c_int)),
('stale', ctypes.POINTER(ctypes.c_int)),
('as_xi', ctypes.POINTER(ctypes.c_double)),
('bg_data', ctypes.POINTER(ctypes.c_double)),
('bg_estimate', ctypes.POINTER(ctypes.c_double)),
('err_i', ctypes.POINTER(ctypes.c_double)),
('f_data', ctypes.POINTER(ctypes.c_double)),
('rqe', ctypes.POINTER(ctypes.c_double)),
('scmos_term', ctypes.POINTER(ctypes.c_double)),
('t_fi', ctypes.POINTER(ctypes.c_double)),
('x_data', ctypes.POINTER(ctypes.c_double)),
('working_peak', ctypes.c_void_p),
('fit', ctypes.c_void_p),
('fit_model', ctypes.c_void_p),
('fn_alloc_peaks', ctypes.c_void_p),
('fn_calc_JH', ctypes.c_void_p),
('fn_calc_peak_shape', ctypes.c_void_p),
('fn_check', ctypes.c_void_p),
('fn_copy_peak', ctypes.c_void_p),
('fn_error_fn', ctypes.c_void_p),
('fn_free_peaks', ctypes.c_void_p),
('fn_peak_sum', ctypes.c_void_p),
('fn_update', ctypes.c_void_p)]
def formatPeaksArbitraryPSF(peaks, peaks_type):
"""
Input peaks array formatter for arbitrary PSFs.
Based on peaks_type, create a properly formatted ndarray to pass
to the C library. This is primarily for internal use by newPeaks().
"""
# These come from the finder, or the unit test code, create peaks
# as (N,3) with columns x, y, z.
#
# Note: "testing" is designed specifically for use by the unit
# tests. If you use this then initial height estimation
# for the peaks is not performed.
#
if (peaks_type == "testing") or (peaks_type == "finder"):
c_peaks = numpy.stack((peaks["x"],
peaks["y"],
peaks["z"]), axis = 1)
# These come from pre-specified peak fitting locations, create peaks
# as (N,5) with columns x, y, z, background, height.
#
elif (peaks_type == "text") or (peaks_type == "hdf5"):
c_peaks = numpy.stack((peaks["x"],
peaks["y"],
peaks["z"],
peaks["background"],
peaks["height"]), axis = 1)
else:
raise MultiFitterException("Unknown peaks type '" + peaks_type + "'")
return numpy.ascontiguousarray(c_peaks, dtype = numpy.float64)
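# For example (illustrative input), peaks = {"x": numpy.array([1.0]),
# "y": numpy.array([2.0]), "z": numpy.array([0.5])} with peaks_type "finder"
# yields the C-contiguous (1, 3) float64 array [[1.0, 2.0, 0.5]].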
def formatPeaksGaussianPSF(peaks, peaks_type):
"""
Input peaks array formatter for Gaussian PSFs.
Based on peaks_type, create a properly formatted ndarray to pass
to the C library. This is primarily for internal use by newPeaks().
"""
# These come from the finder, or the unit test code, create peaks
# as (N,4) with columns x, y, z, and sigma.
#
if (peaks_type == "testing") or (peaks_type == "finder"):
c_peaks = numpy.stack((peaks["x"],
peaks["y"],
peaks["z"],
peaks["sigma"]), axis = 1)
# These come from pre-specified peak fitting locations, create peaks
# as (N,7) with columns x, y, z, background, height, xsigma, ysigma.
#
elif (peaks_type == "text") or (peaks_type == "hdf5"):
c_peaks = numpy.stack((peaks["x"],
peaks["y"],
peaks["z"],
peaks["background"],
peaks["height"],
peaks["xsigma"],
peaks["ysigma"]), axis = 1)
else:
raise MultiFitterException("Unknown peaks type '" + peaks_type + "'")
return numpy.ascontiguousarray(c_peaks, dtype = numpy.float64)
def loadDaoFitC():
daofit = loadclib.loadCLibrary("dao_fit")
# These are from sa_library/multi_fit.c
daofit.mFitAnscombeTransformImage.argtypes = [ctypes.c_void_p]
daofit.mFitGetFitImage.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.float64)]
daofit.mFitGetNError.argtypes = [ctypes.c_void_p]
daofit.mFitGetNError.restype = ctypes.c_int
daofit.mFitGetPeakPropertyDouble.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.float64),
ctypes.c_char_p]
daofit.mFitGetPeakPropertyInt.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.int32),
ctypes.c_char_p]
daofit.mFitGetResidual.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.float64)]
daofit.mFitGetUnconverged.argtypes = [ctypes.c_void_p]
daofit.mFitGetUnconverged.restype = ctypes.c_int
daofit.mFitIterateLM.argtypes = [ctypes.c_void_p]
daofit.mFitNewBackground.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.float64)]
daofit.mFitNewImage.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.float64)]
daofit.mFitRemoveErrorPeaks.argtypes = [ctypes.c_void_p]
daofit.mFitRemoveRunningPeaks.argtypes = [ctypes.c_void_p]
daofit.mFitSetPeakStatus.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.int32)]
# These are from sa_library/dao_fit.c
daofit.daoCleanup.argtypes = [ctypes.c_void_p]
daofit.daoInitialize.argtypes = [ndpointer(dtype=numpy.float64),
ndpointer(dtype=numpy.float64),
ctypes.c_double,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
daofit.daoInitialize.restype = ctypes.POINTER(fitData)
daofit.daoInitialize2DFixed.argtypes = [ctypes.c_void_p]
daofit.daoInitialize2DFixedALS.argtypes = [ctypes.c_void_p]
daofit.daoInitialize2DFixedLS.argtypes = [ctypes.c_void_p]
daofit.daoInitialize2DFixedDWLS.argtypes = [ctypes.c_void_p]
daofit.daoInitialize2DFixedFWLS.argtypes = [ctypes.c_void_p]
daofit.daoInitialize2D.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double]
daofit.daoInitialize2DALS.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double]
daofit.daoInitialize2DLS.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double]
daofit.daoInitialize2DDWLS.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double]
daofit.daoInitialize2DFWLS.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double]
daofit.daoInitialize3D.argtypes = [ctypes.c_void_p,
ctypes.c_double,
ctypes.c_double]
daofit.daoInitializeZ.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.float64),
ndpointer(dtype=numpy.float64),
ctypes.c_double,
ctypes.c_double]
daofit.daoNewPeaks.argtypes = [ctypes.c_void_p,
ndpointer(dtype=numpy.float64),
ctypes.c_char_p,
ctypes.c_int]
return daofit
def printFittingInfo(mfit, spacing = " "):
"""
Print out some of the information the C fitting library keeps track of.
"""
print("--- Fitting ---")
print(spacing, mfit.contents.n_dposv, "fits reset due to Cholesky failure.")
print(spacing, mfit.contents.n_margin, "fits reset due to image margin.")
print(spacing, mfit.contents.n_neg_fi, "fits reset due to negative value in fit function.")
print(spacing, mfit.contents.n_neg_height, "fits reset due to negative height.")
print(spacing, mfit.contents.n_non_decr, "fits reset due to non-decreasing error (LM).")
print(spacing, mfit.contents.n_non_converged, "fits did not converge.")
print(spacing, mfit.contents.n_lost, "fits were lost.")
class MultiFitterException(Exception):
pass
class MultiFitter(object):
"""
Base class for fitting multiple possibly overlapping localizations. This is designed to be
used as follows:
1. At the start of the analysis, create a single instance of the appropriate fitting sub-class.
2. For each new image, call newImage() once.
3. Provide an estimate of the background with newBackground().
4. Add peaks to fit with newPeaks().
5. Call doFit() to fit the peaks.
6. Use multiple calls to getPeakProperties() to get the properties you are interested in.
7. Call cleanup() when you are done with this object and plan to throw it away.
See sa_library/fitting.py for a typical work flow.
    As all the static variables have been removed from the C library, you
    should be able to use several of these objects simultaneously for fitting.
All of the parameters are optional, use None if they are not relevant.
"""
def __init__(self,
rqe = None,
scmos_cal = None,
                 verbose = False, # (originally False, modified by ioah)
min_z = None,
max_z = None,
**kwds):
super(MultiFitter, self).__init__(**kwds)
self.clib = None
self.default_tol = 1.0e-6
self.im_shape = None
self.iterations = 0
self.max_z = max_z
self.mfit = None
self.min_z = min_z
self.n_proximity = 0
self.n_significance = 0
# These are all the peak (localization) properties that the C libraries
# estimate. Not all C libraries will provide estimates for all of these
# properties. It is used by the getPeakProperty() method to check that
# the requested property is valid.
#
self.peak_properties = {"background" : "float",
"bg_sum" : "float",
"error" : "float",
"fg_sum" : "float",
"height" : "float",
"iterations" : "int",
"jacobian" : "float",
"significance" : "compound",
"sum" : "float",
"status" : "int",
"x" : "float",
"xsigma" : "float",
"y" : "float",
"ysigma" : "float",
"z" : "float"}
self.rqe = rqe
self.scmos_cal = scmos_cal
self.verbose = verbose
def cleanup(self, spacing = " ", verbose = True):
"""
        This just prints the analysis statistics; it does not do any actual cleanup.
"""
if self.mfit is not None:
if verbose:
printFittingInfo(self.mfit, spacing = spacing)
print(spacing, self.n_proximity, "peaks lost to proximity filter.")
print(spacing, self.n_significance, "peaks lost to low significance.")
print(spacing, self.iterations, "fitting iterations.")
def doFit(self, max_iterations = 2000):
"""
This is where the fitting actually happens.
        FIXME: Why do we always do at least one iteration? I guess it doesn't
               matter because this should be a NOP as the C library will
               not do anything if all the peaks have converged.
"""
i = 0
self.iterate()
while(self.getUnconverged() and (i < max_iterations)):
if self.verbose and ((i%20)==0):
print("iteration", i)
self.iterate()
i += 1
if self.verbose:
if (i == max_iterations):
print(" Failed to converge in:", i, self.getUnconverged())
else:
print(" Multi-fit converged in:", i, self.getUnconverged())
print("")
# Get number of fitting iterations.
self.getIterations()
def getFitImage(self):
"""
Get the fit image, i.e. f(x), an image created from drawing all of
the current fits into a 2D array.
"""
fit_image = numpy.ascontiguousarray(numpy.zeros(self.im_shape, dtype = numpy.float64))
self.clib.mFitGetFitImage(self.mfit, fit_image)
return fit_image
def getIterations(self):
"""
        Update iterations and reset the C library counter. The idea is that
        the Python counter won't overflow, whereas the C counter might,
        particularly on a 32 bit system.
"""
self.iterations += self.mfit.contents.n_iterations
self.mfit.contents.n_iterations = 0
return self.iterations
def getNError(self):
"""
Return the number of peaks in the C library that are in the ERROR state.
"""
return self.clib.mFitGetNError(self.mfit)
def getNFit(self):
"""
Return the current number of peaks that the C library is handling.
"""
return self.mfit.contents.nfit
def getNFitMax(self):
"""
Return the current maximum number of peaks.
Note this is not a fixed value as the C library can dynamically
increase this. This method is primarily for testing purposes.
"""
return self.mfit.contents.max_nfit
def getPeakProperty(self, p_name):
"""
Return a numpy array containing the requested property.
"""
        if p_name not in self.peak_properties:
raise MultiFitterException("No such property '" + p_name + "'")
# Properties that are calculated from other properties.
if(self.peak_properties[p_name] == "compound"):
# Return 0 length array if there are no localizations.
if(self.getNFit() == 0):
return numpy.zeros(0, dtype = numpy.float64)
# Peak significance calculation.
if(p_name == "significance"):
bg_sum = self.getPeakProperty("bg_sum")
fg_sum = self.getPeakProperty("fg_sum")
return fg_sum/numpy.sqrt(bg_sum)
# Floating point properties.
elif(self.peak_properties[p_name] == "float"):
if (p_name == "jacobian"):
values = numpy.ascontiguousarray(numpy.zeros((self.getNFit(), self.mfit.contents.jac_size),
dtype = numpy.float64))
self.clib.mFitGetPeakPropertyDouble(self.mfit,
values,
ctypes.c_char_p(p_name.encode()))
else:
values = numpy.ascontiguousarray(numpy.zeros(self.getNFit(), dtype = numpy.float64))
self.clib.mFitGetPeakPropertyDouble(self.mfit,
values,
ctypes.c_char_p(p_name.encode()))
return values
# Integer properties.
elif(self.peak_properties[p_name] == "int"):
values = numpy.ascontiguousarray(numpy.zeros(self.getNFit(), dtype = numpy.int32))
self.clib.mFitGetPeakPropertyInt(self.mfit,
values,
ctypes.c_char_p(p_name.encode()))
return values
def getResidual(self):
"""
Get the residual, the data minus the fit image, xi - f(x).
"""
residual = numpy.ascontiguousarray(numpy.zeros(self.im_shape, dtype = numpy.float64))
self.clib.mFitGetResidual(self.mfit, residual)
return residual
def getUnconverged(self):
"""
Return the number of fits that have not yet converged.
"""
return self.clib.mFitGetUnconverged(self.mfit)
def incProximityCounter(self, n_inc):
self.n_proximity += n_inc
def incSignificanceCounter(self, n_inc):
self.n_significance += n_inc
def initializeC(self, image):
"""
This initializes the C fitting library.
It needs the image in order to know what size arrays to create
as we won't always have SCMOS calibration data.
"""
if self.scmos_cal is None:
if self.verbose:
print("Using zeros for sCMOS calibration data.")
self.scmos_cal = numpy.ascontiguousarray(numpy.zeros(image.shape), dtype = numpy.float64)
else:
self.scmos_cal = numpy.ascontiguousarray(self.scmos_cal, dtype = numpy.float64)
if self.rqe is None:
if self.verbose:
print("Using ones for relative quantum efficiency data.")
self.rqe = numpy.ascontiguousarray(numpy.ones(image.shape), dtype = numpy.float64)
else:
self.rqe = numpy.ascontiguousarray(self.rqe, dtype = numpy.float64)
if (image.shape[0] != self.scmos_cal.shape[0]) or (image.shape[1] != self.scmos_cal.shape[1]):
raise MultiFitterException("Image shape and sCMOS calibration shape do not match.")
if (image.shape[0] != self.rqe.shape[0]) or (image.shape[1] != self.rqe.shape[1]):
raise MultiFitterException("Image shape and RQE shape do not match.")
self.im_shape = self.scmos_cal.shape
def isInitialized(self):
        return (self.mfit is not None)
def iterate(self):
self.clib.mFitIterateLM(self.mfit)
def newBackground(self, background):
"""
Update the current background estimate.
"""
if (background.shape[0] != self.im_shape[0]) or (background.shape[1] != self.im_shape[1]):
raise MultiFitterException("Background image shape and the original image shape are not the same.")
self.clib.mFitNewBackground(self.mfit,
numpy.ascontiguousarray(background, dtype = numpy.float64))
def newImage(self, image):
"""
Initialize C fitter with a new image.
"""
if (image.shape[0] != self.im_shape[0]) or (image.shape[1] != self.im_shape[1]):
raise MultiFitterException("Current image shape and the original image shape are not the same.")
self.clib.mFitNewImage(self.mfit,
numpy.ascontiguousarray(image, dtype = numpy.float64))
def newPeaks(self, peaks, peaks_type):
"""
Sub classes override this to provide analysis specific peak addition.
"""
raise MultiFitterException("newPeaks() method not defined.")
def removeErrorPeaks(self):
"""
Instruct the C library to discard all the peaks in the ERROR state
from the list of peaks that it is maintaining.
"""
self.clib.mFitRemoveErrorPeaks(self.mfit)
def removeRunningPeaks(self):
"""
Instruct the C library to discard all the peaks in the RUNNING state
from the list of peaks that it is maintaining. This is usually called
at the end of the analysis after all of the peaks in the ERROR state
have been removed.
"""
self.clib.mFitRemoveRunningPeaks(self.mfit)
def rescaleZ(self, z):
"""
Convert Z from fitting units to microns.
"""
return z
def setPeakStatus(self, status):
"""
Set the status (RUNNING, CONVERGED, ERROR) of the peaks in the C library.
"""
assert (status.size == self.getNFit())
self.clib.mFitSetPeakStatus(self.mfit,
numpy.ascontiguousarray(status, dtype = numpy.int32))
class MultiFitterArbitraryPSF(MultiFitter):
"""
Base class for arbitrary PSF fitters (Spliner, PupilFn, PSFFFT)
"""
def formatPeaks(self, peaks, peaks_type):
return formatPeaksArbitraryPSF(peaks, peaks_type)
class MultiFitterGaussian(MultiFitter):
"""
Base class for Gaussian fitters (3D-DAOSTORM and sCMOS).
"""
def __init__(self, roi_size = 10, wx_params = None, wy_params = None, **kwds):
super(MultiFitterGaussian, self).__init__(**kwds)
self.roi_size = roi_size
self.wx_params = wx_params
self.wy_params = wy_params
self.clib = loadDaoFitC()
def cleanup(self, verbose = True):
super(MultiFitterGaussian, self).cleanup(verbose = verbose)
if self.mfit is not None:
self.clib.daoCleanup(self.mfit)
self.mfit = None
def formatPeaks(self, peaks, peaks_type):
return formatPeaksGaussianPSF(peaks, peaks_type)
def initializeC(self, image):
"""
This initializes the C fitting library.
"""
super(MultiFitterGaussian, self).initializeC(image)
self.mfit = self.clib.daoInitialize(self.rqe,
self.scmos_cal,
self.default_tol,
self.scmos_cal.shape[1],
self.scmos_cal.shape[0],
self.roi_size)
def newPeaks(self, peaks, peaks_type):
"""
Pass new peaks to add to the C library.
"""
c_peaks = self.formatPeaks(peaks, peaks_type)
self.clib.daoNewPeaks(self.mfit,
c_peaks,
ctypes.c_char_p(peaks_type.encode()),
c_peaks.shape[0])
class MultiFitter2DFixed(MultiFitterGaussian):
"""
Fit with a fixed peak width.
"""
def initializeC(self, image):
super(MultiFitter2DFixed, self).initializeC(image)
self.clib.daoInitialize2DFixed(self.mfit)
class MultiFitter2DFixedALS(MultiFitterGaussian):
"""
Fit with a fixed peak width using the Anscombe least squares fitting
error model.
"""
def initializeC(self, image):
super(MultiFitter2DFixedALS, self).initializeC(image)
self.clib.daoInitialize2DFixedALS(self.mfit)
def newImage(self, image):
super(MultiFitter2DFixedALS, self).newImage(image)
self.clib.mFitAnscombeTransformImage(self.mfit)
class MultiFitter2DFixedLS(MultiFitterGaussian):
"""
Fit with a fixed peak width using the Least squares fitting
error model.
"""
def initializeC(self, image):
super(MultiFitter2DFixedLS, self).initializeC(image)
self.clib.daoInitialize2DFixedLS(self.mfit)
class MultiFitter2DFixedDWLS(MultiFitterGaussian):
"""
Fit with a fixed peak width using the data weighted least squares
fitting error model.
"""
def initializeC(self, image):
super(MultiFitter2DFixedDWLS, self).initializeC(image)
self.clib.daoInitialize2DFixedDWLS(self.mfit)
class MultiFitter2DFixedFWLS(MultiFitterGaussian):
"""
Fit with a fixed peak width using the fit weighted least squares
fitting error model.
"""
def initializeC(self, image):
super(MultiFitter2DFixedFWLS, self).initializeC(image)
self.clib.daoInitialize2DFixedFWLS(self.mfit)
class MultiFitter2DFixedNC(MultiFitter2DFixed):
"""
Fit with a fixed peak width, but without correcting for RQE. More
specifically we set the RQE correction to 1.0 so that the fitter
will use the same RQE correction approach as the finder (the
original image is divided by the RQE). At least in theory this
will be slightly worse as the statistics will no longer be
exactly Poisson. In practice it appears that the differences are
somewhere in the 4th or 5th digit, so pretty small.
This is primarily for testing.
"""
def __init__(self, **kwds):
super(MultiFitter2DFixedNC, self).__init__(**kwds)
self.rqe = None
class MultiFitter2DFixedDWLSNC(MultiFitter2DFixedDWLS):
"""
Fit with a fixed peak width using the data weighted least squares
fitting error model, but without correcting for RQE. More
specifically we set the RQE correction to 1.0 so that the fitter
will use the same RQE correction approach as the finder (the
original image is divided by the RQE).
Using this we can test the performance of the combination of mean
gain and flat field correction with weighted least squares fitting
as reported by other labs. We'll use the same value for all of the
gains and use the RQE term as the flat field correction.
This is primarily for testing.
"""
def __init__(self, **kwds):
super(MultiFitter2DFixedDWLSNC, self).__init__(**kwds)
self.rqe = None
class MultiFitter2D(MultiFitterGaussian):
"""
Fit with a variable peak width (of the same size in X and Y).
"""
def __init__(self, sigma_range = None, **kwds):
super(MultiFitter2D, self).__init__(**kwds)
self.sigma_range = sigma_range
def initializeC(self, image):
super(MultiFitter2D, self).initializeC(image)
width_max = 1.0/(2.0 * self.sigma_range[0] * self.sigma_range[0])
width_min = 1.0/(2.0 * self.sigma_range[1] * self.sigma_range[1])
self.clib.daoInitialize2D(self.mfit,
width_min,
width_max)
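        # Worked example of the conversion above, assuming (for illustration)
        # sigma_range = [1.0, 2.0] pixels:
        #   width_max = 1.0/(2.0 * 1.0 * 1.0) = 0.5
        #   width_min = 1.0/(2.0 * 2.0 * 2.0) = 0.125
        # The smallest allowed sigma maps to the largest width parameter and
        # vice versa, since width = 1/(2*sigma^2).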
class MultiFitter2DALS(MultiFitterGaussian):
"""
Fit with a variable peak width (of the same size in X and Y) using
the Anscombe least squares fitting model.
"""
def __init__(self, sigma_range = None, **kwds):
super(MultiFitter2DALS, self).__init__(**kwds)
self.sigma_range = sigma_range
def initializeC(self, image):
super(MultiFitter2DALS, self).initializeC(image)
width_max = 1.0/(2.0 * self.sigma_range[0] * self.sigma_range[0])
width_min = 1.0/(2.0 * self.sigma_range[1] * self.sigma_range[1])
self.clib.daoInitialize2DALS(self.mfit,
width_min,
width_max)
def newImage(self, image):
super(MultiFitter2DALS, self).newImage(image)
self.clib.mFitAnscombeTransformImage(self.mfit)
class MultiFitter2DLS(MultiFitterGaussian):
"""
Fit with a variable peak width (of the same size in X and Y) using the
least squares fitting model.
"""
def __init__(self, sigma_range = None, **kwds):
super(MultiFitter2DLS, self).__init__(**kwds)
self.sigma_range = sigma_range
def initializeC(self, image):
super(MultiFitter2DLS, self).initializeC(image)
width_max = 1.0/(2.0 * self.sigma_range[0] * self.sigma_range[0])
width_min = 1.0/(2.0 * self.sigma_range[1] * self.sigma_range[1])
self.clib.daoInitialize2DLS(self.mfit,
width_min,
width_max)
class MultiFitter2DDWLS(MultiFitterGaussian):
"""
Fit with a variable peak width (of the same size in X and Y) using the
data weighted least squares fitting model.
"""
def __init__(self, sigma_range = None, **kwds):
super(MultiFitter2DDWLS, self).__init__(**kwds)
self.sigma_range = sigma_range
def initializeC(self, image):
super(MultiFitter2DDWLS, self).initializeC(image)
width_max = 1.0/(2.0 * self.sigma_range[0] * self.sigma_range[0])
width_min = 1.0/(2.0 * self.sigma_range[1] * self.sigma_range[1])
self.clib.daoInitialize2DDWLS(self.mfit,
width_min,
width_max)
class MultiFitter2DFWLS(MultiFitterGaussian):
"""
Fit with a variable peak width (of the same size in X and Y) using the
fit weighted least squares fitting model.
"""
def __init__(self, sigma_range = None, **kwds):
super(MultiFitter2DFWLS, self).__init__(**kwds)
self.sigma_range = sigma_range
def initializeC(self, image):
super(MultiFitter2DFWLS, self).initializeC(image)
width_max = 1.0/(2.0 * self.sigma_range[0] * self.sigma_range[0])
width_min = 1.0/(2.0 * self.sigma_range[1] * self.sigma_range[1])
self.clib.daoInitialize2DFWLS(self.mfit,
width_min,
width_max)
class MultiFitter3D(MultiFitterGaussian):
"""
Fit with peak width that can change independently in X and Y.
"""
def __init__(self, sigma_range = None, **kwds):
super(MultiFitter3D, self).__init__(**kwds)
self.sigma_range = sigma_range
def initializeC(self, image):
super(MultiFitter3D, self).initializeC(image)
width_max = 1.0/(2.0 * self.sigma_range[0] * self.sigma_range[0])
width_min = 1.0/(2.0 * self.sigma_range[1] * self.sigma_range[1])
self.clib.daoInitialize3D(self.mfit,
width_min,
width_max)
class MultiFitterZ(MultiFitterGaussian):
"""
Fit with peak width that varies in X and Y as a function of Z.
"""
def initializeC(self, image):
super(MultiFitterZ, self).initializeC(image)
self.clib.daoInitializeZ(self.mfit,
numpy.ascontiguousarray(self.wx_params),
numpy.ascontiguousarray(self.wy_params),
self.min_z,
self.max_z)
#
# The MIT License
#
# Copyright (c) 2018 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 39.011682
| 111
| 0.576301
|
8ca47055aafd0b0723958da7dab151cfb8b33fb5
| 8,481
|
py
|
Python
|
src/backend/AdcpTerminal.py
|
rowetechinc/River_electron
|
3897a8cae56241a64ea7167b3f3699fb5d392429
|
[
"MIT"
] | null | null | null |
src/backend/AdcpTerminal.py
|
rowetechinc/River_electron
|
3897a8cae56241a64ea7167b3f3699fb5d392429
|
[
"MIT"
] | null | null | null |
src/backend/AdcpTerminal.py
|
rowetechinc/River_electron
|
3897a8cae56241a64ea7167b3f3699fb5d392429
|
[
"MIT"
] | 1
|
2020-12-22T16:57:23.000Z
|
2020-12-22T16:57:23.000Z
|
import rti_python.Comm.adcp_serial_port as adcp_serial
import rti_python.Writer.rti_binary as RtiBinaryWriter
import threading
import time
import serial
import logging
from obsub import event
from threading import Lock
from rti_python.Utilities.config import RtiConfig
class AdcpTerminalVM:
"""
    View model for the ADCP serial terminal. Connects to the ADCP over a
    serial port, sends commands and records the incoming data.
"""
def __init__(self, rti_config: RtiConfig):
self.rti_config = rti_config
self.rti_config.init_terminal_config()
self.adcp = None
self.adcp_thread = None
self.adcp_thread_alive = False
self.serial_recorder = None
self.is_recording = False
        self.bytesWrittenLabel = 0
        self.totalBytesWrittenLabel = 0
        self.filePathLabel = ""
self.MAX_SERIAL_CONSOLE_LEN = 9000
self.serialTextBrowser = ""
self.thread_lock = Lock()
def comm_port_list(self):
"""
        Get the list of available serial ports.
:return:
"""
# Add all the found serial ports
return adcp_serial.get_serial_ports()
def baud_rate_list(self):
"""
        Get the list of supported baud rates.
:return:
"""
return adcp_serial.get_baud_rates()
def get_data(self):
# Lock the object
self.thread_lock.acquire()
is_connected = False
if self.adcp:
is_connected = True
term_data = {
"isConnected": is_connected,
"termData": self.serialTextBrowser,
"baud": self.rti_config.config['Comm']['Baud'],
"commPort": self.rti_config.config['Comm']['Port']
}
# Release lock
self.thread_lock.release()
logging.info(term_data)
return term_data
def connect_serial(self, port, baud):
"""
        Connect to the serial port and start the read thread.
:return:
"""
logging.info("Serial Connect: " + port + " : " + str(baud))
self.serialTextBrowser += "Serial Connect: " + port + " : " + str(baud)
# Store the configuration
self.rti_config.config['Comm']['Port'] = port
self.rti_config.config['Comm']['Baud'] = str(baud)
self.rti_config.write()
try:
self.adcp = adcp_serial.AdcpSerialPort(port, baud)
except ValueError as ve:
self.serialTextBrowser += "Error opening serial port. " + str(ve)
logging.error("Error opening serial port. " + str(ve))
return
except serial.SerialException as se:
self.serialTextBrowser += "Error opening serial port. " + str(se)
logging.error("Error opening serial port. " + str(se))
return
except Exception as e:
self.serialTextBrowser += "Error opening serial port. " + str(e)
logging.error("Error opening serial port. " + str(e))
return
# Start the read thread
self.adcp_thread_alive = True
self.adcp_thread = threading.Thread(name="Serial Terminal Thread", target=thread_worker, args=(self,))
self.adcp_thread.start()
def disconnect_serial(self):
"""
Disconnect the serial port and stop the read thread.
:return:
"""
self.adcp_thread_alive = False
if self.adcp:
self.adcp.disconnect()
self.adcp = None
self.serialTextBrowser += "Serial Disconnect."
logging.info("Serial Disconnect")
def serial_break(self):
"""
Send a BREAK to the serial port.
:return:
"""
# Clear the display
self.serialTextBrowser = ""
# Send a BREAK
if self.adcp:
self.adcp.send_break(1.25)
logging.info("BREAK SENT")
def send_cmd(self, cmd: str):
"""
Send a command to the ADCP.
:return:
"""
if self.adcp:
if len(cmd) > 0:
self.adcp.send_cmd(cmd)
logging.info("Write to serial port: " + cmd)
def start_pinging(self):
"""
Send the command to start pinging.
:return:
"""
if self.adcp:
self.adcp.start_pinging()
logging.info("Start Pinging")
def stop_pinging(self):
"""
Send the command to stop pinging.
:return:
"""
if self.adcp:
self.serialTextBrowser = ""
self.adcp.stop_pinging()
logging.info("Stop Pinging")
def fix_adcp_comm(self):
"""
If the ADCP stops communicating, try to fix the ADCP and regain communication.
:return:
"""
if self.adcp:
# Send a BREAK
self.adcp.send_break(1.25)
# Wait
time.sleep(1.0)
# Send a STOP
self.adcp.stop_pinging()
time.sleep(1.0)
# Send command to start pinging
self.adcp.start_pinging()
else:
logging.error("ADCP is not connected.")
def shutdown(self):
"""
Shutdown the VM.
:return:
"""
logging.debug("Shutdown Terminal VM")
self.disconnect_serial()
if self.serial_recorder:
self.serial_recorder.close()
def turn_on_off_record(self):
        if self.is_recording:
self.serial_recorder = RtiBinaryWriter.RtiBinaryWriter(folder_path=self.rti_config.config['Comm']['output_dir'])
logging.debug("Start Recording")
else:
if self.serial_recorder:
self.serial_recorder.close()
logging.debug("Stop Recording")
self.serial_recorder = None
def record_data(self, data):
if self.serial_recorder:
self.serial_recorder.write(data)
def update_record_count(self, file_count, total_count, file_path):
"""
Update the recording file sizes.
:param file_count: Total file size of current file.
:param total_count: Total size of all files written.
        :param file_path: Path of the current file.
:return:
"""
        self.bytesWrittenLabel = str(file_count)
        self.totalBytesWrittenLabel = str(total_count)
        self.filePathLabel = file_path
def clear_console(self):
self.serialTextBrowser = ""
def clear_bulk_cmd(self):
self.bulkCmdMlainTextEdit = ""
def send_bulk_cmd(self, bulk_cmds: str):
cmds = bulk_cmds.splitlines()
for cmd in cmds:
self.adcp.send_cmd(cmd + "\n")
logging.debug("Write to serial port: " + cmd)
time.sleep(0.25)
@event
def on_serial_data(self, data):
"""
Subscribe to receive serial data.
:param data: Data from the serial port.
:return:
"""
logging.info("Data Received")
def thread_worker(vm):
"""
Thread worker to handle reading the serial port.
:param vm: This VM to get access to the variables.
:return:
"""
while vm.adcp_thread_alive:
try:
if vm.adcp.raw_serial.in_waiting:
# Read the data from the serial port
data = vm.adcp.read(vm.adcp.raw_serial.in_waiting)
try:
# Display the data as ASCII if it is a response from the ADCP
# If it is raw binary ADCP data, this will fail so just display binary data
ascii_data = data.decode('ascii')
vm.serialTextBrowser += ascii_data
logging.debug(ascii_data)
except Exception:
                    # Not ASCII, display the raw bytes instead
vm.serialTextBrowser += str(data)
# Prevent overflow of buffer, if greater than buffer limit
# Get the last bytes in buffer
if len(vm.serialTextBrowser) > 5000:
                    vm.serialTextBrowser = vm.serialTextBrowser[-5000:]
# Record data if turned on
vm.record_data(data)
# Publish the data
vm.on_serial_data(data)
time.sleep(0.01)
except serial.SerialException as se:
logging.error("Error using the serial port.\n" + str(se))
vm.disconnect_serial()
except Exception as ex:
logging.error("Error processing the data.\n" + str(ex))
| 29.757895
| 124
| 0.570687
|
7051380b739a3624583e0f99bd2dd4cb5788bd66
| 7,579
|
py
|
Python
|
tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_evaluator.py
|
sergeant-wizard/pytorch-pfn-extras
|
221c07aedb9d88e4b96b55da49f6c104f631e01a
|
[
"MIT"
] | null | null | null |
tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_evaluator.py
|
sergeant-wizard/pytorch-pfn-extras
|
221c07aedb9d88e4b96b55da49f6c104f631e01a
|
[
"MIT"
] | null | null | null |
tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_evaluator.py
|
sergeant-wizard/pytorch-pfn-extras
|
221c07aedb9d88e4b96b55da49f6c104f631e01a
|
[
"MIT"
] | null | null | null |
import numpy
import pytest
import torch
import pytorch_pfn_extras as ppe
class DummyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.args = []
def forward(self, x):
self.args.append(x)
ppe.reporting.report({'loss': x.sum()}, self)
class DummyModelTwoArgs(torch.nn.Module):
def __init__(self):
super().__init__()
self.args = []
def forward(self, x, y):
self.args.append((x, y))
ppe.reporting.report({'loss': x.sum() + y.sum()}, self)
def _torch_batch_to_numpy(batch):
# In Pytorch, a batch has the batch dimension. Squeeze it for comparison.
assert isinstance(batch, torch.Tensor)
assert batch.shape[0] == 1
return batch.squeeze(0).numpy()
@pytest.fixture(scope='function')
def evaluator_dummies():
data = [
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
for _ in range(2)]
data_loader = torch.utils.data.DataLoader(data)
target = DummyModel()
evaluator = ppe.training.extensions.Evaluator(data_loader, target)
expect_mean = numpy.mean([numpy.sum(x) for x in data])
return data, data_loader, target, evaluator, expect_mean
def test_evaluate(evaluator_dummies):
data, data_loader, target, evaluator, expect_mean = evaluator_dummies
reporter = ppe.reporting.Reporter()
reporter.add_observer('target', target)
with reporter:
mean = evaluator.evaluate()
# No observation is reported to the current reporter. Instead the
    # evaluator collects results in order to calculate their mean.
assert len(reporter.observation) == 0
assert len(target.args) == len(data)
for i in range(len(data)):
numpy.testing.assert_array_equal(
_torch_batch_to_numpy(target.args[i]), data[i])
numpy.testing.assert_almost_equal(
mean['target/loss'], expect_mean, decimal=4)
evaluator.finalize()
def test_call(evaluator_dummies):
data, data_loader, target, evaluator, expect_mean = evaluator_dummies
mean = evaluator()
# 'main' is used by default
numpy.testing.assert_almost_equal(
mean['main/loss'], expect_mean, decimal=4)
def test_evaluator_name(evaluator_dummies):
data, data_loader, target, evaluator, expect_mean = evaluator_dummies
evaluator.name = 'eval'
mean = evaluator()
# name is used as a prefix
numpy.testing.assert_almost_equal(
mean['eval/main/loss'], expect_mean, decimal=4)
def test_current_report(evaluator_dummies):
data, data_loader, target, evaluator, expect_mean = evaluator_dummies
reporter = ppe.reporting.Reporter()
with reporter:
mean = evaluator()
# The result is reported to the current reporter.
assert reporter.observation == mean
def test_evaluator_tuple_data():
data = [
(numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'),
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'))
for _ in range(2)]
data_loader = torch.utils.data.DataLoader(data)
target = DummyModelTwoArgs()
evaluator = ppe.training.extensions.Evaluator(data_loader, target)
reporter = ppe.reporting.Reporter()
reporter.add_observer('target', target)
with reporter:
mean = evaluator.evaluate()
assert len(target.args) == len(data)
for i in range(len(data)):
assert len(target.args[i]) == len(data[i])
numpy.testing.assert_array_equal(
_torch_batch_to_numpy(target.args[i][0]), data[i][0])
numpy.testing.assert_array_equal(
_torch_batch_to_numpy(target.args[i][1]), data[i][1])
expect_mean = numpy.mean([numpy.sum(x) for x in data])
numpy.testing.assert_almost_equal(
mean['target/loss'], expect_mean, decimal=4)
def test_evaluator_dict_data():
data = [
{'x': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'),
'y': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')}
for _ in range(2)]
data_loader = torch.utils.data.DataLoader(data)
target = DummyModelTwoArgs()
evaluator = ppe.training.extensions.Evaluator(data_loader, target)
reporter = ppe.reporting.Reporter()
reporter.add_observer('target', target)
with reporter:
mean = evaluator.evaluate()
assert len(target.args) == len(data)
for i in range(len(data)):
numpy.testing.assert_array_equal(
_torch_batch_to_numpy(target.args[i][0]), data[i]['x'])
numpy.testing.assert_array_equal(
_torch_batch_to_numpy(target.args[i][1]), data[i]['y'])
expect_mean = numpy.mean(
[numpy.sum(x['x']) + numpy.sum(x['y']) for x in data])
numpy.testing.assert_almost_equal(
mean['target/loss'], expect_mean, decimal=4)
def test_evaluator_with_eval_func():
data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
data_loader = torch.utils.data.DataLoader(data)
target = DummyModel()
evaluator = ppe.training.extensions.Evaluator(
data_loader, {}, eval_func=target)
reporter = ppe.reporting.Reporter()
reporter.add_observer('target', target)
with reporter:
evaluator.evaluate()
assert len(target.args) == len(data)
for i in range(len(data)):
numpy.testing.assert_array_equal(
_torch_batch_to_numpy(target.args[i]), data[i])
def test_evaluator_progress_bar():
data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
data_loader = torch.utils.data.DataLoader(data, batch_size=1)
target = DummyModel()
evaluator = ppe.training.extensions.Evaluator(
data_loader, {}, eval_func=target, progress_bar=True)
reporter = ppe.reporting.Reporter()
reporter.add_observer('target', target)
with reporter:
evaluator.evaluate()
# Code excerpts to test IgniteEvaluator
class IgniteDummyModel(torch.nn.Module):
def __init__(self):
super(IgniteDummyModel, self).__init__()
self.count = 0.
def forward(self, *args):
ppe.reporting.report({'x': self.count}, self)
self.count += 1.
return 0.
def create_dummy_evaluator(model):
from ignite.engine import Engine
def update_fn(engine, batch):
y_pred = batch[1].clone().detach()
model()
        # We return fake results so that the reporters and metrics work.
return (y_pred, y_pred)
evaluator = Engine(update_fn)
return evaluator
def test_ignite_evaluator_reporting_metrics():
try:
from ignite.metrics import MeanSquaredError
except ImportError:
pytest.skip('pytorch-ignite is not installed')
    # This test verifies that both user-reported metrics and
    # Ignite-calculated ones are correctly reflected in the reporter
    # observation.
model = IgniteDummyModel()
n_data = 10
x = torch.randn((n_data, 2), requires_grad=True)
y = torch.randn((n_data, 2))
dataset = torch.utils.data.TensorDataset(x, y)
loader = torch.utils.data.DataLoader(dataset, batch_size=3)
evaluator = create_dummy_evaluator(model)
# Attach metrics to the evaluator
metric = MeanSquaredError()
metric.attach(evaluator, 'mse')
evaluator_ignite_ext = ppe.training.extensions.IgniteEvaluator(
evaluator, loader, model, progress_bar=False
)
reporter = ppe.reporting.Reporter()
with reporter:
result = evaluator_ignite_ext()
# Internally reported metrics
assert result['main/x'] == 1.5
# Ignite calculated metric
assert result['val/mse'] == 0.0
| 30.560484
| 77
| 0.663808
|
b55331658a85a14959a5fa89815535f16352bcf6
| 380
|
py
|
Python
|
tern/utils/metadata.py
|
ManishaTripathy/tern
|
bf3da704d2731417fd070bab888be7b9685080c9
|
[
"BSD-2-Clause"
] | null | null | null |
tern/utils/metadata.py
|
ManishaTripathy/tern
|
bf3da704d2731417fd070bab888be7b9685080c9
|
[
"BSD-2-Clause"
] | null | null | null |
tern/utils/metadata.py
|
ManishaTripathy/tern
|
bf3da704d2731417fd070bab888be7b9685080c9
|
[
"BSD-2-Clause"
] | null | null | null |
#
# Copyright (c) 2017-2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
#
"""
Container metadata operations
"""
import os
import shutil
from tern.utils.constants import temp_folder
def clean_temp():
'''Remove the temp directory'''
temp_path = os.path.abspath(temp_folder)
if os.path.exists(temp_path):
shutil.rmtree(temp_path)
| 20
| 59
| 0.718421
|
cecde9bd0ea13020125e0f8902020e808a6de15b
| 6,536
|
py
|
Python
|
bin/Python27/Lib/site-packages/sphinx/pycode/nodes.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | 1
|
2021-11-06T17:09:04.000Z
|
2021-11-06T17:09:04.000Z
|
bin/Python27/Lib/site-packages/sphinx/pycode/nodes.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | 1
|
2019-02-10T23:52:09.000Z
|
2019-02-10T23:52:09.000Z
|
bin/Python27/Lib/site-packages/sphinx/pycode/nodes.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | 1
|
2021-11-06T17:08:54.000Z
|
2021-11-06T17:08:54.000Z
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
if False:
# For type annotation
from typing import Callable # NOQA
class BaseNode(object):
"""
Node superclass for both terminal and nonterminal nodes.
"""
parent = None # type: BaseNode
def _eq(self, other):
raise NotImplementedError
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
def __ne__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
__hash__ = None # type: Callable[[object], int]
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def get_next_sibling(self):
"""Return next child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
def get_prev_leaf(self):
"""Return the leaf node that precedes this node in the parse tree."""
def last_child(node):
if isinstance(node, Leaf):
return node
elif not node.children:
return None
else:
return last_child(node.children[-1])
if self.parent is None:
return None
prev = self.get_prev_sibling()
if isinstance(prev, Leaf):
return prev
elif prev is not None:
return last_child(prev)
return self.parent.get_prev_leaf()
def get_next_leaf(self):
"""Return self if leaf, otherwise the leaf node that succeeds this
node in the parse tree.
"""
node = self
while not isinstance(node, Leaf):
assert node.children
node = node.children[0]
return node
def get_lineno(self):
"""Return the line number which generated the invocant node."""
return self.get_next_leaf().lineno
def get_prefix(self):
"""Return the prefix of the next leaf node."""
# only leaves carry a prefix
return self.get_next_leaf().prefix
class Node(BaseNode):
"""
Node implementation for nonterminals.
"""
def __init__(self, type, children, context=None):
# type of nonterminals is >= 256
# assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
# assert ch.parent is None, repr(ch)
ch.parent = self
def __repr__(self):
return '%s(%s, %r)' % (self.__class__.__name__,
self.type, self.children)
def __str__(self):
"""This reproduces the input source exactly."""
return ''.join(map(str, self.children))
def _eq(self, other):
return (self.type, self.children) == (other.type, other.children)
# support indexing the node directly instead of .children
def __getitem__(self, index):
return self.children[index]
def __iter__(self):
return iter(self.children)
def __len__(self):
return len(self.children)
class Leaf(BaseNode):
"""
Node implementation for leaf nodes (terminals).
"""
prefix = '' # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
    column = 0   # Column where this token starts in the input
def __init__(self, type, value, context=None):
# type of terminals is below 256
# assert 0 <= type < 256, type
self.type = type
self.value = value
if context is not None:
self.prefix, (self.lineno, self.column) = context
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.type, self.value, self.prefix)
def __str__(self):
"""This reproduces the input source exactly."""
return self.prefix + str(self.value)
def _eq(self, other):
"""Compares two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def convert(grammar, raw_node):
"""Convert raw node to a Node or Leaf instance."""
type, value, context, children = raw_node
if children or type in grammar.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
def nice_repr(node, number2name, prefix=False):
def _repr(node):
if isinstance(node, Leaf):
return "%s(%r)" % (number2name[node.type], node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_repr, node.children)))
def _prepr(node):
if isinstance(node, Leaf):
return "%s(%r, %r)" % (number2name[node.type],
node.prefix, node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_prepr, node.children)))
return (prefix and _prepr or _repr)(node)
class NodeVisitor(object):
def __init__(self, number2name, *args):
self.number2name = number2name
self.init(*args)
def init(self, *args):
pass
def visit(self, node):
"""Visit a node."""
method = 'visit_' + self.number2name[node.type]
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
if isinstance(node, Node):
for child in node: # type: ignore
self.visit(child)
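# A minimal sketch of how NodeVisitor is intended to be sub-classed
# (illustrative only; it assumes the grammar maps the NAME token number to the
# string 'name' in the number2name dictionary passed to the constructor).
class _ExampleNameCollector(NodeVisitor):
    """Collect the values of all NAME leaves in a parse tree."""
    def init(self):
        self.names = []
    def visit_name(self, node):
        self.names.append(node.value)
# usage: collector = _ExampleNameCollector(number2name)
#        collector.visit(tree); print(collector.names)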
| 30.685446
| 77
| 0.575428
|
d6b5c58097c5f56f9a1a2bebd5d0cad47370af01
| 7,447
|
py
|
Python
|
zerver/webhooks/gogs/view.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 2
|
2020-11-12T12:28:46.000Z
|
2020-11-16T11:17:46.000Z
|
zerver/webhooks/gogs/view.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 1
|
2021-03-24T12:50:52.000Z
|
2021-03-24T13:11:42.000Z
|
zerver/webhooks/gogs/view.py
|
TylerPham2000/zulip
|
2e7aaba0dde5517b4a55cb0bd782f009be45e3ba
|
[
"Apache-2.0"
] | 1
|
2021-07-22T10:14:08.000Z
|
2021-07-22T10:14:08.000Z
|
# vim:fenc=utf-8
from typing import Any, Dict, Optional
from django.http import HttpRequest, HttpResponse
from typing_extensions import Protocol
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import (
check_send_webhook_message,
get_http_headers_from_filename,
validate_extract_webhook_http_header,
)
from zerver.lib.webhooks.git import (
TOPIC_WITH_BRANCH_TEMPLATE,
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,
TOPIC_WITH_RELEASE_TEMPLATE,
get_create_branch_event_message,
get_issue_event_message,
get_pull_request_event_message,
get_push_commits_event_message,
get_release_event_message,
)
from zerver.models import UserProfile
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GOGS_EVENT")
def get_issue_url(repo_url: str, issue_nr: int) -> str:
return f"{repo_url}/issues/{issue_nr}"
def format_push_event(payload: Dict[str, Any]) -> str:
for commit in payload["commits"]:
commit["sha"] = commit["id"]
commit["name"] = commit["author"]["username"] or commit["author"]["name"].split()[0]
data = {
"user_name": payload["sender"]["username"],
"compare_url": payload["compare_url"],
"branch_name": payload["ref"].replace("refs/heads/", ""),
"commits_data": payload["commits"],
}
return get_push_commits_event_message(**data)
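# For reference, a minimal (hypothetical) push payload carrying just the fields
# read above; get_push_commits_event_message() may also consume additional
# per-commit fields such as the commit URL and message:
#
#   {
#       "ref": "refs/heads/master",
#       "compare_url": "https://gogs.example.com/owner/repo/compare/a...b",
#       "sender": {"username": "octocat"},
#       "commits": [
#           {"id": "abc123",
#            "author": {"username": "octocat", "name": "Octo Cat"}}
#       ]
#   }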
def format_new_branch_event(payload: Dict[str, Any]) -> str:
branch_name = payload["ref"]
url = "{}/src/{}".format(payload["repository"]["html_url"], branch_name)
data = {
"user_name": payload["sender"]["username"],
"url": url,
"branch_name": branch_name,
}
return get_create_branch_event_message(**data)
def format_pull_request_event(payload: Dict[str, Any], include_title: bool = False) -> str:
data = {
"user_name": payload["pull_request"]["user"]["username"],
"action": payload["action"],
"url": payload["pull_request"]["html_url"],
"number": payload["pull_request"]["number"],
"target_branch": payload["pull_request"]["head_branch"],
"base_branch": payload["pull_request"]["base_branch"],
"title": payload["pull_request"]["title"] if include_title else None,
}
if payload["pull_request"]["merged"]:
data["user_name"] = payload["pull_request"]["merged_by"]["username"]
data["action"] = "merged"
return get_pull_request_event_message(**data)
def format_issues_event(payload: Dict[str, Any], include_title: bool = False) -> str:
issue_nr = payload["issue"]["number"]
assignee = payload["issue"]["assignee"]
return get_issue_event_message(
payload["sender"]["login"],
payload["action"],
get_issue_url(payload["repository"]["html_url"], issue_nr),
issue_nr,
payload["issue"]["body"],
assignee=assignee["login"] if assignee else None,
title=payload["issue"]["title"] if include_title else None,
)
def format_issue_comment_event(payload: Dict[str, Any], include_title: bool = False) -> str:
action = payload["action"]
comment = payload["comment"]
issue = payload["issue"]
if action == "created":
action = "[commented]"
else:
action = f"{action} a [comment]"
action += "({}) on".format(comment["html_url"])
return get_issue_event_message(
payload["sender"]["login"],
action,
get_issue_url(payload["repository"]["html_url"], issue["number"]),
issue["number"],
comment["body"],
title=issue["title"] if include_title else None,
)
def format_release_event(payload: Dict[str, Any], include_title: bool = False) -> str:
data = {
"user_name": payload["release"]["author"]["username"],
"action": payload["action"],
"tagname": payload["release"]["tag_name"],
"release_name": payload["release"]["name"],
"url": payload["repository"]["html_url"],
}
return get_release_event_message(**data)
@webhook_view("Gogs")
@has_request_variables
def api_gogs_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
branches: Optional[str] = REQ(default=None),
user_specified_topic: Optional[str] = REQ("topic", default=None),
) -> HttpResponse:
return gogs_webhook_main(
"Gogs",
"X_GOGS_EVENT",
format_pull_request_event,
request,
user_profile,
payload,
branches,
user_specified_topic,
)
class FormatPullRequestEvent(Protocol):
def __call__(self, payload: Dict[str, Any], include_title: bool) -> str:
...
def gogs_webhook_main(
integration_name: str,
http_header_name: str,
format_pull_request_event: FormatPullRequestEvent,
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any],
branches: Optional[str],
user_specified_topic: Optional[str],
) -> HttpResponse:
repo = payload["repository"]["name"]
event = validate_extract_webhook_http_header(request, http_header_name, integration_name)
if event == "push":
branch = payload["ref"].replace("refs/heads/", "")
if branches is not None and branch not in branches.split(","):
return json_success()
body = format_push_event(payload)
topic = TOPIC_WITH_BRANCH_TEMPLATE.format(
repo=repo,
branch=branch,
)
elif event == "create":
body = format_new_branch_event(payload)
topic = TOPIC_WITH_BRANCH_TEMPLATE.format(
repo=repo,
branch=payload["ref"],
)
elif event == "pull_request":
body = format_pull_request_event(
payload,
include_title=user_specified_topic is not None,
)
topic = TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=repo,
type="PR",
id=payload["pull_request"]["id"],
title=payload["pull_request"]["title"],
)
elif event == "issues":
body = format_issues_event(
payload,
include_title=user_specified_topic is not None,
)
topic = TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=repo,
type="Issue",
id=payload["issue"]["number"],
title=payload["issue"]["title"],
)
elif event == "issue_comment":
body = format_issue_comment_event(
payload,
include_title=user_specified_topic is not None,
)
topic = TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=repo,
type="Issue",
id=payload["issue"]["number"],
title=payload["issue"]["title"],
)
elif event == "release":
body = format_release_event(
payload,
include_title=user_specified_topic is not None,
)
topic = TOPIC_WITH_RELEASE_TEMPLATE.format(
repo=repo,
tag=payload["release"]["tag_name"],
title=payload["release"]["name"],
)
else:
raise UnsupportedWebhookEventType(event)
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
| 32.099138
| 93
| 0.639452
|
83c5fe7199ad62eaca066e134245195804caca9c
| 10,117
|
py
|
Python
|
tutorials/setup-gcp/rdbox-gui/flask_rdbox/views/entries.py
|
hamstick/rdbox
|
780928388a5e31a3180c1bea1e2c96ab15e4357c
|
[
"MIT"
] | 278
|
2019-03-09T02:11:50.000Z
|
2022-03-30T14:54:23.000Z
|
tutorials/setup-gcp/rdbox-gui/flask_rdbox/views/entries.py
|
horacezh/rdbox
|
4eaefc7719283982117212b529c234ec9c5267d4
|
[
"MIT"
] | 24
|
2020-05-08T00:39:23.000Z
|
2022-02-27T02:47:42.000Z
|
tutorials/setup-gcp/rdbox-gui/flask_rdbox/views/entries.py
|
horacezh/rdbox
|
4eaefc7719283982117212b529c234ec9c5267d4
|
[
"MIT"
] | 39
|
2019-03-22T07:30:31.000Z
|
2022-03-15T07:07:37.000Z
|
#!/usr/bin/env python3
from flask import request, Response
from flask_rdbox import app
from flask_rdbox.views.utils import Utils
from flask_rdbox.views.g_cloud_setupper import GCloudSetupper
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from os.path import expanduser
import os
import subprocess
import json
@app.route('/api/bootstrap/gcp/login/url', methods=['GET'])
def getLoginURL():
gcloud_setupper = GCloudSetupper()
retObj = gcloud_setupper.getURL()
return json.dumps(retObj)
@app.route('/api/bootstrap/gcp/login/status', methods=['GET'])
def getAuthList():
try:
ret = subprocess.run(['gcloud', 'auth', 'list', '--format', 'json'],
timeout=30,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if ret.returncode == 0:
return Response(response=str(ret.stdout.decode("utf8")),
status=200)
else:
return Response(response=json.dumps({"msg": 'Internal Server Error.'}),
status=500)
except Exception as e:
return Response(response=json.dumps({"msg": type(e).__name__}),
status=500)
@app.route('/api/bootstrap/gcp/login/token', methods=['POST'])
def setLoginToken():
ret = json.loads(request.get_data())
gcloud_setupper = GCloudSetupper()
code, msg = gcloud_setupper.setToken(ret['token'], ret['date'])
if code == 200:
subprocess.run(['bash', os.path.dirname(os.path.abspath(__file__))+'/static/hook/afterlogin.sh'])
return Response(response=json.dumps({'ret': str(code), 'msg': str(msg)}),
status=code)
@app.route('/api/bootstrap/gcp/projects', methods=['GET'])
def getProjectList():
projects = {}
try:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)
request = service.projects().list()
while request:
response = request.execute()
for project in response.get('projects', []):
if project.get('lifecycleState') == 'ACTIVE':
projects.setdefault(project.get('projectId'), project.get('name'))
request = service.projects().list_next(previous_request=request, previous_response=response)
except Exception as e:
return Response(response=json.dumps({"msg": type(e).__name__}),
status=500)
else:
return Response(response=json.dumps(projects),
status=200)
@app.route('/api/bootstrap/gcp/compute/zones', methods=['GET'])
def getZoneList():
zones = {}
try:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', 'v1', credentials=credentials)
req = service.zones().list(project=request.args.get('project'))
while req:
response = req.execute()
for zone in response.get('items', []):
region = zone.get('region').split('/')[-1]
zones.setdefault(region, [])
zones[region].append(zone.get('name'))
req = service.zones().list_next(previous_request=req, previous_response=response)
except Exception as e:
return Response(response=json.dumps({"msg": e}),
status=500)
else:
return Response(response=json.dumps(zones),
status=200)
@app.route('/api/bootstrap/gcp/compute/machine-types', methods=['GET'])
def getMachineTypeList():
machines = {}
try:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', 'v1', credentials=credentials)
req = service.machineTypes().list(project=request.args.get('project'), zone=request.args.get('zone'))
while req:
response = req.execute()
for machine in response.get('items', []):
machines.setdefault(machine.get('name'), machine.get('description'))
req = service.machineTypes().list_next(previous_request=req, previous_response=response)
except Exception as e:
return Response(response=json.dumps({"msg": type(e).__name__}),
status=500)
else:
return Response(response=json.dumps(machines),
status=200)
@app.route('/api/bootstrap/gcp/deployment-manager/deployments/resources', methods=['GET'])
def getDeploymentResourcesList():
resources = {
'done': [],
'update': [],
'error': [],
'emsg': [],
'status': []
}
try:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('deploymentmanager', 'v2', credentials=credentials)
req = service.resources().list(project=request.args.get('project'), deployment=request.args.get('deployment'))
total = 0
while req:
response = req.execute()
for resource in response.get('resources', []):
total = total + 1
if 'update' in resource:
if 'error' in resource.get('update'):
resources['error'].append(resource.get('name'))
for err in resource['update']['error'].get('errors', []):
try:
message = json.loads(err.get('message'))
if 'ResourceErrorMessage' in message:
resources['emsg'].append(message.get('ResourceErrorMessage').get('message', 'unknown'))
except Exception:
pass
else:
resources['update'].append(resource.get('name', 'unknown'))
else:
resources['done'].append(resource.get('name', 'unknown'))
req = service.resources().list_next(previous_request=req, previous_response=response)
percentage = total if total == 0 else int(len(resources['done']) / total * 100)
resources['status'].append(str(percentage))
if resources['status'][0] == '100':
try:
process_list = Utils.find_procs_by_name('after_bootstrap.sh')
if len(process_list) == 0:
subprocess.Popen(['bash',
os.path.dirname(os.path.abspath(__file__))+'/static/hook/after_bootstrap.sh',
request.args.get('project'),
request.args.get('deployment')],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
except Exception:
pass
except Exception as e:
return Response(response=json.dumps({"msg": type(e).__name__}),
status=500)
else:
return Response(response=json.dumps(resources),
status=200)
@app.route('/api/bootstrap/gcp/deployment-manager/deployments', methods=['POST'])
def setDeploymentCreateProperties():
try:
ret = json.loads(request.get_data())
secretKey = ret.pop('adminSecretKey')
publicKey = ret.pop('adminPubKey')
properties_str = Utils.format_properties(ret)
template_path = os.path.dirname(os.path.abspath(__file__)) + '/static/deployment_manager/network-template.jinja'
except Exception:
return Response(response=json.dumps({"msg": 'Invalid Request.'}),
status=400)
try:
os.mkdir(os.path.join(expanduser("~"), '.ssh'), 0o700)
with open(os.path.join(expanduser("~"), '.ssh', 'id_rsa'), mode='w') as f:
f.write(secretKey)
with open(os.path.join(expanduser("~"), '.ssh', 'authorized_keys'), mode='w') as f:
f.write(publicKey)
with open(os.path.join(expanduser("~"), '.ssh', 'id_rsa.pub'), mode='w') as f:
f.write(publicKey)
os.chmod(os.path.join(expanduser("~"), '.ssh', 'id_rsa'), 0o600)
os.chmod(os.path.join(expanduser("~"), '.ssh', 'authorized_keys'), 0o600)
except Exception:
pass
try:
ret = subprocess.run(['gcloud', 'deployment-manager', 'deployments', 'create', ret['resourcesPrefix'],
'--template', template_path,
'--properties', properties_str,
'--project', ret['project'],
'--format', 'json',
'--async'],
timeout=30,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if ret.returncode == 0:
return Response(response=str(ret.stdout.decode("utf8")),
status=200)
else:
if ret.stdout.decode("utf8") != '':
subprocess.run(['gcloud', 'deployment-manager', 'deployments', 'delete', ret['resourcesPrefix'],
'--delete-policy', 'DELETE',
'--project', ret['project'],
'--format', 'json',
'--async'],
timeout=30,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return Response(response=str(ret.stdout.decode("utf8")),
status=412)
else:
return Response(response=json.dumps({"msg": ret.stderr.decode("utf8")}),
status=400)
except subprocess.TimeoutExpired as e:
return Response(response=json.dumps({"msg": type(e).__name__}),
status=500)
except Exception as e:
return Response(response=json.dumps({"msg": type(e).__name__}),
status=500)
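# For reference, a sketch of the JSON body the deployment POST handler above
# expects (hypothetical values; apart from the keys popped or read explicitly,
# every remaining key is forwarded to the Deployment Manager template as a
# property via Utils.format_properties()):
#
#   {
#       "adminSecretKey": "<ssh private key material>",
#       "adminPubKey": "<ssh public key material>",
#       "resourcesPrefix": "rdbox",
#       "project": "my-gcp-project",
#       ...
#   }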
| 45.165179
| 123
| 0.54868
|
7f6a958627dfe113f67e5c361f173187a3608c59
| 652
|
py
|
Python
|
py/226. Invert Binary Tree.py
|
longwangjhu/LeetCode
|
a5c33e8d67e67aedcd439953d96ac7f443e2817b
|
[
"MIT"
] | 3
|
2021-08-07T07:01:34.000Z
|
2021-08-07T07:03:02.000Z
|
py/226. Invert Binary Tree.py
|
longwangjhu/LeetCode
|
a5c33e8d67e67aedcd439953d96ac7f443e2817b
|
[
"MIT"
] | null | null | null |
py/226. Invert Binary Tree.py
|
longwangjhu/LeetCode
|
a5c33e8d67e67aedcd439953d96ac7f443e2817b
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/invert-binary-tree/
# Given the root of a binary tree, invert the tree, and return its root.
################################################################################
# divide and conquer
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root: return None
root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
return root
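# A quick sanity check (illustrative; TreeNode is supplied by LeetCode):
#
#   root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)),
#                      TreeNode(7, TreeNode(6), TreeNode(9)))
#   inverted = Solution().invertTree(root)
#   # inverted.left.val == 7 and inverted.right.val == 2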
| 29.636364
| 87
| 0.558282
|
4b6bd0b949eafe7e684cead09c5d35eb7ce93cf8
| 402
|
py
|
Python
|
EN1/experiment 1/analyse a1.py
|
jvdoorn/EN
|
4ca3ec37ebff4d277677cfaea2b77daa60a54d82
|
[
"MIT"
] | null | null | null |
EN1/experiment 1/analyse a1.py
|
jvdoorn/EN
|
4ca3ec37ebff4d277677cfaea2b77daa60a54d82
|
[
"MIT"
] | 2
|
2022-01-13T03:44:55.000Z
|
2022-03-12T00:56:18.000Z
|
EN1/experiment 1/analyse a1.py
|
jvdoorn/EN
|
4ca3ec37ebff4d277677cfaea2b77daa60a54d82
|
[
"MIT"
] | null | null | null |
from math import sqrt
V = 1.06304 # Volts
I = 9.3900 * 10 ** -3 # Amps
Vf = 0.00015 * V + 0.00003 * 2 # Volts
If = 0.00055 * I + 0.00005 * 2 * 10 ** -3 # Amps
R = V / I # Ohms
Rf = sqrt((Vf / V) ** 2 + (If / I) ** 2) # Ohms
T = 30.84 + (2.232 * R) + (2.43 * 10 ** -3 * R ** 2) - (5.33 * 10 ** -6 * R ** 3) # Kelvin
Tf = (Rf / R) * 2.232 + 2 * (Rf / R) * 2.43 + 3 * (Rf / R) * 5.33 # Kelvin
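# For the measured values above this evaluates to roughly
#   R = V / I ≈ 1.06304 / 9.3900e-3 ≈ 113.2 Ohm
#   T ≈ 30.84 + 2.232*113.2 + 2.43e-3*113.2**2 - 5.33e-6*113.2**3 ≈ 306.9 K
# (approximate; add e.g. print(f"R = {R:.2f} Ohm, T = {T:.2f} K") to show the
# exact numbers).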
| 28.714286
| 91
| 0.430348
|
393ed855e5010cec8c3240d711d663701c459fa0
| 10,128
|
py
|
Python
|
chrome/common/extensions/docs/server2/new_github_file_system_test.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
chrome/common/extensions/docs/server2/new_github_file_system_test.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
chrome/common/extensions/docs/server2/new_github_file_system_test.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from copy import deepcopy
from cStringIO import StringIO
from functools import partial
from hashlib import sha1
from random import random
import unittest
from zipfile import ZipFile
from caching_file_system import CachingFileSystem
from file_system import FileNotFoundError, StatInfo
from fake_url_fetcher import FakeURLFSFetcher, MockURLFetcher
from local_file_system import LocalFileSystem
from new_github_file_system import GithubFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
class _TestBundle(object):
  '''Bundles test file data with a GithubFileSystem and test utilities. Create
  GithubFileSystems and their fetchers via |CreateGfsAndFetcher()|,
randomly mutate its contents via |Mutate()|, and access the underlying zip
data via |files|.
'''
def __init__(self):
self.files = {
'zipfile/': '',
'zipfile/hello.txt': 'world',
'zipfile/readme': 'test zip',
'zipfile/dir/file1': 'contents',
'zipfile/dir/file2': 'more contents'
}
self._test_files = {
'test_owner': {
'changing-repo': {
'commits': {
'HEAD': self._MakeShaJson(self._GenerateHash())
},
'zipball': self._ZipFromFiles(self.files)
}
}
}
self._fake_fetcher = None
def CreateGfsAndFetcher(self):
fetchers = []
def create_mock_url_fetcher(base_path):
assert not fetchers
# Save this reference so we can replace the TestFileSystem in Mutate.
self._fake_fetcher = FakeURLFSFetcher(
TestFileSystem(self._test_files), base_path)
fetchers.append(MockURLFetcher(self._fake_fetcher))
return fetchers[-1]
# Constructing |gfs| will create a fetcher.
gfs = GithubFileSystem.ForTest(
'changing-repo/', create_mock_url_fetcher, path='')
assert len(fetchers) == 1
return gfs, fetchers[0]
def Mutate(self):
fake_version = self._GenerateHash()
fake_data = self._GenerateHash()
self.files['zipfile/hello.txt'] = fake_data
self.files['zipfile/new-file'] = fake_data
self.files['zipfile/dir/file1'] = fake_data
self._test_files['test_owner']['changing-repo']['zipball'] = (
self._ZipFromFiles(self.files))
self._test_files['test_owner']['changing-repo']['commits']['HEAD'] = (
self._MakeShaJson(fake_version))
# Update the file_system used by FakeURLFSFetcher so the above mutations
# propagate.
self._fake_fetcher.UpdateFS(TestFileSystem(self._test_files))
return fake_version, fake_data
def _GenerateHash(self):
'''Generates an arbitrary SHA1 hash.
'''
return sha1(str(random())).hexdigest()
def _MakeShaJson(self, hash_value):
commit_json = json.loads(deepcopy(LocalFileSystem('').ReadSingle(
'test_data/github_file_system/test_owner/repo/commits/HEAD').Get()))
commit_json['sha'] = hash_value
return json.dumps(commit_json)
def _ZipFromFiles(self, file_dict):
string = StringIO()
zipfile = ZipFile(string, 'w')
for filename, contents in file_dict.iteritems():
zipfile.writestr(filename, contents)
zipfile.close()
return string.getvalue()
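# Typical flow (an illustrative summary; it mirrors testRefresh below):
#
#   bundle = _TestBundle()
#   gfs, fetcher = bundle.CreateGfsAndFetcher()
#   gfs.Refresh().Get()                  # fetch the initial fake repo state
#   version, data = bundle.Mutate()      # simulate a new upstream commit
#   gfs, fetcher = bundle.CreateGfsAndFetcher()
#   gfs.Refresh().Get()                  # a fresh file system now sees |data|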
class TestGithubFileSystem(unittest.TestCase):
def setUp(self):
self._gfs = GithubFileSystem.ForTest(
'repo/', partial(FakeURLFSFetcher, LocalFileSystem('')))
# Start and finish the repository load.
self._cgfs = CachingFileSystem(self._gfs, ObjectStoreCreator.ForTest())
def testReadDirectory(self):
self._gfs.Refresh().Get()
self.assertEqual(
sorted(['requirements.txt', '.gitignore', 'README.md', 'src/']),
sorted(self._gfs.ReadSingle('').Get()))
self.assertEqual(
sorted(['__init__.notpy', 'hello.notpy']),
sorted(self._gfs.ReadSingle('src/').Get()))
def testReadFile(self):
self._gfs.Refresh().Get()
expected = (
'# Compiled Python files\n'
'*.pyc\n'
)
self.assertEqual(expected, self._gfs.ReadSingle('.gitignore').Get())
def testMultipleReads(self):
self._gfs.Refresh().Get()
self.assertEqual(
self._gfs.ReadSingle('requirements.txt').Get(),
self._gfs.ReadSingle('requirements.txt').Get())
def testReads(self):
self._gfs.Refresh().Get()
expected = {
'src/': sorted(['hello.notpy', '__init__.notpy']),
'': sorted(['requirements.txt', '.gitignore', 'README.md', 'src/'])
}
read = self._gfs.Read(['', 'src/']).Get()
self.assertEqual(expected['src/'], sorted(read['src/']))
self.assertEqual(expected[''], sorted(read['']))
def testStat(self):
# This is the hash value from the zip on disk.
real_hash = 'c36fc23688a9ec9e264d3182905dc0151bfff7d7'
self._gfs.Refresh().Get()
dir_stat = StatInfo(real_hash, {
'hello.notpy': StatInfo(real_hash),
'__init__.notpy': StatInfo(real_hash)
})
self.assertEqual(StatInfo(real_hash), self._gfs.Stat('README.md'))
self.assertEqual(StatInfo(real_hash), self._gfs.Stat('src/hello.notpy'))
self.assertEqual(dir_stat, self._gfs.Stat('src/'))
def testBadReads(self):
self._gfs.Refresh().Get()
self.assertRaises(FileNotFoundError, self._gfs.Stat, 'DONT_README.md')
self.assertRaises(FileNotFoundError,
self._gfs.ReadSingle('DONT_README.md').Get)
def testCachingFileSystem(self):
self._cgfs.Refresh().Get()
initial_cgfs_read_one = self._cgfs.ReadSingle('src/hello.notpy').Get()
self.assertEqual(initial_cgfs_read_one,
self._gfs.ReadSingle('src/hello.notpy').Get())
self.assertEqual(initial_cgfs_read_one,
self._cgfs.ReadSingle('src/hello.notpy').Get())
initial_cgfs_read_two = self._cgfs.Read(
['README.md', 'requirements.txt']).Get()
self.assertEqual(
initial_cgfs_read_two,
self._gfs.Read(['README.md', 'requirements.txt']).Get())
self.assertEqual(
initial_cgfs_read_two,
self._cgfs.Read(['README.md', 'requirements.txt']).Get())
def testWithoutRefresh(self):
# Without refreshing it will still read the content from blobstore, and it
# does this via the magic of the FakeURLFSFetcher.
self.assertEqual(['__init__.notpy', 'hello.notpy'],
sorted(self._gfs.ReadSingle('src/').Get()))
def testRefresh(self):
test_bundle = _TestBundle()
gfs, fetcher = test_bundle.CreateGfsAndFetcher()
# It shouldn't fetch until Refresh does so; then it will do 2, one for the
# stat, and another for the read.
self.assertTrue(*fetcher.CheckAndReset())
gfs.Refresh().Get()
self.assertTrue(*fetcher.CheckAndReset(fetch_count=1,
fetch_async_count=1,
fetch_resolve_count=1))
# Refresh is just an alias for Read('').
gfs.Refresh().Get()
self.assertTrue(*fetcher.CheckAndReset())
initial_dir_read = sorted(gfs.ReadSingle('').Get())
initial_file_read = gfs.ReadSingle('dir/file1').Get()
version, data = test_bundle.Mutate()
    # Check that changes have not affected the file system yet.
self.assertEqual(initial_dir_read, sorted(gfs.ReadSingle('').Get()))
self.assertEqual(initial_file_read, gfs.ReadSingle('dir/file1').Get())
self.assertNotEqual(StatInfo(version), gfs.Stat(''))
gfs, fetcher = test_bundle.CreateGfsAndFetcher()
gfs.Refresh().Get()
self.assertTrue(*fetcher.CheckAndReset(fetch_count=1,
fetch_async_count=1,
fetch_resolve_count=1))
# Check that the changes have affected the file system.
self.assertEqual(data, gfs.ReadSingle('new-file').Get())
self.assertEqual(test_bundle.files['zipfile/dir/file1'],
gfs.ReadSingle('dir/file1').Get())
self.assertEqual(StatInfo(version), gfs.Stat('new-file'))
# Regression test: ensure that reading the data after it's been mutated,
# but before Refresh() has been realised, still returns the correct data.
gfs, fetcher = test_bundle.CreateGfsAndFetcher()
version, data = test_bundle.Mutate()
refresh_future = gfs.Refresh()
self.assertTrue(*fetcher.CheckAndReset(fetch_count=1, fetch_async_count=1))
self.assertEqual(data, gfs.ReadSingle('new-file').Get())
self.assertEqual(test_bundle.files['zipfile/dir/file1'],
gfs.ReadSingle('dir/file1').Get())
self.assertEqual(StatInfo(version), gfs.Stat('new-file'))
refresh_future.Get()
self.assertTrue(*fetcher.CheckAndReset(fetch_resolve_count=1))
def testGetThenRefreshOnStartup(self):
# Regression test: Test that calling Get() but never resolving the future,
# then Refresh()ing the data, causes the data to be refreshed.
test_bundle = _TestBundle()
gfs, fetcher = test_bundle.CreateGfsAndFetcher()
self.assertTrue(*fetcher.CheckAndReset())
# Get a predictable version.
version, data = test_bundle.Mutate()
read_future = gfs.ReadSingle('hello.txt')
# Fetch for the Stat(), async-fetch for the Read().
self.assertTrue(*fetcher.CheckAndReset(fetch_count=1, fetch_async_count=1))
refresh_future = gfs.Refresh()
self.assertTrue(*fetcher.CheckAndReset())
self.assertEqual(data, read_future.Get())
self.assertTrue(*fetcher.CheckAndReset(fetch_resolve_count=1))
self.assertEqual(StatInfo(version), gfs.Stat('hello.txt'))
self.assertTrue(*fetcher.CheckAndReset())
# The fetch will already have been resolved, so resolving the Refresh won't
# affect anything.
refresh_future.Get()
self.assertTrue(*fetcher.CheckAndReset())
# Read data should not have changed.
self.assertEqual(data, gfs.ReadSingle('hello.txt').Get())
self.assertEqual(StatInfo(version), gfs.Stat('hello.txt'))
self.assertTrue(*fetcher.CheckAndReset())
if __name__ == '__main__':
unittest.main()
| 36.042705
| 79
| 0.678219
|
feaf39bbc3e4e738d1e62ba113f2f7dd2c0f48c9
| 2,957
|
py
|
Python
|
investments/src/event-profiling/eventProfiler.py
|
suggitpe/python
|
27ccc033d13774e8ea4355849374152246b3f4a4
|
[
"Apache-2.0"
] | null | null | null |
investments/src/event-profiling/eventProfiler.py
|
suggitpe/python
|
27ccc033d13774e8ea4355849374152246b3f4a4
|
[
"Apache-2.0"
] | null | null | null |
investments/src/event-profiling/eventProfiler.py
|
suggitpe/python
|
27ccc033d13774e8ea4355849374152246b3f4a4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import copy
import datetime as dt
import QSTK.qstkstudy.EventProfiler as ep
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.qsdateutil as du
import numpy as np
def get_data_from_market(start_of_period, end_of_period, symbols_from):
daysOfMarketOpen = du.getNYSEdays(start_of_period, end_of_period, dt.timedelta(hours=16))
dataObject = da.DataAccess('Yahoo')
symbols = dataObject.get_symbols_from_list(symbols_from)
symbols.append('SPY')
keys = create_market_keys()
rawMarketData = dataObject.get_data(daysOfMarketOpen, symbols, keys)
dataDictionary = dict(zip(keys, rawMarketData))
clean_dictionary_of_nans(keys, dataDictionary)
return [symbols, dataDictionary]
def create_market_keys():
return ['open', 'high', 'low', 'close', 'volume', 'actual_close']
# return ['actual_close']
def clean_dictionary_of_nans(keys, dataDictionary):
for key in keys:
dataDictionary[key] = dataDictionary[key].fillna(method='ffill')
dataDictionary[key] = dataDictionary[key].fillna(method='bfill')
dataDictionary[key] = dataDictionary[key].fillna(1.0)
def find_events_from(symbols, dataDictionary, eventTrigger):
closeData = dataDictionary['actual_close']
theMarket = closeData['SPY']
events = create_data_frame_same_size_as(closeData)
timestamps = closeData.index
print "Finding events"
for symbol in symbols:
for day in range(1, len(timestamps)):
symbolPriceToday = closeData[symbol].ix[timestamps[day]]
marketPriceToday = theMarket.ix[timestamps[day]]
symbolPriceYesterday = closeData[symbol].ix[timestamps[day - 1]]
marketPriceYesterday = theMarket.ix[timestamps[day - 1]]
symbolReturnToday = (symbolPriceToday / symbolPriceYesterday) - 1
marketReturnToday = (marketPriceToday / marketPriceYesterday) - 1
if symbolPriceYesterday >= eventTrigger and symbolPriceToday < eventTrigger:
events[symbol].ix[timestamps[day]] = 1
return events
def create_data_frame_same_size_as(exampleDataFrame):
dataFrame = copy.deepcopy(exampleDataFrame)
dataFrame = dataFrame * np.NAN
return dataFrame
def create_the_event_profile_from(events, dataDictionary):
print "Profiling the event data"
ep.eventprofiler(events, dataDictionary, i_lookback=20, i_lookforward=20,
s_filename="eventStudy.pdf", b_market_neutral=True, b_errorbars=True,
s_market_sym='SPY')
def profile_period():
start_of_period = dt.datetime(2008, 1, 1)
end_of_period = dt.datetime(2009, 12, 31)
symbols_from = 'sp5002012'
event_trigger = 6.0
symbols, data_dictionary = get_data_from_market(start_of_period, end_of_period, symbols_from)
events = find_events_from(symbols, data_dictionary, event_trigger)
create_the_event_profile_from(events, data_dictionary)
profile_period()
| 35.626506
| 97
| 0.725736
|
040239884f342924698073f5b3547d54965bd2a3
| 1,988
|
py
|
Python
|
logo/test.py
|
greschd/PhaseMap
|
d70586cdfd13956ff4c0073a1c26b214295dc7fc
|
[
"Apache-2.0"
] | 2
|
2020-11-06T12:14:44.000Z
|
2021-05-24T16:41:33.000Z
|
logo/test.py
|
greschd/PhaseMap
|
d70586cdfd13956ff4c0073a1c26b214295dc7fc
|
[
"Apache-2.0"
] | 1
|
2020-10-06T09:28:36.000Z
|
2020-10-06T09:28:36.000Z
|
logo/test.py
|
greschd/PhaseMap
|
d70586cdfd13956ff4c0073a1c26b214295dc7fc
|
[
"Apache-2.0"
] | 1
|
2019-11-08T00:58:51.000Z
|
2019-11-08T00:58:51.000Z
|
#!/usr/bin/env python
# © 2015-2018, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
import numpy as np
import scipy.misc
import phasemap2 as pm
import matplotlib.pyplot as plt
# ~ def circle(x, y):
# ~ return 1 if x**2 + y**2 < 1 else 0
# ~ def wedge(x, y):
# ~ return 1 if (y - abs(x)) > 0 and (x**2 + y**2 < 1) else 0
# ~ def phase(val):
# ~ return [wedge(x, y) + circle(x, y) for x, y in val]
def rhombus(x, y):
return 1 if (abs(x) + abs(y) < 1) else 0
def circle2(x, y, r=1):
return 1 if (x ** 2 + y ** 2) < r ** 2 else 0
# ~ def box(x, y, l=0.5):
# ~ return 1 if (-l < x < l) and (-l < y < l) else 0
def phase(val):
return [rhombus(x, y) + circle2(x, y, r=np.sqrt(0.5)) for x, y in val]
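# For example (illustrative values): phase([(0.0, 0.0), (0.9, 0.0), (2.0, 2.0)])
# evaluates to [2, 1, 0] -- inside both shapes, inside the rhombus only, and
# outside both, respectively.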
# ~ def phase(val):
# ~ return [rhombus(x, y) for x, y in val]
if __name__ == "__main__":
plt.set_cmap("viridis")
res = pm.get_phase_map(phase, [(-1.1, 1.1), (-1.1, 1.1)], num_steps=6, mesh=3)
A = np.zeros(res.mesh, dtype=int) - 1
for k, v in res.items():
A[k] = v
B = np.zeros(res.mesh, dtype=int) - 1
for k, v in res.items():
B[k] = v
    current = 0  # fallback in case the very first scanned cell was left unfilled
    for i in range(B.shape[0]):
iterator = range(B.shape[1])
if i % 2 == 1:
iterator = reversed(iterator)
for j in iterator:
if B[i, j] == -1:
B[i, j] = current
else:
current = B[i, j]
color_mapping_1 = {
-1: [0, 0, 0],
0: [0x00, 0x00, 0x00],
1: [0, 0x33, 0x99],
2: [0xEE, 0x66, 0],
}
color_mapping_2 = {
-1: [0, 0, 0],
0: [0xFF, 0xFF, 0xFF],
1: [0, 0x1F, 0x5C],
2: [0xC2, 0x3B, 5],
}
A = np.zeros(tuple(list(res.mesh) + [3]), dtype=int)
for i, line in enumerate(B):
for j, elem in enumerate(line):
A[i, j] = color_mapping_1[elem]
for k, v in res.items():
A[k] = color_mapping_2[v]
scipy.misc.imsave("logo_out.png", A, format="png")
| 25.164557
| 82
| 0.512575
|
dc4cc12385f3e8f89cbda5eb2991736f51741197
| 5,750
|
py
|
Python
|
selfdrive/controls/lib/longitudinal_planner.py
|
fallen8angel/forNEXO-YONG
|
5661ae0fb2fefc41fda9e474e094d4b5440ecb8e
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/longitudinal_planner.py
|
fallen8angel/forNEXO-YONG
|
5661ae0fb2fefc41fda9e474e094d4b5440ecb8e
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/longitudinal_planner.py
|
fallen8angel/forNEXO-YONG
|
5661ae0fb2fefc41fda9e474e094d4b5440ecb8e
|
[
"MIT"
] | 2
|
2022-02-25T03:36:09.000Z
|
2022-03-13T12:24:37.000Z
|
#!/usr/bin/env python3
import math
import numpy as np
from common.numpy_fast import interp
import cereal.messaging as messaging
from common.conversions import Conversions as CV
from common.filter_simple import FirstOrderFilter
from common.realtime import DT_MDL
from selfdrive.modeld.constants import T_IDXS
from selfdrive.controls.lib.longcontrol import LongCtrlState
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc
from selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N
from selfdrive.swaglog import cloudlog
from common.params import Params
LON_MPC_STEP = 0.2 # first step is 0.2s
AWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted
A_CRUISE_MIN = -1.2
A_CRUISE_MAX_VALS = [1.5, 1.2, 0.8, 0.6]
A_CRUISE_MAX_BP = [0., 15., 25., 40.]
# Lookup table for turns
_A_TOTAL_MAX_V = [1.7, 3.2]
_A_TOTAL_MAX_BP = [20., 40.]
def get_max_accel(v_ego):
return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)
def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
"""
  This function returns the limited longitudinal acceleration allowed, based on
  the existing lateral acceleration; this should avoid accelerating when losing
  the target in turns
"""
# FIXME: This function to calculate lateral accel is incorrect and should use the VehicleModel
# The lookup table for turns should also be updated if we do this
a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
a_y = v_ego ** 2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)
a_x_allowed = math.sqrt(max(a_total_max ** 2 - a_y ** 2, 0.))
return [a_target[0], min(a_target[1], a_x_allowed)]
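# Worked example (illustrative numbers, not from the original code):
#   v_ego = 20 m/s, angle_steers = 10 deg, steerRatio = 15, wheelbase = 2.7 m
#   a_y         = 20**2 * 10 * pi/180 / (15 * 2.7)              ~= 1.72 m/s^2
#   a_total_max = interp(20, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)    = 1.70 m/s^2
#   a_x_allowed = sqrt(max(1.70**2 - 1.72**2, 0.))               = 0.0  m/s^2
# i.e. at this speed and steering angle the lateral budget is exhausted, so the
# upper (acceleration) limit is clamped to zero while the lower (braking) limit
# a_target[0] is left untouched.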
class Planner:
def __init__(self, CP, init_v=0.0, init_a=0.0):
self.CP = CP
self.mpc = LongitudinalMpc()
self.fcw = False
self.a_desired = init_a
self.v_desired_filter = FirstOrderFilter(init_v, 2.0, DT_MDL)
self.v_desired_trajectory = np.zeros(CONTROL_N)
self.a_desired_trajectory = np.zeros(CONTROL_N)
self.j_desired_trajectory = np.zeros(CONTROL_N)
self.solverExecutionTime = 0.0
self.use_cluster_speed = Params().get_bool('UseClusterSpeed')
def update(self, sm):
v_ego = sm['carState'].vEgo
v_cruise_kph = sm['controlsState'].vCruise
v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)
v_cruise = v_cruise_kph * CV.KPH_TO_MS
# neokii
if not self.use_cluster_speed:
vCluRatio = sm['carState'].vCluRatio
if vCluRatio > 0.5:
v_cruise *= vCluRatio
v_cruise = int(v_cruise * CV.MS_TO_KPH + 0.25) * CV.KPH_TO_MS
long_control_state = sm['controlsState'].longControlState
force_slow_decel = sm['controlsState'].forceDecel
# Reset current state when not engaged, or user is controlling the speed
reset_state = long_control_state == LongCtrlState.off
# No change cost when user is controlling the speed, or when standstill
prev_accel_constraint = not (reset_state or sm['carState'].standstill)
if reset_state:
self.v_desired_filter.x = v_ego
self.a_desired = 0.0
# Prevent divergence, smooth in current v_ego
self.v_desired_filter.x = max(0.0, self.v_desired_filter.update(v_ego))
accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]
accel_limits_turns = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)
if force_slow_decel:
# if required so, force a smooth deceleration
accel_limits_turns[1] = min(accel_limits_turns[1], AWARENESS_DECEL)
accel_limits_turns[0] = min(accel_limits_turns[0], accel_limits_turns[1])
# clip limits, cannot init MPC outside of bounds
accel_limits_turns[0] = min(accel_limits_turns[0], self.a_desired + 0.05)
accel_limits_turns[1] = max(accel_limits_turns[1], self.a_desired - 0.05)
self.mpc.set_weights(prev_accel_constraint)
self.mpc.set_accel_limits(accel_limits_turns[0], accel_limits_turns[1])
self.mpc.set_cur_state(self.v_desired_filter.x, self.a_desired)
self.mpc.update(sm['carState'], sm['radarState'], v_cruise)
self.v_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.v_solution)
self.a_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.a_solution)
self.j_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC[:-1], self.mpc.j_solution)
# TODO counter is only needed because radar is glitchy, remove once radar is gone
self.fcw = self.mpc.crash_cnt > 5
if self.fcw:
cloudlog.info("FCW triggered")
# Interpolate 0.05 seconds and save as starting point for next iteration
a_prev = self.a_desired
self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))
self.v_desired_filter.x = self.v_desired_filter.x + DT_MDL * (self.a_desired + a_prev) / 2.0
def publish(self, sm, pm):
plan_send = messaging.new_message('longitudinalPlan')
plan_send.valid = sm.all_checks(service_list=['carState', 'controlsState'])
longitudinalPlan = plan_send.longitudinalPlan
longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']
longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']
longitudinalPlan.speeds = self.v_desired_trajectory.tolist()
longitudinalPlan.accels = self.a_desired_trajectory.tolist()
longitudinalPlan.jerks = self.j_desired_trajectory.tolist()
longitudinalPlan.hasLead = sm['radarState'].leadOne.status
longitudinalPlan.longitudinalPlanSource = self.mpc.source
longitudinalPlan.fcw = self.fcw
longitudinalPlan.solverExecutionTime = self.mpc.solve_time
pm.send('longitudinalPlan', plan_send)
| 40.20979
| 108
| 0.749217
|
a0b5566b55b57f707f0f974fe105c9d22a413beb
| 301
|
py
|
Python
|
config/cfg.py
|
lucasxlu/CRNet
|
17d27e39a77181921cc2bd5a5a8866a25282b4de
|
[
"MIT"
] | 13
|
2018-06-26T07:13:39.000Z
|
2021-11-30T02:12:38.000Z
|
config/cfg.py
|
lucasxlu/CRNet
|
17d27e39a77181921cc2bd5a5a8866a25282b4de
|
[
"MIT"
] | null | null | null |
config/cfg.py
|
lucasxlu/CRNet
|
17d27e39a77181921cc2bd5a5a8866a25282b4de
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
cfg = OrderedDict()
cfg['use_gpu'] = False
cfg['scut_fbp_dir'] = 'E:/DataSet/Face/SCUT-FBP/Crop'
cfg['batch_size'] = 16
cfg['hotornot_dir'] = 'E:/DataSet/Face/eccv2010_beauty_data_v1.0/eccv2010_beauty_data/hotornot_face'
cfg['jaffe_dir'] = 'E:/DataSet/Face/jaffe'
| 33.444444
| 100
| 0.754153
|
cef5fc6d60a94799dbd665d796fbfbbed6f0c0f4
| 2,196
|
py
|
Python
|
modules/utils.py
|
inconvergent/differential-mesh-3d
|
cfa52b21d8524410c0ae2e98a6456c6520ae9ae6
|
[
"MIT"
] | 51
|
2015-08-27T16:05:17.000Z
|
2022-03-12T21:05:21.000Z
|
modules/utils.py
|
inconvergent/differential-mesh-3d
|
cfa52b21d8524410c0ae2e98a6456c6520ae9ae6
|
[
"MIT"
] | 1
|
2015-11-25T18:50:23.000Z
|
2015-11-26T22:21:10.000Z
|
modules/utils.py
|
inconvergent/differential-mesh-3d
|
cfa52b21d8524410c0ae2e98a6456c6520ae9ae6
|
[
"MIT"
] | 5
|
2016-09-12T12:58:49.000Z
|
2021-05-28T23:59:11.000Z
|
# -*- coding: utf-8 -*-
def make_info_str(args):
s = ''
for k in vars(args):
s += '# ' + str(k) + ': ' + str(getattr(args,k)) + '\n'
return s
def print_stats(steps,dm, meta=False):
from time import strftime
from time import time
if isinstance(meta, str):
meta = ' | {:s}'.format(meta)
else:
meta = ''
print(
'{:s} | stp: {:d} sec: {:.2f} v: {:d} e: {:d} f: {:d}{:s}'
.format(
strftime('%d/%m/%y %H:%M:%S'),
steps,
time()-dm.get_start_time(),
dm.get_vnum(),
dm.get_henum(),
dm.get_fnum(),
meta
)
)
return
def get_exporter(dm, fn, nmax):
from numpy import zeros
from .geometry import move_scale
from iutils.ioOBJ import export
np_verts = zeros((nmax, 3), 'float')
np_tris = zeros((nmax, 3), 'int')
np_int = zeros(nmax, 'float')
def e():
vnum = dm.np_get_vertices(np_verts)
tnum = dm.np_get_triangles_vertices(np_tris)
dm.np_get_triangles_intensity(np_int)
move_scale(np_verts[:vnum, :], s=1000)
export(
'thing_mesh',
fn.name(),
verts=np_verts[:vnum, :],
tris=np_tris[:tnum, :]
)
return e
def get_surface_vertices(dm):
res = []
for he in range(dm.get_henum()):
e = dm.is_surface_edge(he)
if e > 0:
d = dm.get_edge_dict(he)
res.append(d['first'])
res.append(d['last'])
return list(set(res))
def get_seed_selector(dm, t, sr=None):
from numpy import array
from numpy import arange
from numpy import ones
from numpy.random import random
if sr is not None:
get_mask = lambda n, sr: (random(size=n) < sr).nonzero()[0]
else:
get_mask = lambda n, sr: ones(n, 'bool')
if t == 'surface':
def f():
vertices = array(get_surface_vertices(dm))
rm = get_mask(len(vertices), sr)
if len(rm) < 1:
return array([])
return vertices[rm]
elif t == 'random':
def f():
vn = dm.get_vnum()
vertices = arange(vn)
rm = get_mask(len(vertices), sr)
if len(rm) < 1:
return array([])
return vertices[rm]
else:
raise ValueError('use "surface" or "random".')
return f
| 21.742574
| 64
| 0.561931
|
8baa85b9e03ac5c53ebc26c6ca72de35977dbed3
| 1,697
|
py
|
Python
|
WeightController/weight/tests.py
|
williamHuang5468/WeightController
|
146673d543ed7d6a20a642a7a9203aa55cf1a935
|
[
"MIT"
] | null | null | null |
WeightController/weight/tests.py
|
williamHuang5468/WeightController
|
146673d543ed7d6a20a642a7a9203aa55cf1a935
|
[
"MIT"
] | null | null | null |
WeightController/weight/tests.py
|
williamHuang5468/WeightController
|
146673d543ed7d6a20a642a7a9203aa55cf1a935
|
[
"MIT"
] | null | null | null |
from django.core.urlresolvers import resolve
from django.test import TestCase
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils import timezone
from weight.views import editorWeight
from weight.models import Weight
class EditorWeightTest(TestCase):
def test_url(self):
editorWeightPage = resolve('/')
self.assertEqual(editorWeightPage.func, editorWeight)
def test_response_html(self):
request = HttpRequest()
response = editorWeight(request)
expected_html = render_to_string('editorWeight.html')
self.assertEqual(response.content.decode(), expected_html)
def test_save_request(self):
request = HttpRequest()
request.method = 'POST'
request.POST['weightInput'] = '50'
response = editorWeight(request)
self.assertIn('50', response.content.decode())
expected_html = render_to_string(
'editorweight.html',
{'new_weight': '50'}
)
print(expected_html)
self.assertEqual(response.content.decode(), expected_html)
class WeightModelTest(TestCase):
def test_save_weight(self):
weight1 = Weight()
weight1.weight = 100
weight1.record_date=timezone.now()
weight1.save()
weight2 = Weight()
weight2.weight = 50
weight2.record_date=timezone.now()
weight2.save()
saved_weights = Weight.objects.all()
self.assertEqual(saved_weights.count(), 2)
weight1 = saved_weights[0]
weight2 = saved_weights[1]
self.assertEqual(weight1.weight, 100)
self.assertEqual(weight2.weight, 50)
| 28.283333
| 66
| 0.668827
|
31f6ddc74203078749bf4bc12e4d45d6e885991a
| 10,001
|
py
|
Python
|
mesonbuild/linkers/detect.py
|
jpakkane/meson
|
ee7a7fec10298e8dabc423f6163e866d19473e7f
|
[
"Apache-2.0"
] | 64
|
2015-01-09T13:45:23.000Z
|
2015-06-13T20:16:01.000Z
|
mesonbuild/linkers/detect.py
|
jpakkane/meson
|
ee7a7fec10298e8dabc423f6163e866d19473e7f
|
[
"Apache-2.0"
] | 110
|
2015-01-09T01:35:56.000Z
|
2015-06-14T11:26:04.000Z
|
mesonbuild/linkers/detect.py
|
jpakkane/meson
|
ee7a7fec10298e8dabc423f6163e866d19473e7f
|
[
"Apache-2.0"
] | 13
|
2015-01-05T09:08:37.000Z
|
2015-06-04T08:34:45.000Z
|
# Copyright 2012-2022 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from ..mesonlib import (
EnvironmentException, OptionKey,
Popen_safe, search_version
)
from .linkers import (
AppleDynamicLinker,
GnuGoldDynamicLinker,
GnuBFDDynamicLinker,
MoldDynamicLinker,
LLVMDynamicLinker,
QualcommLLVMDynamicLinker,
MSVCDynamicLinker,
ClangClDynamicLinker,
SolarisDynamicLinker,
AIXDynamicLinker,
OptlinkDynamicLinker,
)
import re
import shlex
import typing as T
if T.TYPE_CHECKING:
from .linkers import DynamicLinker, GnuDynamicLinker
from ..environment import Environment
from ..compilers import Compiler
from ..mesonlib import MachineChoice
defaults: T.Dict[str, T.List[str]] = {}
defaults['static_linker'] = ['ar', 'gar']
defaults['vs_static_linker'] = ['lib']
defaults['clang_cl_static_linker'] = ['llvm-lib']
defaults['cuda_static_linker'] = ['nvlink']
defaults['gcc_static_linker'] = ['gcc-ar']
defaults['clang_static_linker'] = ['llvm-ar']
def __failed_to_detect_linker(compiler: T.List[str], args: T.List[str], stdout: str, stderr: str) -> 'T.NoReturn':
msg = 'Unable to detect linker for compiler "{} {}"\nstdout: {}\nstderr: {}'.format(
' '.join(compiler), ' '.join(args), stdout, stderr)
raise EnvironmentException(msg)
def guess_win_linker(env: 'Environment', compiler: T.List[str], comp_class: T.Type['Compiler'],
comp_version: str, for_machine: MachineChoice, *,
use_linker_prefix: bool = True, invoked_directly: bool = True,
extra_args: T.Optional[T.List[str]] = None) -> 'DynamicLinker':
env.coredata.add_lang_args(comp_class.language, comp_class, for_machine, env)
# Explicitly pass logo here so that we can get the version of link.exe
if not use_linker_prefix or comp_class.LINKER_PREFIX is None:
check_args = ['/logo', '--version']
elif isinstance(comp_class.LINKER_PREFIX, str):
check_args = [comp_class.LINKER_PREFIX + '/logo', comp_class.LINKER_PREFIX + '--version']
elif isinstance(comp_class.LINKER_PREFIX, list):
check_args = comp_class.LINKER_PREFIX + ['/logo'] + comp_class.LINKER_PREFIX + ['--version']
check_args += env.coredata.options[OptionKey('args', lang=comp_class.language, machine=for_machine)].value
override = [] # type: T.List[str]
value = env.lookup_binary_entry(for_machine, comp_class.language + '_ld')
if value is not None:
override = comp_class.use_linker_args(value[0], comp_version)
check_args += override
if extra_args is not None:
check_args.extend(extra_args)
p, o, _ = Popen_safe(compiler + check_args)
if 'LLD' in o.split('\n')[0]:
if '(compatible with GNU linkers)' in o:
return LLVMDynamicLinker(
compiler, for_machine, comp_class.LINKER_PREFIX,
override, version=search_version(o))
elif not invoked_directly:
return ClangClDynamicLinker(
for_machine, override, exelist=compiler, prefix=comp_class.LINKER_PREFIX,
version=search_version(o), direct=False, machine=None)
if value is not None and invoked_directly:
compiler = value
        # We've already handled the non-direct case above
p, o, e = Popen_safe(compiler + check_args)
if 'LLD' in o.split('\n')[0]:
return ClangClDynamicLinker(
for_machine, [],
prefix=comp_class.LINKER_PREFIX if use_linker_prefix else [],
exelist=compiler, version=search_version(o), direct=invoked_directly)
elif 'OPTLINK' in o:
        # Optlink's stdout *may* begin with a \r character.
return OptlinkDynamicLinker(compiler, for_machine, version=search_version(o))
elif o.startswith('Microsoft') or e.startswith('Microsoft'):
out = o or e
match = re.search(r'.*(X86|X64|ARM|ARM64).*', out)
if match:
target = str(match.group(1))
else:
target = 'x86'
return MSVCDynamicLinker(
for_machine, [], machine=target, exelist=compiler,
prefix=comp_class.LINKER_PREFIX if use_linker_prefix else [],
version=search_version(out), direct=invoked_directly)
elif 'GNU coreutils' in o:
import shutil
fullpath = shutil.which(compiler[0])
raise EnvironmentException(
f"Found GNU link.exe instead of MSVC link.exe in {fullpath}.\n"
"This link.exe is not a linker.\n"
"You may need to reorder entries to your %PATH% variable to resolve this.")
__failed_to_detect_linker(compiler, check_args, o, e)
def guess_nix_linker(env: 'Environment', compiler: T.List[str], comp_class: T.Type['Compiler'],
comp_version: str, for_machine: MachineChoice, *,
extra_args: T.Optional[T.List[str]] = None) -> 'DynamicLinker':
"""Helper for guessing what linker to use on Unix-Like OSes.
:compiler: Invocation to use to get linker
:comp_class: The Compiler Type (uninstantiated)
:comp_version: The compiler version string
:for_machine: which machine this linker targets
:extra_args: Any additional arguments required (such as a source file)
"""
env.coredata.add_lang_args(comp_class.language, comp_class, for_machine, env)
extra_args = extra_args or []
extra_args += env.coredata.options[OptionKey('args', lang=comp_class.language, machine=for_machine)].value
if isinstance(comp_class.LINKER_PREFIX, str):
check_args = [comp_class.LINKER_PREFIX + '--version'] + extra_args
else:
check_args = comp_class.LINKER_PREFIX + ['--version'] + extra_args
override = [] # type: T.List[str]
value = env.lookup_binary_entry(for_machine, comp_class.language + '_ld')
if value is not None:
override = comp_class.use_linker_args(value[0], comp_version)
check_args += override
_, o, e = Popen_safe(compiler + check_args)
v = search_version(o + e)
linker: DynamicLinker
if 'LLD' in o.split('\n')[0]:
linker = LLVMDynamicLinker(
compiler, for_machine, comp_class.LINKER_PREFIX, override, version=v)
elif 'Snapdragon' in e and 'LLVM' in e:
linker = QualcommLLVMDynamicLinker(
compiler, for_machine, comp_class.LINKER_PREFIX, override, version=v)
elif e.startswith('lld-link: '):
# The LLD MinGW frontend didn't respond to --version before version 9.0.0,
# and produced an error message about failing to link (when no object
# files were specified), instead of printing the version number.
# Let's try to extract the linker invocation command to grab the version.
_, o, e = Popen_safe(compiler + check_args + ['-v'])
try:
linker_cmd = re.match(r'.*\n(.*?)\nlld-link: ', e, re.DOTALL).group(1)
linker_cmd = shlex.split(linker_cmd)[0]
except (AttributeError, IndexError, ValueError):
pass
else:
_, o, e = Popen_safe([linker_cmd, '--version'])
v = search_version(o)
linker = LLVMDynamicLinker(compiler, for_machine, comp_class.LINKER_PREFIX, override, version=v)
# first is for apple clang, second is for real gcc, the third is icc
elif e.endswith('(use -v to see invocation)\n') or 'macosx_version' in e or 'ld: unknown option:' in e:
if isinstance(comp_class.LINKER_PREFIX, str):
_, _, e = Popen_safe(compiler + [comp_class.LINKER_PREFIX + '-v'] + extra_args)
else:
_, _, e = Popen_safe(compiler + comp_class.LINKER_PREFIX + ['-v'] + extra_args)
for line in e.split('\n'):
if 'PROJECT:ld' in line:
v = line.split('-')[1]
break
else:
v = 'unknown version'
linker = AppleDynamicLinker(compiler, for_machine, comp_class.LINKER_PREFIX, override, version=v)
elif 'GNU' in o or 'GNU' in e:
cls: T.Type[GnuDynamicLinker]
# this is always the only thing on stdout, except for swift
# which may or may not redirect the linker stdout to stderr
if o.startswith('GNU gold') or e.startswith('GNU gold'):
cls = GnuGoldDynamicLinker
elif o.startswith('mold') or e.startswith('mold'):
cls = MoldDynamicLinker
else:
cls = GnuBFDDynamicLinker
linker = cls(compiler, for_machine, comp_class.LINKER_PREFIX, override, version=v)
elif 'Solaris' in e or 'Solaris' in o:
for line in (o+e).split('\n'):
if 'ld: Software Generation Utilities' in line:
v = line.split(':')[2].lstrip()
break
else:
v = 'unknown version'
linker = SolarisDynamicLinker(
compiler, for_machine, comp_class.LINKER_PREFIX, override,
version=v)
elif 'ld: 0706-012 The -- flag is not recognized' in e:
if isinstance(comp_class.LINKER_PREFIX, str):
_, _, e = Popen_safe(compiler + [comp_class.LINKER_PREFIX + '-V'] + extra_args)
else:
_, _, e = Popen_safe(compiler + comp_class.LINKER_PREFIX + ['-V'] + extra_args)
linker = AIXDynamicLinker(
compiler, for_machine, comp_class.LINKER_PREFIX, override,
version=search_version(e))
else:
__failed_to_detect_linker(compiler, check_args, o, e)
return linker
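# Illustrative sketch (not part of meson): the probing pattern used above boils
# down to "invoke the compiler with the linker-prefixed --version flag and
# pattern-match stdout/stderr". A minimal standalone version, assuming a
# GCC/Clang-style driver that accepts -Wl,<flag>:
def _sketch_probe_linker(compiler: T.List[str]) -> str:
    import subprocess
    p = subprocess.run(compiler + ['-Wl,--version'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       universal_newlines=True)
    out = p.stdout + p.stderr
    if 'LLD' in out.split('\n')[0]:
        return 'lld'
    if 'GNU gold' in out:
        return 'gold'
    if 'mold' in out.split('\n')[0]:
        return 'mold'
    if 'GNU' in out:
        return 'bfd'
    return 'unknown'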
| 44.057269
| 114
| 0.656734
|
1e379d2843866791bef92c399963d5d15b9dc6d3
| 2,850
|
py
|
Python
|
db/country.py
|
testsibirtsv/opncrt
|
c9780696e6e3849f8711ee2c6a9c997c4ac66b41
|
[
"Apache-2.0"
] | null | null | null |
db/country.py
|
testsibirtsv/opncrt
|
c9780696e6e3849f8711ee2c6a9c997c4ac66b41
|
[
"Apache-2.0"
] | null | null | null |
db/country.py
|
testsibirtsv/opncrt
|
c9780696e6e3849f8711ee2c6a9c997c4ac66b41
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import relationship
from models.personaldetails import PersonalDetails
engine = create_engine('mysql://root@localhost/opencart')
Session = sessionmaker(bind=engine)
Base = declarative_base()
def session_factory():
Base.metadata.create_all(engine)
return Session()
class Customer(Base):
__tablename__ = 'oc_customer'
customer_id = Column(Integer, primary_key=True)
firstname = Column(String)
lastname = Column(String)
email = Column(String)
telephone = Column(String)
countries = relationship('Country')
def __init__(self,
firstname,
lastname,
email,
telephone):
self.firstname = firstname
self.lastname = lastname
self.email = email
self.telephone = telephone
def __repr__(self):
return f'{self.firstname} {self.lastname} {self.email} {self.telephone}'
class Address(Base):
__tablename__ = 'oc_address'
address_id = Column(Integer, primary_key=True)
customer_id = Column(Integer)
firstname = Column(String)
lastname = Column(String)
company = Column(String)
address_1 = Column(String)
address_2 = Column(String)
city = Column(String)
postcode = Column(String)
country_id = Column(Integer)
zone_id = Column(Integer)
def __init__(self,
address_id,
customer_id,
firstname,
lastname,
company,
address_1,
address_2,
city,
postcode,
country_id,
zone_id):
self.address_id = address_id
self.customer_id = customer_id
self.firstname = firstname
self.lastname = lastname
self.company = company
self.address_1 = address_1
self.address_2 = address_2
self.city = city
self.postcode = postcode
self.country_id = country_id
self.zone_id = zone_id
class Country(Base):
__tablename__ = 'oc_country'
country_id = Column(Integer, primary_key=True)
name = Column(String)
def __init__(self, name):
self.name = name
def __repr__(self):
return f'{self.name}'
class Zone(Base):
__tablename__ = 'oc_zone'
zone_id = Column(Integer, primary_key=True)
name = Column(String)
def __init__(self, name):
self.name = name
def __repr__(self):
return f'{self.name}'
def get_people():
session = session_factory()
people_query = session.query(Customer)
session.close()
return people_query.all()
| 25
| 80
| 0.627018
|
f24f8e08b77411d6befe18236cc241505fa6537b
| 1,745
|
py
|
Python
|
tests/tf/block/test_mlp.py
|
Jwmc999/Transformers4Rec
|
e6cdf13a7c0102303c0258120274f88b2d42c9c2
|
[
"Apache-2.0"
] | 415
|
2021-09-20T20:47:34.000Z
|
2022-03-31T16:51:03.000Z
|
tests/tf/block/test_mlp.py
|
Jwmc999/Transformers4Rec
|
e6cdf13a7c0102303c0258120274f88b2d42c9c2
|
[
"Apache-2.0"
] | 128
|
2021-09-21T07:19:38.000Z
|
2022-03-31T15:08:27.000Z
|
tests/tf/block/test_mlp.py
|
Jwmc999/Transformers4Rec
|
e6cdf13a7c0102303c0258120274f88b2d42c9c2
|
[
"Apache-2.0"
] | 44
|
2021-09-23T07:25:36.000Z
|
2022-03-29T04:17:53.000Z
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
tf = pytest.importorskip("tensorflow")
tr = pytest.importorskip("transformers4rec.tf")
@pytest.mark.parametrize("dim", [32, 64])
@pytest.mark.parametrize("activation", ["relu", "tanh"])
@pytest.mark.parametrize("dropout", [None, 0.5])
@pytest.mark.parametrize(
"normalization", [None, "batch_norm", tf.keras.layers.BatchNormalization()]
)
def test_mlp_block_yoochoose(
tabular_schema, tf_tabular_data, dim, activation, dropout, normalization
):
inputs = tr.TabularFeatures.from_schema(tabular_schema, aggregation="concat")
mlp = tr.MLPBlock([dim], activation=activation, dropout=dropout, normalization=normalization)
body = tr.SequentialBlock([inputs, mlp])
outputs = body(tf_tabular_data)
assert list(outputs.shape) == [100, dim]
assert mlp.layers[0].units == dim
assert mlp.layers[0].activation.__name__ == activation
if dropout:
assert mlp.layers[1].rate == dropout
if normalization:
if normalization == "batch_norm":
normalization = tf.keras.layers.BatchNormalization()
assert mlp.layers[-1].__class__.__name__ == normalization.__class__.__name__
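# Illustrative sketch (an assumption, not from the original tests): judging by
# the assertions above, MLPBlock([dim], activation=..., dropout=...,
# normalization=...) assembles roughly the following plain-Keras stack.
def _equivalent_keras_mlp(dim, activation="relu", dropout=0.5):
    return tf.keras.Sequential([
        tf.keras.layers.Dense(dim, activation=activation),
        tf.keras.layers.Dropout(dropout),
        tf.keras.layers.BatchNormalization(),
    ])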
| 35.612245
| 97
| 0.72894
|
2be20bb8fdbb83ef3270917ffdc80981158547e1
| 2,556
|
py
|
Python
|
azure-servicefabric/azure/servicefabric/models/node_deactivation_info.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-servicefabric/azure/servicefabric/models/node_deactivation_info.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-servicefabric/azure/servicefabric/models/node_deactivation_info.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeDeactivationInfo(Model):
"""Information about the node deactivation. This information is valid for a
node that is undergoing deactivation or has already been deactivated.
:param node_deactivation_intent: The intent or the reason for deactivating
the node. Following are the possible values for it. Possible values
include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode'
:type node_deactivation_intent: str or
~azure.servicefabric.models.NodeDeactivationIntent
:param node_deactivation_status: The status of node deactivation
operation. Following are the possible values. Possible values include:
'None', 'SafetyCheckInProgress', 'SafetyCheckComplete', 'Completed'
:type node_deactivation_status: str or
~azure.servicefabric.models.NodeDeactivationStatus
:param node_deactivation_task: List of tasks representing the deactivation
operation on the node.
:type node_deactivation_task:
list[~azure.servicefabric.models.NodeDeactivationTask]
:param pending_safety_checks: List of pending safety checks
:type pending_safety_checks:
list[~azure.servicefabric.models.SafetyCheckWrapper]
"""
_attribute_map = {
'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'},
'node_deactivation_status': {'key': 'NodeDeactivationStatus', 'type': 'str'},
'node_deactivation_task': {'key': 'NodeDeactivationTask', 'type': '[NodeDeactivationTask]'},
'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'},
}
def __init__(self, node_deactivation_intent=None, node_deactivation_status=None, node_deactivation_task=None, pending_safety_checks=None):
super(NodeDeactivationInfo, self).__init__()
self.node_deactivation_intent = node_deactivation_intent
self.node_deactivation_status = node_deactivation_status
self.node_deactivation_task = node_deactivation_task
self.pending_safety_checks = pending_safety_checks
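if __name__ == '__main__':
    # Illustrative construction (not part of the generated SDK code); the values
    # are taken from the possible values listed in the docstring above.
    info = NodeDeactivationInfo(
        node_deactivation_intent='Pause',
        node_deactivation_status='SafetyCheckInProgress')
    print(info.node_deactivation_intent, info.node_deactivation_status)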
| 50.117647
| 142
| 0.710876
|
659b30042a389e6d196cc39ceae16e142e1fab05
| 774
|
py
|
Python
|
graphgallery/gallery/utils/bvat_utils.py
|
Aria461863631/GraphGallery
|
7b62f80ab36b29013bea2538a6581fc696a80201
|
[
"MIT"
] | null | null | null |
graphgallery/gallery/utils/bvat_utils.py
|
Aria461863631/GraphGallery
|
7b62f80ab36b29013bea2538a6581fc696a80201
|
[
"MIT"
] | null | null | null |
graphgallery/gallery/utils/bvat_utils.py
|
Aria461863631/GraphGallery
|
7b62f80ab36b29013bea2538a6581fc696a80201
|
[
"MIT"
] | null | null | null |
"""
util functions for 'Batch Virtual Adversarial Training' (BVAT)
"""
import tensorflow as tf
def get_normalized_vector(d):
d /= (1e-12 + tf.reduce_max(tf.abs(d)))
d /= tf.sqrt(1e-6 + tf.reduce_sum(tf.pow(d, 2.0), 1, keepdims=True))
return d
def kl_divergence_with_logit(q_logit, p_logit,
mask=None):
q = tf.math.softmax(q_logit)
if mask is None:
qlogp = tf.reduce_mean(tf.reduce_sum(q * tf.math.log_softmax(p_logit), 1))
else:
mask /= tf.reduce_mean(mask)
qlogp = tf.reduce_mean(tf.reduce_sum(q * tf.math.log_softmax(p_logit), 1) * mask)
return -qlogp
def entropy_y_x(logit):
p = tf.math.softmax(logit)
return -tf.reduce_mean(tf.reduce_sum(p * tf.math.log_softmax(logit), 1))
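if __name__ == "__main__":
    # Small smoke test (illustrative, not part of the original utilities);
    # assumes TF2 eager execution.
    q_logit = tf.constant([[2.0, 0.5, -1.0]])
    p_logit = tf.constant([[1.5, 0.7, -0.5]])
    print("kl_divergence_with_logit:", float(kl_divergence_with_logit(q_logit, p_logit)))
    print("entropy_y_x:", float(entropy_y_x(p_logit)))
    # [[3., 4.]] is scaled to approximately the unit vector [[0.6, 0.8]]
    print("get_normalized_vector:", get_normalized_vector(tf.constant([[3.0, 4.0]])).numpy())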
| 27.642857
| 89
| 0.640827
|
10c12946e5d2536aaca438bbdedb2473ef972577
| 4,203
|
py
|
Python
|
src/DenseCellPrint.py
|
KasparMatas/MLontoFPGAs
|
084633459dfc03c694cc0a849e883913985f4dbe
|
[
"Apache-2.0"
] | null | null | null |
src/DenseCellPrint.py
|
KasparMatas/MLontoFPGAs
|
084633459dfc03c694cc0a849e883913985f4dbe
|
[
"Apache-2.0"
] | 27
|
2018-09-27T20:35:22.000Z
|
2019-03-22T11:43:52.000Z
|
src/DenseCellPrint.py
|
KasparMatas/MLontoFPGAs
|
084633459dfc03c694cc0a849e883913985f4dbe
|
[
"Apache-2.0"
] | 1
|
2022-01-22T02:51:34.000Z
|
2022-01-22T02:51:34.000Z
|
import numpy as np
class DenseCellPrinter:
def printWires(self, wire_widths, output_wire_names, output_file):
output_file.write("wire [{}-1:0] {};\n".format(wire_widths[0], output_wire_names[0]))
output_file.write("wire [{}-1:0] {};\n".format(wire_widths[1], output_wire_names[1]))
output_file.write("wire [{}:0] {};\n".format(wire_widths[2], output_wire_names[2]))
output_file.write("wire {};".format(output_wire_names[3]))
def printInitialParameters(self, wire_widths, weight_amount, output_file):
output_file.write("weight_comp_cell #(\n")
output_file.write(" .DATA_WIDTH({}),\n".format(wire_widths[1]))
output_file.write(" .RESULT_WIDTH({}),\n".format(wire_widths[2]))
output_file.write(" .INDEX_WIDTH({}),\n".format(wire_widths[0]))
output_file.write(" .WEIGHT_AMOUNT({}),\n".format(weight_amount))
def printAdditionalParameters(self, weight_zero_point, input_zero_point, output_file):
output_file.write(" .WEIGHT_OFFSET({}),\n".format(int(weight_zero_point)))
output_file.write(" .INPUT_OFFSET({}),\n".format(int(input_zero_point)))
def formatWeights(self, weights):
weightString = ""
for weight in np.flip(weights):
weightString += "8'd{}, ".format(int(weight))
weightString = weightString[:-2]
return "{" + weightString + "}"
def printWeights(self, weights, output_file):
output_file.write(" .WEIGHTS({})\n".format(self.formatWeights(weights)))
def printInstanceName(self, cell_index, layer_index, output_file):
output_file.write(") cell_{0}_{1} (\n".format(layer_index, cell_index))
def printInputsOutputs(self, input_wire_names, output_wire_names, output_file):
output_file.write(" .clk(clk),\n")
output_file.write(" .input_index({}),\n".format(input_wire_names[0]))
output_file.write(" .input_value({}),\n".format(input_wire_names[1]))
output_file.write(" .input_result({}),\n".format(input_wire_names[2]))
output_file.write(" .input_enable({}),\n".format(input_wire_names[3]))
output_file.write(" .output_index({}),\n".format(output_wire_names[0]))
output_file.write(" .output_value({}),\n".format(output_wire_names[1]))
output_file.write(" .output_result({}),\n".format(output_wire_names[2]))
output_file.write(" .output_enable({})\n".format(output_wire_names[3]))
output_file.write(");\n")
def generateOutputWireNames(self, cell_index, layer_index):
output_wire_names = []
output_wire_names.append("index_{0}_{1}_{2}".format(layer_index, cell_index, cell_index+1))
output_wire_names.append("value_{0}_{1}_{2}".format(layer_index, cell_index, cell_index+1))
output_wire_names.append("result_{0}_{1}_{2}".format(layer_index, cell_index, cell_index+1))
output_wire_names.append("enable_{0}_{1}_{2}".format(layer_index, cell_index, cell_index+1))
return output_wire_names
def printIndividualCell(self, weights, unit_index, layer_index, input_wire_names, wire_widths, output_wire_names,
quantizer, output_file):
self.printInitialParameters(wire_widths, len(weights[unit_index]), output_file)
self.printAdditionalParameters(quantizer.quantized_weight_zeros[layer_index],
quantizer.quantized_output_zeros[layer_index], output_file)
self.printWeights(weights[unit_index], output_file)
self.printInstanceName(unit_index, layer_index, output_file)
self.printInputsOutputs(input_wire_names, output_wire_names, output_file)
def printCells(self, weights, layer_index, quantizer, amount_of_units_in_a_layer, wire_widths,
input_wire_names, output_file):
for unit_index in range (amount_of_units_in_a_layer):
output_wire_names = self.generateOutputWireNames(unit_index, layer_index)
output_file.write("\n")
self.printWires(wire_widths, output_wire_names, output_file)
output_file.write("\n")
self.printIndividualCell(weights, unit_index, layer_index, input_wire_names, wire_widths, output_wire_names,
quantizer, output_file)
input_wire_names = output_wire_names
return input_wire_names
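if __name__ == "__main__":
    # Illustrative run (not part of the original generator): emit two cells of a
    # toy layer to stdout. The quantizer stand-in, weights and wire widths are
    # assumed values chosen only to exercise the printer.
    import io
    from types import SimpleNamespace

    toy_quantizer = SimpleNamespace(quantized_weight_zeros=[128],
                                    quantized_output_zeros=[0])
    toy_weights = np.array([[1, 2, 3], [4, 5, 6]])
    out = io.StringIO()
    DenseCellPrinter().printCells(toy_weights, layer_index=0,
                                  quantizer=toy_quantizer,
                                  amount_of_units_in_a_layer=2,
                                  wire_widths=[4, 8, 20],
                                  input_wire_names=['index_in', 'value_in',
                                                    'result_in', 'enable_in'],
                                  output_file=out)
    print(out.getvalue())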
| 53.884615
| 116
| 0.707828
|
d3b298ff37455c0023b0de9adb43c559af64d15f
| 3,772
|
py
|
Python
|
python_experiments/cnn_model_lookback.py
|
AniruddhaDas1998/jelly-bean-world
|
c1ae093e851260c47a65794a1fab484372518a96
|
[
"Apache-2.0"
] | null | null | null |
python_experiments/cnn_model_lookback.py
|
AniruddhaDas1998/jelly-bean-world
|
c1ae093e851260c47a65794a1fab484372518a96
|
[
"Apache-2.0"
] | null | null | null |
python_experiments/cnn_model_lookback.py
|
AniruddhaDas1998/jelly-bean-world
|
c1ae093e851260c47a65794a1fab484372518a96
|
[
"Apache-2.0"
] | null | null | null |
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
# code adapted from
# https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
# model inspired from original JBW paper: https://arxiv.org/pdf/2002.06306.pdf
class DQN_Lookback(nn.Module):
def __init__(self, h, w, n_actions, lookback=1):
super(DQN_Lookback, self).__init__()
self.conv1 = nn.Conv2d(3*lookback, 16*lookback, kernel_size=3, stride=2)
# self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16*lookback, 16*lookback, kernel_size=2, stride=1)
# self.bn2 = nn.BatchNorm2d(16)
OUT_DIM = 512
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size = 3, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = (conv2d_size_out(conv2d_size_out(w, 3, 2), 2, 1))
convh = (conv2d_size_out(conv2d_size_out(h, 3, 2), 2, 1))
conv_output_size = convw * convh * 16*lookback
self.conv_head = nn.Linear(conv_output_size, OUT_DIM)
linear_input_size = 3*lookback
self.sl1 = nn.Linear(linear_input_size, 32)
self.scent_head = nn.Linear(32, OUT_DIM)
        # NOTE: these are given LSTM-like names because this model was a
        # placeholder/sanity-check used before the LSTM model was fully implemented
self.lstm = nn.Linear(
OUT_DIM+OUT_DIM,
(OUT_DIM+OUT_DIM)//4,
)
self.lstm_dense_action = nn.Linear((OUT_DIM+OUT_DIM)//4, n_actions)
self.lstm_dense_value = nn.Linear((OUT_DIM+OUT_DIM)//4, 1)
    # Called with either one element to determine the next action, or a batch
    # during optimization. Returns (action_logits, action_values, (h_out, c_out)).
def forward(self, x, lstm_inputs):
# lstm_inputs is not really used and was just a placeholder
# for when the LSTM model was implemented -- can pass in anything
# some of the variable names will also be LSTM-like but they're all
# generally Linear layers as defined above
# h_n, c_n = lstm_inputs
vision_input, scent_input, moved = x
v1 = F.gelu(self.conv1(vision_input.permute(0, 3, 1, 2)))
v2 = F.gelu(self.conv2(v1))
v_flat = v2.view((v2.size(0), -1))
vision_out = F.gelu(self.conv_head(v_flat))
s1 = F.gelu(self.sl1(scent_input))
scent_out = F.gelu(self.scent_head(s1))
concat_vs_out = torch.cat((vision_out, scent_out), dim=-1)
lstm_out = self.lstm(concat_vs_out)
h_out, c_out = lstm_out, lstm_out
action_logits = self.lstm_dense_action(h_out)
action_values = self.lstm_dense_value(h_out)
return (action_logits, action_values, (h_out, c_out))
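if __name__ == "__main__":
    # Shape sanity check (illustrative, not from the original experiments). The
    # 15x15x3 vision patch and 3-dim scent vector are assumed sizes; lstm_inputs
    # is unused by this placeholder model, so passing None is fine.
    model = DQN_Lookback(h=15, w=15, n_actions=4, lookback=1)
    vision = torch.rand(2, 15, 15, 3)   # NHWC; forward permutes it to NCHW
    scent = torch.rand(2, 3)
    moved = torch.zeros(2, 1)           # unpacked but unused by forward
    logits, values, _ = model((vision, scent, moved), None)
    print(logits.shape, values.shape)   # torch.Size([2, 4]) torch.Size([2, 1])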
| 34.290909
| 82
| 0.65562
|
9c693774e8ddca9ebe050f20abe3b9bb3d4c9148
| 5,314
|
py
|
Python
|
dbas/helper/tests/test_relation.py
|
tbsschroeder/dbas
|
9c86eccde65cd64bc5719573b3b8449d8f333e08
|
[
"MIT"
] | null | null | null |
dbas/helper/tests/test_relation.py
|
tbsschroeder/dbas
|
9c86eccde65cd64bc5719573b3b8449d8f333e08
|
[
"MIT"
] | null | null | null |
dbas/helper/tests/test_relation.py
|
tbsschroeder/dbas
|
9c86eccde65cd64bc5719573b3b8449d8f333e08
|
[
"MIT"
] | null | null | null |
import transaction
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import Argument, Premise, ClickedArgument, SeenArgument
from dbas.helper.relation import get_undermines_for_argument_uid, get_undercuts_for_argument_uid, \
get_rebuts_for_argument_uid, get_supports_for_argument_uid, set_new_undermine_or_support_for_pgroup, \
set_new_undercut, set_new_rebut, set_new_support
from dbas.tests.utils import TestCaseWithConfig
class RelationHelperTest(TestCaseWithConfig):
def tearDown(self):
for uid in [arg.uid for arg in
DBDiscussionSession.query(Argument).filter_by(author_uid=self.user_christian.uid).all()]:
DBDiscussionSession.query(ClickedArgument).filter_by(argument_uid=uid).delete()
DBDiscussionSession.query(SeenArgument).filter_by(argument_uid=uid).delete()
DBDiscussionSession.query(Argument).filter_by(author_uid=self.user_christian.uid).delete()
DBDiscussionSession.flush()
transaction.commit()
super().tearDown()
def test_get_undermines_for_argument_uid(self):
val = get_undermines_for_argument_uid('a')
self.assertIsNone(val)
val = get_undermines_for_argument_uid(0)
self.assertIsNone(val)
val = get_undermines_for_argument_uid(100)
self.assertEqual(len(val), 0)
val = get_undermines_for_argument_uid(11)
self.assertEqual(len(val), 1)
val = get_undermines_for_argument_uid('11')
self.assertEqual(len(val), 1)
def test_get_undercuts_for_argument_uid(self):
val = get_undercuts_for_argument_uid('a')
self.assertIsNone(val)
val = get_undercuts_for_argument_uid(100)
self.assertIsNone(val)
val = get_undercuts_for_argument_uid(0)
self.assertIsNone(val)
val = get_undercuts_for_argument_uid(36)
self.assertGreaterEqual(len(val), 1)
val = get_undercuts_for_argument_uid('36')
self.assertGreaterEqual(len(val), 1)
def test_get_rebuts_for_argument_uid(self):
val = get_rebuts_for_argument_uid('a')
self.assertIsNone(val)
val = get_rebuts_for_argument_uid(0)
self.assertIsNone(val)
val = get_rebuts_for_argument_uid(100)
self.assertIsNone(val)
val = get_rebuts_for_argument_uid(62)
self.assertEqual(len(val), 2)
val = get_rebuts_for_argument_uid('62')
self.assertEqual(len(val), 2)
def test_get_supports_for_argument_uid(self):
val = get_supports_for_argument_uid('a')
self.assertIsNone(val)
val = get_supports_for_argument_uid(0)
self.assertIsNone(val)
val = get_supports_for_argument_uid(100)
self.assertEqual(len(val), 0)
val = get_supports_for_argument_uid(3)
self.assertEqual(len(val), 1)
val = get_supports_for_argument_uid('3')
self.assertEqual(len(val), 1)
def test_set_new_undermine_or_support_for_pgroup(self):
db_argument = DBDiscussionSession.query(Argument).get(1)
db_premise = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=db_argument.premisegroup_uid).first()
before = DBDiscussionSession.query(Argument).filter(Argument.premisegroup_uid == 1,
Argument.conclusion_uid == db_premise.statement_uid).all()
set_new_undermine_or_support_for_pgroup(1, db_argument, False, self.user_christian, self.issue_cat_or_dog)
after = DBDiscussionSession.query(Argument).filter(Argument.premisegroup_uid == 1,
Argument.conclusion_uid == db_premise.statement_uid).all()
self.assertEqual(len(before), len(after))
def test_set_new_undercut(self):
db_argument = DBDiscussionSession.query(Argument).get(1)
before = DBDiscussionSession.query(Argument).filter_by(argument_uid=1).all()
set_new_undercut(1, db_argument, self.user_christian, self.issue_cat_or_dog)
after = DBDiscussionSession.query(Argument).filter_by(argument_uid=1).all()
self.assertLess(len(before), len(after))
def test_set_new_rebut(self):
db_argument = DBDiscussionSession.query(Argument).get(1)
before = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=db_argument.conclusion_uid).all()
set_new_rebut(1, db_argument, self.user_christian, self.issue_cat_or_dog)
after = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=db_argument.conclusion_uid).all()
self.assertLess(len(before), len(after))
def test_set_new_support(self):
db_argument = DBDiscussionSession.query(Argument).get(1)
before = DBDiscussionSession.query(Argument).filter(Argument.premisegroup_uid == 1,
Argument.conclusion_uid == db_argument.conclusion_uid).all()
set_new_support(1, db_argument, self.user_christian, self.issue_cat_or_dog)
after = DBDiscussionSession.query(Argument).filter(Argument.premisegroup_uid == 1,
Argument.conclusion_uid == db_argument.conclusion_uid).all()
self.assertLess(len(before), len(after))
| 43.557377
| 120
| 0.695898
|
d7f9d21085897795b247f1d610170d15a7a39799
| 2,126
|
py
|
Python
|
setup.py
|
jcaberio/Murmur
|
ceb93292cde7ca431b4d850f50126fa5e69e433e
|
[
"MIT"
] | null | null | null |
setup.py
|
jcaberio/Murmur
|
ceb93292cde7ca431b4d850f50126fa5e69e433e
|
[
"MIT"
] | null | null | null |
setup.py
|
jcaberio/Murmur
|
ceb93292cde7ca431b4d850f50126fa5e69e433e
|
[
"MIT"
] | 2
|
2017-05-21T17:00:29.000Z
|
2019-01-29T15:06:47.000Z
|
# -*- coding: utf-8 -*-
"""
Murmur Hash Library
===================
Murmur Hash Library is a simple c level implementation developed for
high speed hashing of in memory strings, on disk files, and the contents
of zip files.
As the name implies the hashes are generated via an implementation of
`MurmurHash 2.0`_.
A few quick NOTES and WARNINGS:
The implementation of MurMur that is used in this code makes the
following assumptions about your machine:
1. A 4-byte value can be read from any address without crashing
2. sizeof(int) == 4
It will also not produce the same results on little-endian and big-endian
machines.
I believe it would be easily possible to get around these limitations.
.. _MurmurHash 2.0: http://murmurhash.googlepages.com/
"""
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, Extension
setup(
name='MurmurV3',
version='0.3',
license='MIT',
author='Jorick Caberio, Bryan McLemore',
author_email='jorick.caberio@voyagerinnovation, kaelten@gmail.com',
    description='Provides fast murmur hashes for strings, files, and zipped files in Python 3.',
    url='https://github.com/jcaberio/MurmurV3',
    download_url='https://github.com/jcaberio/MurmurV3/tarball/0.5',
zip_safe=False, # I'm not sure if it is egg safe so I'm erring on the side of caution.
long_description=__doc__,
ext_modules = [
Extension('murmur',
[ "murmur/murmur.cpp",
"murmur/unzip.c",
"murmur/ioapi.c",
],
depends=[
"murmur/include/crypt.h",
"murmur/include/ioapi.h",
"murmur/include/unzip.h",
],
include_dirs=['murmur/include'],
libraries=['z'],
language='c++')
],
platforms='any',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: C++',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=[]
)
| 29.123288
| 95
| 0.682973
|
3cb24255d1569c254e4e912b700c6012c55fe142
| 81,690
|
py
|
Python
|
tensorflow/python/keras/engine/training_test.py
|
Mainframed69/tensorflow
|
f8b5c95a7882efd58b123aeb308dfb173658a9e6
|
[
"Apache-2.0"
] | 1
|
2019-07-22T02:37:51.000Z
|
2019-07-22T02:37:51.000Z
|
tensorflow/python/keras/engine/training_test.py
|
Mainframed69/tensorflow
|
f8b5c95a7882efd58b123aeb308dfb173658a9e6
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/engine/training_test.py
|
Mainframed69/tensorflow
|
f8b5c95a7882efd58b123aeb308dfb173658a9e6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import logging
import sys
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.keras.engine.training_utils import weighted_masked_objective
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.rmsprop import RMSPropOptimizer
try:
import scipy.sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
class TrainingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_fit_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test fit at different verbosity
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
# Test model with input data as a list of lists
model.fit(
[np.ndarray.tolist(input_a_np), np.ndarray.tolist(input_b_np)],
[output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=2)
# Test with validation data
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=1,
batch_size=5,
verbose=0)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=1)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
validation_data=([input_a_np, input_b_np], [output_d_np,
output_e_np]),
epochs=2,
batch_size=5,
verbose=2)
# Test with validation split
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=2,
batch_size=5,
verbose=0,
validation_split=0.2)
# Test with dictionary inputs
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
epochs=1,
batch_size=5,
verbose=0)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
epochs=1,
batch_size=5,
verbose=1)
model.fit(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
validation_data=({
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
}),
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
})
# Test with lists for loss, metrics
loss = ['mae', 'mse']
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'])
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Test with dictionaries for loss, metrics, loss weights
loss = {'dense': 'mse', 'dropout': 'mae'}
loss_weights = {'dense': 1., 'dropout': 0.5}
metrics = {
'dense': 'mse',
'dropout': metrics_module.CategoricalAccuracy()
}
model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5,
verbose=0)
# Invalid use cases
with self.assertRaises(ValueError):
model.train_on_batch({'input_a': input_a_np},
[output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
validation_data=([input_a_np, input_b_np], 0, 0),
verbose=0)
with self.assertRaises(ValueError):
model.train_on_batch([input_a_np], [output_d_np, output_e_np])
with self.assertRaises(AttributeError):
model.train_on_batch(1, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
model.train_on_batch(input_a_np, [output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_input = np.random.random((11, 3))
model.train_on_batch([bad_input, input_b_np],
[output_d_np, output_e_np])
with self.assertRaises(ValueError):
bad_target = np.random.random((11, 4))
model.train_on_batch([input_a_np, input_b_np],
[bad_target, output_e_np])
# Build single-input model
x = keras.layers.Input(shape=(3,), name='input_a')
y = keras.layers.Dense(4)(x)
model = keras.models.Model(x, y)
model.compile(optimizer, loss='mse')
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
with self.assertRaises(ValueError):
model.fit([input_a_np, input_a_np], output_d_np, epochs=1)
# Test model on a list of floats
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 4))
model.fit([np.ndarray.tolist(input_a_np)],
[np.ndarray.tolist(input_b_np)],
epochs=2,
batch_size=5,
verbose=2)
@tf_test_util.run_in_graph_and_eager_modes
def test_evaluate_predict_on_arrays(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
sample_weight_mode=None)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
# Test evaluate at different verbosity
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=0)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=1)
self.assertEqual(len(out), 7)
out = model.evaluate(
[input_a_np, input_b_np], [output_d_np, output_e_np],
batch_size=5,
verbose=2)
self.assertEqual(len(out), 7)
out = model.test_on_batch([input_a_np, input_b_np],
[output_d_np, output_e_np])
self.assertEqual(len(out), 7)
# Test evaluate with dictionary inputs
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
batch_size=5,
verbose=0)
model.evaluate(
{
'input_a': input_a_np,
'input_b': input_b_np
}, {
'dense': output_d_np,
'dropout': output_e_np
},
batch_size=5,
verbose=1)
# Test predict
out = model.predict([input_a_np, input_b_np], batch_size=5)
self.assertEqual(len(out), 2)
out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
self.assertEqual(len(out), 2)
out = model.predict_on_batch({
'input_a': input_a_np,
'input_b': input_b_np
})
self.assertEqual(len(out), 2)
@tf_test_util.run_in_graph_and_eager_modes
def test_activity_regularizer_fit(self):
loss = {}
for reg in [None, 'l2']:
inputs = keras.layers.Input(shape=(10,))
x = keras.layers.Dense(
10, activation='relu', activity_regularizer=reg,
kernel_initializer='ones', use_bias=False)(inputs)
outputs = keras.layers.Dense(1, activation='sigmoid',
kernel_initializer='ones', use_bias=False)(x)
model = keras.Model(inputs, outputs)
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy')
model.fit(x, y, batch_size=2, epochs=5)
loss[reg] = model.evaluate(x, y)
self.assertLess(loss[None], loss['l2'])
@tf_test_util.run_in_graph_and_eager_modes
def test_activity_regularizer_loss_value(self):
inputs = keras.layers.Input(shape=(10,))
outputs = keras.layers.Dense(
1,
kernel_initializer=keras.initializers.zeros(),
bias_initializer=keras.initializers.ones(),
activity_regularizer='l2')(
inputs)
model = keras.Model(inputs, outputs)
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy')
loss = model.test_on_batch(x, y)
self.assertAlmostEqual(0.01, loss, places=4)
@tf_test_util.run_in_graph_and_eager_modes
def test_activity_regularizer_batch_independent(self):
inputs = keras.layers.Input(shape=(10,))
x = keras.layers.Dense(
10, activation='relu', activity_regularizer='l2')(
inputs)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, 'binary_crossentropy')
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
loss_small_batch = model.test_on_batch(x, y)
x2 = np.ones((20, 10), 'float32')
y2 = np.ones((20, 1), 'float32')
loss_big_batch = model.test_on_batch(x2, y2)
self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)
@tf_test_util.run_in_graph_and_eager_modes
def test_activity_regularizer_in_model_call(self):
class MyModel(keras.Model):
def call(self, inputs):
self.add_loss(inputs)
return inputs
x = ops.convert_to_tensor(1.)
model = MyModel()
_ = model(x)
self.assertEqual(1, len(model.losses))
def test_training_on_sparse_data_with_dense_placeholders(self):
if scipy_sparse is None:
return
with self.cached_session():
test_inputs = [
scipy_sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)
]
test_outputs = [
scipy_sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)
]
in1 = keras.layers.Input(shape=(3,))
in2 = keras.layers.Input(shape=(3,))
out1 = keras.layers.Dropout(0.5, name='dropout')(in1)
out2 = keras.layers.Dense(4, name='dense_1')(in2)
model = keras.Model([in1, in2], [out1, out2])
model.predict(test_inputs, batch_size=2)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(
optimizer,
'mse',
metrics=['mae', metrics_module.CategoricalAccuracy()])
model.fit(test_inputs, test_outputs,
epochs=1, batch_size=2, validation_split=0.5)
model.evaluate(test_inputs, test_outputs, batch_size=2)
def test_compile_with_sparse_placeholders(self):
with self.cached_session():
input_layer = keras.layers.Input(shape=(10,), sparse=True)
weights = variables_lib.Variable(
np.ones((10, 1)).astype(np.float32), name='weights')
weights_mult = lambda x: sparse_ops.sparse_tensor_dense_matmul(x, weights)
output_layer = keras.layers.Lambda(weights_mult)(input_layer)
model = keras.Model([input_layer], output_layer)
model.compile(
loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(lr=0.0001),
metrics=['accuracy'])
def test_that_trainable_disables_updates(self):
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
with self.cached_session():
a = keras.layers.Input(shape=(4,))
layer = keras.layers.BatchNormalization(input_shape=(4,))
b = layer(a)
model = keras.Model(a, b)
model.trainable = False
assert not model.updates
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile('sgd', 'mse')
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
layer.trainable = False
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
def test_logs_passed_to_callbacks(self):
with self.cached_session():
input_dim = 5
num_classes = 1
class TestCallback(Callback):
def __init__(self):
super(TestCallback, self).__init__()
self.epoch_end_logs = None
self.batch_end_logs = None
self.epoch_end_call_count = 0
self.batch_end_call_count = 0
def on_epoch_end(self, epoch, logs=None):
self.epoch_end_logs = logs
self.epoch_end_call_count += 1
def on_batch_end(self, batch, logs=None):
self.batch_end_logs = logs
self.batch_end_call_count += 1
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
loss='binary_crossentropy',
metrics=['acc'],
weighted_metrics=['mae'],
optimizer=RMSPropOptimizer(learning_rate=0.01))
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(input_dim,),
num_classes=num_classes)
test_callback = TestCallback()
model.fit(
x_train,
y_train,
batch_size=2,
epochs=2,
verbose=0,
callbacks=[test_callback],
validation_data=(x_train, y_train))
self.assertEqual(test_callback.batch_end_call_count, 10)
self.assertEqual(test_callback.epoch_end_call_count, 2)
self.assertSetEqual(
set(test_callback.batch_end_logs.keys()),
set(['batch', 'size', 'acc', 'loss', 'weighted_mean_absolute_error']))
self.assertSetEqual(
set(test_callback.epoch_end_logs.keys()),
set([
'acc', 'loss', 'weighted_mean_absolute_error', 'val_acc',
'val_loss', 'val_weighted_mean_absolute_error'
]))
class TestExceptionsAndWarnings(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_invalid_loss(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
optimizer = RMSPropOptimizer(learning_rate=0.001)
model.compile(optimizer, loss='categorical_crossentropy')
np.random.seed(1337)
(x_train, y_train), (_, _) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
with self.assertRaises(ValueError):
model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
if not context.executing_eagerly():
# TODO(psv): Investigate these use cases in eager mode.
with self.assertRaises(ValueError):
model.fit(x_train, y_train)
with self.assertRaises(ValueError):
model.compile(optimizer, loss=None)
@tf_test_util.run_in_graph_and_eager_modes
def test_compile_warning_for_loss_missing_output(self):
with self.cached_session():
inp = keras.layers.Input(shape=(16,), name='input_a')
out_1 = keras.layers.Dense(8, name='dense_1')(inp)
out_2 = keras.layers.Dense(3, activation='softmax', name='dense_2')(out_1)
model = keras.models.Model(inputs=[inp], outputs=[out_1, out_2])
optimizer = RMSPropOptimizer(learning_rate=0.001)
with test.mock.patch.object(logging, 'warning') as mock_log:
model.compile(
optimizer,
loss={
'dense_2': 'categorical_crossentropy',
},
metrics={
'dense_2': 'categorical_accuracy',
'dense_1': metrics_module.CategoricalAccuracy(),
})
msg = ('Output "dense_1" missing from loss dictionary. We assume this '
'was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to "dense_1".')
self.assertRegexpMatches(str(mock_log.call_args), msg)
class LossWeightingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_class_weights(self):
num_classes = 5
batch_size = 5
epochs = 5
weighted_class = 3
train_samples = 1000
test_samples = 1000
input_dim = 5
learning_rate = 0.001
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
loss='categorical_crossentropy',
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
optimizer=RMSPropOptimizer(learning_rate=learning_rate))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
class_weight = dict([(i, 1.) for i in range(num_classes)])
class_weight[weighted_class] = 2.
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 2.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
class_weight=class_weight,
validation_data=(x_train, y_train, sample_weight))
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 2,
verbose=0,
class_weight=class_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
ref_score = model.evaluate(x_test, y_test, verbose=0)
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score[0], ref_score[0])
@tf_test_util.run_in_graph_and_eager_modes
def test_sample_weights(self):
num_classes = 5
batch_size = 5
epochs = 5
weighted_class = 3
train_samples = 1000
test_samples = 1000
input_dim = 5
learning_rate = 0.001
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
model.compile(
RMSPropOptimizer(learning_rate=learning_rate),
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
loss='categorical_crossentropy')
np.random.seed(43)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 2.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=sample_weight)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=sample_weight,
validation_split=0.1)
model.train_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
model.test_on_batch(
x_train[:batch_size],
y_train[:batch_size],
sample_weight=sample_weight[:batch_size])
ref_score = model.evaluate(x_test, y_test, verbose=0)
if not context.executing_eagerly():
score = model.evaluate(
x_test[test_ids, :], y_test[test_ids, :], verbose=0)
self.assertLess(score[0], ref_score[0])
@tf_test_util.run_in_graph_and_eager_modes
def test_warning_for_concurrent_sample_and_class_weights(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, input_shape=(3,)))
model.compile(
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.01))
x_train = np.random.random((10, 3))
y_train = np.random.random((10, 10))
sample_weight = np.ones((y_train.shape[0]))
class_weight = {0: 1., 1: 1.}
with test.mock.patch.object(logging, 'warning') as mock_log:
model.fit(
x_train,
y_train,
epochs=1,
verbose=0,
sample_weight=sample_weight,
class_weight=class_weight)
msg = ('The `class_weight` argument will be ignored.')
self.assertRegexpMatches(str(mock_log.call_args), msg)
@tf_test_util.run_in_graph_and_eager_modes
def test_temporal_sample_weights(self):
num_classes = 5
batch_size = 5
epochs = 5
weighted_class = 3
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
int_y_test = y_test.copy()
int_y_train = y_train.copy()
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
sample_weight = np.ones((y_train.shape[0]))
sample_weight[int_y_train == weighted_class] = 2.
temporal_x_train = np.reshape(x_train, (len(x_train), 1,
x_train.shape[1]))
temporal_x_train = np.repeat(temporal_x_train, timesteps, axis=1)
temporal_x_test = np.reshape(x_test, (len(x_test), 1, x_test.shape[1]))
temporal_x_test = np.repeat(temporal_x_test, timesteps, axis=1)
temporal_y_train = np.reshape(y_train, (len(y_train), 1,
y_train.shape[1]))
temporal_y_train = np.repeat(temporal_y_train, timesteps, axis=1)
temporal_y_test = np.reshape(y_test, (len(y_test), 1, y_test.shape[1]))
temporal_y_test = np.repeat(temporal_y_test, timesteps, axis=1)
temporal_sample_weight = np.reshape(sample_weight, (len(sample_weight),
1))
temporal_sample_weight = np.repeat(
temporal_sample_weight, timesteps, axis=1)
model.compile(
RMSPropOptimizer(learning_rate=learning_rate),
loss='binary_crossentropy',
metrics=['acc', metrics_module.CategoricalAccuracy()],
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
sample_weight_mode='temporal')
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight)
model.fit(
temporal_x_train,
temporal_y_train,
batch_size=batch_size,
epochs=epochs // 3,
verbose=0,
sample_weight=temporal_sample_weight,
validation_split=0.1)
model.train_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size])
model.test_on_batch(
temporal_x_train[:batch_size],
temporal_y_train[:batch_size],
sample_weight=temporal_sample_weight[:batch_size])
ref_score = model.evaluate(temporal_x_test, temporal_y_test, verbose=0)
if not context.executing_eagerly():
score = model.evaluate(
temporal_x_test[test_ids], temporal_y_test[test_ids], verbose=0)
self.assertLess(score[0], ref_score[0])
@tf_test_util.run_in_graph_and_eager_modes
def test_class_weight_invalid_use_case(self):
num_classes = 5
train_samples = 1000
test_samples = 1000
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
model.add(keras.layers.Activation('softmax'))
optimizer = RMSPropOptimizer(learning_rate=learning_rate)
model.compile(optimizer, loss='binary_crossentropy')
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
test_samples=test_samples,
input_shape=(input_dim,),
num_classes=num_classes)
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
class_weight = dict([(i, 1.) for i in range(num_classes)])
del class_weight[1]
with self.assertRaises(ValueError):
model.fit(x_train, y_train,
epochs=0, verbose=0, class_weight=class_weight)
with self.assertRaises(ValueError):
model.compile(
optimizer, loss='binary_crossentropy', sample_weight_mode=[])
# Build multi-output model
x = keras.Input((3,))
y1 = keras.layers.Dense(4, name='1')(x)
y2 = keras.layers.Dense(4, name='2')(x)
model = keras.models.Model(x, [y1, y2])
model.compile(optimizer, loss='mse')
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
# This will work
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': w_np})
# These will not
with self.assertRaises(ValueError):
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight=[w_np])
with self.assertRaises(TypeError):
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight=w_np)
with self.assertRaises(ValueError):
bad_w_np = np.random.random((11,))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
with self.assertRaises(ValueError):
bad_w_np = np.random.random((10, 2, 2))
model.fit(x_np, [y_np, y_np], epochs=1,
sample_weight={'1': bad_w_np})
@tf_test_util.run_in_graph_and_eager_modes
def test_default_sample_weight(self):
"""Verifies that fit works without having to set sample_weight."""
num_classes = 5
input_dim = 5
timesteps = 3
learning_rate = 0.001
with self.cached_session():
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(num_classes),
input_shape=(timesteps, input_dim)))
x = np.random.random((10, timesteps, input_dim))
y = np.random.random((10, timesteps, num_classes))
optimizer = RMSPropOptimizer(learning_rate=learning_rate)
# sample_weight_mode is a list and mode value is None
model.compile(optimizer, loss='mse', sample_weight_mode=[None])
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a list and mode value is `temporal`
model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is None
model.compile(
optimizer, loss='mse', sample_weight_mode={'time_distributed': None})
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is `temporal`
model.compile(
optimizer,
loss='mse',
sample_weight_mode={'time_distributed': 'temporal'})
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a not a list/dict and mode value is None
model.compile(optimizer, loss='mse', sample_weight_mode=None)
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a not a list/dict and mode value is `temporal`
model.compile(optimizer, loss='mse', sample_weight_mode='temporal')
model.fit(x, y, epochs=1, batch_size=10)
class LossMaskingTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_masking_graph_sequential(self):
with self.cached_session():
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(float(loss), 0.)
@tf_test_util.run_in_graph_and_eager_modes
def test_masking_deferred_sequential(self):
with self.cached_session():
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(float(loss), 0.)
@tf_test_util.run_in_graph_and_eager_modes
def test_masking_functional(self):
with self.cached_session():
x = np.array([[[1], [1]], [[0], [0]]])
inputs = keras.layers.Input((2, 1))
outputs = keras.layers.Masking(mask_value=0)(inputs)
outputs = keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one'))(outputs)
model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(float(loss), 0.)
@tf_test_util.run_in_graph_and_eager_modes
def test_mask_argument_in_layer(self):
# Test that the mask argument gets correctly passed to a layer in the
# functional API.
class CustomMaskedLayer(keras.layers.Layer):
def __init__(self):
super(CustomMaskedLayer, self).__init__()
self.supports_masking = True
def call(self, inputs, mask=None):
assert mask is not None
return inputs
def compute_output_shape(self, input_shape):
return input_shape
with self.cached_session():
x = np.random.random((5, 3))
inputs = keras.layers.Input((3,))
masked = keras.layers.Masking(mask_value=0)(inputs)
outputs = CustomMaskedLayer()(masked)
model = keras.Model(inputs, outputs)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
y = np.random.random((5, 3))
model.train_on_batch(x, y)
def test_loss_masking(self):
with self.cached_session():
weighted_loss = weighted_masked_objective(keras.losses.get('mae'))
shape = (3, 4, 2)
x = np.arange(24).reshape(shape)
y = 2 * x
# Normally the trailing 1 is added by standardize_weights
weights = np.ones((3,))
mask = np.ones((3, 4))
mask[1, 0] = 0
keras.backend.eval(
weighted_loss(
keras.backend.variable(x),
keras.backend.variable(y),
keras.backend.variable(weights), keras.backend.variable(mask)))
class TestDynamicTrainability(test.TestCase):
def test_trainable_warning(self):
with self.cached_session():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=3))
model.trainable = False
model.compile('rmsprop', 'mse')
model.trainable = True
model.train_on_batch(x, y)
self.assertRaises(Warning)
def test_trainable_argument(self):
with self.cached_session():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=3, trainable=False))
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
# test with nesting
inputs = keras.layers.Input(shape=(3,))
output = model(inputs)
model = keras.models.Model(inputs, output)
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
self.assertAllClose(out, out_2)
def test_layer_trainability_switch(self):
with self.cached_session():
# with constructor argument, in Sequential
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, trainable=False, input_dim=1))
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Sequential
model = keras.models.Sequential()
layer = keras.layers.Dense(2, input_dim=1)
model.add(layer)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
# with constructor argument, in Model
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2, trainable=False)(x)
model = keras.models.Model(x, y)
self.assertListEqual(model.trainable_weights, [])
# by setting the `trainable` argument, in Model
x = keras.layers.Input(shape=(1,))
layer = keras.layers.Dense(2)
y = layer(x)
model = keras.models.Model(x, y)
self.assertListEqual(model.trainable_weights, layer.trainable_weights)
layer.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_model_trainability_switch(self):
with self.cached_session():
# a non-trainable model has no trainable weights
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
# same for Sequential
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_dim=1))
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
def test_nested_model_trainability(self):
with self.cached_session():
# a Sequential inside a Model
inner_model = keras.models.Sequential()
inner_model.add(keras.layers.Dense(2, input_dim=1))
x = keras.layers.Input(shape=(1,))
y = inner_model(x)
outer_model = keras.models.Model(x, y)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Sequential inside a Sequential
inner_model = keras.models.Sequential()
inner_model.add(keras.layers.Dense(2, input_dim=1))
outer_model = keras.models.Sequential()
outer_model.add(inner_model)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Model
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
inner_model = keras.models.Model(x, y)
x = keras.layers.Input(shape=(1,))
y = inner_model(x)
outer_model = keras.models.Model(x, y)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
# a Model inside a Sequential
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(2)(x)
inner_model = keras.models.Model(x, y)
outer_model = keras.models.Sequential()
outer_model.add(inner_model)
self.assertListEqual(outer_model.trainable_weights,
inner_model.trainable_weights)
inner_model.trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
inner_model.trainable = True
inner_model.layers[-1].trainable = False
self.assertListEqual(outer_model.trainable_weights, [])
class TestTrainingWithDataTensors(test.TestCase):
def test_training_and_eval_methods_on_symbolic_tensors_single_io(self):
with self.cached_session():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()])
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(inputs, targets,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=(inputs, targets), validation_steps=2)
# Test with dynamic shape
inputs = array_ops.placeholder_with_default(
np.zeros((2, 3)), shape=tensor_shape.TensorShape([None, 3]))
targets = array_ops.placeholder_with_default(
np.zeros((2, 4)), shape=tensor_shape.TensorShape([None, 4]))
self.assertEqual(inputs.shape.dims[0].value, None)
model.fit(inputs, targets, epochs=1, steps_per_epoch=2, verbose=0)
model.evaluate(inputs, targets, steps=2, verbose=0)
model.predict(inputs, steps=2)
model.train_on_batch(inputs, targets)
model.test_on_batch(inputs, targets)
model.fit(inputs, targets,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=(inputs, targets), validation_steps=2)
def test_training_and_eval_methods_on_symbolic_tensors_multi_io(self):
with self.cached_session():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights)
input_a_tf = keras.backend.zeros(shape=(10, 3))
input_b_tf = keras.backend.zeros(shape=(10, 3))
output_d_tf = keras.backend.zeros(shape=(10, 4))
output_e_tf = keras.backend.zeros(shape=(10, 4))
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=1,
steps_per_epoch=2,
verbose=0)
with self.assertRaisesRegexp(ValueError,
'should specify the `steps_per_epoch`'):
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=1,
batch_size=5,
verbose=0)
model.train_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])
# Test with dictionary inputs
model.fit(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf},
epochs=1,
steps_per_epoch=2,
verbose=0)
model.fit(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf},
validation_data=({'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf}),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0)
model.train_on_batch(
{'input_a': input_a_tf,
'input_b': input_b_tf},
{'dense': output_d_tf,
'dropout': output_e_tf})
# Test with validation data
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
validation_data=([input_a_tf, input_b_tf],
[output_d_tf, output_e_tf]),
epochs=1,
steps_per_epoch=2,
validation_steps=2,
verbose=0)
# Test with validation split
with self.assertRaisesRegexp(ValueError,
'you cannot use `validation_split`'):
model.fit(
[input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
epochs=2,
steps_per_epoch=2,
verbose=0,
validation_split=0.2,
validation_steps=2)
# Test evaluation / prediction methods
model.evaluate([input_a_tf, input_b_tf], [output_d_tf, output_e_tf],
steps=2, verbose=0)
model.predict([input_a_tf, input_b_tf], steps=2)
model.test_on_batch([input_a_tf, input_b_tf], [output_d_tf, output_e_tf])
def test_model_with_input_feed_tensor(self):
"""We test building a model with a TF variable as input.
We should be able to call fit, evaluate, predict,
by only passing them data for the placeholder inputs
in the model.
"""
with self.cached_session():
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
input_v = keras.backend.variables_module.Variable(
input_a_np, dtype='float32')
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
b = keras.Input(shape=(3,), name='input_b')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
model = keras.models.Model([a, b], [a_2, b_2])
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=['mean_squared_error'],
loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch(input_b_np,
[output_a_np, output_b_np])
out = model.train_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.predict_on_batch({'input_b': input_b_np})
# test fit
out = model.fit({'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=10)
out = model.fit(input_b_np,
[output_a_np, output_b_np], epochs=1, batch_size=10)
# test evaluate
out = model.evaluate({'input_b': input_b_np},
[output_a_np, output_b_np], batch_size=10)
out = model.evaluate(input_b_np,
[output_a_np, output_b_np], batch_size=10)
# test predict
out = model.predict({'input_b': input_b_np}, batch_size=10)
out = model.predict(input_b_np, batch_size=10)
self.assertEqual(len(out), 2)
# Now test a model with a single input
# i.e. we don't pass any data to fit the model.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)
model = keras.models.Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=3)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=3)
_ = model.evaluate(None, output_a_np, steps=3)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
# Same, without learning phase
# i.e. we don't pass any data to fit the model.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
model = keras.models.Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
_ = model.fit(None, output_a_np, epochs=1, steps_per_epoch=10)
# test evaluate
_ = model.evaluate(None, output_a_np, steps=10)
_ = model.evaluate(None, output_a_np, steps=10)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
def test_model_with_partial_loss(self):
with self.cached_session():
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dropout': 'mse'}
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, [output_a_np])
# evaluate
_ = model.evaluate(input_a_np, [output_a_np])
# Same without dropout.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_3 = keras.layers.Dense(4, name='dense_2')(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dense_2': 'mse'}
model.compile(optimizer, loss, metrics={'dense_1': 'mae'})
# test train_on_batch
_ = model.train_on_batch(input_a_np, output_a_np)
_ = model.test_on_batch(input_a_np, output_a_np)
# fit
_ = model.fit(input_a_np, [output_a_np])
# evaluate
_ = model.evaluate(input_a_np, [output_a_np])
def test_model_with_external_loss(self):
with self.cached_session():
# None loss, only regularization loss.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1',
kernel_regularizer='l1',
bias_regularizer='l2')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = keras.models.Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# No dropout, external loss.
a = keras.Input(shape=(3,), name='input_a')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_3 = keras.layers.Dense(4, name='dense_2')(a)
model = keras.models.Model(a, [a_2, a_3])
model.add_loss(keras.backend.mean(a_3 + a_2))
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# Test model with no external data at all.
input_v = keras.backend.variables_module.Variable(
input_a_np, dtype='float32')
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_2 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_2)
model = keras.models.Model(a, a_2)
model.add_loss(keras.backend.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None, epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with self.assertRaises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with self.assertRaises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
self.assertEqual(out.shape, (10 * 3, 4))
# Test multi-output model with no external data at all.
self.evaluate(variables_lib.variables_initializer([input_v]))
a = keras.Input(tensor=input_v)
a_1 = keras.layers.Dense(4, name='dense_1')(a)
a_2 = keras.layers.Dropout(0.5, name='dropout')(a_1)
model = keras.models.Model(a, [a_1, a_2])
model.add_loss(keras.backend.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with self.assertRaises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
out = model.fit(None, None, epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with self.assertRaises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with self.assertRaises(ValueError):
out = model.predict(None, batch_size=10, verbose=1)
out = model.predict(None, steps=3)
self.assertEqual(len(out), 2)
self.assertEqual(out[0].shape, (10 * 3, 4))
self.assertEqual(out[1].shape, (10 * 3, 4))
def test_target_tensors(self):
with self.cached_session():
# single-output, as list
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))
input_val = np.random.random((10, 4))
target_val = np.random.random((10, 4))
target = keras.backend.variable(target_val)
model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
model.train_on_batch(input_val, None)
# single-output, as single tensor
model.compile(optimizer='rmsprop', loss='mse', target_tensors=target)
model.train_on_batch(input_val, None)
# single-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense': target})
model.train_on_batch(input_val, None)
# test invalid arguments
with self.assertRaises(TypeError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=set())
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target, target])
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense2': None})
with self.assertRaises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target])
model.train_on_batch(input_val, target_val)
# multi-output, as list
input_val = np.random.random((10, 4))
target_val_a = np.random.random((10, 4))
target_val_b = np.random.random((10, 4))
target_a = keras.backend.variable(target_val_a)
target_b = keras.backend.variable(target_val_b)
inputs = keras.layers.Input(shape=(4,))
output_a = keras.layers.Dense(4, name='dense_a')(inputs)
output_b = keras.layers.Dense(4, name='dense_b')(inputs)
model = keras.models.Model(inputs, [output_a, output_b])
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None)
# multi-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_a': target_a,
'dense_b': target_b})
model.train_on_batch(input_val, None)
# test with sample weights
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None,
sample_weight={'dense_a': np.random.random((10,))})
def test_model_custom_target_tensors(self):
with self.cached_session():
a = keras.Input(shape=(3,), name='input_a')
b = keras.Input(shape=(3,), name='input_b')
a_2 = keras.layers.Dense(4, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
y = keras.backend.placeholder([10, 4], name='y')
y1 = keras.backend.placeholder([10, 3], name='y1')
y2 = keras.backend.placeholder([7, 5], name='y2')
model = keras.models.Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
# test list of target tensors
with self.assertRaises(ValueError):
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1, y2])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
_ = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
{y: np.random.random((10, 4)),
y1: np.random.random((10, 3))})
# test dictionary of target_tensors
with self.assertRaises(ValueError):
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'does_not_exist': y2})
# test dictionary of target_tensors
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'dense_1': y, 'dropout': y1})
_ = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
{y: np.random.random((10, 4)),
y1: np.random.random((10, 3))})
# test with custom TF placeholder as target
pl_target_a = keras.backend.array_ops.placeholder('float32',
shape=(None, 4))
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_1': pl_target_a})
model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
class TestTrainingWithDatasetIterators(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_training_and_eval_methods_on_iterators_single_io(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(iterator, steps=2, verbose=1)
model.predict(iterator, steps=2)
model.train_on_batch(iterator)
model.test_on_batch(iterator)
model.predict_on_batch(iterator)
# Test with validation data
model.fit(iterator,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=iterator, validation_steps=2)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(iterator,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(
iterator,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test invalid usage
with self.assertRaisesRegexp(ValueError,
'you should not specify a target'):
model.fit(iterator, iterator,
epochs=1, steps_per_epoch=2, verbose=0)
with self.assertRaisesRegexp(
ValueError, 'you should specify the `steps_per_epoch` argument'):
model.fit(iterator, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.evaluate(iterator, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.predict(iterator, verbose=0)
@tf_test_util.run_in_graph_and_eager_modes
def test_get_next_op_created_once(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
# Finalize graph to make sure we are not appending another iterator
# get_next op in the graph.
ops.get_default_graph().finalize()
model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
@tf_test_util.run_in_graph_and_eager_modes
def test_iterators_running_out_of_data(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(2)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
with test.mock.patch.object(logging, 'warning') as mock_log:
model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
self.assertRegexpMatches(
str(mock_log.call_args),
'dataset iterator ran out of data')
class TestTrainingWithDataset(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_calling_model_on_same_dataset(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
# Finalize the graph to make sure new ops aren't added when calling on the
# same dataset
ops.get_default_graph().finalize()
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
@tf_test_util.run_in_graph_and_eager_modes
def test_training_and_eval_methods_on_dataset(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
model.train_on_batch(dataset)
model.predict_on_batch(dataset)
# Test with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test invalid usage
with self.assertRaisesRegexp(ValueError,
'you should not specify a target'):
model.fit(dataset, dataset,
epochs=1, steps_per_epoch=2, verbose=0)
with self.assertRaisesRegexp(
ValueError, 'you should specify the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.predict(dataset, verbose=0)
@tf_test_util.run_in_graph_and_eager_modes
def test_dataset_with_sample_weights(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
model.train_on_batch(dataset)
model.predict_on_batch(dataset)
@tf_test_util.run_in_graph_and_eager_modes
def test_dataset_with_sparse_labels(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'sparse_categorical_crossentropy'
model.compile(optimizer, loss)
inputs = np.zeros((10, 3))
targets = np.random.randint(0, 4, size=10, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
def test_dataset_input_shape_validation(self):
with self.cached_session():
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
# User forgets to batch the dataset
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(
ValueError,
r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
):
model.train_on_batch(dataset)
# Wrong input shape
inputs = np.zeros((10, 5))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
r'expected (.*?) to have shape \(3,\)'):
model.train_on_batch(dataset)
class TestTrainingWithMetrics(test.TestCase):
"""Training tests related to metrics."""
@tf_test_util.run_in_graph_and_eager_modes
def test_metrics_names(self):
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
optimizer = RMSPropOptimizer(learning_rate=0.001)
metrics = ['mse', metrics_module.BinaryAccuracy()]
model.compile(optimizer, loss='mae', metrics=metrics)
reference_metric_names = [
'loss', 'dense_loss', 'dropout_loss', 'dense_mean_squared_error',
'dense_binary_accuracy', 'dropout_mean_squared_error',
'dropout_binary_accuracy'
]
self.assertEqual(reference_metric_names, model.metrics_names)
# Verify that model metric names are not altered during training.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
batch_size=5)
self.assertEqual(reference_metric_names, model.metrics_names)
@tf_test_util.run_in_graph_and_eager_modes
def test_metrics_correctness(self):
model = keras.Sequential()
model.add(
keras.layers.Dense(
3, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='mae',
metrics=['accuracy', metrics_module.BinaryAccuracy()],
optimizer=RMSPropOptimizer(learning_rate=0.001))
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4))
y = np.ones((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 1.)
self.assertEqual(outs[2], 1.)
y = np.zeros((100, 1))
outs = model.evaluate(x, y)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
@tf_test_util.run_in_graph_and_eager_modes
def test_metrics_correctness_with_iterator(self):
model = keras.Sequential()
model.add(
keras.layers.Dense(
8, activation='relu', input_dim=4, kernel_initializer='ones'))
model.add(
keras.layers.Dense(
1, activation='sigmoid', kernel_initializer='ones'))
model.compile(
loss='binary_crossentropy',
metrics=['accuracy', metrics_module.BinaryAccuracy()],
optimizer=RMSPropOptimizer(learning_rate=0.001))
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(np.around(outs[1], decimals=1), 0.5)
self.assertEqual(np.around(outs[2], decimals=1), 0.5)
y = np.zeros((100, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset.make_one_shot_iterator()
outs = model.evaluate(iterator, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
@tf_test_util.run_in_graph_and_eager_modes
def test_metrics_correctness_with_weighted_metrics(self):
np.random.seed(1337)
x = np.array([[[1.], [1.]], [[0.], [0.]]])
model = keras.models.Sequential()
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='ones'),
input_shape=(2, 1)))
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss='mse',
sample_weight_mode='temporal',
weighted_metrics=['accuracy', 'mse'])
y = np.array([[[1.], [1.]], [[1.], [1.]]])
outs = model.evaluate(x, y)
self.assertEqual(outs, [0.5, 0.5, 0.5])
w = np.array([[0., 0.], [0., 0.]])
outs = model.evaluate(x, y, sample_weight=w)
self.assertEqual(outs, [0., 0., 0.])
w = np.array([[3., 4.], [1., 2.]])
outs = model.evaluate(x, y, sample_weight=w)
self.assertArrayNear(outs, [0.3, 0.7, 0.3], .001)
# Verify that metric value is same with arbitrary weights and batch size.
x = np.random.random((50, 2, 1))
y = np.random.random((50, 2, 1))
w = np.random.random((50, 2))
mse1 = model.evaluate(x, y, sample_weight=w, batch_size=5)[2]
mse2 = model.evaluate(x, y, sample_weight=w, batch_size=10)[2]
self.assertNear(mse1, mse2, err=1e-7)
@tf_test_util.run_in_graph_and_eager_modes
def test_metric_state_reset_between_fit_and_evaluate(self):
model = keras.Sequential()
model.add(keras.layers.Dense(3, activation='relu', input_dim=4))
model.add(keras.layers.Dense(1, activation='sigmoid'))
acc_obj = metrics_module.BinaryAccuracy()
model.compile(
loss='mae',
metrics=[acc_obj],
optimizer=RMSPropOptimizer(learning_rate=0.001))
x_train = np.random.random((100, 4))
y_train = np.random.random((100, 1))
model.fit(x_train, y_train, batch_size=5, epochs=2)
self.assertEqual(self.evaluate(acc_obj.count), 100)
x_test = np.random.random((10, 4))
y_test = np.random.random((10, 1))
model.evaluate(x_test, y_test, batch_size=5)
self.assertEqual(self.evaluate(acc_obj.count), 10)
@tf_test_util.run_in_graph_and_eager_modes
def test_invalid_metrics(self):
num_classes = 5
input_dim = 5
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes, input_dim=input_dim)
with self.assertRaisesRegexp(
TypeError, 'Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: '):
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=metrics_module.CategoricalAccuracy())
@tf_test_util.run_in_graph_and_eager_modes
def test_metrics_masking(self):
with self.cached_session():
np.random.seed(1337)
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='ones')))
model.compile(
RMSPropOptimizer(learning_rate=0.001),
loss='mse',
weighted_metrics=['accuracy'])
# verify that masking is applied.
x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]])
y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]])
scores = model.train_on_batch(x, y)
self.assertArrayNear(scores, [0.25, 0.75], 0.1)
# verify that masking is combined with sample weights.
w = np.array([3, 2, 4])
scores = model.train_on_batch(x, y, sample_weight=w)
self.assertArrayNear(scores, [0.2, 0.8], 0.1)
@tf_test_util.run_in_graph_and_eager_modes
def test_logging(self):
mock_stdout = io.BytesIO() if six.PY2 else io.StringIO()
model = keras.models.Sequential()
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(
RMSPropOptimizer(learning_rate=0.001), loss='binary_crossentropy')
with test.mock.patch.object(sys, 'stdout', mock_stdout):
model.fit(
np.ones((10, 10), 'float32'), np.ones((10, 1), 'float32'), epochs=10)
self.assertTrue('Epoch 5/10' in mock_stdout.getvalue())
def test_losses_in_defun(self):
with context.eager_mode():
layer = keras.layers.Dense(1, kernel_regularizer='l1')
layer(array_ops.ones([1, 10]))
@function.defun
def get_losses():
return layer.losses
self.assertAllEqual(self.evaluate(layer.losses),
self.evaluate(get_losses()))
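# Illustrative sketch (not one of the original tests): the end-to-end pattern the
# dataset/iterator tests above exercise, written as a plain helper. It reuses the
# TF 1.x graph-mode APIs already imported in this file; the layer sizes, step
# counts and zero-filled data below are arbitrary example values.
def _example_fit_on_dataset_sketch():
  inputs = keras.Input(shape=(3,))
  hidden = keras.layers.Dense(4, activation='relu')(inputs)
  outputs = keras.layers.Dense(4)(hidden)
  model = keras.models.Model(inputs, outputs)
  model.compile(RMSPropOptimizer(learning_rate=0.001), 'mse', metrics=['mae'])
  # Wrap numpy arrays in a batched, repeated Dataset and pass it straight to
  # fit(); steps_per_epoch/steps are required because the dataset is repeated.
  data = dataset_ops.Dataset.from_tensor_slices(
      (np.zeros((10, 3), np.float32), np.zeros((10, 4), np.float32)))
  data = data.repeat(100).batch(10)
  model.fit(data, epochs=1, steps_per_epoch=2, verbose=0)
  return model.evaluate(data, steps=2, verbose=0)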
if __name__ == '__main__':
test.main()
| 36.226164
| 83
| 0.634031
|
6ae3e08d1649ebf34bec8054b39dec03ed2134de
| 5,953
|
py
|
Python
|
src/elevator_pitch/story.py
|
JnyJny/elevator-pitch-faker
|
4ff5015b61c2eb9fe87feab3ae72995772f43796
|
[
"Apache-2.0"
] | 1
|
2019-05-01T22:41:49.000Z
|
2019-05-01T22:41:49.000Z
|
src/elevator_pitch/story.py
|
JnyJny/elevator-pitch-faker
|
4ff5015b61c2eb9fe87feab3ae72995772f43796
|
[
"Apache-2.0"
] | null | null | null |
src/elevator_pitch/story.py
|
JnyJny/elevator-pitch-faker
|
4ff5015b61c2eb9fe87feab3ae72995772f43796
|
[
"Apache-2.0"
] | null | null | null |
'''Faker provider that builds randomly assembled, elevator-pitch style story blurbs.'''
from faker.providers import BaseProvider
class StoryPitchProvider(BaseProvider):
_superlatives = [
"edge-of-your-seat",
"keenly observed",
"lyrical",
"profound",
"erotic",
"inspiring",
"razor-sharp",
"heartrending",
"dream-like",
"darkly comic",
"uncompromising",
"courageous",
"compulsively readable",
"unflinching",
"fiercely honest",
"richly drawn",
"unforgettable",
"riveting",
"high-voltage",
"psycho-sexual",
"riotously funny",
"passionate",
"surreal",
"dystopian",
"hysterical",
"meditative",
]
_genres = [
"thriller",
"meditation",
"coming of age story",
"family drama",
"war epic",
"episotolary novel",
"romance",
"tragedy",
"story",
"tour de force",
"comedy",
"noir",
"instant classic",
"fairy tale",
"autobiographical novel",
"romp",
"fictional memoir",
"trilogy",
"detective novel",
"page-turner",
"tragicomedy",
"murder mystery",
"novel in stories",
"historical novel",
"graphic novel",
"saga"
]
_protagonist_descriptions = [
"depressed",
"wealthy",
"doomed",
"exuberant",
"agoraphobic",
"maladjusted",
"misanthropic",
"alcoholic",
"young",
"philosophical",
"hopelessly romantic",
"hyper-sexual",
"precocious",
"unlucky",
"quixotic",
"desperate",
"refugee",
"dissatisfied",
"bored",
"morally compromised",
"lovesick",
"drug-addled",
"jilted",
"vengeful",
"overbearing",
"closeted",
]
_protagonists = [
"man",
"orphan",
"daughter",
"mother",
"adoloscent",
"soldier",
"student",
"widow",
"woman",
"professor",
"divorcee",
"adventurer",
"extended family",
"child",
"mistress",
"dictator",
"vampire",
"ghost",
"starship captain",
"doctor",
"writer",
"private investigator",
"couple",
"coven",
"murder detective",
"octogenarian",
]
_commitments = [
"adventure",
"commitment",
"desire",
"devotion",
"dream",
"effort",
"strategy",
"pains",
"failure",
"inability",
"journey",
"mission",
"not-so-secret desire",
"quest",
"endeavour",
"secret longing",
"struggle",
"vacation",
"wish",
"expedition",
"plan",
"scheme",
"resolve",
"project",
"promise",
"battle"
]
_verbs = [
"re-awaken",
"come to grips with",
"grapple with",
"understand",
"explore",
"accept",
"overcome",
"avenge",
"pursue",
"defend",
"undertake",
"discover",
"contemplate",
"transcend",
"withdraw from",
"avoid",
"betray",
"circumvent",
"confront",
"expose",
"give up",
"investigate",
"navigate",
"reconnect with",
"embrace",
"reconcile to"
]
_conflicts = [
"fear of spiders",
"adoption",
"traumatic childhood",
"mother's death",
"sexless marriage",
"Oedipal complex",
"feminism",
"religious upbringing",
"political apathy",
"biological clock",
"ugly divorce",
"write's block",
"toxic friendships",
"eating disorder",
"own birth",
"cancer",
"23andMe results",
"privilege",
"untimely death",
"social media addiction",
"spiritual evolutin",
"secret second family",
"sexual awakening",
"Amazon reviews",
"father's murder",
"disinheritance"
]
_pronouns = ['his', 'her', 'their']
@property
def superlative(self):
return self.random_element(self._superlatives)
@property
def genre(self):
return self.random_element(self._genres)
@property
def protagonist_description(self):
return self.random_element(self._protagonist_descriptions)
@property
def protagonist(self):
return self.random_element(self._protagonists)
@property
def commitment(self):
return self.random_element(self._commitments)
@property
def verb(self):
return self.random_element(self._verbs)
@property
def conflict(self):
return self.random_element(self._conflicts)
@property
def _pronoun(self):
return self.random_element(self._pronouns)
def _determiner_for(self, word):
return 'an' if word[0].lower() in 'aeiou' else 'a'
def story_pitch(self, genre=None):
        '''Return a randomly assembled one-sentence story pitch (genre is currently unused).'''
superlative = self.superlative
protagonist_description = self.protagonist_description
return ' '.join([self._determiner_for(superlative).capitalize(),
superlative,
self.genre,
'about',
self._determiner_for(protagonist_description),
protagonist_description,
self.protagonist + "'s",
self.commitment,
'to',
self.verb,
self._pronoun,
self.conflict]) + '.'
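# Illustrative usage sketch (not part of the original module): how this provider
# would typically be registered with Faker. It assumes the `faker` package is
# available; the printed pitch will vary per run.
if __name__ == '__main__':
    from faker import Faker
    fake = Faker()
    fake.add_provider(StoryPitchProvider)
    # e.g. "A riveting noir about a lovesick widow's quest to avenge her father's murder."
    print(fake.story_pitch())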
| 22.212687
| 72
| 0.478582
|
fa867fa04cb04002171db2da4130e77504065501
| 5,242
|
py
|
Python
|
spacy/cli/convert.py
|
gandersen101/spaCy
|
109849bd311490f17a29b320cb032e43d153f36f
|
[
"MIT"
] | 10
|
2021-05-31T07:18:08.000Z
|
2022-03-19T09:20:11.000Z
|
spacy/cli/convert.py
|
gandersen101/spaCy
|
109849bd311490f17a29b320cb032e43d153f36f
|
[
"MIT"
] | 4
|
2021-06-02T00:49:27.000Z
|
2022-01-13T01:59:34.000Z
|
spacy/cli/convert.py
|
gandersen101/spaCy
|
109849bd311490f17a29b320cb032e43d153f36f
|
[
"MIT"
] | 2
|
2021-12-09T07:23:21.000Z
|
2022-03-31T06:13:10.000Z
|
# coding: utf8
from __future__ import unicode_literals
import plac
from pathlib import Path
from wasabi import Printer
import srsly
import re
from .converters import conllu2json, iob2json, conll_ner2json
from .converters import ner_jsonl2json
# Converters are matched by file extension except for ner/iob, which are
# matched by file extension and content. To add a converter, add a new
# entry to this dict with the file extension mapped to the converter function
# imported from /converters.
CONVERTERS = {
"conllubio": conllu2json,
"conllu": conllu2json,
"conll": conllu2json,
"ner": conll_ner2json,
"iob": iob2json,
"jsonl": ner_jsonl2json,
}
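# Illustrative sketch (not part of the original module), following the comment
# above: a converter is just a callable keyed by file extension. The ".tsv"
# extension and the `tsv2json` name are made up for the example; a real
# converter receives the raw file contents plus the keyword arguments passed in
# convert() further down.
def _example_register_converter_sketch():
    def tsv2json(input_data, **kwargs):
        # A real implementation would return the JSON-serializable training
        # format produced by the converters imported above.
        return []
    CONVERTERS["tsv"] = tsv2json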
# File types
FILE_TYPES = ("json", "jsonl", "msg")
FILE_TYPES_STDOUT = ("json", "jsonl")
@plac.annotations(
input_file=("Input file", "positional", None, str),
output_dir=("Output directory. '-' for stdout.", "positional", None, str),
file_type=("Type of data to produce: {}".format(FILE_TYPES), "option", "t", str),
n_sents=("Number of sentences per doc (0 to disable)", "option", "n", int),
seg_sents=("Segment sentences (for -c ner)", "flag", "s"),
model=("Model for sentence segmentation (for -s)", "option", "b", str),
converter=("Converter: {}".format(tuple(CONVERTERS.keys())), "option", "c", str),
lang=("Language (if tokenizer required)", "option", "l", str),
morphology=("Enable appending morphology to tags", "flag", "m", bool),
)
def convert(
input_file,
output_dir="-",
file_type="json",
n_sents=1,
seg_sents=False,
model=None,
morphology=False,
converter="auto",
lang=None,
):
"""
Convert files into JSON format for use with train command and other
experiment management functions. If no output_dir is specified, the data
    is written to stdout, so you can pipe it forward to a JSON file:
$ spacy convert some_file.conllu > some_file.json
"""
no_print = output_dir == "-"
msg = Printer(no_print=no_print)
input_path = Path(input_file)
if file_type not in FILE_TYPES:
msg.fail(
"Unknown file type: '{}'".format(file_type),
"Supported file types: '{}'".format(", ".join(FILE_TYPES)),
exits=1,
)
if file_type not in FILE_TYPES_STDOUT and output_dir == "-":
# TODO: support msgpack via stdout in srsly?
msg.fail(
"Can't write .{} data to stdout.".format(file_type),
"Please specify an output directory.",
exits=1,
)
if not input_path.exists():
msg.fail("Input file not found", input_path, exits=1)
if output_dir != "-" and not Path(output_dir).exists():
msg.fail("Output directory not found", output_dir, exits=1)
input_data = input_path.open("r", encoding="utf-8").read()
if converter == "auto":
converter = input_path.suffix[1:]
if converter == "ner" or converter == "iob":
converter_autodetect = autodetect_ner_format(input_data)
if converter_autodetect == "ner":
msg.info("Auto-detected token-per-line NER format")
converter = converter_autodetect
elif converter_autodetect == "iob":
msg.info("Auto-detected sentence-per-line NER format")
converter = converter_autodetect
else:
msg.warn(
"Can't automatically detect NER format. Conversion may not succeed. See https://spacy.io/api/cli#convert"
)
if converter not in CONVERTERS:
msg.fail("Can't find converter for {}".format(converter), exits=1)
# Use converter function to convert data
func = CONVERTERS[converter]
data = func(
input_data,
n_sents=n_sents,
seg_sents=seg_sents,
use_morphology=morphology,
lang=lang,
model=model,
no_print=no_print,
)
if output_dir != "-":
# Export data to a file
suffix = ".{}".format(file_type)
output_file = Path(output_dir) / Path(input_path.parts[-1]).with_suffix(suffix)
if file_type == "json":
srsly.write_json(output_file, data)
elif file_type == "jsonl":
srsly.write_jsonl(output_file, data)
elif file_type == "msg":
srsly.write_msgpack(output_file, data)
msg.good(
"Generated output file ({} documents): {}".format(len(data), output_file)
)
else:
# Print to stdout
if file_type == "json":
srsly.write_json("-", data)
elif file_type == "jsonl":
srsly.write_jsonl("-", data)
def autodetect_ner_format(input_data):
# guess format from the first 20 lines
lines = input_data.split("\n")[:20]
format_guesses = {"ner": 0, "iob": 0}
iob_re = re.compile(r"\S+\|(O|[IB]-\S+)")
ner_re = re.compile(r"\S+\s+(O|[IB]-\S+)$")
for line in lines:
line = line.strip()
if iob_re.search(line):
format_guesses["iob"] += 1
if ner_re.search(line):
format_guesses["ner"] += 1
if format_guesses["iob"] == 0 and format_guesses["ner"] > 0:
return "ner"
if format_guesses["ner"] == 0 and format_guesses["iob"] > 0:
return "iob"
return None
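# Illustrative sketch (not part of the original module): what the auto-detection
# above returns for the two supported layouts. The sample strings are invented
# for the example.
if __name__ == "__main__":
    token_per_line = "Apple B-ORG\nis O\nnice O"
    sentence_per_line = "Apple|B-ORG is|O nice|O"
    print(autodetect_ner_format(token_per_line))     # "ner"
    print(autodetect_ner_format(sentence_per_line))  # "iob"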
| 35.90411
| 121
| 0.620183
|
816743d6042a7922263bf8fc7db0641861082ff2
| 6,071
|
py
|
Python
|
infrastructure/jtgltext.py
|
kyapp69/GCodeViewer
|
1f5b15083580896a3c5de8294c60c9654953b82b
|
[
"Apache-2.0"
] | 3
|
2017-03-08T02:48:59.000Z
|
2018-04-22T17:59:24.000Z
|
infrastructure/jtgltext.py
|
kyapp69/GCodeViewer
|
1f5b15083580896a3c5de8294c60c9654953b82b
|
[
"Apache-2.0"
] | 4
|
2019-11-23T01:40:22.000Z
|
2021-02-26T01:30:39.000Z
|
infrastructure/jtgltext.py
|
kyapp69/GCodeViewer
|
1f5b15083580896a3c5de8294c60c9654953b82b
|
[
"Apache-2.0"
] | 10
|
2016-05-12T01:15:12.000Z
|
2020-02-15T09:54:48.000Z
|
from __future__ import division
import OpenGL
OpenGL.FORWARD_COMPATIBLE_ONLY = True
# ^ See http://pyopengl.sourceforge.net/documentation/deprecations.html
import OpenGL.GL as gl
import numpy as np
import ctypes
import os
import logging
try:
from shader_loader import ShaderLoader
except ModuleNotFoundError:
from infrastructure.shader_loader import ShaderLoader
from PIL import Image
class JTGLText(object):
def __init__(self, resource_location, window_width, window_height):
self.window_width = window_width
self.window_height = window_height
logging.info("X / Y : %s / %s" % (window_width, window_height))
self.text_width = 6.0
self.text_height = self.text_width * 2.0
self.kern = 1.0
self.text_size = 0
self.color_size = 0
self.last_text = ''
self.last_color = [0.0, 0.0, 0.0, 1.0]
self.dirty = True
text_vertex_shader = os.path.join(resource_location, 'shaders', 'text_shader_vr.glsl')
        text_fragment_shader = os.path.join(resource_location, 'shaders', 'text_shader_fr.glsl')
        self._text_shader_program = ShaderLoader.load_shaders(text_vertex_shader, text_fragment_shader)
font_texture_path = os.path.join(resource_location, 'textures', 'courier10.png')
self.font_texture_id = self._load_font_texture(font_texture_path)
self.text_vao = gl.glGenVertexArrays(1)
self.text_vbo = gl.glGenBuffers(1)
def viewPortChanged(self, window_width, window_height):
self.window_width = window_width
self.window_height = window_height
logging.info("X / Y : %s / %s" % (self.window_width, self.window_height))
self.dirty = True
def _load_font_texture(self, location):
im = Image.open(location)
image = im.tobytes("raw", "RGBA", 0, -1)
texture_id = gl.glGenTextures(1)
gl.glActiveTexture(gl.GL_TEXTURE1)
gl.glBindTexture(gl.GL_TEXTURE_2D, texture_id)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, im.size[0], im.size[1], 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, image)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_REPEAT)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_REPEAT)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
return texture_id
def square(self, tl, br):
a = tl
b = [br[0], tl[1]]
c = [tl[0], br[1]]
d = br
return [a, b, c, c, b, d]
def _load_text(self, text, color):
posisitions = []
colors = []
texture_coords = []
texture_spacing = 1.0 / 510.0
y_pos = self.window_height - (self.kern + self.text_height)
x_pos = self.kern
for char in text:
if "\n" == char:
y_pos -= self.text_height + self.kern
x_pos = self.kern
else:
x1 = x_pos
y1 = y_pos + self.text_height
x2 = x1 + self.text_width
y2 = y_pos
posisitions += self.square([x1, y1], [x2, y2])
letter_start = (ord(char) * 2.0 * texture_spacing)
colors += [color for i in range(0, 6)]
texture_coords += self.square([letter_start, 1], [letter_start + texture_spacing, 0])
x_pos += self.text_width + self.kern
posisitions = np.array(posisitions, dtype=np.float32).flatten()
colors = np.array(colors, dtype=np.float32).flatten()
texture_coords = np.array(texture_coords, dtype=np.float32).flatten()
gl.glBindVertexArray(self.text_vao)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.text_vbo)
gl.glBufferData(gl.GL_ARRAY_BUFFER, (posisitions.size + colors.size + texture_coords.size) * 4, None, gl.GL_STATIC_DRAW)
gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, posisitions)
gl.glBufferSubData(gl.GL_ARRAY_BUFFER, posisitions.size * 4, colors)
gl.glBufferSubData(gl.GL_ARRAY_BUFFER, (posisitions.size + colors.size) * 4, texture_coords)
self.text_size = posisitions.size
self.color_size = colors.size
def printgl(self, text, color=[1.0, 1.0, 1.0, 1.0]):
if self.last_text != text or self.last_color != color or self.dirty:
self._load_text(text, color)
self.last_text = text
self.last_color = color
self.dirty = False
gl.glUseProgram(self._text_shader_program)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
gl.glBindVertexArray(self.text_vao)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.text_vbo)
vPosisition = gl.glGetAttribLocation(self._text_shader_program, "vPosisition")
vColor = gl.glGetAttribLocation(self._text_shader_program, "vColor")
vTexCoord = gl.glGetAttribLocation(self._text_shader_program, "vTexCoord")
vWindow = gl.glGetUniformLocation(self._text_shader_program, 'vWindow')
gl.glUniform2fv(vWindow, 1, [self.window_width, self.window_height])
gl.glEnableVertexAttribArray(vPosisition)
gl.glEnableVertexAttribArray(vColor)
gl.glEnableVertexAttribArray(vTexCoord)
texture_data = gl.glGetUniformLocation(self._text_shader_program, "texture_data")
gl.glUniform1i(texture_data, 1)
gl.glActiveTexture(gl.GL_TEXTURE1)
gl.glBindTexture(gl.GL_TEXTURE_2D, self.font_texture_id)
gl.glVertexAttribPointer(vPosisition, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
gl.glVertexAttribPointer(vColor, 4, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(self.text_size * 4))
gl.glVertexAttribPointer(vTexCoord, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p((self.text_size+self.color_size) * 4))
gl.glDrawArrays(gl.GL_TRIANGLES, 0, self.text_size // 2)
gl.glDisable(gl.GL_BLEND)
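# Illustrative usage sketch (not part of the original module): how JTGLText is
# meant to be driven by a host application. It assumes an OpenGL 3+ context has
# already been created elsewhere (e.g. by a GLUT or Qt window) and that
# `resource_location` points at the shaders/ and textures/ directories this
# class loads from; both are placeholders here.
def _example_render_loop_sketch(resource_location, window_width, window_height):
    overlay = JTGLText(resource_location, window_width, window_height)
    # Call once per frame after the scene is drawn; the vertex data is cached
    # and only re-uploaded when the text, color or viewport changes.
    overlay.printgl("layer: 12 / 40\nspeed: 100%", color=[1.0, 1.0, 1.0, 1.0])
    # Forward window resizes so the pixel-space layout stays correct.
    overlay.viewPortChanged(window_width, window_height)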
| 41.582192
| 130
| 0.662659
|
084691bdf06b9dd6b1b3831187c4f0075270bbfd
| 28,960
|
py
|
Python
|
research/slim/nets/mobilenet_v1_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | 1
|
2021-05-17T01:42:29.000Z
|
2021-05-17T01:42:29.000Z
|
research/slim/nets/mobilenet_v1_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
research/slim/nets/mobilenet_v1_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for MobileNet v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from research.slim.nets import mobilenet_v1
slim = tf.contrib.slim
class MobilenetV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'MobilenetV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'MobilenetV1/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points.keys())
def testBuildCustomNetworkUsingConvDefs(self):
batch_size = 5
height, width = 224, 224
conv_defs = [
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
]
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 56, 56, 512])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise',
use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 7, 7, 512],
'Conv2d_12_pointwise': [batch_size, 7, 7, 1024],
'Conv2d_13_depthwise': [batch_size, 7, 7, 1024],
'Conv2d_13_pointwise': [batch_size, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 16
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 14, 14, 512],
'Conv2d_12_pointwise': [batch_size, 14, 14, 1024],
'Conv2d_13_depthwise': [batch_size, 14, 14, 1024],
'Conv2d_13_pointwise': [batch_size, 14, 14, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 8
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 28, 28, 256],
'Conv2d_6_pointwise': [batch_size, 28, 28, 512],
'Conv2d_7_depthwise': [batch_size, 28, 28, 512],
'Conv2d_7_pointwise': [batch_size, 28, 28, 512],
'Conv2d_8_depthwise': [batch_size, 28, 28, 512],
'Conv2d_8_pointwise': [batch_size, 28, 28, 512],
'Conv2d_9_depthwise': [batch_size, 28, 28, 512],
'Conv2d_9_pointwise': [batch_size, 28, 28, 512],
'Conv2d_10_depthwise': [batch_size, 28, 28, 512],
'Conv2d_10_pointwise': [batch_size, 28, 28, 512],
'Conv2d_11_depthwise': [batch_size, 28, 28, 512],
'Conv2d_11_pointwise': [batch_size, 28, 28, 512],
'Conv2d_12_depthwise': [batch_size, 28, 28, 512],
'Conv2d_12_pointwise': [batch_size, 28, 28, 1024],
'Conv2d_13_depthwise': [batch_size, 28, 28, 1024],
'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildAndCheckAllEndPointsApproximateFaceNet(self):
batch_size = 5
height, width = 128, 128
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75)
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75,
use_explicit_padding=True)
# For the Conv2d_0 layer FaceNet has depth=16
endpoints_shapes = {'Conv2d_0': [batch_size, 64, 64, 24],
'Conv2d_1_depthwise': [batch_size, 64, 64, 24],
'Conv2d_1_pointwise': [batch_size, 64, 64, 48],
'Conv2d_2_depthwise': [batch_size, 32, 32, 48],
'Conv2d_2_pointwise': [batch_size, 32, 32, 96],
'Conv2d_3_depthwise': [batch_size, 32, 32, 96],
'Conv2d_3_pointwise': [batch_size, 32, 32, 96],
'Conv2d_4_depthwise': [batch_size, 16, 16, 96],
'Conv2d_4_pointwise': [batch_size, 16, 16, 192],
'Conv2d_5_depthwise': [batch_size, 16, 16, 192],
'Conv2d_5_pointwise': [batch_size, 16, 16, 192],
'Conv2d_6_depthwise': [batch_size, 8, 8, 192],
'Conv2d_6_pointwise': [batch_size, 8, 8, 384],
'Conv2d_7_depthwise': [batch_size, 8, 8, 384],
'Conv2d_7_pointwise': [batch_size, 8, 8, 384],
'Conv2d_8_depthwise': [batch_size, 8, 8, 384],
'Conv2d_8_pointwise': [batch_size, 8, 8, 384],
'Conv2d_9_depthwise': [batch_size, 8, 8, 384],
'Conv2d_9_pointwise': [batch_size, 8, 8, 384],
'Conv2d_10_depthwise': [batch_size, 8, 8, 384],
'Conv2d_10_pointwise': [batch_size, 8, 8, 384],
'Conv2d_11_depthwise': [batch_size, 8, 8, 384],
'Conv2d_11_pointwise': [batch_size, 8, 8, 384],
'Conv2d_12_depthwise': [batch_size, 4, 4, 384],
'Conv2d_12_pointwise': [batch_size, 4, 4, 768],
'Conv2d_13_depthwise': [batch_size, 4, 4, 768],
'Conv2d_13_pointwise': [batch_size, 4, 4, 768]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
mobilenet_v1.mobilenet_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(3217920, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]
_, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = mobilenet_v1.mobilenet_v1(
inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Conv2d_13_pointwise']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
def testUnknowBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
logits, _ = mobilenet_v1.mobilenet_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet_v1.mobilenet_v1_arg_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
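# Illustrative sketch (not one of the test cases above): the plain inference-time
# construction these tests exercise. The input shape, class count and 0.5 depth
# multiplier are arbitrary example values.
def _example_build_mobilenet_v1_sketch():
    images = tf.random_uniform((1, 224, 224, 3))
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)):
        logits, end_points = mobilenet_v1.mobilenet_v1(
            images, num_classes=1000, depth_multiplier=0.5, is_training=False)
    return logits, end_points['Predictions']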
if __name__ == '__main__':
tf.test.main()
| 54.029851
| 85
| 0.580421
|
9f62fc44d1f222eea6357aea9206807922f94831
| 5,197
|
py
|
Python
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/training/adadelta.py
|
JustinACoder/H22-GR3-UnrealAI
|
361eb9ef1147f8a2991e5f98c4118cd823184adf
|
[
"MIT"
] | 6
|
2022-02-04T18:12:24.000Z
|
2022-03-21T23:57:12.000Z
|
Lib/site-packages/tensorflow/python/training/adadelta.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/tensorflow/python/training/adadelta.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-08T03:53:23.000Z
|
2022-02-08T03:53:23.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adadelta for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.AdadeltaOptimizer")
class AdadeltaOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
See [M. D. Zeiler](http://arxiv.org/abs/1212.5701)
([pdf](http://arxiv.org/pdf/1212.5701v1.pdf))
"""
def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-8,
use_locking=False, name="Adadelta"):
"""Construct a new Adadelta optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
To match the exact form in the original paper use 1.0.
rho: A `Tensor` or a floating point value. The decay rate.
      epsilon: A `Tensor` or a floating point value. A constant epsilon used
        to better condition the grad update.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adadelta".
@compatibility(eager)
When eager execution is enabled, `learning_rate`, `rho`, and `epsilon` can
each be a callable that takes no arguments and returns the actual value to
use. This can be useful for changing these values across different
invocations of optimizer functions.
@end_compatibility
"""
super(AdadeltaOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._rho = rho
self._epsilon = epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._rho_t = None
self._epsilon_t = None
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "accum", self._name)
self._zeros_slot(v, "accum_update", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
rho = self._call_if_callable(self._rho)
epsilon = self._call_if_callable(self._epsilon)
self._lr_t = ops.convert_to_tensor(lr, name="lr")
self._rho_t = ops.convert_to_tensor(rho, name="rho")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.apply_adadelta(
var,
accum,
accum_update,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._rho_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.resource_apply_adadelta(
var.handle,
accum.handle,
accum_update.handle,
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._rho_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.sparse_apply_adadelta(
var,
accum,
accum_update,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._rho_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.resource_sparse_apply_adadelta(
var.handle,
accum.handle,
accum_update.handle,
math_ops.cast(self._lr_t, grad.dtype),
math_ops.cast(self._rho_t, grad.dtype),
math_ops.cast(self._epsilon_t, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
avg_line_length: 38.783582 | max_line_length: 81 | alphanum_fraction: 0.676159
hexsha: 81ff15f0f77cc9d8494374946f86caa83e391afe | size: 222,054 | ext: py | lang: Python
max_stars_repo_path: awx/api/serializers.py | max_stars_repo_name: chris93111/awx | max_stars_repo_head_hexsha: cd49213924f1cd451e5c17390b9c2b237085d1be | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: awx/api/serializers.py | max_issues_repo_name: chris93111/awx | max_issues_repo_head_hexsha: cd49213924f1cd451e5c17390b9c2b237085d1be | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2020-11-05T16:07:07.000Z | max_issues_repo_issues_event_max_datetime: 2020-11-05T16:07:07.000Z
max_forks_repo_path: awx/api/serializers.py | max_forks_repo_name: Mattlk13/awx | max_forks_repo_head_hexsha: cd49213924f1cd451e5c17390b9c2b237085d1be | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import copy
import json
import logging
import re
from collections import OrderedDict
from datetime import timedelta
# OAuth2
from oauthlib import oauth2
from oauthlib.common import generate_token
# Jinja
from jinja2 import sandbox, StrictUndefined
from jinja2.exceptions import TemplateSyntaxError, UndefinedError, SecurityError
# Django
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.timezone import now
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework.relations import ManyRelatedField
from rest_framework import fields
from rest_framework import serializers
from rest_framework import validators
from rest_framework.utils.serializer_helpers import ReturnList
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import (
SCHEDULEABLE_PROVIDERS,
ACTIVE_STATES,
CENSOR_VALUE,
)
from awx.main.models import (
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialInputSource,
CredentialType, CustomInventoryScript, Group, Host, Instance,
InstanceGroup, Inventory, InventorySource, InventoryUpdate,
InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,
JobNotificationMixin, JobTemplate, Label, Notification, NotificationTemplate,
OAuth2AccessToken, OAuth2Application, Organization, Project,
ProjectUpdate, ProjectUpdateEvent, RefreshToken, Role, Schedule,
SystemJob, SystemJobEvent, SystemJobTemplate, Team, UnifiedJob,
UnifiedJobTemplate, WorkflowApproval, WorkflowApprovalTemplate, WorkflowJob,
WorkflowJobNode, WorkflowJobTemplate, WorkflowJobTemplateNode, StdoutMaxBytesExceeded
)
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import (
get_roles_on_resource, role_summary_fields_generator
)
from awx.main.fields import ImplicitRoleField, JSONBField
from awx.main.utils import (
get_type_for_model, get_model_for_type,
camelcase_to_underscore, getattrd, parse_yaml_or_json,
has_model_field_prefetched, extract_ansible_vars, encrypt_dict,
prefetch_page_capabilities, get_external_account, truncate_stdout,
)
from awx.main.utils.filters import SmartFilter
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.validators import vars_validate_or_raise
from awx.api.versioning import reverse
from awx.api.fields import (BooleanNullField, CharNullField, ChoiceNullField,
VerbatimField, DeprecatedCredentialField)
logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type.
DEFAULT_SUMMARY_FIELDS = ('id', 'name', 'description')# , 'created_by', 'modified_by')#, 'type')
# Keys are fields (foreign keys) where, if found on an instance, summary info
# should be added to the serialized data. Values are a tuple of field names on
# the related object to include in the summary data (if the field is present on
# the related object).
SUMMARIZABLE_FK_FIELDS = {
'organization': DEFAULT_SUMMARY_FIELDS,
'user': ('id', 'username', 'first_name', 'last_name'),
'application': ('id', 'name'),
'team': DEFAULT_SUMMARY_FIELDS,
'inventory': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS,
'group': DEFAULT_SUMMARY_FIELDS,
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type', 'canceled_on'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
'workflow_approval_template': DEFAULT_SUMMARY_FIELDS + ('timeout',),
'workflow_approval': DEFAULT_SUMMARY_FIELDS + ('timeout',),
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error', 'canceled_on'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'inventory_source': ('source', 'last_updated', 'status'),
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
'source_script': ('name', 'description'),
'role': ('id', 'role_field'),
'notification_template': DEFAULT_SUMMARY_FIELDS,
'instance_group': ('id', 'name', 'controller_id', 'is_containerized'),
'insights_credential': DEFAULT_SUMMARY_FIELDS,
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'webhook_credential': DEFAULT_SUMMARY_FIELDS,
'approved_or_denied_by': ('id', 'username', 'first_name', 'last_name'),
'credential_type': DEFAULT_SUMMARY_FIELDS,
}
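# Illustrative shape of the resulting summary data (values are hypothetical):
# a serialized job whose "inventory" foreign key is set would include
#   "summary_fields": {"inventory": {"id": 1, "name": "Demo Inventory",
#                                    "total_hosts": 5, ...}, ...}
# so list views can display related names without extra API requests.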
def reverse_gfk(content_object, request):
'''
Computes a reverse for a GenericForeignKey field.
Returns a dictionary of the form
{ '<type>': reverse(<type detail>) }
for example
{ 'organization': '/api/v2/organizations/1/' }
'''
if content_object is None or not hasattr(content_object, 'get_absolute_url'):
return {}
return {
camelcase_to_underscore(content_object.__class__.__name__): content_object.get_absolute_url(request=request)
}
class CopySerializer(serializers.Serializer):
name = serializers.CharField()
def validate(self, attrs):
name = attrs.get('name')
view = self.context.get('view', None)
obj = view.get_object()
if name == obj.name:
raise serializers.ValidationError(_(
                'The original object is already named {}; a copy of'
                ' it cannot have the same name.'.format(name)
))
return attrs
class BaseSerializerMetaclass(serializers.SerializerMetaclass):
'''
Custom metaclass to enable attribute inheritance from Meta objects on
serializer base classes.
Also allows for inheriting or updating field lists from base class(es):
class Meta:
# Inherit all fields from base class.
fields = ('*',)
# Inherit all fields from base class and add 'foo'.
fields = ('*', 'foo')
# Inherit all fields from base class except 'bar'.
fields = ('*', '-bar')
# Define fields as 'foo' and 'bar'; ignore base class fields.
fields = ('foo', 'bar')
# Extra field kwargs dicts are also merged from base classes.
extra_kwargs = {
'foo': {'required': True},
'bar': {'read_only': True},
}
# If a subclass were to define extra_kwargs as:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'label': 'New Label for Bar'},
}
# The resulting value of extra_kwargs would be:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'read_only': True, 'label': 'New Label for Bar'},
}
# Extra field kwargs cannot be removed in subclasses, only replaced.
'''
@staticmethod
def _is_list_of_strings(x):
return isinstance(x, (list, tuple)) and all([isinstance(y, str) for y in x])
@staticmethod
def _is_extra_kwargs(x):
return isinstance(x, dict) and all([isinstance(k, str) and isinstance(v, dict) for k,v in x.items()])
@classmethod
def _update_meta(cls, base, meta, other=None):
for attr in dir(other):
if attr.startswith('_'):
continue
val = getattr(other, attr)
meta_val = getattr(meta, attr, None)
# Special handling for lists/tuples of strings (field names).
if cls._is_list_of_strings(val) and cls._is_list_of_strings(meta_val or []):
meta_val = meta_val or []
new_vals = []
except_vals = []
if base: # Merge values from all bases.
new_vals.extend([x for x in meta_val])
for v in val:
                    if not base and v == '*':  # Inherit all values from previous base(s).
new_vals.extend([x for x in meta_val])
elif not base and v.startswith('-'): # Except these values.
except_vals.append(v[1:])
else:
new_vals.append(v)
val = []
for v in new_vals:
if v not in except_vals and v not in val:
val.append(v)
val = tuple(val)
# Merge extra_kwargs dicts from base classes.
elif cls._is_extra_kwargs(val) and cls._is_extra_kwargs(meta_val or {}):
meta_val = meta_val or {}
new_val = {}
if base:
for k,v in meta_val.items():
new_val[k] = copy.deepcopy(v)
for k,v in val.items():
new_val.setdefault(k, {}).update(copy.deepcopy(v))
val = new_val
# Any other values are copied in case they are mutable objects.
else:
val = copy.deepcopy(val)
setattr(meta, attr, val)
def __new__(cls, name, bases, attrs):
meta = type('Meta', (object,), {})
for base in bases[::-1]:
cls._update_meta(base, meta, getattr(base, 'Meta', None))
cls._update_meta(None, meta, attrs.get('Meta', meta))
attrs['Meta'] = meta
return super(BaseSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
fields = ('id', 'type', 'url', 'related', 'summary_fields', 'created',
'modified', 'name', 'description')
summary_fields = ()
summarizable_fields = ()
# add the URL and related resources
type = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
related = serializers.SerializerMethodField('_get_related')
summary_fields = serializers.SerializerMethodField('_get_summary_fields')
# make certain fields read only
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
super(BaseSerializer, self).__init__(*args, **kwargs)
        # The following lines prevent a JSON dict from being passed into a PrimaryKeyRelatedField.
data = kwargs.get('data', False)
if data:
for field_name, field_instance in self.fields.items():
if isinstance(field_instance, ManyRelatedField) and not field_instance.read_only:
if isinstance(data.get(field_name, False), dict):
raise serializers.ValidationError(_('Cannot use dictionary for %s' % field_name))
@property
def version(self):
return 2
def get_type(self, obj):
return get_type_for_model(self.Meta.model)
def get_types(self):
return [self.get_type(None)]
def get_type_choices(self):
type_name_map = {
'job': _('Playbook Run'),
'ad_hoc_command': _('Command'),
'project_update': _('SCM Update'),
'inventory_update': _('Inventory Sync'),
'system_job': _('Management Job'),
'workflow_job': _('Workflow Job'),
'workflow_job_template': _('Workflow Template'),
'job_template': _('Job Template')
}
choices = []
for t in self.get_types():
name = _(type_name_map.get(t, force_text(get_model_for_type(t)._meta.verbose_name).title()))
choices.append((t, name))
return choices
def get_url(self, obj):
if obj is None or not hasattr(obj, 'get_absolute_url'):
return ''
elif isinstance(obj, User):
return self.reverse('api:user_detail', kwargs={'pk': obj.pk})
else:
return obj.get_absolute_url(request=self.context.get('request'))
def filter_field_metadata(self, fields, method):
"""
Filter field metadata based on the request method.
        This is intended to be extended by subclasses.
"""
return fields
def _get_related(self, obj):
return {} if obj is None else self.get_related(obj)
def _generate_named_url(self, url_path, obj, node):
url_units = url_path.split('/')
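        # For a detail URL such as '/api/v2/hosts/42/', splitting on '/' yields
        # ['', 'api', 'v2', 'hosts', '42', '']; index 4 is the pk segment, which
        # is swapped for the named-URL identifier below.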
named_url = node.generate_named_url(obj)
url_units[4] = named_url
return '/'.join(url_units)
def get_related(self, obj):
res = OrderedDict()
view = self.context.get('view', None)
if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and \
type(obj) in settings.NAMED_URL_GRAPH:
original_url = self.get_url(obj)
res['named_url'] = self._generate_named_url(
original_url, obj, settings.NAMED_URL_GRAPH[type(obj)]
)
if getattr(obj, 'created_by', None):
res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})
if getattr(obj, 'modified_by', None):
res['modified_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.modified_by.pk})
return res
def _get_summary_fields(self, obj):
return {} if obj is None else self.get_summary_fields(obj)
def get_summary_fields(self, obj):
# Return values for certain fields on related objects, to simplify
# displaying lists of items without additional API requests.
summary_fields = OrderedDict()
for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items():
try:
# A few special cases where we don't want to access the field
# because it results in additional queries.
if fk == 'job' and isinstance(obj, UnifiedJob):
continue
if fk == 'project' and (isinstance(obj, InventorySource) or
isinstance(obj, Project)):
continue
try:
fkval = getattr(obj, fk, None)
except ObjectDoesNotExist:
continue
if fkval is None:
continue
if fkval == obj:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
fval = getattr(fkval, field, None)
if fval is None and field == 'type':
if isinstance(fkval, PolymorphicModel):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval)
elif fval is None and field == 'unified_job_type' and isinstance(fkval, UnifiedJobTemplate):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval._get_unified_job_class())
if fval is not None:
summary_fields[fk][field] = fval
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
# RBAC summary fields
roles = {}
for field in obj._meta.get_fields():
if type(field) is ImplicitRoleField:
roles[field.name] = role_summary_fields_generator(obj, field.name)
if len(roles) > 0:
summary_fields['object_roles'] = roles
# Advance display of RBAC capabilities
if hasattr(self, 'show_capabilities'):
user_capabilities = self._obj_capability_dict(obj)
if user_capabilities:
summary_fields['user_capabilities'] = user_capabilities
return summary_fields
def _obj_capability_dict(self, obj):
"""
Returns the user_capabilities dictionary for a single item
If inside of a list view, it runs the prefetching algorithm for
the entire current page, saves it into context
"""
view = self.context.get('view', None)
parent_obj = None
if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
capabilities_cache = {}
# if serializer has parent, it is ListView, apply page capabilities prefetch
if self.parent and hasattr(self, 'capabilities_prefetch') and self.capabilities_prefetch:
qs = self.parent.instance
if 'capability_map' not in self.context:
if hasattr(self, 'polymorphic_base'):
model = self.polymorphic_base.Meta.model
prefetch_list = self.polymorphic_base._capabilities_prefetch
else:
model = self.Meta.model
prefetch_list = self.capabilities_prefetch
self.context['capability_map'] = prefetch_page_capabilities(
model, qs, prefetch_list, view.request.user
)
if obj.id in self.context['capability_map']:
capabilities_cache = self.context['capability_map'][obj.id]
return get_user_capabilities(
view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj,
capabilities_cache=capabilities_cache
)
else:
# Contextual information to produce user_capabilities doesn't exist
return {}
def get_created(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.date_joined
elif hasattr(obj, 'created'):
return obj.created
return None
def get_modified(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.last_login # Not actually exposed for User.
elif hasattr(obj, 'modified'):
return obj.modified
return None
def get_extra_kwargs(self):
extra_kwargs = super(BaseSerializer, self).get_extra_kwargs()
if self.instance:
read_only_on_update_fields = getattr(self.Meta, 'read_only_on_update_fields', tuple())
for field_name in read_only_on_update_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
# when a Model's editable field is set to False. The short circuit skips choice rendering.
#
        # This logic is to force rendering choices on an uneditable field.
        # Note: Consider expanding this rendering for more than just choices fields
        # Note: This logic works in conjunction with
if hasattr(model_field, 'choices') and model_field.choices:
was_editable = model_field.editable
model_field.editable = True
field_class, field_kwargs = super(BaseSerializer, self).build_standard_field(field_name, model_field)
if hasattr(model_field, 'choices') and model_field.choices:
model_field.editable = was_editable
if was_editable is False:
field_kwargs['read_only'] = True
# Pass model field default onto the serializer field if field is not read-only.
if model_field.has_default() and not field_kwargs.get('read_only', False):
field_kwargs['default'] = field_kwargs['initial'] = model_field.get_default()
# Enforce minimum value of 0 for PositiveIntegerFields.
if isinstance(model_field, (models.PositiveIntegerField, models.PositiveSmallIntegerField)) and 'choices' not in field_kwargs:
field_kwargs['min_value'] = 0
# Use custom boolean field that allows null and empty string as False values.
if isinstance(model_field, models.BooleanField) and not field_kwargs.get('read_only', False):
field_class = BooleanNullField
# Use custom char or choice field that coerces null to an empty string.
if isinstance(model_field, (models.CharField, models.TextField)) and not field_kwargs.get('read_only', False):
if 'choices' in field_kwargs:
field_class = ChoiceNullField
else:
field_class = CharNullField
# Update the message used for the unique validator to use capitalized
# verbose name; keeps unique message the same as with DRF 2.x.
opts = self.Meta.model._meta.concrete_model._meta
for validator in field_kwargs.get('validators', []):
if isinstance(validator, validators.UniqueValidator):
unique_error_message = model_field.error_messages.get('unique', None)
if unique_error_message:
unique_error_message = unique_error_message % {
'model_name': capfirst(opts.verbose_name),
'field_label': capfirst(model_field.verbose_name),
}
validator.message = unique_error_message
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(BaseSerializer, self).build_relational_field(field_name, relation_info)
# Don't include choices for foreign key fields.
field_kwargs.pop('choices', None)
return field_class, field_kwargs
def get_unique_together_validators(self):
# Allow the model's full_clean method to handle the unique together validation.
return []
def run_validation(self, data=fields.empty):
try:
return super(BaseSerializer, self).run_validation(data)
except ValidationError as exc:
# Avoid bug? in DRF if exc.detail happens to be a list instead of a dict.
raise ValidationError(detail=serializers.as_serializer_error(exc))
def get_validation_exclusions(self, obj=None):
# Borrowed from DRF 2.x - return model fields that should be excluded
# from model validation.
cls = self.Meta.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in opts.fields]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name not in exclusions:
continue
if field.read_only:
continue
if isinstance(field, serializers.Serializer):
continue
exclusions.remove(field_name)
        # The clean_ methods cannot be run on many-to-many models
exclusions.extend([field.name for field in opts.many_to_many])
return exclusions
def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs)
try:
            # Create/update a model instance and run its full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k,v in attrs.items():
if k not in exclusions:
setattr(obj, k, v)
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved.
for k in attrs.keys():
if k not in exclusions:
attrs[k] = getattr(obj, k)
except DjangoValidationError as exc:
# DjangoValidationError may contain a list or dict; normalize into a
# dict where the keys are the field name and the values are a list
# of error messages, then raise as a DRF ValidationError. DRF would
# normally convert any DjangoValidationError to a non-field specific
# error message; here we preserve field-specific errors raised from
# the model's full_clean method.
d = exc.update_error_dict({})
for k,v in d.items():
v = v if isinstance(v, list) else [v]
v2 = []
for e in v:
if isinstance(e, DjangoValidationError):
v2.extend(list(e))
elif isinstance(e, list):
v2.extend(e)
else:
v2.append(e)
d[k] = list(map(force_text, v2))
raise ValidationError(d)
return attrs
def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs)
@property
def is_detail_view(self):
if 'view' in self.context:
if 'pk' in self.context['view'].kwargs:
return True
return False
class EmptySerializer(serializers.Serializer):
pass
class UnifiedJobTemplateSerializer(BaseSerializer):
# As a base serializer, the capabilities prefetch is not used directly
_capabilities_prefetch = [
'admin', 'execute',
{'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
'workflowjobtemplate.organization.workflow_admin']}
]
class Meta:
model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed',
'next_job_run', 'status')
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url(request=self.context.get('request'))
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))
return res
def get_types(self):
if type(self) is UnifiedJobTemplateSerializer:
return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',]
else:
return super(UnifiedJobTemplateSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobTemplateSerializer:
if isinstance(obj, Project):
serializer_class = ProjectSerializer
elif isinstance(obj, InventorySource):
serializer_class = InventorySourceSerializer
elif isinstance(obj, JobTemplate):
serializer_class = JobTemplateSerializer
elif isinstance(obj, SystemJobTemplate):
serializer_class = SystemJobTemplateSerializer
elif isinstance(obj, WorkflowJobTemplate):
serializer_class = WorkflowJobTemplateSerializer
elif isinstance(obj, WorkflowApprovalTemplate):
serializer_class = WorkflowApprovalTemplateSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# capabilities prefetch is only valid for these models
if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):
serializer.capabilities_prefetch = self._capabilities_prefetch
else:
serializer.capabilities_prefetch = None
return serializer.to_representation(obj)
else:
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this '
'unified job have been saved to the database.'),
read_only=True
)
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'canceled_on', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation',
'execution_node', 'controller_node',
'result_traceback', 'event_processing_finished')
extra_kwargs = {
'unified_job_template': {
'source': 'unified_job_template_id',
'label': 'unified job template',
},
'job_env': {
'read_only': True,
'label': 'job_env',
}
}
def get_types(self):
if type(self) is UnifiedJobSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]
else:
return super(UnifiedJobSerializer, self).get_types()
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(request=self.context.get('request'))
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url(request=self.context.get('request'))
if isinstance(obj, ProjectUpdate):
res['stdout'] = self.reverse('api:project_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, InventoryUpdate):
res['stdout'] = self.reverse('api:inventory_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, Job):
res['stdout'] = self.reverse('api:job_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, AdHocCommand):
res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})
if obj.workflow_job_id:
res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)
if obj.spawned_by_workflow:
summary_fields['source_workflow_job'] = {}
try:
summary_obj = obj.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
return summary_fields
for field in SUMMARIZABLE_FK_FIELDS['job']:
val = getattr(summary_obj, field, None)
if val is not None:
summary_fields['source_workflow_job'][field] = val
return summary_fields
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateSerializer
elif isinstance(obj, Job):
serializer_class = JobSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
elif isinstance(obj, WorkflowApproval):
serializer_class = WorkflowApprovalSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# TODO: restrict models for capabilities prefetch, when it is added
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobSerializer, self).to_representation(obj)
if 'elapsed' in ret:
if obj and obj.pk and obj.started and not obj.finished:
td = now() - obj.started
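                # Equivalent to td.total_seconds(); reports a live elapsed time
                # for a job that has started but not yet finished.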
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobListSerializer(UnifiedJobSerializer):
class Meta:
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-event_processing_finished')
def get_field_names(self, declared_fields, info):
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'event_processing_finished'))
def get_types(self):
if type(self) is UnifiedJobListSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']
else:
return super(UnifiedJobListSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobListSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateListSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateListSerializer
elif isinstance(obj, Job):
serializer_class = JobListSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandListSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobListSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobListSerializer
elif isinstance(obj, WorkflowApproval):
serializer_class = WorkflowApprovalListSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobListSerializer, self).to_representation(obj)
if 'elapsed' in ret:
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
fields = ('result_stdout',)
def get_types(self):
if type(self) is UnifiedJobStdoutSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
else:
return super(UnifiedJobStdoutSerializer, self).get_types()
class UserSerializer(BaseSerializer):
password = serializers.CharField(required=False, default='', write_only=True,
help_text=_('Write-only field used to change the password.'))
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
is_system_auditor = serializers.BooleanField(default=False)
show_capabilities = ['edit', 'delete']
class Meta:
model = User
fields = ('*', '-name', '-description', '-modified',
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')
def to_representation(self, obj):
ret = super(UserSerializer, self).to_representation(obj)
ret.pop('password', None)
if obj and type(self) is UserSerializer:
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
def get_validation_exclusions(self, obj=None):
ret = super(UserSerializer, self).get_validation_exclusions(obj)
ret.extend(['password', 'is_system_auditor'])
return ret
def validate_password(self, value):
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))
return value
def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
new_password = None
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():
new_password = None
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
new_password = None
if new_password:
obj.set_password(new_password)
obj.save(update_fields=['password'])
# Cycle the session key, but if the requesting user is the same
# as the modified user then inject a session key derived from
# the updated user to prevent logout. This is the logic used by
# the Django admin's own user_change_password view.
update_session_auth_hash(self.context['request'], obj)
elif not obj.password:
obj.set_unusable_password()
obj.save(update_fields=['password'])
def get_external_account(self, obj):
return get_external_account(obj)
def create(self, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).create(validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def update(self, obj, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).update(obj, validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def get_related(self, obj):
res = super(UserSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:user_teams_list', kwargs={'pk': obj.pk}),
organizations = self.reverse('api:user_organizations_list', kwargs={'pk': obj.pk}),
admin_of_organizations = self.reverse('api:user_admin_of_organizations_list', kwargs={'pk': obj.pk}),
projects = self.reverse('api:user_projects_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:user_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:user_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:user_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:user_access_list', kwargs={'pk': obj.pk}),
tokens = self.reverse('api:o_auth2_token_list', kwargs={'pk': obj.pk}),
authorized_tokens = self.reverse('api:user_authorized_token_list', kwargs={'pk': obj.pk}),
personal_tokens = self.reverse('api:user_personal_token_list', kwargs={'pk': obj.pk}),
))
return res
def _validate_ldap_managed_field(self, value, field_name):
if not getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
return value
try:
is_ldap_user = bool(self.instance and self.instance.profile.ldap_dn)
except AttributeError:
is_ldap_user = False
if is_ldap_user:
ldap_managed_fields = ['username']
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
if field_name in ldap_managed_fields:
if value != getattr(self.instance, field_name):
raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name)
return value
def validate_username(self, value):
return self._validate_ldap_managed_field(value, 'username')
def validate_first_name(self, value):
return self._validate_ldap_managed_field(value, 'first_name')
def validate_last_name(self, value):
return self._validate_ldap_managed_field(value, 'last_name')
def validate_email(self, value):
return self._validate_ldap_managed_field(value, 'email')
def validate_is_superuser(self, value):
return self._validate_ldap_managed_field(value, 'is_superuser')
class UserActivityStreamSerializer(UserSerializer):
"""Changes to system auditor status are shown as separate entries,
so by excluding it from fields here we avoid duplication, which
would carry some unintended consequences.
"""
class Meta:
model = User
fields = ('*', '-is_system_auditor')
class BaseOAuth2TokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
ALLOWED_SCOPES = ['read', 'write']
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'application', 'expires', 'scope',
)
read_only_fields = ('user', 'token', 'expires', 'refresh_token')
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True}
}
def get_token(self, obj):
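        # The raw token value is only revealed in the POST response that created
        # it; any later read returns the censored placeholder instead.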
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if not obj.refresh_token:
return None
elif request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return None
def get_related(self, obj):
ret = super(BaseOAuth2TokenSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
if obj.application:
ret['application'] = self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def _is_valid_scope(self, value):
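        # Illustrative examples: "read write" and "read" pass this check, while
        # "read read" (duplicate) or "admin" (not an allowed scope) do not.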
if not value or (not isinstance(value, str)):
return False
words = value.split()
for word in words:
if words.count(word) > 1:
return False # do not allow duplicates
if word not in self.ALLOWED_SCOPES:
return False
return True
def validate_scope(self, value):
if not self._is_valid_scope(value):
raise serializers.ValidationError(_(
'Must be a simple space-separated string with allowed scopes {}.'
).format(self.ALLOWED_SCOPES))
return value
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
try:
return super(BaseOAuth2TokenSerializer, self).create(validated_data)
except oauth2.AccessDeniedError as e:
raise PermissionDenied(str(e))
class UserAuthorizedTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True},
'application': {'allow_null': False, 'required': True}
}
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(UserAuthorizedTokenSerializer, self).create(validated_data)
obj.save()
if obj.application:
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenSerializer(BaseOAuth2TokenSerializer):
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(OAuth2TokenSerializer, self).create(validated_data)
if obj.application and obj.application.user:
obj.user = obj.application.user
obj.save()
if obj.application:
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenDetailSerializer(OAuth2TokenSerializer):
class Meta:
read_only_fields = ('*', 'user', 'application')
class UserPersonalTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
read_only_fields = ('user', 'token', 'expires', 'application')
def create(self, validated_data):
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
validated_data['application'] = None
obj = super(UserPersonalTokenSerializer, self).create(validated_data)
obj.save()
return obj
class OAuth2ApplicationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = OAuth2Application
fields = (
'*', 'description', '-user', 'client_id', 'client_secret', 'client_type',
'redirect_uris', 'authorization_grant_type', 'skip_authorization', 'organization'
)
read_only_fields = ('client_id', 'client_secret')
read_only_on_update_fields = ('user', 'authorization_grant_type')
extra_kwargs = {
'user': {'allow_null': True, 'required': False},
'organization': {'allow_null': False},
'authorization_grant_type': {'allow_null': False, 'label': _('Authorization Grant Type')},
'client_secret': {
'label': _('Client Secret')
},
'client_type': {
'label': _('Client Type')
},
'redirect_uris': {
'label': _('Redirect URIs')
},
'skip_authorization': {
'label': _('Skip Authorization')
},
}
def to_representation(self, obj):
ret = super(OAuth2ApplicationSerializer, self).to_representation(obj)
request = self.context.get('request', None)
if request.method != 'POST' and obj.client_type == 'confidential':
ret['client_secret'] = CENSOR_VALUE
if obj.client_type == 'public':
ret.pop('client_secret', None)
return ret
def get_related(self, obj):
res = super(OAuth2ApplicationSerializer, self).get_related(obj)
res.update(dict(
tokens = self.reverse('api:o_auth2_application_token_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse(
'api:o_auth2_application_activity_stream_list', kwargs={'pk': obj.pk}
)
))
return res
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def _summary_field_tokens(self, obj):
token_list = [{'id': x.pk, 'token': CENSOR_VALUE, 'scope': x.scope} for x in obj.oauth2accesstoken_set.all()[:10]]
if has_model_field_prefetched(obj, 'oauth2accesstoken_set'):
token_count = len(obj.oauth2accesstoken_set.all())
else:
if len(token_list) < 10:
token_count = len(token_list)
else:
token_count = obj.oauth2accesstoken_set.count()
return {'count': token_count, 'results': token_list}
def get_summary_fields(self, obj):
ret = super(OAuth2ApplicationSerializer, self).get_summary_fields(obj)
ret['tokens'] = self._summary_field_tokens(obj)
return ret
class OrganizationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Organization
fields = ('*', 'max_hosts', 'custom_virtualenv',)
def get_related(self, obj):
res = super(OrganizationSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
teams = self.reverse('api:organization_teams_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:organization_credential_list', kwargs={'pk': obj.pk}),
applications = self.reverse('api:organization_applications_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:organization_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates = self.reverse('api:organization_notification_templates_list', kwargs={'pk': obj.pk}),
notification_templates_started = self.reverse('api:organization_notification_templates_started_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:organization_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:organization_notification_templates_error_list', kwargs={'pk': obj.pk}),
notification_templates_approvals = self.reverse('api:organization_notification_templates_approvals_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
summary_dict = super(OrganizationSerializer, self).get_summary_fields(obj)
counts_dict = self.context.get('related_field_counts', None)
if counts_dict is not None and summary_dict is not None:
if obj.id not in counts_dict:
summary_dict['related_field_counts'] = {
'inventories': 0, 'teams': 0, 'users': 0,
'job_templates': 0, 'admins': 0, 'projects': 0}
else:
summary_dict['related_field_counts'] = counts_dict[obj.id]
return summary_dict
def validate(self, attrs):
obj = self.instance
view = self.context['view']
obj_limit = getattr(obj, 'max_hosts', None)
api_limit = attrs.get('max_hosts')
if not view.request.user.is_superuser:
if api_limit is not None and api_limit != obj_limit:
# Only allow superusers to edit the max_hosts field
raise serializers.ValidationError(_('Cannot change max_hosts.'))
return super(OrganizationSerializer, self).validate(attrs)
class ProjectOptionsSerializer(BaseSerializer):
class Meta:
fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch', 'scm_refspec',
'scm_clean', 'scm_delete_on_update', 'credential', 'timeout', 'scm_revision')
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential:
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential.pk})
return res
def validate(self, attrs):
errors = {}
# Don't allow assigning a local_path used by another project.
# Don't allow assigning a local_path when scm_type is set.
valid_local_paths = Project.get_local_path_choices()
if self.instance:
scm_type = attrs.get('scm_type', self.instance.scm_type) or u''
else:
scm_type = attrs.get('scm_type', u'') or u''
if self.instance and not scm_type:
valid_local_paths.append(self.instance.local_path)
if scm_type:
attrs.pop('local_path', None)
if 'local_path' in attrs and attrs['local_path'] not in valid_local_paths:
errors['local_path'] = _('This path is already being used by another manual project.')
if attrs.get('scm_refspec') and scm_type != 'git':
errors['scm_refspec'] = _('SCM refspec can only be used with git projects.')
if errors:
raise serializers.ValidationError(errors)
return super(ProjectOptionsSerializer, self).validate(attrs)
class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete', 'copy']
capabilities_prefetch = [
'admin', 'update',
{'copy': 'organization.project_admin'}
]
class Meta:
model = Project
fields = ('*', 'organization', 'scm_update_on_launch',
'scm_update_cache_timeout', 'allow_override', 'custom_virtualenv',) + \
('last_update_failed', 'last_updated') # Backwards compatibility
def get_related(self, obj):
res = super(ProjectSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:project_teams_list', kwargs={'pk': obj.pk}),
playbooks = self.reverse('api:project_playbooks', kwargs={'pk': obj.pk}),
inventory_files = self.reverse('api:project_inventories', kwargs={'pk': obj.pk}),
update = self.reverse('api:project_update_view', kwargs={'pk': obj.pk}),
project_updates = self.reverse('api:project_updates_list', kwargs={'pk': obj.pk}),
scm_inventory_sources = self.reverse('api:project_scm_inventory_sources', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:project_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:project_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_started = self.reverse('api:project_notification_templates_started_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:project_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:project_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:project_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:project_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:project_copy', kwargs={'pk': obj.pk})
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail',
kwargs={'pk': obj.organization.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.last_update.pk})
return res
def to_representation(self, obj):
ret = super(ProjectSerializer, self).to_representation(obj)
if 'scm_revision' in ret and obj.scm_type == '':
ret['scm_revision'] = ''
return ret
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
organization = None
if 'organization' in attrs:
organization = attrs['organization']
elif self.instance:
organization = self.instance.organization
if 'allow_override' in attrs and self.instance:
# case where user is turning off this project setting
if self.instance.allow_override and not attrs['allow_override']:
used_by = set(
JobTemplate.objects.filter(
models.Q(project=self.instance),
models.Q(ask_scm_branch_on_launch=True) | ~models.Q(scm_branch="")
).values_list('pk', flat=True)
)
if used_by:
raise serializers.ValidationError({
'allow_override': _('One or more job templates depend on branch override behavior for this project (ids: {}).').format(
' '.join([str(pk) for pk in used_by])
)})
view = self.context.get('view', None)
if not organization and not view.request.user.is_superuser:
# Only allow super users to create orgless projects
raise serializers.ValidationError(_('Organization is missing'))
elif get_field_from_model_or_attrs('scm_type') == '':
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
if get_field_from_model_or_attrs(fd):
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
return super(ProjectSerializer, self).validate(attrs)
class ProjectPlaybooksSerializer(ProjectSerializer):
playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))
class Meta:
model = Project
fields = ('playbooks',)
def get_playbooks(self, obj):
return obj.playbook_files if obj.scm_type else obj.playbooks
@property
def data(self):
ret = super(ProjectPlaybooksSerializer, self).data
ret = ret.get('playbooks', [])
return ReturnList(ret, serializer=self)
class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_(
'Array of inventory files and directories available within this project, '
'not comprehensive.'))
class Meta:
model = Project
fields = ('inventory_files',)
@property
def data(self):
ret = super(ProjectInventoriesSerializer, self).data
ret = ret.get('inventory_files', [])
return ReturnList(ret, serializer=self)
class ProjectUpdateViewSerializer(ProjectSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', 'project', 'job_type', 'job_tags', '-controller_node')
def get_related(self, obj):
res = super(ProjectUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
))
return res
class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
class Meta:
model = ProjectUpdate
fields = ('*', 'host_status_counts', 'playbook_counts',)
def get_playbook_counts(self, obj):
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except ProjectUpdateEvent.DoesNotExist:
counts = {}
return counts
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class ProjectUpdateCancelSerializer(ProjectUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class BaseSerializerWithVariables(BaseSerializer):
def validate_variables(self, value):
return vars_validate_or_raise(value)
class InventorySerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = [
'admin', 'adhoc',
{'copy': 'organization.inventory_admin'}
]
class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'has_inventory_sources', 'total_inventory_sources',
'inventory_sources_with_failures', 'insights_credential',
'pending_deletion',)
def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
res.update(dict(
hosts = self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
root_groups = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
variable_data = self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
script = self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
tree = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
update_inventory_sources = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
job_templates = self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:inventory_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_copy', kwargs={'pk': obj.pk})
))
if obj.insights_credential:
res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
def validate_host_filter(self, host_filter):
if host_filter:
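            # Advanced JSONB lookups against ansible_facts are rejected here, e.g.
            # a hypothetical "ansible_facts__ansible_distribution__contains=..."
            # filter; only plain (__exact) comparisons are allowed for facts.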
try:
for match in JSONBField.get_lookups().keys():
if match == 'exact':
# __exact is allowed
continue
match = '__{}'.format(match)
if re.match(
'ansible_facts[^=]+{}='.format(match),
host_filter
):
raise models.base.ValidationError({
'host_filter': 'ansible_facts does not support searching with {}'.format(match)
})
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
return host_filter
def validate(self, attrs):
kind = None
if 'kind' in attrs:
kind = attrs['kind']
elif self.instance:
kind = self.instance.kind
host_filter = None
if 'host_filter' in attrs:
host_filter = attrs['host_filter']
elif self.instance:
host_filter = self.instance.host_filter
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _(
'Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs)
class InventoryScriptSerializer(InventorySerializer):
class Meta:
fields = ()
class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
capabilities_prefetch = ['inventory.admin']
has_active_failures = serializers.SerializerMethodField()
has_inventory_sources = serializers.SerializerMethodField()
class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
'has_active_failures', 'has_inventory_sources', 'last_job',
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
'ansible_facts_modified',)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new host.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(HostSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:host_variable_data', kwargs={'pk': obj.pk}),
groups = self.reverse('api:host_groups_list', kwargs={'pk': obj.pk}),
all_groups = self.reverse('api:host_all_groups_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:host_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:host_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:host_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:host_inventory_sources_list', kwargs={'pk': obj.pk}),
smart_inventories = self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
ad_hoc_command_events = self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
insights = self.reverse('api:host_insights', kwargs={'pk': obj.pk}),
ansible_facts = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.last_job:
res['last_job'] = self.reverse('api:job_detail', kwargs={'pk': obj.last_job.pk})
if obj.last_job_host_summary:
res['last_job_host_summary'] = self.reverse('api:job_host_summary_detail', kwargs={'pk': obj.last_job_host_summary.pk})
return res
def get_summary_fields(self, obj):
d = super(HostSerializer, self).get_summary_fields(obj)
try:
d['last_job']['job_template_id'] = obj.last_job.job_template.id
d['last_job']['job_template_name'] = obj.last_job.job_template.name
except (KeyError, AttributeError):
pass
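        # Summarize at most five of the host's groups, using the prefetch cache when it is populated.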
if has_model_field_prefetched(obj, 'groups'):
group_list = sorted([{'id': g.id, 'name': g.name} for g in obj.groups.all()], key=lambda x: x['id'])[:5]
else:
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
group_cnt = obj.groups.count()
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
d.setdefault('recent_jobs', [{
'id': j.job.id,
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]])
return d
def _get_host_port_from_name(self, name):
        # Allow the hostname to specify the port number inline (IPv6 not yet supported).
port = None
if name.count(':') == 1:
name, port = name.split(':')
try:
port = int(port)
if port < 1 or port > 65535:
raise ValueError
except ValueError:
raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port))
return name, port
def validate_name(self, value):
name = force_text(value or '')
# Validate here only, update in main validate method.
host, port = self._get_host_port_from_name(name)
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
return value
def validate_variables(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
name = force_text(attrs.get('name', self.instance and self.instance.name or ''))
host, port = self._get_host_port_from_name(name)
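        # A port given inline as 'name:port' is stripped from the name and stored in the host variables as ansible_ssh_port.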
if port:
attrs['name'] = host
variables = force_text(attrs.get('variables', self.instance and self.instance.variables or ''))
vars_dict = parse_yaml_or_json(variables)
vars_dict['ansible_ssh_port'] = port
attrs['variables'] = json.dumps(vars_dict)
return super(HostSerializer, self).validate(attrs)
def to_representation(self, obj):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
def get_has_active_failures(self, obj):
return bool(
obj.last_job_host_summary and obj.last_job_host_summary.failed
)
def get_has_inventory_sources(self, obj):
return obj.inventory_sources.exists()
class AnsibleFactsSerializer(BaseSerializer):
class Meta:
model = Host
def to_representation(self, obj):
return obj.ansible_facts
class GroupSerializer(BaseSerializerWithVariables):
show_capabilities = ['copy', 'edit', 'delete']
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
class Meta:
model = Group
fields = ('*', 'inventory', 'variables')
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new group.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(GroupSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:group_variable_data', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:group_hosts_list', kwargs={'pk': obj.pk}),
potential_children = self.reverse('api:group_potential_children_list', kwargs={'pk': obj.pk}),
children = self.reverse('api:group_children_list', kwargs={'pk': obj.pk}),
all_hosts = self.reverse('api:group_all_hosts_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:group_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:group_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:group_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:group_inventory_sources_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:group_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
return res
def validate_name(self, value):
if value in ('all', '_meta'):
raise serializers.ValidationError(_('Invalid group name.'))
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
return value
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ('*', 'children')
def get_children(self, obj):
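        # Recursively serialize child groups, selecting/prefetching related rows to limit per-level queries.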
if obj is None:
return {}
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
class BaseVariableDataSerializer(BaseSerializer):
class Meta:
fields = ('variables',)
def to_representation(self, obj):
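        # Variables are stored as YAML or JSON text; expose them to the API as a parsed dictionary.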
if obj is None:
return {}
ret = super(BaseVariableDataSerializer, self).to_representation(obj)
return parse_yaml_or_json(ret.get('variables', '') or '{}')
def to_internal_value(self, data):
data = {'variables': json.dumps(data)}
return super(BaseVariableDataSerializer, self).to_internal_value(data)
class InventoryVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Inventory
class HostVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Host
class GroupVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Group
class CustomInventoryScriptSerializer(BaseSerializer):
script = serializers.CharField(trim_whitespace=False)
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [
{'edit': 'admin'}
]
class Meta:
model = CustomInventoryScript
fields = ('*', "script", "organization")
def validate_script(self, value):
if not value.startswith("#!"):
            raise serializers.ValidationError(_('Script must begin with a hashbang sequence, e.g. #!/usr/bin/env python'))
return value
def to_representation(self, obj):
ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)
if obj is None:
return ret
request = self.context.get('request', None)
if request.user not in obj.admin_role and \
not request.user.is_superuser and \
not request.user.is_system_auditor and \
not (obj.organization is not None and request.user in obj.organization.auditor_role):
ret['script'] = None
return ret
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
res.update(dict(
object_roles = self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class InventorySourceOptionsSerializer(BaseSerializer):
credential = DeprecatedCredentialField(
help_text=_('Cloud credential to use for inventory updates.')
)
class Meta:
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'custom_virtualenv', 'timeout', 'verbosity')
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential: # TODO: remove when 'credential' field is removed
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential})
if obj.source_script:
res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})
return res
def validate_source_vars(self, value):
ret = vars_validate_or_raise(value)
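        # Source variables may not use names on the inventory environment-variable blacklist.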
for env_k in parse_yaml_or_json(value):
if env_k in settings.INV_ENV_VARIABLE_BLACKLIST:
raise serializers.ValidationError(_("`{}` is a prohibited environment variable".format(env_k)))
return ret
def validate(self, attrs):
# TODO: Validate source, validate source_regions
errors = {}
source = attrs.get('source', self.instance and self.instance.source or '')
source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')
if source == 'custom':
if source_script is None or source_script == '':
errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.")
else:
try:
if not self.instance:
dest_inventory = attrs.get('inventory', None)
if not dest_inventory:
errors['inventory'] = _("Must provide an inventory.")
else:
dest_inventory = self.instance.inventory
if dest_inventory and source_script.organization != dest_inventory.organization:
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
except Exception:
errors['source_script'] = _("'source_script' doesn't exist.")
logger.exception('Problem processing source_script validation.')
if errors:
raise serializers.ValidationError(errors)
return super(InventorySourceOptionsSerializer, self).validate(attrs)
# TODO: remove when old 'credential' fields are removed
def get_summary_fields(self, obj):
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
all_creds = []
if 'credential' in summary_fields:
cred = obj.get_cloud_credential()
if cred:
summarized_cred = {
'id': cred.id, 'name': cred.name, 'description': cred.description,
'kind': cred.kind, 'cloud': True
}
summary_fields['credential'] = summarized_cred
all_creds.append(summarized_cred)
summary_fields['credential']['credential_type_id'] = cred.credential_type_id
else:
summary_fields.pop('credential')
summary_fields['credentials'] = all_creds
return summary_fields
class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
status = serializers.ChoiceField(choices=InventorySource.INVENTORY_SOURCE_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete']
capabilities_prefetch = [
{'admin': 'inventory.admin'},
{'start': 'inventory.update'}
]
class Meta:
model = InventorySource
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout',
'source_project', 'update_on_project_update') + \
('last_update_failed', 'last_updated') # Backwards compatibility.
extra_kwargs = {
'inventory': {'required': True}
}
def get_related(self, obj):
res = super(InventorySourceSerializer, self).get_related(obj)
res.update(dict(
update = self.reverse('api:inventory_source_update_view', kwargs={'pk': obj.pk}),
inventory_updates = self.reverse('api:inventory_source_updates_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:inventory_source_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_source_activity_stream_list', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:inventory_source_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_source_groups_list', kwargs={'pk': obj.pk}),
notification_templates_started = self.reverse('api:inventory_source_notification_templates_started_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:inventory_source_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:inventory_source_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.source_project_id is not None:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': obj.source_project.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.last_update.pk})
else:
res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})
return res
def get_group(self, obj): # TODO: remove in 3.3
if obj.deprecated_group:
return obj.deprecated_group.id
return None
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
        # Inventory is read-only unless creating a new inventory source.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
# TODO: remove when old 'credential' fields are removed
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name, self.credential)
return super(InventorySourceOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
def to_representation(self, obj):
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
def validate_source_project(self, value):
if value and value.scm_type == '':
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
return value
def validate_update_on_project_update(self, value):
if value and self.instance and self.instance.schedules.exists():
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
return value
def validate_inventory(self, value):
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value
# TODO: remove when old 'credential' fields are removed
def create(self, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).create(validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def update(self, obj, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).update(obj, validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def _update_deprecated_fields(self, fields, obj):
if 'credential' in fields:
new_cred = fields['credential']
existing = obj.credentials.all()
if new_cred not in existing:
for cred in existing:
# Remove all other cloud credentials
obj.credentials.remove(cred)
if new_cred:
# Add new credential
obj.credentials.add(new_cred)
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when 'credential' field removed
deprecated_fields['credential'] = attrs.pop('credential')
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
if get_field_from_model_or_attrs('source') != 'scm':
redundant_scm_fields = list(filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
))
if redundant_scm_fields:
raise serializers.ValidationError(
{"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))}
)
attrs = super(InventorySourceSerializer, self).validate(attrs)
# Check type consistency of source and cloud credential, if provided
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
cred_error = InventorySource.cloud_credential_validation(
get_field_from_model_or_attrs('source'),
cred
)
if cred_error:
raise serializers.ValidationError({"credential": cred_error})
return attrs
class InventorySourceUpdateSerializer(InventorySourceSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = InventoryUpdate
fields = ('*', 'inventory', 'inventory_source', 'license_error', 'org_host_limit_error',
'source_project_update', 'custom_virtualenv', '-controller_node',)
def get_related(self, obj):
res = super(InventoryUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
inventory_source = self.reverse(
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
))
if obj.source_project_update_id:
res['source_project_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.source_project_update.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
res['credentials'] = self.reverse('api:inventory_update_credentials_list', kwargs={'pk': obj.pk})
return res
class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
source_project = serializers.SerializerMethodField(
help_text=_('The project used for this job.'),
method_name='get_source_project_id'
)
class Meta:
model = InventoryUpdate
fields = ('*', 'source_project',)
def get_source_project(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template', None)
def get_source_project_id(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template.id', None)
def get_related(self, obj):
res = super(InventoryUpdateDetailSerializer, self).get_related(obj)
source_project_id = self.get_source_project_id(obj)
if source_project_id:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': source_project_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(InventoryUpdateDetailSerializer, self).get_summary_fields(obj)
source_project = self.get_source_project(obj)
if source_project:
summary_fields['source_project'] = {}
for field in SUMMARIZABLE_FK_FIELDS['project']:
value = getattr(source_project, field, None)
if value is not None:
summary_fields['source_project'][field] = value
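        # Summarize the first associated credential, if any.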
cred = obj.credentials.first()
if cred:
summary_fields['credential'] = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
return summary_fields
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = InventoryUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class TeamSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Team
fields = ('*', 'organization')
def get_related(self, obj):
res = super(TeamSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:team_projects_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:team_users_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:team_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:team_roles_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:team_object_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:team_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:team_access_list', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
class RoleSerializer(BaseSerializer):
class Meta:
model = Role
fields = ('*', '-created', '-modified')
read_only_fields = ('id', 'role_field', 'description', 'name')
def to_representation(self, obj):
ret = super(RoleSerializer, self).to_representation(obj)
if obj.object_id:
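            # Summarize the resource this role applies to: username for user objects, name for everything else.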
content_object = obj.content_object
if hasattr(content_object, 'username'):
ret['summary_fields']['resource_name'] = obj.content_object.username
if hasattr(content_object, 'name'):
ret['summary_fields']['resource_name'] = obj.content_object.name
content_model = obj.content_type.model_class()
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
return ret
def get_related(self, obj):
ret = super(RoleSerializer, self).get_related(obj)
ret['users'] = self.reverse('api:role_users_list', kwargs={'pk': obj.pk})
ret['teams'] = self.reverse('api:role_teams_list', kwargs={'pk': obj.pk})
try:
if obj.content_object:
ret.update(reverse_gfk(obj.content_object, self.context.get('request')))
except AttributeError:
# AttributeError's happen if our content_object is pointing at
# a model that no longer exists. This is dirty data and ideally
# doesn't exist, but in case it does, let's not puke.
pass
return ret
class RoleSerializerWithParentAccess(RoleSerializer):
show_capabilities = ['unattach']
class ResourceAccessListElementSerializer(UserSerializer):
show_capabilities = [] # Clear fields from UserSerializer parent class
def to_representation(self, user):
'''
With this method we derive "direct" and "indirect" access lists. Contained
in the direct access list are all the roles the user is a member of, and
all of the roles that are directly granted to any teams that the user is a
member of.
The indirect access list is a list of all of the roles that the user is
a member of that are ancestors of any roles that grant permissions to
the resource.
'''
ret = super(ResourceAccessListElementSerializer, self).to_representation(user)
obj = self.context['view'].get_parent_object()
if self.context['view'].request is not None:
requesting_user = self.context['view'].request.user
else:
requesting_user = None
if 'summary_fields' not in ret:
ret['summary_fields'] = {}
def format_role_perm(role):
role_dict = { 'id': role.id, 'name': role.name, 'description': role.description}
try:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
except AttributeError:
pass
if role.content_type is not None:
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
def format_team_role_perm(naive_team_role, permissive_role_ids):
ret = []
team_role = naive_team_role
if naive_team_role.role_field == 'admin_role':
team_role = naive_team_role.content_object.member_role
for role in team_role.children.filter(id__in=permissive_role_ids).all():
role_dict = {
'id': role.id,
'name': role.name,
'description': role.description,
'team_id': team_role.object_id,
'team_name': team_role.content_object.name,
'team_organization_name': team_role.content_object.organization.name,
}
if role.content_type is not None:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
return ret
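        # Roles granted directly on this object vs. the full ancestor set of those roles; team-based
        # grants are resolved separately so they can be attributed to the owning team.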
team_content_type = ContentType.objects.get_for_model(Team)
content_type = ContentType.objects.get_for_model(obj)
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
direct_access_roles = user.roles \
.filter(id__in=direct_permissive_role_ids).all()
direct_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=direct_permissive_role_ids)
if content_type == team_content_type:
# When looking at the access list for a team, exclude the entries
# for that team. This exists primarily so we don't list the read role
# as a direct role when a user is a member or admin of a team
direct_team_roles = direct_team_roles.exclude(
children__content_type=team_content_type,
children__object_id=obj.id
)
indirect_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=all_permissive_role_ids) \
.exclude(id__in=direct_team_roles)
indirect_access_roles = user.roles \
.filter(id__in=all_permissive_role_ids) \
.exclude(id__in=direct_permissive_role_ids) \
.exclude(id__in=direct_team_roles) \
.exclude(id__in=indirect_team_roles)
ret['summary_fields']['direct_access'] \
= [format_role_perm(r) for r in direct_access_roles.distinct()] \
+ [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \
+ [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x]
ret['summary_fields']['indirect_access'] \
= [format_role_perm(r) for r in indirect_access_roles.distinct()]
return ret
class CredentialTypeSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
managed_by_tower = serializers.ReadOnlyField()
class Meta:
model = CredentialType
fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs',
'injectors')
def validate(self, attrs):
if self.instance and self.instance.managed_by_tower:
raise PermissionDenied(
detail=_("Modifications not allowed for managed credential types")
)
old_inputs = {}
if self.instance:
old_inputs = copy.deepcopy(self.instance.inputs)
ret = super(CredentialTypeSerializer, self).validate(attrs)
if self.instance and self.instance.credentials.exists():
if 'inputs' in attrs and old_inputs != self.instance.inputs:
raise PermissionDenied(
detail= _("Modifications to inputs are not allowed for credential types that are in use")
)
if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):
raise serializers.ValidationError({
"kind": _("Must be 'cloud' or 'net', not %s") % attrs['kind']
})
fields = attrs.get('inputs', {}).get('fields', [])
for field in fields:
if field.get('ask_at_runtime', False):
raise serializers.ValidationError({"inputs": _("'ask_at_runtime' is not supported for custom credentials.")})
return ret
def get_related(self, obj):
res = super(CredentialTypeSerializer, self).get_related(obj)
res['credentials'] = self.reverse(
'api:credential_type_credential_list',
kwargs={'pk': obj.pk}
)
res['activity_stream'] = self.reverse(
'api:credential_type_activity_stream_list',
kwargs={'pk': obj.pk}
)
return res
def to_representation(self, data):
value = super(CredentialTypeSerializer, self).to_representation(data)
# translate labels and help_text for credential fields "managed by Tower"
if value.get('managed_by_tower'):
value['name'] = _(value['name'])
for field in value.get('inputs', {}).get('fields', []):
field['label'] = _(field['label'])
if 'help_text' in field:
field['help_text'] = _(field['help_text'])
return value
def filter_field_metadata(self, fields, method):
# API-created/modified CredentialType kinds are limited to
# `cloud` and `net`
if method in ('PUT', 'POST'):
fields['kind']['choices'] = list(filter(
lambda choice: choice[0] in ('cloud', 'net'),
fields['kind']['choices']
))
return fields
class CredentialSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy', 'use']
capabilities_prefetch = ['admin', 'use']
class Meta:
model = Credential
fields = ('*', 'organization', 'credential_type', 'inputs', 'kind', 'cloud', 'kubernetes')
extra_kwargs = {
'credential_type': {
'label': _('Credential Type'),
},
}
def to_representation(self, data):
value = super(CredentialSerializer, self).to_representation(data)
if 'inputs' in value:
value['inputs'] = data.display_inputs()
return value
def get_related(self, obj):
res = super(CredentialSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
res.update(dict(
activity_stream = self.reverse('api:credential_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:credential_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:credential_object_roles_list', kwargs={'pk': obj.pk}),
owner_users = self.reverse('api:credential_owner_users_list', kwargs={'pk': obj.pk}),
owner_teams = self.reverse('api:credential_owner_teams_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:credential_copy', kwargs={'pk': obj.pk}),
input_sources = self.reverse('api:credential_input_source_sublist', kwargs={'pk': obj.pk}),
credential_type = self.reverse('api:credential_type_detail', kwargs={'pk': obj.credential_type.pk}),
))
parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None]
if parents:
            res.update({parents[0].content_type.name: parents[0].content_object.get_absolute_url(self.context.get('request'))})
elif len(obj.admin_role.members.all()) > 0:
user = obj.admin_role.members.all()[0]
res.update({'user': self.reverse('api:user_detail', kwargs={'pk': user.pk})})
return res
def get_summary_fields(self, obj):
summary_dict = super(CredentialSerializer, self).get_summary_fields(obj)
summary_dict['owners'] = []
for user in obj.admin_role.members.all():
summary_dict['owners'].append({
'id': user.pk,
'type': 'user',
'name': user.username,
'description': ' '.join([user.first_name, user.last_name]),
'url': self.reverse('api:user_detail', kwargs={'pk': user.pk}),
})
for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]:
summary_dict['owners'].append({
'id': parent.content_object.pk,
'type': camelcase_to_underscore(parent.content_object.__class__.__name__),
'name': parent.content_object.name,
'description': parent.content_object.description,
'url': parent.content_object.get_absolute_url(self.context.get('request')),
})
return summary_dict
def get_validation_exclusions(self, obj=None):
ret = super(CredentialSerializer, self).get_validation_exclusions(obj)
for field in ('credential_type', 'inputs'):
if field in ret:
ret.remove(field)
return ret
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
for related_objects in (
'ad_hoc_commands',
'insights_inventories',
'unifiedjobs',
'unifiedjobtemplates',
'projects',
'projectupdates',
'workflowjobnodes'
):
if getattr(self.instance, related_objects).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
class CredentialSerializerCreate(CredentialSerializer):
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, '
'do not give either team or organization. Only valid for creation.'))
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, '
'do not give either user or organization. Only valid for creation.'))
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False, default=None, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, '
'do not give either user or team.'))
class Meta:
model = Credential
fields = ('*', 'user', 'team')
def validate(self, attrs):
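        # At least one of user, team, or organization must be provided; supplying a team also sets the credential's organization.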
owner_fields = set()
for field in ('user', 'team', 'organization'):
if field in attrs:
if attrs[field]:
owner_fields.add(field)
else:
attrs.pop(field)
if not owner_fields:
raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")})
if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
return super(CredentialSerializerCreate, self).validate(attrs)
def create(self, validated_data):
user = validated_data.pop('user', None)
team = validated_data.pop('team', None)
credential = super(CredentialSerializerCreate, self).create(validated_data)
if user:
credential.admin_role.members.add(user)
if team:
if not credential.organization or team.organization.id != credential.organization.id:
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
credential.admin_role.parents.add(team.admin_role)
credential.use_role.parents.add(team.member_role)
return credential
class CredentialInputSourceSerializer(BaseSerializer):
show_capabilities = ['delete']
class Meta:
model = CredentialInputSource
fields = (
'*',
'input_field_name',
'metadata',
'target_credential',
'source_credential',
'-name',
)
extra_kwargs = {
'input_field_name': {'required': True},
'target_credential': {'required': True},
'source_credential': {'required': True},
}
def get_related(self, obj):
res = super(CredentialInputSourceSerializer, self).get_related(obj)
res['source_credential'] = obj.source_credential.get_absolute_url(request=self.context.get('request'))
res['target_credential'] = obj.target_credential.get_absolute_url(request=self.context.get('request'))
return res
class UserCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-team', '-organization')
class TeamCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-organization')
class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-team')
class LabelsListMixin(object):
def _summary_field_labels(self, obj):
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
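        # Show at most ten labels; only issue a separate COUNT query when the first page is full and labels were not prefetched.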
if has_model_field_prefetched(obj, 'labels'):
label_ct = len(obj.labels.all())
else:
if len(label_list) < 10:
label_ct = len(label_list)
else:
label_ct = obj.labels.count()
return {'count': label_ct, 'results': label_list}
def get_summary_fields(self, obj):
res = super(LabelsListMixin, self).get_summary_fields(obj)
res['labels'] = self._summary_field_labels(obj)
return res
class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
class Meta:
fields = ('*', 'job_type', 'inventory', 'project', 'playbook', 'scm_branch',
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
'use_fact_cache',)
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
try:
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
except ObjectDoesNotExist:
setattr(obj, 'inventory', None)
try:
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': obj.pk}
)
res['credentials'] = self.reverse(
'api:job_template_credentials_list',
kwargs={'pk': obj.pk}
)
elif isinstance(obj, UnifiedJob):
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret:
ret['playbook'] = ''
return ret
def validate(self, attrs):
if 'project' in self.fields and 'playbook' in self.fields:
project = attrs.get('project', self.instance.project if self.instance else None)
playbook = attrs.get('playbook', self.instance and self.instance.playbook or '')
scm_branch = attrs.get('scm_branch', self.instance.scm_branch if self.instance else None)
ask_scm_branch_on_launch = attrs.get(
'ask_scm_branch_on_launch', self.instance.ask_scm_branch_on_launch if self.instance else None)
if not project:
raise serializers.ValidationError({'project': _('This field is required.')})
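            # The playbook must exist in the project: SCM projects that do not allow branch override
            # check the cached playbook_files list; manual projects check project.playbooks.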
playbook_not_found = bool(
(
project and project.scm_type and (not project.allow_override) and
playbook and force_text(playbook) not in project.playbook_files
) or
(project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks) # manual
)
if playbook_not_found:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not playbook:
raise serializers.ValidationError({'playbook': _('Must select playbook for project.')})
if scm_branch and not project.allow_override:
raise serializers.ValidationError({'scm_branch': _('Project does not allow overriding branch.')})
if ask_scm_branch_on_launch and not project.allow_override:
raise serializers.ValidationError({'ask_scm_branch_on_launch': _('Project does not allow overriding branch.')})
ret = super(JobOptionsSerializer, self).validate(attrs)
return ret
class JobTemplateMixin(object):
'''
Provide recent jobs and survey details in summary_fields
'''
def _recent_jobs(self, obj):
# Exclude "joblets", jobs that ran as part of a sliced workflow job
uj_qs = obj.unifiedjob_unified_jobs.exclude(job__job_slice_count__gt=1).order_by('-created')
# Would like to apply an .only, but does not play well with non_polymorphic
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished, 'canceled_on': x.canceled_on,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]
def get_summary_fields(self, obj):
d = super(JobTemplateMixin, self).get_summary_fields(obj)
if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec):
d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description'])
d['recent_jobs'] = self._recent_jobs(obj)
return d
def validate(self, attrs):
webhook_service = attrs.get('webhook_service', getattr(self.instance, 'webhook_service', None))
webhook_credential = attrs.get('webhook_credential', getattr(self.instance, 'webhook_credential', None))
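        # A webhook credential must be a personal access token whose namespace matches the selected webhook service ('<service>_token').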
if webhook_credential:
if webhook_credential.credential_type.kind != 'token':
raise serializers.ValidationError({
'webhook_credential': _("Must be a Personal Access Token."),
})
msg = {'webhook_credential': _("Must match the selected webhook service.")}
if webhook_service:
if webhook_credential.credential_type.namespace != '{}_token'.format(webhook_service):
raise serializers.ValidationError(msg)
else:
raise serializers.ValidationError(msg)
return super().validate(attrs)
class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):
show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': ['project.use', 'inventory.use']}
]
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)
class Meta:
model = JobTemplate
fields = (
'*', 'host_config_key', 'ask_scm_branch_on_launch', 'ask_diff_mode_on_launch',
'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch', 'survey_enabled',
'become_enabled', 'diff_mode', 'allow_simultaneous', 'custom_virtualenv',
'job_slice_count', 'webhook_service', 'webhook_credential',
)
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
res.update(
jobs=self.reverse('api:job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules=self.reverse('api:job_template_schedules_list', kwargs={'pk': obj.pk}),
activity_stream=self.reverse('api:job_template_activity_stream_list', kwargs={'pk': obj.pk}),
launch=self.reverse('api:job_template_launch', kwargs={'pk': obj.pk}),
webhook_key=self.reverse('api:webhook_key', kwargs={'model_kwarg': 'job_templates', 'pk': obj.pk}),
webhook_receiver=(
self.reverse('api:webhook_receiver_{}'.format(obj.webhook_service),
kwargs={'model_kwarg': 'job_templates', 'pk': obj.pk})
if obj.webhook_service else ''
),
notification_templates_started=self.reverse('api:job_template_notification_templates_started_list', kwargs={'pk': obj.pk}),
notification_templates_success=self.reverse('api:job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error=self.reverse('api:job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list=self.reverse('api:job_template_access_list', kwargs={'pk': obj.pk}),
survey_spec=self.reverse('api:job_template_survey_spec', kwargs={'pk': obj.pk}),
labels=self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
object_roles=self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups=self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
slice_workflow_jobs=self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
copy=self.reverse('api:job_template_copy', kwargs={'pk': obj.pk}),
)
if obj.host_config_key:
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
return res
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
inventory = get_field_from_model_or_attrs('inventory')
project = get_field_from_model_or_attrs('project')
if get_field_from_model_or_attrs('host_config_key') and not inventory:
raise serializers.ValidationError({'host_config_key': _(
"Cannot enable provisioning callback without an inventory set."
)})
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None:
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
raise serializers.ValidationError({'inventory': prompting_error_message})
return super(JobTemplateSerializer, self).validate(attrs)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def get_summary_fields(self, obj):
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobTemplateWithSpecSerializer(JobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = JobTemplate
fields = ('*', 'survey_spec')
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
passwords_needed_to_start = serializers.ReadOnlyField()
artifacts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = (
'*', 'job_template', 'passwords_needed_to_start', 'allow_simultaneous',
'artifacts', 'scm_revision', 'instance_group', 'diff_mode', 'job_slice_number',
'job_slice_count', 'webhook_service', 'webhook_credential', 'webhook_guid',
)
def get_related(self, obj):
res = super(JobSerializer, self).get_related(obj)
res.update(dict(
job_events = self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
create_schedule = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk}),
))
try:
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
except ObjectDoesNotExist:
setattr(obj, 'job_template', None)
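        # 'or True' makes this unconditional, so the cancel URL is always exposed.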
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
try:
if obj.project_update:
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
)
except ObjectDoesNotExist:
pass
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
def get_artifacts(self, obj):
if obj:
return obj.display_artifacts()
return {}
def to_representation(self, obj):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobDetailSerializer(JobSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = Job
fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')
def get_playbook_counts(self, obj):
task_count = obj.job_events.filter(event='playbook_on_task_start').count()
play_count = obj.job_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except JobEvent.DoesNotExist:
counts = {}
return counts
class JobCancelSerializer(BaseSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
model = Job
fields = ('can_cancel',)
class JobRelaunchSerializer(BaseSerializer):
passwords_needed_to_start = serializers.SerializerMethodField()
retry_counts = serializers.SerializerMethodField()
hosts = serializers.ChoiceField(
required=False, allow_null=True, default='all',
choices=[
('all', _('No change to job limit')),
('failed', _('All failed and unreachable hosts'))
],
write_only=True
)
credential_passwords = VerbatimField(required=True, write_only=True)
class Meta:
model = Job
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords',)
def validate_credential_passwords(self, value):
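        # Every password the original job required must be re-supplied with a non-empty value when relaunching.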
pnts = self.instance.passwords_needed_to_start
missing = set(pnts) - set(key for key in value if value[key])
if missing:
raise serializers.ValidationError(_(
'Missing passwords needed to start: {}'.format(', '.join(missing))
))
return value
def to_representation(self, obj):
res = super(JobRelaunchSerializer, self).to_representation(obj)
view = self.context.get('view', None)
if hasattr(view, '_raw_data_form_marker'):
password_keys = dict([(p, u'') for p in self.get_passwords_needed_to_start(obj)])
res.update(password_keys)
return res
def get_passwords_needed_to_start(self, obj):
if obj:
return obj.passwords_needed_to_start
return ''
def get_retry_counts(self, obj):
if obj.status in ACTIVE_STATES:
return _('Relaunch by host status not available until job finishes running.')
data = OrderedDict([])
for status in self.fields['hosts'].choices.keys():
data[status] = obj.retry_qs(status).count()
return data
def get_validation_exclusions(self, *args, **kwargs):
r = super(JobRelaunchSerializer, self).get_validation_exclusions(*args, **kwargs)
r.append('credential_passwords')
return r
def validate(self, attrs):
obj = self.instance
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None or obj.inventory.pending_deletion:
raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('can_schedule', 'prompts',)
def get_can_schedule(self, obj):
'''
Need both a job template and job prompts to schedule
'''
return obj.can_schedule
@staticmethod
def _summarize(res_name, obj):
summary = {}
for field in SUMMARIZABLE_FK_FIELDS[res_name]:
summary[field] = getattr(obj, field, None)
return summary
def get_prompts(self, obj):
try:
config = obj.launch_config
ret = config.prompts_dict(display=True)
if 'inventory' in ret:
ret['inventory'] = self._summarize('inventory', ret['inventory'])
if 'credentials' in ret:
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
ret['credentials'] = all_creds
return ret
except JobLaunchConfig.DoesNotExist:
            return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer):
class Meta:
model = AdHocCommand
fields = ('*', 'job_type', 'inventory', 'limit', 'credential',
'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars',
'become_enabled', 'diff_mode', '-unified_job_template', '-description')
extra_kwargs = {
'name': {
'read_only': True,
},
}
def get_field_names(self, declared_fields, info):
field_names = super(AdHocCommandSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('unified_job_template', 'description'))
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(AdHocCommandSerializer, self).build_standard_field(field_name, model_field)
# Load module name choices dynamically from DB settings.
if field_name == 'module_name':
field_class = serializers.ChoiceField
module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS]
module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''
field_kwargs['choices'] = module_name_choices
field_kwargs['required'] = bool(not module_name_default)
field_kwargs['default'] = module_name_default or serializers.empty
field_kwargs['allow_blank'] = bool(module_name_default)
field_kwargs.pop('max_length', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.credential_id:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})
res.update(dict(
events = self.reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:ad_hoc_command_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:ad_hoc_command_notifications_list', kwargs={'pk': obj.pk}),
))
res['cancel'] = self.reverse('api:ad_hoc_command_cancel', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:ad_hoc_command_relaunch', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and not obj.inventory_id:
ret['inventory'] = None
if 'credential' in ret and not obj.credential_id:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
if 'name' in ret:
ret['name'] = obj.module_name
return ret
def validate(self, attrs):
ret = super(AdHocCommandSerializer, self).validate(attrs)
return ret
def validate_extra_vars(self, value):
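        # extract_ansible_vars() is assumed to split off Ansible-reserved variables
        # (names beginning with 'ansible_'); if any were supplied, reject the request
        # below instead of silently dropping them.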
redacted_extra_vars, removed_vars = extract_ansible_vars(value)
if removed_vars:
raise serializers.ValidationError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(sorted(removed_vars, reverse=True))))
return vars_validate_or_raise(value)
class AdHocCommandDetailSerializer(AdHocCommandSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
class Meta:
model = AdHocCommand
fields = ('*', 'host_status_counts',)
def get_host_status_counts(self, obj):
try:
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except AdHocCommandEvent.DoesNotExist:
counts = {}
return counts
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class AdHocCommandRelaunchSerializer(AdHocCommandSerializer):
class Meta:
fields = ()
def to_representation(self, obj):
if obj:
return dict([(p, u'') for p in obj.passwords_needed_to_start])
else:
return {}
class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = SystemJobTemplate
fields = ('*', 'job_type',)
def get_related(self, obj):
res = super(SystemJobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:system_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:system_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:system_job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_started = self.reverse('api:system_job_template_notification_templates_started_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:system_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:system_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
return res
class SystemJobSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
model = SystemJob
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout', '-controller_node',)
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template:
res['system_job_template'] = self.reverse('api:system_job_template_detail',
kwargs={'pk': obj.system_job_template.pk})
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
return res
def get_result_stdout(self, obj):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _(
"Standard Output too large to display ({text_size} bytes), "
"only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
class SystemJobCancelSerializer(SystemJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer):
show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': 'organization.workflow_admin'}
]
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta:
model = WorkflowJobTemplate
fields = (
'*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
'ask_variables_on_launch', 'inventory', 'limit', 'scm_branch',
'ask_inventory_on_launch', 'ask_scm_branch_on_launch', 'ask_limit_on_launch',
'webhook_service', 'webhook_credential',
)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
res.update(
workflow_jobs = self.reverse('api:workflow_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:workflow_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:workflow_job_template_launch', kwargs={'pk': obj.pk}),
webhook_key=self.reverse('api:webhook_key', kwargs={'model_kwarg': 'workflow_job_templates', 'pk': obj.pk}),
webhook_receiver=(
self.reverse('api:webhook_receiver_{}'.format(obj.webhook_service),
kwargs={'model_kwarg': 'workflow_job_templates', 'pk': obj.pk})
if obj.webhook_service else ''
),
workflow_nodes = self.reverse('api:workflow_job_template_workflow_nodes_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:workflow_job_template_label_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:workflow_job_template_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_started = self.reverse('api:workflow_job_template_notification_templates_started_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:workflow_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:workflow_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
notification_templates_approvals = self.reverse('api:workflow_job_template_notification_templates_approvals_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:workflow_job_template_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:workflow_job_template_object_roles_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
copy = self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),
)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
attrs = super(WorkflowJobTemplateSerializer, self).validate(attrs)
# process char_prompts, these are not direct fields on the model
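        # e.g. (illustrative values) scm_branch='devel' and limit='webservers' end up
        # stored together on the model as char_prompts={'scm_branch': 'devel', 'limit': 'webservers'}.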
mock_obj = self.Meta.model()
for field_name in ('scm_branch', 'limit'):
if field_name in attrs:
setattr(mock_obj, field_name, attrs[field_name])
attrs.pop(field_name)
# Model `.save` needs the container dict, not the pseudo fields
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
return attrs
class WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'survey_spec')
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta:
model = WorkflowJob
fields = (
'*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous', 'job_template',
'is_sliced_job', '-execution_node', '-event_processing_finished', '-controller_node',
'inventory', 'limit', 'scm_branch', 'webhook_service', 'webhook_credential', 'webhook_guid',
)
def get_related(self, obj):
res = super(WorkflowJobSerializer, self).get_related(obj)
if obj.workflow_job_template:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
kwargs={'pk': obj.workflow_job_template.pk})
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
if obj.job_template_id:
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:workflow_job_relaunch', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:workflow_job_cancel', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(WorkflowJobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
class Meta:
fields = ('*', '-execution_node', '-controller_node',)
class WorkflowJobCancelSerializer(WorkflowJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class WorkflowApprovalViewSerializer(UnifiedJobSerializer):
class Meta:
model = WorkflowApproval
fields = []
class WorkflowApprovalSerializer(UnifiedJobSerializer):
can_approve_or_deny = serializers.SerializerMethodField()
approval_expiration = serializers.SerializerMethodField()
timed_out = serializers.ReadOnlyField()
class Meta:
model = WorkflowApproval
fields = ('*', '-controller_node', '-execution_node', 'can_approve_or_deny', 'approval_expiration', 'timed_out',)
def get_approval_expiration(self, obj):
if obj.status != 'pending' or obj.timeout == 0:
return None
return obj.created + timedelta(seconds=obj.timeout)
def get_can_approve_or_deny(self, obj):
request = self.context.get('request', None)
allowed = request.user.can_access(WorkflowApproval, 'approve_or_deny', obj)
return allowed is True and obj.status == 'pending'
def get_related(self, obj):
res = super(WorkflowApprovalSerializer, self).get_related(obj)
if obj.workflow_approval_template:
res['workflow_approval_template'] = self.reverse('api:workflow_approval_template_detail',
kwargs={'pk': obj.workflow_approval_template.pk})
res['approve'] = self.reverse('api:workflow_approval_approve', kwargs={'pk': obj.pk})
res['deny'] = self.reverse('api:workflow_approval_deny', kwargs={'pk': obj.pk})
if obj.approved_or_denied_by:
res['approved_or_denied_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.approved_or_denied_by.pk})
return res
class WorkflowApprovalActivityStreamSerializer(WorkflowApprovalSerializer):
"""
    timed_out and status are usually read-only fields.
However, when we generate an activity stream record, we *want* to record
these types of changes. This serializer allows us to do so.
"""
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES)
timed_out = serializers.BooleanField()
class WorkflowApprovalListSerializer(WorkflowApprovalSerializer, UnifiedJobListSerializer):
class Meta:
fields = ('*', '-controller_node', '-execution_node', 'can_approve_or_deny', 'approval_expiration', 'timed_out',)
class WorkflowApprovalTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = WorkflowApprovalTemplate
fields = ('*', 'timeout', 'name',)
def get_related(self, obj):
res = super(WorkflowApprovalTemplateSerializer, self).get_related(obj)
if 'last_job' in res:
del res['last_job']
res.update(jobs = self.reverse('api:workflow_approval_template_jobs_list', kwargs={'pk': obj.pk}))
return res
class LaunchConfigurationBaseSerializer(BaseSerializer):
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,
choices=NEW_JOB_TYPE_CHOICES)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.NullBooleanField(required=False, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,
choices=VERBOSITY_CHOICES)
exclude_errors = ()
class Meta:
fields = ('*', 'extra_data', 'inventory', # Saved launch-time config fields
                  'scm_branch', 'job_type', 'job_tags', 'skip_tags', 'limit', 'diff_mode', 'verbosity')
def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
res['credentials'] = self.reverse(
'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),
kwargs={'pk': obj.pk}
)
return res
def _build_mock_obj(self, attrs):
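        # Build an unsaved instance that merges the existing object (if any) with the
        # incoming attrs, so prompt acceptance can be checked without touching the DB;
        # keys that are not real model fields are popped from attrs and live only on the mock.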
mock_obj = self.Meta.model()
if self.instance:
for field in self.instance._meta.fields:
setattr(mock_obj, field.name, getattr(self.instance, field.name))
field_names = set(field.name for field in self.Meta.model._meta.fields)
for field_name, value in list(attrs.items()):
setattr(mock_obj, field_name, value)
if field_name not in field_names:
attrs.pop(field_name)
return mock_obj
def to_representation(self, obj):
ret = super(LaunchConfigurationBaseSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_data' in ret and obj.survey_passwords:
ret['extra_data'] = obj.display_extra_vars()
return ret
def validate(self, attrs):
db_extra_data = {}
if self.instance:
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
if ujt is None:
if 'workflow_job_template' in attrs:
return {'workflow_job_template': attrs['workflow_job_template']}
return {}
# build additional field survey_passwords to track redacted variables
password_dict = {}
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
# Prepare additional field survey_passwords for save
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
# Replace $encrypted$ submissions with db value if exists
if 'extra_data' in attrs:
if password_dict:
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict.copy()
# Force dict type (cannot preserve YAML formatting if passwords are involved)
# Encrypt the extra_data for save, only current password vars in JT survey
# but first, make a copy or else this is referenced by request.data, and
# user could get encrypted string in form data in API browser
attrs['extra_data'] = extra_data.copy()
encrypt_dict(attrs['extra_data'], password_dict.keys())
# For any raw $encrypted$ string, either
# - replace with existing DB value
# - raise a validation error
# - ignore, if default present
for key in password_dict.keys():
if attrs['extra_data'].get(key, None) == REPLACE_STR:
if key not in db_extra_data:
element = ujt.pivot_spec(ujt.survey_spec)[key]
# NOTE: validation _of_ the default values of password type
# questions not done here or on launch, but doing so could
# leak info about values, so it should not be added
if not ('default' in element and element['default']):
raise serializers.ValidationError(
{"extra_data": _('Provided variable {} has no database value to replace with.').format(key)})
else:
attrs['extra_data'][key] = db_extra_data[key]
# Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs)
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
# Remove all unprocessed $encrypted$ strings, indicating default usage
if 'extra_data' in attrs and password_dict:
for key, value in attrs['extra_data'].copy().items():
if value == REPLACE_STR:
if key in password_dict:
attrs['extra_data'].pop(key)
attrs.get('survey_passwords', {}).pop(key, None)
else:
errors.setdefault('extra_vars', []).append(
_('"$encrypted$ is a reserved keyword, may not be used for {var_name}."'.format(key))
)
# Launch configs call extra_vars extra_data for historical reasons
if 'extra_vars' in errors:
errors['extra_data'] = errors.pop('extra_vars')
if errors:
raise serializers.ValidationError(errors)
# Model `.save` needs the container dict, not the pseudo fields
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
return attrs
class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes', 'all_parents_must_converge',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['create_approval_template'] = self.reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': obj.pk})
res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
except WorkflowJobTemplate.DoesNotExist:
pass
return res
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeSerializer, self).build_relational_field(field_name, relation_info)
# workflow_job_template is read-only unless creating a new node.
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_summary_fields(self, obj):
summary_fields = super(WorkflowJobTemplateNodeSerializer, self).get_summary_fields(obj)
if isinstance(obj.unified_job_template, WorkflowApprovalTemplate):
summary_fields['unified_job_template']['timeout'] = obj.unified_job_template.timeout
return summary_fields
class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = WorkflowJobNode
fields = ('*', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'all_parents_must_converge', 'do_not_run',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.job:
res['job'] = obj.job.get_absolute_url(self.context.get('request'))
if obj.workflow_job:
res['workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job.pk})
return res
def get_summary_fields(self, obj):
summary_fields = super(WorkflowJobNodeSerializer, self).get_summary_fields(obj)
if isinstance(obj.job, WorkflowApproval):
summary_fields['job']['timed_out'] = obj.job.timed_out
return summary_fields
class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):
'''
Influence the api browser sample data to not include workflow_job_template
when editing a WorkflowNode.
Note: I was not able to accomplish this through the use of extra_kwargs.
Maybe something to do with workflow_job_template being a relational field?
'''
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
class WorkflowJobTemplateNodeCreateApprovalSerializer(BaseSerializer):
class Meta:
model = WorkflowApprovalTemplate
fields = ('timeout', 'name', 'description',)
def to_representation(self, obj):
return {}
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
class AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer):
pass
class SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer):
class Meta:
model = SystemJob
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class JobHostSummarySerializer(BaseSerializer):
class Meta:
model = JobHostSummary
fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',
'dark', 'failures', 'ok', 'processed', 'skipped', 'failed',
'ignored', 'rescued')
def get_related(self, obj):
res = super(JobHostSummarySerializer, self).get_related(obj)
res.update(dict(
job=self.reverse('api:job_detail', kwargs={'pk': obj.job.pk})))
if obj.host is not None:
res.update(dict(
host=self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
))
return res
def get_summary_fields(self, obj):
d = super(JobHostSummarySerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
class JobEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display2', read_only=True)
event_level = serializers.IntegerField(read_only=True)
class Meta:
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
return res
def get_summary_fields(self, obj):
d = super(JobEventSerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
def to_representation(self, obj):
data = super(JobEventSerializer, self).to_representation(obj)
# Show full stdout for playbook_on_* events.
if obj and obj.event.startswith('playbook_on'):
return data
        # If the view logic says not to truncate (request was to the detail view or a param was used)
if self.context.get('no_truncate', False):
return data
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if 'stdout' in data:
data['stdout'] = truncate_stdout(data['stdout'], max_bytes)
return data
class ProjectUpdateEventSerializer(JobEventSerializer):
stdout = serializers.SerializerMethodField()
event_data = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', '-name', '-description', '-job', '-job_id',
'-parent_uuid', '-parent', '-host', 'project_update')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
)
return res
def get_stdout(self, obj):
return UriCleaner.remove_sensitive(obj.stdout)
def get_event_data(self, obj):
try:
return json.loads(
UriCleaner.remove_sensitive(
json.dumps(obj.event_data)
)
)
except Exception:
logger.exception("Failed to sanitize event_data")
return {}
class AdHocCommandEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display', read_only=True)
class Meta:
model = AdHocCommandEvent
fields = ('*', '-name', '-description', 'ad_hoc_command', 'event',
'counter', 'event_display', 'event_data', 'failed',
'changed', 'uuid', 'host', 'host_name', 'stdout',
'start_line', 'end_line', 'verbosity')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res.update(dict(
ad_hoc_command = self.reverse('api:ad_hoc_command_detail', kwargs={'pk': obj.ad_hoc_command_id}),
))
if obj.host:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
return res
def to_representation(self, obj):
data = super(AdHocCommandEventSerializer, self).to_representation(obj)
        # If the view logic says not to truncate (request was to the detail view or a param was used)
if self.context.get('no_truncate', False):
return data
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if 'stdout' in data:
data['stdout'] = truncate_stdout(data['stdout'], max_bytes)
return data
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = InventoryUpdateEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'inventory_update')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['inventory_update'] = self.reverse(
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
)
return res
class SystemJobEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = SystemJobEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'system_job')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['system_job'] = self.reverse(
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
)
return res
class JobLaunchSerializer(BaseSerializer):
# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
credential_needed_to_start = serializers.SerializerMethodField()
inventory_needed_to_start = serializers.SerializerMethodField()
survey_enabled = serializers.SerializerMethodField()
job_template_data = serializers.SerializerMethodField()
defaults = serializers.SerializerMethodField()
# Accepted on launch fields
extra_vars = serializers.JSONField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
credentials = serializers.PrimaryKeyRelatedField(
many=True, queryset=Credential.objects.all(),
required=False, write_only=True
)
credential_passwords = VerbatimField(required=False, write_only=True)
scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
diff_mode = serializers.BooleanField(required=False, write_only=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
class Meta:
model = JobTemplate
fields = ('can_start_without_user_input', 'passwords_needed_to_start',
'extra_vars', 'inventory', 'scm_branch', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',
'credentials', 'credential_passwords',
'ask_scm_branch_on_launch', 'ask_variables_on_launch', 'ask_tags_on_launch',
'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',
'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',
                  'inventory_needed_to_start', 'job_template_data', 'defaults')
read_only_fields = (
'ask_scm_branch_on_launch',
'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch',)
def get_credential_needed_to_start(self, obj):
return False
def get_inventory_needed_to_start(self, obj):
return not (obj and obj.inventory)
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
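        # Illustrative shape of the returned defaults (field names come from
        # get_ask_mapping(); the concrete values here are made up):
        #   {'inventory': {'name': 'Demo Inventory', 'id': 1},
        #    'credentials': [{'id': 2, 'name': 'Demo Credential',
        #                     'credential_type': 1, 'passwords_needed': []}],
        #    'limit': '', 'verbosity': 0, ...}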
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
for cred in obj.credentials.all():
cred_dict = dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
template = self.context.get('template')
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
_exclude_errors=['prompts'], # make several error types non-blocking
**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
# Prohibit providing multiple credentials of the same CredentialType.kind
# or multiples of same vault id
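        # unique_hash() is assumed to fold the credential kind (plus vault_id for vault
        # credentials) into a single key, so e.g. two machine (ssh) credentials, or two
        # vault credentials sharing a vault_id, are flagged as duplicates here.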
distinct_cred_kinds = []
for cred in accepted.get('credentials', []):
if cred.unique_hash() in distinct_cred_kinds:
errors.setdefault('credentials', []).append(_(
'Cannot assign multiple {} credentials.'
).format(cred.unique_hash(display=True)))
if cred.credential_type.kind not in ('ssh', 'vault', 'cloud', 'net'):
errors.setdefault('credentials', []).append(_(
'Cannot assign a Credential of kind `{}`'
).format(cred.credential_type.kind))
distinct_cred_kinds.append(cred.unique_hash())
# Prohibit removing credentials from the JT list (unsupported for now)
template_credentials = template.credentials.all()
if 'credentials' in attrs:
removed_creds = set(template_credentials) - set(attrs['credentials'])
provided_mapping = Credential.unique_dict(attrs['credentials'])
for cred in removed_creds:
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(_(
'Removing {} credential at launch time without replacement is not supported. '
'Provided list lacked credential(s): {}.'
).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))
# verify that credentials (either provided or existing) don't
# require launch-time passwords that have not been provided
if 'credentials' in accepted:
launch_credentials = accepted['credentials']
else:
launch_credentials = template_credentials
passwords = attrs.get('credential_passwords', {}) # get from original attrs
passwords_lacking = []
for cred in launch_credentials:
for p in cred.passwords_needed:
if p not in passwords:
passwords_lacking.append(p)
else:
accepted.setdefault('credential_passwords', {})
accepted['credential_passwords'][p] = passwords[p]
if len(passwords_lacking):
errors['passwords_needed_to_start'] = passwords_lacking
if errors:
raise serializers.ValidationError(errors)
if 'extra_vars' in accepted:
extra_vars_save = accepted['extra_vars']
else:
extra_vars_save = None
# Validate job against JobTemplate clean_ methods
accepted = super(JobLaunchSerializer, self).validate(accepted)
# Preserve extra_vars as dictionary internally
if extra_vars_save:
accepted['extra_vars'] = extra_vars_save
return accepted
class WorkflowJobLaunchSerializer(BaseSerializer):
can_start_without_user_input = serializers.BooleanField(read_only=True)
defaults = serializers.SerializerMethodField()
variables_needed_to_start = serializers.ReadOnlyField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
workflow_job_template_data = serializers.SerializerMethodField()
class Meta:
model = WorkflowJobTemplate
fields = ('ask_inventory_on_launch', 'ask_limit_on_launch', 'ask_scm_branch_on_launch',
'can_start_without_user_input', 'defaults', 'extra_vars',
'inventory', 'limit', 'scm_branch',
'survey_enabled', 'variables_needed_to_start',
'node_templates_missing', 'node_prompts_rejected',
                  'workflow_job_template_data', 'ask_variables_on_launch')
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_workflow_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs):
template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Workflow is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
if errors:
raise serializers.ValidationError(errors)
WFJT_extra_vars = template.extra_vars
WFJT_inventory = template.inventory
WFJT_limit = template.limit
WFJT_scm_branch = template.scm_branch
super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory
template.limit = WFJT_limit
template.scm_branch = WFJT_scm_branch
return accepted
class NotificationTemplateSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [{'copy': 'organization.admin'}]
class Meta:
model = NotificationTemplate
fields = ('*', 'organization', 'notification_type', 'notification_configuration', 'messages')
type_map = {"string": (str,),
"int": (int,),
"bool": (bool,),
"list": (list,),
"password": (str,),
"object": (dict, OrderedDict)}
def to_representation(self, obj):
ret = super(NotificationTemplateSerializer, self).to_representation(obj)
if 'notification_configuration' in ret:
ret['notification_configuration'] = obj.display_notification_configuration()
return ret
def get_related(self, obj):
res = super(NotificationTemplateSerializer, self).get_related(obj)
res.update(dict(
test = self.reverse('api:notification_template_test', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:notification_template_notification_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:notification_template_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def _recent_notifications(self, obj):
return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]
def get_summary_fields(self, obj):
d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)
d['recent_notifications'] = self._recent_notifications(obj)
return d
def validate_messages(self, messages):
if messages is None:
return None
error_list = []
collected_messages = []
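        # Expected overall shape of `messages` (a sketch; every key is optional):
        #   {'started': {'message': '...', 'body': '...'},
        #    'success': {...}, 'error': {...},
        #    'workflow_approval': {'running': {...}, 'approved': {...},
        #                          'timed_out': {...}, 'denied': {...}}}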
def check_messages(messages):
for message_type in messages:
if message_type not in ('message', 'body'):
error_list.append(_("Message type '{}' invalid, must be either 'message' or 'body'").format(message_type))
continue
message = messages[message_type]
if message is None:
continue
if not isinstance(message, str):
error_list.append(_("Expected string for '{}', found {}, ").format(message_type, type(message)))
continue
if message_type == 'message':
if '\n' in message:
error_list.append(_("Messages cannot contain newlines (found newline in {} event)".format(event)))
continue
collected_messages.append(message)
# Validate structure / content types
if not isinstance(messages, dict):
error_list.append(_("Expected dict for 'messages' field, found {}".format(type(messages))))
else:
for event in messages:
if event not in ('started', 'success', 'error', 'workflow_approval'):
error_list.append(_("Event '{}' invalid, must be one of 'started', 'success', 'error', or 'workflow_approval'").format(event))
continue
event_messages = messages[event]
if event_messages is None:
continue
if not isinstance(event_messages, dict):
error_list.append(_("Expected dict for event '{}', found {}").format(event, type(event_messages)))
continue
if event == 'workflow_approval':
for subevent in event_messages:
if subevent not in ('running', 'approved', 'timed_out', 'denied'):
error_list.append(_("Workflow Approval event '{}' invalid, must be one of "
"'running', 'approved', 'timed_out', or 'denied'").format(subevent))
continue
subevent_messages = event_messages[subevent]
if subevent_messages is None:
continue
if not isinstance(subevent_messages, dict):
error_list.append(_("Expected dict for workflow approval event '{}', found {}").format(subevent, type(subevent_messages)))
continue
check_messages(subevent_messages)
else:
check_messages(event_messages)
# Subclass to return name of undefined field
class DescriptiveUndefined(StrictUndefined):
# The parent class prevents _accessing attributes_ of an object
# but will render undefined objects with 'Undefined'. This
# prevents their use entirely.
__repr__ = __str__ = StrictUndefined._fail_with_undefined_error
def __init__(self, *args, **kwargs):
super(DescriptiveUndefined, self).__init__(*args, **kwargs)
# When an undefined field is encountered, return the name
# of the undefined field in the exception message
# (StrictUndefined refers to the explicitly set exception
# message as the 'hint')
self._undefined_hint = self._undefined_name
# Ensure messages can be rendered
for msg in collected_messages:
env = sandbox.ImmutableSandboxedEnvironment(undefined=DescriptiveUndefined)
try:
env.from_string(msg).render(JobNotificationMixin.context_stub())
except TemplateSyntaxError as exc:
error_list.append(_("Unable to render message '{}': {}".format(msg, exc.message)))
except UndefinedError as exc:
error_list.append(_("Field '{}' unavailable".format(exc.message)))
except SecurityError as exc:
error_list.append(_("Security error due to field '{}'".format(exc.message)))
# Ensure that if a webhook body was provided, that it can be rendered as a dictionary
notification_type = ''
if self.instance:
notification_type = getattr(self.instance, 'notification_type', '')
else:
notification_type = self.initial_data.get('notification_type', '')
if notification_type == 'webhook':
for event in messages:
if not messages[event]:
continue
if not isinstance(messages[event], dict):
continue
body = messages[event].get('body', {})
if body:
try:
potential_body = json.loads(body)
if not isinstance(potential_body, dict):
error_list.append(_("Webhook body for '{}' should be a json dictionary. Found type '{}'."
.format(event, type(potential_body).__name__)))
except json.JSONDecodeError as exc:
error_list.append(_("Webhook body for '{}' is not a valid json dictionary ({}).".format(event, exc)))
if error_list:
raise serializers.ValidationError(error_list)
return messages
def validate(self, attrs):
from awx.api.views import NotificationTemplateDetail
notification_type = None
if 'notification_type' in attrs:
notification_type = attrs['notification_type']
elif self.instance:
notification_type = self.instance.notification_type
else:
notification_type = None
if not notification_type:
raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type'))
notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type]
missing_fields = []
incorrect_type_fields = []
password_fields_to_forward = []
error_list = []
if 'notification_configuration' not in attrs:
return attrs
if self.context['view'].kwargs and isinstance(self.context['view'], NotificationTemplateDetail):
object_actual = self.context['view'].get_object()
else:
object_actual = None
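        # init_parameters on each notification class is assumed to look roughly like
        #   {'url': {'label': 'Target URL', 'type': 'string'},
        #    'headers': {'label': 'HTTP Headers', 'type': 'object', 'default': {}}}
        # (keys here are illustrative); 'type' is checked via type_map above and a
        # 'default', when present, fills in omitted fields.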
for field, params in notification_class.init_parameters.items():
if field not in attrs['notification_configuration']:
if 'default' in params:
attrs['notification_configuration'][field] = params['default']
else:
missing_fields.append(field)
continue
field_val = attrs['notification_configuration'][field]
field_type = params['type']
expected_types = self.type_map[field_type]
if not type(field_val) in expected_types:
incorrect_type_fields.append((field, field_type))
continue
if field_type == "list" and len(field_val) < 1:
error_list.append(_("No values specified for field '{}'").format(field))
continue
if field_type == "password" and field_val == "$encrypted$" and object_actual is not None:
password_fields_to_forward.append(field)
if field == "http_method" and field_val.lower() not in ['put', 'post']:
error_list.append(_("HTTP method must be either 'POST' or 'PUT'."))
if missing_fields:
error_list.append(_("Missing required fields for Notification Configuration: {}.").format(missing_fields))
if incorrect_type_fields:
for type_field_error in incorrect_type_fields:
error_list.append(_("Configuration field '{}' incorrect type, expected {}.").format(type_field_error[0],
type_field_error[1]))
if error_list:
raise serializers.ValidationError(error_list)
# Only pull the existing encrypted passwords from the existing objects
# to assign to the attribute and forward on the call stack IF AND ONLY IF
# we know an error will not be raised in the validation phase.
# Otherwise, the encrypted password will be exposed.
for field in password_fields_to_forward:
attrs['notification_configuration'][field] = object_actual.notification_configuration[field]
return super(NotificationTemplateSerializer, self).validate(attrs)
class NotificationSerializer(BaseSerializer):
body = serializers.SerializerMethodField(
help_text=_('Notification body')
)
class Meta:
model = Notification
fields = ('*', '-name', '-description', 'notification_template', 'error', 'status', 'notifications_sent',
'notification_type', 'recipients', 'subject', 'body')
def get_body(self, obj):
if obj.notification_type in ('webhook', 'pagerduty'):
if isinstance(obj.body, dict):
if 'body' in obj.body:
return obj.body['body']
elif isinstance(obj.body, str):
# attempt to load json string
try:
potential_body = json.loads(obj.body)
if isinstance(potential_body, dict):
return potential_body
except json.JSONDecodeError:
pass
return obj.body
def get_related(self, obj):
res = super(NotificationSerializer, self).get_related(obj)
res.update(dict(
notification_template = self.reverse('api:notification_template_detail', kwargs={'pk': obj.notification_template.pk}),
))
return res
def to_representation(self, obj):
ret = super(NotificationSerializer, self).to_representation(obj)
if obj.notification_type == 'webhook':
ret.pop('subject')
if obj.notification_type not in ('email', 'webhook', 'pagerduty'):
ret.pop('body')
return ret
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = ('*', '-description', 'organization')
def get_related(self, obj):
res = super(LabelSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class SchedulePreviewSerializer(BaseSerializer):
class Meta:
model = Schedule
fields = ('rrule',)
# We reject rrules if:
    # - DTSTART is not included
# - INTERVAL is not included
# - SECONDLY is used
    # - DTSTART is a naive datetime (no TZID and no trailing Z)
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - BYYEARDAY
# - BYWEEKNO
# - Multiple DTSTART or RRULE elements
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
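    # A value that passes these checks looks roughly like (illustrative):
    #   DTSTART;TZID=America/New_York:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5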
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
            raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZID= or a UTC time ending in Z (YYYYMMDDTHHMMSSZ).'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
if re.match(multi_by_month, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
if re.match(by_day_with_numeric_prefix, rrule_value):
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
if 'byyearday' in rrule_value.lower():
raise serializers.ValidationError(_("BYYEARDAY not supported."))
if 'byweekno' in rrule_value.lower():
raise serializers.ValidationError(_("BYWEEKNO not supported."))
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
try:
Schedule.rrulestr(rrule_value)
except Exception as e:
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
return value
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']
timezone = serializers.SerializerMethodField()
until = serializers.SerializerMethodField()
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run', 'timezone',
'until')
def get_timezone(self, obj):
return obj.timezone
def get_until(self, obj):
return obj.until
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
if obj.unified_job_template.project:
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
except ObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
return res
def get_summary_fields(self, obj):
summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)
if isinstance(obj.unified_job_template, SystemJobTemplate):
summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type
if 'inventory' in summary_fields:
return summary_fields
inventory = None
if obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
inventory = obj.unified_job_template.inventory
else:
return summary_fields
summary_fields['inventory'] = dict()
for field in SUMMARIZABLE_FK_FIELDS['inventory']:
summary_fields['inventory'][field] = getattr(inventory, field, None)
return summary_fields
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
'Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.'.format(value.source_project.name)))
return value
class InstanceSerializer(BaseSerializer):
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance'),
read_only=True
)
class Meta:
model = Instance
read_only_fields = ('uuid', 'hostname', 'version')
fields = ("id", "type", "url", "related", "uuid", "hostname", "created", "modified", 'capacity_adjustment',
"version", "capacity", "consumed_capacity", "percent_capacity_remaining", "jobs_running", "jobs_total",
"cpu", "memory", "cpu_capacity", "mem_capacity", "enabled", "managed_by_policy")
def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
return res
def get_consumed_capacity(self, obj):
return obj.consumed_capacity
def get_percent_capacity_remaining(self, obj):
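        # Worked example (made-up numbers): capacity=100, consumed_capacity=25
        # -> (100 - 25) / 100 * 100 = 75.0; zero-capacity or fully busy instances return 0.0.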
if not obj.capacity or obj.consumed_capacity >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
class InstanceGroupSerializer(BaseSerializer):
committed_capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance group'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance group'),
read_only=True
)
instances = serializers.SerializerMethodField()
is_controller = serializers.BooleanField(
help_text=_('Indicates whether instance group controls any other group'),
read_only=True
)
is_isolated = serializers.BooleanField(
        help_text=_('Indicates whether instances in this group are isolated. '
'Isolated groups have a designated controller group.'),
read_only=True
)
is_containerized = serializers.BooleanField(
        help_text=_('Indicates whether instances in this group are containerized. '
                    'Containerized groups have a designated OpenShift or Kubernetes cluster.'),
read_only=True
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text
policy_instance_percentage = serializers.IntegerField(
default=0, min_value=0, max_value=100, required=False, initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to "
"this group when new instances come online.")
)
policy_instance_minimum = serializers.IntegerField(
default=0, min_value=0, required=False, initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to "
"this group when new instances come online.")
)
policy_instance_list = serializers.ListField(
child=serializers.CharField(), required=False,
label=_('Policy Instance List'),
help_text=_("List of exact-match Instances that will be assigned to this group")
)
class Meta:
model = InstanceGroup
fields = ("id", "type", "url", "related", "name", "created", "modified",
"capacity", "committed_capacity", "consumed_capacity",
"percent_capacity_remaining", "jobs_running", "jobs_total",
"instances", "controller", "is_controller", "is_isolated", "is_containerized", "credential",
"policy_instance_percentage", "policy_instance_minimum", "policy_instance_list",
"pod_spec_override", "summary_fields")
def get_related(self, obj):
res = super(InstanceGroupSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
if obj.controller_id:
res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
if obj.credential:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})
return res
def validate_policy_instance_list(self, value):
for instance_name in value:
if value.count(instance_name) > 1:
raise serializers.ValidationError(_('Duplicate entry {}.').format(instance_name))
if not Instance.objects.filter(hostname=instance_name).exists():
raise serializers.ValidationError(_('{} is not a valid hostname of an existing instance.').format(instance_name))
if Instance.objects.get(hostname=instance_name).is_isolated():
raise serializers.ValidationError(_('Isolated instances may not be added or removed from instances groups via the API.'))
if self.instance and self.instance.controller_id is not None:
raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
if value and self.instance and self.instance.is_containerized:
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
return value
def validate_policy_instance_percentage(self, value):
if value and self.instance and self.instance.is_containerized:
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
return value
def validate_policy_instance_minimum(self, value):
if value and self.instance and self.instance.is_containerized:
raise serializers.ValidationError(_('Containerized instances may not be managed via the API'))
return value
def validate_name(self, value):
if self.instance and self.instance.name == 'tower' and value != 'tower':
raise serializers.ValidationError(_('tower instance group name may not be changed.'))
return value
def validate_credential(self, value):
if value and not value.kubernetes:
raise serializers.ValidationError(_('Only Kubernetes credentials can be associated with an Instance Group'))
return value
def get_capacity_dict(self):
# Store capacity values (globally computed) in the context
if 'capacity_map' not in self.context:
ig_qs = None
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.parent: # Is ListView:
ig_qs = self.parent.instance
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
qs=ig_qs, tasks=jobs_qs, breakdown=True)
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['running_capacity']
def get_committed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['committed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity:
return 0.0
consumed = self.get_consumed_capacity(obj)
if consumed >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(
((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100)
)
def get_instances(self, obj):
return obj.instances.count()
class ActivityStreamSerializer(BaseSerializer):
changes = serializers.SerializerMethodField()
object_association = serializers.SerializerMethodField(
help_text=_("When present, shows the field name of the role or relationship that changed."))
object_type = serializers.SerializerMethodField(
help_text=_("When present, shows the model on which the role or relationship was defined."))
@cached_property
def _local_summarizable_fk_fields(self):
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
# Special requests
summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
for key in summary_dict.keys():
if 'id' not in summary_dict[key]:
summary_dict[key] = summary_dict[key] + ('id',)
field_list = list(summary_dict.items())
# Needed related fields that are not in the default summary fields
field_list += [
('workflow_job_template_node', ('id', 'unified_job_template_id')),
('label', ('id', 'name', 'organization_id')),
('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),
('o_auth2_application', ('id', 'name', 'description')),
('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),
('ad_hoc_command', ('id', 'name', 'status', 'limit')),
('workflow_approval', ('id', 'name', 'unified_job_id')),
]
return field_list
class Meta:
model = ActivityStream
fields = ('*', '-name', '-description', '-created', '-modified', 'timestamp', 'operation',
'changes', 'object1', 'object2', 'object_association', 'action_node', 'object_type')
def get_fields(self):
ret = super(ActivityStreamSerializer, self).get_fields()
for key, field in list(ret.items()):
if key == 'changes':
field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted')
if key == 'object1':
field.help_text = _('For create, update, and delete events this is the object type that was affected. '
'For associate and disassociate events this is the object type associated or disassociated with object2.')
if key == 'object2':
field.help_text = _('Unpopulated for create, update, and delete events. For associate and disassociate '
'events this is the object type that object1 is being associated with.')
if key == 'operation':
field.help_text = _('The action taken with respect to the given object(s).')
return ret
def get_changes(self, obj):
if obj is None:
return {}
try:
return json.loads(obj.changes)
except Exception:
logger.warn("Error deserializing activity stream json changes")
return {}
def get_object_association(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
# roles: these values look like
# "awx.main.models.inventory.Inventory.admin_role"
# due to historical reasons the UI expects just "role" here
return "role"
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so instead of splitting on period we have to take after the first underscore
try:
return obj.object_relationship_type.split(".")[-1].split("_", 1)[1]
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_object_type(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
return camelcase_to_underscore(obj.object_relationship_type.rsplit('.', 2)[-2])
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so we have to take after the last period but before the first underscore.
try:
            cls = obj.object_relationship_type.rsplit('.', 1)[-1]
            return camelcase_to_underscore(cls.split('_', 1)[0])
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_related(self, obj):
data = {}
if obj.actor is not None:
data['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
for fk, __ in self._local_summarizable_fk_fields:
if not hasattr(obj, fk):
continue
m2m_list = self._get_related_objects(obj, fk)
if m2m_list:
data[fk] = []
id_list = []
for item in m2m_list:
if getattr(item, 'id', None) in id_list:
continue
id_list.append(getattr(item, 'id', None))
if hasattr(item, 'get_absolute_url'):
url = item.get_absolute_url(self.context.get('request'))
else:
view_name = fk + '_detail'
url = self.reverse('api:' + view_name, kwargs={'pk': item.id})
data[fk].append(url)
if fk == 'schedule':
data['unified_job_template'] = item.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.setting and obj.setting.get('category', None):
data['setting'] = self.reverse(
'api:setting_singleton_detail',
kwargs={'category_slug': obj.setting['category']}
)
return data
def _get_related_objects(self, obj, fk):
related_model = ActivityStream._meta.get_field(fk).related_model
related_manager = getattr(obj, fk)
if issubclass(related_model, PolymorphicModel) and hasattr(obj, '_prefetched_objects_cache'):
# HACK: manually fill PolymorphicModel caches to prevent running query multiple times
# unnecessary if django-polymorphic issue #68 is solved
if related_manager.prefetch_cache_name not in obj._prefetched_objects_cache:
obj._prefetched_objects_cache[related_manager.prefetch_cache_name] = list(related_manager.all())
return related_manager.all()
def _summarize_parent_ujt(self, obj, fk, summary_fields):
summary_keys = {'job': 'job_template',
'workflow_job_template_node': 'workflow_job_template',
'workflow_approval_template': 'workflow_job_template',
'workflow_approval': 'workflow_job',
'schedule': 'unified_job_template'}
if fk not in summary_keys:
return
related_obj = getattr(obj, summary_keys[fk], None)
item = {}
fields = SUMMARIZABLE_FK_FIELDS[summary_keys[fk]]
if related_obj is not None:
summary_fields[get_type_for_model(related_obj)] = []
for field in fields:
fval = getattr(related_obj, field, None)
if fval is not None:
item[field] = fval
summary_fields[get_type_for_model(related_obj)].append(item)
def get_summary_fields(self, obj):
summary_fields = OrderedDict()
for fk, related_fields in self._local_summarizable_fk_fields:
try:
if not hasattr(obj, fk):
continue
m2m_list = self._get_related_objects(obj, fk)
if m2m_list:
summary_fields[fk] = []
for thisItem in m2m_list:
self._summarize_parent_ujt(thisItem, fk, summary_fields)
thisItemDict = {}
for field in related_fields:
fval = getattr(thisItem, field, None)
if fval is not None:
thisItemDict[field] = fval
summary_fields[fk].append(thisItemDict)
except ObjectDoesNotExist:
pass
if obj.actor is not None:
            summary_fields['actor'] = dict(id=obj.actor.id,
                                           username=obj.actor.username,
                                           first_name=obj.actor.first_name,
                                           last_name=obj.actor.last_name)
elif obj.deleted_actor:
summary_fields['actor'] = obj.deleted_actor.copy()
summary_fields['actor']['id'] = None
if obj.setting:
summary_fields['setting'] = [obj.setting]
return summary_fields
| 44.950202
| 150
| 0.629779
|
c2a6b1c657acf2cc8ea87e3d5bcd4e1aa8479590
| 1,198
|
py
|
Python
|
main.py
|
franciskoinno/stock_scanner
|
32d000ebd407e7d1b9c9f1b661a3537731996500
|
[
"MIT"
] | null | null | null |
main.py
|
franciskoinno/stock_scanner
|
32d000ebd407e7d1b9c9f1b661a3537731996500
|
[
"MIT"
] | null | null | null |
main.py
|
franciskoinno/stock_scanner
|
32d000ebd407e7d1b9c9f1b661a3537731996500
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
# github change
# git change
chrome_driver_path = r"C:\Users\Francis\Desktop\chromedriver_win32\chromedriver.exe"
driver_long_lower_shadow = webdriver.Chrome(chrome_driver_path)
driver_hammer = webdriver.Chrome(chrome_driver_path)
driver_dragon = webdriver.Chrome(chrome_driver_path)
driver_white = webdriver.Chrome(chrome_driver_path)
driver_long_lower_shadow.get("https://finviz.com/screener.ashx?v=211&f=cap_smallover,fa_salesqoq_o5,sh_curvol_o50,sh_instown_o10,sh_price_o15,ta_candlestick_lls,ta_highlow52w_b0to10h,ta_sma200_sb50,ta_sma50_pa&ft=3")
driver_hammer.get("https://finviz.com/screener.ashx?v=211&f=cap_smallover,fa_salesqoq_o5,sh_curvol_o50,sh_instown_o10,sh_price_o15,ta_candlestick_h,ta_highlow52w_b0to10h,ta_sma200_sb50,ta_sma50_pa&ft=3")
driver_dragon.get("https://finviz.com/screener.ashx?v=211&f=cap_smallover,fa_salesqoq_o5,sh_curvol_o50,sh_instown_o10,sh_price_o15,ta_candlestick_dd,ta_highlow52w_b0to10h,ta_sma200_sb50,ta_sma50_pa&ft=3")
driver_white.get("https://finviz.com/screener.ashx?v=211&f=cap_smallover,fa_salesqoq_o5,sh_curvol_o50,sh_instown_o10,sh_price_o15,ta_candlestick_mw,ta_highlow52w_b0to10h,ta_sma200_sb50,ta_sma50_pa&ft=3")
| 85.571429
| 216
| 0.868114
|
ca35f8320b6a88dba634b2c3f1faf4671c2f131e
| 1,556
|
py
|
Python
|
tests/test_data_containers/test_data.py
|
Rory-Sullivan/yrlocationforecast
|
26b66834cac4569704daf0009a9d2bba39dbfb75
|
[
"MIT"
] | 13
|
2020-07-28T17:47:42.000Z
|
2022-03-30T13:35:12.000Z
|
tests/test_data_containers/test_data.py
|
Rory-Sullivan/yrlocationforecast
|
26b66834cac4569704daf0009a9d2bba39dbfb75
|
[
"MIT"
] | 5
|
2020-10-14T11:10:13.000Z
|
2022-01-01T17:35:19.000Z
|
tests/test_data_containers/test_data.py
|
Rory-Sullivan/yrlocationforecast
|
26b66834cac4569704daf0009a9d2bba39dbfb75
|
[
"MIT"
] | 6
|
2020-10-16T12:30:07.000Z
|
2022-02-18T07:13:21.000Z
|
"""Tests for the Data class."""
import datetime as dt
import pytest
from metno_locationforecast.data_containers import Place
from metno_locationforecast.forecast import Forecast
USER_AGENT = "testing/0.1 https://github.com/Rory-Sullivan/yrlocationforecast"
SAVE_LOCATION = "./tests/test_data/"
@pytest.fixture
def new_york_data():
lat = 40.7
lon = -74.0
alt = 10
new_york = Place("New York", lat, lon, alt)
new_york_forecast = Forecast(new_york, USER_AGENT, "compact", SAVE_LOCATION)
new_york_forecast.load()
return new_york_forecast.data
@pytest.fixture
def new_york_data_copy():
lat = 40.7
lon = -74.0
alt = 10
new_york = Place("New York", lat, lon, alt)
new_york_forecast = Forecast(new_york, USER_AGENT, "compact", SAVE_LOCATION)
new_york_forecast.load()
return new_york_forecast.data
def test_eq(new_york_data, new_york_data_copy):
assert new_york_data is not new_york_data_copy
assert new_york_data == new_york_data_copy
def test_intervals_for(new_york_data):
day = dt.date(year=2020, month=7, day=20)
intervals = new_york_data.intervals_for(day)
assert len(intervals) == 13
assert intervals[12].variables["wind_speed"].value == 3.5
def test_intervals_between(new_york_data):
start = dt.datetime(year=2020, month=7, day=20, hour=11)
end = dt.datetime(year=2020, month=7, day=20, hour=15)
intervals = new_york_data.intervals_between(start, end)
assert len(intervals) == 4
assert intervals[3].variables["wind_speed"].value == 4.4
| 25.096774
| 80
| 0.722365
|
2cc0268dfc638088baed7b4e0d21de82f35864c5
| 3,546
|
py
|
Python
|
tests/unit/test_models/test_submodels/test_interface/test_lead_acid.py
|
tobykirk/PyBaMM
|
c16b7df76c597468ecac1c40e768d94005f79145
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_models/test_submodels/test_interface/test_lead_acid.py
|
tobykirk/PyBaMM
|
c16b7df76c597468ecac1c40e768d94005f79145
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_models/test_submodels/test_interface/test_lead_acid.py
|
tobykirk/PyBaMM
|
c16b7df76c597468ecac1c40e768d94005f79145
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Test lead-acid Butler-Volmer submodel
#
import pybamm
import tests
import unittest
class TestLeadAcid(unittest.TestCase):
def test_public_functions(self):
param = pybamm.LeadAcidParameters()
a_n = pybamm.FullBroadcast(
pybamm.Scalar(0.5), ["negative electrode"], "current collector"
)
a_p = pybamm.FullBroadcast(
pybamm.Scalar(0.5), ["positive electrode"], "current collector"
)
a = pybamm.Scalar(0.5)
variables = {
"Current collector current density": a,
"Negative electrode potential": a_n,
"Negative electrolyte potential": a_n,
"Negative electrode open circuit potential": a_n,
"Negative electrolyte concentration": a_n,
"Negative particle surface concentration": a_n,
"Negative electrode temperature": a_n,
"Negative electrode surface area to volume ratio": a_n,
}
submodel = pybamm.interface.ButlerVolmer(
param,
"Negative",
"lead-acid main",
{
"SEI film resistance": "none",
"total interfacial current density as a state": "false",
},
)
std_tests = tests.StandardSubModelTests(submodel, variables)
std_tests.test_all()
variables = {
"Current collector current density": a,
"Positive electrode potential": a_p,
"Positive electrolyte potential": a_p,
"Positive electrode open circuit potential": a_p,
"Positive electrolyte concentration": a_p,
"Positive particle surface concentration": a_p,
"Negative electrode interfacial current density": a_n,
"Negative electrode exchange current density": a_n,
"Positive electrode temperature": a_p,
"Negative electrode surface area to volume ratio": a_n,
"Positive electrode surface area to volume ratio": a_p,
"X-averaged negative electrode interfacial current density": a,
"X-averaged positive electrode interfacial current density": a,
"Sum of electrolyte reaction source terms": 0,
"Sum of negative electrode electrolyte reaction source terms": 0,
"Sum of positive electrode electrolyte reaction source terms": 0,
"Sum of x-averaged negative electrode "
"electrolyte reaction source terms": 0,
"Sum of x-averaged positive electrode "
"electrolyte reaction source terms": 0,
"Sum of interfacial current densities": 0,
"Sum of negative electrode interfacial current densities": 0,
"Sum of positive electrode interfacial current densities": 0,
"Sum of x-averaged negative electrode interfacial current densities": 0,
"Sum of x-averaged positive electrode interfacial current densities": 0,
}
submodel = pybamm.interface.ButlerVolmer(
param,
"Positive",
"lead-acid main",
{
"SEI film resistance": "none",
"total interfacial current density as a state": "false",
},
)
std_tests = tests.StandardSubModelTests(submodel, variables)
std_tests.test_all()
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| 38.543478
| 84
| 0.604343
|
29f68205d6c1d06711cb5d5f6e1aa839f6133c7d
| 541
|
py
|
Python
|
blender/arm/logicnode/scene/LN_scene.py
|
niacdoial/armory
|
3f9b633fbf772017c576a3f77695a6c28d9956e1
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/scene/LN_scene.py
|
niacdoial/armory
|
3f9b633fbf772017c576a3f77695a6c28d9956e1
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/scene/LN_scene.py
|
niacdoial/armory
|
3f9b633fbf772017c576a3f77695a6c28d9956e1
|
[
"Zlib"
] | null | null | null |
import bpy
from arm.logicnode.arm_nodes import *
class SceneNode(ArmLogicTreeNode):
"""Stores the given scene as a variable."""
bl_idname = 'LNSceneNode'
bl_label = 'Scene'
arm_version = 1
property0_get: PointerProperty(name='', type=bpy.types.Scene)
def init(self, context):
super(SceneNode, self).init(context)
self.add_output('NodeSocketShader', 'Scene')
def draw_buttons(self, context, layout):
layout.prop_search(self, 'property0_get', bpy.data, 'scenes', icon='NONE', text='')
| 27.05
| 91
| 0.678373
|
2b7793053bdd348799f51d9f22020a406bf4a05e
| 49
|
py
|
Python
|
real_estate/src/errors.py
|
gutessitore/real-estate-scrapper
|
1ea62d71230b6a21c7e32322b0e02b67a6163849
|
[
"MIT"
] | 1
|
2021-09-20T13:51:21.000Z
|
2021-09-20T13:51:21.000Z
|
real_estate/src/errors.py
|
gutessitore/real-estate-scrapper
|
1ea62d71230b6a21c7e32322b0e02b67a6163849
|
[
"MIT"
] | null | null | null |
real_estate/src/errors.py
|
gutessitore/real-estate-scrapper
|
1ea62d71230b6a21c7e32322b0e02b67a6163849
|
[
"MIT"
] | 5
|
2021-09-06T15:08:04.000Z
|
2021-12-05T15:43:36.000Z
|
class InvalidDataFrameError(Exception):
pass
| 16.333333
| 39
| 0.795918
|
89c97dfef1d9556ae35ab13a5ef5bc96b336feb6
| 944
|
py
|
Python
|
tests/test_result/test_result_base.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_result/test_result_base.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_result/test_result_base.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
from returns.result import Result
@pytest.mark.parametrize('method_name', [
'bind',
'unify',
'map',
'rescue',
'fix',
'alt',
'value_or',
])
def test_result_abstract_method(method_name):
"""Checks that Result itself contains abstract methods."""
method = getattr(Result, method_name)
with pytest.raises(NotImplementedError):
method(object, lambda to_output: to_output)
@pytest.mark.parametrize('method_name', [
'failure',
'unwrap',
])
def test_result_abstract_method_single(method_name):
"""Checks that Result itself contains abstract methods."""
method = getattr(Result, method_name)
with pytest.raises(NotImplementedError):
method(object)
def test_result_types():
"""Ensures that Result has two types inside a class."""
assert isinstance(Result.success_type, type)
assert isinstance(Result.failure_type, type)
| 24.205128
| 62
| 0.691737
|
98234e85d4b1f5d091e20748be957a7cec3eafd2
| 3,484
|
py
|
Python
|
lvsfunc/mask.py
|
petzku/lvsfunc
|
5d49bfbdd3863636be4263382f23091b9ee20bfe
|
[
"MIT"
] | null | null | null |
lvsfunc/mask.py
|
petzku/lvsfunc
|
5d49bfbdd3863636be4263382f23091b9ee20bfe
|
[
"MIT"
] | null | null | null |
lvsfunc/mask.py
|
petzku/lvsfunc
|
5d49bfbdd3863636be4263382f23091b9ee20bfe
|
[
"MIT"
] | null | null | null |
"""
Wrappers and masks for denoising.
"""
from functools import partial
from typing import Optional
import vapoursynth as vs
from vsutil import depth, get_depth, get_y, scale_value
from . import util
try:
from cytoolz import functoolz
except ModuleNotFoundError:
try:
from toolz import functoolz # type: ignore
except ModuleNotFoundError:
raise ModuleNotFoundError("Cannot find functoolz: Please install toolz or cytoolz")
core = vs.core
@functoolz.curry
def adaptive_mask(clip: vs.VideoNode, luma_scaling: float = 8.0) -> vs.VideoNode:
"""
A wrapper to create a luma mask for denoising and/or debanding.
Function is curried to allow parameter tuning when passing to denoisers
that allow you to pass your own mask.
Dependencies: adaptivegrain
:param clip: Input clip
:param luma_scaling: Luma scaling factor (Default: 8.0)
:return: Luma mask
"""
return core.adg.Mask(clip.std.PlaneStats(), luma_scaling)
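# Hedged usage sketch (editor's addition, not part of the original module): because
# adaptive_mask is curried, it can be partially applied with only `luma_scaling` and the
# resulting callable handed to code that expects a mask factory. `clip` and `my_denoiser`
# are hypothetical placeholders.
#
#     tuned = adaptive_mask(luma_scaling=4.0)      # partial application
#     mask = tuned(clip)                           # luma mask for this clip
#     merged = core.std.MaskedMerge(clip, my_denoiser(clip), mask)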
@functoolz.curry
def detail_mask(clip: vs.VideoNode, sigma: Optional[float] = None,
rad: int = 3, radc: int = 2,
brz_a: float = 0.005, brz_b: float = 0.005) -> vs.VideoNode:
"""
A wrapper for creating a detail mask to be used during denoising and/or debanding.
The detail mask is created using debandshit's rangemask,
and is then merged with Prewitt to catch lines it may have missed.
Function is curried to allow parameter tuning when passing to denoisers
that allow you to pass your own mask.
Dependencies: VapourSynth-Bilateral (optional: sigma), debandshit
:param clip: Input clip
    :param sigma:        Sigma for Bilateral pre-blurring (Default: None)
:param rad: The luma equivalent of gradfun3's "mask" parameter
:param radc: The chroma equivalent of gradfun3's "mask" parameter
    :param brz_a:        Binarizing threshold for the detail mask (Default: 0.005)
    :param brz_b:        Binarizing threshold for the edge mask (Default: 0.005)
:return: Detail mask
"""
try:
from debandshit import rangemask
except ModuleNotFoundError:
raise ModuleNotFoundError("detail_mask: missing dependency 'debandshit'")
if clip.format is None:
raise ValueError("detail_mask: 'Variable-format clips not supported'")
    # Handling correct value scaling if there's an assumed depth mismatch
# To the me in the future, long after civilisation has fallen, make sure to check 3.10's pattern matching.
if get_depth(clip) != 32:
if isinstance(brz_a, float):
brz_a = scale_value(brz_a, 32, get_depth(clip))
if isinstance(brz_b, float):
brz_b = scale_value(brz_b, 32, get_depth(clip))
else:
if isinstance(brz_a, int):
brz_a = scale_value(brz_a, get_depth(clip), 32)
if isinstance(brz_b, int):
brz_b = scale_value(brz_b, get_depth(clip), 32)
blur = (util.quick_resample(clip, partial(core.bilateral.Gaussian, sigma=sigma))
if sigma else clip)
mask_a = rangemask(get_y(blur), rad=rad, radc=radc)
mask_a = depth(mask_a, clip.format.bits_per_sample)
mask_a = core.std.Binarize(mask_a, brz_a)
mask_b = core.std.Prewitt(get_y(blur))
mask_b = core.std.Binarize(mask_b, brz_b)
mask = core.std.Expr([mask_a, mask_b], 'x y max')
mask = util.pick_removegrain(mask)(mask, 22)
return util.pick_removegrain(mask)(mask, 11)
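# Hedged usage sketch (editor's addition): detail_mask is curried the same way, so a
# pre-tuned mask function can be handed to debanding/denoising wrappers that accept one.
# `clip` is a hypothetical placeholder.
#
#     my_mask = detail_mask(brz_a=0.01, brz_b=0.05)    # returns a partial
#     mask = my_mask(clip)                             # evaluated detail mask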
| 35.55102
| 110
| 0.677669
|
136037b852887597bd16d3f56881598455db2199
| 892
|
py
|
Python
|
coordinates/test_coordinates.py
|
ThorsHamster/find_new_hometown
|
862231bb1f3a0a1505d02b452adca2b45a6fc850
|
[
"MIT"
] | 2
|
2020-01-07T07:28:17.000Z
|
2020-01-07T10:21:41.000Z
|
coordinates/test_coordinates.py
|
ThorsHamster/find_new_hometown
|
862231bb1f3a0a1505d02b452adca2b45a6fc850
|
[
"MIT"
] | 30
|
2021-03-19T15:46:29.000Z
|
2021-12-21T12:22:50.000Z
|
coordinates/test_coordinates.py
|
ThorsHamster/find_new_hometown
|
862231bb1f3a0a1505d02b452adca2b45a6fc850
|
[
"MIT"
] | null | null | null |
import pytest
from coordinates import Coordinates
@pytest.fixture
def unit_under_test():
return Coordinates()
class TestCoordinates:
def test_longitude(self, unit_under_test):
unit_under_test.longitude = 3
assert unit_under_test.longitude == 3
def test_latitude(self, unit_under_test):
unit_under_test.latitude = 4
assert unit_under_test.latitude == 4
def test_both_elements_with_changes(self, unit_under_test):
unit_under_test.longitude = 3
unit_under_test.latitude = 4
assert unit_under_test.longitude == 3
assert unit_under_test.latitude == 4
unit_under_test.longitude = 7
assert unit_under_test.longitude == 7
assert unit_under_test.latitude == 4
unit_under_test.latitude = 9
assert unit_under_test.longitude == 7
assert unit_under_test.latitude == 9
| 27.030303
| 63
| 0.702915
|
6507cc7a6bad8fb9aed262beaf0b810977b1ba49
| 2,174
|
py
|
Python
|
sheepdog/transactions/submission/__init__.py
|
jacquayj/sheepdog
|
6d6d98a17cab9bcc8881079ced9065036c757eee
|
[
"Apache-2.0"
] | null | null | null |
sheepdog/transactions/submission/__init__.py
|
jacquayj/sheepdog
|
6d6d98a17cab9bcc8881079ced9065036c757eee
|
[
"Apache-2.0"
] | null | null | null |
sheepdog/transactions/submission/__init__.py
|
jacquayj/sheepdog
|
6d6d98a17cab9bcc8881079ced9065036c757eee
|
[
"Apache-2.0"
] | null | null | null |
import flask
from sheepdog import utils
from sheepdog.errors import UserError
from sheepdog.globals import FLAG_IS_ASYNC
from sheepdog.transactions.submission.transaction import SubmissionTransaction
def handle_submission_transaction(program, project, *doc_args, **tx_kwargs):
"""
Create and execute a single (not bulk) transaction.
Return:
Tuple[flask.Response, int]: (API response json, status code)
"""
is_async = tx_kwargs.pop("is_async", utils.is_flag_set(FLAG_IS_ASYNC))
db_driver = tx_kwargs.pop("db_driver", flask.current_app.db)
smtp_conf = None
if utils.should_send_email(flask.current_app.config):
smtp_conf = flask.current_app.get_smtp_conf()
transaction = SubmissionTransaction(
smtp_conf=smtp_conf,
program=program,
project=project,
logger=flask.current_app.logger,
index_client=flask.current_app.index_client,
db_driver=db_driver,
**tx_kwargs
)
if is_async:
session = transaction.db_driver.session_scope()
with session, transaction:
response = {
"code": 200,
"message": "Transaction submitted.",
"transaction_id": transaction.transaction_id,
}
flask.current_app.async_pool.schedule(transaction_worker, transaction)
return flask.jsonify(response), 200
else:
response, code = transaction_worker(transaction)
return flask.jsonify(response), code
def transaction_worker(transaction):
"""
Perform a single transaction in the background after request context.
Args:
transaction: The transaction instance
"""
session = transaction.db_driver.session_scope(can_inherit=False)
with session, transaction:
try:
transaction.take_action()
except UserError as e:
transaction.record_user_error(e)
raise
except Exception as e: # pylint: disable=broad-except
transaction.record_internal_error(e)
finally:
response = transaction.json
code = transaction.status_code
return response, code
| 31.970588
| 78
| 0.671573
|
b73c5e4dbe02531a255424caab0a95abe2d774a7
| 2,853
|
py
|
Python
|
ed2d/material.py
|
explosiveduck/cubix
|
16e7a298a83fe53174bda8ec77dfcf6869ed5336
|
[
"BSD-2-Clause"
] | 1
|
2015-11-02T02:11:18.000Z
|
2015-11-02T02:11:18.000Z
|
ed2d/material.py
|
explosiveduck/cubix
|
16e7a298a83fe53174bda8ec77dfcf6869ed5336
|
[
"BSD-2-Clause"
] | 29
|
2015-06-09T19:27:49.000Z
|
2016-03-08T06:13:24.000Z
|
ed2d/material.py
|
explosiveduck/cubix
|
16e7a298a83fe53174bda8ec77dfcf6869ed5336
|
[
"BSD-2-Clause"
] | null | null | null |
# Cook-Torance for diffuseType and Blinn-Phong
# Figure out specular and frensel stuff
# Emission?
# Need to setup so it can send outputs to a shader
# Inputs need to be defined a lot better
from ed2d import texture
from ed2d import files
class Material(object):
def __init__(self):
self.diffuse = None
self.idiffuse = 0 # Intensity parameter
self.ambient = None
self.iambient = 0 # Intensity parameter
self.specular = None
self.ispecular = 0 # Specular exponent
self.roughness = None
self.emission = None
self.IOR = None
# This is the diffuse textures
self.albedoLayers = {}
self.diffuseType = None
self.specularType = None
self.normalMapLayers = {}
self.specularMapLayers = {}
self.displacementMapLayers = {}
# Assign the shader that will render the Material
self.program = None
def addProgram(self, program):
''' Adds a program to the Material class. '''
self.program = program
def setDiffuseColor(self, r, g, b, intensity):
''' Sets the diffuse color of a material. '''
self.diffuse = [r, g, b]
self.idiffuse = intensity
def setAmbientColor(self, r, g, b, intensity):
''' Sets the ambient color of a material. '''
self.ambient = [r, g, b]
self.iambient = intensity
def setSpecularColor(self, r, g, b, roughness):
''' Sets the specular color and roughness of a material. '''
self.specular = [r, g, b]
self.roughness = roughness
# Leave these like this for now till I figure out the shaders
def setDiffuseType(self, shader):
pass
def setSpecularType(self, shader):
pass
def addTextures(self, textureDict):
''' Will add textures to the Material. It takes a dictionary as param. '''
# Format is {A: [albedo0, albedo1, ...], N: [normal1, normal2, ...], S: [specular1, specular2, ...]}
# This will replace the crap underneath this function
        # Use items()/== and str(i) so the key comparison and layer names work on Python 3.
        for key, value in textureDict.items():
            if key == 'A':
                for i in range(len(value)):
                    imagePath = files.resolve_path('data', 'images', value[i])
                    self.albedoLayers['Layer' + str(i)] = texture.Texture(imagePath, self.program)
            if key == 'N':
                for i in range(len(value)):
                    imagePath = files.resolve_path('data', 'images', value[i])
                    self.normalMapLayers['Layer' + str(i)] = texture.Texture(imagePath, self.program)
            if key == 'S':
                for i in range(len(value)):
                    imagePath = files.resolve_path('data', 'images', value[i])
                    self.specularMapLayers['Layer' + str(i)] = texture.Texture(imagePath, self.program)
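# Hedged usage sketch (editor's addition, not part of the original module): building a
# Material and passing addTextures() the documented {'A'/'N'/'S': [filenames]} layout.
# `program` and the texture file names are hypothetical placeholders.
#
#     mat = Material()
#     mat.addProgram(program)
#     mat.setDiffuseColor(1.0, 0.5, 0.2, 0.8)
#     mat.setSpecularColor(1.0, 1.0, 1.0, 0.3)
#     mat.addTextures({'A': ['brick_albedo.png'], 'N': ['brick_normal.png']})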
| 35.222222
| 108
| 0.594812
|
55c4e485c5af0d5904eb295e997cc95e12104d1b
| 3,406
|
py
|
Python
|
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/mdecl_wrapper.py
|
nalinimsingh/ITK_4D
|
95a2eacaeaffe572889832ef0894239f89e3f303
|
[
"Apache-2.0"
] | 3
|
2018-10-01T20:46:17.000Z
|
2019-12-17T19:39:50.000Z
|
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/mdecl_wrapper.py
|
nalinimsingh/ITK_4D
|
95a2eacaeaffe572889832ef0894239f89e3f303
|
[
"Apache-2.0"
] | null | null | null |
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/mdecl_wrapper.py
|
nalinimsingh/ITK_4D
|
95a2eacaeaffe572889832ef0894239f89e3f303
|
[
"Apache-2.0"
] | 4
|
2018-05-17T16:34:54.000Z
|
2020-09-24T02:12:40.000Z
|
# Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
Defines class :class:`mdecl_wrapper_t`, which allows working on a set of
declarations as if it were a single declaration.
The :class:`class <mdecl_wrapper_t>` allows the user to avoid writing "for" loops
within the code.
"""
import os
class call_redirector_t(object):
"""Internal class used to call some function of objects"""
def __init__(self, name, decls):
"""creates call_redirector_t instance.
:param name: name of method, to be called on every object in the
`decls` list
:param decls: list of objects
"""
object.__init__(self)
self.name = name
self.decls = decls
def __call__(self, *arguments, **keywords):
"""calls method :attr:`call_redirector_t.name` on every object
within the :attr:`call_redirector_t.decls` list"""
for d in self.decls:
callable_ = getattr(d, self.name)
callable_(*arguments, **keywords)
class mdecl_wrapper_t(object):
"""
multiple declarations class wrapper
    The main purpose of this class is to allow a user to work on many
    declarations as if they were one single declaration.
    For example, instead of writing a `for` loop like the following
.. code-block:: python
for c in global_namespace.classes():
c.attribute = "xxxx"
you can write:
.. code-block:: python
global_namespace.classes().attribute = "xxxx"
The same functionality could be applied on "set" methods too.
"""
def __init__(self, decls):
""":param decls: list of declarations to operate on.
:type decls: list of :class:`declaration wrappers <decl_wrapper_t>`
"""
object.__init__(self)
self.__dict__['declarations'] = decls
def __bool__(self):
return bool(self.declarations)
def __len__(self):
"""returns the number of declarations"""
return len(self.declarations)
def __getitem__(self, index):
"""provides access to declaration"""
return self.declarations[index]
def __iter__(self):
return iter(self.declarations)
def __ensure_attribute(self, name):
invalid_decls = [d for d in self.declarations if not hasattr(d, name)]
sep = os.linesep + ' '
if invalid_decls:
raise RuntimeError((
"Next declarations don't have '%s' attribute: %s")
% (name, sep.join(map(str, invalid_decls))))
def __setattr__(self, name, value):
"""Updates the value of attribute on all declarations.
:param name: name of attribute
:param value: new value of attribute
"""
self.__ensure_attribute(name)
for d in self.declarations:
setattr(d, name, value)
def __getattr__(self, name):
""":param name: name of method
"""
return call_redirector_t(name, self.declarations)
def __contains__(self, item):
return item in self.declarations
    def to_list(self):
        return list(self.declarations)
| 29.617391
| 79
| 0.612449
|
d698b8624bf0eb5203f5ea25c009d9993d3f53f3
| 2,273
|
py
|
Python
|
auth.py
|
shamspias/Raspberry-Web-UI
|
af956a805c32cf5045b4e6e08af5d16992e88079
|
[
"MIT"
] | 1
|
2019-05-03T17:09:12.000Z
|
2019-05-03T17:09:12.000Z
|
auth.py
|
shamspias/Raspberry-Web-UI
|
af956a805c32cf5045b4e6e08af5d16992e88079
|
[
"MIT"
] | 2
|
2021-05-02T11:25:09.000Z
|
2021-05-02T11:25:48.000Z
|
auth.py
|
shamspias/Raspberry-Web-UI
|
af956a805c32cf5045b4e6e08af5d16992e88079
|
[
"MIT"
] | null | null | null |
# auth.py
from flask import Blueprint, render_template, redirect, url_for, request, flash
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_user, logout_user, login_required
from .models import User
from . import db
auth = Blueprint(name='auth', import_name=__name__)
@auth.route('/login')
def login():
return render_template('login.html')
@auth.route('/login', methods=['POST'])
def login_post():
email = request.form.get('email')
password = request.form.get('password')
remember = True if request.form.get('remember') else False
user = User.query.filter_by(email=email).first()
# check if user actually exists
# take the user supplied password, hash it, and compare it to the hashed password in database
if not user or not check_password_hash(user.password, password):
flash('Please check your login details and try again.')
return redirect(url_for('auth.login')) # if user doesn't exist or password is wrong, reload the page
# if the above check passes, then we know the user has the right credentials
login_user(user, remember=remember)
return redirect(url_for('main.profile'))
@auth.route('/signup')
@login_required
def signup():
return render_template('reg.html')
@auth.route('/signup', methods=['POST'])
@login_required
def signup_post():
email = request.form.get('email')
name = request.form.get('name')
password = request.form.get('password')
user = User.query.filter_by(
email=email).first() # if this returns a user, then the email already exists in database
if user: # if a user is found, we want to redirect back to signup page so user can try again
flash('Email address already exists')
return redirect(url_for('auth.signup'))
# create new user with the form data. Hash the password so plaintext version isn't saved.
new_user = User(email=email, name=name, password=generate_password_hash(password, method='sha256'))
# add the new user to the database
db.session.add(new_user)
db.session.commit()
return redirect(url_for('auth.login'))
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('main.index'))
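# Hedged usage sketch (editor's addition): exercising the blueprint above through Flask's
# test client, assuming the application factory registers `auth`. `app` and the
# credentials are hypothetical placeholders.
#
#     client = app.test_client()
#     client.post('/login', data={'email': 'user@example.com', 'password': 'secret',
#                                 'remember': 'y'}, follow_redirects=True)
#     client.get('/logout', follow_redirects=True)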
| 32.014085
| 109
| 0.714034
|
9ad9b8541a5c5ca06aacdc50b2e83e1204ec0cd9
| 11,142
|
py
|
Python
|
sdl2/sdlmixer.py
|
namelivia/py-sdl2
|
c1bdf43501224d5f0a125dbce70198100ec7be82
|
[
"CC0-1.0"
] | 222
|
2017-08-19T00:51:59.000Z
|
2022-02-05T19:39:33.000Z
|
sdl2/sdlmixer.py
|
namelivia/py-sdl2
|
c1bdf43501224d5f0a125dbce70198100ec7be82
|
[
"CC0-1.0"
] | 103
|
2017-08-20T17:13:05.000Z
|
2022-02-05T20:20:01.000Z
|
sdl2/sdlmixer.py
|
namelivia/py-sdl2
|
c1bdf43501224d5f0a125dbce70198100ec7be82
|
[
"CC0-1.0"
] | 54
|
2017-08-20T17:13:00.000Z
|
2022-01-14T23:51:13.000Z
|
import os
from ctypes import Structure, POINTER, CFUNCTYPE, c_int, c_char_p, c_void_p, \
c_double
from .dll import DLL
from .version import SDL_version, SDL_VERSIONNUM
from .audio import AUDIO_S16LSB, AUDIO_S16MSB, SDL_MIX_MAXVOLUME
from .stdinc import Uint8, Uint16, Uint32, Sint16, SDL_bool
from .endian import SDL_LIL_ENDIAN, SDL_BYTEORDER
from .rwops import SDL_RWops, SDL_RWFromFile
from .error import SDL_SetError, SDL_GetError, SDL_ClearError
__all__ = [
# Structs
"Mix_Chunk", "Mix_Music",
# Defines
"SDL_MIXER_MAJOR_VERSION", "SDL_MIXER_MINOR_VERSION",
"SDL_MIXER_PATCHLEVEL", "MIX_MAJOR_VERSION", "MIX_MINOR_VERSION",
"MIX_PATCHLEVEL", "MIX_CHANNELS", "MIX_DEFAULT_FREQUENCY",
"MIX_DEFAULT_FORMAT", "MIX_DEFAULT_CHANNELS", "MIX_MAX_VOLUME",
"MIX_CHANNEL_POST", "MIX_EFFECTSMAXSPEED",
# Enums
"MIX_InitFlags",
"MIX_INIT_FLAC", "MIX_INIT_MOD", "MIX_INIT_MP3", "MIX_INIT_OGG",
"MIX_INIT_MID", "MIX_INIT_OPUS",
"Mix_Fading",
"MIX_NO_FADING", "MIX_FADING_OUT", "MIX_FADING_IN",
"Mix_MusicType",
"MUS_NONE", "MUS_CMD", "MUS_WAV", "MUS_MOD", "MUS_MID", "MUS_OGG",
"MUS_MP3", "MUS_MP3_MAD_UNUSED", "MUS_FLAC", "MUS_MODPLUG_UNUSED",
"MUS_OPUS",
# Macro Functions
"SDL_MIXER_VERSION", "MIX_VERSION", "SDL_MIXER_COMPILEDVERSION",
"SDL_MIXER_VERSION_ATLEAST", "Mix_LoadWAV", "Mix_PlayChannel",
"Mix_FadeInChannel",
"Mix_Linked_Version", "Mix_Init", "Mix_Quit", "Mix_OpenAudioDevice",
"Mix_OpenAudio", "Mix_AllocateChannels", "Mix_QuerySpec",
"Mix_LoadWAV_RW", "Mix_LoadMUS", "Mix_LoadMUS_RW",
"Mix_LoadMUSType_RW", "Mix_QuickLoad_WAV", "Mix_QuickLoad_RAW",
"Mix_FreeChunk", "Mix_FreeMusic", "Mix_GetNumChunkDecoders",
"Mix_GetChunkDecoder", "Mix_GetNumMusicDecoders",
"Mix_HasChunkDecoder", #"Mix_HasMusicDecoder",
"Mix_GetMusicDecoder", "Mix_GetMusicType",
"Mix_SetPostMix", "Mix_HookMusic",
"Mix_HookMusicFinished", "Mix_GetMusicHookData",
"Mix_ChannelFinished", "Mix_RegisterEffect", "Mix_UnregisterEffect",
"Mix_UnregisterAllEffects", "Mix_SetPanning",
"Mix_SetPosition", "Mix_SetDistance", "Mix_SetReverseStereo",
"Mix_ReserveChannels", "Mix_GroupChannel", "Mix_GroupChannels",
"Mix_GroupAvailable", "Mix_GroupCount", "Mix_GroupOldest",
"Mix_GroupNewer", "Mix_PlayChannelTimed",
"Mix_PlayMusic", "Mix_FadeInMusic", "Mix_FadeInMusicPos",
"Mix_FadeInChannelTimed", "Mix_Volume",
"Mix_VolumeChunk", "Mix_VolumeMusic", "Mix_HaltChannel",
"Mix_HaltGroup", "Mix_HaltMusic", "Mix_ExpireChannel",
"Mix_FadeOutChannel", "Mix_FadeOutGroup", "Mix_FadeOutMusic",
"Mix_FadingMusic", "Mix_FadingChannel", "Mix_Pause", "Mix_Resume",
"Mix_Paused", "Mix_PauseMusic", "Mix_ResumeMusic", "Mix_RewindMusic",
"Mix_PausedMusic", "Mix_SetMusicPosition", "Mix_Playing",
"Mix_PlayingMusic", "Mix_SetMusicCMD", "Mix_SetSynchroValue",
"Mix_GetSynchroValue", "Mix_SetSoundFonts", "Mix_GetSoundFonts",
"Mix_EachSoundFont", "Mix_GetChunk",
"Mix_CloseAudio", "Mix_SetError", "Mix_GetError", "Mix_ClearError",
# Callback Functions
"channel_finished", "music_finished", "mix_func", "soundfont_function",
"Mix_EffectFunc_t", "Mix_EffectDone_t",
# Python Functions
"get_dll_file"
]
try:
dll = DLL("SDL2_mixer", ["SDL2_mixer", "SDL2_mixer-2.0"],
os.getenv("PYSDL2_DLL_PATH"))
except RuntimeError as exc:
raise ImportError(exc)
def get_dll_file():
"""Gets the file name of the loaded SDL2_mixer library."""
return dll.libfile
_bind = dll.bind_function
SDL_MIXER_MAJOR_VERSION = 2
SDL_MIXER_MINOR_VERSION = 0
SDL_MIXER_PATCHLEVEL = 4
def SDL_MIXER_VERSION(x):
x.major = SDL_MIXER_MAJOR_VERSION
x.minor = SDL_MIXER_MINOR_VERSION
x.patch = SDL_MIXER_PATCHLEVEL
MIX_MAJOR_VERSION = SDL_MIXER_MAJOR_VERSION
MIX_MINOR_VERSION = SDL_MIXER_MINOR_VERSION
MIX_PATCHLEVEL = SDL_MIXER_PATCHLEVEL
MIX_VERSION = SDL_MIXER_VERSION
SDL_MIXER_COMPILEDVERSION = SDL_VERSIONNUM(SDL_MIXER_MAJOR_VERSION, SDL_MIXER_MINOR_VERSION, SDL_MIXER_PATCHLEVEL)
SDL_MIXER_VERSION_ATLEAST = lambda x, y, z: (SDL_MIXER_COMPILEDVERSION >= SDL_VERSIONNUM(x, y, z))
Mix_Linked_Version = _bind("Mix_Linked_Version", None, POINTER(SDL_version))
MIX_InitFlags = c_int
MIX_INIT_FLAC = 0x00000001
MIX_INIT_MOD = 0x00000002
MIX_INIT_MP3 = 0x00000008
MIX_INIT_OGG = 0x00000010
MIX_INIT_MID = 0x00000020
MIX_INIT_OPUS = 0x00000040
Mix_Init = _bind("Mix_Init", [c_int], c_int)
Mix_Quit = _bind("Mix_Quit")
MIX_CHANNELS = 8
MIX_DEFAULT_FREQUENCY = 22050
if SDL_BYTEORDER == SDL_LIL_ENDIAN:
MIX_DEFAULT_FORMAT = AUDIO_S16LSB
else:
MIX_DEFAULT_FORMAT = AUDIO_S16MSB
MIX_DEFAULT_CHANNELS = 2
MIX_MAX_VOLUME = SDL_MIX_MAXVOLUME
class Mix_Chunk(Structure):
_fields_ = [("allocated", c_int),
("abuf", POINTER(Uint8)),
("alen", Uint32),
("volume", Uint8)]
Mix_Fading = c_int
MIX_NO_FADING = 0
MIX_FADING_OUT = 1
MIX_FADING_IN = 2
Mix_MusicType = c_int
MUS_NONE = 0
MUS_CMD = 1
MUS_WAV = 2
MUS_MOD = 3
MUS_MID = 4
MUS_OGG = 5
MUS_MP3 = 6
MUS_MP3_MAD_UNUSED = 7
MUS_FLAC = 9
MUS_MODPLUG_UNUSED = 10
MUS_OPUS = 11
class Mix_Music(c_void_p):
pass
Mix_OpenAudio = _bind("Mix_OpenAudio", [c_int, Uint16, c_int, c_int], c_int)
Mix_OpenAudioDevice = _bind("Mix_OpenAudioDevice", [c_int, Uint16, c_int, c_int, c_char_p, c_int], c_int, added='2.0.2')
Mix_AllocateChannels = _bind("Mix_AllocateChannels", [c_int], c_int)
Mix_QuerySpec = _bind("Mix_QuerySpec", [POINTER(c_int), POINTER(Uint16), POINTER(c_int)], c_int)
Mix_LoadWAV_RW = _bind("Mix_LoadWAV_RW", [POINTER(SDL_RWops), c_int], POINTER(Mix_Chunk))
Mix_LoadWAV = lambda fname: Mix_LoadWAV_RW(SDL_RWFromFile(fname, b"rb"), 1)
Mix_LoadMUS = _bind("Mix_LoadMUS", [c_char_p], POINTER(Mix_Music))
Mix_LoadMUS_RW = _bind("Mix_LoadMUS_RW", [POINTER(SDL_RWops)], POINTER(Mix_Music))
Mix_LoadMUSType_RW = _bind("Mix_LoadMUSType_RW", [POINTER(SDL_RWops), Mix_MusicType, c_int], POINTER(Mix_Music))
Mix_QuickLoad_WAV = _bind("Mix_QuickLoad_WAV", [POINTER(Uint8)], POINTER(Mix_Chunk))
Mix_QuickLoad_RAW = _bind("Mix_QuickLoad_RAW", [POINTER(Uint8), Uint32], POINTER(Mix_Chunk))
Mix_FreeChunk = _bind("Mix_FreeChunk", [POINTER(Mix_Chunk)])
Mix_FreeMusic = _bind("Mix_FreeMusic", [POINTER(Mix_Music)])
Mix_GetNumChunkDecoders = _bind("Mix_GetNumChunkDecoders", None, c_int)
Mix_GetChunkDecoder = _bind("Mix_GetChunkDecoder", [c_int], c_char_p)
Mix_HasChunkDecoder = _bind("Mix_HasChunkDecoder", [c_char_p], SDL_bool, added='2.0.2')
Mix_GetNumMusicDecoders = _bind("Mix_GetNumMusicDecoders", None, c_int)
Mix_GetMusicDecoder = _bind("Mix_GetMusicDecoder", [c_int], c_char_p)
#Mix_HasMusicDecoder = _bind("Mix_HasMusicDecoder", [c_char_p], SDL_bool) # not actually implemented in SDL_mixer
Mix_GetMusicType = _bind("Mix_GetMusicType", [POINTER(Mix_Music)], Mix_MusicType)
mix_func = CFUNCTYPE(None, c_void_p, POINTER(Uint8), c_int)
Mix_SetPostMix = _bind("Mix_SetPostMix", [mix_func, c_void_p])
Mix_HookMusic = _bind("Mix_HookMusic", [mix_func, c_void_p])
music_finished = CFUNCTYPE(None)
Mix_HookMusicFinished = _bind("Mix_HookMusicFinished", [music_finished])
Mix_GetMusicHookData = _bind("Mix_GetMusicHookData", None, c_void_p)
channel_finished = CFUNCTYPE(None, c_int)
Mix_ChannelFinished = _bind("Mix_ChannelFinished", [channel_finished])
MIX_CHANNEL_POST = -2
Mix_EffectFunc_t = CFUNCTYPE(None, c_int, c_void_p, c_int, c_void_p)
Mix_EffectDone_t = CFUNCTYPE(None, c_int, c_void_p)
Mix_RegisterEffect = _bind("Mix_RegisterEffect", [c_int, Mix_EffectFunc_t, Mix_EffectDone_t, c_void_p], c_int)
Mix_UnregisterEffect = _bind("Mix_UnregisterEffect", [c_int, Mix_EffectFunc_t], c_int)
Mix_UnregisterAllEffects = _bind("Mix_UnregisterAllEffects", [c_int])
MIX_EFFECTSMAXSPEED = "MIX_EFFECTSMAXSPEED"
Mix_SetPanning = _bind("Mix_SetPanning", [c_int, Uint8, Uint8], c_int)
Mix_SetPosition = _bind("Mix_SetPosition", [c_int, Sint16, Uint8], c_int)
Mix_SetDistance = _bind("Mix_SetDistance", [c_int, Uint8])
Mix_SetReverseStereo = _bind("Mix_SetReverseStereo", [c_int, c_int], c_int)
Mix_ReserveChannels = _bind("Mix_ReserveChannels", [c_int], c_int)
Mix_GroupChannel = _bind("Mix_GroupChannel", [c_int, c_int], c_int)
Mix_GroupChannels = _bind("Mix_GroupChannels", [c_int, c_int, c_int], c_int)
Mix_GroupAvailable = _bind("Mix_GroupAvailable", [c_int], c_int)
Mix_GroupCount = _bind("Mix_GroupCount", [c_int], c_int)
Mix_GroupOldest = _bind("Mix_GroupOldest", [c_int], c_int)
Mix_GroupNewer = _bind("Mix_GroupNewer", [c_int], c_int)
Mix_PlayChannel = lambda channel, chunk, loops: Mix_PlayChannelTimed(channel, chunk, loops, -1)
Mix_PlayChannelTimed = _bind("Mix_PlayChannelTimed", [c_int, POINTER(Mix_Chunk), c_int, c_int], c_int)
Mix_PlayMusic = _bind("Mix_PlayMusic", [POINTER(Mix_Music), c_int], c_int)
Mix_FadeInMusic = _bind("Mix_FadeInMusic", [POINTER(Mix_Music), c_int, c_int], c_int)
Mix_FadeInMusicPos = _bind("Mix_FadeInMusicPos", [POINTER(Mix_Music), c_int, c_int, c_double], c_int)
Mix_FadeInChannel = lambda channel, chunk, loops, ms: Mix_FadeInChannelTimed(channel, chunk, loops, ms, -1)
Mix_FadeInChannelTimed = _bind("Mix_FadeInChannelTimed", [c_int, POINTER(Mix_Chunk), c_int, c_int, c_int], c_int)
Mix_Volume = _bind("Mix_Volume", [c_int, c_int], c_int)
Mix_VolumeChunk = _bind("Mix_VolumeChunk", [POINTER(Mix_Chunk), c_int], c_int)
Mix_VolumeMusic = _bind("Mix_VolumeMusic", [c_int], c_int)
Mix_HaltChannel = _bind("Mix_HaltChannel", [c_int], c_int)
Mix_HaltGroup = _bind("Mix_HaltGroup", [c_int], c_int)
Mix_HaltMusic = _bind("Mix_HaltMusic", None, c_int)
Mix_ExpireChannel = _bind("Mix_ExpireChannel", [c_int, c_int], c_int)
Mix_FadeOutChannel = _bind("Mix_FadeOutChannel", [c_int, c_int], c_int)
Mix_FadeOutGroup = _bind("Mix_FadeOutGroup", [c_int, c_int], c_int)
Mix_FadeOutMusic = _bind("Mix_FadeOutMusic", [c_int], c_int)
Mix_FadingMusic = _bind("Mix_FadingMusic", None, Mix_Fading)
Mix_FadingChannel = _bind("Mix_FadingChannel", [c_int], Mix_Fading)
Mix_Pause = _bind("Mix_Pause", [c_int])
Mix_Resume = _bind("Mix_Resume", [c_int])
Mix_Paused = _bind("Mix_Paused", [c_int], c_int)
Mix_PauseMusic = _bind("Mix_PauseMusic")
Mix_ResumeMusic = _bind("Mix_ResumeMusic")
Mix_RewindMusic = _bind("Mix_RewindMusic")
Mix_PausedMusic = _bind("Mix_PausedMusic", None, c_int)
Mix_SetMusicPosition = _bind("Mix_SetMusicPosition", [c_double], c_int)
Mix_Playing = _bind("Mix_Playing", [c_int], c_int)
Mix_PlayingMusic = _bind("Mix_PlayingMusic", None, c_int)
Mix_SetMusicCMD = _bind("Mix_SetMusicCMD", [c_char_p], c_int)
Mix_SetSynchroValue = _bind("Mix_SetSynchroValue", [c_int], c_int)
Mix_GetSynchroValue = _bind("Mix_GetSynchroValue", None, c_int)
Mix_SetSoundFonts = _bind("Mix_SetSoundFonts", [c_char_p], c_int)
Mix_GetSoundFonts = _bind("Mix_GetSoundFonts", None, c_char_p)
soundfont_function = CFUNCTYPE(c_int, c_char_p, c_void_p)
Mix_EachSoundFont = _bind("Mix_EachSoundFont", [soundfont_function, c_void_p], c_int)
Mix_GetChunk = _bind("Mix_GetChunk", [c_int], POINTER(Mix_Chunk))
Mix_CloseAudio = _bind("Mix_CloseAudio")
Mix_SetError = SDL_SetError
Mix_GetError = SDL_GetError
Mix_ClearError = SDL_ClearError
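# Hedged usage sketch (editor's addition, not part of the original bindings): a minimal
# playback flow built only from the functions bound above; the WAV path is a hypothetical
# placeholder and error handling is omitted.
#
#     Mix_OpenAudio(MIX_DEFAULT_FREQUENCY, MIX_DEFAULT_FORMAT, MIX_DEFAULT_CHANNELS, 1024)
#     chunk = Mix_LoadWAV(b"beep.wav")
#     Mix_PlayChannel(-1, chunk, 0)   # play once on the first free channel
#     Mix_FreeChunk(chunk)
#     Mix_CloseAudio()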
| 45.851852
| 120
| 0.764315
|
10ef1fd4675cc6bfaccfdf1fd0f0370037027cc8
| 709
|
py
|
Python
|
Array/Medium_FindFirstandLastpositionofElementinSortedArray_34_WYH.py
|
LinkWoong/LC-Solutions
|
98b2ce55f05f6acb672f20519f79ca9f4248961d
|
[
"MIT"
] | 4
|
2019-05-15T10:40:34.000Z
|
2020-07-27T03:05:39.000Z
|
Array/Medium_FindFirstandLastpositionofElementinSortedArray_34_WYH.py
|
LinkWoong/LC-Solutions
|
98b2ce55f05f6acb672f20519f79ca9f4248961d
|
[
"MIT"
] | 2
|
2019-08-20T15:34:33.000Z
|
2019-09-20T19:41:27.000Z
|
Array/Medium_FindFirstandLastpositionofElementinSortedArray_34_WYH.py
|
LinkWoong/LC-Solutions
|
98b2ce55f05f6acb672f20519f79ca9f4248961d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[ ]:
class Solution:
def searchRange(self, nums, target):
        # Approach:
        # First write a binary search helper that finds the index of the first occurrence of target.
        # Inside the helper, if nums[mid] is greater than or equal to target, hi becomes the current mid;
        # if nums[mid] is less than target, lo becomes the current mid + 1.
        # Note that because nums is a sorted list, calling the same helper with target + 1
        # returns the index just past the last occurrence of target in nums.
def search(n):
lo, hi = 0, len(nums)
while lo < hi:
mid = int((lo + hi) / 2)
if nums[mid] >= n:
hi = mid
else:
lo = mid + 1
return lo
lo = search(target)
        return [lo, search(target+1)-1] if target in nums[lo:lo+1] else [-1, -1]  # nums[lo:lo+1] also guards the nums == [] case
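# Hedged usage example added by the editor (not part of the original solution file),
# verified against the logic above:
if __name__ == '__main__':
    print(Solution().searchRange([5, 7, 7, 8, 8, 10], 8))  # expected: [3, 4]
    print(Solution().searchRange([], 0))                   # expected: [-1, -1]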
| 25.321429
| 96
| 0.561354
|
91f1d2b0a65e1f72ade4e3331f1a5becce9fe7fc
| 1,527
|
py
|
Python
|
md2ebook/md2ebook.py
|
brunobord/md2ebook
|
31e0d06b77f2d986e6af1115c9e613dfec0591a9
|
[
"MIT"
] | 8
|
2015-01-14T22:15:27.000Z
|
2022-03-17T16:24:38.000Z
|
md2ebook/md2ebook.py
|
brunobord/md2ebook
|
31e0d06b77f2d986e6af1115c9e613dfec0591a9
|
[
"MIT"
] | 1
|
2018-01-04T14:35:09.000Z
|
2018-01-04T14:35:09.000Z
|
md2ebook/md2ebook.py
|
brunobord/md2ebook
|
31e0d06b77f2d986e6af1115c9e613dfec0591a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""Markdown to Book.
Usage:
md2ebook start [<name>] [--overwrite] [--bookname=<bookname>]
md2ebook build [--with-pdf] [--verbose] [--cover=<cover>]
[--generator=<generator>]
md2ebook check
md2ebook --version
Commands:
start Start a blank project, using the default template
build Generates HTML and EPUB. Optionally PDF
check Checks for the EPUB integrity. Needs epubcheck.
Options:
-h --help Show this screen.
--version Show version.
--overwrite Will overwrite the project directory.
Handle with care.
--bookname=<bookname> Will set the name of the initial Markdown file.
--verbose Will display ebook-convert output.
--with-pdf Will generate the PDF along with the HTML and EPUB.
--cover=<cover> File path or URL for a cover that would override the
configuration (or default standard).
--generator=<generator> Set the generator to be used at build time. So far,
two generators are available: calibre and pandoc.
"""
from docopt import docopt
from .commander import Commander
from .checkers import check_dependencies
def main():
"Main program"
generators = check_dependencies()
args = docopt(__doc__, version='md2ebook 0.0.1-dev')
commander = Commander(args, generators)
commander.handle()
if __name__ == '__main__':
main()
| 32.489362
| 79
| 0.629339
|
481ad428dd1bcfece63c5037ed0ff91800fbc4e1
| 754
|
py
|
Python
|
data_management/Excel Work/Melting DataFrames.py
|
TheRockerfly/JRocker-Portfolio
|
f1272970eb5c52e9928b72a101526adafbd77d4f
|
[
"MIT"
] | null | null | null |
data_management/Excel Work/Melting DataFrames.py
|
TheRockerfly/JRocker-Portfolio
|
f1272970eb5c52e9928b72a101526adafbd77d4f
|
[
"MIT"
] | 7
|
2021-07-03T14:41:03.000Z
|
2022-03-12T00:59:26.000Z
|
data_management/Excel Work/Melting DataFrames.py
|
TheRockerfly/JRocker-Python-Portfolio
|
be8be23195df56d8c76c59f03d1eb9d06e7a93b7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 06:59:31 2018
@author: James
"""
import pandas as pd
def single_melt_csv_df(file_name: str, melt_col: str, val_col: str):
df = pd.read_csv(file_name)
# Reset the index
df_reset = df.reset_index()
# Melt visitors_by_city_weekday: visitors
df_melt = pd.melt(df_reset, id_vars=[melt_col], value_name=val_col)
# Print visitors
print(df_melt)
def multi_col_melt(file_name, melt_list: list):
df = pd.read_csv(file_name)
# Multiple columns can be used as identifiers
skinny = pd.melt(df, id_vars=melt_list)
# Print skinny
print(skinny)
# Key-value pairs merge multi-index columns into one
kv_pairs = pd.melt(df, col_level=0)
print(kv_pairs)
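# Hedged usage sketch (editor's addition): calling the helpers above on a hypothetical
# CSV whose columns include 'weekday' and per-city visitor counts.
#
#     single_melt_csv_df('visitors_by_city_weekday.csv', 'weekday', 'visitors')
#     multi_col_melt('visitors_by_city_weekday.csv', ['weekday', 'city'])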
| 22.176471
| 71
| 0.685676
|
98b887173ea2da940adff8696d56f78e7ab87f83
| 6,626
|
py
|
Python
|
Detectron2Fire/testfirepanotic.py
|
lkk688/CustomDetectron2
|
f55a7ea41b067cfc182f994a479bf5f64b4b01a3
|
[
"MIT"
] | null | null | null |
Detectron2Fire/testfirepanotic.py
|
lkk688/CustomDetectron2
|
f55a7ea41b067cfc182f994a479bf5f64b4b01a3
|
[
"MIT"
] | null | null | null |
Detectron2Fire/testfirepanotic.py
|
lkk688/CustomDetectron2
|
f55a7ea41b067cfc182f994a479bf5f64b4b01a3
|
[
"MIT"
] | null | null | null |
import torch
print(torch.__version__) #1.8.0+cu111
import torchvision
print(torchvision.__version__) #0.9.0+cu111
# check if CUDA is available
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
import detectron2 #detectron2 version: 0.4+cu111
#from detectron2.data import MetadataCatalog
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode
from matplotlib import pyplot as plt
#For training
from detectron2.engine import DefaultTrainer
#import json
# import os
# import numpy as np
# import cv2
from pycocotools.coco import COCO
def generate_segmentation_file(img_dir, outputpath):
json_file = os.path.join(img_dir, "FireClassification.json")
coco=COCO(json_file)
#print(coco)
cats = coco.loadCats(coco.getCatIds())
nms=[cat['name'] for cat in cats] #['Fire', 'NoFire', 'Smoke', 'BurntArea']
print('COCO categories: \n{}\n'.format(' '.join(nms)))
imgIds_1 = coco.getImgIds()
print(imgIds_1)
for i in imgIds_1:
imgIds = coco.getImgIds(imgIds = i) ##Image id part in the json
img = coco.loadImgs(imgIds)[0]
print(img)
annIds = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
anns = coco.loadAnns(annIds)
        mask = coco.annToMask(anns[0])  # binary mask of the first annotation, e.g. (1080, 1920)
        for j in range(1, len(anns)):  # start at 1 so the first annotation is not added twice
            mask += coco.annToMask(anns[j])
print(mask)
output = os.path.join(outputpath, img["file_name"])
cv2.imwrite(output, mask)
def cv2_imshow(img, outputfilename='./outputs/result.png'):
rgb=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
fig = plt.figure(figsize = (20, 10))
plt.imshow(rgb)
fig.savefig(outputfilename)
def init_cfg(config_file: str):
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(config_file))
cfg.DATASETS.TRAIN = ("firedataset_train_new",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file) # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025 # pick a good LR
cfg.SOLVER.MAX_ITER = 500 # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 4  # four classes: Fire, NoFire, Smoke, BurntArea
return cfg
def get_predictor(cfg, model_name: str):
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, model_name)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.1 # set the testing threshold for this model
# cfg.DATASETS.TEST = ("balloon_val",)
predictor = DefaultPredictor(cfg)
return predictor
if __name__ == "__main__":
for d in ["train"]:
dataroot="./Dataset/CMPE_295_All_images/"
outputpath=os.path.join(dataroot, d, "segmentation")
os.makedirs(outputpath, exist_ok=True)
#Change path to point to the shared Images Folder /content/drive/MyDrive/Panoptic_Segmentation/CMPE_295_All_images/Images
generate_segmentation_file(os.path.join(dataroot, "Images"), outputpath)
#if your dataset is in COCO format, this cell can be replaced by the following three lines:
from detectron2.data.datasets import register_coco_instances
register_coco_instances("firedataset_train", {}, './Dataset/CMPE_295_All_images/Images/FireClassification.json', "./Dataset/CMPE_295_All_images/Images")
dataset_dicts = DatasetCatalog.get("firedataset_train")
    ##Add the extra sem_seg_file_name key to each training dataset dict
print(len(dataset_dicts))
for i in range(len(dataset_dicts)):
tem = dataset_dicts[i]["file_name"]
first, second = tem.rsplit('/', 1)
dataset_dicts[i]["sem_seg_file_name"] = os.path.join(outputpath,second)
from detectron2.data import MetadataCatalog
    ##registering again since the dicts obtained from the COCO format were modified to add the segmentation info
DatasetCatalog.register("firedataset_train_new", lambda d=d:dataset_dicts)
MetadataCatalog.get("firedataset_train_new").thing_classes = ['Fire', 'NoFire', 'Smoke', 'BurntArea']
MetadataCatalog.get("firedataset_train_new").stuff_classes = ['Fire', 'NoFire', 'Smoke', 'BurntArea']
dataset_metadata = MetadataCatalog.get("firedataset_train_new")
# Check whether dataset is correctly initialised
#visualise_dataset("train")
#dataset_dicts = get_balloon_dicts(os.path.join("balloon", d))
for d in random.sample(dataset_dicts, 3):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=dataset_metadata, scale=0.5)
vis = visualizer.draw_dataset_dict(d)
cv2_imshow(vis.get_image()[:, :, ::-1],'./outputs/'+str(d["image_id"]))
cfg = init_cfg("COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml")
cfg.OUTPUT_DIR='./output/firepanoptic'
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
predictor = get_predictor(cfg, "model_final.pth")
inputs = cv2.imread('./Dataset/CMPE_295_All_images/Images/ChoppervideoCameronPeakFirestartsnearChambersLakeinwesternLarimerCounty-223.jpg')
panoptic_seg, segments_info = predictor(inputs)["panoptic_seg"]
print("segments_info")
print(segments_info)
print("panoptic_seg")
print(panoptic_seg)
datasetname=cfg.DATASETS.TRAIN[0]
metadata=MetadataCatalog.get(datasetname)
v = Visualizer(inputs[:, :, ::-1], metadata, scale=1.2)
v = v.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"),segments_info)
cv2_imshow(v.get_image()[:, :, ::-1])
for d in random.sample(dataset_dicts, 5):
img = cv2.imread(d["file_name"])
panoptic_seg, segments_info = predictor(img)["panoptic_seg"]
v = Visualizer(img[:, :, ::-1], metadata=metadata, scale=0.5)
v = v.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"),segments_info)
#v = v.draw_dataset_dict(d)
cv2_imshow(v.get_image()[:, :, ::-1],'./outputs/inference'+str(d["image_id"]))
| 42.474359
| 156
| 0.715666
|
f235e9a92a235f9ca68ea374ffb93efcd01376c7
| 2,610
|
py
|
Python
|
docassemble_webapp/docassemble/webapp/screenreader.py
|
patrickr81/docassemble
|
651653f6d3ab4c4c95d1defbc547ab8e15e460cc
|
[
"MIT"
] | 1
|
2020-06-01T15:46:11.000Z
|
2020-06-01T15:46:11.000Z
|
docassemble_webapp/docassemble/webapp/screenreader.py
|
ttamg/docassemble
|
1429fbbddfeb60b9f8fe74c928a479236d6a6113
|
[
"MIT"
] | 6
|
2021-02-08T20:44:14.000Z
|
2022-01-13T02:42:41.000Z
|
docassemble_webapp/docassemble/webapp/screenreader.py
|
ttamg/docassemble
|
1429fbbddfeb60b9f8fe74c928a479236d6a6113
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from docassemble.base.functions import word
import re
__all__ = ['to_text']
def to_text(html_doc):
#logmessage("Starting to_text")
output = str()
soup = BeautifulSoup(html_doc, 'html.parser')
[s.extract() for s in soup(['style', 'script', '[document]', 'head', 'title', 'audio', 'video', 'pre', 'attribution'])]
[s.extract() for s in soup.find_all(hidden)]
[s.extract() for s in soup.find_all('div', {'class': 'dainvisible'})]
previous = str()
for s in soup.find_all(do_show):
if s.name in ['input', 'textarea', 'img'] and s.has_attr('alt'):
words = s.attrs['alt']
if s.has_attr('placeholder'):
words += str(", ") + s.attrs['placeholder']
else:
words = s.get_text()
words = re.sub(r'\n\s*', ' ', words, flags=re.DOTALL)
if len(words) and re.search(r'\w *$', words, re.UNICODE):
words = words + str('.')
if words != previous:
output += str(words) + "\n"
previous = words
terms = dict()
for s in soup.find_all('a'):
if s.has_attr('class') and s.attrs['class'][0] == 'daterm' and s.has_attr('data-content') and s.string is not None:
terms[s.string] = s.attrs['data-content']
if len(terms):
output += word("Terms used in this question:") + "\n"
for term, definition in terms.items():
output += str(term) + '. ' + str(definition) + '\n'
output = re.sub(r'&gt;', '>', output)
output = re.sub(r'&lt;', '<', output)
output = re.sub(r'<[^>]+>', '', output)
return output
def hidden(element):
if element.name == 'input':
if element.has_attr('type'):
if element.attrs['type'] == 'hidden':
return True
return False
bad_list = ['div', 'option']
good_list = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'button', 'textarea', 'note']
def do_show(element):
if re.match('<!--.*-->', str(element), re.DOTALL):
return False
if element.name in ['option'] and element.has_attr('selected'):
return True
if element.name in bad_list:
return False
if element.name in ['img', 'input'] and element.has_attr('alt'):
return True
if element.name in good_list:
return True
if element.parent and element.parent.name in good_list:
return False
if element.string:
return True
if re.match(r'\s+', element.get_text()):
return False
return False
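# A minimal usage sketch for to_text(), assuming docassemble and beautifulsoup4
# are installed; the sample HTML below is illustrative, not taken from docassemble.
if __name__ == '__main__':
    sample_html = """
    <html><head><title>ignored title</title></head>
    <body>
      <h1>Do you agree?</h1>
      <p>Please review the terms below.</p>
      <input type="hidden" value="secret"/>
      <button>Continue</button>
    </body></html>
    """
    # Prints the heading, paragraph and button label on separate lines; the title
    # and the hidden input are stripped out.
    print(to_text(sample_html))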
| 35.753425
| 123
| 0.563218
|
0a0b9d70a707bffe8fb51de20e81ac099d504a82
| 472
|
py
|
Python
|
Chapter1/1.4.2-Default-Argument-Values.py
|
pankace/SICP-robably
|
765f516f253e96ae2c8e433722ea7cefd31b2f04
|
[
"MIT"
] | null | null | null |
Chapter1/1.4.2-Default-Argument-Values.py
|
pankace/SICP-robably
|
765f516f253e96ae2c8e433722ea7cefd31b2f04
|
[
"MIT"
] | null | null | null |
Chapter1/1.4.2-Default-Argument-Values.py
|
pankace/SICP-robably
|
765f516f253e96ae2c8e433722ea7cefd31b2f04
|
[
"MIT"
] | null | null | null |
k_b = 1.38e-23 # Boltzmann's constant
def pressure(v, t, n=6.022e23):
"""Compute the pressure in pascals of an ideal gas.
v -- volume of gas, in cubic meters
t -- absolute temperature in degrees kelvin
n -- particles of gas (default: one mole)
"""
return n * k_b * t / v
v, t = map(float, input("v and t\n").split())
# v = float(input("v,\\"))
# t = float(input("t,\\"))
print(pressure(v, t))
help(pressure)
print("this is edited with n-vm")
| 23.6
| 55
| 0.612288
|
48e7623a62fdc2b842fd89213c5c79fe482eada8
| 7,699
|
py
|
Python
|
aiobotocore/paginate.py
|
Jastor11/aiobotocore
|
40427e6c45dd6b8fb75072f13cfb076cf6c4d10b
|
[
"Apache-2.0"
] | 772
|
2016-02-12T13:20:26.000Z
|
2022-03-29T20:51:37.000Z
|
aiobotocore/paginate.py
|
Jastor11/aiobotocore
|
40427e6c45dd6b8fb75072f13cfb076cf6c4d10b
|
[
"Apache-2.0"
] | 826
|
2016-02-14T11:31:25.000Z
|
2022-03-31T20:41:31.000Z
|
aiobotocore/paginate.py
|
Jastor11/aiobotocore
|
40427e6c45dd6b8fb75072f13cfb076cf6c4d10b
|
[
"Apache-2.0"
] | 154
|
2016-04-28T16:27:33.000Z
|
2022-03-05T19:41:52.000Z
|
from botocore.exceptions import PaginationError
from botocore.paginate import Paginator, PageIterator
from botocore.utils import set_value_from_jmespath, merge_dicts
from botocore.compat import six
import jmespath
import aioitertools
class AioPageIterator(PageIterator):
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
current_kwargs = self._op_kwargs
previous_next_token = None
next_token = dict((key, None) for key in self._input_token)
if self._starting_token is not None:
# If the starting token exists, populate the next_token with the
# values inside it. This ensures that we have the service's
# pagination token on hand if we need to truncate after the
# first response.
next_token = self._parse_starting_token()[0]
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
self._inject_starting_params(current_kwargs)
while True:
response = await self._make_request(current_kwargs)
parsed = self._extract_parsed_response(response)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self._starting_token is not None:
starting_truncation = self._handle_first_request(
parsed, primary_result_key, starting_truncation)
first_request = False
self._record_non_aggregate_key_values(parsed)
else:
# If this isn't the first request, we have already sliced into
# the first request and had to make additional requests after.
# We no longer need to add this to truncation.
starting_truncation = 0
current_response = primary_result_key.search(parsed)
if current_response is None:
current_response = []
num_current_response = len(current_response)
truncate_amount = 0
if self._max_items is not None:
truncate_amount = (total_items + num_current_response) \
- self._max_items
if truncate_amount > 0:
self._truncate_response(parsed, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = self._get_next_token(parsed)
if all(t is None for t in next_token.values()):
break
if self._max_items is not None and \
total_items == self._max_items:
# We're on a page boundary so we can set the current
# next token to be the resume token.
self.resume_token = next_token
break
if previous_next_token is not None and \
previous_next_token == next_token:
message = ("The same next token was received "
"twice: %s" % next_token)
raise PaginationError(message=message)
self._inject_token_into_kwargs(current_kwargs, next_token)
previous_next_token = next_token
def result_key_iters(self):
teed_results = aioitertools.tee(self, len(self.result_keys))
return [ResultKeyIterator(i, result_key) for i, result_key
in zip(teed_results, self.result_keys)]
async def build_full_result(self):
complete_result = {}
async for response in self:
page = response
# We want to try to catch operation object pagination
# and format correctly for those. They come in the form
# of a tuple of two elements: (http_response, parsed_response).
# We want the parsed_response as that is what the page iterator
# uses. We can remove it though once operation objects are removed.
if isinstance(response, tuple) and len(response) == 2:
page = response[1]
# We're incrementally building the full response page
# by page. For each page in the response we need to
# inject the necessary components from the page
# into the complete_result.
for result_expression in self.result_keys:
# In order to incrementally update a result key
# we need to search the existing value from complete_result,
# then we need to search the _current_ page for the
# current result key value. Then we append the current
# value onto the existing value, and re-set that value
# as the new value.
result_value = result_expression.search(page)
if result_value is None:
continue
existing_value = result_expression.search(complete_result)
if existing_value is None:
# Set the initial result
set_value_from_jmespath(
complete_result, result_expression.expression,
result_value)
continue
# Now both result_value and existing_value contain something
if isinstance(result_value, list):
existing_value.extend(result_value)
elif isinstance(result_value, (int, float, six.string_types)):
# Modify the existing result with the sum or concatenation
set_value_from_jmespath(
complete_result, result_expression.expression,
existing_value + result_value)
merge_dicts(complete_result, self.non_aggregate_part)
if self.resume_token is not None:
complete_result['NextToken'] = self.resume_token
return complete_result
async def search(self, expression):
compiled = jmespath.compile(expression)
async for page in self:
results = compiled.search(page)
if isinstance(results, list):
for element in results:
yield element
else:
yield results
class AioPaginator(Paginator):
PAGE_ITERATOR_CLS = AioPageIterator
class ResultKeyIterator:
"""Iterates over the results of paginated responses.
Each iterator is associated with a single result key.
Iterating over this object will give you each element in
the result key list.
:param pages_iterator: An iterator that will give you
pages of results (a ``PageIterator`` class).
:param result_key: The JMESPath expression representing
the result key.
"""
def __init__(self, pages_iterator, result_key):
self._pages_iterator = pages_iterator
self.result_key = result_key
def __aiter__(self):
return self.__anext__()
async def __anext__(self):
async for page in self._pages_iterator:
results = self.result_key.search(page)
if results is None:
results = []
for result in results:
yield result
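# A minimal usage sketch for the paginator classes above, assuming an S3 client
# created with a recent aiobotocore (older releases expose aiobotocore.get_session()
# instead of the import below); the bucket name is a placeholder.
if __name__ == '__main__':
    import asyncio
    from aiobotocore.session import get_session

    async def list_keys(bucket):
        session = get_session()
        async with session.create_client('s3') as client:
            paginator = client.get_paginator('list_objects_v2')
            # Stream results page by page ...
            async for page in paginator.paginate(Bucket=bucket):
                for obj in page.get('Contents', []):
                    print(obj['Key'])
            # ... or merge every page into a single response dict.
            return await paginator.paginate(Bucket=bucket).build_full_result()

    asyncio.run(list_keys('my-bucket'))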
| 43.744318
| 79
| 0.598649
|
74b2630b74739282d248a2e4f8b67ac66b76f158
| 23,705
|
py
|
Python
|
ansible/lib/ansible/modules/core/database/postgresql/postgresql_privs.py
|
kiv-box/kafka
|
debec1c4bc8c43776070ee447a53b55fef42bd52
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/core/database/postgresql/postgresql_privs.py
|
kiv-box/kafka
|
debec1c4bc8c43776070ee447a53b55fef42bd52
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/core/database/postgresql/postgresql_privs.py
|
kiv-box/kafka
|
debec1c4bc8c43776070ee447a53b55fef42bd52
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: postgresql_privs
version_added: "1.2"
short_description: Grant or revoke privileges on PostgreSQL database objects.
description:
- Grant or revoke privileges on PostgreSQL database objects.
- This module is basically a wrapper around most of the functionality of
PostgreSQL's GRANT and REVOKE statements with detection of changes
(GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles))
options:
database:
description:
- Name of database to connect to.
- 'Alias: I(db)'
required: yes
state:
description:
- If C(present), the specified privileges are granted, if C(absent) they
are revoked.
required: no
default: present
choices: [present, absent]
privs:
description:
- Comma separated list of privileges to grant/revoke.
- 'Alias: I(priv)'
required: no
type:
description:
- Type of database object to set privileges on.
required: no
default: table
choices: [table, sequence, function, database,
schema, language, tablespace, group]
objs:
description:
- Comma separated list of database objects to set privileges on.
- If I(type) is C(table) or C(sequence), the special value
C(ALL_IN_SCHEMA) can be provided instead to specify all database
objects of type I(type) in the schema specified via I(schema). (This
also works with PostgreSQL < 9.0.)
- If I(type) is C(database), this parameter can be omitted, in which case
privileges are set for the database specified via I(database).
- 'If I(type) is I(function), colons (":") in object names will be
replaced with commas (needed to specify function signatures, see
examples)'
- 'Alias: I(obj)'
required: no
schema:
description:
- Schema that contains the database objects specified via I(objs).
- May only be provided if I(type) is C(table), C(sequence) or
C(function). Defaults to C(public) in these cases.
required: no
roles:
description:
- Comma separated list of role (user/group) names to set permissions for.
- The special value C(PUBLIC) can be provided instead to set permissions
for the implicitly defined PUBLIC group.
- 'Alias: I(role)'
required: yes
grant_option:
description:
- Whether C(role) may grant/revoke the specified privileges/group
memberships to others.
- Set to C(no) to revoke GRANT OPTION, leave unspecified to
make no changes.
- I(grant_option) only has an effect if I(state) is C(present).
- 'Alias: I(admin_option)'
required: no
choices: ['yes', 'no']
host:
description:
- Database host address. If unspecified, connect via Unix socket.
- 'Alias: I(login_host)'
default: null
required: no
port:
description:
- Database port to connect to.
required: no
default: 5432
unix_socket:
description:
- Path to a Unix domain socket for local connections.
- 'Alias: I(login_unix_socket)'
required: false
default: null
login:
description:
- The username to authenticate with.
- 'Alias: I(login_user)'
default: postgres
password:
description:
- The password to authenticate with.
- 'Alias: I(login_password)'
default: null
required: no
notes:
- Default authentication assumes that postgresql_privs is run by the
C(postgres) user on the remote host. (Ansible's C(user) or C(sudo-user)).
- This module requires Python package I(psycopg2) to be installed on the
remote host. In the default case of the remote host also being the
PostgreSQL server, PostgreSQL has to be installed there as well, obviously.
For Debian/Ubuntu-based systems, install packages I(postgresql) and
I(python-psycopg2).
- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
have singular alias names (I(priv), I(obj), I(role)).
- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
C(present) and I(grant_option) to C(no) (see examples).
- Note that when revoking privileges from a role R, this role may still have
access via privileges granted to any role R is a member of including
C(PUBLIC).
- Note that when revoking privileges from a role R, you do so as the user
specified via I(login). If R has been granted the same privileges by
another user also, R can still access database objects via these privileges.
- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
requirements: [psycopg2]
author: "Bernhard Weitzhofer (@b6d)"
"""
EXAMPLES = """
# On database "library":
# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
# TO librarian, reader WITH GRANT OPTION
- postgresql_privs: >
database=library
state=present
privs=SELECT,INSERT,UPDATE
type=table
objs=books,authors
schema=public
roles=librarian,reader
grant_option=yes
# Same as above leveraging default values:
- postgresql_privs: >
db=library
privs=SELECT,INSERT,UPDATE
objs=books,authors
roles=librarian,reader
grant_option=yes
# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
# Note that role "reader" will be *granted* INSERT privilege itself if this
# isn't already the case (since state=present).
- postgresql_privs: >
db=library
state=present
priv=INSERT
obj=books
role=reader
grant_option=no
# REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
# "public" is the default schema. This also works for PostgreSQL 8.x.
- postgresql_privs: >
db=library
state=absent
privs=INSERT,UPDATE
objs=ALL_IN_SCHEMA
role=reader
# GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
- postgresql_privs: >
db=library
privs=ALL
type=schema
objs=public,math
role=librarian
# GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
# Note the separation of arguments with colons.
- postgresql_privs: >
db=library
privs=ALL
type=function
obj=add(int:int)
schema=math
roles=librarian,reader
# GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
# Note that group role memberships apply cluster-wide and therefore are not
# restricted to database "library" here.
- postgresql_privs: >
db=library
type=group
objs=librarian,reader
roles=alice,bob
admin_option=yes
# GRANT ALL PRIVILEGES ON DATABASE library TO librarian
# Note that here "db=postgres" specifies the database to connect to, not the
# database to grant privileges on (which is specified via the "objs" param)
- postgresql_privs: >
db=postgres
privs=ALL
type=database
obj=library
role=librarian
# GRANT ALL PRIVILEGES ON DATABASE library TO librarian
# If objs is omitted for type "database", it defaults to the database
# to which the connection is established
- postgresql_privs: >
db=library
privs=ALL
type=database
role=librarian
"""
try:
import psycopg2
import psycopg2.extensions
except ImportError:
psycopg2 = None
VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
class Error(Exception):
pass
# We don't have functools.partial in Python < 2.5
def partial(f, *args, **kwargs):
"""Partial function application"""
def g(*g_args, **g_kwargs):
new_kwargs = kwargs.copy()
new_kwargs.update(g_kwargs)
return f(*(args + g_args), **new_kwargs)
g.f = f
g.args = args
g.kwargs = kwargs
return g
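# A tiny illustration of the partial() helper above; the module itself uses it to
# bind a schema name to the ACL getter methods (see manipulate_privs below).
# The names here are purely illustrative:
#
#   add = lambda a, b: a + b
#   add_five = partial(add, 5)
#   add_five(3)      # -> 8 (positional arguments are appended)
#   add_five(b=3)    # -> 8 (keyword arguments are merged with the bound ones)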
class Connection(object):
"""Wrapper around a psycopg2 connection with some convenience methods"""
def __init__(self, params):
self.database = params.database
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"host":"host",
"login":"user",
"password":"password",
"port":"port",
"database": "database",
}
kw = dict( (params_map[k], getattr(params, k)) for k in params_map
if getattr(params, k) != '' )
# If a unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and params.unix_socket != "":
kw["host"] = params.unix_socket
self.connection = psycopg2.connect(**kw)
self.cursor = self.connection.cursor()
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
@property
def encoding(self):
"""Connection encoding in Python-compatible form"""
return psycopg2.extensions.encodings[self.connection.encoding]
### Methods for querying database objects
# PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
# phrases in GRANT or REVOKE statements, therefore alternative methods are
# provided here.
def schema_exists(self, schema):
query = """SELECT count(*)
FROM pg_catalog.pg_namespace WHERE nspname = %s"""
self.cursor.execute(query, (schema,))
return self.cursor.fetchone()[0] > 0
def get_all_tables_in_schema(self, schema):
if not self.schema_exists(schema):
raise Error('Schema "%s" does not exist.' % schema)
query = """SELECT relname
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE nspname = %s AND relkind in ('r', 'v')"""
self.cursor.execute(query, (schema,))
return [t[0] for t in self.cursor.fetchall()]
def get_all_sequences_in_schema(self, schema):
if not self.schema_exists(schema):
raise Error('Schema "%s" does not exist.' % schema)
query = """SELECT relname
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE nspname = %s AND relkind = 'S'"""
self.cursor.execute(query, (schema,))
return [t[0] for t in self.cursor.fetchall()]
### Methods for getting access control lists and group membership info
# To determine whether anything has changed after granting/revoking
# privileges, we compare the access control lists of the specified database
# objects before and afterwards. Python's list/string comparison should
# suffice for change detection, we should not actually have to parse ACLs.
# The same should apply to group membership information.
def get_table_acls(self, schema, tables):
query = """SELECT relacl
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE nspname = %s AND relkind = 'r' AND relname = ANY (%s)
ORDER BY relname"""
self.cursor.execute(query, (schema, tables))
return [t[0] for t in self.cursor.fetchall()]
def get_sequence_acls(self, schema, sequences):
query = """SELECT relacl
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
ORDER BY relname"""
self.cursor.execute(query, (schema, sequences))
return [t[0] for t in self.cursor.fetchall()]
def get_function_acls(self, schema, function_signatures):
funcnames = [f.split('(', 1)[0] for f in function_signatures]
query = """SELECT proacl
FROM pg_catalog.pg_proc p
JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
WHERE nspname = %s AND proname = ANY (%s)
ORDER BY proname, proargtypes"""
self.cursor.execute(query, (schema, funcnames))
return [t[0] for t in self.cursor.fetchall()]
def get_schema_acls(self, schemas):
query = """SELECT nspacl FROM pg_catalog.pg_namespace
WHERE nspname = ANY (%s) ORDER BY nspname"""
self.cursor.execute(query, (schemas,))
return [t[0] for t in self.cursor.fetchall()]
def get_language_acls(self, languages):
query = """SELECT lanacl FROM pg_catalog.pg_language
WHERE lanname = ANY (%s) ORDER BY lanname"""
self.cursor.execute(query, (languages,))
return [t[0] for t in self.cursor.fetchall()]
def get_tablespace_acls(self, tablespaces):
query = """SELECT spcacl FROM pg_catalog.pg_tablespace
WHERE spcname = ANY (%s) ORDER BY spcname"""
self.cursor.execute(query, (tablespaces,))
return [t[0] for t in self.cursor.fetchall()]
def get_database_acls(self, databases):
query = """SELECT datacl FROM pg_catalog.pg_database
WHERE datname = ANY (%s) ORDER BY datname"""
self.cursor.execute(query, (databases,))
return [t[0] for t in self.cursor.fetchall()]
def get_group_memberships(self, groups):
query = """SELECT roleid, grantor, member, admin_option
FROM pg_catalog.pg_auth_members am
JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
WHERE r.rolname = ANY(%s)
ORDER BY roleid, grantor, member"""
self.cursor.execute(query, (groups,))
return self.cursor.fetchall()
### Manipulating privileges
def manipulate_privs(self, obj_type, privs, objs, roles,
state, grant_option, schema_qualifier=None):
"""Manipulate database object privileges.
:param obj_type: Type of database object to grant/revoke
privileges for.
:param privs: Either a list of privileges to grant/revoke
or None if type is "group".
:param objs: List of database objects to grant/revoke
privileges for.
:param roles: Either a list of role names or "PUBLIC"
for the implicitly defined "PUBLIC" group
:param state: "present" to grant privileges, "absent" to revoke.
:param grant_option: Only for state "present": If True, set
grant/admin option. If False, revoke it.
If None, don't change grant option.
:param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
"FUNCTION") must be qualified by schema.
Ignored for other Types.
"""
# get_status: function to get current status
if obj_type == 'table':
get_status = partial(self.get_table_acls, schema_qualifier)
elif obj_type == 'sequence':
get_status = partial(self.get_sequence_acls, schema_qualifier)
elif obj_type == 'function':
get_status = partial(self.get_function_acls, schema_qualifier)
elif obj_type == 'schema':
get_status = self.get_schema_acls
elif obj_type == 'language':
get_status = self.get_language_acls
elif obj_type == 'tablespace':
get_status = self.get_tablespace_acls
elif obj_type == 'database':
get_status = self.get_database_acls
elif obj_type == 'group':
get_status = self.get_group_memberships
else:
raise Error('Unsupported database object type "%s".' % obj_type)
# Return False (nothing has changed) if there are no objs to work on.
if not objs:
return False
# obj_ids: quoted db object identifiers (sometimes schema-qualified)
if obj_type == 'function':
obj_ids = []
for obj in objs:
try:
f, args = obj.split('(', 1)
except:
raise Error('Illegal function signature: "%s".' % obj)
obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
elif obj_type in ['table', 'sequence']:
obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
else:
obj_ids = ['"%s"' % o for o in objs]
# set_what: SQL-fragment specifying what to set for the target roles:
# Either group membership or privileges on objects of a certain type
if obj_type == 'group':
set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids)
else:
# function types are already quoted above
if obj_type != 'function':
obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
# Note: obj_type has been checked against a set of string literals
# and privs was escaped when it was parsed
set_what = '%s ON %s %s' % (','.join(privs), obj_type,
','.join(obj_ids))
# for_whom: SQL-fragment specifying for whom to set the above
if roles == 'PUBLIC':
for_whom = 'PUBLIC'
else:
for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles)
status_before = get_status(objs)
if state == 'present':
if grant_option:
if obj_type == 'group':
query = 'GRANT %s TO %s WITH ADMIN OPTION'
else:
query = 'GRANT %s TO %s WITH GRANT OPTION'
else:
query = 'GRANT %s TO %s'
self.cursor.execute(query % (set_what, for_whom))
# Only revoke GRANT/ADMIN OPTION if grant_option actually is False.
if grant_option == False:
if obj_type == 'group':
query = 'REVOKE ADMIN OPTION FOR %s FROM %s'
else:
query = 'REVOKE GRANT OPTION FOR %s FROM %s'
self.cursor.execute(query % (set_what, for_whom))
else:
query = 'REVOKE %s FROM %s'
self.cursor.execute(query % (set_what, for_whom))
status_after = get_status(objs)
return status_before != status_after
def main():
module = AnsibleModule(
argument_spec = dict(
database=dict(required=True, aliases=['db']),
state=dict(default='present', choices=['present', 'absent']),
privs=dict(required=False, aliases=['priv']),
type=dict(default='table',
choices=['table',
'sequence',
'function',
'database',
'schema',
'language',
'tablespace',
'group']),
objs=dict(required=False, aliases=['obj']),
schema=dict(required=False),
roles=dict(required=True, aliases=['role']),
grant_option=dict(required=False, type='bool',
aliases=['admin_option']),
host=dict(default='', aliases=['login_host']),
port=dict(type='int', default=5432),
unix_socket=dict(default='', aliases=['login_unix_socket']),
login=dict(default='postgres', aliases=['login_user']),
password=dict(default='', aliases=['login_password'], no_log=True)
),
supports_check_mode = True
)
# Create type object as namespace for module params
p = type('Params', (), module.params)
# param "schema": default, allowed depends on param "type"
if p.type in ['table', 'sequence', 'function']:
p.schema = p.schema or 'public'
elif p.schema:
module.fail_json(msg='Argument "schema" is not allowed '
'for type "%s".' % p.type)
# param "objs": default, required depends on param "type"
if p.type == 'database':
p.objs = p.objs or p.database
elif not p.objs:
module.fail_json(msg='Argument "objs" is required '
'for type "%s".' % p.type)
# param "privs": allowed, required depends on param "type"
if p.type == 'group':
if p.privs:
module.fail_json(msg='Argument "privs" is not allowed '
'for type "group".')
elif not p.privs:
module.fail_json(msg='Argument "privs" is required '
'for type "%s".' % p.type)
# Connect to Database
if not psycopg2:
module.fail_json(msg='Python module "psycopg2" must be installed.')
try:
conn = Connection(p)
except psycopg2.Error:
e = get_exception()
module.fail_json(msg='Could not connect to database: %s' % e)
try:
# privs
if p.privs:
privs = frozenset(pr.upper() for pr in p.privs.split(','))
if not privs.issubset(VALID_PRIVS):
module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
else:
privs = None
# objs:
if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
objs = conn.get_all_tables_in_schema(p.schema)
elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
objs = conn.get_all_sequences_in_schema(p.schema)
else:
objs = p.objs.split(',')
# function signatures are encoded using ':' to separate args
if p.type == 'function':
objs = [obj.replace(':', ',') for obj in objs]
# roles
if p.roles == 'PUBLIC':
roles = 'PUBLIC'
else:
roles = p.roles.split(',')
changed = conn.manipulate_privs(
obj_type = p.type,
privs = privs,
objs = objs,
roles = roles,
state = p.state,
grant_option = p.grant_option,
schema_qualifier=p.schema
)
except Error:
e = get_exception()
conn.rollback()
module.fail_json(msg=e.message)
except psycopg2.Error:
e = get_exception()
conn.rollback()
# psycopg2 errors come in connection encoding, reencode
msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(),
'replace')
module.fail_json(msg=msg)
if module.check_mode:
conn.rollback()
else:
conn.commit()
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
if __name__ == '__main__':
main()
| 36.923676
| 104
| 0.608057
|
7d32348501868062da1d383183f7205b82304039
| 35,339
|
py
|
Python
|
commandlineCW_ipuz.py
|
LSowrabbi/CWPuzzleReader
|
d144da9ce48a75413be2b21f8bf828cf4dbf191f
|
[
"BSD-3-Clause"
] | null | null | null |
commandlineCW_ipuz.py
|
LSowrabbi/CWPuzzleReader
|
d144da9ce48a75413be2b21f8bf828cf4dbf191f
|
[
"BSD-3-Clause"
] | 2
|
2015-12-29T17:06:38.000Z
|
2016-09-16T13:03:53.000Z
|
commandlineCW_ipuz.py
|
LSowrabbi/CWPuzzleReader
|
d144da9ce48a75413be2b21f8bf828cf4dbf191f
|
[
"BSD-3-Clause"
] | null | null | null |
import ipuz
import ipuz_Helper
import json
import sys
unlock_state="disabled"
notes_state="disabled"
is_puz_rebus=False
Encoding_2 = "ISO-8859-1"
class File():
title=None
author=None
cpyrt=None
notes=None
width=0
height=0
solnblock=[]
cellblock=[]
acc=0
dwn=0
across=[]
down=[]
loc=""
# is_multi is set to 1 in order to input rebus entries for a cell; it can be turned off only after 'enter' key is pressed
is_multi=0
multi=[]
across=[]
down=[]
cellblock=[]
solnblock=[]
row_cellno=[]
col_cellno=[]
cellno=[]
pencil=[]
valid=[]
gext=[]
time=0
time_state=0
ifil = input('Enter a file name along with path: ')
ofile_txt=ifil
data_file = open(ifil,'r')
data = data_file.read()
data_file.close()
# puzzle description read from the ipuz file is stored in the 'puzzle' instance
try:
puzzle = ipuz.read(data)
except ipuz.IPUZException:
print("Sorry, File corrupted")
sys.exit(0)
if 'block' in puzzle:
block=puzzle['block']
else:
block="#"
if 'empty' in puzzle:
empty=puzzle['empty']
try:
empty=int(empty)
except ValueError:
pass
else:
empty=0
if 'title' in puzzle:
title=puzzle['title']
else:
title='title'
if 'author' in puzzle:
author=puzzle['author']
else:
author='author'
if 'copyright' in puzzle:
cpyrt=puzzle['copyright']
else:
cpyrt='copyright'
if 'notes' in puzzle:
notes=puzzle['notes']
notes_state="normal"
else:
notes=''
if 'Across' in puzzle['clues'] and 'Down' in puzzle['clues']:
for i in range(0,len(puzzle['clues']['Across'])):
l=puzzle['clues']['Across'][i]
across.append([])
if isinstance(l,dict):
across[i].append(l['number'])
across[i].append(l['clue'])
else:
across[i].append(l[0])
across[i].append(l[1])
acc=len(across)
for i in range(0,len(puzzle['clues']['Down'])):
l=puzzle['clues']['Down'][i]
down.append([])
if isinstance(l,dict):
down[i].append(l['number'])
down[i].append(l['clue'])
else:
down[i].append(l[0])
down[i].append(l[1])
dwn=len(down)
if isinstance(puzzle['dimensions']['height'],str):
height=int(puzzle['dimensions']['height'])
else:
height=puzzle['dimensions']['height']
if isinstance(puzzle['dimensions']['width'],str):
width=int(puzzle['dimensions']['width'])
else:
width=puzzle['dimensions']['width']
for i in range(0,height):
# current state of the grid
cellblock.append([])
# stores the position of cell numbers for cells in the grid
cellno.append([])
# stores all the pencil entries in the grid
pencil.append([])
# stores the valid/invalid state of each entry in the grid
valid.append([])
# if available, stores the solution for puzzle; else all cell entries are assigned the character 'A'
solnblock.append([])
# stores details of circled, previously incorrect, incorrect or revealed entries present in the grid
gext.append([])
for j in range(0,width):
pencil[i].append(0)
valid[i].append(0)
gext[i].append(0)
if isinstance(puzzle['puzzle'][i][j],dict):
cellblock[i].append(puzzle['puzzle'][i][j]['cell'])
else:
cellblock[i].append(puzzle['puzzle'][i][j])
if cellblock[i][j]!=block and cellblock[i][j]!=empty and cellblock[i][j]!="null":
row_cellno.append(i)
col_cellno.append(j)
cellno[i].append(cellblock[i][j])
else:
cellno[i].append(0)
if cellblock[i][j]==block or cellblock[i][j]=="null" or cellblock[i][j]==None:
cellblock[i][j]="."
solnblock[i].append(".")
else:
# if an unshaded cell is encountered and any entry is present in it, stores the corresponding entry in the cell
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
cellblock[i][j]=puzzle['saved'][i][j]['value']
else:
cellblock[i][j]=puzzle['saved'][i][j]
if cellblock[i][j]==empty:
cellblock[i][j]="-"
else:
cellblock[i][j]=cellblock[i][j].upper()
else:
cellblock[i][j]="-"
# if an unshaded cell is encountered, stores the solution for the corresponding cell
if 'solution' in puzzle:
check_reveal_state="normal"
if isinstance(puzzle['solution'][i][j],dict):
solnblock[i].append(puzzle['solution'][i][j]['value'].upper())
else:
solnblock[i].append(puzzle['solution'][i][j].upper())
else:
check_reveal_state="disabled"
solnblock[i].append("A")
for i in range(0,height):
for j in range(0,width):
if(cellblock[i][j] in 'abcdefghijklmnopqrstuvwxyz'):
pencil[i][j]=1
cellblock[i][j]=cellblock[i][j].upper()
# calc_across and calc_down are for calculating current state of the across and down clues respectively
def calc_across(ch=1):
for i in range(0,acc):
temp=across[i][0]
c_row=row_cellno[temp-1]
c_col=col_cellno[temp-1]
curstr=""
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
if(ch==0):
across[i].append(len(curstr))
across[i].append(curstr)
else:
across[i][3]=curstr
def calc_down(ch=1):
for i in range(0,dwn):
temp=down[i][0]
c_row=row_cellno[temp-1]
c_col=col_cellno[temp-1]
curstr=""
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
if(ch==0):
down[i].append(len(curstr))
down[i].append(curstr)
else:
down[i][3]=curstr
# Notifies user if entire grid is filled with correct entries
def is_sol_complete():
for i in range(0,height):
for j in range(0,width):
if(cellblock[i][j]=="-"):
return
if(cellblock[i][j]!="." and cellblock[i][j]!=":" and valid[i][j]!=3):
if((is_puz_rebus==True) and (str(i)+","+str(j) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(i)+","+str(j))
temp_text=rebus_content[rebus_index]
else:
temp_text=solnblock[i][j]
if(cellblock[i][j]!=temp_text):
return
print("Congratulations, You have successfully completed the puzzle")
# displays clue and asks user to enter a solution for the corresponding clue
def disp_clue(clue):
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter word : ')
for char in getstr:
if(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
if(char not in "," and valid[c_row][c_col]!=3 ):
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=char.upper()
else:
cellblock[c_row][c_col]="-"
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_col=c_col+1
if(c_row==height or c_col==width):
break
else:
break
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
across[num][3]=curstr
calc_down()
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter word : ')
for char in getstr:
if(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
if(char not in "," and valid[c_row][c_col]!=3 ):
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=char.upper()
else:
cellblock[c_row][c_col]="-"
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_row=c_row+1
if(c_row==height or c_col==width):
break
else:
break
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
down[num][3]=curstr
calc_across()
return
print("Sorry wrong format")
# function for rebus entry at a particular location in a word
def disp_rebus_clue(clue):
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter the location where rebus has to be placed (for eg. in the word ABCDE, press 1 to place rebus at position A) : ')
loc=int(getstr)
if (loc>across[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_col=c_col+(loc-1)
if (valid[c_row][c_col]==3):
print("Sorry the cellblock at this location has already been revealed")
return
getstr=input('Enter the rebus word : ')
text=""
for char in getstr:
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
text=text+char.upper()
if(text==""):
text="-"
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=text
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
curstr=""
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
across[num][3]=curstr
calc_down()
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter the location where rebus has to be placed (for eg. in the word ABCDE, press 1 to place rebus at position A) : ')
loc=int(getstr)
if (loc>down[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_row=c_row+(loc-1)
if (valid[c_row][c_col]==3):
print("Sorry the cellblock at this location has already been revealed")
return
getstr=input('Enter the rebus word : ')
text=""
for char in getstr:
if char in "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz":
text=text+char.upper()
if(text==""):
text="-"
pencil[c_row][c_col]=0
cellblock[c_row][c_col]=text
if(valid[c_row][c_col]==2):
valid[c_row][c_col]=1
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
curstr=""
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
down[num][3]=curstr
calc_across()
return
print("Sorry wrong format")
# view all across and down clues along with their current state
def view_acc():
for i in range(0,acc):
temp=str(across[i][0])+". "+across[i][1]+" ("+str(across[i][2])+") : "+across[i][3]
print(temp)
def view_dwn():
for i in range(0,dwn):
temp=str(down[i][0])+". "+down[i][1]+" ("+str(down[i][2])+") : "+down[i][3]
print(temp)
# clears all the entries in the cells
def clear_cells():
for i in range(0,height):
for j in range(0,width):
valid[i][j]=0
pencil[i][j]=0
if cellblock[i][j]!="." and cellblock[i][j]!=":":
cellblock[i][j]="-"
j=j+1
i=i+1
calc_across()
calc_down()
# view current state of the puzzle
def view_cur():
temp=""
for i in range(0,height):
temp=""
for j in range(0,width):
temp=temp+" "+cellblock[i][j]
j=j+1
print(temp)
i=i+1
# checks the letter in the given row and column of grid with the corresponding letter in the solution
def check(c_row,c_col):
global valid
valid_count=True
if(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":" and valid[c_row][c_col]!=3):
if((is_puz_rebus==True) and (str(c_row)+","+str(c_col) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(c_row)+","+str(c_col))
temp_text=rebus_content[rebus_index]
else:
temp_text=solnblock[c_row][c_col]
if(cellblock[c_row][c_col]==temp_text or cellblock[c_row][c_col]=="-"):
valid_count=True
else:
valid_count=False
valid[c_row][c_col]=2
return valid_count
# checks the validity of a single letter in a word for a given clue
def check_one():
clue= input('Enter clue number (for e.g "1 across"): ')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter the location which has to be checked in the word (for eg. in the word ABCDE, press 1 to check the letter in position A) : ')
loc=int(getstr)
if (loc>across[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_col=c_col+(loc-1)
v=check(c_row,c_col)
if (v==True):
print("The letter is correct")
else:
print("Sorry, the letter seems to be incorrect")
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter the location which has to be checked in the word (for eg. in the word ABCDE, press 1 to check the letter in position A) : ')
loc=int(getstr)
if (loc>down[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_row=c_row+(loc-1)
v=check(c_row,c_col)
if (v==True):
print("The letter is correct")
else:
print("Sorry, the letter seems to be incorrect")
return
print("Sorry wrong format")
# checks the validity of a word for a given clue
def check_word():
ck_val=True
ad=0
clue = input('Enter clue number (for e.g "1 across"): ')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
ad=1
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
ad=2
if (ad==0):
print("Sorry wrong format!")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
text=""
while(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
val=check(c_row,c_col)
if (val==True):
if (cellblock[c_row][c_col]=="-"):
text=text+" - "
else:
text=text+" "+cellblock[c_row][c_col]+","+"Correct "
else:
text=text+" "+cellblock[c_row][c_col]+","+"Wrong "
ck_val=ck_val and val
if(ad==1):
c_col=c_col+1
else:
c_row=c_row+1
if (c_row == height or c_col==width):
break
if(ck_val==True):
print("No incorrect letters found!")
else:
print("Sorry there are some incorrect letters in the word")
print(text)
return
# checks the validity of the entire grid
def check_all():
ck_val=True
text=""
for i in range(0,height):
for j in range(0,width):
val=check(i,j)
if (val==True):
if (cellblock[i][j]=="-" or (cellblock[i][j]=="." or cellblock[i][j]==":" )):
text=text+" "+cellblock[i][j]+" "
else:
text=text+" "+cellblock[i][j]+","+"Correct "
else:
text=text+" "+cellblock[i][j]+","+"Wrong "
ck_val=ck_val and val
j=j+1
text=text+"\n"
i=i+1
if(ck_val==True):
print("No incorrect letters found!")
else:
print("Sorry there are some incorrect entries in the grid")
print(text)
return
# reveals the solution for the given row and column of grid
def reveal(i,j):
global valid
correct_entry=False
if((is_puz_rebus==True) and (str(i)+","+str(j) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(i)+","+str(j))
correct_entry=(rebus_content[rebus_index]==cellblock[i][j])
else:
correct_entry=(solnblock[i][j]==cellblock[i][j])
if(not(correct_entry)):
if solnblock[i][j]!="." and solnblock[i][j]!=":":
pencil[i][j]=0
valid[i][j]=3
if((is_puz_rebus==True) and (str(i)+","+str(j) in rebus_row_col)):
rebus_index=rebus_row_col.index(str(i)+","+str(j))
cellblock[i][j]=rebus_content[rebus_index]
else:
cellblock[i][j]=solnblock[i][j]
# reveals a single letter in a word for a given clue
def reveal_one():
clue = input('Enter clue number (for e.g "1 across"): ')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(across[num][0])+". "+across[num][1]+" ("+str(across[num][2])+") : "+across[num][3])
getstr=input('Enter the location which has to be revealed in the word (for eg. in the word ABCDE, press 1 to reveal the letter in position A) : ')
loc=int(getstr)
if (loc>across[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_col=c_col+(loc-1)
reveal(c_row,c_col)
print("The letter at the given location is : "+cellblock[c_row][c_col])
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while((c_col<width) and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_col=c_col+1
across[num][3]=curstr
calc_down()
return
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
print(str(down[num][0])+". "+down[num][1]+" ("+str(down[num][2])+") : "+down[num][3])
getstr=input('Enter the location which has to be revealed in the word (for eg. in the word ABCDE, press 1 to reveal the letter in position A): ')
loc=int(getstr)
if (loc>down[num][2] or loc<1):
print("Sorry location index is out of range")
return
c_row=c_row+(loc-1)
reveal(c_row,c_col)
print("The letter at the given location is : "+cellblock[c_row][c_col])
curstr=""
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
while(c_row<height and (cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":")):
curstr=curstr+cellblock[c_row][c_col]
c_row=c_row+1
down[num][3]=curstr
calc_across()
return
print("Sorry wrong format")
# reveals the word for a given clue
def reveal_word():
ck_val=True
ad=0
clue = input('Enter clue number (for e.g "1 across"):')
if ('across' in clue):
num=acc
user_no=int(clue.replace(' across',''))
for i in range(0,acc):
if(user_no==int(across[i][0])):
num=i
if(num==acc):
print("No such clue number exists in across cluelist")
return
ad=1
if ('down' in clue):
num=dwn
user_no=int(clue.replace(' down',''))
for i in range(0,dwn):
if(user_no==int(down[i][0])):
num=i
if(num==dwn):
print("No such clue number exists in down cluelist")
return
ad=2
if (ad==0):
print("Sorry wrong format!")
return
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
text=""
while(cellblock[c_row][c_col]!="." and cellblock[c_row][c_col]!=":"):
reveal(c_row,c_col)
text=text+cellblock[c_row][c_col]
if(ad==1):
c_col=c_col+1
else:
c_row=c_row+1
if (c_row == height or c_col==width):
break
if(ad==1):
print("The word for the clue '"+across[num][1]+"' is : "+text)
else:
print("The word for the clue '"+down[num][1]+"' is : "+text)
c_row=row_cellno[user_no-1]
c_col=col_cellno[user_no-1]
if(ad==1):
across[num][3]=text
calc_down()
else:
down[num][3]=text
calc_across()
return
# reveals the complete solution
def reveal_sol():
text=""
for i in range(0,height):
for j in range(0,width):
reveal(i,j)
text=text+" "+cellblock[i][j]
j=j+1
text=text+"\n"
i=i+1
print("Solution Grid : ")
print(text)
calc_across()
calc_down()
return
# in locked puzzles, this function checks the validity of the key entered by the user.
def check_key(key):
global check_reveal_state,unlock_state,soln_state,checksum_sol
ab=unscramble_solution(soln.decode(Encoding_2), width, height, int(key))
temp=""
c=0
for j in range(0,width):
c=j
for i in range(0,height):
if(ab[c]!=":" and ab[c]!="."):
temp=temp+ab[c]
c=c+width
data=temp.encode(Encoding_2)
cksum=0
for c in data:
if (cksum & 0x0001):
cksum = ((cksum >> 1) | 0x8000)
else:
cksum = (cksum >> 1)
cksum = (cksum + c) & 0xffff
if (cksum==checksum_sol[0]):
print("The solution for the puzzle has been unlocked")
check_reveal_state="normal"
unlock_state="disabled"
soln_state[0]=0
checksum_sol[0]=0
temp=0
for i in range(0,height):
for j in range(0,width):
solnblock[i][j]=ab[temp]
temp=temp+1
else:
print("Sorry, Wrong key!")
# in locked puzzles, this function gets the key from the user, to unlock the solution.
def unlock_soln():
global key
key = input("Enter the 4 digit key : ")
check_key(key)
# overrides the IPUZ file with the current state of the puzzle
def save_sol():
temp_l=[]
for i in range(0,height):
if 'saved' not in puzzle:
temp_l.append([])
for j in range(0,width):
if cellblock[i][j]==".":
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
puzzle['saved'][i][j]['value']=block
else:
puzzle['saved'][i][j]=block
else:
temp_l[i].append(block)
elif cellblock[i][j]=="-":
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
puzzle['saved'][i][j]['value']=empty
else:
puzzle['saved'][i][j]=empty
else:
temp_l[i].append(empty)
else:
if 'saved' in puzzle:
if isinstance(puzzle['saved'][i][j],dict):
puzzle['saved'][i][j]['value']=cellblock[i][j]
else:
puzzle['saved'][i][j]=cellblock[i][j]
else:
temp_l[i].append(cellblock[i][j])
if 'saved' not in puzzle:
puzzle['saved']=temp_l
data = ipuz.write(puzzle, jsonp=True, callback_name="ipuz_function")
ofile=open(ifil,mode='w')
ofile.write(data)
ofile.close()
# saves the current state of the puzzle in binary format
def save_puz():
getloc=ofile_txt.split("/")
st=getloc[len(getloc)-1]
op=ofile_txt.replace(st,"")
split1=st.split(".")
newst=""
for i in range(0,(len(split1)-1)):
newst=newst+split1[i]
op=op+newst+".puz"
if 'title' in puzzle:
File.title=puzzle['title']
else:
File.title='title'
if 'author' in puzzle:
File.author=puzzle['author']
else:
File.author='author'
if 'copyright' in puzzle:
File.cpyrt=puzzle['copyright']
else:
File.cpyrt='copyright'
if 'notes' in puzzle:
File.notes=puzzle['notes']
else:
File.notes=''
File.width=width
File.height=height
File.solnblock=solnblock
File.cellblock=cellblock
File.acc=acc
File.dwn=dwn
File.across=across
File.down=down
File.loc=op
ipuz_Helper.filewrite(File)
# saves the current state of the puzzle as a text file
def save_txt():
getloc=ofile_txt.split("/")
st=getloc[len(getloc)-1]
op=ofile_txt.replace(st,"")
split1=st.split(".")
newst=""
for i in range(0,(len(split1)-1)):
newst=newst+split1[i]
op=op+newst+".txt"
col_space=[]
max_col=0
ofl=open(op,mode='wb')
ofl.write(("\n ").encode(Encoding_2))
ofl.write(title.encode(Encoding_2))
for j in range (0,width):
for i in range (0,height):
if (len(cellblock[i][j])>max_col):
max_col=len(cellblock[i][j])
col_space.append(max_col)
max_col=0
ofl.write(("\n\n\n Current State of the puzzle:\n\n ").encode(Encoding_2))
for i in range(0,height):
ofl.write(("\n ").encode(Encoding_2))
ad_space=0
for j in range(0,width):
if(cellblock[i][j]!=":"):
ofl.write(cellblock[i][j].encode(Encoding_2))
else:
ofl.write(".".encode(Encoding_2))
ad_space=col_space[j]-len(cellblock[i][j])
if ad_space>0:
for k in range(0,ad_space):
ofl.write((" ").encode(Encoding_2))
ofl.write((" ").encode(Encoding_2))
ofl.write(("\n\n CLUES\n").encode(Encoding_2))
ofl.write("\n Across : \n".encode(Encoding_2))
calc_across()
calc_down()
for i in range(0,acc):
ct=across[i][0]
r=row_cellno[ct-1]
c=col_cellno[ct-1]
temp=str(across[i][0])+". "+across[i][1]+" <"+across[i][3]+">"
ofl.write(("\n ").encode(Encoding_2))
ofl.write(temp.encode(Encoding_2))
ofl.write("\n\n Down :\n".encode(Encoding_2))
for i in range(0,dwn):
ct=down[i][0]
r=row_cellno[ct-1]
c=col_cellno[ct-1]
temp=str(down[i][0])+". "+down[i][1]+" <"+down[i][3]+">"
ofl.write(("\n ").encode(Encoding_2))
ofl.write(temp.encode(Encoding_2))
ofl.close()
time_state=1
ip=1
calc_across(0)
calc_down(0)
# performs actions corresponding to the option selected by the user
print('Enter 1 to Display the option menu anytime')
while(ip!=0):
ip = input('Enter your option: ')
if(ip=="1"):
if(unlock_state=="disabled"):
print(" 2 : Enter word for a clue (While entering letters for the word, press ',' key to repeat letters from the previous entry of the word eg. A,,DE)\n 3 : Enter rebus for a cell\n 4 : View all across clues\n 5 : View all down clues\n 6 : Clear cells\n 7 : Save\n 8 : View current state of the grid\n 9 : Check a letter, word or entire grid\n 10 : Reveal letter, word or entire solution grid")
else:
print(" 2 : Enter word for a clue\n 3 : Enter rebus for a cell\n 4 : View all across clues\n 5 : View all down clues\n 6 : Clear cells\n 7 : Save\n 8 : View current state of the grid\n 11 : Unlock solution")
if(notes_state=="normal"):
print(" 12 : Display notepad\n 0 : Exit")
else:
print(" 0 : Exit")
if(ip=="2"):
clue= input('Enter clue number (for e.g "1 across"): ')
disp_clue(clue)
is_sol_complete()
if(ip=="3"):
clue= input('Enter clue number (for e.g "1 across"): ')
disp_rebus_clue(clue)
is_sol_complete()
if(ip=="4"):
print('Across:')
view_acc()
if(ip=="5"):
print('Down:')
view_dwn()
if(ip=="6"):
clear_cells()
print('Cells Cleared!!')
if(ip=="7"):
choice=input(' 1 : Save work\n 2 : Save as .puz file\n 3 : Copy work to a text file\n')
if choice=="1":
save_sol()
print("Saved Work Succesfully!")
if choice=="2":
save_puz()
print("Saved Work Succesfully!")
if choice=="3":
save_txt()
print("Saved as text file succesfully!")
if(ip=="8"):
print('Current Block:')
view_cur()
if(ip=="9"):
if(unlock_state=="disabled"):
print('Enter your choice for checking blocks:')
choice=input(' 1 : Check letter\n 2 : Check word\n 3 : Check entire grid\n')
if choice=="1":
check_one()
if choice=="2":
check_word()
if choice=="3":
check_all()
else:
print("Sorry you must unlock the solution first to check or reveal the grid")
if(ip=="10"):
if(unlock_state=="disabled"):
print('Enter your choice for revealing blocks:')
choice=input(' 1 : Reveal letter\n 2 : Reveal word\n 3 : Reveal entire grid\n')
if choice=="1":
reveal_one()
is_sol_complete()
if choice=="2":
reveal_word()
is_sol_complete()
if choice=="3":
reveal_sol()
is_sol_complete()
else:
print("Sorry you must unlock the solution first to check or reveal the grid")
if(ip=="11"):
if(unlock_state=="normal"):
print('Unlock Solution:')
unlock_soln()
else:
print("The solution has already been unlocked!")
if(ip=="12"):
if(notes_state=="normal"):
print(notes)
else:
print("There are no notes available for this puzzle")
if(ip=="0"):
print("Thank you!!")
break
| 35.659939
| 411
| 0.520728
|
65999305203d3d2680af5ebe52efe2f85a197696
| 1,843
|
py
|
Python
|
ants/contrib/downloadermiddleware/robotstxt.py
|
wcong/scale-crawl
|
52918d18c94a9a69c3b2495286e3384ba57ad6f8
|
[
"BSD-3-Clause"
] | 18
|
2015-01-16T02:25:03.000Z
|
2018-11-18T03:47:07.000Z
|
ants/contrib/downloadermiddleware/robotstxt.py
|
wcong/ants
|
52918d18c94a9a69c3b2495286e3384ba57ad6f8
|
[
"BSD-3-Clause"
] | 15
|
2015-01-12T02:28:23.000Z
|
2015-02-03T03:41:07.000Z
|
ants/contrib/downloadermiddleware/robotstxt.py
|
wcong/scale-crawl
|
52918d18c94a9a69c3b2495286e3384ba57ad6f8
|
[
"BSD-3-Clause"
] | 5
|
2015-01-22T12:38:38.000Z
|
2019-06-07T14:35:07.000Z
|
"""
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
"""
import robotparser
from ants.utils import log
from ants.utils.exceptions import NotConfigured, IgnoreRequest
from ants.http import Request
from ants.utils.httpobj import urlparse_cached
class RobotsTxtMiddleware(object):
DOWNLOAD_PRIORITY = 1000
def __init__(self, crawler):
if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
raise NotConfigured
self.crawler = crawler
self._useragent = crawler.settings.get('USER_AGENT')
self._parsers = {}
self._spider_netlocs = set()
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
useragent = self._useragent
rp = self.robot_parser(request, spider)
if rp and not rp.can_fetch(useragent, request.url):
log.spider_log("Forbidden by robots.txt:" + request.url,
level=log.DEBUG)
raise IgnoreRequest
def robot_parser(self, request, spider):
url = urlparse_cached(request)
netloc = url.netloc
if netloc not in self._parsers:
self._parsers[netloc] = None
robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
robotsreq = Request(robotsurl, priority=self.DOWNLOAD_PRIORITY)
dfd = self.crawler.engine.download(robotsreq, spider)
dfd.addCallback(self._parse_robots)
self._spider_netlocs.add(netloc)
return self._parsers[netloc]
def _parse_robots(self, response):
rp = robotparser.RobotFileParser(response.url)
rp.parse(response.body.splitlines())
self._parsers[urlparse_cached(response).netloc] = rp
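# Illustrative sketch only (not part of the original module): the settings this middleware
# reads, assuming a Scrapy-style settings module for the project. ROBOTSTXT_OBEY and
# USER_AGENT are the two keys referenced in __init__ above; the value shown is an assumption.
#
#   ROBOTSTXT_OBEY = True       # otherwise __init__ raises NotConfigured and the middleware is skipped
#   USER_AGENT = 'my-crawler'   # hypothetical value, checked against robots.txt via rp.can_fetch()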
| 33.509091
| 76
| 0.668475
|
0d6620cb94fa427f4e13aeeb6f3f58590c26aebe
| 48
|
py
|
Python
|
src/synpp/__init__.py
|
Nitnelav/synpp
|
b2b2136a99701ce77fd4fea939f8efb521f67c21
|
[
"MIT"
] | 6
|
2020-04-01T12:06:20.000Z
|
2021-11-02T19:10:27.000Z
|
src/synpp/__init__.py
|
Nitnelav/synpp
|
b2b2136a99701ce77fd4fea939f8efb521f67c21
|
[
"MIT"
] | 26
|
2019-12-08T12:25:39.000Z
|
2022-02-28T07:24:56.000Z
|
src/synpp/__init__.py
|
Nitnelav/synpp
|
b2b2136a99701ce77fd4fea939f8efb521f67c21
|
[
"MIT"
] | 8
|
2020-06-19T15:49:46.000Z
|
2021-07-06T10:15:37.000Z
|
from .pipeline import *
from .parallel import *
| 16
| 23
| 0.75
|
e3c2608f7e0f3c8385f9c53bc261be6e2e352e43
| 35,913
|
py
|
Python
|
EMS/scoreManagement/views.py
|
jlbbj111/2019-Software-Engineering-Curriculum-Design
|
a55deabaf00220c5ffb531c6e40ed9edb8063062
|
[
"MIT"
] | 45
|
2019-03-22T23:01:45.000Z
|
2021-11-09T01:32:12.000Z
|
EMS/scoreManagement/views.py
|
LHMdanchaofan/2019-Software-Engineering-Curriculum-Design
|
a55deabaf00220c5ffb531c6e40ed9edb8063062
|
[
"MIT"
] | 9
|
2019-03-25T03:27:57.000Z
|
2021-06-10T21:27:21.000Z
|
EMS/scoreManagement/views.py
|
LHMdanchaofan/2019-Software-Engineering-Curriculum-Design
|
a55deabaf00220c5ffb531c6e40ed9edb8063062
|
[
"MIT"
] | 13
|
2019-03-28T13:44:05.000Z
|
2021-05-23T06:45:03.000Z
|
from django.shortcuts import render, redirect, Http404
from django.utils.datastructures import MultiValueDictKeyError
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse, HttpResponseRedirect
from django.db.models import Q
from datetime import datetime
import pandas as pd
from backstage.models import Student, Teacher, College, Major, MajorPlan, ClassRoom, AdmClass, User
from courseScheduling.models import Teaching, Course, MajorPlan, MajorCourses, Teacher, Teacher_Schedule_result
from courseSelection.models import CourseSelected
from scoreManagement.models import CourseScore, EvaluationForm
from .utils import get_semester
def welcome(request):
students = Student.objects.all()
teachers = Teacher.objects.all()
colleges = College.objects.all()
majors = Major.objects.all()
major_plans = MajorPlan.objects.all()
class_rooms = ClassRoom.objects.all()
context = {
'students': students,
'teachers': teachers,
}
return render(request, 'scoreManage/student_score_manage.html', context)
def adm_all_course_score(request):
try:
username = request.session['username']
adm = User.objects.get(username=username)
if not adm.is_superuser:
return render(request, 'errors/403page.html')
else:
all_colleges = College.objects.all()
all_majors = Major.objects.all()
all_course_score = CourseSelected.objects.filter(is_finish=True)
all_years = [y['teaching__mcno__year'] for y in
CourseScore.objects.values("teaching__mcno__year").distinct()]
all_semester = [y['teaching__mcno__semester'] for y in
CourseScore.objects.values("teaching__mcno__semester").distinct()]
try:
sear_year = request.GET['sear_year']
sear_semester = request.GET['sear_semester']
tch_sch_list = Teacher_Schedule_result.objects.filter(tno__mcno__year=sear_year,
tno__mcno__semester=sear_semester)
all_course_score = CourseSelected.objects.filter(cno__in=tch_sch_list)
context = {
"all_course_score": all_course_score,
"all_years": all_years,
"all_semester": all_semester,
"all_colleges": all_colleges,
"all_majors": all_majors,
"sear_year": sear_year,
"sear_semester": sear_semester,
}
return render(request, 'scoreManage/adm_score_manage.html', context)
except Exception:
context = {
"all_course_score": all_course_score,
"all_years": all_years,
"all_semester": all_semester,
"all_colleges": all_colleges,
"all_majors": all_majors,
}
return render(request, 'scoreManage/adm_score_manage.html', context)
except:
return render(request, 'errors/500page.html')
def score_home_page(request):
if request.session['user_type'] == '学生':
return render(request, 'scoreManage/student_score_manage.html')
elif request.session['user_type'] == '教师':
return render(request, 'scoreManage/teacher_score_manage.html')
else:
return render(request, 'scoreManage/adm_score_manage.html')
def student_view_score(request):
if request.session['user_type'] != '学生':
return render(request, 'errors/403page.html')
sno = request.session['username']
student = Student.objects.get(username=sno)
course_score = CourseScore.objects.filter(sno=student)
years = [c['teaching__mcno__year'] for c in course_score.values("teaching__mcno__year").distinct()]
semesters = [s['teaching__mcno__semester'] for s in course_score.values("teaching__mcno__semester").distinct()]
context = {
"my_course_score": course_score,
"years": years,
"semesters": semesters
}
return render(request, "scoreManage/student_view_score.html", context)
def student_own_study(request):
    if request.session['user_type'] != '学生':
        return redirect("scoreManagement:welcome")
sno = request.session['username']
student = Student.objects.get(username=sno)
course_list = \
CourseScore.objects.filter(sno=student). \
order_by("teaching__mcno__year", "teaching__mcno__semester")
year_semester = \
course_list.values_list("teaching__mcno__year", "teaching__mcno__semester"). \
distinct()
    # total credits the student has earned
    sum = Student.objects.get(username=sno).score_got
    # credits required for graduation
    sum_req = student.in_cls.major.score_grad
    # overall grade point average
    gpa = 0
for course_list_item in course_list:
a = course_list_item.teaching.mcno.cno.score
b = course_list_item.score
if b >= 90:
gpa = gpa + a / sum * 4
elif 80 <= b < 90:
gpa = gpa + a / sum * 3
elif 70 <= b < 80:
gpa = gpa + a / sum * 2
elif 60 <= b < 70:
gpa = gpa + a / sum * 1
else:
gpa = gpa
    # average GPA for each semester
    semester_GPA_list = []
    # total credits for each semester
    semester_sum_list = []
    # number of courses selected in each semester
    semester_num_list = []
for year_semester_item in year_semester:
semester_course = \
course_list.filter(
Q(teaching__mcno__year=year_semester_item[0]),
Q(teaching__mcno__semester=year_semester_item[1])
)
semester_num_list.append(semester_course.count())
semester_sum = 0
semester_GPA = 0
for year_semester_course_item in semester_course:
a = year_semester_course_item.teaching.mcno.cno.score
semester_sum = semester_sum + a
semester_sum_list.append(semester_sum)
for year_semester_course_item in semester_course:
a = year_semester_course_item.teaching.mcno.cno.score
b = year_semester_course_item.score
if b >= 90:
semester_GPA = semester_GPA + a / semester_sum * 4
elif 80 <= b < 90:
semester_GPA = semester_GPA + a / semester_sum * 3
elif 70 <= b < 80:
semester_GPA = semester_GPA + a / semester_sum * 2
elif 60 <= b < 70:
semester_GPA = semester_GPA + a / semester_sum * 1
else:
semester_GPA = semester_GPA
semester_GPA_list.append(semester_GPA)
context = {
"student_name": student.name,
"my_scoresum": sum,
"my_gpa": round(gpa, 2),
"my_year_semester": year_semester,
"semester_GPA": semester_GPA_list,
"semester_scoresum": semester_sum_list,
"my_score_gg": sum_req,
"my_score_g": round(sum / sum_req, 2),
"semester_num": semester_num_list,
}
return render(request, "scoreManage/student_own_study.html", context)
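# Worked example (illustrative numbers, not from the data) of the credit-weighted GPA above:
# with two courses worth 3 and 2 credits, scores 92 (-> 4 points) and 75 (-> 2 points), and
# sum (total credits earned) = 5, gpa = 3/5*4 + 2/5*2 = 3.2. The per-semester loop applies
# the same 90/80/70/60 mapping with semester_sum in place of sum.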
def std_view_major_course(request):
if request.session['user_type'] != '学生':
return render(request, 'errors/403page.html')
sno = request.session['username']
student = Student.objects.get(username=sno)
# my_major_plan = student.in_cls.major
all_major_course = MajorCourses.objects.all()
all_college = College.objects.all()
all_course_type = Course.objects.values("course_type").distinct()
all_year = MajorCourses.objects.values("year").order_by("year").distinct()
all_major = Major.objects.all()
context = {"all_major_course": all_major_course,
"all_college": all_college,
"all_course": all_course_type,
"all_year": all_year,
"student": student,
"all_major": all_major
}
return render(request, "scoreManage/student_major_course.html", context)
def std_view_major_plan(request):
if request.session['user_type'] != '学生':
return render(request, 'errors/403page.html')
sno = request.session['username']
student = Student.objects.get(username=sno)
all_major_plan = MajorPlan.objects.all()
all_college = College.objects.all()
all_year = MajorPlan.objects.values("year").order_by("year").distinct()
college_id = request.GET.get('stat_type_id', None)
all_major = Major.objects.all()
context = {
"all_major_plan": all_major_plan,
"all_college": all_college,
"all_year": all_year,
"student": student,
"all_major": all_major
}
return render(request, "scoreManage/student_major_plan.html", context)
def teacher_view_major_course(request):
if request.session['user_type'] != '教师':
return render(request, 'errors/403page.html')
all_major_course = MajorCourses.objects.all()
all_college = College.objects.all()
all_course_type = Course.objects.values("course_type").distinct()
all_year = MajorCourses.objects.values("year").order_by("year").distinct()
all_major = Major.objects.all()
context = {"all_major_course": all_major_course,
"all_college": all_college,
"all_course": all_course_type,
"all_year": all_year,
"all_major": all_major
}
return render(request, "scoreManage/teacher_major_course.html", context)
def teacher_view_major_plan(request):
if request.session['user_type'] != '教师':
return render(request, 'errors/403page.html')
all_major_plan = MajorPlan.objects.all()
all_college = College.objects.all()
all_year = MajorPlan.objects.values("year").order_by("year").distinct()
all_major = Major.objects.all()
context = {
"all_major_plan": all_major_plan,
"all_college": all_college,
"all_year": all_year,
"all_major": all_major
}
return render(request, "scoreManage/teacher_major_plan.html", context)
def adm_view_major_course(request):
username = request.session['username']
adm = User.objects.get(username=username)
if not adm.is_superuser:
return render(request, 'errors/403page.html')
all_major_plan = MajorPlan.objects.all()
all_course = Course.objects.all()
all_major_course = MajorCourses.objects.all()
all_college = College.objects.all()
all_course_type = Course.objects.values("course_type").distinct()
all_year = MajorCourses.objects.values("year").order_by("year").distinct()
all_major = Major.objects.all()
context = {"all_major_course": all_major_course,
"all_college": all_college,
"all_course_type": all_course_type,
"all_year": all_year,
"all_major": all_major,
"all_major_plan": all_major_plan,
"all_course": all_course,
}
return render(request, "scoreManage/adm_major_course.html", context)
def adm_view_major_plan(request):
username = request.session['username']
adm = User.objects.get(username=username)
if not adm.is_superuser:
return render(request, 'errors/403page.html')
all_major_plan = MajorPlan.objects.all()
all_college = College.objects.all()
all_year = MajorPlan.objects.values("year").order_by("year").distinct()
all_major = Major.objects.all()
context = {
"all_major_plan": all_major_plan,
"all_college": all_college,
"all_year": all_year,
"all_major": all_major
}
return render(request, "scoreManage/adm_major_plan.html", context)
# Student evaluation of teaching
def assess_teacher(request):
    if request.session['user_type'] != '学生':
        return redirect("scoreManagement:welcome")
    # Check whether this student has already submitted all evaluations
def judge(s):
items = EvaluationForm.objects.filter(student_id=s)
if len(items) != 0:
for item in items:
                if item.is_finish == False:
                    return False  # this student has not submitted yet
                else:
                    return True  # this student has already submitted
else:
return False
log = []
stuno = request.session['username']
    sno_id = stuno[4:]  # drop the first four characters to get the student id
stu = Student.objects.filter(username=stuno)
    courses = CourseScore.objects.filter(sno=sno_id)  # look up the courses this student has taken from the score records
num1 = 0
sum = 0
for item1 in courses:
teachings = Teaching.objects.filter(id=item1.teaching_id)
for item2 in teachings:
if item2.mcno.year == 2017 and item2.mcno.semester == 1:
# print(item2)
# print(item2.tno.name)
# print(item2.mcno.cno.cname)
# print(item2.mcno.cno.course_type)
temp = dict()
temp['student'] = stuno
                temp['sno'] = stu  # student
                temp['cno'] = item2.mcno.cno  # course
# print(item2.mcno.cno)
temp['course'] = item2.mcno.id
                temp['tno'] = item2.tno  # teacher
temp['teacher'] = item2.tno_id
# print(item2.tno_id)
temp['state'] = False
temp['r1'] = 0
temp['r2'] = 0
temp['r3'] = 0
temp['r4'] = 0
temp['r5'] = 0
temp['r6'] = 0
temp['r7'] = 0
temp['r8'] = 0
temp['text'] = "无"
temp['flag'] = False
try:
temp1 = EvaluationForm.objects.get(
student_id=sno_id, course_id=item2.mcno.id, teacher_id=item2.tno_id)
temp['r1'] = temp1.item1
temp['r2'] = temp1.item2
temp['r3'] = temp1.item3
temp['r4'] = temp1.item4
temp['r5'] = temp1.item5
temp['r6'] = temp1.item6
temp['r7'] = temp1.item7
temp['r8'] = temp1.item8
temp['text'] = temp1.description
temp['flag'] = temp1.is_finish
# print("!!!")
# if temp1.is_finish == True:
temp['state'] = True
num1 += 1
except:
temp['state'] = False
# print("???")
pass
temp['tname'] = item2.tno.name
temp['cname'] = item2.mcno.cno.cname
# print(item2.tno.id)
temp['type'] = item2.mcno.cno.course_type
# if temp1.is_finish == True:
# temp['state'] = "提交"
# else:
# temp['state'] = "未提交"
sum += 1
log.append(temp)
# print(log)
num2 = sum - num1
flag = judge(sno_id)
context = {'log': log, 'num1': num1, 'num2': num2, 'flag': flag}
return render(request, 'scoreManage/assess_teacher.html', context=context)
# Student submits evaluation information
def submit_result(request):
    if request.session['user_type'] != '学生':
        return redirect("scoreManagement:welcome")
print("!!!")
    # Map each rating grade to its score
def getScore(s):
if s == 'A':
return 100
elif s == 'B':
return 90
elif s == 'C':
return 70
elif s == 'D':
return 60
elif s == 'E':
return 50
# if 'submit_result' in request.POST:
if request.GET:
r1 = request.GET.get('r1')
r2 = request.GET.get('r2')
r3 = request.GET.get('r3')
r4 = request.GET.get('r4')
r5 = request.GET.get('r5')
r6 = request.GET.get('r6')
r7 = request.GET.get('r7')
r8 = request.GET.get('r8')
text = request.GET.get('message')
if text == "":
text = "无"
item_sno = request.GET.get('item_sno')
item_tno = request.GET.get('item_tno')
item_cno = request.GET.get('item_cno')
r1 = getScore(r1)
r2 = getScore(r2)
r3 = getScore(r3)
r4 = getScore(r4)
r5 = getScore(r5)
r6 = getScore(r6)
r7 = getScore(r7)
r8 = getScore(r8)
print(r1, r2, r3, r4, r5, r6, r7, r8, text)
sum = r1 + r2 + r3 + r4 + r5 + r6 + r7 + r8
ave = sum * 1.0 / 8
# print(ave)
# print(type(item_sno), type(item_tno), type(item_cno))
        # student object
student = Student.objects.get(username=item_sno)
# print(student)
        # teacher object
# print(item_tno)
teacher = Teacher.objects.get(id=item_tno)
# print(teacher)
        # course object
course = MajorCourses.objects.get(id=item_cno)
# print(course)
print("!!!")
try:
EvaluationForm.objects.get(
student=student, course=course, teacher=teacher)
EvaluationForm.objects.filter(student=student, course=course, teacher=teacher).update(
item1=r1, item2=r2, item3=r3, item4=r4, item5=r5, item6=r6, item7=r7, item8=r8, description=text,
sum=ave, is_finish=False)
except:
EvaluationForm.objects.create(student=student, course=course, teacher=teacher, item1=r1, item2=r2,
item3=r3, item4=r4, item5=r5, item6=r6, item7=r7, item8=r8, description=text,
sum=ave, is_finish=False)
return redirect('scoreManagement:assess_teacher')
# Final submission; it cannot be changed afterwards
@csrf_exempt
def submit_all(request):
try:
if request.session['user_type'] != '学生':
return render(request, 'errors/403page.html')
if request.GET:
item_sno = request.session['username']
            # student object
            student = Student.objects.get(username=item_sno)
            # update the is_finish field of this student's evaluation forms
EvaluationForm.objects.filter(student=student).update(is_finish=True)
return redirect('scoreManagement:assess_teacher')
except:
return render(request, 'errors/500page.html')
def teacher_view_teaching(request):
if request.session['user_type'] != '教师':
return render(request, 'errors/403page.html')
tno = request.session['username']
teacher = Teacher.objects.get(username=tno)
teaching_list = Teaching.objects.filter(tno=teacher)
years = [y['mcno__year'] for y in teaching_list.values('mcno__year').distinct()]
semesters = [s['mcno__semester'] for s in teaching_list.values('mcno__semester').distinct()]
context = {
'teaching_list': teaching_list,
'years': years,
'semesters': semesters
}
if request.method == 'GET':
try:
other_tno = request.GET['seacher_tno']
except MultiValueDictKeyError:
return render(request, "scoreManage/teacher_view_teaching.html", context)
try:
other_teacher = Teacher.objects.get(username=other_tno)
except Teacher.DoesNotExist:
return render(request, "scoreManage/teacher_view_teaching.html", context)
other_teaching_list = Teaching.objects.filter(tno=other_teacher)
other_years = [y['mcno__year'] for y in other_teaching_list.values('mcno__year').distinct()]
other_semesters = [s['mcno__semester'] for s in other_teaching_list.values('mcno__semester').distinct()]
result = {
"is_find": True,
"other_tno": other_tno,
"other_years": other_years,
"other_semesters": other_semesters,
"other_teaching_list": other_teaching_list,
'teaching_list': teaching_list,
'years': years,
'semesters': semesters
}
return render(request, "scoreManage/teacher_view_teaching.html", result)
return render(request, "scoreManage/teacher_view_teaching.html", context)
# Course instructor enters scores
def teacher_upload_score(request):
if request.session['user_type'] != '教师':
return render(request, 'errors/403page.html')
tno = request.session['username']
teacher = Teacher.objects.get(username=tno)
my_courses = Teaching.objects.filter(tno=teacher)
return render(request, 'scoreManage/teacher_upload_score.html')
# Administrator views the teaching evaluation results
def adm_view_teacher_evaluation(request):
username = request.session['username']
adm = User.objects.get(username=username)
if not adm.is_superuser:
return render(request, 'errors/403page.html')
evaluation_sets = EvaluationForm.objects.all()
context = {
'evaluation_sets': evaluation_sets
}
return render(request, 'scoreManage/adm_view_teacher_evaluation.html', context)
# Get the courses this teacher teaches
def get_all_teaching(request):
if request.session['user_type'] != '教师':
return render(request, 'errors/403page.html')
tno = request.session['username']
teacher = Teacher.objects.get(username=tno)
this_year = datetime.now().year
this_semester = get_semester(datetime.now().month)
teaching_list = Teaching.objects.filter(tno=teacher, mcno__year=this_year, mcno__semester=this_semester)
tch_sch_list = Teacher_Schedule_result.objects.filter(tno__in=teaching_list)
all_year = [year[0] for year in teaching_list.values_list('mcno__year').distinct()]
all_semester = [semester[0] for semester in teaching_list.values_list('mcno__semester').distinct()]
context = {
'teaching_list': teaching_list,
'tch_sch_list': tch_sch_list,
'all_year': all_year,
'all_semester': all_semester,
'this_year': this_year,
'this_semester': this_semester,
}
return render(request, 'scoreManage/teacher_view_stu_score.html', context)
def show_student_score(request, cno, course_type):
user = request.session["username"]
teacher = Teacher.objects.get(username=user)
class_no = Course.objects.get(cno=cno, course_type=course_type)
major_courses = MajorCourses.objects.get(cno=class_no)
teaching = Teaching.objects.get(mcno=major_courses, tno=teacher)
teacher_schedule_result = Teacher_Schedule_result.objects.filter(tno=teaching)
if not teacher_schedule_result:
return render(request, "scoreManage/tch_view_stu_score_detail.html")
else:
teacher_schedule_result = teacher_schedule_result[0]
course_selected = CourseSelected.objects.filter(cno=teacher_schedule_result)
adm_id_list = course_selected.values('sno__in_cls').distinct()
adm_class_list = []
for adm_id in adm_id_list:
adm_class_list.append(AdmClass.objects.get(id=adm_id['sno__in_cls']))
context = {
'course_selected': course_selected,
'adm_class_list': adm_class_list
}
return render(request, "scoreManage/tch_view_stu_score_detail.html", context)
def teacher_view_stu_score(request):
if request.session['user_type'] != '教师':
return render(request, 'errors/403page.html')
tno = request.session['username']
teacher = Teacher.objects.get(username=tno)
if request.method == 'GET':
try:
year = request.GET['year']
semester = request.GET['semester']
if year == '无' or semester == '无':
return render(request, 'scoreManage/teacher_view_stu_score.html')
teaching_list = Teaching.objects.filter(tno=teacher, mcno__year=year, mcno__semester=semester)
except MultiValueDictKeyError:
year = datetime.now().year
month = datetime.now().month
if month == 7:
semester = 3
elif 3 <= month <= 6:
semester = 2
else:
semester = 1
teaching_list = Teaching.objects.filter(tno=teacher, mcno__year=year, mcno__semester=semester)
else:
year = datetime.now().year
month = datetime.now().month
if month == 7:
semester = 3
elif 3 <= month <= 6:
semester = 2
else:
semester = 1
teaching_list = Teaching.objects.filter(tno=teacher, mcno__year=year, mcno__semester=semester)
all_teaching_list = Teaching.objects.filter(tno=teacher)
schedule_result = Teacher_Schedule_result.objects.filter(tno__in=teaching_list)
course_list = CourseSelected.objects.filter(cno__in=schedule_result)
adm_id_list = course_list.values('sno__in_cls').distinct()
adm_class_list = []
for adm_id in adm_id_list:
adm_class_list.append(AdmClass.objects.get(id=adm_id['sno__in_cls']))
all_year = [year[0] for year in all_teaching_list.values_list('mcno__year').distinct()]
all_semester = [semester[0] for semester in all_teaching_list.values_list('mcno__semester').distinct()]
context = {
'course_list': course_list,
'adm_class_list': adm_class_list,
'all_year': all_year,
'all_semester': all_semester,
'teaching_list': teaching_list,
}
return render(request, 'scoreManage/teacher_view_stu_score.html', context)
def adm_change_score(request):
if request.is_ajax():
if len(request.GET):
cs_id = request.GET.get('cs_id')
sno = request.GET.get('sno')
common = float(request.GET.get('common'))
final_score = float(request.GET.get('final_score'))
cs = CourseSelected.objects.get(id=cs_id)
cs.common_score = common
cs.final_score = final_score
cs.save()
n_commen = common
n_final = final_score
score = cs.score
result = {
'n_commen': n_commen,
'n_final': n_final,
'score': score,
}
return JsonResponse(result)
return redirect("scoreManagement:adm_all_course_score")
def adm_change_major_plan(request):
if request.is_ajax():
if len(request.GET):
year = request.GET.get('year')
major_name = request.GET.get('major')
people_num = request.GET.get('people_num')
lowest_score = request.GET.get('lowest_score')
stu_method = request.GET.get('stu_method')
course_num = request.GET.get('course_num')
adm_class_num = request.GET.get('adm_class_num')
major = Major.objects.get(mno=major_name.split('-')[0])
major_plan = MajorPlan.objects.get(major=major, year=year)
# make change
major_plan.cls_num = adm_class_num
major_plan.people_num = people_num
major_plan.score_grad = lowest_score
major_plan.stu_years = stu_method
major_plan.course_num = course_num
major_plan.save()
new_people_num = major_plan.people_num
new_score_grad = major_plan.score_grad
new_clsw_num = major_plan.cls_num
new_stu_years = major_plan.stu_years
new_course_num = major_plan.course_num
data = {
'new_people_num': new_people_num,
'new_score_grad': new_score_grad,
'new_clsw_num': new_clsw_num,
'new_stu_years': new_stu_years,
'new_course_num': new_course_num,
}
return JsonResponse(data)
# Administrator edits a major course; only some of its attributes are modified
def adm_change_major_course(request):
if request.is_ajax():
if len(request.GET):
major = request.GET.get('major')
year = request.GET.get('year')
semester = request.GET.get('semester')
cno = request.GET.get('cno')
teach_hours = request.GET.get('teach_hours')
exp_hours = request.GET.get('exp_hours')
exam_method = request.GET.get('exam_method')
major_plan = MajorPlan.objects.get(major__mno=major.split('-')[1], year=major.split('-')[0])
            # fetch the MajorCourses object
print(cno)
major_course = MajorCourses.objects.get(cno__cno=cno, mno=major_plan)
major_course.year = year
major_course.semester = semester
major_course.hour_class = teach_hours
major_course.hour_other = exp_hours
            major_course.hour_total = int(teach_hours) + int(exp_hours)  # GET values are strings; coerce before summing
major_course.exam_method = (exam_method == '考试')
major_course.save()
return JsonResponse({})
# Administrator adds major course information
def adm_add_major_course(request):
if request.is_ajax():
if len(request.GET):
major_str = request.GET.get('major_str').split('-')
year = request.GET.get('year')
semester = request.GET.get('semester')
major_course = request.GET.get('major_course')
teach_hour = request.GET.get('teach_hour')
exp_hour = request.GET.get('exp_hour')
            hour_total = int(teach_hour) + int(exp_hour)  # GET values are strings; coerce before summing
exam_method = request.GET.get('exam_method')
exam_method = (exam_method == '考试')
major = MajorPlan.objects.get(major__mno=major_str[1], year=major_str[0])
course = Course.objects.filter(cno=major_course)[0]
MajorCourses.objects.update_or_create(
cno=course,
mno=major,
year=year,
semester=semester,
hour_total=hour_total,
hour_class=teach_hour,
hour_other=exp_hour,
exam_method=exam_method
)
return JsonResponse({})
# Administrator deletes a major course (MajorCourses) record
def adm_delete_major_course(request):
if request.is_ajax():
if len(request.GET):
major_plan_str = request.GET.get('major_plan').split('-')
cno = request.GET.get('cno')
ctype = request.GET.get('ctype')
course = Course.objects.get(cno=cno, course_type=ctype)
major_plan = MajorPlan.objects.get(year=major_plan_str[0], major__mno=major_plan_str[1])
major_course = MajorCourses.objects.get(cno=course, mno=major_plan)
major_course.delete()
return JsonResponse({})
# Administrator adds a Course
def adm_add_course(request):
if request.is_ajax():
if len(request.GET):
add_college = request.GET.get('add_college')
cno = request.GET.get('cno')
cname = request.GET.get('cname')
ctype = request.GET.get('ctype')
cscore = request.GET.get('cscore')
college = College.objects.get(name=add_college)
Course.objects.update_or_create(cno=cno, cname=cname, college=college, course_type=ctype, score=cscore)
return JsonResponse({})
# Administrator edits a Course
def adm_change_course(request):
if request.is_ajax():
if len(request.GET):
cno = request.GET.get('cno')
ctype = request.GET.get('ctype')
n_op_college = request.GET.get('n_op_college')
n_cno = request.GET.get('n_cno')
n_cname = request.GET.get('n_cname')
n_ctype = request.GET.get('n_ctype')
n_cscore = request.GET.get('n_cscore')
course = Course.objects.get(cno=cno, course_type=ctype)
try:
college = College.objects.get(name=n_op_college)
course.college = college
course.course_type = n_ctype
course.cno = n_cno
course.cname = n_cname
course.score = n_cscore
course.save()
except College.DoesNotExist:
return JsonResponse({"exception": '学院不存在'})
return JsonResponse({})
# Administrator deletes a Course
def adm_delete_course(request):
if request.is_ajax():
if len(request.GET):
cno = request.GET.get('cno')
ctype = request.GET.get('ctype')
cname = request.GET.get('cname')
try:
course = Course.objects.get(cno=cno, course_type=ctype, cname=cname)
course.delete()
except Course.DoesNotExist:
print(cno, ctype, cname)
return JsonResponse({})
# Get the courses the teacher teaches and that have been scheduled
def get_all_course_selected(request):
if request.session['user_type'] != '教师':
return render(request, 'errors/403page.html')
tno = request.session['username']
teacher = Teacher.objects.get(username=tno)
this_year = datetime.now().year
this_semester = get_semester(datetime.now().month)
teaching_list = Teaching.objects.filter(tno=teacher, mcno__year=this_year, mcno__semester=this_semester)
tch_sch_list = Teacher_Schedule_result.objects.filter(tno__in=teaching_list)
all_year = [year[0] for year in teaching_list.values_list('mcno__year').distinct()]
all_semester = [semester[0] for semester in teaching_list.values_list('mcno__semester').distinct()]
context = {
'teaching_list': teaching_list,
'tch_sch_list': tch_sch_list,
'all_year': all_year,
'all_semester': all_semester,
'this_year': this_year,
'this_semester': this_semester,
}
return render(request, 'scoreManage/teacher_upload_score.html', context)
def upload_student_score(request, tch_sch_id):
tch_sch = Teacher_Schedule_result.objects.get(id=tch_sch_id)
weight = tch_sch.tno.weight
course_selected_list = CourseSelected.objects.filter(cno_id=tch_sch_id)
context = {
'course_selected_list': course_selected_list,
'weight': weight,
'tch_sch': tch_sch_id,
}
return render(request, "scoreManage/tch_upload_score_detail.html", context)
# Teacher enters an individual student's score
def tch_add_score(request):
if request.is_ajax():
if len(request.GET):
cs_id = request.GET.get('cs_id')
com_score = float(request.GET.get('com_score'))
fin_score = float(request.GET.get('fin_score'))
weight = float(request.GET.get('weight'))
cs = CourseSelected.objects.get(id=cs_id)
cs.common_score = com_score
cs.final_score = fin_score
cs.score = com_score * (1 - weight) + fin_score * weight
score = round(cs.score, 2)
cs.is_finish = True
cs.save()
return JsonResponse({"score": score})
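# Worked example (illustrative numbers) for the weighted score above: with weight = 0.7,
# com_score = 80 and fin_score = 90, score = 80*(1-0.7) + 90*0.7 = 87.0.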
# Change the final-score weighting
def tch_change_score_weight(request):
if request.is_ajax():
if len(request.GET):
old_weight = float(request.GET.get('old_weight'))
final_weight = float(request.GET.get('final_weight'))
tch_sch_id = request.GET.get('tch_sch_id')
print(old_weight, final_weight)
if old_weight == final_weight:
return JsonResponse({"no_need": "yes"})
course_selected_list = CourseSelected.objects.filter(cno_id=tch_sch_id)
course_selected_list[0].cno.tno.weight = final_weight
course_selected_list[0].cno.tno.save()
for c in course_selected_list:
c.cno.tno.weight = final_weight
c.cno.tno.save()
c.score = c.common_score * (1 - final_weight) + c.final_score * final_weight
c.save()
return JsonResponse({"succ": "yes"})
# Process the batch-uploaded score file
@csrf_exempt
def handle_batch_score(request):
if request.method == 'POST':
f = request.FILES.get('fileUpload')
excel_data = pd.read_excel(f)
excel_data.columns = excel_data.iloc[0]
excel_data = excel_data.drop(0)
tch_id = 0
try:
for _, row in excel_data.iterrows():
cs_id, sno, comm, final, wei = row['编号'], row['学号'], row['平时分'], row['考试分'], row['考试权重']
cs = CourseSelected.objects.get(id=cs_id)
tch_id = cs.cno_id
cs.common_score = comm
cs.final_score = final
cs.score = comm * (1 - wei) + final * wei
cs.is_finish = True
cs.save()
except:
return render(request, "errors/500page.html")
return redirect("scoreManagement:upload_student_score", tch_id)
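# Assumed spreadsheet layout for the batch upload above (column names are taken from the row
# lookups in the loop; the sample values are invented). The first parsed row is promoted to
# the header row:
#
#   编号 | 学号 | 平时分 | 考试分 | 考试权重
#   12   | 1001 |  80    |  90    |  0.7
#
# Each row updates the matching CourseSelected record with score = 平时分*(1-权重) + 考试分*权重.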
| 38.699353
| 119
| 0.605408
|
760cd1e6d299c97d3889f21718bdfb6fbe4793fe
| 102
|
py
|
Python
|
skyjump/__main__.py
|
Akida31/skyjump
|
d51270ae9c67e213761a9bd294b2643912a205d5
|
[
"MIT"
] | null | null | null |
skyjump/__main__.py
|
Akida31/skyjump
|
d51270ae9c67e213761a9bd294b2643912a205d5
|
[
"MIT"
] | null | null | null |
skyjump/__main__.py
|
Akida31/skyjump
|
d51270ae9c67e213761a9bd294b2643912a205d5
|
[
"MIT"
] | null | null | null |
"""
entrypoint of the simple skyjump game
"""
from skyjump.game import Game
game = Game()
game.run()
| 12.75
| 37
| 0.705882
|
9b601b1abd013c3a71b7bf2ca36358c2010630b2
| 1,305
|
py
|
Python
|
create_plugin/create_plugin.py
|
you0708/volatility
|
f7f41705c015ee1bd92c729508e627118130953e
|
[
"Apache-2.0"
] | 7
|
2018-05-19T00:27:11.000Z
|
2021-06-07T15:36:49.000Z
|
create_plugin/create_plugin.py
|
you0708/volatility
|
f7f41705c015ee1bd92c729508e627118130953e
|
[
"Apache-2.0"
] | null | null | null |
create_plugin/create_plugin.py
|
you0708/volatility
|
f7f41705c015ee1bd92c729508e627118130953e
|
[
"Apache-2.0"
] | 1
|
2021-06-07T15:36:50.000Z
|
2021-06-07T15:36:50.000Z
|
#!/usr/bin/env python
import subprocess
import os, sys
import glob
import argparse
from collections import namedtuple
PLUGIN_DIR = None
TEMPLATE_FILE = 'malware_template.py'
parser = argparse.ArgumentParser(description='Create initial Volatiliy plugin script for malware detection')
parser.add_argument('MALWARE_NAME', help='Malware name')
parser.add_argument('-o', '--outdir', action='store', dest='outdir', default=None, help='Specify output directory')
args = parser.parse_args()
def main():
Malware_Name = args.MALWARE_NAME
MALWARE_NAME = args.MALWARE_NAME.upper()
malware_name = args.MALWARE_NAME.lower()
print('[*] initialize {}.py'.format(malware_name))
with open(TEMPLATE_FILE, 'rb') as fp:
out_data = fp.read()
out_data = out_data.replace('Malware_Name', Malware_Name)
out_data = out_data.replace('MALWARE_NAME', MALWARE_NAME)
out_data = out_data.replace('malware_name', malware_name)
if args.outdir:
out_file = args.outdir + '/' + malware_name + '.py'
elif PLUGIN_DIR:
out_file = PLUGIN_DIR + '/' + malware_name + '.py'
else:
out_file = './' + malware_name + '.py'
with open(out_file, 'w') as fp:
fp.write(out_data)
print('[*] saved as {}'.format(out_file))
if __name__ == '__main__':
main()
| 31.829268
| 115
| 0.689655
|
2df7784e18309b2f63c791a1f11b08e20ddfaae5
| 184
|
py
|
Python
|
FileOpt.py
|
zhongguohb2018/dytt_spider88
|
c85c14f5d274cfe585ecb9efa06e69446ea8d354
|
[
"Apache-2.0"
] | null | null | null |
FileOpt.py
|
zhongguohb2018/dytt_spider88
|
c85c14f5d274cfe585ecb9efa06e69446ea8d354
|
[
"Apache-2.0"
] | null | null | null |
FileOpt.py
|
zhongguohb2018/dytt_spider88
|
c85c14f5d274cfe585ecb9efa06e69446ea8d354
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#FileOpt.py
# File operations
class FileOptBase:
def __init__(self):
print("FileOptBase_init")
def FileOptPrint(self):
print("FileOptBase")
| 15.333333
| 29
| 0.663043
|
53b33d122706f6d48844b0d787032374cddcc8ae
| 1,403
|
py
|
Python
|
tripleo_ansible/roles/tripleo_nvdimm/molecule/default/tests/test_default.py
|
smolar/tripleo-ansible
|
7bd37f019870c032bea71f22b305832932d81424
|
[
"Apache-2.0"
] | 22
|
2018-08-29T12:33:15.000Z
|
2022-03-30T00:17:25.000Z
|
tripleo_ansible/roles/tripleo_nvdimm/molecule/default/tests/test_default.py
|
smolar/tripleo-ansible
|
7bd37f019870c032bea71f22b305832932d81424
|
[
"Apache-2.0"
] | 1
|
2020-02-07T20:54:34.000Z
|
2020-02-07T20:54:34.000Z
|
tripleo_ansible/roles/tripleo_nvdimm/molecule/default/tests/test_default.py
|
smolar/tripleo-ansible
|
7bd37f019870c032bea71f22b305832932d81424
|
[
"Apache-2.0"
] | 19
|
2019-07-16T04:42:00.000Z
|
2022-03-30T00:17:29.000Z
|
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import json
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_ndctl_is_installed(host):
ndctl = host.package("ndctl")
assert ndctl.is_installed
def test_namespace_is_created(host):
if not host.check_output('lsmod | grep libnvdimm | cut -d " " -f 1'):
pytest.skip("Skipping because this needs NVDIMM hardware")
pmem_ns = os.environ['TRIPLEO_NVDIMM_PMEM_NAMESPACES']
ndctl_list_output = host.check_output('ndctl list')
namespaces = {ns.get('name') for ns in json.loads(ndctl_list_output)}
wanted_ns = [ns_name.split(':')[1] for ns_name in pmem_ns.split(',')]
for ns in wanted_ns:
assert ns in namespaces
| 34.219512
| 75
| 0.741269
|
724078238973707bd28bdf9f28779811754ff571
| 709
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
BulcoinProject/BULC
|
3be8ab73cc1d7ba8dfd6190648ba462f34ffde9c
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
BulcoinProject/BULC
|
3be8ab73cc1d7ba8dfd6190648ba462f34ffde9c
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
BulcoinProject/BULC
|
3be8ab73cc1d7ba8dfd6190648ba462f34ffde9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):50307")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
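# Worked example (illustrative) of the byte packing in main(): a seeder line "1.2.3.4:50307"
# yields ip = 1 + (2<<8) + (3<<16) + (4<<24) = 0x04030201, i.e. the address bytes are packed
# little-endian, which is what the "0x%08x" output format prints.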
| 21.484848
| 78
| 0.509168
|
0c8671094dc4082f803cd673e34660a1b6b77dea
| 70
|
py
|
Python
|
samples/basic/hello.py
|
tsdking/pythonnote
|
197ef8f989ee8e880bd7f891b546b19ba1c6106d
|
[
"Apache-2.0"
] | null | null | null |
samples/basic/hello.py
|
tsdking/pythonnote
|
197ef8f989ee8e880bd7f891b546b19ba1c6106d
|
[
"Apache-2.0"
] | null | null | null |
samples/basic/hello.py
|
tsdking/pythonnote
|
197ef8f989ee8e880bd7f891b546b19ba1c6106d
|
[
"Apache-2.0"
] | 1
|
2022-01-14T13:18:51.000Z
|
2022-01-14T13:18:51.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
print('Hello, world')
| 14
| 23
| 0.585714
|
7fd3d7dfb293f85eba82ba9fc3bfaffd9c316bf1
| 2,778
|
py
|
Python
|
setup.py
|
emedgene/smart_open
|
5f730f2e3b2167c9f00e6767c71b31de95af027f
|
[
"MIT"
] | null | null | null |
setup.py
|
emedgene/smart_open
|
5f730f2e3b2167c9f00e6767c71b31de95af027f
|
[
"MIT"
] | null | null | null |
setup.py
|
emedgene/smart_open
|
5f730f2e3b2167c9f00e6767c71b31de95af027f
|
[
"MIT"
] | 1
|
2020-02-16T11:32:33.000Z
|
2020-02-16T11:32:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
import io
import os
import sys
from setuptools import setup, find_packages
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
#
# This code intentially duplicates a similar function in __init__.py. The
# alternative would be to somehow import that module to access the function,
# which would be too messy for a setup.py script.
#
def _get_version():
curr_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(curr_dir, 'smart_open', 'VERSION')) as fin:
return fin.read().strip()
tests_require = [
'mock',
'moto==1.3.4',
'pathlib2',
'responses',
# Temporary pin boto3 & botocore, because moto doesn't work with new version
# See https://github.com/spulec/moto/issues/1793 and https://github.com/RaRe-Technologies/smart_open/issues/227
'boto3 < 1.8.0',
# 'botocore < 1.11.0'
# Not used directly but allows boto GCE plugins to load.
# https://github.com/GoogleCloudPlatform/compute-image-packages/issues/262
'google-compute-engine==2.8.12'
]
install_requires = [
'boto >= 2.32',
'requests',
'boto3',
]
if sys.version_info[0] == 2:
install_requires.append('bz2file')
setup(
name='smart_open',
version=_get_version(),
description='Utils for streaming large files (S3, HDFS, gzip, bz2...)',
long_description=read('README.rst'),
packages=find_packages(),
package_data={
"smart_open": ["VERSION"],
"smart_open.tests": ["test_data/*gz"],
},
author='Radim Rehurek',
author_email='me@radimrehurek.com',
maintainer='Radim Rehurek',
maintainer_email='me@radimrehurek.com',
url='https://github.com/piskvorky/smart_open',
download_url='http://pypi.python.org/pypi/smart_open',
keywords='file streaming, s3, hdfs',
license='MIT',
platforms='any',
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require,
},
test_suite="smart_open.tests",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Distributed Computing',
'Topic :: Database :: Front-Ends',
],
)
| 27.50495
| 115
| 0.651908
|
013440d89aa4768fef4e9b046491f19071c90238
| 1,331
|
py
|
Python
|
python/subscribe-to-send-grid/main.py
|
paarthchhabra/demos-for-functions
|
1794614ae22c11603ac4db3a76751d5d89b464cb
|
[
"MIT"
] | 1
|
2022-01-31T18:17:56.000Z
|
2022-01-31T18:17:56.000Z
|
python/subscribe-to-send-grid/main.py
|
paarthchhabra/demos-for-functions
|
1794614ae22c11603ac4db3a76751d5d89b464cb
|
[
"MIT"
] | 2
|
2022-03-11T08:32:38.000Z
|
2022-03-11T14:42:33.000Z
|
python/subscribe-to-send-grid/main.py
|
paarthchhabra/demos-for-functions
|
1794614ae22c11603ac4db3a76751d5d89b464cb
|
[
"MIT"
] | null | null | null |
#Importing the required libraries
import os
import json
import requests
from appwrite.client import Client
#Initialise the Appwrite Client SDK for Python
client = Client()
(client
.set_endpoint(os.environ.get('APPWRITE_ENDPOINT')) #Your API Endpoint
.set_project(os.environ.get('APPWRITE_FUNCTION_PROJECT_ID')) #Your Project ID available by default
.set_key(os.environ.get('APPWRITE_API_KEY')) #Your secret API Key
)
# #Initialise the Sendgrid Client SDK
# sg = sendgrid.SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY')) #Your Sendgrid API Key
# Get the name and email of the new user from Appwrite's Environment variables
payload = json.loads(os.environ.get('APPWRITE_FUNCTION_DATA'))
recipient_id = payload['email']
print('recipient id: ', recipient_id)
list_id = os.environ.get('SENDGRID_LIST_ID') # The Newsletter ID to which the new user has to be added
print('list id: ', list_id)
url = "https://api.sendgrid.com/v3/marketing/contacts"
data = {
"list_ids": [
list_id
],
"contacts": [
{
"email": recipient_id,
"custom_fields": {}
}
]
}
headers = {
'authorization': "Bearer "+os.environ.get('SENDGRID_API_KEY'),
'content-type': "application/json"
}
response = requests.request("PUT", url, data=json.dumps(data), headers=headers)
print(response.text)
| 25.596154
| 102
| 0.724267
|
70ecf56e2f208a8bd6c3126d0caf58a9e26725be
| 4,260
|
py
|
Python
|
dvc/utils/collections.py
|
Sayanta66/dvc
|
0d484d45f710b39c6baf541ac4d23aa983792aae
|
[
"Apache-2.0"
] | 1
|
2021-07-20T05:33:18.000Z
|
2021-07-20T05:33:18.000Z
|
dvc/utils/collections.py
|
Sayanta66/dvc
|
0d484d45f710b39c6baf541ac4d23aa983792aae
|
[
"Apache-2.0"
] | null | null | null |
dvc/utils/collections.py
|
Sayanta66/dvc
|
0d484d45f710b39c6baf541ac4d23aa983792aae
|
[
"Apache-2.0"
] | 1
|
2021-08-05T07:15:30.000Z
|
2021-08-05T07:15:30.000Z
|
import inspect
import os
from collections.abc import Mapping
from functools import wraps
from typing import Callable, Dict, Iterable, List, TypeVar, Union
from pygtrie import StringTrie as _StringTrie
class PathStringTrie(_StringTrie):
"""Trie based on platform-dependent separator for pathname components."""
def __init__(self, *args, **kwargs):
kwargs["separator"] = os.sep
super().__init__(*args, **kwargs)
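# Minimal usage sketch (illustrative key and value): because the separator is os.sep, keys are
# split on pathname components rather than single characters:
#
#   trie = PathStringTrie()
#   trie[os.path.join("data", "raw")] = "md5:abc"   # stored under the ("data", "raw") components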
def apply_diff(src, dest):
"""Recursively apply changes from src to dest.
Preserves dest type and hidden info in dest structure,
like ruamel.yaml leaves when parses files. This includes comments,
ordering and line foldings.
Used in Stage load/dump cycle to preserve comments and custom formatting.
"""
Seq = (list, tuple)
Container = (Mapping, list, tuple)
def is_same_type(a, b):
return any(
isinstance(a, t) and isinstance(b, t)
for t in [str, Mapping, Seq, bool]
)
if isinstance(src, Mapping) and isinstance(dest, Mapping):
for key, value in src.items():
if isinstance(value, Container) and is_same_type(
value, dest.get(key)
):
apply_diff(value, dest[key])
elif key not in dest or value != dest[key]:
dest[key] = value
for key in set(dest) - set(src):
del dest[key]
elif isinstance(src, Seq) and isinstance(dest, Seq):
if len(src) != len(dest):
dest[:] = src
else:
for i, value in enumerate(src):
if isinstance(value, Container) and is_same_type(
value, dest[i]
):
apply_diff(value, dest[i])
elif value != dest[i]:
dest[i] = value
else:
raise AssertionError(
"Can't apply diff from {} to {}".format(
src.__class__.__name__, dest.__class__.__name__
)
)
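# Minimal usage sketch (hypothetical data, not from the module): apply_diff mutates dest in
# place so that it mirrors src while keeping dest's container types and any ruamel.yaml
# round-trip information (comments, ordering, foldings):
#
#   src = {"cmd": "train", "deps": ["a", "b"]}
#   apply_diff(src, dest)   # dest now carries src's values; keys missing from src are deleted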
def ensure_list(item: Union[Iterable[str], str, None]) -> List[str]:
if item is None:
return []
if isinstance(item, str):
return [item]
return list(item)
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
def chunk_dict(d: Dict[_KT, _VT], size: int = 1) -> List[Dict[_KT, _VT]]:
from funcy import chunks
return [{key: d[key] for key in chunk} for chunk in chunks(size, d)]
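# Worked example (illustrative): chunk_dict({"a": 1, "b": 2, "c": 3}, size=2)
# -> [{"a": 1, "b": 2}, {"c": 3}], since funcy.chunks splits the keys into runs of `size`.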
class _NamespacedDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
def validate(*validators: Callable, post: bool = False):
"""
Validate and transform arguments and results from function calls.
The validators functions are passed a dictionary of arguments, which
supports dot notation access too.
The key is derived from the function signature, and hence is the name of
the argument, whereas the value is the one passed to the function
(if it is not passed, default value from keyword arguments are provided).
>>> def validator(args):
... assert args["l"] >= 0 and args.b >= 0 and args.h >= 0
>>> @validate(validator)
... def cuboid_area(l, b, h=1):
... return 2*(l*b + l*h + b*h)
>>> cuboid_area(5, 20)
250
>>> cuboid_area(-1, -2)
Traceback (most recent call last):
...
AssertionError
"""
def wrapped(func: Callable):
sig = inspect.signature(func)
@wraps(func)
def inner(*args, **kwargs):
ba = sig.bind(*args, **kwargs)
ba.apply_defaults()
ba.arguments = _NamespacedDict(ba.arguments)
if not post:
for validator in validators:
validator(ba.arguments)
result = func(*ba.args, **ba.kwargs)
if post:
for validator in validators:
result = validator(result)
return result
return inner
return wrapped
def nested_contains(dictionary: Dict, phrase: str) -> bool:
for key, val in dictionary.items():
if key == phrase and val:
return True
if isinstance(val, dict):
if nested_contains(val, phrase):
return True
return False
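# Worked example (illustrative): nested_contains({"plots": {"no_cache": True}}, "no_cache")
# returns True; the search recurses into nested dicts and requires the matched value to be truthy.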
| 28.783784
| 77
| 0.585681
|
c75da67ca0c4ed0be023582392d5e5cbfb5bfe03
| 5,997
|
py
|
Python
|
extensions/information/user.py
|
kamfretoz/XJ9
|
b5daaf297dc538ae250f69ace16a4855c797e4e7
|
[
"WTFPL"
] | 7
|
2022-01-26T13:31:17.000Z
|
2022-03-11T16:34:07.000Z
|
extensions/information/user.py
|
kamfretoz/XJ9
|
b5daaf297dc538ae250f69ace16a4855c797e4e7
|
[
"WTFPL"
] | null | null | null |
extensions/information/user.py
|
kamfretoz/XJ9
|
b5daaf297dc538ae250f69ace16a4855c797e4e7
|
[
"WTFPL"
] | 2
|
2022-01-26T06:05:44.000Z
|
2022-03-07T18:48:32.000Z
|
from datetime import datetime
from lightbulb.ext import filament
import hikari
import lightbulb
user_plugin = lightbulb.Plugin("user", "User lookup commands")
@user_plugin.command
@lightbulb.option("target", "The member to get information about.", hikari.Member, required=False)
@lightbulb.command("memberinfo", "Get info on a server member.", aliases=["mi","profile","minfo"], ephemeral=True, auto_defer=True)
@lightbulb.implements(lightbulb.PrefixCommand, lightbulb.SlashCommand)
@filament.utils.pass_options
async def member_info(ctx: lightbulb.Context, target) -> None:
target = ctx.get_guild().get_member(target or ctx.user)
if not target:
await ctx.respond("That user is not in the server, use the userinfo command instead.")
return
created_at = int(target.created_at.timestamp())
joined_at = int(target.joined_at.timestamp())
roles = (await target.fetch_roles())[1:] # All but @everyone
emb = hikari.Embed(
title=f"User Info - {target.display_name}",
description=f"ID: `{target.id}`",
colour=target.accent_color,
timestamp=datetime.now().astimezone(),
)
emb.set_footer(
text=f"Requested by {ctx.member.display_name}",
icon=ctx.member.avatar_url or ctx.member.default_avatar_url,
)
emb.set_thumbnail(target.avatar_url or target.default_avatar_url)
emb.add_field(
"Bot?",
str(target.is_bot),
inline=False,
)
emb.add_field(
"Created account on",
f"<t:{created_at}:d>\n(<t:{created_at}:R>)",
inline=False,
)
emb.add_field(
"Joined server on",
f"<t:{joined_at}:d>\n(<t:{joined_at}:R>)",
inline=False,
)
emb.add_field(
"Roles",
", ".join(r.mention for r in roles) or "No Roles.",
inline=False,
)
emb.add_field(
"Mention",
target.mention,
inline=False
)
await ctx.respond(emb)
@user_plugin.command
@lightbulb.option("user", "The member to get information about.", hikari.User, required=False)
@lightbulb.command("userinfo", "Get info on any user", aliases=["ui","uprofile","uinfo"], ephemeral=True, auto_defer=True)
@lightbulb.implements(lightbulb.PrefixCommand, lightbulb.SlashCommand)
@filament.utils.pass_options
async def user_info(ctx: lightbulb.Context, user) -> None:
target = await ctx.bot.rest.fetch_user(user = user or ctx.user)
if not target:
await ctx.respond("Cannot find that user.")
return
created_at = int(target.created_at.timestamp())
emb = hikari.Embed(
title=f"User Info - {target}",
description=f"ID: `{target.id}`",
colour=target.accent_color,
timestamp=datetime.now().astimezone(),
)
emb.add_field(name="Is bot?", value=target.is_bot, inline=False)
emb.set_thumbnail(target.avatar_url or target.default_avatar_url)
if target.banner_url:
emb.set_image(target.banner_url)
emb.add_field(
"Created account on",
f"<t:{created_at}:d>\n(<t:{created_at}:R>)",
inline=False,
)
emb.add_field(name="Mention", value=target.mention, inline=False)
await ctx.respond(embed=emb)
@user_plugin.command
@lightbulb.add_cooldown(3, 3, lightbulb.UserBucket)
@lightbulb.option("target", "The member to get the banner.", hikari.User, required=False)
@lightbulb.command("banner", "Get a member's banner.", auto_defer = True, ephemeral = True)
@lightbulb.implements(lightbulb.PrefixCommand, lightbulb.SlashCommand)
@filament.utils.pass_options
async def user_banner(ctx: lightbulb.Context, target: hikari.User):
"""Show the banner of a user, if any"""
target = await ctx.bot.rest.fetch_user(target or ctx.user)
if not target:
await ctx.respond("That user is not in the server.")
return
banner = target.banner_url
# If statement because the user may not have a banner
if banner:
bnr = hikari.Embed(
description=f"**{target.mention}**'s Banner",
title="Banner Viewer",
color=target.accent_colour,
timestamp=datetime.now().astimezone(),
)
bnr.set_image(banner)
await ctx.respond(embed=bnr)
else:
await ctx.respond(embed=hikari.Embed(description="This User has no banner set."))
@user_plugin.command
@lightbulb.option("server", "Get the server avatar instead?", bool, required = False, default = False)
@lightbulb.option("target", "The member to get the avatar.", hikari.User , required=False)
@lightbulb.command("avatar", "Get a member's avatar.", auto_defer=True, aliases=["pp", "pfp","ava","icon"], ephemeral=True)
@lightbulb.implements(lightbulb.PrefixCommand, lightbulb.SlashCommand)
@filament.utils.pass_options
async def user_avatar(ctx: lightbulb.Context, target: hikari.User, server: bool):
"""Show avatar of a user, if any"""
target = await ctx.bot.rest.fetch_user(target or ctx.user)
if not target:
await ctx.respond("That user is not in the server.")
return
if server:
try:
pfp = target.guild_avatar_url
except AttributeError:
return await ctx.respond("That user doesn't have server-specific avatar.")
else:
pfp = target.avatar_url or target.default_avatar_url
# If statement because the user may not have a custom avatar
if pfp:
ava = hikari.Embed(
description=f"**{target.mention}**'s Avatar",
title="Avatar Viewer",
color=target.accent_colour,
timestamp=datetime.now().astimezone(),
)
ava.set_image(pfp)
await ctx.respond(embed=ava)
else:
await ctx.respond(embed=hikari.Embed(description="This User has no avatar set."))
def load(bot) -> None:
bot.add_plugin(user_plugin)
def unload(bot) -> None:
bot.remove_plugin(user_plugin)
| 36.791411
| 131
| 0.653327
|
a37dd584f8b3ad912e49fc84a98c4a958da3054a
| 4,844
|
py
|
Python
|
RecoTauTag/TauTagTools/test/training/training_control_plots.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
RecoTauTag/TauTagTools/test/training/training_control_plots.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
RecoTauTag/TauTagTools/test/training/training_control_plots.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import re
import os
input_file = sys.argv[1]
output_dir = sys.argv[2]
sys.argv[:] = []
if not os.path.exists(output_dir):
os.makedirs(output_dir)
import ROOT
ROOT.gROOT.SetBatch(True)
ROOT.gROOT.SetStyle("Plain")
ROOT.gStyle.SetPalette(1)
def get_by_type(directory, type):
for key in directory.GetListOfKeys():
object = key.ReadObj()
if isinstance(object, type):
yield object
def gini_index(signal, background):
signal_integral = signal.GetIntegral()
background_integral = background.GetIntegral()
total = signal.Integral() + background.Integral()
linear = ROOT.TGraph(signal.GetNbinsX()+1)
signal_fraction = ROOT.TGraph(signal.GetNbinsX()+1)
for ibin in range(0, signal.GetNbinsX()+1):
total_fraction_of_sample = (
signal_integral[ibin] + background_integral[ibin])/2.0
linear.SetPoint(
ibin, total_fraction_of_sample, total_fraction_of_sample)
total_fraction_of_signal = signal_integral[ibin]
signal_fraction.SetPoint(
ibin, total_fraction_of_sample, total_fraction_of_signal)
return 0.5-signal_fraction.Integral()
colors = {
'Signal' : ROOT.EColor.kRed,
'Background' : ROOT.EColor.kBlue,
}
if __name__ == "__main__":
# Exit gracefully
if not os.path.exists(input_file):
print "WARNING: no training control plot .root file found!"
sys.exit(0)
file = ROOT.TFile(input_file)
correlation_canvas = ROOT.TCanvas("corr", "corr", 2000, 1000)
correlation_canvas.Divide(2)
signal_correlation = file.Get("CorrelationMatrixS")
background_correlation = file.Get("CorrelationMatrixB")
background_correlation.SetMarkerColor(ROOT.EColor.kBlack)
for index, plot in enumerate([signal_correlation, background_correlation]):
correlation_canvas.cd(index+1)
plot.SetMarkerColor(ROOT.EColor.kBlack)
plot.Draw("col")
plot.Draw("text, same")
plot.GetXaxis().SetLabelSize(0.03)
plot.GetYaxis().SetLabelSize(0.03)
ROOT.gPad.SetMargin(0.2, 0.1, 0.2, 0.1)
correlation_canvas.SaveAs(os.path.join(
output_dir, "correlations.png"))
method_canvas = ROOT.TCanvas("method", "method", 800, 800)
method_canvas.cd()
# Find the MVA result directory
for method_dir in [dir for dir in get_by_type(file, ROOT.TDirectory)
if 'Method_' in dir.GetName()]:
method_canvas.SetLogy(False)
# Strip prefix
method_type = method_dir.GetName().replace('Method_', '')
print method_type
result_dir = method_dir.Get(method_type)
signal_mva_out = result_dir.Get("MVA_%s_S" % method_type)
background_mva_out = result_dir.Get("MVA_%s_B" % method_type)
signal_mva_out.SetLineColor(colors['Signal'])
background_mva_out.SetLineColor(colors['Background'])
stack = ROOT.THStack("stack", "MVA Output")
stack.Add(signal_mva_out, "HIST")
stack.Add(background_mva_out, "HIST")
stack.Draw("nostack")
method_canvas.SaveAs(os.path.join(
output_dir, "%s_mva_output.png" % method_type))
perf_curve = result_dir.Get("MVA_%s_effBvsS" % method_type)
perf_curve.Draw()
perf_curve.SetMinimum(1e-4)
method_canvas.SetLogy(True)
method_canvas.SaveAs(os.path.join(output_dir, "%s_performance.png"
% method_type))
input_var_dir = file.Get("InputVariables_NoTransform")
if not input_var_dir:
input_var_dir = file.Get("InputVariables_Id")
matcher = re.compile("(?P<name>[^_]*)__(?P<type>[A-Za-z0-9]*)_Id")
input_distributions = {}
for histo in get_by_type(input_var_dir, ROOT.TH1F):
rawname = histo.GetName()
match = matcher.match(rawname)
name = match.group('name')
type = match.group('type')
histo.Scale(1.0/histo.Integral())
histo.SetLineColor(colors[type])
histo_info = input_distributions.setdefault(name, {})
histo_info[type] = histo
variable_canvas = ROOT.TCanvas("var", "var", 1000, 1000)
for variable, histograms in input_distributions.iteritems():
maximum = max(histograms[type].GetMaximum()
for type in ['Signal', 'Background'])
for type in ['Signal', 'Background']:
histograms[type].SetLineWidth(2)
# Tgraph integral not in ROOT 5.27?
gini = gini_index(histograms['Signal'], histograms['Background'])
histograms['Signal'].SetMaximum(1.2*maximum)
histograms['Signal'].SetTitle(variable + " gini: %0.2f" % gini)
histograms['Signal'].Draw()
histograms['Background'].Draw('same')
variable_canvas.SaveAs(os.path.join(
output_dir, variable + ".png"))
| 36.69697
| 79
| 0.655656
|
e2a929c10517315c0dc12d94a7c27f71db0d5e6b
| 1,602
|
py
|
Python
|
content_bruter.py
|
n1cfury/BlackHatPython
|
26f6e9bce7e13879f0c2d9760d210c222d5e9e3a
|
[
"MIT"
] | 10
|
2018-08-14T22:28:07.000Z
|
2020-10-23T16:21:48.000Z
|
content_bruter.py
|
n1cfury/BlackHatPython
|
26f6e9bce7e13879f0c2d9760d210c222d5e9e3a
|
[
"MIT"
] | 1
|
2018-08-01T05:45:53.000Z
|
2018-08-01T05:47:45.000Z
|
content_bruter.py
|
n1cfury/BlackHatPython
|
26f6e9bce7e13879f0c2d9760d210c222d5e9e3a
|
[
"MIT"
] | 4
|
2018-05-18T08:46:29.000Z
|
2020-05-07T03:28:03.000Z
|
import urllib2, threading, Queue, urllib
threads = 50
target_url = "http://testphp.vulnweb.com"
wordlist_file = "/tmp/all.txt" #from SVNDigger
resume = None
user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:19.0) Gecko/ 20100101 Firefox/19.0"
def banner():
print "[***] Directory Brute Force p67 [***]"
def build_wordlist(wordlist_file):
    fd = open(wordlist_file, "rb")
    raw_words = fd.readlines()
fd.close()
found_resume = False
words = Queue.Queue()
for word in raw_words:
word = word.rstrip()
if resume is not None:
if found_resume:
words.put(word)
else:
                if word == resume:
found_resume = True
print "Resuming wordlist from: %s" % resume
else:
words.put(word)
return words
def dir_bruter(word_queue, extensions=None):
while not word_queue.empty():
        attempt = word_queue.get()
        attempt_list = []
if "." not in attempt:
attempt_list.append("/%s/" %attempt)
else:
attempt_list.append("/%s" %attempt)
if extensions:
for extension in extensions:
attempt_list.append("/%s%s" % (attempt, extension))
for brute in attempt_list:
url = "%s%s" % (target_url, urllib.quote(brute))
try:
headers = {}
headers["User-Agent"] = user_agent
r = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(r)
if len(response.read()):
print "[%d] => %s" % (resposne.code, url)
except urllib2.URLError, e:
if hasattr(e, 'code') and e.code != 404:
print "!!! %d => %s" % (response.code, url)
pass
def main():
    banner()
    word_queue = build_wordlist(wordlist_file)
    for i in range(threads):
        threading.Thread(target=dir_bruter, args=(word_queue,)).start()
if __name__ == '__main__':
main()
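# Usage note (not part of the original script): point target_url and
# wordlist_file at your own test environment before running; each worker thread
# prints paths that respond with content and flags non-404 HTTP errors.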
| 26.262295
| 84
| 0.654806
|
5adc86a52bcf2b64cb53e0bf9d1951cec1b54bde
| 175
|
py
|
Python
|
turbustat/tests/setup_package.py
|
CFD-UTSA/Turbulence-stars
|
354d02e38d15e3b0d1f751b43f430dbd3a14c250
|
[
"MIT"
] | 42
|
2016-04-07T20:49:59.000Z
|
2022-03-28T12:54:13.000Z
|
turbustat/tests/setup_package.py
|
CFD-UTSA/Turbulence-stars
|
354d02e38d15e3b0d1f751b43f430dbd3a14c250
|
[
"MIT"
] | 131
|
2015-03-05T21:42:27.000Z
|
2021-07-22T14:59:04.000Z
|
turbustat/tests/setup_package.py
|
CFD-UTSA/Turbulence-stars
|
354d02e38d15e3b0d1f751b43f430dbd3a14c250
|
[
"MIT"
] | 21
|
2015-06-10T17:10:06.000Z
|
2022-02-28T15:59:42.000Z
|
def get_package_data():
return {
_ASTROPY_PACKAGE_NAME_ + '.tests': ['data/*.fits', 'data/*.npz',
'coveragerc']
}
| 25
| 72
| 0.44
|
de6b16cc981079b5bbfe45ba3d44ce5f61318ece
| 33,782
|
py
|
Python
|
fairseq/data/multilingual/multilingual_data_manager.py
|
ypsoh/fairseq
|
578ee3d456034f12af13e446bba9447844374356
|
[
"MIT"
] | 651
|
2015-03-14T23:18:44.000Z
|
2022-01-19T14:08:28.000Z
|
fairseq/data/multilingual/multilingual_data_manager.py
|
ypsoh/fairseq
|
578ee3d456034f12af13e446bba9447844374356
|
[
"MIT"
] | 362
|
2015-01-26T16:20:28.000Z
|
2022-01-26T06:19:23.000Z
|
fairseq/data/multilingual/multilingual_data_manager.py
|
ypsoh/fairseq
|
578ee3d456034f12af13e446bba9447844374356
|
[
"MIT"
] | 169
|
2015-09-28T17:06:28.000Z
|
2021-12-18T16:02:49.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import numpy as np
from collections import OrderedDict
import json
from fairseq import options
from fairseq.options import eval_str_dict, csv_str_list
from fairseq.data import (
Dictionary,
AppendTokenDataset,
ConcatDataset,
data_utils,
indexed_dataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
SampledMultiDataset,
TransformEosLangPairDataset,
SampledMultiEpochDataset,
)
from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat
from fairseq.file_io import PathManager
logger = logging.getLogger(__name__)
def _lang_token(lang: str, style='__{}__'):
return style.format(lang)
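# e.g. _lang_token('en_XX') -> '__en_XX__'; with the mbart style '[{}]' it
# yields '[en_XX]'.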
def _lang_token_index(dic: Dictionary, lang: str, style='__{}__'):
"""Return language token index."""
idx = dic.index(_lang_token(lang, style))
assert idx != dic.unk_index, \
'cannot find language token for lang {}'.format(lang)
return idx
def _lang_id(dic: Dictionary, lang: str):
"""Return language ID index."""
idx = dic.index(lang)
assert idx != dic.unk_index, \
'cannot find language ID for lang {}'.format(lang)
return idx
def load_sampling_weights(from_file):
with open(from_file) as f:
weights = json.load(f)
return weights
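# The weights file is expected to hold a plain JSON mapping of dataset key to
# sampling ratio, e.g. (illustrative values):
#   {"main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5}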
class MultilingualDatasetManager(object):
def __init__(self, args, lang_pairs, langs, dicts, sampling_method):
super().__init__()
self.args = args
self.seed = args.seed
self.lang_pairs = lang_pairs
self.langs = langs
self.dicts = dicts
self.lang_dict = self.create_lang_dictionary(self.langs)
self.sampling_method = sampling_method
self.sampling_scheduler = None
self._has_sharded_data = False
self._num_shards = {}
@classmethod
def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method):
return MultilingualDatasetManager(args, lang_pairs, langs, dicts, sampling_method)
@staticmethod
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--lang-dict', default=None, type=str,
help='language dictionary path with a list of '
'languages which can appear in lang-pairs')
parser.add_argument('--lang-tok-style', default='multilingual',
type=str, choices=['multilingual', 'mbart'],
help='language token styles')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
metavar='SRCTGT',
help='prepend to the beginning of source sentence the source or target '
'language token. (src/tgt)')
parser.add_argument('--decoder-langtok', action='store_true',
help='prepend to the beginning of target sentence the target language token')
parser.add_argument('--lang-tok-replacing-bos-eos', action='store_true', default=False)
parser.add_argument('--enable-lang-ids', default=False, action='store_true',
help='whether to include language IDs in samples')
parser.add_argument('--enable-reservsed-directions-shared-datasets', default=False, action='store_true',
help='whether to allow datasets be used in reversed directions')
parser.add_argument('--extra-data', help='a dictionary of data name to this path, \
e.g. {"mined", path_to_mined_data, "denoised": path_to_denoised_data}',
type=lambda uf: eval_str_dict(uf, type=str),
default=None)
parser.add_argument('--extra-lang-pairs', help='a dictionary of data name to the language pairs they serve, \
e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}',
type=lambda uf: eval_str_dict(uf, type=str),
default=None)
parser.add_argument('--langtoks-specs',
help='a list of comma separated data types that a set of language tokens to be specialized for, \
e.g. "main,dae,mined". There will be a set of language tokens added to the vocab to \
distinguish languages in different training data types. If not specified, default language \
tokens per languages will be added',
default='main',
type=csv_str_list,
)
parser.add_argument('--langtoks', help='a dictionary of how to add language tokens, \
e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": \
("src", "tgt")}, or {"mined": ("src.mined", "tgt")}',
default=None,
type=lambda uf: eval_str_dict(uf, type=str),
)
parser.add_argument('--sampling-weights-from-file',
help='a file contain a python dictionary of how to sample data sets, \
e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \
"mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }',
default=None, type=str,
)
parser.add_argument('--sampling-weights', help='a dictionary of how to sample data sets, \
e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \
"mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }',
default=None,
type=lambda uf: eval_str_dict(uf, type=str),
)
parser.add_argument('--virtual-epoch-size', default=1000000, type=int,
help='virtual epoch size to speed up data loading')
parser.add_argument('--virtual-data-size', default=None, type=int,
help='virtual data size of the whole joint dataset to speed'
'up data loading and have specific dynamic sampling strategy interval')
@classmethod
def load_langs(cls, args, **kwargs):
if args.lang_dict is None:
logger.warning(
'External language dictionary is not provided; '
'use lang-pairs to infer the set of supported languages. '
'The language ordering is not stable which might cause '
'misalignment in pretraining and finetuning.')
# infer from lang_pairs as it is
langs = list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')})
langs = sorted(langs)
logger.info(f'inferred language list: {langs}')
else:
with PathManager.open(args.lang_dict, "r", encoding="utf-8") as f:
langs = [lang.strip() for lang in f.readlines() if lang.strip()]
logger.info(f'loaded language list from {args.lang_dict} as they are ordered in file')
return langs
def has_sharded_data(self, split):
return split == 'train' and self._has_sharded_data
def _shared_collater(self):
return (
not (self.args.extra_data and 'mono_dae' in self.args.extra_data)
and (not self.args.lang_tok_replacing_bos_eos)
)
@classmethod
def prepare(cls, load_dictionary, args, **kargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
if not hasattr(args, 'shuffle_instance'):
args.shuffle_instance = False
if args.langtoks is None:
args.langtoks = {}
if 'main' not in args.langtoks:
src_langtok_spec = args.encoder_langtok if args.encoder_langtok else None
tgt_langtok_spec = 'tgt' if args.decoder_langtok else None
args.langtoks['main'] = (src_langtok_spec, tgt_langtok_spec)
def check_langs(langs, pairs):
messages = []
for src, tgt in pairs:
if src not in langs or tgt not in langs:
messages.append(f'language pair {src}-{tgt} contains languages '
'that are not in the language dictionary')
if len(messages) > 0:
raise ValueError(' '.join(messages) + f"; langs: {langs}")
if args.lang_pairs is None:
raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(',')
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
sorted_langs = cls.load_langs(args, **kargs)
check_langs(
sorted_langs,
([p.split('-') for p in args.lang_pairs] if training
else [(args.source_lang, args.target_lang)])
)
# load dictionaries
if training:
extra_lang_pairs = (
list({p for _, v in args.extra_lang_pairs.items() for p in v.split(',')})
if args.extra_lang_pairs else []
)
langs_to_load_dicts = sorted({x for p in args.lang_pairs + extra_lang_pairs for x in p.split('-')})
else:
langs_to_load_dicts = sorted([args.source_lang, args.target_lang])
dicts = OrderedDict()
supported_langtok_specs = args.langtoks_specs
for lang in langs_to_load_dicts:
paths = args.data.split(os.pathsep)
assert len(paths) > 0
dicts[lang] = load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
if len(dicts) > 0:
assert dicts[lang].pad() == dicts[langs_to_load_dicts[0]].pad()
assert dicts[lang].eos() == dicts[langs_to_load_dicts[0]].eos()
assert dicts[lang].unk() == dicts[langs_to_load_dicts[0]].unk()
# keep the langs consistent for all experiments with the same lang dict
# for finetuning regardless of whether lang_tok is required or not just add the tokens to the dicts
for spec in supported_langtok_specs:
for lang_to_add in sorted_langs:
dicts[lang].add_symbol(
MultilingualDatasetManager.get_lang_tok(lang_to_add, args, spec)
)
if args.lang_tok_style == 'mbart' or (args.extra_data and 'mono_dae' in args.extra_data):
dicts[lang].add_symbol('<mask>')
logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
return sorted_langs, dicts, training
TOKEN_STYLES = {
'mbart': '[{}]',
'multilingual': '__{}__'
}
@classmethod
def create_lang_dictionary(cls, langs):
unk = '<unk>'
# hack to remove symbols other than unk as they are not needed by lang dict
lang_dict = Dictionary(
pad=unk,
eos=unk,
unk=unk,
bos=unk,
)
for lang in langs:
lang_dict.add_symbol(lang)
return lang_dict
@classmethod
def get_lang_tok_style(cls, args):
return cls.TOKEN_STYLES[args.lang_tok_style]
@classmethod
def get_lang_tok(cls, lang, args, spec=''):
if spec is None:
return None
if spec.endswith('dae'):
lang = f'{lang}_dae'
elif spec.endswith('mined'):
lang = f'{lang}_mined'
return _lang_token(lang, cls.get_lang_tok_style(args))
@classmethod
def get_langtok_index(cls, lang_tok, dic):
idx = dic.index(lang_tok)
assert idx != dic.unk_index, \
'cannot find language token {} in the dictionary'.format(lang_tok)
return idx
def get_encoder_langtok(self, src_lang, tgt_lang, spec=None):
if spec is None:
return None
if spec and spec.startswith('src'):
if src_lang is None:
return None
langtok = self.get_lang_tok(src_lang, self.args, spec)
else:
if tgt_lang is None:
return None
langtok = self.get_lang_tok(tgt_lang, self.args, spec)
return self.get_langtok_index(langtok, self.dicts[src_lang if src_lang else tgt_lang])
def get_decoder_langtok(self, tgt_lang, spec=None):
if spec is None:
return None
langtok = self.get_lang_tok(tgt_lang, self.args, spec)
return self.get_langtok_index(langtok, self.dicts[tgt_lang])
@classmethod
def load_data(cls, path, vdict, impl):
dataset = data_utils.load_indexed_dataset(path, vdict, impl)
return dataset
@classmethod
def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
@classmethod
def mono_split_exists(cls, split, lang, data_path, dataset_impl):
filename = os.path.join(data_path, '{}.{}'.format(split, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
def load_lang_dataset(
self,
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
max_source_positions,
prepend_bos=False, load_alignments=False,
truncate_source=False,
):
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
else:
if k > 0:
break
else:
logger.error(f"Dataset not found: {data_path}, {split_k}, {src}, {tgt}")
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_dataset = self.load_data(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_datasets.append(
self.load_data(prefix + tgt, tgt_dict, dataset_impl)
)
logger.info('{} {} {}-{} {} examples'.format(
data_path, split_k, src, tgt, len(src_datasets[-1])
))
if not combine:
break
assert len(src_datasets) == len(tgt_datasets)
if len(src_datasets) == 1:
src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
return src_dataset, tgt_dataset, align_dataset
def load_langpair_dataset(
self,
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
left_pad_source, left_pad_target, max_source_positions,
max_target_positions, prepend_bos=False, load_alignments=False,
truncate_source=False,
src_dataset_transform_func=lambda dataset: dataset,
tgt_dataset_transform_func=lambda dataset: dataset,
src_lang_id=None,
tgt_lang_id=None,
langpairs_sharing_datasets=None,
):
if langpairs_sharing_datasets is not None:
src_dataset = langpairs_sharing_datasets.get((data_path, split, src), 'NotInCache')
tgt_dataset = langpairs_sharing_datasets.get((data_path, split, tgt), 'NotInCache')
align_dataset = langpairs_sharing_datasets.get((data_path, split, src, tgt), 'NotInCache')
        # a hack: if any one of them is not in the cache, reload them all
if (
langpairs_sharing_datasets is None
or src_dataset == 'NotInCache'
or tgt_dataset == 'NotInCache'
or align_dataset == 'NotInCache'
or split != 'train'
):
# source and target datasets can be reused in reversed directions to save memory
# reversed directions of valid and test data will not share source and target datasets
src_dataset, tgt_dataset, align_dataset = self.load_lang_dataset(
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
max_source_positions=max_source_positions,
prepend_bos=prepend_bos, load_alignments=load_alignments,
truncate_source=truncate_source,
)
src_dataset = src_dataset_transform_func(src_dataset)
tgt_dataset = tgt_dataset_transform_func(tgt_dataset)
if langpairs_sharing_datasets is not None:
langpairs_sharing_datasets[(data_path, split, src)] = src_dataset
langpairs_sharing_datasets[(data_path, split, tgt)] = tgt_dataset
langpairs_sharing_datasets[(data_path, split, src, tgt)] = align_dataset
return LanguagePairDataset(
src_dataset, src_dataset.sizes, src_dict,
tgt_dataset, tgt_dataset.sizes, tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
src_lang_id=src_lang_id,
tgt_lang_id=tgt_lang_id,
)
def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None):
if self.args.lang_tok_replacing_bos_eos:
# it is handled by self.alter_dataset_langtok
            # TODO: Unify with alter_dataset_langtok
return dataset
if spec is None:
return dataset
tok = self.get_encoder_langtok(src_lang, tgt_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None):
if self.args.lang_tok_replacing_bos_eos:
            # TODO: Unify with alter_dataset_langtok
# It is handled by self.alter_dataset_langtok.
# The complication in self.alter_dataset_langtok
# makes a unified framework difficult.
return dataset
# if not self.args.decoder_langtok:
if not spec:
return dataset
tok = self.get_decoder_langtok(target_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def alter_dataset_langtok(self, lang_pair_dataset,
src_eos=None, src_lang=None,
tgt_eos=None, tgt_lang=None,
src_langtok_spec=None, tgt_langtok_spec=None,
):
if src_langtok_spec is None and tgt_langtok_spec is None:
return lang_pair_dataset
new_src_eos = None
if src_langtok_spec is not None and src_eos is not None \
and (src_lang is not None or tgt_lang is not None):
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec)
else:
src_eos = None
new_tgt_bos = None
if tgt_langtok_spec and tgt_eos is not None and tgt_lang is not None:
new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec)
else:
tgt_eos = None
return TransformEosLangPairDataset(
lang_pair_dataset,
src_eos=src_eos,
new_src_eos=new_src_eos,
tgt_bos=tgt_eos,
new_tgt_bos=new_tgt_bos,
)
def load_a_dataset(
self,
split,
data_path,
src, src_dict,
tgt, tgt_dict,
combine,
prepend_bos=False,
langpairs_sharing_datasets=None,
data_category=None,
**extra_kwargs,
):
dataset_impl = self.args.dataset_impl
upsample_primary = self.args.upsample_primary
left_pad_source = self.args.left_pad_source
left_pad_target = self.args.left_pad_target
max_source_positions = self.args.max_source_positions
max_target_positions = self.args.max_target_positions
load_alignments = self.args.load_alignments
truncate_source = self.args.truncate_source
src_dataset_transform_func = self.src_dataset_tranform_func
tgt_dataset_transform_func = self.tgt_dataset_tranform_func
enable_lang_ids = self.args.enable_lang_ids
lang_dictionary = self.lang_dict
src_langtok_spec, tgt_langtok_spec = extra_kwargs['langtok_spec']
src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec)
tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec)
logger.info(f'{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}')
langpair_ds = self.load_langpair_dataset(
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
left_pad_source, left_pad_target, max_source_positions,
max_target_positions, prepend_bos, load_alignments,
truncate_source,
src_dataset_transform_func=lambda dataset: src_dataset_transform_func(src, tgt, dataset, src_langtok_spec),
tgt_dataset_transform_func=lambda dataset: tgt_dataset_transform_func(src, tgt, dataset, tgt_langtok_spec),
src_lang_id=_lang_id(lang_dictionary, src) if enable_lang_ids and lang_dictionary is not None else None,
tgt_lang_id=_lang_id(lang_dictionary, tgt) if enable_lang_ids and lang_dictionary is not None else None,
langpairs_sharing_datasets=langpairs_sharing_datasets,
)
if langpair_ds.tgt_sizes is None:
# hack to use src_sizes as the sizes for the whole pair dataset for ConcatDataset
langpair_ds.sizes = langpair_ds.src_sizes
else:
# use the max of two sides to define the size to help max positions filtering
langpair_ds.sizes = np.vstack([langpair_ds.src_sizes, langpair_ds.tgt_sizes]).max(axis=0)
assert langpair_ds.sizes.shape == langpair_ds.src_sizes.shape
# TODO: handle modified lang toks for mined data and dae data
if self.args.lang_tok_replacing_bos_eos:
ds = self.alter_dataset_langtok(
langpair_ds,
src_eos=self.dicts[src if src else tgt].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
ds = langpair_ds
return ds
def load_split_langpair_datasets(
self,
split,
data_param_list,
):
datasets = []
langpairs_sharing_datasets = {} if self.args.enable_reservsed_directions_shared_datasets else None
for param in data_param_list:
ds = self.load_a_dataset(split=split, langpairs_sharing_datasets=langpairs_sharing_datasets, **param)
datasets.append(ds)
return datasets
def get_data_paths_and_lang_pairs(self, split):
datapaths = {
'main': self.args.data,
}
lang_pairs = {
'main': self.lang_pairs
}
if split == 'train':
# only training data can have extra data and extra language pairs
if self.args.extra_data:
extra_datapaths = self.args.extra_data
datapaths.update(extra_datapaths)
if self.args.extra_lang_pairs:
extra_lang_pairs = {k: v.split(',') for k, v in self.args.extra_lang_pairs.items()}
lang_pairs.update(extra_lang_pairs)
return datapaths, lang_pairs
def get_split_data_param_list(self, split, epoch, shard_epoch=None):
def get_epoch(epoch, shard_epoch):
return epoch if shard_epoch is None else shard_epoch
# TODO: to extend with extra datasets and keys and loop over different shard data paths
param_list = []
data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split)
logger.info(f'langtoks settings: {self.args.langtoks}')
for data_category, paths in data_paths.items():
if data_category not in lang_pairs:
continue
# paths = self.args.data.split(os.pathsep)
paths = paths.split(os.pathsep)
assert len(paths) > 0
if len(paths) > 1:
self._has_sharded_data = True
self._num_shards[data_category] = len(paths)
# epoch starts with 1 now:
data_path = paths[(get_epoch(epoch, shard_epoch) - 1) % len(paths)]
if data_category in self.args.langtoks:
lang_tok_spec = self.args.langtoks[data_category]
else:
# default to None
lang_tok_spec = (None, None)
# infer langcode
lang_dirs = [lang_pair.split('-') for lang_pair in lang_pairs[data_category]]
lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs]
for src, tgt in lang_dirs:
                assert src is not None or data_category == 'mono_dae', (
                    f'error: src={src}, tgt={tgt} for data_category={data_category}')
# logger.info(f"preparing param for {data_category}: {src} - {tgt}")
param_list.append(
{
'key': f'{data_category}:{src}-{tgt}',
'data_path': data_path,
'split': split,
'src': src,
'src_dict': self.dicts[src] if src and data_category != 'mono_dae' else None,
'tgt': tgt,
'tgt_dict': self.dicts[tgt],
'data_category': data_category,
'langtok_spec': lang_tok_spec,
}
)
return param_list
def get_train_sampling_ratios(self, datasets, epoch=1):
data_sizes = [len(d) for _, d in datasets]
sampling_func = self.sampling_method.sampling_method_selector()
sample_ratios = sampling_func(data_sizes) if sampling_func is not None else None
return sample_ratios
def get_sampling_ratios(self, data_param_list, datasets, epoch):
if self.args.sampling_weights_from_file:
weights = load_sampling_weights(self.args.sampling_weights_from_file)
sample_ratios = [weights[k] for k, _ in datasets]
            logger.info('| ignoring --sampling-weights when loading sampling weights '
f'from file {self.args.sampling_weights_from_file}')
elif self.args.sampling_weights:
sample_ratios = [self.args.sampling_weights[k] for k, _ in datasets]
else:
# TODO: modify to provide sampling function more information other than sizes
sample_ratios = self.get_train_sampling_ratios(datasets, epoch)
if sample_ratios is not None:
logger.info('| Upsample ratios: {}'.format(
list(zip(map(lambda x: x['key'], data_param_list), sample_ratios))
))
assert len(sample_ratios) == len(datasets)
return sample_ratios
def load_split_datasets(
self,
split,
training,
epoch=1, combine=False, shard_epoch=None, **kwargs,
):
data_param_list = self.get_split_data_param_list(
split, epoch, shard_epoch=shard_epoch,
)
langpairs_sharing_datasets = {} if self.args.enable_reservsed_directions_shared_datasets else None
datasets = [
(
param['key'],
self.load_a_dataset(
combine=combine,
langpairs_sharing_datasets=langpairs_sharing_datasets,
**param
),
)
for param in data_param_list
]
return datasets, data_param_list
def load_into_sampled_multi_epoch_dataset(
self, split, datasets, data_param_list,
epoch, shard_epoch=None
):
sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
return SampledMultiEpochDataset(
OrderedDict(datasets),
epoch=epoch,
shard_epoch=shard_epoch,
            # valid and test datasets will degenerate to concatenated datasets:
sampling_ratios=sample_ratios,
eval_key=None,
batch_by_size=True,
collate_format=CollateFormat.single,
virtual_size=self.args.virtual_data_size,
split=split,
virtual_epoch_size=self.args.virtual_epoch_size,
# if not using lang_tok altering, simplified to use the same collater
shared_collater=self._shared_collater(),
)
def load_into_concat_dataset(self, split, datasets, data_param_list):
if self.args.lang_tok_replacing_bos_eos:
# TODO: to investigate why TransformEosLangPairDataset doesn't work with ConcatDataset
return SampledMultiDataset(
OrderedDict(datasets),
sampling_ratios=None,
eval_key=None,
batch_by_size=True,
collate_format=CollateFormat.single,
virtual_size=None,
split=split,
)
return ConcatDataset([d for _, d in datasets])
def load_sampled_multi_epoch_dataset(
self,
split,
training,
epoch=0, combine=False, shard_epoch=None, **kwargs
):
datasets, data_param_list = self.load_split_datasets(
split, training,
epoch, combine, shard_epoch=shard_epoch, **kwargs
)
if training and split == 'train':
return self.load_into_sampled_multi_epoch_dataset(
split, datasets, data_param_list, epoch, shard_epoch=shard_epoch)
else:
return self.load_into_concat_dataset(split, datasets, data_param_list)
| 44.803714
| 125
| 0.596146
|
8d1026852d02d44189001ca0094c6536d918dfbd
| 811
|
py
|
Python
|
data_types_continued.py
|
KennethBui/astr-119-session-3
|
b66417d2c68e56f54d8cafba318284f6f147e0be
|
[
"MIT"
] | null | null | null |
data_types_continued.py
|
KennethBui/astr-119-session-3
|
b66417d2c68e56f54d8cafba318284f6f147e0be
|
[
"MIT"
] | 1
|
2018-10-10T22:33:34.000Z
|
2018-10-10T22:33:34.000Z
|
data_types_continued.py
|
KennethBui/astr-119-session-3
|
b66417d2c68e56f54d8cafba318284f6f147e0be
|
[
"MIT"
] | 1
|
2018-10-18T01:42:03.000Z
|
2018-10-18T01:42:03.000Z
|
#string
s = "I am a string."
print(type(s)) #will say str
#Boolean
yes = True #Boolean True
print(type(yes))
no = False #Boolean False
print(type(no))
# List -- ordered and changeable
alpha_list = ["a", "b", "c"] #list initialization
print(type(alpha_list)) #will say list
print(type(alpha_list[0])) #will say string
alpha_list.append("d") #wil add "d" to the list end
print(alpha_list) #will print list
#Tuple -- ordered and unchangeable
alpha_tuple = ("a", "b", "c") #tuple initialization
print(type(alpha_tuple)) #will say tuple
try: #attempt the following line
alpha_tuple[2] = "d" #wont work and will raise TypeError
except TypeError: #when we get a TypeError
print("We can't add elements to tuples!") #print this message
print(alpha_tuple) #will print tuple
| 26.16129
| 62
| 0.686806
|
74127d2936cfd1b9570a617f6456b639db78de5c
| 1,998
|
py
|
Python
|
tests/step_defs/events/test_event_delete_cancel.py
|
pbraiders/pomponne-test-bdd
|
7f2973936318221f54e65e0f8bd839cad7216fa4
|
[
"MIT"
] | 1
|
2021-03-30T14:41:29.000Z
|
2021-03-30T14:41:29.000Z
|
tests/step_defs/events/test_event_delete_cancel.py
|
pbraiders/pomponne-test-bdd
|
7f2973936318221f54e65e0f8bd839cad7216fa4
|
[
"MIT"
] | null | null | null |
tests/step_defs/events/test_event_delete_cancel.py
|
pbraiders/pomponne-test-bdd
|
7f2973936318221f54e65e0f8bd839cad7216fa4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""Event deletion, cancel cases."""
from functools import partial
from pytest_bdd import (
given,
scenario,
then,
when,
)
from pbraiders.contact import ContactFakerFactory # pylint: disable=import-error
from pbraiders.event import EventFakerFactory # pylint: disable=import-error
from pbraiders.pages.events import EventPage # pylint: disable=import-error
from pbraiders.pages.events.actions import EventDeleteAction # pylint: disable=import-error
from pbraiders.pages import new_event # pylint: disable=import-error
from pbraiders.pages import sign_in # pylint: disable=import-error
scenario = partial(scenario, 'events/event_delete_cancel.feature')
@scenario('Not deleting an event')
def test_not_deleting_an_event() -> None:
"""Not deleting an event."""
@given('I am on a event page', target_fixture="page_event")
def page_event(the_config, the_browser, the_faker, the_database) -> EventPage:
"""I am on a event page."""
# Create new event
p_contact = ContactFakerFactory(_faker=the_faker).initialize(config={})
p_event = EventFakerFactory(_faker=the_faker).initialize(config={})
assert sign_in(driver=the_browser, config=the_config, user="simple") is True
assert new_event(driver=the_browser, config=the_config['urls'], contact=p_contact, event=p_event) is True
# Access the event page
p_page = EventPage(_driver=the_browser,
_config=the_config['urls'],
_event=p_event,
_contact=p_contact)
assert p_page.visit() is True
return p_page
@when('I cancel the deletion of the event')
def cancel_delete_event(page_event) -> None:
"""I cancel the deletion of the event."""
p_action = EventDeleteAction(_page=page_event)
p_action.delete().cancel()
@then('I should still access into the event page')
def still_access(page_event) -> None:
"""I should still access into the event page."""
assert page_event.visit() is True
| 35.052632
| 109
| 0.720721
|
9a7982eb4aea23a4e077c9cd9038a141322318d3
| 225
|
py
|
Python
|
Beecrowd/Python/ex1052.py
|
yurifalves/Exercises
|
a4e84ac76b9432f6c2efdeef6e5e2a093c39882d
|
[
"MIT"
] | null | null | null |
Beecrowd/Python/ex1052.py
|
yurifalves/Exercises
|
a4e84ac76b9432f6c2efdeef6e5e2a093c39882d
|
[
"MIT"
] | null | null | null |
Beecrowd/Python/ex1052.py
|
yurifalves/Exercises
|
a4e84ac76b9432f6c2efdeef6e5e2a093c39882d
|
[
"MIT"
] | null | null | null |
meses = {1: 'January', 2: 'February', 3: 'March', 4: 'April',
5: 'May', 6: 'June', 7: 'July', 8: 'August',
9: 'September', 10: 'October', 11: 'November', 12: 'December'}
mes = int(input())
print(meses[mes])
| 37.5
| 71
| 0.524444
|
c3f07b5a2242cd236396071ab999f8226205519c
| 11,398
|
py
|
Python
|
pylot/control/mpc/utils.py
|
alvkao58/pylot
|
ab49647236fcbc8aa08ec9650e0596e778e9ef85
|
[
"Apache-2.0"
] | null | null | null |
pylot/control/mpc/utils.py
|
alvkao58/pylot
|
ab49647236fcbc8aa08ec9650e0596e778e9ef85
|
[
"Apache-2.0"
] | null | null | null |
pylot/control/mpc/utils.py
|
alvkao58/pylot
|
ab49647236fcbc8aa08ec9650e0596e778e9ef85
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import bisect
global_config = {
'vehicle': { # configured for lincoln mkz
'length': 4.93,
'width': 1.86,
'offset': 1.0,
'wheel_length': 0.3,
'wheel_width': 0.2,
'track': 0.7,
'wheelbase': 2.85,
'max_steer': np.deg2rad(37.5),
'min_steer': np.deg2rad(-37.5),
'max_steer_speed': np.deg2rad(22.5),
'min_steer_speed': np.deg2rad(-22.5),
'max_vel': 20,
'min_vel': 0,
'max_accel': 3.0,
'min_accel': -5.0,
},
'controller': {
'R': np.diag([0.01, 0.10]), # Input cost
'Rd': np.diag([0.01, 1.0]), # Input difference cost
'Q': np.diag([1.0, 1.0, 0.01, 0.01]), # State cost
'Qf': np.diag([1.0, 1.0, 0.01, 0.01]), # Terminal state cost
'goal_threshold': 1.0, # Threshold for goal test [m]
'expiration_time': 100.0, # Expiration time [s]
'max_iteration': 5, # Max step iterations
'convergence_threshold': 0.1, # Threshold for convergence test
'horizon': 5, # Horizon
'index_horizon': 5, # Index horizon
},
}
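# Example access (values come straight from the dict above):
#   wheelbase = global_config['vehicle']['wheelbase']   # 2.85 [m]
#   Q = global_config['controller']['Q']                # 4x4 state-cost matrix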
def compute_curvature(vel, accel, yaw):
dx = vel * np.tan(yaw)
ddx = accel * np.tan(yaw)
dy = vel * np.tan(yaw)
ddy = accel * np.tan(yaw)
return (ddy * dx - ddx * dy) / ((dx**2 + dy**2)**(3 / 2))
def normalize_yaw(yaw_list):
if len(yaw_list) > 1:
for i in range(len(yaw_list) - 1):
delta_yaw = yaw_list[i + 1] - yaw_list[i]
while delta_yaw >= np.pi / 2.0:
yaw_list[i + 1] -= np.pi * 2.0
delta_yaw = yaw_list[i + 1] - yaw_list[i]
while delta_yaw <= -np.pi / 2.0:
yaw_list[i + 1] += np.pi * 2.0
delta_yaw = yaw_list[i + 1] - yaw_list[i]
return yaw_list
def zero_to_2_pi(angle):
return (angle + 360) % 360
class CubicSpline1D:
"""
1-dimensional cubic spline class. For technical details see: http://mathworld.wolfram.com/CubicSpline.html
"""
def __init__(self, x, y):
"""
Construct the 1-dimensional cubic spline.
:param x: list
List of x values.
:param y: list
List of y values.
"""
self.a = [item for item in y]
self.b, self.c, self.d, self.w = [], [], [], []
self.x = x
self.y = y
self.nx = len(x)
h = np.diff(x)
matrix_a = self._matrix_a(h)
matrix_b = self._matrix_b(h)
self.c = np.linalg.solve(matrix_a, matrix_b)
for i in range(self.nx - 1):
self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
(self.c[i + 1] + 2.0 * self.c[i]) / 3.0
self.b.append(tb)
def calc_der0(self, t):
"""
Calculate the 1st derivative evaluated at t.
:param t: float
Position along the 1-dimensional spline.
:return: float
1st derivative evaluated at t.
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self._search_index(t)
dx = t - self.x[i]
result = \
self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return result
def calc_der1(self, t):
"""
Calculate the 2nd derivative evaluated at t.
:param t: float
Position along the 1-dimensional spline.
:return: float
2nd derivative evaluated at t.
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self._search_index(t)
dx = t - self.x[i]
result = \
self.b[i] + 2.0 * self.c[i] * dx + \
3.0 * self.d[i] * dx ** 2.0
return result
def calc_der2(self, t):
"""
Calculate the 3rd derivative evaluated at t.
:param t: float
Position along the 1-dimensional spline.
:return: float
3rd derivative evaluated at t.
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self._search_index(t)
dx = t - self.x[i]
result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
return result
def _search_index(self, x):
"""
Search the spline for index closest to x.
:param x: float
Position along the 1-dimensional spline.
:return: int
Index closest to x.
"""
return bisect.bisect(self.x, x) - 1
def _matrix_a(self, h):
"""
Create the constants matrix a used in spline construction.
:param h: np.ndarray
List of deltas between values.
:return: np.ndarray
Constants matrix.
"""
matrix_a = np.zeros((self.nx, self.nx))
matrix_a[0, 0] = 1.0
for i in range(self.nx - 1):
if i != (self.nx - 2):
matrix_a[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
matrix_a[i + 1, i] = h[i]
matrix_a[i, i + 1] = h[i]
matrix_a[0, 1] = 0.0
matrix_a[self.nx - 1, self.nx - 2] = 0.0
matrix_a[self.nx - 1, self.nx - 1] = 1.0
return matrix_a
def _matrix_b(self, h):
"""
Create the 1st derivative matrix b used in spline construction.
:param h: np.ndarray
List of deltas between values
:return: np.ndarray
1st derivative matrix
"""
matrix_b = np.zeros(self.nx)
for i in range(self.nx - 2):
matrix_b[i + 1] = \
3.0 * (self.a[i + 2] - self.a[i + 1]) / \
h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]
return matrix_b
class CubicSpline2D:
"""
2-dimensional cubic spline class. For technical details see: http://mathworld.wolfram.com/CubicSpline.html
"""
def __init__(self, x, y, delta_s=1):
"""
Construct the 2-dimensional cubic spline.
:param x: list
List of x values.
:param y: list
List of y values
:param delta_s: float.
Distance between interpolated points.
"""
self.delta_s = delta_s # [m]
self.s = self._calc_s(x, y)
self.sx = CubicSpline1D(self.s, x)
self.sy = CubicSpline1D(self.s, y)
def calc_x(self, s):
"""
Calculate the x position along the spline at given s.
:param s: float
s position along the 2-dimensional spline.
:return: float
x position along the 2-dimensional spline.
"""
return self.sx.calc_der0(s)
def calc_y(self, s):
"""
Calculate the y position along the spline at given s.
:param s: float
s position along the 2-dimensional spline.
:return: float
y position along the 2-dimensional spline.
"""
return self.sy.calc_der0(s)
def calc_position(self, s):
"""
Calculate the x, y position along the spline at given s.
:param s: float
s position along the 2-dimensional spline.
:return: (float, float)
x, y position along the 2-dimensional spline.
"""
x = self.sx.calc_der0(s)
y = self.sy.calc_der0(s)
return x, y
def calc_curvature(self, s):
"""
Calculate the curvature along the spline at given s.
:param s: float
s position along the 2-dimensional spline.
:return: float
Curvature along the 2-dimensional spline.
"""
dx = self.sx.calc_der1(s)
ddx = self.sx.calc_der2(s)
dy = self.sy.calc_der1(s)
ddy = self.sy.calc_der2(s)
k = (ddy * dx - ddx * dy) / ((dx**2 + dy**2)**(3 / 2))
return k
def calc_yaw(self, s):
"""
Calculate the yaw in radians along the spline at given s.
:param s: float
s position along the 2-dimensional spline.
:return: float
Yaw along the 2-dimensional spline.
"""
dx = self.sx.calc_der1(s)
dy = self.sy.calc_der1(s)
yaw = np.arctan2(dy, dx)
return yaw
def _calc_s(self, x, y):
"""
Calculate the s values for interpolation given x, y.
:param x: list
List of x values.
:param y: list
List of y values.
:return: np.ndarray
List of s values for interpolation.
"""
dx = np.diff(x)
dy = np.diff(y)
self.ds = [np.sqrt(idx**2 + idy**2) for (idx, idy) in zip(dx, dy)]
s = [0]
s.extend(np.cumsum(self.ds))
s = np.unique(s)
return s
class Vehicle:
def __init__(self, config):
self.config = config
self.time = None # Time [s]
self.distance = None # Arc distance [m]
self.x = None # X coordinate [m]
self.y = None # Y coordinate [m]
self.curvature = None # Curvature [1/m]
self.vel = None # Tangential velocity [m/s]
self.yaw = None # Yaw [rad]
self.accel = None # Acceleration [m/s2]
self.steer = None # Steering [rad]
def update(self, time, distance, x, y, curvature, vel, yaw, accel, steer):
self.time = time
self.distance = distance
self.x = x
self.y = y
self.curvature = curvature
self.vel = vel
self.yaw = yaw
self.accel = accel
self.steer = steer
def get_position(self):
return np.asarray([self.x, self.y])
def get_state(self):
return np.asarray([self.x, self.y, self.vel, self.yaw])
class Trajectory:
def __init__(self,
t_list,
s_list,
x_list,
y_list,
k_list,
vel_list,
yaw_list,
accel_list=None,
steer_list=None):
self.t_list = list(t_list) # Time [s]
self.s_list = list(s_list) # Arc distance list [m]
self.x_list = list(x_list) # X coordinate list [m]
self.y_list = list(y_list) # Y coordinate list [m]
self.k_list = list(k_list) # Curvature list [1/m]
self.vel_list = list(vel_list) # Tangential velocity list [m/s]
self.yaw_list = list(normalize_yaw(yaw_list)) # Yaw list [rad]
if accel_list is not None:
self.accel_list = list(accel_list) # Acceleration list [m/s2]
else:
self.accel_list = accel_list
if steer_list is not None:
self.steer_list = list(steer_list) # Steering list [rad]
else:
self.steer_list = steer_list
def append_vel(self, vel):
self.vel_list.append(vel)
def append(self, t, s, x, y, k, vel, yaw, accel=None, steer=None):
self.t_list.append(t)
self.s_list.append(s)
self.x_list.append(x)
self.y_list.append(y)
self.k_list.append(k)
self.vel_list.append(vel)
self.yaw_list.append(yaw)
if accel is not None:
self.accel_list.append(accel)
if steer is not None:
self.steer_list.append(steer)
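if __name__ == '__main__':
    # Quick illustrative check (not part of the original module): fit a 2-D
    # spline through made-up waypoints and sample pose/curvature along it.
    demo_spline = CubicSpline2D(x=[0.0, 5.0, 10.0, 15.0], y=[0.0, 2.0, 1.0, 3.0])
    for s in np.arange(0.0, demo_spline.s[-1], demo_spline.delta_s):
        print(demo_spline.calc_position(s), demo_spline.calc_yaw(s),
              demo_spline.calc_curvature(s))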
| 30.31383
| 110
| 0.514038
|
b163b1a2e3e2ccbe768741b38d78254078171083
| 7,153
|
py
|
Python
|
test/azure/AcceptanceTests/test_paging.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
test/azure/AcceptanceTests/test_paging.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
test/azure/AcceptanceTests/test_paging.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from uuid import uuid4
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath
from paging import AutoRestPagingTestService
from custombaseurlpaging import AutoRestParameterizedHostTestPagingClient
from azure.core.exceptions import HttpResponseError
import pytest
@pytest.fixture
def client(credential, authentication_policy):
with AutoRestPagingTestService(credential, base_url="http://localhost:3000", authentication_policy=authentication_policy) as client:
yield client
@pytest.fixture
def custom_url_client(credential, authentication_policy):
with AutoRestParameterizedHostTestPagingClient(credential, host="host:3000", authentication_policy=authentication_policy) as client:
yield client
class TestPaging(object):
def test_get_no_item_name_pages(self, client):
pages = client.paging.get_no_item_name_pages()
items = [i for i in pages]
assert len(items) == 1
assert items[0].properties.id == 1
assert items[0].properties.name == "Product"
def test_get_null_next_link_name_pages(self, client):
pages = client.paging.get_null_next_link_name_pages()
items = [i for i in pages]
assert len(items) == 1
assert items[0].properties.id == 1
assert items[0].properties.name == "Product"
def test_get_single_pages_with_cb(self, client):
def cb(list_of_obj):
for obj in list_of_obj:
obj.marked = True
return list_of_obj
pages = client.paging.get_single_pages(cls=cb)
assert all(obj.marked for obj in pages)
def test_get_single_pages(self, client):
pages = client.paging.get_single_pages()
items = [i for i in pages]
assert len(items) == 1
assert items[0].properties.id == 1
assert items[0].properties.name == "Product"
def test_get_multiple_pages(self, client):
pages = client.paging.get_multiple_pages()
items = [i for i in pages]
assert len(items) == 10
def test_query_params(self, client):
pages = client.paging.get_with_query_params(required_query_parameter='100')
items = [i for i in pages]
assert len(items) == 2
def test_get_odata_multiple_pages(self, client):
pages = client.paging.get_odata_multiple_pages()
items = [i for i in pages]
assert len(items) == 10
def test_get_multiple_pages_retry_first(self, client):
pages = client.paging.get_multiple_pages_retry_first()
items = [i for i in pages]
assert len(items) == 10
def test_get_multiple_pages_retry_second(self, client):
pages = client.paging.get_multiple_pages_retry_second()
items = [i for i in pages]
assert len(items) == 10
def test_get_multiple_pages_with_offset(self, client):
from paging.models import PagingGetMultiplePagesWithOffsetOptions
options = PagingGetMultiplePagesWithOffsetOptions(offset=100)
pages = client.paging.get_multiple_pages_with_offset(paging_get_multiple_pages_with_offset_options=options)
items = [i for i in pages]
assert len(items) == 10
assert items[-1].properties.id == 110
def test_get_single_pages_failure(self, client):
pages = client.paging.get_single_pages_failure()
with pytest.raises(HttpResponseError):
list(pages)
def test_get_multiple_pages_failure(self, client):
pages = client.paging.get_multiple_pages_failure()
with pytest.raises(HttpResponseError):
list(pages)
def test_get_multiple_pages_failure_uri(self, client):
pages = client.paging.get_multiple_pages_failure_uri()
with pytest.raises(HttpResponseError):
list(pages)
def test_paging_fragment_path(self, client):
pages = client.paging.get_multiple_pages_fragment_next_link("1.6", "test_user")
items = [i for i in pages]
assert len(items) == 10
with pytest.raises(AttributeError):
# Be sure this method is not generated (Transform work)
client.paging.get_multiple_pages_fragment_next_link_next() # pylint: disable=E1101
def test_custom_url_get_pages_partial_url(self, custom_url_client):
paged = list(custom_url_client.paging.get_pages_partial_url("local"))
assert len(paged) == 2
assert paged[0].properties.id == 1
assert paged[1].properties.id == 2
def test_custom_url_get_pages_partial_url_operation(self, custom_url_client):
paged = list(custom_url_client.paging.get_pages_partial_url_operation("local"))
assert len(paged) == 2
assert paged[0].properties.id == 1
assert paged[1].properties.id == 2
def test_get_multiple_pages_lro(self, client):
"""LRO + Paging at the same time.
"""
poller = client.paging.begin_get_multiple_pages_lro()
pager = poller.result()
items = list(pager)
assert len(items) == 10
assert items[0].properties.id == 1
assert items[1].properties.id == 2
def test_item_name_with_xms_client_name(self, client):
pages = client.paging.get_paging_model_with_item_name_with_xms_client_name()
items = [i for i in pages]
assert len(items) == 1
def test_models(self):
from paging.models import OperationResult
if sys.version_info >= (3,5):
from paging.models._models_py3 import OperationResult as OperationResultPy3
assert OperationResult == OperationResultPy3
else:
from paging.models._models import OperationResult as OperationResultPy2
assert OperationResult == OperationResultPy2
| 38.875
| 136
| 0.691598
|
d9a739e3665ea89c2643020d55818e6fdcbe8066
| 1,140
|
py
|
Python
|
tests/period/test_add_subtract.py
|
shammellee/pendulum
|
bb179c8fb6ef92b7bfc471a46338abbfac9fafca
|
[
"MIT"
] | 5,049
|
2016-07-04T07:16:34.000Z
|
2022-03-31T07:41:48.000Z
|
tests/period/test_add_subtract.py
|
shammellee/pendulum
|
bb179c8fb6ef92b7bfc471a46338abbfac9fafca
|
[
"MIT"
] | 536
|
2016-07-05T22:46:29.000Z
|
2022-03-22T12:41:54.000Z
|
tests/period/test_add_subtract.py
|
shammellee/pendulum
|
bb179c8fb6ef92b7bfc471a46338abbfac9fafca
|
[
"MIT"
] | 373
|
2016-07-05T19:51:51.000Z
|
2022-03-23T16:57:46.000Z
|
# -*- coding: utf-8 -*-
import pendulum
def test_dst_add():
start = pendulum.datetime(2017, 3, 7, tz="America/Toronto")
end = start.add(days=6)
period = end - start
new_end = start + period
assert new_end == end
def test_dst_add_non_variable_units():
start = pendulum.datetime(2013, 3, 31, 1, 30, tz="Europe/Paris")
end = start.add(hours=1)
period = end - start
new_end = start + period
assert new_end == end
def test_dst_subtract():
start = pendulum.datetime(2017, 3, 7, tz="America/Toronto")
end = start.add(days=6)
period = end - start
new_start = end - period
assert new_start == start
def test_naive_subtract():
start = pendulum.naive(2013, 3, 31, 1, 30)
end = start.add(hours=1)
period = end - start
new_end = start + period
assert new_end == end
def test_negative_difference_subtract():
start = pendulum.datetime(2018, 5, 28, 12, 34, 56, 123456)
end = pendulum.datetime(2018, 1, 1)
print((start - end).in_words())
period = end - start
print(period.in_words())
new_end = start + period
assert new_end == end
| 21.509434
| 68
| 0.635088
|
de8d1e6cd2d374fc0cf0b5ca9dd5474e02cf4423
| 4,765
|
py
|
Python
|
applications/DamApplication/python_scripts/check_and_prepare_model_process_dam_mechanical.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 778
|
2017-01-27T16:29:17.000Z
|
2022-03-30T03:01:51.000Z
|
applications/DamApplication/python_scripts/check_and_prepare_model_process_dam_mechanical.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 6,634
|
2017-01-15T22:56:13.000Z
|
2022-03-31T15:03:36.000Z
|
applications/DamApplication/python_scripts/check_and_prepare_model_process_dam_mechanical.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 224
|
2017-02-07T14:12:49.000Z
|
2022-03-06T23:09:34.000Z
|
import KratosMultiphysics
def Factory(settings, Model):
    if not isinstance(settings, KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return CheckAndPrepareModelProcessDamMechanical(Model, settings["Parameters"])
## All the processes python should be derived from "Process"
class CheckAndPrepareModelProcessDamMechanical(KratosMultiphysics.Process):
"""Prepare the computing model part.
The computing model part is created if it does not exist. Nodes and elements
from the domain sub model parts are added to the computing model part.
Conditions are added from the processes sub model parts.
"""
def __init__(self, main_model_part, Parameters ):
KratosMultiphysics.Process.__init__(self)
self.main_model_part = main_model_part
self.mechanical_model_part_name = Parameters["mechanical_model_part_name"].GetString()
self.mechanical_domain_sub_model_part_list = Parameters["mechanical_domain_sub_model_part_list"]
self.mechanical_loads_sub_model_part_list = Parameters["mechanical_loads_sub_model_part_list"]
self.body_domain_sub_model_part_list = Parameters["body_domain_sub_model_part_list"]
self.body_domain_sub_sub_model_part_list = Parameters["body_domain_sub_sub_model_part_list"]
self.loads_sub_model_part_list = Parameters["loads_sub_model_part_list"]
self.loads_sub_sub_model_part_list = Parameters["loads_sub_sub_model_part_list"]
def Execute(self):
# Construct the computing model part: a model part which contains the mesh to compute
self.main_model_part.CreateSubModelPart(self.mechanical_model_part_name)
mechanical_model_part = self.main_model_part.GetSubModelPart(self.mechanical_model_part_name)
mechanical_model_part.ProcessInfo = self.main_model_part.ProcessInfo
mechanical_model_part.Properties = self.main_model_part.Properties
mechanical_model_part.Set(KratosMultiphysics.ACTIVE)
domain_parts = []
for i in range(self.mechanical_domain_sub_model_part_list.size()):
domain_parts.append(self.main_model_part.GetSubModelPart(self.mechanical_domain_sub_model_part_list[i].GetString()))
# Adding Nodes to Computing Model Part
list_of_ids = set()
for part in domain_parts:
for node in part.Nodes:
list_of_ids.add(node.Id)
mechanical_model_part.AddNodes(list(list_of_ids))
# Adding Elements to Computing Model Part
list_of_ids = set()
for part in domain_parts:
for elem in part.Elements:
list_of_ids.add(elem.Id)
mechanical_model_part.AddElements(list(list_of_ids))
# Adding Conditions to Computing Model Part
domain_conditions = []
for i in range(self.mechanical_loads_sub_model_part_list.size()):
domain_conditions.append(self.main_model_part.GetSubModelPart(self.mechanical_loads_sub_model_part_list[i].GetString()))
list_of_ids = set()
for part in domain_conditions:
for cond in part.Conditions:
list_of_ids.add(cond.Id)
mechanical_model_part.AddConditions(list(list_of_ids))
# Adding Computing Sub Sub Model Parts
# Body - Joints
for i in range(self.body_domain_sub_model_part_list.size()):
body_sub_model_part = self.main_model_part.GetSubModelPart(self.body_domain_sub_model_part_list[i].GetString())
mechanical_model_part.CreateSubModelPart(self.body_domain_sub_sub_model_part_list[i].GetString())
body_sub_sub_model_part = mechanical_model_part.GetSubModelPart(self.body_domain_sub_sub_model_part_list[i].GetString())
list_of_ids = set()
for node in body_sub_model_part.Nodes:
list_of_ids.add(node.Id)
body_sub_sub_model_part.AddNodes(list(list_of_ids))
list_of_ids = set()
for elem in body_sub_model_part.Elements:
list_of_ids.add(elem.Id)
body_sub_sub_model_part.AddElements(list(list_of_ids))
# Arc-Length
for i in range(self.loads_sub_model_part_list.size()):
load_sub_model_part = self.main_model_part.GetSubModelPart(self.loads_sub_model_part_list[i].GetString())
mechanical_model_part.CreateSubModelPart(self.loads_sub_sub_model_part_list[i].GetString())
load_sub_sub_model_part = mechanical_model_part.GetSubModelPart(self.loads_sub_sub_model_part_list[i].GetString())
list_of_ids = set()
for node in load_sub_model_part.Nodes:
list_of_ids.add(node.Id)
load_sub_sub_model_part.AddNodes(list(list_of_ids))
| 56.72619
| 132
| 0.729906
|
d074e1506989e513f365df6336cc7359802542be
| 1,462
|
py
|
Python
|
src/ikea_api/endpoints/item/item_ingka.py
|
sqr/ikea-api-client
|
687cbb532b3e59001437373b9c44af504c8c4baf
|
[
"MIT"
] | null | null | null |
src/ikea_api/endpoints/item/item_ingka.py
|
sqr/ikea-api-client
|
687cbb532b3e59001437373b9c44af504c8c4baf
|
[
"MIT"
] | null | null | null |
src/ikea_api/endpoints/item/item_ingka.py
|
sqr/ikea-api-client
|
687cbb532b3e59001437373b9c44af504c8c4baf
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from requests import Session
from ikea_api.constants import Constants, Secrets
from ikea_api.endpoints.item import generic_item_fetcher
from ikea_api.errors import ItemFetchError
def _fetch_items_specs(session: Session, items: list[str]):
url = (
"https://api.ingka.ikea.com/salesitem/communications/"
+ Constants.COUNTRY_CODE
+ "/"
+ Constants.LANGUAGE_CODE
)
params = {"itemNos": ",".join(items)}
response = session.get(url, params=params)
r_json = response.json()
if "data" not in r_json and "error" in r_json:
err_msg = None
if "message" in r_json["error"]:
error = r_json["error"]
r_err_msg = error["message"]
if r_err_msg == "no item numbers were found":
try:
err_msg = error["details"][0]["value"]["keys"]
except (KeyError, TypeError):
pass
if not err_msg:
err_msg = r_err_msg
else:
err_msg = r_json["error"]
raise ItemFetchError(err_msg)
return r_json
def fetch(items: str | list[str]):
headers = {
"Accept": "*/*",
"Referer": f"{Constants.BASE_URL}/{Constants.COUNTRY_CODE}/{Constants.LANGUAGE_CODE}/order/delivery/",
"x-client-id": Secrets.item_ingka_x_client_id,
}
return generic_item_fetcher(items, headers, _fetch_items_specs, 50)
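# Hedged usage sketch (not part of the original module): fetch() accepts either a
# single item code or a list of codes and, judging by the 50 passed above, chunks
# requests in batches of 50. The item number below is made up for illustration.
#
#     specs = fetch(["30457903"])
#     specs = fetch("30457903")   # a single code is accepted as well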
| 31.782609
| 110
| 0.610807
|
b78cac078e3f40e2d39ee8077dd56d77d4b01178
| 29,253
|
py
|
Python
|
ignite_trainer/_trainer.py
|
jinczing/AudioCLIP
|
b080fc946599290c91f9d3b203295e5968af1bf6
|
[
"MIT"
] | 304
|
2021-06-28T09:59:13.000Z
|
2022-03-30T17:33:52.000Z
|
ignite_trainer/_trainer.py
|
AK391/AudioCLIP
|
45327aa203839bfeb58681dd36c04fd493ee72f4
|
[
"MIT"
] | 5
|
2021-07-07T06:12:34.000Z
|
2021-07-23T15:44:06.000Z
|
ignite_trainer/_trainer.py
|
AK391/AudioCLIP
|
45327aa203839bfeb58681dd36c04fd493ee72f4
|
[
"MIT"
] | 34
|
2021-06-29T11:50:19.000Z
|
2022-03-02T12:01:36.000Z
|
import io
import os
import glob
import json
import time
import tqdm
import signal
import argparse
import numpy as np
import torch
import torch.utils.data
import torchvision as tv
import ignite.engine as ieng
import ignite.metrics as imet
import ignite.handlers as ihan
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Union
from typing import Optional
from termcolor import colored
from collections import defaultdict
from collections.abc import Iterable
from ignite_trainer import _utils
from ignite_trainer import _visdom
from ignite_trainer import _interfaces
VISDOM_HOST = 'localhost'
VISDOM_PORT = 8097
VISDOM_ENV_PATH = os.path.join(os.path.expanduser('~'), 'logs')
BATCH_TRAIN = 128
BATCH_TEST = 1024
WORKERS_TRAIN = 0
WORKERS_TEST = 0
EPOCHS = 100
LOG_INTERVAL = 50
SAVED_MODELS_PATH = os.path.join(os.path.expanduser('~'), 'saved_models')
def run(experiment_name: str,
visdom_host: str,
visdom_port: int,
visdom_env_path: str,
model_class: str,
model_args: Dict[str, Any],
optimizer_class: str,
optimizer_args: Dict[str, Any],
dataset_class: str,
dataset_args: Dict[str, Any],
batch_train: int,
batch_test: int,
workers_train: int,
workers_test: int,
transforms: List[Dict[str, Union[str, Dict[str, Any]]]],
epochs: int,
log_interval: int,
saved_models_path: str,
        performance_metrics: Optional[Dict[str, Any]] = None,
scheduler_class: Optional[str] = None,
scheduler_args: Optional[Dict[str, Any]] = None,
model_suffix: Optional[str] = None,
setup_suffix: Optional[str] = None,
orig_stdout: Optional[io.TextIOBase] = None,
skip_train_val: bool = False):
with _utils.tqdm_stdout(orig_stdout) as orig_stdout:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
experiment_name = f'{experiment_name}-x{num_gpus}'
transforms_train = list()
transforms_test = list()
for idx, transform in enumerate(transforms):
use_train = transform.get('train', True)
use_test = transform.get('test', True)
transform = _utils.load_class(transform['class'])(**transform['args'])
if use_train:
transforms_train.append(transform)
if use_test:
transforms_test.append(transform)
transforms[idx]['train'] = use_train
transforms[idx]['test'] = use_test
transforms_train = tv.transforms.Compose(transforms_train)
transforms_test = tv.transforms.Compose(transforms_test)
Dataset: Type = _utils.load_class(dataset_class)
train_loader, eval_loader = _utils.get_data_loaders(
Dataset,
dataset_args,
batch_train,
batch_test,
workers_train,
workers_test,
transforms_train,
transforms_test
)
Network: Type = _utils.load_class(model_class)
model: _interfaces.AbstractNet = Network(**model_args)
if hasattr(train_loader.dataset, 'class_weights'):
model.register_buffer('class_weights', train_loader.dataset.class_weights.clone().exp(), persistent=False)
if hasattr(train_loader.dataset, 'label_to_class_idx'):
model.label_to_class_idx = {idx: lb for idx, lb in train_loader.dataset.label_to_class_idx.items()}
model = torch.nn.DataParallel(model, device_ids=range(num_gpus))
model = model.to(device)
# disable all parameters
for p in model.parameters():
p.requires_grad = False
# enable only audio-related parameters
for p in model.module.audio.parameters():
p.requires_grad = True
# disable fbsp-parameters
for p in model.module.audio.fbsp.parameters():
p.requires_grad = False
# disable logit scaling
model.module.logit_scale_ai.requires_grad = False
model.module.logit_scale_at.requires_grad = False
# add only enabled parameters to optimizer's list
param_groups = [
{'params': [p for p in model.module.parameters() if p.requires_grad]}
]
# enable fbsp-parameters
for p in model.module.audio.fbsp.parameters():
p.requires_grad = True
# enable logit scaling
model.module.logit_scale_ai.requires_grad = True
model.module.logit_scale_at.requires_grad = True
# add fbsp- and logit scaling parameters to a separate group without weight decay
param_groups.append({
'params': [
p for p in model.module.audio.fbsp.parameters()
] + [
model.module.logit_scale_ai,
model.module.logit_scale_at
],
'weight_decay': 0.0
})
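        # Note (added for illustration, not part of the original training code):
        # the two appends above use PyTorch's standard per-parameter-group options,
        # where each dict can override the optimizer defaults. A minimal standalone
        # sketch of the same pattern, with made-up tensors:
        #
        #     import torch
        #     w = torch.nn.Parameter(torch.zeros(4))
        #     b = torch.nn.Parameter(torch.zeros(4))
        #     opt = torch.optim.SGD(
        #         [{'params': [w]},                        # default weight decay
        #          {'params': [b], 'weight_decay': 0.0}],  # no decay for this group
        #         lr=0.1, weight_decay=1e-4)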
Optimizer: Type = _utils.load_class(optimizer_class)
optimizer: torch.optim.Optimizer = Optimizer(
param_groups,
**{**optimizer_args, **{'lr': optimizer_args['lr'] * num_gpus}}
)
if scheduler_class is not None:
Scheduler: Type = _utils.load_class(scheduler_class)
if scheduler_args is None:
scheduler_args = dict()
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = Scheduler(optimizer, **scheduler_args)
else:
scheduler = None
model_short_name = ''.join([c for c in Network.__name__ if c == c.upper()])
model_name = '{}{}'.format(
model_short_name,
'-{}'.format(model_suffix) if model_suffix is not None else ''
)
visdom_env_name = '{}_{}_{}{}'.format(
Dataset.__name__,
experiment_name,
model_name,
'-{}'.format(setup_suffix) if setup_suffix is not None else ''
)
vis, vis_pid = _visdom.get_visdom_instance(visdom_host, visdom_port, visdom_env_name, visdom_env_path)
prog_bar_epochs = tqdm.tqdm(total=epochs, desc='Epochs', file=orig_stdout, dynamic_ncols=True, unit='epoch')
prog_bar_iters = tqdm.tqdm(desc='Batches', file=orig_stdout, dynamic_ncols=True)
num_params_total = sum(p.numel() for p in model.parameters())
num_params_train = sum(p.numel() for grp in optimizer.param_groups for p in grp['params'])
params_total_label = ''
params_train_label = ''
if num_params_total > 1e6:
num_params_total /= 1e6
params_total_label = 'M'
elif num_params_total > 1e3:
num_params_total /= 1e3
params_total_label = 'k'
if num_params_train > 1e6:
num_params_train /= 1e6
params_train_label = 'M'
elif num_params_train > 1e3:
num_params_train /= 1e3
params_train_label = 'k'
tqdm.tqdm.write(f'\n{Network.__name__}\n')
tqdm.tqdm.write('Total number of parameters: {:.2f}{}'.format(num_params_total, params_total_label))
tqdm.tqdm.write('Number of trainable parameters: {:.2f}{}'.format(num_params_train, params_train_label))
def training_step(engine: ieng.Engine, batch) -> torch.Tensor:
model.train()
model.epoch = engine.state.epoch
model.batch_idx = (engine.state.iteration - 1) % len(train_loader)
model.num_batches = len(train_loader)
optimizer.zero_grad()
audio, image, text = batch
if audio is not None:
audio = audio.to(device)
if image is not None:
image = image.to(device)
batch_indices = torch.arange(audio.shape[0], dtype=torch.int64, device=device)
_, loss = model(audio, image, text, batch_indices)
if loss.ndim > 0:
loss = loss.mean()
loss.backward(retain_graph=False)
            optimizer.step()
return loss.item()
def eval_step(_: ieng.Engine, batch) -> _interfaces.TensorPair:
model.eval()
with torch.no_grad():
audio, _, text = batch
((audio_features, _, _), _), _ = model(
audio=audio,
batch_indices=torch.arange(audio.shape[0], dtype=torch.int64, device=device)
)
audio_features = audio_features.unsqueeze(1)
((_, _, text_features), _), _ = model(
text=[
[eval_loader.dataset.class_idx_to_label[class_idx]]
for class_idx in sorted(eval_loader.dataset.class_idx_to_label.keys())
],
batch_indices=torch.arange(
len(eval_loader.dataset.class_idx_to_label), dtype=torch.int64, device=device
)
)
text_features = text_features.unsqueeze(1).transpose(0, 1)
logit_scale_at = torch.clamp(model.module.logit_scale_at.exp(), min=1.0, max=100.0)
y_pred = (logit_scale_at * audio_features @ text_features.transpose(-1, -2)).squeeze(1)
y = torch.zeros(
audio.shape[0], len(eval_loader.dataset.class_idx_to_label), dtype=torch.int8, device=device
)
for item_idx, labels in enumerate(text):
class_ids = list(sorted([
eval_loader.dataset.label_to_class_idx[lb] for lb in labels
]))
y[item_idx][class_ids] = 1
if model.module.multilabel:
y_pred = torch.sigmoid(y_pred / logit_scale_at - 0.5)
else:
y_pred = torch.softmax(y_pred, dim=-1)
y = y.argmax(dim=-1)
return y_pred, y
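        # Note (illustrative, not from the original source): eval_step scores every
        # audio clip against the text embeddings of all class labels, CLIP-style.
        # The core similarity computation reduces to a scaled matrix product over
        # made-up shapes:
        #
        #     import torch
        #     audio_feat = torch.randn(8, 1, 1024)   # (batch, 1, embed_dim)
        #     text_feat = torch.randn(1, 50, 1024)   # (1, num_classes, embed_dim)
        #     scale = torch.tensor(100.0)
        #     logits = (scale * audio_feat @ text_feat.transpose(-1, -2)).squeeze(1)
        #     assert logits.shape == (8, 50)         # one score per (clip, class)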
trainer = ieng.Engine(training_step)
validator_train = ieng.Engine(eval_step)
validator_eval = ieng.Engine(eval_step)
# placeholder for summary window
vis.text(
text='',
win=experiment_name,
env=visdom_env_name,
opts={'title': 'Summary', 'width': 940, 'height': 416},
append=vis.win_exists(experiment_name, visdom_env_name)
)
default_metrics = {
"Loss": {
"window_name": None,
"x_label": "#Epochs",
"y_label": model.loss_fn_name if not isinstance(model, torch.nn.DataParallel) else model.module.loss_fn_name,
"width": 940,
"height": 416,
"lines": [
{
"line_label": "SMA",
"object": imet.RunningAverage(output_transform=lambda x: x),
"test": False,
"update_rate": "iteration"
}
]
}
}
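        # Hypothetical example (added for illustration only) of a user-supplied
        # performance_metrics entry, using the keys consumed by the loop below;
        # 'ignite.metrics.Accuracy' is a real ignite metric, everything else is a
        # placeholder:
        #
        #     "Accuracy": {
        #         "window_name": "Accuracy",
        #         "x_label": "#Epochs",
        #         "y_label": "Accuracy",
        #         "width": 940,
        #         "height": 416,
        #         "lines": [{"class": "ignite.metrics.Accuracy", "args": {},
        #                    "line_label": "Top-1", "update_rate": "epoch",
        #                    "is_checkpoint": True}]
        #     }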
        performance_metrics = {**default_metrics, **(performance_metrics or {})}
checkpoint_metrics = list()
for scope_name, scope in performance_metrics.items():
scope['window_name'] = scope.get('window_name', scope_name) or scope_name
for line in scope['lines']:
if 'object' not in line:
line['object']: imet.Metric = _utils.load_class(line['class'])(**line['args'])
line['metric_label'] = '{}: {}'.format(scope['window_name'], line['line_label'])
line['update_rate'] = line.get('update_rate', 'epoch')
line_suffixes = list()
if line['update_rate'] == 'iteration':
line['object'].attach(trainer, line['metric_label'])
line['train'] = False
line['test'] = False
line_suffixes.append(' Train.')
if line.get('train', True):
line['object'].attach(validator_train, line['metric_label'])
line_suffixes.append(' Train.')
if line.get('test', True):
line['object'].attach(validator_eval, line['metric_label'])
line_suffixes.append(' Eval.')
if line.get('is_checkpoint', False):
checkpoint_metrics.append(line['metric_label'])
for line_suffix in line_suffixes:
_visdom.plot_line(
vis=vis,
window_name=scope['window_name'],
env=visdom_env_name,
line_label=line['line_label'] + line_suffix,
x_label=scope['x_label'],
y_label=scope['y_label'],
width=scope['width'],
height=scope['height'],
draw_marker=(line['update_rate'] == 'epoch')
)
if checkpoint_metrics:
score_name = 'performance'
def get_score(engine: ieng.Engine) -> float:
current_mode = getattr(engine.state.dataloader.iterable.dataset, dataset_args['training']['key'])
val_mode = dataset_args['training']['no']
score = 0.0
if current_mode == val_mode:
for metric_name in checkpoint_metrics:
try:
score += engine.state.metrics[metric_name]
except KeyError:
pass
return score
model_saver = ihan.ModelCheckpoint(
os.path.join(saved_models_path, visdom_env_name),
filename_prefix=visdom_env_name,
score_name=score_name,
score_function=get_score,
n_saved=3,
save_as_state_dict=True,
require_empty=False,
create_dir=True
)
validator_eval.add_event_handler(ieng.Events.EPOCH_COMPLETED, model_saver, {model_name: model})
if not skip_train_val:
@trainer.on(ieng.Events.STARTED)
def engine_started(engine: ieng.Engine):
log_validation(engine, False)
@trainer.on(ieng.Events.EPOCH_STARTED)
def reset_progress_iterations(engine: ieng.Engine):
prog_bar_iters.clear()
prog_bar_iters.n = 0
prog_bar_iters.last_print_n = 0
prog_bar_iters.start_t = time.time()
prog_bar_iters.last_print_t = time.time()
prog_bar_iters.total = len(engine.state.dataloader)
@trainer.on(ieng.Events.ITERATION_COMPLETED)
def log_training(engine: ieng.Engine):
prog_bar_iters.update(1)
num_iter = (engine.state.iteration - 1) % len(train_loader) + 1
early_stop = np.isnan(engine.state.output) or np.isinf(engine.state.output)
if num_iter % log_interval == 0 or num_iter == len(train_loader) or early_stop:
tqdm.tqdm.write(
'Epoch[{}] Iteration[{}/{}] Loss: {:.4f}'.format(
engine.state.epoch, num_iter, len(train_loader), engine.state.output
)
)
x_pos = engine.state.epoch + num_iter / len(train_loader) - 1
for scope_name, scope in performance_metrics.items():
for line in scope['lines']:
if line['update_rate'] == 'iteration':
line_label = '{} Train.'.format(line['line_label'])
line_value = engine.state.metrics[line['metric_label']]
if engine.state.epoch >= 1:
_visdom.plot_line(
vis=vis,
window_name=scope['window_name'],
env=visdom_env_name,
line_label=line_label,
x_label=scope['x_label'],
y_label=scope['y_label'],
x=np.full(1, x_pos),
y=np.full(1, line_value)
)
if early_stop:
tqdm.tqdm.write(colored('Early stopping due to invalid loss value.', 'red'))
trainer.terminate()
def log_validation(engine: ieng.Engine,
train: bool = True):
if train:
run_type = 'Train.'
data_loader = train_loader
validator = validator_train
else:
run_type = 'Eval.'
data_loader = eval_loader
validator = validator_eval
prog_bar_validation = tqdm.tqdm(
data_loader,
desc=f'Validation {run_type}',
file=orig_stdout,
dynamic_ncols=True,
leave=False
)
validator.run(prog_bar_validation)
prog_bar_validation.clear()
prog_bar_validation.close()
tqdm_info = [
'Epoch: {}'.format(engine.state.epoch)
]
for scope_name, scope in performance_metrics.items():
for line in scope['lines']:
if line['update_rate'] == 'epoch':
try:
line_label = '{} {}'.format(line['line_label'], run_type)
line_value = validator.state.metrics[line['metric_label']]
_visdom.plot_line(
vis=vis,
window_name=scope['window_name'],
env=visdom_env_name,
line_label=line_label,
x_label=scope['x_label'],
y_label=scope['y_label'],
x=np.full(1, engine.state.epoch),
y=np.full(1, line_value),
draw_marker=True
)
tqdm_info.append('{}: {:.4f}'.format(line_label, line_value))
except KeyError:
pass
tqdm.tqdm.write('{} results - {}'.format(run_type, '; '.join(tqdm_info)))
if not skip_train_val:
@trainer.on(ieng.Events.EPOCH_COMPLETED)
def log_validation_train(engine: ieng.Engine):
log_validation(engine, True)
@trainer.on(ieng.Events.EPOCH_COMPLETED)
def log_validation_eval(engine: ieng.Engine):
log_validation(engine, False)
if engine.state.epoch == 1:
summary = _utils.build_summary_str(
experiment_name=experiment_name,
model_short_name=model_name,
model_class=model_class,
model_args=model_args,
optimizer_class=optimizer_class,
optimizer_args=optimizer_args,
dataset_class=dataset_class,
dataset_args=dataset_args,
transforms=transforms,
epochs=epochs,
batch_train=batch_train,
log_interval=log_interval,
saved_models_path=saved_models_path,
scheduler_class=scheduler_class,
scheduler_args=scheduler_args
)
_visdom.create_summary_window(
vis=vis,
visdom_env_name=visdom_env_name,
experiment_name=experiment_name,
summary=summary
)
vis.save([visdom_env_name])
prog_bar_epochs.update(1)
if scheduler is not None:
scheduler.step(engine.state.epoch)
trainer.run(train_loader, max_epochs=epochs)
if vis_pid is not None:
tqdm.tqdm.write('Stopping visdom')
os.kill(vis_pid, signal.SIGTERM)
del vis
del train_loader
del eval_loader
prog_bar_iters.clear()
prog_bar_iters.close()
prog_bar_epochs.clear()
prog_bar_epochs.close()
tqdm.tqdm.write('\n')
def main():
with _utils.tqdm_stdout() as orig_stdout:
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, required=True)
parser.add_argument('-H', '--visdom-host', type=str, required=False)
parser.add_argument('-P', '--visdom-port', type=int, required=False)
parser.add_argument('-E', '--visdom-env-path', type=str, required=False)
parser.add_argument('-b', '--batch-train', type=int, required=False)
parser.add_argument('-B', '--batch-test', type=int, required=False)
parser.add_argument('-w', '--workers-train', type=int, required=False)
parser.add_argument('-W', '--workers-test', type=int, required=False)
parser.add_argument('-e', '--epochs', type=int, required=False)
parser.add_argument('-L', '--log-interval', type=int, required=False)
parser.add_argument('-M', '--saved-models-path', type=str, required=False)
parser.add_argument('-R', '--random-seed', type=int, required=False)
parser.add_argument('-s', '--suffix', type=str, required=False)
parser.add_argument('-S', '--skip-train-val', action='store_true', default=False)
args, unknown_args = parser.parse_known_args()
if args.batch_test is None:
args.batch_test = args.batch_train
if args.random_seed is not None:
args.suffix = '{}r-{}'.format(
'{}_'.format(args.suffix) if args.suffix is not None else '',
args.random_seed
)
np.random.seed(args.random_seed)
torch.random.manual_seed(args.random_seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
configs_found = list(sorted(glob.glob(os.path.expanduser(args.config))))
prog_bar_exps = tqdm.tqdm(
configs_found,
desc='Experiments',
unit='setup',
file=orig_stdout,
dynamic_ncols=True
)
for config_path in prog_bar_exps:
config = json.load(open(config_path))
if unknown_args:
tqdm.tqdm.write('\nParsing additional arguments...')
args_not_found = list()
for arg in unknown_args:
if arg.startswith('--'):
keys = arg.strip('-').split('.')
section = config
found = True
for key in keys:
if key in section:
section = section[key]
else:
found = False
break
if found:
override_parser = argparse.ArgumentParser()
section_nargs = None
section_type = type(section) if section is not None else str
                            if section_type is bool:
                                def infer_bool(x: str) -> bool:
                                    return x.lower() not in ('0', 'false', 'no')
                                section_type = infer_bool
if isinstance(section, Iterable) and section_type is not str:
section_nargs = '+'
section_type = {type(value) for value in section}
if len(section_type) == 1:
section_type = section_type.pop()
else:
section_type = str
override_parser.add_argument(arg, nargs=section_nargs, type=section_type)
overridden_args, _ = override_parser.parse_known_args(unknown_args)
overridden_args = vars(overridden_args)
overridden_key = arg.strip('-')
overriding_value = overridden_args[overridden_key]
section = config
old_value = None
for i, key in enumerate(keys, 1):
if i == len(keys):
old_value = section[key]
section[key] = overriding_value
else:
section = section[key]
tqdm.tqdm.write(
colored(f'Overriding "{overridden_key}": {old_value} -> {overriding_value}', 'magenta')
)
else:
args_not_found.append(arg)
if args_not_found:
tqdm.tqdm.write(
colored(
'\nThere are unrecognized arguments to override: {}'.format(
', '.join(args_not_found)
),
'red'
)
)
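            # Illustrative note (not part of the original script): the override
            # mechanism above maps dotted CLI flags onto nested config keys, so a
            # hypothetical invocation could tweak a single value from the JSON
            # config, e.g.
            #
            #     ... -c protocol.json --Optimizer.args.lr 0.0001
            #
            # 'protocol.json' is a placeholder and the flag must correspond to an
            # existing key path in the config file.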
config = defaultdict(None, config)
experiment_name = config['Setup']['name']
visdom_host = _utils.arg_selector(
args.visdom_host, config['Visdom']['host'], VISDOM_HOST
)
visdom_port = int(_utils.arg_selector(
args.visdom_port, config['Visdom']['port'], VISDOM_PORT
))
visdom_env_path = _utils.arg_selector(
args.visdom_env_path, config['Visdom']['env_path'], VISDOM_ENV_PATH
)
batch_train = int(_utils.arg_selector(
args.batch_train, config['Setup']['batch_train'], BATCH_TRAIN
))
batch_test = int(_utils.arg_selector(
args.batch_test, config['Setup']['batch_test'], BATCH_TEST
))
workers_train = _utils.arg_selector(
args.workers_train, config['Setup']['workers_train'], WORKERS_TRAIN
)
workers_test = _utils.arg_selector(
args.workers_test, config['Setup']['workers_test'], WORKERS_TEST
)
epochs = _utils.arg_selector(
args.epochs, config['Setup']['epochs'], EPOCHS
)
log_interval = _utils.arg_selector(
args.log_interval, config['Setup']['log_interval'], LOG_INTERVAL
)
saved_models_path = _utils.arg_selector(
args.saved_models_path, config['Setup']['saved_models_path'], SAVED_MODELS_PATH
)
model_class = config['Model']['class']
model_args = config['Model']['args']
optimizer_class = config['Optimizer']['class']
optimizer_args = config['Optimizer']['args']
if 'Scheduler' in config:
scheduler_class = config['Scheduler']['class']
scheduler_args = config['Scheduler']['args']
else:
scheduler_class = None
scheduler_args = None
dataset_class = config['Dataset']['class']
dataset_args = config['Dataset']['args']
transforms = config['Transforms']
performance_metrics = config['Metrics']
tqdm.tqdm.write(f'\nStarting experiment "{experiment_name}"\n')
run(
experiment_name=experiment_name,
visdom_host=visdom_host,
visdom_port=visdom_port,
visdom_env_path=visdom_env_path,
model_class=model_class,
model_args=model_args,
optimizer_class=optimizer_class,
optimizer_args=optimizer_args,
dataset_class=dataset_class,
dataset_args=dataset_args,
batch_train=batch_train,
batch_test=batch_test,
workers_train=workers_train,
workers_test=workers_test,
transforms=transforms,
epochs=epochs,
log_interval=log_interval,
saved_models_path=saved_models_path,
performance_metrics=performance_metrics,
scheduler_class=scheduler_class,
scheduler_args=scheduler_args,
model_suffix=config['Setup']['suffix'],
setup_suffix=args.suffix,
orig_stdout=orig_stdout,
skip_train_val=args.skip_train_val
)
prog_bar_exps.close()
tqdm.tqdm.write('\n')
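# Hedged usage sketch (not present in the original file): assuming this module is
# executed through an entry point that calls main(), a typical invocation would
# combine the flags registered above, for example
#
#     python -m ignite_trainer -c "protocols/*.json" -e 50 -b 64 -R 42
#
# The module path, config glob, epoch count, batch size and seed are placeholders.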
| 38.289267
| 125
| 0.534919
|
b0971c2dafa16c01eaa3e3bfd01d9066ecf3379c
| 837
|
py
|
Python
|
watch/urls.py
|
MachokaDaisy/py-go
|
0c280b84a52aee23c36cc8387ff4fa466077aa62
|
[
"MIT"
] | 1
|
2020-10-11T23:58:36.000Z
|
2020-10-11T23:58:36.000Z
|
watch/urls.py
|
MachokaDaisy/py-go
|
0c280b84a52aee23c36cc8387ff4fa466077aa62
|
[
"MIT"
] | null | null | null |
watch/urls.py
|
MachokaDaisy/py-go
|
0c280b84a52aee23c36cc8387ff4fa466077aa62
|
[
"MIT"
] | null | null | null |
"""watch URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
    path('', include('hood.urls')),
    path('accounts/', include('allauth.urls')),
]
| 34.875
| 77
| 0.698925
|
b7aa422747d2a9f29daa0d2d90fc8e86ad1d2748
| 260
|
py
|
Python
|
test/test_del_contact.py
|
k0Bas/python_training
|
80d0bb6f91376f7e90052a2d0619419cbf27f9d9
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_contact.py
|
k0Bas/python_training
|
80d0bb6f91376f7e90052a2d0619419cbf27f9d9
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_contact.py
|
k0Bas/python_training
|
80d0bb6f91376f7e90052a2d0619419cbf27f9d9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from model.group_contact import GroupContact
def test_delete_first_contact(app):
if app.contact.count(app) == 0:
app.contact.create(GroupContact(firstname="first", lastname="last"))
app.contact.delete_first_contact(app)
| 37.142857
| 76
| 0.726923
|
95ae67f522cd186caf90f0e00dc72bb148bf7aa8
| 570
|
py
|
Python
|
setup.py
|
gsgoncalves/tfrecord
|
b5d0bddf0cbe14e6aea9a1585d186e36a847248a
|
[
"MIT"
] | 9
|
2020-12-30T02:05:58.000Z
|
2022-03-31T02:05:06.000Z
|
setup.py
|
gsgoncalves/tfrecord
|
b5d0bddf0cbe14e6aea9a1585d186e36a847248a
|
[
"MIT"
] | null | null | null |
setup.py
|
gsgoncalves/tfrecord
|
b5d0bddf0cbe14e6aea9a1585d186e36a847248a
|
[
"MIT"
] | 2
|
2021-06-20T04:56:49.000Z
|
2022-03-10T01:35:24.000Z
|
import sys
from distutils.core import setup
from setuptools import find_packages
# List of runtime dependencies required by this built package
install_requires = []
if sys.version_info <= (2, 7):
install_requires += ['future', 'typing']
install_requires += ["numpy", "protobuf"]
setup(
name="tfrecord",
version="1.11",
description="TFRecord reader",
author="Vahid Kazemi",
author_email="vkazemi@gmail.com",
url="https://github.com/vahidk/tfrecord",
packages=find_packages(),
license="MIT",
install_requires=install_requires
)
| 23.75
| 61
| 0.707018
|
6277363797a60096da5285c39ded74900d4f61ae
| 559
|
py
|
Python
|
extract_layers.py
|
bmtgoncalves/TorinoCourse
|
7c365dcd1211817a8b4d85e178dcb20a84d142d9
|
[
"MIT"
] | 5
|
2017-05-03T14:33:27.000Z
|
2020-05-26T20:43:26.000Z
|
extract_layers.py
|
bmtgoncalves/TorinoCourse
|
7c365dcd1211817a8b4d85e178dcb20a84d142d9
|
[
"MIT"
] | null | null | null |
extract_layers.py
|
bmtgoncalves/TorinoCourse
|
7c365dcd1211817a8b4d85e178dcb20a84d142d9
|
[
"MIT"
] | 2
|
2019-03-21T03:41:20.000Z
|
2019-11-17T07:58:51.000Z
|
import json
import sys
data = json.load(open('GeoJSON/NUTS_RG_20M_2013.geojson'))
encoder = json.JSONEncoder()
for layer in range(0, 5):
countries = {}
countries["crs"] = data["crs"]
countries["type"] = data["type"]
countries["features"] = []
for feat in data["features"]:
if feat["properties"]["STAT_LEVL_"] == layer:
countries["features"].append(feat)
if len(countries["features"]) > 0:
output = encoder.encode(countries)
fp = open("GeoJSON/layer_%02u.geojson" % layer, "w")
print(output, file=fp)
fp.close()
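# Illustrative note (not part of the original script): features are routed by their
# NUTS level, so each one is expected to look roughly like
#
#     {"type": "Feature", "properties": {"STAT_LEVL_": 2, ...}, "geometry": {...}}
#
# and every level in 0-4 that actually occurs is written to
# GeoJSON/layer_00.geojson through GeoJSON/layer_04.geojson.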
| 22.36
| 58
| 0.642218
|
f371aae739291aa9e510fa3911c3df784746e7a1
| 239
|
py
|
Python
|
application/admin/tasks.py
|
oceanio/flask-boot
|
df115a7591218d69b4d5b71617e922a516b88df9
|
[
"Apache-2.0"
] | null | null | null |
application/admin/tasks.py
|
oceanio/flask-boot
|
df115a7591218d69b4d5b71617e922a516b88df9
|
[
"Apache-2.0"
] | null | null | null |
application/admin/tasks.py
|
oceanio/flask-boot
|
df115a7591218d69b4d5b71617e922a516b88df9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from application import celery, mail
from . import security
@celery.task
def send_security_email(msg):
mail.send(msg)
@security.send_mail_task
def delay_security_email(msg):
send_security_email.delay(msg)
| 18.384615
| 36
| 0.753138
|
9b5b1c845746b2cbe247a9e9e8f65f9bea1fc9cd
| 1,503
|
py
|
Python
|
qas_experimental_evaluation_project/utils.py
|
andreportela/qas_intrusion_detection
|
3daed80e075991185c814de5ef95f50686fc626b
|
[
"MIT"
] | null | null | null |
qas_experimental_evaluation_project/utils.py
|
andreportela/qas_intrusion_detection
|
3daed80e075991185c814de5ef95f50686fc626b
|
[
"MIT"
] | null | null | null |
qas_experimental_evaluation_project/utils.py
|
andreportela/qas_intrusion_detection
|
3daed80e075991185c814de5ef95f50686fc626b
|
[
"MIT"
] | null | null | null |
import os, time, datetime, subprocess, signal
def log_info(task, pid):
print(f"{task} pid({pid}) at {datetime.datetime.now()}")
def get_total_time_str(minutes=0, seconds=0):
seconds_waited_str = f"{seconds} seconds"
minutes_waited_str = f"{minutes} minutes"
if minutes and seconds:
return "". join([minutes_waited_str, " and ", seconds_waited_str])
elif minutes:
return minutes_waited_str
else:
return seconds_waited_str
def wait(minutes=0, seconds=0):
wait_time_str = get_total_time_str(minutes, seconds)
total_seconds_to_wait = (minutes * 60) + seconds
print(f"pid({os.getpid()}) waiting for {wait_time_str} at {datetime.datetime.now()}")
for seconds_remaining in range(total_seconds_to_wait, 0, -1):
print(f"{seconds_remaining} seconds remaining...")
time.sleep(1)
print(f"pid({os.getpid()}) resumed for {wait_time_str} at {datetime.datetime.now()}")
def run(server_args):
return subprocess.Popen(server_args)
def kill(process, name):
process_obj = process[name]["process"]
log_info(f"[killing {name} child]", process_obj.pid)
process_obj.send_signal(signal.SIGTERM)
def run_and_log(server_cmd, process_name):
log_info(f"[starting {process_name} main]", os.getpid())
process = run(server_cmd)
log_info(f"[starting {process_name} child]", process.pid)
return process
def execute_command(commands, name):
commands[name]['process'] = run_and_log(commands[name]['cmd'], name)
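# Hedged usage sketch (not part of the original module): execute_command() and
# kill() expect a dict keyed by process name, holding a 'cmd' argv list and a
# 'process' slot that gets filled in. All names below are made up.
#
#     commands = {"server": {"cmd": ["python", "server.py"], "process": None}}
#     execute_command(commands, "server")
#     wait(seconds=5)
#     kill(commands, "server")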
| 35.785714
| 89
| 0.701264
|
03533b261e0fa400c87687e1e00807eb27533aec
| 2,008
|
py
|
Python
|
model/gumbel_masks.py
|
slachapelle/anon_disentanglement_via_mechanism_sparsity
|
677f7e160f3532e1357a3c7f35f9f8f8529b389a
|
[
"Apache-2.0"
] | 6
|
2021-11-23T17:23:25.000Z
|
2022-03-22T21:15:01.000Z
|
model/gumbel_masks.py
|
slachapelle/anon_disentanglement_via_mechanism_sparsity
|
677f7e160f3532e1357a3c7f35f9f8f8529b389a
|
[
"Apache-2.0"
] | null | null | null |
model/gumbel_masks.py
|
slachapelle/anon_disentanglement_via_mechanism_sparsity
|
677f7e160f3532e1357a3c7f35f9f8f8529b389a
|
[
"Apache-2.0"
] | null | null | null |
import math
import torch
class GumbelSigmoid(torch.nn.Module):
def __init__(self, shape, freeze=False, drawhard=True, tau=1):
super(GumbelSigmoid, self).__init__()
self.shape = shape
        self.freeze = freeze
self.drawhard = drawhard
self.log_alpha = torch.nn.Parameter(torch.zeros(self.shape))
self.tau = tau
# useful to make sure these parameters will be pushed to the GPU
self.uniform = torch.distributions.uniform.Uniform(0, 1)
self.register_buffer("fixed_mask", torch.ones(shape))
self.reset_parameters()
def forward(self, bs):
if self.freeze:
y = self.fixed_mask.unsqueeze(0).expand((bs,) + self.shape)
return y
else:
shape = tuple([bs] + list(self.shape))
logistic_noise = self.sample_logistic(shape).type(self.log_alpha.type()).to(self.log_alpha.device)
y_soft = torch.sigmoid((self.log_alpha + logistic_noise) / self.tau)
if self.drawhard:
y_hard = (y_soft > 0.5).type(y_soft.type())
# This weird line does two things:
# 1) at forward, we get a hard sample.
# 2) at backward, we differentiate the gumbel sigmoid
y = y_hard.detach() - y_soft.detach() + y_soft
else:
y = y_soft
return y
def get_proba(self):
"""Returns probability of getting one"""
if self.freeze:
return self.fixed_mask
else:
return torch.sigmoid(self.log_alpha)
def reset_parameters(self):
        torch.nn.init.constant_(self.log_alpha, 5)  # yields an initial probability of ~0.99; inspired by DCDI
def sample_logistic(self, shape):
u = self.uniform.sample(shape)
return torch.log(u) - torch.log(1 - u)
def threshold(self):
proba = self.get_proba()
self.fixed_mask.copy_((proba > 0.5).type(proba.type()))
self.freeze = True
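# Hedged usage sketch (added for illustration; not in the original file):
#
#     mask = GumbelSigmoid(shape=(10, 5), drawhard=True, tau=1)
#     y = mask(bs=32)            # hard {0, 1} samples of shape (32, 10, 5)
#     probs = mask.get_proba()   # per-entry probability of sampling a 1
#     mask.threshold()           # freeze to the deterministic mask (proba > 0.5)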
| 34.033898
| 110
| 0.592629
|
4b328b421fb9c09622f3eb53a7b30a1093d3e18c
| 91,859
|
py
|
Python
|
pika/connection.py
|
hugovk/pika
|
03542ef616a2a849e8bfb0845427f50e741ea0c6
|
[
"BSD-3-Clause"
] | null | null | null |
pika/connection.py
|
hugovk/pika
|
03542ef616a2a849e8bfb0845427f50e741ea0c6
|
[
"BSD-3-Clause"
] | null | null | null |
pika/connection.py
|
hugovk/pika
|
03542ef616a2a849e8bfb0845427f50e741ea0c6
|
[
"BSD-3-Clause"
] | null | null | null |
"""Core connection objects"""
# disable too-many-lines
# pylint: disable=C0302
import abc
import ast
import copy
import functools
import logging
import math
import numbers
import platform
import socket
import warnings
import ssl
from pika import __version__
from pika import callback as pika_callback
import pika.channel
from pika import credentials as pika_credentials
from pika import exceptions
from pika import frame
from pika import heartbeat as pika_heartbeat
from pika import spec
import pika.compat
from pika.compat import (xrange, basestring, # pylint: disable=W0622
url_unquote, dictkeys, dict_itervalues,
dict_iteritems)
BACKPRESSURE_WARNING = ("Pika: Write buffer exceeded warning threshold at "
"%i bytes and an estimated %i frames behind")
PRODUCT = "Pika Python Client Library"
LOGGER = logging.getLogger(__name__)
class Parameters(object): # pylint: disable=R0902
"""Base connection parameters class definition
"""
# Declare slots to protect against accidental assignment of an invalid
# attribute
__slots__ = (
'_backpressure_detection',
'_blocked_connection_timeout',
'_channel_max',
'_client_properties',
'_connection_attempts',
'_credentials',
'_frame_max',
'_heartbeat',
'_host',
'_locale',
'_port',
'_retry_delay',
'_socket_timeout',
'_stack_timeout',
'_ssl_options',
'_virtual_host',
'_tcp_options'
)
DEFAULT_USERNAME = 'guest'
DEFAULT_PASSWORD = 'guest'
DEFAULT_BACKPRESSURE_DETECTION = False
DEFAULT_BLOCKED_CONNECTION_TIMEOUT = None
DEFAULT_CHANNEL_MAX = pika.channel.MAX_CHANNELS
DEFAULT_CLIENT_PROPERTIES = None
DEFAULT_CREDENTIALS = pika_credentials.PlainCredentials(DEFAULT_USERNAME,
DEFAULT_PASSWORD)
DEFAULT_CONNECTION_ATTEMPTS = 1
DEFAULT_FRAME_MAX = spec.FRAME_MAX_SIZE
DEFAULT_HEARTBEAT_TIMEOUT = None # None accepts server's proposal
DEFAULT_HOST = 'localhost'
DEFAULT_LOCALE = 'en_US'
DEFAULT_PORT = 5672
DEFAULT_RETRY_DELAY = 2.0
DEFAULT_SOCKET_TIMEOUT = 10.0 # socket.connect() timeout
    DEFAULT_STACK_TIMEOUT = 15.0  # full-stack TCP/[SSL]/AMQP bring-up timeout
DEFAULT_SSL = False
DEFAULT_SSL_OPTIONS = None
DEFAULT_SSL_PORT = 5671
DEFAULT_VIRTUAL_HOST = '/'
DEFAULT_TCP_OPTIONS = None
DEFAULT_HEARTBEAT_INTERVAL = DEFAULT_HEARTBEAT_TIMEOUT # DEPRECATED
def __init__(self):
self._backpressure_detection = None
self.backpressure_detection = self.DEFAULT_BACKPRESSURE_DETECTION
# If not None, blocked_connection_timeout is the timeout, in seconds,
# for the connection to remain blocked; if the timeout expires, the
# connection will be torn down, triggering the connection's
# on_close_callback
self._blocked_connection_timeout = None
self.blocked_connection_timeout = (
self.DEFAULT_BLOCKED_CONNECTION_TIMEOUT)
self._channel_max = None
self.channel_max = self.DEFAULT_CHANNEL_MAX
self._client_properties = None
self.client_properties = self.DEFAULT_CLIENT_PROPERTIES
self._connection_attempts = None
self.connection_attempts = self.DEFAULT_CONNECTION_ATTEMPTS
self._credentials = None
self.credentials = self.DEFAULT_CREDENTIALS
self._frame_max = None
self.frame_max = self.DEFAULT_FRAME_MAX
self._heartbeat = None
self.heartbeat = self.DEFAULT_HEARTBEAT_TIMEOUT
self._host = None
self.host = self.DEFAULT_HOST
self._locale = None
self.locale = self.DEFAULT_LOCALE
self._port = None
self.port = self.DEFAULT_PORT
self._retry_delay = None
self.retry_delay = self.DEFAULT_RETRY_DELAY
self._socket_timeout = None
self.socket_timeout = self.DEFAULT_SOCKET_TIMEOUT
self._stack_timeout = None
self.stack_timeout = self.DEFAULT_STACK_TIMEOUT
self._ssl_options = None
self.ssl_options = self.DEFAULT_SSL_OPTIONS
self._virtual_host = None
self.virtual_host = self.DEFAULT_VIRTUAL_HOST
self._tcp_options = None
self.tcp_options = self.DEFAULT_TCP_OPTIONS
def __repr__(self):
"""Represent the info about the instance.
:rtype: str
"""
return ('<%s host=%s port=%s virtual_host=%s ssl=%s>' %
(self.__class__.__name__, self.host, self.port,
self.virtual_host, bool(self.ssl_options)))
@property
def backpressure_detection(self):
"""
:returns: boolean indicating whether backpressure detection is
enabled. Defaults to `DEFAULT_BACKPRESSURE_DETECTION`.
"""
return self._backpressure_detection
@backpressure_detection.setter
def backpressure_detection(self, value):
"""
:param bool value: boolean indicating whether to enable backpressure
detection
"""
if not isinstance(value, bool):
raise TypeError('backpressure_detection must be a bool, '
'but got %r' % (value,))
self._backpressure_detection = value
@property
def blocked_connection_timeout(self):
"""
:returns: None or float blocked connection timeout. Defaults to
`DEFAULT_BLOCKED_CONNECTION_TIMEOUT`.
"""
return self._blocked_connection_timeout
@blocked_connection_timeout.setter
def blocked_connection_timeout(self, value):
"""
:param value: If not None, blocked_connection_timeout is the timeout, in
seconds, for the connection to remain blocked; if the timeout
expires, the connection will be torn down, triggering the
connection's on_close_callback
"""
if value is not None:
if not isinstance(value, numbers.Real):
raise TypeError('blocked_connection_timeout must be a Real '
'number, but got %r' % (value,))
if value < 0:
raise ValueError('blocked_connection_timeout must be >= 0, but '
'got %r' % (value,))
self._blocked_connection_timeout = value
@property
def channel_max(self):
"""
:returns: max preferred number of channels. Defaults to
`DEFAULT_CHANNEL_MAX`.
:rtype: int
"""
return self._channel_max
@channel_max.setter
def channel_max(self, value):
"""
:param int value: max preferred number of channels, between 1 and
`channel.MAX_CHANNELS`, inclusive
"""
if not isinstance(value, numbers.Integral):
raise TypeError('channel_max must be an int, but got %r' % (value,))
if value < 1 or value > pika.channel.MAX_CHANNELS:
raise ValueError('channel_max must be <= %i and > 0, but got %r' %
(pika.channel.MAX_CHANNELS, value))
self._channel_max = value
@property
def client_properties(self):
"""
:returns: None or dict of client properties used to override the fields
            in the default client properties reported to RabbitMQ via
`Connection.StartOk` method. Defaults to
`DEFAULT_CLIENT_PROPERTIES`.
"""
return self._client_properties
@client_properties.setter
def client_properties(self, value):
"""
:param value: None or dict of client properties used to override the
            fields in the default client properties reported to RabbitMQ via
`Connection.StartOk` method.
"""
if not isinstance(value, (dict, type(None),)):
raise TypeError('client_properties must be dict or None, '
'but got %r' % (value,))
# Copy the mutable object to avoid accidental side-effects
self._client_properties = copy.deepcopy(value)
@property
def connection_attempts(self):
"""
:returns: number of socket connection attempts. Defaults to
`DEFAULT_CONNECTION_ATTEMPTS`. See also `retry_delay`.
"""
return self._connection_attempts
@connection_attempts.setter
def connection_attempts(self, value):
"""
:param int value: number of socket connection attempts of at least 1.
See also `retry_delay`.
"""
if not isinstance(value, numbers.Integral):
raise TypeError('connection_attempts must be an int')
if value < 1:
raise ValueError('connection_attempts must be > 0, but got %r' %
(value,))
self._connection_attempts = value
@property
def credentials(self):
"""
:rtype: one of the classes from `pika.credentials.VALID_TYPES`. Defaults
to `DEFAULT_CREDENTIALS`.
"""
return self._credentials
@credentials.setter
def credentials(self, value):
"""
:param value: authentication credential object of one of the classes
from `pika.credentials.VALID_TYPES`
"""
if not isinstance(value, tuple(pika_credentials.VALID_TYPES)):
raise TypeError('Credentials must be an object of type: %r, but '
'got %r' % (pika_credentials.VALID_TYPES, value))
# Copy the mutable object to avoid accidental side-effects
self._credentials = copy.deepcopy(value)
@property
def frame_max(self):
"""
:returns: desired maximum AMQP frame size to use. Defaults to
`DEFAULT_FRAME_MAX`.
"""
return self._frame_max
@frame_max.setter
def frame_max(self, value):
"""
:param int value: desired maximum AMQP frame size to use between
`spec.FRAME_MIN_SIZE` and `spec.FRAME_MAX_SIZE`, inclusive
"""
if not isinstance(value, numbers.Integral):
raise TypeError('frame_max must be an int, but got %r' % (value,))
if value < spec.FRAME_MIN_SIZE:
raise ValueError('Min AMQP 0.9.1 Frame Size is %i, but got %r' %
(spec.FRAME_MIN_SIZE, value,))
elif value > spec.FRAME_MAX_SIZE:
raise ValueError('Max AMQP 0.9.1 Frame Size is %i, but got %r' %
(spec.FRAME_MAX_SIZE, value,))
self._frame_max = value
@property
def heartbeat(self):
"""
:returns: AMQP connection heartbeat timeout value for negotiation during
connection tuning or callable which is invoked during connection tuning.
None to accept broker's value. 0 turns heartbeat off. Defaults to
`DEFAULT_HEARTBEAT_TIMEOUT`.
:rtype: integer, None or callable
"""
return self._heartbeat
@heartbeat.setter
def heartbeat(self, value):
"""
:param int|None|callable value: Controls AMQP heartbeat timeout negotiation
during connection tuning. An integer value always overrides the value
proposed by broker. Use 0 to deactivate heartbeats and None to always
accept the broker's proposal. If a callable is given, it will be called
with the connection instance and the heartbeat timeout proposed by broker
as its arguments. The callback should return a non-negative integer that
will be used to override the broker's proposal.
"""
if value is not None:
if not isinstance(value, numbers.Integral) and not callable(value):
raise TypeError('heartbeat must be an int or a callable function, but got %r' %
(value,))
if not callable(value) and value < 0:
raise ValueError('heartbeat must >= 0, but got %r' % (value,))
self._heartbeat = value
@property
def host(self):
"""
:returns: hostname or ip address of broker. Defaults to `DEFAULT_HOST`.
:rtype: str
"""
return self._host
@host.setter
def host(self, value):
"""
:param str value: hostname or ip address of broker
"""
if not isinstance(value, basestring):
raise TypeError('host must be a str or unicode str, but got %r' %
(value,))
self._host = value
@property
def locale(self):
"""
:returns: locale value to pass to broker; e.g., 'en_US'. Defaults to
`DEFAULT_LOCALE`.
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, value):
"""
:param str value: locale value to pass to broker; e.g., "en_US"
"""
if not isinstance(value, basestring):
raise TypeError('locale must be a str, but got %r' % (value,))
self._locale = value
@property
def port(self):
"""
:returns: port number of broker's listening socket. Defaults to
`DEFAULT_PORT`.
:rtype: int
"""
return self._port
@port.setter
def port(self, value):
"""
:param int value: port number of broker's listening socket
"""
try:
self._port = int(value)
except (TypeError, ValueError):
raise TypeError('port must be an int, but got %r' % (value,))
@property
def retry_delay(self):
"""
:returns: interval between socket connection attempts; see also
`connection_attempts`. Defaults to `DEFAULT_RETRY_DELAY`.
:rtype: float
"""
return self._retry_delay
@retry_delay.setter
def retry_delay(self, value):
"""
:param int | float value: interval between socket connection attempts;
see also `connection_attempts`.
"""
if not isinstance(value, numbers.Real):
raise TypeError('retry_delay must be a float or int, but got %r' %
(value,))
self._retry_delay = value
@property
def socket_timeout(self):
"""
:returns: socket connect timeout in seconds. Defaults to
`DEFAULT_SOCKET_TIMEOUT`. The value None disables this timeout.
:rtype: float | None
"""
return self._socket_timeout
@socket_timeout.setter
def socket_timeout(self, value):
"""
:param int | float | None value: positive socket connect timeout in
seconds. None to disable this timeout.
"""
if value is not None:
if not isinstance(value, numbers.Real):
raise TypeError('socket_timeout must be a float or int, '
'but got %r' % (value,))
if value <= 0:
raise ValueError('socket_timeout must be > 0, but got %r' %
(value,))
value = float(value)
self._socket_timeout = value
@property
def stack_timeout(self):
"""
:returns: full protocol stack TCP/[SSL]/AMQP bring-up timeout in
seconds. Defaults to `DEFAULT_STACK_TIMEOUT`. The value None
disables this timeout.
:rtype: float
"""
return self._stack_timeout
@stack_timeout.setter
def stack_timeout(self, value):
"""
:param int | float | None value: positive full protocol stack
TCP/[SSL]/AMQP bring-up timeout in seconds. It's recommended to set
this value higher than `socket_timeout`. None to disable this
timeout.
"""
if value is not None:
if not isinstance(value, numbers.Real):
raise TypeError('stack_timeout must be a float or int, '
'but got %r' % (value,))
if value <= 0:
raise ValueError('stack_timeout must be > 0, but got %r' %
(value,))
value = float(value)
self._stack_timeout = value
@property
def ssl_options(self):
"""
:returns: None for plaintext or `pika.SSLOptions` instance for SSL/TLS.
:rtype: `pika.SSLOptions`|None
"""
return self._ssl_options
@ssl_options.setter
def ssl_options(self, value):
"""
:param `pika.SSLOptions`|None value: None for plaintext or
`pika.SSLOptions` instance for SSL/TLS. Defaults to None.
"""
if not isinstance(value, (SSLOptions, type(None))):
raise TypeError(
'ssl_options must be None or SSLOptions but got %r'
% (value, ))
self._ssl_options = value
@property
def virtual_host(self):
"""
:returns: rabbitmq virtual host name. Defaults to
`DEFAULT_VIRTUAL_HOST`.
"""
return self._virtual_host
@virtual_host.setter
def virtual_host(self, value):
"""
:param str value: rabbitmq virtual host name
"""
if not isinstance(value, basestring):
raise TypeError('virtual_host must be a str, but got %r' % (value,))
self._virtual_host = value
@property
def tcp_options(self):
"""
:returns: None or a dict of options to pass to the underlying socket
:rtype: dict|None
"""
return self._tcp_options
@tcp_options.setter
def tcp_options(self, value):
"""
:param dict|None value: None or a dict of options to pass to the underlying
socket. Currently supported are TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
and TCP_USER_TIMEOUT. Availability of these may depend on your platform.
"""
if not isinstance(value, (dict, type(None))):
raise TypeError('tcp_options must be a dict or None, but got %r' %
(value,))
self._tcp_options = value
class ConnectionParameters(Parameters):
"""Connection parameters object that is passed into the connection adapter
upon construction.
"""
# Protect against accidental assignment of an invalid attribute
__slots__ = ()
class _DEFAULT(object):
"""Designates default parameter value; internal use"""
pass
def __init__(self, # pylint: disable=R0913,R0914,R0912
host=_DEFAULT,
port=_DEFAULT,
virtual_host=_DEFAULT,
credentials=_DEFAULT,
channel_max=_DEFAULT,
frame_max=_DEFAULT,
heartbeat=_DEFAULT,
ssl_options=_DEFAULT,
connection_attempts=_DEFAULT,
retry_delay=_DEFAULT,
socket_timeout=_DEFAULT,
stack_timeout=_DEFAULT,
locale=_DEFAULT,
backpressure_detection=_DEFAULT,
blocked_connection_timeout=_DEFAULT,
client_properties=_DEFAULT,
tcp_options=_DEFAULT,
**kwargs):
"""Create a new ConnectionParameters instance. See `Parameters` for
default values.
:param str host: Hostname or IP Address to connect to
:param int port: TCP port to connect to
:param str virtual_host: RabbitMQ virtual host to use
:param pika.credentials.Credentials credentials: auth credentials
:param int channel_max: Maximum number of channels to allow
:param int frame_max: The maximum byte size for an AMQP frame
:param int|None|callable value: Controls AMQP heartbeat timeout negotiation
during connection tuning. An integer value always overrides the value
proposed by broker. Use 0 to deactivate heartbeats and None to always
accept the broker's proposal. If a callable is given, it will be called
with the connection instance and the heartbeat timeout proposed by broker
as its arguments. The callback should return a non-negative integer that
will be used to override the broker's proposal.
:param `pika.SSLOptions`|None ssl_options: None for plaintext or
`pika.SSLOptions` instance for SSL/TLS. Defaults to None.
:param int connection_attempts: Maximum number of retry attempts
        :param int|float retry_delay: Time to wait in seconds, before the next
            connection attempt; see also `connection_attempts`.
:param int|float socket_timeout: Positive socket connect timeout in
seconds.
:param int|float stack_timeout: Positive full protocol stack
(TCP/[SSL]/AMQP) bring-up timeout in seconds. It's recommended to
set this value higher than `socket_timeout`.
:param str locale: Set the locale value
:param bool backpressure_detection: DEPRECATED in favor of
`Connection.Blocked` and `Connection.Unblocked`. See
`Connection.add_on_connection_blocked_callback`.
:param blocked_connection_timeout: If not None,
the value is a non-negative timeout, in seconds, for the
connection to remain blocked (triggered by Connection.Blocked from
broker); if the timeout expires before connection becomes unblocked,
the connection will be torn down, triggering the adapter-specific
mechanism for informing client app about the closed connection:
passing `ConnectionBlockedTimeout` exception to on_close_callback
in asynchronous adapters or raising it in `BlockingConnection`.
:type blocked_connection_timeout: None, int, float
:param client_properties: None or dict of client properties used to
override the fields in the default client properties reported to
RabbitMQ via `Connection.StartOk` method.
:param heartbeat_interval: DEPRECATED; use `heartbeat` instead, and
don't pass both
:param tcp_options: None or a dict of TCP options to set for socket
"""
super(ConnectionParameters, self).__init__()
if backpressure_detection is not self._DEFAULT:
self.backpressure_detection = backpressure_detection
if blocked_connection_timeout is not self._DEFAULT:
self.blocked_connection_timeout = blocked_connection_timeout
if channel_max is not self._DEFAULT:
self.channel_max = channel_max
if client_properties is not self._DEFAULT:
self.client_properties = client_properties
if connection_attempts is not self._DEFAULT:
self.connection_attempts = connection_attempts
if credentials is not self._DEFAULT:
self.credentials = credentials
if frame_max is not self._DEFAULT:
self.frame_max = frame_max
if heartbeat is not self._DEFAULT:
self.heartbeat = heartbeat
try:
heartbeat_interval = kwargs.pop('heartbeat_interval')
except KeyError:
# Good, this one is deprecated
pass
else:
warnings.warn('heartbeat_interval is deprecated, use heartbeat',
DeprecationWarning, stacklevel=2)
if heartbeat is not self._DEFAULT:
raise TypeError('heartbeat and deprecated heartbeat_interval '
'are mutually-exclusive')
self.heartbeat = heartbeat_interval
if host is not self._DEFAULT:
self.host = host
if locale is not self._DEFAULT:
self.locale = locale
if retry_delay is not self._DEFAULT:
self.retry_delay = retry_delay
if socket_timeout is not self._DEFAULT:
self.socket_timeout = socket_timeout
if stack_timeout is not self._DEFAULT:
self.stack_timeout = stack_timeout
if ssl_options is not self._DEFAULT:
self.ssl_options = ssl_options
# Set port after SSL status is known
if port is not self._DEFAULT:
self.port = port
else:
self.port = self.DEFAULT_SSL_PORT if self.ssl_options else self.DEFAULT_PORT
if virtual_host is not self._DEFAULT:
self.virtual_host = virtual_host
if tcp_options is not self._DEFAULT:
self.tcp_options = tcp_options
if kwargs:
raise TypeError('Unexpected kwargs: %r' % (kwargs,))
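# Illustrative sketch (not part of pika itself): a minimal, explicitly configured
# ConnectionParameters instance; the host, credentials and timeouts below are
# placeholders.
#
#     params = ConnectionParameters(
#         host='rabbit.example.com',
#         port=5672,
#         virtual_host='/',
#         credentials=pika_credentials.PlainCredentials('guest', 'guest'),
#         heartbeat=600,
#         blocked_connection_timeout=300)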
class URLParameters(Parameters):
"""Connect to RabbitMQ via an AMQP URL in the format::
amqp://username:password@host:port/<virtual_host>[?query-string]
Ensure that the virtual host is URI encoded when specified. For example if
you are using the default "/" virtual host, the value should be `%2f`.
See `Parameters` for default values.
Valid query string values are:
- backpressure_detection:
DEPRECATED in favor of
`Connection.Blocked` and `Connection.Unblocked`. See
`Connection.add_on_connection_blocked_callback`.
- channel_max:
Override the default maximum channel count value
- client_properties:
dict of client properties used to override the fields in the default
client properties reported to RabbitMQ via `Connection.StartOk`
method
- connection_attempts:
Specify how many times pika should try and reconnect before it gives up
- frame_max:
Override the default maximum frame size for communication
- heartbeat:
Desired connection heartbeat timeout for negotiation. If not present
the broker's value is accepted. 0 turns heartbeat off.
- locale:
Override the default `en_US` locale value
- ssl_options:
None for plaintext; for SSL: dict of public ssl context-related
arguments that may be passed to :meth:`ssl.SSLSocket` as kwargs,
except `sock`, `server_side`,`do_handshake_on_connect`, `family`,
`type`, `proto`, `fileno`.
- retry_delay:
The number of seconds to sleep before attempting to connect on
connection failure.
- socket_timeout:
Socket connect timeout value in seconds (float or int)
- stack_timeout:
Positive full protocol stack (TCP/[SSL]/AMQP) bring-up timeout in
seconds. It's recommended to set this value higher than
`socket_timeout`.
- blocked_connection_timeout:
Set the timeout, in seconds, that the connection may remain blocked
(triggered by Connection.Blocked from broker); if the timeout
expires before connection becomes unblocked, the connection will be
torn down, triggering the connection's on_close_callback
- tcp_options:
Set the tcp options for the underlying socket.
:param str url: The AMQP URL to connect to
"""
# Protect against accidental assignment of an invalid attribute
__slots__ = ('_all_url_query_values',)
# The name of the private function for parsing and setting a given URL query
# arg is constructed by catenating the query arg's name to this prefix
_SETTER_PREFIX = '_set_url_'
def __init__(self, url):
"""Create a new URLParameters instance.
:param str url: The URL value
"""
super(URLParameters, self).__init__()
self._all_url_query_values = None
# Handle the Protocol scheme
#
# Fix up scheme amqp(s) to http(s) so urlparse won't barf on python
# prior to 2.7. On Python 2.6.9,
# `urlparse('amqp://127.0.0.1/%2f?socket_timeout=1')` produces an
# incorrect path='/%2f?socket_timeout=1'
if url[0:4].lower() == 'amqp':
url = 'http' + url[4:]
parts = pika.compat.urlparse(url)
if parts.scheme == 'https':
# Create default context which will get overridden by the
# ssl_options URL arg, if any
self.ssl_options = pika.SSLOptions(
context=ssl.create_default_context())
elif parts.scheme == 'http':
self.ssl_options = None
elif parts.scheme:
raise ValueError('Unexpected URL scheme %r; supported scheme '
'values: amqp, amqps' % (parts.scheme,))
if parts.hostname is not None:
self.host = parts.hostname
# Take care of port after SSL status is known
if parts.port is not None:
self.port = parts.port
else:
self.port = (self.DEFAULT_SSL_PORT if self.ssl_options
else self.DEFAULT_PORT)
if parts.username is not None:
self.credentials = pika_credentials.PlainCredentials(url_unquote(parts.username),
url_unquote(parts.password))
# Get the Virtual Host
if len(parts.path) > 1:
self.virtual_host = url_unquote(parts.path.split('/')[1])
# Handle query string values, validating and assigning them
self._all_url_query_values = pika.compat.url_parse_qs(parts.query)
for name, value in dict_iteritems(self._all_url_query_values):
try:
set_value = getattr(self, self._SETTER_PREFIX + name)
except AttributeError:
raise ValueError('Unknown URL parameter: %r' % (name,))
try:
(value,) = value
except ValueError:
raise ValueError('Expected exactly one value for URL parameter '
'%s, but got %i values: %s' % (
name, len(value), value))
set_value(value)
def _set_url_backpressure_detection(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
backpressure_detection = {'t': True, 'f': False}[value]
except KeyError:
raise ValueError('Invalid backpressure_detection value: %r' %
(value,))
self.backpressure_detection = backpressure_detection
def _set_url_blocked_connection_timeout(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
blocked_connection_timeout = float(value)
except ValueError as exc:
raise ValueError('Invalid blocked_connection_timeout value %r: %r' %
(value, exc,))
self.blocked_connection_timeout = blocked_connection_timeout
def _set_url_channel_max(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
channel_max = int(value)
except ValueError as exc:
raise ValueError('Invalid channel_max value %r: %r' % (value, exc,))
self.channel_max = channel_max
def _set_url_client_properties(self, value):
"""Deserialize and apply the corresponding query string arg"""
self.client_properties = ast.literal_eval(value)
def _set_url_connection_attempts(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
connection_attempts = int(value)
except ValueError as exc:
raise ValueError('Invalid connection_attempts value %r: %r' %
(value, exc,))
self.connection_attempts = connection_attempts
def _set_url_frame_max(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
frame_max = int(value)
except ValueError as exc:
raise ValueError('Invalid frame_max value %r: %r' % (value, exc,))
self.frame_max = frame_max
def _set_url_heartbeat(self, value):
"""Deserialize and apply the corresponding query string arg"""
if 'heartbeat_interval' in self._all_url_query_values:
raise ValueError('Deprecated URL parameter heartbeat_interval must '
'not be specified together with heartbeat')
try:
heartbeat_timeout = int(value)
except ValueError as exc:
raise ValueError('Invalid heartbeat value %r: %r' % (value, exc,))
self.heartbeat = heartbeat_timeout
def _set_url_heartbeat_interval(self, value):
"""Deserialize and apply the corresponding query string arg"""
warnings.warn('heartbeat_interval is deprecated, use heartbeat',
DeprecationWarning, stacklevel=2)
if 'heartbeat' in self._all_url_query_values:
raise ValueError('Deprecated URL parameter heartbeat_interval must '
'not be specified together with heartbeat')
try:
heartbeat_timeout = int(value)
except ValueError as exc:
raise ValueError('Invalid heartbeat_interval value %r: %r' %
(value, exc,))
self.heartbeat = heartbeat_timeout
def _set_url_locale(self, value):
"""Deserialize and apply the corresponding query string arg"""
self.locale = value
def _set_url_retry_delay(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
retry_delay = float(value)
except ValueError as exc:
raise ValueError('Invalid retry_delay value %r: %r' % (value, exc,))
self.retry_delay = retry_delay
def _set_url_socket_timeout(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
socket_timeout = float(value)
except ValueError as exc:
raise ValueError('Invalid socket_timeout value %r: %r' %
(value, exc,))
self.socket_timeout = socket_timeout
def _set_url_stack_timeout(self, value):
"""Deserialize and apply the corresponding query string arg"""
try:
stack_timeout = float(value)
except ValueError as exc:
raise ValueError('Invalid stack_timeout value %r: %r' %
(value, exc,))
self.stack_timeout = stack_timeout
def _set_url_ssl_options(self, value):
"""Deserialize and apply the corresponding query string arg
"""
options = ast.literal_eval(value)
if options is None:
if self.ssl_options is not None:
raise ValueError(
'Specified ssl_options=None URL arg is inconsistent with '
'the specified https URL scheme.')
else:
# Convert options to pika.SSLOptions via ssl.SSLSocket()
sock = socket.socket()
try:
ssl_sock = ssl.SSLSocket(sock=sock, **options)
try:
self.ssl_options = pika.SSLOptions(
context=ssl_sock.context,
server_hostname=ssl_sock.server_hostname)
finally:
ssl_sock.close()
finally:
sock.close()
def _set_url_tcp_options(self, value):
"""Deserialize and apply the corresponding query string arg"""
self.tcp_options = ast.literal_eval(value)
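    # Example (hedged sketch): ssl_options and tcp_options arrive as Python
    # dict literals in the query string and are parsed with ast.literal_eval.
    # A hypothetical URL ending in
    #
    #   ?tcp_options={'TCP_KEEPIDLE': 60}
    #
    # yields self.tcp_options == {'TCP_KEEPIDLE': 60}. In a real URL the
    # braces, quotes and spaces would be percent-encoded; url_parse_qs decodes
    # them before the literal is evaluated.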
class SSLOptions(object):
"""Class used to provide parameters for optional fine grained control of SSL
socket wrapping.
"""
# Protect against accidental assignment of an invalid attribute
__slots__ = ('context', 'server_hostname')
def __init__(self, context, server_hostname=None):
"""
:param ssl.SSLContext context: SSLContext instance
        :param str|None server_hostname: Hostname passed to
            SSLContext.wrap_socket(), used to enable SNI
"""
if not isinstance(context, ssl.SSLContext):
raise TypeError(
'context must be of ssl.SSLContext type, but got {!r}'.format(
context))
self.context = context
self.server_hostname = server_hostname
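# Example (illustrative sketch; the CA bundle path and host name are
# hypothetical): build SSLOptions from a default client-side context with SNI
# enabled, then attach it to connection parameters defined earlier in this
# module.
#
#   context = ssl.create_default_context(cafile='/path/to/ca_bundle.pem')
#   ssl_options = SSLOptions(context, server_hostname='broker.example.com')
#   params = ConnectionParameters(port=5671, ssl_options=ssl_options)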
class Connection(pika.compat.AbstractBase):
"""This is the core class that implements communication with RabbitMQ. This
class should not be invoked directly but rather through the use of an
adapter such as SelectConnection or BlockingConnection.
"""
    # Disable pylint messages concerning "method could be a function"
# pylint: disable=R0201
ON_CONNECTION_BACKPRESSURE = '_on_connection_backpressure'
ON_CONNECTION_CLOSED = '_on_connection_closed'
ON_CONNECTION_ERROR = '_on_connection_error'
ON_CONNECTION_OPEN_OK = '_on_connection_open_ok'
CONNECTION_CLOSED = 0
CONNECTION_INIT = 1
CONNECTION_PROTOCOL = 2
CONNECTION_START = 3
CONNECTION_TUNE = 4
CONNECTION_OPEN = 5
CONNECTION_CLOSING = 6 # client-initiated close in progress
_STATE_NAMES = {
CONNECTION_CLOSED: 'CLOSED',
CONNECTION_INIT: 'INIT',
CONNECTION_PROTOCOL: 'PROTOCOL',
CONNECTION_START: 'START',
CONNECTION_TUNE: 'TUNE',
CONNECTION_OPEN: 'OPEN',
CONNECTION_CLOSING: 'CLOSING'
}
def __init__(self,
parameters=None,
on_open_callback=None,
on_open_error_callback=None,
on_close_callback=None,
internal_connection_workflow=True):
"""Connection initialization expects an object that has implemented the
Parameters class and a callback function to notify when we have
successfully connected to the AMQP Broker.
Available Parameters classes are the ConnectionParameters class and
URLParameters class.
:param pika.connection.Parameters parameters: Read-only connection
parameters.
:param method on_open_callback: Called when the connection is opened:
on_open_callback(connection)
:param None | method on_open_error_callback: Called if the connection
can't be established or connection establishment is interrupted by
`Connection.close()`: on_open_error_callback(Connection, exception).
:param None | method on_close_callback: Called when a previously fully
open connection is closed:
`on_close_callback(Connection, exception)`, where `exception` is
either an instance of `exceptions.ConnectionClosed` if closed by
user or broker or exception of another type that describes the cause
of connection failure.
:param bool internal_connection_workflow: True for autonomous connection
establishment which is default; False for externally-managed
connection workflow via the `create_connection()` factory.
"""
self.connection_state = self.CONNECTION_CLOSED
# Determines whether we invoke the on_open_error_callback or
# on_close_callback. So that we don't lose track when state transitions
# to CONNECTION_CLOSING as the result of Connection.close() call during
# opening.
self._opened = False
# Value to pass to on_open_error_callback or on_close_callback when
# connection fails to be established or becomes closed
self._error = None # type: Exception
# Used to hold timer if configured for Connection.Blocked timeout
self._blocked_conn_timer = None
self._heartbeat_checker = None
# Set our configuration options
if parameters is not None:
# NOTE: Work around inability to copy ssl.SSLContext contained in
# our SSLOptions; ssl.SSLContext fails to implement __getnewargs__
saved_ssl_options = parameters.ssl_options
parameters.ssl_options = None
try:
self.params = copy.deepcopy(parameters)
self.params.ssl_options = saved_ssl_options
finally:
parameters.ssl_options = saved_ssl_options
else:
self.params = ConnectionParameters()
self._internal_connection_workflow = internal_connection_workflow
# Define our callback dictionary
self.callbacks = pika_callback.CallbackManager()
# Attributes that will be properly initialized by _init_connection_state
# and/or during connection handshake.
self.server_capabilities = None
self.server_properties = None
self._body_max_length = None
self.known_hosts = None
self._frame_buffer = None
self._channels = None
self._backpressure_multiplier = None
self._init_connection_state()
# Add the on connection error callback
self.callbacks.add(0, self.ON_CONNECTION_ERROR,
on_open_error_callback or
self._default_on_connection_error,
False)
# On connection callback
if on_open_callback:
self.add_on_open_callback(on_open_callback)
# On connection callback
if on_close_callback:
self.add_on_close_callback(on_close_callback)
self._set_connection_state(self.CONNECTION_INIT)
if self._internal_connection_workflow:
# Kick off full-stack connection establishment. It will complete
# asynchronously.
self._adapter_connect_stream()
else:
# Externally-managed connection workflow will proceed asynchronously
# using adapter-specific mechanism
LOGGER.debug('Using external connection workflow.')
def _init_connection_state(self):
"""Initialize or reset all of the internal state variables for a given
connection. On disconnect or reconnect all of the state needs to
be wiped.
"""
# TODO: probably don't need the state recovery logic since we don't
# test re-connection sufficiently (if at all), and users should
# just create a new instance of Connection when needed.
# So, just merge the pertinent logic into the constructor.
# Connection state
self._set_connection_state(self.CONNECTION_CLOSED)
# Negotiated server properties
self.server_properties = None
# Inbound buffer for decoding frames
self._frame_buffer = bytes()
# Dict of open channels
self._channels = dict()
# Data used for Heartbeat checking and back-pressure detection
self.bytes_sent = 0
self.bytes_received = 0
self.frames_sent = 0
self.frames_received = 0
self._heartbeat_checker = None
# Default back-pressure multiplier value
self._backpressure_multiplier = 10
# When closing, holds reason why
self._error = None
# Our starting point once connected, first frame received
self._add_connection_start_callback()
# Add a callback handler for the Broker telling us to disconnect.
# NOTE: As of RabbitMQ 3.6.0, RabbitMQ broker may send Connection.Close
# to signal error during connection setup (and wait a longish time
# before closing the TCP/IP stream). Earlier RabbitMQ versions
# simply closed the TCP/IP stream.
self.callbacks.add(0, spec.Connection.Close,
self._on_connection_close_from_broker)
if self.params.blocked_connection_timeout is not None:
if self._blocked_conn_timer is not None:
# Blocked connection timer was active when teardown was
# initiated
self._adapter_remove_timeout(self._blocked_conn_timer)
self._blocked_conn_timer = None
self.add_on_connection_blocked_callback(
self._on_connection_blocked)
self.add_on_connection_unblocked_callback(
self._on_connection_unblocked)
def add_backpressure_callback(self, callback):
"""Call method "callback" when pika believes backpressure is being
applied.
:param method callback: The method to call
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
self.callbacks.add(0, self.ON_CONNECTION_BACKPRESSURE, callback,
False)
def add_on_close_callback(self, callback):
"""Add a callback notification when the connection has closed. The
callback will be passed the connection and an exception instance. The
exception will either be an instance of `exceptions.ConnectionClosed` if
a fully-open connection was closed by user or broker or exception of
another type that describes the cause of connection closure/failure.
:param method callback: Callback to call on close, having the signature:
callback(pika.connection.Connection, exception)
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
self.callbacks.add(0, self.ON_CONNECTION_CLOSED, callback, False)
def add_on_connection_blocked_callback(self, callback):
"""RabbitMQ AMQP extension - Add a callback to be notified when the
connection gets blocked (`Connection.Blocked` received from RabbitMQ)
due to the broker running low on resources (memory or disk). In this
state RabbitMQ suspends processing incoming data until the connection
is unblocked, so it's a good idea for publishers receiving this
notification to suspend publishing until the connection becomes
unblocked.
See also `Connection.add_on_connection_unblocked_callback()`
See also `ConnectionParameters.blocked_connection_timeout`.
:param method callback: Callback to call on `Connection.Blocked`,
having the signature `callback(connection, pika.frame.Method)`,
where the method frame's `method` member is of type
`pika.spec.Connection.Blocked`
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
self.callbacks.add(0,
spec.Connection.Blocked,
functools.partial(callback, self),
one_shot=False)
def add_on_connection_unblocked_callback(self, callback):
"""RabbitMQ AMQP extension - Add a callback to be notified when the
connection gets unblocked (`Connection.Unblocked` frame is received from
RabbitMQ) letting publishers know it's ok to start publishing again.
:param method callback: Callback to call on
`Connection.Unblocked`, having the signature
`callback(connection, pika.frame.Method)`, where the method frame's
`method` member is of type `pika.spec.Connection.Unblocked`
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
self.callbacks.add(0,
spec.Connection.Unblocked,
functools.partial(callback, self),
one_shot=False)
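    # Example (illustrative sketch; `connection` is a hypothetical adapter
    # instance and the callback bodies are illustrative): pair the two
    # notifications so a publisher can pause while the broker is low on
    # resources.
    #
    #   def on_blocked(conn, method_frame):
    #       LOGGER.warning('Blocked by broker: %r', method_frame.method.reason)
    #
    #   def on_unblocked(conn, method_frame):
    #       LOGGER.info('Unblocked by broker; publishing may resume')
    #
    #   connection.add_on_connection_blocked_callback(on_blocked)
    #   connection.add_on_connection_unblocked_callback(on_unblocked)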
def add_on_open_callback(self, callback):
"""Add a callback notification when the connection has opened. The
callback will be passed the connection instance as its only arg.
:param method callback: Callback to call when open
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
self.callbacks.add(0, self.ON_CONNECTION_OPEN_OK, callback, False)
def add_on_open_error_callback(self, callback, remove_default=True):
"""Add a callback notification when the connection can not be opened.
The callback method should accept the connection instance that could not
connect, and either a string or an exception as its second arg.
:param method callback: Callback to call when can't connect, having
the signature _(Connection, Exception)
:param bool remove_default: Remove default exception raising callback
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
if remove_default:
self.callbacks.remove(0, self.ON_CONNECTION_ERROR,
self._default_on_connection_error)
self.callbacks.add(0, self.ON_CONNECTION_ERROR, callback, False)
def channel(self, channel_number=None, on_open_callback=None):
"""Create a new channel with the next available channel number or pass
        in a channel number to use. If you specify a channel number, it must
        be non-zero, but it is recommended that you let Pika manage the
        channel numbers.
:param int channel_number: The channel number to use, defaults to the
next available.
:param method on_open_callback: The callback when the channel is opened.
The callback will be invoked with the `Channel` instance as its only
argument.
:rtype: pika.channel.Channel
"""
if not self.is_open:
raise exceptions.ConnectionWrongStateError(
'Channel allocation requires an open connection: %s' % self)
if not channel_number:
channel_number = self._next_channel_number()
self._channels[channel_number] = self._create_channel(channel_number,
on_open_callback)
self._add_channel_callbacks(channel_number)
self._channels[channel_number].open()
return self._channels[channel_number]
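    # Example (illustrative sketch; `connection` is a hypothetical adapter
    # instance, and the queue name and payload are made up): open a channel
    # asynchronously and publish once it is ready.
    #
    #   def on_channel_open(channel):
    #       channel.basic_publish(exchange='', routing_key='hello', body=b'hi')
    #
    #   connection.channel(on_open_callback=on_channel_open)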
def close(self, reply_code=200, reply_text='Normal shutdown'):
"""Disconnect from RabbitMQ. If there are any open channels, it will
attempt to close them prior to fully disconnecting. Channels which
have active consumers will attempt to send a Basic.Cancel to RabbitMQ
to cleanly stop the delivery of messages prior to closing the channel.
:param int reply_code: The code number for the close
:param str reply_text: The text reason for the close
:raises pika.exceptions.ConnectionWrongStateError: if connection is
closed or closing.
"""
if self.is_closing or self.is_closed:
msg = (
'Illegal close({}, {!r}) request on {} because it '
'was called while connection state={}.'.format(
reply_code, reply_text, self,
self._STATE_NAMES[self.connection_state]))
LOGGER.error(msg)
raise exceptions.ConnectionWrongStateError(msg)
# NOTE The connection is either in opening or open state
# Initiate graceful closing of channels that are OPEN or OPENING
if self._channels:
self._close_channels(reply_code, reply_text)
prev_state = self.connection_state
# Transition to closing
self._set_connection_state(self.CONNECTION_CLOSING)
LOGGER.info("Closing connection (%s): %r", reply_code, reply_text)
if not self._opened:
# It was opening, but not fully open yet, so we won't attempt
# graceful AMQP Connection.Close.
LOGGER.info('Connection.close() is terminating stream and '
'bypassing graceful AMQP close, since AMQP is still '
'opening.')
error = exceptions.ConnectionOpenAborted(
'Connection.close() called before connection '
'finished opening: prev_state={} ({}): {!r}'.format(
self._STATE_NAMES[prev_state],
reply_code,
reply_text))
self._terminate_stream(error)
else:
self._error = exceptions.ConnectionClosedByClient(reply_code,
reply_text)
# If there are channels that haven't finished closing yet, then
# _on_close_ready will finally be called from _on_channel_cleanup once
# all channels have been closed
if not self._channels:
# We can initiate graceful closing of the connection right away,
# since no more channels remain
self._on_close_ready()
else:
LOGGER.info(
'Connection.close is waiting for %d channels to close: %s',
len(self._channels), self)
def set_backpressure_multiplier(self, value=10):
"""Alter the backpressure multiplier value. We set this to 10 by default.
This value is used to raise warnings and trigger the backpressure
callback.
:param int value: The multiplier value to set
"""
self._backpressure_multiplier = value
#
# Connection state properties
#
@property
def is_closed(self):
"""
        Returns True if the connection is fully closed.
"""
return self.connection_state == self.CONNECTION_CLOSED
@property
def is_closing(self):
"""
Returns True if connection is in the process of closing due to
client-initiated `close` request, but closing is not yet complete.
"""
return self.connection_state == self.CONNECTION_CLOSING
@property
def is_open(self):
"""
        Returns True if the connection is fully open.
"""
return self.connection_state == self.CONNECTION_OPEN
#
# Properties that reflect server capabilities for the current connection
#
@property
def basic_nack(self):
"""Specifies if the server supports basic.nack on the active connection.
:rtype: bool
"""
return self.server_capabilities.get('basic.nack', False)
@property
def consumer_cancel_notify(self):
"""Specifies if the server supports consumer cancel notification on the
active connection.
:rtype: bool
"""
return self.server_capabilities.get('consumer_cancel_notify', False)
@property
def exchange_exchange_bindings(self):
"""Specifies if the active connection supports exchange to exchange
bindings.
:rtype: bool
"""
return self.server_capabilities.get('exchange_exchange_bindings', False)
@property
def publisher_confirms(self):
"""Specifies if the active connection can use publisher confirmations.
:rtype: bool
"""
return self.server_capabilities.get('publisher_confirms', False)
@abc.abstractmethod
def _adapter_add_timeout(self, deadline, callback):
"""Adapters should override to call the callback after the
specified number of seconds have elapsed, using a timer, or a
thread, or similar.
:param float | int deadline: The number of seconds to wait to call
callback
:param method callback: The callback will be called without args.
:return: Handle that can be passed to `_adapter_remove_timeout()` to
cancel the callback.
"""
raise NotImplementedError
@abc.abstractmethod
def _adapter_remove_timeout(self, timeout_id):
"""Adapters should override: Remove a timeout
:param opaque timeout_id: The timeout handle to remove
"""
raise NotImplementedError
@abc.abstractmethod
def _adapter_add_callback_threadsafe(self, callback):
"""Requests a call to the given function as soon as possible in the
context of this connection's IOLoop thread.
NOTE: This is the only thread-safe method offered by the connection. All
other manipulations of the connection must be performed from the
connection's thread.
:param method callback: The callback method; must be callable.
"""
raise NotImplementedError
#
# Internal methods for managing the communication process
#
@abc.abstractmethod
def _adapter_connect_stream(self):
"""Subclasses should override to initiate stream connection
workflow asynchronously. Upon failed or aborted completion, they must
invoke `Connection._on_stream_terminated()`.
NOTE: On success, the stack will be up already, so there is no
corresponding callback.
"""
raise NotImplementedError
@abc.abstractmethod
def _adapter_disconnect_stream(self):
"""Asynchronously bring down the streaming transport layer and invoke
`Connection._on_stream_terminated()` asynchronously when complete.
:raises: NotImplementedError
"""
raise NotImplementedError
@abc.abstractmethod
def _adapter_emit_data(self, data):
"""Take ownership of data and send it to AMQP server as soon as
possible.
Subclasses must override this
:param bytes data:
"""
raise NotImplementedError
@abc.abstractmethod
def _adapter_get_write_buffer_size(self):
"""
Subclasses must override this
:return: Current size of output data buffered by the transport
:rtype: int
"""
raise NotImplementedError
def _add_channel_callbacks(self, channel_number):
"""Add the appropriate callbacks for the specified channel number.
:param int channel_number: The channel number for the callbacks
"""
# pylint: disable=W0212
# This permits us to garbage-collect our reference to the channel
# regardless of whether it was closed by client or broker, and do so
# after all channel-close callbacks.
self._channels[channel_number]._add_on_cleanup_callback(
self._on_channel_cleanup)
def _add_connection_start_callback(self):
"""Add a callback for when a Connection.Start frame is received from
the broker.
"""
self.callbacks.add(0, spec.Connection.Start, self._on_connection_start)
def _add_connection_tune_callback(self):
"""Add a callback for when a Connection.Tune frame is received."""
self.callbacks.add(0, spec.Connection.Tune, self._on_connection_tune)
def _check_for_protocol_mismatch(self, value):
"""Invoked when starting a connection to make sure it's a supported
protocol.
:param pika.frame.Method value: The frame to check
:raises: ProtocolVersionMismatch
"""
if ((value.method.version_major, value.method.version_minor) !=
spec.PROTOCOL_VERSION[0:2]):
raise exceptions.ProtocolVersionMismatch(frame.ProtocolHeader(),
value)
@property
def _client_properties(self):
"""Return the client properties dictionary.
:rtype: dict
"""
properties = {
'product': PRODUCT,
'platform': 'Python %s' % platform.python_version(),
'capabilities': {
'authentication_failure_close': True,
'basic.nack': True,
'connection.blocked': True,
'consumer_cancel_notify': True,
'publisher_confirms': True
},
'information': 'See http://pika.rtfd.org',
'version': __version__
}
if self.params.client_properties:
properties.update(self.params.client_properties)
return properties
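    # Example (hedged sketch): callers can extend or override these defaults by
    # supplying client_properties on the parameters object, as the check above
    # implies, e.g.
    #
    #   params = ConnectionParameters(
    #       client_properties={'connection_name': 'order-service'})
    #
    # The broker then shows this alongside the standard product/version fields
    # ('connection_name' here is an arbitrary, hypothetical key).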
def _close_channels(self, reply_code, reply_text):
"""Initiate graceful closing of channels that are in OPEN or OPENING
states, passing reply_code and reply_text.
:param int reply_code: The code for why the channels are being closed
:param str reply_text: The text reason for why the channels are closing
"""
assert self.is_open, str(self)
for channel_number in dictkeys(self._channels):
chan = self._channels[channel_number]
if not (chan.is_closing or chan.is_closed):
chan.close(reply_code, reply_text)
def _connect(self):
"""Attempt to connect to RabbitMQ
:rtype: bool
"""
warnings.warn('This method is deprecated, use Connection.connect',
DeprecationWarning)
def _create_channel(self, channel_number, on_open_callback):
"""Create a new channel using the specified channel number and calling
back the method specified by on_open_callback
:param int channel_number: The channel number to use
:param method on_open_callback: The callback when the channel is opened.
The callback will be invoked with the `Channel` instance as its only
argument.
"""
LOGGER.debug('Creating channel %s', channel_number)
return pika.channel.Channel(self, channel_number, on_open_callback)
def _create_heartbeat_checker(self):
"""Create a heartbeat checker instance if there is a heartbeat interval
set.
:rtype: pika.heartbeat.Heartbeat|None
"""
if self.params.heartbeat is not None and self.params.heartbeat > 0:
LOGGER.debug('Creating a HeartbeatChecker: %r',
self.params.heartbeat)
return pika_heartbeat.HeartbeatChecker(self, self.params.heartbeat)
return None
def _remove_heartbeat(self):
"""Stop the heartbeat checker if it exists
"""
if self._heartbeat_checker:
self._heartbeat_checker.stop()
self._heartbeat_checker = None
def _deliver_frame_to_channel(self, value):
"""Deliver the frame to the channel specified in the frame.
:param pika.frame.Method value: The frame to deliver
"""
        if value.channel_number not in self._channels:
# This should never happen and would constitute breach of the
# protocol
LOGGER.critical(
'Received %s frame for unregistered channel %i on %s',
value.NAME, value.channel_number, self)
return
# pylint: disable=W0212
self._channels[value.channel_number]._handle_content_frame(value)
def _detect_backpressure(self):
"""Attempt to calculate if TCP backpressure is being applied due to
our outbound buffer being larger than the average frame size over
a window of frames.
"""
avg_frame_size = self.bytes_sent / self.frames_sent
buffer_size = self._adapter_get_write_buffer_size()
if buffer_size > (avg_frame_size * self._backpressure_multiplier):
LOGGER.warning(BACKPRESSURE_WARNING, buffer_size,
int(buffer_size / avg_frame_size))
self.callbacks.process(0, self.ON_CONNECTION_BACKPRESSURE, self)
def _ensure_closed(self):
"""If the connection is not closed, close it."""
if self.is_open:
self.close()
def _get_body_frame_max_length(self):
"""Calculate the maximum amount of bytes that can be in a body frame.
:rtype: int
"""
return (
self.params.frame_max - spec.FRAME_HEADER_SIZE - spec.FRAME_END_SIZE
)
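    # Worked example (assuming pika's defaults, where frame_max is 131072 and
    # the spec constants are FRAME_HEADER_SIZE == 7 and FRAME_END_SIZE == 1):
    #
    #   131072 - 7 - 1 == 131064
    #
    # so each body frame may carry at most 131064 payload bytes.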
def _get_credentials(self, method_frame):
"""Get credentials for authentication.
:param pika.frame.MethodFrame method_frame: The Connection.Start frame
:rtype: tuple(str, str)
"""
(auth_type, response) = self.params.credentials.response_for(
method_frame.method)
if not auth_type:
raise exceptions.AuthenticationError(self.params.credentials.TYPE)
self.params.credentials.erase_credentials()
return auth_type, response
def _has_pending_callbacks(self, value):
"""Return true if there are any callbacks pending for the specified
frame.
:param pika.frame.Method value: The frame to check
:rtype: bool
"""
return self.callbacks.pending(value.channel_number, value.method)
def _is_method_frame(self, value):
"""Returns true if the frame is a method frame.
:param pika.frame.Frame value: The frame to evaluate
:rtype: bool
"""
return isinstance(value, frame.Method)
def _is_protocol_header_frame(self, value):
"""Returns True if it's a protocol header frame.
:rtype: bool
"""
return isinstance(value, frame.ProtocolHeader)
def _next_channel_number(self):
"""Return the next available channel number or raise an exception.
:rtype: int
"""
limit = self.params.channel_max or pika.channel.MAX_CHANNELS
if len(self._channels) >= limit:
raise exceptions.NoFreeChannels()
for num in xrange(1, len(self._channels) + 1):
if num not in self._channels:
return num
return len(self._channels) + 1
def _on_channel_cleanup(self, channel):
"""Remove the channel from the dict of channels when Channel.CloseOk is
sent. If connection is closing and no more channels remain, proceed to
`_on_close_ready`.
:param pika.channel.Channel channel: channel instance
"""
try:
del self._channels[channel.channel_number]
LOGGER.debug('Removed channel %s', channel.channel_number)
except KeyError:
LOGGER.error('Channel %r not in channels',
channel.channel_number)
if self.is_closing:
if not self._channels:
# Initiate graceful closing of the connection
self._on_close_ready()
else:
# Once Connection enters CLOSING state, all remaining channels
# should also be in CLOSING state. Deviation from this would
# prevent Connection from completing its closing procedure.
channels_not_in_closing_state = [
chan for chan in dict_itervalues(self._channels)
if not chan.is_closing]
if channels_not_in_closing_state:
LOGGER.critical(
'Connection in CLOSING state has non-CLOSING '
'channels: %r', channels_not_in_closing_state)
def _on_close_ready(self):
"""Called when the Connection is in a state that it can close after
a close has been requested by client. This happens after all of the
channels are closed that were open when the close request was made.
"""
if self.is_closed:
LOGGER.warning('_on_close_ready invoked when already closed')
return
# NOTE: Assuming self._error is instance of exceptions.ConnectionClosed
self._send_connection_close(self._error.reply_code,
self._error.reply_text)
def _on_stream_connected(self):
"""Invoked when the socket is connected and it's time to start speaking
AMQP with the broker.
"""
self._set_connection_state(self.CONNECTION_PROTOCOL)
# Start the communication with the RabbitMQ Broker
self._send_frame(frame.ProtocolHeader())
def _on_blocked_connection_timeout(self):
""" Called when the "connection blocked timeout" expires. When this
happens, we tear down the connection
"""
self._blocked_conn_timer = None
self._terminate_stream(
exceptions.ConnectionBlockedTimeout(
'Blocked connection timeout expired.'))
def _on_connection_blocked(self, _connection, method_frame):
"""Handle Connection.Blocked notification from RabbitMQ broker
:param pika.frame.Method method_frame: method frame having `method`
member of type `pika.spec.Connection.Blocked`
"""
LOGGER.warning('Received %s from broker', method_frame)
if self._blocked_conn_timer is not None:
# RabbitMQ is not supposed to repeat Connection.Blocked, but it
# doesn't hurt to be careful
LOGGER.warning('_blocked_conn_timer %s already set when '
'_on_connection_blocked is called',
self._blocked_conn_timer)
else:
self._blocked_conn_timer = self._adapter_add_timeout(
self.params.blocked_connection_timeout,
self._on_blocked_connection_timeout)
def _on_connection_unblocked(self, _connection, method_frame):
"""Handle Connection.Unblocked notification from RabbitMQ broker
:param pika.frame.Method method_frame: method frame having `method`
member of type `pika.spec.Connection.Blocked`
"""
LOGGER.info('Received %s from broker', method_frame)
if self._blocked_conn_timer is None:
# RabbitMQ is supposed to pair Connection.Blocked/Unblocked, but it
# doesn't hurt to be careful
LOGGER.warning('_blocked_conn_timer was not active when '
'_on_connection_unblocked called')
else:
self._adapter_remove_timeout(self._blocked_conn_timer)
self._blocked_conn_timer = None
def _on_connection_close_from_broker(self, method_frame):
"""Called when the connection is closed remotely via Connection.Close
frame from broker.
:param pika.frame.Method method_frame: The Connection.Close frame
"""
LOGGER.debug('_on_connection_close_from_broker: frame=%s', method_frame)
self._terminate_stream(
exceptions.ConnectionClosedByBroker(method_frame.method.reply_code,
method_frame.method.reply_text))
def _on_connection_close_ok(self, method_frame):
"""Called when Connection.CloseOk is received from remote.
:param pika.frame.Method method_frame: The Connection.CloseOk frame
"""
LOGGER.debug('_on_connection_close_ok: frame=%s', method_frame)
self._terminate_stream(None)
def _default_on_connection_error(self, _connection_unused, error):
"""Default behavior when the connecting connection cannot connect and
user didn't supply own `on_connection_error` callback.
:raises: the given error
"""
raise error
def _on_connection_open_ok(self, method_frame):
"""
This is called once we have tuned the connection with the server and
        called Connection.Open on the server, and it has replied with
        Connection.OpenOk.
"""
self._opened = True
self.known_hosts = method_frame.method.known_hosts
# We're now connected at the AMQP level
self._set_connection_state(self.CONNECTION_OPEN)
# Call our initial callback that we're open
self.callbacks.process(0, self.ON_CONNECTION_OPEN_OK, self, self)
def _on_connection_start(self, method_frame):
"""This is called as a callback once we have received a Connection.Start
from the server.
:param pika.frame.Method method_frame: The frame received
:raises: UnexpectedFrameError
"""
self._set_connection_state(self.CONNECTION_START)
try:
if self._is_protocol_header_frame(method_frame):
raise exceptions.UnexpectedFrameError(method_frame)
self._check_for_protocol_mismatch(method_frame)
self._set_server_information(method_frame)
self._add_connection_tune_callback()
self._send_connection_start_ok(*self._get_credentials(method_frame))
except Exception as error: # pylint: disable=W0703
LOGGER.exception('Error processing Connection.Start.')
self._terminate_stream(error)
@staticmethod
def _negotiate_integer_value(client_value, server_value):
"""Negotiates two values. If either of them is 0 or None,
returns the other one. If both are positive integers, returns the
smallest one.
:param int client_value: The client value
:param int server_value: The server value
:rtype: int
"""
if client_value is None:
client_value = 0
if server_value is None:
server_value = 0
# this is consistent with how Java client and Bunny
# perform negotiation, see pika/pika#874
if client_value == 0 or server_value == 0:
val = max(client_value, server_value)
else:
val = min(client_value, server_value)
return val
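    # Worked examples of the negotiation rule above:
    #
    #   _negotiate_integer_value(0, 2047)        -> 2047   (0 means "no limit")
    #   _negotiate_integer_value(None, 2047)     -> 2047   (None treated as 0)
    #   _negotiate_integer_value(131072, 65536)  -> 65536  (both set: take min)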
@staticmethod
def _tune_heartbeat_timeout(client_value, server_value):
""" Determine heartbeat timeout per AMQP 0-9-1 rules
Per https://www.rabbitmq.com/resources/specs/amqp0-9-1.pdf,
> Both peers negotiate the limits to the lowest agreed value as follows:
> - The server MUST tell the client what limits it proposes.
> - The client responds and **MAY reduce those limits** for its
connection
If the client specifies a value, it always takes precedence.
:param client_value: None to accept server_value; otherwise, an integral
number in seconds; 0 (zero) to disable heartbeat.
:param server_value: integral value of the heartbeat timeout proposed by
broker; 0 (zero) to disable heartbeat.
:returns: the value of the heartbeat timeout to use and return to broker
"""
if client_value is None:
# Accept server's limit
timeout = server_value
else:
timeout = client_value
return timeout
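    # Worked examples of the heartbeat rule above:
    #
    #   _tune_heartbeat_timeout(None, 60) -> 60  (accept the broker's proposal)
    #   _tune_heartbeat_timeout(0, 60)    -> 0   (client disables heartbeats)
    #   _tune_heartbeat_timeout(20, 60)   -> 20  (client value takes precedence)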
def _on_connection_tune(self, method_frame):
"""Once the Broker sends back a Connection.Tune, we will set our tuning
variables that have been returned to us and kick off the Heartbeat
        monitor if required, send our TuneOk and then the Connection.Open RPC
call on channel 0.
:param pika.frame.Method method_frame: The frame received
"""
self._set_connection_state(self.CONNECTION_TUNE)
# Get our max channels, frames and heartbeat interval
self.params.channel_max = Connection._negotiate_integer_value(
self.params.channel_max,
method_frame.method.channel_max)
self.params.frame_max = Connection._negotiate_integer_value(
self.params.frame_max,
method_frame.method.frame_max)
if callable(self.params.heartbeat):
ret_heartbeat = self.params.heartbeat(self, method_frame.method.heartbeat)
if ret_heartbeat is None or callable(ret_heartbeat):
# Enforce callback-specific restrictions on callback's return value
raise TypeError('heartbeat callback must not return None '
'or callable, but got %r' % (ret_heartbeat,))
            # Leave it to the heartbeat setter to deal with the rest of the validation
self.params.heartbeat = ret_heartbeat
        # Negotiate heartbeat timeout
self.params.heartbeat = self._tune_heartbeat_timeout(
client_value=self.params.heartbeat,
server_value=method_frame.method.heartbeat)
# Calculate the maximum pieces for body frames
self._body_max_length = self._get_body_frame_max_length()
# Create a new heartbeat checker if needed
self._heartbeat_checker = self._create_heartbeat_checker()
# Send the TuneOk response with what we've agreed upon
self._send_connection_tune_ok()
# Send the Connection.Open RPC call for the vhost
self._send_connection_open()
def _on_data_available(self, data_in):
"""This is called by our Adapter, passing in the data from the socket.
        As long as there is data in the buffer, try to decode frames from it.
:param str data_in: The data that is available to read
"""
self._frame_buffer += data_in
while self._frame_buffer:
consumed_count, frame_value = self._read_frame()
if not frame_value:
return
self._trim_frame_buffer(consumed_count)
self._process_frame(frame_value)
def _terminate_stream(self, error):
"""Deactivate heartbeat instance if activated already, and initiate
termination of the stream (TCP) connection asynchronously.
When connection terminates, the appropriate user callback will be
invoked with the given error: "on open error" or "on connection closed".
:param Exception | None error: exception instance describing the reason
for termination; None for normal closing, such as upon receipt of
Connection.CloseOk.
"""
assert isinstance(error, (type(None), Exception)), \
'error arg is neither None nor instance of Exception: {!r}.'.format(
error)
if error is not None:
# Save the exception for user callback once the stream closes
self._error = error
else:
assert self._error is not None, (
'_terminate_stream() expected self._error to be set when '
'passed None error arg.')
# So it won't mess with the stack
self._remove_heartbeat()
# Begin disconnection of stream or termination of connection workflow
self._adapter_disconnect_stream()
def _on_stream_terminated(self, error):
"""Handle termination of stack (including TCP layer) or failure to
establish the stack. Notify registered ON_CONNECTION_ERROR or
ON_CONNECTION_CLOSED callbacks, depending on whether the connection
was opening or open.
:param Exception | None error: None means that the transport was aborted
internally and exception in `self._error` represents the cause.
Otherwise it's an exception object that describes the unexpected
loss of connection.
"""
LOGGER.info('AMQP stack terminated, failed to connect, or aborted: '
'error-arg=%r; pending-error=%r', error, self._error)
if error is not None:
if self._error is not None:
LOGGER.debug('_on_stream_terminated(): overriding '
'pending-error=%r with %r', self._error, error)
self._error = error
else:
assert self._error is not None, (
'_on_stream_terminated() expected self._error to be populated '
'with reason for terminating stack.')
# Stop the heartbeat checker if it exists
self._remove_heartbeat()
# Remove connection management callbacks
self._remove_callbacks(0,
[spec.Connection.Close, spec.Connection.Start])
if self.params.blocked_connection_timeout is not None:
self._remove_callbacks(0, [spec.Connection.Blocked,
spec.Connection.Unblocked])
if not self._opened and isinstance(self._error,
exceptions.StreamLostError):
# Heuristically deduce error based on connection state
if self.connection_state == self.CONNECTION_PROTOCOL:
LOGGER.error('Probably incompatible Protocol Versions')
self._error = exceptions.IncompatibleProtocolError(
repr(self._error))
elif self.connection_state == self.CONNECTION_START:
LOGGER.error('Connection closed while authenticating indicating a '
'probable authentication error')
self._error = exceptions.ProbableAuthenticationError(
repr(self._error))
elif self.connection_state == self.CONNECTION_TUNE:
LOGGER.error('Connection closed while tuning the connection '
'indicating a probable permission error when '
'accessing a virtual host')
self._error = exceptions.ProbableAccessDeniedError(
repr(self._error))
elif self.connection_state not in [self.CONNECTION_OPEN,
self.CONNECTION_CLOSED,
self.CONNECTION_CLOSING]:
LOGGER.warning('Unexpected connection state on disconnect: %i',
self.connection_state)
# Transition to closed state
self._set_connection_state(self.CONNECTION_CLOSED)
# Inform our channel proxies, if any are still around
for channel in dictkeys(self._channels):
if channel not in self._channels:
continue
# pylint: disable=W0212
self._channels[channel]._on_close_meta(self._error)
# Inform interested parties
if not self._opened:
LOGGER.info('Connection setup terminated due to %r', self._error)
self.callbacks.process(0,
self.ON_CONNECTION_ERROR,
self, self,
self._error)
else:
LOGGER.info('Stack terminated due to %r', self._error)
self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self,
self._error)
# Reset connection properties
self._init_connection_state()
def _process_callbacks(self, frame_value):
"""Process the callbacks for the frame if the frame is a method frame
and if it has any callbacks pending.
:param pika.frame.Method frame_value: The frame to process
:rtype: bool
"""
if (self._is_method_frame(frame_value) and
self._has_pending_callbacks(frame_value)):
self.callbacks.process(frame_value.channel_number, # Prefix
frame_value.method, # Key
self, # Caller
frame_value) # Args
return True
return False
def _process_frame(self, frame_value):
"""Process an inbound frame from the socket.
:param frame_value: The frame to process
:type frame_value: pika.frame.Frame | pika.frame.Method
"""
# Will receive a frame type of -1 if protocol version mismatch
if frame_value.frame_type < 0:
return
# Keep track of how many frames have been read
self.frames_received += 1
# Process any callbacks, if True, exit method
if self._process_callbacks(frame_value):
return
# If a heartbeat is received, update the checker
if isinstance(frame_value, frame.Heartbeat):
if self._heartbeat_checker:
self._heartbeat_checker.received()
else:
LOGGER.warning('Received heartbeat frame without a heartbeat '
'checker')
# If the frame has a channel number beyond the base channel, deliver it
elif frame_value.channel_number > 0:
self._deliver_frame_to_channel(frame_value)
def _read_frame(self):
"""Try and read from the frame buffer and decode a frame.
:rtype tuple: (int, pika.frame.Frame)
"""
return frame.decode_frame(self._frame_buffer)
def _remove_callbacks(self, channel_number, method_classes):
"""Remove the callbacks for the specified channel number and list of
method frames.
:param int channel_number: The channel number to remove the callback on
:param sequence method_classes: The method classes (derived from
`pika.amqp_object.Method`) for the callbacks
"""
for method_cls in method_classes:
self.callbacks.remove(str(channel_number), method_cls)
def _rpc(self, channel_number, method,
callback=None,
acceptable_replies=None):
"""Make an RPC call for the given callback, channel number and method.
acceptable_replies lists out what responses we'll process from the
server with the specified callback.
:param int channel_number: The channel number for the RPC call
:param pika.amqp_object.Method method: The method frame to call
:param method callback: The callback for the RPC response
:param list acceptable_replies: The replies this RPC call expects
"""
# Validate that acceptable_replies is a list or None
if acceptable_replies and not isinstance(acceptable_replies, list):
raise TypeError('acceptable_replies should be list or None')
# Validate the callback is callable
if callback is not None:
if not callable(callback):
raise TypeError('callback should be None, function or method.')
for reply in acceptable_replies:
self.callbacks.add(channel_number, reply, callback)
# Send the rpc call to RabbitMQ
self._send_method(channel_number, method)
def _send_connection_close(self, reply_code, reply_text):
"""Send a Connection.Close method frame.
:param int reply_code: The reason for the close
:param str reply_text: The text reason for the close
"""
self._rpc(0, spec.Connection.Close(reply_code, reply_text, 0, 0),
self._on_connection_close_ok, [spec.Connection.CloseOk])
def _send_connection_open(self):
"""Send a Connection.Open frame"""
self._rpc(0, spec.Connection.Open(self.params.virtual_host,
insist=True),
self._on_connection_open_ok, [spec.Connection.OpenOk])
def _send_connection_start_ok(self, authentication_type, response):
"""Send a Connection.StartOk frame
:param str authentication_type: The auth type value
:param str response: The encoded value to send
"""
self._send_method(0,
spec.Connection.StartOk(self._client_properties,
authentication_type, response,
self.params.locale))
def _send_connection_tune_ok(self):
"""Send a Connection.TuneOk frame"""
self._send_method(0, spec.Connection.TuneOk(self.params.channel_max,
self.params.frame_max,
self.params.heartbeat))
def _send_frame(self, frame_value):
"""This appends the fully generated frame to send to the broker to the
output buffer which will be then sent via the connection adapter.
:param frame_value: The frame to write
:type frame_value: pika.frame.Frame|pika.frame.ProtocolHeader
:raises: exceptions.ConnectionClosed
"""
if self.is_closed:
LOGGER.error('Attempted to send frame when closed')
raise exceptions.ConnectionWrongStateError(
'Attempted to send a frame on closed connection.')
marshaled_frame = frame_value.marshal()
self._output_marshaled_frames([marshaled_frame])
def _send_method(self, channel_number, method, content=None):
"""Constructs a RPC method frame and then sends it to the broker.
:param int channel_number: The channel number for the frame
:param pika.amqp_object.Method method: The method to send
:param tuple content: If set, is a content frame, is tuple of
properties and body.
"""
if content:
self._send_message(channel_number, method, content)
else:
self._send_frame(frame.Method(channel_number, method))
def _send_message(self, channel_number, method_frame, content):
"""Publish a message.
:param int channel_number: The channel number for the frame
:param pika.object.Method method_frame: The method frame to send
:param tuple content: A content frame, which is tuple of properties and
body.
"""
length = len(content[1])
marshaled_body_frames = []
# Note: we construct the Method, Header and Content objects, marshal them
# *then* output in case the marshaling operation throws an exception
frame_method = frame.Method(channel_number, method_frame)
frame_header = frame.Header(channel_number, length, content[0])
marshaled_body_frames.append(frame_method.marshal())
marshaled_body_frames.append(frame_header.marshal())
if content[1]:
chunks = int(math.ceil(float(length) / self._body_max_length))
for chunk in xrange(0, chunks):
start = chunk * self._body_max_length
end = start + self._body_max_length
if end > length:
end = length
frame_body = frame.Body(channel_number, content[1][start:end])
marshaled_body_frames.append(frame_body.marshal())
self._output_marshaled_frames(marshaled_body_frames)
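    # Worked example of the chunking arithmetic above (assuming the default
    # negotiated frame_max of 131072, so _body_max_length == 131064): a
    # 1,000,000-byte body is split into ceil(1000000 / 131064) == 8 body
    # frames, seven full 131064-byte frames plus one final 82,552-byte frame,
    # all emitted after the method and header frames.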
def _set_connection_state(self, connection_state):
"""Set the connection state.
:param int connection_state: The connection state to set
"""
LOGGER.debug('New Connection state: %s (prev=%s)',
self._STATE_NAMES[connection_state],
self._STATE_NAMES[self.connection_state])
self.connection_state = connection_state
def _set_server_information(self, method_frame):
"""Set the server properties and capabilities
:param spec.connection.Start method_frame: The Connection.Start frame
"""
self.server_properties = method_frame.method.server_properties
self.server_capabilities = self.server_properties.get('capabilities',
dict())
if hasattr(self.server_properties, 'capabilities'):
del self.server_properties['capabilities']
def _trim_frame_buffer(self, byte_count):
"""Trim the leading N bytes off the frame buffer and increment the
counter that keeps track of how many bytes have been read/used from the
socket.
:param int byte_count: The number of bytes consumed
"""
self._frame_buffer = self._frame_buffer[byte_count:]
self.bytes_received += byte_count
def _output_marshaled_frames(self, marshaled_frames):
"""Output list of marshaled frames to buffer and update stats
:param list marshaled_frames: A list of frames marshaled to bytes
"""
for marshaled_frame in marshaled_frames:
self.bytes_sent += len(marshaled_frame)
self.frames_sent += 1
self._adapter_emit_data(marshaled_frame)
if self.params.backpressure_detection:
self._detect_backpressure()
| 37.973956
| 95
| 0.628039
|
b5f8055db652d9af9b348c5374a593a428c9675d
| 2,059
|
py
|
Python
|
05 Supervised/BurstyBTM/script/eta.py
|
Cal-Fang/Defund-the-Police-Survey-Project
|
209ad907cddd2c3b3616f5dd2fc2e06f5449b2c7
|
[
"MIT"
] | null | null | null |
05 Supervised/BurstyBTM/script/eta.py
|
Cal-Fang/Defund-the-Police-Survey-Project
|
209ad907cddd2c3b3616f5dd2fc2e06f5449b2c7
|
[
"MIT"
] | null | null | null |
05 Supervised/BurstyBTM/script/eta.py
|
Cal-Fang/Defund-the-Police-Survey-Project
|
209ad907cddd2c3b3616f5dd2fc2e06f5449b2c7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# compute eta(the bursty probability) for each biterm in each day
import sys
# read the day frequency for each biterm
# Input format: w1 w2 freq
# compute and output the etas in a day
def proc_day(bdf_pt, day, res_pt):
etas = {}
for l in open(bdf_pt):
b, df = l.strip().split('\t')
df = [s.split(':') for s in df.split()]
df = dict([(int(d), int(f)) for d, f in df])
        if day not in df:
continue
etas[b] = computeEta(df, day)
write_etas(etas, res_pt)
# Given the day frequencies of a biterm, compute its eta in `day`
# df {day:freq, ...}
# return eta
def computeEta(df, day):
eps = 0.01 # epsilon in the paper
# avgf = sum(df.values()) / float(len(df))
avgf = sum([f for d, f in df.items() if d >= day - 10 and d <= day]) / float(min(day+1, 10))
if avgf < 5:
return eps
else:
return max(df[day] - avgf - 5, eps) / df[day]
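# Worked example: with day = 15 and df = {15: 100, 14: 10, 13: 10, ..., 5: 10}
# (ten earlier days at frequency 10), the 11-day window sums to 200 and is
# divided by min(day + 1, 10) = 10, so avgf = 20. Since avgf >= 5,
# eta = max(100 - 20 - 5, 0.01) / 100 = 0.75.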
# write the frequency of biterms
# format: wi wj freq
def write_etas(etas, res_pt):
print 'write:', res_pt
wf = open(res_pt, 'w')
for b, eta in sorted(etas.items(), key=lambda d:d[1], reverse=True):
print >> wf, '%s\t%f' % (b, eta)
if __name__ == '__main__':
    if len(sys.argv) < 4:
print 'Compute the bursty probability (eta) for each biterm in each day'
        print 'Usage: python %s <n_day> <bdf_pt> <res_dir>' % sys.argv[0]
        print '\tn_day   number of days to be processed (count from 0)'
        print '\tbdf_pt  input biterm day-frequency file, each line is a biterm with its daily frequency. Line format: "w1 w2 day:freq day:freq ..."'
print '\tres_dir output the eta of the biterms. Line format: "w1 w2 eta"'
exit(1)
n_day = int(sys.argv[1])
bdf_pt = sys.argv[2]
eta_dir = sys.argv[3]
for day in range(1, n_day):
eta_pt = eta_dir + '%d.txt' % day
proc_day(bdf_pt, day, eta_pt)
| 36.122807
| 132
| 0.558038
|