| hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 794ea23ccef3098f4853ec0712ad36bd4fc2acca | 285 | py | Python | metaflow/datastore/__init__.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | ["Apache-2.0"] | null | null | null | metaflow/datastore/__init__.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | ["Apache-2.0"] | null | null | null | metaflow/datastore/__init__.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | ["Apache-2.0"] | null | null | null |
from .inputs import Inputs
from .flow_datastore import FlowDataStore
from .datastore_set import TaskDataStoreSet
from .task_datastore import TaskDataStore
from .local_storage import LocalStorage
from .s3_storage import S3Storage
DATASTORES = {"local": LocalStorage, "s3": S3Storage}
| 28.5 | 53 | 0.831579 |
| 794ea43c6b915563d1c542455f60e2c2757c931f | 272 | py | Python | DataStorageSystems/BST.py | Nezgun/Exploring-Bees | 25b6028b55a9dacd0d65c165848da2fe57c39a5a | ["MIT"] | null | null | null | DataStorageSystems/BST.py | Nezgun/Exploring-Bees | 25b6028b55a9dacd0d65c165848da2fe57c39a5a | ["MIT"] | 1 | 2019-07-26T04:52:12.000Z | 2019-07-26T04:52:12.000Z | DataStorageSystems/BST.py | Nezgun/Exploring-Bees | 25b6028b55a9dacd0d65c165848da2fe57c39a5a | ["MIT"] | null | null | null |
class BST:
def __init__(self):
self.head = None
self.size = 0
def addNode(self, data):
return
def deleteNode(self, Node):
return
def search(self, data):
return
def getSize(self):
return self.size
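The BST methods above are left as stubs. Below is a minimal sketch of how addNode and search might be filled in for an ordinary binary search tree; the Node helper class and the iterative descent are assumptions, not part of the original file:

```python
class Node:
    """Hypothetical helper node; the original file defines no node type."""
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def add_node(tree, data):
    """Insert data into a BST shaped like the stub above (tree.head is the root)."""
    new = Node(data)
    if tree.head is None:
        tree.head = new
    else:
        cur = tree.head
        while True:
            side = "left" if data < cur.data else "right"
            child = getattr(cur, side)
            if child is None:
                setattr(cur, side, new)
                break
            cur = child
    tree.size += 1


def search(tree, data):
    """Return the node holding data, or None if it is absent."""
    cur = tree.head
    while cur is not None and cur.data != data:
        cur = cur.left if data < cur.data else cur.right
    return cur
```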
| 16 | 31 | 0.536765 |
| 794ea514a735a14e95adaca4e39081ad1b9c7cf8 | 10,989 | py | Python | build/lib/preview_generator/preview/builder/office__libreoffice.py | alounko/preview-generator-py2 | 604734ee3c18b0fcf87360022871b884132f71a0 | ["MIT"] | null | null | null | build/lib/preview_generator/preview/builder/office__libreoffice.py | alounko/preview-generator-py2 | 604734ee3c18b0fcf87360022871b884132f71a0 | ["MIT"] | null | null | null | build/lib/preview_generator/preview/builder/office__libreoffice.py | alounko/preview-generator-py2 | 604734ee3c18b0fcf87360022871b884132f71a0 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from io import BytesIO
import logging
import os
from subprocess import check_call
from subprocess import DEVNULL
from subprocess import STDOUT
import time
import typing
from PyPDF2 import PdfFileReader
from PyPDF2 import PdfFileWriter
from preview_generator.exception import BuilderDependencyNotFound
from preview_generator.exception import ExecutableNotFound
from preview_generator.preview.generic_preview import PreviewBuilder
from preview_generator.utils import check_executable_is_available
from preview_generator.utils import ImgDims
from preview_generator.preview.builder.image__wand import convert_pdf_to_jpeg
class OfficePreviewBuilderLibreoffice(PreviewBuilder):
@classmethod
def get_supported_mimetypes(cls):
return [
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', # nopep8
'application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.spreadsheet',
'application/msword',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document', # nopep8
'application/vnd.openxmlformats-officedocument.wordprocessingml.template', # nopep8
'application/vnd.ms-word.document.macroEnabled.12',
'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', # nopep8
'application/vnd.openxmlformats-officedocument.spreadsheetml.template', # nopep8
'application/vnd.ms-excel.sheet.macroEnabled.12',
'application/vnd.ms-excel.template.macroEnabled.12',
'application/vnd.ms-excel.addin.macroEnabled.12',
'application/vnd.ms-excel.sheet.binary.macroEnabled.12',
'application/vnd.ms-powerpoint',
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # nopep8
'application/vnd.openxmlformats-officedocument.presentationml.template', # nopep8
'application/vnd.openxmlformats-officedocument.presentationml.slideshow', # nopep8
'application/vnd.ms-powerpoint.addin.macroEnabled.12',
'application/vnd.ms-powerpoint.presentation.macroEnabled.12',
'application/vnd.ms-powerpoint.template.macroEnabled.12',
'application/vnd.ms-powerpoint.slideshow.macroEnabled.12',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.text',
'application/vnd.oasis.opendocument.text-template',
'application/vnd.oasis.opendocument.text-web',
'application/vnd.oasis.opendocument.text-master',
'application/vnd.oasis.opendocument.graphics',
'application/vnd.oasis.opendocument.graphics-template',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.presentation-template',
'application/vnd.oasis.opendocument.spreadsheet-template',
'application/vnd.oasis.opendocument.chart',
'application/vnd.oasis.opendocument.chart',
'application/vnd.oasis.opendocument.formula',
'application/vnd.oasis.opendocument.database',
'application/vnd.oasis.opendocument.image',
'application/vnd.openofficeorg.extension',
] # type: typing.List[str]
@classmethod
def check_dependencies(cls):
try:
return check_executable_is_available('libreoffice')
except ExecutableNotFound:
raise BuilderDependencyNotFound(
'this builder requires libreoffice to be available'
)
def build_jpeg_preview(
self,
file_path,
preview_name,
cache_path,
page_id,
extension = '.jpg',
size=None
):
with open(file_path, 'rb') as odt:
if os.path.exists(
'{path}{file_name}.pdf'.format(
path=cache_path,
file_name=preview_name
)):
input_pdf_stream = open(
'{path}.pdf'.format(
path=cache_path + preview_name,
), 'rb')
else:
if self.cache_file_process_already_running(
cache_path + preview_name):
time.sleep(2)
return self.build_jpeg_preview(
file_path=file_path,
preview_name=preview_name,
cache_path=cache_path,
extension=extension,
page_id=page_id
)
else:
input_pdf_stream = convert_office_document_to_pdf(
odt,
cache_path,
preview_name
)
input_pdf = PdfFileReader(input_pdf_stream)
intermediate_pdf = PdfFileWriter()
intermediate_pdf.addPage(input_pdf.getPage(int(page_id)))
intermediate_pdf_stream = BytesIO()
intermediate_pdf.write(intermediate_pdf_stream)
intermediate_pdf_stream.seek(0, 0)
jpeg_stream = convert_pdf_to_jpeg(intermediate_pdf_stream, size)
jpeg_preview_path = '{path}{file_name}{extension}'.format(
path=cache_path,
file_name=preview_name,
extension=extension
)
with open(jpeg_preview_path, 'wb') as jpeg_output_stream:
buffer = jpeg_stream.read(1024)
while buffer:
jpeg_output_stream.write(buffer)
buffer = jpeg_stream.read(1024)
def get_page_number(self, file_path, preview_name,
cache_path):
page_nb_file_path = cache_path + preview_name + '_page_nb'
if not os.path.exists(page_nb_file_path):
pdf_version_filepath = cache_path + preview_name + '.pdf'
if not os.path.exists(pdf_version_filepath):
self.build_pdf_preview(
file_path=file_path,
preview_name=preview_name,
cache_path=cache_path
)
with open(page_nb_file_path, 'w') as page_nb_file_stream:
page_nb_file_stream.seek(0, 0)
with open(pdf_version_filepath, 'rb') as pdf_stream:
pdf_reader = PdfFileReader(pdf_stream)
page_nb_file_stream.write(str(pdf_reader.numPages))
with open(page_nb_file_path, 'r') as page_nb_stream:
page_nb = int(page_nb_stream.read())
return page_nb
def has_pdf_preview(self):
"""
Override and return True if your builder allow PDF preview
:return:
"""
return True
def build_pdf_preview(
self,
file_path,
preview_name,
cache_path,
extension = '.pdf',
page_id = -1):
intermediate_filename = preview_name.split('-page')[0]
intermediate_pdf_file_path = os.path.join(
cache_path,
'{}.pdf'.format(intermediate_filename)
)
if not os.path.exists(intermediate_pdf_file_path):
if os.path.exists(intermediate_pdf_file_path + '_flag'):
# Wait 2 seconds, then retry
time.sleep(2)
return self.build_pdf_preview(
file_path=file_path,
preview_name=preview_name,
cache_path=cache_path,
extension=extension,
page_id=page_id
)
with open(file_path, 'rb') as input_stream:
# first step is to convert full document to full pdf
convert_office_document_to_pdf(
input_stream,
cache_path,
intermediate_filename
)
if page_id < 0:
return # in this case, the intermediate file is the requested one
pdf_in = PdfFileReader(intermediate_pdf_file_path)
output_file_path = os.path.join(cache_path, '{}{}'.format(preview_name, extension))
pdf_out = PdfFileWriter()
pdf_out.addPage(pdf_in.getPage(page_id))
with open(output_file_path, 'wb') as output_file:
pdf_out.write(output_file)
def cache_file_process_already_running(self, file_name):
if os.path.exists(file_name + '_flag'):
return True
else:
return False
def create_flag_file(filepath):
# the flag is used to avoid concurrent build of same previews
try:
os.mkdir(filepath+'_flag')
except OSError:
pass
def convert_office_document_to_pdf(
file_content,
cache_path,
file_name
):
cache_filename_path = cache_path + file_name
create_flag_file(cache_filename_path)
if not os.path.exists(cache_filename_path):
with open('{path}{file_name}'.format(
path=cache_path,
file_name=file_name), 'wb') \
as odt_temp:
file_content.seek(0, 0)
buffer = file_content.read(1024)
while buffer:
odt_temp.write(buffer)
buffer = file_content.read(1024)
try:
logging.info('Creation of directory' + cache_path)
os.makedirs(cache_path)
except OSError:
pass
# TODO There's probably a cleaner way to convert to pdf
check_call(
[
'libreoffice',
'--headless',
'--convert-to',
'pdf:writer_pdf_Export',
'{path}{extension}'.format(path=cache_path, extension=file_name), # nopep8
'--outdir',
cache_path,
'-env:UserInstallation=file:///tmp/LibreOffice_Conversion_${USER}', # nopep8
],
stdout=DEVNULL, stderr=STDOUT
)
try:
logging.info('Removing directory' + cache_path + file_name + '_flag')
os.removedirs(cache_path + file_name + '_flag')
except OSError:
pass
try:
logging.info('Removing directory {path}{file_name}'.format(
path=cache_path,
file_name=file_name
)
)
os.remove('{path}{file_name}'.format(
path=cache_path,
file_name=file_name
)
)
except OSError:
pass
with open('{path}{file_name}.pdf'.format(
path=cache_path,
file_name=file_name
), 'rb') as pdf:
pdf.seek(0, 0)
content_as_bytes = pdf.read()
output = BytesIO(content_as_bytes)
output.seek(0, 0)
return output
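A hedged usage sketch for the builder above; the file paths are invented, and the ImgDims keyword arguments are an assumption based on the import of preview_generator.utils.ImgDims:

```python
# Hypothetical driver code, not part of the original module.
from preview_generator.utils import ImgDims

builder = OfficePreviewBuilderLibreoffice()
builder.check_dependencies()  # raises BuilderDependencyNotFound if libreoffice is missing

# Render page 0 of an ODT file to a JPEG in the cache directory.
# Note that the module concatenates cache_path + preview_name directly,
# so cache_path should end with a path separator.
builder.build_jpeg_preview(
    file_path="/tmp/report.odt",
    preview_name="report-page0",
    cache_path="/tmp/previews/",
    page_id=0,
    size=ImgDims(width=256, height=256),
)
```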
| 36.267327 | 98 | 0.590045 |
| 794ea607ea15bc54b31800cbb68c1934c3b54946 | 2,660 | py | Python | scripts/plotNetWTP.py | aerospike/aerospike-benchmarks | 97a18f669a8141d007a9458b750a102a89d1c4eb | ["Apache-2.0"] | 3 | 2017-04-06T06:27:33.000Z | 2019-02-09T15:36:24.000Z | scripts/plotNetWTP.py | aerospike-examples/aerospike-benchmarks | 97a18f669a8141d007a9458b750a102a89d1c4eb | ["Apache-2.0"] | null | null | null | scripts/plotNetWTP.py | aerospike-examples/aerospike-benchmarks | 97a18f669a8141d007a9458b750a102a89d1c4eb | ["Apache-2.0"] | 1 | 2017-11-27T21:43:07.000Z | 2017-11-27T21:43:07.000Z |
#!/usr/bin/env /usr/local/bin/python
#
# Copyright 2012-2016 Aerospike, Inc.
#
# Portions may be licensed to Aerospike, Inc. under one or more contributor
# license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
#################################################################################
#
# Purpose: This application graphs output from dstat.
# Dstat must be used with the following options:
#
# dstat -Tcmndr --output dstat.out
#
#################################################################################
# import modules
import json
import math
import glob
import os
import sys
import getopt
import time
import datetime
import numpy as np
import csv
import matplotlib as mlab
mlab.use('Agg')
import matplotlib.pyplot as plt
def main(argv):
# initialize some variable to be lists:
i = 1
x1 = []
y1 = []
# Read command line args
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:d:",["ifile=","ofile="])
except getopt.GetoptError:
print 'plotNetWTP.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'plotNetWTP.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
elif opt in ("-d"):
dbname = arg
with open(inputfile,'r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
seconds = 0
for row in plots:
x1.append(seconds)
y1.append(float(row[12]))
seconds +=1
fig = plt.figure(figsize=[10,9])
tmp_title = dbname.upper() + " Net Write Throughput"
fig.suptitle(tmp_title, fontsize=18)
plt.xlabel('Time(s)', fontsize=16)
plt.ylabel('Bytes', fontsize=16)
plt.tick_params(labelsize=10)
plt.xticks(rotation=70)
xv = np.array(x1)
yv = np.array(y1)
plt.plot(xv, yv,color=[0.0/255,0.0/255,255.0/255])
plt.savefig(outputfile)
if __name__ == "__main__":
main(sys.argv[1:])
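For reference, a hedged example of how this script is meant to be driven, based only on the getopt flags defined above (file names are placeholders):

```python
# Equivalent to running: plotNetWTP.py -i dstat.out -o net_wtp.png -d aerospike
# Column 12 (zero-based) of the dstat CSV is what gets plotted as the
# "Net Write Throughput" series against elapsed seconds.
main(["-i", "dstat.out", "-o", "net_wtp.png", "-d", "aerospike"])
```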
| 27.142857 | 81 | 0.615038 |
| 794ea63f4952f74c669dcb01993e9bea1484b1fe | 31 | py | Python | module/__init__.py | TauWu/monitor_bot | 637a13266a4841ecfdbcc455ecc9b094c07ffd94 | ["Apache-2.0"] | 3 | 2018-09-25T08:01:21.000Z | 2021-02-02T10:12:41.000Z | module/__init__.py | TauWu/monitor_bot | 637a13266a4841ecfdbcc455ecc9b094c07ffd94 | ["Apache-2.0"] | null | null | null | module/__init__.py | TauWu/monitor_bot | 637a13266a4841ecfdbcc455ecc9b094c07ffd94 | ["Apache-2.0"] | 1 | 2018-11-14T07:52:06.000Z | 2018-11-14T07:52:06.000Z |
# -*- coding:utf-8 -*-
# module
| 15.5 | 22 | 0.516129 |
| 794ea6c316fa08eecd473a8133c03d68232b47a1 | 464 | py | Python | data/scripts/templates/object/draft_schematic/bio_engineer/creature/shared_creature_roba.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | ["MIT"] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/bio_engineer/creature/shared_creature_roba.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | null | null | null | data/scripts/templates/object/draft_schematic/bio_engineer/creature/shared_creature_roba.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | ["MIT"] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/bio_engineer/creature/shared_creature_roba.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.294118 | 90 | 0.734914 |
| 794ea70638fda491863f96e6ae7938ec44ce71dc | 3,464 | py | Python | imwievaluation/semester_old.py | ESchae/IMWIEvaluation | 2fa661711b7b65cba25c1fa9ba69e09e75c7655f | ["MIT"] | null | null | null | imwievaluation/semester_old.py | ESchae/IMWIEvaluation | 2fa661711b7b65cba25c1fa9ba69e09e75c7655f | ["MIT"] | null | null | null | imwievaluation/semester_old.py | ESchae/IMWIEvaluation | 2fa661711b7b65cba25c1fa9ba69e09e75c7655f | ["MIT"] | 1 | 2019-10-19T10:11:17.000Z | 2019-10-19T10:11:17.000Z |
"""
Copyright 2018
Author Elke Schaechtele
"""
from csv import DictReader
from imwievaluation.utils import clean_string
class Semester(object):
def __init__(self, term):
self.term = term
self.lectures = []
self.lecturers = []
def read_survey_file(self, survey_file):
with open(survey_file) as f:
reader = DictReader(f, delimiter='\t')
for row in reader:
# make Lecture object and append it to self.lectures
title = row['lecture title'].strip()
name = row['lecturer'].strip()
with_tutorial = row['tutorial'].strip()
lecture = Lecture(self.term, title, name, with_tutorial)
self.lectures.append(lecture)
if name not in self.lecturer_names():
# add lecturer to self.lecturers
mail = row['mail'].strip()
gender = row['gender'].strip()
lecturer = Lecturer(name, mail, gender)
self.lecturers.append(lecturer)
# add lecture to lecturer's lecture list
for lecturer in self.lecturers:
if lecturer.name == name:
lecturer.add_lecture(lecture)
def lecture_titles(self):
lecture_titles = []
for lecture in self.lectures:
lecture_titles.append(lecture.title)
return lecture_titles
def lecturer_names(self):
lecturer_names = []
for lecturer in self.lecturers:
lecturer_names.append(lecturer.name)
return lecturer_names
class Lecturer(object):
def __init__(self, name, mail, gender):
self.name = name
self.mail = mail
self.gender = gender
self.lectures = []
def add_lecture(self, lecture):
self.lectures.append(lecture)
class Lecture(object):
def __init__(self, term, title, lecturer, with_tutorial):
self.term = term
self.lecturer = lecturer
self.title = title
self.with_tutorial = with_tutorial
self.participants = []
def get_evaluation_title(self):
return 'Evaluation %s (%s) %s' % (self.title, self.lecturer, self.term)
def get_filename(self, modus):
lecturer = clean_string(self.lecturer)
term = self.term.replace('/', '') # TODO: Maybe deprecated
if modus == 'participants':
title = clean_string(self.title).replace(' ', '_')
return 'Teilnehmer_%s_%s_%s.csv' % (title, lecturer, term)
elif modus == 'csv':
title = clean_string(self.title).replace(' ', '_')
return 'Ergebnisse_%s_%s_%s.csv' % (title, lecturer, term)
elif modus == 'pdf':
title = clean_string(self.title).replace(' ', '_')
# fileending will be added automatically in latex.py
return 'Ergebnisse_%s_%s_%s' % (title, lecturer, term)
elif modus == 'evaluation':
return 'Evaluation %s (%s) %s' % (self.title, self.lecturer, term)
def parse_participants_csv_file(self, csv_file):
with open(csv_file) as f:
reader = DictReader(f)
for row in reader:
self.participants.append({
'email': row['email'].strip(),
'lastname': row['lastname'].strip(),
'firstname': row['firstname'].strip()
})
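read_survey_file above expects a tab-separated file containing at least the columns it looks up; a minimal illustrative run, with the file name, term, and row values invented:

```python
# survey.tsv (tab-separated), with the header columns used by the DictReader above:
# lecture title<TAB>lecturer<TAB>tutorial<TAB>mail<TAB>gender
# Musikinformatik I<TAB>Jane Doe<TAB>yes<TAB>jane@example.org<TAB>f
semester = Semester("WS 2018/19")
semester.read_survey_file("survey.tsv")
print(semester.lecture_titles())   # -> ['Musikinformatik I']
print(semester.lecturer_names())   # -> ['Jane Doe']
```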
| 34.29703 | 79 | 0.565242 |
| 794ea70b51519ab8081f7f0963fc8960bbe9dc02 | 1,034 | py | Python | jp.atcoder/abc252/abc252_d/31857223.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc252/abc252_d/31857223.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc252/abc252_d/31857223.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | ["MIT"] | null | null | null |
import collections
import bisect
def main() -> None:
n = int(input())
a = list(map(int, input().split()))
b = list(collections.Counter(a).values())
origin = sorted(set(b))
b = [bisect.bisect_left(origin, x) for x in b]
mx = max(b)
cc = [0] * (mx + 1)
for c in b:
cc[c] += 1
def choose_3(n):
return n * (n - 1) * (n - 2) // 6
def choose_2(n):
return n * (n - 1) // 2
tot = 0
for i in range(mx + 1):
o = origin[i]
tot += choose_3(cc[i]) * o**3
tot += choose_2(cc[i]) * o**2 * (n - cc[i] * o)
s = [cc[i] * origin[i] for i in range(mx + 1)]
for i in range(mx):
s[i + 1] += s[i]
for i in range(mx):
oi = origin[i]
for j in range(i + 1, mx + 1):
if cc[i] == 0 or cc[j] == 0:
continue
oj = origin[j]
tot += cc[i] * oi * cc[j] * oj * (n - s[j])
print(tot)
if __name__ == "__main__":
main()
| 21.541667 | 56 | 0.421663 |
| 794ea7326037da48e5aafbaf02f97d16015ff472 | 8,591 | py | Python | spyder/app/solver.py | CodedX-cyber/spyder | 57a0e206562b566ee565221ea20f46941bc00e00 | ["MIT"] | 1 | 2021-04-11T00:32:35.000Z | 2021-04-11T00:32:35.000Z | spyder/app/solver.py | CodedX-cyber/spyder | 57a0e206562b566ee565221ea20f46941bc00e00 | ["MIT"] | null | null | null | spyder/app/solver.py | CodedX-cyber/spyder | 57a0e206562b566ee565221ea20f46941bc00e00 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Plugin dependency solver.
"""
import ast
import importlib
import logging
import os
import traceback
import pkg_resources
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import (
SpyderDockablePlugin, SpyderPluginWidget, Plugins)
from spyder.config.base import DEV, STDERR, running_under_pytest
from spyder.utils.external.toposort import (CircularDependencyError,
toposort_flatten)
logger = logging.getLogger(__name__)
def find_internal_plugins():
"""
Find available plugins based on setup.py entry points.
In DEV mode we parse the `setup.py` file directly.
"""
internal_plugins = {}
# If DEV, look for entry points in setup.py file for internal plugins
# and then look on the system for the rest
HERE = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.dirname(os.path.dirname(HERE))
setup_path = os.path.join(base_path, "setup.py")
if (DEV is not None or running_under_pytest()
and os.path.isfile(setup_path)):
if not os.path.isfile(setup_path):
raise Exception(
'No "setup.py" file found and running in DEV mode!')
with open(setup_path, "r") as fh:
lines = fh.read().split("\n")
start = None
end = None
for idx, line in enumerate(lines):
if line.startswith("spyder_plugins_entry_points"):
start = idx + 1
continue
if start is not None:
if line.startswith("]"):
end = idx + 1
break
internal_plugins = {}
entry_points_list = "[" + "\n".join(lines[start:end])
spyder_plugin_entry_points = ast.literal_eval(entry_points_list)
for entry_point in spyder_plugin_entry_points:
try:
name, module = entry_point.split(" = ")
name = name.strip()
module = module.strip()
module, class_name = module.split(":")
except Exception:
logger.error(
'"setup.py" entry point "{entry_point}" is malformed!'
"".format(entry_point=entry_point)
)
try:
mod = importlib.import_module(module)
internal_plugins[name] = getattr(mod, class_name, None)
except (ModuleNotFoundError, ImportError):
pass
else:
import spyder.plugins as plugin_mod
plugins_path = os.path.dirname(plugin_mod.__file__)
for folder in os.listdir(plugins_path):
plugin_path = os.path.join(plugins_path, folder)
init_path = os.path.join(plugin_path, "__init__.py")
if (os.path.isdir(plugin_path) and os.path.isfile(init_path)
and not folder.startswith("io_")):
spec = importlib.util.spec_from_file_location(folder,
init_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
for plugin_class in getattr(module, "PLUGIN_CLASSES", []):
internal_plugins[plugin_class.NAME] = plugin_class
return internal_plugins
def find_external_plugins():
"""
    Find available external plugins based on setuptools entry points.
"""
internal_plugins = find_internal_plugins()
plugins = [
entry_point for entry_point
in pkg_resources.iter_entry_points("spyder.plugins")
]
external_plugins = {}
for entry_point in plugins:
name = entry_point.name
if name not in internal_plugins:
try:
class_name = entry_point.attrs[0]
mod = importlib.import_module(entry_point.module_name)
plugin_class = getattr(mod, class_name, None)
# To display in dependencies dialog
plugin_class._spyder_module_name = entry_point.module_name
plugin_class._spyder_package_name = (
entry_point.dist.project_name)
plugin_class._spyder_version = entry_point.dist.version
external_plugins[name] = plugin_class
if name != plugin_class.NAME:
raise SpyderAPIError(
"Entry point name '{0}' and plugin.NAME '{1}' "
"do not match!".format(name, plugin_class.NAME)
)
except (ModuleNotFoundError, ImportError) as error:
print("%s: %s" % (name, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
return external_plugins
def solve_plugin_dependencies(plugins):
"""
Return a list of plugins sorted by dependencies.
Notes
-----
* Prune the plugins for which required dependencies are not met
* Prune the optional dependencies from the remaining plugins based on
the remaining plugins available.
* Group the remaining optional dependencies with the required
dependencies.
* Sort with toposort algorithm.
"""
# Back up dependencies
for plugin in plugins:
if plugin.REQUIRES is None:
plugin.REQUIRES = []
if plugin.OPTIONAL is None:
plugin.OPTIONAL = []
plugin._REQUIRES = plugin.REQUIRES.copy()
plugin._OPTIONAL = plugin.OPTIONAL.copy()
plugin_names = {plugin.NAME: plugin for plugin in plugins}
dependencies_dict = {}
# Prune plugins based on required dependencies or populate the dependencies
# if using a wildcard i.e 'Plugins.All' or to add base dependencies for
# example the Shortcuts plugin to all SpyderDockablePlugin's (shortcut for
# the "switch to plugin" action).
remaining_plugins = []
plugins_requiring_all_plugins = []
pruning_requires = True
import copy
while pruning_requires:
pruning_requires = False
remaining_plugins = []
current_plugins = copy.deepcopy(plugins)
for plugin in current_plugins:
if issubclass(plugin, (SpyderDockablePlugin, SpyderPluginWidget)):
if Plugins.Shortcuts not in plugin.REQUIRES:
plugin.REQUIRES.append(Plugins.Shortcuts)
plugin._REQUIRES = plugin.REQUIRES.copy()
for required in plugin.REQUIRES[:]:
# Check self references
if plugin.NAME == required:
raise SpyderAPIError("Plugin is self referencing!")
if (required == Plugins.All and len(plugin.REQUIRES) == 1):
all_plugins = plugin_names.copy()
all_plugins.pop(plugin.NAME)
plugin.REQUIRES = list(all_plugins)
plugin._REQUIRES = plugin.REQUIRES.copy()
logger.info(
"Added all plugins as dependencies to plugin: " +
plugin.NAME)
plugins_requiring_all_plugins.append(plugin)
continue
if required not in plugin_names:
plugin_names.pop(plugin.NAME)
plugins.remove(plugin)
for plugin_req_all in plugins_requiring_all_plugins:
plugin_req_all.REQUIRES = [Plugins.All]
plugin_req_all._REQUIRES = [Plugins.All]
logger.error("Pruned plugin: {}".format(plugin.NAME))
logger.error("Missing requirement: {}".format(required))
logger.error("Restart plugins pruning by REQUIRES check")
pruning_requires = True
break
else:
if plugin.NAME in plugin_names:
remaining_plugins.append(plugin)
# Prune optional dependencies from remaining plugins
for plugin in remaining_plugins:
for optional in plugin.OPTIONAL:
if optional not in plugin_names:
plugin._OPTIONAL.remove(optional)
plugin._REQUIRES += plugin._OPTIONAL
dependencies_dict[plugin.NAME] = set(plugin._REQUIRES)
# Now use toposort with plugin._REQUIRES!
deps = toposort_flatten(dependencies_dict)
plugin_deps = [plugin_names[name] for name in deps]
return plugin_deps
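solve_plugin_dependencies ends by feeding a {plugin name: set of required names} mapping into toposort_flatten; a small illustration of that final step, with invented plugin names:

```python
from spyder.utils.external.toposort import toposort_flatten

# Shape of dependencies_dict as built in solve_plugin_dependencies above.
dependencies_dict = {
    "editor": {"completions", "shortcuts"},
    "completions": {"shortcuts"},
    "shortcuts": set(),
}
# toposort_flatten returns one valid load order, dependencies first,
# e.g. ['shortcuts', 'completions', 'editor'].
print(toposort_flatten(dependencies_dict))
```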
| 37.515284 | 79 | 0.598533 |
| 794ea842b0507dd37e2eaa693caec1e4e40f8d6a | 32,052 | py | Python | dgp_graph/impl_parallel.py | naiqili/DGPG-MLJ | 65d566cc666a7ed43b5211e9b9c1af43ec8c0216 | ["MIT"] | 1 | 2022-01-05T11:17:44.000Z | 2022-01-05T11:17:44.000Z | dgp_graph/impl_parallel.py | naiqili/DGPG-journal | 2770ce58185091afff5eca8e295086b42f4d874e | ["MIT"] | null | null | null | dgp_graph/impl_parallel.py | naiqili/DGPG-journal | 2770ce58185091afff5eca8e295086b42f4d874e | ["MIT"] | null | null | null |
import tensorflow as tf
import numpy as np
from gpflow.mean_functions import Identity, Linear
from gpflow import settings
from gpflow import transforms
from gpflow.misc import _broadcasting_elementwise_op
float_type = settings.float_type
from gpflow import params_as_tensors, params_as_tensors_for, ParamList
from gpflow.mean_functions import Zero, MeanFunction
from gpflow.kernels import Stationary, RBF, Kernel
from compatible.ver_adaption import *
from doubly_stochastic_dgp.dgp import DGP_Base, TWDGP_Base
from doubly_stochastic_dgp.layers import Layer
from jack_utils.common import time_it
from gpflow.decors import autoflow
from gpflow.transforms import LowerTriangular, Transform
from dgp_graph.my_op import *
IP3D = InducingPoints # IP is compatiable with 3d Z
# TODO: enable automatic GPU memory growth (maybe already handled by gpflow)
# config = tf.ConfigProto()
# config.gpu_options.allow_growth=True
# sess = tf.Session(config=config)
class LowerTriangular3D(LowerTriangular):
"""
LowerTriangular Transform for 3D (4d actually) inputs
It's a reshape wrapper to the original LowerTriangular but keeps computation efficiency.
The node-fim must be at the first dim.
"""
def __init__(self, num_nodes, N, dim_out=1, **kwargs):
super(LowerTriangular3D, self).__init__(N, num_nodes * dim_out, **kwargs)
self.num_nodes = num_nodes
self.dim_out = dim_out
def forward(self, x):
"""
triangle -> vec each
:param x: packed x(num_nodes, num_matrices, num_non_zero)
:return: triangle matrices y(num_nodes, num_matrices, N, N)
"""
x_ = x.reshape(self.num_nodes*self.dim_out, -1)
y = super(LowerTriangular3D, self).forward(x_).reshape(self.num_nodes, self.dim_out, self.N, self.N)
return y
def backward(self, y):
"""
triangle -> vec each
:param y: (num_nodes, num_matrices, N, N) input triangle matrices
:return: packed x(num_nodes, num_matrices, num_non_zero)
"""
y_ = y.reshape(self.num_nodes*self.dim_out, self.N, self.N)
x = super(LowerTriangular3D, self).backward(y_).reshape(self.num_nodes, self.dim_out, -1)
return x
def forward_tensor(self, x):
x_ = tf.reshape(x, (self.num_nodes * self.dim_out, -1))
y_ = super(LowerTriangular3D, self).forward_tensor(x_)
y = tf.reshape(y_, (self.num_nodes, self.dim_out, self.N, self.N))
return y
def backward_tensor(self, y):
y_ = tf.reshape(y, (self.num_nodes * self.dim_out, self.N, self.N))
x_ = super(LowerTriangular3D, self).backward_tensor(y_)
x = tf.reshape(x_, (self.num_nodes, self.dim_out, -1))
return x
def __str__(self):
return "LoTri_3d->matrix"
@staticmethod
def test():
nodes, dim_in, dim_out = 3, 2, 4
y = np.random.randint(1, 10, size=(dim_out, dim_in, dim_in)) * np.tri(dim_in)
y_all = np.tile(y[None, ...], [nodes, 1, 1, 1])
trans = LowerTriangular(dim_in, num_matrices=dim_out)
trans_3d = LowerTriangular3D(nodes, dim_in, dim_out)
# np version
assert np.array_equal(y, trans.forward(trans.backward(y)))
assert np.array_equal(y_all, trans_3d.forward(trans_3d.backward(y_all)))
x_all = trans_3d.backward(y_all)
x_all_for = np.stack([trans.backward(y_all[i]) for i in range(nodes)], axis=0)
assert np.array_equal(x_all, x_all_for)
assert np.array_equal(trans_3d.forward(x_all),
np.stack([trans.forward(x_all[i]) for i in range(nodes)], axis=0))
# tf version
sess = tf.Session()
sess.run(tf.assert_equal(y, trans.forward_tensor(trans.backward_tensor(y))))
sess.run(tf.assert_equal(y_all, trans_3d.forward_tensor(trans_3d.backward_tensor(y_all))))
x_all = trans_3d.backward_tensor(y_all)
x_all_for = tf.stack([trans.backward_tensor(y_all[i]) for i in range(nodes)], axis=0)
sess.run(tf.assert_equal(x_all, x_all_for))
sess.run(tf.assert_equal(trans_3d.forward_tensor(x_all),
tf.stack([trans.forward_tensor(x_all[i]) for i in range(nodes)], axis=0)))
class DGPG(DGP_Base):
@time_it
def __init__(self, X, Y, Z, input_dims, likelihood, adj,
agg_op_name='concat3d', ARD=False,
is_Z_forward=True, mean_trainable=False, out_mf0=True,
kern_type='RBF',
**kwargs):
"""
init layers for graph dgp model.
:param X: (s1, n, d_in)
:param Y: (s1, n, d_out)
:param Z: (s2, n, d_in)
:param kernels: [(n, d_in)...] length=L
:param likelihood: todo
:param adj: (n, n)
:param is_Z_forward: whether Z should be aggregated and propagated among layers
"""
assert np.ndim(X) == 3 and np.ndim(Z) == 3 and np.ndim(Y) == 3
nb_agg = get_nbf_op(agg_op_name)
num_nodes = adj.shape[0]
raw_mask = adj.copy()
# 1. constructing layers
layers, X_running, Z_running = [], X.copy(), Z.copy()
layer_n = 0
for dim_in, dim_out in zip(input_dims[:-1], input_dims[1:]):
# get in->out dimension for current layer
# constructing mean function
W, fixed_nmf = FixedNMF.init(X_running, adj, dim_out, agg_op_name, mean_trainable)
# constructing kernel
if 'concat' in agg_op_name:
mask_concat = neighbour_feats(raw_mask, np.ones((num_nodes, dim_in))) # (n, n*feat)
kern = RBFNodes(num_nodes, num_nodes*dim_in, mask=mask_concat, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
else:
kern = RBFNodes(num_nodes, dim_in, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
# init layer
layers.append(SVGPG_Layer(fixed_nmf, kern, Z_running, adj, dim_out, agg_op_name, is_Z_forward))
print('input-output dim ({}(agg:{})->{})'.format(dim_in, kern.input_dim, dim_out))
# propagating X & Z
if is_Z_forward:
Z_running = nb_agg(adj, Z_running)
            # TODO: if concat is used and Z is not aggregated, Z cannot share the same mean_function as X;
            # a separate dimension-transforming W would be needed.
Z_running = FixedNMF.np_call(Z_running, W) # (s2, n, d_in) -> (s2, n, d_out)
X_running = FixedNMF.np_call(nb_agg(adj, X_running), W) # (s1, n, d_in) -> (s1, n, d_out)
layer_n += 1
# 2. constructing the last/output layer recording to the shape of Y
# constructing mean function
dim_in = input_dims[-1]
if 'concat' in agg_op_name:
mask_concat = neighbour_feats(raw_mask, np.ones((num_nodes, dim_in)))
kern = RBFNodes(num_nodes, num_nodes * dim_in, mask=mask_concat, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
else:
kern = RBFNodes(num_nodes, dim_in, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
mf = Zero() if out_mf0 else FixedNMF.init(X_running, adj, Y.shape[-1], agg_op_name, mean_trainable)[1]
# init layer
layers.append(SVGPG_Layer(mf, kern, Z_running, adj, Y.shape[-1], agg_op_name, is_Z_forward))
print('input-output dim ({}(agg:{})->{})'.format(dim_in, kern.input_dim, Y.shape[-1]))
# 3. using layers to init the Base model with 2d inputs
DGP_Base.__init__(self, X.reshape(X.shape[0], -1), Y.reshape(Y.shape[0], -1), likelihood, layers,
name='DGPG', **kwargs)
class TWDGPG(TWDGP_Base):
@time_it
def __init__(self, X, Y, Z, time_vec, input_dims, likelihood, adj,
agg_op_name='concat3d', ARD=False,
is_Z_forward=True, mean_trainable=False, out_mf0=True,
kern_type='RBF', wfunc='exp',
**kwargs):
"""
init layers for graph dgp model.
:param X: (s1, n, d_in)
:param Y: (s1, n, d_out)
:param Z: (s2, n, d_in)
:param kernels: [(n, d_in)...] length=L
:param likelihood: todo
:param adj: (n, n)
:param is_Z_forward: whether Z should be aggregated and propagated among layers
"""
#assert np.ndim(X) == 3 and np.ndim(Z) == 3 and np.ndim(Y) == 3
nb_agg = get_nbf_op(agg_op_name)
num_nodes = adj.shape[0]
raw_mask = adj.copy()
# 1. constructing layers
layers, X_running, Z_running = [], X.copy(), Z.copy()
layer_n = 0
for dim_in, dim_out in zip(input_dims[:-1], input_dims[1:]):
# get in->out dimension for current layer
# constructing mean function
W, fixed_nmf = FixedNMF.init(X_running, adj, dim_out, agg_op_name, mean_trainable)
# constructing kernel
if 'concat' in agg_op_name:
mask_concat = neighbour_feats(raw_mask, np.ones((num_nodes, dim_in))) # (n, n*feat)
kern = RBFNodes(num_nodes, num_nodes*dim_in, mask=mask_concat, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
else:
kern = RBFNodes(num_nodes, dim_in, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
# init layer
layers.append(SVGPG_Layer(fixed_nmf, kern, Z_running, adj, dim_out, agg_op_name, is_Z_forward))
print('input-output dim ({}(agg:{})->{})'.format(dim_in, kern.input_dim, dim_out))
# propagating X & Z
if is_Z_forward:
Z_running = nb_agg(adj, Z_running)
            # TODO: if concat is used and Z is not aggregated, Z cannot share the same mean_function as X;
            # a separate dimension-transforming W would be needed.
Z_running = FixedNMF.np_call(Z_running, W) # (s2, n, d_in) -> (s2, n, d_out)
X_running = FixedNMF.np_call(nb_agg(adj, X_running), W) # (s1, n, d_in) -> (s1, n, d_out)
layer_n += 1
# 2. constructing the last/output layer recording to the shape of Y
# constructing mean function
dim_in = input_dims[-1]
if 'concat' in agg_op_name:
mask_concat = neighbour_feats(raw_mask, np.ones((num_nodes, dim_in)))
kern = RBFNodes(num_nodes, num_nodes * dim_in, mask=mask_concat, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
else:
kern = RBFNodes(num_nodes, dim_in, ARD=ARD, layer_n=layer_n, kern_type=kern_type)
mf = Zero() if out_mf0 else FixedNMF.init(X_running, adj, Y.shape[-1], agg_op_name, mean_trainable)[1]
# init layer
layers.append(SVGPG_Layer(mf, kern, Z_running, adj, Y.shape[-1], agg_op_name, is_Z_forward))
print('input-output dim ({}(agg:{})->{})'.format(dim_in, kern.input_dim, Y.shape[-1]))
if np.ndim(X) == 3:
# 3. using layers to init the Base model with 2d inputs
TWDGP_Base.__init__(self, X.reshape(X.shape[0], -1), Y.reshape(Y.shape[0], -1), time_vec, likelihood, layers,
name='DGPG', wfunc=wfunc, **kwargs)
else:
TWDGP_Base.__init__(self, X.reshape(X.shape[0], X.shape[1], -1), Y.reshape(Y.shape[0], Y.shape[1], -1), time_vec, likelihood, layers,
name='DGPG', wfunc=wfunc, **kwargs)
class SVGPG_Layer(Layer):
"""
    Implements a single SVGP-Graph layer:
    1) conditional_ND() - computes the mean and covariance of q_f; used when sampling from q_f to evaluate the first term of the evidence lower bound
    2) KL() - computes the KL divergence of q_u; used to evaluate the second term of the evidence lower bound
"""
def __init__(self, nodes_mf, kern, Z, adj, dim_per_out, agg_op_name='concat3d', is_Z_forward=True, **kwargs):
assert np.ndim(Z) == 3
Layer.__init__(self, input_prop_dim=False, **kwargs)
# agg_operation
self.agg = get_nbf_op(agg_op_name)
self.tf_agg = get_nbf_op(agg_op_name, is_tf=True)
# shape variables
self.adj = adj
self.num_nodes = adj.shape[0]
self.num_inducing = Z.shape[0]
self.dim_per_out = dim_per_out
self.num_outputs = self.num_nodes * dim_per_out
# neighbour aggregation(e.g., sum, concat, mean) of Z
Z_agg = self.agg(adj, Z) if is_Z_forward else Z
# construct and init key Params:
# func m: mean_function
# func k: kern_function
# q(u)~N(u|bf{m},bf{S})
# bf{m} : q_mu
# bf{S} : q_sqrt @ q_sqrt.T
self.mean_function = nodes_mf
self.kern = kern
self.q_mu = Parameter(np.zeros((self.num_nodes, self.num_inducing, dim_per_out))) # (n, s2, d_out)
self.feature = IP3D(Z_agg)
# constructing Param *q_sqrt* with cholesky(Kuu)
Ku = kern.compute_K_symm(Z_agg)
Ku += np.eye(self.num_inducing) * settings.jitter # k(Z, Z)+ I*jitter : (n, s2, s2)
Lu = np.linalg.cholesky(Ku) # L = sqrt(k(Z,Z)) : (n, s2, s2)
q_sqrt = np.tile(np.expand_dims(Lu, axis=1), [1, dim_per_out, 1, 1]) # (n, dim_out, s2, s2)
self.q_sqrt = Parameter(q_sqrt, transform=LowerTriangular3D(self.num_nodes, self.num_inducing, dim_per_out))
self.needs_build_cholesky = True
@params_as_tensors
def build_cholesky_if_needed(self):
# make sure we only compute this once
if self.needs_build_cholesky:
self.Ku = self.feature.Kzz(self.kern, jitter=settings.jitter) # (n, s2, s2)
self.Lu = tf.linalg.cholesky(self.Ku) # (n, s2, s2) Low Triangle
self.Ku_tiled = tf.tile(tf.expand_dims(self.Ku, 1), [1, self.dim_per_out, 1, 1]) # (n,d_out,s2,s2)
self.Lu_tiled = tf.tile(tf.expand_dims(self.Lu, 1), [1, self.dim_per_out, 1, 1])
self.needs_build_cholesky = False
def conditional_ND(self, X, full_cov=False):
"""
wrapper 2d and 3d input/output
:param X: **2-dim** inputs with shape(samples1, nodes * dim_per_in)
:param full_cov: not used currently
:return: return 2-dim mean and variance
"""
#
self.build_cholesky_if_needed()
if tf.shape(X).shape[0] == 2:
X = tf.reshape(X, [tf.shape(X)[0], self.num_nodes, -1]) # (s1, n*d_in) -> (s1, n, d_in)
mean, var = self.conditional_ND_3D(X, full_cov=False)
mean = tf.reshape(tf.transpose(mean, [1, 0, 2]), [tf.shape(X)[0], -1]) # (n,s1,d_out) ->(s1, n*d_out)
var = tf.reshape(tf.transpose(var, [1, 0, 2]), [tf.shape(X)[0], -1])
return mean, var
else:
return self.conditional_ND_3D(X, full_cov=False)
def conditional_ND_3D(self, X, full_cov=False):
"""
implementation of equation (7)(8) of paper DS-DGP
:param X: input X (num_observation=s1, num_nodes=n, dim_per_in=d_in)
:param full_cov: whether calculating the full covariance
:return: mean, var
"""
if full_cov:
raise NotImplementedError
# 0. neighbour feat aggregation
X_agg = self.tf_agg(self.adj, X) # (s1, n, d_in/n*d_in)
# 1. calc alpha=k(Z,Z)^{-1}@k(Z, X)
Kuf = self.feature.Kuf(self.kern, X_agg) # Kuf(n, s2, s1)
alpha = tf.matrix_triangular_solve(self.Lu, Kuf, lower=True) # Lu(n, s2, s2), A(n, s2, s1)
alpha = tf.matrix_triangular_solve(tf.transpose(self.Lu, [0, 2, 1]), alpha, lower=False) # the same as above
alpha_tiled = tf.tile(tf.expand_dims(alpha, axis=1), [1, self.dim_per_out, 1, 1]) # (n, d_out, s2, s1)
# 2. calc mean of q(f)
# m(x) + alpha.T @ (mm - m(Z)), mm=0 here. m(Z):self.q_mu
mean = tf.transpose(alpha, [0, 2, 1]) @ self.q_mu # (n, s1, s2)@(n, s2, d_out)=(n, s1, d_out)
mean += tf.transpose(self.mean_function(X_agg), [1, 0, 2]) # (n, s1, d_out) + (s1, n, d_out).T(0,1)
# 3.1 calc the the 2nd term of covariance
# SK = -k(Z, Z) + S | Ku_tiled(n, d_out, s2, s2), q_sqrt(n, d_out, s2, s2)
SK = -self.Ku_tiled + self.q_sqrt @ tf.transpose(self.q_sqrt, [0, 1, 3, 2])
        # alpha(x).T @ SK @ alpha(x); only the diagonal of the covariance is needed, so this elementwise trick replaces a full matmul
delta_cov = tf.reduce_sum(alpha_tiled * (SK @ alpha_tiled), axis=2) # (n, d_out, s2, s1)-> (n, d_out, s1)
# 3.2 calc cov = k(X, X) + delta_cov
Kff = self.kern.Kdiag(X_agg) # (n, s1)
var = tf.expand_dims(Kff, 1) + delta_cov # (n, 1, s1)+(n, d_out, s1) = (n, d_out, s1)
var = tf.transpose(var, [0, 2, 1]) # (n, d_out, s1) -> (n, s1, d_out)
return mean, var # (n, s1, d_out) both
def KL(self):
"""
        Compute the KL divergence of each node separately and then sum <==> sum every intermediate term and accumulate at the end.
:return: KL divergence from N(q_mu, q_sqrt) to N(0, I), independently for each GP
"""
self.build_cholesky_if_needed()
KL = -0.5 * self.num_inducing * self.num_nodes * self.dim_per_out
# q_sqrt(n,d_out,s2,s2) -> diag:(n,d_out,s2)-> reduce_sum: (n,)
KL -= 0.5 * tf.reduce_sum(tf.log(tf.square(tf.linalg.diag_part(self.q_sqrt))))
# Lu(n, s2, s2) -> diag(n, s2) -> reduce_sum(n,)
KL += tf.reduce_sum(tf.log(tf.linalg.diag_part(self.Lu))) * self.dim_per_out
# Lu_tiled(n, d_out, s2, s2)
KL += 0.5 * tf.reduce_sum(tf.square(tf.matrix_triangular_solve(self.Lu_tiled, self.q_sqrt, lower=True)))
Kinv_m = tf.cholesky_solve(self.Lu, self.q_mu) # (n, s2, s2),(n, s2, d_out) -> (n, s2, d_out)
KL += 0.5 * tf.reduce_sum(self.q_mu * Kinv_m)
return KL
def conditional_SND(self, X, full_cov=False):
"""
A multisample conditional, where X is shape (S,N,D_out), independent over samples S
if full_cov is True
mean is (S,N,D_out), var is (S,N,N,D_out)
if full_cov is False
mean and var are both (S,N,D_out)
:param X: The input locations (S,N,D_in)
:param full_cov: Whether to calculate full covariance or just diagonal
:return: mean (S,N,D_out), var (S,N,D_out or S,N,N,D_out)
"""
if full_cov is True:
raise NotImplementedError
else:
S, N, D = tf.shape(X)[0], tf.shape(X)[1], tf.shape(X)[2]
X_flat = tf.reshape(X, [S * N, D])
mean, var = self.conditional_ND(X_flat)
return [tf.reshape(m, [S, N, self.num_outputs]) for m in [mean, var]]
class RBFNodes(Kernel):
def __init__(self, nodes, input_dim, mask=None, ARD=False, layer_n=0, name='RBFNode', kern_type='RBF'):
"""
rbf kernel for each node, computed parallel
:param nodes: number of nodes
:param input_dim: number of node features
:param mask: 0-1 adj used to mask represent active dims of feats_dim. (nodes, feats)
"""
super().__init__(input_dim, active_dims=None, name=name+str(layer_n))
self.nodes = nodes
self.mask = mask
# init params
self.variance = Parameter(np.ones(nodes), transform=transforms.positive,
dtype=settings.float_type)
if ARD:
lengthscales = mask if mask is not None else np.ones((nodes, input_dim))
else:
lengthscales = np.ones(nodes, dtype=settings.float_type)
self.lengthscales = Parameter(lengthscales, transform=transforms.positive,
dtype=settings.float_type)
self.kern_type = kern_type
self.build() # very important, it's a confusing point that this class can't be auto built.
    # autoflow automatically builds the tensor graph, runs it in a session and returns concrete results,
    # saving the boilerplate of executing the graph by hand. Roughly equivalent to TF2 eager execution.
@autoflow((settings.float_type, [None, None, None]),
(settings.float_type, [None, None, None]))
def compute_K(self, X, Z):
return self.K(X, Z)
@autoflow((settings.float_type, [None, None, None]))
def compute_K_symm(self, X):
return self.K(X)
@autoflow((settings.float_type, [None, None, None]))
def compute_Kdiag(self, X):
return self.Kdiag(X)
@staticmethod
def rbf(args):
"""
:param args: tuples of [X:(s1/s2, d), X2:(s1/s2, d), LS:(d/1,), VAR(1,)]
:return: rbf variance of X and X2.
"""
X, X2, lengthscales, variance = args
#print(X.shape, lengthscales.shape)
# calculate r2
X = X / lengthscales
if X2 is None:
Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)
dist = -2 * tf.matmul(X, X, transpose_b=True)
dist += Xs + tf.transpose(Xs, [1, 0])
r2 = dist
else:
Xs = tf.reduce_sum(tf.square(X), axis=-1)
X2 = X2 / lengthscales
X2s = tf.reduce_sum(tf.square(X2), axis=-1)
r2 = -2 * tf.tensordot(X, X2, [[-1], [-1]])
r2 += _broadcasting_elementwise_op(tf.add, Xs, X2s)
# calculate rbf
rbf = variance * tf.exp(-r2 / 2.)
return rbf
@staticmethod
def r(X, X2, lengthscales):
X = X / lengthscales
if X2 is None:
Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)
dist = -2 * tf.matmul(X, X, transpose_b=True)
dist += Xs + tf.transpose(Xs, [1, 0])
r2 = dist
else:
Xs = tf.reduce_sum(tf.square(X), axis=-1)
X2 = X2 / lengthscales
X2s = tf.reduce_sum(tf.square(X2), axis=-1)
r2 = -2 * tf.tensordot(X, X2, [[-1], [-1]])
r2 += _broadcasting_elementwise_op(tf.add, Xs, X2s)
r = tf.sqrt(tf.maximum(r2, 1e-36))
return r
@staticmethod
def m12(args):
X, X2, lengthscales, variance = args
r = RBFNodes.r(X, X2, lengthscales)
m12 = variance * tf.exp(-r)
return m12
@staticmethod
def m32(args):
X, X2, lengthscales, variance = args
r = RBFNodes.r(X, X2, lengthscales)
sqrt3 = np.sqrt(3.)
m32 = variance * (1. + sqrt3 * r) * tf.exp(-sqrt3 * r)
return m32
@staticmethod
def m52(args):
X, X2, lengthscales, variance = args
r = RBFNodes.r(X, X2, lengthscales)
sqrt5 = np.sqrt(5.)
m52 = variance * (1.0 + sqrt5 * r + 5. / 3. * tf.square(r)) * tf.exp(-sqrt5 * r)
return m52
@staticmethod
def poly(args):
X, X2, lengthscales, variance = args
X = X * (lengthscales)
if X2 is None:
ss = tf.matmul(X, X, transpose_b=True)
else:
X2 = X2 * (lengthscales)
ss = tf.tensordot(X, X2, [[-1], [-1]])
dg = 2
#return ((ss+1)**dg)
return (ss)
@staticmethod
def poly2(args):
X, X2, lengthscales, variance = args
X = X * (lengthscales)
if X2 is None:
ss = tf.matmul(X, X, transpose_b=True)
else:
X2 = X2 * (lengthscales)
ss = tf.tensordot(X, X2, [[-1], [-1]])
dg = 2
return ((ss+1)**dg)
#return (ss)
@staticmethod
def rbf_self(args):
X, lengthscales, variance = args
return RBFNodes.rbf([X, None, lengthscales, variance])
@staticmethod
def m12_self(args):
X, lengthscales, variance = args
return RBFNodes.m12([X, None, lengthscales, variance])
@staticmethod
def m32_self(args):
X, lengthscales, variance = args
return RBFNodes.m32([X, None, lengthscales, variance])
@staticmethod
def m52_self(args):
X, lengthscales, variance = args
return RBFNodes.m52([X, None, lengthscales, variance])
@staticmethod
def poly_self(args):
X, lengthscales, variance = args
return RBFNodes.poly([X, None, lengthscales, variance])
@staticmethod
def poly_self2(args):
X, lengthscales, variance = args
return RBFNodes.poly2([X, None, lengthscales, variance])
@params_as_tensors
def K(self, X, X2=None):
"""
calc rbf similarity for each node. nodes calc could be independent/the same/correlated
how about employing adj? kernel matrix of different nodes have correlation.
There are two ways to parallel:
tf.map_fn(lambda x: scaled_square_dist(x[0], x[1]), (A, B), dtype=tf.float32)
tf.vectorized_map(RBFNodes.rbf, (X_, X2_))
:param X: (s1, n, d)
:param X2: (s2, n, d)
:return: K(X, X2) = (n, s1, s2)
"""
assert tf.shape(X).shape[0] == 3
#print(X.shape, self.lengthscales.shape)
X_ = tf.transpose(X, [1, 0, 2]) # (n, s1, d)
if X2 is None:
if self.kern_type == 'RBF':
return tf.vectorized_map(RBFNodes.rbf_self, (X_, self.lengthscales, self.variance)) # (n, s1, s1)
elif self.kern_type == 'Matern12':
return tf.vectorized_map(RBFNodes.m12_self, (X_, self.lengthscales, self.variance)) # (n, s1, s1)
elif self.kern_type == 'Matern32':
return tf.vectorized_map(RBFNodes.m32_self, (X_, self.lengthscales, self.variance)) # (n, s1, s1)
elif self.kern_type == 'Matern52':
return tf.vectorized_map(RBFNodes.m52_self, (X_, self.lengthscales, self.variance)) # (n, s1, s1)
elif self.kern_type == 'Poly1':
return tf.vectorized_map(RBFNodes.poly_self, (X_, self.lengthscales, self.variance)) # (n, s1, s1)
elif self.kern_type == 'Poly2':
return tf.vectorized_map(RBFNodes.poly_self2, (X_, self.lengthscales, self.variance)) # (n, s1, s1)
else:
X2_ = tf.transpose(X2, [1, 0, 2]) # (n, s1, d)
if self.kern_type == 'RBF':
return tf.vectorized_map(RBFNodes.rbf, (X_, X2_, self.lengthscales, self.variance)) # (n, s1, s2)
elif self.kern_type == 'Matern12':
return tf.vectorized_map(RBFNodes.m12, (X_, X2_, self.lengthscales, self.variance)) # (n, s1, s2)
elif self.kern_type == 'Matern32':
return tf.vectorized_map(RBFNodes.m32, (X_, X2_, self.lengthscales, self.variance)) # (n, s1, s2)
elif self.kern_type == 'Matern52':
return tf.vectorized_map(RBFNodes.m52, (X_, X2_, self.lengthscales, self.variance)) # (n, s1, s2)
elif self.kern_type == 'Poly1':
return tf.vectorized_map(RBFNodes.poly, (X_, X2_, self.lengthscales, self.variance)) # (n, s1, s2)
elif self.kern_type == 'Poly2':
return tf.vectorized_map(RBFNodes.poly2, (X_, X2_, self.lengthscales, self.variance)) # (n, s1, s2)
@params_as_tensors
def Kdiag(self, X):
"""
calc diag covariance only
:param X: (s1, n, d_in)
:return: (n, s1)
"""
return tf.tile(tf.expand_dims(self.variance, axis=-1), [1, tf.shape(X)[0]])
# return tf.fill(tf.shape(X)[:-1], tf.squeeze(self.variance))
def singe_sparse_svd(sparse_matrix, mask, topk, k_apx=1000):
"""
sparse_matrix: (m, n*feats), mask(n*feats,)
there are many zero columns in sparse_matrix, masked by mask array.
return topk component of right singular matrix
"""
dense_matrix = sparse_matrix.T[np.where(mask > 0)].T
# approximation
if dense_matrix.shape[0] > k_apx:
dense_matrix = dense_matrix[:k_apx]
_, _, V = np.linalg.svd(dense_matrix) # V(nb*feats, nb*feats)
result = np.zeros(shape=(sparse_matrix.shape[-1], topk))
result[np.where(mask > 0)] = V[:,:topk]
return result
vec_sparse_svd = np.vectorize(singe_sparse_svd, signature='(n,nf),(nf)->(nf,topk)', excluded=['topk'])
def test_sparse_svd():
"""
testing for sparse svd
"""
mask = np.random.randint(0, 2, size=(5, 5 * 4))
sparse_matrix = np.random.randint(1, 10, size=(5, 3, 5 * 4))
sparse_matrix = sparse_matrix * mask[:, None, :]
topk = 2
# raw method
results = []
for i in range(sparse_matrix.shape[0]):
dense_matrix = sparse_matrix[i].T[np.where(mask[i] == 1)].T
results.append(np.linalg.svd(dense_matrix)[-1][:, :topk])
y = results
# masked & padded method
x = vec_sparse_svd(sparse_matrix, mask, topk=topk)
# assert almost equal (ignore extremely small numerical errors)
for i in range(sparse_matrix.shape[0]):
np.testing.assert_almost_equal(np.dot(sparse_matrix[i], x[i]),
np.dot(sparse_matrix[i].T[np.where(mask[i] == 1)].T, y[i]))
class FixedNMF(MeanFunction):
"""
Fixed Nodes Mean Function, The projection parameter W is fixed (i.e., non-trainable)
"""
def __init__(self, W, b, trainable=False):
MeanFunction.__init__(self)
self.W = Parameter(W, trainable=trainable)
self.b = Parameter(b, trainable=trainable)
self.trainable = trainable
@staticmethod
def init(graph_signal, adj, feat_out, agg_op_name='concat', trainable=False):
assert np.ndim(graph_signal) == 3 # observations, nodes, feats
agg_op = get_nbf_op(agg_op_name)
s, n, feat_in = graph_signal.shape
#aggregation = agg_op(adj, graph_signal) # (b, n, f_in/n*f_in)
aggregation = agg_op(adj, graph_signal[:5000]) # approximate (b, n, f_in/n*f_in)
feat_in_expand = aggregation.shape[-1]
if feat_in_expand == feat_out:
W = np.identity(feat_in_expand)
elif feat_in_expand > feat_out:
# calc svd for every node and extract the primary component.
if 'concat' in agg_op_name:
mask_concat = neighbour_feats(adj, np.ones((n, feat_in))) # (nodes, nodes*nbs)
W = vec_sparse_svd(np.transpose(aggregation, [1, 0, 2]), mask_concat, topk=feat_out)
# W: (nodes, nbs*feat_in, feat_out)
else:
_, _, V = np.linalg.svd(np.transpose(aggregation, [1, 0, 2])) # (nodes, feat_in, feat_in)
W = np.transpose(V[:, :feat_out], [0, 2, 1]) # (nodes, feat_in, feat_out)
else:
# (f_in, f_out)
W = np.concatenate([np.eye(feat_in_expand), np.zeros((feat_in_expand, feat_out - feat_in_expand))], 1)
b = np.zeros(feat_out)
mean_function = FixedNMF(W, b, trainable=trainable)
return W, mean_function
def __call__(self, X):
"""
calculate mean for every node recording to different dimension cases.
:param input: X(s,n,f_in) or X(s, n*f_in)
:return: mean(X) - (s, n, f_out)
"""
assert tf.shape(X).shape[0] == 3
if tf.shape(self.W).shape[0] == 2: # identity/padding
mX = tf.tensordot(X, self.W, [[-1], [0]]) # X(s,n,f_in), W(f_in,f_out) padding case
else: # transforming for each node separately with different
mX = tf.expand_dims(X, axis=-2) @ self.W # X(s,n,1,f_in), W(n,f_in,f_out) pca case
mX = tf.reduce_sum(mX, axis=-2) # mX(s,n,f_out)
return mX + self.b
@staticmethod
def np_call(X, W):
assert np.ndim(X) == 3
if np.ndim(W) == 2: # identity/padding
mX = np.matmul(X, W) # X(s,n,f_in), W(f_in,f_out) padding case
else: # transforming for each node separately with different
mX = np.matmul(np.expand_dims(X, axis=-2), W) # X(s,n,1,f_in), W(n,f_in,f_out) pca case
mX = np.sum(mX, axis=-2) # mX(s,n,f_out)
return mX
@staticmethod
def test_agg():
gs = np.random.rand(4, 3, 2)
adj = np.round(np.random.rand(3, 3), 0)
agg = np.matmul(adj, gs)
agg_for = np.stack([np.matmul(adj, gs[i]) for i in range(gs.shape[0])], axis=0)
assert np.array_equal(agg, agg_for)
@staticmethod
def test_call():
x = tf.random.uniform((9, 3, 4))
# case 1: for f_in < f_out (padding)
w = tf.random.uniform((4, 6))
xw = tf.tensordot(x, w, [[-1], [0]])
xw_for = tf.stack([x[i]@w for i in range(x.shape[0])], axis=0)
tf.assert_equal(xw, xw_for)
# case 2 for f_in > f_out (pca case, do pca for each node)
x = tf.expand_dims(x, axis=-2) # (9, 3, 1, 4)
w = tf.random.uniform((3, 4, 2))
xw = x@w
xw_for = tf.stack([x[i]@w for i in range(x.shape[0])], axis=0)
tf.assert_equal(xw, xw_for)
if __name__ == '__main__':
test_sparse_svd()
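As a reading aid, the per-node conditional computed in SVGPG_Layer.conditional_ND_3D above follows the usual doubly-stochastic sparse-GP form; the LaTeX below restates the in-code comments rather than quoting the DS-DGP paper:

```latex
\alpha(x) = K_{ZZ}^{-1}\,k(Z, x), \qquad
\mu(x) = m(x) + \alpha(x)^{\top}\mathbf{m}, \qquad
\sigma^{2}(x) = k(x, x) + \alpha(x)^{\top}\bigl(\mathbf{S} - K_{ZZ}\bigr)\,\alpha(x)
```

where \mathbf{m} is q_mu and \mathbf{S} = q_sqrt @ q_sqrt.T, matching the SK and delta_cov terms in the code.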
| 42.793057 | 146 | 0.588887 |
| 794ea932050165867b6b9d8983e1375f81ffd90b | 9,538 | py | Python | cryptofeed_werks/tests/test_calendar.py | globophobe/django-quant-werks | fc2f3eefccf03f4cb4df0abe6bb6c54cbd2e998d | ["MIT"] | 7 | 2021-12-30T02:38:17.000Z | 2022-03-08T16:14:35.000Z | cryptofeed_werks/tests/test_calendar.py | globophobe/fastapi-quant-candles | 0bc95f6bb32071aa32a4951ca0a15521f67f7f97 | ["MIT"] | null | null | null | cryptofeed_werks/tests/test_calendar.py | globophobe/fastapi-quant-candles | 0bc95f6bb32071aa32a4951ca0a15521f67f7f97 | ["MIT"] | 1 | 2022-01-28T00:18:45.000Z | 2022-01-28T00:18:45.000Z |
from datetime import datetime, time, timezone
from typing import List
import pandas as pd
from django.test import TestCase
from cryptofeed_werks.lib import (
get_current_time,
get_min_time,
get_next_time,
get_range,
iter_missing,
iter_timeframe,
iter_window,
parse_period_from_to,
)
class GetMinTimeTestCase(TestCase):
def test_get_min_time_1d(self):
"""Get start of current day."""
now = get_current_time()
min_time = get_min_time(now, value="1d")
self.assertEqual(
min_time,
datetime.combine(min_time.date(), time.min).replace(tzinfo=timezone.utc),
)
class GetNexttimeTestCase(TestCase):
def test_get_next_minute(self):
"""Get start of next day."""
now = get_current_time()
tomorrow = get_next_time(now, value="1d")
self.assertEqual(
tomorrow,
datetime.combine(tomorrow.date(), time.min).replace(tzinfo=timezone.utc),
)
class GetRangeTestCase(TestCase):
def setUp(self):
now = get_current_time()
self.timestamp_from = get_min_time(now, value="1d")
def test_get_range_1m(self):
"""Get range, by 1 minute."""
one_minute = pd.Timedelta("1t")
timestamp_to = get_next_time(self.timestamp_from, value="1d") - one_minute
values = get_range(self.timestamp_from, timestamp_to)
self.assertEqual(len(values), 1440)
self.assertEqual(values[0], self.timestamp_from)
self.assertEqual(values[-1], timestamp_to)
def test_get_range_1d(self):
"""Get range, by 1 day."""
values = get_range(self.timestamp_from, self.timestamp_from, value="1d")
self.assertEqual(len(values), 1)
self.assertEqual(values[0], self.timestamp_from)
class IterWindowTestCase(TestCase):
def setUp(self):
one_day = pd.Timedelta("1d")
self.now = get_current_time()
self.yesterday = self.now - one_day
self.two_days_ago = self.yesterday - one_day
def test_iter_window(self):
"""Iter window by days."""
values = [
value for value in iter_window(self.two_days_ago, self.now, value="1d")
]
self.assertEqual(len(values), 2)
self.assertEqual(values[0][0], get_min_time(self.two_days_ago, value="1d"))
self.assertEqual(values[1][1], get_min_time(self.now, value="1d"))
def test_iter_window_reverse(self):
"""Iter window by days, in reverse."""
values = [
value
for value in iter_window(
self.two_days_ago, self.now, value="1d", reverse=True
)
]
self.assertEqual(len(values), 2)
self.assertEqual(values[0][1], get_min_time(self.now, value="1d"))
self.assertEqual(values[1][0], get_min_time(self.two_days_ago, value="1d"))
class IterTimeframeTestCase(TestCase):
def setUp(self):
self.now = get_current_time()
def get_values(
self, timestamp_from: datetime, timestamp_to: datetime
) -> List[tuple]:
"""Get values for timeframe."""
return [
value
for value in iter_timeframe(timestamp_from, timestamp_to, reverse=True)
]
def test_iter_timeframe_with_head_and_no_body(self):
"""Current day only."""
date_from = self.now.date().isoformat()
time_from = self.now.time().isoformat()
timestamp_from, timestamp_to = parse_period_from_to(
date_from=date_from, time_from=time_from
)
values = self.get_values(timestamp_from, timestamp_to)
self.assertEqual(len(values), 1)
ts_from, ts_to = values[0]
self.assertEqual(ts_from, get_min_time(timestamp_from, "1t"))
self.assertEqual(ts_to, get_min_time(timestamp_to, "1t"))
def test_iter_timeframe_with_tail_only(self):
"""Previous day only."""
yesterday = self.now - pd.Timedelta("1d")
date_from = yesterday.date().isoformat()
time_from = yesterday.time().isoformat()
date_to = self.now.date().isoformat()
timestamp_from, timestamp_to = parse_period_from_to(
date_from=date_from, time_from=time_from, date_to=date_to
)
values = self.get_values(timestamp_from, timestamp_to)
self.assertEqual(len(values), 1)
def test_iter_timeframe_with_head(self):
"""1 min after midnight yesterday, until today."""
yesterday = get_min_time(self.now, "2d") + pd.Timedelta("1m")
today = get_min_time(self.now, "1d")
timestamp_from, timestamp_to = parse_period_from_to(
date_from=yesterday.date().isoformat(),
time_from=yesterday.time().isoformat(),
date_to=today.date().isoformat(),
time_to=today.time().isoformat(),
)
values = self.get_values(timestamp_from, timestamp_to)
target = len(values) - 1
for index, value in enumerate(values):
ts_from, ts_to = value
if index != target:
self.assertEqual(ts_from + pd.Timedelta("1d"), ts_to)
else:
self.assertEqual(ts_from, timestamp_from)
self.assertEqual(ts_to, get_min_time(ts_to, value="1d"))
def test_iter_timeframe_with_neither_head_nor_tail(self):
"""Two days ago until yesterday."""
yesterday = self.now - pd.Timedelta("1d")
two_days_ago = self.now - pd.Timedelta("2d")
timestamp_from, timestamp_to = parse_period_from_to(
date_from=two_days_ago.date().isoformat(),
date_to=yesterday.date().isoformat(),
)
values = self.get_values(timestamp_from, timestamp_to)
self.assertEqual(len(values), 1)
self.assertEqual(values[0][0], timestamp_from)
self.assertEqual(values[0][1], timestamp_to)
def test_iter_timeframe_with_tail(self):
"""Yesterday, 1 minute to midnight."""
timestamp = get_min_time(self.now, "1d") - pd.Timedelta("1m")
timestamp_from, timestamp_to = parse_period_from_to(
date_to=timestamp.date().isoformat(), time_to=timestamp.time().isoformat()
)
values = self.get_values(timestamp_from, timestamp_to)
for index, value in enumerate(values):
ts_from, ts_to = value
if index == 0:
self.assertEqual(ts_from, get_min_time(ts_from, value="1d"))
self.assertEqual(ts_to, timestamp_to)
else:
self.assertEqual(ts_from + pd.Timedelta("1d"), ts_to)
class IterMissingTestCase(TestCase):
def setUp(self):
self.one_minute = pd.Timedelta("1t")
self.timestamp_from = get_min_time(get_current_time(), "1d")
self.timestamp_to = self.timestamp_from + (self.one_minute * 5)
self.timestamps = get_range(self.timestamp_from, self.timestamp_to)
def test_iter_missing_with_no_missing(self):
"""No missing timestamps."""
values = [
value for value in iter_missing(self.timestamp_from, self.timestamp_to, [])
]
self.assertEqual(len(values), 1)
self.assertEqual(values[0][0], self.timestamp_from)
self.assertEqual(values[-1][1], self.timestamp_to)
def test_iter_missing_with_head(self):
"""First timestamp is OK."""
existing = self.timestamps[0]
values = [
value
for value in iter_missing(
self.timestamp_from, self.timestamp_to, [existing]
)
]
self.assertEqual(len(values), 1)
self.assertEqual(values[0][0], self.timestamp_from + self.one_minute)
self.assertEqual(values[-1][1], self.timestamp_to)
def test_iter_missing_with_one_timestamp_ok(self):
"""Second timestamp is OK."""
existing = self.timestamps[1]
values = [
value
for value in iter_missing(
self.timestamp_from, self.timestamp_to, [existing]
)
]
self.assertEqual(len(values), 2)
self.assertEqual(values[0][0], self.timestamp_from)
self.assertEqual(values[0][1], existing)
self.assertEqual(values[-1][0], existing + self.one_minute)
self.assertEqual(values[-1][1], self.timestamp_to)
def test_iter_missing_with_two_timestamps_ok(self):
"""Second and fourth timestamps are OK."""
existing_one = self.timestamps[1]
existing_two = self.timestamps[3]
values = [
value
for value in iter_missing(
self.timestamp_from, self.timestamp_to, [existing_one, existing_two]
)
]
self.assertEqual(len(values), 3)
self.assertEqual(values[0][0], self.timestamp_from)
self.assertEqual(values[0][1], existing_one)
self.assertEqual(values[1][0], existing_one + self.one_minute)
self.assertEqual(values[1][1], existing_two)
self.assertEqual(values[-1][0], existing_two + self.one_minute)
self.assertEqual(values[-1][1], self.timestamp_to)
def test_iter_missing_with_tail(self):
"""Last timestamp is OK."""
existing = self.timestamps[-1]
values = [
value
for value in iter_missing(
self.timestamp_from, self.timestamp_to, [existing]
)
]
self.assertEqual(len(values), 1)
self.assertEqual(values[0][0], self.timestamp_from)
self.assertEqual(values[0][1], self.timestamp_to)
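# Editor's note (illustrative summary of the assertions above): iter_missing
# yields (start, end) pairs covering the timestamps that are not yet stored --
# with no existing timestamps it yields the full
# (timestamp_from, timestamp_to) window, and each existing timestamp splits
# the range, with the next missing run starting one minute after it.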
| 37.849206 | 87 | 0.626337 |
794ea9d751c4f4e3f296f813809dd78ea7a413b9 | 16,287 | py | Python | app/env/lib/python3.7/site-packages/twilio/rest/preview/sync/service/sync_map/sync_map_permission.py | siyaochen/Tier1Health | 536591a7534bbb3fb27fe889bfed9de152ec1864 | ["MIT"] | 30 | 2018-06-12T12:00:53.000Z | 2021-05-02T01:27:16.000Z | app/env/lib/python3.7/site-packages/twilio/rest/preview/sync/service/sync_map/sync_map_permission.py | siyaochen/Tier1Health | 536591a7534bbb3fb27fe889bfed9de152ec1864 | ["MIT"] | 11 | 2019-12-26T17:21:03.000Z | 2022-03-21T22:17:07.000Z | app/env/lib/python3.7/site-packages/twilio/rest/preview/sync/service/sync_map/sync_map_permission.py | siyaochen/Tier1Health | 536591a7534bbb3fb27fe889bfed9de152ec1864 | ["MIT"] | 4 | 2018-06-12T14:14:20.000Z | 2018-06-19T16:01:49.000Z |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SyncMapPermissionList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, service_sid, map_sid):
"""
Initialize the SyncMapPermissionList
:param Version version: Version that contains the resource
:param service_sid: Sync Service Instance SID.
:param map_sid: Sync Map SID.
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionList
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionList
"""
super(SyncMapPermissionList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'map_sid': map_sid, }
self._uri = '/Services/{service_sid}/Maps/{map_sid}/Permissions'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams SyncMapPermissionInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
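    # Illustrative usage (editor's sketch; `version` and the SIDs below are
    # placeholders, not real identifiers):
    #
    #   permissions = SyncMapPermissionList(version, 'ISxxx...', 'MPxxx...')
    #   for permission in permissions.stream(limit=20):
    #       print(permission.identity, permission.read, permission.write)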
def list(self, limit=None, page_size=None):
"""
Lists SyncMapPermissionInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SyncMapPermissionInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SyncMapPermissionPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SyncMapPermissionInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SyncMapPermissionPage(self._version, response, self._solution)
def get(self, identity):
"""
Constructs a SyncMapPermissionContext
:param identity: Identity of the user to whom the Sync Map Permission applies.
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionContext
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionContext
"""
return SyncMapPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
identity=identity,
)
def __call__(self, identity):
"""
Constructs a SyncMapPermissionContext
:param identity: Identity of the user to whom the Sync Map Permission applies.
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionContext
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionContext
"""
return SyncMapPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
identity=identity,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Sync.SyncMapPermissionList>'
class SyncMapPermissionPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the SyncMapPermissionPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param service_sid: Sync Service Instance SID.
:param map_sid: Sync Map SID.
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionPage
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionPage
"""
super(SyncMapPermissionPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SyncMapPermissionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
"""
return SyncMapPermissionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Sync.SyncMapPermissionPage>'
class SyncMapPermissionContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, service_sid, map_sid, identity):
"""
Initialize the SyncMapPermissionContext
:param Version version: Version that contains the resource
:param service_sid: The service_sid
:param map_sid: Sync Map SID or unique name.
:param identity: Identity of the user to whom the Sync Map Permission applies.
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionContext
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionContext
"""
super(SyncMapPermissionContext, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'map_sid': map_sid, 'identity': identity, }
self._uri = '/Services/{service_sid}/Maps/{map_sid}/Permissions/{identity}'.format(**self._solution)
def fetch(self):
"""
Fetch a SyncMapPermissionInstance
:returns: Fetched SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SyncMapPermissionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
identity=self._solution['identity'],
)
def delete(self):
"""
Deletes the SyncMapPermissionInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, read, write, manage):
"""
Update the SyncMapPermissionInstance
:param bool read: Read access.
:param bool write: Write access.
:param bool manage: Manage access.
:returns: Updated SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
"""
data = values.of({'Read': read, 'Write': write, 'Manage': manage, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return SyncMapPermissionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
identity=self._solution['identity'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Sync.SyncMapPermissionContext {}>'.format(context)
class SyncMapPermissionInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload, service_sid, map_sid, identity=None):
"""
Initialize the SyncMapPermissionInstance
:returns: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
"""
super(SyncMapPermissionInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'service_sid': payload['service_sid'],
'map_sid': payload['map_sid'],
'identity': payload['identity'],
'read': payload['read'],
'write': payload['write'],
'manage': payload['manage'],
'url': payload['url'],
}
# Context
self._context = None
self._solution = {
'service_sid': service_sid,
'map_sid': map_sid,
'identity': identity or self._properties['identity'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncMapPermissionContext for this SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionContext
"""
if self._context is None:
self._context = SyncMapPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
identity=self._solution['identity'],
)
return self._context
@property
def account_sid(self):
"""
:returns: Twilio Account SID.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: Sync Service Instance SID.
:rtype: unicode
"""
return self._properties['service_sid']
@property
def map_sid(self):
"""
:returns: Sync Map SID.
:rtype: unicode
"""
return self._properties['map_sid']
@property
def identity(self):
"""
:returns: Identity of the user to whom the Sync Map Permission applies.
:rtype: unicode
"""
return self._properties['identity']
@property
def read(self):
"""
:returns: Read access.
:rtype: bool
"""
return self._properties['read']
@property
def write(self):
"""
:returns: Write access.
:rtype: bool
"""
return self._properties['write']
@property
def manage(self):
"""
:returns: Manage access.
:rtype: bool
"""
return self._properties['manage']
@property
def url(self):
"""
:returns: URL of this Sync Map Permission.
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a SyncMapPermissionInstance
:returns: Fetched SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the SyncMapPermissionInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, read, write, manage):
"""
Update the SyncMapPermissionInstance
:param bool read: Read access.
:param bool write: Write access.
:param bool manage: Manage access.
:returns: Updated SyncMapPermissionInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_permission.SyncMapPermissionInstance
"""
return self._proxy.update(read, write, manage, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Sync.SyncMapPermissionInstance {}>'.format(context)
| 35.561135 | 109 | 0.639958 |
794ea9ee2cf6ac4e80716ddddb5c293e77263be9 | 94,806 | py | Python | sdks/python/apache_beam/dataframe/frames.py | elharo/beam | a86dc0609f0b1bcc0c450979363b27b2657418af | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | sdks/python/apache_beam/dataframe/frames.py | elharo/beam | a86dc0609f0b1bcc0c450979363b27b2657418af | ["Apache-2.0", "BSD-3-Clause"] | null | null | null | sdks/python/apache_beam/dataframe/frames.py | elharo/beam | a86dc0609f0b1bcc0c450979363b27b2657418af | ["Apache-2.0", "BSD-3-Clause"] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import inspect
import math
import re
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frame_base
from apache_beam.dataframe import io
from apache_beam.dataframe import partitionings
def populate_not_implemented(pd_type):
def wrapper(deferred_type):
for attr in dir(pd_type):
# Don't auto-define hidden methods or dunders
if attr.startswith('_'):
continue
if not hasattr(deferred_type, attr):
pd_value = getattr(pd_type, attr)
if isinstance(pd_value, property) or inspect.isclass(pd_value):
          # Some of the properties on pandas types (cat, dt, sparse) are
          # actually attributes with class values, not properties
setattr(
deferred_type,
attr,
property(frame_base.not_implemented_method(attr)))
elif callable(pd_value):
setattr(deferred_type, attr, frame_base.not_implemented_method(attr))
return deferred_type
return wrapper
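# Editor's note (illustrative, not part of the module): the decorator above
# back-fills any public pandas attribute that the deferred type does not
# define, e.g.
#
#   @populate_not_implemented(pd.Series)
#   class DeferredSeries(DeferredDataFrameOrSeries): ...
#
# gives DeferredSeries a not_implemented_method stub (wrapped in a property
# for properties and class-valued attributes such as cat/dt/sparse) for every
# remaining pd.Series attribute.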
class DeferredDataFrameOrSeries(frame_base.DeferredFrame):
def __array__(self, dtype=None):
raise frame_base.WontImplementError(
        'Conversion to a non-deferred numpy array.')
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def drop(self, labels, axis, index, columns, errors, **kwargs):
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
if axis in (0, 'index'):
index = labels
columns = None
elif axis in (1, 'columns'):
index = None
columns = labels
else:
raise ValueError(
"axis must be one of (0, 1, 'index', 'columns'), "
"got '%s'" % axis)
if columns is not None:
# Compute the proxy based on just the columns that are dropped.
proxy = self._expr.proxy().drop(columns=columns, errors=errors)
else:
proxy = self._expr.proxy()
if index is not None and errors == 'raise':
# In order to raise an error about missing index values, we'll
# need to collect the entire dataframe.
requires = partitionings.Singleton()
else:
requires = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'drop',
lambda df: df.drop(
axis=axis,
index=index,
columns=columns,
errors=errors,
**kwargs), [self._expr],
proxy=proxy,
requires_partition_by=requires))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def droplevel(self, level, axis):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'droplevel',
lambda df: df.droplevel(level, axis=axis), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()
if axis in (1, 'column') else partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def fillna(self, value, method, axis, limit, **kwargs):
    # The default axis value is None, but it is overridden with 'index'.
axis = axis or 'index'
if method is not None and axis in (0, 'index'):
raise frame_base.WontImplementError('order-sensitive')
if isinstance(value, frame_base.DeferredBase):
value_expr = value._expr
else:
value_expr = expressions.ConstantExpression(value)
if limit is not None and method is None:
# If method is not None (and axis is 'columns'), we can do limit in
# a distributed way. Else, it is order sensitive.
raise frame_base.WontImplementError('order-sensitive')
return frame_base.DeferredFrame.wrap(
# yapf: disable
expressions.ComputedExpression(
'fillna',
lambda df,
value: df.fillna(value, method=method, axis=axis, **kwargs),
[self._expr, value_expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def ffill(self, **kwargs):
return self.fillna(method='ffill', **kwargs)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def bfill(self, **kwargs):
return self.fillna(method='bfill', **kwargs)
pad = ffill
backfill = bfill
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def groupby(self, by, level, axis, as_index, group_keys, **kwargs):
if not as_index:
raise NotImplementedError('groupby(as_index=False)')
if not group_keys:
raise NotImplementedError('groupby(group_keys=False)')
if axis in (1, 'columns'):
return _DeferredGroupByCols(
expressions.ComputedExpression(
'groupbycols',
lambda df: df.groupby(by, axis=axis, **kwargs), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
elif level is not None:
if isinstance(level, (list, tuple)):
grouping_indexes = level
else:
grouping_indexes = [level]
grouping_columns = []
index = self._expr.proxy().index
# Translate to level numbers only
grouping_indexes = [
l if isinstance(l, int) else index.names.index(l)
for l in grouping_indexes
]
if index.nlevels == 1:
to_group_with_index = self._expr
to_group = self._expr
else:
levels_to_drop = [
i for i in range(index.nlevels) if i not in grouping_indexes
]
# Reorder so the grouped indexes are first
to_group_with_index = self.reorder_levels(
grouping_indexes + levels_to_drop)
grouping_indexes = list(range(len(grouping_indexes)))
levels_to_drop = list(range(len(grouping_indexes), index.nlevels))
if levels_to_drop:
to_group = to_group_with_index.droplevel(levels_to_drop)._expr
else:
to_group = to_group_with_index._expr
to_group_with_index = to_group_with_index._expr
elif callable(by):
def map_index(df):
df = df.copy()
df.index = df.index.map(by)
return df
to_group = expressions.ComputedExpression(
'map_index',
map_index, [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
orig_nlevels = self._expr.proxy().index.nlevels
to_group_with_index = expressions.ComputedExpression(
'map_index_keep_orig',
lambda df: df.set_index([df.index.map(by), df.index]),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
# Partitioning by the original indexes is preserved
preserves_partition_by=partitionings.Index(
list(range(1, orig_nlevels + 1))))
grouping_columns = []
# The index we need to group by is the last one
grouping_indexes = [0]
elif isinstance(by, DeferredSeries):
raise NotImplementedError(
"grouping by a Series is not yet implemented. You can group by a "
"DataFrame column by specifying its name.")
elif isinstance(by, np.ndarray):
raise frame_base.WontImplementError('order sensitive')
elif isinstance(self, DeferredDataFrame):
if not isinstance(by, list):
by = [by]
# Find the columns that we need to move into the index so we can group by
# them
column_names = self._expr.proxy().columns
grouping_columns = list(set(by).intersection(column_names))
index_names = self._expr.proxy().index.names
for label in by:
if label not in index_names and label not in self._expr.proxy().columns:
raise KeyError(label)
grouping_indexes = list(set(by).intersection(index_names))
if grouping_indexes:
if set(by) == set(index_names):
to_group = self._expr
elif set(by).issubset(index_names):
to_group = self.droplevel(index_names.difference(by))._expr
else:
to_group = self.reset_index(grouping_indexes).set_index(by)._expr
else:
to_group = self.set_index(by)._expr
if grouping_columns:
# TODO(BEAM-11711): It should be possible to do this without creating an
# expression manually, by using DeferredDataFrame.set_index, i.e.:
# to_group_with_index = self.set_index([self.index] +
# grouping_columns)._expr
to_group_with_index = expressions.ComputedExpression(
'move_grouped_columns_to_index',
lambda df: df.set_index([df.index] + grouping_columns),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Index(
list(range(self._expr.proxy().index.nlevels))))
else:
to_group_with_index = self._expr
else:
raise NotImplementedError(by)
return DeferredGroupBy(
expressions.ComputedExpression(
'groupbyindex',
lambda df: df.groupby(
level=list(range(df.index.nlevels)), **kwargs), [to_group],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()),
kwargs,
to_group,
to_group_with_index,
grouping_columns=grouping_columns,
grouping_indexes=grouping_indexes)
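  # Editor's note (illustrative): groupby() above always reduces to grouping
  # by index levels -- grouping columns and callable keys are first moved into
  # the index, the data is repartitioned by that index, and the resulting
  # DeferredGroupBy records which columns/levels the grouping came from so
  # that later aggregations can reproduce the expected output shape.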
abs = frame_base._elementwise_method('abs')
astype = frame_base._elementwise_method('astype')
copy = frame_base._elementwise_method('copy')
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def tz_localize(self, ambiguous, **kwargs):
if isinstance(ambiguous, np.ndarray):
raise frame_base.WontImplementError(
"ambiguous=ndarray is not supported, please use a deferred Series "
"instead.")
elif isinstance(ambiguous, frame_base.DeferredFrame):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda df,
ambiguous: df.tz_localize(ambiguous=ambiguous, **kwargs),
[self._expr, ambiguous._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton()))
elif ambiguous == 'infer':
# infer attempts to infer based on the order of the timestamps
raise frame_base.WontImplementError("order-sensitive")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda df: df.tz_localize(ambiguous=ambiguous, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton()))
@property
def size(self):
sizes = expressions.ComputedExpression(
'get_sizes',
# Wrap scalar results in a Series for easier concatenation later
lambda df: pd.Series(df.size),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'sum_sizes',
lambda sizes: sizes.sum(), [sizes],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
@property
def empty(self):
empties = expressions.ComputedExpression(
'get_empties',
# Wrap scalar results in a Series for easier concatenation later
lambda df: pd.Series(df.empty),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'check_all_empty',
lambda empties: empties.all(), [empties],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
def bool(self):
# Will throw if any partition has >1 element
bools = expressions.ComputedExpression(
'get_bools',
# Wrap scalar results in a Series for easier concatenation later
lambda df: pd.Series([], dtype=bool)
if df.empty else pd.Series([df.bool()]),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
# Will throw if overall dataset has != 1 element
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine_all_bools',
lambda bools: bools.bool(), [bools],
proxy=bool(),
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
def equals(self, other):
intermediate = expressions.ComputedExpression(
'equals_partitioned',
# Wrap scalar results in a Series for easier concatenation later
lambda df,
other: pd.Series(df.equals(other)),
[self._expr, other._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate_equals',
lambda df: df.all(), [intermediate],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def sort_values(self, axis, **kwargs):
if axis in (0, 'index'):
# axis=rows imposes an ordering on the DataFrame rows which we do not
# support
raise frame_base.WontImplementError("order-sensitive")
else:
# axis=columns will reorder the columns based on the data
raise frame_base.WontImplementError("non-deferred column values")
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def sort_index(self, axis, **kwargs):
if axis in (0, 'rows'):
# axis=rows imposes an ordering on the DataFrame which we do not support
raise frame_base.WontImplementError("order-sensitive")
# axis=columns reorders the columns by name
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'sort_index',
lambda df: df.sort_index(axis, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary(),
))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def where(self, cond, other, errors, **kwargs):
requires = partitionings.Arbitrary()
deferred_args = {}
actual_args = {}
# TODO(bhulette): This is very similar to the logic in
# frame_base.elementwise_method, can we unify it?
if isinstance(cond, frame_base.DeferredFrame):
deferred_args['cond'] = cond
requires = partitionings.Index()
else:
actual_args['cond'] = cond
if isinstance(other, frame_base.DeferredFrame):
deferred_args['other'] = other
requires = partitionings.Index()
else:
actual_args['other'] = other
if errors == "ignore":
# We need all data in order to ignore errors and propagate the original
# data.
requires = partitionings.Singleton()
actual_args['errors'] = errors
def where_execution(df, *args):
runtime_values = {
name: value
for (name, value) in zip(deferred_args.keys(), args)
}
return df.where(**runtime_values, **actual_args, **kwargs)
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
"where",
where_execution,
[self._expr] + [df._expr for df in deferred_args.values()],
requires_partition_by=requires,
preserves_partition_by=partitionings.Index(),
))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def mask(self, cond, **kwargs):
return self.where(~cond, **kwargs)
@property
def dtype(self):
return self._expr.proxy().dtype
isin = frame_base._elementwise_method('isin')
@property
def ndim(self):
return self._expr.proxy().ndim
def _get_index(self):
return _DeferredIndex(self)
index = property(
_get_index, frame_base.not_implemented_method('index (setter)'))
hist = frame_base.wont_implement_method('plot')
attrs = property(frame_base.wont_implement_method('experimental'))
first = last = frame_base.wont_implement_method('order-sensitive')
head = tail = frame_base.wont_implement_method('order-sensitive')
interpolate = frame_base.wont_implement_method('order-sensitive')
reorder_levels = frame_base._proxy_method(
'reorder_levels',
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
@populate_not_implemented(pd.Series)
@frame_base.DeferredFrame._register_for(pd.Series)
class DeferredSeries(DeferredDataFrameOrSeries):
@property
def name(self):
return self._expr.proxy().name
@name.setter
def name(self, value):
def fn(s):
s = s.copy()
s.name = value
return s
self._expr = expressions.ComputedExpression(
'series_set_name',
fn, [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
@property
def dtype(self):
return self._expr.proxy().dtype
dtypes = dtype
def __getitem__(self, key):
if _is_null_slice(key) or key is Ellipsis:
return self
elif (isinstance(key, int) or _is_integer_slice(key)
) and self._expr.proxy().index._should_fallback_to_positional():
raise frame_base.WontImplementError('order sensitive')
elif isinstance(key, slice) or callable(key):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
# yapf: disable
'getitem',
lambda df: df[key],
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
# yapf: disable
'getitem',
lambda df,
indexer: df[indexer],
[self._expr, key._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif pd.core.series.is_iterator(key) or pd.core.common.is_bool_indexer(key):
raise frame_base.WontImplementError('order sensitive')
else:
# We could consider returning a deferred scalar, but that might
# be more surprising than a clear error.
raise frame_base.WontImplementError('non-deferred')
def keys(self):
return self.index
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def append(self, to_append, ignore_index, verify_integrity, **kwargs):
if not isinstance(to_append, DeferredSeries):
raise frame_base.WontImplementError(
"append() only accepts DeferredSeries instances, received " +
str(type(to_append)))
if ignore_index:
raise frame_base.WontImplementError(
"append(ignore_index=True) is order sensitive")
if verify_integrity:
# verifying output has a unique index requires global index.
# TODO(BEAM-11839): Attach an explanation to the Singleton partitioning
# requirement, and include it in raised errors.
requires = partitionings.Singleton()
else:
requires = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'append',
lambda s,
to_append: s.append(
to_append, verify_integrity=verify_integrity, **kwargs),
[self._expr, to_append._expr],
requires_partition_by=requires,
preserves_partition_by=partitionings.Arbitrary()))
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def align(self, other, join, axis, level, method, **kwargs):
if level is not None:
raise NotImplementedError('per-level align')
if method is not None:
raise frame_base.WontImplementError('order-sensitive')
# We're using pd.concat here as expressions don't yet support
# multiple return values.
aligned = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'align',
lambda x,
y: pd.concat([x, y], axis=1, join='inner'),
[self._expr, other._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
return aligned.iloc[:, 0], aligned.iloc[:, 1]
array = property(frame_base.wont_implement_method('non-deferred value'))
argmax = frame_base.wont_implement_method('order-sensitive')
argmin = frame_base.wont_implement_method('order-sensitive')
ravel = frame_base.wont_implement_method('non-deferred value')
rename = frame_base._elementwise_method('rename')
between = frame_base._elementwise_method('between')
def dot(self, other):
left = self._expr
if isinstance(other, DeferredSeries):
right = expressions.ComputedExpression(
'to_dataframe',
pd.DataFrame, [other._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
right_is_series = True
elif isinstance(other, DeferredDataFrame):
right = other._expr
right_is_series = False
else:
raise frame_base.WontImplementError('non-deferred result')
dots = expressions.ComputedExpression(
'dot',
# Transpose so we can sum across rows.
(lambda left, right: pd.DataFrame(left @ right).T),
[left, right],
requires_partition_by=partitionings.Index())
with expressions.allow_non_parallel_operations(True):
sums = expressions.ComputedExpression(
'sum',
lambda dots: dots.sum(), #
[dots],
requires_partition_by=partitionings.Singleton())
if right_is_series:
result = expressions.ComputedExpression(
'extract',
lambda df: df[0], [sums],
requires_partition_by=partitionings.Singleton())
else:
result = sums
return frame_base.DeferredFrame.wrap(result)
__matmul__ = dot
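  # Editor's note (illustrative): dot() above follows a map/reduce shape --
  # each partition contributes its local `left @ right` product (transposed so
  # partial results can be summed row-wise), and the partials are then summed
  # in a single non-parallel step, yielding a deferred scalar for a Series
  # argument or a deferred Series for a DataFrame argument.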
def std(self, *args, **kwargs):
# Compute variance (deferred scalar) with same args, then sqrt it
return self.var(*args, **kwargs).apply(lambda var: math.sqrt(var))
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def var(self, axis, skipna, level, ddof, **kwargs):
if level is not None:
raise NotImplementedError("per-level aggregation")
if skipna is None or skipna:
self = self.dropna() # pylint: disable=self-cls-assignment
# See the online, numerically stable formulae at
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# and
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
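    # Editor's sketch of the combine step implemented below: for two chunks
    # with sums s1, s2, counts n1, n2 and centered second moments m1, m2,
    #
    #   delta = s1 / n1 - s2 / n2
    #   m = m1 + m2 + delta**2 * n1 * n2 / (n1 + n2)
    #   s, n = s1 + s2, n1 + n2
    #
    # and the final variance is m / (n - ddof), NaN when n <= ddof.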
def compute_moments(x):
n = len(x)
m = x.std(ddof=0)**2 * n
s = x.sum()
return pd.DataFrame(dict(m=[m], s=[s], n=[n]))
def combine_moments(data):
m = s = n = 0.0
for datum in data.itertuples():
if datum.n == 0:
continue
elif n == 0:
m, s, n = datum.m, datum.s, datum.n
else:
delta = s / n - datum.s / datum.n
m += datum.m + delta**2 * n * datum.n / (n + datum.n)
s += datum.s
n += datum.n
if n <= ddof:
return float('nan')
else:
return m / (n - ddof)
moments = expressions.ComputedExpression(
'compute_moments',
compute_moments, [self._expr],
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine_moments',
combine_moments, [moments],
requires_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def corr(self, other, method, min_periods):
if method == 'pearson': # Note that this is the default.
x, y = self.dropna().align(other.dropna(), 'inner')
return x._corr_aligned(y, min_periods)
else:
# The rank-based correlations are not obviously parallelizable, though
# perhaps an approximation could be done with a knowledge of quantiles
# and custom partitioning.
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'corr',
lambda df,
other: df.corr(other, method=method, min_periods=min_periods),
[self._expr, other._expr],
# TODO(BEAM-11839): Attach an explanation to the Singleton
# partitioning requirement, and include it in raised errors.
requires_partition_by=partitionings.Singleton()))
def _corr_aligned(self, other, min_periods):
std_x = self.std()
std_y = other.std()
cov = self._cov_aligned(other, min_periods)
return cov.apply(
lambda cov, std_x, std_y: cov / (std_x * std_y), args=[std_x, std_y])
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def cov(self, other, min_periods, ddof):
x, y = self.dropna().align(other.dropna(), 'inner')
return x._cov_aligned(y, min_periods, ddof)
def _cov_aligned(self, other, min_periods, ddof=1):
# Use the formulae from
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Covariance
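    # Editor's sketch of the combine step implemented below: for two chunks
    # with co-moments c1, c2, sums (sx1, sy1), (sx2, sy2) and counts n1, n2,
    #
    #   c = c1 + c2 + (sx1/n1 - sx2/n2) * (sy1/n1 - sy2/n2) * n1 * n2 / (n1 + n2)
    #
    # and the final covariance is c / (n - ddof), NaN when n is smaller than
    # max(2, ddof, min_periods).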
def compute_co_moments(x, y):
n = len(x)
if n <= 1:
c = 0
else:
c = x.cov(y) * (n - 1)
sx = x.sum()
sy = y.sum()
return pd.DataFrame(dict(c=[c], sx=[sx], sy=[sy], n=[n]))
def combine_co_moments(data):
c = sx = sy = n = 0.0
for datum in data.itertuples():
if datum.n == 0:
continue
elif n == 0:
c, sx, sy, n = datum.c, datum.sx, datum.sy, datum.n
else:
c += (
datum.c + (sx / n - datum.sx / datum.n) *
(sy / n - datum.sy / datum.n) * n * datum.n / (n + datum.n))
sx += datum.sx
sy += datum.sy
n += datum.n
if n < max(2, ddof, min_periods or 0):
return float('nan')
else:
return c / (n - ddof)
moments = expressions.ComputedExpression(
'compute_co_moments',
compute_co_moments, [self._expr, other._expr],
requires_partition_by=partitionings.Index())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine_co_moments',
combine_co_moments, [moments],
requires_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def dropna(self, **kwargs):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'dropna',
lambda df: df.dropna(**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary()))
items = iteritems = frame_base.wont_implement_method('non-lazy')
isnull = isna = frame_base._elementwise_method('isna')
notnull = notna = frame_base._elementwise_method('notna')
tolist = to_numpy = to_string = frame_base.wont_implement_method(
'non-deferred value')
def aggregate(self, func, axis=0, *args, **kwargs):
if isinstance(func, list) and len(func) > 1:
# Aggregate each column separately, then stick them all together.
rows = [self.agg([f], *args, **kwargs) for f in func]
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_aggregate',
lambda *rows: pd.concat(rows), [row._expr for row in rows]))
else:
# We're only handling a single column.
base_func = func[0] if isinstance(func, list) else func
if _is_associative(base_func) and not args and not kwargs:
intermediate = expressions.ComputedExpression(
'pre_aggregate',
lambda s: s.agg([base_func], *args, **kwargs), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
allow_nonparallel_final = True
else:
intermediate = self._expr
allow_nonparallel_final = None # i.e. don't change the value
with expressions.allow_non_parallel_operations(allow_nonparallel_final):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate',
lambda s: s.agg(func, *args, **kwargs), [intermediate],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
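  # Editor's note (illustrative): for a single associative aggregation such as
  # 'sum', 'min' or 'max' with no extra arguments, aggregate() above first
  # reduces every partition in parallel ('pre_aggregate') and only then folds
  # the small per-partition results in a non-parallel step; other aggregations
  # are computed directly on a single, collocated partition.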
agg = aggregate
@property
def axes(self):
return [self.index]
clip = frame_base._elementwise_method('clip')
all = frame_base._agg_method('all')
any = frame_base._agg_method('any')
count = frame_base._agg_method('count')
min = frame_base._agg_method('min')
max = frame_base._agg_method('max')
prod = product = frame_base._agg_method('prod')
sum = frame_base._agg_method('sum')
mean = frame_base._agg_method('mean')
median = frame_base._agg_method('median')
cummax = cummin = cumsum = cumprod = frame_base.wont_implement_method(
'order-sensitive')
diff = frame_base.wont_implement_method('order-sensitive')
filter = frame_base._elementwise_method('filter')
memory_usage = frame_base.wont_implement_method('non-deferred value')
# In Series __contains__ checks the index
__contains__ = frame_base.wont_implement_method('non-deferred value')
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def nlargest(self, keep, **kwargs):
# TODO(robertwb): Document 'any' option.
# TODO(robertwb): Consider (conditionally) defaulting to 'any' if no
# explicit keep parameter is requested.
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError('order-sensitive')
kwargs['keep'] = keep
per_partition = expressions.ComputedExpression(
'nlargest-per-partition',
lambda df: df.nlargest(**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nlargest',
lambda df: df.nlargest(**kwargs), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def nsmallest(self, keep, **kwargs):
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError('order-sensitive')
kwargs['keep'] = keep
per_partition = expressions.ComputedExpression(
'nsmallest-per-partition',
lambda df: df.nsmallest(**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nsmallest',
lambda df: df.nsmallest(**kwargs), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
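  # Editor's note (illustrative): nlargest/nsmallest above use the standard
  # distributed top-k pattern -- take the top k of each partition in parallel,
  # then take the top k of those candidates in one final non-parallel step;
  # this is safe because the global top k is always contained in the union of
  # the per-partition top k sets.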
@property
def is_unique(self):
def set_index(s):
s = s[:]
s.index = s
return s
self_index = expressions.ComputedExpression(
'set_index',
set_index, [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
is_unique_distributed = expressions.ComputedExpression(
'is_unique_distributed',
lambda s: pd.Series(s.is_unique), [self_index],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations():
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine',
lambda s: s.all(), [is_unique_distributed],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
plot = property(frame_base.wont_implement_method('plot'))
pop = frame_base.wont_implement_method('non-lazy')
rename_axis = frame_base._elementwise_method('rename_axis')
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def replace(self, to_replace, value, limit, method, **kwargs):
if method is not None and not isinstance(to_replace,
dict) and value is None:
# Can't rely on method for replacement, it's order-sensitive
# pandas only relies on method if to_replace is not a dictionary, and
# value is None
raise frame_base.WontImplementError("order-sensitive")
if limit is None:
requires_partition_by = partitionings.Arbitrary()
else:
requires_partition_by = partitionings.Singleton()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'replace',
lambda df: df.replace(
to_replace=to_replace,
value=value,
limit=limit,
method=method,
**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=requires_partition_by))
round = frame_base._elementwise_method('round')
searchsorted = frame_base.wont_implement_method('order-sensitive')
shift = frame_base.wont_implement_method('order-sensitive')
take = frame_base.wont_implement_method('deprecated')
to_dict = frame_base.wont_implement_method('non-deferred')
to_frame = frame_base._elementwise_method('to_frame')
def unique(self, as_series=False):
if not as_series:
raise frame_base.WontImplementError(
'pass as_series=True to get the result as a (deferred) Series')
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'unique',
lambda df: pd.Series(df.unique()), [self._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Singleton()))
def update(self, other):
self._expr = expressions.ComputedExpression(
'update',
lambda df,
other: df.update(other) or df, [self._expr, other._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Index())
unstack = frame_base.wont_implement_method('non-deferred column values')
values = property(frame_base.wont_implement_method('non-deferred'))
view = frame_base.wont_implement_method('memory sharing semantics')
@property
def str(self):
return _DeferredStringMethods(self._expr)
apply = frame_base._elementwise_method('apply')
map = frame_base._elementwise_method('map')
# TODO(BEAM-11636): Implement transform using type inference to determine the
# proxy
#transform = frame_base._elementwise_method('transform')
@populate_not_implemented(pd.DataFrame)
@frame_base.DeferredFrame._register_for(pd.DataFrame)
class DeferredDataFrame(DeferredDataFrameOrSeries):
@property
def T(self):
return self.transpose()
@property
def columns(self):
return self._expr.proxy().columns
@columns.setter
def columns(self, columns):
def set_columns(df):
df = df.copy()
df.columns = columns
return df
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'set_columns',
set_columns, [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
def keys(self):
return self.columns
def __getattr__(self, name):
# Column attribute access.
if name in self._expr.proxy().columns:
return self[name]
else:
return object.__getattribute__(self, name)
def __getitem__(self, key):
# TODO: Replicate pd.DataFrame.__getitem__ logic
if isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:
return self.loc[key]
elif isinstance(key, frame_base.DeferredBase):
# Fail early if key is a DeferredBase as it interacts surprisingly with
# key in self._expr.proxy().columns
raise NotImplementedError(
"Indexing with a non-bool deferred frame is not yet supported. "
"Consider using df.loc[...]")
elif isinstance(key, slice):
if _is_null_slice(key):
return self
elif _is_integer_slice(key):
# This depends on the contents of the index.
raise frame_base.WontImplementError(
'Use iloc or loc with integer slices.')
else:
return self.loc[key]
elif (
(isinstance(key, list) and all(key_column in self._expr.proxy().columns
for key_column in key)) or
key in self._expr.proxy().columns):
return self._elementwise(lambda df: df[key], 'get_column')
else:
raise NotImplementedError(key)
def __contains__(self, key):
# Checks if proxy has the given column
return self._expr.proxy().__contains__(key)
def __setitem__(self, key, value):
if isinstance(
key, str) or (isinstance(key, list) and
all(isinstance(c, str)
for c in key)) or (isinstance(key, DeferredSeries) and
key._expr.proxy().dtype == bool):
# yapf: disable
return self._elementwise(
lambda df, key, value: df.__setitem__(key, value),
'set_column',
(key, value),
inplace=True)
else:
raise NotImplementedError(key)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def align(self, other, join, axis, copy, level, method, **kwargs):
if not copy:
raise frame_base.WontImplementError('align(copy=False)')
if method is not None:
raise frame_base.WontImplementError('order-sensitive')
if kwargs:
raise NotImplementedError('align(%s)' % ', '.join(kwargs.keys()))
if level is not None:
# Could probably get by partitioning on the used levels.
requires_partition_by = partitionings.Singleton()
elif axis in ('columns', 1):
requires_partition_by = partitionings.Arbitrary()
else:
requires_partition_by = partitionings.Index()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'align',
lambda df, other: df.align(other, join=join, axis=axis),
[self._expr, other._expr],
requires_partition_by=requires_partition_by,
preserves_partition_by=partitionings.Arbitrary()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def append(self, other, ignore_index, verify_integrity, sort, **kwargs):
if not isinstance(other, DeferredDataFrame):
raise frame_base.WontImplementError(
"append() only accepts DeferredDataFrame instances, received " +
str(type(other)))
if ignore_index:
raise frame_base.WontImplementError(
"append(ignore_index=True) is order sensitive")
if verify_integrity:
raise frame_base.WontImplementError(
"append(verify_integrity=True) produces an execution time error")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'append',
lambda s, other: s.append(other, sort=sort, **kwargs),
[self._expr, other._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()
)
)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def set_index(self, keys, **kwargs):
if isinstance(keys, str):
keys = [keys]
if any(isinstance(k, (_DeferredIndex, frame_base.DeferredFrame))
for k in keys):
raise NotImplementedError("set_index with Index or Series instances is "
"not yet supported (BEAM-11711)")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'set_index',
lambda df: df.set_index(keys, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton()))
@property
def loc(self):
return _DeferredLoc(self)
@property
def iloc(self):
return _DeferredILoc(self)
@property
def axes(self):
return (self.index, self.columns)
@property
def dtypes(self):
return self._expr.proxy().dtypes
def assign(self, **kwargs):
for name, value in kwargs.items():
if not callable(value) and not isinstance(value, DeferredSeries):
raise frame_base.WontImplementError("Unsupported value for new "
f"column '{name}': '{value}'. "
"Only callables and Series "
"instances are supported.")
return frame_base._elementwise_method('assign')(self, **kwargs)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def explode(self, column, ignore_index):
# ignoring the index will not preserve it
preserves = (partitionings.Singleton() if ignore_index
else partitionings.Index())
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'explode',
lambda df: df.explode(column, ignore_index),
[self._expr],
preserves_partition_by=preserves,
requires_partition_by=partitionings.Arbitrary()))
def aggregate(self, func, axis=0, *args, **kwargs):
if axis is None:
# Aggregate across all elements by first aggregating across columns,
# then across rows.
return self.agg(func, *args, **dict(kwargs, axis=1)).agg(
func, *args, **dict(kwargs, axis=0))
elif axis in (1, 'columns'):
# This is an easy elementwise aggregation.
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate',
lambda df: df.agg(func, axis=1, *args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary()))
elif len(self._expr.proxy().columns) == 0 or args or kwargs:
# For these corner cases, just colocate everything.
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate',
lambda df: df.agg(func, *args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Singleton()))
else:
# In the general case, compute the aggregation of each column separately,
# then recombine.
if not isinstance(func, dict):
col_names = list(self._expr.proxy().columns)
func = {col: func for col in col_names}
else:
col_names = list(func.keys())
aggregated_cols = []
for col in col_names:
funcs = func[col]
if not isinstance(funcs, list):
funcs = [funcs]
aggregated_cols.append(self[col].agg(funcs, *args, **kwargs))
# The final shape is different depending on whether any of the columns
# were aggregated by a list of aggregators.
with expressions.allow_non_parallel_operations():
if any(isinstance(funcs, list) for funcs in func.values()):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_aggregate',
lambda *cols: pd.DataFrame(
{col: value for col, value in zip(col_names, cols)}),
[col._expr for col in aggregated_cols],
requires_partition_by=partitionings.Singleton()))
else:
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_aggregate',
lambda *cols: pd.Series(
{col: value[0] for col, value in zip(col_names, cols)}),
[col._expr for col in aggregated_cols],
requires_partition_by=partitionings.Singleton(),
proxy=self._expr.proxy().agg(func, *args, **kwargs)))
agg = aggregate
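  # Editor's note (illustrative, not from the original source): with the branches above,
  # df.agg('sum') aggregates each column separately and recombines the results into a
  # Series, while df.agg({'a': ['min', 'max']}) takes the list-of-aggregators branch and
  # yields a DataFrame.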
applymap = frame_base._elementwise_method('applymap')
memory_usage = frame_base.wont_implement_method('non-deferred value')
info = frame_base.wont_implement_method('non-deferred value')
clip = frame_base._elementwise_method(
'clip', restrictions={'axis': lambda axis: axis in (0, 'index')})
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def corr(self, method, min_periods):
if method == 'pearson':
proxy = self._expr.proxy().corr()
columns = list(proxy.columns)
args = []
arg_indices = []
for ix, col1 in enumerate(columns):
for col2 in columns[ix+1:]:
arg_indices.append((col1, col2))
# Note that this set may be different for each pair.
no_na = self.loc[self[col1].notna() & self[col2].notna()]
args.append(
no_na[col1]._corr_aligned(no_na[col2], min_periods))
def fill_matrix(*args):
data = collections.defaultdict(dict)
for col in columns:
data[col][col] = 1.0
for ix, (col1, col2) in enumerate(arg_indices):
data[col1][col2] = data[col2][col1] = args[ix]
return pd.DataFrame(data, columns=columns, index=columns)
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'fill_matrix',
fill_matrix,
[arg._expr for arg in args],
requires_partition_by=partitionings.Singleton(),
proxy=proxy))
else:
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'corr',
lambda df: df.corr(method=method, min_periods=min_periods),
[self._expr],
requires_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def cov(self, min_periods, ddof):
proxy = self._expr.proxy().corr()
columns = list(proxy.columns)
args = []
arg_indices = []
for col in columns:
arg_indices.append((col, col))
std = self[col].std(ddof)
args.append(std.apply(lambda x: x*x, 'square'))
for ix, col1 in enumerate(columns):
for col2 in columns[ix+1:]:
arg_indices.append((col1, col2))
# Note that this set may be different for each pair.
no_na = self.loc[self[col1].notna() & self[col2].notna()]
args.append(no_na[col1]._cov_aligned(no_na[col2], min_periods, ddof))
def fill_matrix(*args):
data = collections.defaultdict(dict)
for ix, (col1, col2) in enumerate(arg_indices):
data[col1][col2] = data[col2][col1] = args[ix]
return pd.DataFrame(data, columns=columns, index=columns)
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'fill_matrix',
fill_matrix,
[arg._expr for arg in args],
requires_partition_by=partitionings.Singleton(),
proxy=proxy))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def corrwith(self, other, axis, drop, method):
if axis not in (0, 'index'):
raise NotImplementedError('corrwith(axis=%r)' % axis)
if not isinstance(other, frame_base.DeferredFrame):
other = frame_base.DeferredFrame.wrap(
expressions.ConstantExpression(other))
if isinstance(other, DeferredSeries):
proxy = self._expr.proxy().corrwith(other._expr.proxy(), method=method)
self, other = self.align(other, axis=0, join='inner')
col_names = proxy.index
other_cols = [other] * len(col_names)
elif isinstance(other, DeferredDataFrame):
proxy = self._expr.proxy().corrwith(
other._expr.proxy(), method=method, drop=drop)
self, other = self.align(other, axis=0, join='inner')
col_names = list(
set(self.columns)
.intersection(other.columns)
.intersection(proxy.index))
other_cols = [other[col_name] for col_name in col_names]
else:
# Raise the right error.
self._expr.proxy().corrwith(other._expr.proxy())
# Just in case something else becomes valid.
raise NotImplementedError('corrwith(%s)' % type(other._expr.proxy))
# Generate expressions to compute the actual correlations.
corrs = [
self[col_name].corr(other_col, method)
for col_name, other_col in zip(col_names, other_cols)]
# Combine the results
def fill_dataframe(*args):
result = proxy.copy(deep=True)
for col, value in zip(proxy.index, args):
result[col] = value
return result
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'fill_dataframe',
fill_dataframe,
[corr._expr for corr in corrs],
requires_partition_by=partitionings.Singleton(),
proxy=proxy))
cummax = cummin = cumsum = cumprod = frame_base.wont_implement_method(
'order-sensitive')
diff = frame_base.wont_implement_method('order-sensitive')
def dot(self, other):
# We want to broadcast the right hand side to all partitions of the left.
# This is OK, as its index must be the same size as the columns set of self,
# so cannot be too large.
class AsScalar(object):
def __init__(self, value):
self.value = value
if isinstance(other, frame_base.DeferredFrame):
proxy = other._expr.proxy()
with expressions.allow_non_parallel_operations():
side = expressions.ComputedExpression(
'as_scalar',
lambda df: AsScalar(df),
[other._expr],
requires_partition_by=partitionings.Singleton())
else:
proxy = pd.DataFrame(columns=range(len(other[0])))
side = expressions.ConstantExpression(AsScalar(other))
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'dot',
lambda left, right: left @ right.value,
[self._expr, side],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary(),
proxy=proxy))
__matmul__ = dot
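  # Editor's note (illustrative): e.g. deferred_df @ other first collapses `other` into
  # a single AsScalar value (a non-parallel operation) and then broadcasts it to every
  # partition of the left operand, as described in dot() above.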
def mode(self, axis=0, *args, **kwargs):
if axis == 1 or axis == 'columns':
      # Number of columns is max(number of mode values for each row), so we can't
# determine how many there will be before looking at the data.
raise frame_base.WontImplementError('non-deferred column values')
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'mode',
lambda df: df.mode(*args, **kwargs),
[self._expr],
#TODO(robertwb): Approximate?
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def dropna(self, axis, **kwargs):
# TODO(robertwb): This is a common pattern. Generalize?
if axis == 1 or axis == 'columns':
requires_partition_by = partitionings.Singleton()
else:
requires_partition_by = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'dropna',
lambda df: df.dropna(axis=axis, **kwargs),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=requires_partition_by))
def _eval_or_query(self, name, expr, inplace, **kwargs):
for key in ('local_dict', 'global_dict', 'level', 'target', 'resolvers'):
if key in kwargs:
raise NotImplementedError(f"Setting '{key}' is not yet supported")
# look for '@<py identifier>'
if re.search(r'\@[^\d\W]\w*', expr, re.UNICODE):
raise NotImplementedError("Accessing locals with @ is not yet supported "
"(BEAM-11202)")
result_expr = expressions.ComputedExpression(
name,
lambda df: getattr(df, name)(expr, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
if inplace:
self._expr = result_expr
else:
return frame_base.DeferredFrame.wrap(result_expr)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def eval(self, expr, inplace, **kwargs):
return self._eval_or_query('eval', expr, inplace, **kwargs)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def query(self, expr, inplace, **kwargs):
return self._eval_or_query('query', expr, inplace, **kwargs)
isnull = isna = frame_base._elementwise_method('isna')
notnull = notna = frame_base._elementwise_method('notna')
items = itertuples = iterrows = iteritems = frame_base.wont_implement_method(
'non-lazy')
def _cols_as_temporary_index(self, cols, suffix=''):
original_index_names = list(self._expr.proxy().index.names)
new_index_names = [
'__apache_beam_temp_%d_%s' % (ix, suffix)
for (ix, _) in enumerate(original_index_names)]
def reindex(df):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'reindex',
lambda df:
df.rename_axis(index=new_index_names, copy=False)
.reset_index().set_index(cols),
[df._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Arbitrary()))
def revert(df):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_restoreindex',
lambda df:
df.reset_index().set_index(new_index_names)
.rename_axis(index=original_index_names, copy=False),
[df._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Arbitrary()))
return reindex, revert
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def join(self, other, on, **kwargs):
if on is not None:
reindex, revert = self._cols_as_temporary_index(on)
return revert(reindex(self).join(other, **kwargs))
if isinstance(other, list):
other_is_list = True
else:
other = [other]
other_is_list = False
placeholder = object()
other_exprs = [
df._expr for df in other if isinstance(df, frame_base.DeferredFrame)]
const_others = [
placeholder if isinstance(df, frame_base.DeferredFrame) else df
for df in other]
def fill_placeholders(values):
values = iter(values)
filled = [
next(values) if df is placeholder else df for df in const_others]
if other_is_list:
return filled
else:
return filled[0]
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join',
lambda df, *deferred_others: df.join(
fill_placeholders(deferred_others), **kwargs),
[self._expr] + other_exprs,
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Index()))
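  # Editor's note (illustrative): e.g. df1.join(df2, on='k') first moves 'k' into the
  # index via _cols_as_temporary_index above, performs the index-partitioned join, and
  # then restores the original index.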
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def merge(
self,
right,
on,
left_on,
right_on,
left_index,
right_index,
suffixes,
**kwargs):
self_proxy = self._expr.proxy()
right_proxy = right._expr.proxy()
# Validate with a pandas call.
_ = self_proxy.merge(
right_proxy,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
**kwargs)
if kwargs.get('how', None) == 'cross':
raise NotImplementedError("cross join is not yet implemented (BEAM-9547)")
if not any([on, left_on, right_on, left_index, right_index]):
on = [col for col in self_proxy.columns if col in right_proxy.columns]
if not left_on:
left_on = on
if left_on and not isinstance(left_on, list):
left_on = [left_on]
if not right_on:
right_on = on
if right_on and not isinstance(right_on, list):
right_on = [right_on]
if left_index:
indexed_left = self
else:
indexed_left = self.set_index(left_on, drop=False)
if right_index:
indexed_right = right
else:
indexed_right = right.set_index(right_on, drop=False)
if left_on and right_on:
common_cols = set(left_on).intersection(right_on)
if len(common_cols):
# When merging on the same column name from both dfs, we need to make
# sure only one df has the column. Otherwise we end up with
# two duplicate columns, one with lsuffix and one with rsuffix.
# It's safe to drop from either because the data has already been duped
# to the index.
indexed_right = indexed_right.drop(columns=common_cols)
merged = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'merge',
lambda left, right: left.merge(right,
left_index=True,
right_index=True,
suffixes=suffixes,
**kwargs),
[indexed_left._expr, indexed_right._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Index()))
if left_index or right_index:
return merged
else:
return merged.reset_index(drop=True)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def nlargest(self, keep, **kwargs):
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError('order-sensitive')
kwargs['keep'] = keep
per_partition = expressions.ComputedExpression(
'nlargest-per-partition',
lambda df: df.nlargest(**kwargs),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nlargest',
lambda df: df.nlargest(**kwargs),
[per_partition],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def nsmallest(self, keep, **kwargs):
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError('order-sensitive')
kwargs['keep'] = keep
per_partition = expressions.ComputedExpression(
'nsmallest-per-partition',
lambda df: df.nsmallest(**kwargs),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nsmallest',
lambda df: df.nsmallest(**kwargs),
[per_partition],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
def nunique(self, **kwargs):
if kwargs.get('axis', None) in (1, 'columns'):
requires_partition_by = partitionings.Arbitrary()
preserves_partition_by = partitionings.Index()
else:
# TODO: This could be implemented in a distributed fashion
requires_partition_by = partitionings.Singleton()
preserves_partition_by = partitionings.Singleton()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nunique',
lambda df: df.nunique(**kwargs),
[self._expr],
preserves_partition_by=preserves_partition_by,
requires_partition_by=requires_partition_by))
plot = property(frame_base.wont_implement_method('plot'))
def pop(self, item):
result = self[item]
self._expr = expressions.ComputedExpression(
'popped',
lambda df: df.drop(columns=[item]),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
return result
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def quantile(self, axis, **kwargs):
if axis == 1 or axis == 'columns':
raise frame_base.WontImplementError('non-deferred column values')
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'quantile',
lambda df: df.quantile(axis=axis, **kwargs),
[self._expr],
#TODO(robertwb): distributed approximate quantiles?
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.maybe_inplace
def rename(self, **kwargs):
rename_index = (
'index' in kwargs
or kwargs.get('axis', None) in (0, 'index')
or ('columns' not in kwargs and 'axis' not in kwargs))
rename_columns = (
'columns' in kwargs
or kwargs.get('axis', None) in (1, 'columns'))
if rename_index:
# Technically, it's still partitioned by index, but it's no longer
# partitioned by the hash of the index.
preserves_partition_by = partitionings.Singleton()
else:
preserves_partition_by = partitionings.Index()
if kwargs.get('errors', None) == 'raise' and rename_index:
# Renaming index with checking requires global index.
requires_partition_by = partitionings.Singleton()
else:
requires_partition_by = partitionings.Arbitrary()
proxy = None
if rename_index:
      # The proxy can't be computed by executing rename; it would error when
      # renaming the index.
if rename_columns:
# Note if both are being renamed, index and columns must be specified
# (not axis)
proxy = self._expr.proxy().rename(**{k: v for (k, v) in kwargs.items()
if not k == 'index'})
else:
# No change in columns, reuse proxy
proxy = self._expr.proxy()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'rename',
lambda df: df.rename(**kwargs),
[self._expr],
proxy=proxy,
preserves_partition_by=preserves_partition_by,
requires_partition_by=requires_partition_by))
rename_axis = frame_base._elementwise_method('rename_axis')
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def replace(self, limit, **kwargs):
if limit is None:
requires_partition_by = partitionings.Arbitrary()
else:
requires_partition_by = partitionings.Singleton()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'replace',
lambda df: df.replace(limit=limit, **kwargs),
[self._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=requires_partition_by))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def reset_index(self, level=None, **kwargs):
# TODO: Docs should note that the index is not in the same order as it would
# be with pandas. Technically an order-sensitive operation
if level is not None and not isinstance(level, (tuple, list)):
level = [level]
if level is None or len(level) == self._expr.proxy().index.nlevels:
# TODO: Could do distributed re-index with offsets.
requires_partition_by = partitionings.Singleton()
else:
requires_partition_by = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'reset_index',
lambda df: df.reset_index(level=level, **kwargs),
[self._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=requires_partition_by))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def round(self, decimals, *args, **kwargs):
if isinstance(decimals, frame_base.DeferredFrame):
      # Disallow passing a deferred Series in; our current partitioning model
      # prevents us from using it correctly.
raise NotImplementedError("Passing a deferred series to round() is not "
"supported, please use a concrete pd.Series "
"instance or a dictionary")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'round',
lambda df: df.round(decimals, *args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Index()
)
)
select_dtypes = frame_base._elementwise_method('select_dtypes')
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def shift(self, axis, **kwargs):
if 'freq' in kwargs:
raise frame_base.WontImplementError('data-dependent')
if axis == 1 or axis == 'columns':
requires_partition_by = partitionings.Arbitrary()
else:
requires_partition_by = partitionings.Singleton()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'shift',
lambda df: df.shift(axis=axis, **kwargs),
[self._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=requires_partition_by))
@property
def shape(self):
raise frame_base.WontImplementError('scalar value')
stack = frame_base._elementwise_method('stack')
all = frame_base._agg_method('all')
any = frame_base._agg_method('any')
count = frame_base._agg_method('count')
max = frame_base._agg_method('max')
min = frame_base._agg_method('min')
prod = product = frame_base._agg_method('prod')
sum = frame_base._agg_method('sum')
mean = frame_base._agg_method('mean')
median = frame_base._agg_method('median')
take = frame_base.wont_implement_method('deprecated')
to_records = to_dict = to_numpy = to_string = (
frame_base.wont_implement_method('non-deferred value'))
to_sparse = to_string # frame_base._elementwise_method('to_sparse')
transpose = frame_base.wont_implement_method('non-deferred column values')
def unstack(self, *args, **kwargs):
if self._expr.proxy().index.nlevels == 1:
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'unstack',
lambda df: df.unstack(*args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Index()))
else:
raise frame_base.WontImplementError('non-deferred column values')
update = frame_base._proxy_method(
'update',
inplace=True,
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary())
values = property(frame_base.wont_implement_method('non-deferred value'))
for io_func in dir(io):
if io_func.startswith('to_'):
setattr(DeferredDataFrame, io_func, getattr(io, io_func))
setattr(DeferredSeries, io_func, getattr(io, io_func))
for meth in ('filter', ):
setattr(DeferredDataFrame, meth, frame_base._elementwise_method(meth))
@populate_not_implemented(pd.core.groupby.generic.DataFrameGroupBy)
class DeferredGroupBy(frame_base.DeferredFrame):
def __init__(self, expr, kwargs,
ungrouped: expressions.Expression,
ungrouped_with_index: expressions.Expression,
grouping_columns,
grouping_indexes,
projection=None):
"""This object represents the result of::
ungrouped.groupby(level=[grouping_indexes + grouping_columns],
**kwargs)[projection]
:param expr: An expression to compute a pandas GroupBy object. Convenient
for unliftable aggregations.
:param ungrouped: An expression to compute the DataFrame pre-grouping, the
(Multi)Index contains only the grouping columns/indexes.
:param ungrouped_with_index: Same as ungrouped, except the index includes
all of the original indexes as well as any grouping columns. This is
important for operations that expose the original index, e.g. .apply(),
      but we only use it when necessary to avoid unnecessary data transfer and
GBKs.
:param grouping_columns: list of column labels that were in the original
groupby(..) `by` parameter. Only relevant for grouped DataFrames.
:param grouping_indexes: list of index names (or index level numbers) to be
grouped.
:param kwargs: Keywords args passed to the original groupby(..) call."""
super(DeferredGroupBy, self).__init__(expr)
self._ungrouped = ungrouped
self._ungrouped_with_index = ungrouped_with_index
self._projection = projection
self._grouping_columns = grouping_columns
self._grouping_indexes = grouping_indexes
self._kwargs = kwargs
def __getattr__(self, name):
return DeferredGroupBy(
expressions.ComputedExpression(
'groupby_project',
lambda gb: getattr(gb, name), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()),
self._kwargs,
self._ungrouped,
self._ungrouped_with_index,
self._grouping_columns,
self._grouping_indexes,
projection=name)
def __getitem__(self, name):
return DeferredGroupBy(
expressions.ComputedExpression(
'groupby_project',
lambda gb: gb[name], [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()),
self._kwargs,
self._ungrouped,
self._ungrouped_with_index,
self._grouping_columns,
self._grouping_indexes,
projection=name)
def agg(self, fn):
if not callable(fn):
# TODO: Add support for strings in (UN)LIFTABLE_AGGREGATIONS. Test by
# running doctests for pandas.core.groupby.generic
raise NotImplementedError('GroupBy.agg currently only supports callable '
'arguments')
return DeferredDataFrame(
expressions.ComputedExpression(
'agg',
lambda gb: gb.agg(fn), [self._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton()))
def apply(self, fn, *args, **kwargs):
if self._grouping_columns and not self._projection:
grouping_columns = self._grouping_columns
def fn_wrapper(x, *args, **kwargs):
# TODO(BEAM-11710): Moving a column to an index and back is lossy
        # since indexes don't support as many dtypes. We should keep the original
# column in groupby() instead. We need it anyway in case the grouping
# column is projected, which is allowed.
# Move the columns back to columns
x = x.assign(**{col: x.index.get_level_values(col)
for col in grouping_columns})
x = x.droplevel(grouping_columns)
return fn(x, *args, **kwargs)
else:
fn_wrapper = fn
project = _maybe_project_func(self._projection)
# Unfortunately pandas does not execute fn to determine the right proxy.
# We run user fn on a proxy here to detect the return type and generate the
# proxy.
result = fn_wrapper(project(self._ungrouped_with_index.proxy()))
if isinstance(result, pd.core.generic.NDFrame):
proxy = result[:0]
def index_to_arrays(index):
return [index.get_level_values(level)
for level in range(index.nlevels)]
# The final result will have the grouped indexes + the indexes from the
# result
proxy.index = pd.MultiIndex.from_arrays(
index_to_arrays(self._ungrouped.proxy().index) +
index_to_arrays(proxy.index),
names=self._ungrouped.proxy().index.names + proxy.index.names)
else:
# The user fn returns some non-pandas type. The expected result is a
# Series where each element is the result of one user fn call.
dtype = pd.Series([result]).dtype
proxy = pd.Series([], dtype=dtype, index=self._ungrouped.proxy().index)
levels = self._grouping_indexes + self._grouping_columns
return DeferredDataFrame(
expressions.ComputedExpression(
'apply',
lambda df: project(df.groupby(level=levels)).apply(
fn_wrapper,
*args,
**kwargs),
[self._ungrouped_with_index],
proxy=proxy,
requires_partition_by=partitionings.Index(levels),
preserves_partition_by=partitionings.Index(levels)))
aggregate = agg
hist = frame_base.wont_implement_method('plot')
plot = frame_base.wont_implement_method('plot')
first = frame_base.wont_implement_method('order sensitive')
last = frame_base.wont_implement_method('order sensitive')
head = frame_base.wont_implement_method('order sensitive')
tail = frame_base.wont_implement_method('order sensitive')
nth = frame_base.wont_implement_method('order sensitive')
cumcount = frame_base.wont_implement_method('order sensitive')
cummax = frame_base.wont_implement_method('order sensitive')
cummin = frame_base.wont_implement_method('order sensitive')
cumsum = frame_base.wont_implement_method('order sensitive')
cumprod = frame_base.wont_implement_method('order sensitive')
# TODO(robertwb): Consider allowing this for categorical keys.
__len__ = frame_base.wont_implement_method('non-deferred')
groups = property(frame_base.wont_implement_method('non-deferred'))
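# Editor's illustrative sketch (not part of the original module): the deferred groupby
# machinery above is normally reached through DeferredDataFrame.groupby, assumed to be
# defined earlier in this file; `df` is assumed to be a DeferredDataFrame with columns
# 'k' and 'v'.
def _demo_deferred_groupby(df):
  grouped = df.groupby('k')   # a DeferredGroupBy
  return grouped['v'].sum()   # __getitem__ projection plus the lifted sum set up below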
def _maybe_project_func(projection: Optional[List[str]]):
""" Returns identity func if projection is empty or None, else returns
a function that projects the specified columns. """
if projection:
return lambda df: df[projection]
else:
return lambda x: x
def _liftable_agg(meth, postagg_meth=None):
name, agg_func = frame_base.name_and_func(meth)
if postagg_meth is None:
post_agg_name, post_agg_func = name, agg_func
else:
post_agg_name, post_agg_func = frame_base.name_and_func(postagg_meth)
def wrapper(self, *args, **kwargs):
assert isinstance(self, DeferredGroupBy)
to_group = self._ungrouped.proxy().index
is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()
for i in self._grouping_indexes)
groupby_kwargs = self._kwargs
# Don't include un-observed categorical values in the preagg
preagg_groupby_kwargs = groupby_kwargs.copy()
preagg_groupby_kwargs['observed'] = True
project = _maybe_project_func(self._projection)
pre_agg = expressions.ComputedExpression(
'pre_combine_' + name,
lambda df: agg_func(project(
df.groupby(level=list(range(df.index.nlevels)),
**preagg_groupby_kwargs),
), **kwargs),
[self._ungrouped],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
post_agg = expressions.ComputedExpression(
'post_combine_' + post_agg_name,
lambda df: post_agg_func(
df.groupby(level=list(range(df.index.nlevels)), **groupby_kwargs),
**kwargs),
[pre_agg],
requires_partition_by=(partitionings.Singleton()
if is_categorical_grouping
else partitionings.Index()),
preserves_partition_by=partitionings.Arbitrary())
return frame_base.DeferredFrame.wrap(post_agg)
return wrapper
def _unliftable_agg(meth):
name, agg_func = frame_base.name_and_func(meth)
def wrapper(self, *args, **kwargs):
assert isinstance(self, DeferredGroupBy)
to_group = self._ungrouped.proxy().index
is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()
for i in self._grouping_indexes)
groupby_kwargs = self._kwargs
project = _maybe_project_func(self._projection)
post_agg = expressions.ComputedExpression(
name,
lambda df: agg_func(project(
df.groupby(level=list(range(df.index.nlevels)),
**groupby_kwargs),
), **kwargs),
[self._ungrouped],
requires_partition_by=(partitionings.Singleton()
if is_categorical_grouping
else partitionings.Index()),
preserves_partition_by=partitionings.Arbitrary())
return frame_base.DeferredFrame.wrap(post_agg)
return wrapper
LIFTABLE_AGGREGATIONS = ['all', 'any', 'max', 'min', 'prod', 'sum']
LIFTABLE_WITH_SUM_AGGREGATIONS = ['size', 'count']
UNLIFTABLE_AGGREGATIONS = ['mean', 'median', 'std', 'var']
for meth in LIFTABLE_AGGREGATIONS:
setattr(DeferredGroupBy, meth, _liftable_agg(meth))
for meth in LIFTABLE_WITH_SUM_AGGREGATIONS:
setattr(DeferredGroupBy, meth, _liftable_agg(meth, postagg_meth='sum'))
for meth in UNLIFTABLE_AGGREGATIONS:
setattr(DeferredGroupBy, meth, _unliftable_agg(meth))
def _is_associative(agg_func):
return agg_func in LIFTABLE_AGGREGATIONS or (
getattr(agg_func, '__name__', None) in LIFTABLE_AGGREGATIONS
and agg_func.__module__ in ('numpy', 'builtins'))
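# Editor's illustrative sketch (not part of the original module): the "lifting" above
# relies on these aggregations being combinable from per-partition partial results.
# A plain-pandas analogue, assuming df_part1/df_part2 are two arbitrary partitions of
# the same frame sharing a grouping column `key`:
def _demo_lifted_sum(df_part1, df_part2, key='k'):
  partial = pd.concat([df_part1.groupby(key).sum(), df_part2.groupby(key).sum()])
  # Re-aggregating the partial sums equals grouping and summing the full frame.
  return partial.groupby(level=0).sum()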
@populate_not_implemented(pd.core.groupby.generic.DataFrameGroupBy)
class _DeferredGroupByCols(frame_base.DeferredFrame):
# It's not clear that all of these make sense in Pandas either...
agg = aggregate = frame_base._elementwise_method('agg')
any = frame_base._elementwise_method('any')
all = frame_base._elementwise_method('all')
boxplot = frame_base.wont_implement_method('plot')
describe = frame_base.wont_implement_method('describe')
diff = frame_base._elementwise_method('diff')
fillna = frame_base._elementwise_method('fillna')
filter = frame_base._elementwise_method('filter')
first = frame_base.wont_implement_method('order sensitive')
get_group = frame_base._elementwise_method('group')
head = frame_base.wont_implement_method('order sensitive')
hist = frame_base.wont_implement_method('plot')
idxmax = frame_base._elementwise_method('idxmax')
idxmin = frame_base._elementwise_method('idxmin')
last = frame_base.wont_implement_method('order sensitive')
mad = frame_base._elementwise_method('mad')
max = frame_base._elementwise_method('max')
mean = frame_base._elementwise_method('mean')
median = frame_base._elementwise_method('median')
min = frame_base._elementwise_method('min')
nunique = frame_base._elementwise_method('nunique')
plot = frame_base.wont_implement_method('plot')
prod = frame_base._elementwise_method('prod')
quantile = frame_base._elementwise_method('quantile')
shift = frame_base._elementwise_method('shift')
size = frame_base._elementwise_method('size')
skew = frame_base._elementwise_method('skew')
std = frame_base._elementwise_method('std')
sum = frame_base._elementwise_method('sum')
tail = frame_base.wont_implement_method('order sensitive')
  take = frame_base.wont_implement_method('deprecated')
tshift = frame_base._elementwise_method('tshift')
var = frame_base._elementwise_method('var')
@property
def groups(self):
return self._expr.proxy().groups
@property
def indices(self):
return self._expr.proxy().indices
@property
def ndim(self):
return self._expr.proxy().ndim
@property
def ngroups(self):
return self._expr.proxy().ngroups
@populate_not_implemented(pd.core.indexes.base.Index)
class _DeferredIndex(object):
def __init__(self, frame):
self._frame = frame
@property
def names(self):
return self._frame._expr.proxy().index.names
@names.setter
def names(self, value):
def set_index_names(df):
df = df.copy()
df.index.names = value
return df
self._frame._expr = expressions.ComputedExpression(
'set_index_names',
set_index_names,
[self._frame._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
@property
def ndim(self):
return self._frame._expr.proxy().index.ndim
@property
def nlevels(self):
return self._frame._expr.proxy().index.nlevels
def __getattr__(self, name):
raise NotImplementedError('index.%s' % name)
@populate_not_implemented(pd.core.indexing._LocIndexer)
class _DeferredLoc(object):
def __init__(self, frame):
self._frame = frame
def __getitem__(self, index):
if isinstance(index, tuple):
rows, cols = index
return self[rows][cols]
elif isinstance(index, list) and index and isinstance(index[0], bool):
# Aligned by numerical index.
raise NotImplementedError(type(index))
elif isinstance(index, list):
# Select rows, but behaves poorly on missing values.
raise NotImplementedError(type(index))
elif isinstance(index, slice):
args = [self._frame._expr]
func = lambda df: df.loc[index]
elif isinstance(index, frame_base.DeferredFrame):
args = [self._frame._expr, index._expr]
func = lambda df, index: df.loc[index]
elif callable(index):
def checked_callable_index(df):
computed_index = index(df)
if isinstance(computed_index, tuple):
row_index, _ = computed_index
else:
row_index = computed_index
if isinstance(row_index, list) and row_index and isinstance(
row_index[0], bool):
raise NotImplementedError(type(row_index))
elif not isinstance(row_index, (slice, pd.Series)):
raise NotImplementedError(type(row_index))
return computed_index
args = [self._frame._expr]
func = lambda df: df.loc[checked_callable_index]
else:
raise NotImplementedError(type(index))
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'loc',
func,
args,
requires_partition_by=(
partitionings.Index()
if len(args) > 1
else partitionings.Arbitrary()),
preserves_partition_by=partitionings.Arbitrary()))
__setitem__ = frame_base.not_implemented_method('loc.setitem')
@populate_not_implemented(pd.core.indexing._iLocIndexer)
class _DeferredILoc(object):
def __init__(self, frame):
self._frame = frame
def __getitem__(self, index):
if isinstance(index, tuple):
rows, _ = index
if rows != slice(None, None, None):
raise frame_base.WontImplementError('order-sensitive')
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'iloc',
lambda df: df.iloc[index],
[self._frame._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
else:
raise frame_base.WontImplementError('order-sensitive')
__setitem__ = frame_base.wont_implement_method('iloc.setitem')
class _DeferredStringMethods(frame_base.DeferredBase):
@frame_base.args_to_kwargs(pd.core.strings.StringMethods)
@frame_base.populate_defaults(pd.core.strings.StringMethods)
def cat(self, others, join, **kwargs):
if others is None:
# Concatenate series into a single String
requires = partitionings.Singleton()
func = lambda df: df.str.cat(join=join, **kwargs)
args = [self._expr]
elif (isinstance(others, frame_base.DeferredBase) or
(isinstance(others, list) and
all(isinstance(other, frame_base.DeferredBase) for other in others))):
if join is None:
raise frame_base.WontImplementError("cat with others=Series or "
"others=List[Series] requires "
"join to be specified.")
if isinstance(others, frame_base.DeferredBase):
others = [others]
requires = partitionings.Index()
def func(*args):
return args[0].str.cat(others=args[1:], join=join, **kwargs)
args = [self._expr] + [other._expr for other in others]
else:
raise frame_base.WontImplementError("others must be None, Series, or "
"List[Series]. List[str] is not "
"supported.")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'cat',
func,
args,
requires_partition_by=requires,
preserves_partition_by=partitionings.Arbitrary()))
@frame_base.args_to_kwargs(pd.core.strings.StringMethods)
def repeat(self, repeats):
if isinstance(repeats, int):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series: series.str.repeat(repeats),
[self._expr],
# TODO(BEAM-11155): Defer to pandas to compute this proxy.
# Currently it incorrectly infers dtype bool, may require upstream
# fix.
proxy=self._expr.proxy(),
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, frame_base.DeferredBase):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series, repeats_series: series.str.repeat(repeats_series),
[self._expr, repeats._expr],
# TODO(BEAM-11155): Defer to pandas to compute this proxy.
# Currently it incorrectly infers dtype bool, may require upstream
# fix.
proxy=self._expr.proxy(),
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, list):
raise frame_base.WontImplementError("repeats must be an integer or a "
"Series.")
get_dummies = frame_base.wont_implement_method('non-deferred column values')
ELEMENTWISE_STRING_METHODS = [
'capitalize',
'casefold',
'contains',
'count',
'endswith',
'extract',
'extractall',
'findall',
'fullmatch',
'get',
'isalnum',
'isalpha',
'isdecimal',
'isdigit',
'islower',
'isnumeric',
'isspace',
'istitle',
'isupper',
'join',
'len',
'lower',
'lstrip',
'match',
'pad',
'partition',
'replace',
'rpartition',
'rsplit',
'rstrip',
'slice',
'slice_replace',
'split',
'startswith',
'strip',
'swapcase',
'title',
'upper',
'wrap',
'zfill',
'__getitem__',
]
def make_str_func(method):
def func(df, *args, **kwargs):
try:
df_str = df.str
except AttributeError:
# If there's a non-string value in a Series passed to .str method, pandas
# will generally just replace it with NaN in the result. However if
# there are _only_ non-string values, pandas will raise:
#
# AttributeError: Can only use .str accessor with string values!
#
# This can happen to us at execution time if we split a partition that is
# only non-strings. This branch just replaces all those values with NaN
# in that case.
return df.map(lambda _: np.nan)
else:
return getattr(df_str, method)(*args, **kwargs)
return func
for method in ELEMENTWISE_STRING_METHODS:
setattr(_DeferredStringMethods,
method,
frame_base._elementwise_method(make_str_func(method)))
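# Editor's note (illustrative, assuming a DeferredSeries of strings named `s`): the
# methods registered above apply elementwise, e.g. s.str.upper() or
# s.str.contains('foo'); partitions holding only non-string values are mapped to NaN
# by make_str_func above.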
for base in ['add',
'sub',
'mul',
'div',
'truediv',
'floordiv',
'mod',
'divmod',
'pow',
'and',
'or']:
for p in ['%s', 'r%s', '__%s__', '__r%s__']:
# TODO: non-trivial level?
name = p % base
setattr(
DeferredSeries,
name,
frame_base._elementwise_method(name, restrictions={'level': None}))
setattr(
DeferredDataFrame,
name,
frame_base._elementwise_method(name, restrictions={'level': None}))
setattr(
DeferredSeries,
'__i%s__' % base,
frame_base._elementwise_method('__i%s__' % base, inplace=True))
setattr(
DeferredDataFrame,
'__i%s__' % base,
frame_base._elementwise_method('__i%s__' % base, inplace=True))
for name in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
for p in '%s', '__%s__':
# Note that non-underscore name is used for both as the __xxx__ methods are
# order-sensitive.
setattr(DeferredSeries, p % name, frame_base._elementwise_method(name))
setattr(DeferredDataFrame, p % name, frame_base._elementwise_method(name))
for name in ['__neg__', '__pos__', '__invert__']:
setattr(DeferredSeries, name, frame_base._elementwise_method(name))
setattr(DeferredDataFrame, name, frame_base._elementwise_method(name))
DeferredSeries.multiply = DeferredSeries.mul # type: ignore
DeferredDataFrame.multiply = DeferredDataFrame.mul # type: ignore
def _slice_parts(s):
yield s.start
yield s.stop
yield s.step
def _is_null_slice(s):
return isinstance(s, slice) and all(x is None for x in _slice_parts(s))
def _is_integer_slice(s):
return isinstance(s, slice) and all(
x is None or isinstance(x, int)
for x in _slice_parts(s)) and not _is_null_slice(s)
| 37.193409
| 98
| 0.656963
|
794eaad4cf24445c88cfb64a7f65072873f8d267
| 11,587
|
py
|
Python
|
scvi/module/_scanvae.py
|
jules-samaran/scvi-tools
|
7dcbb819cdc6a7991469fdca6b292276c59a946d
|
[
"BSD-3-Clause"
] | 280
|
2020-09-18T06:26:28.000Z
|
2022-03-01T20:28:14.000Z
|
scvi/module/_scanvae.py
|
jules-samaran/scvi-tools
|
7dcbb819cdc6a7991469fdca6b292276c59a946d
|
[
"BSD-3-Clause"
] | 594
|
2020-09-17T00:03:34.000Z
|
2022-03-02T21:45:17.000Z
|
scvi/module/_scanvae.py
|
jules-samaran/scvi-tools
|
7dcbb819cdc6a7991469fdca6b292276c59a946d
|
[
"BSD-3-Clause"
] | 96
|
2020-09-19T21:26:00.000Z
|
2022-02-25T05:38:05.000Z
|
from typing import Iterable, Optional, Sequence
import numpy as np
import torch
from torch.distributions import Categorical, Normal
from torch.distributions import kl_divergence as kl
from torch.nn import functional as F
from scvi import _CONSTANTS
from scvi._compat import Literal
from scvi.module.base import LossRecorder, auto_move_data
from scvi.nn import Decoder, Encoder
from ._classifier import Classifier
from ._utils import broadcast_labels
from ._vae import VAE
class SCANVAE(VAE):
"""
Single-cell annotation using variational inference.
This is an implementation of the scANVI model described in [Xu21]_,
    inspired by the M1 + M2 model, as described in (https://arxiv.org/pdf/1406.5298.pdf).
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer
n_latent
Dimensionality of the latent space
n_layers
Number of hidden layers used for encoder and decoder NNs
n_continuous_cov
        Number of continuous covariates
n_cats_per_cov
Number of categories for each extra categorical covariate
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
gene_likelihood
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
y_prior
If None, initialized to uniform probability over cell types
labels_groups
Label group designations
use_labels_groups
Whether to use the label groups
use_batch_norm
Whether to use batch norm in layers
use_layer_norm
Whether to use layer norm in layers
**vae_kwargs
Keyword args for :class:`~scvi.module.VAE`
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
n_continuous_cov: int = 0,
n_cats_per_cov: Optional[Iterable[int]] = None,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
gene_likelihood: str = "zinb",
y_prior=None,
labels_groups: Sequence[int] = None,
use_labels_groups: bool = False,
classifier_parameters: dict = dict(),
use_batch_norm: Literal["encoder", "decoder", "none", "both"] = "both",
use_layer_norm: Literal["encoder", "decoder", "none", "both"] = "none",
**vae_kwargs
):
super().__init__(
n_input,
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
n_continuous_cov=n_continuous_cov,
n_cats_per_cov=n_cats_per_cov,
dropout_rate=dropout_rate,
n_batch=n_batch,
dispersion=dispersion,
log_variational=log_variational,
gene_likelihood=gene_likelihood,
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
**vae_kwargs
)
use_batch_norm_encoder = use_batch_norm == "encoder" or use_batch_norm == "both"
use_batch_norm_decoder = use_batch_norm == "decoder" or use_batch_norm == "both"
use_layer_norm_encoder = use_layer_norm == "encoder" or use_layer_norm == "both"
use_layer_norm_decoder = use_layer_norm == "decoder" or use_layer_norm == "both"
self.n_labels = n_labels
# Classifier takes n_latent as input
cls_parameters = {
"n_layers": n_layers,
"n_hidden": n_hidden,
"dropout_rate": dropout_rate,
}
cls_parameters.update(classifier_parameters)
self.classifier = Classifier(
n_latent,
n_labels=n_labels,
use_batch_norm=use_batch_norm_encoder,
use_layer_norm=use_layer_norm_encoder,
**cls_parameters
)
self.encoder_z2_z1 = Encoder(
n_latent,
n_latent,
n_cat_list=[self.n_labels],
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
use_batch_norm=use_batch_norm_encoder,
use_layer_norm=use_layer_norm_encoder,
)
self.decoder_z1_z2 = Decoder(
n_latent,
n_latent,
n_cat_list=[self.n_labels],
n_layers=n_layers,
n_hidden=n_hidden,
use_batch_norm=use_batch_norm_decoder,
use_layer_norm=use_layer_norm_decoder,
)
self.y_prior = torch.nn.Parameter(
y_prior
if y_prior is not None
else (1 / n_labels) * torch.ones(1, n_labels),
requires_grad=False,
)
self.use_labels_groups = use_labels_groups
self.labels_groups = (
np.array(labels_groups) if labels_groups is not None else None
)
if self.use_labels_groups:
if labels_groups is None:
raise ValueError("Specify label groups")
unique_groups = np.unique(self.labels_groups)
self.n_groups = len(unique_groups)
if not (unique_groups == np.arange(self.n_groups)).all():
raise ValueError()
self.classifier_groups = Classifier(
n_latent, n_hidden, self.n_groups, n_layers, dropout_rate
)
self.groups_index = torch.nn.ParameterList(
[
torch.nn.Parameter(
torch.tensor(
(self.labels_groups == i).astype(np.uint8),
dtype=torch.uint8,
),
requires_grad=False,
)
for i in range(self.n_groups)
]
)
@auto_move_data
def classify(self, x, batch_index=None):
if self.log_variational:
x = torch.log(1 + x)
qz_m, _, z = self.z_encoder(x, batch_index)
# We classify using the inferred mean parameter of z_1 in the latent space
z = qz_m
if self.use_labels_groups:
w_g = self.classifier_groups(z)
unw_y = self.classifier(z)
w_y = torch.zeros_like(unw_y)
for i, group_index in enumerate(self.groups_index):
unw_y_g = unw_y[:, group_index]
w_y[:, group_index] = unw_y_g / (
unw_y_g.sum(dim=-1, keepdim=True) + 1e-8
)
w_y[:, group_index] *= w_g[:, [i]]
else:
w_y = self.classifier(z)
return w_y
@auto_move_data
def classification_loss(self, labelled_dataset):
x = labelled_dataset[_CONSTANTS.X_KEY]
y = labelled_dataset[_CONSTANTS.LABELS_KEY]
batch_idx = labelled_dataset[_CONSTANTS.BATCH_KEY]
classification_loss = F.cross_entropy(
self.classify(x, batch_idx), y.view(-1).long()
)
return classification_loss
def loss(
self,
tensors,
inference_outputs,
        generative_outputs,
feed_labels=False,
kl_weight=1,
labelled_tensors=None,
classification_ratio=None,
):
        px_r = generative_outputs["px_r"]
        px_rate = generative_outputs["px_rate"]
        px_dropout = generative_outputs["px_dropout"]
qz1_m = inference_outputs["qz_m"]
qz1_v = inference_outputs["qz_v"]
z1 = inference_outputs["z"]
x = tensors[_CONSTANTS.X_KEY]
batch_index = tensors[_CONSTANTS.BATCH_KEY]
if feed_labels:
y = tensors[_CONSTANTS.LABELS_KEY]
else:
y = None
is_labelled = False if y is None else True
# Enumerate choices of label
ys, z1s = broadcast_labels(y, z1, n_broadcast=self.n_labels)
qz2_m, qz2_v, z2 = self.encoder_z2_z1(z1s, ys)
pz1_m, pz1_v = self.decoder_z1_z2(z2, ys)
reconst_loss = self.get_reconstruction_loss(x, px_rate, px_r, px_dropout)
# KL Divergence
mean = torch.zeros_like(qz2_m)
scale = torch.ones_like(qz2_v)
kl_divergence_z2 = kl(
Normal(qz2_m, torch.sqrt(qz2_v)), Normal(mean, scale)
).sum(dim=1)
loss_z1_unweight = -Normal(pz1_m, torch.sqrt(pz1_v)).log_prob(z1s).sum(dim=-1)
loss_z1_weight = Normal(qz1_m, torch.sqrt(qz1_v)).log_prob(z1).sum(dim=-1)
if not self.use_observed_lib_size:
ql_m = inference_outputs["ql_m"]
ql_v = inference_outputs["ql_v"]
(
local_library_log_means,
local_library_log_vars,
) = self._compute_local_library_params(batch_index)
kl_divergence_l = kl(
Normal(ql_m, torch.sqrt(ql_v)),
Normal(local_library_log_means, torch.sqrt(local_library_log_vars)),
).sum(dim=1)
else:
kl_divergence_l = 0.0
if is_labelled:
loss = reconst_loss + loss_z1_weight + loss_z1_unweight
kl_locals = {
"kl_divergence_z2": kl_divergence_z2,
"kl_divergence_l": kl_divergence_l,
}
if labelled_tensors is not None:
classifier_loss = self.classification_loss(labelled_tensors)
loss += classifier_loss * classification_ratio
return LossRecorder(
loss,
reconst_loss,
kl_locals,
kl_global=torch.tensor(0.0),
classification_loss=classifier_loss,
n_labelled_tensors=labelled_tensors[_CONSTANTS.X_KEY].shape[0],
)
return LossRecorder(
loss,
reconst_loss,
kl_locals,
kl_global=torch.tensor(0.0),
)
probs = self.classifier(z1)
reconst_loss += loss_z1_weight + (
(loss_z1_unweight).view(self.n_labels, -1).t() * probs
).sum(dim=1)
kl_divergence = (kl_divergence_z2.view(self.n_labels, -1).t() * probs).sum(
dim=1
)
kl_divergence += kl(
Categorical(probs=probs),
Categorical(probs=self.y_prior.repeat(probs.size(0), 1)),
)
kl_divergence += kl_divergence_l
loss = torch.mean(reconst_loss + kl_divergence * kl_weight)
if labelled_tensors is not None:
classifier_loss = self.classification_loss(labelled_tensors)
loss += classifier_loss * classification_ratio
return LossRecorder(
loss,
reconst_loss,
kl_divergence,
kl_global=torch.tensor(0.0),
classification_loss=classifier_loss,
n_labelled_tensors=labelled_tensors[_CONSTANTS.X_KEY].shape[0],
)
return LossRecorder(
loss, reconst_loss, kl_divergence, kl_global=torch.tensor(0.0)
)
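def _demo_scanvae_module():
    # Editor's illustrative sketch (not part of the original module): direct
    # construction with hypothetical sizes. In practice the module is usually built
    # for you by the high-level scvi.model.SCANVI wrapper from an AnnData object.
    return SCANVAE(n_input=2000, n_batch=3, n_labels=5, n_hidden=128, n_latent=10)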
| 35.32622
| 88
| 0.590144
|
794eab1e146f8bd58d37afd22c84ccfe2d55d393
| 4,589
|
py
|
Python
|
airflow/operators/presto_check_operator.py
|
shuva10v/airflow
|
a6daeb544e815fe350a96d24ae3bb14aee4079a7
|
[
"Apache-2.0"
] | 3
|
2019-10-03T21:38:59.000Z
|
2019-10-04T00:39:03.000Z
|
airflow/operators/presto_check_operator.py
|
shuva10v/airflow
|
a6daeb544e815fe350a96d24ae3bb14aee4079a7
|
[
"Apache-2.0"
] | 7
|
2019-03-27T07:58:14.000Z
|
2020-02-12T17:42:33.000Z
|
airflow/operators/presto_check_operator.py
|
upjohnc/airflow-upjohn-k8s
|
caadbc1618d73e054de99138b0892cea3a9327c4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5
|
2017-06-19T19:55:47.000Z
|
2020-10-10T00:49:20.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.hooks.presto_hook import PrestoHook
from airflow.operators.check_operator import CheckOperator, \
ValueCheckOperator, IntervalCheckOperator
from airflow.utils.decorators import apply_defaults
class PrestoCheckOperator(CheckOperator):
"""
    Performs checks against Presto. The ``PrestoCheckOperator`` expects
    a SQL query that will return a single row. Each value on that
    first row is evaluated using Python ``bool`` casting. If any of the
    values evaluate to ``False``, the check fails and errors out.
    Note that Python bool casting evaluates the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft a much more complex query that could,
    for instance, check that the table has the same number of rows as
    the source table upstream, or that the count of today's partition is
    greater than yesterday's partition, or that a set of metrics deviates
    by less than 3 standard deviations from the 7-day average.
This operator can be used as a data quality check in your pipeline, and
    depending on where you put it in your DAG, you can choose to
    stop the critical path, preventing dubious data from being published,
    or run it on the side and receive email alerts
    without stopping the progress of the DAG.
:param sql: the sql to be executed
:type sql: str
:param presto_conn_id: reference to the Presto database
:type presto_conn_id: str
"""
@apply_defaults
def __init__(
self, sql,
presto_conn_id='presto_default',
*args, **kwargs):
super().__init__(sql=sql, *args, **kwargs)
self.presto_conn_id = presto_conn_id
self.sql = sql
def get_db_hook(self):
return PrestoHook(presto_conn_id=self.presto_conn_id)
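# Editor's illustrative sketch (not part of the original module): a minimal task using
# the operator above, assuming a DAG object named `dag` and a configured
# `presto_default` connection; the table name and task_id are hypothetical.
def _example_presto_check(dag):
    return PrestoCheckOperator(
        task_id='check_foo_not_empty',
        sql='SELECT COUNT(*) FROM foo',
        presto_conn_id='presto_default',
        dag=dag,
    )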
class PrestoValueCheckOperator(ValueCheckOperator):
"""
Performs a simple value check using sql code.
:param sql: the sql to be executed
:type sql: str
:param presto_conn_id: reference to the Presto database
:type presto_conn_id: str
"""
@apply_defaults
def __init__(
self, sql, pass_value, tolerance=None,
presto_conn_id='presto_default',
*args, **kwargs):
super().__init__(
sql=sql, pass_value=pass_value, tolerance=tolerance,
*args, **kwargs)
self.presto_conn_id = presto_conn_id
def get_db_hook(self):
return PrestoHook(presto_conn_id=self.presto_conn_id)
class PrestoIntervalCheckOperator(IntervalCheckOperator):
"""
Checks that the values of metrics given as SQL expressions are within
a certain tolerance of the ones from days_back before.
:param table: the table name
:type table: str
:param days_back: number of days between ds and the ds we want to check
        against. Defaults to -7, i.e. 7 days back
:type days_back: int
    :param metrics_thresholds: a dictionary of ratios indexed by metrics
    :type metrics_thresholds: dict
:param presto_conn_id: reference to the Presto database
:type presto_conn_id: str
"""
@apply_defaults
def __init__(
self, table, metrics_thresholds,
date_filter_column='ds', days_back=-7,
presto_conn_id='presto_default',
*args, **kwargs):
super().__init__(
table=table, metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column, days_back=days_back,
*args, **kwargs)
self.presto_conn_id = presto_conn_id
def get_db_hook(self):
return PrestoHook(presto_conn_id=self.presto_conn_id)
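# Editor's illustrative sketch (not part of the original module): fail when today's
# row count deviates from 7 days back by more than the given ratio; the table name,
# task_id and the 1.5 threshold are hypothetical.
def _example_presto_interval_check(dag):
    return PrestoIntervalCheckOperator(
        task_id='check_foo_row_count_stability',
        table='foo',
        metrics_thresholds={'COUNT(*)': 1.5},
        date_filter_column='ds',
        days_back=-7,
        dag=dag,
    )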
| 35.851563
| 75
| 0.695141
|
794eabbf04d496b50b2312950c483aa22958e82e
| 1,481
|
py
|
Python
|
1M/W4/11.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 2
|
2021-11-25T13:38:36.000Z
|
2021-11-25T13:42:56.000Z
|
1M/W4/11.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | null | null | null |
1M/W4/11.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 1
|
2021-11-25T13:38:43.000Z
|
2021-11-25T13:38:43.000Z
|
# https://www.hackerrank.com/challenges/one-month-preparation-kit-tree-preorder-traversal/problem
class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.info (the value of the node)
"""
def preOrder(root):
n = root
if n is None: return
print(n.info, end=' ')
preOrder(n.left)
preOrder(n.right)
tree = BinarySearchTree()
t = int(input())
arr = list(map(int, input().split()))
for i in range(t):
tree.create(arr[i])
preOrder(tree.root)
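# Worked example (input values chosen purely for illustration): feeding
#     6
#     1 2 5 3 4 6
# builds the BST 1 -> 2 -> 5, with 3 (whose right child is 4) to the left of 5
# and 6 to its right, so the pre-order traversal above prints: 1 2 5 3 4 6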
| 24.278689
| 97
| 0.501013
|
794eacbbf1f2cf1daf87d03af26460d9916ffd21
| 3,138
|
py
|
Python
|
xeneta_qualifier/nn.py
|
xeneta/LeadQualifier
|
931fa2ffc65625f6d61ffcd299b0679864449d8d
|
[
"MIT"
] | 664
|
2016-06-07T14:23:32.000Z
|
2022-03-01T15:03:00.000Z
|
xeneta_qualifier/nn.py
|
VovkaZy/LeadQualifier
|
931fa2ffc65625f6d61ffcd299b0679864449d8d
|
[
"MIT"
] | 8
|
2016-06-08T13:19:16.000Z
|
2021-02-04T02:12:59.000Z
|
xeneta_qualifier/nn.py
|
VovkaZy/LeadQualifier
|
931fa2ffc65625f6d61ffcd299b0679864449d8d
|
[
"MIT"
] | 133
|
2016-06-03T13:40:58.000Z
|
2021-09-20T11:03:54.000Z
|
import tensorflow as tf
import csv
import numpy as np
from random import randrange
# This net is not working, as it predicts all 0's or all 1's at the moment.
# variables for the net
SEED = 3
FIRST_HIDDEN = 500
SECOND_HIDDEN = 50
FINAL_LAYER = 2
BATCH_SIZE = 100
def convertToFloat(lst):
return np.array(lst).astype(np.float)
def fetchData(path):
labels = []
data = []
f = open(path)
csv_f = csv.reader(f)
for row in csv_f:
labels.append(convertToFloat(row[0]))
data.append(convertToFloat(row[1:]))
f.close()
return np.array(data), np.array(labels)
def convertToOneHot(arr):
labels = []
for n in arr:
if n == 0:
labels.append([1, 0])
elif n == 1:
labels.append([0, 1])
return np.array(labels)
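# Illustration (hypothetical inputs): convertToFloat(['1', '0']) returns
# array([1., 0.]) and convertToOneHot([1, 0, 1]) returns
# array([[0, 1], [1, 0], [0, 1]]).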
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1, seed=SEED)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# create variables and placeholders for tensorflows computational graph
x = tf.placeholder(tf.float32, shape=[None, 5000])
y_ = tf.placeholder(tf.float32, shape=[None, FINAL_LAYER])
W_1 = weight_variable([5000, FIRST_HIDDEN])
b_1 = bias_variable([FIRST_HIDDEN])
W_2 = weight_variable([FIRST_HIDDEN, SECOND_HIDDEN])
b_2 = bias_variable([SECOND_HIDDEN])
W_3 = weight_variable([SECOND_HIDDEN, FINAL_LAYER])
b_3 = bias_variable([FINAL_LAYER])
hidden_layer_1 = tf.nn.sigmoid(tf.matmul(x, W_1) + b_1)
hidden_layer_2 = tf.nn.sigmoid(tf.matmul(hidden_layer_1, W_2) + b_2)
logits = tf.matmul(hidden_layer_2, W_3) + b_3
y = tf.nn.softmax(logits)
# Manually calculating the loss
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# Automatically calculating the loss
# Note: softmax_cross_entropy_with_logits applies softmax internally, so it
# must be fed the raw logits; passing the already-softmaxed `y` here would
# double-apply softmax.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, y_)
)
# possible other loss function, if not one hot vector
#loss = tf.reduce_mean(tf.abs(tf.sub(y_, y)))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# we need to initialize all variables
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# fetch the training and testing data
X_test, y_test = fetchData('data/test.csv')
X_train, y_train = fetchData('data/train.csv')
y_test = convertToOneHot(y_test)
y_train = convertToOneHot(y_train)
# loop through the data to run the regression and update the weights
for i in range(1000):
r = randrange(0, 1447)
start = r
stop = r + BATCH_SIZE
x_train_batch = X_train[start: stop]
y_train_batch = y_train[start: stop]
sess.run(train_step, feed_dict={
x: x_train_batch,
y_: y_train_batch
})
if i % 100 == 0:
cross_entropy_out = sess.run([cross_entropy], feed_dict={
x: X_test,
y_: y_test
})
print 'cross_entropy_out:', cross_entropy_out
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print 'accuracy: ', sess.run(accuracy, feed_dict={x: X_test, y_: y_test})
| 25.933884
| 86
| 0.695029
|
794ead6b57ac78738a56ff8dfa67ae4a6adf6cac
| 1,867
|
py
|
Python
|
actions/replaceCoreV1NamespacedServiceStatus.py
|
blinkops/stackstorm-kubernetes
|
3b4a15d42f603f3e700efaf534169e2ec361f5d2
|
[
"Apache-2.0"
] | 20
|
2016-12-24T01:35:41.000Z
|
2022-03-06T08:32:16.000Z
|
actions/replaceCoreV1NamespacedServiceStatus.py
|
blinkops/stackstorm-kubernetes
|
3b4a15d42f603f3e700efaf534169e2ec361f5d2
|
[
"Apache-2.0"
] | 16
|
2017-05-02T19:38:57.000Z
|
2021-06-17T08:31:17.000Z
|
actions/replaceCoreV1NamespacedServiceStatus.py
|
blinkops/stackstorm-kubernetes
|
3b4a15d42f603f3e700efaf534169e2ec361f5d2
|
[
"Apache-2.0"
] | 18
|
2017-06-20T00:44:12.000Z
|
2022-03-30T08:41:42.000Z
|
import json
from lib.k8s import K8sClient
class replaceCoreV1NamespacedServiceStatus(K8sClient):
def run(
self,
body,
name,
namespace,
pretty=None,
config_override=None):
ret = False
args = {}
args['config_override'] = {}
args['params'] = {}
if config_override is not None:
args['config_override'] = config_override
if body is not None:
args['body'] = body
else:
return (False, "body is a required parameter")
if name is not None:
args['name'] = name
else:
return (False, "name is a required parameter")
if namespace is not None:
args['namespace'] = namespace
else:
return (False, "namespace is a required parameter")
if pretty is not None:
args['params'].update({'pretty': pretty})
if 'body' in args:
args['data'] = args['body']
args.pop('body')
args['headers'] = {'Content-type': u'application/json', 'Accept': u'application/json, application/yaml, application/vnd.kubernetes.protobuf'} # noqa pylint: disable=line-too-long
args['url'] = "api/v1/namespaces/{namespace}/services/{name}/status".format( # noqa pylint: disable=line-too-long
body=body, name=name, namespace=namespace)
args['method'] = "put"
self.addArgs(**args)
self.makeRequest()
myresp = {}
myresp['status_code'] = self.resp.status_code
try:
myresp['data'] = json.loads(self.resp.content.rstrip())
except ValueError:
myresp['data'] = self.resp.content
if myresp['status_code'] >= 200 and myresp['status_code'] <= 299:
ret = True
return (ret, myresp)
| 30.606557
| 187
| 0.552758
|
794eadbd3e376b7a1d6e2fb64ec0948283f72726
| 16,706
|
py
|
Python
|
examples/remarketing/set_up_remarketing.py
|
claudiapaveljlp/google-ads-python
|
c143e81804e237a9549dd5936503d921033c4e5a
|
[
"Apache-2.0"
] | null | null | null |
examples/remarketing/set_up_remarketing.py
|
claudiapaveljlp/google-ads-python
|
c143e81804e237a9549dd5936503d921033c4e5a
|
[
"Apache-2.0"
] | null | null | null |
examples/remarketing/set_up_remarketing.py
|
claudiapaveljlp/google-ads-python
|
c143e81804e237a9549dd5936503d921033c4e5a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates various operations involved in remarketing.
Operations include (a) creating a user list based on visitors to a website,
(b) targeting a user list with an ad group criterion, (c) updating the bid
modifier on an ad group criterion, (d) finding and removing all ad group
criteria under a given campaign, (e) targeting a user list with a campaign
criterion, and (f) updating the bid modifier on a campaign criterion. It is
unlikely that users will need to perform all of these operations consecutively,
and all of the operations contained herein are meant for illustrative
purposes.
"""
import argparse
import sys
from uuid import uuid4
from google.api_core import protobuf_helpers
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
_DEFAULT_PAGE_SIZE = 10000
def main(client, customer_id, campaign_id, ad_group_id, bid_modifier_value):
"""The main method that creates all necessary entities for the example.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID used to create a user list and
other various entities required for the example.
campaign_id: a str ID for a campaign used to create an ad group
criterion that targets members of a user list.
ad_group_id: a str ID for an ad group used to create an ad group
criterion that targets members of a user list.
bid_modifier_value: a float that specifies a modifier on the bid amount
for newly created ad group criterion.
"""
user_list_resource_name = _create_user_list(client, customer_id)
ad_group_criterion_resource_name = _target_ads_in_ad_group_to_user_list(
client, customer_id, ad_group_id, user_list_resource_name
)
_modify_ad_group_bids(
client,
customer_id,
ad_group_criterion_resource_name,
bid_modifier_value,
)
_remove_existing_criteria_from_ad_group(client, customer_id, campaign_id)
campaign_criterion_resource_name = _target_ads_in_campaign_to_user_list(
client, customer_id, campaign_id, user_list_resource_name
)
_modify_campaign_bids(
client,
customer_id,
campaign_criterion_resource_name,
bid_modifier_value,
)
# [START setup_remarketing]
def _create_user_list(client, customer_id):
"""Creates a user list targeting users that have visited a given URL.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID used to create a user list.
Returns:
a str resource name for the newly created user list.
"""
# Creates a UserListOperation.
user_list_operation = client.get_type("UserListOperation")
# Creates a UserList.
user_list = user_list_operation.create
user_list.name = f"All visitors to example.com #{uuid4()}"
user_list.description = "Any visitor to any page of example.com"
user_list.membership_status = client.enums.UserListMembershipStatusEnum.OPEN
user_list.membership_life_span = 365
# Optional: To include past users in the user list, set the
# prepopulation_status to REQUESTED.
user_list.rule_based_user_list.prepopulation_status = (
client.enums.UserListPrepopulationStatusEnum.REQUESTED
)
# Specifies that the user list targets visitors of a page with a URL that
# contains 'example.com'.
user_list_rule_item_group_info = client.get_type(
"UserListRuleItemGroupInfo"
)
user_list_rule_item_info = client.get_type("UserListRuleItemInfo")
# Uses a built-in parameter to create a domain URL rule.
user_list_rule_item_info.name = "url__"
user_list_rule_item_info.string_rule_item.operator = (
client.enums.UserListStringRuleItemOperatorEnum.CONTAINS
)
user_list_rule_item_info.string_rule_item.value = "example.com"
user_list_rule_item_group_info.rule_items.append(user_list_rule_item_info)
user_list.rule_based_user_list.expression_rule_user_list.rule.rule_item_groups.append(
user_list_rule_item_group_info
)
user_list_service = client.get_service("UserListService")
response = user_list_service.mutate_user_lists(
customer_id=customer_id, operations=[user_list_operation]
)
resource_name = response.results[0].resource_name
print(f"Created user list with resource name: '{resource_name}'")
return resource_name
# [END setup_remarketing]
# [START setup_remarketing_1]
def _target_ads_in_ad_group_to_user_list(
client, customer_id, ad_group_id, user_list_resource_name
):
"""Creates an ad group criterion that targets a user list with an ad group.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID used to create an ad group
criterion.
ad_group_id: a str ID for an ad group used to create an ad group
criterion that targets members of a user list.
user_list_resource_name: a str resource name for a user list.
Returns:
a str resource name for an ad group criterion.
"""
ad_group_criterion_operation = client.get_type("AdGroupCriterionOperation")
# Creates the ad group criterion targeting members of the user list.
ad_group_criterion = ad_group_criterion_operation.create
ad_group_criterion.ad_group = client.get_service(
"AdGroupService"
).ad_group_path(customer_id, ad_group_id)
ad_group_criterion.user_list.user_list = user_list_resource_name
ad_group_criterion_service = client.get_service("AdGroupCriterionService")
response = ad_group_criterion_service.mutate_ad_group_criteria(
customer_id=customer_id, operations=[ad_group_criterion_operation]
)
resource_name = response.results[0].resource_name
print(
"Successfully created ad group criterion with resource name: "
f"'{resource_name}' targeting user list with resource name: "
f"'{user_list_resource_name}' and with ad group with ID "
f"{ad_group_id}."
)
return resource_name
# [END setup_remarketing_1]
def _modify_ad_group_bids(
client, customer_id, ad_group_criterion_resource_name, bid_modifier_value
):
"""Updates the bid modifier on an ad group criterion.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID.
        ad_group_criterion_resource_name: a str resource name for an ad group
criterion.
bid_modifier_value: a float value specifying an ad group criterion
bid modifier.
"""
# Constructs an operation that will update the ad group criterion with the
# specified resource name.
ad_group_criterion_operation = client.get_type("AdGroupCriterionOperation")
ad_group_criterion = ad_group_criterion_operation.update
# Creates the ad group criterion with a bid modifier. You may alternatively
# set the bid for the ad group criterion directly.
ad_group_criterion.resource_name = ad_group_criterion_resource_name
ad_group_criterion.bid_modifier = bid_modifier_value
# Using the FieldMasks utility to derive the update mask tells the Google
# Ads API which attributes of the ad group criterion you want to change.
client.copy_from(
ad_group_criterion_operation.update_mask,
protobuf_helpers.field_mask(None, ad_group_criterion._pb),
)
ad_group_criterion_service = client.get_service("AdGroupCriterionService")
response = ad_group_criterion_service.mutate_ad_group_criteria(
customer_id=customer_id, operations=[ad_group_criterion_operation]
)
print(
"Updated bid for ad group criterion with resource name: "
f"'{response.results[0].resource_name}'"
)
# [START setup_remarketing_3]
def _remove_existing_criteria_from_ad_group(client, customer_id, campaign_id):
"""Removes all ad group criteria targeting a user list under a campaign.
This is a necessary step before targeting a user list at the campaign level.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID.
campaign_id: a str ID for a campaign that will have all ad group
criteria that targets user lists removed.
"""
# Retrieves all of the ad group criteria under a campaign.
all_ad_group_criteria = _get_user_list_ad_group_criteria(
client, customer_id, campaign_id
)
# Creates a list of remove operations.
remove_operations = []
for ad_group_criterion_resource_name in all_ad_group_criteria:
remove_operation = client.get_type("AdGroupCriterionOperation")
remove_operation.remove = ad_group_criterion_resource_name
remove_operations.append(remove_operation)
ad_group_criterion_service = client.get_service("AdGroupCriterionService")
response = ad_group_criterion_service.mutate_ad_group_criteria(
customer_id=customer_id, operations=remove_operations
)
print(
"Successfully removed ad group criterion with resource name: "
f"'{response.results[0].resource_name}'"
)
# [END setup_remarketing_3]
# [START setup_remarketing_2]
def _get_user_list_ad_group_criteria(client, customer_id, campaign_id):
"""Finds all of user list ad group criteria under a campaign.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID.
campaign_id: a str campaign ID.
Returns:
a list of ad group criterion resource names.
"""
# Creates a query that retrieves all of the ad group criteria under a
# campaign.
query = f"""
SELECT
ad_group_criterion.criterion_id
FROM ad_group_criterion
WHERE campaign.id = {campaign_id}
AND ad_group_criterion.type = USER_LIST"""
googleads_service = client.get_service("GoogleAdsService")
search_request = client.get_type("SearchGoogleAdsRequest")
search_request.customer_id = customer_id
search_request.query = query
search_request.page_size = _DEFAULT_PAGE_SIZE
response = googleads_service.search(request=search_request)
# Iterates over all rows in all pages. Prints the user list criteria and
# adds the ad group criteria resource names to the list.
user_list_criteria = []
for row in response:
resource_name = row.ad_group_criterion.resource_name
print(
"Ad group criterion with resource name '{resource_name}' was "
"found."
)
user_list_criteria.append(resource_name)
return user_list_criteria
# [END setup_remarketing_2]
# [START setup_remarketing_4]
def _target_ads_in_campaign_to_user_list(
client, customer_id, campaign_id, user_list_resource_name
):
"""Creates a campaign criterion that targets a user list with a campaign.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID used to create an campaign
criterion.
campaign_id: a str ID for a campaign used to create a campaign
criterion that targets members of a user list.
user_list_resource_name: a str resource name for a user list.
Returns:
a str resource name for a campaign criterion.
"""
campaign_criterion_operation = client.get_type("CampaignCriterionOperation")
campaign_criterion = campaign_criterion_operation.create
campaign_criterion.campaign = client.get_service(
"CampaignService"
).campaign_path(customer_id, campaign_id)
campaign_criterion.user_list.user_list = user_list_resource_name
campaign_criterion_service = client.get_service("CampaignCriterionService")
response = campaign_criterion_service.mutate_campaign_criteria(
customer_id=customer_id, operations=[campaign_criterion_operation]
)
resource_name = response.results[0].resource_name
print(
"Successfully created campaign criterion with resource name "
f"'{resource_name}' targeting user list with resource name "
f"'{user_list_resource_name}' with campaign with ID {campaign_id}"
)
return resource_name
# [END setup_remarketing_4]
def _modify_campaign_bids(
client, customer_id, campaign_criterion_resource_name, bid_modifier_value
):
"""Updates the bid modifier on a campaign criterion.
Args:
client: an initialized GoogleAdsClient instance.
customer_id: a str client customer ID.
campaign_criterion_resource_name: a str resource name for a campaign
criterion.
bid_modifier_value: a float value specifying a campaign criterion
bid modifier.
"""
# Constructs an operation that will update the campaign criterion with the
# specified resource name.
campaign_criterion_operation = client.get_type("CampaignCriterionOperation")
campaign_criterion = campaign_criterion_operation.update
campaign_criterion.resource_name = campaign_criterion_resource_name
campaign_criterion.bid_modifier = bid_modifier_value
# Using the FieldMasks utility to derive the update mask tells the Google
# Ads API which attributes of the campaign criterion you want to change.
client.copy_from(
campaign_criterion_operation.update_mask,
protobuf_helpers.field_mask(None, campaign_criterion._pb),
)
campaign_criterion_service = client.get_service("CampaignCriterionService")
response = campaign_criterion_service.mutate_campaign_criteria(
customer_id=customer_id, operations=[campaign_criterion_operation]
)
print(
"Successfully updated the bid for campaign criterion with resource "
f"name: '{response.results[0].resource_name}'"
)
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v9")
parser = argparse.ArgumentParser(
description="Demonstrates various operations involved in remarketing."
)
# The following arguments are required to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help=(
"A Google Ads customer ID used to create a user list and other "
"various entities required for the example."
),
)
parser.add_argument(
"-i",
"--campaign_id",
type=str,
required=True,
help=(
"The ID for a campaign that will have its ad group criteria "
"modified to target user lists members."
),
)
parser.add_argument(
"-a",
"--ad_group_id",
type=str,
required=True,
help=(
"The ID for an ad group used to create an ad group criterion "
"that targets members of a user list."
),
)
# The following argument is optional.
parser.add_argument(
"-b",
"--bid_modifier_value",
type=float,
default=1.5,
help=(
"A float that specifies a modifier on the bid amount "
"for newly created ad group criterion."
),
)
args = parser.parse_args()
try:
main(
googleads_client,
args.customer_id,
args.campaign_id,
args.ad_group_id,
args.bid_modifier_value,
)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
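# Example invocation (the customer ID, campaign ID and ad group ID below are
# placeholders, not real account values):
#
#     python set_up_remarketing.py -c 1234567890 -i 111111111 -a 222222222 -b 1.5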
| 38.941725
| 90
| 0.717167
|
794eaec13ec1ac42a7aaf4aa60bab50131342446
| 5,259
|
py
|
Python
|
external_apps/docutils-snapshot/test/test_publisher.py
|
spreeker/democracygame
|
525139955cb739c295051f317ab670049511bcf8
|
[
"BSD-3-Clause"
] | 2
|
2016-05-09T04:57:34.000Z
|
2017-03-03T14:22:24.000Z
|
external_apps/docutils-snapshot/test/test_publisher.py
|
spreeker/democracygame
|
525139955cb739c295051f317ab670049511bcf8
|
[
"BSD-3-Clause"
] | null | null | null |
external_apps/docutils-snapshot/test/test_publisher.py
|
spreeker/democracygame
|
525139955cb739c295051f317ab670049511bcf8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# $Id: test_publisher.py 5889 2009-04-01 20:00:21Z gbrandl $
# Author: Martin Blais <blais@furius.ca>
# Copyright: This module has been placed in the public domain.
"""
Test the `Publisher` facade and the ``publish_*`` convenience functions.
"""
import pickle
import DocutilsTestSupport # must be imported before docutils
import docutils
from docutils import core, nodes, io
from docutils._compat import b, bytes, u_prefix
test_document = """\
Test Document
=============
This is a test document with a broken reference: nonexistent_
"""
pseudoxml_output = b("""\
<document ids="test-document" names="test\ document" source="<string>" title="Test Document">
<title>
Test Document
<paragraph>
This is a test document with a broken reference: \n\
<problematic ids="id2" refid="id1">
nonexistent_
<section classes="system-messages">
<title>
Docutils System Messages
<system_message backrefs="id2" ids="id1" level="3" line="4" source="<string>" type="ERROR">
<paragraph>
Unknown target name: "nonexistent".
""")
exposed_pseudoxml_output = b("""\
<document ids="test-document" internal:refnames="{%s\'nonexistent\': [<reference: <#text: \'nonexistent\'>>]}" names="test\ document" source="<string>" title="Test Document">
<title>
Test Document
<paragraph>
This is a test document with a broken reference: \n\
<problematic ids="id2" refid="id1">
nonexistent_
<section classes="system-messages">
<title>
Docutils System Messages
<system_message backrefs="id2" ids="id1" level="3" line="4" source="<string>" type="ERROR">
<paragraph>
Unknown target name: "nonexistent".
""" % u_prefix)
class PublishDoctreeTestCase(DocutilsTestSupport.StandardTestCase, docutils.SettingsSpec):
settings_default_overrides = {
'_disable_config': 1,
'warning_stream': io.NullOutput()}
def test_publish_doctree(self):
# Test `publish_doctree` and `publish_from_doctree`.
# Produce the document tree.
doctree = core.publish_doctree(
source=test_document, reader_name='standalone',
parser_name='restructuredtext', settings_spec=self,
settings_overrides={'expose_internals':
['refnames', 'do_not_expose'],
'report_level': 5})
self.assert_(isinstance(doctree, nodes.document))
# Confirm that transforms have been applied (in this case, the
# DocTitle transform):
self.assert_(isinstance(doctree[0], nodes.title))
self.assert_(isinstance(doctree[1], nodes.paragraph))
# Confirm that the Messages transform has not yet been applied:
self.assertEquals(len(doctree), 2)
# The `do_not_expose` attribute may not show up in the
# pseudoxml output because the expose_internals transform may
# not be applied twice.
doctree.do_not_expose = 'test'
# Write out the document:
output = core.publish_from_doctree(
doctree, writer_name='pseudoxml',
settings_spec=self,
settings_overrides={'expose_internals':
['refnames', 'do_not_expose'],
'report_level': 1})
self.assertEquals(output, exposed_pseudoxml_output)
# Test publishing parts using document as the source.
parts = core.publish_parts(
reader_name='doctree', source_class=io.DocTreeInput,
source=doctree, source_path='test', writer_name='html',
settings_spec=self)
self.assert_(isinstance(parts, dict))
def test_publish_pickle(self):
# Test publishing a document tree with pickling and unpickling.
# Produce the document tree.
doctree = core.publish_doctree(
source=test_document,
reader_name='standalone',
parser_name='restructuredtext',
settings_spec=self)
self.assert_(isinstance(doctree, nodes.document))
# Pickle the document. Note: if this fails, some unpickleable
# reference has been added somewhere within the document tree.
# If so, you need to fix that.
#
# Note: Please do not remove this test, this is an important
# requirement, applications will be built on the assumption
# that we can pickle the document.
# Remove the reporter and the transformer before pickling.
doctree.reporter = None
doctree.transformer = None
doctree_pickled = pickle.dumps(doctree)
self.assert_(isinstance(doctree_pickled, bytes))
del doctree
# Unpickle the document.
doctree_zombie = pickle.loads(doctree_pickled)
self.assert_(isinstance(doctree_zombie, nodes.document))
# Write out the document:
output = core.publish_from_doctree(
doctree_zombie, writer_name='pseudoxml',
settings_spec=self)
self.assertEquals(output, pseudoxml_output)
if __name__ == '__main__':
import unittest
unittest.main()
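# A minimal sketch (not part of the test suite) of the convenience layer the
# tests above exercise; the source text and writer choice are arbitrary:
#
#     from docutils import core
#     html = core.publish_string(source='*hello*', writer_name='html')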
| 37.035211
| 174
| 0.637764
|
794eaecb023e41ad2a04293753ad51a10f7f910a
| 8,532
|
py
|
Python
|
lib/tool_shed/webapp/api/tools.py
|
maikenp/galaxy
|
eb3f3c816f1f94bc328d092f30c8966d41a56a0d
|
[
"CC-BY-3.0"
] | 1
|
2021-10-08T02:14:24.000Z
|
2021-10-08T02:14:24.000Z
|
lib/tool_shed/webapp/api/tools.py
|
maikenp/galaxy
|
eb3f3c816f1f94bc328d092f30c8966d41a56a0d
|
[
"CC-BY-3.0"
] | null | null | null |
lib/tool_shed/webapp/api/tools.py
|
maikenp/galaxy
|
eb3f3c816f1f94bc328d092f30c8966d41a56a0d
|
[
"CC-BY-3.0"
] | null | null | null |
import json
import logging
from collections import namedtuple
from galaxy import (
exceptions,
util,
web
)
from galaxy.tools.parameters import params_to_strings
from galaxy.tools.repositories import ValidationContext
from galaxy.web import expose_api_raw_anonymous_and_sessionless
from galaxy.webapps.base.controller import BaseAPIController
from tool_shed.dependencies.repository import relation_builder
from tool_shed.tools import tool_validator
from tool_shed.util import (
common_util,
metadata_util,
repository_util,
shed_util_common as suc
)
from tool_shed.utility_containers import ToolShedUtilityContainerManager
from tool_shed.webapp.search.tool_search import ToolSearch
log = logging.getLogger(__name__)
class ToolsController(BaseAPIController):
"""RESTful controller for interactions with tools in the Tool Shed."""
@expose_api_raw_anonymous_and_sessionless
def index(self, trans, **kwd):
"""
GET /api/tools
Displays a collection of tools with optional criteria.
:param q: (optional)if present search on the given query will be performed
:type q: str
:param page: (optional)requested page of the search
:type page: int
:param page_size: (optional)requested page_size of the search
:type page_size: int
:param jsonp: (optional)flag whether to use jsonp format response, defaults to False
:type jsonp: bool
:param callback: (optional)name of the function to wrap callback in
used only when jsonp is true, defaults to 'callback'
:type callback: str
:returns dict: object containing list of results and metadata
Examples:
GET http://localhost:9009/api/tools
GET http://localhost:9009/api/tools?q=fastq
"""
q = kwd.get('q', '')
if not q:
raise exceptions.NotImplemented('Listing of all the tools is not implemented. Provide parameter "q" to search instead.')
else:
page = kwd.get('page', 1)
page_size = kwd.get('page_size', 10)
try:
page = int(page)
page_size = int(page_size)
except ValueError:
raise exceptions.RequestParameterInvalidException('The "page" and "page_size" have to be integers.')
return_jsonp = util.asbool(kwd.get('jsonp', False))
callback = kwd.get('callback', 'callback')
search_results = self._search(trans, q, page, page_size)
if return_jsonp:
response = str('{}({});'.format(callback, json.dumps(search_results)))
else:
response = json.dumps(search_results)
return response
def _search(self, trans, q, page=1, page_size=10):
"""
Perform the search over TS tools index.
Note that search works over the Whoosh index which you have
to pre-create with scripts/tool_shed/build_ts_whoosh_index.sh manually.
Also TS config option toolshed_search_on has to be True and
whoosh_index_dir has to be specified.
"""
conf = self.app.config
if not conf.toolshed_search_on:
raise exceptions.ConfigDoesNotAllowException('Searching the TS through the API is turned off for this instance.')
if not conf.whoosh_index_dir:
raise exceptions.ConfigDoesNotAllowException('There is no directory for the search index specified. Please contact the administrator.')
search_term = q.strip()
if len(search_term) < 1:
raise exceptions.RequestParameterInvalidException('The search term has to be at least one character long.')
tool_search = ToolSearch()
Boosts = namedtuple('Boosts', ['tool_name_boost',
'tool_description_boost',
'tool_help_boost',
'tool_repo_owner_username_boost'])
boosts = Boosts(float(conf.get('tool_name_boost', 1.2)),
float(conf.get('tool_description_boost', 0.6)),
float(conf.get('tool_help_boost', 0.4)),
float(conf.get('tool_repo_owner_username_boost', 0.3)))
results = tool_search.search(trans,
search_term,
page,
page_size,
boosts)
results['hostname'] = web.url_for('/', qualified=True)
return results
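    # Illustrative request (hostname and values are placeholders) combining the
    # paging and JSONP parameters handled above:
    #
    #     GET http://localhost:9009/api/tools?q=fastq&page=2&page_size=5&jsonp=True&callback=cb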
@expose_api_raw_anonymous_and_sessionless
def json(self, trans, **kwd):
"""
GET /api/tools/json
Get the tool form JSON for a tool in a repository.
:param guid: the GUID of the tool
:param guid: str
:param tsr_id: the ID of the repository
:param tsr_id: str
:param changeset: the changeset at which to load the tool json
:param changeset: str
"""
guid = kwd.get('guid', None)
tsr_id = kwd.get('tsr_id', None)
changeset = kwd.get('changeset', None)
if None in [changeset, tsr_id, guid]:
message = 'Changeset, repository ID, and tool GUID are all required parameters.'
trans.response.status = 400
return {'status': 'error', 'message': message}
tsucm = ToolShedUtilityContainerManager(trans.app)
repository = repository_util.get_repository_in_tool_shed(self.app, tsr_id)
repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed(repository.user, repository)
repository_metadata = metadata_util.get_repository_metadata_by_changeset_revision(trans.app, tsr_id, changeset)
toolshed_base_url = str(web.url_for('/', qualified=True)).rstrip('/')
rb = relation_builder.RelationBuilder(trans.app, repository, repository_metadata, toolshed_base_url)
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
containers_dict = tsucm.build_repository_containers(repository,
changeset,
repository_dependencies,
repository_metadata)
found_tool = None
for folder in containers_dict['valid_tools'].folders:
if hasattr(folder, 'valid_tools'):
for tool in folder.valid_tools:
tool.id = tool.tool_id
tool_guid = suc.generate_tool_guid(repository_clone_url, tool)
if tool_guid == guid:
found_tool = tool
break
if found_tool is None:
message = f'Unable to find tool with guid {guid} in repository {repository.name}.'
trans.response.status = 404
return {'status': 'error', 'message': message}
with ValidationContext.from_app(trans.app) as validation_context:
tv = tool_validator.ToolValidator(validation_context)
repository, tool, valid, message = tv.load_tool_from_changeset_revision(tsr_id,
changeset,
found_tool.tool_config)
if message or not valid:
status = 'error'
return dict(message=message, status=status)
tool_help = ''
if tool.help:
tool_help = tool.help.render(static_path=web.url_for('/static'), host_url=web.url_for('/', qualified=True))
tool_help = util.unicodify(tool_help, 'utf-8')
tool_dict = tool.to_dict(trans)
tool_dict['inputs'] = {}
tool.populate_model(trans, tool.inputs, {}, tool_dict['inputs'])
tool_dict.update({
'help': tool_help,
'citations': bool(tool.citations),
'requirements': [{'name': r.name, 'version': r.version} for r in tool.requirements],
'state_inputs': params_to_strings(tool.inputs, {}, trans.app),
'display': tool.display_interface,
'action': web.url_for(tool.action),
'method': tool.method,
'enctype': tool.enctype
})
return json.dumps(tool_dict)
| 45.142857
| 147
| 0.597867
|
794eaeef5ccadd77634b7869f9f521a337446e86
| 11,687
|
py
|
Python
|
tensorflow_probability/python/distributions/plackett_luce.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 3,670
|
2018-02-14T03:29:40.000Z
|
2022-03-30T01:19:52.000Z
|
tensorflow_probability/python/distributions/plackett_luce.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 1,395
|
2018-02-24T02:28:49.000Z
|
2022-03-31T16:12:06.000Z
|
tensorflow_probability/python/distributions/plackett_luce.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 1,135
|
2018-02-14T01:51:10.000Z
|
2022-03-28T02:24:11.000Z
|
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The PlackettLuce distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import gumbel
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
class PlackettLuce(distribution.AutoCompositeTensorDistribution):
"""Plackett-Luce distribution over permutations.
The Plackett-Luce distribution is defined over permutations of
fixed length. It is parameterized by a positive score vector of same length.
This class provides methods to create indexed batches of PlackettLuce
distributions. If the provided `scores` is rank 2 or higher, for
every fixed set of leading dimensions, the last dimension represents one
single PlackettLuce distribution. When calling distribution
functions (e.g. `dist.log_prob(x)`), `scores` and `x` are broadcast to the
same shape (if possible). In all cases, the last dimension of `scores, x`
represents single PlackettLuce distributions.
#### Mathematical Details
The Plackett-Luce is a distribution over permutation vectors `p` of length `k`
where the permutation `p` is an arbitrary ordering of `k` indices
`{0, 1, ..., k-1}`.
The probability mass function (pmf) is,
```none
pmf(p; s) = prod_i s_{p_i} / (Z - Z_i)
Z = sum_{j=0}^{k-1} s_j
Z_i = sum_{j=0}^{i-1} s_{p_j} for i>0 and 0 for i=0
```
where:
* `scores = s = [s_0, ..., s_{k-1}]`, `s_i >= 0`.
Samples from Plackett-Luce distribution are generated sequentially as follows.
Initialize normalization `N_0 = Z`
For `i` in `{0, 1, ..., k-1}`
1. Sample i-th element of permutation
`p_i ~ Categorical(probs=[s_0/N_i, ..., s_{k-1}/N_i])`
2. Update normalization
`N_{i+1} = N_i-s_{p_i}`
3. Mask out sampled index for subsequent rounds
`s_{p_i} = 0`
Return p
Alternately, an equivalent way to sample from this distribution is to sort
Gumbel perturbed log-scores [1].
```none
p = argsort(log s + g) ~ PlackettLuce(s)
g = [g_0, ..., g_{k-1}], g_i~ Gumbel(0, 1)
```
#### Examples
```python
scores = [0.1, 2., 5.]
dist = PlackettLuce(scores)
```
Creates a distribution over permutations of length 3, with the 3rd index
likely to appear first in the permutation.
The distribution function can be evaluated on permutations as follows.
```python
# permutations same shape as scores.
permutations = [2, 1, 0]
dist.prob(permutations) # Shape []
  # scores broadcast to [[0.1, 2., 5.], [0.1, 2., 5.]] to match permutations.
permutations = [[2, 1, 0], [1, 0, 2]]
dist.prob(permutations) # Shape [2]
# scores broadcast to shape [5, 7, 3] to match permutations.
permutations = [[...]] # Shape [5, 7, 3]
  dist.prob(permutations)  # Shape [5, 7]
```
Creates a 2-batch of 3-class distributions.
```python
scores = [[0.1, 2.3, 5.], [4.2, 0.5, 3.1]] # Shape [2, 3]
dist = PlackettLuce(scores)
# permutations broadcast to [[2, 1, 0], [2, 1, 0]] to match shape of scores.
permutations = [2, 1, 0]
dist.prob(permutations) # Shape [2]
```
#### References
[1]: Aditya Grover, Eric Wang, Aaron Zweig, Stefano Ermon. Stochastic
Optimization of Sorting Networks via Continuous Relaxations. ICLR 2019.
"""
def __init__(self,
scores,
dtype=tf.int32,
validate_args=False,
allow_nan_stats=True,
name='PlackettLuce'):
"""Initialize a batch of PlackettLuce distributions.
Args:
scores: An N-D `Tensor`, `N >= 1`, representing the scores of a set of
elements to be permuted. The first `N - 1` dimensions index into a
batch of independent distributions and the last dimension represents a
vector of scores for the elements.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
self._scores = tensor_util.convert_nonref_to_tensor(
scores, dtype_hint=tf.float32, name='scores')
super(PlackettLuce, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
scores=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
# pylint: enable=g-long-lambda
def _event_size(self, scores=None):
if scores is None:
scores = self._scores
if scores.shape is not None:
event_size = tf.compat.dimension_value(scores.shape[-1])
if event_size is not None:
return event_size
return tf.shape(scores)[-1]
@property
def scores(self):
"""Input argument `scores`.
Each element is a non-negative value for which the sorted permutation is
an ordering supported by this distribution.
Returns:
scores: A batch of scores used for initializing the distribution.
"""
return self._scores
def _event_shape_tensor(self, scores=None):
scores = self._scores if scores is None else scores
return ps.shape(scores)[-1:]
def _event_shape(self, scores=None):
scores = self._scores if scores is None else scores
return tensorshape_util.with_rank_at_least(scores.shape, 1)[-1:]
def _mode(self):
return tf.cast(
tf.argsort(self.scores, axis=-1, direction='DESCENDING'),
self.dtype)
def _log_prob(self, x):
scores = tf.convert_to_tensor(self.scores)
event_size = self._event_size(scores)
x = tf.cast(x, self.dtype)
# Broadcast scores or x if need be.
if (not tensorshape_util.is_fully_defined(x.shape) or
not tensorshape_util.is_fully_defined(scores.shape) or
x.shape != scores.shape):
broadcast_shape = ps.broadcast_shape(
ps.shape(scores), ps.shape(x))
scores = tf.broadcast_to(scores, broadcast_shape)
x = tf.broadcast_to(x, broadcast_shape)
scores_shape = ps.shape(scores)[:-1]
scores_2d = tf.reshape(scores, [-1, event_size])
x_2d = tf.reshape(x, [-1, event_size])
rearranged_scores = tf.gather(scores_2d, x_2d, batch_dims=1)
normalization_terms = tf.cumsum(rearranged_scores, axis=-1, reverse=True)
ret = tf.math.reduce_sum(
tf.math.log(rearranged_scores / normalization_terms), axis=-1)
# Reshape back to user-supplied batch and sample dims prior to 2D reshape.
ret = tf.reshape(ret, scores_shape)
return ret
def _sample_n(self, n, seed=None):
scores = tf.convert_to_tensor(self.scores)
sample_shape = ps.concat([[n], ps.shape(scores)], axis=0)
gumbel_noise = gumbel.Gumbel(loc=0, scale=1).sample(sample_shape,
seed=seed)
noisy_log_scores = gumbel_noise + tf.math.log(scores)
return tf.cast(
tf.argsort(noisy_log_scores, axis=-1, direction='DESCENDING'),
self.dtype)
def scores_parameter(self, name=None):
"""Scores vec computed from non-`None` input arg (`scores`)."""
with self._name_and_control_scope(name or 'scores_parameter'):
return tf.identity(self._scores)
def _default_event_space_bijector(self):
return
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_equal(
tf.range(self._event_size(), dtype=x.dtype),
tf.sort(x, axis=-1),
message='Sample must be a permutation of `{0, ..., k-1}`, where `k` is '
'the size of the last dimension of `scores`.'))
return assertions
def _parameter_control_dependencies(self, is_init):
assertions = []
scores = self._scores
param, name = (scores, 'scores')
# In init, we can always build shape and dtype checks because
# we assume shape doesn't change for Variable backed args.
if is_init:
if not dtype_util.is_floating(param.dtype):
        raise TypeError('Argument `{}` must have floating type.'.format(name))
msg = 'Argument `{}` must have rank at least 1.'.format(name)
shape_static = tensorshape_util.dims(param.shape)
if shape_static is not None:
if len(shape_static) < 1:
raise ValueError(msg)
elif self.validate_args:
param = tf.convert_to_tensor(param)
assertions.append(
assert_util.assert_rank_at_least(param, 1, message=msg))
with tf.control_dependencies(assertions):
param = tf.identity(param)
msg1 = 'Argument `{}` must have final dimension >= 1.'.format(name)
msg2 = 'Argument `{}` must have final dimension <= {}.'.format(
name, dtype_util.max(tf.int32))
event_size = shape_static[-1] if shape_static is not None else None
if event_size is not None:
if event_size < 1:
raise ValueError(msg1)
if event_size > dtype_util.max(tf.int32):
raise ValueError(msg2)
elif self.validate_args:
param = tf.convert_to_tensor(param)
assertions.append(assert_util.assert_greater_equal(
tf.shape(param)[-1], 1, message=msg1))
# NOTE: For now, we leave out a runtime assertion that
# `tf.shape(param)[-1] <= tf.int32.max`. An earlier `tf.shape` call
# will fail before we get to this point.
if not self.validate_args:
assert not assertions # Should never happen.
return []
if is_init != tensor_util.is_ref(scores):
scores = tf.convert_to_tensor(scores)
assertions.extend([
assert_util.assert_positive(scores),
])
return assertions
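# A minimal NumPy sketch (independent of this class) of the Gumbel-sort sampling
# identity quoted in the class docstring, p = argsort(log s + g), g_i ~ Gumbel(0, 1);
# the scores below are arbitrary:
#
#   import numpy as np
#   scores = np.array([0.1, 2., 5.])
#   gumbel_noise = np.random.gumbel(size=scores.shape)
#   p = np.argsort(-(np.log(scores) + gumbel_noise))  # descending, as in _sample_n
#   # `p` is one permutation drawn from PlackettLuce(scores)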
| 37.338658
| 81
| 0.676735
|
794eb01f6087a7212cfeffbfc66120c9ffe00f3b
| 6,652
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/brucellamelitensis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/brucellamelitensis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/brucellamelitensis.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Brucella melitensis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:05:09.129327
The undirected graph Brucella melitensis has 3176 nodes and 326751 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06481 and has 5 connected components, where the component with most
nodes has 3165 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 180, the mean node degree is 205.76, and
the node degree mode is 2. The top 5 most central nodes are 224914.BMEI1825
(degree 1371), 224914.BMEI1606 (degree 1125), 224914.BMEII0887 (degree
1048), 224914.BMEII0040 (degree 1033) and 224914.BMEI1436 (degree 944).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import BrucellaMelitensis
# Then load the graph
graph = BrucellaMelitensis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def BrucellaMelitensis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Brucella melitensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Brucella melitensis graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:05:09.129327
The undirected graph Brucella melitensis has 3176 nodes and 326751 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06481 and has 5 connected components, where the component with most
nodes has 3165 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 180, the mean node degree is 205.76, and
the node degree mode is 2. The top 5 most central nodes are 224914.BMEI1825
(degree 1371), 224914.BMEI1606 (degree 1125), 224914.BMEII0887 (degree
1048), 224914.BMEII0040 (degree 1033) and 224914.BMEI1436 (degree 944).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import BrucellaMelitensis
# Then load the graph
graph = BrucellaMelitensis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="BrucellaMelitensis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.195767
| 223
| 0.702195
|
794eb23a22817bb625551efaa59bd747dcc3650a
| 3,229
|
py
|
Python
|
scripts/gen_fh.py
|
ziqing26/nushackers-site
|
8d1d7814a7aa4df87829e5047f0c321d52bbbaa6
|
[
"MIT"
] | 31
|
2015-02-19T17:51:52.000Z
|
2022-01-31T23:04:25.000Z
|
scripts/gen_fh.py
|
ziqing26/nushackers-site
|
8d1d7814a7aa4df87829e5047f0c321d52bbbaa6
|
[
"MIT"
] | 278
|
2015-08-27T16:11:39.000Z
|
2022-01-26T03:41:24.000Z
|
scripts/gen_fh.py
|
ziqing26/nushackers-site
|
8d1d7814a7aa4df87829e5047f0c321d52bbbaa6
|
[
"MIT"
] | 46
|
2015-10-08T13:20:13.000Z
|
2022-03-18T07:23:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Generate this week's friday hack
# To generate some other FH pass in a number as argument
# e.g python gen_fh.py 1 generates next week's
# e.g python gen_fh.py 3 generates next next next week's
# As for numbering, it will take the next number
# (e.g. if the previous post is FH #1000, the generated one will be FH #1001)
# Please first update data/friday_hacks.yml before running this
import yaml
from datetime import datetime, timedelta
from os import listdir
from os.path import isfile, join
from sys import argv
import re
with open('../data/friday_hacks.yml', 'r') as fin:
doc = yaml.load(fin)
start_date = datetime.strptime(doc['start_date'],
'%Y-%m-%d %H:%M:%S +0800')
# Time delta fixes weird bug
now = datetime.today() - timedelta(hours=3)
# Sick undocumented feature
if len(argv) > 1:
now += timedelta(days=7 * int(argv[1]))
hacks = doc['hacks']
cur = start_date
next_hack = None
next_date = None
for hack in hacks:
if cur > now:
next_hack = hack
next_date = cur
break
cur += timedelta(days=7)
if not next_hack:
print "Dude semester's over"
quit()
if not next_hack.get('topics'):
print "Dude no hackz"
quit()
date = cur
print "Creating FH post for " + str(cur)
name = raw_input("Your name? ")
# so future-proof it's sick
fhre = re.compile(
r'^20[0-9][0-9]-[01][0-9]-[0-3][0-9]-friday-hacks-([1-9][0-9]*)\.md$')
num = 0
# so.. tempted... to... use lazy evaluation
for f in listdir('../content/post/'):
result = fhre.search(f)
if result:
cur = int(result.group(1))
if cur > num:
num = cur
num += 1
# In case you want to skip FH numbers BUT WHYYY!?!?
# What is abstraction?
# if len(argv) > 1:
# num += int(argv[1])
print "Creating FH post for #" + str(num) + ", at " + str(date)
# In case you want a different name, BUT WHYYY!?!?
# name = raw_input("Your name? ")
# now witness templating in raw string
content = '''\
---
title: "Friday Hacks #{num}, {month} {day}"
date: {now}
author: {author}
url: /{year}/{no_of_month}/friday-hacks-{num}
---
--- say something as introduction ---
{{{{< friday_hack_header
venue="{venue}"
date="{month} {day}"
fb_event="#" >}}}}
'''.format(
num=num,
now=datetime.today(),
year=next_date.strftime("%Y"),
month=next_date.strftime("%B"),
no_of_month=next_date.strftime('%m'),
day=next_date.day,
author=name,
venue=next_hack['venue']) + '\n'.join([
'''
### {talk_name}
#### Talk Description:
--- describe ----
#### Speaker Profile
--- describe ----
'''.format(talk_name=topic['title'].encode('utf-8')) for topic in next_hack['topics']
])
filename = '../content/post/{now}-friday-hacks-{num}.md'.format(
now=next_date.strftime("%Y-%m-%d"),
num=num,
month=next_date.strftime('%b'),
day=next_date.day,
)
with open(filename, 'a') as fout:
fout.write(content)
| 26.252033
| 85
| 0.574172
|
794eb2a2521df23dca19204064241e74c2447439
| 6,416
|
py
|
Python
|
pytorch_lightning/accelerators/horovod_backend.py
|
GimmickNG/pytorch-lightning
|
b36c5e86d014671b0fa922d750b27420bc73b6f9
|
[
"Apache-2.0"
] | 1
|
2021-03-10T20:13:50.000Z
|
2021-03-10T20:13:50.000Z
|
pytorch_lightning/accelerators/horovod_backend.py
|
GimmickNG/pytorch-lightning
|
b36c5e86d014671b0fa922d750b27420bc73b6f9
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/accelerators/horovod_backend.py
|
GimmickNG/pytorch-lightning
|
b36c5e86d014671b0fa922d750b27420bc73b6f9
|
[
"Apache-2.0"
] | 1
|
2020-09-11T22:53:18.000Z
|
2020-09-11T22:53:18.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import ExitStack
import torch
from pytorch_lightning.core import LightningModule
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.accelerators.base_backend import Accelerator
from pytorch_lightning.utilities.distributed import rank_zero_only
from torch.optim.lr_scheduler import _LRScheduler
try:
from apex import amp
except ImportError:
amp = None
try:
import horovod.torch as hvd
except (ModuleNotFoundError, ImportError):
HOROVOD_AVAILABLE = False
else:
HOROVOD_AVAILABLE = True
class HorovodBackend(Accelerator):
amp_backend: AMPType
def __init__(self, trainer):
super().__init__(trainer)
def setup(self, model):
# call setup after the ddp process has connected
self.trainer.call_setup_hook(model)
if torch.cuda.is_available() and self.trainer.on_gpu:
# Horovod: pin GPU to local rank
assert self.trainer.root_gpu == hvd.local_rank()
torch.cuda.set_device(self.trainer.root_gpu)
model.cuda(self.trainer.root_gpu)
# avoid duplicating progress bar
if hvd.rank() != 0 and self.trainer.progress_bar_callback is not None:
self.trainer.progress_bar_callback.disable()
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
self.trainer.optimizers = optimizers
self.trainer.lr_schedulers = lr_schedulers
self.trainer.optimizer_frequencies = optimizer_frequencies
# Horovod: scale the learning rate by the number of workers to account for
# increased total batch size
for optimizer in self.trainer.optimizers:
for param_group in optimizer.param_groups:
param_group['lr'] *= hvd.size()
# Horovod: adjust base LR used by schedulers to match scaled optimizer initial LR
for scheduler in self.trainer.lr_schedulers:
scheduler = scheduler['scheduler']
if isinstance(scheduler, _LRScheduler):
scheduler.base_lrs = [lr * hvd.size() for lr in scheduler.base_lrs]
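        # For illustration: with a base lr of 0.01 and hvd.size() == 4 workers,
        # each param_group lr above becomes 0.04, and any _LRScheduler base_lrs
        # are rescaled the same way.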
# Horovod: broadcast parameters & optimizer state to ensure consistent initialization
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
for optimizer in self.trainer.optimizers:
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
def filter_named_parameters(model, optimizer):
opt_params = set([p for group in optimizer.param_groups for p in group.get('params', [])])
return [(name, p) for name, p in model.named_parameters() if p in opt_params]
# Horovod: wrap optimizers to perform gradient aggregation via allreduce
self.trainer.optimizers = [
hvd.DistributedOptimizer(optimizer, named_parameters=filter_named_parameters(model, optimizer))
for optimizer in self.trainer.optimizers
]
if self.trainer.amp_backend == AMPType.APEX:
model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
self.trainer.optimizers = optimizers
self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
# Update logger rank info from Horovod to avoid race conditions from different ranks
# creating directories / writing files in the same locations.
self.trainer.global_rank = hvd.rank()
rank_zero_only.rank = self.trainer.global_rank
self.trainer.model = model
def train(self):
with ExitStack() as stack:
for optimizer in self.trainer.optimizers:
# Synchronization will be performed explicitly following backward()
stack.enter_context(optimizer.skip_synchronize())
# set up training routine
self.trainer.setup_training(self.trainer.model)
# train or test
results = self.trainer.train_or_test()
# Make sure all workers have finished training before returning to the user
hvd.join()
return results
def teardown(self):
pass
def training_step(self, args):
if self.trainer.on_gpu:
batch = args[0]
batch = self.batch_to_device(batch, hvd.local_rank())
args[0] = batch
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model.training_step(*args)
else:
output = self.trainer.model.training_step(*args)
return output
def validation_step(self, args):
if self.trainer.on_gpu:
batch = args[0]
batch = self.batch_to_device(batch, hvd.local_rank())
args[0] = batch
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model.validation_step(*args)
else:
output = self.trainer.model.validation_step(*args)
return output
def test_step(self, args):
if self.trainer.on_gpu:
batch = args[0]
batch = self.batch_to_device(batch, hvd.local_rank())
args[0] = batch
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model.test_step(*args)
else:
output = self.trainer.model.test_step(*args)
return output
def backward(self, closure_loss, optimizer, opt_idx):
super().backward(closure_loss, optimizer, opt_idx)
optimizer.synchronize()
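    # Note: backward() calls optimizer.synchronize() to run Horovod's allreduce
    # explicitly; train() wraps the optimizers in skip_synchronize() so the
    # reduction is not repeated inside optimizer.step().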
def on_train_epoch_end(self):
hvd.join(hvd.local_rank() if self.trainer.on_gpu else -1)
| 38.190476
| 113
| 0.672537
|
794eb2d38319bf9366c3260216c46c9da1cab796
| 47,089
|
py
|
Python
|
decoding/IAD/fairseq/fairseq/models/transformer.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | 1
|
2021-11-07T00:30:05.000Z
|
2021-11-07T00:30:05.000Z
|
decoding/IAD/fairseq/fairseq/models/transformer.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | null | null | null |
decoding/IAD/fairseq/fairseq/models/transformer.py
|
maxpark/unilm
|
cd0cc7e7207dd029db9c8f11e3568fb385be6a29
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("transformer")
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
def spm(path):
return {
'path': path,
'bpe': 'sentencepiece',
'tokenizer': 'space',
}
return {
'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
}
# fmt: on
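    # Hedged usage sketch (exact hub entry points may vary by fairseq release):
    #   model = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de',
    #                          tokenizer='moses', bpe='subword_nmt')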
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
self.supports_align_args = True
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension (extra linear layer '
                             'if different from decoder embed dim)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
# TorchScript doesn't support optional arguments with variable length (**kwargs).
# Current workaround is to add union of all arguments in child classes.
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Run the forward pass for an encoder-decoder model.
Copied from the base class, but without ``**kwargs``,
which are not supported by TorchScript.
"""
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
    # Since get_normalized_probs lives in the (non-scriptable) base FairseqModel,
    # we override it here to call the scriptable helper defined on the base class.
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(args) for i in range(args.encoder_layers)]
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def build_encoder_layer(self, args):
layer = TransformerEncoderLayer(args)
if getattr(args, "checkpoint_activations", False):
layer = checkpoint_wrapper(layer)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
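    # For illustration: with embed_dim = 512 and no_scale_embedding left False,
    # embed_scale = sqrt(512), about 22.63, so token embeddings are scaled by
    # that factor before positional embeddings are added.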
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = []
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
        # `forward`, so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
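    # Shape note, for illustration: with batch B = 2, source length T = 7 and
    # embed_dim C = 512, encoder_out[0] is (7, 2, 512), encoder_padding_mask[0]
    # is (2, 7) and encoder_embedding[0] is (2, 7, 512), matching the comments
    # in the returned dict.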
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
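    # For illustration: during beam search with beam size 3, the caller passes
    # something like new_order = tensor([0, 0, 0, 1, 1, 1]) so each sentence's
    # tensors are repeated once per beam via index_select.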
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = TransformerDecoderLayer(args, no_encoder_attn)
if getattr(args, "checkpoint_activations", False):
layer = checkpoint_wrapper(layer)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
parallel_forward_start_pos: Optional[int] = None
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
parallel_forward_start_pos=parallel_forward_start_pos
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
parallel_forward_start_pos: Optional[int] = None
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
parallel_forward_start_pos
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
parallel_forward_start_pos: Optional[int] = None
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
        # mode                   incremental_state   parallel_forward_start_pos
        # train                  None                None
        # one-by-one inference   not None            None
        # aggressive inference   not None            not None
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state if parallel_forward_start_pos is None else None
)
if self.embed_positions is not None
else None
)
original_len = None
if incremental_state is not None: # inference
if parallel_forward_start_pos is None: # one-by-one
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
else: # aggressive
original_len = prev_output_tokens.size(1)
prev_output_tokens = prev_output_tokens[:, parallel_forward_start_pos:]
if positions is not None:
positions = positions[:, parallel_forward_start_pos:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
# train | aggressive inference
if (incremental_state is None or parallel_forward_start_pos is not None) and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x, dim=original_len)
if parallel_forward_start_pos is not None:
self_attn_mask = self_attn_mask[parallel_forward_start_pos:]
else: # one-by-one inference
self_attn_mask = None
x, layer_attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor, dim=None):
# tensor: t, b, h
if dim is None:
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
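    # For illustration, dim = 3 yields
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # i.e. position t may only attend to positions <= t.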
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@register_model_architecture("transformer", "transformer")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture("transformer", "transformer_iwslt_de_en")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
@register_model_architecture("transformer", "transformer_wmt_en_de")
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture("transformer", "transformer_wmt_en_de_big")
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
| 43.600926
| 159
| 0.629553
|
794eb41377388ff6c5f36125fde35b8a7c9daf7c
| 539
|
py
|
Python
|
models/field/dict.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 5
|
2020-08-26T20:12:00.000Z
|
2020-12-11T16:39:22.000Z
|
models/field/dict.py
|
RaenonX/Jelly-Bot
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 234
|
2019-12-14T03:45:19.000Z
|
2020-08-26T18:55:19.000Z
|
models/field/dict.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 2
|
2019-10-23T15:21:15.000Z
|
2020-05-22T09:35:55.000Z
|
from ._base import BaseField
class DictionaryField(BaseField):
def __init__(self, key, **kwargs):
"""
        Default Properties Overridden:
- ``allow_none`` - ``False``
.. seealso::
Check the document of :class:`BaseField` for other default properties.
"""
if "allow_none" not in kwargs:
kwargs["allow_none"] = False
super().__init__(key, **kwargs)
def none_obj(self):
return {}
@property
def expected_types(self):
return dict,
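# Hedged usage sketch: a field declared as DictionaryField("data") rejects None
# by default (allow_none is forced to False above), falls back to {} via
# none_obj(), and only accepts dict values per expected_types.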
| 21.56
| 82
| 0.571429
|
794eb473f4efedff2f7988fadee49a633ce4c593
| 1,158
|
py
|
Python
|
src/plot_simulation.py
|
millengustavo/covid19-analytics
|
a4dfc0edd56b8a498c7fb318c9b3e75ed858dd9c
|
[
"MIT"
] | null | null | null |
src/plot_simulation.py
|
millengustavo/covid19-analytics
|
a4dfc0edd56b8a498c7fb318c9b3e75ed858dd9c
|
[
"MIT"
] | null | null | null |
src/plot_simulation.py
|
millengustavo/covid19-analytics
|
a4dfc0edd56b8a498c7fb318c9b3e75ed858dd9c
|
[
"MIT"
] | null | null | null |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import streamlit as st
# plt.style.use("seaborn-whitegrid")
def plot_simulation_output(df_simulated_data):
df_simulated_data = df_simulated_data[["S", "E", "I", "R", "E+I", "E+I+R"]]
fig1 = plt.figure()
sns.despine()
plt.grid()
ax = sns.lineplot(data=df_simulated_data)
ax.set_title("Visão Geral da Epidemia")
fig2 = plt.figure()
sns.despine()
plt.grid()
ax = sns.lineplot(data=df_simulated_data[["E", "I", "E+I", "E+I+R"]])
ax.set_title("Apenas Expostos e Infectados")
zoom_length = 30
peak_date = df_simulated_data["I"].idxmax().date()
zoom_on = (pd.Timestamp(peak_date) - pd.DateOffset(days=zoom_length)).date()
zoom_end = (pd.Timestamp(peak_date) + pd.DateOffset(days=zoom_length)).date()
fig3 = plt.figure()
sns.despine()
plt.grid()
ax = sns.lineplot(
data=df_simulated_data[["E", "I", "E+I"]][zoom_on:zoom_end], markers=True
)
ax.set_title(f"Zoom (de {zoom_on} a {zoom_end})")
plt.xticks(fontsize=8, rotation=30)
return st.pyplot(fig1), st.pyplot(fig2), st.pyplot(fig3)
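# Hedged usage sketch: inside a Streamlit app one would call
#   plot_simulation_output(df)
# with df a date-indexed DataFrame holding the columns S, E, I, R, E+I and
# E+I+R from an SEIR-style simulation. The Portuguese titles read roughly
# "Epidemic overview", "Exposed and infected only" and "Zoom (from ... to ...)".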
| 29.692308
| 81
| 0.65544
|
794eb54cdd64125d408be9ba90b7f78a7b4a0277
| 22,372
|
py
|
Python
|
electrum/bitcoin.py
|
IHIHIKI/electrum
|
5f527720cf2ae4c7aef1cfdcf4244dbceb54a5bc
|
[
"MIT"
] | 1
|
2020-09-10T21:33:38.000Z
|
2020-09-10T21:33:38.000Z
|
electrum/bitcoin.py
|
lukechilds/electrum
|
7bcb59ffb5a30d3a116b086c9ae291bf4bb788f3
|
[
"MIT"
] | null | null | null |
electrum/bitcoin.py
|
lukechilds/electrum
|
7bcb59ffb5a30d3a116b086c9ae291bf4bb788f3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from typing import List, Tuple, TYPE_CHECKING, Optional, Union
import enum
from enum import IntEnum, Enum
from .util import bfh, bh2u, BitcoinException, assert_bytes, to_bytes, inv_dict
from . import version
from . import segwit_addr
from . import constants
from . import ecc
from .crypto import sha256d, sha256, hash_160, hmac_oneshot
if TYPE_CHECKING:
from .network import Network
################################## transactions
COINBASE_MATURITY = 100
COIN = 100000000
TOTAL_COIN_SUPPLY_LIMIT_IN_BTC = 21000000
NLOCKTIME_MIN = 0
NLOCKTIME_BLOCKHEIGHT_MAX = 500_000_000 - 1
NLOCKTIME_MAX = 2 ** 32 - 1
# supported types of transaction outputs
# TODO kill these with fire
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
class opcodes(IntEnum):
# push value
OP_0 = 0x00
OP_FALSE = OP_0
OP_PUSHDATA1 = 0x4c
OP_PUSHDATA2 = 0x4d
OP_PUSHDATA4 = 0x4e
OP_1NEGATE = 0x4f
OP_RESERVED = 0x50
OP_1 = 0x51
OP_TRUE = OP_1
OP_2 = 0x52
OP_3 = 0x53
OP_4 = 0x54
OP_5 = 0x55
OP_6 = 0x56
OP_7 = 0x57
OP_8 = 0x58
OP_9 = 0x59
OP_10 = 0x5a
OP_11 = 0x5b
OP_12 = 0x5c
OP_13 = 0x5d
OP_14 = 0x5e
OP_15 = 0x5f
OP_16 = 0x60
# control
OP_NOP = 0x61
OP_VER = 0x62
OP_IF = 0x63
OP_NOTIF = 0x64
OP_VERIF = 0x65
OP_VERNOTIF = 0x66
OP_ELSE = 0x67
OP_ENDIF = 0x68
OP_VERIFY = 0x69
OP_RETURN = 0x6a
# stack ops
OP_TOALTSTACK = 0x6b
OP_FROMALTSTACK = 0x6c
OP_2DROP = 0x6d
OP_2DUP = 0x6e
OP_3DUP = 0x6f
OP_2OVER = 0x70
OP_2ROT = 0x71
OP_2SWAP = 0x72
OP_IFDUP = 0x73
OP_DEPTH = 0x74
OP_DROP = 0x75
OP_DUP = 0x76
OP_NIP = 0x77
OP_OVER = 0x78
OP_PICK = 0x79
OP_ROLL = 0x7a
OP_ROT = 0x7b
OP_SWAP = 0x7c
OP_TUCK = 0x7d
# splice ops
OP_CAT = 0x7e
OP_SUBSTR = 0x7f
OP_LEFT = 0x80
OP_RIGHT = 0x81
OP_SIZE = 0x82
# bit logic
OP_INVERT = 0x83
OP_AND = 0x84
OP_OR = 0x85
OP_XOR = 0x86
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_RESERVED1 = 0x89
OP_RESERVED2 = 0x8a
# numeric
OP_1ADD = 0x8b
OP_1SUB = 0x8c
OP_2MUL = 0x8d
OP_2DIV = 0x8e
OP_NEGATE = 0x8f
OP_ABS = 0x90
OP_NOT = 0x91
OP_0NOTEQUAL = 0x92
OP_ADD = 0x93
OP_SUB = 0x94
OP_MUL = 0x95
OP_DIV = 0x96
OP_MOD = 0x97
OP_LSHIFT = 0x98
OP_RSHIFT = 0x99
OP_BOOLAND = 0x9a
OP_BOOLOR = 0x9b
OP_NUMEQUAL = 0x9c
OP_NUMEQUALVERIFY = 0x9d
OP_NUMNOTEQUAL = 0x9e
OP_LESSTHAN = 0x9f
OP_GREATERTHAN = 0xa0
OP_LESSTHANOREQUAL = 0xa1
OP_GREATERTHANOREQUAL = 0xa2
OP_MIN = 0xa3
OP_MAX = 0xa4
OP_WITHIN = 0xa5
# crypto
OP_RIPEMD160 = 0xa6
OP_SHA1 = 0xa7
OP_SHA256 = 0xa8
OP_HASH160 = 0xa9
OP_HASH256 = 0xaa
OP_CODESEPARATOR = 0xab
OP_CHECKSIG = 0xac
OP_CHECKSIGVERIFY = 0xad
OP_CHECKMULTISIG = 0xae
OP_CHECKMULTISIGVERIFY = 0xaf
# expansion
OP_NOP1 = 0xb0
OP_CHECKLOCKTIMEVERIFY = 0xb1
OP_NOP2 = OP_CHECKLOCKTIMEVERIFY
OP_CHECKSEQUENCEVERIFY = 0xb2
OP_NOP3 = OP_CHECKSEQUENCEVERIFY
OP_NOP4 = 0xb3
OP_NOP5 = 0xb4
OP_NOP6 = 0xb5
OP_NOP7 = 0xb6
OP_NOP8 = 0xb7
OP_NOP9 = 0xb8
OP_NOP10 = 0xb9
OP_INVALIDOPCODE = 0xff
def hex(self) -> str:
return bytes([self]).hex()
def rev_hex(s: str) -> str:
return bh2u(bfh(s)[::-1])
def int_to_hex(i: int, length: int=1) -> str:
"""Converts int to little-endian hex string.
`length` is the number of bytes available
"""
if not isinstance(i, int):
raise TypeError('{} instead of int'.format(i))
range_size = pow(256, length)
if i < -(range_size//2) or i >= range_size:
raise OverflowError('cannot convert int {} to hex ({} bytes)'.format(i, length))
if i < 0:
# two's complement
i = range_size + i
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
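# For illustration: int_to_hex(1, 4) == '01000000' and int_to_hex(-1, 1) == 'ff'
# (two's complement), i.e. the little-endian encoding used throughout this module.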
def script_num_to_hex(i: int) -> str:
"""See CScriptNum in Bitcoin Core.
Encodes an integer as hex, to be used in script.
ported from https://github.com/bitcoin/bitcoin/blob/8cbc5c4be4be22aca228074f087a374a7ec38be8/src/script/script.h#L326
"""
if i == 0:
return ''
result = bytearray()
neg = i < 0
absvalue = abs(i)
while absvalue > 0:
result.append(absvalue & 0xff)
absvalue >>= 8
if result[-1] & 0x80:
result.append(0x80 if neg else 0x00)
elif neg:
result[-1] |= 0x80
return bh2u(result)
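# For illustration: script_num_to_hex(0) == '', script_num_to_hex(1) == '01',
# script_num_to_hex(-1) == '81' and script_num_to_hex(128) == '8000' (a sign
# byte is appended when the high bit of the last byte is set).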
def var_int(i: int) -> str:
# https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
# https://github.com/bitcoin/bitcoin/blob/efe1ee0d8d7f82150789f1f6840f139289628a2b/src/serialize.h#L247
# "CompactSize"
assert i >= 0, i
if i<0xfd:
return int_to_hex(i)
elif i<=0xffff:
return "fd"+int_to_hex(i,2)
elif i<=0xffffffff:
return "fe"+int_to_hex(i,4)
else:
return "ff"+int_to_hex(i,8)
def witness_push(item: str) -> str:
"""Returns data in the form it should be present in the witness.
hex -> hex
"""
return var_int(len(item) // 2) + item
def _op_push(i: int) -> str:
if i < opcodes.OP_PUSHDATA1:
return int_to_hex(i)
elif i <= 0xff:
return opcodes.OP_PUSHDATA1.hex() + int_to_hex(i, 1)
elif i <= 0xffff:
return opcodes.OP_PUSHDATA2.hex() + int_to_hex(i, 2)
else:
return opcodes.OP_PUSHDATA4.hex() + int_to_hex(i, 4)
def push_script(data: str) -> str:
"""Returns pushed data to the script, automatically
choosing canonical opcodes depending on the length of the data.
hex -> hex
ported from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptbuilder.go#L128
"""
data = bfh(data)
data_len = len(data)
# "small integer" opcodes
if data_len == 0 or data_len == 1 and data[0] == 0:
return opcodes.OP_0.hex()
elif data_len == 1 and data[0] <= 16:
return bh2u(bytes([opcodes.OP_1 - 1 + data[0]]))
elif data_len == 1 and data[0] == 0x81:
return opcodes.OP_1NEGATE.hex()
return _op_push(data_len) + bh2u(data)
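# For illustration: push_script('00') == '00' (OP_0), push_script('10') == '60'
# (OP_16), and pushing a 20-byte hash yields '14' + <hash hex> since 0x14 == 20
# is below OP_PUSHDATA1.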
def add_number_to_script(i: int) -> bytes:
return bfh(push_script(script_num_to_hex(i)))
def relayfee(network: 'Network' = None) -> int:
"""Returns feerate in sat/kbyte."""
from .simple_config import FEERATE_DEFAULT_RELAY, FEERATE_MAX_RELAY
if network and network.relay_fee is not None:
fee = network.relay_fee
else:
fee = FEERATE_DEFAULT_RELAY
# sanity safeguards, as network.relay_fee is coming from a server:
fee = min(fee, FEERATE_MAX_RELAY)
fee = max(fee, FEERATE_DEFAULT_RELAY)
return fee
def dust_threshold(network: 'Network' = None) -> int:
"""Returns the dust limit in satoshis."""
# Change <= dust threshold is added to the tx fee
dust_lim = 182 * 3 * relayfee(network) # in msat
# convert to sat, but round up:
return (dust_lim // 1000) + (dust_lim % 1000 > 0)
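# For illustration, assuming the default relay feerate of 1000 sat/kbyte:
# dust_lim = 182 * 3 * 1000 = 546000 msat, so the threshold evaluates to the
# familiar 546 sat.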
def hash_encode(x: bytes) -> str:
return bh2u(x[::-1])
def hash_decode(x: str) -> bytes:
return bfh(x)[::-1]
############ functions from pywallet #####################
def hash160_to_b58_address(h160: bytes, addrtype: int) -> str:
s = bytes([addrtype]) + h160
s = s + sha256d(s)[0:4]
return base_encode(s, base=58)
def b58_address_to_hash160(addr: str) -> Tuple[int, bytes]:
addr = to_bytes(addr, 'ascii')
_bytes = DecodeBase58Check(addr)
if len(_bytes) != 21:
raise Exception(f'expected 21 payload bytes in base58 address. got: {len(_bytes)}')
return _bytes[0], _bytes[1:21]
def hash160_to_p2pkh(h160: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash160_to_b58_address(h160, net.ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash160_to_b58_address(h160, net.ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash160_to_p2pkh(hash_160(public_key), net=net)
def hash_to_segwit_addr(h: bytes, witver: int, *, net=None) -> str:
if net is None: net = constants.net
return segwit_addr.encode(net.SEGWIT_HRP, witver, h)
def public_key_to_p2wpkh(public_key: bytes, *, net=None) -> str:
if net is None: net = constants.net
return hash_to_segwit_addr(hash_160(public_key), witver=0, net=net)
def script_to_p2wsh(script: str, *, net=None) -> str:
if net is None: net = constants.net
return hash_to_segwit_addr(sha256(bfh(script)), witver=0, net=net)
def p2wpkh_nested_script(pubkey: str) -> str:
pkh = bh2u(hash_160(bfh(pubkey)))
return '00' + push_script(pkh)
def p2wsh_nested_script(witness_script: str) -> str:
wsh = bh2u(sha256(bfh(witness_script)))
return '00' + push_script(wsh)
def pubkey_to_address(txin_type: str, pubkey: str, *, net=None) -> str:
if net is None: net = constants.net
if txin_type == 'p2pkh':
return public_key_to_p2pkh(bfh(pubkey), net=net)
elif txin_type == 'p2wpkh':
return public_key_to_p2wpkh(bfh(pubkey), net=net)
elif txin_type == 'p2wpkh-p2sh':
scriptSig = p2wpkh_nested_script(pubkey)
return hash160_to_p2sh(hash_160(bfh(scriptSig)), net=net)
else:
raise NotImplementedError(txin_type)
# TODO this method is confusingly named
def redeem_script_to_address(txin_type: str, scriptcode: str, *, net=None) -> str:
if net is None: net = constants.net
if txin_type == 'p2sh':
# given scriptcode is a redeem_script
return hash160_to_p2sh(hash_160(bfh(scriptcode)), net=net)
elif txin_type == 'p2wsh':
# given scriptcode is a witness_script
return script_to_p2wsh(scriptcode, net=net)
elif txin_type == 'p2wsh-p2sh':
# given scriptcode is a witness_script
redeem_script = p2wsh_nested_script(scriptcode)
return hash160_to_p2sh(hash_160(bfh(redeem_script)), net=net)
else:
raise NotImplementedError(txin_type)
def script_to_address(script: str, *, net=None) -> str:
from .transaction import get_address_from_output_script
return get_address_from_output_script(bfh(script), net=net)
def address_to_script(addr: str, *, net=None) -> str:
if net is None: net = constants.net
if not is_address(addr, net=net):
raise BitcoinException(f"invalid bitcoin address: {addr}")
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
if witprog is not None:
if not (0 <= witver <= 16):
raise BitcoinException(f'impossible witness version: {witver}')
script = bh2u(add_number_to_script(witver))
script += push_script(bh2u(bytes(witprog)))
return script
addrtype, hash_160_ = b58_address_to_hash160(addr)
if addrtype == net.ADDRTYPE_P2PKH:
script = pubkeyhash_to_p2pkh_script(bh2u(hash_160_))
elif addrtype == net.ADDRTYPE_P2SH:
script = opcodes.OP_HASH160.hex()
script += push_script(bh2u(hash_160_))
script += opcodes.OP_EQUAL.hex()
else:
raise BitcoinException(f'unknown address type: {addrtype}')
return script
class OnchainOutputType(Enum):
"""Opaque types of scriptPubKeys.
In case of p2sh, p2wsh and similar, no knowledge of redeem script, etc.
"""
P2PKH = enum.auto()
P2SH = enum.auto()
WITVER0_P2WPKH = enum.auto()
WITVER0_P2WSH = enum.auto()
def address_to_hash(addr: str, *, net=None) -> Tuple[OnchainOutputType, bytes]:
"""Return (type, pubkey hash / witness program) for an address."""
if net is None: net = constants.net
if not is_address(addr, net=net):
raise BitcoinException(f"invalid bitcoin address: {addr}")
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
if witprog is not None:
if witver != 0:
raise BitcoinException(f"not implemented handling for witver={witver}")
if len(witprog) == 20:
return OnchainOutputType.WITVER0_P2WPKH, bytes(witprog)
elif len(witprog) == 32:
return OnchainOutputType.WITVER0_P2WSH, bytes(witprog)
else:
raise BitcoinException(f"unexpected length for segwit witver=0 witprog: len={len(witprog)}")
addrtype, hash_160_ = b58_address_to_hash160(addr)
if addrtype == net.ADDRTYPE_P2PKH:
return OnchainOutputType.P2PKH, hash_160_
elif addrtype == net.ADDRTYPE_P2SH:
return OnchainOutputType.P2SH, hash_160_
raise BitcoinException(f"unknown address type: {addrtype}")
def address_to_scripthash(addr: str) -> str:
script = address_to_script(addr)
return script_to_scripthash(script)
def script_to_scripthash(script: str) -> str:
h = sha256(bfh(script))[0:32]
return bh2u(bytes(reversed(h)))
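# Illustrative note (added, not part of the original module): the "scripthash" above --
# sha256 of the scriptPubKey, byte-reversed and hex-encoded -- is the key format used by
# Electrum-protocol servers, e.g. (hypothetical usage):
#
#     sh = address_to_scripthash("<some address>")
#     # then query e.g. blockchain.scripthash.get_balance(sh) on a server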
def public_key_to_p2pk_script(pubkey: str) -> str:
return push_script(pubkey) + opcodes.OP_CHECKSIG.hex()
def pubkeyhash_to_p2pkh_script(pubkey_hash160: str) -> str:
script = bytes([opcodes.OP_DUP, opcodes.OP_HASH160]).hex()
script += push_script(pubkey_hash160)
script += bytes([opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]).hex()
return script
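# Illustrative breakdown (added, not part of the original module) of the P2PKH script
# assembled above, byte by byte:
#   76        OP_DUP
#   a9        OP_HASH160
#   14 <20B>  push of the pubkey hash160
#   88        OP_EQUALVERIFY
#   ac        OP_CHECKSIG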
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v: bytes, *, base: int) -> str:
""" encode v, which is a string of bytes, to base58."""
assert_bytes(v)
if base not in (58, 43):
raise ValueError('not supported base: {}'.format(base))
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
power_of_base = 1
for c in v[::-1]:
# naive but slow variant: long_value += (256**i) * c
long_value += power_of_base * c
power_of_base <<= 8
result = bytearray()
while long_value >= base:
div, mod = divmod(long_value, base)
result.append(chars[mod])
long_value = div
result.append(chars[long_value])
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0x00:
nPad += 1
else:
break
result.extend([chars[0]] * nPad)
result.reverse()
return result.decode('ascii')
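# Illustrative example (assumption: worked by hand from the routine above, not part of the
# original module): base_encode(b'\x00\x01', base=58) should give "12" -- the value 1 maps
# to '2' and the single leading zero byte is prepended as a '1'.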
def base_decode(v: Union[bytes, str], *, base: int, length: int = None) -> Optional[bytes]:
""" decode v into a string of len bytes."""
# assert_bytes(v)
v = to_bytes(v, 'ascii')
if base not in (58, 43):
raise ValueError('not supported base: {}'.format(base))
chars = __b58chars
if base == 43:
chars = __b43chars
long_value = 0
power_of_base = 1
for c in v[::-1]:
digit = chars.find(bytes([c]))
if digit == -1:
raise ValueError('Forbidden character {} for base {}'.format(c, base))
# naive but slow variant: long_value += digit * (base**i)
long_value += digit * power_of_base
power_of_base *= base
result = bytearray()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result.append(mod)
long_value = div
result.append(long_value)
nPad = 0
for c in v:
if c == chars[0]:
nPad += 1
else:
break
result.extend(b'\x00' * nPad)
if length is not None and len(result) != length:
return None
result.reverse()
return bytes(result)
class InvalidChecksum(Exception):
pass
def EncodeBase58Check(vchIn: bytes) -> str:
hash = sha256d(vchIn)
return base_encode(vchIn + hash[0:4], base=58)
def DecodeBase58Check(psz: Union[bytes, str]) -> bytes:
vchRet = base_decode(psz, base=58)
payload = vchRet[0:-4]
csum_found = vchRet[-4:]
csum_calculated = sha256d(payload)[0:4]
if csum_calculated != csum_found:
raise InvalidChecksum(f'calculated {bh2u(csum_calculated)}, found {bh2u(csum_found)}')
else:
return payload
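# Illustrative round-trip sketch (added, not part of the original module):
#
#     payload = b'...'                                # any byte string
#     encoded = EncodeBase58Check(payload)            # payload + 4-byte sha256d checksum
#     assert DecodeBase58Check(encoded) == payload    # raises InvalidChecksum if corrupted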
# backwards compat
# extended WIF for segwit (used in 3.0.x; but still used internally)
# the keys in this dict should be a superset of what Imported Wallets can import
WIF_SCRIPT_TYPES = {
'p2pkh':0,
'p2wpkh':1,
'p2wpkh-p2sh':2,
'p2sh':5,
'p2wsh':6,
'p2wsh-p2sh':7
}
WIF_SCRIPT_TYPES_INV = inv_dict(WIF_SCRIPT_TYPES)
def is_segwit_script_type(txin_type: str) -> bool:
return txin_type in ('p2wpkh', 'p2wpkh-p2sh', 'p2wsh', 'p2wsh-p2sh')
def serialize_privkey(secret: bytes, compressed: bool, txin_type: str, *,
internal_use: bool = False) -> str:
# we only export secrets inside curve range
secret = ecc.ECPrivkey.normalize_secret_bytes(secret)
if internal_use:
prefix = bytes([(WIF_SCRIPT_TYPES[txin_type] + constants.net.WIF_PREFIX) & 255])
else:
prefix = bytes([constants.net.WIF_PREFIX])
suffix = b'\01' if compressed else b''
vchIn = prefix + secret + suffix
base58_wif = EncodeBase58Check(vchIn)
if internal_use:
return base58_wif
else:
return '{}:{}'.format(txin_type, base58_wif)
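# Illustrative note (added, not part of the original module): outside of internal use the
# WIF string returned above is prefixed with its script type, e.g. "p2wpkh:<base58 WIF>",
# which deserialize_privkey() below splits back apart on the first ':'.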
def deserialize_privkey(key: str) -> Tuple[str, bytes, bool]:
if is_minikey(key):
return 'p2pkh', minikey_to_private_key(key), False
txin_type = None
if ':' in key:
txin_type, key = key.split(sep=':', maxsplit=1)
if txin_type not in WIF_SCRIPT_TYPES:
raise BitcoinException('unknown script type: {}'.format(txin_type))
try:
vch = DecodeBase58Check(key)
except BaseException:
neutered_privkey = str(key)[:3] + '..' + str(key)[-2:]
raise BitcoinException("cannot deserialize privkey {}"
.format(neutered_privkey))
if txin_type is None:
# keys exported in version 3.0.x encoded script type in first byte
prefix_value = vch[0] - constants.net.WIF_PREFIX
try:
txin_type = WIF_SCRIPT_TYPES_INV[prefix_value]
except KeyError:
raise BitcoinException('invalid prefix ({}) for WIF key (1)'.format(vch[0]))
else:
# all other keys must have a fixed first byte
if vch[0] != constants.net.WIF_PREFIX:
raise BitcoinException('invalid prefix ({}) for WIF key (2)'.format(vch[0]))
if len(vch) not in [33, 34]:
raise BitcoinException('invalid vch len for WIF key: {}'.format(len(vch)))
compressed = False
if len(vch) == 34:
if vch[33] == 0x01:
compressed = True
else:
raise BitcoinException(f'invalid WIF key. length suggests compressed pubkey, '
f'but last byte is {vch[33]} != 0x01')
if is_segwit_script_type(txin_type) and not compressed:
raise BitcoinException('only compressed public keys can be used in segwit scripts')
secret_bytes = vch[1:33]
# we accept secrets outside curve range; cast into range here:
secret_bytes = ecc.ECPrivkey.normalize_secret_bytes(secret_bytes)
return txin_type, secret_bytes, compressed
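# Illustrative round-trip property (assumption, not part of the original module): for a
# hypothetical in-range 32-byte secret `sk`,
#
#     wif = serialize_privkey(sk, True, 'p2wpkh')
#     assert deserialize_privkey(wif) == ('p2wpkh', sk, True)
#
# the compressed flag must be True here because segwit script types reject uncompressed keys.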
def is_compressed_privkey(sec: str) -> bool:
return deserialize_privkey(sec)[2]
def address_from_private_key(sec: str) -> str:
txin_type, privkey, compressed = deserialize_privkey(sec)
public_key = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
return pubkey_to_address(txin_type, public_key)
def is_segwit_address(addr: str, *, net=None) -> bool:
if net is None: net = constants.net
try:
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
except Exception as e:
return False
return witprog is not None
def is_b58_address(addr: str, *, net=None) -> bool:
if net is None: net = constants.net
try:
# test length, checksum, encoding:
addrtype, h = b58_address_to_hash160(addr)
except Exception as e:
return False
if addrtype not in [net.ADDRTYPE_P2PKH, net.ADDRTYPE_P2SH]:
return False
return True
def is_address(addr: str, *, net=None) -> bool:
if net is None: net = constants.net
return is_segwit_address(addr, net=net) \
or is_b58_address(addr, net=net)
def is_private_key(key: str, *, raise_on_error=False) -> bool:
try:
deserialize_privkey(key)
return True
except BaseException as e:
if raise_on_error:
raise
return False
########### end pywallet functions #######################
def is_minikey(text: str) -> bool:
# Minikeys are typically 22 or 30 characters, but this routine
# permits any length of 20 or more provided the minikey is valid.
# A valid minikey must begin with an 'S', be in base58, and when
# suffixed with '?' have its SHA256 hash begin with a zero byte.
# They are widely used in Casascius physical bitcoins.
return (len(text) >= 20 and text[0] == 'S'
and all(ord(c) in __b58chars for c in text)
and sha256(text + '?')[0] == 0x00)
def minikey_to_private_key(text: str) -> bytes:
return sha256(text)
| 30.857931 | 125 | 0.655596 |
794eb60bd2fa12dce74139bbb540ecd554888ec0 | 67,691 | py | Python | armi/reactor/tests/test_blocks.py | crisobg1/armi | 38d9febdbec7ab8a67dd9b8e50780e11ea127022 | ["Apache-2.0"] | 1 | 2020-07-07T16:58:43.000Z | 2020-07-07T16:58:43.000Z | armi/reactor/tests/test_blocks.py | crisobg1/armi | 38d9febdbec7ab8a67dd9b8e50780e11ea127022 | ["Apache-2.0"] | null | null | null | armi/reactor/tests/test_blocks.py | crisobg1/armi | 38d9febdbec7ab8a67dd9b8e50780e11ea127022 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests blocks.py"""
import copy
import math
import os
import unittest
import numpy
from numpy.testing import assert_allclose
from armi.reactor import blocks
from armi.reactor import components
import armi.runLog as runLog
import armi.settings as settings
from armi.reactor.components import UnshapedComponent
from armi import materials
from armi.nucDirectory import nucDir, nuclideBases
from armi.utils.units import MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
from armi.tests import TEST_ROOT
from armi.utils import units
from armi.utils import hexagon
from armi.reactor.flags import Flags
from armi import tests
from armi.reactor import grids
from armi.reactor.tests.test_assemblies import makeTestAssembly
from armi.tests import ISOAA_PATH
from armi.nuclearDataIO import isotxs
from armi.reactor import geometry
def loadTestBlock(cold=True):
"""Build an annular test block for evaluating unit tests."""
caseSetting = settings.Settings()
settings.setMasterCs(caseSetting)
caseSetting["xsKernel"] = "MC2v2"
runLog.setVerbosity("error")
caseSetting["nCycles"] = 1
r = tests.getEmptyHexReactor()
assemNum = 3
block = blocks.HexBlock("TestHexBlock")
block.setType("defaultType")
block.p.nPins = 217
Assembly = makeTestAssembly(assemNum, 1, r=r)
# NOTE: temperatures are supposed to be in C
coldTemp = 25.0
hotTempCoolant = 430.0
hotTempStructure = 25.0 if cold else hotTempCoolant
hotTempFuel = 25.0 if cold else 600.0
fuelDims = {
"Tinput": coldTemp,
"Thot": hotTempFuel,
"od": 0.84,
"id": 0.6,
"mult": 217.0,
}
fuel = components.Circle("fuel", "UZr", **fuelDims)
bondDims = {
"Tinput": coldTemp,
"Thot": hotTempCoolant,
"od": "fuel.id",
"id": 0.3,
"mult": 217.0,
}
bondDims["components"] = {"fuel": fuel}
bond = components.Circle("bond", "Sodium", **bondDims)
annularVoidDims = {
"Tinput": hotTempStructure,
"Thot": hotTempStructure,
"od": "bond.id",
"id": 0.0,
"mult": 217.0,
}
annularVoidDims["components"] = {"bond": bond}
annularVoid = components.Circle("annular void", "Void", **annularVoidDims)
innerLinerDims = {
"Tinput": coldTemp,
"Thot": hotTempStructure,
"od": 0.90,
"id": 0.85,
"mult": 217.0,
}
innerLiner = components.Circle("inner liner", "Graphite", **innerLinerDims)
fuelLinerGapDims = {
"Tinput": hotTempStructure,
"Thot": hotTempStructure,
"od": "inner liner.id",
"id": "fuel.od",
"mult": 217.0,
}
fuelLinerGapDims["components"] = {"inner liner": innerLiner, "fuel": fuel}
fuelLinerGap = components.Circle("gap1", "Void", **fuelLinerGapDims)
outerLinerDims = {
"Tinput": coldTemp,
"Thot": hotTempStructure,
"od": 0.95,
"id": 0.90,
"mult": 217.0,
}
outerLiner = components.Circle("outer liner", "HT9", **outerLinerDims)
linerLinerGapDims = {
"Tinput": hotTempStructure,
"Thot": hotTempStructure,
"od": "outer liner.id",
"id": "inner liner.od",
"mult": 217.0,
}
linerLinerGapDims["components"] = {
"outer liner": outerLiner,
"inner liner": innerLiner,
}
linerLinerGap = components.Circle("gap2", "Void", **linerLinerGapDims)
claddingDims = {
"Tinput": coldTemp,
"Thot": hotTempStructure,
"od": 1.05,
"id": 0.95,
"mult": 217.0,
}
cladding = components.Circle("clad", "HT9", **claddingDims)
linerCladGapDims = {
"Tinput": hotTempStructure,
"Thot": hotTempStructure,
"od": "clad.id",
"id": "outer liner.od",
"mult": 217.0,
}
linerCladGapDims["components"] = {"clad": cladding, "outer liner": outerLiner}
linerCladGap = components.Circle("gap3", "Void", **linerCladGapDims)
wireDims = {
"Tinput": coldTemp,
"Thot": hotTempStructure,
"od": 0.1,
"id": 0.0,
"axialPitch": 30.0,
"helixDiameter": 1.1,
"mult": 217.0,
}
wire = components.Helix("wire", "HT9", **wireDims)
coolantDims = {"Tinput": hotTempCoolant, "Thot": hotTempCoolant}
coolant = components.DerivedShape("coolant", "Sodium", **coolantDims)
ductDims = {
"Tinput": coldTemp,
"Thot": hotTempStructure,
"ip": 16.6,
"op": 17.3,
"mult": 1.0,
}
duct = components.Hexagon("duct", "HT9", **ductDims)
interDims = {
"Tinput": hotTempCoolant,
"Thot": hotTempCoolant,
"op": 17.8,
"ip": "duct.op",
"mult": 1.0,
}
interDims["components"] = {"duct": duct}
interSodium = components.Hexagon("interCoolant", "Sodium", **interDims)
block.addComponent(annularVoid)
block.addComponent(bond)
block.addComponent(fuel)
block.addComponent(fuelLinerGap)
block.addComponent(innerLiner)
block.addComponent(linerLinerGap)
block.addComponent(outerLiner)
block.addComponent(linerCladGap)
block.addComponent(cladding)
block.addComponent(wire)
block.addComponent(coolant)
block.addComponent(duct)
block.addComponent(interSodium)
block.getVolumeFractions() # TODO: remove, should be no-op when removed self.cached
block.setHeight(16.0)
Assembly.add(block)
r.core.add(Assembly)
return block
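# Illustrative usage note (added, not part of the original test module): loadTestBlock()
# returns a 217-pin annular-fuel HexBlock, 16 cm tall, already attached to an assembly in
# an empty hex reactor, e.g.
#
#     b = loadTestBlock(cold=False)   # same geometry, but fuel/structure at hot temperatures
#     nPins = b.getNumPins()          # 217 for this test block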
# pylint: disable=protected-access
def applyDummyData(block):
"""Add some dummy data to a block for physics-like tests."""
# typical SFR-ish flux in 1/cm^2/s
flux = [
161720716762.12997,
2288219224332.647,
11068159130271.139,
26473095948525.742,
45590249703180.945,
78780459664094.23,
143729928505629.06,
224219073208464.06,
229677567456769.22,
267303906113313.16,
220996878365852.22,
169895433093246.28,
126750484612975.31,
143215138794766.53,
74813432842005.5,
32130372366225.85,
21556243034771.582,
6297567411518.368,
22365198294698.45,
12211256796917.86,
5236367197121.363,
1490736020048.7847,
1369603135573.731,
285579041041.55945,
73955783965.98692,
55003146502.73623,
18564831886.20426,
4955747691.052108,
3584030491.076041,
884015567.3986057,
4298964991.043116,
1348809158.0353086,
601494405.293505,
]
xslib = isotxs.readBinary(ISOAA_PATH)
# slight hack here because the test block was created
# by hand rather than via blueprints and so elemental expansion
# of isotopics did not occur. But, the ISOTXS library being used
# did go through an isotopic expansion, so we map nuclides here.
xslib._nuclides["NAAA"] = xslib._nuclides["NA23AA"]
xslib._nuclides["WAA"] = xslib._nuclides["W184AA"]
xslib._nuclides["MNAA"] = xslib._nuclides["MN55AA"]
block.p.mgFlux = flux
block.r.core.lib = xslib
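# Illustrative sketch (assumption, not part of the original test helpers): the 33-group
# flux set above can be collapsed for quick sanity checks, e.g.
#
#     total_flux = sum(block.p.mgFlux)   # on the order of 1e15 n/cm^2/s for these values
#
# The magnitudes are only meant to resemble a typical SFR spectrum, as noted above.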
def getComponentDataFromBlock(component, block):
density = 0.0
for nuc in component.getNuclides():
density += (
component.getNumberDensity(nuc)
* nucDir.getAtomicWeight(nuc)
/ units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
)
volume = component.getVolume()
mass = component.getMass()
return component, density, volume, mass
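# Note (added for clarity, not part of the original test module): the density computed in
# getComponentDataFromBlock converts number density [atoms/barn-cm] to mass density [g/cc]
# via sum_i N_i * A_i / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM, which is what Component.density()
# is compared against in the consistency tests below.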
class Block_TestCase(unittest.TestCase):
def setUp(self):
self.Block = loadTestBlock()
self._hotBlock = loadTestBlock(cold=False)
self.r = self.Block.r
def test_getSmearDensity(self):
cur = self.Block.getSmearDensity()
ref = (
self.Block.getDim(Flags.FUEL, "od") ** 2
- self.Block.getDim(Flags.FUEL, "id") ** 2
) / self.Block.getDim(Flags.LINER, "id") ** 2
places = 10
self.assertAlmostEqual(cur, ref, places=places)
# test with liner instead of clad
ref = (
self.Block.getDim(Flags.FUEL, "od") ** 2
- self.Block.getDim(Flags.FUEL, "id") ** 2
) / self.Block.getDim(Flags.LINER, "id") ** 2
cur = self.Block.getSmearDensity()
self.assertAlmostEqual(
cur,
ref,
places=places,
msg="Incorrect getSmearDensity with liner. Got {0}. Should be {1}".format(
cur, ref
),
)
# test with annular fuel.
fuelDims = {
"Tinput": 273.0,
"Thot": 273.0,
"od": 0.87,
"id": 0.2,
"mult": 271.0,
}
self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims)
ref = (
self.Block.getDim(Flags.FUEL, "od") ** 2
- self.Block.getDim(Flags.FUEL, "id") ** 2
) / self.Block.getDim(Flags.LINER, "id") ** 2
cur = self.Block.getSmearDensity()
self.assertAlmostEqual(
cur,
ref,
places=places,
msg="Incorrect getSmearDensity with annular fuel. Got {0}. Should be {1}".format(
cur, ref
),
)
def test_getSmearDensityMultipleLiner(self):
numLiners = sum(
1 for c in self.Block if "liner" in c.name and "gap" not in c.name
)
self.assertEqual(
numLiners,
2,
"self.Block needs at least 2 liners for this test to be functional.",
)
cur = self.Block.getSmearDensity()
ref = (
self.Block.getDim(Flags.FUEL, "od") ** 2
- self.Block.getDim(Flags.FUEL, "id") ** 2
) / self.Block.getDim(Flags.INNER | Flags.LINER, "id") ** 2
self.assertAlmostEqual(cur, ref, places=10)
def test_timeNodeParams(self):
self.Block.p["avgFuelTemp", 3] = 2.0
self.assertEqual(2.0, self.Block.p[("avgFuelTemp", 3)])
def test_getType(self):
ref = "plenum pin"
self.Block.setType(ref)
cur = self.Block.getType()
self.assertEqual(cur, ref)
self.assertTrue(self.Block.hasFlags(Flags.PLENUM))
self.assertTrue(self.Block.hasFlags(Flags.PLENUM | Flags.PIN))
self.assertTrue(self.Block.hasFlags(Flags.PLENUM | Flags.PIN, exact=True))
self.assertFalse(self.Block.hasFlags(Flags.PLENUM, exact=True))
def test_hasFlags(self):
self.Block.setType("feed fuel")
cur = self.Block.hasFlags(Flags.FEED | Flags.FUEL)
self.assertTrue(cur)
cur = self.Block.hasFlags(Flags.PLENUM)
self.assertFalse(cur)
def test_setType(self):
self.Block.setType("igniter fuel")
self.assertEqual("igniter fuel", self.Block.getType())
self.assertTrue(self.Block.hasFlags(Flags.IGNITER | Flags.FUEL))
self.Block.adjustUEnrich(0.0001)
self.Block.setType("feed fuel")
self.assertTrue(self.Block.hasFlags(Flags.FEED | Flags.FUEL))
self.assertTrue(self.Block.hasFlags(Flags.FUEL))
self.assertFalse(self.Block.hasFlags(Flags.IGNITER | Flags.FUEL))
def test_duplicate(self):
Block2 = copy.deepcopy(self.Block)
originalComponents = self.Block.getComponents()
newComponents = Block2.getComponents()
for c1, c2 in zip(originalComponents, newComponents):
self.assertEqual(c1.getName(), c2.getName())
a1, a2 = c1.getArea(), c2.getArea()
self.assertIsNot(c1, c2)
self.assertAlmostEqual(
a1,
a2,
msg="The area of {0}={1} but "
"the area of {2} in the copy={3}".format(c1, a1, c2, a2),
)
for key in c2.DIMENSION_NAMES:
dim = c2.p[key]
if isinstance(dim, tuple):
self.assertNotIn(dim[0], originalComponents)
self.assertIn(dim[0], newComponents)
ref = self.Block.getMass()
cur = Block2.getMass()
places = 6
self.assertAlmostEqual(ref, cur, places=places)
ref = self.Block.getArea()
cur = Block2.getArea()
places = 6
self.assertAlmostEqual(ref, cur, places=places)
ref = self.Block.getHeight()
cur = Block2.getHeight()
places = 6
self.assertAlmostEqual(ref, cur, places=places)
def test_getXsType(self):
self.cs = settings.getMasterCs()
self.cs["loadingFile"] = os.path.join(TEST_ROOT, "refSmallReactor.yaml")
self.Block.p.xsType = "B"
cur = self.Block.p.xsType
ref = "B"
self.assertEqual(cur, ref)
oldBuGroups = self.cs["buGroups"]
self.cs["buGroups"] = [100]
self.Block.p.xsType = "BB"
cur = self.Block.p.xsType
ref = "BB"
self.assertEqual(cur, ref)
self.cs["buGroups"] = oldBuGroups
def test27b_setBuGroup(self):
type_ = "A"
self.Block.p.buGroup = type_
cur = self.Block.p.buGroupNum
ref = ord(type_) - 65
self.assertEqual(cur, ref)
typeNumber = 25
self.Block.p.buGroupNum = typeNumber
cur = self.Block.p.buGroup
ref = chr(typeNumber + 65)
self.assertEqual(cur, ref)
def test_clearDensity(self):
self.Block.clearNumberDensities()
for nuc in self.Block.getNuclides():
cur = self.Block.getNumberDensity(nuc)
ref = 0.0
places = 5
self.assertAlmostEqual(cur, ref, places=places)
def test_getNumberDensity(self):
refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
"W182": 1.09115150103e-05,
"W183": 5.89214392093e-06,
"W184": 1.26159558164e-05,
"W186": 1.17057432664e-05,
"ZR": 0.00709003962772,
}
self.Block.setNumberDensities(refDict)
for nuc in refDict.keys():
cur = self.Block.getNumberDensity(nuc)
ref = refDict[nuc]
places = 6
self.assertAlmostEqual(ref, cur, places=places)
def test_setNumberDensity(self):
ref = 0.05
self.Block.setNumberDensity("U235", ref)
cur = self.Block.getNumberDensity("U235")
places = 5
self.assertAlmostEqual(cur, ref, places=places)
def test_setNumberDensities(self):
"""Make sure we can set multiple number densities at once."""
b = self.Block
b.setNumberDensity("NA", 0.5)
refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
"W": 1.09115150103e-05,
"ZR": 0.00709003962772,
}
b.setNumberDensities(refDict)
for nuc in refDict.keys():
cur = self.Block.getNumberDensity(nuc)
ref = refDict[nuc]
places = 6
self.assertAlmostEqual(cur, ref, places=places)
nucBase = nuclideBases.byName[nuc]
self.assertAlmostEqual(
b.p[nucBase.getDatabaseName()], ref
) # required for DB viewing/loading
# make sure U235 stayed fully contained in the fuel component
fuelC = b.getComponent(Flags.FUEL)
self.assertAlmostEqual(
b.getNumberDensity("U235"),
fuelC.getNumberDensity("U235") * fuelC.getVolumeFraction(),
)
# make sure other vals were zeroed out
self.assertAlmostEqual(b.getNumberDensity("NA23"), 0.0)
def test_getMass(self):
self.Block.setHeight(100.0)
nucName = "U235"
d = self.Block.getNumberDensity(nucName)
v = self.Block.getVolume()
A = nucDir.getAtomicWeight(nucName)
ref = d * v * A / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
cur = self.Block.getMass(nucName)
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_setMass(self):
self.Block.setHeight(100.0)
mass = 100.0
nuc = "U238"
self.Block.setMass(nuc, mass)
cur = self.Block.getMass(nuc)
ref = mass
places = 6
self.assertAlmostEqual(cur, ref, places=places)
cur = self.Block.getNumberDensity(nuc)
v = self.Block.getVolume()
A = nucDir.getAtomicWeight(nuc)
ref = MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * mass / (v * A)
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_getTotalMass(self):
self.Block.setHeight(100.0)
self.Block.clearNumberDensities()
refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
"W182": 1.09115150103e-05,
"W183": 5.89214392093e-06,
"W184": 1.26159558164e-05,
"W186": 1.17057432664e-05,
"ZR": 0.00709003962772,
}
self.Block.setNumberDensities(refDict)
cur = self.Block.getMass()
tot = 0.0
for nucName in refDict.keys():
d = refDict[nucName]
A = nucDir.getAtomicWeight(nucName)
tot += d * A
v = self.Block.getVolume()
ref = tot * v / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
places = 9
self.assertAlmostEqual(cur, ref, places=places)
def test_replaceBlockWithBlock(self):
r"""
Tests conservation of mass flag in replaceBlockWithBlock
"""
block = self.Block
ductBlock = block.__class__("duct")
ductBlock.addComponent(block.getComponent(Flags.COOLANT, exact=True))
ductBlock.addComponent(block.getComponent(Flags.DUCT, exact=True))
ductBlock.addComponent(block.getComponent(Flags.INTERCOOLANT, exact=True))
# get reference data
refLoc = block.spatialLocator
refName = block.name
refHeight = block.p.height
ductBlock.p.height = 99 * block.p.height
self.assertGreater(len(block), 3)
block.replaceBlockWithBlock(ductBlock)
self.assertEqual(block.spatialLocator, refLoc)
self.assertEqual(refName, block.name)
self.assertEqual(3, len(block))
self.assertEqual(block.p.height, refHeight)
def test_getWettedPerimeter(self):
cur = self.Block.getWettedPerimeter()
ref = math.pi * (
self.Block.getDim(Flags.CLAD, "od") + self.Block.getDim(Flags.WIRE, "od")
) + 6 * self.Block.getDim(Flags.DUCT, "ip") / math.sqrt(3) / self.Block.getDim(
Flags.CLAD, "mult"
)
self.assertAlmostEqual(cur, ref)
def test_getFlowAreaPerPin(self):
area = self.Block.getComponent(Flags.COOLANT).getArea()
nPins = self.Block.getNumPins()
cur = self.Block.getFlowAreaPerPin()
ref = area / nPins
self.assertAlmostEqual(cur, ref)
def test_getHydraulicDiameter(self):
cur = self.Block.getHydraulicDiameter()
ref = 4.0 * self.Block.getFlowAreaPerPin() / self.Block.getWettedPerimeter()
self.assertAlmostEqual(cur, ref)
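    # Clarifying note (added, not part of the original tests): the reference above is the
    # standard hydraulic diameter definition D_h = 4 * A_flow / P_wetted, evaluated per pin.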
def test_getCladdingOR(self):
cur = self.Block.getCladdingOR()
ref = self.Block.getDim(Flags.CLAD, "od") / 2.0
self.assertAlmostEqual(cur, ref)
def test_getCladdingIR(self):
cur = self.Block.getCladdingIR()
ref = self.Block.getDim(Flags.CLAD, "id") / 2.0
self.assertAlmostEqual(cur, ref)
def test_getFuelRadius(self):
cur = self.Block.getFuelRadius()
ref = self.Block.getDim(Flags.FUEL, "od") / 2.0
self.assertAlmostEqual(cur, ref)
def test_adjustCladThicknessByOD(self):
thickness = 0.05
clad = self.Block.getComponent(Flags.CLAD)
ref = clad.getDimension("id", cold=True) + 2.0 * thickness
self.Block.adjustCladThicknessByOD(thickness)
cur = clad.getDimension("od", cold=True)
curThickness = (
clad.getDimension("od", cold=True) - clad.getDimension("id", cold=True)
) / 2.0
self.assertAlmostEqual(cur, ref)
self.assertAlmostEqual(curThickness, thickness)
def test_adjustCladThicknessByID(self):
thickness = 0.05
clad = self.Block.getComponent(Flags.CLAD)
ref = clad.getDimension("od", cold=True) - 2.0 * thickness
self.Block.adjustCladThicknessByID(thickness)
cur = clad.getDimension("id", cold=True)
curThickness = (
clad.getDimension("od", cold=True) - clad.getDimension("id", cold=True)
) / 2.0
self.assertAlmostEqual(cur, ref)
self.assertAlmostEqual(curThickness, thickness)
def test_adjustUEnrich(self):
self.Block.setHeight(100.0)
ref = 0.25
self.Block.adjustUEnrich(ref)
cur = self.Block.getComponent(Flags.FUEL).getEnrichment()
places = 5
self.assertAlmostEqual(cur, ref, places=places)
def test_setLocation(self):
b = self.Block
# a bit obvious, but location is a property now...
i, j = grids.getIndicesFromRingAndPos(2, 3)
b.spatialLocator = b.core.spatialGrid[i, j, 0]
self.assertEqual(b.getLocation(), "A2003A")
self.assertEqual(0, b.spatialLocator.k)
self.assertEqual(b.getSymmetryFactor(), 1.0)
        # now if we don't specify axial, it will move to the new xy location and keep the original z index
i, j = grids.getIndicesFromRingAndPos(4, 4)
b.spatialLocator = b.core.spatialGrid[i, j, 0]
self.assertEqual(0, b.spatialLocator.k)
self.assertEqual(b.getSymmetryFactor(), 1.0)
# center blocks have a different symmetry factor for 1/3rd core
for symmetry, powerMult in (
(geometry.FULL_CORE, 1),
(geometry.THIRD_CORE + geometry.PERIODIC, 3),
):
self.r.core.symmetry = symmetry
i, j = grids.getIndicesFromRingAndPos(1, 1)
b.spatialLocator = b.core.spatialGrid[i, j, 0]
self.assertEqual(0, b.spatialLocator.k)
self.assertEqual(b.getSymmetryFactor(), powerMult)
def test_setBuLimitInfo(self):
cs = settings.getMasterCs()
self.Block.adjustUEnrich(0.1)
self.Block.setType("igniter fuel")
self.Block.setBuLimitInfo(cs)
cur = self.Block.p.buLimit
ref = 0.0
self.assertEqual(cur, ref)
def test_getTotalNDens(self):
self.Block.setType("fuel")
self.Block.clearNumberDensities()
refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
"W182": 1.09115150103e-05,
"W183": 5.89214392093e-06,
"W184": 1.26159558164e-05,
"W186": 1.17057432664e-05,
"ZR": 0.00709003962772,
}
self.Block.setNumberDensities(refDict)
cur = self.Block.getTotalNDens()
tot = 0.0
for nucName in refDict.keys():
ndens = self.Block.getNumberDensity(nucName)
tot += ndens
ref = tot
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_getHMDens(self):
self.Block.setType("fuel")
self.Block.clearNumberDensities()
refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
"W182": 1.09115150103e-05,
"W183": 5.89214392093e-06,
"W184": 1.26159558164e-05,
"W186": 1.17057432664e-05,
"ZR": 0.00709003962772,
}
self.Block.setNumberDensities(refDict)
cur = self.Block.getHMDens()
hmDens = 0.0
for nuclide in refDict.keys():
if nucDir.isHeavyMetal(nuclide):
# then nuclide is a HM
hmDens += self.Block.getNumberDensity(nuclide)
ref = hmDens
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_getFissileMassEnrich(self):
fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 0.76, "id": 0.0, "mult": 1.0}
self.fuelComponent = components.Circle("fuel", "UZr", **fuelDims)
self.Block.addComponent(self.fuelComponent)
self.Block.setHeight(100.0)
self.Block.clearNumberDensities()
refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
"W182": 1.09115150103e-05,
"W183": 5.89214392093e-06,
"W184": 1.26159558164e-05,
"W186": 1.17057432664e-05,
"ZR": 0.00709003962772,
}
self.Block.setNumberDensities(refDict)
cur = self.Block.getFissileMassEnrich()
ref = self.Block.getFissileMass() / self.Block.getHMMass()
places = 4
self.assertAlmostEqual(cur, ref, places=places)
self.Block.removeComponent(self.fuelComponent)
def test_getUraniumMassEnrich(self):
self.Block.adjustUEnrich(0.25)
ref = 0.25
self.Block.adjustUEnrich(ref)
cur = self.Block.getUraniumMassEnrich()
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_getUraniumNumEnrich(self):
self.Block.adjustUEnrich(0.25)
cur = self.Block.getUraniumNumEnrich()
u8 = self.Block.getNumberDensity("U238")
u5 = self.Block.getNumberDensity("U235")
ref = u5 / (u8 + u5)
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_getNumberOfAtoms(self):
self.Block.clearNumberDensities()
refDict = {
"U235": 0.00275173784234,
"U238": 0.0217358415457,
"W182": 1.09115150103e-05,
"W183": 5.89214392093e-06,
"W184": 1.26159558164e-05,
"W186": 1.17057432664e-05,
"ZR": 0.00709003962772,
}
self.Block.setNumberDensities(refDict)
nucName = "U238"
moles = (
self.Block.getNumberOfAtoms(nucName) / units.AVOGADROS_NUMBER
) # about 158 moles
refMoles = (
refDict["U238"]
* self.Block.getVolume()
/ (units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM)
)
self.assertAlmostEqual(moles, refMoles)
def test_getPuN(self):
fuel = self.Block.getComponent(Flags.FUEL)
vFrac = fuel.getVolumeFraction()
refDict = {
"AM241": 2.695633500634074e-05,
"U238": 0.015278429635341755,
"O16": 0.04829586365251901,
"U235": 0.004619446966056436,
"PU239": 0.0032640382635406515,
"PU238": 4.266845903720035e-06,
"PU240": 0.000813669265183342,
"PU241": 0.00011209296581262849,
"PU242": 2.3078961257395204e-05,
}
fuel.setNumberDensities({nuc: v / vFrac for nuc, v in refDict.items()})
cur = self.Block.getPuN()
ndens = 0.0
for nucName in refDict.keys():
if nucName in ["PU238", "PU239", "PU240", "PU241", "PU242"]:
ndens += self.Block.getNumberDensity(nucName)
ref = ndens
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_getPuMass(self):
fuel = self.Block.getComponent(Flags.FUEL)
refDict = {
"AM241": 2.695633500634074e-05,
"U238": 0.015278429635341755,
"O16": 0.04829586365251901,
"U235": 0.004619446966056436,
"PU239": 0.0032640382635406515,
"PU238": 4.266845903720035e-06,
"PU240": 0.000813669265183342,
"PU241": 0.00011209296581262849,
"PU242": 2.3078961257395204e-05,
}
fuel.setNumberDensities(refDict)
cur = self.Block.getPuMass()
pu = 0.0
for nucName in refDict.keys():
if nucName in ["PU238", "PU239", "PU240", "PU241", "PU242"]:
pu += self.Block.getMass(nucName)
self.assertAlmostEqual(cur, pu)
def test_adjustDensity(self):
u235Dens = 0.003
u238Dens = 0.010
self.Block.setNumberDensity("U235", u235Dens)
self.Block.setNumberDensity("U238", u238Dens)
mass1 = self.Block.getMass(["U235", "U238"])
densAdj = 0.9
nucList = ["U235", "U238"]
massDiff = self.Block.adjustDensity(densAdj, nucList, returnMass=True)
mass2 = self.Block.getMass(["U235", "U238"])
cur = self.Block.getNumberDensity("U235")
ref = densAdj * u235Dens
places = 6
self.assertAlmostEqual(cur, ref, places=places)
cur = self.Block.getNumberDensity("U238")
ref = densAdj * u238Dens
self.assertAlmostEqual(cur, ref, places=places)
self.assertAlmostEqual(mass2 - mass1, massDiff)
def test_completeInitialLoading(self):
area = self.Block.getArea()
height = 2.0
self.Block.setHeight(height)
self.Block.clearNumberDensities()
self.Block.setNumberDensities(
{
"U238": 0.018518936996911595,
"ZR": 0.006040713762820692,
"U235": 0.0023444806416701184,
"NA23": 0.009810163826158255,
}
)
self.Block.completeInitialLoading()
cur = self.Block.p.molesHmBOL
ref = self.Block.getHMDens() / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * height * area
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test_addComponent(self):
numComps = len(self.Block.getComponents())
fuelDims = {"Tinput": 25.0, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0}
newComp = components.Circle("fuel", "UZr", **fuelDims)
self.Block.addComponent(newComp)
self.assertEqual(numComps + 1, len(self.Block.getComponents()))
self.assertIn(newComp, self.Block.getComponents())
self.Block.removeComponent(newComp)
def test_hasComponents(self):
self.assertTrue(self.Block.hasComponents([Flags.FUEL, Flags.CLAD]))
self.assertTrue(self.Block.hasComponents(Flags.FUEL))
self.assertFalse(
self.Block.hasComponents([Flags.FUEL, Flags.CLAD, Flags.DUMMY])
)
def test_getComponentNames(self):
cur = self.Block.getComponentNames()
ref = set(
[
"annular void",
"bond",
"fuel",
"gap1",
"inner liner",
"gap2",
"outer liner",
"gap3",
"clad",
"wire",
"coolant",
"duct",
"interCoolant",
]
)
self.assertEqual(cur, ref)
def test_getComponents(self):
cur = self.Block.getComponents(Flags.FUEL)
self.assertEqual(len(cur), 1)
comps = self.Block.getComponents(Flags.FUEL) + self.Block.getComponents(
Flags.CLAD
)
self.assertEqual(len(comps), 2)
inter = self.Block.getComponents(Flags.INTERCOOLANT)
self.assertEqual(len(inter), 1)
inter = self.Block.getComponents(
Flags.INTERCOOLANT, exact=True
) # case insensitive
self.assertEqual(inter, [self.Block.getComponent(Flags.INTERCOOLANT)])
cool = self.Block.getComponents(Flags.COOLANT, exact=True)
self.assertEqual(len(cool), 1)
def test_getComponent(self):
cur = self.Block.getComponent(Flags.FUEL)
self.assertIsInstance(cur, components.Component)
inter = self.Block.getComponent(Flags.INTERCOOLANT)
self.assertIsInstance(inter, components.Component)
with self.assertRaises(KeyError):
            # this really isn't the responsibility of block, more of Flags, but until this refactor
# is over...
inter = self.Block.getComponent(
Flags.fromString("intercoolantlala"), exact=True
)
cool = self.Block.getComponent(Flags.COOLANT, exact=True)
self.assertIsInstance(cool, components.Component)
def test_getComponentsOfShape(self):
ref = [
"annular void",
"bond",
"fuel",
"gap1",
"inner liner",
"gap2",
"outer liner",
"gap3",
"clad",
]
cur = [c.name for c in self.Block.getComponentsOfShape(components.Circle)]
self.assertEqual(sorted(ref), sorted(cur))
def test_getComponentsOfMaterial(self):
cur = self.Block.getComponentsOfMaterial(materials.UZr())
ref = self.Block.getComponent(Flags.FUEL)
self.assertEqual(cur[0], ref)
self.assertEqual(
self.Block.getComponentsOfMaterial(materials.HT9()),
[
self.Block.getComponent(Flags.OUTER | Flags.LINER),
self.Block.getComponent(Flags.CLAD),
self.Block.getComponent(Flags.WIRE),
self.Block.getComponent(Flags.DUCT),
],
)
def test_getComponentByName(self):
self.assertIsNone(
self.Block.getComponentByName("not the droid youre looking for")
)
self.assertIsNotNone(self.Block.getComponentByName("annular void"))
def test_getSortedComponentsInsideOfComponent(self):
"""Test that components can be sorted within a block and returned in the correct order."""
expected = [
self.Block.getComponentByName(c)
for c in [
"annular void",
"bond",
"fuel",
"gap1",
"inner liner",
"gap2",
"outer liner",
"gap3",
]
]
clad = self.Block.getComponent(Flags.CLAD)
actual = self.Block.getSortedComponentsInsideOfComponent(clad)
self.assertListEqual(actual, expected)
def test_getSortedComponentsInsideOfComponentSpecifiedTypes(self):
expected = [
self.Block.getComponentByName(c)
for c in [
"annular void",
"bond",
"fuel",
"gap1",
"inner liner",
"gap2",
"outer liner",
"gap3",
]
]
clad = self.Block.getComponent(Flags.CLAD)
actual = self.Block.getSortedComponentsInsideOfComponent(clad)
self.assertListEqual(actual, expected)
def test_getNumComponents(self):
cur = self.Block.getNumComponents(Flags.FUEL)
ref = self.Block.getDim(Flags.FUEL, "mult")
self.assertEqual(cur, ref)
self.assertEqual(ref, self.Block.getNumComponents(Flags.CLAD))
self.assertEqual(1, self.Block.getNumComponents(Flags.DUCT))
def test_getNumPins(self):
cur = self.Block.getNumPins()
ref = self.Block.getDim(Flags.FUEL, "mult")
self.assertEqual(cur, ref)
emptyBlock = blocks.HexBlock("empty")
self.assertEqual(emptyBlock.getNumPins(), 0)
def test_getComponentAreaFrac(self):
def calcFracManually(names):
tFrac = 0.0
for n in names:
for c, frac in fracs:
if c.getName() == n:
tFrac += frac
return tFrac
self.Block.setHeight(2.0)
refList = [Flags.BOND, Flags.COOLANT]
cur = self.Block.getComponentAreaFrac(refList)
fracs = self.Block.getVolumeFractions()
ref = calcFracManually(("bond", "coolant"))
places = 6
self.assertAlmostEqual(cur, ref, places=places)
# allow inexact for things like fuel1, fuel2 or clad vs. cladding
val = self.Block.getComponentAreaFrac(
[Flags.COOLANT, Flags.INTERCOOLANT], exact=False
)
ref = calcFracManually(["coolant", "interCoolant"])
refWrong = calcFracManually(
["coolant", "interCoolant", "clad"]
) # can't use 'clad' b/c ``calcFracManually`` is exact only
self.assertAlmostEqual(ref, val)
self.assertNotAlmostEqual(refWrong, val)
def test100_getPinPitch(self):
cur = self.Block.getPinPitch()
ref = self.Block.getDim(Flags.CLAD, "od") + self.Block.getDim(Flags.WIRE, "od")
places = 6
self.assertAlmostEqual(cur, ref, places=places)
def test101_getPitch(self):
cur = self.Block.getPitch(returnComp=True)
ref = (
self.Block.getDim(Flags.INTERCOOLANT, "op"),
self.Block.getComponent(Flags.INTERCOOLANT),
)
self.assertEqual(cur, ref)
newb = copy.deepcopy(self.Block)
p1, c1 = self.Block.getPitch(returnComp=True)
p2, c2 = newb.getPitch(returnComp=True)
        self.assertIsNot(c1, c2)
        self.assertIs(newb.getLargestComponent("op"), c2)
        self.assertEqual(p1, p2)
def test102_setPitch(self):
pitch = 17.5
self.Block.setPitch(pitch)
cur = self.Block.getPitch()
self.assertEqual(cur, pitch)
self.assertEqual(
self.Block.getComponent(Flags.INTERCOOLANT).getDimension("op"), pitch
)
def test106_getAreaFractions(self):
cur = self.Block.getVolumeFractions()
tot = 0.0
areas = []
for c in self.Block.getComponents():
a = c.getArea()
tot += a
areas.append((c, a))
fracs = {}
for c, a in areas:
fracs[c.getName()] = a / tot
places = 6
for (c, a) in cur:
self.assertAlmostEqual(a, fracs[c.getName()], places=places)
self.assertAlmostEqual(sum(fracs.values()), sum([a for c, a in cur]))
def test_rotatePins(self):
b = self.Block
b.setRotationNum(0)
index = b.rotatePins(0, justCompute=True)
self.assertEqual(b.getRotationNum(), 0)
self.assertEqual(index[5], 5)
self.assertEqual(index[2], 2) # pin 1 is center and never rotates.
index = b.rotatePins(1)
self.assertEqual(b.getRotationNum(), 1)
self.assertEqual(index[2], 3)
index = b.rotatePins(1)
self.assertEqual(b.getRotationNum(), 2)
self.assertEqual(index[2], 4)
index = b.rotatePins(4) # back to 0
self.assertEqual(b.getRotationNum(), 0)
self.assertEqual(index[2], 2)
self.assertRaises(ValueError, b.rotatePins, -1)
self.assertRaises(ValueError, b.rotatePins, 10)
self.assertRaises((ValueError, TypeError), b.rotatePins, None)
self.assertRaises((ValueError, TypeError), b.rotatePins, "a")
def test_expandElementalToIsotopics(self):
r"""
Tests the expand to elementals capability.
"""
initialN = {}
initialM = {}
elementals = [nuclideBases.byName[nn] for nn in ["FE", "CR", "SI", "V", "MO"]]
for elemental in elementals:
initialN[elemental] = self.Block.getNumberDensity(
elemental.name
) # homogenized
initialM[elemental] = self.Block.getMass(elemental.name)
for elemental in elementals:
self.Block.expandElementalToIsotopics(elemental)
newDens = 0.0
newMass = 0.0
for natNuc in elemental.getNaturalIsotopics():
newDens += self.Block.getNumberDensity(natNuc.name)
newMass += self.Block.getMass(natNuc.name)
self.assertAlmostEqual(
initialN[elemental],
newDens,
msg="Isotopic {2} ndens does not add up to {0}. It adds to {1}"
"".format(initialN[elemental], newDens, elemental),
)
self.assertAlmostEqual(
initialM[elemental],
newMass,
msg="Isotopic {2} mass does not add up to {0} g. "
"It adds to {1}".format(initialM[elemental], newMass, elemental),
)
def test_setPitch(self):
r"""
Checks consistency after adjusting pitch
Needed to verify fix to Issue #165.
"""
b = self.Block
moles1 = b.p.molesHmBOL
b.setPitch(17.5)
moles2 = b.p.molesHmBOL
self.assertAlmostEqual(moles1, moles2)
b.setPitch(20.0)
moles3 = b.p.molesHmBOL
self.assertAlmostEqual(moles2, moles3)
def test_getMfp(self):
"""Test mean free path."""
applyDummyData(self.Block)
# These are unverified numbers, just the result of this calculation.
mfp, mfpAbs, diffusionLength = self.Block.getMfp()
        # no point testing these numbers to high accuracy.
assert_allclose(3.9, mfp, rtol=0.1)
assert_allclose(235.0, mfpAbs, rtol=0.1)
assert_allclose(17.0, diffusionLength, rtol=0.1)
def test_consistentMassDensityVolumeBetweenColdBlockAndColdComponents(self):
block = self.Block
expectedData = []
actualData = []
for c in block:
expectedData.append(getComponentDataFromBlock(c, block))
actualData.append(
(c, c.density(), c.getVolume(), c.density() * c.getVolume())
)
for expected, actual in zip(expectedData, actualData):
msg = "Data (component, density, volume, mass) for component {} does not match. Expected: {}, Actual: {}".format(
expected[0], expected, actual
)
for expectedVal, actualVal in zip(expected, actual):
self.assertAlmostEqual(expectedVal, actualVal, msg=msg)
def test_consistentMassDensityVolumeBetweenHotBlockAndHotComponents(self):
block = self._hotBlock
expectedData = []
actualData = []
for c in block:
expectedData.append(getComponentDataFromBlock(c, block))
actualData.append(
(c, c.density(), c.getVolume(), c.density() * c.getVolume())
)
for expected, actual in zip(expectedData, actualData):
msg = "Data (component, density, volume, mass) for component {} does not match. Expected: {}, Actual: {}".format(
expected[0], expected, actual
)
for expectedVal, actualVal in zip(expected, actual):
self.assertAlmostEqual(expectedVal, actualVal, msg=msg)
def test_consistentAreaWithOverlappingComponents(self):
"""
Test that negative gap areas correctly account for area overlapping upon thermal expansion.
Notes
-----
This test calculates a reference coolant area by subtracting the areas of the intercoolant, duct, wire wrap,
and pins from the total hex block area.
The area of the pins is calculated using only the outer radius of the clad.
This avoids the use of negative areas as implemented in Block.getVolumeFractions.
        Na-23 mass will not be conserved because sodium is evacuated as the duct/clad expands.
See Also
--------
armi.reactor.blocks.Block.getVolumeFractions
"""
numFE56 = self.Block.getNumberOfAtoms("FE56")
numU235 = self.Block.getNumberOfAtoms("U235")
for c in self.Block:
c.setTemperature(800)
hasNegativeArea = any(c.getArea() < 0 for c in self.Block)
self.assertTrue(hasNegativeArea)
self.Block.getVolumeFractions() # sets coolant area
self._testDimensionsAreLinked() # linked dimensions are needed for this test to work
blockPitch = self.Block.getPitch()
self.assertAlmostEqual(
blockPitch, self.Block.getComponent(Flags.INTERCOOLANT).getDimension("op")
)
totalHexArea = blockPitch ** 2 * math.sqrt(3) / 2.0
clad = self.Block.getComponent(Flags.CLAD)
pinArea = (
math.pi / 4.0 * clad.getDimension("od") ** 2 * clad.getDimension("mult")
)
ref = (
totalHexArea
- self.Block.getComponent(Flags.INTERCOOLANT).getArea()
- self.Block.getComponent(Flags.DUCT).getArea()
- self.Block.getComponent(Flags.WIRE).getArea()
- pinArea
)
self.assertAlmostEqual(totalHexArea, self.Block.getArea())
self.assertAlmostEqual(ref, self.Block.getComponent(Flags.COOLANT).getArea())
self.assertTrue(numpy.allclose(numFE56, self.Block.getNumberOfAtoms("FE56")))
self.assertTrue(numpy.allclose(numU235, self.Block.getNumberOfAtoms("U235")))
def _testDimensionsAreLinked(self):
prevC = None
for c in self.Block.getComponentsOfShape(components.Circle):
if prevC:
self.assertAlmostEqual(prevC.getDimension("od"), c.getDimension("id"))
prevC = c
self.assertAlmostEqual(
self.Block.getComponent(Flags.DUCT).getDimension("op"),
self.Block.getComponent(Flags.INTERCOOLANT).getDimension("ip"),
)
def test_breakFuelComponentsIntoIndividuals(self):
fuel = self.Block.getComponent(Flags.FUEL)
mult = fuel.getDimension("mult")
self.assertGreater(mult, 1.0)
self.Block.completeInitialLoading()
self.Block.breakFuelComponentsIntoIndividuals()
self.assertEqual(fuel.getDimension("mult"), 1.0)
def test_plotFlux(self):
try:
xslib = isotxs.readBinary(ISOAA_PATH)
self.Block.r.core.lib = xslib
self.Block.p.mgFlux = range(33)
self.Block.plotFlux(self.Block.r.core, fName="flux.png", bList=[self.Block])
self.assertTrue(os.path.exists("flux.png"))
finally:
os.remove("flux.txt") # secondarily created during the call.
os.remove("flux.png") # created during the call.
def test_pinMgFluxes(self):
"""
Test setting/getting of pin-wise fluxes.
.. warning:: This will likely be pushed to the component level.
"""
fluxes = numpy.ones((33, 10))
self.Block.setPinMgFluxes(fluxes, 10)
self.Block.setPinMgFluxes(fluxes * 2, 10, adjoint=True)
self.Block.setPinMgFluxes(fluxes * 3, 10, gamma=True)
self.assertEqual(self.Block.p.pinMgFluxes[0][2], 1.0)
self.assertEqual(self.Block.p.pinMgFluxesAdj[0][2], 2.0)
self.assertEqual(self.Block.p.pinMgFluxesGamma[0][2], 3.0)
def test_getComponentsInLinkedOrder(self):
comps = self.Block.getComponentsInLinkedOrder()
self.assertEqual(len(comps), len(self.Block))
comps.pop(0)
with self.assertRaises(RuntimeError):
comps2 = self.Block.getComponentsInLinkedOrder(comps)
def test_mergeWithBlock(self):
fuel1 = self.Block.getComponent(Flags.FUEL)
fuel1.setNumberDensity("CM246", 0.0)
block2 = loadTestBlock()
fuel2 = block2.getComponent(Flags.FUEL)
fuel2.setNumberDensity("CM246", 0.02)
self.assertEqual(self.Block.getNumberDensity("CM246"), 0.0)
self.Block.mergeWithBlock(block2, 0.1)
self.assertGreater(self.Block.getNumberDensity("CM246"), 0.0)
self.assertLess(self.Block.getNumberDensity("CM246"), 0.02)
def test_getDimensions(self):
dims = self.Block.getDimensions("od")
self.assertIn(self.Block.getComponent(Flags.FUEL).p.od, dims)
class HexBlock_TestCase(unittest.TestCase):
def setUp(self):
caseSetting = settings.Settings()
self.HexBlock = blocks.HexBlock("TestHexBlock")
hexDims = {"Tinput": 273.0, "Thot": 273.0, "op": 70.6, "ip": 70.0, "mult": 1.0}
self.hexComponent = components.Hexagon("duct", "UZr", **hexDims)
self.HexBlock.addComponent(self.hexComponent)
self.HexBlock.addComponent(
components.Circle(
"clad", "HT9", Tinput=273.0, Thot=273.0, od=0.1, mult=169.0
)
)
self.HexBlock.addComponent(
components.Circle(
"wire", "HT9", Tinput=273.0, Thot=273.0, od=0.01, mult=169.0
)
)
self.HexBlock.addComponent(
components.DerivedShape("coolant", "Sodium", Tinput=273.0, Thot=273.0)
)
r = tests.getEmptyHexReactor()
a = makeTestAssembly(1, 1)
a.add(self.HexBlock)
loc1 = r.core.spatialGrid[0, 1, 0]
r.core.add(a, loc1)
def test_getArea(self):
cur = self.HexBlock.getArea()
ref = math.sqrt(3) / 2.0 * 70.6 ** 2
places = 6
self.assertAlmostEqual(cur, ref, places=places)
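    # Clarifying note (added, not part of the original tests): a hexagon with flat-to-flat
    # pitch p has area sqrt(3)/2 * p**2, so for p = 70.6 cm the reference area is ~4316.6 cm^2.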
def test_coords(self):
r = self.HexBlock.r
a = self.HexBlock.parent
loc1 = r.core.spatialGrid[0, 1, 0]
a.spatialLocator = loc1
x0, y0 = self.HexBlock.coords()
a.spatialLocator = r.core.spatialGrid[0, -1, 0] # symmetric
x2, y2 = self.HexBlock.coords()
a.spatialLocator = loc1
self.HexBlock.p.displacementX = 0.01
self.HexBlock.p.displacementY = 0.02
x1, y1 = self.HexBlock.coords()
# make sure displacements are working
self.assertAlmostEqual(x1 - x0, 1.0)
self.assertAlmostEqual(y1 - y0, 2.0)
# make sure location symmetry is working
self.assertAlmostEqual(x0, -x2)
self.assertAlmostEqual(y0, -y2)
def test_getNumPins(self):
self.assertEqual(self.HexBlock.getNumPins(), 169)
def testSymmetryFactor(self):
self.HexBlock.spatialLocator = self.HexBlock.r.core.spatialGrid[
2, 0, 0
] # full hex
self.HexBlock.clearCache()
self.assertEqual(1.0, self.HexBlock.getSymmetryFactor())
a0 = self.HexBlock.getArea()
v0 = self.HexBlock.getVolume()
m0 = self.HexBlock.getMass()
self.HexBlock.spatialLocator = self.HexBlock.r.core.spatialGrid[
0, 0, 0
] # 1/3 symmetric
self.HexBlock.clearCache()
self.assertEqual(3.0, self.HexBlock.getSymmetryFactor())
self.assertEqual(a0 / 3.0, self.HexBlock.getArea())
self.assertEqual(v0 / 3.0, self.HexBlock.getVolume())
self.assertAlmostEqual(m0 / 3.0, self.HexBlock.getMass())
def test_retainState(self):
"""Ensure retainState restores params and spatialGrids."""
self.HexBlock.spatialGrid = grids.HexGrid.fromPitch(1.0)
self.HexBlock.setType("intercoolant")
with self.HexBlock.retainState():
self.HexBlock.setType("fuel")
self.HexBlock.spatialGrid.changePitch(2.0)
self.assertEqual(self.HexBlock.spatialGrid.pitch, 1.0)
self.assertTrue(self.HexBlock.hasFlags(Flags.INTERCOOLANT))
def test_getPinCoords(self):
xyz = self.HexBlock.getPinCoordinates()
x, y, _z = zip(*xyz)
self.assertAlmostEqual(
y[1], y[2]
) # first two pins should be side by side on top.
self.assertNotAlmostEqual(x[1], x[2])
self.assertEqual(len(xyz), self.HexBlock.getNumPins())
def test_getPitchHomogenousBlock(self):
"""
Demonstrate how to communicate pitch on a hex block with unshaped components.
Notes
-----
This assumes there are 3 materials in the homogeneous block, one with half
the area fraction, and 2 with 1/4 each.
"""
desiredPitch = 14.0
hexTotalArea = hexagon.area(desiredPitch)
compArgs = {"Tinput": 273.0, "Thot": 273.0}
areaFractions = [0.5, 0.25, 0.25]
materials = ["HT9", "UZr", "Sodium"]
        # There are 2 ways to do this. The first is to pick a component to be the pitch-
        # defining component and give it the shape of a hexagon to define the pitch.
        # The hexagon outer pitch (op) is defined by the pitch of the block/assembly;
        # the ip is defined by whatever thickness is necessary to have the desired area
        # fraction. The second way is shown in the second half of this test.
hexBlock = blocks.HexBlock("TestHexBlock")
hexComponentArea = areaFractions[0] * hexTotalArea
# Picking 1st material to use for the hex component here, but really the choice
# is arbitrary.
# area grows quadratically with op
ipNeededForCorrectArea = desiredPitch * areaFractions[0] ** 0.5
self.assertEqual(
hexComponentArea, hexTotalArea - hexagon.area(ipNeededForCorrectArea)
)
hexArgs = {"op": desiredPitch, "ip": ipNeededForCorrectArea, "mult": 1.0}
hexArgs.update(compArgs)
pitchDefiningComponent = components.Hexagon(
"pitchComp", materials[0], **hexArgs
)
hexBlock.addComponent(pitchDefiningComponent)
# hex component is added, now add the rest as unshaped.
for aFrac, material in zip(areaFractions[1:], materials[1:]):
unshapedArgs = {"area": hexTotalArea * aFrac}
unshapedArgs.update(compArgs)
name = f"unshaped {material}"
comp = components.UnshapedComponent(name, material, **unshapedArgs)
hexBlock.addComponent(comp)
self.assertEqual(desiredPitch, hexBlock.getPitch())
self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea())
self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea)
# For this second way, we will simply define the 3 components as unshaped, with
# the desired area fractions, and make a 4th component that is an infinitely
        # thin hexagon with the desired pitch. The downside of this method is that
# now the block has a fourth component with no volume.
hexBlock = blocks.HexBlock("TestHexBlock")
for aFrac, material in zip(areaFractions, materials):
unshapedArgs = {"area": hexTotalArea * aFrac}
unshapedArgs.update(compArgs)
name = f"unshaped {material}"
comp = components.UnshapedComponent(name, material, **unshapedArgs)
hexBlock.addComponent(comp)
# We haven't set a pitch defining component this time so set it now with 0 area.
pitchDefiningComponent = components.Hexagon(
"pitchComp", "Void", op=desiredPitch, ip=desiredPitch, mult=1, **compArgs
)
hexBlock.addComponent(pitchDefiningComponent)
self.assertEqual(desiredPitch, hexBlock.getPitch())
self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea())
self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea)
class CartesianBlock_TestCase(unittest.TestCase):
"""Tests for blocks with rectangular/square outer shape."""
PITCH = 70
def setUp(self):
caseSetting = settings.Settings()
self.cartesianBlock = blocks.CartesianBlock("TestCartesianBlock", caseSetting)
self.cartesianComponent = components.HoledSquare(
"duct",
"UZr",
Tinput=273.0,
Thot=273.0,
holeOD=68.0,
widthOuter=self.PITCH,
mult=1.0,
)
self.cartesianBlock.addComponent(self.cartesianComponent)
self.cartesianBlock.addComponent(
components.Circle(
"clad", "HT9", Tinput=273.0, Thot=273.0, od=68.0, mult=169.0
)
)
def test_getPitchSquare(self):
self.assertEqual(self.cartesianBlock.getPitch(), (self.PITCH, self.PITCH))
def test_getPitchHomogenousBlock(self):
"""
        Demonstrate how to communicate pitch on a cartesian block with unshaped components.
Notes
-----
This assumes there are 3 materials in the homogeneous block, one with half
the area fraction, and 2 with 1/4 each.
"""
desiredPitch = (10.0, 12.0)
rectTotalArea = desiredPitch[0] * desiredPitch[1]
compArgs = {"Tinput": 273.0, "Thot": 273.0}
areaFractions = [0.5, 0.25, 0.25]
materials = ["HT9", "UZr", "Sodium"]
        # There are 2 ways to do this. The first is to pick a component to be the pitch-
        # defining component and give it the shape of a rectangle to define the pitch.
        # The rectangle's outer dimensions are defined by the pitch of the block/assembly;
        # the inner dimensions are defined by whatever thickness is necessary to have
        # the desired area fraction.
# The second way is to define all physical material components as unshaped, and
# add an additional infinitely thin Void component (no area) that defines pitch.
# See second part of HexBlock_TestCase.test_getPitchHomogenousBlock for
# demonstration.
cartBlock = blocks.CartesianBlock("TestCartBlock")
hexComponentArea = areaFractions[0] * rectTotalArea
# Picking 1st material to use for the hex component here, but really the choice
# is arbitrary.
# area grows quadratically with outer dimensions.
# Note there are infinitely many inner dims that would preserve area,
# this is just one of them.
innerDims = [dim * areaFractions[0] ** 0.5 for dim in desiredPitch]
self.assertAlmostEqual(
hexComponentArea, rectTotalArea - innerDims[0] * innerDims[1]
)
rectArgs = {
"lengthOuter": desiredPitch[0],
"lengthInner": innerDims[0],
"widthOuter": desiredPitch[1],
"widthInner": innerDims[1],
"mult": 1.0,
}
rectArgs.update(compArgs)
pitchDefiningComponent = components.Rectangle(
"pitchComp", materials[0], **rectArgs
)
cartBlock.addComponent(pitchDefiningComponent)
# Rectangle component is added, now add the rest as unshaped.
for aFrac, material in zip(areaFractions[1:], materials[1:]):
unshapedArgs = {"area": rectTotalArea * aFrac}
unshapedArgs.update(compArgs)
name = f"unshaped {material}"
comp = components.UnshapedComponent(name, material, **unshapedArgs)
cartBlock.addComponent(comp)
self.assertEqual(desiredPitch, cartBlock.getPitch())
self.assertAlmostEqual(rectTotalArea, cartBlock.getMaxArea())
self.assertAlmostEqual(sum(c.getArea() for c in cartBlock), rectTotalArea)
class MassConservationTests(unittest.TestCase):
r"""
Tests designed to verify mass conservation during thermal expansion
"""
def setUp(self):
# build a block that has some basic components in it.
self.b = blocks.HexBlock("fuel", height=10.0)
fuelDims = {"Tinput": 25.0, "Thot": 600, "od": 0.76, "id": 0.00, "mult": 127.0}
cladDims = {"Tinput": 25.0, "Thot": 450, "od": 0.80, "id": 0.77, "mult": 127.0}
ductDims = {"Tinput": 25.0, "Thot": 400, "op": 16, "ip": 15.3, "mult": 1.0}
coolDims = {"Tinput": 25.0, "Thot": 400}
fuel = components.Circle("fuel", "UZr", **fuelDims)
clad = components.Circle("clad", "HT9", **cladDims)
duct = components.Hexagon("duct", "HT9", **ductDims)
coolant = components.DerivedShape("coolant", "Sodium", **coolDims)
self.b.addComponent(fuel)
self.b.addComponent(clad)
self.b.addComponent(duct)
self.b.addComponent(coolant)
self.b.getVolumeFractions() # TODO: remove, should be no-op when removed self.cached
def test_adjustSmearDensity(self):
r"""
Tests the getting, setting, and getting of smear density functions
"""
bolBlock = copy.deepcopy(self.b)
s = self.b.getSmearDensity(cold=False)
fuel = self.b.getComponent(Flags.FUEL)
clad = self.b.getComponent(Flags.CLAD)
self.assertAlmostEqual(
s, (fuel.getDimension("od") ** 2) / clad.getDimension("id") ** 2, 8
)
self.b.adjustSmearDensity(self.b.getSmearDensity(), bolBlock=bolBlock)
s2 = self.b.getSmearDensity(cold=False)
self.assertAlmostEqual(s, s2, 8)
self.b.adjustSmearDensity(0.733, bolBlock=bolBlock)
self.assertAlmostEqual(0.733, self.b.getSmearDensity(), 8)
# try annular fuel
clad = self.b.getComponent(Flags.CLAD)
fuel = self.b.getComponent(Flags.FUEL)
fuel.setDimension("od", clad.getDimension("id", cold=True))
fuel.setDimension("id", 0.0001)
self.b.adjustSmearDensity(0.733, bolBlock=bolBlock)
self.assertAlmostEqual(0.733, self.b.getSmearDensity(), 8)
def test_heightExpansionDifferences(self):
r""" The point of this test is to determine if the number densities stay the same
with two different heights of the same block. Since we want to expand a block
        from cold temperatures to hot using the fuel expansion coefficient (most important neutronically),
other components are not grown correctly. This means that on the block level, axial expansion will
NOT conserve mass of non-fuel components. However, the excess mass is simply added to the top of
the reactor in the plenum regions (or any non fueled region).
"""
# assume the default block height is 'cold' height. Now we must determine
# what the hot height should be based on thermal expansion. Change the height
# of the block based on the different thermal expansions of the components then
# see the effect on the number densities.
fuel = self.b.getComponent(Flags.FUEL)
height = self.b.getHeight()
Thot = fuel.temperatureInC
Tcold = fuel.inputTemperatureInC
dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold)
hotFuelHeight = height * (1 + dllHot)
self.b.setHeight(hotFuelHeight)
hotFuelU238 = self.b.getNumberDensity("U238")
hotFuelIRON = self.b.getNumberDensity("FE")
# look at clad
clad = self.b.getComponent(Flags.CLAD)
Thot = clad.temperatureInC
Tcold = clad.inputTemperatureInC
dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold)
hotCladHeight = height * (1 + dllHot)
self.b.setHeight(hotCladHeight)
hotCladU238 = self.b.getNumberDensity("U238")
hotCladIRON = self.b.getNumberDensity("FE")
self.assertAlmostEqual(
hotFuelU238,
hotCladU238,
10,
"Number Density of fuel in one height ({0}) != number density of fuel at another height {1}. Number density conservation "
"violated during thermal expansion".format(hotFuelU238, hotCladU238),
)
self.assertAlmostEqual(
hotFuelIRON,
hotCladIRON,
10,
"Number Density of clad in one height ({0}) != number density of clad at another height {1}. Number density conservation "
"violated during thermal expansion".format(hotFuelIRON, hotCladIRON),
)
def test_massFuelHeatup(self):
fuel = self.b.getComponent(Flags.FUEL)
massCold = fuel.getMass()
fuel.setTemperature(100)
massHot = fuel.getMass()
self.assertAlmostEqual(
massCold,
massHot,
10,
"Cold mass of fuel ({0}) != hot mass {1}. Mass conservation "
"violated during thermal expansion".format(massCold, massHot),
)
def test_massCladHeatup(self):
cladding = self.b.getComponent(Flags.CLAD)
massCold = cladding.getMass()
cladding.setTemperature(100)
massHot = cladding.getMass()
self.assertAlmostEqual(
massCold,
massHot,
10,
"Cold mass of clad ({0}) != hot mass {1}. Mass conservation "
"violated during thermal expansion".format(massCold, massHot),
)
def test_massDuctHeatup(self):
duct = self.b.getComponent(Flags.DUCT)
massCold = duct.getMass()
duct.setTemperature(100)
massHot = duct.getMass()
self.assertAlmostEqual(
massCold,
massHot,
10,
"Cold mass of duct ({0}) != hot mass {1}. Mass conservation "
"violated during thermal expansion".format(massCold, massHot),
)
def test_massCoolHeatup(self):
"""Make sure mass of coolant goes down when it heats up."""
coolant = self.b.getComponent(Flags.COOLANT)
massCold = coolant.getMass()
coolant.setTemperature(coolant.temperatureInC + 100)
massHot = coolant.getMass()
self.assertGreater(
massCold,
massHot,
"Cold mass of coolant ({0}) <= hot mass {1}. Mass conservation "
"not violated during thermal expansion of coolant".format(
massCold, massHot
),
)
def test_dimensionDuctHeatup(self):
duct = self.b.getComponent(Flags.DUCT)
pitchCold = duct.getDimension("op", cold=True)
duct.setTemperature(100)
pitchHot = duct.getDimension("op")
dLL = duct.getProperties().linearExpansionFactor(100, 25)
correctHot = pitchCold * (1 + dLL)
self.assertAlmostEqual(
correctHot,
pitchHot,
10,
"Theoretical pitch of duct ({0}) != hot pitch {1}. Linear expansion "
"violated during heatup. \nTc={tc} Tref={tref} dLL={dLL} cold={pcold}".format(
correctHot,
pitchHot,
tc=duct.temperatureInC,
tref=duct.inputTemperatureInC,
dLL=dLL,
pcold=pitchCold,
),
)
def test_coldMass(self):
"""
Verify that the cold mass is what it should be, even though the hot height is input.
At the cold temperature (but with hot height), the mass should be the same as at hot temperature
and hot height.
"""
fuel = self.b.getComponent(Flags.FUEL)
# set ref (input/cold) temperature.
Thot = fuel.temperatureInC
Tcold = fuel.inputTemperatureInC
fuel.setTemperature(Tcold)
massCold = fuel.getMass()
fuelArea = fuel.getArea()
height = self.b.getHeight() # hot height.
rho = fuel.getProperties().density(Tc=Tcold)
dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold)
coldHeight = height / (1 + dllHot)
theoreticalMass = fuelArea * coldHeight * rho
self.assertAlmostEqual(
massCold,
theoreticalMass,
7,
"Cold mass of fuel ({0}) != theoretical mass {1}. "
"Check calculation of cold mass".format(massCold, theoreticalMass),
)
def test_massConsistency(self):
r"""
Verify that the sum of the component masses equals the total mass.
"""
tMass = 0.0
for child in self.b:
tMass += child.getMass()
bMass = self.b.getMass()
self.assertAlmostEqual(
tMass,
bMass,
10,
"Sum of component mass {0} != total block mass {1}. ".format(tMass, bMass),
)
if __name__ == "__main__":
# import sys;sys.argv = ['', '-f']
unittest.main()
| 34.946309
| 134
| 0.601631
|
794eb619f3b50a8fd8d5302c7fc00901f29e9b6f
| 113
|
py
|
Python
|
src/pycounts_rr/__init__.py
|
rrrohit1/pycounts_rr
|
c86167bf0175bf82fcd470526cdb780e74907c13
|
[
"MIT"
] | null | null | null |
src/pycounts_rr/__init__.py
|
rrrohit1/pycounts_rr
|
c86167bf0175bf82fcd470526cdb780e74907c13
|
[
"MIT"
] | null | null | null |
src/pycounts_rr/__init__.py
|
rrrohit1/pycounts_rr
|
c86167bf0175bf82fcd470526cdb780e74907c13
|
[
"MIT"
] | null | null | null |
# read version from installed package
from importlib.metadata import version
__version__ = version("pycounts_rr")
| 37.666667
| 38
| 0.831858
|
794eb6b0f0e36d35c78cafd7d5c4c2e15356e667
| 2,628
|
py
|
Python
|
play/number_theory/utils.py
|
MuggleWei/Hakuna_Matata
|
6a3a012dc2a5942599098d94e90e9381d660500d
|
[
"WTFPL"
] | null | null | null |
play/number_theory/utils.py
|
MuggleWei/Hakuna_Matata
|
6a3a012dc2a5942599098d94e90e9381d660500d
|
[
"WTFPL"
] | 30
|
2020-03-04T21:59:09.000Z
|
2022-01-04T16:46:52.000Z
|
play/number_theory/utils.py
|
MuggleWei/Hakuna_Matata
|
6a3a012dc2a5942599098d94e90e9381d660500d
|
[
"WTFPL"
] | null | null | null |
from sympy import symbols, Poly
def is_square(n):
"""
判断n是否是平方数
:param n:
:return: True或False
"""
low = 1
high = n
while low <= high:
mid = int((low + high) / 2)
power = mid * mid
if power > n:
high = mid - 1
elif power < n:
low = mid + 1
else:
return True
return False
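# Illustrative usage (not part of the original module):
#   is_square(49) -> True
#   is_square(50) -> False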
def euclidean_gcd(a, b):
"""
使用欧几里得算法, 计算两个数的最大公约数
:param a:
:param b:
:return: gcd(a,b)
"""
if a == 0 and b == 0:
return 0
elif a == 0 and b != 0:
return b
elif a != 0 and b == 0:
return a
while b != 0:
        # Avoid the % operator here: its result may differ between programming
        # languages (although all results are congruent).
        # a, b = b, a % b
q = int(a / b)
r = a - q * b
a, b = b, r
return a
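# Illustrative usage (not part of the original module):
#   euclidean_gcd(30, 18) -> 6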
def euclidean_linear_combination(a, b, show_trace=False):
"""
求线性方程的 ax + by = gcd(a,b) 的一个解 (x1, y1)
根据线性方程定理, 方程的一般解可由 (x1 + k * (b / g), y1 - k * (a / g)) 得到
其中 g = gcd(a,b), k为任意整数
:param a:
:param b:
:param show_trace: 显示计算过程
:return: 返回 x1, y1, gcd(a,b)
"""
if a == 0 and b == 0:
return 0, 0, 0
elif a == 0 and b != 0:
return 0, 1, b
elif a != 0 and b == 0:
return 1, 0, a
    # Run the Euclidean algorithm to compute the gcd, recording the intermediate
    # quotients and remainders.
q_list = []
r_list = [a, b]
while b != 0:
q = int(a / b)
r = a - q * b
q_list.append(q)
a, b = b, r
r_list.append(r)
g = a
    # Build, by recurrence, the symbolic expressions that lead to a solution.
a, b = symbols('a b')
eq_list = []
eq_list.append(a)
eq_list.append(b)
len_list = len(q_list) - 1
for i in range(len_list):
eq_list.append(eq_list[i] - q_list[i] * eq_list[i + 1])
if show_trace is True:
print("{} = {} * {} + {} => {} = {}".format(
r_list[i], q_list[i], r_list[i + 1], r_list[i + 2], r_list[i + 2], eq_list[-1]))
    # Extract the coefficients of a and b from the final expression.
p = Poly(eq_list[-1])
x1 = None
y1 = None
for monom, coeff in p.as_dict().items():
if monom[0] == 1:
x1 = coeff
elif monom[1] == 1:
y1 = coeff
else:
continue
return x1, y1, g
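# Illustrative usage (not part of the original module):
#   euclidean_linear_combination(30, 18) -> (-1, 2, 6), since 30*(-1) + 18*2 = 6 = gcd(30, 18)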
def linear_congruence(a, c, m):
"""
求线性同余方程 ax=c(mod m) 的解
:param a:
:param c:
:param m:
:return: 解的列表
"""
if a == 0 or m == 0:
raise Exception("linear congruence input invalid arguments")
u, v, g = euclidean_linear_combination(a, m)
if int(c / g) * g != c:
return []
sol_list = []
x0 = int(u * c / g)
for k in range(0, g):
sol_list.append(x0 + int(k * m / g))
return sol_list
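# Minimal demo (illustrative sketch, not part of the original module).
if __name__ == "__main__":
    # gcd, and the solutions of the linear congruence 3x = 6 (mod 9).
    print(euclidean_gcd(1071, 462))    # 21
    print(linear_congruence(3, 6, 9))  # [2, 5, 8]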
| 21.719008
| 96
| 0.465753
|
794eb7d6cd9290dbe957f861502a7db156fe7366
| 59,861
|
py
|
Python
|
official/benchmark/keras_imagenet_benchmark.py
|
MaverickLegacy/FollowDetect
|
29331b13fa74e6fe1a8efa370f2e3de082b20fc6
|
[
"Apache-2.0"
] | 1
|
2020-08-14T18:16:22.000Z
|
2020-08-14T18:16:22.000Z
|
official/benchmark/keras_imagenet_benchmark.py
|
MaverickLegacy/FollowDetect
|
29331b13fa74e6fe1a8efa370f2e3de082b20fc6
|
[
"Apache-2.0"
] | 7
|
2020-09-26T01:03:33.000Z
|
2022-02-10T01:30:14.000Z
|
official/benchmark/keras_imagenet_benchmark.py
|
MaverickLegacy/FollowDetect
|
29331b13fa74e6fe1a8efa370f2e3de082b20fc6
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
# pylint: disable=line-too-long
from __future__ import print_function
import json
import os
import time
from typing import Any, MutableMapping, Optional
from absl import flags
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.benchmark import benchmark_wrappers
from official.benchmark import keras_benchmark
from official.benchmark.models import resnet_imagenet_main
from official.vision.image_classification import classifier_trainer
MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77
MOBILENET_V1_MIN_TOP_1_ACCURACY = 0.65
MOBILENET_V1_MAX_TOP_1_ACCURACY = 0.68
# Range of top-1 accuracies for model optimization techniques.
# Each item indicates (MIN_TOP_1_ACCURACY, MAX_TOP_1_ACCURACY).
MODEL_OPTIMIZATION_TOP_1_ACCURACY = {
'RESNET50_FINETUNE_PRUNING': (0.76, 0.77),
'MOBILENET_V1_FINETUNE_PRUNING': (0.67, 0.68),
}
FLAGS = flags.FLAGS
def _get_classifier_parameters(
num_gpus: int = 0,
builder: str = 'records',
skip_eval: bool = False,
distribution_strategy: str = 'mirrored',
per_replica_batch_size: int = 128,
epochs: int = 90,
steps: int = 0,
epochs_between_evals: int = 1,
dtype: str = 'float32',
enable_xla: bool = False,
run_eagerly: bool = False,
gpu_thread_mode: Optional[str] = None,
dataset_num_private_threads: Optional[int] = None,
loss_scale: Optional[str] = None) -> MutableMapping[str, Any]:
"""Gets classifier trainer's ResNet parameters."""
return {
'runtime': {
'num_gpus': num_gpus,
'distribution_strategy': distribution_strategy,
'run_eagerly': run_eagerly,
'enable_xla': enable_xla,
'dataset_num_private_threads': dataset_num_private_threads,
'gpu_thread_mode': gpu_thread_mode,
'loss_scale': loss_scale,
},
'train_dataset': {
'builder': builder,
'use_per_replica_batch_size': True,
'batch_size': per_replica_batch_size,
'image_size': 224,
'dtype': dtype,
},
'validation_dataset': {
'builder': builder,
'batch_size': per_replica_batch_size,
'use_per_replica_batch_size': True,
'image_size': 224,
'dtype': dtype,
},
'train': {
'epochs': epochs,
'steps': steps,
'callbacks': {
'enable_tensorboard': False,
'enable_checkpoint_and_export': False,
'enable_time_history': True,
},
},
'model': {
'loss': {
'label_smoothing': 0.1,
},
},
'evaluation': {
'epochs_between_evals': epochs_between_evals,
'skip_eval': skip_eval,
},
}
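# Illustrative usage (not part of the original file): the returned dict is meant
# to be serialized into the classifier trainer's params_override flag, e.g.
#   FLAGS.params_override = json.dumps(
#       _get_classifier_parameters(num_gpus=8, dtype='float16'))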
class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
"""Benchmark accuracy tests for ResNet50 in Keras."""
def __init__(self,
output_dir: Optional[str] = None,
root_data_dir: Optional[str] = None,
**kwargs):
"""A benchmark class.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more
named arguments before updating the constructor.
"""
flag_methods = [classifier_trainer.define_classifier_flags]
self.data_dir = os.path.join(root_data_dir, 'imagenet')
super(Resnet50KerasAccuracy, self).__init__(
output_dir=output_dir, flag_methods=flag_methods)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(
self,
experiment_name: str,
top_1_min: float = MIN_TOP_1_ACCURACY,
top_1_max: float = MAX_TOP_1_ACCURACY,
num_gpus: int = 0,
distribution_strategy: str = 'mirrored',
per_replica_batch_size: int = 128,
epochs: int = 90,
steps: int = 0,
epochs_between_evals: int = 1,
dtype: str = 'float32',
enable_xla: bool = False,
run_eagerly: bool = False,
gpu_thread_mode: Optional[str] = None,
dataset_num_private_threads: Optional[int] = None,
loss_scale: Optional[str] = None):
"""Runs and reports the benchmark given the provided configuration."""
FLAGS.model_type = 'resnet'
FLAGS.dataset = 'imagenet'
FLAGS.mode = 'train_and_eval'
FLAGS.data_dir = self.data_dir
FLAGS.model_dir = self._get_model_dir(experiment_name)
parameters = _get_classifier_parameters(
num_gpus=num_gpus,
distribution_strategy=distribution_strategy,
per_replica_batch_size=per_replica_batch_size,
epochs=epochs,
steps=steps,
epochs_between_evals=epochs_between_evals,
dtype=dtype,
enable_xla=enable_xla,
run_eagerly=run_eagerly,
gpu_thread_mode=gpu_thread_mode,
dataset_num_private_threads=dataset_num_private_threads,
loss_scale=loss_scale)
FLAGS.params_override = json.dumps(parameters)
total_batch_size = num_gpus * per_replica_batch_size
start_time_sec = time.time()
stats = classifier_trainer.run(flags.FLAGS)
wall_time_sec = time.time() - start_time_sec
super(Resnet50KerasAccuracy, self)._report_benchmark(
stats,
wall_time_sec,
top_1_min=top_1_min,
top_1_max=top_1_max,
total_batch_size=total_batch_size,
log_steps=100)
def benchmark_8_gpu(self):
"""Tests Keras model with eager, dist_strat and 8 GPUs."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8_gpu',
num_gpus=8,
per_replica_batch_size=128,
epochs=90,
epochs_between_evals=10,
dtype='float32')
def benchmark_8_gpu_fp16(self):
"""Tests Keras model with eager, dist_strat, 8 GPUs, and fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8_gpu_fp16',
num_gpus=8,
per_replica_batch_size=256,
epochs=90,
epochs_between_evals=10,
dtype='float16')
def benchmark_xla_8_gpu_fp16(self):
"""Tests Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu_fp16',
num_gpus=8,
per_replica_batch_size=256,
epochs=90,
epochs_between_evals=10,
dtype='float16',
enable_xla=True)
def benchmark_xla_8_gpu_fp16_dynamic(self):
"""Tests Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu_fp16_dynamic',
top_1_min=0.736,
num_gpus=8,
per_replica_batch_size=256,
epochs=90,
epochs_between_evals=10,
dtype='float16',
loss_scale='dynamic')
def _get_model_dir(self, folder_name):
return os.path.join(self.output_dir, folder_name)
class MobilenetV1KerasAccuracy(keras_benchmark.KerasBenchmark):
"""Benchmark accuracy tests for MobilenetV1 in Keras."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
"""A benchmark class.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more
named arguments before updating the constructor.
"""
flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]
self.data_dir = os.path.join(root_data_dir, 'imagenet')
super(MobilenetV1KerasAccuracy, self).__init__(
output_dir=output_dir,
flag_methods=flag_methods,
default_flags={
'model': 'mobilenet',
'optimizer': 'mobilenet_default',
'initial_learning_rate_per_sample': 0.00039,
})
def benchmark_8_gpu(self):
"""Test Keras model with eager, dist_strat and 8 GPUs."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128 * 8
FLAGS.train_epochs = 90
FLAGS.epochs_between_evals = 10
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
self._run_and_report_benchmark()
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self,
top_1_min=MOBILENET_V1_MIN_TOP_1_ACCURACY,
top_1_max=MOBILENET_V1_MAX_TOP_1_ACCURACY):
start_time_sec = time.time()
stats = resnet_imagenet_main.run(flags.FLAGS)
wall_time_sec = time.time() - start_time_sec
super(MobilenetV1KerasAccuracy, self)._report_benchmark(
stats,
wall_time_sec,
top_1_min=top_1_min,
top_1_max=top_1_max,
total_batch_size=FLAGS.batch_size,
log_steps=100)
def _get_model_dir(self, folder_name):
return os.path.join(self.output_dir, folder_name)
class Resnet50KerasClassifierBenchmarkBase(keras_benchmark.KerasBenchmark):
"""Resnet50 (classifier_trainer) benchmarks."""
def __init__(self, output_dir=None, default_flags=None,
tpu=None, dataset_builder='records', train_epochs=1,
train_steps=110, data_dir=None):
flag_methods = [classifier_trainer.define_classifier_flags]
self.dataset_builder = dataset_builder
self.train_epochs = train_epochs
self.train_steps = train_steps
self.data_dir = data_dir
super(Resnet50KerasClassifierBenchmarkBase, self).__init__(
output_dir=output_dir,
flag_methods=flag_methods,
default_flags=default_flags,
tpu=tpu)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(
self,
experiment_name: str,
      skip_steps: Optional[int] = None,
      steps: Optional[int] = None,
top_1_min: float = MIN_TOP_1_ACCURACY,
top_1_max: float = MAX_TOP_1_ACCURACY,
num_gpus: int = 0,
num_tpus: int = 0,
distribution_strategy: str = 'mirrored',
per_replica_batch_size: int = 128,
epochs_between_evals: int = 1,
dtype: str = 'float32',
enable_xla: bool = False,
run_eagerly: bool = False,
gpu_thread_mode: Optional[str] = None,
dataset_num_private_threads: Optional[int] = None,
loss_scale: Optional[str] = None):
"""Runs and reports the benchmark given the provided configuration."""
FLAGS.model_type = 'resnet'
FLAGS.dataset = 'imagenet'
FLAGS.mode = 'train_and_eval'
FLAGS.data_dir = self.data_dir
FLAGS.model_dir = self._get_model_dir(experiment_name)
parameters = _get_classifier_parameters(
builder=self.dataset_builder,
skip_eval=True,
num_gpus=num_gpus,
distribution_strategy=distribution_strategy,
per_replica_batch_size=per_replica_batch_size,
epochs=self.train_epochs,
        steps=steps if steps is not None else self.train_steps,
epochs_between_evals=epochs_between_evals,
dtype=dtype,
enable_xla=enable_xla,
gpu_thread_mode=gpu_thread_mode,
dataset_num_private_threads=dataset_num_private_threads,
loss_scale=loss_scale)
FLAGS.params_override = json.dumps(parameters)
if distribution_strategy == 'tpu':
total_batch_size = num_tpus * per_replica_batch_size
else:
total_batch_size = num_gpus * per_replica_batch_size
start_time_sec = time.time()
stats = classifier_trainer.run(flags.FLAGS)
wall_time_sec = time.time() - start_time_sec
    # Number of logged step-time entries to exclude from the performance
    # report. We keep results from the last 100 batches, or skip steps based
    # on the input skip_steps.
    warmup = (skip_steps or ((steps or self.train_steps) - 100)) // FLAGS.log_steps
super(Resnet50KerasClassifierBenchmarkBase, self)._report_benchmark(
stats,
wall_time_sec,
total_batch_size=total_batch_size,
log_steps=FLAGS.log_steps,
warmup=warmup,
start_time_sec=start_time_sec)
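  # Illustrative arithmetic (not part of the original file): with the defaults
  # used by the synthetic/real subclasses (train_steps=110, log_steps=10),
  # warmup = (110 - 100) // 10 = 1, so only the first logged entry is dropped.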
def benchmark_1_gpu_no_dist_strat(self):
"""Tests Keras model with 1 GPU, no distribution strategy."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_1_gpu_no_dist_strat',
num_gpus=1,
distribution_strategy='off',
per_replica_batch_size=128)
def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
"""Tests Keras model with 1 GPU, no distribution strategy, run eagerly."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly',
num_gpus=1,
run_eagerly=True,
distribution_strategy='off',
per_replica_batch_size=64)
def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
"""Tests with 1 GPU, no distribution strategy, fp16, run eagerly."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_1_gpu_no_dist_strat_run_eagerly_fp16',
num_gpus=1,
run_eagerly=True,
distribution_strategy='off',
dtype='float16',
per_replica_batch_size=128)
def benchmark_1_gpu(self):
"""Tests Keras model with 1 GPU."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_1_gpu',
num_gpus=1,
distribution_strategy='one_device',
per_replica_batch_size=128)
def benchmark_xla_1_gpu(self):
"""Tests Keras model with XLA and 1 GPU."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_1_gpu',
num_gpus=1,
enable_xla=True,
distribution_strategy='one_device',
per_replica_batch_size=128)
def benchmark_1_gpu_fp16(self):
"""Tests Keras model with 1 GPU and fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_1_gpu_fp16',
num_gpus=1,
distribution_strategy='one_device',
dtype='float16',
per_replica_batch_size=256)
def benchmark_1_gpu_fp16_dynamic(self):
"""Tests Keras model with 1 GPU, fp16, and dynamic loss scaling."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_1_gpu_fp16_dynamic',
num_gpus=1,
distribution_strategy='one_device',
dtype='float16',
per_replica_batch_size=256,
loss_scale='dynamic')
def benchmark_xla_1_gpu_fp16(self):
"""Tests Keras model with XLA, 1 GPU and fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_1_gpu_fp16',
num_gpus=1,
enable_xla=True,
distribution_strategy='one_device',
dtype='float16',
per_replica_batch_size=256)
def benchmark_xla_1_gpu_fp16_tweaked(self):
"""Tests Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_1_gpu_fp16_tweaked',
num_gpus=1,
enable_xla=True,
distribution_strategy='one_device',
dtype='float16',
per_replica_batch_size=256,
gpu_thread_mode='gpu_private')
def benchmark_xla_1_gpu_fp16_dynamic(self):
"""Tests Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_1_gpu_fp16_dynamic',
num_gpus=1,
enable_xla=True,
distribution_strategy='one_device',
dtype='float16',
per_replica_batch_size=256,
loss_scale='dynamic')
def benchmark_8_gpu(self):
"""Tests Keras model with 8 GPUs."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8_gpu',
num_gpus=8,
distribution_strategy='mirrored',
per_replica_batch_size=128)
def benchmark_8_gpu_tweaked(self):
"""Tests Keras model with manual config tuning and 8 GPUs."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8_gpu_tweaked',
num_gpus=8,
distribution_strategy='mirrored',
per_replica_batch_size=128,
dataset_num_private_threads=14)
def benchmark_xla_8_gpu(self):
"""Tests Keras model with XLA and 8 GPUs."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu',
num_gpus=8,
enable_xla=True,
distribution_strategy='mirrored',
per_replica_batch_size=128)
def benchmark_xla_8_gpu_tweaked(self):
"""Tests Keras model with manual config tuning, 8 GPUs, and XLA."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu_tweaked',
num_gpus=8,
enable_xla=True,
distribution_strategy='mirrored',
per_replica_batch_size=128,
gpu_thread_mode='gpu_private',
dataset_num_private_threads=24)
def benchmark_8_gpu_fp16(self):
"""Tests Keras model with 8 GPUs and fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8_gpu_fp16',
num_gpus=8,
dtype='float16',
distribution_strategy='mirrored',
per_replica_batch_size=256)
def benchmark_8_gpu_fp16_tweaked(self):
"""Tests Keras model with 8 GPUs, fp16, and manual config tuning."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8_gpu_fp16_tweaked',
num_gpus=8,
dtype='float16',
distribution_strategy='mirrored',
per_replica_batch_size=256,
gpu_thread_mode='gpu_private',
dataset_num_private_threads=40)
def benchmark_8_gpu_fp16_dynamic_tweaked(self):
"""Tests Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8_gpu_fp16_dynamic_tweaked',
num_gpus=8,
dtype='float16',
distribution_strategy='mirrored',
per_replica_batch_size=256,
loss_scale='dynamic',
gpu_thread_mode='gpu_private',
dataset_num_private_threads=40)
def benchmark_xla_8_gpu_fp16(self):
"""Tests Keras model with XLA, 8 GPUs and fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu_fp16',
dtype='float16',
num_gpus=8,
enable_xla=True,
distribution_strategy='mirrored',
per_replica_batch_size=256)
def benchmark_xla_8_gpu_fp16_tweaked(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu_fp16_tweaked',
dtype='float16',
num_gpus=8,
enable_xla=True,
distribution_strategy='mirrored',
per_replica_batch_size=256,
gpu_thread_mode='gpu_private',
dataset_num_private_threads=48)
def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Tests with manual config tuning, XLA, 8 GPUs and fp16.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu_fp16_tweaked_delay_measure',
dtype='float16',
num_gpus=8,
enable_xla=True,
distribution_strategy='mirrored',
per_replica_batch_size=256,
gpu_thread_mode='gpu_private',
dataset_num_private_threads=48,
steps=310)
def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
"""Tests Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_xla_8_gpu_fp16_dynamic_tweaked',
dtype='float16',
num_gpus=8,
enable_xla=True,
distribution_strategy='mirrored',
per_replica_batch_size=256,
gpu_thread_mode='gpu_private',
loss_scale='dynamic',
dataset_num_private_threads=48)
def benchmark_2x2_tpu_bf16(self):
"""Test Keras model with 2x2 TPU, bf16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_2x2_tpu_bf16',
dtype='bfloat16',
num_tpus=8,
distribution_strategy='tpu',
per_replica_batch_size=128)
def benchmark_4x4_tpu_bf16(self):
"""Test Keras model with 4x4 TPU, bf16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_4x4_tpu_bf16',
dtype='bfloat16',
num_tpus=32,
distribution_strategy='tpu',
per_replica_batch_size=128)
def benchmark_8x8_tpu_bf16(self):
"""Test Keras model with 8x8 TPU, bf16."""
self._setup()
self._run_and_report_benchmark(
experiment_name='benchmark_8x8_tpu_bf16',
dtype='bfloat16',
num_tpus=128,
distribution_strategy='tpu',
per_replica_batch_size=64)
def fill_report_object(self, stats):
super(Resnet50KerasClassifierBenchmarkBase, self).fill_report_object(
stats,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
"""Resnet50 benchmarks."""
def __init__(self, output_dir=None, default_flags=None, tpu=None):
flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]
super(Resnet50KerasBenchmarkBase, self).__init__(
output_dir=output_dir,
flag_methods=flag_methods,
default_flags=default_flags,
tpu=tpu)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self, skip_steps=None):
start_time_sec = time.time()
stats = resnet_imagenet_main.run(FLAGS)
wall_time_sec = time.time() - start_time_sec
    # Number of logged step-time entries to exclude from the performance
    # report. We keep results from the last 100 batches, or skip steps based
    # on the input skip_steps.
warmup = (skip_steps or (FLAGS.train_steps - 100)) // FLAGS.log_steps
super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
stats,
wall_time_sec,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
warmup=warmup,
start_time_sec=start_time_sec)
def benchmark_1_gpu_no_dist_strat(self):
"""Test Keras model with 1 GPU, no distribution strategy."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
"""Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly')
FLAGS.batch_size = 64
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self):
"""Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.explicit_gpu_placement = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked')
FLAGS.batch_size = 64
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
"""Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self):
"""Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.explicit_gpu_placement = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu(self):
"""Test Keras model with 1 GPU."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_amp(self):
"""Test Keras model with 1 GPU with automatic mixed precision."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.dtype = 'fp16'
FLAGS.fp16_implementation = 'graph_rewrite'
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp')
FLAGS.batch_size = 256
self._run_and_report_benchmark()
def benchmark_xla_1_gpu(self):
"""Test Keras model with XLA and 1 GPU."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_amp(self):
"""Test Keras model with XLA and 1 GPU with automatic mixed precision."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.dtype = 'fp16'
FLAGS.fp16_implementation = 'graph_rewrite'
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp')
FLAGS.batch_size = 256
self._run_and_report_benchmark()
def benchmark_1_gpu_fp16(self):
"""Test Keras model with 1 GPU and fp16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
self._run_and_report_benchmark()
def benchmark_1_gpu_fp16_dynamic(self):
"""Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
FLAGS.loss_scale = 'dynamic'
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_fp16(self):
"""Test Keras model with XLA, 1 GPU and fp16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_fp16_tweaked(self):
"""Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
FLAGS.tf_gpu_thread_mode = 'gpu_private'
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_fp16_dynamic(self):
"""Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
FLAGS.loss_scale = 'dynamic'
self._run_and_report_benchmark()
def benchmark_8_gpu(self):
"""Test Keras model with 8 GPUs."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
FLAGS.batch_size = 128 * 8 # 8 GPUs
self._run_and_report_benchmark()
def benchmark_8_gpu_amp(self):
"""Test Keras model with 8 GPUs with automatic mixed precision."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_eager = True
FLAGS.dtype = 'fp16'
FLAGS.fp16_implementation = 'graph_rewrite'
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp')
FLAGS.batch_size = 256 * 8 # 8 GPUs
self._run_and_report_benchmark()
def benchmark_8_gpu_tweaked(self):
"""Test Keras model with manual config tuning and 8 GPUs."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
FLAGS.batch_size = 128 * 8 # 8 GPUs
FLAGS.datasets_num_private_threads = 14
self._run_and_report_benchmark()
def benchmark_xla_8_gpu(self):
"""Test Keras model with XLA and 8 GPUs."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
FLAGS.batch_size = 128 * 8 # 8 GPUs
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_amp(self):
"""Test Keras model with XLA and 8 GPUs with automatic mixed precision."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_eager = True
FLAGS.dtype = 'fp16'
FLAGS.fp16_implementation = 'graph_rewrite'
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_amp')
FLAGS.batch_size = 256 * 8 # 8 GPUs
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_tweaked(self):
"""Test Keras model with manual config tuning, 8 GPUs, and XLA."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked')
FLAGS.batch_size = 128 * 8
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.datasets_num_private_threads = 24
self._run_and_report_benchmark()
def benchmark_8_gpu_fp16(self):
"""Test Keras model with 8 GPUs and fp16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
FLAGS.batch_size = 256 * 8 # 8 GPUs
self._run_and_report_benchmark()
def benchmark_8_gpu_fp16_tweaked(self):
"""Test Keras model with 8 GPUs, fp16, and manual config tuning."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
FLAGS.batch_size = 256 * 8 # 8 GPUs
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.dataset_num_private_threads = 40
self._run_and_report_benchmark()
def benchmark_8_gpu_fp16_dynamic_tweaked(self):
"""Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir(
'benchmark_8_gpu_fp16_dynamic_tweaked')
FLAGS.batch_size = 256 * 8 # 8 GPUs
FLAGS.loss_scale = 'dynamic'
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.dataset_num_private_threads = 40
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16(self):
"""Test Keras model with XLA, 8 GPUs and fp16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
FLAGS.batch_size = 256 * 8 # 8 GPUs
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_tweaked(self):
"""Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
FLAGS.batch_size = 256 * 8 # 8 GPUs
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.datasets_num_private_threads = 48
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
"""Test with manual config tuning, XLA, 8 GPUs and fp16.
Delay performance measurement for stable performance on 96 vCPU platforms.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir(
'benchmark_xla_8_gpu_fp16_tweaked_delay_measure')
FLAGS.batch_size = 256 * 8
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.datasets_num_private_threads = 48
FLAGS.train_steps = 310
self._run_and_report_benchmark()
def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
"""Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'mirrored'
FLAGS.model_dir = self._get_model_dir(
'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
FLAGS.batch_size = 256 * 8 # 8 GPUs
FLAGS.loss_scale = 'dynamic'
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.datasets_num_private_threads = 48
self._run_and_report_benchmark()
def benchmark_2x2_tpu_bf16(self):
"""Test Keras model with 2x2 TPU, bf16."""
self._setup()
FLAGS.dtype = 'bf16'
FLAGS.distribution_strategy = 'tpu'
FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_bf16')
FLAGS.batch_size = 1024
self._run_and_report_benchmark()
def benchmark_4x4_tpu_bf16(self):
"""Test Keras model with 4x4 TPU, bf16."""
self._setup()
FLAGS.dtype = 'bf16'
FLAGS.distribution_strategy = 'tpu'
FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu_bf16')
FLAGS.batch_size = 4096
self._run_and_report_benchmark()
def benchmark_8x8_tpu_bf16(self):
"""Test Keras model with 8x8 TPU, bf16."""
self._setup()
FLAGS.dtype = 'bf16'
FLAGS.distribution_strategy = 'tpu'
FLAGS.model_dir = self._get_model_dir('benchmark_8x8_tpu_bf16')
FLAGS.batch_size = 8192
self._run_and_report_benchmark()
def fill_report_object(self, stats):
super(Resnet50KerasBenchmarkBase, self).fill_report_object(
stats,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
class Resnet50KerasBenchmarkSynth(Resnet50KerasClassifierBenchmarkBase):
"""Resnet50 synthetic benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs):
def_flags = {}
def_flags['log_steps'] = 10
super(Resnet50KerasBenchmarkSynth, self).__init__(
output_dir=output_dir, default_flags=def_flags, tpu=tpu,
dataset_builder='synthetic', train_epochs=1, train_steps=110)
class Resnet50KerasBenchmarkReal(Resnet50KerasClassifierBenchmarkBase):
"""Resnet50 real data benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs):
data_dir = os.path.join(root_data_dir, 'imagenet')
def_flags = {}
def_flags['log_steps'] = 10
super(Resnet50KerasBenchmarkReal, self).__init__(
output_dir=output_dir, default_flags=def_flags, tpu=tpu,
dataset_builder='records', train_epochs=1, train_steps=110,
data_dir=data_dir)
class Resnet50KerasBenchmarkRemoteData(Resnet50KerasBenchmarkBase):
"""Resnet50 real data (stored in remote storage) benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
def_flags = {}
def_flags['skip_eval'] = True
def_flags['report_accuracy_metrics'] = False
def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
# Defining multiple epochs overrides the train_steps setting in benchmarks.
def_flags['train_epochs'] = 2
# Cache dataset so performance is stable after the first epoch.
def_flags['training_dataset_cache'] = True
def_flags['log_steps'] = 100
    # Note that single-GPU and pure eager tests, which are less likely to be
    # input bound and are more stable, run for a shorter time by overriding
    # FLAGS.train_epochs, train_steps, and log_steps in the benchmark methods,
    # and skip_steps in _run_and_report_benchmark().
super(Resnet50KerasBenchmarkRemoteData, self).__init__(
output_dir=output_dir, default_flags=def_flags)
def _override_flags_to_run_test_shorter(self):
FLAGS.train_epochs = 1
FLAGS.train_steps = 300
FLAGS.log_steps = 10
def benchmark_1_gpu_no_dist_strat(self):
"""Test Keras model with 1 GPU, no distribution strategy."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
FLAGS.batch_size = 128
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
"""Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly')
FLAGS.batch_size = 64
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self):
"""Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.explicit_gpu_placement = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked')
FLAGS.batch_size = 64
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
"""Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 128
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self):
"""Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.explicit_gpu_placement = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 128
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu(self):
"""Test Keras model with 1 GPU."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
FLAGS.batch_size = 128
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu_amp(self):
"""Test Keras model with 1 GPU with automatic mixed precision."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.dtype = 'fp16'
FLAGS.fp16_implementation = 'graph_rewrite'
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp')
FLAGS.batch_size = 256
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_xla_1_gpu(self):
"""Test Keras model with XLA and 1 GPU."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
FLAGS.batch_size = 128
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_amp(self):
"""Test Keras model with XLA and 1 GPU with automatic mixed precision."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.dtype = 'fp16'
FLAGS.fp16_implementation = 'graph_rewrite'
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp')
FLAGS.batch_size = 256
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu_fp16(self):
"""Test Keras model with 1 GPU and fp16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_1_gpu_fp16_dynamic(self):
"""Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
FLAGS.loss_scale = 'dynamic'
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_fp16(self):
"""Test Keras model with XLA, 1 GPU and fp16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_fp16_tweaked(self):
"""Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
FLAGS.tf_gpu_thread_mode = 'gpu_private'
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
def benchmark_xla_1_gpu_fp16_dynamic(self):
"""Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'one_device'
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
FLAGS.dtype = 'fp16'
FLAGS.batch_size = 256
FLAGS.loss_scale = 'dynamic'
self._override_flags_to_run_test_shorter()
self._run_and_report_benchmark()
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self):
if FLAGS.num_gpus == 1 or FLAGS.run_eagerly:
# For single GPU and pure eager tests which are less likely to be input
# bound and more stable, run for shorter time and use the default
# skip_steps.
skip_steps = None
else:
# skip the first epoch for performance measurement.
skip_steps = 600
super(Resnet50KerasBenchmarkRemoteData,
self)._run_and_report_benchmark(skip_steps=skip_steps)
class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
"""Trivial model with real data benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]
def_flags = {}
def_flags['use_trivial_model'] = True
def_flags['skip_eval'] = True
def_flags['report_accuracy_metrics'] = False
def_flags['dtype'] = 'fp16'
def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
def_flags['train_steps'] = 600
def_flags['log_steps'] = 100
def_flags['distribution_strategy'] = 'mirrored'
super(TrivialKerasBenchmarkReal, self).__init__(
output_dir=output_dir,
flag_methods=flag_methods,
default_flags=def_flags)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self):
start_time_sec = time.time()
stats = resnet_imagenet_main.run(FLAGS)
wall_time_sec = time.time() - start_time_sec
super(TrivialKerasBenchmarkReal, self)._report_benchmark(
stats,
wall_time_sec,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_8_gpu_warmup(self):
"""Dummy test that runs over an epoch to warmup the machine."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_eager = True
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
FLAGS.batch_size = 256 * 8
FLAGS.train_steps = 700
self._run_and_report_benchmark()
def fill_report_object(self, stats):
super(TrivialKerasBenchmarkReal, self).fill_report_object(
stats,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
class Resnet50MultiWorkerKerasAccuracy(keras_benchmark.KerasBenchmark):
"""Resnet50 distributed accuracy tests with multiple workers."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]
self.data_dir = os.path.join(root_data_dir, 'imagenet')
super(Resnet50MultiWorkerKerasAccuracy, self).__init__(
output_dir=output_dir, flag_methods=flag_methods)
def _benchmark_common(self, eager, num_workers, all_reduce_alg):
"""Common to all benchmarks in this class."""
self._setup()
num_gpus = 8
FLAGS.num_gpus = num_gpus
FLAGS.data_dir = self.data_dir
FLAGS.train_epochs = 90
FLAGS.epochs_between_evals = 10
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = eager
FLAGS.enable_xla = False
FLAGS.distribution_strategy = 'multi_worker_mirrored'
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.datasets_num_private_threads = 32
FLAGS.model_dir = self._get_model_dir(
'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format(
'eager' if eager else 'graph', num_workers, all_reduce_alg))
FLAGS.batch_size = 256 * num_gpus * num_workers
FLAGS.all_reduce_alg = all_reduce_alg
self._run_and_report_benchmark()
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self,
top_1_min=MIN_TOP_1_ACCURACY,
top_1_max=MAX_TOP_1_ACCURACY):
start_time_sec = time.time()
    stats = resnet_imagenet_main.run(flags.FLAGS)
wall_time_sec = time.time() - start_time_sec
super(Resnet50MultiWorkerKerasAccuracy, self)._report_benchmark(
stats,
wall_time_sec,
top_1_min=top_1_min,
top_1_max=top_1_max,
total_batch_size=FLAGS.batch_size,
log_steps=100)
def _get_model_dir(self, folder_name):
return os.path.join(self.output_dir, folder_name)
def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self):
"""Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce."""
self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring')
def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self):
"""Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce."""
self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl')
def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self):
"""Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce."""
self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring')
def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self):
"""Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce."""
self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl')
class Resnet50MultiWorkerKerasBenchmark(Resnet50KerasBenchmarkBase):
"""Resnet50 distributed benchmark tests with multiple workers."""
def __init__(self, output_dir=None, default_flags=None):
super(Resnet50MultiWorkerKerasBenchmark, self).__init__(
output_dir=output_dir, default_flags=default_flags)
def _benchmark_common(self, eager, num_workers, all_reduce_alg):
"""Common to all benchmarks in this class."""
self._setup()
num_gpus = 8
FLAGS.num_gpus = num_gpus
FLAGS.dtype = 'fp16'
FLAGS.enable_eager = eager
FLAGS.enable_xla = False
FLAGS.distribution_strategy = 'multi_worker_mirrored'
FLAGS.tf_gpu_thread_mode = 'gpu_private'
FLAGS.datasets_num_private_threads = 32
FLAGS.model_dir = self._get_model_dir(
'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format(
'eager' if eager else 'graph', num_workers, all_reduce_alg))
FLAGS.batch_size = 256 * num_gpus * num_workers
FLAGS.all_reduce_alg = all_reduce_alg
self._run_and_report_benchmark()
def benchmark_eager_8_gpu_1_worker_fp16_ring_tweaked(self):
"""Eager, 8 GPUs per worker, 1 worker, fp16, ring all-reduce."""
self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='ring')
def benchmark_eager_8_gpu_1_worker_fp16_nccl_tweaked(self):
"""Eager, 8 GPUs per worker, 1 worker, fp16, nccl all-reduce."""
self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='nccl')
def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self):
"""Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce."""
self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring')
def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self):
"""Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce."""
self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl')
def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self):
"""Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce."""
self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring')
def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self):
"""Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce."""
self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl')
class Resnet50MultiWorkerKerasBenchmarkSynth(Resnet50MultiWorkerKerasBenchmark):
"""Resnet50 multi-worker synthetic data benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
def_flags = {}
def_flags['skip_eval'] = True
def_flags['report_accuracy_metrics'] = False
def_flags['use_synthetic_data'] = True
def_flags['train_steps'] = 110
def_flags['log_steps'] = 10
super(Resnet50MultiWorkerKerasBenchmarkSynth, self).__init__(
output_dir=output_dir, default_flags=def_flags)
class Resnet50MultiWorkerKerasBenchmarkReal(Resnet50MultiWorkerKerasBenchmark):
"""Resnet50 multi-worker real data benchmark tests."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
def_flags = {}
def_flags['skip_eval'] = True
def_flags['report_accuracy_metrics'] = False
def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
def_flags['train_steps'] = 110
def_flags['log_steps'] = 10
super(Resnet50MultiWorkerKerasBenchmarkReal, self).__init__(
output_dir=output_dir, default_flags=def_flags)
# TODO(kimjaehong): This should also cover other methods of model optimization
# techniques. At that point, this class will be renamed to something like
# 'KerasModelOptimizationAccuracyBase'.
class KerasPruningAccuracyBase(keras_benchmark.KerasBenchmark):
"""Benchmark accuracy tests for pruning method."""
def __init__(self,
output_dir=None,
root_data_dir=None,
default_flags=None,
**kwargs):
"""A accuracy benchmark class for pruning method.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
default_flags: default flags
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more
named arguments before updating the constructor.
"""
if default_flags is None:
default_flags = {}
default_flags['pruning_method'] = 'polynomial_decay'
default_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]
super(KerasPruningAccuracyBase, self).__init__(
output_dir=output_dir,
flag_methods=flag_methods,
default_flags=default_flags,
**kwargs)
def benchmark_8_gpu(self):
"""Test Keras model with eager, dist_strat and 8 GPUs."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.batch_size = 32 * 8
FLAGS.train_epochs = 90
FLAGS.epochs_between_evals = 10
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
self._run_and_report_benchmark()
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self,
top_1_min=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
'RESNET50_FINETUNE_PRUNING'][0],
top_1_max=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
'RESNET50_FINETUNE_PRUNING'][1]):
start_time_sec = time.time()
stats = resnet_imagenet_main.run(flags.FLAGS)
wall_time_sec = time.time() - start_time_sec
super(KerasPruningAccuracyBase, self)._report_benchmark(
stats,
wall_time_sec,
top_1_min=top_1_min,
top_1_max=top_1_max,
total_batch_size=FLAGS.batch_size,
log_steps=100)
class MobilenetV1KerasPruningAccuracy(KerasPruningAccuracyBase):
"""Benchmark accuracy tests for MobilenetV1 with pruning method."""
def __init__(self, root_data_dir=None, **kwargs):
default_flags = {
'model': 'mobilenet',
'optimizer': 'mobilenet_default',
'initial_learning_rate_per_sample': 0.00007,
'pretrained_filepath': tf.train.latest_checkpoint(
os.path.join(root_data_dir, 'mobilenet_v1')),
'pruning_begin_step': 0,
'pruning_end_step': 100000,
'pruning_initial_sparsity': 0.0,
'pruning_final_sparsity': 0.5,
'pruning_frequency': 100,
}
super(MobilenetV1KerasPruningAccuracy, self).__init__(
root_data_dir=root_data_dir,
default_flags=default_flags,
**kwargs)
def _run_and_report_benchmark(self):
super(MobilenetV1KerasPruningAccuracy, self)._run_and_report_benchmark(
top_1_min=\
MODEL_OPTIMIZATION_TOP_1_ACCURACY['MOBILENET_V1_FINETUNE_PRUNING'][0],
top_1_max=\
MODEL_OPTIMIZATION_TOP_1_ACCURACY['MOBILENET_V1_FINETUNE_PRUNING'][1])
class Resnet50KerasPruningAccuracy(KerasPruningAccuracyBase):
"""Benchmark accuracy tests for resnet50 with pruning method."""
def __init__(self, root_data_dir=None, **kwargs):
default_flags = {
'model': 'resnet50_v1.5',
'optimizer': 'mobilenet_default',
'initial_learning_rate_per_sample': 0.0000039,
'pretrained_filepath': tf.train.latest_checkpoint(
os.path.join(root_data_dir, 'resnet50')),
'pruning_begin_step': 0,
'pruning_end_step': 50000,
'pruning_initial_sparsity': 0.0,
'pruning_final_sparsity': 0.5,
'pruning_frequency': 100,
}
super(Resnet50KerasPruningAccuracy, self).__init__(
root_data_dir=root_data_dir,
default_flags=default_flags,
**kwargs)
def _run_and_report_benchmark(self):
super(Resnet50KerasPruningAccuracy, self)._run_and_report_benchmark(
top_1_min=\
MODEL_OPTIMIZATION_TOP_1_ACCURACY['RESNET50_FINETUNE_PRUNING'][0],
top_1_max=\
MODEL_OPTIMIZATION_TOP_1_ACCURACY['RESNET50_FINETUNE_PRUNING'][1])
class KerasPruningBenchmarkRealBase(Resnet50KerasBenchmarkBase):
"""Pruning method benchmarks."""
def __init__(self, root_data_dir=None, default_flags=None, **kwargs):
if default_flags is None:
default_flags = {}
default_flags.update({
'skip_eval': True,
'report_accuracy_metrics': False,
'data_dir': os.path.join(root_data_dir, 'imagenet'),
'train_steps': 110,
'log_steps': 10,
'pruning_method': 'polynomial_decay',
'pruning_begin_step': 0,
'pruning_end_step': 50000,
'pruning_initial_sparsity': 0,
'pruning_final_sparsity': 0.5,
'pruning_frequency': 100,
})
super(KerasPruningBenchmarkRealBase, self).__init__(
default_flags=default_flags, **kwargs)
class MobilenetV1KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase):
"""Pruning method benchmarks for MobilenetV1."""
def __init__(self, **kwargs):
default_flags = {
'model': 'mobilenet',
'optimizer': 'mobilenet_default',
}
super(MobilenetV1KerasPruningBenchmarkReal, self).__init__(
default_flags=default_flags, **kwargs)
class Resnet50KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase):
"""Pruning method benchmarks for resnet50."""
def __init__(self, **kwargs):
default_flags = {
'model': 'resnet50_v1.5',
'optimizer': 'mobilenet_default',
}
super(Resnet50KerasPruningBenchmarkReal, self).__init__(
default_flags=default_flags, **kwargs)
if __name__ == '__main__':
tf.test.main()
| 34.863716
| 80
| 0.705551
|
794eb8898f5e374d904e881a3263ce190b882e25
| 3,108
|
py
|
Python
|
spyne/util/__init__.py
|
PyGuDev/spyne
|
1076f5301c2381b8c077577e3c87855bbe9541fd
|
[
"BSD-3-Clause"
] | 786
|
2015-01-04T10:46:28.000Z
|
2022-03-31T19:24:35.000Z
|
spyne/util/__init__.py
|
uallasleles/spyne
|
1076f5301c2381b8c077577e3c87855bbe9541fd
|
[
"BSD-3-Clause"
] | 248
|
2015-01-01T21:52:47.000Z
|
2022-03-09T08:55:04.000Z
|
spyne/util/__init__.py
|
uallasleles/spyne
|
1076f5301c2381b8c077577e3c87855bbe9541fd
|
[
"BSD-3-Clause"
] | 210
|
2015-01-10T14:20:31.000Z
|
2022-03-09T08:38:43.000Z
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logger = logging.getLogger(__name__)
from spyne.util import six
from spyne.util.coopmt import keepfirst
from spyne.util.coopmt import coroutine
from spyne.util.coopmt import Break
from spyne.util.memo import memoize
from spyne.util.memo import memoize_first
from spyne.util.memo import memoize_ignore
from spyne.util.memo import memoize_ignore_none
from spyne.util.memo import memoize_id
from spyne.util.attrdict import AttrDict
from spyne.util.attrdict import AttrDictColl
from spyne.util.attrdict import DefaultAttrDict
from spyne.util._base import utctime
from spyne.util._base import get_version
try:
import thread
from urllib import splittype, splithost, quote, urlencode
from urllib2 import urlopen, Request, HTTPError
except ImportError: # Python 3
import _thread as thread
from urllib.parse import splittype, splithost, quote, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
def split_url(url):
"""Splits a url into (uri_scheme, host[:port], path)"""
scheme, remainder = splittype(url)
host, path = splithost(remainder)
return scheme.lower(), host, path
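# A minimal usage sketch for split_url (the URL is illustrative):
#     >>> split_url('http://example.com:8080/soap?wsdl')
#     ('http', 'example.com:8080', '/soap?wsdl')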
def sanitize_args(a):
try:
args, kwargs = a
if isinstance(args, tuple) and isinstance(kwargs, dict):
return args, dict(kwargs)
except (TypeError, ValueError):
args, kwargs = (), {}
if a is not None:
if isinstance(a, dict):
args = tuple()
kwargs = a
elif isinstance(a, tuple):
if isinstance(a[-1], dict):
args, kwargs = a[0:-1], a[-1]
else:
args = a
kwargs = {}
return args, kwargs
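# Sketch of the argument shapes sanitize_args() accepts (values are illustrative):
#     sanitize_args(((1, 2), {'x': 3}))  ->  ((1, 2), {'x': 3})
#     sanitize_args({'x': 3})            ->  ((), {'x': 3})
#     sanitize_args((1, 2, 3))           ->  ((1, 2, 3), {})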
if six.PY2:
def _bytes_join(val, joiner=''):
return joiner.join(val)
else:
def _bytes_join(val, joiner=b''):
if isinstance(val, six.binary_type):
return val
return joiner.join(val)
def utf8(s):
if isinstance(s, bytes):
return s.decode('utf8')
if isinstance(s, list):
return [utf8(ss) for ss in s]
if isinstance(s, tuple):
return tuple([utf8(ss) for ss in s])
if isinstance(s, set):
return {utf8(ss) for ss in s}
if isinstance(s, frozenset):
return frozenset([utf8(ss) for ss in s])
return s
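# utf8() decodes bytes recursively inside common containers, e.g. (illustrative):
#     utf8(b'abc')          ->  'abc'
#     utf8([b'a', 'b', 1])  ->  ['a', 'b', 1]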
| 27.026087
| 75
| 0.682754
|
794ebb0850ab5b204f40200028f5352af96f80ec
| 1,284
|
py
|
Python
|
recipes/sota/2019/raw_lm_corpus/filter_distances.py
|
Zilv1128/test1
|
49fbb1392e69b5194c077df9847505ec995b4e3d
|
[
"BSD-3-Clause"
] | 5,921
|
2017-12-29T17:04:46.000Z
|
2021-04-16T00:37:35.000Z
|
recipes/sota/2019/raw_lm_corpus/filter_distances.py
|
piEYj/wav2letter
|
49fbb1392e69b5194c077df9847505ec995b4e3d
|
[
"BSD-3-Clause"
] | 949
|
2018-01-01T06:36:58.000Z
|
2021-04-16T06:49:05.000Z
|
recipes/sota/2019/raw_lm_corpus/filter_distances.py
|
piEYj/wav2letter
|
49fbb1392e69b5194c077df9847505ec995b4e3d
|
[
"BSD-3-Clause"
] | 1,032
|
2017-12-30T09:47:51.000Z
|
2021-04-11T11:40:00.000Z
|
import argparse
import os
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def run(filename, score, distance_ratio):
eprint("Starting with filename ", filename)
with open(filename, "r") as f:
done = 0
for line in f:
done += 1
str1, str2, scoreRaw = line.split("|")
distance = float(scoreRaw)
len1 = len(str1.split())
len2 = len(str2.split())
maxlen = max(len1, len2)
minlen = min(len1, len2)
if (
maxlen - minlen
) / minlen < distance_ratio and distance <= score * maxlen:
print("{s1}|{s2}|{d}".format(s1=str1, s2=str2, d=scoreRaw.strip()))
if done % 1000000 == 0:
eprint(done)
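# Sketch of the expected input, one scored pair per line (numbers are illustrative):
#     some title|some titles|0.5
# With --score 0.3 and --distance_ratio 0.5: both titles have 2 words, so
# (maxlen - minlen) / minlen = 0.0 < 0.5 and 0.5 <= 0.3 * 2, hence the pair is kept.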
if __name__ == "__main__":
parser = argparse.ArgumentParser("Filters levenshtein scored title pairs")
parser.add_argument("--infile", type=str, required=True)
parser.add_argument("--score", type=float, required=True)
parser.add_argument("--distance_ratio", type=float, required=True)
args = parser.parse_args()
if not os.path.exists(args.infile):
raise ValueError("infile not found")
run(args.infile, args.score, args.distance_ratio)
| 30.571429
| 83
| 0.588006
|
794ebc26607374dd7365ea54c929b5620dbb5993
| 14,294
|
py
|
Python
|
tests/inspectdb/tests.py
|
JayTechX/django
|
cede5111bbeea1f02a7d35941a4264c7ff95df0a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/inspectdb/tests.py
|
JayTechX/django
|
cede5111bbeea1f02a7d35941a4264c7ff95df0a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/inspectdb/tests.py
|
JayTechX/django
|
cede5111bbeea1f02a7d35941a4264c7ff95df0a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import re
from io import StringIO
from unittest import mock, skipUnless
from django.core.management import call_command
from django.db import connection
from django.db.backends.base.introspection import TableInfo
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import ColumnTypes
def inspectdb_tables_only(table_name):
"""
Limit introspection to tables created for models of this app.
Some databases such as Oracle are extremely slow at introspection.
"""
return table_name.startswith('inspectdb_')
def special_table_only(table_name):
return table_name.startswith('inspectdb_special')
class InspectDBTestCase(TestCase):
def test_stealth_table_name_filter_option(self):
out = StringIO()
call_command('inspectdb', table_name_filter=inspectdb_tables_only, stdout=out)
error_message = "inspectdb has examined a table that should have been filtered out."
# contrib.contenttypes is one of the apps always installed when running
# the Django test suite, check that one of its tables hasn't been
# inspected
self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)
def test_table_option(self):
"""
inspectdb can inspect a subset of tables by passing the table names as
arguments.
"""
out = StringIO()
call_command('inspectdb', 'inspectdb_people', stdout=out)
output = out.getvalue()
self.assertIn('class InspectdbPeople(models.Model):', output)
self.assertNotIn("InspectdbPeopledata", output)
def make_field_type_asserter(self):
"""Call inspectdb and return a function to validate a field type in its output"""
out = StringIO()
call_command('inspectdb', 'inspectdb_columntypes', stdout=out)
output = out.getvalue()
def assertFieldType(name, definition):
out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
self.assertEqual(definition, out_def)
return assertFieldType
def test_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
# Inspecting Oracle DB doesn't produce correct results (#19884):
# - it reports fields as blank=True when they aren't.
if not connection.features.interprets_empty_strings_as_nulls:
assertFieldType('char_field', "models.CharField(max_length=10)")
assertFieldType('null_char_field', "models.CharField(max_length=10, blank=True, null=True)")
assertFieldType('email_field', "models.CharField(max_length=254)")
assertFieldType('file_field', "models.CharField(max_length=100)")
assertFieldType('file_path_field', "models.CharField(max_length=100)")
assertFieldType('slug_field', "models.CharField(max_length=50)")
assertFieldType('text_field', "models.TextField()")
assertFieldType('url_field', "models.CharField(max_length=200)")
assertFieldType('date_field', "models.DateField()")
assertFieldType('date_time_field', "models.DateTimeField()")
if connection.features.can_introspect_ip_address_field:
assertFieldType('gen_ip_address_field', "models.GenericIPAddressField()")
elif not connection.features.interprets_empty_strings_as_nulls:
assertFieldType('gen_ip_address_field', "models.CharField(max_length=39)")
if connection.features.can_introspect_time_field:
assertFieldType('time_field', "models.TimeField()")
if connection.features.has_native_uuid_field:
assertFieldType('uuid_field', "models.UUIDField()")
elif not connection.features.interprets_empty_strings_as_nulls:
assertFieldType('uuid_field', "models.CharField(max_length=32)")
def test_number_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
if not connection.features.can_introspect_autofield:
assertFieldType('id', "models.IntegerField(primary_key=True) # AutoField?")
if connection.features.can_introspect_big_integer_field:
assertFieldType('big_int_field', "models.BigIntegerField()")
else:
assertFieldType('big_int_field', "models.IntegerField()")
bool_field = ColumnTypes._meta.get_field('bool_field')
bool_field_type = connection.features.introspected_boolean_field_type(bool_field)
assertFieldType('bool_field', "models.{}()".format(bool_field_type))
null_bool_field = ColumnTypes._meta.get_field('null_bool_field')
null_bool_field_type = connection.features.introspected_boolean_field_type(null_bool_field)
if 'BooleanField' in null_bool_field_type:
assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
else:
assertFieldType('null_bool_field', "models.{}(blank=True, null=True)".format(null_bool_field_type))
if connection.features.can_introspect_decimal_field:
assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
else: # Guessed arguments on SQLite, see #5014
assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
"# max_digits and decimal_places have been guessed, "
"as this database handles decimal fields as float")
assertFieldType('float_field', "models.FloatField()")
assertFieldType('int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
assertFieldType('pos_int_field', "models.PositiveIntegerField()")
else:
assertFieldType('pos_int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.PositiveIntegerField()")
else:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.IntegerField()")
if connection.features.can_introspect_small_integer_field:
assertFieldType('small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('small_int_field', "models.IntegerField()")
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_attribute_name_not_python_keyword(self):
out = StringIO()
call_command('inspectdb', table_name_filter=inspectdb_tables_only, stdout=out)
output = out.getvalue()
error_message = "inspectdb generated an attribute name which is a python keyword"
# Recursive foreign keys should be set to 'self'
self.assertIn("parent = models.ForeignKey('self', models.DO_NOTHING)", output)
self.assertNotIn(
"from = models.ForeignKey(InspectdbPeople, models.DO_NOTHING)",
output,
msg=error_message,
)
# As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
self.assertIn(
"from_field = models.ForeignKey('InspectdbPeople', models.DO_NOTHING, db_column='from_id')",
output,
)
self.assertIn(
"people_pk = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, primary_key=True)",
output,
)
self.assertIn(
"people_unique = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, unique=True)",
output,
)
def test_digits_column_name_introspection(self):
"""Introspection of column names consist/start with digits (#16536/#17676)"""
out = StringIO()
call_command('inspectdb', 'inspectdb_digitsincolumnname', stdout=out)
output = out.getvalue()
error_message = "inspectdb generated a model field name which is a number"
self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
self.assertIn("number_123 = models.CharField", output)
error_message = "inspectdb generated a model field name which starts with a digit"
self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
self.assertIn("number_4extra = models.CharField", output)
self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
self.assertIn("number_45extra = models.CharField", output)
def test_special_column_name_introspection(self):
"""
Introspection of column names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb', table_name_filter=special_table_only, stdout=out)
output = out.getvalue()
base_name = 'field' if connection.features.uppercases_column_names else 'Field'
self.assertIn("field = models.IntegerField()", output)
self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
self.assertIn("tamaño = models.IntegerField()", output)
def test_table_name_introspection(self):
"""
Introspection of table names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb', table_name_filter=special_table_only, stdout=out)
output = out.getvalue()
self.assertIn("class InspectdbSpecialTableName(models.Model):", output)
def test_managed_models(self):
"""By default the command generates models with `Meta.managed = False` (#14305)"""
out = StringIO()
call_command('inspectdb', 'inspectdb_columntypes', stdout=out)
output = out.getvalue()
self.longMessage = False
self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')
def test_unique_together_meta(self):
out = StringIO()
call_command('inspectdb', 'inspectdb_uniquetogether', stdout=out)
output = out.getvalue()
unique_re = re.compile(r'.*unique_together = \((.+),\).*')
unique_together_match = re.findall(unique_re, output)
# There should be one unique_together tuple.
self.assertEqual(len(unique_together_match), 1)
fields = unique_together_match[0]
# Fields with db_column = field name.
self.assertIn("('field1', 'field2')", fields)
# Fields from columns whose names are Python keywords.
self.assertIn("('field1', 'field2')", fields)
# Fields whose names normalize to the same Python field name and hence
# are given an integer suffix.
self.assertIn("('non_unique_column', 'non_unique_column_0')", fields)
@skipUnless(connection.vendor == 'sqlite',
"Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test")
def test_custom_fields(self):
"""
Introspection of columns with a custom field (#21090)
"""
out = StringIO()
orig_data_types_reverse = connection.introspection.data_types_reverse
try:
connection.introspection.data_types_reverse = {
'text': 'myfields.TextField',
'bigint': 'BigIntegerField',
}
call_command('inspectdb', 'inspectdb_columntypes', stdout=out)
output = out.getvalue()
self.assertIn("text_field = myfields.TextField()", output)
self.assertIn("big_int_field = models.BigIntegerField()", output)
finally:
connection.introspection.data_types_reverse = orig_data_types_reverse
def test_introspection_errors(self):
"""
Introspection errors should not crash the command, and the error should
be visible in the output.
"""
out = StringIO()
with mock.patch('django.db.connection.introspection.get_table_list',
return_value=[TableInfo(name='nonexistent', type='t')]):
call_command('inspectdb', stdout=out)
output = out.getvalue()
self.assertIn("# Unable to inspect table 'nonexistent'", output)
# The error message depends on the backend
self.assertIn("# The error was:", output)
class InspectDBTransactionalTests(TransactionTestCase):
available_apps = None
def test_include_views(self):
"""inspectdb --include-views creates models for database views."""
with connection.cursor() as cursor:
cursor.execute(
'CREATE VIEW inspectdb_people_view AS '
'SELECT id, name FROM inspectdb_people'
)
out = StringIO()
view_model = 'class InspectdbPeopleView(models.Model):'
view_managed = 'managed = False # Created from a view.'
try:
call_command('inspectdb', table_name_filter=inspectdb_tables_only, stdout=out)
no_views_output = out.getvalue()
self.assertNotIn(view_model, no_views_output)
self.assertNotIn(view_managed, no_views_output)
call_command('inspectdb', table_name_filter=inspectdb_tables_only, include_views=True, stdout=out)
with_views_output = out.getvalue()
self.assertIn(view_model, with_views_output)
self.assertIn(view_managed, with_views_output)
finally:
with connection.cursor() as cursor:
cursor.execute('DROP VIEW inspectdb_people_view')
| 48.454237
| 111
| 0.673779
|
794ebc357b4d1f0900f6df7bc0c1a4ed3fdc3c63
| 5,903
|
py
|
Python
|
tests/test_appelb.py
|
nyetsche/cloud-custodian
|
e4e3e538f1242a7f5516b557f91d269035141b06
|
[
"Apache-2.0"
] | null | null | null |
tests/test_appelb.py
|
nyetsche/cloud-custodian
|
e4e3e538f1242a7f5516b557f91d269035141b06
|
[
"Apache-2.0"
] | null | null | null |
tests/test_appelb.py
|
nyetsche/cloud-custodian
|
e4e3e538f1242a7f5516b557f91d269035141b06
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import BaseTest
from c7n.executor import MainThreadExecutor
from c7n.resources.appelb import AppELB
class AppELBTest(BaseTest):
def test_appelb_simple(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-simple',
'resource': 'app-elb'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_appelb_simple_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-simple-filter',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_tags_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-tags-filter',
'resource': 'app-elb',
'filters': [{"tag:KEY1": "VALUE1"}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 'appelb-tags-filter',
'resource': 'app-elb',
'filters': [{"tag:KEY1": "VALUE2"}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_appelb_is_ssl_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-is-ssl-filter',
'resource': 'app-elb',
'filters': ['is-ssl']},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_appelb_default_vpc_filter(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_simple')
p = self.load_policy({
'name': 'appelb-default-vpc-filter',
'resource': 'app-elb',
'filters': ['default-vpc']},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
def test_appelb_add_tag(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_add_tag')
p = self.load_policy({
'name': 'appelb-add-tag',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}],
'actions': [
{'type': 'tag', 'key': 'KEY42', 'value': 'VALUE99'}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_remove_tag(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_remove_tag')
p = self.load_policy({
'name': 'appelb-remove-tag',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}],
'actions': [
{'type': 'remove-tag', 'tags': ['KEY42']}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_mark_for_delete(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_mark_for_delete')
p = self.load_policy({
'name': 'appelb-mark-for-delete',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-1'}],
'actions': [
{'type': 'mark-for-op', 'op': 'delete',
'tag': 'custodian_next', 'days': 1}]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_appelb_delete(self):
self.patch(AppELB, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_appelb_delete')
p = self.load_policy({
'name': 'appelb-delete',
'resource': 'app-elb',
'filters': [
{'type': 'value',
'key': 'LoadBalancerName',
'value': 'alb-2'}],
'actions': [
{'type': 'delete'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
| 38.835526
| 80
| 0.579028
|
794ebc837fd4165c92820a7ecf52810baf357cb9
| 6,403
|
py
|
Python
|
examples/offline/atari_cql.py
|
BFAnas/tianshou
|
6e86a0bed7d1117c5ad6a421b483b45a6adfe336
|
[
"MIT"
] | null | null | null |
examples/offline/atari_cql.py
|
BFAnas/tianshou
|
6e86a0bed7d1117c5ad6a421b483b45a6adfe336
|
[
"MIT"
] | null | null | null |
examples/offline/atari_cql.py
|
BFAnas/tianshou
|
6e86a0bed7d1117c5ad6a421b483b45a6adfe336
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import datetime
import os
import pickle
import pprint
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from examples.atari.atari_network import QRDQN
from examples.atari.atari_wrapper import make_atari_env
from examples.offline.utils import load_buffer
from tianshou.data import Collector, VectorReplayBuffer
from tianshou.policy import DiscreteCQLPolicy
from tianshou.trainer import offline_trainer
from tianshou.utils import TensorboardLogger, WandbLogger
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="PongNoFrameskip-v4")
parser.add_argument("--seed", type=int, default=1626)
parser.add_argument("--eps-test", type=float, default=0.001)
parser.add_argument("--lr", type=float, default=0.0001)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--num-quantiles", type=int, default=200)
parser.add_argument("--n-step", type=int, default=1)
parser.add_argument("--target-update-freq", type=int, default=500)
parser.add_argument("--min-q-weight", type=float, default=10.)
parser.add_argument("--epoch", type=int, default=100)
parser.add_argument("--update-per-epoch", type=int, default=10000)
parser.add_argument("--batch-size", type=int, default=32)
parser.add_argument("--hidden-sizes", type=int, nargs="*", default=[512])
parser.add_argument("--test-num", type=int, default=10)
parser.add_argument("--frames-stack", type=int, default=4)
parser.add_argument("--scale-obs", type=int, default=0)
parser.add_argument("--logdir", type=str, default="log")
parser.add_argument("--render", type=float, default=0.)
parser.add_argument("--resume-path", type=str, default=None)
parser.add_argument("--resume-id", type=str, default=None)
parser.add_argument(
"--logger",
type=str,
default="tensorboard",
choices=["tensorboard", "wandb"],
)
parser.add_argument("--wandb-project", type=str, default="offline_atari.benchmark")
parser.add_argument(
"--watch",
default=False,
action="store_true",
help="watch the play of pre-trained policy only"
)
parser.add_argument("--log-interval", type=int, default=100)
parser.add_argument(
"--load-buffer-name", type=str, default="./expert_DQN_PongNoFrameskip-v4.hdf5"
)
parser.add_argument(
"--buffer-from-rl-unplugged", action="store_true", default=False
)
parser.add_argument(
"--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu"
)
args = parser.parse_known_args()[0]
return args
def test_discrete_cql(args=get_args()):
# envs
env, _, test_envs = make_atari_env(
args.task,
args.seed,
1,
args.test_num,
scale=args.scale_obs,
frame_stack=args.frames_stack,
)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
# should be N_FRAMES x H x W
print("Observations shape:", args.state_shape)
print("Actions shape:", args.action_shape)
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# model
net = QRDQN(*args.state_shape, args.action_shape, args.num_quantiles, args.device)
optim = torch.optim.Adam(net.parameters(), lr=args.lr)
# define policy
policy = DiscreteCQLPolicy(
net,
optim,
args.gamma,
args.num_quantiles,
args.n_step,
args.target_update_freq,
min_q_weight=args.min_q_weight,
).to(args.device)
# load a previous policy
if args.resume_path:
policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))
print("Loaded agent from: ", args.resume_path)
# buffer
if args.buffer_from_rl_unplugged:
buffer = load_buffer(args.load_buffer_name)
else:
assert os.path.exists(args.load_buffer_name), \
"Please run atari_dqn.py first to get expert's data buffer."
if args.load_buffer_name.endswith(".pkl"):
buffer = pickle.load(open(args.load_buffer_name, "rb"))
elif args.load_buffer_name.endswith(".hdf5"):
buffer = VectorReplayBuffer.load_hdf5(args.load_buffer_name)
else:
print(f"Unknown buffer format: {args.load_buffer_name}")
exit(0)
print("Replay buffer size:", len(buffer), flush=True)
# collector
test_collector = Collector(policy, test_envs, exploration_noise=True)
# log
now = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
args.algo_name = "cql"
log_name = os.path.join(args.task, args.algo_name, str(args.seed), now)
log_path = os.path.join(args.logdir, log_name)
# logger
if args.logger == "wandb":
logger = WandbLogger(
save_interval=1,
name=log_name.replace(os.path.sep, "__"),
run_id=args.resume_id,
config=args,
project=args.wandb_project,
)
writer = SummaryWriter(log_path)
writer.add_text("args", str(args))
if args.logger == "tensorboard":
logger = TensorboardLogger(writer)
else: # wandb
logger.load(writer)
def save_best_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))
def stop_fn(mean_rewards):
return False
# watch agent's performance
def watch():
print("Setup test envs ...")
policy.eval()
policy.set_eps(args.eps_test)
test_envs.seed(args.seed)
print("Testing agent ...")
test_collector.reset()
result = test_collector.collect(n_episode=args.test_num, render=args.render)
pprint.pprint(result)
rew = result["rews"].mean()
print(f'Mean reward (over {result["n/ep"]} episodes): {rew}')
if args.watch:
watch()
exit(0)
result = offline_trainer(
policy,
buffer,
test_collector,
args.epoch,
args.update_per_epoch,
args.test_num,
args.batch_size,
stop_fn=stop_fn,
save_best_fn=save_best_fn,
logger=logger,
)
pprint.pprint(result)
watch()
if __name__ == "__main__":
test_discrete_cql(get_args())
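# Example invocation (paths and task are illustrative; the buffer file must already
# exist, see the assert above):
#     python atari_cql.py --task PongNoFrameskip-v4 \
#         --load-buffer-name ./expert_DQN_PongNoFrameskip-v4.hdf5 --epoch 100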
| 34.058511
| 87
| 0.658129
|
794ebda12bc12785e1d67a516bc488f2cac0229b
| 424
|
py
|
Python
|
setup_s3_bucket.py
|
chambridge/aws-cost-mgmt-access
|
994d3ed62601469d2d68a67e6806a3601178328a
|
[
"MIT"
] | null | null | null |
setup_s3_bucket.py
|
chambridge/aws-cost-mgmt-access
|
994d3ed62601469d2d68a67e6806a3601178328a
|
[
"MIT"
] | null | null | null |
setup_s3_bucket.py
|
chambridge/aws-cost-mgmt-access
|
994d3ed62601469d2d68a67e6806a3601178328a
|
[
"MIT"
] | null | null | null |
import os
from cloud.aws_service import AwsService
def main():
"""Execute script."""
region = os.environ.get('REGION', 'us-east-1')
s3_bucket = os.environ.get('S3_BUCKET', 'costmgmtacct1234')
aws = AwsService()
result = aws.create_bucket(s3_bucket, region)
if result:
print(f'S3 bucket {s3_bucket} was created.')
else:
print(f'Failed creating S3 bucket {s3_bucket}.')
main()
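# Typical invocation (bucket name is illustrative):
#     S3_BUCKET=costmgmt-demo REGION=us-east-1 python setup_s3_bucket.py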
| 22.315789
| 63
| 0.653302
|
794ebdfc927609f7662ade182d468e4428489b2d
| 2,839
|
py
|
Python
|
test/SConscript/SConscriptChdir.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | 1
|
2019-09-18T06:37:02.000Z
|
2019-09-18T06:37:02.000Z
|
test/SConscript/SConscriptChdir.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
test/SConscript/SConscriptChdir.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/SConscript/SConscriptChdir.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.subdir('dir1', 'dir2', 'dir3', 'dir4', 'dir5')
test.write('SConstruct', """
env = Environment()
SConscript('dir1/SConscript')
SConscriptChdir(1)
SConscript('dir2/SConscript')
SConscriptChdir(0)
SConscript('dir3/SConscript')
env.SConscriptChdir(1)
SConscript('dir4/SConscript')
env.SConscriptChdir(0)
SConscript('dir5/SConscript')
""")
test.write(['dir1', 'SConscript'], """
exec(open("create_test.py", 'rU').read())
""")
test.write(['dir2', 'SConscript'], """
exec(open("create_test.py", 'rU').read())
""")
test.write(['dir3', 'SConscript'], """
import os.path
name = os.path.join('dir3', 'create_test.py')
exec(open(name, 'rU').read())
""")
test.write(['dir4', 'SConscript'], """
exec(open("create_test.py", 'rU').read())
""")
test.write(['dir5', 'SConscript'], """
import os.path
name = os.path.join('dir5', 'create_test.py')
exec(open(name, 'rU').read())
""")
for dir in ['dir1', 'dir2', 'dir3','dir4', 'dir5']:
test.write([dir, 'create_test.py'], r"""
f = open("test.txt", "ab")
f.write("This is the %s test.\n")
f.close()
""" % dir)
test.run(arguments=".", stderr=None)
test.fail_test(test.read(['dir1', 'test.txt']) != "This is the dir1 test.\n")
test.fail_test(test.read(['dir2', 'test.txt']) != "This is the dir2 test.\n")
test.fail_test(test.read('test.txt') != "This is the dir3 test.\nThis is the dir5 test.\n")
test.fail_test(test.read(['dir4', 'test.txt']) != "This is the dir4 test.\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 31.197802
| 108
| 0.704826
|
794ebea621c445d8ac4a72921d8c8a8062157255
| 719
|
py
|
Python
|
tests/__init__.py
|
gwu-libraries/sfm-web-harvester
|
6959cae698328cd344f569dead594d70749aea39
|
[
"MIT"
] | 3
|
2016-03-15T20:22:02.000Z
|
2020-09-15T12:25:20.000Z
|
tests/__init__.py
|
gwu-libraries/sfm-web-harvester
|
6959cae698328cd344f569dead594d70749aea39
|
[
"MIT"
] | 3
|
2016-10-09T23:23:10.000Z
|
2017-04-05T15:15:55.000Z
|
tests/__init__.py
|
gwu-libraries/sfm-web-harvester
|
6959cae698328cd344f569dead594d70749aea39
|
[
"MIT"
] | 1
|
2020-09-15T09:43:28.000Z
|
2020-09-15T09:43:28.000Z
|
import logging
import unittest
import os
import socket
mq_port_available = True
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(("mq", 5672))
except socket.error:
mq_port_available = False
heritrix_port_available = True
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(("heritrix", 8443))
except socket.error:
heritrix_port_available = False
mq_username = os.environ.get("RABBITMQ_USER")
mq_password = os.environ.get("RABBITMQ_PASSWORD")
integration_env_available = mq_port_available and heritrix_port_available
class TestCase(unittest.TestCase):
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("web_harvester").setLevel(logging.DEBUG)
| 23.966667
| 73
| 0.781641
|
794ebf3929e8f6b93cd25f1f5794264c90269d3e
| 1,612
|
py
|
Python
|
instructions/base/instruction.py
|
kimi641/pyJVM
|
9e2b2392044a8ddd41ff8dda18a26e307776ae34
|
[
"MIT"
] | null | null | null |
instructions/base/instruction.py
|
kimi641/pyJVM
|
9e2b2392044a8ddd41ff8dda18a26e307776ae34
|
[
"MIT"
] | 1
|
2021-01-21T09:38:24.000Z
|
2021-01-21T09:38:24.000Z
|
instructions/base/instruction.py
|
kimi641/pyJVM
|
9e2b2392044a8ddd41ff8dda18a26e307776ae34
|
[
"MIT"
] | null | null | null |
import struct
from abc import ABCMeta, abstractmethod
class BytecodeReader:
def Reset(self, code, pc:int):
self.code = code
self.pc = pc
def ReadUint8(self):
i = self.code[self.pc]
self.pc += 1
return i
def ReadInt8(self):
byte = struct.pack('>B',self.ReadUint8())
return struct.unpack('>b',byte)[0]
def ReadUint16(self):
bits = self.code[self.pc:self.pc+2]
self.pc += 2
return struct.unpack('>H',bits)[0]
def ReadInt16(self):
bits = self.code[self.pc:self.pc+2]
self.pc += 2
return struct.unpack('>h',bits)[0]
def ReadInt32(self):
bits = self.code[self.pc:self.pc+4]
self.pc += 4
return struct.unpack('>i',bits)[0]
    def ReadInt32s(self, n):
        # Read n consecutive big-endian int32 values (e.g. a tableswitch jump table).
        return [self.ReadInt32() for _ in range(n)]
    def SkipPadding(self):
        # tableswitch/lookupswitch operands start on a 4-byte boundary; skip the padding.
        while self.pc % 4 != 0:
            self.ReadUint8()
@property
def PC(self):
return self.pc
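# Minimal usage sketch (the bytecode bytes are illustrative): decoding `bipush 5`.
#     reader = BytecodeReader()
#     reader.Reset(bytes([0x10, 0x05]), 0)
#     opcode = reader.ReadUint8()    # 0x10 (bipush)
#     operand = reader.ReadInt8()    # 5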
class Instruction(metaclass=ABCMeta):
@abstractmethod
def FetchOperands(self, reader:BytecodeReader):
pass
@abstractmethod
def Execute(self, frame):
pass
class NoOperandsInstruction(Instruction):
def FetchOperands(self, reader:BytecodeReader):
pass
class BranchInstruction(Instruction):
def FetchOperands(self, reader:BytecodeReader):
self.Offset = reader.ReadInt16()
class Index8Instruction(Instruction):
def FetchOperands(self, reader:BytecodeReader):
self.Index = reader.ReadUint8()
class Index16Instruction(Instruction):
def FetchOperands(self, reader:BytecodeReader):
self.Index = reader.ReadUint16()
| 23.705882
| 51
| 0.625931
|
794ebff01c952c7fa43eb426af3eabb14b3e914d
| 1,777
|
py
|
Python
|
src/rogerthat/bizz/communities/homescreen/models.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/rogerthat/bizz/communities/homescreen/models.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/rogerthat/bizz/communities/homescreen/models.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from google.appengine.ext import ndb
from rogerthat.bizz.communities.models import Community
from rogerthat.models import NdbModel
class CommunityHomeScreen(NdbModel):
data = ndb.JsonProperty() # See oca.models.HomeScreen
update_date = ndb.DateTimeProperty(auto_now=True)
community_id = ndb.IntegerProperty()
@property
def id(self):
return self.key.id()
@classmethod
def create_key(cls, community_id, home_screen_id):
return ndb.Key(cls, home_screen_id, parent=Community.create_key(community_id))
@classmethod
def list_by_community(cls, community_id):
return cls.query().filter(cls.community_id == community_id)
class HomeScreenTestUser(NdbModel):
reset_date = ndb.DateTimeProperty(required=True)
community_id = ndb.IntegerProperty(required=True)
home_screen_id = ndb.StringProperty(required=True)
@property
def user_email(self):
return self.key.id()
@classmethod
def create_key(cls, user_id):
return ndb.Key(cls, user_id)
@classmethod
def list_expired(cls, date):
return cls.query().filter(cls.reset_date < date)
| 31.175439
| 86
| 0.728194
|
794ec05fbfab3684fd001f166134ec61a8324527
| 250
|
py
|
Python
|
FS/Pre-FS_programs/Day11~26Oct-2021_Mon/tep.py
|
Balaji-Ganesh/CompetitiveProgramming
|
8a5b44202e3d98e507a6ac27007273145d7fd56f
|
[
"MIT"
] | null | null | null |
FS/Pre-FS_programs/Day11~26Oct-2021_Mon/tep.py
|
Balaji-Ganesh/CompetitiveProgramming
|
8a5b44202e3d98e507a6ac27007273145d7fd56f
|
[
"MIT"
] | null | null | null |
FS/Pre-FS_programs/Day11~26Oct-2021_Mon/tep.py
|
Balaji-Ganesh/CompetitiveProgramming
|
8a5b44202e3d98e507a6ac27007273145d7fd56f
|
[
"MIT"
] | null | null | null |
# num_salaries = int(input())
salaries = sorted(set(map(int, input().split())))  # sort numerically, not lexicographically
print(salaries[::-1])
if len(salaries) < 3:
    print(salaries[::-1][-1])
else:
    print(salaries[::-1][2])
"""
Integer n1 = 10
n2 = 10
n1 = n1+ 5
sol(n1+n2);
"""
| 15.625
| 43
| 0.544
|
794ec05ff421e48eb7607244e84d071e3411a7d6
| 667
|
py
|
Python
|
backend/manage.py
|
DRP-33/drp-name-in-progress
|
47518bb77090a9fbd87561c4fc37850008e1c782
|
[
"MIT"
] | null | null | null |
backend/manage.py
|
DRP-33/drp-name-in-progress
|
47518bb77090a9fbd87561c4fc37850008e1c782
|
[
"MIT"
] | null | null | null |
backend/manage.py
|
DRP-33/drp-name-in-progress
|
47518bb77090a9fbd87561c4fc37850008e1c782
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website.settings.dev')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29
| 75
| 0.67916
|
794ec0eb75dd5e81b6891e29ada939c059b4aeef
| 533
|
py
|
Python
|
server/insertHabit.py
|
asaadielhassan/revue
|
ed12d2d63db35f6bb510c88660d904a0eb0e916d
|
[
"MIT"
] | null | null | null |
server/insertHabit.py
|
asaadielhassan/revue
|
ed12d2d63db35f6bb510c88660d904a0eb0e916d
|
[
"MIT"
] | null | null | null |
server/insertHabit.py
|
asaadielhassan/revue
|
ed12d2d63db35f6bb510c88660d904a0eb0e916d
|
[
"MIT"
] | 1
|
2021-04-06T22:44:30.000Z
|
2021-04-06T22:44:30.000Z
|
import pymongo
import habits
db = None
def connectDB():
    # Expose the database handle at module level so insertHabit() can use it.
    global db
    try:
        conn = pymongo.MongoClient()
        print("Successfully connected")
    except Exception:
        print("Could not connect")
        return
    db = conn.database
def insertHabit(user, habit):
entry = {
"user":user,
"name":habit.getName(),
"description":habit.getDescription(),
"days":habit.getDays(),
"start_Day":habit.getStartDay(),
"start_Date":habit.getStartDate(),
"end_Date":habit.getEndDate()
}
db.habits.insert_one(entry)
| 21.32
| 45
| 0.594747
|
794ec1ba4396ff8166481d09f4b4546fb2363f10
| 1,208
|
py
|
Python
|
grouper/fe/handlers/group_leave.py
|
lfaraone/grouper
|
7df5eda8003a0b4a9ba7f0dcb044ae1e4710b171
|
[
"Apache-2.0"
] | null | null | null |
grouper/fe/handlers/group_leave.py
|
lfaraone/grouper
|
7df5eda8003a0b4a9ba7f0dcb044ae1e4710b171
|
[
"Apache-2.0"
] | 1
|
2016-02-18T18:55:29.000Z
|
2016-02-18T18:55:29.000Z
|
grouper/fe/handlers/group_leave.py
|
lfaraone/grouper
|
7df5eda8003a0b4a9ba7f0dcb044ae1e4710b171
|
[
"Apache-2.0"
] | null | null | null |
from grouper.fe.util import GrouperHandler
from grouper.model_soup import Group
from grouper.models.audit_log import AuditLog
from grouper.user import user_role
class GroupLeave(GrouperHandler):
def get(self, group_id=None, name=None):
group = Group.get(self.session, group_id, name)
if not group:
return self.notfound()
members = group.my_members()
if not user_role(self.current_user, members):
return self.forbidden()
return self.render(
"group-leave.html", group=group
)
def post(self, group_id=None, name=None):
group = Group.get(self.session, group_id, name)
if not group:
return self.notfound()
members = group.my_members()
if not user_role(self.current_user, members):
return self.forbidden()
group.revoke_member(self.current_user, self.current_user, "User self-revoked.")
AuditLog.log(self.session, self.current_user.id, 'leave_group',
'{} left the group.'.format(self.current_user.name),
on_group_id=group.id)
return self.redirect("/groups/{}?refresh=yes".format(group.name))
| 32.648649
| 87
| 0.640728
|
794ec449bd8ecfc16514c7942b9e9f20b2fee367
| 6,385
|
py
|
Python
|
rest_framework_datatables/filters.py
|
BenjiWilson/django-rest-framework-datatables
|
213b344b8159445703152b93856d034a59bb56eb
|
[
"MIT"
] | null | null | null |
rest_framework_datatables/filters.py
|
BenjiWilson/django-rest-framework-datatables
|
213b344b8159445703152b93856d034a59bb56eb
|
[
"MIT"
] | null | null | null |
rest_framework_datatables/filters.py
|
BenjiWilson/django-rest-framework-datatables
|
213b344b8159445703152b93856d034a59bb56eb
|
[
"MIT"
] | null | null | null |
import re
from copy import deepcopy
from django.db.models import Q
from rest_framework.filters import BaseFilterBackend
class DatatablesFilterBackend(BaseFilterBackend):
"""
Filter that works with datatables params.
"""
def filter_queryset(self, request, queryset, view):
if request.accepted_renderer.format != 'datatables':
return queryset
# total_count = view.get_queryset().count()
total_count = 100000000
if len(getattr(view, 'filter_backends', [])) > 1:
# case of a view with more than 1 filter backend
# filtered_count_before = queryset.count()
filtered_count_before = 100000000
else:
filtered_count_before = total_count
# set the queryset count as an attribute of the view for later
# TODO: find a better way than this hack
setattr(view, '_datatables_total_count', total_count)
# parse params
if request.method == 'POST':
request_data = request.data
else:
request_data = request.query_params
getter = request_data.get
fields = self.get_fields(getter)
ordering = self.get_ordering(getter, fields)
search_value = getter('search[value]')
search_regex = getter('search[regex]') == 'true'
# filter queryset
q = Q()
for f in fields:
if not f['searchable']:
continue
if search_value and search_value != 'false':
if search_regex:
if self.is_valid_regex(search_value):
# iterate through the list created from the 'name'
# param and create a string of 'ior' Q() objects.
for x in f['name']:
q |= Q(**{'%s__iregex' % x: search_value})
else:
# same as above.
for x in f['name']:
q |= Q(**{'%s__icontains' % x: search_value})
f_search_value = f.get('search_value')
f_search_regex = f.get('search_regex') == 'true'
if f_search_value:
if f_search_regex:
if self.is_valid_regex(f_search_value):
# create a temporary q variable to hold the Q()
# objects adhering to the field's name criteria.
temp_q = Q()
for x in f['name']:
temp_q |= Q(**{'%s__iregex' % x: f_search_value})
# Use deepcopy() to transfer them to the global Q()
# object. Deepcopy() necessary, since the var will be
# reinstantiated next iteration.
q = q & deepcopy(temp_q)
else:
temp_q = Q()
for x in f['name']:
temp_q |= Q(**{'%s__icontains' % x: f_search_value})
q = q & deepcopy(temp_q)
if q:
queryset = queryset.filter(q)
# filtered_count = queryset.count()
filtered_count = 1000000000
else:
filtered_count = filtered_count_before
# set the queryset count as an attribute of the view for later
# TODO: maybe find a better way than this hack ?
setattr(view, '_datatables_filtered_count', filtered_count)
# order queryset
if len(ordering):
if hasattr(view, 'datatables_additional_order_by'):
additional = view.datatables_additional_order_by
# Django will actually only take the first occurrence if the
# same column is added multiple times in an order_by, but it
# feels cleaner to double check for duplicate anyway.
if not any((o[1:] if o[0] == '-' else o) == additional
for o in ordering):
ordering.append(additional)
queryset = queryset.order_by(*ordering)
return queryset
def get_fields(self, getter):
fields = []
i = 0
while True:
col = 'columns[%d][%s]'
data = getter(col % (i, 'data'))
if data == "": # null or empty string on datatables (JS) side
fields.append({'searchable': False, 'orderable': False})
i += 1
continue
# break out only when there are no more fields to get.
if data is None:
break
name = getter(col % (i, 'name'))
if not name:
name = data
search_col = col % (i, 'search')
# to be able to search across multiple fields (e.g. to search
# through concatenated names), we create a list of the name field,
# replacing dot notation with double-underscores and splitting
# along the commas.
field = {
'name': [
n.lstrip() for n in name.replace('.', '__').split(',')
],
'data': data,
'searchable': getter(col % (i, 'searchable')) == 'true',
'orderable': getter(col % (i, 'orderable')) == 'true',
'search_value': getter('%s[%s]' % (search_col, 'value')),
'search_regex': getter('%s[%s]' % (search_col, 'regex')),
}
fields.append(field)
i += 1
return fields
def get_ordering(self, getter, fields):
ordering = []
i = 0
while True:
col = 'order[%d][%s]'
idx = getter(col % (i, 'column'))
if idx is None:
break
try:
field = fields[int(idx)]
except IndexError:
i += 1
continue
if not field['orderable']:
i += 1
continue
dir_ = getter(col % (i, 'dir'), 'asc')
ordering.append('%s%s' % (
'-' if dir_ == 'desc' else '',
field['name'][0]
))
i += 1
return ordering
    @classmethod
    def is_valid_regex(cls, regex):
try:
re.compile(regex)
return True
except re.error:
return False
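# Illustrative shape of the flat datatables request parameters this backend parses
# (parameter names follow the datatables protocol; values are made up):
#     columns[0][data]=name         columns[0][searchable]=true
#     columns[0][orderable]=true    columns[0][search][value]=
#     order[0][column]=0            order[0][dir]=asc
#     search[value]=foo             search[regex]=false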
| 38.463855
| 78
| 0.499139
|
794ec4d645a1941953fc6cc2596c347f252cf4c4
| 2,388
|
py
|
Python
|
roblox/members.py
|
Warhawk947/ro.py
|
dac29116ef72f577d2e086e3297a79201d58c895
|
[
"MIT"
] | 1
|
2021-11-25T02:29:12.000Z
|
2021-11-25T02:29:12.000Z
|
roblox/members.py
|
Warhawk947/ro.py
|
dac29116ef72f577d2e086e3297a79201d58c895
|
[
"MIT"
] | null | null | null |
roblox/members.py
|
Warhawk947/ro.py
|
dac29116ef72f577d2e086e3297a79201d58c895
|
[
"MIT"
] | null | null | null |
"""
This module contains classes intended to parse and deal with data from Roblox group member endpoints.
"""
from __future__ import annotations
from typing import Union, TYPE_CHECKING
from .bases.baseuser import BaseUser
from .partials.partialrole import PartialRole
from .utilities.shared import ClientSharedObject
if TYPE_CHECKING:
from .bases.basegroup import BaseGroup
from .bases.baserole import BaseRole
class MemberRelationship(BaseUser):
"""
Represents a relationship between a user and a group.
"""
def __init__(self, shared: ClientSharedObject, user: Union[BaseUser, int], group: Union[BaseGroup, int]):
self._shared: ClientSharedObject = shared
super().__init__(shared=self._shared, user_id=int(user))
self.group: BaseGroup
if isinstance(group, int):
self.group = BaseGroup(shared=self._shared, group_id=group)
else:
self.group = group
async def set_role(self, role: BaseRole):
"""
Sets this member's role.
Arguments:
role: The new role this member should be assigned.
"""
await self.group.set_role(self, role)
async def set_rank(self, rank: int):
"""
Sets this member's rank.
Arguments:
rank: The new rank this member should be assigned. Should be in the range of 0-255.
"""
await self.group.set_rank(self, rank)
async def kick(self):
"""
Kicks this member from the group.
"""
await self.group.kick_user(self)
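# Minimal usage sketch in an async context (IDs and rank are made up; `shared` is
# assumed to be an existing ClientSharedObject):
#     relationship = MemberRelationship(shared, user=123456, group=7890)
#     await relationship.set_rank(255)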
class Member(MemberRelationship):
"""
Represents a group member.
Attributes:
_shared: The shared object.
role: The member's role.
group: The member's group.
"""
def __init__(self, shared: ClientSharedObject, data: dict, group: BaseGroup):
self._shared: ClientSharedObject = shared
self.id: int = data["user"]["userId"]
self.name: str = data["user"]["username"]
self.display_name: str = data["user"]["displayName"]
super().__init__(shared=self._shared, user=self.id, group=group)
self.role: PartialRole = PartialRole(shared=self._shared, data=data["role"])
self.group: BaseGroup = group
def __repr__(self):
return f"<{self.__class__.__name__} id={self.id} name={self.name!r} role={self.role}>"
| 28.094118
| 109
| 0.646566
|
794ec4ef4bb22ee9ec8b1c2f9e55232e581736c8
| 40,998
|
py
|
Python
|
RecoLuminosity/LumiDB/python/matplotRender.py
|
bisnupriyasahu/cmssw
|
6cf37ca459246525be0e8a6f5172c6123637d259
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
RecoLuminosity/LumiDB/python/matplotRender.py
|
bisnupriyasahu/cmssw
|
6cf37ca459246525be0e8a6f5172c6123637d259
|
[
"Apache-2.0"
] | 3
|
2018-08-23T13:40:24.000Z
|
2019-12-05T21:16:03.000Z
|
RecoLuminosity/LumiDB/python/matplotRender.py
|
bisnupriyasahu/cmssw
|
6cf37ca459246525be0e8a6f5172c6123637d259
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
'''
Specs:
-- We use the matplotlib OO class-level API; we do not use its high-level helper modules. Favor stability over simplicity.
-- PNG is the default batch file format.
-- We support HTTP mode by sending a string buffer with MIME type image/png. Sending a premade static plot to a webserver is considered an uploading process rather than the HTTP dynamic graphical mode.
'''
from __future__ import print_function
from builtins import range
import sys,os
import numpy,datetime
import matplotlib
from RecoLuminosity.LumiDB import CommonUtil,lumiTime,csvReporter
batchonly=False
if 'DISPLAY' not in os.environ or not os.environ['DISPLAY']:
batchonly=True
matplotlib.use('Agg',warn=False)
else:
try:
from RecoLuminosity.LumiDB import lumiQTWidget
except ImportError:
print('unable to import GUI backend, switch to batch only mode')
matplotlib.use('Agg',warn=False)
batchonly=True
from matplotlib.backends.backend_agg import FigureCanvasAgg as CanvasBackend
from matplotlib.figure import Figure
from matplotlib.font_manager import fontManager,FontProperties
matplotlib.rcParams['lines.linewidth']=1.5
matplotlib.rcParams['grid.linewidth']=0.2
matplotlib.rcParams['xtick.labelsize']=11
matplotlib.rcParams['ytick.labelsize']=11
matplotlib.rcParams['legend.fontsize']=10
matplotlib.rcParams['axes.labelsize']=11
matplotlib.rcParams['font.weight']=567
def guessInstLumiUnit(t):
'''
input : largest total lumivalue
output: (unitstring,denomitor)
'''
unitstring='$\mu$b$^{-1}$s$^{-1}$'
denomitor=1.0
if t>=1.0e3 and t<1.0e06:
denomitor=1.0e3
unitstring='nb$^{-1}$s$^{-1}$'
elif t>=1.0e6 and t<1.0e9:
denomitor=1.0e6
unitstring='pb$^{-1}$s$^{-1}$'
elif t>=1.0e9 and t<1.0e12:
denomitor=1.0e9
unitstring='fb$^{-1}$s$^{-1}$'
elif t>=1.0e12 and t<1.0e15:
denomitor=1.0e12
unitstring='ab$^{-1}$s$^{-1}$'
elif t<=1.0e-3 and t>1.0e-6: #left direction
denomitor=1.0e-3
unitstring='mb$^{-1}$s$^{-1}$'
elif t<=1.0e-6 and t>1.0e-9:
denomitor=1.0e-6
unitstring='b$^{-1}$s$^{-1}$'
elif t<=1.0e-9 and t>1.0e-12:
denomitor=1.0e-9
unitstring='kb$^{-1}$s$^{-1}$'
return (unitstring,denomitor)
def guessLumiUnit(t):
'''
input : largest total lumivalue
output: (unitstring,denomitor)
'''
unitstring='$\mu$b$^{-1}$'
denomitor=1.0
if t>=1.0e3 and t<1.0e06:
denomitor=1.0e3
unitstring='nb$^{-1}$'
elif t>=1.0e6 and t<1.0e9:
denomitor=1.0e6
unitstring='pb$^{-1}$'
elif t>=1.0e9 and t<1.0e12:
denomitor=1.0e9
unitstring='fb$^{-1}$'
elif t>=1.0e12 and t<1.0e15:
denomitor=1.0e12
unitstring='ab$^{-1}$'
elif t<=1.0e-3 and t>1.0e-6: #left direction
denomitor=1.0e-3
unitstring='mb$^{-1}$'
elif t<=1.0e-6 and t>1.0e-9:
denomitor=1.0e-6
unitstring='b$^{-1}$'
elif t<=1.0e-9 and t>1.0e-12:
denomitor=1.0e-9
unitstring='kb$^{-1}$'
return (unitstring,denomitor)
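# A minimal usage sketch: guessLumiUnit picks a unit so the plotted numbers stay
# in a readable range, e.g. a total of 2.5e7 /ub is reported as 25 with the
# pb^-1 label (the returned unit string is a matplotlib LaTeX label).
def _example_guessLumiUnit():
    unitstring,denomitor=guessLumiUnit(2.5e7)
    return '%.1f %s'%(2.5e7/denomitor,unitstring)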
class matplotRender():
def __init__(self,fig):
self.__fig=fig
self.__canvas=''
self.colormap={}
self.colormap['Delivered']='r'
self.colormap['Recorded']='b'
self.colormap['Effective']='g'
self.colormap['Max Inst']='r'
def plotSumX_Run(self,rawdata={},resultlines=[],minRun=None,maxRun=None,nticks=6,yscale='linear',withannotation=False,referenceLabel='Delivered',labels=['Delivered','Recorded'],textoutput=None):
'''
input:
rawdata = {'Delivered':[(runnumber,lumiperrun),..],'Recorded':[(runnumber,lumiperrun),..]}
resultlines = [[runnumber,dellumiperrun,reclumiperrun],[runnumber,dellumiperrun,reclumiperrun],]
minRun : minimal runnumber required
maxRun : max runnumber required
yscale: linear,log or both
        withannotation: whether the boundary points should be annotated
referenceLabel: the one variable that decides the total unit and the plot x-axis range
labels: labels of the variables to plot
textoutput: text output file name.
'''
ypoints={}
ytotal={}
for r in resultlines:#parse old text data
runnumber=int(r[0])
if rawdata and runnumber in [t[0] for t in rawdata[referenceLabel]]:continue#use text input only if not in selected data
if minRun and runnumber<minRun: continue
if maxRun and runnumber>maxRun: continue
for i,lab in enumerate(labels) :
v=float(r[-(len(labels)-i)-1])#the values to plot are always the last n fields
rawdata.setdefault(lab,[]).append((runnumber,v))
if not rawdata:
print('[WARNING]: no data to plot , exit')
return
tot=sum([t[1] for t in rawdata[referenceLabel]])
(unitstring,denomitor)=guessLumiUnit(tot)
csvreport=None
rows=[]
flat=[]
for label,yvalues in rawdata.items():
yvalues.sort()
flat.append([t[1] for t in yvalues])
ypoints[label]=[]
ytotal[label]=0.0
lumivals=[t[1] for t in yvalues]
for i,val in enumerate(lumivals):
ypoints[label].append(sum(lumivals[0:i+1])/denomitor)#integrated lumi
ytotal[label]=sum(lumivals)/denomitor
xpoints=[t[0] for t in rawdata[referenceLabel]]
ax=self.__fig.add_subplot(111)
if yscale=='linear':
ax.set_yscale('linear')
elif yscale=='log':
ax.set_yscale('log')
else:
raise RuntimeError('unsupported yscale '+yscale)
ax.set_xlabel(r'Run',position=(0.95,0))
ax.set_ylabel(r'L '+unitstring,position=(0,0.9))
xticklabels=ax.get_xticklabels()
for tx in xticklabels:
tx.set_rotation(30)
majorLocator=matplotlib.ticker.LinearLocator( nticks )
majorFormatter=matplotlib.ticker.FormatStrFormatter('%d')
minorLocator=matplotlib.ticker.LinearLocator(numticks=4*nticks)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_formatter(majorFormatter)
ax.xaxis.set_minor_locator(minorLocator)
ax.set_xbound(lower=xpoints[0],upper=xpoints[-1])
ax.grid(True)
keylist=sorted(ypoints.keys())
        keylist.insert(0,keylist.pop(keylist.index(referenceLabel)))#move referenceLabel to front from now on
legendlist=[]
head=['#Run']
textsummaryhead=['#TotalRun']
textsummaryline=['#'+str(len(xpoints))]
for ylabel in keylist:
cl='k'
if ylabel in self.colormap:
cl=self.colormap[ylabel]
ax.plot(xpoints,ypoints[ylabel],label=ylabel,color=cl,drawstyle='steps')
legendlist.append(ylabel+' '+'%.3f'%(ytotal[ylabel])+' '+unitstring)
textsummaryhead.append('Total'+ylabel)
textsummaryline.append('%.3f'%(ytotal[ylabel])+' '+unitstring)
head.append(ylabel)
if textoutput:
csvreport=csvReporter.csvReporter(textoutput)
csvreport.writeRow(head)
allruns=[int(t[0]) for t in rawdata[referenceLabel]]
flat.insert(0,allruns)
rows=list(zip(*flat))
csvreport.writeRows([list(t) for t in rows])
csvreport.writeRow(textsummaryhead)
csvreport.writeRow(textsummaryline)
#font=FontProperties(size='medium',weight='demibold')
#legend
ax.legend(tuple(legendlist),loc='upper left')
#adjust
self.__fig.subplots_adjust(bottom=0.18,left=0.1)
#annotations
if withannotation:
trans=matplotlib.transforms.BlendedGenericTransform(ax.transData,ax.transAxes)
ax.text(xpoints[0],1.025,str(xpoints[0]),transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
ax.text(xpoints[-1],1.025,str(xpoints[-1]),transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
def plotSumX_Fill(self,rawdata={},resultlines=[],minFill=None,maxFill=None,nticks=6,yscale='linear',withannotation=False,referenceLabel='Delivered',labels=['Delivered','Recorded'],textoutput=None):
'''
input:
rawdata = {'Delivered':[(fill,runnumber,lumiperrun)],'Recorded':[(fill,runnumber,lumiperrun)]}
resultlines = [[fillnumber,runnumber,dellumiperrun,reclumiperrun],[fillnumber,runnumber,dellumiperrun,reclumiperrun],]
minFill : min fill to draw
maxFill : max fill to draw
yscale: linear,log or both
        withannotation: whether the boundary points should be annotated
textoutput: text output file name.
'''
ytotal={}
ypoints={}
for r in resultlines: #parse old text data
fillnum=int(r[0])
runnum=int(r[1])
if rawdata and (fillnum,runnum) in [(t[0],t[1]) for t in rawdata[referenceLabel]]:continue
if minFill and fillnum<minFill:continue
if maxFill and fillnum>maxFill:continue
for i,lab in enumerate(labels) :
v=float(r[-(len(labels)-i)])#the values to plot are always the last n fields
rawdata.setdefault(lab,[]).append((fillnum,runnum,v))
#print 'fillrunDict ',fillrunDict
if not rawdata:
print('[WARNING]: no data, do nothing')
return
tot=sum([t[2] for t in rawdata[referenceLabel]])
beginfo=''
endinfo=''
(unitstring,denomitor)=guessLumiUnit(tot)
csvreport=None
rows=[]
flat=[]
for label,yvalues in rawdata.items():
yvalues.sort()
flat.append([t[2] for t in yvalues])
ypoints[label]=[]
ytotal[label]=0.0
lumivals=[t[2] for t in yvalues]
for i,val in enumerate(lumivals):
ypoints[label].append(sum(lumivals[0:i+1])/denomitor)
ytotal[label]=sum(lumivals)/denomitor
xpoints=[t[0] for t in rawdata[referenceLabel]]#after sort
ax=self.__fig.add_subplot(111)
ax.set_xlabel(r'LHC Fill Number',position=(0.84,0))
ax.set_ylabel(r'L '+unitstring,position=(0,0.9))
ax.set_xbound(lower=xpoints[0],upper=xpoints[-1])
if yscale=='linear':
ax.set_yscale('linear')
elif yscale=='log':
ax.set_yscale('log')
else:
raise RuntimeError('unsupported yscale '+yscale)
xticklabels=ax.get_xticklabels()
majorLocator=matplotlib.ticker.LinearLocator( nticks )
majorFormatter=matplotlib.ticker.FormatStrFormatter('%d')
#minorLocator=matplotlib.ticker.MultipleLocator(sampleinterval)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_formatter(majorFormatter)
#ax.xaxis.set_minor_locator(minorLocator)
ax.grid(True)
keylist=sorted(ypoints.keys())
        keylist.insert(0,keylist.pop(keylist.index(referenceLabel)))#move referenceLabel to front from now on
legendlist=[]
head=['#fill','run']
textsummaryhead=['#TotalFill']
textsummaryline=['#'+str(len(xpoints))]
for ylabel in keylist:
cl='k'
if ylabel in self.colormap:
cl=self.colormap[ylabel]
ax.plot(xpoints,ypoints[ylabel],label=ylabel,color=cl,drawstyle='steps')
legendlist.append(ylabel+' '+'%.3f'%(ytotal[ylabel])+' '+unitstring)
textsummaryhead.append('Total'+ylabel)
textsummaryline.append('%.3f'%(ytotal[ylabel])+' '+unitstring)
head.append(ylabel)
if textoutput:
csvreport=csvReporter.csvReporter(textoutput)
allfills=[int(t[0]) for t in rawdata[referenceLabel]]
allruns=[int(t[1]) for t in rawdata[referenceLabel]]
flat.insert(0,allfills)
flat.insert(1,allruns)
rows=list(zip(*flat))
csvreport.writeRow(head)
csvreport.writeRows([list(t) for t in rows])
csvreport.writeRow(textsummaryhead)
csvreport.writeRow(textsummaryline)
#font=FontProperties(size='medium',weight='demibold')
#annotations
if withannotation:
trans=matplotlib.transforms.BlendedGenericTransform(ax.transData,ax.transAxes)
ax.text(xpoints[0],1.025,beginfo,transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
ax.text(xpoints[-1],1.025,endinfo,transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
#legend
ax.legend(tuple(legendlist),loc='upper left')
#adjust
self.__fig.subplots_adjust(bottom=0.1,left=0.1)
def plotSumX_Time(self,rawdata={},resultlines=[],minTime=None,maxTime=None,nticks=6,yscale='linear',withannotation=False,referenceLabel='Delivered',labels=['Delivered','Recorded'],textoutput=None):
'''
input:
rawdata = {'Delivered':[(runnumber,starttimestamp,stoptimestamp,lumiperrun)],'Recorded':[(runnumber,starttimestamp,stoptimestamp,lumiperrun)]}
resultlines = [[runnumber,starttimestampStr,stoptimestampStr,dellumiperrun,reclumiperrun],[runnumber,starttimestampStr,stoptimestampStr,dellumiperrun,reclumiperrun],]
minTime (python DateTime) : min *begin* time to draw: format %m/%d/%y %H:%M:%S
maxTime (python DateTime): max *begin* time to draw %m/%d/%y %H:%M:%S
yscale: linear,log or both
        withannotation: whether the boundary points should be annotated
referenceLabel: the one variable that decides the total unit and the plot x-axis range
labels: labels of the variables to plot
'''
xpoints=[]
ypoints={}
ytotal={}
lut=lumiTime.lumiTime()
if not minTime:
minTime='03/01/10 00:00:00'
minTime=lut.StrToDatetime(minTime,customfm='%m/%d/%y %H:%M:%S')
if not maxTime:
maxTime=datetime.datetime.utcnow()
else:
maxTime=lut.StrToDatetime(maxTime,customfm='%m/%d/%y %H:%M:%S')
for r in resultlines:
runnumber=int(r[0])
starttimeStr=r[1].split('.')[0]
starttime=lut.StrToDatetime(starttimeStr,customfm='%Y-%m-%d %H:%M:%S')
stoptimeStr=r[2].split('.')[0]
stoptime=lut.StrToDatetime(stoptimeStr,customfm='%Y-%m-%d %H:%M:%S')
if rawdata and runnumber in [t[0] for t in rawdata[referenceLabel]]:continue
if starttime<minTime:continue
if starttime>maxTime:continue
for i,lab in enumerate(labels):
v=float(r[-(len(labels)-i)])
rawdata.setdefault(lab,[]).append((runnumber,starttime,stoptime,v))
if not rawdata:
print('[WARNING]: no data, do nothing')
return
tot=sum([t[3] for t in rawdata[referenceLabel]])
(unitstring,denomitor)=guessLumiUnit(tot)
csvreport=None
rows=[]
flat=[]
for label,yvalues in rawdata.items():
yvalues.sort()
flat.append([t[3] for t in yvalues])
if label==referenceLabel:
minTime=yvalues[0][1]
maxTime=yvalues[-1][1]
ypoints[label]=[]
lumivals=[t[3] for t in yvalues]
for i,val in enumerate(lumivals):
ypoints[label].append(sum(lumivals[0:i+1])/denomitor)
ytotal[label]=sum(lumivals)/denomitor
xpoints=[matplotlib.dates.date2num(t[1]) for t in rawdata[referenceLabel]]
ax=self.__fig.add_subplot(111)
ax.set_yscale(yscale)
yearStrMin=minTime.strftime('%Y')
yearStrMax=maxTime.strftime('%Y')
if yearStrMin==yearStrMax:
dateFmt=matplotlib.dates.DateFormatter('%d/%m')
else:
dateFmt=matplotlib.dates.DateFormatter('%d/%m/%y')
majorLoc=matplotlib.ticker.LinearLocator(numticks=nticks)
ax.xaxis.set_major_locator(majorLoc)
minorLoc=matplotlib.ticker.LinearLocator(numticks=nticks*4)
ax.xaxis.set_major_formatter(dateFmt)
ax.set_xlabel(r'Date',position=(0.84,0))
ax.set_ylabel(r'L '+unitstring,position=(0,0.9))
ax.xaxis.set_minor_locator(minorLoc)
ax.set_xbound(lower=xpoints[0],upper=xpoints[-1])
xticklabels=ax.get_xticklabels()
for tx in xticklabels:
tx.set_horizontalalignment('left')
ax.grid(True)
keylist=sorted(ypoints.keys())
        keylist.insert(0,keylist.pop(keylist.index(referenceLabel)))#move referenceLabel to front from now on
legendlist=[]
head=['#Run','StartTime','StopTime']
textsummaryhead=['#TotalRun']
textsummaryline=['#'+str(len(xpoints))]
for ylabel in keylist:
cl='k'
if ylabel in self.colormap:
cl=self.colormap[ylabel]
ax.plot(xpoints,ypoints[ylabel],label=ylabel,color=cl,drawstyle='steps')
legendlist.append(ylabel+' '+'%.3f'%(ytotal[ylabel])+' '+unitstring)
textsummaryhead.append('Total'+ylabel)
textsummaryline.append('%.3f'%(ytotal[ylabel])+' '+unitstring)
head.append(ylabel)
if textoutput:
csvreport=csvReporter.csvReporter(textoutput)
csvreport.writeRow(head)
allruns=[int(t[0]) for t in rawdata[referenceLabel]]
allstarts=[ lut.DatetimeToStr(t[1],customfm='%Y-%m-%d %H:%M:%S') for t in rawdata[referenceLabel] ]
allstops=[ lut.DatetimeToStr(t[2],customfm='%Y-%m-%d %H:%M:%S') for t in rawdata[referenceLabel] ]
flat.insert(0,allruns)
flat.insert(1,allstarts)
flat.insert(2,allstops)
rows=list(zip(*flat))
csvreport.writeRows([list(t) for t in rows])
csvreport.writeRow(textsummaryhead)
csvreport.writeRow(textsummaryline)
#annotations
trans=matplotlib.transforms.BlendedGenericTransform(ax.transData,ax.transAxes)
#print 'run boundary ',runs[0],runs[-1]
#print 'xpoints boundary ',xpoints[0],xpoints[-1]
#annotation
if withannotation:
runs=[t[0] for t in rawdata[referenceLabel]]
ax.text(xpoints[0],1.025,str(runs[0]),transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
ax.text(xpoints[-1],1.025,str(runs[-1]),transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
if yearStrMin==yearStrMax:
firsttimeStr=rawdata[referenceLabel][1][1].strftime('%b %d %H:%M') #time range(start) in the title is the first run beg time
lasttimeStr=rawdata[referenceLabel][-1][2].strftime('%b %d %H:%M') #time range(stop) in the tile is the last run stop time
#firstimeStr=minTime.strftime('%b %d %H:%M')
#lasttimeStr=maxTime.strftime('%b %d %H:%M')
#ax.set_title('CMS Total Integrated Luminosity '+yearStrMin+' ('+firstimeStr+' - '+lasttimeStr+' UTC)',size='small',family='fantasy')
ax.set_title('CMS Total Integrated Luminosity '+yearStrMin+' ('+firsttimeStr+' - '+lasttimeStr+' UTC)',size='small')
else:
#ax.set_title('CMS Total Integrated Luminosity '+yearStrMin+'-'+yearStrMax,size='small',family='fantasy')
ax.set_title('CMS Total Integrated Luminosity '+yearStrMin+'-'+yearStrMax,size='small')
ax.legend(tuple(legendlist),loc='upper left')
ax.autoscale_view(tight=True,scalex=True,scaley=False)
self.__fig.autofmt_xdate(bottom=0.18,rotation=15,ha='right')
self.__fig.subplots_adjust(bottom=0.2,left=0.15)
def plotPerdayX_Time(self,rawdata={},resultlines=[],minTime=None,maxTime=None,nticks=6,yscale='linear',withannotation=False,referenceLabel='Delivered',labels=['Delivered','Recorded'],textoutput=None):
'''
Input:
rawdata={'Delivered':[(day,begrun:ls,endrun:ls,lumi)],'Recorded':[(dayofyear,begrun:ls,endrun:ls,lumi)]}
resultlines=[[day,begrun:ls,endrun:ls,deliveredperday,recordedperday],[]]
minTime (python DateTime) : min *begin* time to draw: format %m/%d/%y %H:%M:%S
maxTime (python DateTime): max *begin* time to draw %m/%d/%y %H:%M:%S
        withannotation: whether the boundary points should be annotated
referenceLabel: the one variable that decides the total unit and the plot x-axis range
labels: labels of the variables to plot
'''
xpoints=[]
ypoints={}
ymax={}
lut=lumiTime.lumiTime()
if not minTime:
minTime='03/01/10 00:00:00'
minTime=lut.StrToDatetime(minTime,customfm='%m/%d/%y %H:%M:%S')
if not maxTime:
maxTime=datetime.datetime.utcnow()
else:
maxTime=lut.StrToDatetime(maxTime,customfm='%m/%d/%y %H:%M:%S')
for r in resultlines:
day=int(r[0])
begrunls=r[1]
endrunls=r[2]
#[begrun,begls]=[int(s) for s in r[1].split(':')]
if rawdata and day in [t[0] for t in rawdata[referenceLabel]]:continue
if day < minTime.date().toordinal():continue
if day > maxTime.date().toordinal():continue
for i,lab in enumerate(labels):
v=float(r[-(len(labels)-i)-1])
rawdata.setdefault(lab,[]).append((day,begrunls,endrunls,v))
if not rawdata:
print('[WARNING]: no data, do nothing')
return
maxlum=max([t[3] for t in rawdata[referenceLabel]])
        minlum=min([t[3] for t in rawdata[referenceLabel] if t[3]>0]) #used only for log scale, find the non-zero bottom
(unitstring,denomitor)=guessLumiUnit(maxlum)
csvreport=None
rows=[]
flat=[]
MinDay=minTime.date().toordinal()
MaxDay=maxTime.date().toordinal()
fulldays=list(range(MinDay,MaxDay+1))
allstarts=[]
allstops=[]
for label,yvalues in rawdata.items():
yvalues.sort()
flat.append([t[3] for t in yvalues])
alldays=[t[0] for t in yvalues]
alldates=[str(datetime.date.fromordinal(t)) for t in alldays]
ypoints[label]=[]
lumivals=[t[3] for t in yvalues]
for d in fulldays:
if not d in alldays:
ypoints[label].append(0.0)
else:
thisdaylumi=[t[3] for t in yvalues if t[0]==d][0]
if yscale=='log':
if thisdaylumi<minlum:
thisdaylumi=minlum/denomitor
else:
thisdaylumi=thisdaylumi/denomitor
else:
thisdaylumi=thisdaylumi/denomitor
ypoints[label].append(thisdaylumi)
ymax[label]=max(lumivals)/denomitor
xpoints=fulldays
if textoutput:
csvreport=csvReporter.csvReporter(textoutput)
head=['#day','begrunls','endrunls','delivered','recorded','date']
csvreport.writeRow(head)
flat.insert(0,alldays)
allstarts=[ t[1] for t in rawdata[referenceLabel]]
allstops=[ t[2] for t in rawdata[referenceLabel]]
#print 'allstarts ',allstarts
flat.insert(1,allstarts)
flat.insert(2,allstops)
flat.append(alldates)
rows=list(zip(*flat))
csvreport.writeRows([list(t) for t in rows])
yearStrMin=minTime.strftime('%Y')
yearStrMax=maxTime.strftime('%Y')
if yearStrMin==yearStrMax:
dateFmt=matplotlib.dates.DateFormatter('%d/%m')
else:
dateFmt=matplotlib.dates.DateFormatter('%d/%m/%y')
ax=self.__fig.add_subplot(111)
if yscale=='linear':
ax.set_yscale('linear')
elif yscale=='log':
ax.set_yscale('log')
else:
raise RuntimeError('unsupported yscale '+yscale)
majorLoc=matplotlib.ticker.LinearLocator(numticks=nticks)
minorLoc=matplotlib.ticker.LinearLocator(numticks=nticks*4)
ax.xaxis.set_major_formatter(dateFmt)
ax.set_xlabel(r'Date',position=(0.84,0))
ax.xaxis.set_major_locator(majorLoc)
ax.xaxis.set_minor_locator(minorLoc)
xticklabels=ax.get_xticklabels()
for tx in xticklabels:
tx.set_horizontalalignment('right')
ax.grid(True)
legendlist=[]
ax.set_ylabel(r'L '+unitstring,position=(0,0.9))
textsummaryhead=['#TotalRunningDays']
textsummaryline=['#'+str(len(alldays))]
for ylabel in labels:
cl='k'
if ylabel in self.colormap:
cl=self.colormap[ylabel]
ax.plot(xpoints,ypoints[ylabel],label=ylabel,color=cl,drawstyle='steps')
legendlist.append(ylabel+' Max '+'%.3f'%(ymax[ylabel])+' '+unitstring)
textsummaryhead.append('Max'+ylabel)
textsummaryline.append('%.3f'%(ymax[ylabel])+' '+unitstring)
if textoutput:
csvreport.writeRow(textsummaryhead)
csvreport.writeRow(textsummaryline)
ax.legend(tuple(legendlist),loc='upper left')
ax.set_xbound(lower=matplotlib.dates.date2num(minTime),upper=matplotlib.dates.date2num(maxTime))
#if withannotation:
# begtime=boundaryInfo[0][0]
# beginfo=boundaryInfo[0][1]
# endtime=boundaryInfo[1][0]
# endinfo=boundaryInfo[1][1]
# #annotations
# trans=matplotlib.transforms.BlendedGenericTransform(ax.transData,ax.transAxes)
# ax.text(matplotlib.dates.date2num(begtime),1.025,beginfo,transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
# ax.text(matplotlib.dates.date2num(endtime),1.025,endinfo,transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
firstday=datetime.date.fromordinal(rawdata[referenceLabel][0][0])
lastday=datetime.date.fromordinal(rawdata[referenceLabel][-1][0])
firstdayStr=firstday.strftime('%Y %b %d')
lastdayStr=lastday.strftime('%Y %b %d')
ax.set_title('CMS Integrated Luminosity/Day ('+firstdayStr+' - '+lastdayStr+')',size='small')
#ax.autoscale(tight=True)
ax.autoscale_view(tight=True,scalex=True,scaley=False)
#ax.set_xmargin(0.015)
self.__fig.autofmt_xdate(bottom=0.18,rotation=15,ha='right')
self.__fig.subplots_adjust(bottom=0.2,left=0.15)
def plotPeakPerday_Time(self,rawdata={},resultlines=[],minTime=None,maxTime=None,nticks=6,withannotation=False,yscale='linear',referenceLabel='Delivered',labels=['Delivered'],textoutput=None):
'''
        THIS PLOT IS FOR DELIVERED LUMINOSITY ONLY
Input:
rawdata={'Delivered':[(day,run,ls,instlumi)]}
resultlines=[[day,run,ls,maxinstlum],[]]
minTime (python DateTime) : min *begin* time to draw: format %m/%d/%y %H:%M:%S
maxTime (python DateTime): max *begin* time to draw %m/%d/%y %H:%M:%S
        withannotation: whether the boundary points should be annotated
referenceLabel: the one variable that decides the total unit and the plot x-axis range
labels: labels of the variables to plot
'''
xpoints=[]
ypoints={}
legendlist=[]
maxinfo=''
ymax={}
lut=lumiTime.lumiTime()
if not minTime:
minTime='03/01/10 00:00:00'
minTime=lut.StrToDatetime(minTime,customfm='%m/%d/%y %H:%M:%S')
if not maxTime:
maxTime=datetime.datetime.utcnow()
else:
maxTime=lut.StrToDatetime(maxTime,customfm='%m/%d/%y %H:%M:%S')
for r in resultlines:
day=int(r[0])
runnumber=int(r[1])
lsnum=int(r[2].split('.')[0])
if rawdata and day in [int(t[0]) for t in rawdata[referenceLabel]]:continue
if day < minTime.date().toordinal():continue
if day > maxTime.date().toordinal():continue
for i,lab in enumerate(labels):
v=float(r[-(len(labels)-i)-1])
rawdata.setdefault(lab,[]).append((day,runnumber,lsnum,v))
if not rawdata:
print('[WARNING]: no data, do nothing')
return
maxlum=max([t[3] for t in rawdata[referenceLabel]])
        minlum=min([t[3] for t in rawdata[referenceLabel] if t[3]>0]) #used only for log scale, find the non-zero bottom
(unitstring,denomitor)=guessInstLumiUnit(maxlum)
csvreport=None
rows=[]
flat=[]
MinDay=minTime.date().toordinal()
MaxDay=maxTime.date().toordinal()
fulldays=list(range(MinDay,MaxDay+1))
for label in rawdata.keys():
yvalues=sorted(rawdata[label])
alldays=[t[0] for t in yvalues]
alldates=[str(datetime.date.fromordinal(t)) for t in alldays]
ypoints[label]=[]
lumivals=[t[3] for t in yvalues]
flat.append(lumivals)
for d in fulldays:
if not d in alldays:
ypoints[label].append(0.0)
else:
thisdaylumi=[t[3] for t in yvalues if t[0]==d][0]
if yscale=='log':
if thisdaylumi<minlum:
thisdaylumi=minlum/denomitor
else:
thisdaylumi=thisdaylumi/denomitor
else:
thisdaylumi=thisdaylumi/denomitor
ypoints[label].append(thisdaylumi)
ymax[label]=max(lumivals)/denomitor
            #print('ymax ',max(lumivals))
xpoints=fulldays
if textoutput:
csvreport=csvReporter.csvReporter(textoutput)
head=['#day','run','lsnum','maxinstlumi','date']
csvreport.writeRow(head)
flat.insert(0,alldays)
allruns=[ t[1] for t in rawdata[referenceLabel]]
allls=[ t[2] for t in rawdata[referenceLabel]]
flat.insert(1,allruns)
flat.insert(2,allls)
flat.append(alldates)
rows=list(zip(*flat))
csvreport.writeRows([list(t) for t in rows])
yearStrMin=minTime.strftime('%Y')
yearStrMax=maxTime.strftime('%Y')
if yearStrMin==yearStrMax:
dateFmt=matplotlib.dates.DateFormatter('%d/%m')
else:
dateFmt=matplotlib.dates.DateFormatter('%d/%m/%y')
ax=self.__fig.add_subplot(111)
if yscale=='linear':
ax.set_yscale('linear')
elif yscale=='log':
ax.set_yscale('log')
else:
raise RuntimeError('unsupported yscale '+yscale)
majorLoc=matplotlib.ticker.LinearLocator(numticks=nticks)
minorLoc=matplotlib.ticker.LinearLocator(numticks=nticks*4)
ax.xaxis.set_major_formatter(dateFmt)
ax.set_xlabel(r'Date',position=(0.84,0))
ax.set_ylabel(r'L '+unitstring,position=(0,0.9))
ax.xaxis.set_major_locator(majorLoc)
ax.xaxis.set_minor_locator(minorLoc)
xticklabels=ax.get_xticklabels()
for tx in xticklabels:
tx.set_horizontalalignment('right')
ax.grid(True)
cl=self.colormap['Max Inst']
textsummaryhead=['#TotalRunningDays']
textsummaryline=['#'+str(len(alldays))]
for ylabel in labels:
cl='k'
if ylabel in self.colormap:
cl=self.colormap[ylabel]
ax.plot(xpoints,ypoints[ylabel],label='Max Inst',color=cl,drawstyle='steps')
legendlist.append('Max Inst %.3f'%(ymax[ylabel])+' '+unitstring)
textsummaryhead.append('Max Inst'+ylabel)
textsummaryline.append('%.3f'%(ymax[ylabel])+' '+unitstring)
if textoutput:
csvreport.writeRow(textsummaryhead)
csvreport.writeRow(textsummaryline)
ax.legend(tuple(legendlist),loc='upper left')
ax.set_xbound(lower=matplotlib.dates.date2num(minTime),upper=matplotlib.dates.date2num(maxTime))
if withannotation:
#annotations
trans=matplotlib.transforms.BlendedGenericTransform(ax.transData,ax.transAxes)
ax.text(xpoints[0],1.025,beginfo,transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
ax.text(xpoints[-1],1.025,endinfo,transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
ax.annotate(maxinfo,xy=(xmax,ymax),xycoords='data',xytext=(0,13),textcoords='offset points',arrowprops=dict(facecolor='green',shrink=0.05),size='x-small',horizontalalignment='center',color='green',bbox=dict(facecolor='white'))
firstday=datetime.date.fromordinal(rawdata[referenceLabel][0][0])
lastday=datetime.date.fromordinal(rawdata[referenceLabel][-1][0])
firstdayStr=firstday.strftime('%Y %b %d')
lastdayStr=lastday.strftime('%Y %b %d')
ax.set_title('CMS Peak Luminosity/Day ('+firstdayStr+' - '+lastdayStr+')',size='small')
#ax.autoscale(tight=True)
ax.autoscale_view(tight=True,scalex=True,scaley=False)
#ax.set_xmargin(0.015)
self.__fig.autofmt_xdate(bottom=0.18,rotation=15,ha='right')
self.__fig.subplots_adjust(bottom=0.2,left=0.15)
def plotInst_RunLS(self,rawxdata,rawydata,nticks=6,textoutput=None):
'''
Input: rawxdata [run,fill,starttime,stoptime,totalls,ncmsls]
rawydata {label:[lumi]}
'''
lslength=23.357
lut=lumiTime.lumiTime()
runnum=rawxdata[0]
fill=rawxdata[1]
starttime=lut.DatetimeToStr(rawxdata[2],customfm='%m/%d/%y %H:%M:%S')
stoptime=lut.DatetimeToStr(rawxdata[3],customfm='%m/%d/%y %H:%M:%S')
totalls=rawxdata[-2]
ncmsls=rawxdata[-1]
peakinst=max(rawydata['Delivered'])/lslength
totaldelivered=sum(rawydata['Delivered'])
totalrecorded=sum(rawydata['Recorded'])
xpoints=list(range(1,totalls+1))
#print len(xpoints)
ypoints={}
ymax={}
for ylabel,yvalue in rawydata.items():
ypoints[ylabel]=[y/lslength for y in yvalue]
ymax[ylabel]=max(yvalue)/lslength
left=0.15
width=0.7
bottom=0.1
height=0.65
bottom_h=bottom+height
rect_scatter=[left,bottom,width,height]
rect_table=[left,bottom_h,width,0.25]
nullfmt=matplotlib.ticker.NullFormatter()
nullloc=matplotlib.ticker.NullLocator()
axtab=self.__fig.add_axes(rect_table,frameon=False)
axtab.set_axis_off()
axtab.xaxis.set_major_formatter(nullfmt)
axtab.yaxis.set_major_formatter(nullfmt)
axtab.xaxis.set_major_locator(nullloc)
axtab.yaxis.set_major_locator(nullloc)
ax=self.__fig.add_axes(rect_scatter)
majorLoc=matplotlib.ticker.LinearLocator(numticks=nticks)
minorLoc=matplotlib.ticker.LinearLocator(numticks=nticks*4)
ax.set_xlabel(r'LS',position=(0.96,0))
ax.set_ylabel(r'L $\mu$b$^{-1}$s$^{-1}$',position=(0,0.9))
ax.xaxis.set_major_locator(majorLoc)
ax.xaxis.set_minor_locator(minorLoc)
ax.set_xbound(lower=xpoints[0],upper=xpoints[-1])
xticklabels=ax.get_xticklabels()
for tx in xticklabels:
tx.set_horizontalalignment('right')
ax.grid(True)
keylist=sorted(ypoints.keys())
legendlist=[]
for ylabel in keylist:
cl='k'
if ylabel in self.colormap:
cl=self.colormap[ylabel]
ax.plot(xpoints,ypoints[ylabel],'.',label=ylabel,color=cl)
legendlist.append(ylabel)
#ax.axhline(0,color='green',linewidth=0.2)
ax.axvline(xpoints[ncmsls-1],color='green',linewidth=0.2)
(unitstring,denomitor)=guessLumiUnit(totaldelivered)
colLabels=('run','fill','max inst(/$\mu$b/s)','delivered('+unitstring+')','recorded('+unitstring+')')
cellText=[[str(runnum),str(fill),'%.3f'%(peakinst),'%.3f'%(totaldelivered/denomitor),'%.3f'%(totalrecorded/denomitor)]]
sumtable=axtab.table(cellText=cellText,colLabels=colLabels,colWidths=[0.12,0.1,0.27,0.27,0.27],cellLoc='center',loc='center')
trans=matplotlib.transforms.BlendedGenericTransform(ax.transData,ax.transAxes)
axtab.add_table(sumtable)
ax.text(xpoints[0],1.02,starttime[0:17],transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
ax.text(xpoints[ncmsls-1],1.02,stoptime[0:17],transform=trans,horizontalalignment='left',size='x-small',color='green',bbox=dict(facecolor='white'))
ax.legend(tuple(legendlist),loc='upper right',numpoints=1)
    def drawHTTPstring(self):
        import cherrypy        # only needed in http mode, so imported lazily
        from io import BytesIO # print_png writes binary PNG data
        self.__canvas=CanvasBackend(self.__fig)
        cherrypy.response.headers['Content-Type']='image/png'
        buf=BytesIO()
        self.__canvas.print_png(buf)
        return buf.getvalue()
def drawPNG(self,filename):
self.__canvas=CanvasBackend(self.__fig)
self.__canvas.print_figure(filename)
def drawInteractive(self):
if batchonly:
print('interactive mode is not available for your setup, exit')
sys.exit()
aw=lumiQTWidget.ApplicationWindow(fig=self.__fig)
aw.show()
aw.destroy()
if __name__=='__main__':
import csv
print('=====testing plotSumX_Run======')
f=open('/afs/cern.ch/cms/lumi/www/plots/operation/totallumivsrun-2011.csv','r')
reader=csv.reader(f,delimiter=',')
resultlines=[]
for row in reader:
if not row[0].isdigit():continue
resultlines.append(row)
#print resultlines
fig=Figure(figsize=(7.2,5.4),dpi=120)
m=matplotRender(fig)
m.plotSumX_Run(rawdata={},resultlines=resultlines,minRun=None,maxRun=None,nticks=6,yscale='linear',withannotation=False)
#m.drawPNG('totallumivsrun-2011test.png')
m.drawInteractive()
print('DONE')
'''
print '=====testing plotSumX_Fill======'
f=open('/afs/cern.ch/cms/lumi/www/plots/operation/totallumivsfill-2011.csv','r')
reader=csv.reader(f,delimiter=',')
resultlines=[]
for row in reader:
if not row[0].isdigit():continue
resultlines.append(row)
#print resultlines
fig=Figure(figsize=(7.2,5.4),dpi=120)
m=matplotRender(fig)
m.plotSumX_Fill(rawdata={},resultlines=resultlines,minFill=None,maxFill=None,nticks=6,yscale='linear',withannotation=True)
m.drawPNG('totallumivsfill-2011test.png')
print 'DONE'
print '=====testing plotSumX_Time======'
f=open('/afs/cern.ch/cms/lumi/www/publicplots/totallumivstime-2011.csv','r')
reader=csv.reader(f,delimiter=',')
resultlines=[]
for row in reader:
if not row[0].isdigit():continue
resultlines.append(row)
#print resultlines
fig=Figure(figsize=(7.25,5.4),dpi=120)
m=matplotRender(fig)
m.plotSumX_Time(rawdata={},resultlines=resultlines,minTime="03/14/11 09:00:00",maxTime=None,nticks=6,yscale='linear',withannotation=False)
m.drawPNG('totallumivstime-2011test.png')
print 'DONE'
print '=====testing plotPerdayX_Time======'
f=open('/afs/cern.ch/cms/lumi/www/publicplots/lumiperday-2011.csv','r')
reader=csv.reader(f,delimiter=',')
resultlines=[]
for row in reader:
if not row[0].isdigit():continue
resultlines.append(row)
#print resultlines
fig=Figure(figsize=(7.25,5.4),dpi=120)
m=matplotRender(fig)
m.plotPerdayX_Time(rawdata={},resultlines=resultlines,minTime="03/14/11 09:00:00",maxTime=None,nticks=6,yscale='linear',withannotation=False)
m.drawPNG('lumiperday-2011test.png')
print 'DONE'
print '=====testing plotPeakPerday_Time======'
f=open('/afs/cern.ch/cms/lumi/www/publicplots/lumipeak-2011.csv','r')
reader=csv.reader(f,delimiter=',')
resultlines=[]
for row in reader:
if not row[0].isdigit():continue
resultlines.append(row)
#print resultlines
fig=Figure(figsize=(7.25,5.4),dpi=120)
m=matplotRender(fig)
m.plotPeakPerday_Time(rawdata={},resultlines=resultlines,minTime="03/14/11 09:00:00",maxTime=None,nticks=6,yscale='linear',withannotation=False)
m.drawPNG('lumipeak-2011test.png')
print 'DONE'
'''
| 46.273138
| 237
| 0.614371
|
794ec531581a19d82a65bb898d97840d53fcbb69
| 2,511
|
py
|
Python
|
app/migrations/0002_auto_20210405_1851.py
|
dorothymuhonja/awwards
|
3e689e2f6dc059cc146d577327ee45b170115f92
|
[
"MIT"
] | null | null | null |
app/migrations/0002_auto_20210405_1851.py
|
dorothymuhonja/awwards
|
3e689e2f6dc059cc146d577327ee45b170115f92
|
[
"MIT"
] | null | null | null |
app/migrations/0002_auto_20210405_1851.py
|
dorothymuhonja/awwards
|
3e689e2f6dc059cc146d577327ee45b170115f92
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-05 15:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='rating',
name='profile',
),
migrations.RemoveField(
model_name='rating',
name='project',
),
migrations.AddField(
model_name='rating',
name='content_average',
field=models.FloatField(blank=True, default=0),
),
migrations.AddField(
model_name='rating',
name='design_average',
field=models.FloatField(blank=True, default=0),
),
migrations.AddField(
model_name='rating',
name='post',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='app.project'),
),
migrations.AddField(
model_name='rating',
name='score',
field=models.FloatField(blank=True, default=0),
),
migrations.AddField(
model_name='rating',
name='usability_average',
field=models.FloatField(blank=True, default=0),
),
migrations.AddField(
model_name='rating',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rater', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='rating',
name='content',
field=models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')]),
),
migrations.AlterField(
model_name='rating',
name='design',
field=models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')], default=0),
),
migrations.AlterField(
model_name='rating',
name='usability',
field=models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10')]),
),
]
| 35.871429
| 173
| 0.526483
|
794ec5ec452833a4487441a0ef9c0daeead18fca
| 390
|
py
|
Python
|
gerapy/server/server/wsgi.py
|
toyourheart163/Gerapy
|
de1971ab953b09225072bb9f895492f9f51a0f60
|
[
"MIT"
] | null | null | null |
gerapy/server/server/wsgi.py
|
toyourheart163/Gerapy
|
de1971ab953b09225072bb9f895492f9f51a0f60
|
[
"MIT"
] | null | null | null |
gerapy/server/server/wsgi.py
|
toyourheart163/Gerapy
|
de1971ab953b09225072bb9f895492f9f51a0f60
|
[
"MIT"
] | null | null | null |
'''
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
'''
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'heroku_settings')
application = get_wsgi_application()
| 22.941176
| 78
| 0.787179
|
794ec71cb75c10e56b8d4fe48d94e208d97eb568
| 4,584
|
py
|
Python
|
penguindome/client.py
|
jikamens/PenguinDome
|
4efd8c6cb758a78857a5dbd5817b78eef79e7f32
|
[
"Apache-2.0"
] | 81
|
2017-07-30T18:40:13.000Z
|
2022-02-23T23:04:54.000Z
|
penguindome/client.py
|
jikamens/PenguinDome
|
4efd8c6cb758a78857a5dbd5817b78eef79e7f32
|
[
"Apache-2.0"
] | 18
|
2017-10-24T00:20:49.000Z
|
2021-06-08T12:12:34.000Z
|
penguindome/client.py
|
jikamens/PenguinDome
|
4efd8c6cb758a78857a5dbd5817b78eef79e7f32
|
[
"Apache-2.0"
] | 22
|
2017-09-12T20:22:29.000Z
|
2021-11-11T18:45:04.000Z
|
# Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from functools import partial
import getpass
import json
import os
import requests
import sys
from tempfile import NamedTemporaryFile
from penguindome import (
load_settings,
get_setting as main_get_setting,
set_setting as main_set_setting,
get_logger as main_get_logger,
save_settings as main_save_settings,
get_selectors as main_get_selectors,
encrypt_document as main_encrypt_document,
client_gpg_version,
gpg_command as main_gpg_command,
top_dir,
)
gpg_command = partial(main_gpg_command, with_user_id=True,
minimum_version=client_gpg_version)
session = None
def get_setting(setting, default=None, check_defaults=True):
"""Fetch a setting from `client/settings.xml`
`setting` is a colon-separated list of keys and to transit to fetch the
desired setting. For example, `logging:handler` fetches the type Logbook
handler configured on the client.
`default` is the value to return if the setting does not exist.
`check_defaults` indicates whether `client/default-settings.yml` should be
checked if the specified setting isn't in `client/settings.xml`.
Returns None if the setting does not exist.
"""
return main_get_setting(load_settings('client'), setting, default,
check_defaults)
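# A minimal usage sketch; 'logging:handler' is a key path taken from the
# docstring above and 'stderr' is just an assumed fallback value.
def _example_get_setting():
    handler = get_setting('logging:handler', default='stderr')
    return handler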
def set_setting(setting, value):
return main_set_setting(load_settings('client'), setting, value)
def save_settings():
main_save_settings('client')
def get_logger(name):
return main_get_logger(get_setting, name, fail_to_local=True)
def get_selectors():
return main_get_selectors(get_setting)
def encrypt_document(*args, **kwargs):
return main_encrypt_document(get_setting, *args, **kwargs)
def server_request(cmd, data=None, data_path=None,
exit_on_connection_error=False, logger=None,
# Clients should never need to use these. They are for
# internal use on the server.
local_port=None, signed=True):
global session
if session is None:
session = requests.Session()
server_url = 'http://127.0.0.1:{}'.format(local_port) if local_port \
else get_setting('server_url')
if data and data_path:
raise Exception('Both data and data_path specified')
with NamedTemporaryFile('w+') as temp_data_file, \
NamedTemporaryFile('w+') as signature_file:
if data_path:
data = open(data_path).read()
else:
data = json.dumps(data)
temp_data_file.write(data)
temp_data_file.flush()
data_path = temp_data_file.name
post_data = {'data': data}
if signed:
gpg_command('--armor', '--detach-sign', '-o', signature_file.name,
data_path, log=logger)
signature_file.seek(0)
post_data['signature'] = signature_file.read()
kwargs = {
'data': post_data,
'timeout': 30,
}
if not local_port:
ca_path = get_setting('ssl:ca_path')
if ca_path:
if not ca_path.startswith('/'):
ca_path = os.path.join(top_dir, ca_path)
kwargs['verify'] = ca_path
try:
while True:
response = session.post('{}{}'.format(server_url, cmd), **kwargs)
if response.status_code == 401 and os.isatty(sys.stderr.fileno()):
username = input('Username:')
pw = getpass.getpass('Password:')
kwargs['auth'] = (username, pw)
continue
response.raise_for_status()
if 'auth' in kwargs and logger:
logger.info('Authenticated {} to {}', kwargs['auth'][0], cmd)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout):
if exit_on_connection_error:
sys.exit('Connection error posting to {}'.format(server_url))
raise
return response
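# A hypothetical usage sketch: the endpoint name and payload below are
# placeholders for illustration, not part of the PenguinDome API.
def _example_server_request(logger=None):
    response = server_request('/example_endpoint', data={'hostname': 'client01'},
                              exit_on_connection_error=True, logger=logger)
    return response.json()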
| 33.955556
| 79
| 0.657286
|
794ec76515f83520494079e4287d157782ae3f2f
| 4,461
|
py
|
Python
|
Google/benchmarks/ssd/implementations/tpu-v3-512-ssd/ssd/utils.py
|
mengkai94/training_results_v0.6
|
43dc3e250f8da47b5f8833197d74cb8cf1004fc9
|
[
"Apache-2.0"
] | 42
|
2019-07-11T18:23:52.000Z
|
2021-09-14T08:21:09.000Z
|
Google/benchmarks/ssd/implementations/tpu-v3-512-ssd/ssd/utils.py
|
mengkai94/training_results_v0.6
|
43dc3e250f8da47b5f8833197d74cb8cf1004fc9
|
[
"Apache-2.0"
] | 23
|
2019-07-29T05:21:52.000Z
|
2020-08-31T18:51:42.000Z
|
Google/benchmarks/ssd/implementations/tpu-v3-512-ssd/ssd/utils.py
|
mengkai94/training_results_v0.6
|
43dc3e250f8da47b5f8833197d74cb8cf1004fc9
|
[
"Apache-2.0"
] | 51
|
2019-07-12T05:10:25.000Z
|
2021-07-28T16:19:06.000Z
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for SSD train/eval with low level API."""
from absl import flags
import tensorflow as tf
FLAGS = flags.FLAGS
def wrap_computation_in_while_loop(op_fn, n, host_name):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
ops = op_fn()
if not isinstance(ops, list):
ops = [ops]
with tf.control_dependencies(ops):
return i + 1
with tf.device(device_for_host(host_name)):
return tf.while_loop(
lambda i: tf.less(i, n),
computation, [tf.constant(0)],
parallel_iterations=1)
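# A minimal sketch, assuming TF1-style graph mode: repeat a one-op computation
# n times on the host CPU. The host string below is only an illustration.
def _example_wrap_computation(host_name='/job:worker/replica:0/task:0'):
  counter = tf.Variable(0, dtype=tf.int32, name='example_counter')
  op_fn = lambda: [tf.assign_add(counter, 1)]
  return wrap_computation_in_while_loop(op_fn, n=10, host_name=host_name)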
def device_for_host(host_name):
return host_name + '/device:CPU:0'
def device_for_tpu_core(host_name, core=0):
return host_name + '/device:TPU_REPLICATED_CORE:%d' % core
def tpu_ordinal_fn(shard_index_in_host):
"""Return the TPU ordinal associated with a shard."""
return shard_index_in_host % FLAGS.num_shards_per_host
class InputDimsFlattener(object):
""""Flatten input_partition_dims for spatial partition."""
def __init__(self, input_partition_dims):
self._initialized = False
self._flattened_input_dims = None
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must not '
'be None')
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def validate_and_flatten_input_dims(self, features, labels):
"""Flatten input dims with the same order as flattened input tensors."""
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
if self._initialized:
return self._flattened_input_dims
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError('TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(
feature_names, feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError('TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(
label_names, label_dims_names))
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[self._feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(self._feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[self._label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([self._label_dims] * num_tensors_in_label)
self._flattened_input_dims = flattened_input_dims
self._initialized = True
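# A minimal sketch: with a single spatially partitioned feature and no labels,
# the flattener returns the partition dims in sorted feature-key order. The
# 'image' key and [1, 2, 2, 1] dims are illustrative values only.
def _example_flatten_input_dims():
  flattener = InputDimsFlattener([{'image': [1, 2, 2, 1]}])
  flattener.validate_and_flatten_input_dims({'image': None}, labels=None)
  return flattener.flattened_input_dims  # [[1, 2, 2, 1]]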
| 35.404762
| 80
| 0.694239
|
794ec7fe10f3be35e04670c341fbde6db91a82e5
| 400
|
py
|
Python
|
examples/move_husky.py
|
Robotics-Club-IIT-BHU/LaRoboLiga_PS2Arena
|
8fddadb7b7078368ec2e8d6c99d7a5c4a60e8f44
|
[
"MIT"
] | 1
|
2022-02-14T11:01:48.000Z
|
2022-02-14T11:01:48.000Z
|
examples/move_husky.py
|
Robotics-Club-IIT-BHU/LaRoboLiga_PS2Arena
|
8fddadb7b7078368ec2e8d6c99d7a5c4a60e8f44
|
[
"MIT"
] | null | null | null |
examples/move_husky.py
|
Robotics-Club-IIT-BHU/LaRoboLiga_PS2Arena
|
8fddadb7b7078368ec2e8d6c99d7a5c4a60e8f44
|
[
"MIT"
] | 13
|
2022-02-14T12:57:06.000Z
|
2022-03-02T11:57:23.000Z
|
# Example to use the move_husky() function
import gym
import LRL_main_arena
import time
import pybullet as p
import pybullet_data
import cv2
import os
if __name__ == "__main__":
parent_path = os.path.dirname(os.getcwd())
os.chdir(parent_path)
env = gym.make("la_robo_liga_arena-v0")
time.sleep(2)
while True:
p.stepSimulation()
env.move_husky(0.2, 0.2, 0.2, 0.2)
| 22.222222
| 46
| 0.695
|
794ec9884a93f96ca6dafa1b90d61c95c5835060
| 149
|
py
|
Python
|
configs/hrnet/htc_hrnetv2p_w40_28e_coco_05.py
|
anley1/Swin-Transformer-Object-Detection
|
0f26c7979ee2360acd359a5789cd8d52527e0d64
|
[
"Apache-2.0"
] | null | null | null |
configs/hrnet/htc_hrnetv2p_w40_28e_coco_05.py
|
anley1/Swin-Transformer-Object-Detection
|
0f26c7979ee2360acd359a5789cd8d52527e0d64
|
[
"Apache-2.0"
] | null | null | null |
configs/hrnet/htc_hrnetv2p_w40_28e_coco_05.py
|
anley1/Swin-Transformer-Object-Detection
|
0f26c7979ee2360acd359a5789cd8d52527e0d64
|
[
"Apache-2.0"
] | null | null | null |
_base_ = './htc_hrnetv2p_w40_20e_coco_05.py'
# learning policy
lr_config = dict(step=[24, 27])
runner = dict(type='EpochBasedRunner', max_epochs=28)
| 29.8
| 53
| 0.758389
|
794ecbbac52bbc61e3c69a373b79ae4333d01901
| 2,912
|
py
|
Python
|
transform_funcs.py
|
Institute-Web-Science-and-Technologies/community-trust-opensourced
|
704185c6bc40564815b39d024a829b077bf65777
|
[
"Apache-2.0"
] | null | null | null |
transform_funcs.py
|
Institute-Web-Science-and-Technologies/community-trust-opensourced
|
704185c6bc40564815b39d024a829b077bf65777
|
[
"Apache-2.0"
] | null | null | null |
transform_funcs.py
|
Institute-Web-Science-and-Technologies/community-trust-opensourced
|
704185c6bc40564815b39d024a829b077bf65777
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
import igraph
import numpy as np
import powerlaw
from scipy.stats import binom
# no transformation
def no_transform(feature, **kwargs):
return np.array(feature)
# transform feature to quantile
def quantile_transform(feature, **kwargs):
total = len(feature)
feature = np.array(feature)
# feature - quantile mapping
D = {}
for f in np.unique(feature):
D[f] = len(feature[feature < f]) / total
quantile = [D[f] for f in feature]
return np.array(quantile)
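# A minimal sketch: each value maps to the fraction of strictly smaller values,
# so [1, 2, 2, 4] becomes [0.0, 0.25, 0.25, 0.75].
def _example_quantile_transform():
    return quantile_transform([1, 2, 2, 4])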
# divide-by-average transformation
def average_transform(degree, **kwargs):
return np.array(degree) / np.mean(degree)
# degree transformation
def degree_transform(degree, **kwargs):
# pre-processing
degree = np.array(degree)
# fitting power-law distribution
fit = powerlaw.Fit(degree, discrete=True, xmin=(1,6))
alpha = fit.alpha
x_min = fit.xmin
n = len(degree)
total = len(degree[degree >= x_min])
c = (alpha - 1) * total / n
T = {}
for d in np.unique(degree):
if (d <= x_min):
T[d] = d
else:
T[d] = np.power(d/x_min, alpha-1) * x_min
degree = np.round([ T[d] for d in degree ])
return degree
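# An illustrative sketch with a small made-up degree sequence: degrees at or
# below the fitted x_min pass through unchanged, while larger degrees are
# stretched according to the fitted power-law exponent.
def _example_degree_transform():
    degrees = [1, 1, 2, 2, 3, 5, 8, 13, 40, 120]
    return degree_transform(degrees)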
# degree transformation with fallback
def degree_transform_with_fallback(degree, **kwargs):
# pre-processing
degree = np.array(degree)
    total = len(degree[degree > 0])  # normalise by the number of nodes with positive degree
# fitting power-law distribution
fit = powerlaw.Fit(degree, discrete=True)
alpha = fit.alpha
sigma = fit.sigma
x_min = min(6, fit.xmin)
P = {}; D = {}; T = {}
for d in np.unique(degree):
P[d] = len(degree[degree >= d]) / total
D[d] = d if d <= 1 else 1/P[d]
# fallback
if (sigma > 0.05):
print 'sigma =', sigma, ', fallback!'
return degree
c = (alpha - 1) * total / len(degree)
for d in np.unique(degree):
if (d <= 1):
T[d] = d
else:
P_r = len(degree[degree == d]) / total
P_p = np.power(d, -alpha) * c
T_d = np.power(d, alpha-1)
if (sigma > 0.05):
T[d] = (d*(P_r-P_p) + D[d]*P_p) / P_r if d < x_min else D[d]
else:
T[d] = (d*(P_r-P_p) + c/d) / P_r if d < x_min else T_d
degree = np.array([ T[d] for d in degree ])
return degree
# transform local clustering coeffient
def lcc_transform(lcc, degree):
degree, lcc = np.array(degree), np.array(lcc)
s = (degree * (degree - 1) / 2).astype(np.int)
t = np.round(lcc * s).astype(np.int)
if sum(s) == 0:
return lcc
P = {}
for S in np.unique(s):
t_s = t[s == S]
p0 = len(t_s[t_s == 0]) / len(t_s)
for T in np.unique(t_s):
P[(T,S)] = (len(t_s[t_s <= T]) / len(t_s) - p0) / (1 - p0) if p0 < 1 else 0
lcc = np.array([ P[(t[i], s[i])] for i in range(len(degree)) ])
return lcc
| 25.769912
| 87
| 0.562157
|
794ecc208ee65d310b2573f32ab437d91072efe4
| 2,500
|
py
|
Python
|
aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/ResetMainPasswordRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/ResetMainPasswordRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/ResetMainPasswordRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkopenanalytics_open.endpoint import endpoint_data
class ResetMainPasswordRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'openanalytics-open', '2018-06-19', 'ResetMainPassword','openanalytics')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ExternalUid(self):
return self.get_body_params().get('ExternalUid')
def set_ExternalUid(self,ExternalUid):
self.add_body_params('ExternalUid', ExternalUid)
def get_InitPassword(self):
return self.get_body_params().get('InitPassword')
def set_InitPassword(self,InitPassword):
self.add_body_params('InitPassword', InitPassword)
def get_ExternalAliyunUid(self):
return self.get_body_params().get('ExternalAliyunUid')
def set_ExternalAliyunUid(self,ExternalAliyunUid):
self.add_body_params('ExternalAliyunUid', ExternalAliyunUid)
def get_UseRandomPassword(self):
return self.get_body_params().get('UseRandomPassword')
def set_UseRandomPassword(self,UseRandomPassword):
self.add_body_params('UseRandomPassword', UseRandomPassword)
def get_EnableKMS(self):
return self.get_body_params().get('EnableKMS')
def set_EnableKMS(self,EnableKMS):
self.add_body_params('EnableKMS', EnableKMS)
def get_ExternalBizAliyunUid(self):
return self.get_body_params().get('ExternalBizAliyunUid')
def set_ExternalBizAliyunUid(self,ExternalBizAliyunUid):
self.add_body_params('ExternalBizAliyunUid', ExternalBizAliyunUid)
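# A hypothetical usage sketch: the credentials, region and parameter values
# below are placeholders; the request is sent with the standard aliyunsdkcore client.
def _example_reset_main_password():
	from aliyunsdkcore.client import AcsClient
	client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
	request = ResetMainPasswordRequest()
	request.set_ExternalUid('example-external-uid')
	request.set_UseRandomPassword('true')
	return client.do_action_with_exception(request)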
| 36.764706
| 101
| 0.7796
|
794eccb14075105cb5351c1591f40de421114620
| 426
|
py
|
Python
|
macropy/logging.py
|
CyberFlameGO/macropy
|
a815f5a58231d8fa65386cd71ff0d15d09fe9fa3
|
[
"Unlicense",
"MIT"
] | 2,061
|
2015-01-02T16:53:18.000Z
|
2022-03-31T12:01:07.000Z
|
macropy/logging.py
|
CyberFlameGO/macropy
|
a815f5a58231d8fa65386cd71ff0d15d09fe9fa3
|
[
"Unlicense",
"MIT"
] | 41
|
2015-02-25T02:54:46.000Z
|
2022-01-28T19:08:45.000Z
|
macropy/logging.py
|
CyberFlameGO/macropy
|
a815f5a58231d8fa65386cd71ff0d15d09fe9fa3
|
[
"Unlicense",
"MIT"
] | 151
|
2015-01-01T22:07:55.000Z
|
2022-03-03T07:55:20.000Z
|
# -*- coding: utf-8 -*-
# :Project: macropy3 -- enable basic logging
# :Created: gio 01 mar 2018 02:43:14 CET
# :Author: Alberto Berti <alberto@metapensiero.it>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2018 Alberto Berti
#
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
log.debug('Log started')
| 28.4
| 59
| 0.725352
|
794ecd1e2deaca7603bcf2c33f35adcbc7f451db
| 3,434
|
py
|
Python
|
desktop/core/ext-py/parquet-1.3.1/parquet/converted_types.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/parquet-1.3.1/parquet/converted_types.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/parquet-1.3.1/parquet/converted_types.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# -#- coding: utf-8 -#-
"""
Deal with parquet logical types (aka converted types), higher-order things built from primitive types.
The implementations in this class are pure python for the widest compatibility,
but they're not necessarily the most performant.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import codecs
import datetime
import json
import logging
import os
import struct
import sys
from decimal import Decimal
import thriftpy2 as thriftpy
THRIFT_FILE = os.path.join(os.path.dirname(__file__), "parquet.thrift")
parquet_thrift = thriftpy.load(THRIFT_FILE, module_name=str("parquet_thrift")) # pylint: disable=invalid-name
logger = logging.getLogger('parquet') # pylint: disable=invalid-name
bson = None # pylint: disable=invalid-name
try:
import bson
except ImportError:
pass
PY3 = sys.version_info.major > 2
# define bytes->int for non 2, 4, 8 byte ints
if PY3:
def intbig(data):
"""Convert big ints using python 3's built-in support."""
return int.from_bytes(data, 'big', signed=True)
else:
def intbig(data):
"""Convert big ints using a hack of encoding bytes as hex and decoding to int."""
return int(codecs.encode(data, 'hex'), 16)
DAYS_TO_MILLIS = 86400000000000
"""Number of millis in a day. Used to convert a Date to a date"""
def _convert_unsigned(data, fmt):
"""Convert data from signed to unsigned in bulk."""
num = len(data)
return struct.unpack(
"{}{}".format(num, fmt.upper()).encode("utf-8"),
struct.pack("{}{}".format(num, fmt).encode("utf-8"), *data)
)
def convert_column(data, schemae):
"""Convert known types from primitive to rich."""
ctype = schemae.converted_type
if ctype == parquet_thrift.ConvertedType.DECIMAL:
scale_factor = Decimal("10e-{}".format(schemae.scale))
if schemae.type == parquet_thrift.Type.INT32 or schemae.type == parquet_thrift.Type.INT64:
return [Decimal(unscaled) * scale_factor for unscaled in data]
return [Decimal(intbig(unscaled)) * scale_factor for unscaled in data]
if ctype == parquet_thrift.ConvertedType.DATE:
return [datetime.date.fromordinal(d) for d in data]
if ctype == parquet_thrift.ConvertedType.TIME_MILLIS:
return [datetime.timedelta(milliseconds=d) for d in data]
if ctype == parquet_thrift.ConvertedType.TIMESTAMP_MILLIS:
return [datetime.datetime.utcfromtimestamp(d / 1000.0) for d in data]
if ctype == parquet_thrift.ConvertedType.UTF8:
return [codecs.decode(item, "utf-8") for item in data]
if ctype == parquet_thrift.ConvertedType.UINT_8:
return _convert_unsigned(data, 'b')
if ctype == parquet_thrift.ConvertedType.UINT_16:
return _convert_unsigned(data, 'h')
if ctype == parquet_thrift.ConvertedType.UINT_32:
return _convert_unsigned(data, 'i')
if ctype == parquet_thrift.ConvertedType.UINT_64:
return _convert_unsigned(data, 'q')
if ctype == parquet_thrift.ConvertedType.JSON:
return [json.loads(s) for s in codecs.iterdecode(data, "utf-8")]
if ctype == parquet_thrift.ConvertedType.BSON and bson:
return [bson.BSON(s).decode() for s in data]
logger.info("Converted type '%s'' not handled",
parquet_thrift.ConvertedType._VALUES_TO_NAMES[ctype]) # pylint:disable=protected-access
return data
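# A minimal sketch: a stand-in schema element carrying just the fields that
# convert_column reads, used here to decode a UTF8 byte-array column.
class _ExampleUTF8Schema(object):
    converted_type = parquet_thrift.ConvertedType.UTF8
    type = parquet_thrift.Type.BYTE_ARRAY
def _example_convert_utf8_column():
    return convert_column([b"spam", b"eggs"], _ExampleUTF8Schema())  # ['spam', 'eggs']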
| 37.736264
| 110
| 0.699767
|
794ecd1eb6a41ac96430052375378ebc1b7ebd57
| 117
|
py
|
Python
|
tasks/admin.py
|
Hernandes-Silva/To-do-list-API
|
fff8e4c3a795285f659d63363d1d0c66f018b357
|
[
"MIT"
] | null | null | null |
tasks/admin.py
|
Hernandes-Silva/To-do-list-API
|
fff8e4c3a795285f659d63363d1d0c66f018b357
|
[
"MIT"
] | null | null | null |
tasks/admin.py
|
Hernandes-Silva/To-do-list-API
|
fff8e4c3a795285f659d63363d1d0c66f018b357
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from tasks.models import Task
# Register your models here.
admin.site.register(Task)
| 29.25
| 32
| 0.820513
|
794ecec1c3a19c7a20ffb850054b220fda3ed946
| 919
|
py
|
Python
|
user/vistas/widgets/header.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/header.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
user/vistas/widgets/header.py
|
ZerpaTechnology/occoa
|
a8c0bd2657bc058801a883109c0ec0d608d04ccc
|
[
"Apache-2.0"
] | null | null | null |
doc+='''
<header id="header" style="background-image: url('''
try: doc+=str(data['base_url']+'static/img/')
except Exception as e: doc+=str(e)
doc+='''logo_unexpo_blanco.png) !important;">
<!--Menu-->
<div class="content-menu">
<div class="container">
<div class="row web-cross-center">
<div class="col-lg-4">
<div class="logo">
<img class="logo__img" src="'''
try: doc+=str(data['base_url']+'static/img/')
except Exception as e: doc+=str(e)
doc+='''logo_unexpo.png" alt="logo">
</div>
</div>
<div class="col-lg-8">
<ul class="menu">
<li><a href="#">Home</a></li>
<li><a href="#">About</a></li>
<li><a href="portafolio.html">Portafolio</a></li>
<li><a href="#">Blog</a></li>
<li><a href="#">Contacto</a></li>
</ul>
</div>
</div>
</div>
</div>
<!--end Menu-->
</header>'''
| 29.645161
| 58
| 0.508161
|
794ecf0d4e6d1f337d24f44f9d8055f22b72f867
| 18,073
|
py
|
Python
|
cloud_storage/YandexDisk.py
|
coolworld2049/Social-media-file-downloader
|
45d2238f29dfcd049f4ecf610390b67af7dc8827
|
[
"MIT"
] | 1
|
2022-03-28T02:49:56.000Z
|
2022-03-28T02:49:56.000Z
|
cloud_storage/YandexDisk.py
|
coolworld2049/Social-media-file-downloader
|
45d2238f29dfcd049f4ecf610390b67af7dc8827
|
[
"MIT"
] | 1
|
2022-03-28T03:14:37.000Z
|
2022-03-28T03:15:02.000Z
|
cloud_storage/YandexDisk.py
|
coolworld2049/Social-media-file-downloader
|
45d2238f29dfcd049f4ecf610390b67af7dc8827
|
[
"MIT"
] | null | null | null |
import asyncio
import os
import time
from abc import abstractmethod
from typing import TextIO
import nest_asyncio
from aiohttp import ClientSession as clientSession, ClientConnectorError
from tqdm.contrib.telegram import tqdm
from core import users_db, logger
class YandexDisk:
def __init__(self):
self.__RESOURCES_URL = 'https://cloud-api.yandex.net/v1/disk/resources'
self.__ROOT_FOLDER = 'Saved from tg'
# ----authorization---
@staticmethod
@abstractmethod
def link():
link = f'https://oauth.yandex.ru/authorize?&response_type=code' \
f'&client_id={os.environ.get("ya_client_id")}'
return link
@staticmethod
@abstractmethod
async def auth(user_id, ya_token: str):
if len(ya_token) == 7:
async with clientSession() as session:
async with session.post('https://oauth.yandex.ru/token',
data={
'grant_type': 'authorization_code',
'code': ya_token,
'client_id': os.environ.get('ya_client_id'),
'client_secret': os.environ.get('ya_client_secret')
}) as resp:
get_access_token = await resp.json()
if resp.status == 200:
users_db["user"].upsert(
{
"user_id": user_id,
"y_api_token": get_access_token['access_token'],
"ya_user_authorized": True,
}, pk='user_id')
return 'Вы успешно авторизовались в Яндекс диске!'
else:
return f'Ошибка авторизации: {resp.status} в Яндекс диске!'
else:
return f'Вы ввели некорректную информацию'
@staticmethod
async def __request_upload_worker(url: str, params: dict, data: str, headers: dict):
async with clientSession() as session:
async with session.post(url=url, params=params, data=data, headers=headers):
await session.close()
@staticmethod
async def __wrapper(delay, coro):
await asyncio.sleep(delay)
return await coro
async def __multitask_post_requests(self, user_id: int, data: dict, folder_name: str, overwrite: bool = False):
counter = 0
subfolder_path = f'{self.__ROOT_FOLDER}/{folder_name}'
requests_dict = {}
for url, ext in data.items():
requests_dict[counter] = {
'url': f"{self.__RESOURCES_URL}/upload",
'params': {
'path': f'{subfolder_path}/{counter + 1}_file{ext}',
'url': url,
'fields': 'href',
'overwrite': f'{overwrite}'},
'data': None,
'headers': {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'OAuth {users_db["user"].get(user_id).get("y_api_token")}'}
}
counter += 1
chunk_size = 10
requests_list = [value for key, value in requests_dict.items()]
list_of_chunks = [requests_list[i:i + chunk_size]
for i in range(0, len(requests_list), chunk_size)]
if len(requests_dict) >= chunk_size:
tasks = []
nest_asyncio.apply()
loop = asyncio.get_running_loop()
for i in tqdm(range(len(list_of_chunks)), token=os.environ.get("BOT_TOKEN"),
chat_id=user_id):
for ch_items in list_of_chunks[i]:
tasks.append(loop.create_task(
self.__wrapper(0.03, self.__request_upload_worker(ch_items['url'],
ch_items['params'],
ch_items['data'],
ch_items['headers']))))
await asyncio.sleep(1.1)
            for k in range(len(tasks)):
                await tasks[k]
# logger.info(f'user_id {user_id}. Task {i} await: {tasks[i]}')
# ----yandex disk api requests----
async def __request_create_folder(self, user_id, folder_name, recreate_folder):
status = 0
count = 0
        while status not in (201, 400, 401, 503, 507):
try:
async with clientSession() as session:
async with session.put(f'{self.__RESOURCES_URL}?',
params={
'path': folder_name
},
data=None,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'OAuth {users_db["user"].get(user_id).get("y_api_token")}'
}) as resp:
status = resp.status
count += 1
# logger.info(f'user_id: {user_id}. Try create dir "{folder_name}" in cloud storage.'
# f' Response code: {str(resp.status)}. Message: {await resp.json()}')
match status:
case 0:
pass
case 201:
return True
case 423:
continue
case 429:
await asyncio.sleep(0.05)
case 404:
await self.__request_create_folder(user_id, self.__ROOT_FOLDER,
recreate_folder)
case 409:
if folder_name == self.__ROOT_FOLDER:
return True
elif not recreate_folder:
return True
else:
await self.__request_delete_folder(user_id, folder_name)
case _:
return False
except ClientConnectorError as cce:
logger.info(f'__request_create_folder(user_id: {user_id}) ClientConnectorError' + str(cce.args))
await asyncio.sleep(0.1)
continue
async def __request_delete_folder(self, user_id, folder_name):
status = 0
count = 0
        while status not in (200, 202, 204):
try:
await asyncio.sleep(0.05)
async with clientSession() as session:
async with session.delete(f'{self.__RESOURCES_URL}?',
params={
'path': f'{folder_name}',
'permanently': 'True'
},
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'OAuth {users_db["user"].get(user_id).get("y_api_token")}'
}) as resp:
status = resp.status
count += 1
# logger.info(f'user_id: {user_id}. Try delete dir "{folder_name}" in cloud storage.'
# f' Response code: {str(resp.status)}. Message: {await resp.json()}')
match status:
case 200 | 202 | 204:
return True
case 423:
continue
case _:
return False
except ClientConnectorError as cce:
logger.info(f'__request_delete_folder(user_id: {user_id}) ClientConnectorError' + str(cce.args))
await asyncio.sleep(0.1)
continue
async def request_publish(self, user_id, folder_name: str):
if users_db["user"].get(user_id).get("ya_upload_completed"):
try:
async with clientSession() as session:
async with session.put(f"{self.__RESOURCES_URL}/publish",
params={
'path': f"{self.__ROOT_FOLDER}/{folder_name}"
},
data=None,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'OAuth {users_db["user"].get(user_id).get("y_api_token")}'
}) as put_resp:
logger.info(f'user_id: {user_id}. Publish folder: {self.__ROOT_FOLDER}/{folder_name}.'
f' Response: {put_resp.status}')
except KeyError as ke:
logger.info(f'get_link_file(user_id: {user_id}) KeyError' + str(ke.args))
return f'get_link_file() KeyError {ke.args}'
finally:
published = await self.__request_public(user_id, folder_name)
if published:
for item in published['items']:
if item['name'] == folder_name:
return item['public_url']
else:
return 'При получении ссылки на опубликованный ресурс произошла ошибка'
else:
return f'get_link_file(user_id: {user_id}): ya_upload_completed: 0'
async def __request_public(self, user_id, folder_name: str = ''):
"""get_published_file"""
async with clientSession() as session:
async with session.get(f"{self.__RESOURCES_URL}/public",
params={
'path': f"{self.__ROOT_FOLDER}/{folder_name}",
'type': 'dir',
'preview_crop': 'true'
},
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'OAuth {users_db["user"].get(user_id).get("y_api_token")}'
}) as resp:
logger.info(f'user_id: {user_id}. Get published folder: {self.__ROOT_FOLDER}/{folder_name}.'
f' Response: {resp.status}')
if resp.status == 200:
return await resp.json()
else:
error = await resp.json()
return error['descriptions']
async def request_download(self, user_id, folder_name: str = '', file: str = '', ext: str = ''):
"""get link to file or folder"""
if users_db["user"].get(user_id).get("ya_upload_completed"):
try:
async with clientSession() as session:
async with session.get(f"{self.__RESOURCES_URL}/download",
params={
'path': f"{self.__ROOT_FOLDER}/{folder_name}/{file}{ext}"
},
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'OAuth {users_db["user"].get(user_id).get("y_api_token")}'
                                       }) as resp:
                    logging.info(f'user_id: {user_id}. Download folder: {self.__ROOT_FOLDER}/{folder_name}.'
                                 f' Response: {resp.status}')
                    status = resp.status
                    href = await resp.json()
except KeyError as ke:
logger.info(f'download_file(user_id: {user_id}) KeyError' + str(ke.args))
return f'download_file() KeyError {ke.args}'
except ClientConnectorError as cce:
logger.info(f'download_file(user_id: {user_id}) ClientConnectorError' + str(cce.args))
return f'download_file() ClientConnectorError {cce.args}'
            else:
                if status == 200:
                    return href['href']
                else:
                    return 'При получении ссылки на загрузку файла произошла ошибка'
else:
return f'download_file(user_id: {user_id}): ya_upload_completed: 0'
# ----processing response from yandex disk api----
async def request_upload_file(self, user_id: int, data: dict, folder_name: str, overwrite: bool = False):
counter = 0
subfolder_path = f'{self.__ROOT_FOLDER}/{folder_name}'
mininterval = len(data) / 1000
async with clientSession() as session:
async for url, ext in tqdm(data.items(), mininterval=mininterval, token=os.environ.get("BOT_TOKEN"),
chat_id=user_id):
try:
async with session.post(f"{self.__RESOURCES_URL}/upload",
params={
'path': f'{subfolder_path}/{counter + 1}_file{ext}',
'url': url,
'overwrite': str(overwrite)
},
data=None,
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': f'OAuth {users_db["user"].get(user_id).get("y_api_token")}'
}) as resp:
counter += 1
# logger.info(f" user_id: {user_id} | album: {subfolder_path} | status: {resp.status}")
except ClientConnectorError:
await asyncio.sleep(0.07)
continue
await session.close()
users_db['user'].upsert(
{
"user_id": user_id,
"total_number_uploaded_file":
users_db["user"].get(user_id).get("total_number_uploaded_file") + counter
}, pk="user_id")
logger.info(f'uploaded {counter}')
return counter
async def __create_directory(self, user_id, folder_name, recreate_folder):
users_db['user'].upsert(
{
"user_id": user_id,
"ya_upload_completed": False,
}, pk="user_id")
start_create_dir = time.perf_counter()
logger.info(f'user_id: {user_id}. Try create dir "{folder_name}" in cloud storage.')
if await self.__request_create_folder(user_id, self.__ROOT_FOLDER, recreate_folder=False):
if await self.__request_create_folder(user_id, f'{self.__ROOT_FOLDER}/{folder_name}',
recreate_folder):
end_create_dir = time.perf_counter()
logger.info(f'user_id: {user_id}. Directory creation was done in '
f'{end_create_dir - start_create_dir:0.4f} seconds')
return True
else:
end_create_dir = time.perf_counter()
logger.info(f'user_id: {user_id}. Directory overwrite was done in '
f'{end_create_dir - start_create_dir:0.4f} seconds')
return True
async def upload_file(self, user_id: int, data: dict | TextIO, folder_name: str, overwrite: bool = False,
recreate_folder: bool = True):
start = time.perf_counter()
if isinstance(data, dict):
if await self.__create_directory(user_id, folder_name, recreate_folder):
if (1 <= len(data) <= 10) and (len(data) / await self.request_upload_file(
user_id, data, folder_name, overwrite)) < 1.11111111111:
users_db["user"].upsert(
{
"user_id": user_id,
"ya_upload_completed": True,
}, pk='user_id')
else:
await self.__multitask_post_requests(user_id, data, folder_name, overwrite)
users_db["user"].upsert(
{
"user_id": user_id,
"ya_upload_completed": True,
}, pk='user_id')
elif isinstance(data, TextIO):
pass
end = time.perf_counter()
logger.info(f'upload_file(user_id: {user_id}) was completed in {end - start:0.4f} seconds')
| 50.909859
| 126
| 0.44857
|
794ecf75475e6a86e7d2b17bb6b8396ce12af170
| 108
|
py
|
Python
|
chevah/compat/tests/elevated/testing/__init__.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 5
|
2016-12-03T22:54:50.000Z
|
2021-11-17T11:17:39.000Z
|
chevah/compat/tests/elevated/testing/__init__.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 76
|
2015-01-22T16:00:31.000Z
|
2022-02-09T22:13:34.000Z
|
chevah/compat/tests/elevated/testing/__init__.py
|
chevah/compat
|
d22e5f551a628f8a1652c9f2eea306e17930cb8f
|
[
"BSD-3-Clause"
] | 1
|
2016-12-10T15:57:31.000Z
|
2016-12-10T15:57:31.000Z
|
# Copyright (c) 2012 Adi Roiban.
# See LICENSE for details.
"""
Tests executed under elevated accounts.
"""
| 18
| 39
| 0.712963
|
794ed1041eb290b3474b38d9361d8918f8ff488c
| 2,087
|
py
|
Python
|
may-solution/generator.py
|
maygnificent/story-prompt
|
ed6af00532bd76dc0187504830f8c5f735661f15
|
[
"Apache-2.0"
] | null | null | null |
may-solution/generator.py
|
maygnificent/story-prompt
|
ed6af00532bd76dc0187504830f8c5f735661f15
|
[
"Apache-2.0"
] | null | null | null |
may-solution/generator.py
|
maygnificent/story-prompt
|
ed6af00532bd76dc0187504830f8c5f735661f15
|
[
"Apache-2.0"
] | null | null | null |
import sys
import json
if __name__ == '__main__':
# convert input to dictionary
inputString = sys.argv[1]
inputDict = json.loads(inputString)
# check for missing inputs
while len(inputDict) < 5:
missingInput = ''
try:
if 'number' not in inputDict:
missingInput = 'number'
inputDict['number'] = int(input("Please enter a number: "))
if 'unit_of_measure' not in inputDict:
missingInput = 'unit_of_measure'
inputDict['unit_of_measure'] = input("Please enter a unit of measure: ")
if 'place' not in inputDict:
missingInput = 'place'
inputDict['place'] = input("Please enter a place: ")
if 'adjective' not in inputDict:
missingInput = 'adjective'
inputDict['adjective'] = input("Please enter an adjective: ")
if 'noun' not in inputDict:
missingInput = 'noun'
inputDict['noun'] = input("Please enter a noun: ")
except ValueError:
print(f"Invalid input. Please enter a(n) {missingInput}: ")
continue
else:
break
# check string input length
for item in inputDict:
# checks for strings with over 100 chars or empty strings / strings with just spaces
if item != 'number' and (len(inputDict[item]) > 100 or not(inputDict[item] and not inputDict[item].isspace())):
inputDict[item] = input(f"Please enter another {item} (max 100 characters): ")
# archive inputs to 'record' file
archive = open("record.txt", "a")
# archive.write(str(inputDict) + '\n')
archive.write(json.dumps(inputDict) + '\n')
    archive.close()
# place each input into template
output = f"One day Anna was walking her {inputDict['number']} {inputDict['unit_of_measure']} commute to {inputDict['place']} and found a {inputDict['adjective']} {inputDict['noun']} on the ground."
print (output)
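# Editor's sketch: what a complete run produces. The field values are invented;
# supplied as the JSON argument, the script writes the JSON line to record.txt
# and prints the sentence built by the f-string template above.
import json as _json

demo_input = _json.loads('{"number": 3, "unit_of_measure": "mile", '
                         '"place": "the library", "adjective": "shiny", '
                         '"noun": "coin"}')
demo_output = (f"One day Anna was walking her {demo_input['number']} "
               f"{demo_input['unit_of_measure']} commute to {demo_input['place']} "
               f"and found a {demo_input['adjective']} {demo_input['noun']} on the ground.")
print(demo_output)
# One day Anna was walking her 3 mile commute to the library and found a shiny
# coin on the ground.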
| 37.267857
| 201
| 0.576425
|
794ed13c13414d3d48bcf2ada5953841f44a0871
| 1,038
|
py
|
Python
|
Python3/0407-Trapping-Rain-Water-II/soln.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0407-Trapping-Rain-Water-II/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0407-Trapping-Rain-Water-II/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
import heapq


class Solution:
def trapRainWater(self, heightMap):
"""
:type heightMap: List[List[int]]
:rtype: int
"""
if not heightMap or not heightMap[0]:
return 0
heap = []
m, n = len(heightMap), len(heightMap[0])
seen = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if i == 0 or j == 0 or i == m - 1 or j == n - 1:
heapq.heappush(heap, (heightMap[i][j], i, j))
seen[i][j] = 1
ans = 0
while heap:
height, i, j = heapq.heappop(heap)
for di, dj in ((-1, 0), (1, 0), (0, 1), (0, -1)):
newi, newj = i + di, j + dj
if 0 <= newi < m and 0 <= newj < n and seen[newi][newj] == 0:
seen[newi][newj] = 1
ans += max(0, height - heightMap[newi][newj])
heapq.heappush(heap, (max(heightMap[newi][newj], height), newi, newj))
return ans
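# Editor's sketch: the example grid from the problem statement, run against the
# Solution class above (with the heapq import added). Expected answer: 4.
if __name__ == '__main__':
    height_map = [
        [1, 4, 3, 1, 3, 2],
        [3, 2, 1, 3, 2, 4],
        [2, 3, 3, 2, 3, 1],
    ]
    print(Solution().trapRainWater(height_map))  # -> 4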
| 38.444444
| 90
| 0.425819
|
794ed1729fc71aa3a2ae360ec2116df2220db6c0
| 18,823
|
py
|
Python
|
jase_im/blog/views.py
|
chenomg/blog.jase.im_v0.1
|
41e77fae1435f2c16701d982bd71fddbd399508e
|
[
"MIT"
] | 2
|
2019-02-22T16:09:35.000Z
|
2019-11-27T10:22:49.000Z
|
jase_im/blog/views.py
|
chenomg/blog.jase.im_v0.1
|
41e77fae1435f2c16701d982bd71fddbd399508e
|
[
"MIT"
] | 3
|
2018-12-22T13:40:15.000Z
|
2020-06-05T19:26:48.000Z
|
jase_im/blog/views.py
|
chenomg/blog.jase.im_v0.1
|
41e77fae1435f2c16701d982bd71fddbd399508e
|
[
"MIT"
] | null | null | null |
import datetime
import json
import logging
from markdown import markdown, Markdown
from markdown.extensions.toc import TocExtension
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils.text import slugify
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.views.generic.edit import UpdateView
from registration.backends.simple.views import RegistrationView
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import permissions, status
from blog.forms import CommentForm, UserProfileForm, UserUpdateForm, MDEditorModelForm
from blog.serializers import PostGetSerializer, PostAddSerializer
from blog.models import Category, Tag, Post, Comment, Page, UserProfile
md = Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
TocExtension(slugify=slugify),
])
def get_remote_ip(request):
return request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR'))
def index(request):
login_user = get_login_user(request)
query = request.GET.get('query')
if query:
logging.info('用户: {}, IP: {}, 打开主页, query={}'.format(
login_user, get_remote_ip(request), query))
posts = Post.objects.filter(
Q(title__icontains=query)
| Q(publish_content__icontains=query)).order_by('-modified_time')
else:
logging.info('用户: {}, IP: {}, 打开主页'.format(
login_user, get_remote_ip(request)))
posts = Post.objects.filter(is_publish=True).order_by('-modified_time')
for post in posts:
post.publish_excerpt = md.convert(post.publish_excerpt)
posts_per_page = 4
paginator = Paginator(posts, posts_per_page)
pages_count = paginator.num_pages
page_id = int(request.GET.get('page', '1'))
page_previous_id = 1
page_next_id = pages_count
if page_id == 1:
page_previous = False
else:
page_previous = True
page_previous_id = page_id - 1
if page_id == pages_count:
page_next = False
else:
page_next = True
page_next_id = page_id + 1
if page_id > pages_count:
page_id = pages_count
selected_page = paginator.page(page_id)
context_dic = {
'login_user': login_user,
'posts': selected_page,
'pages_total': pages_count,
'page_current': page_id,
'page_previous': page_previous,
'page_previous_id': page_previous_id,
'page_next': page_next,
'page_next_id': page_next_id,
'query': query,
}
return render(request, 'blog/index.html', context=context_dic)
def about(request):
login_user = get_login_user(request)
logging.info('用户: {}, IP: {}, 打开about'.format(login_user,
get_remote_ip(request)))
page = get_object_or_404(Page, slug='about')
page.views += 1
page.save()
page.content = md.convert(page.content)
page.toc = md.toc
context_dic = {'page': page, 'login_user': login_user}
return render(request, 'blog/about.html', context=context_dic)
def post_detail(request, slug):
login_user = get_login_user(request)
post = get_object_or_404(Post, slug=slug)
logging.info('用户: {}, IP: {}, 打开post: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
post.views = post.views + 1
post.save()
post.publish_content = md.convert(post.publish_content)
# post.content = md.convert(post.content)
post.toc = md.toc
comments = post.comment_set.all()
tags = post.tags.all().order_by('slug')
context = {
'post': post,
'comments': comments,
'tags': tags,
'login_user': login_user,
}
form = CommentForm()
context['form'] = form
return render(request, 'blog/post_detail.html', context=context)
def comment_submit(request):
    login_user = get_login_user(request)
    post_slug = request.POST['post_slug']
post = get_object_or_404(Post, slug=post_slug)
logging.debug('用户: {}, IP: {}, 评论-准备接收, post: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
if request.method == 'POST':
form = CommentForm(request.POST)
response_data = {
'success': False,
            'name': request.POST.get('name', ''),
            'content': request.POST.get('content', ''),
}
if form.is_valid():
logging.debug('用户: {}, IP: {}, 评论-接收有效, post: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
name = form.cleaned_data['name']
email = form.cleaned_data['email']
content = form.cleaned_data['content']
comment = Comment.objects.create(
name=name, content=content, email=email, post=post)
response_data['success'] = True
response_data['name'] = name
response_data['content'] = content
logging.info(
'用户: {}, IP: {}, 评论-成功, post: {} - {}, comment-id: {}'.format(
                    login_user, get_remote_ip(request), post.id, post.slug,
                    comment.id))
else:
logging.warn('用户: {}, IP: {}, 评论-数据无效, post: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
return HttpResponse(
json.dumps(response_data), content_type='application/json')
def category(request):
login_user = get_login_user(request)
logging.info('用户: {}, IP: {}, 打开Category'.format(
login_user, get_remote_ip(request)))
categories = Category.objects.all().order_by('-name')
posts = Post.objects.filter(is_publish=True)
context = {
'categories': categories,
'posts': posts,
'login_user': login_user
}
return render(request, 'blog/category.html', context=context)
def archive(request):
login_user = get_login_user(request)
logging.info('用户: {}, IP: {}, 打开存档'.format(login_user,
get_remote_ip(request)))
posts = Post.objects.filter(is_publish=True).order_by('-created_time')
dates = set([(p.created_time.year, p.created_time.month) for p in posts])
# 存档页面月份倒序
dates = sorted(
[datetime.date(dt[0], dt[1], 1) for dt in dates], reverse=True)
context = {'posts': posts, 'dates': dates, 'login_user': login_user}
return render(request, 'blog/archive.html', context=context)
def tag_list_show(request):
login_user = get_login_user(request)
logging.info('用户: {}, IP: {}, 打开tag_list_show'.format(
login_user, get_remote_ip(request)))
tags = Tag.objects.all().order_by('slug')
context = {'tags': tags, 'login_user': login_user}
return render(request, 'blog/tags_list_show.html', context=context)
def tag_show(request, tag_slug):
login_user = get_login_user(request)
try:
tag = Tag.objects.get(slug=tag_slug)
logging.info('用户: {}, IP: {}, 打开tag: {}'.format(
login_user, get_remote_ip(request), tag.slug))
posts = Tag.objects.get(slug=tag_slug).post_set.filter(is_publish=True)
for p in posts:
p.publish_excerpt = md.convert(p.publish_excerpt)
except Exception as e:
logging.warn('用户: {}, IP: {}, 打开tag失败: {}'.format(
get_remote_ip(request), tag_slug, login_user))
tag = False
posts = None
context = {
'tag': tag,
'tag_slug': tag_slug,
'posts': posts,
'login_user': login_user
}
return render(request, 'blog/tag_show.html', context=context)
@login_required
def register_profile(request):
"""
用于展示目前登陆用户的信息,并且可以更新部分信息, 未完成
"""
login_user = get_login_user(request)
logging.info('用户: {}, IP: {}, 打开登陆用户信息编辑'.format(
login_user, get_remote_ip(request)))
if UserProfile.objects.filter(user=login_user):
userprofile = UserProfile.objects.get(user=login_user)
else:
logging.warn('用户: {}, IP: {}, 登陆用户信息详情暂无, 即将生成'.format(
login_user, get_remote_ip(request)))
userprofile = UserProfile(user=login_user)
userprofile.save()
userform = UserUpdateForm({
'email': login_user.email,
'website': userprofile.website
})
if request.method == 'POST':
logging.info('用户: {}, IP: {}, 用户信息变更提交'.format(
login_user, get_remote_ip(request)))
userform = UserUpdateForm(request.POST)
if userform.is_valid():
logging.info('用户: {}, IP: {}, 用户信息变更提交有效'.format(
login_user, get_remote_ip(request)))
login_user.email = userform.cleaned_data['email']
login_user.save()
userprofile.website = userform.cleaned_data['website']
picture = request.FILES.get('avator')
if picture:
logging.info('用户: {}, IP: {}, 用户信息变更照片提交有效'.format(
login_user, get_remote_ip(request)))
userprofile.picture = picture
userprofile.save()
context = {
'login_user': login_user,
'userprofile': userprofile,
'userform': userform
}
return render(request, 'blog/register_profile.html', context=context)
@login_required
def update_post(request, slug):
login_user = get_login_user(request)
post = get_object_or_404(Post, slug=slug, author=login_user)
logging.info('用户: {}, IP: {}, 更新开始: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
post_form = MDEditorModelForm(instance=post)
context = {'post_form': post_form, 'post': post, 'login_user': login_user}
if request.method == 'POST':
logging.info('用户: {}, IP: {}, 更新提交: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
form = MDEditorModelForm(request.POST)
context['post_form'] = form
if form.is_valid():
logging.info('用户: {}, IP: {}, 更新提交有效: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
update_is_publish = request.POST.getlist('is_publish')
if post.title != form.cleaned_data['title']:
post.slug = ''
post.title = form.cleaned_data['title']
logging.info('用户: {}, IP: {}, 更新标题变更: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id,
post.slug))
post.content = form.cleaned_data['content']
post.excerpt = form.cleaned_data['excerpt']
if update_is_publish:
logging.info('用户: {}, IP: {}, 更新完成时将同时发布: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id,
post.slug))
post.is_publish = True
post.publish_content = post.content
post.publish_excerpt = post.excerpt
post.category = Category.objects.get(
name=form.cleaned_data['category'])
post.save()
post.tags = form.cleaned_data['tags']
add_tags = form.cleaned_data['add_tags']
if add_tags:
tgs = [i.strip() for i in add_tags.split(',')]
for t in tgs:
try:
tag = Tag(name=t)
tag.save()
                    except Exception as e:
logging.warn(
'用户: {}, IP: {}, tag: {} 已存在,不需要新建. 文章: {} - {}'.
format(login_user, get_remote_ip(request), t,
post.id, post.slug))
tag = Tag.objects.get(name=t)
post.tags.add(tag)
post.modified_time = datetime.datetime.now()
post.save()
if update_is_publish:
logging.info('用户: {}, IP: {}, 更新发布: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id,
post.slug))
return HttpResponseRedirect(reverse('blog:index'))
else:
# 后续使用ajax实现
logging.info('用户: {}, IP: {}, 更新保存: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id,
post.slug))
return HttpResponseRedirect(
reverse('blog:update_post', args=[post.slug]))
else:
logging.warn('用户: {}, IP: {}, 更新提交无效: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
return render(request, 'blog/post_update_form.html', context=context)
@login_required
def add_post(request):
login_user = get_login_user(request)
logging.info('用户: {}, IP: {}, Add_Post开始'.format(
login_user, get_remote_ip(request)))
post_form = MDEditorModelForm()
context = {'post_form': post_form, 'login_user': login_user}
if request.method == 'POST':
logging.info('用户: {}, IP: {}, Add_Post提交'.format(
login_user, get_remote_ip(request)))
form = MDEditorModelForm(request.POST)
context['post_form'] = form
if form.is_valid():
logging.info('用户: {}, IP: {}, Add_Post提交信息有效'.format(
login_user, get_remote_ip(request)))
is_publish = request.POST.getlist('is_publish')
post = Post()
post.title = form.cleaned_data['title']
post.author = login_user
post.content = form.cleaned_data['content']
post.excerpt = form.cleaned_data['excerpt']
post.category = Category.objects.get(
name=form.cleaned_data['category'])
# 保存后excerpt若为空值则自动生成
post.save()
logging.info('用户: {}, IP: {}, Add_Post提交已保存: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
post.tags = form.cleaned_data['tags']
add_tags = form.cleaned_data['add_tags']
if add_tags:
ts = [i.strip() for i in add_tags.split(',')]
for t in ts:
try:
tag = Tag(name=t)
tag.save()
                    except Exception as e:
logging.warn(
'用户: {}, IP: {}, Add_Post标签 {} 已存在: 文章: {} - {}'.
format(login_user, get_remote_ip(request), t,
post.id, post.slug))
tag = Tag.objects.get(name=t)
post.tags.add(tag)
post.save()
logging.info('用户: {}, IP: {}, Add_Post标签已保存: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
if is_publish:
post.is_publish = True
post.publish_content = post.content
post.publish_excerpt = post.excerpt
post.save()
logging.info('用户: {}, IP: {}, Add_Post已发布: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id,
post.slug))
return HttpResponseRedirect(reverse('blog:index'))
else:
logging.info('用户: {}, IP: {}, Add_Post已保存: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id,
post.slug))
# 后续使用ajax实现
return HttpResponseRedirect(
reverse('blog:update_post', args=[post.slug]))
else:
logging.warn('用户: {}, IP: {}, Add_Post提交无效: 文章: {} - {}'.format(
login_user, get_remote_ip(request), post.id, post.slug))
return render(request, 'blog/add_post.html', context=context)
@login_required
def user_show(request, username):
login_user = get_login_user(request)
show_user = get_object_or_404(User, username=username)
logging.info('用户: {}, IP: {}, 用户展示, 正在查看: {}'.format(
login_user, get_remote_ip(request), show_user))
if UserProfile.objects.filter(user=show_user):
userprofile = UserProfile.objects.get(user=show_user)
else:
userprofile = UserProfile(user=show_user)
userprofile.save()
posts = Post.objects.filter(author=show_user).order_by('-created_time')
context = {
'show_user': show_user,
'login_user': login_user,
'posts': posts,
'userprofile': userprofile,
'is_current_user': False
}
if login_user == show_user:
context['is_current_user'] = True
return render(request, 'blog/user_show.html', context=context)
def page_not_found(request):
login_user = get_login_user(request)
page = Page.objects.get(slug='404')
page.views += 1
page.save()
page.content = md.convert(page.content)
page.toc = md.toc
context_dic = {'page': page, 'login_user': login_user}
    return render(request, 'blog/404.html', context=context_dic)
def get_login_user(request):
if request.user.is_authenticated:
return User.objects.get(username=request.user.username)
else:
return None
@api_view(['GET', 'POST'])
@permission_classes((permissions.AllowAny, ))
def post_collection(request):
if request.method == 'GET':
# posts = Post.objects.all()
posts = Post.objects.filter(is_publish=True).order_by('-modified_time')
serializer = PostGetSerializer(posts, many=True)
return Response(serializer.data)
elif request.method == 'POST':
data = {
'title': request.DATA.get('title'),
'author': request.user.pk,
'content': request.DATA.get('content'),
'excerpt': request.DATA.get('excerpt'),
'category': request.DATA.get('category'),
'tags': request.DATA.get('tags'),
'is_publish': request.DATA.get('is_publish')
}
serializer = PostAddSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
@permission_classes((permissions.AllowAny, ))
def post_element(request, pk):
try:
post = Post.objects.get(pk=pk)
except Post.DoesNotExist:
return HttpResponse(status=404)
if request.method == 'GET':
serializer = PostGetSerializer(post)
return Response(serializer.data)
| 39.963907
| 86
| 0.593582
|
794ed1796a22358b940d6bfa87af1ebd8c0a419d
| 9,236
|
py
|
Python
|
test/performance/legion/analysis_performance/process.py
|
stkaplan/legion
|
ad82a1c1f39ed20a16df29aa331428d42c0ecfb6
|
[
"Apache-2.0"
] | null | null | null |
test/performance/legion/analysis_performance/process.py
|
stkaplan/legion
|
ad82a1c1f39ed20a16df29aa331428d42c0ecfb6
|
[
"Apache-2.0"
] | null | null | null |
test/performance/legion/analysis_performance/process.py
|
stkaplan/legion
|
ad82a1c1f39ed20a16df29aa331428d42c0ecfb6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from functools import reduce

from legion_serializer import LegionProfASCIIDeserializer
noop = lambda **kwargs: None
id_to_task_group = {}
task_groups = {}
num_task_groups = 0
summaries = []
versioning_ops = set([])
last_op_id = None
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
def length(self):
return self.end - self.start
def __repr__(self):
return "[" + str(self.start) + ", " + str(self.end) + "] : " + \
str(self.length()) + "us"
def __add__(self, other):
return Interval(min(self.start, other.start), max(self.end, other.end))
def itv_sum(l):
return reduce(lambda x, y: x + y, l)
class Summary(object):
def __init__(self, span):
self.span = span
self.num_tasks = 0
self.logical_analysis = 0
self.physical_analysis = 0
self.post_end = 0
self.prepipeline = 0
self.versioning = 0
self.sum = 0
self.ctx_switch = 0
def cache(self):
self.sum = self.logical_analysis + self.physical_analysis + \
self.post_end + self.prepipeline + self.versioning
self.ctx_switch = self.span.length() - self.sum
def __add__(self, other):
result = Summary(None)
result.num_tasks = self.num_tasks + other.num_tasks
result.logical_analysis = self.logical_analysis + other.logical_analysis
result.physical_analysis = self.physical_analysis + other.physical_analysis
result.post_end = self.post_end + other.post_end
result.prepipeline = self.prepipeline + other.prepipeline
result.versioning = self.versioning + other.versioning
result.span = self.span + other.span
result.sum = self.sum + other.sum
result.ctx_switch = self.ctx_switch + other.ctx_switch
return result
def __repr__(self):
num_tasks = float(self.num_tasks)
s = "* total overhead: " + str(self.span.length() / num_tasks) + "\n"
s = s + "* number of tasks: " + str(self.num_tasks) + "\n"
s = s + "* logical analysis: " + str(self.logical_analysis / num_tasks) + "\n"
s = s + "* physical analysis: " + str(self.physical_analysis / num_tasks) + "\n"
s = s + "* post end task: " + str(self.post_end / num_tasks) + "\n"
s = s + "* prepipeline: " + str(self.prepipeline / num_tasks) + "\n"
s = s + "* close/open/advance: " + str(self.versioning / num_tasks) + "\n"
s = s + "* context switch: " + str(self.ctx_switch / num_tasks)
return s
class TaskGroupInfo(object):
def __init__(self):
self.tasks = set([])
self.logical_analysis = set([])
self.physical_analysis = set([])
self.post_end = set([])
self.prepipeline = set([])
self.versioning_ops = set([])
self.last_interval = None
self.variant_id = None
def add_task(self, task_id):
self.tasks.add(task_id)
def add_interval(self, interval, op_kind):
if op_kind == 17 or op_kind == 14 or op_kind == 15:
self.physical_analysis.add(interval)
elif op_kind == 1:
self.post_end.add(interval)
elif op_kind == 12:
self.logical_analysis.add(interval)
elif op_kind == 11:
self.prepipeline.add(interval)
else:
return
self.last_interval = interval
def add_wait_interval(self, wait_interval, op_kind):
assert(self.last_interval != None)
if op_kind == 17 or op_kind == 14 or op_kind == 15:
target = self.physical_analysis
elif op_kind == 1:
target = self.post_end
elif op_kind == 12:
target = self.logical_analysis
elif op_kind == 11:
target = self.prepipeline
else:
return
target.remove(self.last_interval)
before_wait = Interval(self.last_interval.start, wait_interval.start)
after_wait = Interval(wait_interval.end, self.last_interval.end)
if before_wait.length() > 0:
target.add(before_wait)
if after_wait.length() > 0:
target.add(after_wait)
self.last_interval = after_wait
def add_versioning_op(self, interval):
self.versioning_ops.add(interval)
def get_summary(self):
span = itv_sum(self.logical_analysis) + itv_sum(self.physical_analysis) + \
itv_sum(self.post_end) + itv_sum(self.prepipeline)
summary = Summary(span)
summary.num_tasks = len(self.tasks)
summary.logical_analysis = \
sum([i.length() for i in self.logical_analysis])
summary.physical_analysis = \
sum([i.length() for i in self.physical_analysis])
summary.post_end = \
sum([i.length() for i in self.post_end])
summary.prepipeline = \
sum([i.length() for i in self.prepipeline])
summary.versioning = \
sum([i.length() for i in self.versioning_ops])
summary.cache()
return summary
def add_new_task_group(task_id):
global num_task_groups
task_group = TaskGroupInfo()
num_task_groups = num_task_groups + 1
task_groups[num_task_groups] = task_group
id_to_task_group[task_id] = task_group
return task_group
def gather_slice_owners(parent_id, op_id):
if parent_id not in id_to_task_group:
task_group = add_new_task_group(parent_id)
else:
task_group = id_to_task_group[parent_id]
id_to_task_group[op_id] = task_group
def gather_meta_info(op_id, lg_id, proc_id, create, ready, start, stop):
global last_op_id
if op_id in versioning_ops:
assert(last_op_id != None)
task_group = id_to_task_group[last_op_id]
task_group.add_versioning_op(Interval(start, stop))
elif op_id == 0 or op_id not in id_to_task_group:
return
else:
task_group = id_to_task_group[op_id]
task_group.add_interval(Interval(start, stop), lg_id)
last_op_id = op_id
def gather_meta_wait_info(op_id, lg_id, wait_start, wait_ready, wait_end):
if op_id == 0 or op_id not in id_to_task_group:
return
task_group = id_to_task_group[op_id]
task_group.add_wait_interval(Interval(wait_start, wait_end), lg_id)
def gather_task_info(op_id, variant_id, proc_id, create, ready, start, stop):
if op_id not in id_to_task_group:
task_group = add_new_task_group(op_id)
else:
task_group = id_to_task_group[op_id]
task_group.add_task(op_id)
task_group.variant_id = variant_id
def mark_versioning_ops(op_id, kind):
if 5 <= kind and kind <= 8:
versioning_ops.add(op_id)
callbacks = {
"MessageDesc": noop,
"MapperCallDesc": noop,
"RuntimeCallDesc": noop,
"MetaDesc": noop,
"OpDesc": noop,
"ProcDesc": noop,
"MemDesc": noop,
"TaskKind": noop,
"TaskVariant": noop,
"OperationInstance": mark_versioning_ops,
"MultiTask": noop,
"SliceOwner": gather_slice_owners,
"TaskWaitInfo": noop,
"MetaWaitInfo": gather_meta_wait_info,
"TaskInfo": gather_task_info,
"MetaInfo": gather_meta_info,
"CopyInfo": noop,
"FillInfo": noop,
"InstCreateInfo": noop,
"InstUsageInfo": noop,
"InstTimelineInfo": noop,
"MessageInfo": noop,
"MapperCallInfo": noop,
"RuntimeCallInfo": noop,
"ProfTaskInfo": noop
}
class Dummy(object):
def __init__(self):
self.has_spy_data = False
def main():
global task_groups
global summaries
parser = argparse.ArgumentParser()
# usage: python3 process.py <logfiles>
parser.add_argument(dest='filenames', nargs='+',
help='input Legion Prof log filenames')
args = parser.parse_args()
deserializer = LegionProfASCIIDeserializer(Dummy(), callbacks)
if args.filenames is None:
print("You must pass in a logfile!")
exit(-1)
has_matches = False
for file_name in args.filenames:
matches = deserializer.parse(file_name, True)
has_matches = has_matches or matches > 0
if not has_matches:
print('No matches found! Exiting...')
return
# filter only the tasks of interest
    task_group_ids = set(task_groups.keys())
for i in task_group_ids:
if task_groups[i].variant_id != 2:
del task_groups[i]
    for i, grp in task_groups.items():
summaries.append(grp.get_summary())
# filter out the first and last 5% as they are mostly abnormal
    outliers = len(summaries) // 10
    if outliers:
        summaries = summaries[outliers:-outliers]
print(itv_sum(summaries))
if __name__ == "__main__":
main()
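# Editor's sketch: how the Interval '+' operator and itv_sum() behave. This is
# a self-contained mirror of the Interval class above; note that '+' yields a
# bounding span (gaps between intervals are absorbed), which is why Summary
# treats the difference between the span length and the per-phase sums as
# context-switch time.
from functools import reduce as _reduce


class _IntervalDemo(object):
    def __init__(self, start, end):
        self.start, self.end = start, end

    def length(self):
        return self.end - self.start

    def __add__(self, other):
        return _IntervalDemo(min(self.start, other.start), max(self.end, other.end))


_spans = [_IntervalDemo(0, 10), _IntervalDemo(25, 40), _IntervalDemo(5, 12)]
_merged = _reduce(lambda x, y: x + y, _spans)  # same reduction itv_sum() performs
print(_merged.start, _merged.end, _merged.length())  # -> 0 40 40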
| 33.585455
| 88
| 0.647791
|
794ed4a30826674e9f19b8dc49db55c1bada5aca
| 2,891
|
py
|
Python
|
tb_rest_client/models/models_ce/user_id.py
|
jernkuan/thingsboard-python-rest-client
|
3fb25272507494e6d494b27ca2380d3c543562e5
|
[
"Apache-2.0"
] | null | null | null |
tb_rest_client/models/models_ce/user_id.py
|
jernkuan/thingsboard-python-rest-client
|
3fb25272507494e6d494b27ca2380d3c543562e5
|
[
"Apache-2.0"
] | null | null | null |
tb_rest_client/models/models_ce/user_id.py
|
jernkuan/thingsboard-python-rest-client
|
3fb25272507494e6d494b27ca2380d3c543562e5
|
[
"Apache-2.0"
] | 1
|
2021-11-26T11:24:56.000Z
|
2021-11-26T11:24:56.000Z
|
# coding: utf-8
"""
ThingsBoard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501
OpenAPI spec version: 2.0
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from .entity_id import EntityId
class UserId(EntityId):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
def __init__(self, entity_type, id=None): # noqa: E501
"""UserId - a model defined in Swagger""" # noqa: E501
super().__init__(entity_type, id)
@property
def id(self):
"""Gets the id of this UserId. # noqa: E501
:return: The id of this UserId. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this UserId.
:param id: The id of this UserId. # noqa: E501
:type: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UserId, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.067961
| 163
| 0.55275
|
794ed4d13a26a83e374c41157fab1fed5e2490cc
| 11,968
|
py
|
Python
|
sdks/python/apache_beam/runners/worker/data_plane.py
|
mwylde/beam
|
9d0b03ad736797c33fbd6a4eb13155fa91367779
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/runners/worker/data_plane.py
|
mwylde/beam
|
9d0b03ad736797c33fbd6a4eb13155fa91367779
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/runners/worker/data_plane.py
|
mwylde/beam
|
9d0b03ad736797c33fbd6a4eb13155fa91367779
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of DataChannels for communicating across the data plane."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import logging
import queue
import sys
import threading
from builtins import object
from builtins import range
import grpc
from future import standard_library
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
standard_library.install_aliases()
# This module is experimental. No backwards-compatibility guarantees.
_DEFAULT_FLUSH_THRESHOLD = 10 << 20 # 10MB
class ClosableOutputStream(type(coder_impl.create_OutputStream())):
"""A Outputstream for use with CoderImpls that has a close() method."""
def __init__(self,
close_callback=None,
flush_callback=None,
flush_threshold=_DEFAULT_FLUSH_THRESHOLD):
super(ClosableOutputStream, self).__init__()
self._close_callback = close_callback
self._flush_callback = flush_callback
self._flush_threshold = flush_threshold
# This must be called explicitly to avoid flushing partial elements.
def maybe_flush(self):
if self._flush_callback and self.size() > self._flush_threshold:
self._flush_callback(self.get())
self._clear()
def close(self):
if self._close_callback:
self._close_callback(self.get())
class DataChannel(with_metaclass(abc.ABCMeta, object)):
"""Represents a channel for reading and writing data over the data plane.
Read from this channel with the input_elements method::
for elements_data in data_channel.input_elements(instruction_id, targets):
[process elements_data]
Write to this channel using the output_stream method::
out1 = data_channel.output_stream(instruction_id, target1)
out1.write(...)
out1.close()
When all data for all instructions is written, close the channel::
data_channel.close()
"""
@abc.abstractmethod
def input_elements(self, instruction_id, expected_targets):
"""Returns an iterable of all Element.Data bundles for instruction_id.
    This iterable terminates only once the full set of data has been received
for each of the expected targets. It may block waiting for more data.
Args:
instruction_id: which instruction the results must belong to
expected_targets: which targets to wait on for completion
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def output_stream(self, instruction_id, target):
"""Returns an output stream writing elements to target.
Args:
instruction_id: which instruction this stream belongs to
target: the target of the returned stream
"""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Closes this channel, indicating that all data has been written.
Data can continue to be read.
If this channel is shared by many instructions, should only be called on
worker shutdown.
"""
raise NotImplementedError(type(self))
class InMemoryDataChannel(DataChannel):
"""An in-memory implementation of a DataChannel.
This channel is two-sided. What is written to one side is read by the other.
The inverse() method returns the other side of a instance.
"""
def __init__(self, inverse=None):
self._inputs = []
self._inverse = inverse or InMemoryDataChannel(self)
def inverse(self):
return self._inverse
def input_elements(self, instruction_id, unused_expected_targets=None):
for data in self._inputs:
if data.instruction_reference == instruction_id:
yield data
def output_stream(self, instruction_id, target):
def add_to_inverse_output(data):
self._inverse._inputs.append( # pylint: disable=protected-access
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
return ClosableOutputStream(
add_to_inverse_output, flush_callback=add_to_inverse_output)
def close(self):
pass
class _GrpcDataChannel(DataChannel):
"""Base class for implementing a BeamFnData-based DataChannel."""
_WRITES_FINISHED = object()
def __init__(self):
self._to_send = queue.Queue()
self._received = collections.defaultdict(queue.Queue)
self._receive_lock = threading.Lock()
self._reads_finished = threading.Event()
self._closed = False
self._exc_info = None
def close(self):
self._to_send.put(self._WRITES_FINISHED)
self._closed = True
def wait(self, timeout=None):
self._reads_finished.wait(timeout)
def _receiving_queue(self, instruction_id):
with self._receive_lock:
return self._received[instruction_id]
def _clean_receiving_queue(self, instruction_id):
with self._receive_lock:
self._received.pop(instruction_id)
def input_elements(self, instruction_id, expected_targets):
"""
Generator to retrieve elements for an instruction_id
input_elements should be called only once for an instruction_id
Args:
instruction_id(str): instruction_id for which data is read
expected_targets(collection): expected targets
"""
received = self._receiving_queue(instruction_id)
done_targets = []
try:
while len(done_targets) < len(expected_targets):
try:
data = received.get(timeout=1)
except queue.Empty:
if self._exc_info:
t, v, tb = self._exc_info
raise_(t, v, tb)
else:
if not data.data and data.target in expected_targets:
done_targets.append(data.target)
else:
assert data.target not in done_targets
yield data
finally:
# Instruction_ids are not reusable so Clean queue once we are done with
# an instruction_id
self._clean_receiving_queue(instruction_id)
def output_stream(self, instruction_id, target):
def add_to_send_queue(data):
if data:
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
data=data))
def close_callback(data):
add_to_send_queue(data)
# End of stream marker.
self._to_send.put(
beam_fn_api_pb2.Elements.Data(
instruction_reference=instruction_id,
target=target,
              data=b''))
return ClosableOutputStream(
close_callback, flush_callback=add_to_send_queue)
def _write_outputs(self):
done = False
while not done:
data = [self._to_send.get()]
try:
# Coalesce up to 100 other items.
for _ in range(100):
data.append(self._to_send.get_nowait())
except queue.Empty:
pass
if data[-1] is self._WRITES_FINISHED:
done = True
data.pop()
if data:
yield beam_fn_api_pb2.Elements(data=data)
def _read_inputs(self, elements_iterator):
# TODO(robertwb): Pushback/throttling to avoid unbounded buffering.
try:
for elements in elements_iterator:
for data in elements.data:
self._receiving_queue(data.instruction_reference).put(data)
except: # pylint: disable=bare-except
if not self._closed:
logging.exception('Failed to read inputs in the data plane')
self._exc_info = sys.exc_info()
raise
finally:
self._reads_finished.set()
def _start_reader(self, elements_iterator):
reader = threading.Thread(
target=lambda: self._read_inputs(elements_iterator),
name='read_grpc_client_inputs')
reader.daemon = True
reader.start()
class GrpcClientDataChannel(_GrpcDataChannel):
"""A DataChannel wrapping the client side of a BeamFnData connection."""
def __init__(self, data_stub):
super(GrpcClientDataChannel, self).__init__()
self._start_reader(data_stub.Data(self._write_outputs()))
class GrpcServerDataChannel(
beam_fn_api_pb2_grpc.BeamFnDataServicer, _GrpcDataChannel):
"""A DataChannel wrapping the server side of a BeamFnData connection."""
def Data(self, elements_iterator, context):
self._start_reader(elements_iterator)
for elements in self._write_outputs():
yield elements
class DataChannelFactory(with_metaclass(abc.ABCMeta, object)):
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_data_channel(self, remote_grpc_port):
"""Returns a ``DataChannel`` from the given RemoteGrpcPort."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcClientDataChannelFactory(DataChannelFactory):
"""A factory for ``GrpcClientDataChannel``.
Caches the created channels by ``data descriptor url``.
"""
def __init__(self, credentials=None):
self._data_channel_cache = {}
self._lock = threading.Lock()
self._credentials = None
if credentials is not None:
logging.info('Using secure channel creds.')
self._credentials = credentials
def create_data_channel(self, remote_grpc_port):
url = remote_grpc_port.api_service_descriptor.url
if url not in self._data_channel_cache:
with self._lock:
if url not in self._data_channel_cache:
logging.info('Creating channel for %s', url)
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
channel_options = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
grpc_channel = None
if self._credentials is None:
grpc_channel = grpc.insecure_channel(url, options=channel_options)
else:
grpc_channel = grpc.secure_channel(
url, self._credentials, options=channel_options)
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(grpc_channel,
WorkerIdInterceptor())
self._data_channel_cache[url] = GrpcClientDataChannel(
beam_fn_api_pb2_grpc.BeamFnDataStub(grpc_channel))
return self._data_channel_cache[url]
def close(self):
logging.info('Closing all cached grpc data channels.')
for _, channel in self._data_channel_cache.items():
channel.close()
self._data_channel_cache.clear()
class InMemoryDataChannelFactory(DataChannelFactory):
"""A singleton factory for ``InMemoryDataChannel``."""
def __init__(self, in_memory_data_channel):
self._in_memory_data_channel = in_memory_data_channel
def create_data_channel(self, unused_remote_grpc_port):
return self._in_memory_data_channel
def close(self):
pass
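# Editor's sketch: how ClosableOutputStream (defined above) coalesces writes.
# The 16-byte threshold and the callbacks are made up for illustration, and, as
# the comment above notes, maybe_flush() is only called between whole elements
# so partial elements are never flushed.
_events = []
_out = ClosableOutputStream(
    close_callback=lambda data: _events.append(('close', len(data))),
    flush_callback=lambda data: _events.append(('flush', len(data))),
    flush_threshold=16)
_out.write(b'x' * 20)  # one complete encoded element
_out.maybe_flush()     # 20 bytes > 16-byte threshold -> flush_callback fires
_out.write(b'tail')
_out.close()           # close_callback receives the unflushed remainder
print(_events)         # expect [('flush', 20), ('close', 4)]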
| 32.699454
| 80
| 0.709726
|
794ed5066cd6e20f53cb7138a64a5abec58d4392
| 1,694
|
py
|
Python
|
csxextract/extractors/parscit.py
|
SeerLabs/new-csx-extractor
|
84de9276988e7d61cdf24a410d9fe1724dfaea9d
|
[
"Apache-2.0"
] | 9
|
2015-04-17T22:19:21.000Z
|
2021-08-03T08:27:30.000Z
|
csxextract/extractors/parscit.py
|
SeerLabs/new-csx-extractor
|
84de9276988e7d61cdf24a410d9fe1724dfaea9d
|
[
"Apache-2.0"
] | 2
|
2015-06-02T12:47:51.000Z
|
2015-06-26T18:45:47.000Z
|
csxextract/extractors/parscit.py
|
SeerLabs/new-csx-extractor
|
84de9276988e7d61cdf24a410d9fe1724dfaea9d
|
[
"Apache-2.0"
] | 8
|
2015-04-10T18:11:11.000Z
|
2018-07-14T21:09:37.000Z
|
from extraction.runnables import Extractor, RunnableError, ExtractorResult
import extraction.utils
import csxextract.config as config
import csxextract.interfaces as interfaces
import csxextract.filters as filters
import csxextract.utils as utils
import defusedxml.ElementTree as safeET
import xml.etree.ElementTree as ET
import subprocess32 as subprocess
import requests
import os
import shutil
import glob
import re
import tempfile
# Takes a plain text version of a PDF and uses ParsCit to extract citations
# Returns an xml document of citation info in CSX format
class ParsCitCitationExtractor(interfaces.CSXCitationExtractor):
dependencies = frozenset([interfaces.PlainTextExtractor, filters.AcademicPaperFilter])
result_file_name = '.cite'
def extract(self, data, dependency_results):
# Get the plain text file of the PDF and write it to a temporary location
pdf_text = dependency_results[interfaces.PlainTextExtractor].files['.txt']
text_file_path = extraction.utils.temp_file(pdf_text)
# Run parscit on the text file to extract citations
try:
status, stdout, stderr = extraction.utils.external_process(['perl', config.PARSCIT_PATH, text_file_path], timeout=20)
except subprocess.TimeoutExpired as te:
raise RunnableError('ParsCit timed out while processing document')
finally:
os.remove(text_file_path)
if status != 0:
raise RunnableError('ParsCit Failure. Possible error:\n' + stderr)
# ParsCit will give us a string representing an xml doc
# convert from string type into an xml object
xml = safeET.fromstring(stdout)
return ExtractorResult(xml_result=xml)
| 36.042553
| 126
| 0.762692
|
794ed574fe6b6e9fb13fbae99c5c88b921952a8b
| 2,442
|
py
|
Python
|
gen_dataset.py
|
soravux/jambokoko
|
e9b2460cf0dcd5eab397aeefc60cfd4bc4821028
|
[
"0BSD"
] | null | null | null |
gen_dataset.py
|
soravux/jambokoko
|
e9b2460cf0dcd5eab397aeefc60cfd4bc4821028
|
[
"0BSD"
] | null | null | null |
gen_dataset.py
|
soravux/jambokoko
|
e9b2460cf0dcd5eab397aeefc60cfd4bc4821028
|
[
"0BSD"
] | null | null | null |
# coding: utf-8
import fnmatch
import os
import random
import pickle
import numpy as np
from hdrio import imread, imsave
from envmap import EnvironmentMap
from scipy.ndimage.interpolation import zoom
from matplotlib import pyplot as plt
STACK_FILENAME = 'stack.exr'
TARGET_SIZE = (256, 256)
def getAvailableData(root_path, filename=STACK_FILENAME):
"""Get all the stacks images available."""
matches = []
for root, dirnames, filenames in os.walk(root_path):
for filename in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, filename))
return matches
def generateLDRfromHDR(im_path, out_prefix):
"""Convert an HDR image into a clipped 0-255 value ("simulating" a camera)"""
print('Processing: ', im_path)
im = imread(im_path)
h, w, c = im.shape
    im = im[:, w // 2 - h // 2:w // 2 + h // 2]  # center-crop to a square (integer division keeps the slice indices ints)
envmap = EnvironmentMap(im, 'SkyAngular').convertTo('LatLong', TARGET_SIZE[0])
im = envmap.data
valid = (im > 0) & (~np.isnan(im))
im_median = np.median(im[valid])
im_low = np.percentile(im[valid], 3)
im_high = np.percentile(im[valid], 95)
#scales = (TARGET_SIZE[0]/im.shape[0], TARGET_SIZE[1]/im.shape[1])
#im = zoom(im, [scales[0], scales[1], 1])
with open(out_prefix + "_hdr.pkl", 'wb') as fhdl:
pickle.dump(im, fhdl, pickle.HIGHEST_PROTOCOL)
imsave(out_prefix + '_hdr.exr', im)
    # low percentile (the 3rd, computed above)  -> LDR value 5
    # high percentile (the 95th, computed above) -> LDR value 255 (before clipping)
#print("Ratio:", (im_high - im_low))
ratio = im_high - im_low
if ratio < 0.1:
ratio = 0.1
im_ldr = (im - im_low) * 250. / ratio + 5
im_ldr = np.clip(im_ldr, 0, 255).astype('uint8')
imsave(out_prefix + '_ldr.jpg', im_ldr)
plt.figure()
plt.subplot(1,2,1); plt.hist(im.ravel()[im.ravel()<im_high], 50)
plt.subplot(1,2,2); plt.hist(im_ldr.ravel()[im_ldr.ravel()>0], 50)
plt.savefig(out_prefix + 'debug.png')
plt.close()
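# Illustrative only -- not part of the original script. The linear remapping above sends
# the low percentile (im_low) to LDR value 5 and the high percentile (im_high) to 255
# before clipping; e.g. with im_low=0.2 and im_high=1.2 (ratio=1.0), a pixel at 0.2 maps
# to 5, a pixel at 0.7 maps to 130, and a pixel at 1.2 maps to 255.
def _ldr_value_example(v, im_low, im_high):
    ratio = max(im_high - im_low, 0.1)
    return min(max((v - im_low) * 250. / ratio + 5, 0), 255)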
def main():
im_paths = getAvailableData('/gel/rachmaninoff/data/pictures/master/skycam/')
im_paths = random.sample(im_paths, 1000)
for im_path in im_paths:
root_path = os.path.dirname(im_path)
out_path = os.path.join('data', "_".join(root_path.split(os.sep)[-2:]))
try:
generateLDRfromHDR(im_path, out_path)
except Exception as e:
print("Error happened:", e)
if __name__ == '__main__':
main()
| 28.729412
| 82
| 0.634316
|
794ed6fd770032e7c934c61f166f0aeed9329dbf
| 10,209
|
py
|
Python
|
tests/inspectdb/tests.py
|
deployed/django
|
9db4271bd11ac23a5a5652bbcdf8fb6d4b997651
|
[
"BSD-3-Clause"
] | 1
|
2018-12-10T12:21:40.000Z
|
2018-12-10T12:21:40.000Z
|
tests/inspectdb/tests.py
|
avkryukov/django
|
f90be002d9d3c10b87c74741986e2cbf9f2b858e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/inspectdb/tests.py
|
avkryukov/django
|
f90be002d9d3c10b87c74741986e2cbf9f2b858e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from unittest import expectedFailure, skipUnless
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.six import PY3, StringIO
if connection.vendor == 'oracle':
expectedFailureOnOracle = expectedFailure
else:
expectedFailureOnOracle = lambda f: f
class InspectDBTestCase(TestCase):
def test_stealth_table_name_filter_option(self):
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
error_message = "inspectdb has examined a table that should have been filtered out."
# contrib.contenttypes is one of the apps always installed when running
# the Django test suite, check that one of its tables hasn't been
# inspected
self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)
def make_field_type_asserter(self):
"""Call inspectdb and return a function to validate a field type in its output"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
def assertFieldType(name, definition):
out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
self.assertEqual(definition, out_def)
return assertFieldType
# Inspecting oracle DB doesn't produce correct results, see #19884
@expectedFailureOnOracle
def test_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
assertFieldType('char_field', "models.CharField(max_length=10)")
assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
assertFieldType('date_field', "models.DateField()")
assertFieldType('date_time_field', "models.DateTimeField()")
assertFieldType('email_field', "models.CharField(max_length=75)")
assertFieldType('file_field', "models.CharField(max_length=100)")
assertFieldType('file_path_field', "models.CharField(max_length=100)")
if connection.vendor == 'postgresql':
# Only PostgreSQL has a specific type
assertFieldType('ip_address_field', "models.GenericIPAddressField()")
assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
else:
assertFieldType('ip_address_field', "models.CharField(max_length=15)")
assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
assertFieldType('slug_field', "models.CharField(max_length=50)")
assertFieldType('text_field', "models.TextField()")
assertFieldType('time_field', "models.TimeField()")
assertFieldType('url_field', "models.CharField(max_length=200)")
def test_number_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
if not connection.features.can_introspect_autofield:
assertFieldType('id', "models.IntegerField(primary_key=True) # AutoField?")
assertFieldType('big_int_field', "models.BigIntegerField()")
if connection.vendor == 'mysql':
# No native boolean type on MySQL
assertFieldType('bool_field', "models.IntegerField()")
assertFieldType('null_bool_field', "models.IntegerField(blank=True, null=True)")
else:
assertFieldType('bool_field', "models.BooleanField()")
assertFieldType('null_bool_field', "models.NullBooleanField()")
if connection.vendor == 'sqlite':
# Guessed arguments, see #5014
assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
"# max_digits and decimal_places have been guessed, "
"as this database handles decimal fields as float")
else:
assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
assertFieldType('float_field', "models.FloatField()")
assertFieldType('int_field', "models.IntegerField()")
if connection.vendor == 'sqlite':
assertFieldType('pos_int_field', "models.PositiveIntegerField()")
assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
else:
# 'unsigned' property undetected on other backends
assertFieldType('pos_int_field', "models.IntegerField()")
if connection.vendor == 'postgresql':
assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.IntegerField()")
if connection.vendor in ('sqlite', 'postgresql'):
assertFieldType('small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('small_int_field', "models.IntegerField()")
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_attribute_name_not_python_keyword(self):
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated an attribute name which is a python keyword"
# Recursive foreign keys should be set to 'self'
self.assertIn("parent = models.ForeignKey('self')", output)
self.assertNotIn("from = models.ForeignKey(InspectdbPeople)", output, msg=error_message)
# As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
self.assertIn("from_field = models.ForeignKey('InspectdbPeople', db_column='from_id')",
output)
self.assertIn("people_pk = models.ForeignKey(InspectdbPeople, primary_key=True)",
output)
self.assertIn("people_unique = models.ForeignKey(InspectdbPeople, unique=True)",
output)
def test_digits_column_name_introspection(self):
"""Introspection of column names consist/start with digits (#16536/#17676)"""
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated a model field name which is a number"
self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
self.assertIn("number_123 = models.CharField", output)
error_message = "inspectdb generated a model field name which starts with a digit"
self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
self.assertIn("number_4extra = models.CharField", output)
self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
self.assertIn("number_45extra = models.CharField", output)
def test_special_column_name_introspection(self):
"""
Introspection of column names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb', stdout=out)
output = out.getvalue()
base_name = 'Field' if connection.vendor != 'oracle' else 'field'
self.assertIn("field = models.IntegerField()", output)
self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
if PY3:
# Python 3 allows non-ascii identifiers
self.assertIn("tamaño = models.IntegerField()", output)
else:
self.assertIn("tama_o = models.IntegerField(db_column='tama\\xf1o')", output)
def test_managed_models(self):
"""Test that by default the command generates models with `Meta.managed = False` (#14305)"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.longMessage = False
self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')
@skipUnless(connection.vendor == 'sqlite',
"Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test")
def test_custom_fields(self):
"""
Introspection of columns with a custom field (#21090)
"""
out = StringIO()
orig_data_types_reverse = connection.introspection.data_types_reverse
try:
connection.introspection.data_types_reverse = {
'text': 'myfields.TextField',
'bigint': 'BigIntegerField',
}
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.assertIn("text_field = myfields.TextField()", output)
self.assertIn("big_int_field = models.BigIntegerField()", output)
finally:
connection.introspection.data_types_reverse = orig_data_types_reverse
| 50.539604
| 107
| 0.654325
|
794ed8eb6af4aee3be2d7005206a89256d50c0bd
| 1,584
|
py
|
Python
|
pyforce/memory/memory.py
|
olemeyer/pyforce
|
f0432ada7974419d40cce229cd875ce6ae4270b9
|
[
"Apache-2.0"
] | 4
|
2020-03-27T06:46:49.000Z
|
2020-07-01T20:55:32.000Z
|
pyforce/memory/memory.py
|
olemeyer/pyforce
|
f0432ada7974419d40cce229cd875ce6ae4270b9
|
[
"Apache-2.0"
] | null | null | null |
pyforce/memory/memory.py
|
olemeyer/pyforce
|
f0432ada7974419d40cce229cd875ce6ae4270b9
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
class Memory():
def __init__(self,device="cpu",data=None,ordered=True):
self.device=device
self.data=data if data is not None else {}
self.ordered=ordered
def clear(self):
self.data={}
def __len__(self):
keys=self.keys()
if len(keys)==0:
return 0
else:
k=keys[0]
if isinstance(self.data[k],dict):
keys2=[k for k in self.data[k]]
return self.data[k][keys2[0]].shape[0]
else:
return self.data[k].shape[0]
def keys(self):
keys=[k for k in self.data]
return keys
def append(self,**data):
for k in data:
if not isinstance(data[k],dict):
data[k]={"_value":data[k]}
new_data={i:data[k][i] for i in data[k]}
if k in self.data:
existing_data=self.data[k]
new_data={i:torch.cat([existing_data[i],new_data[i]]) for i in new_data}
self.data[k]=new_data
def to(self,device):
self.device=device
for k in self.data:
            self.data[k] = {i: self.data[k][i].to(self.device) for i in self.data[k]}
return self
def sample(self,n):
k=list(self.data.keys())[0]
        max_i = len(self)  # number of stored transitions (len() of the inner dict would only count its keys)
idx=np.random.choice(max_i,n,replace=n>max_i)
data={}
for k in self.data:
data[k]={i:self.data[k][i][idx] for i in self.data[k]}
return Memory(device=self.device,data=data,ordered=False)
def __getattr__(self,name):
if name not in self.data:
return []
if len(self.data[name])==1 and "_value" in self.data[name]:
return self.data[name]["_value"]
return self.data[name]
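# Illustrative only -- not part of the original module. A minimal usage sketch of the
# Memory buffer above (defined but never called here): append batched tensors, then
# draw a random mini-batch with sample().
def _memory_usage_example():
    mem = Memory(device="cpu")
    mem.append(state=torch.zeros(8, 4), reward=torch.ones(8))
    assert len(mem) == 8
    batch = mem.sample(4)  # a Memory holding 4 randomly chosen transitions
    return batch.state, batch.reward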
| 26.4
| 80
| 0.613005
|
794ed900b185270d20a04d8954dada28c961cdc9
| 8,219
|
py
|
Python
|
assignment2/q1_classifier.py
|
tavaresdong/stanford_cs224n
|
09936ce5d994bb25bb45d19fd363c37198b6eda2
|
[
"MIT"
] | null | null | null |
assignment2/q1_classifier.py
|
tavaresdong/stanford_cs224n
|
09936ce5d994bb25bb45d19fd363c37198b6eda2
|
[
"MIT"
] | null | null | null |
assignment2/q1_classifier.py
|
tavaresdong/stanford_cs224n
|
09936ce5d994bb25bb45d19fd363c37198b6eda2
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import tensorflow as tf
from q1_softmax import softmax
from q1_softmax import cross_entropy_loss
from model import Model
from utils.general_utils import get_minibatches
class Config(object):
"""Holds model hyperparams and data information.
The config class is used to store various hyperparameters and dataset
information parameters. Model objects are passed a Config() object at
instantiation. They can then call self.config.<hyperparameter_name> to
get the hyperparameter settings.
"""
n_samples = 1024
n_features = 100
n_classes = 5
batch_size = 64
n_epochs = 50
lr = 1e-3
class SoftmaxModel(Model):
"""Implements a Softmax classifier with cross-entropy loss."""
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
and will be fed data during training.
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape
(batch_size, n_features), type tf.float32
labels_placeholder: Labels placeholder tensor of shape
(batch_size, n_classes), type tf.int32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
"""
### YOUR CODE HERE
self.input_placeholder = tf.placeholder(tf.float32, shape=(self.config.batch_size, self.config.n_features))
self.labels_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.config.n_classes))
### END YOUR CODE
def create_feed_dict(self, inputs_batch, labels_batch=None):
"""Creates the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
If label_batch is None, then no labels are added to feed_dict.
Hint: The keys for the feed_dict should be the placeholder
tensors created in add_placeholders.
Args:
inputs_batch: A batch of input data.
labels_batch: A batch of label data.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE HERE
feed_dict = { self.input_placeholder: inputs_batch }
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
### END YOUR CODE
return feed_dict
def add_prediction_op(self):
"""Adds the core transformation for this model which transforms a batch of input
data into a batch of predictions. In this case, the transformation is a linear layer plus a
softmax transformation:
yhat = softmax(xW + b)
Hint: The input x will be passed in through self.input_placeholder. Each ROW of
self.input_placeholder is a single example. This is usually best-practice for
tensorflow code.
Hint: Make sure to create tf.Variables as needed.
Hint: For this simple use-case, it's sufficient to initialize both weights W
and biases b with zeros.
Returns:
pred: A tensor of shape (batch_size, n_classes)
"""
### YOUR CODE HERE
self.W = tf.Variable(tf.truncated_normal(shape=(self.config.n_features, self.config.n_classes)))
self.b = tf.Variable(tf.zeros([self.config.n_classes]))
self.yhat = tf.matmul(self.input_placeholder, self.W) + self.b
pred = softmax(self.yhat)
### END YOUR CODE
return pred
def add_loss_op(self, pred):
"""Adds cross_entropy_loss ops to the computational graph.
Hint: Use the cross_entropy_loss function we defined. This should be a very
short function.
Args:
pred: A tensor of shape (batch_size, n_classes)
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
loss = cross_entropy_loss(self.labels_placeholder, pred)
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/api_docs/python/tf/train/Optimizer
for more information. Use the learning rate from self.config.
Hint: Use tf.train.GradientDescentOptimizer to get an optimizer object.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
train_op = tf.train.GradientDescentOptimizer(learning_rate=self.config.lr).minimize(loss)
### END YOUR CODE
return train_op
def run_epoch(self, sess, inputs, labels):
"""Runs an epoch of training.
Args:
sess: tf.Session() object
inputs: np.ndarray of shape (n_samples, n_features)
labels: np.ndarray of shape (n_samples, n_classes)
Returns:
average_loss: scalar. Average minibatch loss of model on epoch.
"""
n_minibatches, total_loss = 0, 0
for input_batch, labels_batch in get_minibatches([inputs, labels], self.config.batch_size):
n_minibatches += 1
total_loss += self.train_on_batch(sess, input_batch, labels_batch)
return total_loss / n_minibatches
def fit(self, sess, inputs, labels):
"""Fit model on provided data.
Args:
sess: tf.Session()
inputs: np.ndarray of shape (n_samples, n_features)
labels: np.ndarray of shape (n_samples, n_classes)
Returns:
losses: list of loss per epoch
"""
losses = []
for epoch in range(self.config.n_epochs):
start_time = time.time()
average_loss = self.run_epoch(sess, inputs, labels)
duration = time.time() - start_time
print('Epoch {:}: loss = {:.2f} ({:.3f} sec)'.format(epoch, average_loss, duration))
losses.append(average_loss)
return losses
def __init__(self, config):
"""Initializes the model.
Args:
config: A model configuration object of type Config
"""
self.config = config
self.build()
def test_softmax_model():
"""Train softmax model for a number of steps."""
config = Config()
# Generate random data to train the model on
np.random.seed(1234)
inputs = np.random.rand(config.n_samples, config.n_features)
labels = np.zeros((config.n_samples, config.n_classes), dtype=np.int32)
labels[:, 0] = 1
# Tell TensorFlow that the model will be built into the default Graph.
# (not required but good practice)
with tf.Graph().as_default() as graph:
# Build the model and add the variable initializer op
model = SoftmaxModel(config)
init_op = tf.global_variables_initializer()
# Finalizing the graph causes tensorflow to raise an exception if you try to modify the graph
# further. This is good practice because it makes explicit the distinction between building and
# running the graph.
graph.finalize()
# Create a session for running ops in the graph
with tf.Session(graph=graph) as sess:
# Run the op to initialize the variables.
sess.run(init_op)
# Fit the model
losses = model.fit(sess, inputs, labels)
# If ops are implemented correctly, the average loss should fall close to zero
# rapidly.
assert losses[-1] < .5
print("Basic (non-exhaustive) classifier tests pass")
if __name__ == "__main__":
test_softmax_model()
| 36.691964
| 115
| 0.639129
|
794ed90cecf5388cb3cf5ec69902d1270f94139e
| 1,604
|
py
|
Python
|
python/problems/minimum_cost_for_tickets.py
|
vivaxy/algorithms
|
b2e49476ed2ad1fd82a1183f656d1907a666c347
|
[
"MIT"
] | 1
|
2019-05-04T13:21:41.000Z
|
2019-05-04T13:21:41.000Z
|
python/problems/minimum_cost_for_tickets.py
|
vivaxy/algorithms
|
b2e49476ed2ad1fd82a1183f656d1907a666c347
|
[
"MIT"
] | 1
|
2021-08-30T09:34:58.000Z
|
2021-08-30T09:34:58.000Z
|
python/problems/minimum_cost_for_tickets.py
|
vivaxy/algorithms
|
b2e49476ed2ad1fd82a1183f656d1907a666c347
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
import unittest
"""
https://leetcode.com/problems/minimum-cost-for-tickets/
https://leetcode.com/submissions/detail/227086131/
"""
from typing import List
class Solution1:
def mincostTickets(self, days: List[int], costs: List[int]) -> int:
self.memo = dict()
        # dp(i) = min(dp(i + 1) + costs[0], dp(i + 7) + costs[1], dp(i + 30) + costs[2])
def dp(day: int) -> int:
if day in self.memo:
return self.memo[day]
if day > 365:
return 0
if day in days:
ans = min(
dp(day + 1) + costs[0], dp(day + 7) + costs[1], dp(day + 30) + costs[2])
self.memo[day] = ans
return ans
ans = dp(day + 1)
self.memo[day] = ans
return ans
return dp(1)
class Solution:
def mincostTickets(self, days: List[int], costs: List[int]) -> int:
@lru_cache(None)
def dp(day: int) -> int:
if day <= 0:
return 0
if day in days:
return min(dp(day - 1) + costs[0], dp(day - 7) + costs[1], dp(day - 30) + costs[2])
return dp(day - 1)
return dp(365)
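# Example (illustrative): for days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15], the
# optimum is 11 -- a 1-day ticket for day 1, a 7-day ticket covering days 4-8, and
# another 1-day ticket for day 20 (2 + 7 + 2), matching the first test case below.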
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.mincostTickets(
[1, 4, 6, 7, 8, 20], [2, 7, 15]), 11)
self.assertEqual(solution.mincostTickets(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]), 17)
if __name__ == '__main__':
unittest.main()
| 28.140351
| 99
| 0.503741
|
794eda182f9e0dac032216b095e8a6fcdd126eb0
| 2,532
|
py
|
Python
|
vnpy/gateway/loopringv36/exceptions.py
|
dos2004/vnpy
|
8f2af5ff8c737adf32aaf4021ceb5bea1eea6ec4
|
[
"MIT"
] | 21
|
2020-07-15T07:06:25.000Z
|
2021-12-19T07:33:02.000Z
|
vnpy/gateway/loopringv36/exceptions.py
|
dos2004/vnpy
|
8f2af5ff8c737adf32aaf4021ceb5bea1eea6ec4
|
[
"MIT"
] | 2
|
2020-08-25T08:04:49.000Z
|
2020-10-04T11:55:07.000Z
|
vnpy/gateway/loopringv36/exceptions.py
|
dos2004/vnpy
|
8f2af5ff8c737adf32aaf4021ceb5bea1eea6ec4
|
[
"MIT"
] | 5
|
2020-08-03T02:46:19.000Z
|
2021-09-19T15:35:44.000Z
|
# coding=utf-8
class LoopringAPIException(Exception):
def __init__(self, response):
# self.code = 0
# try:
# json_res = response.json()
# except ValueError:
# self.message = 'Invalid JSON error message from Loopring: {}'.format(response.text)
# else:
# self.code = json_res['code']
# self.message = json_res['msg']
self.status_code = response.status_code
self.response = response
self.request = getattr(response, 'request', None)
def __str__(self): # pragma: no cover
return self.response.text
class LoopringRequestException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return 'LoopringRequestException: %s' % self.message
class LoopringOrderException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
def __str__(self):
return 'LoopringOrderException(code=%s): %s' % (self.code, self.message)
class LoopringOrderMinAmountException(LoopringOrderException):
def __init__(self, value):
message = "Amount must be a multiple of %s" % value
super(LoopringOrderMinAmountException, self).__init__(-1013, message)
class LoopringOrderMinPriceException(LoopringOrderException):
def __init__(self, value):
message = "Price must be at least %s" % value
super(LoopringOrderMinPriceException, self).__init__(-1013, message)
class LoopringOrderMinTotalException(LoopringOrderException):
def __init__(self, value):
message = "Total must be at least %s" % value
super(LoopringOrderMinTotalException, self).__init__(-1013, message)
class LoopringOrderUnknownSymbolException(LoopringOrderException):
def __init__(self, value):
message = "Unknown symbol %s" % value
super(LoopringOrderUnknownSymbolException, self).__init__(-1013, message)
class LoopringOrderInactiveSymbolException(LoopringOrderException):
def __init__(self, value):
message = "Attempting to trade an inactive symbol %s" % value
super(LoopringOrderInactiveSymbolException, self).__init__(-1013, message)
class LoopringWithdrawException(Exception):
def __init__(self, message):
if message == u'参数异常':
message = 'Withdraw to this address through the website first'
self.message = message
def __str__(self):
return 'LoopringWithdrawException: %s' % self.message
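# Illustrative only -- not part of the original module. A minimal sketch of how a caller
# might branch on these exception types; ``place_order`` is a hypothetical callable, not
# an API defined in this file.
def _example_error_handling(place_order):
    try:
        return place_order()
    except LoopringOrderException as e:
        # Order-level rejections carry a numeric code plus a human-readable message.
        return 'order rejected (%s): %s' % (e.code, e.message)
    except LoopringAPIException as e:
        # HTTP-level failures expose the raw response and its status code.
        return 'api error %s: %s' % (e.status_code, e.response.text)
    except LoopringRequestException as e:
        # Transport or malformed-response failures only carry a message.
        return 'request failed: %s' % e.message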
| 30.142857
| 97
| 0.68207
|
794eda383f6f8c6d4e88e020290bc1b8a0d35af1
| 6,426
|
py
|
Python
|
test/functional/rpc_bind.py
|
eloncoingit/eloncoin
|
20c0d1166e8c1e094f684654e8b4c80fc0c89ef7
|
[
"MIT"
] | 1
|
2021-02-28T07:14:09.000Z
|
2021-02-28T07:14:09.000Z
|
test/functional/rpc_bind.py
|
eloncoingit/eloncoin
|
20c0d1166e8c1e094f684654e8b4c80fc0c89ef7
|
[
"MIT"
] | null | null | null |
test/functional/rpc_bind.py
|
eloncoingit/eloncoin
|
20c0d1166e8c1e094f684654e8b4c80fc0c89ef7
|
[
"MIT"
] | 1
|
2021-05-01T00:14:29.000Z
|
2021-05-01T00:14:29.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running eloncoind with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| 49.430769
| 172
| 0.633209
|
794edb1576915509131366ea42a0d0ed8d685fed
| 6,929
|
py
|
Python
|
project/social_login.py
|
tomjuggler/login_system
|
c3341e218a1a7e5da033b52fb56416521fd256e3
|
[
"MIT"
] | 28
|
2020-12-30T23:54:35.000Z
|
2022-03-26T07:37:00.000Z
|
project/social_login.py
|
tomjuggler/login_system
|
c3341e218a1a7e5da033b52fb56416521fd256e3
|
[
"MIT"
] | 1
|
2021-05-31T19:14:20.000Z
|
2021-05-31T21:00:08.000Z
|
project/social_login.py
|
tomjuggler/login_system
|
c3341e218a1a7e5da033b52fb56416521fd256e3
|
[
"MIT"
] | 10
|
2021-02-02T00:59:41.000Z
|
2022-03-02T09:35:06.000Z
|
from flask import Flask, render_template, redirect, url_for, flash, Blueprint
from flask_login import current_user, login_user, login_required
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.google import make_google_blueprint, google
from flask_dance.contrib.facebook import make_facebook_blueprint, facebook
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.consumer.storage.sqla import SQLAlchemyStorage
from sqlalchemy.orm.exc import NoResultFound
from . import db
from .models import User, OAuth
github_blueprint = make_github_blueprint(client_id = 'YOUR CLIENT ID', client_secret = 'YOUR CLIENT SECRET')
google_blueprint = make_google_blueprint(client_id= "YOUR CLIENT ID", client_secret= "YOUR CLIENT SECRET", scope=[
"openid",
"https://www.googleapis.com/auth/userinfo.email",
"https://www.googleapis.com/auth/userinfo.profile",
]
)
facebook_blueprint = make_facebook_blueprint(client_id= "YOUR CLIENT ID", client_secret= "YOUR CLIENT SECRET", scope = [
"email"
]
)
github_bp = make_github_blueprint(storage = SQLAlchemyStorage(OAuth, db.session, user = current_user))
google_bp = make_google_blueprint(storage = SQLAlchemyStorage(OAuth, db.session, user = current_user))
facebook_bp = make_facebook_blueprint(storage = SQLAlchemyStorage(OAuth, db.session, user = current_user))
@oauth_authorized.connect_via(github_blueprint)
def github_logged_in(blueprint, token):
if not token:
flash("Failed to log in with GitHub.", category = "error")
return
resp = blueprint.session.get("/user")
if not resp.ok:
msg = "Failed to fecth user info from GitHub."
flash(msg, category= "error")
return
github_name = resp.json()["name"]
github_user_id = resp.json()["id"]
query = OAuth.query.filter_by(
provider = blueprint.name, provider_user_id = github_user_id)
try:
oauth = query.one()
except NoResultFound:
github_user_login = github_name
oauth = OAuth(
provider = blueprint.name,
provider_user_id = github_user_id,
provider_user_login = github_user_login,
token = token,
)
if current_user.is_anonymous:
if oauth.user:
login_user(oauth.user)
# flash("Successfully signed in with GitHub.", 'success')
else:
user = User(username = github_name)
oauth.user = user
db.session.add_all([user, oauth])
db.session.commit()
login_user(user)
# flash("Successfully signed in with GitHub.", 'success')
else:
if oauth.user:
if current_user != oauth.user:
url = url_for("auth.merge", username = oauth.user.username)
return redirect(url)
else:
            oauth.user = current_user
db.session.add(oauth)
db.session.commit()
# flash("Successfully linked GitHub account.", 'success')
return redirect(url_for("main.profile"))
@oauth_error.connect_via(github_blueprint)
def github_error(blueprint, message, response):
msg = ("OAuth error from {name}! " "message={message} response = {response}").format(
name = blueprint.name, message = message, response = response
)
flash(msg, category="error")
@oauth_authorized.connect_via(google_blueprint)
def google_logged_in(blueprint, token):
if not token:
flask("Failed to log in.", category="error")
return
resp = blueprint.session.get("/oauth2/v2/userinfo")
if not resp.ok:
msg = "Failed to fetch user info."
flash(msg, category="error")
return
google_name = resp.json()["name"]
google_user_id = resp.json()["id"]
query = OAuth.query.filter_by(
provider = blueprint.name, provider_user_id = google_user_id
)
try:
oauth = query.one()
except NoResultFound:
google_user_login = google_name
oauth = OAuth(
provider=blueprint.name,
provider_user_id=google_user_id,
provider_user_login=google_user_login,
token=token,
)
if current_user.is_anonymous:
if oauth.user:
login_user(oauth.user)
# flash("Successfully signed in with Google.", 'success')
else:
user = User(username = google_name)
oauth.user = user
db.session.add_all([user, oauth])
db.session.commit()
login_user(user)
# flash("Successfully signed in with Google.", 'success')
else:
if oauth.user:
if current_user != oauth.user:
url = url_for("auth.merge", username=oauth.user.username)
return redirect(url)
else:
oauth.user = current_user
db.session.add(oauth)
            db.session.commit()
# flash("Successfully linked Google account.")
return redirect(url_for("main.profile"))
@oauth_error.connect_via(google_blueprint)
def google_error(blueprint, message, response):
msg = ("OAuth error from {name}! " "message={message} response={response}").format(
name=blueprint.name, message = message, response = response
)
flash(msg, category = "error")
@oauth_authorized.connect_via(facebook_blueprint)
def facebook_logged_in(blueprint,token):
if not token:
flash("Failed to log in.", category="error")
return
resp = blueprint.session.get("/me")
if not resp.ok:
msg = "Failed to fetch user info."
flash(msg, category="error")
return
facebook_name = resp.json()["name"]
facebook_user_id = resp.json()["id"]
query = OAuth.query.filter_by(
provider = blueprint.name,
provider_user_id = facebook_user_id
)
try:
oauth = query.one()
except NoResultFound:
oauth = OAuth(
provider = blueprint.name,
provider_user_id = facebook_user_id,
token = token
)
if oauth.user:
login_user(oauth.user)
# flash("Successfully signed in with Facebook.", 'success')
else:
user = User(username = facebook_name)
oauth.user = user
db.session.add_all([user, oauth])
db.session.commit()
login_user(user)
# flash("Successfully signed in with Facebook.", 'success')
return redirect(url_for("main.profile"))
@oauth_error.connect_via(facebook_blueprint)
def facebook_error(blueprint, message, response):
msg = ("OAuth error from {name}! " "message={message} response={response}").format(
name=blueprint.name, message=message, response=response
)
flash(msg, category="error")
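# Illustrative only -- not part of the original module. A minimal sketch of how these
# blueprints might be registered on the Flask app; the url_prefix values and the place
# this would be called from (e.g. the project's create_app factory) are assumptions.
def _register_oauth_blueprints(app):
    app.register_blueprint(github_blueprint, url_prefix="/login")
    app.register_blueprint(google_blueprint, url_prefix="/login")
    app.register_blueprint(facebook_blueprint, url_prefix="/login")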
| 35.352041
| 120
| 0.634002
|
794edb7f62435552f6580e7977fb459be4fde2a0
| 4,229
|
py
|
Python
|
city_scrapers/spiders/pa_liquorboard.py
|
maxachis/city-scrapers-pitt
|
63150a522a512d35b64e4068e25169cf46875598
|
[
"MIT"
] | 16
|
2019-02-22T23:43:06.000Z
|
2020-01-13T04:41:50.000Z
|
city_scrapers/spiders/pa_liquorboard.py
|
maxachis/city-scrapers-pitt
|
63150a522a512d35b64e4068e25169cf46875598
|
[
"MIT"
] | 109
|
2020-02-09T21:42:36.000Z
|
2021-03-06T21:41:18.000Z
|
city_scrapers/spiders/pa_liquorboard.py
|
maxachis/city-scrapers-pitt
|
63150a522a512d35b64e4068e25169cf46875598
|
[
"MIT"
] | 62
|
2019-01-25T23:55:58.000Z
|
2020-01-17T00:53:55.000Z
|
import re
from datetime import datetime
from city_scrapers_core.constants import BOARD
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
class PaLiquorboardSpider(CityScrapersSpider):
"""Spider is a class that scapy provides to us,
this spider will inherit properties from the base spider class
"""
name = "pa_liquorboard" # How we refer to the spider when we want to run it
agency = "Pennsylvania Liquor Control Board"
timezone = "America/New_York"
allowed_domains = ["www.lcb.pa.gov"]
start_urls = ["https://www.lcb.pa.gov/About-Us/Board/Pages/Public-Meetings.aspx"]
BUILDING_NAME = "Pennsylvania Liquor Control Board Headquarters"
ADDRESS = "Room 117, 604 Northwest Office Building, Harrisburg, PA 17124"
EXPECTED_START_HOUR = "11:00"
# List of urls that we are going to scrape content from
# We are extracting the entire html content -- all of the html content and saving it
def parse(self, response):
"""
`parse` should always `yield` Meeting items.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
sel_id = "ctl00_PlaceHolderMain_PageContent__ControlWrapper_RichHtmlField"
sel_path = "/blockquote/font/text()"
sel_path2 = "/blockquote/div/span/text()"
select_txt = "//*[@id='" + sel_id + "']" + sel_path
select_txt2 = "//*[@id='" + sel_id + "']" + sel_path2
# Identify CSS node or XPath you're interested in
meetings = response.xpath(select_txt).extract() # Make variable of that text
meetings += response.xpath(select_txt2).extract()
container = response.xpath('//*[@id="container"]').extract()
match = re.search(r"(\d{2}:\d{2}) ?(AM|PM)", container[0])
        time = match.group(1) if match else self.EXPECTED_START_HOUR
for item in meetings:
meeting = Meeting(
title=self._parse_title(item),
description=self._parse_description(item),
classification=self._parse_classification(item),
start=self._parse_start(item, time),
end=self._parse_end(item),
all_day=self._parse_all_day(item),
time_notes=self._parse_time_notes(item),
location=self._parse_location(item),
links=self._parse_links(item),
source=self._parse_source(response),
)
meeting["status"] = self._get_status(meeting)
meeting["id"] = self._get_id(meeting)
yield meeting
def _parse_title(self, item):
"""Parse or generate meeting title."""
return ""
def _parse_description(self, item):
"""Parse or generate meeting description."""
return ""
def _parse_classification(self, item):
"""Parse or generate classification from allowed options."""
return BOARD
def _parse_start(self, item: str, start_time: int):
"""Parse start datetime as a naive datetime object."""
# Remove garbage from our date item:
date_split = item.split(",")
date_string = "".join(date_split[1:])[1:]
start_datetime = datetime.strptime(
date_string + " " + start_time + ":00", "%B %d %Y %H:%M:%S"
)
return start_datetime
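    # Example (illustrative): for an item such as "Wednesday, January 15 2020" with
    # start_time "11:00", date_split[1:] is [" January 15 2020"], date_string becomes
    # "January 15 2020", and strptime("January 15 2020 11:00:00", "%B %d %Y %H:%M:%S")
    # yields datetime(2020, 1, 15, 11, 0).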
def _parse_end(self, item):
"""Parse end datetime as a naive datetime object. Added by pipeline if None"""
return None
def _parse_time_notes(self, item):
"""Parse any additional notes on the timing of the meeting"""
return ""
def _parse_all_day(self, item):
"""Parse or generate all-day status. Defaults to False."""
return False
def _parse_location(self, item): # Put function to get location
"""Parse or generate location."""
return {
"address": self.ADDRESS,
"name": self.BUILDING_NAME,
}
def _parse_links(self, item):
"""Parse or generate links."""
return [{"href": "", "title": ""}]
def _parse_source(self, response):
"""Parse or generate source."""
return response.url
| 37.758929
| 88
| 0.624261
|
794edba706aa6e447e89d1034059e591be3cfa3e
| 1,190
|
py
|
Python
|
chinilla/wallet/settings/user_settings.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
chinilla/wallet/settings/user_settings.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
chinilla/wallet/settings/user_settings.py
|
Chinilla/chinilla-blockchain
|
59bebcf94e65b74fbb53ad4929bbd79cb28be619
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict
from chinilla.wallet.key_val_store import KeyValStore
from chinilla.wallet.settings.default_settings import default_settings
from chinilla.wallet.settings.settings_objects import BackupInitialized
class UserSettings:
settings: Dict[str, Any]
basic_store: KeyValStore
@staticmethod
async def create(
store: KeyValStore,
name: str = None,
):
self = UserSettings()
self.basic_store = store
self.settings = {}
await self.load_store()
return self
def _keys(self):
all_keys = [BackupInitialized]
return all_keys
async def load_store(self):
keys = self._keys()
for setting in keys:
name = setting.__name__
object = await self.basic_store.get_object(name, BackupInitialized)
if object is None:
object = default_settings[name]
assert object is not None
self.settings[name] = object
async def setting_updated(self, setting: Any):
name = setting.__class__.__name__
await self.basic_store.set_object(name, setting)
self.settings[name] = setting
| 28.333333
| 79
| 0.652941
|
794ee016c60f657148360bcf8a5e6f91e778f00d
| 378
|
py
|
Python
|
Lab1/q3.py
|
ViniciusRCortez/Monitoria-de-metodos-numericos-com-python
|
85678fe8907752533d0dc97dc83550411ba079f0
|
[
"MIT"
] | null | null | null |
Lab1/q3.py
|
ViniciusRCortez/Monitoria-de-metodos-numericos-com-python
|
85678fe8907752533d0dc97dc83550411ba079f0
|
[
"MIT"
] | null | null | null |
Lab1/q3.py
|
ViniciusRCortez/Monitoria-de-metodos-numericos-com-python
|
85678fe8907752533d0dc97dc83550411ba079f0
|
[
"MIT"
] | null | null | null |
"""
Objective: Solve question 3 of the first lab.
"""
def conta(a):  # returns the desired expression for each value of i
numerador = ((a ** 3) + 1) ** 2
denominador = (a ** 2) + 2
return numerador / denominador
x = [-1.6, -1.2, -0.8, -0.4, 0, 0.4, 0.8, 1.2]
for i in x:  # i = each value of x taken individually
y = conta(i)
print(f'y = {y}')
| 23.625
| 70
| 0.566138
|
794ee06e7bd109b02f0212d0caf7b66d2ab5db80
| 4,524
|
py
|
Python
|
examples/run_sim_and_calc_resp_v2.py
|
willdickson/sys_id_utils
|
5dbf95e04e27377dcabed2f672f5fa788b0860d3
|
[
"MIT"
] | null | null | null |
examples/run_sim_and_calc_resp_v2.py
|
willdickson/sys_id_utils
|
5dbf95e04e27377dcabed2f672f5fa788b0860d3
|
[
"MIT"
] | null | null | null |
examples/run_sim_and_calc_resp_v2.py
|
willdickson/sys_id_utils
|
5dbf95e04e27377dcabed2f672f5fa788b0860d3
|
[
"MIT"
] | null | null | null |
import warnings
import numpy as np
import scipy as sp
import scipy.signal  # makes sp.signal.bode (and csd/welch below) available
import matplotlib.pyplot as plt
import sys_id_utils
num_pts = 5000
t0 = 0.0 # Start time
t1 = 20.0 # End time
# Yaw dynamics + controller model parameters
model_param = {
'inertia' : 1.0,
'damping' : 0.0,
'pro_gain' : 5.0,
'int_gain' : 20.0,
'int_leak' : 1.0,
'noise' : 1.1,
}
# Input signal parameters for chirp function
input_param_chrip = {
'type' : 'chirp',
'method' : 'logarithmic',
'min_freq' : 0.1,
'max_freq' : 10.0,
}
# Input signal parameters for step function
input_param_step = {
'type' : 'step',
'value_start' : 0.0,
'value_final' : 1.0,
't_step' : 10.0,
}
nperseg = num_pts // 4  # Number of points per segment for the power spectral density estimate (integer)
f_cutoff = 8.0  # Cutoff frequency for analysis
fit = True  # If True, fit a parametric transfer-function model to the frequency response
# Create input and output data
t, input_sig, output_sig, state = sys_id_utils.lpi_yaw_model(t0, t1, num_pts, model_param, input_param_chrip)
f_sample = 1.0/(t[1] - t[0])
# Compute gain and phase as a function of frequency from the input/output data - as if we were performing an experiment.
f, gain_db, phase_deg = sys_id_utils.freq_response(input_sig, output_sig, f_sample, f_cutoff, nperseg)
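# Illustrative cross-check (not part of the original script): the same response can be
# estimated directly with scipy's H1 estimator, H(f) = Pxy(f) / Pxx(f); the variables
# below are only computed for comparison and are not used elsewhere.
f_h1, Pxy = sp.signal.csd(input_sig, output_sig, fs=f_sample, nperseg=nperseg)
_, Pxx = sp.signal.welch(input_sig, fs=f_sample, nperseg=nperseg)
H1 = Pxy / Pxx
gain_db_h1 = 20.0 * np.log10(np.abs(H1))
phase_deg_h1 = np.rad2deg(np.angle(H1))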
# Create a state-space model to get the theoretical frequency response
ss_model = sys_id_utils.create_lpi_ss_model(model_param)
f_model = np.linspace(0.0, f_cutoff, num_pts)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_, gain_db_model, phase_deg_model = sp.signal.bode(ss_model,w=f_model*2.0*np.pi)
# Fit state space models
if fit:
if 0:
d_fit, gp_fit, fit_info = sys_id_utils.fit_p_yaw_model(t, input_sig, output_sig, op_param={'disp':True})
print('fit: d {}, gp {}'.format(d_fit, gp_fit))
model_param_fit = {
'inertia' : model_param['inertia'],
'damping' : model_param['inertia']*d_fit,
'pro_gain' : model_param['inertia']*gp_fit,
'int_gain' : 0.0,
'int_leak' : 0.0,
'noise' : 0.0,
}
if 0:
d_fit, gp_fit, gi_fit, fit_info = sys_id_utils.fit_pi_yaw_model(t, input_sig, output_sig, op_param={'disp':True})
print('fit: d {}, gp {}, gi {}'.format(d_fit, gp_fit, gi_fit))
model_param_fit = {
'inertia' : model_param['inertia'],
'damping' : model_param['inertia']*d_fit,
'pro_gain' : model_param['inertia']*gp_fit,
'int_gain' : model_param['inertia']*gi_fit,
'int_leak' : 0.0,
'noise' : 0.0,
}
if 1:
d_fit, gp_fit, gi_fit, c_fit, fit_info = sys_id_utils.fit_lpi_yaw_model(t, input_sig, output_sig, op_param={'disp':True})
print('fit: d {}, gp {}, gi {}, c {}'.format(d_fit, gp_fit, gi_fit, c_fit))
model_param_fit = {
'inertia' : model_param['inertia'],
'damping' : model_param['inertia']*d_fit,
'pro_gain' : model_param['inertia']*gp_fit,
'int_gain' : model_param['inertia']*gi_fit,
'int_leak' : c_fit,
'noise' : 0.0,
}
ss_model_fit = sys_id_utils.create_lpi_ss_model(model_param_fit)
f_fit = f_model
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_, gain_db_fit, phase_deg_fit = sp.signal.bode(ss_model_fit,w=f_fit*2.0*np.pi)
# Plot input and output data
fig1, ax1 = plt.subplots(1,1)
h_input, = ax1.plot(t,input_sig,'b')
h_output, = ax1.plot(t, output_sig ,'r')
ax1.set_xlabel('t (sec)')
ax1.set_ylabel('velocity (rad/sec)')
ax1.grid(True)
plt.figlegend((h_input, h_output), ('input', 'output'), 'upper right')
# Plot frequency response (Bode plot)
fig2, ax2 = plt.subplots(2,1,sharex=True)
fig2.suptitle('Frequency Response')
ax2[0].semilogx(f_model, gain_db_model,'b')
if fit:
ax2[0].semilogx(f_fit, gain_db_fit,'g')
ax2[0].semilogx(f, gain_db,'or')
ax2[0].grid(True, which='both', axis='both')
ax2[0].set_ylabel('gain (dB)')
ax2[1].semilogx(f_model, phase_deg_model,'b')
if fit:
ax2[1].semilogx(f_fit, phase_deg_fit, 'g')
ax2[1].semilogx(f, phase_deg,'or')
ax2[1].grid(True, which='both', axis='both')
ax2[1].set_ylabel('phase lag (deg)')
ax2[1].set_xlabel('f (Hz)')
plt.show()
| 35.069767
| 129
| 0.612953
|
794ee1a767ccb5aa4e8a8cd6047727bb5d676853
| 5,990
|
py
|
Python
|
bg_biz/service/user_service.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
bg_biz/service/user_service.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
bg_biz/service/user_service.py
|
sluggard6/bgirl
|
3c9fa895189ef16442694830d0c05cf60ee5187b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from datetime import datetime, timedelta
from bg_biz.orm.sysconfig import SysConfig
from bg_biz.orm.user import User, UserVcode, ExchangeWifiRecord, UserHit
from bg_biz.service.tool_service import ToolService
from sharper.flaskapp.orm.base import transaction, db
from sharper.lib.error import AppError
from sharper.lib.validator import is_mobile
from sharper.util.string import random_number
from sharper.util.app_util import get_app_name
from bg_biz.orm.app_log import SmsLog
from bg_biz.orm.admin import AdminLog, AdminAction
__author__ = [
"sluggrd"
]
@transaction
def send_user_vcode(phone, category, app=UserVcode.App.ANDROID, mac=None):
vcode = random_number(4)
sms_config = SysConfig.get_json("reg_sms_config")
limit_times = sms_config.get('limit_times', 10)
    # If a code of the same category was sent within the configured window and has not been used yet, resend that same code instead of generating a new one
keep_time = datetime.now() - timedelta(minutes=sms_config.get('keep_minutes', 30))
delay_time = datetime.now() - timedelta(minutes=1)
vcode_today = UserVcode.query.filter_by(phone=phone).filter(
"date(create_time)='%s'" % datetime.now().strftime("%Y-%m-%d")).all()
vcode_today = sum([x.times for x in vcode_today])
if vcode_today > 9:
raise AppError(u"今日获取短信过多,已关闭验证码发送")
vcode_latest_log = UserVcode.query.with_lockmode("update").filter_by(phone=phone).filter_by(
category=category).filter_by(app=app).filter(
UserVcode.modify_time > delay_time.strftime("%Y-%m-%d %H:%M:%S")).first()
if vcode_latest_log:
raise AppError(u"获取短信过于频繁,请稍后再试")
vcode_log = UserVcode.query.filter_by(phone=phone).filter_by(category=category).filter_by(app=app).filter_by(
status=UserVcode.Status.INIT).filter(
UserVcode.create_time > keep_time.strftime("%Y-%m-%d %H:%M:%S")).first()
if vcode_log:
if vcode_log.times > limit_times:
raise AppError(u"获取次数过多,请稍后再试")
vcode = vcode_log.vcode
vcode_log.times = vcode_log.times + 1
vcode_log.update()
else:
vcode_log = UserVcode()
vcode_log.phone = phone
vcode_log.category = category
vcode_log.vcode = vcode
vcode_log.mac = mac
vcode_log.app = app
vcode_log.insert()
content = ''
if not is_mobile(phone):
raise AppError(u"请输入正确的手机号码")
if category == UserVcode.Category.REGISTER:
content = u"您在%s的验证码是:%s 。 请不要把验证码泄露给其他人。如非本人操作,可不用理会!" % (get_app_name(), vcode)
elif category == UserVcode.Category.FORGET_PASS:
content = u"您的验证码为:%s " % vcode
# elif category == UserVcode.Category.CHANGE_PHONE_OLD:
# content = u"您的验证码为:%s " % vcode
# elif category == UserVcode.Category.CHANGE_PHONE_NEW:
# content = u"您的验证码为:%s " % vcode
else:
content = u"您的验证码为:%s " % vcode
need_switch = True
# if category in [UserVcode.Category.CHANGE_PHONE_OLD, UserVcode.Category.CHANGE_PHONE_NEW]:
# need_switch = False
print vcode_log
ToolService.send_sms(phone, content, need_switch=need_switch, app=app, scene=SmsLog.Scene.VCODE)
return vcode
class UserService:
@classmethod
def add_hit(cls, user_hit):
sql = 'insert into user_hit (user_id,pic_id,status) values (%s,%s,%s) ON DUPLICATE KEY UPDATE status=%s' % (user_hit.user_id,user_hit.pic_id,user_hit.status,user_hit.status)
print sql
return db.engine.execute(sql)
@classmethod
@transaction
def register(cls, phone, password):
if User.query.filter_by(phone=phone).first():
            raise AppError(msg=u'该手机号码已经被注册。')
u = User.register(phone, password)
return u
@classmethod
@transaction
def delay_wifi(cls, user, day=None, seconds=None, admin_log_info="", category=None, obj_id=None):
now = datetime.now()
vipend = user.vipend
if user:
if vipend > now:
if day:
net_end = vipend + timedelta(days=int(day))
else:
net_end = vipend + timedelta(seconds=int(seconds))
else:
if day:
net_end = now + timedelta(days=int(day))
else:
net_end = now + timedelta(seconds=int(seconds))
user.vipend = net_end
record = ExchangeWifiRecord()
record.before_net_end = vipend
record.category = category
record.obj_id = obj_id
if day:
record.days = day
elif seconds:
record.seconds = seconds
record.user_id = user.id
record.after_net_end = net_end
record.insert()
user.update()
if not admin_log_info:
admin_log_info = '空'
if not day:
log = AdminLog.write(AdminAction.DelayNetEnd, user.id, ip="", key1=user.id,
key2=admin_log_info, key3=seconds)
else:
log = AdminLog.write(AdminAction.DelayNetEnd, user.id, ip="", key1=user.id,
key2=admin_log_info, key3=day)
return True
@classmethod
def build_user_hit(cls, hits):
dic = dict()
for hit in hits:
dic[str(hit.pic_id)] = hit.status
return dic
def validate_vcode(phone, code, category):
"""
    Verify an SMS verification code for the given phone, code and category.
"""
record = UserVcode.query.filter_by(phone=phone).filter_by(vcode=code).filter_by(category=category).first()
print record
limit_time = datetime.now() - timedelta(minutes=60)
if record and record.status == UserVcode.Status.INIT and record.create_time > limit_time:
record.status = UserVcode.Status.VERIFIED
record.update()
return True
return False
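# Illustrative flow (not part of the original module): a registration handler would
# typically call send_user_vcode(phone, UserVcode.Category.REGISTER) to deliver the SMS
# code, then check the code the user submits with validate_vcode(phone, code,
# UserVcode.Category.REGISTER) before finally calling UserService.register(phone, password).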
| 37.911392
| 182
| 0.611352
|
794ee1cb7e573cec9fa7f6a6e54bc6cb6800d725
| 4,352
|
py
|
Python
|
customSDK/servicefabric/models/application_upgrade_rollback_started_event.py
|
hans-olav/service-fabric-cli
|
baf27342ad4b9f74dee1954e60ed5b40ebcf039d
|
[
"MIT"
] | null | null | null |
customSDK/servicefabric/models/application_upgrade_rollback_started_event.py
|
hans-olav/service-fabric-cli
|
baf27342ad4b9f74dee1954e60ed5b40ebcf039d
|
[
"MIT"
] | null | null | null |
customSDK/servicefabric/models/application_upgrade_rollback_started_event.py
|
hans-olav/service-fabric-cli
|
baf27342ad4b9f74dee1954e60ed5b40ebcf039d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_event import ApplicationEvent
class ApplicationUpgradeRollbackStartedEvent(ApplicationEvent):
"""Application Upgrade Rollback Started event.
:param event_instance_id: The identifier for the FabricEvent instance.
:type event_instance_id: str
:param category: The category of event.
:type category: str
:param time_stamp: The time event was logged.
:type time_stamp: datetime
:param has_correlated_events: Shows there is existing related events
available.
:type has_correlated_events: bool
:param kind: Constant filled by server.
:type kind: str
:param application_id: The identity of the application. This is an encoded
representation of the application name. This is used in the REST APIs to
identify the application resource.
Starting in version 6.0, hierarchical names are delimited with the "\\~"
character. For example, if the application name is "fabric:/myapp/app1",
the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
in previous versions.
:type application_id: str
:param application_type_name: Application type name.
:type application_type_name: str
:param current_application_type_version: Current Application type version.
:type current_application_type_version: str
:param application_type_version: Target Application type version.
:type application_type_version: str
:param failure_reason: Describes reason of failure.
:type failure_reason: str
:param overall_upgrade_elapsed_time_in_ms: Overall upgrade time in
milli-seconds.
:type overall_upgrade_elapsed_time_in_ms: float
"""
_validation = {
'event_instance_id': {'required': True},
'time_stamp': {'required': True},
'kind': {'required': True},
'application_id': {'required': True},
'application_type_name': {'required': True},
'current_application_type_version': {'required': True},
'application_type_version': {'required': True},
'failure_reason': {'required': True},
'overall_upgrade_elapsed_time_in_ms': {'required': True},
}
_attribute_map = {
'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
'category': {'key': 'Category', 'type': 'str'},
'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
'kind': {'key': 'Kind', 'type': 'str'},
'application_id': {'key': 'ApplicationId', 'type': 'str'},
'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'},
'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'},
'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'},
'failure_reason': {'key': 'FailureReason', 'type': 'str'},
'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
}
def __init__(self, event_instance_id, time_stamp, application_id, application_type_name, current_application_type_version, application_type_version, failure_reason, overall_upgrade_elapsed_time_in_ms, category=None, has_correlated_events=None):
super(ApplicationUpgradeRollbackStartedEvent, self).__init__(event_instance_id=event_instance_id, category=category, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id)
self.application_type_name = application_type_name
self.current_application_type_version = current_application_type_version
self.application_type_version = application_type_version
self.failure_reason = failure_reason
self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms
self.kind = 'ApplicationUpgradeRollbackStarted'
| 51.809524 | 248 | 0.697151 |
794ee1e97c43619c87f8fdc20a2ec5574e9e57d6 | 5,755 | py | Python | testfile/test_CornerNetPureBar.py | Yunnglin/Chart-to-text | 86f3291930289a4739f658c590e208771759ee50 | ["BSD-3-Clause"] | 30 | 2021-03-03T02:16:30.000Z | 2022-02-23T10:46:36.000Z | testfile/test_CornerNetPureBar.py | Yunnglin/Chart-to-text | 86f3291930289a4739f658c590e208771759ee50 | ["BSD-3-Clause"] | 16 | 2021-03-30T07:50:03.000Z | 2022-03-03T04:56:30.000Z | testfile/test_CornerNetPureBar.py | Yunnglin/Chart-to-text | 86f3291930289a4739f658c590e208771759ee50 | ["BSD-3-Clause"] | 15 | 2021-03-03T06:21:19.000Z | 2022-02-25T10:01:36.000Z |
import os
import cv2
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
import external.nms as nms
def _rescale_points(dets, ratios, borders, sizes):
xs, ys = dets[:, :, 2], dets[:, :, 3]
xs /= ratios[0, 1]
ys /= ratios[0, 0]
xs -= borders[0, 2]
ys -= borders[0, 0]
np.clip(xs, 0, sizes[0, 1], out=xs)
np.clip(ys, 0, sizes[0, 0], out=ys)
def save_image(data, fn):
sizes = np.shape(data)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data)
plt.savefig(fn, dpi = height)
plt.close()
def kp_decode(nnet, images, K, ae_threshold=0.5, kernel=3):
with torch.no_grad():
detections_tl_detections_br, time_backbone, time_psn = nnet.test([images], ae_threshold=ae_threshold, K=K, kernel=kernel)
detections_tl = detections_tl_detections_br[0]
detections_br = detections_tl_detections_br[1]
detections_tl = detections_tl.data.cpu().numpy().transpose((2, 1, 0))
detections_br = detections_br.data.cpu().numpy().transpose((2, 1, 0))
return detections_tl, detections_br, True
def kp_detection(image, db, nnet, debug=False, decode_func=kp_decode, cuda_id=0):
K = db.configs["top_k"]
ae_threshold = db.configs["ae_threshold"]
nms_kernel = db.configs["nms_kernel"]
categories = db.configs["categories"]
nms_threshold = db.configs["nms_threshold"]
max_per_image = db.configs["max_per_image"]
if True:
height, width = image.shape[0:2]
detections_point_tl = []
detections_point_br = []
scale = 1.0
new_height = int(height * scale)
new_width = int(width * scale)
new_center = np.array([new_height // 2, new_width // 2])
inp_height = new_height | 127
inp_width = new_width | 127
images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
ratios = np.zeros((1, 2), dtype=np.float32)
borders = np.zeros((1, 4), dtype=np.float32)
sizes = np.zeros((1, 2), dtype=np.float32)
out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
height_ratio = out_height / inp_height
width_ratio = out_width / inp_width
resized_image = cv2.resize(image, (new_width, new_height))
resized_image, border, offset = crop_image(resized_image, new_center, [inp_height, inp_width])
resized_image = resized_image / 255.
#normalize_(resized_image, db.mean, db.std)
images[0] = resized_image.transpose((2, 0, 1))
borders[0] = border
sizes[0] = [int(height * scale), int(width * scale)]
ratios[0] = [height_ratio, width_ratio]
if torch.cuda.is_available():
images = torch.from_numpy(images).cuda(cuda_id)
else:
images = torch.from_numpy(images)
dets_tl, dets_br, flag = decode_func(nnet, images, K, ae_threshold=ae_threshold, kernel=nms_kernel)
offset = (offset + 1) * 100
_rescale_points(dets_tl, ratios, borders, sizes)
_rescale_points(dets_br, ratios, borders, sizes)
detections_point_tl.append(dets_tl)
detections_point_br.append(dets_br)
detections_point_tl = np.concatenate(detections_point_tl, axis=1)
detections_point_br = np.concatenate(detections_point_br, axis=1)
#print('1')
#print(detections_point.shape)
classes_p_tl = detections_point_tl[:, 0, 1]
classes_p_br = detections_point_br[:, 0, 1]
#print('2')
#print(classes_p.shape)
# reject detections with negative scores
keep_inds_p = (detections_point_tl[:, 0, 0] > 0)
detections_point_tl = detections_point_tl[keep_inds_p, 0]
classes_p_tl = classes_p_tl[keep_inds_p]
keep_inds_p = (detections_point_br[:, 0, 0] > 0)
detections_point_br = detections_point_br[keep_inds_p, 0]
classes_p_br = classes_p_br[keep_inds_p]
#print('3')
#print(detections_point.shape)
top_points_tl = {}
top_points_br = {}
for j in range(categories):
keep_inds_p = (classes_p_tl == j)
top_points_tl[j + 1] = detections_point_tl[keep_inds_p].astype(np.float32)
keep_inds_p = (classes_p_br == j)
top_points_br[j + 1] = detections_point_br[keep_inds_p].astype(np.float32)
#print(top_points[image_id][j + 1][0])
scores = np.hstack([
top_points_tl[j][:, 0]
for j in range(1, categories + 1)
])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, categories + 1):
keep_inds = (top_points_tl[j][:, 0] >= thresh)
top_points_tl[j] = top_points_tl[j][keep_inds]
scores = np.hstack([
top_points_br[j][:, 0]
for j in range(1, categories + 1)
])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, categories + 1):
keep_inds = (top_points_br[j][:, 0] >= thresh)
top_points_br[j] = top_points_br[j][keep_inds]
return top_points_tl, top_points_br
def testing(image, db, nnet, debug=False):
return globals()[system_configs.sampling_function](image, db, nnet, debug=debug)
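# A short arithmetic sketch (plain Python, no framework required) of the
# `new_height | 127` padding used in kp_detection() above: OR-ing with 127
# fills the low seven bits, so the padded size plus one becomes the next
# multiple of 128, which keeps the repeated downsampling in the backbone exact.
# The helper name is illustrative only.
def pad_to_127(size):
    return size | 127

for h in (200, 255, 256, 511, 512):
    padded = pad_to_127(h)
    assert padded >= h and (padded + 1) % 128 == 0
    # e.g. 200 -> 255, 256 -> 383, 512 -> 639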
| 36.424051 | 133 | 0.618593 |
794ee295d6220f6577f353b0a518e9ecdf243583 | 430 | py | Python | tests/nlu_core_tests/component_tests/classifier_tests/cyber_tests.py | milyiyo/nlu | d209ed11c6a84639c268f08435552248391c5573 | ["Apache-2.0"] | 480 | 2020-08-24T02:36:40.000Z | 2022-03-30T08:09:43.000Z | tests/nlu_core_tests/component_tests/classifier_tests/cyber_tests.py | milyiyo/nlu | d209ed11c6a84639c268f08435552248391c5573 | ["Apache-2.0"] | 28 | 2020-09-26T18:55:43.000Z | 2022-03-26T01:05:45.000Z | tests/nlu_core_tests/component_tests/classifier_tests/cyber_tests.py | milyiyo/nlu | d209ed11c6a84639c268f08435552248391c5573 | ["Apache-2.0"] | 76 | 2020-09-25T22:55:12.000Z | 2022-03-17T20:25:52.000Z |
import unittest
from nlu import *
class TestCyber(unittest.TestCase):
def test_cyber_model(self):
import pandas as pd
pipe = nlu.load('cyberbullying',verbose=True)
df = pipe.predict(['Peter love pancaces. I hate Mondays', 'I love Fridays'], output_level='token',drop_irrelevant_cols=False, metadata=True, )
for c in df.columns: print(df[c])
if __name__ == '__main__':
unittest.main()
| 25.294118 | 150 | 0.67907 |
794ee3a32ede6449a2c1c65a30d241f886e1bc64 | 791 | py | Python | python/hw2/bfs.py | jeremy24/494-graph-algos | 031a90e46304f405829bad7658965aae215833e1 | ["MIT"] | null | null | null | python/hw2/bfs.py | jeremy24/494-graph-algos | 031a90e46304f405829bad7658965aae215833e1 | ["MIT"] | null | null | null | python/hw2/bfs.py | jeremy24/494-graph-algos | 031a90e46304f405829bad7658965aae215833e1 | ["MIT"] | null | null | null |
from __future__ import print_function
import sys
from graph import Graph
from graph import make
from graph import GraphException
from graph import Matrix
def go():
if ( len(sys.argv) == 3 ):
filename = str(sys.argv[1])
start = int (sys.argv[2])
graph = make( filename )
visited = graph.bfs(start)
out = ""
for item in visited:
out += str(object=item) + " "
out += "\n"
print (out)
# print ("Max Degree: " + str(graph.degree("max")))
# print ("Min Degree: " + str(graph.degree("min")))
# print ("Density: " + str(graph.density()))
# graph.output()
else:
print (GraphException("You must supply a valid graph file"))
go()
| 21.378378 | 69 | 0.533502 |
794ee3cce054643651dd16950e32464e9de8e074 | 401 | py | Python | torch2trt/__init__.py | NVIDIA-AI-IOT-private/torch2trt | 953d60039e0c81e90eea467c3df2e6e3f7040242 | ["MIT"] | null | null | null | torch2trt/__init__.py | NVIDIA-AI-IOT-private/torch2trt | 953d60039e0c81e90eea467c3df2e6e3f7040242 | ["MIT"] | null | null | null | torch2trt/__init__.py | NVIDIA-AI-IOT-private/torch2trt | 953d60039e0c81e90eea467c3df2e6e3f7040242 | ["MIT"] | null | null | null |
from .torch2trt import *
from .converters import *
import tensorrt as trt
def load_plugins():
import torch2trt.torch_plugins
registry = trt.get_plugin_registry()
torch2trt_creators = [c for c in registry.plugin_creator_list if c.plugin_namespace == 'torch2trt']
for c in torch2trt_creators:
registry.register_creator(c, 'torch2trt')
try:
load_plugins()
except:
pass
| 23.588235 | 103 | 0.733167 |
794ee3f47f77570be81f168a9fa8735017a7898a | 11,448 | py | Python | data/molecules.py | ihounie/benchmarking-gnns | b62268b162b2506630028d92123826694f6bb018 | ["MIT"] | null | null | null | data/molecules.py | ihounie/benchmarking-gnns | b62268b162b2506630028d92123826694f6bb018 | ["MIT"] | null | null | null | data/molecules.py | ihounie/benchmarking-gnns | b62268b162b2506630028d92123826694f6bb018 | ["MIT"] | null | null | null |
import torch
import pickle
import torch.utils.data
import time
import os
import numpy as np
import csv
import dgl
from scipy import sparse as sp
import numpy as np
# *NOTE
# The dataset pickle and index files are in ./zinc_molecules/ dir
# [<split>.pickle and <split>.index; for split 'train', 'val' and 'test']
class MoleculeDGL(torch.utils.data.Dataset):
def __init__(self, data_dir, split, num_graphs=None):
self.data_dir = data_dir
self.split = split
self.num_graphs = num_graphs
with open(data_dir + "/%s.pickle" % self.split,"rb") as f:
self.data = pickle.load(f)
if self.num_graphs in [10000, 1000]:
# loading the sampled indices from file ./zinc_molecules/<split>.index
with open(data_dir + "/%s.index" % self.split,"r") as f:
data_idx = [list(map(int, idx)) for idx in csv.reader(f)]
self.data = [ self.data[i] for i in data_idx[0] ]
assert len(self.data)==num_graphs, "Sample num_graphs again; available idx: train/val/test => 10k/1k/1k"
"""
data is a list of Molecule dict objects with following attributes
molecule = data[idx]
; molecule['num_atom'] : nb of atoms, an integer (N)
; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type
; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type
; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable
"""
self.graph_lists = []
self.graph_labels = []
self.n_samples = len(self.data)
self._prepare()
def _prepare(self):
print("preparing %d graphs for the %s set..." % (self.num_graphs, self.split.upper()))
for molecule in self.data:
node_features = molecule['atom_type'].long()
adj = molecule['bond_type']
#print(adj)
edge_list = (adj != 0).nonzero() # converting adj matrix to edge_list
#print(edge_list)
edge_idxs_in_adj = edge_list.split(1, dim=1)
#print(edge_idxs_in_adj)
#assert(0)
edge_features = adj[edge_idxs_in_adj].reshape(-1).long()
# Create the DGL Graph
g = dgl.DGLGraph()
g.add_nodes(molecule['num_atom'])
g.ndata['feat'] = node_features
for src, dst in edge_list:
g.add_edges(src.item(), dst.item())
g.edata['feat'] = edge_features
self.graph_lists.append(g)
self.graph_labels.append(molecule['logP_SA_cycle_normalized'])
def __len__(self):
"""Return the number of graphs in the dataset."""
return self.n_samples
def __getitem__(self, idx):
"""
Get the idx^th sample.
Parameters
---------
idx : int
The sample index.
Returns
-------
(dgl.DGLGraph, int)
DGLGraph with node feature stored in `feat` field
And its label.
"""
return self.graph_lists[idx], self.graph_labels[idx]
class MoleculeDatasetDGL(torch.utils.data.Dataset):
def __init__(self, name='Zinc'):
t0 = time.time()
self.name = name
self.num_atom_type = 28 # known meta-info about the zinc dataset; can be calculated as well
self.num_bond_type = 4 # known meta-info about the zinc dataset; can be calculated as well
data_dir='./data/molecules'
if self.name == 'ZINC-full':
data_dir='./data/molecules/zinc_full'
self.train = MoleculeDGL(data_dir, 'train', num_graphs=220011)
self.val = MoleculeDGL(data_dir, 'val', num_graphs=24445)
self.test = MoleculeDGL(data_dir, 'test', num_graphs=5000)
else:
self.train = MoleculeDGL(data_dir, 'train', num_graphs=10000)
self.val = MoleculeDGL(data_dir, 'val', num_graphs=1000)
self.test = MoleculeDGL(data_dir, 'test', num_graphs=1000)
print("Time taken: {:.4f}s".format(time.time()-t0))
def self_loop(g):
"""
Utility function only, to be used only when necessary as per user self_loop flag
: Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']
This function is called inside a function in MoleculeDataset class.
"""
new_g = dgl.DGLGraph()
new_g.add_nodes(g.number_of_nodes())
new_g.ndata['feat'] = g.ndata['feat']
src, dst = g.all_edges(order="eid")
src = dgl.backend.zerocopy_to_numpy(src)
dst = dgl.backend.zerocopy_to_numpy(dst)
non_self_edges_idx = src != dst
nodes = np.arange(g.number_of_nodes())
new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])
new_g.add_edges(nodes, nodes)
# This new edata is not used since this function gets called only for GCN, GAT
# However, we need this for the generic requirement of ndata and edata
new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())
return new_g
def positional_encoding(g, pos_enc_dim):
"""
    Graph positional encoding via Laplacian eigenvectors
"""
# Laplacian
A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
L = sp.eye(g.number_of_nodes()) - N * A * N
# Eigenvectors with numpy
EigVal, EigVec = np.linalg.eig(L.toarray())
idx = EigVal.argsort() # increasing order
EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])
g.ndata['pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float()
# # Eigenvectors with scipy
# EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
# EigVec = EigVec[:, EigVal.argsort()] # increasing order
# g.ndata['pos_enc'] = torch.from_numpy(np.abs(EigVec[:,1:pos_enc_dim+1])).float()
return g
class MoleculeDataset(torch.utils.data.Dataset):
def __init__(self, name):
"""
Loading SBM datasets
"""
start = time.time()
print("[I] Loading dataset %s..." % (name))
self.name = name
data_dir = 'data/molecules/'
with open(data_dir+name+'.pkl',"rb") as f:
f = pickle.load(f)
self.train = f[0]
self.val = f[1]
self.test = f[2]
self.num_atom_type = f[3]
self.num_bond_type = f[4]
print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))
print("[I] Finished loading.")
print("[I] Data load time: {:.4f}s".format(time.time()-start))
# form a mini batch from a given list of samples = [(graph, label) pairs]
def collate(self, samples):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels)).unsqueeze(1)
#tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
#tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
#snorm_n = torch.cat(tab_snorm_n).sqrt()
#tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]
#tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]
#snorm_e = torch.cat(tab_snorm_e).sqrt()
batched_graph = dgl.batch(graphs)
return batched_graph, labels
# prepare dense tensors for GNNs using them; such as RingGNN, 3WLGNN
def collate_dense_gnn(self, samples, edge_feat):
# The input samples is a list of pairs (graph, label).
graphs, labels = map(list, zip(*samples))
labels = torch.tensor(np.array(labels)).unsqueeze(1)
#tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]
#tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]
#snorm_n = tab_snorm_n[0][0].sqrt()
#batched_graph = dgl.batch(graphs)
g = graphs[0]
adj = self._sym_normalize_adj(g.adjacency_matrix().to_dense())
"""
Adapted from https://github.com/leichen2018/Ring-GNN/
Assigning node and edge feats::
we have the adjacency matrix in R^{n x n}, the node features in R^{d_n} and edge features R^{d_e}.
Then we build a zero-initialized tensor, say T, in R^{(1 + d_n + d_e) x n x n}. T[0, :, :] is the adjacency matrix.
The diagonal T[1:1+d_n, i, i], i = 0 to n-1, store the node feature of node i.
The off diagonal T[1+d_n:, i, j] store edge features of edge(i, j).
"""
zero_adj = torch.zeros_like(adj)
if edge_feat:
# use edge feats also to prepare adj
adj_with_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type + self.num_bond_type)])
adj_with_edge_feat = torch.cat([adj.unsqueeze(0), adj_with_edge_feat], dim=0)
us, vs = g.edges()
for idx, edge_label in enumerate(g.edata['feat']):
adj_with_edge_feat[edge_label.item()+1+self.num_atom_type][us[idx]][vs[idx]] = 1
for node, node_label in enumerate(g.ndata['feat']):
adj_with_edge_feat[node_label.item()+1][node][node] = 1
x_with_edge_feat = adj_with_edge_feat.unsqueeze(0)
return None, x_with_edge_feat, labels
else:
# use only node feats to prepare adj
adj_no_edge_feat = torch.stack([zero_adj for j in range(self.num_atom_type)])
adj_no_edge_feat = torch.cat([adj.unsqueeze(0), adj_no_edge_feat], dim=0)
for node, node_label in enumerate(g.ndata['feat']):
adj_no_edge_feat[node_label.item()+1][node][node] = 1
x_no_edge_feat = adj_no_edge_feat.unsqueeze(0)
return x_no_edge_feat, None, labels
def _sym_normalize_adj(self, adj):
deg = torch.sum(adj, dim = 0)#.squeeze()
deg_inv = torch.where(deg>0, 1./torch.sqrt(deg), torch.zeros(deg.size()))
deg_inv = torch.diag(deg_inv)
return torch.mm(deg_inv, torch.mm(adj, deg_inv))
def _add_self_loops(self):
# function for adding self loops
# this function will be called only if self_loop flag is True
self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]
self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]
self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]
def _add_positional_encodings(self, pos_enc_dim):
        # Graph positional encoding via Laplacian eigenvectors
self.train.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]
self.val.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]
self.test.graph_lists = [positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]
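# A compact sketch of the linear algebra inside positional_encoding() above,
# written against a hand-built dense adjacency matrix so it can be read without
# DGL. It reuses the module-level numpy import (np); the function is defined
# but never called here, and its name is an illustrative assumption.
def _toy_laplacian_pe(adj, pos_enc_dim=2):
    # adj: dense (n, n) 0/1 adjacency matrix of an undirected graph
    n = adj.shape[0]
    deg = adj.sum(axis=1).clip(1)
    d_inv_sqrt = np.diag(deg ** -0.5)
    # Symmetric normalized Laplacian: L = I - D^{-1/2} A D^{-1/2}
    lap = np.eye(n) - d_inv_sqrt @ adj @ d_inv_sqrt
    eig_val, eig_vec = np.linalg.eig(lap)
    order = eig_val.argsort()  # increasing order, as above
    # Drop the trivial first eigenvector, keep the next pos_enc_dim columns
    return np.real(eig_vec[:, order])[:, 1:pos_enc_dim + 1]

# Example (not executed on import): a 4-node path graph 0-1-2-3 gives a
# (4, 2) positional-encoding matrix:
# _toy_laplacian_pe(np.array([[0, 1, 0, 0],
#                             [1, 0, 1, 0],
#                             [0, 1, 0, 1],
#                             [0, 0, 1, 0]], dtype=float)).shape == (4, 2)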
| 39.475862 | 127 | 0.604385 |
794ee47bbdd20abbf3c979aa78624ea349e97ef7 | 1,614 | py | Python | nexus/examples/qmcpack/afqmc_pyscf/08_diamond_2x2x2_kpoint_sym/diamond_afqmc.py | prckent/qmcpack | 127caf219ee99c2449b803821fcc8b1304b66ee1 | ["NCSA"] | null | null | null | nexus/examples/qmcpack/afqmc_pyscf/08_diamond_2x2x2_kpoint_sym/diamond_afqmc.py | prckent/qmcpack | 127caf219ee99c2449b803821fcc8b1304b66ee1 | ["NCSA"] | null | null | null | nexus/examples/qmcpack/afqmc_pyscf/08_diamond_2x2x2_kpoint_sym/diamond_afqmc.py | prckent/qmcpack | 127caf219ee99c2449b803821fcc8b1304b66ee1 | ["NCSA"] | null | null | null |
#! /usr/bin/env python
from nexus import settings,job,run_project,obj
from nexus import generate_physical_system
from nexus import generate_pyscf
from nexus import generate_pyscf_to_afqmc
from nexus import generate_qmcpack
settings(
results = '',
sleep = 3,
machine = 'ws16',
)
a = 3.6
system = generate_physical_system(
units = 'A',
axes = [[ 0, a/2, a/2 ],
[ a/2, 0, a/2 ],
[ a/2, a/2, 0 ]],
elem = ('C','C'),
pos = [[ 0, 0, 0 ],
[ a/4, a/4, a/4 ]],
tiling = (2,2,2),
kgrid = (1,1,1),
kshift = (0,0,0),
)
scf = generate_pyscf(
identifier = 'scf',
path = 'rhf',
job = job(serial=True),
template = './scf_template.py',
system = system,
cell = obj(
basis = 'gth-szv',
pseudo = 'gth-pade',
mesh = [25,25,25],
verbose = 5,
),
checkpoint = True,
)
p2a = generate_pyscf_to_afqmc(
identifier = 'p2a',
path = 'rhf',
job = job(serial=True),
cholesky_threshold = 1e-5,
ao = True,
kpoint = True,
verbose = True,
dependencies = (scf,'wavefunction'),
)
qmc = generate_qmcpack(
identifier = 'qmc',
path = 'afqmc',
job = job(cores=1,app='qmcpack'),
system = system,
input_type = 'basic_afqmc',
blocks = 100,
timestep = 0.01,
dependencies = (p2a,'wavefunction'),
)
run_project()
| 23.735294 | 46 | 0.475217 |
794ee4bef4e62bc5a0149e39d2c7342882b1765d | 1,559 | py | Python | bglcapi/bgapi/gatt/parse.py | edgebr/python-bgapi | 0aeb525edf605e892b20f5c3fb11269cce0c5bdf | ["MIT"] | null | null | null | bglcapi/bgapi/gatt/parse.py | edgebr/python-bgapi | 0aeb525edf605e892b20f5c3fb11269cce0c5bdf | ["MIT"] | null | null | null | bglcapi/bgapi/gatt/parse.py | edgebr/python-bgapi | 0aeb525edf605e892b20f5c3fb11269cce0c5bdf | ["MIT"] | null | null | null |
from struct import (unpack_from, calcsize)
from bglcapi.types import MessageType
from . import rsp
from . import evt
PARSE_MAP = {
MessageType.COMMAND_RESPONSE: {
0x03: rsp.discover_characteristics,
0x04: rsp.discover_characteristics_by_uuid,
0x06: rsp.discover_descriptors,
0x01: rsp.discover_primary_services,
0x02: rsp.discover_primary_services_by_uuid,
0x0c: rsp.execute_characteristic_value_write,
0x10: rsp.find_included_services,
0x13: rsp.prepare_characteristic_value_reliable_write,
0x0b: rsp.prepare_characteristic_value_write,
0x07: rsp.read_characteristic_value,
0x08: rsp.read_characteristic_value_by_uuid,
0x12: rsp.read_characteristic_value_from_offset,
0x0e: rsp.read_descriptor_value,
0x11: rsp.read_multiple_characteristic_values,
0x0d: rsp.send_characteristic_confirmation,
0x05: rsp.set_characteristic_notification,
0x00: rsp.set_max_mtu,
0x09: rsp.write_characteristic_value,
0x0a: rsp.write_characteristic_value_without_response,
0x0f: rsp.write_descriptor_value,
},
MessageType.EVENT: {
0x02: evt.characteristic,
0x04: evt.characteristic_value,
0x03: evt.descriptor,
0x05: evt.descriptor_value,
0x00: evt.mtu_exchanged,
0x06: evt.procedure_completed,
0x01: evt.service,
},
}
def from_binary(msg_type: int, msg_id: int, data: bytes, offset: int):
return PARSE_MAP[msg_type][msg_id](data, offset)
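# A small illustration of the dispatch pattern from_binary() relies on: the
# outer key selects the message class, the inner key selects the concrete
# decoder. The toy map, ids and handlers below are illustrative stand-ins,
# not part of the BGAPI tables above.
def _dispatch_sketch():
    toy_map = {
        "rsp": {0x00: lambda data, offset: ("set_max_mtu", data[offset:])},
        "evt": {0x00: lambda data, offset: ("mtu_exchanged", data[offset:])},
    }
    payload = bytes.fromhex("1700")
    return toy_map["rsp"][0x00](payload, 0)  # -> ("set_max_mtu", b"\x17\x00")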
| 34.644444 | 70 | 0.710712 |
794ee50af211b2ef7d89826609589befe66e556d | 6,869 | py | Python | uibcdf_systems/systems/attic/alanine_dipeptide.py | uibcdf/Molecular-Systems | 74c4313ae25584ad24bea65f961280f187eda9cb | ["MIT"] | null | null | null | uibcdf_systems/systems/attic/alanine_dipeptide.py | uibcdf/Molecular-Systems | 74c4313ae25584ad24bea65f961280f187eda9cb | ["MIT"] | null | null | null | uibcdf_systems/systems/attic/alanine_dipeptide.py | uibcdf/Molecular-Systems | 74c4313ae25584ad24bea65f961280f187eda9cb | ["MIT"] | null | null | null |
import openmm.unit as unit
class AlanineDipeptideVacuum():
"""Alanine dipeptide in vacuum
Alanine dipeptide same as OpenMMTools
Attributes
----------
system
Xxx xxx
Methods
-------
"""
def __init__(self, forcefield='AMBER14', constraints='h_bonds', hydrogen_mass=None):
"""XXX
A new test system is returned with the openmm system of particles in an external double
well potential.
Parameters
----------
forcefield: xxx
XXX
constraints: app.Hbonds
XXX
hydrogen_mass: None
XXX
Examples
--------
>>> from uibcdf_test_systems.systems import AlanineDipeptideVacuum
>>> from openmm import unit
>>> dialanine = AlanineDipeptideVacuum(forcefield='xxx')
Notes
-----
See `corresponding documentation in the user guide regarding this class
<../../systems/dialanine.html>`_.
"""
# Parameters
self.parameters={}
self.parameters['forcefield']=forcefield
self.parameters['constraints']=constraints
self.parameters['hydrogen_mass']=hydrogen_mass
# openmm.Modeller
from molsysmt import convert
from .files import alanine_dipeptide
file_pdb = alanine_dipeptide['vacuum.pdb']
item_modeller = convert(file_pdb, to_form='openmm.Modeller')
# OpenMM topology
self.topology = item_modeller.topology
# Coordinates
self.coordinates = item_modeller.positions
# OpenMM system
self.system = convert(item_modeller, to_form='openmm.System', forcefield=forcefield,
constraints=constraints, hydrogen_mass=hydrogen_mass,
implicit_solvent=None, non_bonded_method='no_cutoff')
class AlanineDipeptideImplicitSolvent():
"""Alanine dipeptide in implicit solvent
Alanine dipeptide same as OpenMMTools
Attributes
----------
system
Xxx xxx
positions
Xxx xxx
topology
Xxx xxx
Methods
-------
"""
def __init__(self, forcefield='AMBER14', implicit_solvent='OBC1', solute_dielectric=1.0,
solvent_dielectric=78.3, implicit_solvent_kappa=0.0/unit.nanometer,
implicit_solvent_salt_conc=0.0*unit.mole/unit.liter,
constraints='h_bonds', hydrogen_mass=None):
"""Creating a new instance of AlanineDipeptideImplicitSolvent
...
Parameters
----------
forcefield: xxx
XXX
constraints: app.Hbonds
XXX
hydrogen_mass: None
XXX
Examples
--------
>>> from uibcdf_test_systems.systems import AlanineDipeptideImplicitSolvent
>>> from openmm import unit
>>> dialanine = AlanineDipeptideImplicitSolvent(forcefield='xxx')
Notes
-----
See `corresponding documentation in the user guide regarding this class
<../../systems/dialanine.html>`_.
"""
# Parameters
self.parameters={}
self.parameters['forcefield']=forcefield
self.parameters['implicit_solvent']=implicit_solvent
self.parameters['solute_dielectric']=solute_dielectric
self.parameters['solvent_dielectric']=solvent_dielectric
self.parameters['implicit_solvent_kappa']=implicit_solvent_kappa
self.parameters['implicit_solvent_salt_conc']=implicit_solvent_salt_conc
self.parameters['constraints']=constraints
self.parameters['hydrogen_mass']=hydrogen_mass
# openmm.Modeller
from molsysmt import convert
from .files import alanine_dipeptide
file_pdb = alanine_dipeptide['vacuum.pdb']
item_modeller = convert(file_pdb, to_form='openmm.Modeller')
# OpenMM topology
self.topology = item_modeller.topology
# Coordinates
self.coordinates = item_modeller.positions
# OpenMM system
self.system = convert(item_modeller, to_form='openmm.System', forcefield=forcefield,
constraints=constraints, hydrogen_mass=hydrogen_mass, non_bonded_method='no_cutoff',
implicit_solvent=implicit_solvent, solute_dielectric=solute_dielectric,
solvent_dielectric=solvent_dielectric,
implicit_solvent_kappa=implicit_solvent_kappa,
implicit_solvent_salt_conc=implicit_solvent_salt_conc
)
class AlanineDipeptideExplicitSolvent():
"""Alanine dipeptide in explicit solvent
Alanine dipeptide same as OpenMMTools
Attributes
----------
system
Xxx xxx
positions
Xxx xxx
topology
Xxx xxx
Methods
-------
"""
def __init__(self, forcefield = 'AMBER14', water_model = 'TIP3P', rigid_water = True, constraints = 'h_bonds', hydrogen_mass = None,
non_bonded_method = 'PME', non_bonded_cutoff = 10.0 * unit.angstroms, switch_distance = 8.5 * unit.angstroms,
use_dispersion_correction = True, ewald_error_tolerance = 1.0e-5):
"""Creating a new instance of AlanineDipeptideExplicitSolvent
Parameters
----------
forcefield: xxx
XXX
constraints: app.Hbonds
XXX
hydrogen_mass: None
XXX
Examples
--------
>>> from uibcdf_test_systems.systems import AlanineDipeptideExplicitSolvent
>>> from openmm import unit
>>> dialanine = AlanineDipeptideExplicitSolvent()
Notes
-----
See `corresponding documentation in the user guide regarding this class
<../../systems/dialanine.html>`_.
"""
# openmm.Modeller
from molsysmt import convert
from .files import alanine_dipeptide
file_pdb = alanine_dipeptide['octahedral_14.pdb']
item_modeller = convert(file_pdb, to_form='openmm.Modeller')
# OpenMM topology
self.topology = item_modeller.topology
# Coordinates
self.coordinates = item_modeller.positions
# OpenMM system
self.system = convert(item_modeller, to_form='openmm.System', forcefield=forcefield,
water_model=water_model, rigid_water=rigid_water, constraints=constraints,
hydrogen_mass=hydrogen_mass, non_bonded_method=non_bonded_method,
non_bonded_cutoff=non_bonded_cutoff, switch_distance=switch_distance,
use_dispersion_correction=use_dispersion_correction,
ewald_error_tolerance=ewald_error_tolerance)
| 27.697581 | 136 | 0.614646 |
794ee5d664506e2c6c727910a769443e84d644a0 | 459 | py | Python | client/core/movement/__init__.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | ["MIT"] | null | null | null | client/core/movement/__init__.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | ["MIT"] | null | null | null | client/core/movement/__init__.py | krerkkiat/space-invader | 428b1041c9246b55cb63bc6c0b2ec20beb7a32ed | ["MIT"] | null | null | null |
from core.movement.paths import SpiralPath
class Movement:
def __init__(self, path, start=0):
self._path = path
self._currentPos = start-1
def move(self):
self._currentPos += 1
if self._currentPos >= self._path.length:
self._currentPos = 0
if self._currentPos >= 0:
return (self._path.x(self._currentPos), self._path.y(self._currentPos))
else:
return (-100, -100)
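# A tiny usage sketch of Movement with a stub path object. SpiralPath from
# core.movement.paths plays this role in the real game; _LinePath below is an
# illustrative stand-in with the same x()/y()/length interface.
class _LinePath:
    """Straight horizontal path: x advances one step at a time, y is fixed."""
    def __init__(self, length, y=50):
        self.length = length
        self._y = y

    def x(self, i):
        return i

    def y(self, i):
        return self._y

# movement = Movement(_LinePath(length=3))
# [movement.move() for _ in range(4)]  ->  [(0, 50), (1, 50), (2, 50), (0, 50)]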
| 27 | 83 | 0.59695 |
794ee746a270782dd0d78013bf73b6ea5b0b1a33 | 2,664 | py | Python | src/japronto/protocol/tracing.py | kzh3ka/japronto | a526277a2f59100388c9f39d4ca22bfb4909955b | ["MIT"] | 9,472 | 2017-01-31T13:22:02.000Z | 2022-03-31T13:15:46.000Z | src/japronto/protocol/tracing.py | zishenWang/japronto | f501c9f59fc83d0e4ab928ecdacf1735a2cc5cb6 | ["MIT"] | 170 | 2017-01-31T18:50:13.000Z | 2022-03-17T13:32:04.000Z | src/japronto/protocol/tracing.py | zishenWang/japronto | f501c9f59fc83d0e4ab928ecdacf1735a2cc5cb6 | ["MIT"] | 739 | 2017-01-31T17:42:03.000Z | 2022-02-24T05:10:32.000Z |
from functools import partial
from parser.libpicohttpparser import ffi
from request import HttpRequest
class TracingProtocol:
def __init__(self, on_headers_adapter: callable,
on_body_adapter: callable):
self.requests = []
self.error = None
self.on_headers_adapter = on_headers_adapter
self.on_body_adapter = on_body_adapter
self.on_headers_call_count = 0
self.on_body_call_count = 0
self.on_error_call_count = 0
def on_headers(self, *args):
self.request = self.on_headers_adapter(*args)
self.requests.append(self.request)
self.on_headers_call_count += 1
def on_body(self, body):
self.request.body = self.on_body_adapter(body)
self.on_body_call_count += 1
def on_error(self, error: str):
self.error = error
self.on_error_call_count += 1
def _request_from_cprotocol(method: memoryview, path: memoryview, version: int,
headers: memoryview):
method = method.tobytes().decode('ascii')
path = path.tobytes().decode('ascii')
version = "1.0" if version == 0 else "1.1"
headers_len = headers.nbytes // ffi.sizeof("struct phr_header")
headers_cdata = ffi.from_buffer(headers)
headers_cdata = ffi.cast(
'struct phr_header[{}]'.format(headers_len), headers_cdata)
headers = _extract_headers(headers_cdata)
return HttpRequest(method, path, version, headers)
def _body_from_cprotocol(body: memoryview):
return None if body is None else body.tobytes()
def _request_from_cffiprotocol(method: "char[]", path: "char[]", version: int,
headers: "struct phr_header[]"):
method = ffi.buffer(method)[:].decode('ascii')
path = ffi.buffer(path)[:].decode('ascii')
version = "1.0" if version == 0 else "1.1"
headers = _extract_headers(headers)
return HttpRequest(method, path, version, headers)
def _body_from_cffiprotocol(body: "char[]"):
return None if body is None else ffi.buffer(body)[:]
def _extract_headers(headers_cdata: "struct phr_header[]"):
headers = {}
for header in headers_cdata:
name = ffi.string(header.name, header.name_len).decode('ascii').title()
value = ffi.string(header.value, header.value_len).decode('latin1')
headers[name] = value
return headers
CTracingProtocol = partial(
TracingProtocol, on_headers_adapter=_request_from_cprotocol,
on_body_adapter=_body_from_cprotocol)
CffiTracingProtocol = partial(
TracingProtocol, on_headers_adapter=_request_from_cffiprotocol,
on_body_adapter=_body_from_cffiprotocol)
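# A brief usage sketch of TracingProtocol with throwaway adapters. The lambdas
# below are illustrative stand-ins for the C/CFFI adapters defined above; they
# only show how the call counters track what the parser reported.
def _tracing_sketch():
    proto = TracingProtocol(
        on_headers_adapter=lambda method, path, version, headers: type(
            "Req", (), {"body": None}
        )(),
        on_body_adapter=lambda body: bytes(body),
    )
    proto.on_headers(b"GET", b"/", 1, b"")
    proto.on_body(b"payload")
    return proto.on_headers_call_count, proto.on_body_call_count  # (1, 1)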
| 29.6 | 79 | 0.683934 |
794ee96595b1601bbcd3f60c4dfd2af5ee969ab3 | 40,294 | py | Python | tests/orm/test_model.py | fenestron/lorator | 1966f095a67ae65e816b1c8f7359b9f203cd5c4f | ["MIT"] | null | null | null | tests/orm/test_model.py | fenestron/lorator | 1966f095a67ae65e816b1c8f7359b9f203cd5c4f | ["MIT"] | null | null | null | tests/orm/test_model.py | fenestron/lorator | 1966f095a67ae65e816b1c8f7359b9f203cd5c4f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import simplejson as json
import hashlib
import time
import datetime
from pendulum import Pendulum
from flexmock import flexmock, flexmock_teardown
from .. import OratorTestCase, mock
from ..utils import MockModel, MockQueryBuilder, MockConnection, MockProcessor
from lorator.query.builder import QueryBuilder
from lorator.query.grammars import QueryGrammar
from lorator.query.processors import QueryProcessor
from lorator.orm.builder import Builder
from lorator.orm.model import Model
from lorator.orm.utils import mutator, accessor
from lorator.exceptions.orm import ModelNotFound, MassAssignmentError
from lorator.orm.collection import Collection
from lorator.connections import Connection
from lorator import DatabaseManager
from lorator.utils import basestring
from lorator.events import Event
class OrmModelTestCase(OratorTestCase):
def tearDown(self):
flexmock_teardown()
def test_attributes_manipulation(self):
model = OrmModelStub()
model.name = "foo"
self.assertEqual("foo", model.name)
del model.name
self.assertFalse(hasattr(model, "name"))
model.list_items = {"name": "john"}
self.assertEqual({"name": "john"}, model.list_items)
attributes = model.get_attributes()
self.assertEqual(json.dumps({"name": "john"}), attributes["list_items"])
def test_dirty_attributes(self):
model = OrmModelStub(foo="1", bar=2, baz=3)
model.foo = 1
model.bar = 20
model.baz = 30
self.assertTrue(model.is_dirty())
self.assertTrue(model.is_dirty("foo"))
self.assertTrue(model.is_dirty("bar"))
self.assertTrue(model.is_dirty("baz"))
self.assertTrue(model.is_dirty("foo", "bar", "baz"))
def test_calculated_attributes(self):
model = OrmModelStub()
model.password = "secret"
attributes = model.get_attributes()
self.assertFalse("password" in attributes)
self.assertEqual("******", model.password)
self.assertEqual(
"5ebe2294ecd0e0f08eab7690d2a6ee69", attributes["password_hash"]
)
self.assertEqual("5ebe2294ecd0e0f08eab7690d2a6ee69", model.password_hash)
def test_new_instance_returns_instance_wit_attributes_set(self):
model = OrmModelStub()
instance = model.new_instance({"name": "john"})
self.assertIsInstance(instance, OrmModelStub)
self.assertEqual("john", instance.name)
def test_hydrate_creates_collection_of_models(self):
data = [{"name": "john"}, {"name": "jane"}]
collection = OrmModelStub.hydrate(data, "foo_connection")
self.assertIsInstance(collection, Collection)
self.assertEqual(2, len(collection))
self.assertIsInstance(collection[0], OrmModelStub)
self.assertIsInstance(collection[1], OrmModelStub)
self.assertEqual(collection[0].get_attributes(), collection[0].get_original())
self.assertEqual(collection[1].get_attributes(), collection[1].get_original())
self.assertEqual("john", collection[0].name)
self.assertEqual("jane", collection[1].name)
self.assertEqual("foo_connection", collection[0].get_connection_name())
self.assertEqual("foo_connection", collection[1].get_connection_name())
def test_hydrate_raw_makes_raw_query(self):
model = OrmModelHydrateRawStub()
connection = MockConnection().prepare_mock()
connection.select.return_value = []
model.get_connection = mock.MagicMock(return_value=connection)
def _set_connection(name):
model.__connection__ = name
return model
OrmModelHydrateRawStub.set_connection = mock.MagicMock(
side_effect=_set_connection
)
collection = OrmModelHydrateRawStub.hydrate_raw("SELECT ?", ["foo"])
self.assertEqual("hydrated", collection)
connection.select.assert_called_once_with("SELECT ?", ["foo"])
def test_create_saves_new_model(self):
model = OrmModelSaveStub.create(name="john")
self.assertTrue(model.get_saved())
self.assertEqual("john", model.name)
def test_find_method_calls_query_builder_correctly(self):
result = OrmModelFindStub.find(1)
self.assertEqual("foo", result)
def test_find_use_write_connection(self):
OrmModelFindWithWriteConnectionStub.on_write_connection().find(1)
def test_find_with_list_calls_query_builder_correctly(self):
result = OrmModelFindManyStub.find([1, 2])
self.assertEqual("foo", result)
def test_destroy_method_calls_query_builder_correctly(self):
OrmModelDestroyStub.destroy(1, 2, 3)
def test_with_calls_query_builder_correctly(self):
result = OrmModelWithStub.with_("foo", "bar")
self.assertEqual("foo", result)
def test_update_process(self):
query = flexmock(Builder)
query.should_receive("where").once().with_args("id", 1)
query.should_receive("update").once().with_args({"name": "john"})
model = OrmModelStub()
model.new_query = mock.MagicMock(
return_value=Builder(QueryBuilder(None, None, None))
)
model._update_timestamps = mock.MagicMock()
events = flexmock(Event())
model.__dispatcher__ = events
events.should_receive("fire").once().with_args(
"saving: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"updating: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"updated: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"saved: %s" % model.__class__.__name__, model
).and_return(True)
model.id = 1
model.foo = "bar"
model.sync_original()
model.name = "john"
model.set_exists(True)
self.assertTrue(model.save())
model.new_query.assert_called_once_with()
model._update_timestamps.assert_called_once_with()
def test_update_process_does_not_override_timestamps(self):
query = flexmock(Builder)
query.should_receive("where").once().with_args("id", 1)
query.should_receive("update").once().with_args(
{"created_at": "foo", "updated_at": "bar"}
)
model = OrmModelStub()
model.new_query = mock.MagicMock(
return_value=Builder(QueryBuilder(None, None, None))
)
model._update_timestamps = mock.MagicMock()
events = flexmock(Event())
model.__dispatcher__ = events
events.should_receive("fire").once().with_args(
"saving: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"updating: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"updated: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"saved: %s" % model.__class__.__name__, model
).and_return(True)
model.id = 1
model.sync_original()
model.created_at = "foo"
model.updated_at = "bar"
model.set_exists(True)
self.assertTrue(model.save())
model.new_query.assert_called_once_with()
self.assertTrue(model._update_timestamps.called)
def test_creating_with_only_created_at_column(self):
query_builder = flexmock(QueryBuilder)
query_builder.should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model = flexmock(OrmModelCreatedAt())
model.should_receive("new_query").and_return(
Builder(QueryBuilder(None, None, None))
)
model.should_receive("set_created_at").once()
model.should_receive("set_updated_at").never()
model.name = "john"
model.save()
def test_creating_with_only_updated_at_column(self):
query_builder = flexmock(QueryBuilder)
query_builder.should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model = flexmock(OrmModelUpdatedAt())
model.should_receive("new_query").and_return(
Builder(QueryBuilder(None, None, None))
)
model.should_receive("set_created_at").never()
model.should_receive("set_updated_at").once()
model.name = "john"
model.save()
def test_updating_with_only_created_at_column(self):
query = flexmock(Builder)
query.should_receive("where").once().with_args("id", 1)
query.should_receive("update").once().with_args({"name": "john"})
model = flexmock(OrmModelCreatedAt())
model.id = 1
model.sync_original()
model.set_exists(True)
model.should_receive("new_query").and_return(
Builder(QueryBuilder(None, None, None))
)
model.should_receive("set_created_at").never()
model.should_receive("set_updated_at").never()
model.name = "john"
model.save()
def test_updating_with_only_updated_at_column(self):
query = flexmock(Builder)
query.should_receive("where").once().with_args("id", 1)
query.should_receive("update").once().with_args({"name": "john"})
model = flexmock(OrmModelUpdatedAt())
model.id = 1
model.sync_original()
model.set_exists(True)
model.should_receive("new_query").and_return(
Builder(QueryBuilder(None, None, None))
)
model.should_receive("set_created_at").never()
model.should_receive("set_updated_at").once()
model.name = "john"
model.save()
def test_update_is_cancelled_if_updating_event_returns_false(self):
model = flexmock(OrmModelStub())
query = flexmock(Builder(flexmock(QueryBuilder(None, None, None))))
model.should_receive("new_query_without_scopes").once().and_return(query)
events = flexmock(Event())
model.__dispatcher__ = events
events.should_receive("fire").once().with_args(
"saving: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"updating: %s" % model.__class__.__name__, model
).and_return(False)
model.set_exists(True)
model.foo = "bar"
self.assertFalse(model.save())
def test_update_process_without_timestamps(self):
query = flexmock(Builder)
query.should_receive("where").once().with_args("id", 1)
query.should_receive("update").once().with_args({"name": "john"})
model = flexmock(OrmModelStub())
model.__timestamps__ = False
model.new_query = mock.MagicMock(
return_value=Builder(QueryBuilder(None, None, None))
)
model._update_timestamps = mock.MagicMock()
events = flexmock(Event())
model.__dispatcher__ = events
model.should_receive("_fire_model_event").and_return(True)
model.id = 1
model.sync_original()
model.name = "john"
model.set_exists(True)
self.assertTrue(model.save())
model.new_query.assert_called_once_with()
self.assertFalse(model._update_timestamps.called)
def test_update_process_uses_old_primary_key(self):
query = flexmock(Builder)
query.should_receive("where").once().with_args("id", 1)
query.should_receive("update").once().with_args({"id": 2, "name": "john"})
model = OrmModelStub()
model.new_query = mock.MagicMock(
return_value=Builder(QueryBuilder(None, None, None))
)
model._update_timestamps = mock.MagicMock()
events = flexmock(Event())
model.__dispatcher__ = events
events.should_receive("fire").once().with_args(
"saving: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"updating: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"updated: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"saved: %s" % model.__class__.__name__, model
).and_return(True)
model.id = 1
model.sync_original()
model.id = 2
model.name = "john"
model.set_exists(True)
self.assertTrue(model.save())
model.new_query.assert_called_once_with()
self.assertTrue(model._update_timestamps.called)
def test_timestamps_are_returned_as_objects(self):
model = Model()
model.set_raw_attributes(
{"created_at": "2015-03-24", "updated_at": "2015-03-24"}
)
self.assertIsInstance(model.created_at, Pendulum)
self.assertIsInstance(model.updated_at, Pendulum)
def test_timestamps_are_returned_as_objects_from_timestamps_and_datetime(self):
model = Model()
model.set_raw_attributes(
{"created_at": datetime.datetime.utcnow(), "updated_at": time.time()}
)
self.assertIsInstance(model.created_at, Pendulum)
self.assertIsInstance(model.updated_at, Pendulum)
def test_timestamps_are_returned_as_objects_on_create(self):
model = Model()
model.unguard()
timestamps = {
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
}
instance = model.new_instance(timestamps)
self.assertIsInstance(instance.created_at, Pendulum)
self.assertIsInstance(instance.updated_at, Pendulum)
model.reguard()
def test_timestamps_return_none_if_set_to_none(self):
model = Model()
model.unguard()
timestamps = {
"created_at": datetime.datetime.now(),
"updated_at": datetime.datetime.now(),
}
instance = model.new_instance(timestamps)
instance.created_at = None
self.assertIsNone(instance.created_at)
model.reguard()
def test_insert_process(self):
query = flexmock(Builder)
model = OrmModelStub()
query_builder = flexmock(QueryBuilder)
query_builder.should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model.new_query = mock.MagicMock(
return_value=Builder(QueryBuilder(None, None, None))
)
model._update_timestamps = mock.MagicMock()
events = flexmock(Event())
model.__dispatcher__ = events
events.should_receive("fire").once().with_args(
"saving: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"creating: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"created: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"saved: %s" % model.__class__.__name__, model
).and_return(True)
model.name = "john"
model.set_exists(False)
self.assertTrue(model.save())
self.assertEqual(1, model.id)
self.assertTrue(model.exists)
self.assertTrue(model._update_timestamps.called)
model = OrmModelStub()
query_builder.should_receive("insert").once().with_args({"name": "john"})
model.new_query = mock.MagicMock(
return_value=Builder(QueryBuilder(None, None, None))
)
model._update_timestamps = mock.MagicMock()
model.set_incrementing(False)
events = flexmock(Event())
model.__dispatcher__ = events
events.should_receive("fire").once().with_args(
"saving: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"creating: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"created: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"saved: %s" % model.__class__.__name__, model
).and_return(True)
model.name = "john"
model.set_exists(False)
self.assertTrue(model.save())
self.assertFalse(hasattr(model, "id"))
self.assertTrue(model.exists)
self.assertTrue(model._update_timestamps.called)
def test_insert_is_cancelled_if_creating_event_returns_false(self):
model = flexmock(OrmModelStub())
query = flexmock(Builder(flexmock(QueryBuilder(None, None, None))))
model.should_receive("new_query_without_scopes").once().and_return(query)
events = flexmock(Event())
model.__dispatcher__ = events
events.should_receive("fire").once().with_args(
"saving: %s" % model.__class__.__name__, model
).and_return(True)
events.should_receive("fire").once().with_args(
"creating: %s" % model.__class__.__name__, model
).and_return(False)
self.assertFalse(model.save())
self.assertFalse(model.exists)
def test_delete_properly_deletes_model(self):
model = OrmModelStub()
builder = flexmock(Builder(QueryBuilder(None, None, None)))
builder.should_receive("where").once().with_args("id", 1).and_return(builder)
builder.should_receive("delete").once()
model.new_query = mock.MagicMock(return_value=builder)
model.touch_owners = mock.MagicMock()
model.set_exists(True)
model.id = 1
model.delete()
self.assertTrue(model.touch_owners.called)
def test_push_no_relations(self):
model = flexmock(Model())
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model.should_receive("new_query").once().and_return(builder)
model.should_receive("_update_timestamps").once()
model.name = "john"
model.set_exists(False)
self.assertTrue(model.push())
self.assertEqual(1, model.id)
self.assertTrue(model.exists)
def test_push_empty_one_relation(self):
model = flexmock(Model())
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model.should_receive("new_query").once().and_return(builder)
model.should_receive("_update_timestamps").once()
model.name = "john"
model.set_exists(False)
model.set_relation("relation_one", None)
self.assertTrue(model.push())
self.assertEqual(1, model.id)
self.assertTrue(model.exists)
self.assertIsNone(model.relation_one)
def test_push_one_relation(self):
related1 = flexmock(Model())
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "related1"}, "id"
).and_return(2)
related1.should_receive("new_query").once().and_return(builder)
related1.should_receive("_update_timestamps").once()
related1.name = "related1"
related1.set_exists(False)
model = flexmock(Model())
model.should_receive("resolve_connection").and_return(
MockConnection().prepare_mock()
)
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model.should_receive("new_query").once().and_return(builder)
model.should_receive("_update_timestamps").once()
model.name = "john"
model.set_exists(False)
model.set_relation("relation_one", related1)
self.assertTrue(model.push())
self.assertEqual(1, model.id)
self.assertTrue(model.exists)
self.assertEqual(2, model.relation_one.id)
self.assertTrue(model.relation_one.exists)
self.assertEqual(2, related1.id)
self.assertTrue(related1.exists)
def test_push_empty_many_relation(self):
model = flexmock(Model())
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model.should_receive("new_query").once().and_return(builder)
model.should_receive("_update_timestamps").once()
model.name = "john"
model.set_exists(False)
model.set_relation("relation_many", Collection([]))
self.assertTrue(model.push())
self.assertEqual(1, model.id)
self.assertTrue(model.exists)
self.assertEqual(0, len(model.relation_many))
def test_push_many_relation(self):
related1 = flexmock(Model())
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "related1"}, "id"
).and_return(2)
related1.should_receive("new_query").once().and_return(builder)
related1.should_receive("_update_timestamps").once()
related1.name = "related1"
related1.set_exists(False)
related2 = flexmock(Model())
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "related2"}, "id"
).and_return(3)
related2.should_receive("new_query").once().and_return(builder)
related2.should_receive("_update_timestamps").once()
related2.name = "related2"
related2.set_exists(False)
model = flexmock(Model())
model.should_receive("resolve_connection").and_return(
MockConnection().prepare_mock()
)
query = flexmock(
QueryBuilder(
MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()
)
)
builder = Builder(query)
builder.get_query().should_receive("insert_get_id").once().with_args(
{"name": "john"}, "id"
).and_return(1)
model.should_receive("new_query").once().and_return(builder)
model.should_receive("_update_timestamps").once()
model.name = "john"
model.set_exists(False)
model.set_relation("relation_many", Collection([related1, related2]))
self.assertTrue(model.push())
self.assertEqual(1, model.id)
self.assertTrue(model.exists)
self.assertEqual(2, len(model.relation_many))
self.assertEqual([2, 3], model.relation_many.lists("id"))
def test_new_query_returns_orator_query_builder(self):
conn = flexmock(Connection)
grammar = flexmock(QueryGrammar)
processor = flexmock(QueryProcessor)
conn.should_receive("get_query_grammar").and_return(grammar)
conn.should_receive("get_post_processor").and_return(processor)
resolver = flexmock(DatabaseManager)
resolver.should_receive("connection").and_return(Connection(None))
OrmModelStub.set_connection_resolver(DatabaseManager({}))
model = OrmModelStub()
builder = model.new_query()
self.assertIsInstance(builder, Builder)
def test_get_and_set_table(self):
model = OrmModelStub()
self.assertEqual("stub", model.get_table())
model.set_table("foo")
self.assertEqual("foo", model.get_table())
def test_get_key_returns_primary_key_value(self):
model = OrmModelStub()
model.id = 1
self.assertEqual(1, model.get_key())
self.assertEqual("id", model.get_key_name())
def test_connection_management(self):
resolver = flexmock(DatabaseManager)
resolver.should_receive("connection").once().with_args("foo").and_return("bar")
OrmModelStub.set_connection_resolver(DatabaseManager({}))
model = OrmModelStub()
model.set_connection("foo")
self.assertEqual("bar", model.get_connection())
def test_serialize(self):
model = OrmModelStub()
model.name = "foo"
model.age = None
model.password = "password1"
model.set_hidden(["password"])
model.set_relation(
"names", Collection([OrmModelStub(bar="baz"), OrmModelStub(bam="boom")])
)
model.set_relation("partner", OrmModelStub(name="jane"))
model.set_relation("group", None)
model.set_relation("multi", Collection())
d = model.serialize()
self.assertIsInstance(d, dict)
self.assertEqual("foo", d["name"])
self.assertEqual("baz", d["names"][0]["bar"])
self.assertEqual("boom", d["names"][1]["bam"])
self.assertEqual("jane", d["partner"]["name"])
self.assertIsNone(d["group"])
self.assertEqual([], d["multi"])
self.assertIsNone(d["age"])
self.assertNotIn("password", d)
model.set_appends(["appendable"])
d = model.to_dict()
self.assertEqual("appended", d["appendable"])
def test_to_dict_includes_default_formatted_timestamps(self):
model = Model()
model.set_raw_attributes(
{"created_at": "2015-03-24", "updated_at": "2015-03-25"}
)
d = model.to_dict()
self.assertEqual("2015-03-24T00:00:00+00:00", d["created_at"])
self.assertEqual("2015-03-25T00:00:00+00:00", d["updated_at"])
def test_to_dict_includes_custom_formatted_timestamps(self):
class Stub(Model):
def get_date_format(self):
return "%d-%m-%-y"
flexmock(Stub).should_receive("_boot_columns").and_return(
["created_at", "updated_at"]
)
model = Stub()
model.set_raw_attributes(
{"created_at": "2015-03-24", "updated_at": "2015-03-25"}
)
d = model.to_dict()
self.assertEqual("24-03-15", d["created_at"])
self.assertEqual("25-03-15", d["updated_at"])
def test_visible_creates_dict_whitelist(self):
model = OrmModelStub()
model.set_visible(["name"])
model.name = "John"
model.age = 28
d = model.to_dict()
self.assertEqual({"name": "John"}, d)
def test_hidden_can_also_exclude_relationships(self):
model = OrmModelStub()
model.name = "John"
model.set_relation("foo", ["bar"])
model.set_hidden(["foo", "list_items", "password"])
d = model.to_dict()
self.assertEqual({"name": "John"}, d)
def test_to_dict_uses_mutators(self):
model = OrmModelStub()
model.list_items = [1, 2, 3]
d = model.to_dict()
self.assertEqual([1, 2, 3], d["list_items"])
model = OrmModelStub(list_items=[1, 2, 3])
d = model.to_dict()
self.assertEqual([1, 2, 3], d["list_items"])
def test_hidden_are_ignored_when_visible(self):
model = OrmModelStub(name="john", age=28, id="foo")
model.set_visible(["name", "id"])
model.set_hidden(["name", "age"])
d = model.to_dict()
self.assertIn("name", d)
self.assertIn("id", d)
self.assertNotIn("age", d)
def test_fillable(self):
model = OrmModelStub()
model.fillable(["name", "age"])
model.fill(name="foo", age=28)
self.assertEqual("foo", model.name)
self.assertEqual(28, model.age)
def test_fill_with_dict(self):
model = OrmModelStub()
model.fill({"name": "foo", "age": 28})
self.assertEqual("foo", model.name)
self.assertEqual(28, model.age)
def test_unguard_allows_anything(self):
model = OrmModelStub()
model.unguard()
model.guard(["*"])
model.fill(name="foo", age=28)
self.assertEqual("foo", model.name)
self.assertEqual(28, model.age)
model.reguard()
def test_underscore_properties_are_not_filled(self):
model = OrmModelStub()
model.fill(_foo="bar")
self.assertEqual({}, model.get_attributes())
def test_guarded(self):
model = OrmModelStub()
model.guard(["name", "age"])
model.fill(name="foo", age="bar", foo="bar")
self.assertFalse(hasattr(model, "name"))
self.assertFalse(hasattr(model, "age"))
self.assertEqual("bar", model.foo)
def test_fillable_overrides_guarded(self):
model = OrmModelStub()
model.guard(["name", "age"])
model.fillable(["age", "foo"])
model.fill(name="foo", age="bar", foo="bar")
self.assertFalse(hasattr(model, "name"))
self.assertEqual("bar", model.age)
self.assertEqual("bar", model.foo)
def test_global_guarded(self):
model = OrmModelStub()
model.guard(["*"])
self.assertRaises(
MassAssignmentError, model.fill, name="foo", age="bar", foo="bar"
)
# TODO: test relations
def test_models_assumes_their_name(self):
model = OrmModelNoTableStub()
self.assertEqual("orm_model_no_table_stubs", model.get_table())
def test_mutator_cache_is_populated(self):
model = OrmModelStub()
expected_attributes = sorted(["list_items", "password", "appendable"])
self.assertEqual(
expected_attributes, sorted(list(model._get_mutated_attributes().keys()))
)
def test_fresh_method(self):
model = flexmock(OrmModelStub())
model.id = 1
model.set_exists(True)
flexmock(Builder)
q = flexmock(QueryBuilder(None, None, None))
query = flexmock(Builder(q))
query.should_receive("where").and_return(query)
query.get_query().should_receive("take").and_return(query)
query.should_receive("get").and_return(Collection([]))
model.should_receive("with_").once().with_args("foo", "bar").and_return(query)
model.fresh(["foo", "bar"])
model.should_receive("with_").once().with_args().and_return(query)
model.fresh()
def test_clone_model_makes_a_fresh_copy(self):
model = OrmModelStub()
model.id = 1
model.set_exists(True)
model.first = "john"
model.last = "doe"
model.created_at = model.fresh_timestamp()
model.updated_at = model.fresh_timestamp()
# TODO: relation
clone = model.replicate()
self.assertFalse(hasattr(clone, "id"))
self.assertFalse(clone.exists)
self.assertEqual("john", clone.first)
self.assertEqual("doe", clone.last)
self.assertFalse(hasattr(clone, "created_at"))
self.assertFalse(hasattr(clone, "updated_at"))
# TODO: relation
clone.first = "jane"
self.assertEqual("john", model.first)
self.assertEqual("jane", clone.first)
def test_get_attribute_raise_attribute_error(self):
model = OrmModelStub()
try:
relation = model.incorrect_relation
self.fail("AttributeError not raised")
except AttributeError:
pass
def test_increment(self):
query = flexmock()
model_mock = flexmock(OrmModelStub, new_query=lambda: query)
model = OrmModelStub()
model.set_exists(True)
model.id = 1
model.sync_original_attribute("id")
model.foo = 2
model_mock.should_receive("new_query").and_return(query)
query.should_receive("where").and_return(query)
query.should_receive("increment")
model.public_increment("foo")
self.assertEqual(3, model.foo)
self.assertFalse(model.is_dirty())
# TODO: relationship touch_owners is propagated
# TODO: relationship touch_owners is not propagated if no relationship result
    def test_timestamps_are_not_updated_with_timestamps_false_save_option(self):
query = flexmock(Builder)
query.should_receive("where").once().with_args("id", 1)
query.should_receive("update").once().with_args({"name": "john"})
model = OrmModelStub()
model.new_query = mock.MagicMock(
return_value=Builder(QueryBuilder(None, None, None))
)
model.id = 1
model.sync_original()
model.name = "john"
model.set_exists(True)
self.assertTrue(model.save({"timestamps": False}))
self.assertFalse(hasattr(model, "updated_at"))
model.new_query.assert_called_once_with()
def test_casts(self):
model = OrmModelCastingStub()
model.first = "3"
model.second = "4.0"
model.third = 2.5
model.fourth = 1
model.fifth = 0
model.sixth = {"foo": "bar"}
model.seventh = ["foo", "bar"]
model.eighth = {"foo": "bar"}
self.assertIsInstance(model.first, int)
self.assertIsInstance(model.second, float)
self.assertIsInstance(model.third, basestring)
self.assertIsInstance(model.fourth, bool)
self.assertIsInstance(model.fifth, bool)
self.assertIsInstance(model.sixth, dict)
self.assertIsInstance(model.seventh, list)
self.assertIsInstance(model.eighth, dict)
self.assertTrue(model.fourth)
self.assertFalse(model.fifth)
self.assertEqual({"foo": "bar"}, model.sixth)
self.assertEqual({"foo": "bar"}, model.eighth)
self.assertEqual(["foo", "bar"], model.seventh)
d = model.to_dict()
self.assertIsInstance(d["first"], int)
self.assertIsInstance(d["second"], float)
self.assertIsInstance(d["third"], basestring)
self.assertIsInstance(d["fourth"], bool)
self.assertIsInstance(d["fifth"], bool)
self.assertIsInstance(d["sixth"], dict)
self.assertIsInstance(d["seventh"], list)
self.assertIsInstance(d["eighth"], dict)
self.assertTrue(d["fourth"])
self.assertFalse(d["fifth"])
self.assertEqual({"foo": "bar"}, d["sixth"])
self.assertEqual({"foo": "bar"}, d["eighth"])
self.assertEqual(["foo", "bar"], d["seventh"])
def test_casts_preserve_null(self):
model = OrmModelCastingStub()
model.first = None
model.second = None
model.third = None
model.fourth = None
model.fifth = None
model.sixth = None
model.seventh = None
model.eighth = None
self.assertIsNone(model.first)
self.assertIsNone(model.second)
self.assertIsNone(model.third)
self.assertIsNone(model.fourth)
self.assertIsNone(model.fifth)
self.assertIsNone(model.sixth)
self.assertIsNone(model.seventh)
self.assertIsNone(model.eighth)
d = model.to_dict()
self.assertIsNone(d["first"])
self.assertIsNone(d["second"])
self.assertIsNone(d["third"])
self.assertIsNone(d["fourth"])
self.assertIsNone(d["fifth"])
self.assertIsNone(d["sixth"])
self.assertIsNone(d["seventh"])
self.assertIsNone(d["eighth"])
def test_get_foreign_key(self):
model = OrmModelStub()
model.set_table("stub")
self.assertEqual("stub_id", model.get_foreign_key())
def test_default_values(self):
model = OrmModelDefaultAttributes()
self.assertEqual("bar", model.foo)
def test_get_morph_name(self):
model = OrmModelStub()
self.assertEqual("stub", model.get_morph_name())
class OrmModelStub(Model):
__table__ = "stub"
__guarded__ = []
@accessor
def list_items(self):
return json.loads(self.get_raw_attribute("list_items"))
@list_items.mutator
def set_list_items(self, value):
self.set_raw_attribute("list_items", json.dumps(value))
@mutator
def password(self, value):
self.set_raw_attribute("password_hash", hashlib.md5(value.encode()).hexdigest())
@password.accessor
def get_password(self):
return "******"
@accessor
def appendable(self):
return "appended"
def public_increment(self, column, amount=1):
return self._increment(column, amount)
def get_dates(self):
return []
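# A hedged illustration (not part of the original fixtures): the accessor and
# mutator pairs on OrmModelStub route plain attribute access through these
# methods, which is what test_to_dict_uses_mutators above relies on.
def _sketch_accessor_mutator_round_trip():
    stub = OrmModelStub()
    stub.list_items = [1, 2, 3]  # the mutator stores the list as a JSON string
    return stub.list_items       # the accessor decodes it back to [1, 2, 3]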
class OrmModelHydrateRawStub(Model):
@classmethod
def hydrate(cls, items, connection=None):
return "hydrated"
class OrmModelWithStub(Model):
def new_query(self):
mock = flexmock(Builder(None))
mock.should_receive("with_").once().with_args("foo", "bar").and_return("foo")
return mock
class OrmModelSaveStub(Model):
__table__ = "save_stub"
__guarded__ = []
def save(self, options=None):
self.__saved = True
def set_incrementing(self, value):
self.__incrementing__ = value
def get_saved(self):
return self.__saved
class OrmModelFindStub(Model):
def new_query(self):
flexmock(Builder).should_receive("find").once().with_args(1, ["*"]).and_return(
"foo"
)
return Builder(None)
class OrmModelFindWithWriteConnectionStub(Model):
def new_query(self):
mock = flexmock(Builder)
mock_query = flexmock(QueryBuilder)
mock_query.should_receive("use_write_connection").once().and_return(flexmock)
mock.should_receive("find").once().with_args(1).and_return("foo")
return Builder(QueryBuilder(None, None, None))
class OrmModelFindManyStub(Model):
def new_query(self):
mock = flexmock(Builder)
mock.should_receive("find").once().with_args([1, 2], ["*"]).and_return("foo")
return Builder(QueryBuilder(None, None, None))
class OrmModelDestroyStub(Model):
def new_query(self):
mock = flexmock(Builder)
model = flexmock()
mock_query = flexmock(QueryBuilder)
mock_query.should_receive("where_in").once().with_args(
"id", [1, 2, 3]
).and_return(flexmock)
mock.should_receive("get").once().and_return([model])
model.should_receive("delete").once()
return Builder(QueryBuilder(None, None, None))
class OrmModelNoTableStub(Model):
pass
class OrmModelCastingStub(Model):
__casts__ = {
"first": "int",
"second": "float",
"third": "str",
"fourth": "bool",
"fifth": "boolean",
"sixth": "dict",
"seventh": "list",
"eighth": "json",
}
class OrmModelCreatedAt(Model):
__timestamps__ = ["created_at"]
class OrmModelUpdatedAt(Model):
__timestamps__ = ["updated_at"]
class OrmModelDefaultAttributes(Model):
__attributes__ = {"foo": "bar"}
| 34.118544
| 88
| 0.628257
|
794eea13e6104bcf2bfc238686d4c884b7f954e0
| 166
|
py
|
Python
|
src/smart_beta_strategies/algo_type.py
|
WQU-MScFE-Capstone-MGS/retail-investor-strategies
|
b8e9c791b3bd71cbe0d0f2c6c9b7de30a9b72100
|
[
"MIT"
] | 12
|
2020-01-25T12:53:50.000Z
|
2021-09-21T18:02:16.000Z
|
src/smart_beta_strategies/algo_type.py
|
WQU-MScFE-Capstone-MGS/retail-investor-strategies
|
b8e9c791b3bd71cbe0d0f2c6c9b7de30a9b72100
|
[
"MIT"
] | null | null | null |
src/smart_beta_strategies/algo_type.py
|
WQU-MScFE-Capstone-MGS/retail-investor-strategies
|
b8e9c791b3bd71cbe0d0f2c6c9b7de30a9b72100
|
[
"MIT"
] | 13
|
2020-01-23T11:45:18.000Z
|
2021-09-12T22:06:07.000Z
|
from enum import Enum
class MLAlgoType(Enum):
'''
    Enum to define the ML algorithms supported by the strategy
'''
ADABOOST = 1
RANDOMFOREST = 2
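# A minimal usage sketch (not part of the original module): a strategy is
# assumed to branch on the enum member when it builds its classifier; the
# scikit-learn estimators below are an illustrative assumption.
def build_classifier(algo_type):
    from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
    if algo_type is MLAlgoType.ADABOOST:
        return AdaBoostClassifier()
    if algo_type is MLAlgoType.RANDOMFOREST:
        return RandomForestClassifier()
    raise ValueError("Unsupported algorithm type: {}".format(algo_type))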
| 18.444444
| 64
| 0.662651
|
794eeb0976cce9226b6d404dcd7d34223a0c3d83
| 585
|
py
|
Python
|
work/find-unknowns.py
|
tuomassalo/wikipedia-voikko-analyzer
|
712b6f14a2cac3a15f940c671f4d462c46417d67
|
[
"MIT"
] | null | null | null |
work/find-unknowns.py
|
tuomassalo/wikipedia-voikko-analyzer
|
712b6f14a2cac3a15f940c671f4d462c46417d67
|
[
"MIT"
] | null | null | null |
work/find-unknowns.py
|
tuomassalo/wikipedia-voikko-analyzer
|
712b6f14a2cac3a15f940c671f4d462c46417d67
|
[
"MIT"
] | null | null | null |
import regex
# from extractor import extract_words
from analyzer import analyze_word
from plaintextreader import get_split_articles
for (page_id, occurrences) in get_split_articles():
    # count the number of occurrences of each unknown word in this article
word_occurrence = dict()
for (word, properness, sentence) in occurrences:
if(len(word) >= 5):
analysis = analyze_word(word)
if(len(analysis) == 0):
word_occurrence[word] = word_occurrence.get(word, 0) + 1
for (word, cnt) in word_occurrence.items():
print(page_id, cnt, word)
| 29.25
| 72
| 0.675214
|
794eec3ba823777a1df9aafbd546d3347ce193d7
| 18,017
|
py
|
Python
|
tensorflow_addons/image/dense_image_warp.py
|
stnava/addons
|
faa5cee641127926c266932296d4ece60783f62a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_addons/image/dense_image_warp.py
|
stnava/addons
|
faa5cee641127926c266932296d4ece60783f62a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_addons/image/dense_image_warp.py
|
stnava/addons
|
faa5cee641127926c266932296d4ece60783f62a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image warping using per-pixel flow vectors."""
import numpy as np
import tensorflow as tf
from tensorflow_addons.utils import types
from typing import Optional
@tf.function
def interpolate_bilinear(
grid: types.TensorLike,
query_points: types.TensorLike,
indexing: str = "ij",
name: Optional[str] = None,
) -> tf.Tensor:
"""Similar to Matlab's interp2 function.
Finds values for query points on a grid using bilinear interpolation.
Args:
grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
query_points: a 3-D float `Tensor` of N points with shape
`[batch, N, 2]`.
indexing: whether the query points are specified as row and column (ij),
or Cartesian coordinates (xy).
name: a name for the operation (optional).
Returns:
values: a 3-D `Tensor` with shape `[batch, N, channels]`
Raises:
      ValueError: if the indexing mode is invalid, or if the shape of the
        inputs is invalid.
"""
if indexing != "ij" and indexing != "xy":
raise ValueError("Indexing mode must be 'ij' or 'xy'")
with tf.name_scope(name or "interpolate_bilinear"):
grid = tf.convert_to_tensor(grid)
query_points = tf.convert_to_tensor(query_points)
# grid shape checks
grid_static_shape = grid.shape
grid_shape = tf.shape(grid)
if grid_static_shape.dims is not None:
if len(grid_static_shape) != 4:
raise ValueError("Grid must be 4D Tensor")
if grid_static_shape[1] is not None and grid_static_shape[1] < 2:
raise ValueError("Grid height must be at least 2.")
if grid_static_shape[2] is not None and grid_static_shape[2] < 2:
raise ValueError("Grid width must be at least 2.")
# query_points shape checks
query_static_shape = query_points.shape
query_shape = tf.shape(query_points)
if query_static_shape.dims is not None:
if len(query_static_shape) != 3:
raise ValueError("Query points must be 3 dimensional.")
query_hw = query_static_shape[2]
if query_hw is not None and query_hw != 2:
raise ValueError("Query points last dimension must be 2.")
batch_size, height, width, channels = (
grid_shape[0],
grid_shape[1],
grid_shape[2],
grid_shape[3],
)
num_queries = query_shape[1]
query_type = query_points.dtype
grid_type = grid.dtype
alphas = []
floors = []
ceils = []
index_order = [0, 1] if indexing == "ij" else [1, 0]
unstacked_query_points = tf.unstack(query_points, axis=2, num=2)
for i, dim in enumerate(index_order):
with tf.name_scope("dim-" + str(dim)):
queries = unstacked_query_points[dim]
size_in_indexing_dimension = grid_shape[i + 1]
# max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
# is still a valid index into the grid.
max_floor = tf.cast(size_in_indexing_dimension - 2, query_type)
min_floor = tf.constant(0.0, dtype=query_type)
floor = tf.math.minimum(
tf.math.maximum(min_floor, tf.math.floor(queries)), max_floor
)
int_floor = tf.cast(floor, tf.dtypes.int32)
floors.append(int_floor)
ceil = int_floor + 1
ceils.append(ceil)
# alpha has the same type as the grid, as we will directly use alpha
# when taking linear combinations of pixel values from the image.
alpha = tf.cast(queries - floor, grid_type)
min_alpha = tf.constant(0.0, dtype=grid_type)
max_alpha = tf.constant(1.0, dtype=grid_type)
alpha = tf.math.minimum(tf.math.maximum(min_alpha, alpha), max_alpha)
# Expand alpha to [b, n, 1] so we can use broadcasting
# (since the alpha values don't depend on the channel).
alpha = tf.expand_dims(alpha, 2)
alphas.append(alpha)
flattened_grid = tf.reshape(grid, [batch_size * height * width, channels])
batch_offsets = tf.reshape(
tf.range(batch_size) * height * width, [batch_size, 1]
)
# This wraps tf.gather. We reshape the image data such that the
# batch, y, and x coordinates are pulled into the first dimension.
# Then we gather. Finally, we reshape the output back. It's possible this
# code would be made simpler by using tf.gather_nd.
def gather(y_coords, x_coords, name):
with tf.name_scope("gather-" + name):
linear_coordinates = batch_offsets + y_coords * width + x_coords
gathered_values = tf.gather(flattened_grid, linear_coordinates)
return tf.reshape(gathered_values, [batch_size, num_queries, channels])
# grab the pixel values in the 4 corners around each query point
top_left = gather(floors[0], floors[1], "top_left")
top_right = gather(floors[0], ceils[1], "top_right")
bottom_left = gather(ceils[0], floors[1], "bottom_left")
bottom_right = gather(ceils[0], ceils[1], "bottom_right")
# now, do the actual interpolation
with tf.name_scope("interpolate"):
interp_top = alphas[1] * (top_right - top_left) + top_left
interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
interp = alphas[0] * (interp_bottom - interp_top) + interp_top
return interp
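# A minimal sanity-check sketch (not part of the original module): querying the
# centre of a 2x2 grid should return the mean of the four corner values.
def _example_interpolate_bilinear():
    grid = tf.constant([[[[0.0], [1.0]], [[2.0], [3.0]]]])  # shape [1, 2, 2, 1]
    query_points = tf.constant([[[0.5, 0.5]]])              # shape [1, 1, 2]
    return interpolate_bilinear(grid, query_points)         # expected [[[1.5]]]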
@tf.function
def interpolate_trilinear(
grid: types.TensorLike,
query_points: types.TensorLike,
indexing: str = "ijk",
name: Optional[str] = None,
) -> tf.Tensor:
"""Similar to Matlab's interp2 function.
Finds values for query points on a grid using bilinear interpolation.
Args:
grid: a 4-D float `Tensor` of shape `[batch, height, width, depth, channels]`.
query_points: a 3-D float `Tensor` of N points with shape
`[batch, N, 3]`.
indexing: whether the query points are specified as row and column (ijk),
or Cartesian coordinates (xyz).
name: a name for the operation (optional).
Returns:
values: a 3-D `Tensor` with shape `[batch, N, channels]`
Raises:
      ValueError: if the indexing mode is invalid, or if the shape of the
        inputs is invalid.
"""
if indexing != "ijk" and indexing != "xyz":
raise ValueError("Indexing mode must be 'ijk' or 'xyz'")
with tf.name_scope(name or "interpolate_trilinear"):
grid = tf.convert_to_tensor(grid)
query_points = tf.convert_to_tensor(query_points)
# grid shape checks
grid_static_shape = grid.shape
grid_shape = tf.shape(grid)
if grid_static_shape.dims is not None:
if len(grid_static_shape) != 5:
raise ValueError("Grid must be 5D Tensor")
if grid_static_shape[1] is not None and grid_static_shape[1] < 2:
raise ValueError("Grid height must be at least 2.")
if grid_static_shape[2] is not None and grid_static_shape[2] < 2:
raise ValueError("Grid width must be at least 2.")
else:
with tf.control_dependencies(
[
tf.debugging.assert_greater_equal(
grid_shape[1], 2, message="Grid height must be at least 2."
),
tf.debugging.assert_greater_equal(
grid_shape[2], 2, message="Grid width must be at least 2."
),
tf.debugging.assert_less_equal(
tf.cast(
                            grid_shape[0] * grid_shape[1] * grid_shape[2] * grid_shape[3],
dtype=tf.dtypes.float32,
),
np.iinfo(np.int32).max / 8.0,
message="The image size or batch size is sufficiently "
"large that the linearized addresses used by "
"tf.gather may exceed the int32 limit.",
),
]
):
pass
# query_points shape checks
query_static_shape = query_points.shape
query_shape = tf.shape(query_points)
if query_static_shape.dims is not None:
if len(query_static_shape) != 3:
raise ValueError("Query points must be 3 dimensional.")
query_hw = query_static_shape[2]
if query_hw is not None and query_hw != 3:
raise ValueError("Query points last dimension must be 3.")
else:
with tf.control_dependencies(
[
tf.debugging.assert_equal(
query_shape[2],
3,
message="Query points last dimension must be 3.",
)
]
):
pass
batch_size, height, width, depth, channels = (
grid_shape[0],
grid_shape[1],
grid_shape[2],
grid_shape[3],
grid_shape[4],
)
num_queries = query_shape[1]
query_type = query_points.dtype
grid_type = grid.dtype
alphas = []
floors = []
ceils = []
        index_order = [0, 1, 2] if indexing == "ijk" else [2, 1, 0]
        # Unstack the three query coordinates along the last dimension.
        unstacked_query_points = tf.unstack(query_points, axis=2, num=3)
for i, dim in enumerate(index_order):
with tf.name_scope("dim-" + str(dim)):
queries = unstacked_query_points[dim]
size_in_indexing_dimension = grid_shape[i + 1]
# max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
# is still a valid index into the grid.
max_floor = tf.cast(size_in_indexing_dimension - 2, query_type)
min_floor = tf.constant(0.0, dtype=query_type)
floor = tf.math.minimum(
tf.math.maximum(min_floor, tf.math.floor(queries)), max_floor
)
int_floor = tf.cast(floor, tf.dtypes.int32)
floors.append(int_floor)
ceil = int_floor + 1
ceils.append(ceil)
# alpha has the same type as the grid, as we will directly use alpha
# when taking linear combinations of pixel values from the image.
alpha = tf.cast(queries - floor, grid_type)
min_alpha = tf.constant(0.0, dtype=grid_type)
max_alpha = tf.constant(1.0, dtype=grid_type)
alpha = tf.math.minimum(tf.math.maximum(min_alpha, alpha), max_alpha)
# Expand alpha to [b, n, 1] so we can use broadcasting
# (since the alpha values don't depend on the channel).
alpha = tf.expand_dims(alpha, 2)
alphas.append(alpha)
flattened_grid = tf.reshape(grid, [batch_size * height * width * depth, channels])
batch_offsets = tf.reshape(
tf.range(batch_size) * height * width * depth, [batch_size, 1]
)
        # This wraps tf.gather. We reshape the image data such that the
        # batch, z, y, and x coordinates are pulled into the first dimension.
        # Then we gather. Finally, we reshape the output back. It's possible this
        # code would be made simpler by using tf.gather_nd.
        def gather(z_coords, y_coords, x_coords, name):
            with tf.name_scope("gather-" + name):
                # The grid was flattened in (height, width, depth) order, so the
                # per-axis strides are width * depth, depth, and 1.
                linear_coordinates = (
                    batch_offsets
                    + z_coords * width * depth
                    + y_coords * depth
                    + x_coords
                )
                gathered_values = tf.gather(flattened_grid, linear_coordinates)
                return tf.reshape(gathered_values, [batch_size, num_queries, channels])
        # grab the pixel values in the 8 corners around each query point
top_left_d = gather(floors[0], floors[1], floors[2], "top_left_d")
top_right_d = gather(floors[0], ceils[1], floors[2], "top_right_d")
bottom_left_d = gather(ceils[0], floors[1], floors[2], "bottom_left_d")
bottom_right_d = gather(ceils[0], ceils[1], floors[2], "bottom_right_d")
top_left_u = gather(floors[0], floors[1], ceils[2], "top_left_u")
top_right_u = gather(floors[0], ceils[1], ceils[2], "top_right_u")
bottom_left_u = gather(ceils[0], floors[1], ceils[2], "bottom_left_u")
bottom_right_u = gather(ceils[0], ceils[1], ceils[2], "bottom_right_u")
        # now, do the actual trilinear interpolation: blend along the second
        # index (alphas[1]), then the first (alphas[0]), then the third (alphas[2])
        with tf.name_scope("interpolate"):
            interp_top_d = alphas[1] * (top_right_d - top_left_d) + top_left_d
            interp_bottom_d = alphas[1] * (bottom_right_d - bottom_left_d) + bottom_left_d
            interp_d = alphas[0] * (interp_bottom_d - interp_top_d) + interp_top_d
            interp_top_u = alphas[1] * (top_right_u - top_left_u) + top_left_u
            interp_bottom_u = alphas[1] * (bottom_right_u - bottom_left_u) + bottom_left_u
            interp_u = alphas[0] * (interp_bottom_u - interp_top_u) + interp_top_u
            interp = alphas[2] * (interp_u - interp_d) + interp_d
return interp
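# A hedged sanity-check sketch, assuming the trilinear blend above: querying the
# centre of a 2x2x2 grid should return the mean of the eight corner values.
def _example_interpolate_trilinear():
    grid = tf.reshape(tf.range(8, dtype=tf.float32), [1, 2, 2, 2, 1])
    query_points = tf.constant([[[0.5, 0.5, 0.5]]])    # shape [1, 1, 3]
    return interpolate_trilinear(grid, query_points)   # expected [[[3.5]]]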
def _get_dim(x, idx):
if x.shape.ndims is None:
return tf.shape(x)[idx]
return x.shape[idx] or tf.shape(x)[idx]
@tf.function
def dense_image_warp(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Image warping using per-pixel flow vectors.
Apply a non-linear warp to the image, where the warp is specified by a
dense flow field of offset vectors that define the correspondences of
pixel values in the output image back to locations in the source image.
Specifically, the pixel value at `output[b, j, i, c]` is
`images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c]`.
The locations specified by this formula do not necessarily map to an int
index. Therefore, the pixel value is obtained by bilinear
interpolation of the 4 nearest pixels around
`(b, j - flow[b, j, i, 0], i - flow[b, j, i, 1])`. For locations outside
of the image, we use the nearest pixel values at the image boundary.
NOTE: The definition of the flow field above is different from that
of optical flow. This function expects the negative forward flow from
output image to source image. Given two images `I_1` and `I_2` and the
optical flow `F_12` from `I_1` to `I_2`, the image `I_1` can be
reconstructed by `I_1_rec = dense_image_warp(I_2, -F_12)`.
Args:
image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
name: A name for the operation (optional).
Note that image and flow can be of type `tf.half`, `tf.float32`, or
`tf.float64`, and do not necessarily have to be the same type.
Returns:
      A 4-D float `Tensor` with shape `[batch, height, width, channels]`
and same type as input image.
Raises:
ValueError: if `height < 2` or `width < 2` or the inputs have the wrong
number of dimensions.
"""
with tf.name_scope(name or "dense_image_warp"):
image = tf.convert_to_tensor(image)
flow = tf.convert_to_tensor(flow)
batch_size, height, width, channels = (
_get_dim(image, 0),
_get_dim(image, 1),
_get_dim(image, 2),
_get_dim(image, 3),
)
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
grid_x, grid_y = tf.meshgrid(tf.range(width), tf.range(height))
stacked_grid = tf.cast(tf.stack([grid_y, grid_x], axis=2), flow.dtype)
batched_grid = tf.expand_dims(stacked_grid, axis=0)
query_points_on_grid = batched_grid - flow
query_points_flattened = tf.reshape(
query_points_on_grid, [batch_size, height * width, 2]
)
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = interpolate_bilinear(image, query_points_flattened)
interpolated = tf.reshape(interpolated, [batch_size, height, width, channels])
return interpolated
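# A minimal usage sketch (not part of the original module): an all-zero flow
# field is the identity warp, so the output should match the input image.
def _example_dense_image_warp():
    image = tf.random.normal([1, 4, 4, 3])
    flow = tf.zeros([1, 4, 4, 2])
    return dense_image_warp(image, flow)  # approximately equal to `image`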
@tf.function(experimental_implements="addons:DenseImageWarp")
def dense_image_warp_annotated(
image: types.TensorLike, flow: types.TensorLike, name: Optional[str] = None
) -> tf.Tensor:
"""Similar to dense_image_warp but annotated with experimental_implements.
IMPORTANT: This is a temporary function and will be removed after TensorFlow's
next release.
    This annotation makes the serialized function detectable by the TFLite MLIR
    converter and allows the converter to convert it to the corresponding TFLite op.
However, with the annotation, this function cannot be used with backprop
under `tf.GradientTape` objects.
"""
return dense_image_warp(image, flow, name)
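# A hedged usage sketch (not part of the original module): the annotated variant
# is typically traced to a concrete function with fixed input signatures before
# it is handed to the TFLite converter; the shapes below are illustrative.
def _example_concrete_warp():
    image_spec = tf.TensorSpec([1, 32, 32, 3], tf.float32)
    flow_spec = tf.TensorSpec([1, 32, 32, 2], tf.float32)
    return dense_image_warp_annotated.get_concrete_function(image_spec, flow_spec)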
| 42.897619
| 108
| 0.607371
|
794eee26125bb42e95137792eca3db452f524b65
| 4,271
|
py
|
Python
|
tests/test/generic/basic.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 6
|
2019-01-09T11:55:15.000Z
|
2021-06-25T19:52:42.000Z
|
tests/test/generic/basic.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 65
|
2018-12-12T08:40:38.000Z
|
2022-02-28T09:19:45.000Z
|
tests/test/generic/basic.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 9
|
2018-11-23T08:59:09.000Z
|
2020-02-04T12:56:35.000Z
|
#!/usr/bin/env python2.7
import os
import sys
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
import udf
from udf import requires
class BasicTest(udf.TestCase):
@requires('BASIC_RANGE')
def test_basic_scalar_emits(self):
rows = self.query('''
SELECT fn1.basic_range(3)
FROM DUAL
''')
self.assertRowsEqual([(x,) for x in range(3)], sorted(rows))
@requires('BASIC_SUM')
def test_basic_set_returns(self):
rows = self.query('''
SELECT fn1.basic_sum(3)
FROM DUAL
''')
self.assertRowsEqual([(3,)], rows)
@requires('BASIC_EMIT_TWO_INTS')
def test_emit_two_ints(self):
rows = self.query('''
SELECT fn1.basic_emit_two_ints()
FROM DUAL''')
self.assertRowsEqual([(1, 2)], rows)
@requires('BASIC_SUM')
@requires('BASIC_NTH_PARTIAL_SUM')
@requires('BASIC_RANGE')
def test_simple_combination(self):
rows = self.query('''
SELECT fn1.basic_sum(psum)
FROM (
SELECT fn1.basic_nth_partial_sum(n) AS PSUM
FROM (
SELECT fn1.basic_range(10)
FROM DUAL
)
)''')
self.assertRowsEqual([(165,)], rows)
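        # The expected 165 assumes basic_nth_partial_sum(n) = 0 + 1 + ... + n:
        # for n = 0..9 the partial sums are 0, 1, 3, 6, 10, 15, 21, 28, 36, 45,
        # which total 165.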
@requires('BASIC_SUM_GRP')
@requires('BASIC_NTH_PARTIAL_SUM')
@requires('BASIC_RANGE')
def test_simple_combination_grouping(self):
rows = self.query('''
SELECT fn1.BASIC_SUM_GRP(psum)
FROM (
SELECT MOD(N, 3) AS n,
fn1.basic_nth_partial_sum(n) AS psum
FROM (
SELECT fn1.basic_range(10)
FROM DUAL
)
)
GROUP BY n
ORDER BY 1''')
self.assertRowsEqual([(39.0,), (54.0,), (72.0,)], rows)
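        # The expected values assume the partial sums of n = 0..9 are grouped by
        # n mod 3: {1, 4, 7} -> 1 + 10 + 28 = 39, {2, 5, 8} -> 3 + 15 + 36 = 54,
        # and {0, 3, 6, 9} -> 0 + 6 + 21 + 45 = 72, ordered ascending.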
@requires('BASIC_EMIT_SEVERAL_GROUPS')
@requires('BASIC_TEST_RESET')
def test_reset(self):
rows = self.query('''
SELECT fn1.basic_test_reset(i, j)
FROM (SELECT fn1.basic_emit_several_groups(16, 8) FROM DUAL)
GROUP BY i
ORDER BY 1''')
self.assertRowsEqual([(0.0,), (0.0,), (0.0,), (0.0,), (1.0,), (1.0,), (1.0,), (1.0,), (2.0,)], rows[:9])
@requires('PERFORMANCE_REDUCE_CHARACTERS')
@requires('PERFORMANCE_MAP_CHARACTERS')
def test_order_by_clause(self):
rows = self.query('''
SELECT fn1.performance_reduce_characters(w, c)
FROM (
SELECT fn1.performance_map_characters('hello hello hello abc')
FROM DUAL
)
GROUP BY w
ORDER BY c DESC''')
unsorted_list = [tuple(x) for x in rows]
sorted_list = sorted(unsorted_list, key=lambda x: x[1], reverse=True)
#for x in zip(unsorted_list, sorted_list):
# print x
self.assertEqual(sorted_list, unsorted_list)
class SetWithEmptyInput(udf.TestCase):
def setUp(self):
self.query('DROP SCHEMA FN2 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA FN2')
self.query('CREATE TABLE FN2.empty_table(c int)')
@requires('SET_RETURNS_HAS_EMPTY_INPUT')
def test_set_returns_has_empty_input_group_by(self):
self.query("""select FN1.set_returns_has_empty_input(c) from empty_table group by 'X'""")
self.assertEqual(0, self.rowcount())
@requires('SET_RETURNS_HAS_EMPTY_INPUT')
def test_set_returns_has_empty_input_no_group_by(self):
rows = self.query('''select FN1.set_returns_has_empty_input(c) from empty_table''')
self.assertRowsEqual([(None,)], rows)
@requires('SET_EMITS_HAS_EMPTY_INPUT')
def test_set_emits_has_empty_input_group_by(self):
self.query("""select FN1.set_emits_has_empty_input(c) from empty_table group by 'X'""")
self.assertEqual(0, self.rowcount())
@requires('SET_EMITS_HAS_EMPTY_INPUT')
def test_set_emits_has_empty_input_no_group_by(self):
rows = self.query('''select FN1.set_emits_has_empty_input(c) from empty_table''')
self.assertRowsEqual([(None,None)], rows)
if __name__ == '__main__':
udf.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
| 32.112782
| 112
| 0.592835
|