| column | type / range |
|---|---|
| hexsha | stringlengths 40–40 |
| size | int64 4–1.02M |
| ext | stringclasses 8 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 4–209 |
| max_stars_repo_name | stringlengths 5–121 |
| max_stars_repo_head_hexsha | stringlengths 40–40 |
| max_stars_repo_licenses | listlengths 1–10 |
| max_stars_count | int64 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_path | stringlengths 4–209 |
| max_issues_repo_name | stringlengths 5–121 |
| max_issues_repo_head_hexsha | stringlengths 40–40 |
| max_issues_repo_licenses | listlengths 1–10 |
| max_issues_count | int64 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_path | stringlengths 4–209 |
| max_forks_repo_name | stringlengths 5–121 |
| max_forks_repo_head_hexsha | stringlengths 40–40 |
| max_forks_repo_licenses | listlengths 1–10 |
| max_forks_count | int64 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 ⌀ |
| content | stringlengths 4–1.02M |
| avg_line_length | float64 1.07–66.1k |
| max_line_length | int64 4–266k |
| alphanum_fraction | float64 0.01–1 |
2d59e60abaebaaaecba5bc9c804151d46cefabbd | 3,346 | py | Python | crawler.py | dboth/AssociatedArtistCrawler | 8497aa49476fe184508344929642cd5435358da4 | ["Apache-2.0"] | null | null | null | crawler.py | dboth/AssociatedArtistCrawler | 8497aa49476fe184508344929642cd5435358da4 | ["Apache-2.0"] | null | null | null | crawler.py | dboth/AssociatedArtistCrawler | 8497aa49476fe184508344929642cd5435358da4 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import json, urllib2, re
class ArtistCrawler:
def __init__(self, outputfile, stackfile, donefile):
#at least one artist has to be in the stackfile!
self.outputb = open(outputfile,"a")
self.stackfile = stackfile
self.donefile = donefile
with open(stackfile,"r") as stack:
self.stacklist = [line.strip() for line in stack if line.strip() != ""]
with open(donefile,"r") as done:
self.donelist = [line.strip() for line in done if line.strip() != ""]
def save(self):
with open(self.stackfile,"w") as stack:
stack.write("\n".join([l for l in self.stacklist]))
with open(self.donefile,"w") as done:
done.write("\n".join([l for l in self.donelist]))
def end(self):
self.save()
self.outputb.close()
def extractNames(self,strd):
a = re.findall("\[\[([^|^\]]*?)\|.*?\]\]",strd)
b = re.findall("\[\[([^|]*?)\]\]",strd)
return a + b
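# Illustrative only (not part of the original crawler): given wikitext containing
# piped and plain wiki links, the two regexes above pull out just the page titles:
#   extractNames("[[Foo|Foo Band]] and [[Bar]]")  ->  ['Foo', 'Bar']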
def crawl(self, name):
print "Crawling "+name
out = []
page = json.load(urllib2.urlopen('https://en.wikipedia.org/w/api.php?action=query&titles='+urllib2.quote(name)+'&prop=revisions&rvprop=content&format=json'))
try:
if "-1" in page["query"]["pages"]:
return []
for pagekey in page["query"]["pages"]:
wiki = page["query"]["pages"][pagekey]["revisions"][0]["*"]
for key in ["associated_acts", "current_members", "past_members"]:
ass_act_d = re.search(key+"[\s]*=[\s]\{\{(.*?)\}\}",wiki, re.DOTALL)
if ass_act_d is None:
ass_act_d = re.search(key+"[\s]*=[\s](\[\[.*?\]\])",wiki)
if ass_act_d is not None:
out += self.extractNames(ass_act_d.group(1))
except Exception:
return []
return out
def isDone(self, name):
for line in self.donelist:
if name == line.strip():
return True
def getNext(self):
return self.stacklist[0]
def output(self,s):
self.outputb.write(s)
def addToStack(self, x):
self.stacklist.append(x)
def addToDone(self, x):
self.donelist.append(x)
def deleteFirstLine(self):
self.stacklist = self.stacklist[1:]
if len(self.stacklist) == 0:
self.again = False
def processNext(self):
position = self.getNext()
if self.isDone(position):
print "Skip double"
self.deleteFirstLine()
return False
assoc = self.crawl(position)
for mus in assoc:
mus = mus.encode("utf-8")
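# The comparison below appears to write each pair in a canonical (alphabetical)
# order, so the same association is not emitted twice with the artists swapped.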
if mus > position:
self.output(position+"\t"+mus+"\n")
else:
self.output(mus+"\t"+position+"\n")
self.addToStack(mus)
#add to done
self.addToDone(position)
self.deleteFirstLine()
v = ArtistCrawler("output.txt","stack.txt","done.txt")
v.again = True
try:
i = 0
while v.again:
if i % 20 == 0:  # save progress every 20 processed artists
v.save()
v.processNext()
i+=1
except Exception:
v.end()
| 32.173077 | 165 | 0.505678 |
92795ebb1aa4190d0614cdffe90ea3bee2daa361 | 5,587 | py | Python | third_party/maya/lib/usdMaya/testenv/testUsdMayaModelKindWriter.py | navefx/YuksUSD | 56c2e1def36ee07121f4ecb349c1626472b3c338 | ["AML"] | 18 | 2017-10-28T22:37:48.000Z | 2022-01-26T12:00:24.000Z | third_party/maya/lib/usdMaya/testenv/testUsdMayaModelKindWriter.py | navefx/YuksUSD | 56c2e1def36ee07121f4ecb349c1626472b3c338 | ["AML"] | 1 | 2021-08-14T23:57:51.000Z | 2021-08-14T23:57:51.000Z | third_party/maya/lib/usdMaya/testenv/testUsdMayaModelKindWriter.py | navefx/YuksUSD | 56c2e1def36ee07121f4ecb349c1626472b3c338 | ["AML"] | 4 | 2018-06-14T18:14:59.000Z | 2021-09-13T22:20:50.000Z |
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import os
import unittest
from pxr import Kind
from pxr import Usd
from maya import cmds
from maya import standalone
class testUsdMayaModelKindWriter(unittest.TestCase):
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
def testExportWithKindFlag(self):
"""
Tests exporting a Maya file with no USD_kind custom attributes
and using the usdExport -kind flag.
"""
cmds.file(os.path.abspath('KindTest.ma'), open=True, force=True)
cmds.loadPlugin('pxrUsd')
usdFilePath = os.path.abspath('KindTest.usda')
with self.assertRaises(RuntimeError):
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath,
kind='assembly')
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath,
kind='fakeKind')
stage = Usd.Stage.Open(usdFilePath)
self.assertTrue(stage)
rootPrim = stage.GetPrimAtPath('/KindTest')
self.assertTrue(Kind.Registry().IsA(Usd.ModelAPI(rootPrim).GetKind(),
'fakeKind'))
def testExportWithKindAttrAndKindFlag(self):
"""
Tests exporting a Maya file with both USD_kind custom attributes and
using the usdExport -kind flag; there should be an error if the USD_kind
is not derived from the kind specified in the -kind flag.
"""
cmds.file(os.path.abspath('KindTestUsdKindAttr.ma'), open=True, force=True)
cmds.loadPlugin('pxrUsd')
usdFilePath = os.path.abspath('KindTestUsdKindAttr.usda')
with self.assertRaises(RuntimeError):
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath,
kind='assembly')
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath,
kind='model')
stage = Usd.Stage.Open(usdFilePath)
self.assertTrue(stage)
rootPrim = stage.GetPrimAtPath('/KindTest')
self.assertTrue(Kind.Registry().IsA(Usd.ModelAPI(rootPrim).GetKind(),
'component'))
rootPrim2 = stage.GetPrimAtPath('/KindTest2')
self.assertTrue(Kind.Registry().IsA(Usd.ModelAPI(rootPrim2).GetKind(),
'assembly'))
def testExportWithAssemblies(self):
"""
Tests exporting a Maya file with a root prim containing an assembly.
"""
cmds.file(os.path.abspath('KindTestAssembly.ma'), open=True, force=True)
cmds.loadPlugin('pxrUsd')
usdFilePath = os.path.abspath('KindTestAssembly.usda')
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath,
kind='assembly')
stage = Usd.Stage.Open(usdFilePath)
self.assertTrue(stage)
# Default kind without setting kind=assembly should still be assembly.
usdFilePath = os.path.abspath('KindTestAssembly2.usda')
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath)
stage = Usd.Stage.Open(usdFilePath)
self.assertTrue(stage)
rootPrim = stage.GetPrimAtPath('/KindTest')
self.assertTrue(Kind.Registry().IsA(Usd.ModelAPI(rootPrim).GetKind(),
'assembly'))
def testExportWithAssemblyAndMesh(self):
"""
Tests exporting a Maya file with a root prim containing an assembly
and a mesh.
"""
cmds.file(os.path.abspath('KindTestAssemblyAndMesh.ma'), open=True,
force=True)
cmds.loadPlugin('pxrUsd')
# Should fail due to the mesh.
usdFilePath = os.path.abspath('KindTestAssemblyAndMesh.usda')
with self.assertRaises(RuntimeError):
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath,
kind='assembly')
# Should be 'component' because of the mesh
usdFilePath = os.path.abspath('KindTestAssemblyAndMesh.usda')
cmds.usdExport(mergeTransformAndShape=True,
file=usdFilePath)
stage = Usd.Stage.Open(usdFilePath)
self.assertTrue(stage)
rootPrim = stage.GetPrimAtPath('/KindTest')
self.assertTrue(Kind.Registry().IsA(Usd.ModelAPI(rootPrim).GetKind(),
'component'))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 36.756579 | 83 | 0.64847 |
4c6a64050589d3e77a284b4e4b84dd745e721b7c | 2,331 | py | Python | src/python/nimbusml/examples/RobustScaler.py | montehoover/NimbusML | f6be39ce9359786976429bab0ccd837e849b4ba5 | ["MIT"] | 134 | 2018-11-01T22:15:24.000Z | 2019-05-04T11:30:08.000Z | src/python/nimbusml/examples/RobustScaler.py | montehoover/NimbusML | f6be39ce9359786976429bab0ccd837e849b4ba5 | ["MIT"] | 226 | 2019-05-07T19:00:44.000Z | 2021-01-06T07:59:48.000Z | src/python/nimbusml/examples/RobustScaler.py | montehoover/NimbusML | f6be39ce9359786976429bab0ccd837e849b4ba5 | ["MIT"] | 43 | 2019-05-15T20:19:42.000Z | 2022-03-30T10:26:07.000Z |
###############################################################################
# RobustScaler
import numpy
from nimbusml import FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.preprocessing.normalization import RobustScaler
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(path, sep=',')
print(data.head())
# row_num education age parity induced case spontaneous stratum pooled.stratum
# 0 1 0-5yrs 26 6 1 1 2 1 3
# 1 2 0-5yrs 42 1 1 1 0 2 1
# 2 3 0-5yrs 39 6 2 1 0 3 4
# 3 4 0-5yrs 34 4 2 1 0 4 2
# 4 5 6-11yrs 35 3 1 1 1 5 32
# transform usage
xf = RobustScaler(
center=True, scale=True,
columns={'age_norm': 'age', 'par_norm': 'parity'})
# fit and transform
features = xf.fit_transform(data)
print(features.head(n=10))
# row_num education age parity induced case spontaneous stratum pooled.stratum age_norm par_norm
# 0 1 0-5yrs 26 6 1 1 2 1 3 -0.434783 1.6
# 1 2 0-5yrs 42 1 1 1 0 2 1 0.956522 -0.4
# 2 3 0-5yrs 39 6 2 1 0 3 4 0.695652 1.6
# 3 4 0-5yrs 34 4 2 1 0 4 2 0.260870 0.8
# 4 5 6-11yrs 35 3 1 1 1 5 32 0.347826 0.4
# 5 6 6-11yrs 36 4 2 1 1 6 36 0.434783 0.8
# 6 7 6-11yrs 23 1 0 1 0 7 6 -0.695652 -0.4
# 7 8 6-11yrs 32 2 0 1 0 8 22 0.086957 0.0
# 8 9 6-11yrs 21 1 0 1 1 9 5 -0.869565 -0.4
# 9 10 6-11yrs 28 2 0 1 0 10 19 -0.260870 0.0
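# Note (not part of the original example): RobustScaler with center=True and
# scale=True subtracts each column's median and divides by its interquartile
# range. The age_norm values above are consistent with a median of 31 and an
# IQR of 11.5 for 'age', e.g. (26 - 31) / 11.5 ≈ -0.434783.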
| 58.275 | 108 | 0.374088 |
416ce5e03292d1f4e65b04bd36f628378cb4f9d7 | 7,598 | py | Python | processText.py | norbertbin/subredditAnalysis | 1951267f8e1cd993a9a3aa294a9b7e8caee71545 | ["MIT"] | null | null | null | processText.py | norbertbin/subredditAnalysis | 1951267f8e1cd993a9a3aa294a9b7e8caee71545 | ["MIT"] | null | null | null | processText.py | norbertbin/subredditAnalysis | 1951267f8e1cd993a9a3aa294a9b7e8caee71545 | ["MIT"] | null | null | null |
import sklearn
import sqlite3 as lite
import string
from nltk.corpus import stopwords
from scipy import io
from collections import Counter
from itertools import dropwhile
from nltk.tokenize import word_tokenize
from submission import Submission
from comment import Comment
### define constants
DB_NAME = "data/raw_subreddit.db"
PROC_DB_NAME = "data/proc_subreddit.db"
DTM_FILE = "data/dtm_subreddit.mat"
COM_TEXT_INDEX = 7
SUB_TEXT_INDEX = 6
SUB_TITLE_INDEX = 1
WORD_COUNT_CUTOFF = 10
STOPWORDS = stopwords.words('english') + ['ive', 'k', 'th', 'm', 'im', 'also']
###
### helper functions
def strip_tuple(tuple_list, tuple_index = 0):
"""Given a list of tuples, creates a list of elements at tuple_index"""
elem_list = []
for i in range(0, len(tuple_list)):
elem_list.append(tuple_list[i][tuple_index])
return elem_list
def xstr(s):
"""If None convert to empty string"""
if s is None:
return u''
return s
def clean_text(text_list):
"""Removes capitol letters and punctuation from list of text."""
remove_char = u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~$1234567890'
translate_table = dict((ord(char), None) for char in remove_char)
translate_table[ord(u'\n')] = ord(' ')
for i in range(0, len(text_list)):
text_list[i] = (xstr(text_list[i])).lower().translate(translate_table)
return text_list
def get_text_data(db, table, col):
"""Returns processed text from row in table"""
con = lite.connect(db)
with con:
cur = con.cursor()
cur.execute("SELECT " + col + " FROM " + table)
text_data = cur.fetchall() #list of tuples
text_data = strip_tuple(text_data)
text_data = clean_text(text_data)
return(text_data)
def gen_dtm(text_data, vocab):
"""Creates document term count matrix"""
vectorizer = sklearn.feature_extraction.text.CountVectorizer(
vocabulary = vocab)
return vectorizer.fit_transform(text_data)
def gen_CRM(call_text, response_text):
"""Creates a call response matrix (count matching words)"""
pass
def replace_tuple(tuple_obj, replace_obj, replace_index):
"""Create a new tuple with a new object at index"""
if len(tuple_obj) - 1 <= replace_index:
return tuple_obj[:replace_index] + (replace_obj,)
else:
return tuple_obj[:replace_index] + (replace_obj,) + tuple_obj[replace_index+1:]
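# Illustrative example (not in the original module):
#   replace_tuple((1, 2, 3), 'x', 1)  ->  (1, 'x', 3)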
def gen_new_table(db_old, db_new, table, col_index, new_col_list, ord_users, ord_subs):
"""Create a new table with new data in col_index"""
con = lite.connect(db_old)
with con:
cur = con.cursor()
cur.execute("SELECT * FROM " + table)
tuple_list = cur.fetchall()
for i in range(0, len(new_col_list)):
tuple_list[i] = replace_tuple(tuple_list[i], new_col_list[i], col_index)
#anonymize username and submission id
if(table == "Comments"):
anon_users = anonymize(strip_tuple(tuple_list, 1), ord_users)
anon_subs = anonymize(strip_tuple(tuple_list, 5), ord_subs)
for i in range(0, len(new_col_list)):
tuple_list[i] = replace_tuple(tuple_list[i], anon_users[i], 1)
tuple_list[i] = replace_tuple(tuple_list[i], anon_subs[i], 5)
elif(table == "Submissions"):
for i in range(0, len(new_col_list)):
tuple_list[i] = replace_tuple(tuple_list[i], i, 0)
num_bindings = len(tuple_list[0])
bindings = ('?,' * num_bindings)[:-1]
con = lite.connect(db_new)
with con:
cur = con.cursor()
cur.executemany("INSERT INTO " + table + " VALUES" + " ("+ bindings + ")", tuple_list)
def gen_vocab(text_list, cutoff, stopwords):
"""Generates a vocabulary in a dictionary for a list of text"""
word_counts = Counter()
for text in text_list:
word_counts.update(word for word in text.split())
# using dropwhile takes advantage of ordering
for key, count in dropwhile(lambda key_count: key_count[1] >= cutoff, word_counts.most_common()):
del word_counts[key]
return list(set(word_counts.keys()) - set(stopwords))
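# Illustrative example (not in the original module): with cutoff=2, word counts
# {'the': 5, 'cat': 2, 'sat': 1} keep 'the' and 'cat' and drop 'sat'; stopwords
# such as 'the' are then removed by the set difference above.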
def remove_unused_words(text_list, vocab):
"""Removes words not in vocab from a list of text"""
vocabset = set(vocab)
for i in range(0, len(text_list)):
tokens = word_tokenize(text_list[i])
tokens = [word for word in tokens if word in vocabset]
text_list[i] = u' '.join(tokens)
return text_list
def create_vocab_table(db, vocab):
"""Creates a table with vocab in db"""
con = lite.connect(db)
with con:
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS Vocab(vocab TEXT)")
for i in range(0, len(vocab)):
vocab[i] = (vocab[i],)
with con:
cur = con.cursor()
cur.executemany("INSERT INTO Vocab VALUES (?)", vocab)
def get_user_and_text(db):
"""Returns tuple with user name and comment text"""
con = lite.connect(db)
with con:
cur = con.cursor()
cur.execute("SELECT author, GROUP_CONCAT(text, ' ') FROM Comments GROUP BY author")
user_text_list = cur.fetchall()
return user_text_list
def create_user_text_table(db, user_list, text_list):
"""Creates a table with user name and their text"""
con = lite.connect(db)
with con:
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS User(user TEXT, text TEXT)")
ut_list = []
for i in range(0, len(user_list)):
ut_list.append((i, text_list[i]))  # use index instead of username
with con:
cur = con.cursor()
cur.executemany("INSERT INTO User VALUES (?,?)", ut_list)
def anonymize(labels, unique_ordered_labels):
"""Renames labels using index of unique_ordered_labels"""
index_dict = dict((val, idx) for idx, val in enumerate(unique_ordered_labels))
return [index_dict[x] for x in labels]
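# Illustrative example (not in the original module):
#   anonymize(['bob', 'amy', 'bob'], ['amy', 'bob'])  ->  [1, 0, 1]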
def get_sub_list(db):
"""a"""
con = lite.connect(db)
with con:
cur = con.cursor()
cur.execute("SELECT sub_id FROM Submissions")
sub_list = cur.fetchall()
return strip_tuple(sub_list)
###
### process and save data
# load data from database and do basic processing
sub_text_list = get_text_data(DB_NAME, "Submissions", "text")
com_text_list = get_text_data(DB_NAME, "Comments", "text")
user_and_text_list = get_user_and_text(DB_NAME)
user_list = strip_tuple(user_and_text_list, 0)
user_text_list = clean_text(strip_tuple(user_and_text_list, 1))
sub_list = get_sub_list(DB_NAME)
# get joint vocabulary for submissions and comments excluding low counts
vocab = gen_vocab(sub_text_list + com_text_list, WORD_COUNT_CUTOFF,
STOPWORDS)
# generate document term matrices
sub_dtm = gen_dtm(sub_text_list, vocab)
user_dtm = gen_dtm(user_text_list, vocab)
# filter unused words from text lists
sub_text_list = remove_unused_words(sub_text_list, vocab)
com_text_list = remove_unused_words(com_text_list, vocab)
user_text_list = remove_unused_words(user_text_list, vocab)
#save document term matrices
io.savemat(DTM_FILE, dict(sub_dtm = sub_dtm,
user_dtm = user_dtm))
# create submission and comment table if they do not exist in db
Submission.create_table(PROC_DB_NAME)
Comment.create_table(PROC_DB_NAME)
#load processed data to a database for use in R
create_vocab_table(PROC_DB_NAME, vocab)
create_user_text_table(PROC_DB_NAME, user_list, user_text_list)
gen_new_table(DB_NAME, PROC_DB_NAME, "Submissions", SUB_TEXT_INDEX, sub_text_list, user_list, sub_list)
gen_new_table(DB_NAME, PROC_DB_NAME, "Comments", COM_TEXT_INDEX, com_text_list, user_list, sub_list)
###
| 35.504673 | 103 | 0.685049 |
e1d1a2490e6e2724eb4701b98ef22c09730207a5 | 3,313 | py | Python | examples/positioned.py | lostinplace/parsita | 2aa4fefc3df88baa75655ce5c631b27ac55d1ad4 | ["MIT"] | 68 | 2016-09-25T20:21:08.000Z | 2022-01-31T23:53:42.000Z | examples/positioned.py | lostinplace/parsita | 2aa4fefc3df88baa75655ce5c631b27ac55d1ad4 | ["MIT"] | 26 | 2016-09-29T19:58:19.000Z | 2022-01-02T00:38:55.000Z | examples/positioned.py | lostinplace/parsita | 2aa4fefc3df88baa75655ce5c631b27ac55d1ad4 | ["MIT"] | 9 | 2016-09-29T19:47:23.000Z | 2022-01-01T21:32:34.000Z |
"""User-defined positioned parser example.
This shows how a new parser can be defined outside Parsita and used in tandem
with the built-in parsers. The ``positioned`` parser updates the value
returned from an arbitrary parser with the position in the input that was
consumed by that parser.
"""
from abc import abstractmethod
from dataclasses import dataclass
from typing import Generic
from parsita import Parser, TextParsers, reg
from parsita.state import Continue, Input, Output, Reader, Status
from parsita.util import splat
class PositionAware(Generic[Output]):
"""An object which can cooperate with the positioned parser.
The ``positioned`` parser calls the ``set_position`` method on values it
receives. This abstract base class marks those objects that can cooperate
with ``positioned`` in this way and receive the input position to produce
the final value.
"""
@abstractmethod
def set_position(self, start: int, length: int) -> Output:
"""Produce a new value with the position set.
This abstract method must be implemented by subclasses of
``PositionAware``. It receives the position in the input that was
consumed and returns a new value, typically an object similar to the old
value, but with the position set. Important: the old value is not
expected to be mutated.
Args:
start: The index of the first character consumed by the parser
length: The number of characters consumed by the parser
"""
pass
class PositionedParser(Generic[Input, Output], Parser[Input, Output]):
def __init__(self, parser: Parser[Input, PositionAware[Output]]):
super().__init__()
self.parser = parser
def consume(self, reader: Reader[Input]) -> Status[Input, Output]:
start = reader.position
status = self.parser.consume(reader)
if isinstance(status, Continue):
end = status.remainder.position
return Continue(status.remainder, status.value.set_position(start, end - start)).merge(status)
else:
return status
def __repr__(self):
return self.name_or_nothing() + "positioned({})".format(self.parser.name_or_repr())
def positioned(parser: Parser[Input, PositionAware[Output]]):
"""Set the position on a PositionAware value.
This parser matches ``parser`` and, if successful, calls ``set_position``
on the produced value to produce a new value. The value produced by
``parser`` must implement the ``PositionAware`` interface so that it can
receive the position in the input.
Args:
parser: Parser
"""
return PositionedParser(parser)
# Everything below here is an example use case
@dataclass
class UnfinishedVariable(PositionAware):
name: str
def set_position(self, start: int, length: int):
return Variable(self.name, start, length)
@dataclass
class Variable:
name: str
start: int
length: int
@dataclass
class Plus:
first: Variable
second: Variable
class PlusParsers(TextParsers):
variable = positioned(reg("[A-Za-z][A-Za-z0-9_]*") > UnfinishedVariable)
plus = variable & "+" >> variable > splat(Plus)
if __name__ == "__main__":
print(PlusParsers.plus.parse("abc + xyz").or_die())
| 31.254717 | 106 | 0.695442 |
c036cb1421a174b91196a901ee7e59d3d954bb7b | 14,407 | py | Python | models/cite/FastText/test.py | DeepWTO/deepwto-draft | c61dc02e4ce3e72e8423c712ddc5483f851443f5 | ["Apache-2.0"] | 34 | 2019-02-04T14:57:08.000Z | 2022-02-07T13:32:35.000Z | models/cite/FastText/test.py | syyunn/DeepWTO | 0e9f206ec2ce24b1afdf895bc81509dd84235b82 | ["Apache-2.0"] | 3 | 2020-03-02T05:06:24.000Z | 2020-03-06T08:39:18.000Z | models/cite/FastText/test.py | DeepWTO/deepwto-draft | c61dc02e4ce3e72e8423c712ddc5483f851443f5 | ["Apache-2.0"] | 4 | 2020-02-17T04:17:12.000Z | 2022-02-07T14:27:01.000Z |
# This code is referenced from https://github.com/RandolphVIm and modified by
# Zachary
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
__modify__ = 'Zachary'
import os
import sys
import time
import numpy as np
import tensorflow as tf
from utils import checkpoints
from utils import feed
from sklearn.metrics import precision_score, recall_score, f1_score, \
roc_auc_score, average_precision_score
# Parameters
# =============================================================================
logger = feed.logger_fn("tflog", "logs/test-{0}.log".format(time.asctime()))
MODEL = input("☛ Please input the model file you want to test, "
"it should be like (1490175368): ")
while not (MODEL.isdigit() and len(MODEL) == 10):
MODEL = input("✘ The format of your input is illegal, "
"it should be like (1490175368), please re-input: ")
logger.info("✔︎ The format of your input is legal, "
"now loading to next step...")
TRAININGSET_DIR = 'data/Train.json'
VALIDATIONSET_DIR = 'data/Validation.json'
TEST_DIR = 'data/Test.json'
MODEL_DIR = 'runs/' + MODEL + '/checkpoints/'
BEST_MODEL_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'results/' + MODEL
# Data Parameters
tf.flags.DEFINE_string("training_data_file",
TRAININGSET_DIR,
"Data source for the training data.")
tf.flags.DEFINE_string("validation_data_file",
VALIDATIONSET_DIR,
"Data source for the validation data")
tf.flags.DEFINE_string("test_data_file",
TEST_DIR,
"Data source for the test data")
tf.flags.DEFINE_string("checkpoint_dir",
MODEL_DIR,
"Checkpoint directory from training run")
tf.flags.DEFINE_string("best_checkpoint_dir",
BEST_MODEL_DIR,
"Best checkpoint directory from training run")
# Model Hyperparameters
tf.flags.DEFINE_integer("pad_seq_len",
35842,
"Recommended padding Sequence "
"length of data (depends on the data)")
tf.flags.DEFINE_integer("embedding_dim",
300,
"Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("embedding_type",
1,
"The embedding type (default: 1)")
tf.flags.DEFINE_float("dropout_keep_prob",
0.5,
"Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda",
0.0,
"L2 regularization lambda (default: 0.0)")
tf.flags.DEFINE_integer("num_classes",
80,
"Number of labels (depends on the ""task)")
tf.flags.DEFINE_integer("top_num",
80,
"Number of top K prediction classes (default: 5)")
tf.flags.DEFINE_float("threshold",
0.2,
"Threshold for prediction classes (default: 0.5)")
# Test Parameters
tf.flags.DEFINE_integer("batch_size",
1,
"Batch Size (default: 1)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement",
True,
"Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement",
False,
"Log placement of ops on devices")
tf.flags.DEFINE_boolean("gpu_options_allow_growth",
True,
"Allow gpu options growth")
FLAGS = tf.flags.FLAGS
FLAGS(sys.argv)
dilim = '-' * 100
logger.info('\n'.join([dilim,
*['{0:>50}|{1:<50}'.format(attr.upper(),
FLAGS.__getattr__(attr))
for attr in sorted(
FLAGS.__dict__['__wrapped'])], dilim]))
def test_fasttext(word2vec_path):
"""Test FASTTEXT model."""
# Load data
logger.info("✔︎ Loading data...")
logger.info("Recommended padding Sequence length is: {0}".
format(FLAGS.pad_seq_len))
logger.info("✔︎ Test data processing...")
test_data = feed.load_data_and_labels(FLAGS.test_data_file,
FLAGS.num_classes,
FLAGS.embedding_dim,
data_aug_flag=False,
word2vec_path=word2vec_path)
print("test_data.tokenindex", test_data.tokenindex)
logger.info("✔︎ Test data padding...")
x_test, y_test = feed.pad_data(test_data, FLAGS.pad_seq_len)
print("y_test", y_test) # y_test is one hot
print("len(y_test)", len(y_test))
y_test_labels = test_data.labels
print("y_test_labels", y_test_labels)
# Load fasttext model
BEST_OR_LATEST = input("☛ Load Best or Latest Model?(B/L): ")
while not (BEST_OR_LATEST.isalpha() and BEST_OR_LATEST.upper() in
['B', 'L']):
BEST_OR_LATEST = \
input("✘ The format of your input is illegal, please re-input: ")
if BEST_OR_LATEST.upper() == 'B':
logger.info("✔︎ Loading best model...")
checkpoint_file = checkpoints.get_best_checkpoint(
FLAGS.best_checkpoint_dir,
select_maximum_value=True)
else:
logger.info("✔︎ Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
print(checkpoint_file)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = FLAGS.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(
checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
print("input_x", input_x)
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob"
).outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
print("is_training", is_training)
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/scores").outputs[0]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
print("loss", loss)
# Split the output nodes name by '|'
# if you have several output nodes
output_node_names = "output/scores"
# Save the .pb model file
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph_def,
output_node_names.split("|"))
tf.train.write_graph(output_graph_def,
"graph",
"graph-fasttext-{0}.pb".format(MODEL),
as_text=False)
# Generate batches for one epoch
batches = feed.batch_iter(list(zip(x_test, y_test, y_test_labels)),
FLAGS.batch_size,
1,
shuffle=False)
test_counter, test_loss = 0, 0.0
test_pre_tk = [0.0] * FLAGS.top_num
test_rec_tk = [0.0] * FLAGS.top_num
test_F_tk = [0.0] * FLAGS.top_num
# Collect the predictions here
true_labels = []
predicted_labels = []
predicted_scores = []
# Collect for calculating metrics
true_onehot_labels = []
predicted_onehot_scores = []
predicted_onehot_labels_ts = []
predicted_onehot_labels_tk = [[] for _ in range(FLAGS.top_num)]
for batch_test in batches:
x_batch_test, y_batch_test, y_batch_test_labels = \
zip(*batch_test)
feed_dict = {
input_x: x_batch_test,
input_y: y_batch_test,
dropout_keep_prob: 1.0,
is_training: False
}
batch_scores, cur_loss = sess.run([scores, loss], feed_dict)
# print("batch_scores: ", batch_scores)
print("cur_loss: ", cur_loss)
# Prepare for calculating metrics
for i in y_batch_test:
true_onehot_labels.append(i)
for j in batch_scores:
predicted_onehot_scores.append(j)
# Get the predicted labels by threshold
batch_predicted_labels_ts, batch_predicted_scores_ts = \
feed.get_label_threshold(scores=batch_scores,
threshold=FLAGS.threshold)
# Add results to collection
for i in y_batch_test_labels:
true_labels.append(i)
for j in batch_predicted_labels_ts:
predicted_labels.append(j)
for k in batch_predicted_scores_ts:
predicted_scores.append(k)
# Get onehot predictions by threshold
batch_predicted_onehot_labels_ts = \
feed.get_onehot_label_threshold(scores=batch_scores,
threshold=FLAGS.threshold)
for i in batch_predicted_onehot_labels_ts:
predicted_onehot_labels_ts.append(i)
# Get onehot predictions by topK
for top_num in range(FLAGS.top_num):
batch_predicted_onehot_labels_tk = feed.\
get_onehot_label_topk(scores=batch_scores,
top_num=top_num + 1)
for i in batch_predicted_onehot_labels_tk:
predicted_onehot_labels_tk[top_num].append(i)
test_loss = test_loss + cur_loss
test_counter = test_counter + 1
# Calculate Precision & Recall & F1 (threshold & topK)
test_pre_ts = precision_score(y_true=np.array(true_onehot_labels),
y_pred=np.array(
predicted_onehot_labels_ts),
average='micro')
test_rec_ts = recall_score(y_true=np.array(true_onehot_labels),
y_pred=np.array(
predicted_onehot_labels_ts),
average='micro')
test_F_ts = f1_score(y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_ts),
average='micro')
for top_num in range(FLAGS.top_num):
test_pre_tk[top_num] = precision_score(
y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_tk[top_num]),
average='micro')
test_rec_tk[top_num] = recall_score(
y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_tk[top_num]),
average='micro')
test_F_tk[top_num] = f1_score(
y_true=np.array(true_onehot_labels),
y_pred=np.array(predicted_onehot_labels_tk[top_num]),
average='micro')
# Calculate the average AUC
test_auc = roc_auc_score(
y_true=np.array(true_onehot_labels),
y_score=np.array(predicted_onehot_scores),
average='micro')
# Calculate the average PR
test_prc = average_precision_score(
y_true=np.array(true_onehot_labels),
y_score=np.array(predicted_onehot_scores),
average="micro")
test_loss = float(test_loss / test_counter)
logger.info(
"☛ All Test Dataset: Loss {0:g} | AUC {1:g} | AUPRC {2:g}"
.format(test_loss, test_auc, test_prc))
# Predict by threshold
logger.info(
"☛ Predict by threshold: Precision {0:g}, Recall {1:g}, "
"F1 {2:g}"
.format(test_pre_ts, test_rec_ts, test_F_ts))
# Predict by topK
logger.info("☛ Predict by topK:")
for top_num in range(FLAGS.top_num):
logger.info("Top{0}: Precision {1:g}, Recall {2:g}, F {3:g}"
.format(top_num + 1,
test_pre_tk[top_num],
test_rec_tk[top_num],
test_F_tk[top_num]))
# Save the prediction result
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
feed.create_prediction_file(output_file=SAVE_DIR +
"/predictions.json",
data_id=test_data.testid,
all_labels=true_labels,
all_predict_labels=predicted_labels,
all_predict_scores=predicted_scores)
logger.info("✔︎ Done.")
if __name__ == '__main__':
word2vec_path = "/Users/zachary/Downloads/" \
"GoogleNews-vectors-negative300.bin"
test_fasttext(word2vec_path)
| 42.125731 | 79 | 0.534046 |
f25d45277f581f1bc31d12bbaa29538771c03fd5 | 2,193 | py | Python | hdlConvertorAst/translate/common/discover_declarations.py | mewais/hdlConvertorAst | 64c8c1deee923ffae17e70e0fb1ad763cb69608c | ["MIT"] | null | null | null | hdlConvertorAst/translate/common/discover_declarations.py | mewais/hdlConvertorAst | 64c8c1deee923ffae17e70e0fb1ad763cb69608c | ["MIT"] | null | null | null | hdlConvertorAst/translate/common/discover_declarations.py | mewais/hdlConvertorAst | 64c8c1deee923ffae17e70e0fb1ad763cb69608c | ["MIT"] | null | null | null |
from itertools import chain
from hdlConvertorAst.hdlAst import iHdlStatement, HdlIdDef,\
HdlModuleDec, HdlModuleDef, HdlCompInst
from hdlConvertorAst.to.hdl_ast_visitor import HdlAstVisitor
from hdlConvertorAst.translate.common.name_scope import WithNameScope
class DiscoverDeclarations(HdlAstVisitor):
def __init__(self, name_scope):
"""
:type name_scope: NameScope
"""
super(DiscoverDeclarations, self).__init__()
self.name_scope = name_scope
def visit_HdlIdDef(self, o):
"""
:type name_scope: NameScope
:type o: HdlIdDef
"""
self.name_scope.register_name(o.name, o)
def visit_HdlModuleDec(self, o):
"""
:type name_scope: NameScope
:type o: HdlModuleDec
"""
ns = self.name_scope
ns.register_name(o.name, o)
with WithNameScope(self, ns.level_push(o.name)):
for p in chain(o.params, o.ports):
self.visit_HdlIdDef(p)
for o2 in o.objs:
raise NotImplementedError(o2)
def visit_HdlModuleDef(self, o):
"""
:type o: HdlModuleDef
"""
if o.dec is not None:
self.visit_HdlModuleDec(o.dec)
with WithNameScope(self, self.name_scope.get_child(o.module_name.val)):
self.discover_declarations(o.objs)
def visit_HdlCompInst(self, o):
"""
:type o: HdlCompInst
"""
if o.name is not None:
self.name_scope.register_name(o.name, o)
# name_scope = name_scope.get_object_by_name(o.module_name)
def _discover_declarations(self, o):
if isinstance(o, HdlIdDef):
self.visit_HdlIdDef(o)
elif isinstance(o, HdlModuleDec):
self.visit_HdlModuleDec(o)
elif isinstance(o, HdlModuleDef):
self.visit_HdlModuleDef(o)
elif isinstance(o, iHdlStatement):
pass
elif isinstance(o, HdlCompInst):
self.visit_HdlCompInst(o)
else:
raise NotImplementedError(o)
def discover_declarations(self, objs):
for o in objs:
self._discover_declarations(o)
| 29.635135 | 79 | 0.615595 |
2ab80078522b8a897c6503952400aeec5f95a3f9 | 3,896 | py | Python | lib/opentypesvg/utils.py | davidgodzsak/opentype-svg | 038bb25bcf9ccf0408bde708c4758674d7db5247 | ["MIT"] | 166 | 2016-09-14T07:42:58.000Z | 2022-03-27T14:37:27.000Z | lib/opentypesvg/utils.py | davidgodzsak/opentype-svg | 038bb25bcf9ccf0408bde708c4758674d7db5247 | ["MIT"] | 13 | 2017-08-03T18:02:32.000Z | 2021-06-01T07:08:41.000Z | lib/opentypesvg/utils.py | davidgodzsak/opentype-svg | 038bb25bcf9ccf0408bde708c4758674d7db5247 | ["MIT"] | 17 | 2018-01-20T03:21:40.000Z | 2022-02-26T12:33:17.000Z |
# Copyright 2016 Adobe. All rights reserved.
"""
Module that contains shared functionality.
"""
import os
import sys
SVG_FOLDER_NAME = "SVGs"
NESTED_FOLDER_NAME = "_moreSVGs_"
def read_file(file_path):
with open(file_path, "r") as f:
return f.read()
def write_file(file_path, data):
with open(file_path, "w") as f:
f.write(data)
def get_font_format(font_file_path):
with open(font_file_path, "rb") as f:
head = f.read(4).decode()
if head == "OTTO":
return "OTF"
elif head in ("\x00\x01\x00\x00", "true"):
return "TTF"
elif head == "wOFF":
return "WOFF"
elif head == "wOF2":
return "WOFF2"
return None
def validate_font_paths(paths_list):
validated_paths_list = []
for path in paths_list:
path = os.path.realpath(path)
if (os.path.isfile(path) and get_font_format(path) in
['OTF', 'TTF', 'WOFF', 'WOFF2']):
validated_paths_list.append(path)
else:
print("ERROR: {} is not a valid font file path.".format(path),
file=sys.stderr)
return validated_paths_list
def split_comma_sequence(comma_str):
return [item.strip() for item in comma_str.split(',')]
def final_message(num_files_saved):
if not num_files_saved:
num_files_saved = 'No'
plural = 's' if num_files_saved != 1 else ''
print("{} SVG file{} saved.".format(num_files_saved, plural),
file=sys.stdout)
def create_folder(folder_path):
try:
os.makedirs(folder_path)
except OSError:
if not os.path.isdir(folder_path):
raise
def create_nested_folder(nested_folder_path, main_folder_path):
"""
Creates a nested folder and returns its path.
This additional folder is created when file names conflict.
"""
if not nested_folder_path:
nested_folder_path = os.path.join(main_folder_path, NESTED_FOLDER_NAME)
create_folder(nested_folder_path)
return nested_folder_path
def validate_folder_path(folder_path):
"""
Validates that the path is a folder.
Returns the complete path.
"""
path = os.path.realpath(folder_path)
if os.path.isdir(path):
return path
else:
print("ERROR: {} is not a valid folder path.".format(path),
file=sys.stderr)
sys.exit(1)
def get_output_folder_path(provided_folder_path, first_font_path):
"""
If the path to the output folder was NOT provided, create
a folder in the same directory where the first font is.
If the path was provided, validate it.
Returns a valid output folder.
"""
if provided_folder_path:
return validate_folder_path(provided_folder_path)
return os.path.join(os.path.dirname(first_font_path), SVG_FOLDER_NAME)
def get_gnames_to_save_in_nested_folder(gnames_list):
"""
On case-insensitive systems the SVG files cannot be all saved to the
same folder otherwise a.svg and A.svg would be written over each other,
for example. So, pre-process the list of glyph names to find which ones
step on each other, and save half of them in a nested folder. This
approach won't handle the case where a.svg and A.svg are NOT generated
on the same run, but that's fine; the user will have to handle that.
Also, the process below assumes that there are no more than 2 conflicts
per name, i.e. it will handle "the/The" but not "the/The/THE/...";
this shouldn't be a problem in 99% of the time.
Returns list of glyph names that need to be saved in a nested folder.
"""
unique_names_set = set()
gnames_to_save_in_nested_folder = []
for gname in gnames_list:
if gname.lower() in unique_names_set:
gnames_to_save_in_nested_folder.append(gname)
unique_names_set.add(gname.lower())
return gnames_to_save_in_nested_folder
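# Illustrative example (not in the original module): on a case-insensitive
# filesystem 'a.svg' and 'A.svg' would collide, so the second spelling of a
# name is routed to the nested folder:
#   get_gnames_to_save_in_nested_folder(['a', 'A', 'b'])  ->  ['A']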
| 29.969231 | 79 | 0.670945 |
a90271ad98d0d445ff9e367954e5dda5192e62ed | 3,101 | py | Python | gaphor/diagram/tests/fixtures.py | seryafarma/gaphor | f85998ae3a3ec5381b25cda60d89a47383c4fd2e | ["Apache-2.0"] | null | null | null | gaphor/diagram/tests/fixtures.py | seryafarma/gaphor | f85998ae3a3ec5381b25cda60d89a47383c4fd2e | ["Apache-2.0"] | null | null | null | gaphor/diagram/tests/fixtures.py | seryafarma/gaphor | f85998ae3a3ec5381b25cda60d89a47383c4fd2e | ["Apache-2.0"] | null | null | null |
from io import StringIO
import pytest
from gaphas.aspect import ConnectionSink
from gaphas.aspect import Connector as ConnectorAspect
from gaphor.core.eventmanager import EventManager
from gaphor.core.modeling import Diagram, ElementFactory
from gaphor.core.modeling.elementdispatcher import ElementDispatcher
from gaphor.diagram.connectors import Connector
from gaphor.diagram.copypaste import copy, paste
from gaphor.storage import storage
from gaphor.storage.xmlwriter import XMLWriter
from gaphor.UML.modelinglanguage import UMLModelingLanguage
@pytest.fixture
def event_manager():
return EventManager()
@pytest.fixture
def element_factory(event_manager):
return ElementFactory(
event_manager, ElementDispatcher(event_manager, UMLModelingLanguage())
)
@pytest.fixture
def modeling_language():
return UMLModelingLanguage()
@pytest.fixture
def diagram(element_factory):
diagram = element_factory.create(Diagram)
yield diagram
diagram.unlink()
@pytest.fixture
def saver(element_factory):
def save():
"""
Save diagram into string.
"""
f = StringIO()
storage.save(XMLWriter(f), element_factory)
data = f.getvalue()
f.close()
return data
return save
@pytest.fixture
def loader(element_factory, modeling_language):
def load(data):
"""
Load data from specified string.
"""
element_factory.flush()
assert not list(element_factory.select())
f = StringIO(data)
storage.load(f, factory=element_factory, modeling_language=modeling_language)
f.close()
return load
def allow(line, handle, item, port=None) -> bool:
if port is None and len(item.ports()) > 0:
port = item.ports()[0]
adapter = Connector(item, line)
return adapter.allow(handle, port)
def connect(line, handle, item, port=None):
"""
Connect line's handle to an item.
If port is not provided, then first port is used.
"""
canvas = line.canvas
if port is None and len(item.ports()) > 0:
port = item.ports()[0]
sink = ConnectionSink(item, port)
connector = ConnectorAspect(line, handle)
connector.connect(sink)
cinfo = canvas.get_connection(handle)
assert cinfo.connected is item
assert cinfo.port is port
def disconnect(line, handle):
"""
Disconnect line's handle.
"""
canvas = line.canvas
canvas.disconnect_item(line, handle)
assert not canvas.get_connection(handle)
def clear_model(diagram, element_factory, retain=[]):
"""
Clear the model and diagram, leaving only an empty diagram.
"""
for element in list(element_factory.values()):
if element is not diagram and element not in retain:
element.unlink()
for item in diagram.canvas.get_all_items():
item.unlink()
def copy_clear_and_paste(items, diagram, element_factory, retain=[]):
buffer = copy(items)
clear_model(diagram, element_factory, retain)
print(buffer)
return paste(buffer, diagram, element_factory.lookup)
| 23.853846 | 85 | 0.695905 |
9ed324f46941ec3fe00901d914c4ab662eeedc97 | 22 | py | Python | nnsubspace/nnsubspace/__init__.py | jiweiqi/nnsubspace | e443a949982ca950a0ab85bcf01dcfe2a65d3ee3 | ["MIT"] | 5 | 2019-06-07T16:42:42.000Z | 2021-08-31T14:56:10.000Z | nnsubspace/nnsubspace/__init__.py | jiweiqi/nnsubspace | e443a949982ca950a0ab85bcf01dcfe2a65d3ee3 | ["MIT"] | null | null | null | nnsubspace/nnsubspace/__init__.py | jiweiqi/nnsubspace | e443a949982ca950a0ab85bcf01dcfe2a65d3ee3 | ["MIT"] | 5 | 2019-04-28T02:47:47.000Z | 2021-12-03T17:53:13.000Z |
__all__ = ['subspace']
| 22 | 22 | 0.681818 |
5bfc724d5dc5d5e2c827b7f06d0fb1d6238a4933 | 2,477 | py | Python | dungeons/views.py | zachtib/MagicWithFriends | 18db849fe282903051820b718a2d7b434360e332 | ["MIT"] | null | null | null | dungeons/views.py | zachtib/MagicWithFriends | 18db849fe282903051820b718a2d7b434360e332 | ["MIT"] | 5 | 2021-02-15T20:52:10.000Z | 2021-07-03T18:19:59.000Z | dungeons/views.py | zachtib/MagicWithFriends | 18db849fe282903051820b718a2d7b434360e332 | ["MIT"] | null | null | null |
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from dungeons.models import Dungeon, DungeonRoom
COMPLETED_DUNGEONS = "completed_dungeons"
def dungeon_list(request):
completed_dungeon_ids = request.session.get(COMPLETED_DUNGEONS, [])
completed_dungeons = Dungeon.objects.filter(id__in=completed_dungeon_ids)
official_dungeons = Dungeon.objects.filter(is_official=True).exclude(id__in=completed_dungeon_ids)
remaining_dungeons = Dungeon.objects.filter(is_official=False).exclude(id__in=completed_dungeon_ids)
paginator = Paginator(remaining_dungeons, 10)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(request, 'dungeons/list.html', {
'completed_dungeons': completed_dungeons,
'official_dungeons': official_dungeons,
'dungeons': page,
'completed_ids': completed_dungeon_ids,
})
def dungeon_entrance(request, dungeon_slug):
dungeon = get_object_or_404(Dungeon, slug=dungeon_slug)
return render(request, 'dungeons/choice.html', {
'title': dungeon.name,
'text': 'Enter the Dungeon',
'choices': {room.name: room.get_absolute_url() for room in dungeon.entrances()},
})
def dungeon_room(request, dungeon_slug, room_slug):
dungeon = get_object_or_404(Dungeon, slug=dungeon_slug)
room = get_object_or_404(DungeonRoom, slug=room_slug, dungeon=dungeon)
if room.paths.count() > 0:
destinations = [path.destination for path in room.paths.all()]
choices = [
dict(
title=room.name,
text=room.room_text,
url=room.get_absolute_url()
) for room in destinations
]
else:
completed_dungeon_ids: list = request.session.get(COMPLETED_DUNGEONS, [])
completed_dungeon_ids.append(dungeon.id)
request.session[COMPLETED_DUNGEONS] = completed_dungeon_ids
choices = [dict(title="End of the Dungeon", text="Return to Dungeon List", url=reverse("dungeon-list"))]
return render(request, 'dungeons/room.html', {
'dungeon': dungeon.name,
'title': room.name,
'text': room.room_text,
'choices': choices,
})
def clear_completed(request):
request.session[COMPLETED_DUNGEONS] = []
return HttpResponseRedirect(reverse("dungeon-list"))
| 35.898551 | 112 | 0.702059 |
f9c3e3104f76dd8ffa99ecd072445665859d125d | 10,454 | py | Python | src/resources/host_api.py | jwagantall/cello | 8fa6980148a96f925c40d78014cfc9c856bb3091 | ["Apache-2.0"] | null | null | null | src/resources/host_api.py | jwagantall/cello | 8fa6980148a96f925c40d78014cfc9c856bb3091 | ["Apache-2.0"] | null | null | null | src/resources/host_api.py | jwagantall/cello | 8fa6980148a96f925c40d78014cfc9c856bb3091 | ["Apache-2.0"] | null | null | null |
# Copyright IBM Corp, All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
import os
import sys
import uuid
from flask import jsonify, Blueprint, render_template
from flask import request as r
import json
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from common import log_handler, LOG_LEVEL, \
make_ok_resp, make_fail_resp, \
CODE_CREATED, \
request_debug
from modules import host_handler
from modules.models import Cluster as ClusterModel
from modules.models import Host as HostModel
from agent import detect_daemon_type
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
logger.addHandler(log_handler)
bp_host_api = Blueprint('bp_host_api', __name__,
url_prefix='/{}'.format("api"))
@bp_host_api.route('/hosts', methods=['GET'])
def hosts_list():
logger.info("/hosts_list method=" + r.method)
request_debug(r, logger)
col_filter = dict((key, r.args.get(key)) for key in r.args)
items = list(host_handler.list(filter_data=col_filter))
return make_ok_resp(data=items)
@bp_host_api.route('/host/<host_id>', methods=['GET'])
def host_query(host_id):
request_debug(r, logger)
result = host_handler.schema(host_handler.get_by_id(host_id))
logger.debug(result)
if result:
return make_ok_resp(data=result)
else:
error_msg = "host not found with id=" + host_id
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=r.form)
@bp_host_api.route('/host', methods=['POST'])
def host_create():
request_debug(r, logger)
if r.content_type.startswith("application/json"):
body = dict(r.get_json(force=True, silent=True))
else:
body = r.form
name, worker_api, capacity, log_type, log_server, log_level, host_type = \
body['name'], body['worker_api'], body['capacity'], \
body['log_type'], body.get('log_server', ''), body['log_level'], \
body['host_type'] if 'host_type' in body else None
if "autofill" in body and body["autofill"] == "on":
autofill = "true"
else:
autofill = "false"
if "schedulable" in body and body["schedulable"] == "on":
schedulable = "true"
else:
schedulable = "false"
if host_type == "vsphere":
vcaddress = body['vc_address']
if vcaddress.find(":") == -1:
address = vcaddress
port = "443"
else:
address = vcaddress.split(':')[0]
port = vcaddress.split(':')[1]
logger.debug("address={}, port={}".format(address, port))
vmname = "cello-vsphere-" + str(uuid.uuid1())
vsphere_param = {
'vc': {
'address': address,
'port': port,
'username': body['vc_user'],
'password': body['vc_password'],
'network': body['vc_network'],
'vc_datastore': body['datastore'],
'vc_datacenter': body['datacenter'],
'vc_cluster': body['cluster'],
'template': body['vm_template']},
'vm': {
'vmname': vmname,
'ip': body['vm_ip'],
'gateway': body['vm_gateway'],
'netmask': body['vm_netmask'],
'dns': body['vm_dns'],
'vcpus': int(body['vm_cpus']),
'memory': int(body['vm_memory'])}}
logger.debug("name={}, capacity={},"
"fillup={}, schedulable={}, log={}/{}, vsphere_param={}".
format(name, capacity, autofill, schedulable,
log_type, log_server, vsphere_param))
vsphere_must_have_params = {
'Name': name,
'Capacity': capacity,
'LoggingType': log_type,
'VCAddress': address,
'VCUser': body['vc_user'],
'VCPassword': body['vc_password'],
'VCNetwork': body['vc_network'],
'Datastore': body['datastore'],
'Datacenter': body['datacenter'],
'Cluster': body['cluster'],
'VMIp': body['vm_ip'],
'VMGateway': body['vm_gateway'],
'VMNetmask': body['vm_netmask']}
for key in vsphere_must_have_params:
if vsphere_must_have_params[key] == '':
error_msg = "host POST without {} data".format(key)
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
result = host_handler.create(name=name, worker_api=worker_api,
capacity=int(capacity),
autofill=autofill,
schedulable=schedulable,
log_level=log_level,
log_type=log_type,
log_server=log_server,
host_type=host_type,
params=vsphere_param)
else:
logger.debug("name={}, worker_api={}, capacity={}"
"fillup={}, schedulable={}, log={}/{}".
format(name, worker_api, capacity, autofill, schedulable,
log_type, log_server))
if not name or not worker_api or not capacity or not log_type:
error_msg = "host POST without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
else:
host_type = host_type if host_type \
else detect_daemon_type(worker_api)
result = host_handler.create(name=name, worker_api=worker_api,
capacity=int(capacity),
autofill=autofill,
schedulable=schedulable,
log_level=log_level,
log_type=log_type,
log_server=log_server,
host_type=host_type)
logger.debug("result.msg={}".format(result.get('msg')))
if (host_type == "vsphere") and ('msg' in result):
vsphere_errmsg = result.get('msg')
error_msg = "Failed to create vsphere host {}".format(vsphere_errmsg)
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
elif result:
logger.debug("host creation successfully")
return make_ok_resp(code=CODE_CREATED)
else:
error_msg = "Failed to create host {}".format(body["name"])
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
@bp_host_api.route('/host', methods=['PUT'])
def host_update():
request_debug(r, logger)
if r.content_type.startswith("application/json"):
body = dict(r.get_json(force=True, silent=True))
else:
body = r.form
if "id" not in body:
error_msg = "host PUT without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg,
data=body)
else:
id, d = body["id"], {}
for k in body:
if k != "id":
d[k] = body.get(k)
result = host_handler.update(id, d)
if result:
logger.debug("host PUT successfully")
return make_ok_resp()
else:
error_msg = "Failed to update host {}".format(result.get("name"))
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
@bp_host_api.route('/host', methods=['PUT', 'DELETE'])
def host_delete():
request_debug(r, logger)
request_data = r.get_json(force=True, silent=True)
if "id" in r.form:
host_id = r.form["id"]
elif "id" in request_data:
host_id = request_data.get("id")
else:
error_msg = "host delete without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=r.form)
logger.debug("host delete with id={0}".format(host_id))
if host_handler.delete(id=host_id):
return make_ok_resp()
else:
error_msg = "Failed to delete host {}".format(host_id)
logger.warning(error_msg)
return make_fail_resp(error=error_msg)
@bp_host_api.route('/host_op', methods=['POST'])
def host_actions():
logger.info("/host_op, method=" + r.method)
request_debug(r, logger)
if r.content_type.startswith("application/json"):
body = dict(r.get_json(force=True, silent=True))
else:
body = r.form
host_id, action = body['id'], body['action']
if not host_id or not action:
error_msg = "host POST without enough data"
logger.warning(error_msg)
return make_fail_resp(error=error_msg,
data=body)
else:
if action == "fillup":
if host_handler.fillup(host_id):
logger.debug("fillup successfully")
return make_ok_resp()
else:
error_msg = "Failed to fillup the host."
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
elif action == "clean":
if host_handler.clean(host_id):
logger.debug("clean successfully")
return make_ok_resp()
else:
error_msg = "Failed to clean the host."
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
elif action == "reset":
if host_handler.reset(host_id):
logger.debug("reset successfully")
try:
host_model = HostModel.objects.get(id=host_id)
clusters = ClusterModel.objects(host=host_model)
for cluster_item in clusters:
cluster_item.delete()
except Exception:
pass
return make_ok_resp()
else:
error_msg = "Failed to reset the host."
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
error_msg = "unknown host action={}".format(action)
logger.warning(error_msg)
return make_fail_resp(error=error_msg, data=body)
| 37.469534 | 78 | 0.55749 |
fd2336fe23ff2c8a3143fcb946142daa072745a8 | 1,164 | py | Python | pycoreutils/main.py | davidfischer/pycoreutils | a5f72d4765b2340eb2ac96099e8de87214a908fa | ["MIT"] | 25 | 2016-11-03T06:41:01.000Z | 2022-03-20T20:42:47.000Z | pycoreutils/main.py | davidfischer/pycoreutils | a5f72d4765b2340eb2ac96099e8de87214a908fa | ["MIT"] | 1 | 2021-04-05T16:11:24.000Z | 2021-04-11T15:41:21.000Z | pycoreutils/main.py | davidfischer/pycoreutils | a5f72d4765b2340eb2ac96099e8de87214a908fa | ["MIT"] | 3 | 2016-12-31T14:37:03.000Z | 2018-10-03T22:38:23.000Z |
import importlib
from .commands import commands
from .vendor import click
from .version import __version__
class PycoreutilsMulticommand(click.MultiCommand):
def list_commands(self, ctx):
return commands
def get_command(self, ctx, name):
try:
mod = importlib.import_module(u'pycoreutils.commands._{}'.format(name))
if hasattr(mod, 'subcommand'):
return getattr(mod, 'subcommand')
except ImportError:
pass
return None
@click.command(
cls=PycoreutilsMulticommand,
epilog='See "COMMAND -h" to read about a specific subcommand',
short_help='%(prog)s [-h] COMMAND [args]',
)
@click.help_option('-h', '--help')
@click.version_option(__version__, '-v', '--version', message='%(prog)s v%(version)s')
def cli():
'''
Coreutils in Pure Python
\b
____ _ _ ___ _____ ____ ____ __ __ ____ ____ __ ___
( _ \( \/ )/ __)( _ )( _ \( ___)( )( )(_ _)(_ _)( ) / __)
)___/ \ /( (__ )(_)( ) / )__) )(__)( )( _)(_ )(__ \__ \\
(__) (__) \___)(_____)(_)\_)(____)(______) (__) (____)(____)(___/
'''
pass
| 28.390244 | 86 | 0.571306 |
c9d282e7404076a31997d71173e502bd5b385669 | 605 | py | Python | test/widget/test_dialog_config.py | HansBug/pyqt5-demo | df10ba54209bcf993d5dab8d969ab63d7a0acc90 | ["Apache-2.0"] | null | null | null | test/widget/test_dialog_config.py | HansBug/pyqt5-demo | df10ba54209bcf993d5dab8d969ab63d7a0acc90 | ["Apache-2.0"] | null | null | null | test/widget/test_dialog_config.py | HansBug/pyqt5-demo | df10ba54209bcf993d5dab8d969ab63d7a0acc90 | ["Apache-2.0"] | null | null | null |
import pytest
from app.config.meta import __TITLE__, __VERSION__, __AUTHOR__, __AUTHOR_EMAIL__
from app.widget import DialogConfig
@pytest.mark.unittest
class TestDialogConfig:
def test_common(self, qtbot):
dc = DialogConfig(None)
qtbot.addWidget(dc)
title_info_str = dc.label_title.text().lower()
assert __TITLE__.lower() in title_info_str
assert __VERSION__.lower() in title_info_str
author_info_str = dc.label_author.text().lower()
assert __AUTHOR__.lower() in author_info_str
assert __AUTHOR_EMAIL__.lower() in author_info_str
| 28.809524 | 80 | 0.723967 |
6f94350453d25f6165ba422c00a407c26c5dbd93 | 29,657 | py | Python | sdk/eventgrid/azure-mgmt-eventgrid/azure/mgmt/eventgrid/aio/operations/_partner_registrations_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-01-24T08:54:57.000Z | 2022-01-24T08:54:57.000Z | sdk/eventgrid/azure-mgmt-eventgrid/azure/mgmt/eventgrid/aio/operations/_partner_registrations_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/eventgrid/azure-mgmt-eventgrid/azure/mgmt/eventgrid/aio/operations/_partner_registrations_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._partner_registrations_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PartnerRegistrationsOperations:
"""PartnerRegistrationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.eventgrid.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
partner_registration_name: str,
**kwargs: Any
) -> "_models.PartnerRegistration":
"""Get a partner registration.
Gets a partner registration with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_registration_name: Name of the partner registration.
:type partner_registration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PartnerRegistration, or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.PartnerRegistration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PartnerRegistration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
partner_registration_name=partner_registration_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PartnerRegistration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations/{partnerRegistrationName}'} # type: ignore
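    # Illustrative usage only (not part of the generated code): assuming an async
    # management client (e.g. EventGridManagementClient from azure.mgmt.eventgrid.aio)
    # that exposes this operations group as ``client.partner_registrations`` -- the
    # resource names below are placeholders:
    #
    #     registration = await client.partner_registrations.get(
    #         resource_group_name="example-rg",
    #         partner_registration_name="example-registration",
    #     )
    #     print(registration.name)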
async def _create_or_update_initial(
self,
resource_group_name: str,
partner_registration_name: str,
partner_registration_info: "_models.PartnerRegistration",
**kwargs: Any
) -> "_models.PartnerRegistration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PartnerRegistration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(partner_registration_info, 'PartnerRegistration')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
partner_registration_name=partner_registration_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PartnerRegistration', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PartnerRegistration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations/{partnerRegistrationName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
partner_registration_name: str,
partner_registration_info: "_models.PartnerRegistration",
**kwargs: Any
) -> AsyncLROPoller["_models.PartnerRegistration"]:
"""Create a partner registration.
Creates a new partner registration with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_registration_name: Name of the partner registration.
:type partner_registration_name: str
:param partner_registration_info: PartnerRegistration information.
:type partner_registration_info: ~azure.mgmt.eventgrid.models.PartnerRegistration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PartnerRegistration or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.PartnerRegistration]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PartnerRegistration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
partner_registration_name=partner_registration_name,
partner_registration_info=partner_registration_info,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PartnerRegistration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations/{partnerRegistrationName}'} # type: ignore
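    # Illustrative usage only (same assumed client as in the sketch above): the returned
    # AsyncLROPoller is awaited for the final PartnerRegistration:
    #
    #     poller = await client.partner_registrations.begin_create_or_update(
    #         resource_group_name="example-rg",
    #         partner_registration_name="example-registration",
    #         partner_registration_info=registration_info,
    #     )
    #     registration = await poller.result()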
async def _delete_initial(
self,
resource_group_name: str,
partner_registration_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
partner_registration_name=partner_registration_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations/{partnerRegistrationName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
partner_registration_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a partner registration.
Deletes a partner registration with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_registration_name: Name of the partner registration.
:type partner_registration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
partner_registration_name=partner_registration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations/{partnerRegistrationName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
partner_registration_name: str,
partner_registration_update_parameters: "_models.PartnerRegistrationUpdateParameters",
**kwargs: Any
) -> Optional["_models.PartnerRegistration"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PartnerRegistration"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(partner_registration_update_parameters, 'PartnerRegistrationUpdateParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
partner_registration_name=partner_registration_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PartnerRegistration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations/{partnerRegistrationName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
partner_registration_name: str,
partner_registration_update_parameters: "_models.PartnerRegistrationUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.PartnerRegistration"]:
"""Update a partner registration.
Updates a partner registration with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param partner_registration_name: Name of the partner registration.
:type partner_registration_name: str
:param partner_registration_update_parameters: Partner registration update information.
:type partner_registration_update_parameters:
~azure.mgmt.eventgrid.models.PartnerRegistrationUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PartnerRegistration or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.eventgrid.models.PartnerRegistration]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PartnerRegistration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
partner_registration_name=partner_registration_name,
partner_registration_update_parameters=partner_registration_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PartnerRegistration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations/{partnerRegistrationName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.PartnerRegistrationsListResult"]:
"""List partner registrations under an Azure subscription.
List all the partner registrations under an Azure subscription.
:param filter: The query used to filter the search results using OData syntax. Filtering is
        permitted on the 'name' property only and with a limited number of OData operations. These
operations are: the 'contains' function as well as the following logical operations: not, and,
or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
The following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list operation. Valid range for
top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
items per page.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PartnerRegistrationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.eventgrid.models.PartnerRegistrationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PartnerRegistrationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PartnerRegistrationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/partnerRegistrations'} # type: ignore
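    # Illustrative usage only (same assumed client as above): the return value is an
    # AsyncItemPaged that is consumed with ``async for``:
    #
    #     async for registration in client.partner_registrations.list_by_subscription(top=20):
    #         print(registration.name)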
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.PartnerRegistrationsListResult"]:
"""List partner registrations under a resource group.
List all the partner registrations under a resource group.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param filter: The query used to filter the search results using OData syntax. Filtering is
        permitted on the 'name' property only and with a limited number of OData operations. These
operations are: the 'contains' function as well as the following logical operations: not, and,
or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
The following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list operation. Valid range for
top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
items per page.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PartnerRegistrationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.eventgrid.models.PartnerRegistrationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PartnerRegistrationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PartnerRegistrationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/partnerRegistrations'} # type: ignore
| 48.066451
| 251
| 0.680109
|
b59650536365c6bdc0111ab9761c3dff6ebeba96
| 11,098
|
py
|
Python
|
pandasdmx/writer.py
|
daoluan/pandaSDMX
|
2efcb5a429a5306efd89bed4cd55946d1ad5067b
|
[
"Apache-2.0"
] | null | null | null |
pandasdmx/writer.py
|
daoluan/pandaSDMX
|
2efcb5a429a5306efd89bed4cd55946d1ad5067b
|
[
"Apache-2.0"
] | null | null | null |
pandasdmx/writer.py
|
daoluan/pandaSDMX
|
2efcb5a429a5306efd89bed4cd55946d1ad5067b
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
from pandasdmx.model import (
DEFAULT_LOCALE,
AgencyScheme,
DataflowDefinition,
DataStructureDefinition,
DataSet,
Dimension,
# DimensionDescriptor,
CategoryScheme,
Codelist,
Component,
ConceptScheme,
ItemScheme,
NameableArtefact,
Observation,
SeriesKey,
TimeDimension,
)
from pandasdmx.util import DictLike
# Class → common write_*() methods
_alias = {
DictLike: dict,
AgencyScheme: ItemScheme,
CategoryScheme: ItemScheme,
ConceptScheme: ItemScheme,
Codelist: ItemScheme,
DataflowDefinition: NameableArtefact,
DataStructureDefinition: NameableArtefact,
Dimension: Component,
TimeDimension: Component,
}
def write(obj, *args, **kwargs):
"""Convert an SDMX *obj* to :mod:`pandas` object(s).
:meth:`write` implements a dispatch pattern according to the type of
*obj*. For instance, a :class:`pandasdmx.model.DataSet` object is
    converted using :meth:`write_dataset`. See the individual ``write_*`` methods
    for more information on their behaviour, including accepted *args*
and *kwargs*.
"""
cls = obj.__class__
function = 'write_' + _alias.get(cls, cls).__name__.lower()
return globals()[function](obj, *args, **kwargs)
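# A small illustration of the dispatch (a sketch only; ``structure_msg`` and ``data_msg``
# stand for hypothetical, already-parsed messages and are not defined in this module):
#
#     write(structure_msg.codelist['CL_FREQ'])  # a Codelist is an ItemScheme -> write_itemscheme()
#     write(data_msg)                           # a DataMessage -> write_datamessage()
#     write(data_msg.data[0])                   # a DataSet -> write_dataset()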
# Functions for Python containers
def write_list(obj, *args, **kwargs):
"""Convert a :class:`list` of SDMX objects.
For the following *obj*, :meth:`write_list` returns :class:`pandas.Series`
instead of a :class:`list`:
- a list of :class:`Observation <pandasdmx.model.Observation>`:
the Observations are written using :meth:`write_dataset`.
- a list with only 1 :class:`DataSet <pandasdmx.model.DataSet>` (e.g. the
:attr:`data <pandasdmx.message.DataMessage.data>` attribute of
:class:`DataMessage <pandasdmx.message.DataMessage>`): the Series for
the single element is returned.
- a list of :class:`SeriesKey`: the key values (but no data) are returned.
"""
if isinstance(obj[0], Observation):
return write_dataset(obj, *args, **kwargs)
elif isinstance(obj[0], DataSet) and len(obj) == 1:
return write(obj[0], *args, **kwargs)
elif isinstance(obj[0], SeriesKey):
return write_serieskeys(obj, *args, **kwargs)
else:
return [write(item, *args, **kwargs) for item in obj]
def write_dict(obj, *args, **kwargs):
"""Convert mappings.
The values of the mapping are write()'d individually. If the resulting
values are :class:`str` or :class:`pd.Series` *with indexes that share the
same name*, then they are converted to a pd.Series, possibly with a
pd.MultiIndex. Otherwise, a DictLike is returned.
"""
result = {k: write(v, *args, **kwargs) for k, v in obj.items()}
result_type = set(type(v) for v in result.values())
if result_type <= {pd.Series, pd.DataFrame}:
if (len(set(map(lambda s: s.index.name, result.values()))) == 1 and
len(result) > 1):
# Can safely concatenate these to a pd.MultiIndex'd Series.
return pd.concat(result)
else:
# The individual pd.Series are indexed by different dimensions; do
# not concatenate.
return DictLike(result)
elif result_type == {str}:
return pd.Series(result)
elif result_type == set():
return pd.Series()
else:
raise ValueError(result_type)
# Functions for message classes
def write_response(obj, *args, **kwargs):
"""Convert :class:`pandasdmx.api.Response`.
The :attr:`msg <pandasdmx.api.Response.msg>` attribute of *obj* is
converted.
"""
return write(obj.msg, *args, **kwargs)
def write_datamessage(obj, *args, **kwargs):
"""Convert :class:`DataMessage <pandasdmx.message.DataMessage>`."""
if len(obj.data) == 1:
return write(obj.data[0], *args, **kwargs)
else:
return [write(ds, *args, **kwargs) for ds in obj.data]
def write_structuremessage(obj, include=None, **kwargs):
"""Convert :class:`StructureMessage <pandasdmx.message.StructureMessage>`.
Parameters
----------
obj : pandasdmx.message.StructureMessage
include : iterable of str or str, optional
One or more of the attributes of the StructureMessage (
'category_scheme', 'codelist', etc.) to transform.
kwargs :
Passed to :meth:`write` for each attribute.
Returns
-------
:class:`pandasdmx.util.DictLike`
Keys are StructureMessage attributes; values are pandas objects.
"""
all_contents = {
'category_scheme',
'codelist',
'concept_scheme',
'constraint',
'dataflow',
'structure',
'organisation_scheme',
}
# Handle arguments
if include is None:
attrs = all_contents
else:
attrs = set([include] if isinstance(include, str) else include)
# Silently discard invalid names
attrs &= all_contents
attrs = sorted(attrs)
result = DictLike()
for a in attrs:
dl = write(getattr(obj, a), **kwargs)
if len(dl):
# Only add non-empty elements
result[a] = dl
return result
# Functions for model classes
def write_component(obj):
"""Convert :class:`Component <pandasdmx.model.Component>`.
The :attr:`Concept.id <pandasdmx.model.Concept.id>` attribute of the
:attr:`Component.concept_identity
<pandasdmx.model.Component.concept_identity>` is returned.
"""
return str(obj.concept_identity.id)
def write_dataset(obj, attributes='', dtype=np.float64, constraint=None,
fromfreq=False, parse_time=True):
"""Convert :class:`DataSet <pandasdmx.model.DataSet>`.
Parameters
----------
obj : :class:`DataSet <pandasdmx.model.DataSet>` or iterable of \
:class:`Observation <pandasdmx.model.Observation>`
attributes : str
Types of attributes to return with the data. A string containing
zero or more of:
- ``'o'``: attributes attached to each :class:`Observation
<pandasdmx.model.Observation>` .
- ``'s'``: attributes attached to any (0 or 1) :class:`SeriesKey
<pandasdmx.model.SeriesKey>` associated with each Observation.
- ``'g'``: attributes attached to any (0 or more) :class:`GroupKeys
<pandasdmx.model.GroupKey>` associated with each Observation.
- ``'d'``: attributes attached to the :class:`DataSet
<pandasdmx.model.DataSet>` containing the Observations.
dtype : str or :class:`np.dtype` or None
Datatype for values. If None, do not return the values of a series.
In this case, `attributes` must not be an empty string so that some
attribute is returned.
constraint : :class:`ContentConstraint \
<pandasdmx.model.ContentConstraint>` , optional
If given, only Observations included by the *constraint* are returned.
fromfreq : bool, optional
If True, extrapolate time periods from the first item and FREQ
dimension.
parse_time : bool, optional
If True (default), try to generate datetime index, provided that
dim_at_obs is 'TIME' or 'TIME_PERIOD'. Otherwise, ``parse_time`` is
ignored. If False, always generate index of strings. Set it to
False to increase performance and avoid parsing errors for exotic
date-time formats unsupported by pandas.
Returns
-------
:class:`pandas.Series` or :class:`pandas.DataFrame`
If `attributes` is not ``''``, a :class:`pandas.DataFrame` is
returned with ``value`` as the first column, and additional
columns for each attribute.
"""
# source will now be a DataSet
# validate 'attributes'
if attributes is None or not attributes:
attributes = ''
else:
try:
attributes = attributes.lower()
except AttributeError:
raise TypeError("'attributes' argument must be of type str.")
if set(attributes) - {'o', 's', 'g', 'd'}:
raise ValueError(
"'attributes' must only contain 'o', 's', 'd' or 'g'.")
# Iterate on observations
result = {}
for observation in getattr(obj, 'obs', obj):
# Check that the Observation is within the constraint, if any
key = observation.key.order()
if constraint and key not in constraint:
continue
# Add value and attributes
row = {}
if dtype:
row['value'] = observation.value
if attributes:
row.update(observation.attrib)
result[tuple(map(str, key.get_values()))] = row
result = pd.DataFrame.from_dict(result, orient='index')
if len(result):
result.index.names = observation.key.order().values.keys()
if dtype:
result['value'] = result['value'].astype(dtype)
if not attributes:
result = result['value']
return result
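# Hedged usage sketch for write_dataset (``data_msg`` is a hypothetical, already-parsed
# DataMessage and is not defined in this module):
#
#     series = write(data_msg.data[0])                     # pd.Series of values only
#     frame = write(data_msg.data[0], attributes='osgd')   # DataFrame with values + attributes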
def write_dimensiondescriptor(obj):
"""Convert :class:`DimensionDescriptor
<pandasdmx.model.DimensionDescriptor>`.
The :attr:`components <pandasdmx.model.DimensionDescriptor.components>` of
the DimensionDescriptor are written.
"""
return write(obj.components)
def write_itemscheme(obj, locale=DEFAULT_LOCALE):
"""Convert :class:`ItemScheme <pandasdmx.model.ItemScheme>`.
Names from *locale* are serialized.
Returns
-------
pandas.Series
"""
items = {}
seen = set()
def add_item(item):
"""Recursive helper for adding items."""
# Track seen items
if item in seen:
return
else:
seen.add(item)
# Localized name
row = {'name': item.name.localized_default(locale)}
try:
# Parent ID
row['parent'] = item.parent.id
except AttributeError:
row['parent'] = ''
items[item.id] = row
# Add this item's children, recursively
for child in item.child:
add_item(child)
for item in obj.items:
add_item(item)
# Convert to DataFrame
result = pd.DataFrame.from_dict(items, orient='index', dtype=object) \
.rename_axis(obj.id, axis='index')
if not result['parent'].str.len().any():
# 'parent' column is empty; convert to pd.Series and rename
result = result['name'].rename(obj.name.localized_default(locale))
return result
def write_nameableartefact(obj):
"""Convert :class:`NameableArtefact <pandasdmx.model.NameableArtefact>`.
The :attr:`name <pandasdmx.model.NameableArtefact.name>` attribute of *obj*
is returned.
"""
return str(obj.name)
def write_serieskeys(obj):
"""Convert a list of :class:`SeriesKey <pandasdmx.model.SeriesKey>`."""
result = []
for sk in obj:
result.append({dim: kv.value for dim, kv in sk.order().values.items()})
# TODO perhaps return as a pd.MultiIndex if that is more useful
return pd.DataFrame(result)
| 32.168116
| 79
| 0.634889
|
f60da6c1898bf557edb2aa52348faa7a25de0374
| 13,807
|
py
|
Python
|
guides/customizing_what_happens_in_fit.py
|
pavithrasv/keras-io
|
ffbfaf9b263690a47fffb5f5438d44ca2f554979
|
[
"Apache-2.0"
] | 1
|
2020-08-28T08:31:52.000Z
|
2020-08-28T08:31:52.000Z
|
guides/customizing_what_happens_in_fit.py
|
pavithrasv/keras-io
|
ffbfaf9b263690a47fffb5f5438d44ca2f554979
|
[
"Apache-2.0"
] | null | null | null |
guides/customizing_what_happens_in_fit.py
|
pavithrasv/keras-io
|
ffbfaf9b263690a47fffb5f5438d44ca2f554979
|
[
"Apache-2.0"
] | null | null | null |
"""
Title: Customizing what happens in `fit()`
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/04/15
Last modified: 2020/04/15
Description: Complete guide to overriding the training step of the Model class.
"""
"""
## Introduction
When you're doing supervised learning, you can use `fit()` and everything works
smoothly.
When you need to write your own training loop from scratch, you can use the
`GradientTape` and take control of every little detail.
But what if you need a custom training algorithm, but you still want to benefit from
the convenient features of `fit()`, such as callbacks, built-in distribution support,
or step fusing?
A core principle of Keras is **progressive disclosure of complexity**. You should
always be able to get into lower-level workflows in a gradual way. You shouldn't fall
off a cliff if the high-level functionality doesn't exactly match your use case. You
should be able to gain more control over the small details while retaining a
commensurate amount of high-level convenience.
When you need to customize what `fit()` does, you should **override the training step
function of the `Model` class**. This is the function that is called by `fit()` for
every batch of data. You will then be able to call `fit()` as usual -- and it will be
running your own learning algorithm.
Note that this pattern does not prevent you from building models with the Functional
API. You can do this whether you're building `Sequential` models, Functional API
models, or subclassed models.
Let's see how that works.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
"""
## A first simple example
Let's start from a simple example:
- We create a new class that subclasses `keras.Model`.
- We just override the method `train_step(self, data)`.
- We return a dictionary mapping metric names (including the loss) to their current
value.
The input argument `data` is what gets passed to fit as training data:
- If you pass Numpy arrays, by calling `fit(x, y, ...)`, then `data` will be the tuple
`(x, y)`
- If you pass a `tf.data.Dataset`, by calling `fit(dataset, ...)`, then `data` will be
what gets yielded by `dataset` at each batch.
In the body of the `train_step` method, we implement a regular training update,
similar to what you are already familiar with. Importantly, **we compute the loss via
`self.compiled_loss`**, which wraps the loss(es) function(s) that were passed to
`compile()`.
Similarly, we call `self.compiled_metrics.update_state(y, y_pred)` to update the state
of the metrics that were passed in `compile()`, and we query results from
`self.metrics` at the end to retrieve their current value.
"""
class CustomModel(keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
"""
Let's try this out:
"""
import numpy as np
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
# Just use `fit` as usual
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=3)
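"""
The same `train_step` works unchanged if you pass a `tf.data.Dataset` to `fit()`: in that
case, `data` is simply whatever the dataset yields for each batch. A minimal sketch, reusing
the arrays and the compiled model from above:
"""

dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(32)
model.fit(dataset, epochs=1)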
"""
## Going lower-level
Naturally, you could just skip passing a loss function in `compile()`, and instead do
everything *manually* in `train_step`. Likewise for metrics. Here's a lower-level
example, that only uses `compile()` to configure the optimizer:
"""
mae_metric = keras.metrics.MeanAbsoluteError(name="mae")
loss_tracker = keras.metrics.Mean(name="loss")
class CustomModel(keras.Model):
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute our own loss
loss = keras.losses.mean_squared_error(y, y_pred)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Compute our own metrics
loss_tracker.update_state(loss)
mae_metric.update_state(y, y_pred)
return {"loss": loss_tracker.result(), "mae": mae_metric.result()}
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
# We don't pass a loss or metrics here.
model.compile(optimizer="adam")
# Just use `fit` as usual -- you can use callbacks, etc.
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.fit(x, y, epochs=3)
"""
## Supporting `sample_weight` & `class_weight`
You may have noticed that our first basic example didn't make any mention of sample
weighting. If you want to support the `fit()` arguments `sample_weight` and
`class_weight`, you'd simply do the following:
- Unpack `sample_weight` from the `data` argument
- Pass it to `compiled_loss` & `compiled_metrics` (of course, you could also just apply
it manually if you don't rely on `compile()` for losses & metrics)
- That's it. That's the list.
"""
class CustomModel(keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
if len(data) == 3:
x, y, sample_weight = data
else:
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value.
# The loss function is configured in `compile()`.
loss = self.compiled_loss(
y,
y_pred,
sample_weight=sample_weight,
regularization_losses=self.losses,
)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the metrics.
# Metrics are configured in `compile()`.
self.compiled_metrics.update_state(y, y_pred, sample_weight=sample_weight)
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return {m.name: m.result() for m in self.metrics}
# Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
# You can now use sample_weight argument
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
sw = np.random.random((1000, 1))
model.fit(x, y, sample_weight=sw, epochs=3)
"""
## Providing your own evaluation step
What if you want to do the same for calls to `model.evaluate()`? Then you would
override `test_step` in exactly the same way. Here's what it looks like:
"""
class CustomModel(keras.Model):
def test_step(self, data):
# Unpack the data
x, y = data
# Compute predictions
y_pred = self(x, training=False)
# Updates the metrics tracking the loss
self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Update the metrics.
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value.
# Note that it will include the loss (tracked in self.metrics).
return {m.name: m.result() for m in self.metrics}
# Construct an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(loss="mse", metrics=["mae"])
# Evaluate with our custom test_step
x = np.random.random((1000, 32))
y = np.random.random((1000, 1))
model.evaluate(x, y)
"""
## Wrapping up: an end-to-end GAN example
Let's walk through an end-to-end example that leverages everything you just learned.
Let's consider:
- A generator network meant to generate 28x28x1 images.
- A discriminator network meant to classify 28x28x1 images into two classes ("fake" and
"real").
- One optimizer for each.
- A loss function to train the discriminator.
"""
from tensorflow.keras import layers
# Create the discriminator
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
# Create the generator
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
layers.Dense(7 * 7 * 128),
layers.LeakyReLU(alpha=0.2),
layers.Reshape((7, 7, 128)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
"""
Here's a feature-complete GAN class, overriding `compile()` to use its own signature,
and implementing the entire GAN algorithm in 17 lines in `train_step`:
"""
class GAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super(GAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
def compile(self, d_optimizer, g_optimizer, loss_fn):
super(GAN, self).compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
def train_step(self, real_images):
if isinstance(real_images, tuple):
real_images = real_images[0]
# Sample random points in the latent space
batch_size = tf.shape(real_images)[0]
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
# Decode them to fake images
generated_images = self.generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(tf.shape(labels))
# Train the discriminator
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = self.discriminator(self.generator(random_latent_vectors))
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
return {"d_loss": d_loss, "g_loss": g_loss}
"""
Let's test-drive it:
"""
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
# To limit execution time, we only train on 100 batches. You can train on
# the entire dataset. You will need about 20 epochs to get nice results.
gan.fit(dataset.take(100), epochs=1)
"""
The ideas behind deep learning are simple, so why should their implementation be painful?
"""
| 34.60401
| 88
| 0.686681
|
6fb79f0ff0188fc596457da4c563f634b80c906e
| 5,421
|
py
|
Python
|
test_proj/settings.py
|
artscoop/django-admin-tools
|
f98189615db5114ef3b136c90983a6bc5590c9af
|
[
"MIT"
] | 6
|
2016-02-18T10:00:34.000Z
|
2021-05-27T09:41:35.000Z
|
venv/lib/python2.7/site-packages/test_proj/settings.py
|
WhySoGeeky/DroidPot
|
7c3d9e975dae3835e2ccf42c425d65b26466e82a
|
[
"MIT"
] | 6
|
2018-03-30T10:06:12.000Z
|
2021-06-10T17:59:44.000Z
|
test_proj/settings.py
|
artscoop/django-admin-tools
|
f98189615db5114ef3b136c90983a6bc5590c9af
|
[
"MIT"
] | null | null | null |
# settings for django-admin-tools test project.
import os
import sys
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.dirname(PROJECT_PATH))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'testdb.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'django-admin-tools'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_proj.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_proj.wsgi.application'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
PROJECT_PATH + '/templates',
)
INSTALLED_APPS = [
'admin_tools',
'admin_tools.dashboard',
'admin_tools.menu',
'admin_tools.theming',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'test_app',
]
try:
import django_coverage
TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(PROJECT_PATH, '_coverage')
except ImportError:
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
ADMIN_TOOLS_INDEX_DASHBOARD = 'test_proj.dashboard.CustomIndexDashboard'
ADMIN_TOOLS_MENU = 'test_proj.menu.CustomMenu'
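# The two settings above point at project-local classes. A hedged sketch of what
# ``test_proj.dashboard.CustomIndexDashboard`` might look like (illustrative only;
# the real module ships with the test project and is not reproduced here):
#
#     from admin_tools.dashboard import modules, Dashboard
#
#     class CustomIndexDashboard(Dashboard):
#         def init_with_context(self, context):
#             self.children.append(modules.AppList(title='Applications'))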
| 34.528662
| 108
| 0.723298
|
2e84ab6fd65dd656a8b3e5ebf0cffba9ed639087
| 13,138
|
py
|
Python
|
tools/wptrunner/wptrunner/executors/executorselenium.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | null | null | null |
tools/wptrunner/wptrunner/executors/executorselenium.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | null | null | null |
tools/wptrunner/wptrunner/executors/executorselenium.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
import socket
import sys
import threading
import time
import traceback
import urlparse
import uuid
from .base import (Protocol,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
webdriver = None
exceptions = None
RemoteConnection = None
def do_delayed_imports():
global webdriver
global exceptions
global RemoteConnection
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.remote.remote_connection import RemoteConnection
class SeleniumProtocol(Protocol):
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def setup(self, runner):
"""Connect to browser via Selenium's WebDriver implementation."""
self.runner = runner
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
session_started = False
try:
self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
resolve_ip=False),
desired_capabilities=self.capabilities)
except:
self.logger.warning(
"Connecting to Selenium failed:\n%s" % traceback.format_exc())
else:
self.logger.debug("Selenium session started")
session_started = True
if not session_started:
self.logger.warning("Failed to connect to Selenium")
self.executor.runner.send_message("init_failed")
else:
try:
self.after_connect()
except:
print >> sys.stderr, traceback.format_exc()
self.logger.warning(
"Failed to connect to navigate initial page")
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.load_runner("http")
def load_runner(self, protocol):
url = urlparse.urljoin(self.executor.server_url(protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.webdriver.execute_script("document.title = '%s'" %
threading.current_thread().name.replace("'", '"'))
def wait(self):
while True:
try:
self.webdriver.execute_async_script("");
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumRun(object):
def __init__(self, func, webdriver, url, timeout):
self.func = func
self.result = None
self.webdriver = webdriver
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.webdriver.set_script_timeout((timeout + extra_timeout) * 1000)
except exceptions.ErrorInResponseException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
assert not flag
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.webdriver, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
**kwargs):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol.webdriver,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, webdriver, url, timeout):
format_map = {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000}
parent = webdriver.current_window_handle
handles = [item for item in webdriver.window_handles if item != parent]
for handle in handles:
try:
webdriver.switch_to_window(handle)
webdriver.close()
except exceptions.NoSuchWindowException:
pass
webdriver.switch_to_window(parent)
webdriver.execute_script(self.script % format_map)
try:
# Try this, it's in Level 1 but nothing supports it yet
win_s = webdriver.execute_script("return window['%s'];" % self.window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except:
after = webdriver.window_handles
if len(after) == 2:
test_window = next(iter(set(after) - set([parent])))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
else:
raise Exception("unable to find test window")
assert test_window != parent
handler = CallbackHandler(webdriver, test_window, self.logger)
while True:
result = webdriver.execute_async_script(
self.script_resume % format_map)
done, rv = handler(result)
if done:
break
return rv
class CallbackHandler(object):
def __init__(self, webdriver, test_window, logger):
self.webdriver = webdriver
self.test_window = test_window
self.logger = logger
def __call__(self, result):
self.logger.debug("Got async callback: %s" % result[1])
try:
attr = getattr(self, "process_%s" % result[1])
except AttributeError:
raise ValueError("Unknown callback type %r" % result[1])
else:
return attr(result)
def process_complete(self, result):
rv = [result[0]] + result[2]
return True, rv
def process_action(self, result):
parent = self.webdriver.current_window_handle
try:
self.webdriver.switch_to.window(self.test_window)
action = result[2]["action"]
self.logger.debug("Got action: %s" % action)
if action == "click":
selector = result[2]["selector"]
elements = self.webdriver.find_elements_by_css_selector(selector)
if len(elements) == 0:
raise ValueError("Selector matches no elements")
elif len(elements) > 1:
raise ValueError("Selector matches multiple elements")
self.logger.debug("Clicking element: %s" % selector)
try:
elements[0].click()
except (exceptions.ElementNotInteractableException,
exceptions.ElementNotVisibleException) as e:
self._send_message("complete",
"failure",
e)
self.logger.debug("Clicking element failed: %s" % str(e))
else:
self._send_message("complete",
"success")
self.logger.debug("Clicking element succeeded")
finally:
self.webdriver.switch_to.window(parent)
return False, None
def _send_message(self, message_type, status, message=None):
obj = {
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
self.protocol.webdriver.set_window_size(600, 600)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return SeleniumRun(self._screenshot,
self.protocol.webdriver,
self.test_url(test),
test.timeout).run()
def _screenshot(self, webdriver, url, timeout):
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
# strip off the data:img/png, part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
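# A minimal, hypothetical sketch of how SeleniumRun wraps a blocking WebDriver
# call with a timeout outside the full wptrunner harness. The Selenium endpoint
# URL, capabilities and test URL are illustrative assumptions, not values used
# elsewhere in this module.
def _example_screenshot_run(webdriver_url, test_url, timeout=10):
    do_delayed_imports()
    driver = webdriver.Remote(
        command_executor=RemoteConnection(webdriver_url.strip("/"), resolve_ip=False),
        desired_capabilities={"browserName": "firefox"})
    try:
        def grab(driver_, url, _timeout):
            # The wrapped callable receives (webdriver, url, timeout); its
            # return value becomes the `data` half of the (success, data) pair.
            driver_.get(url)
            return driver_.get_screenshot_as_base64()
        return SeleniumRun(grab, driver, test_url, timeout).run()
    finally:
        driver.quit()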
| 36.393352 | 100 | 0.576572 |
0186d72a792f0ce6439d0b9f96ac1e1fbbb65ef4 | 8,680 | py | Python |
configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py | jiangwenj02/SOLO | f0a1de652028236d7935274f51c509008903ad7b | ["BSD-2-Clause"] | 1,467 | 2020-03-24T01:38:24.000Z | 2022-03-31T03:02:05.000Z |
configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py | cds-mipt/cds-mmdetection | da6b1c82715c189bf33e944c6edda80590b5a867 | ["Apache-2.0"] | 208 | 2020-03-26T16:24:23.000Z | 2022-03-30T13:12:07.000Z |
configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py | cds-mipt/cds-mmdetection | da6b1c82715c189bf33e944c6edda80590b5a867 | ["Apache-2.0"] | 300 | 2020-03-24T03:55:02.000Z | 2022-03-29T19:08:07.000Z |
# model settings
model = dict(
type='HybridTaskCascade',
num_stages=3,
pretrained='open-mmlab://resnext101_64x4d',
interleaved=True,
mask_info_flow=True,
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067],
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=81,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
ignore_label=255,
loss_weight=0.2))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
],
stage_loss_weights=[1, 0.5, 0.25])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'stuffthingmaps/train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 19])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 20
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/htc_x101_64x4d_fpn_20e'
load_from = None
resume_from = None
workflow = [('train', 1)]
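# A minimal, hypothetical sketch of how a py-style config such as this one is
# usually consumed. It assumes mmcv is installed (providing mmcv.Config) and
# that the file lives at the repository path shown above; the guard keeps the
# block inert when the config is parsed rather than run directly.
if __name__ == '__main__':
    from mmcv import Config  # assumed dependency of mmdetection-style repos
    cfg = Config.fromfile('configs/htc/htc_x101_64x4d_fpn_20e_16gpu.py')
    # Nested settings are exposed via attribute access on the parsed ConfigDict.
    print(cfg.model.type, cfg.total_epochs, cfg.optimizer.lr)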
| 31.33574 | 79 | 0.554147 |
8f1207ecfdadeabfdb8931aa196b0d7fac9abaea | 3,122 | py | Python |
text_classification/network_conf.py | nickyfantasy/paddlepaddle_models | cb5e1f600bf6d69f25d7c9a40743452bdd0e1670 | ["Apache-2.0"] | null | null | null |
text_classification/network_conf.py | nickyfantasy/paddlepaddle_models | cb5e1f600bf6d69f25d7c9a40743452bdd0e1670 | ["Apache-2.0"] | null | null | null |
text_classification/network_conf.py | nickyfantasy/paddlepaddle_models | cb5e1f600bf6d69f25d7c9a40743452bdd0e1670 | ["Apache-2.0"] | 1 | 2019-01-22T15:40:57.000Z | 2019-01-22T15:40:57.000Z |
import sys
import math
import gzip
from paddle.v2.layer import parse_network
import paddle.v2 as paddle
__all__ = ["fc_net", "convolution_net"]
def fc_net(dict_dim,
class_num,
emb_dim=28,
hidden_layer_sizes=[28, 8],
is_infer=False):
"""
define the topology of the dnn network
    :param dict_dim: size of the word dictionary
    :type dict_dim: int
    :param class_num: number of instance classes
    :type class_num: int
    :param emb_dim: embedding vector dimension
    :type emb_dim: int
"""
# define the input layers
data = paddle.layer.data("word",
paddle.data_type.integer_value_sequence(dict_dim))
if not is_infer:
lbl = paddle.layer.data("label",
paddle.data_type.integer_value(class_num))
# define the embedding layer
emb = paddle.layer.embedding(input=data, size=emb_dim)
# max pooling to reduce the input sequence into a vector (non-sequence)
seq_pool = paddle.layer.pooling(
input=emb, pooling_type=paddle.pooling.Max())
for idx, hidden_size in enumerate(hidden_layer_sizes):
hidden_init_std = 1.0 / math.sqrt(hidden_size)
hidden = paddle.layer.fc(
input=hidden if idx else seq_pool,
size=hidden_size,
act=paddle.activation.Tanh(),
param_attr=paddle.attr.Param(initial_std=hidden_init_std))
prob = paddle.layer.fc(
input=hidden,
size=class_num,
act=paddle.activation.Softmax(),
param_attr=paddle.attr.Param(initial_std=1.0 / math.sqrt(class_num)))
if is_infer:
return prob
else:
return paddle.layer.classification_cost(
input=prob, label=lbl), prob, lbl
def convolution_net(dict_dim,
class_dim=2,
emb_dim=28,
hid_dim=128,
is_infer=False):
"""
cnn network definition
    :param dict_dim: size of the word dictionary
    :type dict_dim: int
    :param class_dim: number of instance classes
    :type class_dim: int
    :param emb_dim: embedding vector dimension
    :type emb_dim: int
    :param hid_dim: number of same-size convolution kernels
    :type hid_dim: int
"""
# input layers
data = paddle.layer.data("word",
paddle.data_type.integer_value_sequence(dict_dim))
lbl = paddle.layer.data("label", paddle.data_type.integer_value(class_dim))
# embedding layer
emb = paddle.layer.embedding(input=data, size=emb_dim)
# convolution layers with max pooling
conv_3 = paddle.networks.sequence_conv_pool(
input=emb, context_len=3, hidden_size=hid_dim)
conv_4 = paddle.networks.sequence_conv_pool(
input=emb, context_len=4, hidden_size=hid_dim)
# fc and output layer
prob = paddle.layer.fc(
input=[conv_3, conv_4], size=class_dim, act=paddle.activation.Softmax())
if is_infer:
return prob
else:
cost = paddle.layer.classification_cost(input=prob, label=lbl)
return cost, prob, lbl
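# A minimal, hypothetical usage sketch: build both topologies for a 10000-word
# vocabulary, two-class task. The sizes are illustrative only, and the block
# relies on the legacy paddle.v2 API already imported at the top of this module.
if __name__ == "__main__":
    paddle.init(use_gpu=False, trainer_count=1)
    dnn_cost, dnn_prob, dnn_lbl = fc_net(dict_dim=10000, class_num=2)
    cnn_cost, cnn_prob, cnn_lbl = convolution_net(dict_dim=10000, class_dim=2)
    # parse_network serializes the configured topology for inspection.
    print(parse_network(dnn_cost))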
| 30.31068 | 80 | 0.639974 |
1d773f607e8e327b6f761b3bb1f28990264f4e98 | 26,528 | py | Python |
sympy/utilities/tests/test_lambdify.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | ["BSD-3-Clause"] | null | null | null |
sympy/utilities/tests/test_lambdify.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | ["BSD-3-Clause"] | null | null | null |
sympy/utilities/tests/test_lambdify.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | ["BSD-3-Clause"] | null | null | null |
from itertools import product
import math
import mpmath
from sympy.utilities.pytest import XFAIL, raises
from sympy import (
symbols, lambdify, sqrt, sin, cos, tan, pi, acos, acosh, Rational,
Float, Matrix, Lambda, Piecewise, exp, Integral, oo, I, Abs, Function,
true, false, And, Or, Not, ITE, Min, Max, floor, diff, IndexedBase, Sum,
DotProduct, Eq, Dummy)
from sympy.printing.lambdarepr import LambdaPrinter
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.external import import_module
from sympy.functions.special.gamma_functions import uppergamma,lowergamma
import sympy
MutableDenseMatrix = Matrix
numpy = import_module('numpy')
numexpr = import_module('numexpr')
tensorflow = import_module('tensorflow')
w, x, y, z = symbols('w,x,y,z')
#================== Test different arguments =======================
def test_no_args():
f = lambdify([], 1)
raises(TypeError, lambda: f(-1))
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x, y], x + y)
assert f(1, 2) == 3
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_own_namespace_1():
myfunc = lambda x: 1
f = lambdify(x, sin(x), {"sin": myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_namespace_2():
def myfunc(x):
return 1
f = lambdify(x, sin(x), {'sin': myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0) == 0.0
def test_bad_args():
# no vargs given
raises(TypeError, lambda: lambdify(1))
# same with vector exprs
raises(TypeError, lambda: lambdify([1, 2]))
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {"pi": 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {"I": 1j})
assert f(1) == 1 + 1j
#================== Test different modules =========================
# high-precision output of sin(0.2) is used to detect unwanted loss of precision
@conserve_mpmath_dps
def test_sympy_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "sympy")
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec
# arctan is in numpy module and should not be available
raises(NameError, lambda: lambdify(x, arctan(x), "sympy"))
@conserve_mpmath_dps
def test_math_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "math")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a python math function
@conserve_mpmath_dps
def test_mpmath_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a mpmath function
@conserve_mpmath_dps
def test_number_precision():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin02, "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
@conserve_mpmath_dps
def test_mpmath_precision():
mpmath.mp.dps = 100
assert str(lambdify((), pi.evalf(100), 'mpmath')()) == str(pi.evalf(100))
#================== Test Translations ==============================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
def test_math_transl():
from sympy.utilities.lambdify import MATH_TRANSLATIONS
for sym, mat in MATH_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
for sym, mat in MPMATH_TRANSLATIONS.items():
assert sym in sympy.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
def test_numpy_transl():
if not numpy:
skip("numpy not installed.")
from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
for sym, nump in NUMPY_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert nump in numpy.__dict__
def test_tensorflow_transl():
if not tensorflow:
skip("tensorflow not installed")
from sympy.utilities.lambdify import TENSORFLOW_TRANSLATIONS
for sym, tens in TENSORFLOW_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert tens in tensorflow.__dict__
def test_numpy_translation_abs():
if not numpy:
skip("numpy not installed.")
f = lambdify(x, Abs(x), "numpy")
assert f(-1) == 1
assert f(1) == 1
def test_numexpr_printer():
if not numexpr:
skip("numexpr not installed.")
# if translation/printing is done incorrectly then evaluating
# a lambdified numexpr expression will throw an exception
from sympy.printing.lambdarepr import NumExprPrinter
from sympy import S
blacklist = ('where', 'complex', 'contains')
arg_tuple = (x, y, z) # some functions take more than one argument
for sym in NumExprPrinter._numexpr_functions.keys():
if sym in blacklist:
continue
ssym = S(sym)
if hasattr(ssym, '_nargs'):
nargs = ssym._nargs[0]
else:
nargs = 1
args = arg_tuple[:nargs]
f = lambdify(args, ssym(*args), modules='numexpr')
assert f(*(1, )*nargs) is not None
def test_issue_9334():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
expr = sympy.S('b*a - sqrt(a**2)')
a, b = sorted(expr.free_symbols, key=lambda s: s.name)
func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False)
foo, bar = numpy.random.random((2, 4))
func_numexpr(foo, bar)
#================== Test some functions ============================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
def test_trig():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
prec = 1e-11
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
#================== Test vectors ===================================
def test_vector_simple():
f = lambdify((x, y, z), (z, y, x))
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
raises(ZeroDivisionError, lambda: f(0))
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x), sin(x)], 'math')
d = f(pi)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x), sin(x)])
d = f(3.14159)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x, y, z], [z, y, x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x, y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules="math")
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), (float, mpmath.ctx_mp_python.mpf))
f = lambdify(x, sin(x)**2, modules="math")
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol = Matrix([[1, 2], [sin(3) + 4, 1]])
f = lambdify((x, y, z), A, modules="sympy")
assert f(1, 2, 3) == sol
f = lambdify((x, y, z), (A, [A]), modules="sympy")
assert f(1, 2, 3) == (sol, [sol])
J = Matrix((x, x + y)).jacobian((x, y))
v = Matrix((x, y))
sol = Matrix([[1, 0], [1, 1]])
assert lambdify(v, J, modules='sympy')(1, 2) == sol
assert lambdify(v.T, J, modules='sympy')(1, 2) == sol
def test_numpy_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
    # Lambdify the matrix first, to check that an array is returned by default
f = lambdify((x, y, z), A, ['numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
#Check that the types are arrays and matrices
assert isinstance(f(1, 2, 3), numpy.ndarray)
def test_numpy_transpose():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A.T, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, 0], [2, 1]]))
def test_numpy_dotproduct():
if not numpy:
skip("numpy not installed")
A = Matrix([x, y, z])
f1 = lambdify([x, y, z], DotProduct(A, A), modules='numpy')
f2 = lambdify([x, y, z], DotProduct(A, A.T), modules='numpy')
f3 = lambdify([x, y, z], DotProduct(A.T, A), modules='numpy')
f4 = lambdify([x, y, z], DotProduct(A, A.T), modules='numpy')
assert f1(1, 2, 3) == \
f2(1, 2, 3) == \
f3(1, 2, 3) == \
f4(1, 2, 3) == \
numpy.array([14])
def test_numpy_inverse():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A**-1, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, -2], [0, 1]]))
def test_numpy_old_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
f = lambdify((x, y, z), A, [{'ImmutableDenseMatrix': numpy.matrix}, 'numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
assert isinstance(f(1, 2, 3), numpy.matrix)
def test_python_div_zero_issue_11306():
if not numpy:
skip("numpy not installed.")
p = Piecewise((1 / x, y < -1), (x, y < 1), (1 / x, True))
f = lambdify([x, y], p, modules='numpy')
numpy.seterr(divide='ignore')
assert str(float(f(0,1))) == 'inf'
numpy.seterr(divide='warn')
def test_issue9474():
mods = [None, 'math']
if numpy:
mods.append('numpy')
if mpmath:
mods.append('mpmath')
for mod in mods:
f = lambdify(x, sympy.S(1)/x, modules=mod)
assert f(2) == 0.5
f = lambdify(x, floor(sympy.S(1)/x), modules=mod)
assert f(2) == 0
if mpmath:
f = lambdify(x, sympy.S(1)/sympy.Abs(x), modules=['mpmath'])
assert isinstance(f(2), mpmath.mpf)
for absfunc, modules in product([Abs, abs], mods):
f = lambdify(x, absfunc(x), modules=modules)
assert f(-1) == 1
assert f(1) == 1
assert f(3+4j) == 5
def test_issue_9871():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
r = sqrt(x**2 + y**2)
expr = diff(1/r, x)
xn = yn = numpy.linspace(1, 10, 16)
# expr(xn, xn) = -xn/(sqrt(2)*xn)^3
fv_exact = -numpy.sqrt(2.)**-3 * xn**-2
fv_numpy = lambdify((x, y), expr, modules='numpy')(xn, yn)
fv_numexpr = lambdify((x, y), expr, modules='numexpr')(xn, yn)
numpy.testing.assert_allclose(fv_numpy, fv_exact, rtol=1e-10)
numpy.testing.assert_allclose(fv_numexpr, fv_exact, rtol=1e-10)
def test_numpy_piecewise():
if not numpy:
skip("numpy not installed.")
pieces = Piecewise((x, x < 3), (x**2, x > 5), (0, True))
f = lambdify(x, pieces, modules="numpy")
numpy.testing.assert_array_equal(f(numpy.arange(10)),
numpy.array([0, 1, 2, 0, 0, 0, 36, 49, 64, 81]))
# If we evaluate somewhere all conditions are False, we should get back NaN
nodef_func = lambdify(x, Piecewise((x, x > 0), (-x, x < 0)))
numpy.testing.assert_array_equal(nodef_func(numpy.array([-1, 0, 1])),
numpy.array([1, numpy.nan, 1]))
def test_numpy_logical_ops():
if not numpy:
skip("numpy not installed.")
and_func = lambdify((x, y), And(x, y), modules="numpy")
or_func = lambdify((x, y), Or(x, y), modules="numpy")
not_func = lambdify((x), Not(x), modules="numpy")
arr1 = numpy.array([True, True])
arr2 = numpy.array([False, True])
numpy.testing.assert_array_equal(and_func(arr1, arr2), numpy.array([False, True]))
numpy.testing.assert_array_equal(or_func(arr1, arr2), numpy.array([True, True]))
numpy.testing.assert_array_equal(not_func(arr2), numpy.array([True, False]))
def test_numpy_matmul():
if not numpy:
skip("numpy not installed.")
xmat = Matrix([[x, y], [z, 1+z]])
ymat = Matrix([[x**2], [Abs(x)]])
mat_func = lambdify((x, y, z), xmat*ymat, modules="numpy")
numpy.testing.assert_array_equal(mat_func(0.5, 3, 4), numpy.array([[1.625], [3.5]]))
numpy.testing.assert_array_equal(mat_func(-0.5, 3, 4), numpy.array([[1.375], [3.5]]))
# Multiple matrices chained together in multiplication
f = lambdify((x, y, z), xmat*xmat*xmat, modules="numpy")
numpy.testing.assert_array_equal(f(0.5, 3, 4), numpy.array([[72.125, 119.25],
[159, 251]]))
def test_numpy_numexpr():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b, c = numpy.random.randn(3, 128, 128)
# ensure that numpy and numexpr return same value for complicated expression
expr = sin(x) + cos(y) + tan(z)**2 + Abs(z-y)*acos(sin(y*z)) + \
Abs(y-z)*acosh(2+exp(y-x))- sqrt(x**2+I*y**2)
npfunc = lambdify((x, y, z), expr, modules='numpy')
nefunc = lambdify((x, y, z), expr, modules='numexpr')
assert numpy.allclose(npfunc(a, b, c), nefunc(a, b, c))
def test_numexpr_userfunctions():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b = numpy.random.randn(2, 10)
uf = type('uf', (Function, ),
{'eval' : classmethod(lambda x, y : y**2+1)})
func = lambdify(x, 1-uf(x), modules='numexpr')
assert numpy.allclose(func(a), -(a**2))
uf = implemented_function(Function('uf'), lambda x, y : 2*x*y+1)
func = lambdify((x, y), uf(x, y), modules='numexpr')
assert numpy.allclose(func(a, b), 2*a*b+1)
def test_tensorflow_basic_math():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.constant(0, dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s) == 0.5
def test_tensorflow_placeholders():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: 0}) == 0.5
def test_tensorflow_variables():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(sin(x), Abs(1/(x+2)))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.Variable(0, dtype=tensorflow.float32)
s = tensorflow.Session()
s.run(tensorflow.initialize_all_variables())
assert func(a).eval(session=s) == 0.5
def test_tensorflow_logical_operations():
if not tensorflow:
skip("tensorflow not installed.")
expr = Not(And(Or(x, y), y))
func = lambdify([x, y], expr, modules="tensorflow")
a = tensorflow.constant(False)
b = tensorflow.constant(True)
s = tensorflow.Session()
assert func(a, b).eval(session=s) == 0
def test_tensorflow_piecewise():
if not tensorflow:
skip("tensorflow not installed.")
expr = Piecewise((0, Eq(x,0)), (-1, x < 0), (1, x > 0))
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: -1}) == -1
assert func(a).eval(session=s, feed_dict={a: 0}) == 0
assert func(a).eval(session=s, feed_dict={a: 1}) == 1
def test_tensorflow_multi_max():
if not tensorflow:
skip("tensorflow not installed.")
expr = Max(x, -x, x**2)
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: -2}) == 4
def test_tensorflow_multi_min():
if not tensorflow:
skip("tensorflow not installed.")
expr = Min(x, -x, x**2)
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: -2}) == -2
def test_tensorflow_relational():
if not tensorflow:
skip("tensorflow not installed.")
expr = x >= 0
func = lambdify(x, expr, modules="tensorflow")
a = tensorflow.placeholder(dtype=tensorflow.float32)
s = tensorflow.Session()
assert func(a).eval(session=s, feed_dict={a: 1})
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(x) == Integral(exp(-x**2), (x, -oo, oo))
#================== Test symbolic ==================================
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x, y], x + y + z)
assert f(1, 2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x: 'first f'}
n2 = {'f': lambda x: 'second f',
'g': lambda x: 'function g'}
f = sympy.Function('f')
g = sympy.Function('g')
if1 = lambdify(x, f(x), modules=(n1, "sympy"))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, "sympy"))
# previously gave 'second f'
assert if1(1) == 'first f'
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = sympy.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(my_f, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function("f", lambda x: x + 101)
raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_errors():
# Test errors that implemented functions can return, and still be able to
# form expressions.
# See: https://github.com/sympy/sympy/issues/10810
for val, error_class in product((0, 0., 2, 2.0),
(AttributeError, TypeError, ValueError)):
def myfunc(a):
if a == 0:
raise error_class
return 1
f = implemented_function('f', myfunc)
expr = f(val)
assert expr == f(val)
def test_imps_wrong_args():
raises(ValueError, lambda: implemented_function(sin, lambda x: x))
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (sympy) lambdify
f = sympy.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function("f", lambda x: x + 100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
# Check that imp preferred to other namespaces by default
d = {'f': lambda x: x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
def test_dummification():
t = symbols('t')
F = Function('F')
G = Function('G')
#"\alpha" is not a valid python variable name
#lambdify should sub in a dummy for it, and return
#without a syntax error
alpha = symbols(r'\alpha')
some_expr = 2 * F(t)**2 / G(t)
lam = lambdify((F(t), G(t)), some_expr)
assert lam(3, 9) == 2
lam = lambdify(sin(t), 2 * sin(t)**2)
assert lam(F(t)) == 2 * F(t)**2
#Test that \alpha was properly dummified
lam = lambdify((alpha, t), 2*alpha + t)
assert lam(2, 1) == 5
raises(SyntaxError, lambda: lambdify(F(t) * G(t), F(t) * G(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))
def test_python_keywords():
# Test for issue 7452. The automatic dummification should ensure use of
# Python reserved keywords as symbol names will create valid lambda
# functions. This is an additional regression test.
python_if = symbols('if')
expr = python_if / 2
f = lambdify(python_if, expr)
assert f(4.0) == 2.0
def test_lambdify_docstring():
func = lambdify((w, x, y, z), w + x + y + z)
ref = (
"Created with lambdify. Signature:\n\n"
"func(w, x, y, z)\n\n"
"Expression:\n\n"
"w + x + y + z"
).splitlines()
assert func.__doc__.splitlines()[:len(ref)] == ref
syms = symbols('a1:26')
func = lambdify(syms, sum(syms))
ref = (
"Created with lambdify. Signature:\n\n"
"func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n"
" a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n"
"Expression:\n\n"
"a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +..."
).splitlines()
assert func.__doc__.splitlines()[:len(ref)] == ref
#================== Test special printers ==========================
def test_special_printers():
class IntervalPrinter(LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
def _print_Integer(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr)
def _print_Rational(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr)
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
expr = sympy.sqrt(sympy.sqrt(2) + sympy.sqrt(3)) + sympy.S(1)/2
func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
def test_true_false():
    # We want an exact 'is' comparison here, not just ==
assert lambdify([], true)() is True
assert lambdify([], false)() is False
def test_issue_2790():
assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3
assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10
assert lambdify(x, x + 1, dummify=False)(1) == 2
def test_issue_12092():
f = implemented_function('f', lambda x: x**2)
assert f(f(2)).evalf() == Float(16)
def test_ITE():
assert lambdify((x, y, z), ITE(x, y, z))(True, 5, 3) == 5
assert lambdify((x, y, z), ITE(x, y, z))(False, 5, 3) == 3
def test_Min_Max():
# see gh-10375
assert lambdify((x, y, z), Min(x, y, z))(1, 2, 3) == 1
assert lambdify((x, y, z), Max(x, y, z))(1, 2, 3) == 3
def test_Indexed():
# Issue #10934
if not numpy:
skip("numpy not installed")
a = IndexedBase('a')
i, j = symbols('i j')
b = numpy.array([[1, 2], [3, 4]])
assert lambdify(a, Sum(a[x, y], (x, 0, 1), (y, 0, 1)))(b) == 10
def test_issue_12173():
#test for issue 12173
exp1 = lambdify((x, y), uppergamma(x, y),"mpmath")(1, 2)
exp2 = lambdify((x, y), lowergamma(x, y),"mpmath")(1, 2)
assert exp1 == uppergamma(1, 2).evalf()
assert exp2 == lowergamma(1, 2).evalf()
def test_lambdify_dummy_arg():
d1 = Dummy()
f1 = lambdify(d1, d1 + 1, dummify=False)
assert f1(2) == 3
f1b = lambdify(d1, d1 + 1)
assert f1b(2) == 3
d2 = Dummy('x')
f2 = lambdify(d2, d2 + 1)
assert f2(2) == 3
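# A compact, hypothetical example of the pattern most tests in this file
# exercise: compile a SymPy expression into a fast numerical callable and
# evaluate it on an array, guarded on the optional numpy dependency as above.
def test_lambdify_numpy_vectorized_sketch():
    if not numpy:
        skip("numpy not installed.")
    u = symbols('u')
    # sin(u)**2 + cos(u)**2 == 1, so the expected result needs no reference data.
    f = lambdify(u, sin(u)**2 + cos(u)**2, modules='numpy')
    numpy.testing.assert_allclose(f(numpy.linspace(0, 3, 7)), numpy.ones(7))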
| 32.750617 | 89 | 0.594353 |
66eb2bc60d57fc150b5c7f04d5eee41fc1554cc6 | 206 | py | Python |
tutorial/Austrian_energy_system/tools.py | ardasha93/message_ix | f68403d1c3b62392a082d008198f87e706e6ffc2 | ["Apache-2.0", "CC-BY-4.0"] | null | null | null |
tutorial/Austrian_energy_system/tools.py | ardasha93/message_ix | f68403d1c3b62392a082d008198f87e706e6ffc2 | ["Apache-2.0", "CC-BY-4.0"] | 2 | 2018-08-09T13:26:01.000Z | 2020-03-13T09:04:45.000Z |
tutorial/Austrian_energy_system/tools.py | behnam2015/message_ix | 430a6d967be6cf473a8bbf1576a1e478bd399a92 | ["Apache-2.0", "CC-BY-4.0"] | null | null | null |
import sys
import os
here = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
path = os.path.join(here, '..', 'utils')
sys.path.append(path)
from plotting import *
from run_scenarios import *
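# A minimal, equivalent sketch of the path setup above using pathlib
# (illustrative only; it resolves the same '../utils' directory, so appending
# it a second time is harmless).
from pathlib import Path
utils_dir = Path(__file__).resolve().parent / '..' / 'utils'
sys.path.append(str(utils_dir))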
| 18.727273 | 67 | 0.728155 |
0551956667bfe569c10718bdb97a124f5c40ab14 | 51,039 | py | Python |
src/python/grpcio/grpc_core_dependencies.py | casperisfine/grpc | 9facfe2b684bbaaf9e75e2d285e7bffceeefe2b1 | ["Apache-2.0"] | 1 | 2019-07-24T18:08:51.000Z | 2019-07-24T18:08:51.000Z |
src/python/grpcio/grpc_core_dependencies.py | casperisfine/grpc | 9facfe2b684bbaaf9e75e2d285e7bffceeefe2b1 | ["Apache-2.0"] | 9 | 2019-07-22T21:13:22.000Z | 2019-12-11T08:50:56.000Z |
src/python/grpcio/grpc_core_dependencies.py | casperisfine/grpc | 9facfe2b684bbaaf9e75e2d285e7bffceeefe2b1 | ["Apache-2.0"] | 1 | 2019-12-17T06:56:33.000Z | 2019-12-17T06:56:33.000Z |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/eds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_libuv.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/service_config.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/xds/xds_api.cc',
'src/core/ext/filters/client_channel/xds/xds_bootstrap.cc',
'src/core/ext/filters/client_channel/xds/xds_channel_secure.cc',
'src/core/ext/filters/client_channel/xds/xds_client.cc',
'src/core/ext/filters/client_channel/xds/xds_client_stats.cc',
'src/core/ext/filters/client_idle/client_idle_filter.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/max_age/max_age_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
'src/core/ext/filters/workarounds/workaround_utils.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/authority.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/incoming_metadata.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/auth/common.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/auth/secret.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/auth/tls.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/filter.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/address.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/backoff.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/base.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/config_source.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/health_check.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/protocol.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/core/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/discovery.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/eds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/lds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener/listener.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/listener/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/rds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/route.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/route/route.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/route/route_components.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/srds.upb.c',
'src/core/ext/upb-generated/envoy/config/filter/accesslog/v2/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v2/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v2/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v2/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v2/lrs.upb.c',
'src/core/ext/upb-generated/envoy/type/http.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/string.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v2/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/range.upb.c',
'src/core/ext/upb-generated/envoy/type/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v2/custom_tag.upb.c',
'src/core/ext/upb-generated/gogoproto/gogo.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/data/orca/v1/orca_load_report.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/lib/avl/avl.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/handshaker.cc',
'src/core/lib/channel/handshaker_registry.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_args.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/compression/stream_compression.cc',
'src/core/lib/compression/stream_compression_gzip.cc',
'src/core/lib/compression/stream_compression_identity.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tls_pthread.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_uv.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_epollex_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_custom.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_uv.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/is_epollexclusive_available.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_custom.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_custom.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_uv.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_custom.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/resource_quota.cc',
'src/core/lib/iomgr/sockaddr_utils.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_uv.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_custom.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_custom.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_custom.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_uv.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_custom.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/timer_uv.cc',
'src/core/lib/iomgr/udp_server.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/credentials_metadata.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/target_authority_table.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_intern.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/init_secure.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/metadata.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/static_metadata.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/status_metadata.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
'third_party/abseil-cpp/absl/base/dynamic_annotations.cc',
'third_party/abseil-cpp/absl/base/internal/cycleclock.cc',
'third_party/abseil-cpp/absl/base/internal/raw_logging.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock.cc',
'third_party/abseil-cpp/absl/base/internal/spinlock_wait.cc',
'third_party/abseil-cpp/absl/base/internal/sysinfo.cc',
'third_party/abseil-cpp/absl/base/internal/thread_identity.cc',
'third_party/abseil-cpp/absl/base/internal/throw_delegate.cc',
'third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc',
'third_party/abseil-cpp/absl/base/log_severity.cc',
'third_party/abseil-cpp/absl/numeric/int128.cc',
'third_party/abseil-cpp/absl/strings/ascii.cc',
'third_party/abseil-cpp/absl/strings/charconv.cc',
'third_party/abseil-cpp/absl/strings/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_bigint.cc',
'third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc',
'third_party/abseil-cpp/absl/strings/internal/escaping.cc',
'third_party/abseil-cpp/absl/strings/internal/memutil.cc',
'third_party/abseil-cpp/absl/strings/internal/ostringstream.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/extension.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/output.cc',
'third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc',
'third_party/abseil-cpp/absl/strings/internal/utf8.cc',
'third_party/abseil-cpp/absl/strings/match.cc',
'third_party/abseil-cpp/absl/strings/numbers.cc',
'third_party/abseil-cpp/absl/strings/str_cat.cc',
'third_party/abseil-cpp/absl/strings/str_replace.cc',
'third_party/abseil-cpp/absl/strings/str_split.cc',
'third_party/abseil-cpp/absl/strings/string_view.cc',
'third_party/abseil-cpp/absl/strings/substitute.cc',
'third_party/abseil-cpp/absl/time/civil_time.cc',
'third_party/abseil-cpp/absl/time/clock.cc',
'third_party/abseil-cpp/absl/time/duration.cc',
'third_party/abseil-cpp/absl/time/format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/civil_time_detail.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_fixed.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_format.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_if.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_impl.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_info.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_lookup.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_posix.cc',
'third_party/abseil-cpp/absl/time/internal/cctz/src/zone_info_source.cc',
'third_party/abseil-cpp/absl/time/time.cc',
'third_party/abseil-cpp/absl/types/bad_optional_access.cc',
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh/check.c',
'third_party/boringssl-with-bazel/src/crypto/dh/dh.c',
'third_party/boringssl-with-bazel/src/crypto/dh/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/is_fips.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_r2x.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_sxnet.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
'third_party/cares/cares/ares__close_sockets.c',
'third_party/cares/cares/ares__get_hostent.c',
'third_party/cares/cares/ares__read_line.c',
'third_party/cares/cares/ares__timeval.c',
'third_party/cares/cares/ares_cancel.c',
'third_party/cares/cares/ares_create_query.c',
'third_party/cares/cares/ares_data.c',
'third_party/cares/cares/ares_destroy.c',
'third_party/cares/cares/ares_expand_name.c',
'third_party/cares/cares/ares_expand_string.c',
'third_party/cares/cares/ares_fds.c',
'third_party/cares/cares/ares_free_hostent.c',
'third_party/cares/cares/ares_free_string.c',
'third_party/cares/cares/ares_getenv.c',
'third_party/cares/cares/ares_gethostbyaddr.c',
'third_party/cares/cares/ares_gethostbyname.c',
'third_party/cares/cares/ares_getnameinfo.c',
'third_party/cares/cares/ares_getopt.c',
'third_party/cares/cares/ares_getsock.c',
'third_party/cares/cares/ares_init.c',
'third_party/cares/cares/ares_library_init.c',
'third_party/cares/cares/ares_llist.c',
'third_party/cares/cares/ares_mkquery.c',
'third_party/cares/cares/ares_nowarn.c',
'third_party/cares/cares/ares_options.c',
'third_party/cares/cares/ares_parse_a_reply.c',
'third_party/cares/cares/ares_parse_aaaa_reply.c',
'third_party/cares/cares/ares_parse_mx_reply.c',
'third_party/cares/cares/ares_parse_naptr_reply.c',
'third_party/cares/cares/ares_parse_ns_reply.c',
'third_party/cares/cares/ares_parse_ptr_reply.c',
'third_party/cares/cares/ares_parse_soa_reply.c',
'third_party/cares/cares/ares_parse_srv_reply.c',
'third_party/cares/cares/ares_parse_txt_reply.c',
'third_party/cares/cares/ares_platform.c',
'third_party/cares/cares/ares_process.c',
'third_party/cares/cares/ares_query.c',
'third_party/cares/cares/ares_search.c',
'third_party/cares/cares/ares_send.c',
'third_party/cares/cares/ares_strcasecmp.c',
'third_party/cares/cares/ares_strdup.c',
'third_party/cares/cares/ares_strerror.c',
'third_party/cares/cares/ares_strsplit.c',
'third_party/cares/cares/ares_timeout.c',
'third_party/cares/cares/ares_version.c',
'third_party/cares/cares/ares_writev.c',
'third_party/cares/cares/bitncmp.c',
'third_party/cares/cares/inet_net_pton.c',
'third_party/cares/cares/inet_ntop.c',
'third_party/cares/cares/windows_port.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/port.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/upb.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
]
| 59.004624
| 118
| 0.752444
|
4f8b6c2d2a741d4acb4af4d2a0d6ab31c6ce18b3
| 679
|
py
|
Python
|
chapter04/dags/listing_4_5.py
|
add54/Data_PipeLine_Apache_Airflow
|
40b52ba6fcda3203b194be9e1c2850135997215a
|
[
"BSD-Source-Code"
] | 303
|
2019-09-30T10:59:15.000Z
|
2022-03-30T17:03:27.000Z
|
chapter04/dags/listing_4_5.py
|
andreaschandra/data-pipelines-with-apache-airflow
|
40b52ba6fcda3203b194be9e1c2850135997215a
|
[
"BSD-Source-Code"
] | 13
|
2020-04-08T12:28:30.000Z
|
2021-12-30T06:40:37.000Z
|
chapter04/dags/listing_4_5.py
|
andreaschandra/data-pipelines-with-apache-airflow
|
40b52ba6fcda3203b194be9e1c2850135997215a
|
[
"BSD-Source-Code"
] | 148
|
2020-01-03T03:30:39.000Z
|
2022-03-28T04:19:43.000Z
|
from urllib import request
import airflow.utils.dates
from airflow import DAG
from airflow.operators.python import PythonOperator
dag = DAG(
dag_id="listing_4_05",
start_date=airflow.utils.dates.days_ago(1),
schedule_interval="@hourly",
)
def _get_data(execution_date):
year, month, day, hour, *_ = execution_date.timetuple()
url = (
"https://dumps.wikimedia.org/other/pageviews/"
f"{year}/{year}-{month:0>2}/pageviews-{year}{month:0>2}{day:0>2}-{hour:0>2}0000.gz"
)
output_path = "/tmp/wikipageviews.gz"
request.urlretrieve(url, output_path)
get_data = PythonOperator(task_id="get_data", python_callable=_get_data, dag=dag)
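# Hedged usage sketch (not part of the original listing): _get_data only relies on
# execution_date.timetuple(), so it can be exercised outside Airflow by passing a
# plain datetime; the timestamp below is an arbitrary example.
if __name__ == "__main__":
    from datetime import datetime

    _get_data(execution_date=datetime(2019, 1, 1, 14))  # writes /tmp/wikipageviews.gz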
| 27.16
| 91
| 0.708395
|
dab02c2cbb55b4a2a8cc2c8452bf3af69e78761c
| 400
|
py
|
Python
|
python-projects/uppercase_and_reverse.py
|
iamvalentin23/onemonth
|
a44a4eb131824d9224170b55264902e63354da6e
|
[
"MIT"
] | null | null | null |
python-projects/uppercase_and_reverse.py
|
iamvalentin23/onemonth
|
a44a4eb131824d9224170b55264902e63354da6e
|
[
"MIT"
] | null | null | null |
python-projects/uppercase_and_reverse.py
|
iamvalentin23/onemonth
|
a44a4eb131824d9224170b55264902e63354da6e
|
[
"MIT"
] | null | null | null |
# Create a function called uppercase_and_reverse that takes a little bit of text,
# uppercases it all, and then reverses it (flips all the letters around)
def uppercase_and_reverse(sentence):
return reverse(sentence.upper())
def reverse(sentence):
return sentence[::-1]
text = uppercase_and_reverse("Do not go gentle into that good night.") #"THGIN DOOG TAHT OTNI ELTNEG OG TON OD"
print(text)
| 36.363636
| 112
| 0.77
|
5efb7f25110ae4597e0d57582d15f7aaf0538ba6
| 1,317
|
py
|
Python
|
tests/cli/test_image.py
|
akx/markovchain
|
9981ef327e70ba3e42f1a6c926f1252b47c29b48
|
[
"MIT"
] | 16
|
2017-10-28T10:06:51.000Z
|
2021-06-05T15:38:34.000Z
|
tests/cli/test_image.py
|
santhoshtr/markovchain
|
daf7273d15826b2555f92026ce1a999fb5906e37
|
[
"MIT"
] | 1
|
2019-06-07T09:29:00.000Z
|
2019-06-07T19:29:45.000Z
|
tests/cli/test_image.py
|
santhoshtr/markovchain
|
daf7273d15826b2555f92026ce1a999fb5906e37
|
[
"MIT"
] | 6
|
2017-11-06T16:03:26.000Z
|
2021-04-02T20:56:30.000Z
|
import os
import json
import pytest
from markovchain.cli.main import main
@pytest.mark.parametrize('fname,settings,data,args,res', [
])
def test_cli_image(mocker, mock_cli, fname, settings, data, args, res):
mock_cli(mocker)
statefile = os.path.join(mock_cli.dir, fname)
datafile = os.path.join(mock_cli.dir, 'data.txt')
settingsfile = os.path.join(mock_cli.dir, 'settings.json')
cmd = ['text', 'create', '-o', statefile]
if settings is not None:
with open(settingsfile, 'wt') as fp:
json.dump(settings, fp)
cmd.extend(('-s', settingsfile))
if len(data) > 0:
with open(datafile, 'wt') as fp:
fp.write(data[0])
cmd.append(datafile)
mock_cli.run(main, cmd)
mock_cli.assert_output('', '')
update = data[1:]
if update:
cmd = ['text', 'update']
cmd.append(statefile)
for i, data_ in enumerate(data):
datafile = os.path.join(mock_cli.dir, 'data%d.txt' % i)
cmd.append(datafile)
with open(datafile, 'wt') as fp:
fp.write(data_)
mock_cli.run(main, cmd)
mock_cli.assert_output('', '')
cmd = ['text', 'generate']
cmd.extend(args)
cmd.append(statefile)
mock_cli.run(main, cmd)
mock_cli.assert_output(res, '')
| 28.630435
| 71
| 0.599848
|
a4758df8558a206a463b8db70b98cffac2dd3e90
| 792
|
py
|
Python
|
venv/Scripts/f2py.py
|
Bandarban/Bandar_Bot
|
d185083bbeafa3d2002eace1122d595833130f19
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/f2py.py
|
Bandarban/Bandar_Bot
|
d185083bbeafa3d2002eace1122d595833130f19
|
[
"Apache-2.0"
] | 4
|
2021-03-19T04:42:14.000Z
|
2022-03-12T00:02:52.000Z
|
venv/Scripts/f2py.py
|
Bandarban/Bandar_Bot
|
d185083bbeafa3d2002eace1122d595833130f19
|
[
"Apache-2.0"
] | null | null | null |
#!C:\Users\Bandar\Bandar_Bot\venv\Scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
sys.exit(1)
main()
| 27.310345
| 67
| 0.643939
|
c365d9a90c5d7c68c4efb701b6b26f1d1fc8da0f
| 5,132
|
py
|
Python
|
pynetdicom/tests/test_dul.py
|
edmcdonagh/pynetdicom-1
|
9156c1adb34bb01580b9ba7247e78d699f38bbb7
|
[
"MIT"
] | null | null | null |
pynetdicom/tests/test_dul.py
|
edmcdonagh/pynetdicom-1
|
9156c1adb34bb01580b9ba7247e78d699f38bbb7
|
[
"MIT"
] | null | null | null |
pynetdicom/tests/test_dul.py
|
edmcdonagh/pynetdicom-1
|
9156c1adb34bb01580b9ba7247e78d699f38bbb7
|
[
"MIT"
] | null | null | null |
"""DUL service testing"""
import logging
import socket
import threading
import time
import pytest
from pynetdicom import AE
from pynetdicom.dul import DULServiceProvider
from pynetdicom.pdu import A_ASSOCIATE_RQ, A_ASSOCIATE_AC, A_ASSOCIATE_RJ, \
A_RELEASE_RQ, A_RELEASE_RP, P_DATA_TF, A_ABORT_RQ
from pynetdicom.pdu_primitives import A_ASSOCIATE, A_RELEASE, A_ABORT, P_DATA
from .encoded_pdu_items import a_associate_ac, a_release_rq
from .parrot import start_server, ThreadedParrot
LOGGER = logging.getLogger('pynetdicom')
LOGGER.setLevel(logging.CRITICAL)
#LOGGER.setLevel(logging.DEBUG)
class DummyACSE(object):
"""Dummy ACSE class"""
@staticmethod
def debug_receive_associate_rq(): pass
@staticmethod
def debug_receive_associate_ac(): pass
@staticmethod
def debug_receive_associate_rj(): pass
@staticmethod
def debug_receive_data_tf(): pass
@staticmethod
def debug_receive_release_rq(): pass
@staticmethod
def debug_receive_release_rp(): pass
@staticmethod
def debug_receive_abort(): pass
class DummyAssociation(object):
"""Dummy Association class"""
acse = DummyACSE()
class TestDUL(object):
"""Run tests on DUL service provider."""
def teardown(self):
for thread in threading.enumerate():
if isinstance(thread, ThreadedParrot):
thread.shutdown()
def test_primitive_to_event(self):
"""Test that parameter returns expected results"""
dul = DULServiceProvider(DummyAssociation())
p2e = dul._primitive_to_event
primitive = A_ASSOCIATE()
primitive.result = None
assert p2e(primitive) == 'Evt1'
primitive.result = 0
assert p2e(primitive) == 'Evt7'
primitive.result = 1
assert p2e(primitive) == 'Evt8'
primitive = A_RELEASE()
primitive.result = None
assert p2e(primitive) == 'Evt11'
primitive.result = 'affirmative'
assert p2e(primitive) == 'Evt14'
primitive = A_ABORT()
assert p2e(primitive) == 'Evt15'
primitive = P_DATA()
assert p2e(primitive) == 'Evt9'
with pytest.raises(ValueError):
p2e('TEST')
def test_recv_failure_aborts(self):
"""Test connection close during PDU recv causes abort."""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('wait', 0.1), # Don't want to accidentally kill the DUL
('send', b"\x07\x00\x00\x00\x00\x04"),
('wait', 0.3)
]
scp = start_server(commands)
ae = AE()
ae.network_timeout = 0.2
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
time.sleep(0.4)
assert assoc.is_aborted
scp.shutdown()
def test_recv_short_aborts(self):
"""Test receiving short PDU causes abort."""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('send', b"\x07\x00\x00\x00\x00\x04\x00\x00"), # Send short PDU
('wait', 0.3), # Keep connection open
]
scp = start_server(commands)
ae = AE()
ae.add_requested_context('1.2.840.10008.1.1')
# Sends a-associate-rq
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
time.sleep(0.4)
assert assoc.is_aborted
scp.shutdown()
def test_recv_bad_pdu_aborts(self):
"""Test receiving undecodable PDU causes abort."""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('wait', 0.1), # Don't want to accidentally kill the DUL
('send', b"\x07\x00\x00\x00\x00\x02\x00\x00"),
]
scp = start_server(commands)
ae = AE()
ae.network_timeout = 0.2
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
time.sleep(0.4)
assert assoc.is_aborted
scp.shutdown()
def test_exception_in_reactor(self):
"""Test that an exception being raised in the DUL reactor kills the
DUL and aborts the association.
"""
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('wait', 0.1), # Give some time to monkey patch
('send', a_release_rq), # Trigger the exception
('recv', None), # recv a-abort
('wait', 0.2),
]
scp = start_server(commands)
ae = AE()
ae.network_timeout = 0.2
ae.add_requested_context('1.2.840.10008.1.1')
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
def patch_read_pdu():
raise NotImplementedError
assoc.dul._read_pdu_data = patch_read_pdu
time.sleep(0.4)
assert assoc.is_aborted
scp.shutdown()
| 29.66474
| 77
| 0.606002
|
7e7039e50d16f5fa222e0fe8f82bb1ef80c3491a
| 3,746
|
py
|
Python
|
server/metodos/interpolacion.py
|
sinedie/FinalProcesosNumericos
|
082a3180cad5d9a32435ecb03b04d02dcf1a921f
|
[
"MIT"
] | null | null | null |
server/metodos/interpolacion.py
|
sinedie/FinalProcesosNumericos
|
082a3180cad5d9a32435ecb03b04d02dcf1a921f
|
[
"MIT"
] | null | null | null |
server/metodos/interpolacion.py
|
sinedie/FinalProcesosNumericos
|
082a3180cad5d9a32435ecb03b04d02dcf1a921f
|
[
"MIT"
] | null | null | null |
import sympy
import numpy as np
from .utils import parse_pol
def lagrange(x, y):
n = len(x)
tabla = np.zeros((n, n))
for i in range(n):
Li = 1
den = 1
for j in range(n):
if j != i:
paux = [1, -x[j]]
Li = np.convolve(Li, paux)
den *= x[i] - x[j]
tabla[i, :] = y[i] * Li / den
pols = [sum(tabla).tolist()]
polinomio = parse_pol(pols)
return polinomio, pols
def diferencias_newton(x, y):
n = len(x)
tabla = np.zeros((n, n + 1))
tabla[:, 0] = x
tabla[:, 1] = y
for j in range(2, n + 1):
for i in range(j - 1, n):
denominador = tabla[i, 0] - tabla[i - j + 1, 0]
tabla[i, j] = (tabla[i, j - 1] - tabla[i - 1, j - 1]) / denominador
pol = 0
coef = np.diag(tabla[:, 1:])
x_symbol = sympy.Symbol("x")
for i in range(len(coef)):
const = coef[i]
for j in range(i):
const *= x_symbol - x[j]
pol += const
return [str(sympy.simplify(pol))], tabla.tolist()
def spline_lineal(x, y):
d = 1
n = len(x)
A = np.zeros(((d + 1) * (n - 1), (d + 1) * (n - 1)))
b = np.zeros(((d + 1) * (n - 1), 1))
c = 0
h = 0
for i in range(n - 1):
A[i, c] = x[i]
A[i, c + 1] = 1
b[i] = y[i]
c += 2
h += 1
c = 0
for i in range(1, n):
A[h, c] = x[i]
A[h, c + 1] = 1
b[h] = y[i]
c += 2
h += 1
val = np.dot(np.linalg.inv(A), b)
pols = np.reshape(val, (n - 1, d + 1))
polinomios = parse_pol(pols)
return polinomios, pols.tolist()
def spline_cuadratico(x, y):
d = 2
n = len(x)
A = np.zeros(((d + 1) * (n - 1), (d + 1) * (n - 1)))
b = np.zeros(((d + 1) * (n - 1), 1))
cua = x ** 2
c = 0
h = 0
for i in range(n - 1):
A[i, c] = cua[i]
A[i, c + 1] = x[i]
A[i, c + 2] = 1
b[i] = y[i]
c += 3
h += 1
c = 0
for i in range(1, n):
A[h, c] = cua[i]
A[h, c + 1] = x[i]
A[h, c + 2] = 1
b[h] = y[i]
c += 3
h += 1
c = 0
for i in range(1, n - 1):
A[h, c] = 2 * x[i]
A[h, c + 1] = 1
A[h, c + 3] = -2 * x[i]
A[h, c + 4] = -1
b[h] = 0
c += 4
h += 1
A[h, 0] = 2
b[h] = 0
val = np.dot(np.linalg.inv(A), b)
pols = np.reshape(val, (n - 1, d + 1))
polinomios = parse_pol(pols)
return polinomios, pols.tolist()
def spline_cubico(x, y):
d = 3
n = len(x)
A = np.zeros(((d + 1) * (n - 1), (d + 1) * (n - 1)))
b = np.zeros(((d + 1) * (n - 1), 1))
cua = x ** 2
cub = x ** 3
c = 0
h = 0
for i in range(n - 1):
A[i, c : c + 4] = [cub[i], cua[i], x[i], 1]
b[i] = y[i]
c += 4
h += 1
c = 0
for i in range(1, n):
A[h, c : c + 4] = [cub[i], cua[i], x[i], 1]
b[h] = y[i]
c += 4
h += 1
c = 0
for i in range(1, n - 1):
A[h, c] = 3 * cua[i]
A[h, c + 1] = 2 * x[i]
A[h, c + 2] = 1
A[h, c + 4] = -3 * cua[i]
A[h, c + 5] = -2 * x[i]
A[h, c + 6] = -1
b[h] = 0
c += 4
h += 1
c = 0
for i in range(1, n - 1):
A[h, c] = 6 * x[i]
A[h, c + 1] = 2
A[h, c + 4] = -6 * x[i]
A[h, c + 5] = -2
b[h] = 0
c += 4
h += 1
A[h, 0] = 6 * x[0]
A[h, 1] = 2
b[h] = 0
h += 1
A[h, c] = 6 * x[-1]
A[h, c + 1] = 2
b[h] = 0
val = np.dot(np.linalg.inv(A), b)
pols = np.reshape(val, (n - 1, d + 1))
polinomios = parse_pol(pols)
return polinomios, pols.tolist()
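# Hedged usage sketch (not part of the original module): diferencias_newton depends
# only on numpy and sympy, so it can be smoke-tested on a tiny made-up data set.
if __name__ == "__main__":
    xs = np.array([0.0, 1.0, 2.0])
    ys = np.array([1.0, 2.0, 5.0])
    pol, tabla = diferencias_newton(xs, ys)
    print(pol)    # simplified Newton polynomial, equivalent to x**2 + 1 for these points
    print(tabla)  # divided-difference table as a nested list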
| 20.927374
| 79
| 0.368927
|
27965044f2e55e9370b616c858176d7675dd0687
| 456
|
py
|
Python
|
backend/flask/run.py
|
mdda/2014-07_Singapore-Maritime-Hackathon
|
0629f78f4214b5968c902ed2f3f98d872e6be998
|
[
"MIT"
] | 1
|
2020-05-28T03:23:34.000Z
|
2020-05-28T03:23:34.000Z
|
backend/flask/run.py
|
mdda/2014-07_Singapore-Maritime-Hackathon
|
0629f78f4214b5968c902ed2f3f98d872e6be998
|
[
"MIT"
] | null | null | null |
backend/flask/run.py
|
mdda/2014-07_Singapore-Maritime-Hackathon
|
0629f78f4214b5968c902ed2f3f98d872e6be998
|
[
"MIT"
] | null | null | null |
from www import app
HOST = 'localhost' # This restricts incoming calls to the local machine
#HOST = '0.0.0.0' # This allows incoming calls from outside the machine (Windows will ask for Firewall permission)
PORT = 7882 # Arbitrary port (epoch accessible from outside)
import os
debug=os.environ.get("FLASK_DEBUG", False) # Better default choice than True
if debug:
print "Debugging is on"
app.run(host=HOST, port=PORT, debug=debug, )
| 32.571429
| 118
| 0.725877
|
f8afe19a39c310809941ec2a628ed0f07aee764d
| 6,096
|
py
|
Python
|
combined_phase1.py
|
ozgesevgili/n-hance
|
d8f967f8c866e28305ea24bd38021c66bf981d90
|
[
"Apache-2.0"
] | 4
|
2019-02-06T13:06:56.000Z
|
2022-02-01T09:09:03.000Z
|
combined_phase1.py
|
ozgesevgili/n-hance
|
d8f967f8c866e28305ea24bd38021c66bf981d90
|
[
"Apache-2.0"
] | null | null | null |
combined_phase1.py
|
ozgesevgili/n-hance
|
d8f967f8c866e28305ea24bd38021c66bf981d90
|
[
"Apache-2.0"
] | null | null | null |
import xml.etree.ElementTree as ET
import nltk
import string
import operator
from nltk.corpus import stopwords
# takes path and returns list of sentences.
def read_data(path="/Users/ozge/Documents/semeval2017_pun_task/data/test/subtask1-homographic-test.xml"):
result = list()
tree = ET.parse(path)
root = tree.getroot()
for text in root:
text_id = text.attrib['id']
sentence = ''
for word in text:
sentence += ' ' + word.text
        # (sentence, text id)
result.append((sentence.strip(), text_id))
return result
# takes two paths and according to overlap, assign puns or not puns.
def count_puns(path1="/Users/ozge/Documents/semeval2017_pun_task/data/test/subtask1-homographic-test.xml",
path2="/Users/ozge/Documents/semeval2017_pun_task/data/test/subtask2-homographic-test.xml"):
# sentence ids which have pun.
sentence_ids = list()
tree1 = ET.parse(path1)
root1 = tree1.getroot()
tree2 = ET.parse(path2)
root2 = tree2.getroot()
text_ids1 = list()
for text in root1:
text_ids1.append(text.attrib['id'])
for text in root2:
text_id = text.attrib['id']
if text_id in text_ids1:
sentence_ids.append(text_id)
return sentence_ids
def create_pair(sentences, stop_words):
tokens = nltk.word_tokenize(sentences.lower())
tokens_filtered = list()
punctuations = list(string.punctuation)
# remove stopwords
for word in tokens:
if word not in stop_words and word not in punctuations:
tokens_filtered.append(word)
# list of pair tuple
pair_list = list()
for word in tokens_filtered:
for other_word in tokens_filtered[tokens_filtered.index(word) + 1:]:
pair_list.append((word, other_word))
return pair_list
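# Hedged illustration (not part of the original script): once stopwords and punctuation
# are dropped, create_pair emits every ordered co-occurrence pair of the remaining
# tokens, e.g. with a made-up stopword list
# create_pair("A horse is a stable animal.", ["a", "is"]) ->
# [('horse', 'stable'), ('horse', 'animal'), ('stable', 'animal')]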
def load_pmi_scores(path):
lines = open(path).readlines()
pair_pmi = {}
for line in lines:
pair = line.split(":")[0]
word1 = pair.split(",")[0]
word2 = pair.split(",")[1]
score_ = line.split(":")[1]
score = float(score_[:len(score_) - 1])
pair_pmi[(word1, word2)] = score
return pair_pmi
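# Hedged note (not part of the original script): load_pmi_scores expects one
# "word1,word2:score" entry per line, so a line such as "horse,stable:3.21"
# becomes pair_pmi[('horse', 'stable')] = 3.21.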
def find_pair_scores(pair_list, scores):
pair_scores = {}
for pair in pair_list:
word1 = pair[0].encode('ascii', 'ignore')
word2 = pair[1].encode('ascii', 'ignore')
try:
pair_scores[(word1, word2)] = scores[(word1, word2)]
except KeyError:
continue
# print pair_scores
try:
return sorted(pair_scores.items(), key=operator.itemgetter(1), reverse=True)[0], pair_scores
except IndexError:
return None, pair_scores
def write_scores(text_id, scores, path="/Users/ozge/Desktop/sorted.scores.txt"):
file = open(path, 'a')
values = str(text_id) + ":"
for score in scores:
values += str(score) + " "
values += '\n'
file.write(values)
file.close()
def does_contain_pun(text_id, pair_scores, threshold=0.1):
prediction = False
sorted_pair_scores = sorted(pair_scores.items(), key=operator.itemgetter(1), reverse=True)
print sorted_pair_scores
scores = pair_scores.values()
sorted_scores = sorted(scores, reverse=True)
print sorted_scores
write_scores(text_id, sorted_scores)
diff = sorted_scores[0] - sorted_scores[-1]
# TODO: normalization and then apply quartile
    if diff > threshold:
prediction = True
return prediction
if __name__ == "__main__":
scores = load_pmi_scores("/Users/ozge/Desktop/score5000.homo.all.combined.txt")
print "scores are loaded.."
sentence_list = read_data()
print "data is read.."
pun_ids = count_puns()
print "text_ids which contain pun are taken.."
subtask1_true_positive_counter = 0
subtask1_true_negative_counter = 0
subtask1_false_positive_counter = 0
subtask1_false_negative_counter = 0
for sentence_info in sentence_list:
sentence = sentence_info[0]
text_id = sentence_info[1]
print sentence
pair_list = create_pair(sentence, stopwords.words("english"))
#print pair_list
try:
max_pair_score, pair_scores = find_pair_scores(pair_list, scores)
print max_pair_score
predicted = does_contain_pun(text_id, pair_scores, 2)
truth = True if text_id in pun_ids else False
if predicted and truth:
subtask1_true_positive_counter += 1
elif predicted and not truth:
subtask1_true_negative_counter += 1
elif not predicted and truth:
subtask1_false_positive_counter += 1
elif not predicted and not truth:
subtask1_false_negative_counter += 1
else:
continue
except:
continue
print "true positive", subtask1_true_positive_counter
print "true negative", subtask1_true_negative_counter
print "false positive", subtask1_false_positive_counter
print "false negative", subtask1_false_negative_counter
print len(sentence_list)
'''
sentence1 = "I used to be banker, I lost interest"
sentence = "Cinderella was thrown off the basketball team because she ran away from the ball."
sentence = "I'm dying, Tom croaked."
sentence = "Old swords never rust, they just lose their temper."
sentence3 = "My father slept under the bed, I think he was a little potty."
sentence2 = "Quick, dive into those reeds! 'Tom rushed'"
sentence = "The bee got married, he found his honey."
sentence = "My bakery burned down last night, and now my business is toast."
sentence = "An optometrist fell into a lens grinder and made a spectacle of himself."
sentence = "A horse is a very stable animal."
pair_list = create_pair(sentence, stopwords.words("english"))
# print pair_list
max_pair_score, pair_scores = find_pair_scores(pair_list, scores)
print max_pair_score
does_contain_pun("hom_1", pair_scores)'''
| 30.178218
| 107
| 0.658465
|
d60e0c19bff8c666f9e62aa26654c5e48d148bca
| 21
|
py
|
Python
|
kmapper/_version.py
|
deargle/kepler-mapper
|
925f09c7016624ba93a5f30f15c2330a43524c3c
|
[
"MIT"
] | 167
|
2016-09-26T22:47:17.000Z
|
2022-03-22T04:13:11.000Z
|
kmapper/_version.py
|
deargle/kepler-mapper
|
925f09c7016624ba93a5f30f15c2330a43524c3c
|
[
"MIT"
] | 21
|
2016-09-26T05:25:03.000Z
|
2021-08-30T13:51:06.000Z
|
kmapper/_version.py
|
deargle/kepler-mapper
|
925f09c7016624ba93a5f30f15c2330a43524c3c
|
[
"MIT"
] | 22
|
2017-02-03T07:48:44.000Z
|
2022-01-14T08:51:19.000Z
|
__version__ = "1.4.0"
| 21
| 21
| 0.666667
|
398d834f21b2cf6e1f461cd6fe0d9fe95227eec6
| 4,507
|
py
|
Python
|
docs/cunumeric/source/comparison/_comparison_generator.py
|
bryevdv/cunumeric
|
7965ceb96d3252371c22cf32d38ac91c4db77a38
|
[
"Apache-2.0"
] | null | null | null |
docs/cunumeric/source/comparison/_comparison_generator.py
|
bryevdv/cunumeric
|
7965ceb96d3252371c22cf32d38ac91c4db77a38
|
[
"Apache-2.0"
] | null | null | null |
docs/cunumeric/source/comparison/_comparison_generator.py
|
bryevdv/cunumeric
|
7965ceb96d3252371c22cf32d38ac91c4db77a38
|
[
"Apache-2.0"
] | null | null | null |
import importlib
def _filter(obj, n):
try:
return (
n
not in [
"test",
"add_docstring",
"abs",
"add_newdoc",
"add_newdoc_ufunc",
"alen",
"alltrue",
"bitwise_not",
"compare_chararrays",
"cumproduct",
"fastCopyAndTranspose",
"get_array_wrap",
"iterable",
"max",
"min",
"ndim",
"product",
"recfromcsv",
"recfromtxt",
"round",
"safe_eval",
"set_numeric_ops",
"size",
"sometrue",
"loads",
"mafromtxt",
"matmul",
"ndfromtxt",
] # not in blocklist
and callable(getattr(obj, n)) # callable
and not isinstance(getattr(obj, n), type) # not class
and n[0].islower() # starts with lower char
and not n.startswith("__") # not special methods
)
except: # noqa: E722
return False
def _get_functions(obj):
return set([n for n in dir(obj) if (_filter(obj, n))])
def _import(mod, klass):
try:
obj = importlib.import_module(mod)
except ModuleNotFoundError:
return None, None
if klass:
obj = getattr(obj, klass)
return obj, ":meth:`{}.{}.{{}}`".format(mod, klass)
else:
# ufunc is not a function
return obj, ":obj:`{}.{{}}`".format(mod)
def _section(header, mod_ext, other_lib, klass=None, exclude_mod=None):
base_mod = "numpy" + mod_ext
other_mod = other_lib + mod_ext
base_obj, base_fmt = _import(base_mod, klass)
base_funcs = _get_functions(base_obj)
lg_obj, lg_fmt = _import(other_mod, klass)
lg_funcs = []
for f in _get_functions(lg_obj):
obj = getattr(lg_obj, f)
if getattr(obj, "_cunumeric_implemented", False):
lg_funcs.append(f)
lg_funcs = set(lg_funcs)
if exclude_mod:
exclude_obj, _ = _import(exclude_mod, klass)
exclude_funcs = _get_functions(exclude_obj)
base_funcs -= exclude_funcs
lg_funcs -= exclude_funcs
buf = [
header,
"~" * len(header),
"",
]
buf += [
".. currentmodule:: cunumeric",
"",
".. autosummary::",
" :toctree: generated/",
"",
]
buf += [
".. csv-table::",
" :header: NumPy, {}, single-GPU/CPU, multi-GPU/CPU".format(
other_mod
),
"",
]
for f in sorted(base_funcs):
base_cell = base_fmt.format(f)
lg_cell = r"\-"
single_gpu_cell = ""
multi_gpu_cell = ""
if f in lg_funcs:
lg_cell = lg_fmt.format(f)
obj = getattr(lg_obj, f)
if obj.__doc__ is not None and "Single GPU" in obj.__doc__:
multi_gpu_cell = "No"
single_gpu_cell = "Yes"
elif obj.__doc__ is not None and "Multiple GPUs" in obj.__doc__:
multi_gpu_cell = "Yes"
single_gpu_cell = "Yes"
if getattr(base_obj, f) is getattr(lg_obj, f):
lg_cell = "{} (*alias of* {})".format(lg_cell, base_cell)
line = " {}, {}, {}, {}".format(
base_cell, lg_cell, single_gpu_cell, multi_gpu_cell
)
buf.append(line)
buf += [
"",
".. Summary:",
" Number of NumPy functions: {}".format(len(base_funcs)),
" Number of functions covered by "
f"{other_lib}: {len(lg_funcs & base_funcs)}",
" {} specific functions:".format(other_lib),
]
buf += [" - {}".format(f) for f in (lg_funcs - base_funcs)]
buf += [
"",
]
return buf
def generate(other_lib):
buf = []
buf += [
"NumPy vs cuNumeric APIs",
"------------------------",
"",
]
buf += _section("Module-Level", "", other_lib)
buf += _section("Multi-Dimensional Array", "", other_lib, klass="ndarray")
buf += _section("Linear Algebra", ".linalg", other_lib)
buf += _section("Discrete Fourier Transform", ".fft", other_lib)
buf += _section("Random Sampling", ".random", other_lib)
return "\n".join(buf)
if __name__ == "__main__":
print(generate("cunumeric"))
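# --- Editor's note: illustrative usage sketch (not part of the original file) ---
# generate() returns the complete RST comparison document as a single string,
# so a docs build step could capture it into an include file; the file name
# below is an assumption for illustration only.
#
#     rst_text = generate("cunumeric")
#     with open("comparison_table.rst", "w") as fh:
#         fh.write(rst_text)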
| 27.993789
| 78
| 0.492789
|
dc8522ca6ef614219bed35bbb5ad5bb5d7a09847
| 11,624
|
py
|
Python
|
my.py
|
Anantuo/pySpider
|
5dbb707d750f5d01b20bd410b9f1d6e46b8a3f99
|
[
"Apache-2.0"
] | 5
|
2016-06-15T10:43:03.000Z
|
2019-11-29T07:39:54.000Z
|
my.py
|
Anantuo/pySpider
|
5dbb707d750f5d01b20bd410b9f1d6e46b8a3f99
|
[
"Apache-2.0"
] | null | null | null |
my.py
|
Anantuo/pySpider
|
5dbb707d750f5d01b20bd410b9f1d6e46b8a3f99
|
[
"Apache-2.0"
] | 4
|
2016-01-15T14:22:40.000Z
|
2019-04-17T03:35:55.000Z
|
from pyspider.libs.base_handler import *
from bs4 import BeautifulSoup
import cx_Oracle
import hashlib
import time
import re
import os
import redis
from urllib.parse import urljoin
from urllib.parse import urlparse
from urllib.parse import urlunparse
from urllib.parse import quote
'''Place this file under /opt/pythontools'''
class My(BaseHandler):
mkdir = '/home/oracle/Gis/'
r = redis.Redis()
download_key = 'download'
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
table_name = ['选址意见书', '建设用地规划许可证', '建设工程规划许可证', '乡村建设规划许可证',
'规划验收合格证', '规划验收合格证', '批前公示', '批后公布', 'Unknow', '选址意见书_批前',
'建设用地规划许可证_批前', '建设工程规划许可证_批前', '乡村建设规划许可证_批前',
'规划验收合格证_批前', '挂牌', '竣工验收']
city_name = {'CZ':'潮州', 'DG':'东莞', 'FS':'佛山', 'GZ':'广州', 'HY':'河源', 'HZ':'惠州',
'JM':'江门', 'JM_X':'江门', 'JY':'揭阳', 'MM':'茂名', 'MZ':'梅州', 'QY':'清远',
'SG':'韶关', 'ST':'汕头', 'SW':'汕尾', 'SZ':'深圳', 'YF':'云浮', 'YJ':'阳江',
'ZH':'珠海', 'ZJ':'湛江', 'ZQ':'肇庆', 'ZS':'中山',
}
source_name = {'GH':'规划', 'GT':'国土', 'JS':'建设', 'JT':'交通', 'GF':'公服',}
headers = {
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding":"gzip, deflate, sdch",
"Accept-Language":"zh-CN,zh;q=0.8",
"Cache-Control":"max-age=0",
"Connection":"keep-alive",
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36"
}
crawl_config = {
"headers" : headers,
"timeout" : 100
}
def on_start(self):
print('请重新启动pyspider')
pass
# def index_page(self, response):
# pass
# def next_list(self, response):
# pass
def get_date(self):
return time.strftime("%Y-%m-%d", time.localtime())
'''Return the base URL and the URL parameters'''
def get_params(self, response=None, link=''):
if response == None and not link:
raise KeyError
if response != None:
url, params_str = response.url.split('?')
else:
url, params_str = link.split('?')
params = {}
for i in params_str.split('&'):
temp = i.split('=')
params[temp[0]] = temp[1]
return (url, params)
'''Build an absolute link'''
def real_path(self, a, b):
path = urljoin(a, b)
arr = urlparse(path)
real_path = os.path.normpath(arr[2])
return urlunparse((arr.scheme, arr.netloc, real_path, arr.params, arr.query, arr.fragment))
# def js_css_download(self, response):
# # storage location
# path = response.save['path']
# file_name = response.save['name']
# # create the directory
# if not os.path.exists(path):
# os.makedirs(path)
# with open(path + file_name, 'w') as f:
# f.write(response.text)
@config(priority=2)
def content_page(self, response):
'''Build the storage location'''
url = response.url
m = hashlib.md5()
m.update(url.encode())
web_name = '/' + m.hexdigest() + '/'
path = self.mkdir + self.name + '/' + response.save['source'] + '/' + web_name
if not os.path.exists(path):
os.makedirs(path)
soup = BeautifulSoup(response.text)
'''Extract JS files'''
script_tag = soup.find_all('script', src=True)
for each in script_tag:
js_m = hashlib.md5()
js_m.update(each['src'].encode())
js_name = js_m.hexdigest()
# Get the request URL
request_url = self.real_path(url, each['src'])
# Rewrite the page's JS path to a relative one
each['src'] = js_name + '.js'
# Fetch the JS file
d = {}
d['url'] = request_url
d['type'] = 'attachment'
d['path'] = path
d['file_name'] = each['src']
self.r.rpush(self.download_key, str(d))
# self.crawl(request_url, fetch_type='js', callback = self.js_css_download, save = {'path':path, 'name':each['src']})
'''Extract CSS files'''
css_tag = soup.find_all('link', type='text/css')
for each in css_tag:
css_m = hashlib.md5()
css_m.update(each['href'].encode())
css_name = css_m.hexdigest()
# Get the request URL
request_url = self.real_path(url, each['href'])
# Rewrite the page's CSS path to a relative one
each['href'] = css_name + '.css'
# Fetch the CSS file
d = {}
d['url'] = request_url
d['type'] = 'attachment'
d['path'] = path
d['file_name'] = each['href']
self.r.rpush(self.download_key, str(d))
# self.crawl(request_url, callback = self.js_css_download, save = {'path':path, 'name':each['href']})
'''Extract images referenced by img tags'''
images = soup('img', src=True) + soup('img', {'data-src': re.compile(r'')})
image_list = []
if images is not None:
for each in images:
try:
image_url = self.real_path(url, each['src'])
except KeyError:
image_url = self.real_path(url, each['data-src'])
k = image_url.split('/')
link = k[0]
for i in k[1:]:
link += '/'+ quote(i)
image_url = link
if image_url not in image_list:
# t = re.search('.asp', image_url)
# if t is None:
image_list.append(image_url)
d = {}
d['type'] = 'image'
d['path'] = path
d['url'] = image_url
m = hashlib.md5()
m.update(image_url.encode())
if re.search('.jpg', image_url) is not None:
each['src'] = m.hexdigest() + '.jpg'
elif re.search('.png', image_url) is not None:
each['src'] = m.hexdigest() + '.png'
elif re.search('.gif', image_url) is not None:
each['src'] = m.hexdigest() + '.gif'
d['file_name'] = each['src']
self.r.rpush(self.download_key, str(d))
'''Extract attachments'''
attachments = soup('a', {'href': re.compile(r'^http')})
attachment_list = []
if attachments is not None:
for each in attachments:
href = each['href']
type_name = None
if re.search('.jpg', href) is not None:
type_name = 'jpg'
elif re.search('.png', href) is not None:
type_name = 'png'
elif re.search('.gif', href) is not None:
type_name = 'gif'
elif re.search('.doc', href) is not None:
type_name = 'doc'
elif re.search('.pdf', href) is not None:
type_name = 'pdf'
elif re.search('.zip', href) is not None:
type_name = 'zip'
elif re.search('.rar', href) is not None:
type_name = 'rar'
if type_name is not None:
attachment_url = self.real_path(url, href)
k = attachment_url.split('/')
link = k[0]
for i in k[1:]:
link += '/'+ quote(i)
attachment_url = link
if attachment_url not in attachment_list and attachment_url not in image_list:
attachment_list.append(href)
d = {}
d['type'] = 'attachment'
d['path'] = path
d['url'] = attachment_url
m = hashlib.md5()
m.update(attachment_url.encode())
each['href'] = m.hexdigest() + '.' + type_name
d['file_name'] = each['href']
self.r.rpush(self.download_key, str(d))
'''Extract background images'''
for key in soup.find_all(background=True):
image_url = self.real_path(url, key['background'])
k = image_url.split('/')
link = k[0]
for i in k[1:]:
link += '/'+ quote(i)
image_url = link
if image_url not in image_list:
image_list.append(image_url)
d = {}
d['type'] = 'image'
d['path'] = path
d['url'] = image_url
m = hashlib.md5()
m.update(image_url.encode())
if re.search('.jpg', image_url) is not None:
key['background'] = m.hexdigest() + '.jpg'
elif re.search('.png', image_url) is not None:
key['background'] = m.hexdigest() + '.png'
elif re.search('.gif', image_url) is not None:
key['background'] = m.hexdigest() + '.gif'
d['file_name'] = key['background']
self.r.rpush(self.download_key, str(d))
return {
"url": url,
"html": str(soup),
"type": response.save['type'],
"source": response.save['source']
}
def on_result(self, result):
if result is not None:
m = hashlib.md5()
m.update(result['url'].encode())
web_name = '/' + m.hexdigest() + '/'
path = self.mkdir + self.name + '/' + result['source'] + '/' + web_name
if not os.path.exists(path):
os.makedirs(path)
page_path = path + 'page.html'
f = open(page_path, 'wb')
f.write(result['html'].encode('utf-8'))
f.close()
'''Strip all tags from the page, keeping only the plain text for full-text indexing'''
content_path = path + 'content.txt'
soup = BeautifulSoup(result['html'], 'html.parser')
for i in soup('style') + soup('script'):
i.extract()
content = soup.decode('utf-8')
content = re.sub(r'<[/!]?\w+[^>]*>', '\n', content)
content = re.sub(r'<!--[\w\W\r\n]*?-->', '\n', content)
content = re.sub(r'\s+', '\n', content)
print(self.get_date())
values = (result['url'], path,
self.get_date(), self.city_name[self.name],
result['type'], content, result['source'])
'''Save to the database'''
conn = None
try:
dsn = cx_Oracle.makedsn('localhost', 1521, 'urbandeve')
conn = cx_Oracle.connect(user='C##WWPA', password='wwpa5678', dsn=dsn)
try:
cursor = conn.cursor()
cursor.setinputsizes(cx_Oracle.NCHAR, cx_Oracle.NCHAR,
cx_Oracle.DATETIME, cx_Oracle.NCHAR,
cx_Oracle.NCHAR, cx_Oracle.CLOB, cx_Oracle.NCHAR)
cursor.prepare('''insert into TBL_ORGLPBLC
(ORIGINALADDRESS, STORAGEPATH,
ARCHIVEDATE, ASCRIPTIONCITY, DOCUMENTTYPE, BODY, SOURCE)
values(:1, :2, to_date(:3, 'yyyy-mm-dd'), :4, :5, :6, :7)''')
cursor.execute(None, values)
conn.commit()
finally:
cursor.close()
finally:
if conn is not None:
conn.close()
super(My, self).on_result(result)
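# --- Editor's illustrative sketch (hypothetical, not part of the original spider) ---
# content_page() pushes download jobs onto the Redis list ``download`` as
# str()-serialized dicts with the keys: url, type ('image'/'attachment'),
# path and file_name. A separate downloader process is assumed to drain that
# queue; the helper below is only a sketch of such a consumer.
def consume_download_queue(redis_client, key='download'):
    """Hypothetical consumer: pop one queued job and fetch it to disk."""
    import ast
    import requests
    raw = redis_client.lpop(key)
    if raw is None:
        return None
    job = ast.literal_eval(raw.decode() if isinstance(raw, bytes) else raw)
    resp = requests.get(job['url'], timeout=30)
    if not os.path.exists(job['path']):
        os.makedirs(job['path'])
    with open(os.path.join(job['path'], job['file_name']), 'wb') as fh:
        fh.write(resp.content)
    return job['url']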
| 39.808219
| 132
| 0.467567
|
507f62fa5505ee4429b8005646cd139c236b0fb2
| 4,479
|
py
|
Python
|
research/cv/squeezenet1_1/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/squeezenet1_1/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/squeezenet1_1/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval squeezenet."""
import os
import ast
import argparse
from mindspore import context
from mindspore.common import set_seed
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.CrossEntropySmooth import CrossEntropySmooth
from src.squeezenet import SqueezeNet as squeezenet
from src.dataset import create_dataset_imagenet as create_dataset
from src.config import config_imagenet as config
local_data_url = '/cache/data'
local_ckpt_url = '/cache/ckpt.ckpt'
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--dataset', type=str, default='imagenet', help='Dataset.')
parser.add_argument('--net', type=str, default='squeezenet', help='Model.')
parser.add_argument('--run_cloudbrain', type=ast.literal_eval, default=False,
help='Whether it is running on CloudBrain platform.')
parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
parser.add_argument('--dataset_path', type=str, default='', help='Dataset path')
parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
parser.add_argument('--data_url', type=str, default="None", help='Datapath')
parser.add_argument('--train_url', type=str, default="None", help='Train output path')
args_opt = parser.parse_args()
set_seed(1)
if __name__ == '__main__':
target = args_opt.device_target
if args_opt.device_target != "Ascend":
raise ValueError("Unsupported device target.")
# init context
device_id = os.getenv('DEVICE_ID')
device_id = int(device_id) if device_id else 0
context.set_context(mode=context.GRAPH_MODE,
device_target=target,
device_id=device_id)
# create dataset
if args_opt.run_cloudbrain:
import moxing as mox
mox.file.copy_parallel(args_opt.checkpoint_path, local_ckpt_url)
mox.file.copy_parallel(args_opt.data_url, local_data_url)
dataset = create_dataset(dataset_path=local_data_url,
do_train=False,
repeat_num=1,
batch_size=config.batch_size,
target=target,
run_distribute=False)
else:
dataset = create_dataset(dataset_path=args_opt.dataset_path,
do_train=False,
repeat_num=1,
batch_size=config.batch_size,
target=target,
run_distribute=False)
step_size = dataset.get_dataset_size()
# define net
net = squeezenet(num_classes=config.class_num)
# load checkpoint
if args_opt.run_cloudbrain:
param_dict = load_checkpoint(local_ckpt_url)
else:
param_dict = load_checkpoint(args_opt.checkpoint_path)
load_param_into_net(net, param_dict)
net.set_train(False)
# define loss
if args_opt.dataset == "imagenet":
if not config.use_label_smooth:
config.label_smooth_factor = 0.0
loss = CrossEntropySmooth(sparse=True,
reduction='mean',
smooth_factor=config.label_smooth_factor,
num_classes=config.class_num)
else:
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
# define model
model = Model(net,
loss_fn=loss,
metrics={'top_1_accuracy', 'top_5_accuracy'})
# eval model
res = model.eval(dataset)
print("result:", res, "ckpt=", local_ckpt_url)
| 40.718182
| 93
| 0.652378
|
7ada29076bf1bfe5a7fa0fbbf9a72ef3e552749c
| 1,210
|
py
|
Python
|
src/polaris_follower/controllers/follower.py
|
jaskirat1208/turtlebot-polaris
|
fe40b0bcccaffab2ea2ba204905989ed81d69d14
|
[
"BSD-2-Clause"
] | null | null | null |
src/polaris_follower/controllers/follower.py
|
jaskirat1208/turtlebot-polaris
|
fe40b0bcccaffab2ea2ba204905989ed81d69d14
|
[
"BSD-2-Clause"
] | null | null | null |
src/polaris_follower/controllers/follower.py
|
jaskirat1208/turtlebot-polaris
|
fe40b0bcccaffab2ea2ba204905989ed81d69d14
|
[
"BSD-2-Clause"
] | null | null | null |
from time import sleep
import rospy
from polaris_follower.controllers.base_controller import BaseController
from polaris_follower.utils import Point
class Follower(BaseController):
"""
Given a robot and a planner, drives the robot along the planned path to its destination.
"""
def __init__(self, robot, planner, *args, **kwargs):
"""
:param robot: The robot which you want to move
:param planner: Means of getting path to be followed
"""
self.robot = robot
self.planner = planner
super().__init__(*args, **kwargs)
def simulate(self):
"""
Moves the robot along the path returned by the planner, one waypoint at a time.
:return: None
"""
sleep(1) # Sleep so that the robot positions can be updated first
robot_path = self.planner.get_path(Point(0, 0), Point(0, 1))
for p in robot_path:
rospy.loginfo(str.format("Heading to point {}", p))
self.robot.move_to_dest(p.x, p.y)
rospy.loginfo(str.format("Robot {} has reached its destination", self.robot.object_name))
| 33.611111
| 97
| 0.636364
|
aa749cbae2aa9cfcef8d89f66e5316dd08477a0f
| 3,222
|
py
|
Python
|
src/abaqus/BoundaryCondition/FluidCavityPressureBCState.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/BoundaryCondition/FluidCavityPressureBCState.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/BoundaryCondition/FluidCavityPressureBCState.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
from .BoundaryConditionState import BoundaryConditionState
class FluidCavityPressureBCState(BoundaryConditionState):
"""The FluidCavityPressureBCState object stores the propagating data for a fluid cavity
pressure boundary condition in a step. One instance of this object is created internally
by the FluidCavityPressureBC object for each step. The instance is also deleted
internally by the FluidCavityPressureBC object.
The FluidCavityPressureBCState object has no constructor or methods.
The FluidCavityPressureBCState object is derived from the BoundaryConditionState object.
Attributes
----------
magnitude: float
A Float specifying the fluid cavity pressure magnitude.
magnitudeState: SymbolicConstant
A SymbolicConstant specifying the propagation state of the fluid cavity pressure
magnitude. Possible values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
amplitudeState: SymbolicConstant
A SymbolicConstant specifying the propagation state of the amplitude reference. Possible
values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
status: SymbolicConstant
A SymbolicConstant specifying the propagation state of the :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` object. Possible values are:
NOT_YET_ACTIVE
CREATED
PROPAGATED
MODIFIED
DEACTIVATED
NO_LONGER_ACTIVE
TYPE_NOT_APPLICABLE
INSTANCE_NOT_APPLICABLE
PROPAGATED_FROM_BASE_STATE
MODIFIED_FROM_BASE_STATE
DEACTIVATED_FROM_BASE_STATE
BUILT_INTO_MODES
amplitude: str
A String specifying the name of the amplitude reference. The String is empty if the
boundary condition has no amplitude reference.
Notes
-----
This object can be accessed by:
.. code-block:: python
import load
mdb.models[name].steps[name].boundaryConditionStates[name]
The corresponding analysis keywords are:
- BOUNDARY
"""
# A Float specifying the fluid cavity pressure magnitude.
magnitude: float = None
# A SymbolicConstant specifying the propagation state of the fluid cavity pressure
# magnitude. Possible values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
magnitudeState: SymbolicConstant = None
# A SymbolicConstant specifying the propagation state of the amplitude reference. Possible
# values are UNSET, SET, UNCHANGED, FREED, and MODIFIED.
amplitudeState: SymbolicConstant = None
# A SymbolicConstant specifying the propagation state of the BoundaryConditionState object. Possible values are:
# NOT_YET_ACTIVE
# CREATED
# PROPAGATED
# MODIFIED
# DEACTIVATED
# NO_LONGER_ACTIVE
# TYPE_NOT_APPLICABLE
# INSTANCE_NOT_APPLICABLE
# PROPAGATED_FROM_BASE_STATE
# MODIFIED_FROM_BASE_STATE
# DEACTIVATED_FROM_BASE_STATE
# BUILT_INTO_MODES
status: SymbolicConstant = None
# A String specifying the name of the amplitude reference. The String is empty if the
# boundary condition has no amplitude reference.
amplitude: str = ''
| 37.905882
| 179
| 0.735258
|
b23895b8b7e4e1df295b3d487461bc777b37a7a7
| 6,528
|
py
|
Python
|
qa/rpc-tests/receivedby.py
|
chratos-system/chratos-core
|
26488032eff82a99b99c48bde31d3fbb1863f2f9
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/receivedby.py
|
chratos-system/chratos-core
|
26488032eff82a99b99c48bde31d3fbb1863f2f9
|
[
"MIT"
] | 1
|
2018-08-21T01:15:56.000Z
|
2018-08-21T01:15:56.000Z
|
qa/rpc-tests/receivedby.py
|
chratos-system/chratos-core
|
26488032eff82a99b99c48bde31d3fbb1863f2f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework.test_framework import ChratosTestFramework
from test_framework.util import *
def get_sub_array_from_array(object_array, to_match):
'''
Finds and returns a sub array from an array of arrays.
to_match should be a unique identifier of a sub array
'''
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
return item
return []
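# Illustrative example (editor's note): with
#     arr = [{"account": "", "amount": 1}, {"account": "savings", "amount": 2}]
# get_sub_array_from_array(arr, {"account": "savings"}) returns the second
# dict, and an empty list is returned when nothing matches.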
class ReceivedByTest(ChratosTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
return start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check not listed in listreceivedbyaddress because has 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
#Bury Tx under 10 block so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence < 10
assert_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confidence > 10, should not find Tx
assert_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
#Empty Tx
addr = self.nodes[1].getnewaddress()
assert_array_result(self.nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
'''
getreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
'''
listreceivedbyaccount + getreceivedbyaccount Test
'''
#set pre-state
addrArr = self.nodes[1].getnewaddress()
account = self.nodes[1].getaccount(addrArr)
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
# getreceivedbyaccount should return the same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbyaccount should return updated account balance
assert_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
# getreceivedbyaccount should return the updated balance
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
#Create a new account named "mynewaccount" that has a 0 balance
self.nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
# Test includeempty of listreceivedbyaccount
if received_by_account_json["amount"] != Decimal("0.0"):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
if __name__ == '__main__':
ReceivedByTest().main()
| 43.231788
| 139
| 0.632966
|
2bdad1f6a9cff4c4b2371baf32133ce50a85d4f2
| 1,522
|
py
|
Python
|
lib/scheduler.py
|
Toni-d-e-v/rxcsentinetal1
|
8af605f60cc6c6aa2b686d4e88fdc2051e0fc11e
|
[
"MIT"
] | null | null | null |
lib/scheduler.py
|
Toni-d-e-v/rxcsentinetal1
|
8af605f60cc6c6aa2b686d4e88fdc2051e0fc11e
|
[
"MIT"
] | null | null | null |
lib/scheduler.py
|
Toni-d-e-v/rxcsentinetal1
|
8af605f60cc6c6aa2b686d4e88fdc2051e0fc11e
|
[
"MIT"
] | null | null | null |
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import init
import misc
from models import Transient
from misc import printdbg
import time
import random
class Scheduler(object):
transient_key_scheduled = 'NEXT_SENTINEL_CHECK_AT'
random_interval_max = 1200
@classmethod
def is_run_time(self):
next_run_time = Transient.get(self.transient_key_scheduled) or 0
now = misc.now()
printdbg("current_time = %d" % now)
printdbg("next_run_time = %d" % next_run_time)
return now >= next_run_time
@classmethod
def clear_schedule(self):
Transient.delete(self.transient_key_scheduled)
@classmethod
def schedule_next_run(self, random_interval=None):
if not random_interval:
random_interval = self.random_interval_max
next_run_at = misc.now() + random.randint(1, random_interval)
printdbg("scheduling next sentinel run for %d" % next_run_at)
Transient.set(self.transient_key_scheduled, next_run_at,
next_run_at)
@classmethod
def delay(self, delay_in_seconds=None):
if not delay_in_seconds:
delay_in_seconds = random.randint(0, 60)
# do not delay longer than 60 seconds
# in case an int > 60 is given as argument
delay_in_seconds = delay_in_seconds % 60
printdbg("Delay of [%d] seconds for cron minute offset" % delay_in_seconds)
time.sleep(delay_in_seconds)
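# --- Editor's note: hypothetical usage sketch (not in the original file) ----
# A cron-driven entry point would typically combine the three class methods
# like this (run_sentinel_checks is an invented placeholder):
#
#     Scheduler.delay()                     # spread load within the minute
#     if Scheduler.is_run_time():
#         run_sentinel_checks()
#         Scheduler.schedule_next_run()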
| 29.843137
| 84
| 0.684625
|
46301cac4d99a8d2585e852e307dd94eddfd43d6
| 3,704
|
py
|
Python
|
app/action.py
|
hf-zhu/v2ex-action
|
69675f72959b87b2052cea12ad6f4b2460c03d3b
|
[
"MIT"
] | null | null | null |
app/action.py
|
hf-zhu/v2ex-action
|
69675f72959b87b2052cea12ad6f4b2460c03d3b
|
[
"MIT"
] | null | null | null |
app/action.py
|
hf-zhu/v2ex-action
|
69675f72959b87b2052cea12ad6f4b2460c03d3b
|
[
"MIT"
] | null | null | null |
import base64
import hashlib
import hmac
import json
import re
import time
import urllib.parse
import requests
import requests.packages.urllib3
from actions_toolkit import core
from app.util import now
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'
requests.packages.urllib3.disable_warnings()
class Action:
"""V2EX Action"""
timeout = 5
def __init__(self, hook, secret='', count=8):
self.hook = hook
self.secret = secret
self.count = count
self.contents = []
self.res = False
def wx(self):
data = {
'msgtype': 'markdown',
'markdown': {
'content': f'### Hi,小伙伴们\n 今天的周报交了嘛!\n{"".join(self.contents)}'
}
}
headers = {'Content-Type': 'application/json'}
resp = requests.post(url=self.hook,
headers=headers,
data=json.dumps(data),
timeout=Action.timeout,
verify=False)
self.res = resp.json()['errcode'] == 0
def ding(self):
timestamp = str(round(time.time() * 1000))
secret_enc = self.secret.encode('utf-8')
string_to_sign = f'{timestamp}\n{self.secret}'
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(secret_enc,
string_to_sign_enc,
digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
url = f'{self.hook}×tamp={timestamp}&sign={sign}'
data = {
'msgtype': 'markdown',
'markdown': {
'title': 'V2EX 当前热门',
'text': f'### Hi,同志们\n 今天的周报交了嘛!\n{"".join(self.contents)}'
}
}
headers = {'Content-Type': 'application/json'}
requests.post(url=url,
headers=headers,
data=json.dumps(data),
timeout=Action.timeout,
verify=False)
@staticmethod
def get_v2ex_hot_topics():
url = 'https://v2ex.com/?tab=hot'
headers = {'User-Agent': USER_AGENT}
contents = []
resp = requests.get(url=url,
headers=headers,
timeout=Action.timeout,
verify=False)
match = re.compile(
'<span class="item_hot_topic_title">(.*?)</span>', re.DOTALL)
for item in match.findall(resp.text):
try:
detail_url = 'https://v2ex.com' + re.search(
'<a href="(.*?)">', item.strip()).group(1)
title = re.search('">(.*?)</a>', item.strip()).group(1)
title = title.replace('[', '').replace(']', '')
content = f'> - [{title}]({detail_url})\n'
contents.append(content)
except Exception as e:
core.error(f'[{now()}] Error occurred, msg: {str(e)}')
return contents
def run(self):
core.info('Welcome to use V2EX Action ❤\n\n'
'📕 Getting Started Guide: https://github.com/marketplace/actions/v2ex-action\n'
'📣 Maintained by Yang Libin: https://github.com/yanglbme\n')
#contents = Action.get_v2ex_hot_topics()
# self.contents = contents[:self.count]
if 'weixin' in self.hook:
self.wx()
elif 'dingtalk' in self.hook:
self.ding()
core.info(f'[{now()}] Success, thanks for using @yanglbme/v2ex-action!')
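# Illustrative usage (editor's note): the Action entry point is assumed to be
# constructed from the workflow inputs, roughly:
#
#     Action(hook=webhook_url, secret=dingtalk_secret, count=8).run()
#
# run() dispatches to wx() for WeCom hooks and ding() for DingTalk hooks.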
| 34.943396
| 97
| 0.517819
|
bf10e6f17727a302c60622e3c273c0e33ffdc269
| 495
|
py
|
Python
|
Distributed Systems/Client Server/UDS Echo Server/solution.py
|
oleg-cherednik/hackerrank
|
a76580e300ad7af248ad7c7d6839777e554cc379
|
[
"Apache-2.0"
] | 7
|
2020-04-02T16:18:46.000Z
|
2021-02-12T14:06:44.000Z
|
Distributed Systems/Client Server/UDS Echo Server/solution.py
|
oleg-cherednik/HackerRank
|
a76580e300ad7af248ad7c7d6839777e554cc379
|
[
"Apache-2.0"
] | null | null | null |
Distributed Systems/Client Server/UDS Echo Server/solution.py
|
oleg-cherednik/HackerRank
|
a76580e300ad7af248ad7c7d6839777e554cc379
|
[
"Apache-2.0"
] | 11
|
2020-05-06T08:28:43.000Z
|
2021-12-08T17:25:45.000Z
|
#!/usr/bin/env python2
import socket
import threading
def process_client_connection(connection):
while True:
message = connection.recv(4096)
connection.send(message)
if message == "END":
break
if __name__ == '__main__':
sock = socket.socket(socket.AF_UNIX)
sock.bind("./socket")
sock.listen(10)
while True:
connection = sock.accept()[0]
threading.Thread(target=process_client_connection, args=(connection,)).start()
| 20.625
| 86
| 0.648485
|
43595067ac485f02353b0a2ed25d07914ff69ad6
| 25,167
|
py
|
Python
|
dist/weewx-3.9.2/bin/weewx/uwxutils.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 10
|
2017-01-05T17:30:48.000Z
|
2021-09-18T15:04:20.000Z
|
dist/weewx-3.9.2/bin/weewx/uwxutils.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 2
|
2019-07-21T10:48:42.000Z
|
2022-02-16T20:36:45.000Z
|
dist/weewx-3.9.2/bin/weewx/uwxutils.py
|
v0rts/docker-weewx
|
70b2f252051dfead4fcb74e74662b297831e6342
|
[
"Apache-2.0"
] | 12
|
2017-01-05T18:50:30.000Z
|
2021-10-05T07:35:45.000Z
|
# Adapted for use with weewx
#
# This source code may be freely used, including for commercial purposes
# Steve Hatchett info@softwx.com
# http://www.softwx.org/weather
"""
Functions for performing various weather related calculations.
Notes about pressure
Sensor Pressure raw pressure indicated by the barometer instrument
Station Pressure Sensor Pressure adjusted for any difference between
sensor elevation and official station elevation
Field Pressure (QFE) Usually the same as Station Pressure
Altimeter Setting (QNH) Station Pressure adjusted for elevation (assumes
standard atmosphere)
Sea Level Pressure (QFF) Station Pressure adjusted for elevation,
temperature and humidity
Notes about input parameters:
currentTemp - current instantaneous station temperature
meanTemp - average of current temp and the temperature 12 hours in
the past. If the 12 hour temp is not known, simply pass
the same value as currentTemp for the mean temp.
humidity - Value should be 0 to 100. For the pressure conversion
functions, pass a value of zero if you do not want
the algorithm to include the humidity correction factor
in the calculation. If you provide a humidity value
> 0, then the humidity effect will be included in the
calculation.
elevation - This should be the geometric altitude of the station
(this is the elevation provided by surveys and normally
used by people when they speak of elevation). Some
algorithms will convert the elevation internally into
a geopotential altitude.
sensorElevation - This should be the geometric altitude of the actual
barometric sensor (which could be different than the
official station elevation).
Notes about Sensor Pressure vs. Station Pressure:
SensorToStationPressure and StationToSensorPressure functions are based
on an ASOS algorithm. It corrects for a difference in elevation between
the official station location and the location of the barometric sensor.
It turns out that if the elevation difference is under 30 ft, then the
algorithm will give the same result (a 0 to .01 inHg adjustment) regardless
of temperature. In that case, the difference can be covered using a simple
fixed offset. If the difference is 30 ft or greater, there is some effect
from temperature, though it is small. For example, at a 100ft difference,
the adjustment will be .13 inHg at -30F and .10 at 100F. The bottom line
is that while ASOS stations may do this calculation, it is likely unneeded
for home weather stations, and the station pressure and the sensor pressure
can be treated as equivalent."""
import math
def FToC(value):
return (value - 32.0) * (5.0 / 9.0)
def CToF(value):
return (9.0/5.0)*value + 32.0
def CToK(value):
return value + 273.15
def KToC(value):
return value - 273.15
def FToR(value):
return value + 459.67
def RToF(value):
return value - 459.67
def InToHPa(value):
return value / 0.02953
def HPaToIn(value):
return value * 0.02953
def FtToM(value):
return value * 0.3048
def MToFt(value):
return value / 0.3048
def InToMm(value):
return value * 25.4
def MmToIn(value):
return value / 25.4
def MToKm(value): # NB: This is *miles* to Km.
return value * 1.609344
def KmToM(value): # NB: This is Km to *miles*
return value / 1.609344
def msToKmh(value):
return value * 3.6
def Power10(y):
return pow(10.0, y)
# This maps various Pascal functions to Python functions.
Power = pow
Exp = math.exp
Round = round
class TWxUtils(object):
gravity = 9.80665 # g at sea level at lat 45.5 degrees in m/sec^2
uGC = 8.31432 # universal gas constant in J/mole-K
moleAir = 0.0289644 # mean molecular mass of air in kg/mole
moleWater = 0.01801528 # molecular weight of water in kg/mole
gasConstantAir = uGC/moleAir # (287.053) gas constant for air in J/kgK
standardSLP = 1013.25 # standard sea level pressure in hPa
standardSlpInHg = 29.921 # standard sea level pressure in inHg
standardTempK = 288.15 # standard sea level temperature in Kelvin
earthRadius45 = 6356.766 # radius of the earth at lat 45.5 degrees in km
# standard lapse rate (6.5C/1000m i.e. 6.5K/1000m)
standardLapseRate = 0.0065
# (0.0019812) standard lapse rate per foot (1.98C/1000ft)
standardLapseRateFt = standardLapseRate * 0.3048
vpLapseRateUS = 0.00275 # lapse rate used by VantagePro (2.75F/1000ft)
manBarLapseRate = 0.0117 # lapse rate from Manual of Barometry (11.7F/1000m, which = 6.5C/1000m)
@staticmethod
def StationToSensorPressure(pressureHPa, sensorElevationM, stationElevationM, currentTempC):
# from ASOS formula specified in US units
Result = InToHPa(HPaToIn(pressureHPa) / Power10(0.00813 * MToFt(sensorElevationM - stationElevationM) / FToR(CToF(currentTempC))))
return Result
@staticmethod
def StationToAltimeter(pressureHPa, elevationM, algorithm='aaMADIS'):
if algorithm == 'aaASOS':
# see ASOS training at http://www.nwstc.noaa.gov
# see also http://wahiduddin.net/calc/density_altitude.htm
Result = InToHPa(Power(Power(HPaToIn(pressureHPa), 0.1903) + (1.313E-5 * MToFt(elevationM)), 5.255))
elif algorithm == 'aaASOS2':
geopEl = TWxUtils.GeopotentialAltitude(elevationM)
k1 = TWxUtils.standardLapseRate * TWxUtils.gasConstantAir / TWxUtils.gravity # approx. 0.190263
k2 = 8.41728638E-5 # (stdLapseRate / stdTempK) * (Power(stdSLP, k1)
Result = Power(Power(pressureHPa, k1) + (k2 * geopEl), 1/k1)
elif algorithm == 'aaMADIS':
# from MADIS API by NOAA Forecast Systems Lab
# http://madis.noaa.gov/madis_api.html
k1 = 0.190284 # discrepancy with calculated k1 probably
# because Smithsonian used less precise gas
# constant and gravity values
k2 = 8.4184960528E-5 # (stdLapseRate / stdTempK) * (Power(stdSLP, k1)
Result = Power(Power(pressureHPa - 0.3, k1) + (k2 * elevationM), 1/k1)
elif algorithm == 'aaNOAA':
# http://www.srh.noaa.gov/elp/wxclc/formulas/altimeterSetting.html
k1 = 0.190284 # discrepancy with k1 probably because
# Smithsonian used less precise gas constant
# and gravity values
k2 = 8.42288069E-5 # (stdLapseRate / 288) * (Power(stdSLP, k1SMT)
Result = (pressureHPa - 0.3) * Power(1 + (k2 * (elevationM / Power(pressureHPa - 0.3, k1))), 1/k1)
elif algorithm == 'aaWOB':
# see http://www.wxqa.com/archive/obsman.pdf
k1 = TWxUtils.standardLapseRate * TWxUtils.gasConstantAir / TWxUtils.gravity # approx. 0.190263
k2 = 1.312603E-5 # (stdLapseRateFt / stdTempK) * Power(stdSlpInHg, k1)
Result = InToHPa(Power(Power(HPaToIn(pressureHPa), k1) + (k2 * MToFt(elevationM)), 1/k1))
elif algorithm == 'aaSMT':
# WMO Instruments and Observing Methods Report No.19
# http://www.wmo.int/pages/prog/www/IMOP/publications/IOM-19-Synoptic-AWS.pdf
k1 = 0.190284 # discrepancy with calculated value probably
# because Smithsonian used less precise gas
# constant and gravity values
k2 = 4.30899E-5 # (stdLapseRate / 288) * (Power(stdSlpInHg, k1SMT))
geopEl = TWxUtils.GeopotentialAltitude(elevationM)
Result = InToHPa((HPaToIn(pressureHPa) - 0.01) * Power(1 + (k2 * (geopEl / Power(HPaToIn(pressureHPa) - 0.01, k1))), 1/k1))
else:
raise ValueError("Unknown StationToAltimeter algorithm '%s'" %
algorithm)
return Result
@staticmethod
def StationToSeaLevelPressure(pressureHPa, elevationM,
currentTempC, meanTempC, humidity,
algorithm = 'paManBar'):
Result = pressureHPa * TWxUtils.PressureReductionRatio(pressureHPa,
elevationM,
currentTempC,
meanTempC,
humidity,
algorithm)
return Result
@staticmethod
def SensorToStationPressure(pressureHPa, sensorElevationM,
stationElevationM, currentTempC):
# see ASOS training at http://www.nwstc.noaa.gov
# from US units ASOS formula
Result = InToHPa(HPaToIn(pressureHPa) * Power10(0.00813 * MToFt(sensorElevationM - stationElevationM) / FToR(CToF(currentTempC))))
return Result
# FIXME: still to do
#class function TWxUtils.AltimeterToStationPressure(pressureHPa: TWxReal;
# elevationM: TWxReal;
# algorithm: TAltimeterAlgorithm = DefaultAltimeterAlgorithm): TWxReal;
#begin
#end;
#}
@staticmethod
def SeaLevelToStationPressure(pressureHPa, elevationM,
currentTempC, meanTempC, humidity,
algorithm = 'paManBar'):
Result = pressureHPa / TWxUtils.PressureReductionRatio(pressureHPa,
elevationM,
currentTempC,
meanTempC,
humidity,
algorithm)
return Result
@staticmethod
def PressureReductionRatio(pressureHPa, elevationM,
currentTempC, meanTempC, humidity,
algorithm = 'paManBar'):
if algorithm == 'paUnivie':
# http://www.univie.ac.at/IMG-Wien/daquamap/Parametergencom.html
geopElevationM = TWxUtils.GeopotentialAltitude(elevationM)
Result = Exp(((TWxUtils.gravity/TWxUtils.gasConstantAir) * geopElevationM) / (TWxUtils.VirtualTempK(pressureHPa, meanTempC, humidity) + (geopElevationM * TWxUtils.standardLapseRate/2)))
elif algorithm == 'paDavisVp':
# http://www.exploratorium.edu/weather/barometer.html
if (humidity > 0):
hCorr = (9.0/5.0) * TWxUtils.HumidityCorrection(currentTempC, elevationM, humidity, 'vaDavisVp')
else:
hCorr = 0
# In the case of DavisVp, take the constant values literally.
Result = Power(10, (MToFt(elevationM) / (122.8943111 * (CToF(meanTempC) + 460 + (MToFt(elevationM) * TWxUtils.vpLapseRateUS/2) + hCorr))))
elif algorithm == 'paManBar':
# see WMO Instruments and Observing Methods Report No.19
# http://www.wmo.int/pages/prog/www/IMOP/publications/IOM-19-Synoptic-AWS.pdf
# http://www.wmo.ch/web/www/IMOP/publications/IOM-19-Synoptic-AWS.pdf
if (humidity > 0):
hCorr = (9.0/5.0) * TWxUtils.HumidityCorrection(currentTempC, elevationM, humidity, 'vaBuck')
else:
hCorr = 0
geopElevationM = TWxUtils.GeopotentialAltitude(elevationM)
Result = Exp(geopElevationM * 6.1454E-2 / (CToF(meanTempC) + 459.7 + (geopElevationM * TWxUtils.manBarLapseRate / 2) + hCorr))
else:
raise ValueError("Unknown PressureReductionRatio algorithm '%s'" %
algorithm)
return Result
@staticmethod
def ActualVaporPressure(tempC, humidity, algorithm='vaBolton'):
result = (humidity * TWxUtils.SaturationVaporPressure(tempC, algorithm)) / 100.0
return result
@staticmethod
def SaturationVaporPressure(tempC, algorithm='vaBolton'):
# comparison of vapor pressure algorithms
# http://cires.colorado.edu/~voemel/vp.html
# (for DavisVP) http://www.exploratorium.edu/weather/dewpoint.html
if algorithm == 'vaDavisVp':
# Davis Calculations Doc
Result = 6.112 * Exp((17.62 * tempC)/(243.12 + tempC))
elif algorithm == 'vaBuck':
# Buck(1996)
Result = 6.1121 * Exp((18.678 - (tempC/234.5)) * tempC / (257.14 + tempC))
elif algorithm == 'vaBuck81':
# Buck(1981)
Result = 6.1121 * Exp((17.502 * tempC)/(240.97 + tempC))
elif algorithm == 'vaBolton':
# Bolton(1980)
Result = 6.112 * Exp(17.67 * tempC / (tempC + 243.5))
elif algorithm == 'vaTetenNWS':
# Magnus Teten
# www.srh.weather.gov/elp/wxcalc/formulas/vaporPressure.html
Result = 6.112 * Power(10,(7.5 * tempC / (tempC + 237.7)))
elif algorithm == 'vaTetenMurray':
# Magnus Teten (Murray 1967)
Result = Power(10, (7.5 * tempC / (237.5 + tempC)) + 0.7858)
elif algorithm == 'vaTeten':
# Magnus Teten
# www.vivoscuola.it/US/RSIGPP3202/umidita/attivita/relhumONA.htm
Result = 6.1078 * Power(10, (7.5 * tempC / (tempC + 237.3)))
else:
raise ValueError("Unknown SaturationVaporPressure algorithm '%s'" %
algorithm)
return Result
@staticmethod
def MixingRatio(pressureHPa, tempC, humidity):
k1 = TWxUtils.moleWater / TWxUtils.moleAir # 0.62198
# http://www.wxqa.com/archive/obsman.pdf
# http://www.vivoscuola.it/US/RSIGPP3202/umidita/attiviat/relhumONA.htm
vapPres = TWxUtils.ActualVaporPressure(tempC, humidity, 'vaBuck')
Result = 1000 * ((k1 * vapPres) / (pressureHPa - vapPres))
return Result
@staticmethod
def VirtualTempK(pressureHPa, tempC, humidity):
epsilon = 1 - (TWxUtils.moleWater / TWxUtils.moleAir) # 0.37802
# http://www.univie.ac.at/IMG-Wien/daquamap/Parametergencom.html
# http://www.vivoscuola.it/US/RSIGPP3202/umidita/attiviat/relhumONA.htm
# http://wahiduddin.net/calc/density_altitude.htm
vapPres = TWxUtils.ActualVaporPressure(tempC, humidity, 'vaBuck')
Result = (CToK(tempC)) / (1-(epsilon * (vapPres/pressureHPa)))
return Result
@staticmethod
def HumidityCorrection(tempC, elevationM, humidity, algorithm='vaBolton'):
vapPress = TWxUtils.ActualVaporPressure(tempC, humidity, algorithm)
Result = (vapPress * ((2.8322E-9 * (elevationM**2)) + (2.225E-5 * elevationM) + 0.10743))
return Result
@staticmethod
def GeopotentialAltitude(geometricAltitudeM):
Result = (TWxUtils.earthRadius45 * 1000 * geometricAltitudeM) / ((TWxUtils.earthRadius45 * 1000) + geometricAltitudeM)
return Result
#==============================================================================
# class TWxUtilsUS
#==============================================================================
class TWxUtilsUS(object):
"""This class provides US unit versions of the functions in uWxUtils.
Refer to uWxUtils for documentation. All input and output paramters are
in the following US units:
pressure in inches of mercury
temperature in Fahrenheit
wind in MPH
elevation in feet"""
@staticmethod
def StationToSensorPressure(pressureIn, sensorElevationFt,
stationElevationFt, currentTempF):
Result = pressureIn / Power10(0.00813 * (sensorElevationFt - stationElevationFt) / FToR(currentTempF))
return Result
@staticmethod
def StationToAltimeter(pressureIn, elevationFt,
algorithm='aaMADIS'):
"""Example:
>>> p = TWxUtilsUS.StationToAltimeter(24.692, 5431, 'aaASOS')
>>> print "Station pressure to altimeter = %.3f" % p
Station pressure to altimeter = 30.153
"""
Result = HPaToIn(TWxUtils.StationToAltimeter(InToHPa(pressureIn),
FtToM(elevationFt),
algorithm))
return Result
@staticmethod
def StationToSeaLevelPressure(pressureIn, elevationFt,
currentTempF, meanTempF, humidity,
algorithm='paManBar'):
"""Example:
>>> p = TWxUtilsUS.StationToSeaLevelPressure(24.692, 5431, 59.0, 50.5, 40.5)
>>> print "Station to SLP = %.3f" % p
Station to SLP = 30.153
"""
Result = pressureIn * TWxUtilsUS.PressureReductionRatio(pressureIn,
elevationFt,
currentTempF,
meanTempF,
humidity,
algorithm)
return Result
@staticmethod
def SensorToStationPressure(pressureIn,
sensorElevationFt, stationElevationFt,
currentTempF):
Result = pressureIn * Power10(0.00813 * (sensorElevationFt - stationElevationFt) / FToR(currentTempF))
return Result
@staticmethod
def AltimeterToStationPressure(pressureIn, elevationFt,
algorithm='aaMADIS'):
Result = TWxUtils.AltimeterToStationPressure(InToHPa(pressureIn),
FtToM(elevationFt),
algorithm)
return Result
@staticmethod
def SeaLevelToStationPressure(pressureIn, elevationFt,
currentTempF, meanTempF, humidity,
algorithm='paManBar'):
"""Example:
>>> p = TWxUtilsUS.SeaLevelToStationPressure(30.153, 5431, 59.0, 50.5, 40.5)
>>> print "Station to SLP = %.3f" % p
Station to SLP = 24.692
"""
Result = pressureIn / TWxUtilsUS.PressureReductionRatio(pressureIn,
elevationFt,
currentTempF,
meanTempF,
humidity,
algorithm)
return Result
@staticmethod
def PressureReductionRatio(pressureIn, elevationFt,
currentTempF, meanTempF, humidity,
algorithm='paManBar'):
Result = TWxUtils.PressureReductionRatio(InToHPa(pressureIn),
FtToM(elevationFt),
FToC(currentTempF),
FToC(meanTempF),
humidity, algorithm)
return Result
@staticmethod
def ActualVaporPressure(tempF, humidity, algorithm='vaBolton'):
Result = (humidity * TWxUtilsUS.SaturationVaporPressure(tempF, algorithm)) / 100
return Result
@staticmethod
def SaturationVaporPressure(tempF, algorithm='vaBolton'):
Result = HPaToIn(TWxUtils.SaturationVaporPressure(FToC(tempF),
algorithm))
return Result
@staticmethod
def MixingRatio(pressureIn, tempF, humidity):
Result = HPaToIn(TWxUtils.MixingRatio(InToHPa(pressureIn),
FToC(tempF), humidity))
return Result
@staticmethod
def HumidityCorrection(tempF, elevationFt, humidity, algorithm='vaBolton'):
Result = TWxUtils.HumidityCorrection(FToC(tempF),
FtToM(elevationFt),
humidity,
algorithm)
return Result
@staticmethod
def GeopotentialAltitude(geometricAltitudeFt):
Result = MToFt(TWxUtils.GeopotentialAltitude(FtToM(geometricAltitudeFt)))
return Result
#==============================================================================
# class TWxUtilsVP
#==============================================================================
class uWxUtilsVP(object):
""" This class contains functions for calculating the raw sensor pressure
of a Vantage Pro weather station from the sea level reduced pressure it
provides.
The sensor pressure can then be used to calculate altimeter setting using
other functions in the uWxUtils and uWxUtilsUS units.
notes about input parameters:
currentTemp - current instantaneous station temperature
temp12HrsAgoF - temperature from 12 hours ago. If the 12 hour temp is
not known, simply pass the same value as currentTemp
for the 12 hour temp. For the vantage pro sea level
to sensor pressure conversion, the 12 hour temp
should be the hourly temp that is 11 hours to 11:59
in the past. For example, if the current time is
3:59pm, use the 4:00am temp, and if it is currently
4:00pm, use the 5:00am temp. Also, the vantage pro
seems to use only whole degree temp values in the sea
level calculation, so the function performs rounding
on the temperature.
meanTemp - average of current temp and the temperature 12 hours in
the past. If the 12 hour temp is not known, simply pass
the same value as currentTemp for the mean temp. For the
Vantage Pro, the mean temperature should come from the
BARDATA.VirtualTemp. The value in BARDATA is an integer
(whole degrees). The vantage pro calculates the mean by
Round(((Round(currentTempF - 0.01) +
Round(temp12HrsAgoF - 0.01)) / 2) - 0.01);
humidity - Value should be 0 to 100. For the pressure conversion
functions, pass a value of zero if you do not want
the algorithm to include the humidity correction factor
in the calculation. If you provide a humidity value
> 0, then the humidity effect will be included in the
calculation.
elevation - This should be the geometric altitude of the station
(this is the elevation provided by surveys and normally
used by people when they speak of elevation). Some
algorithms will convert the elevation internally into
a geopotential altitude."""
# this function is used if you have access to BARDATA (Davis Serial docs)
# meanTempF is from BARDATA.VirtualTemp
# humidityCorr is from BARDATA.C (remember to first divide C by 10)
@staticmethod
def SeaLevelToSensorPressure_meanT(pressureIn, elevationFt, meanTempF,
humidityCorr):
Result = TWxUtilsUS.SeaLevelToStationPressure(
pressureIn, elevationFt, meanTempF,
meanTempF + humidityCorr, 0, 'paDavisVp')
return Result
# this function is used if you do not have access to BARDATA. The function
# will internally calculate the mean temp and the humidity correction
# the would normally come from the BARDATA.
# currentTempF is the value of the current sensor temp
# temp12HrsAgoF is the temperature from 12 hours ago (see comments on
# temp12Hr from earlier in this document for more on this).
@staticmethod
def SeaLevelToSensorPressure_12(pressureIn, elevationFt, currentTempF,
temp12HrsAgoF, humidity):
Result = TWxUtilsUS.SeaLevelToStationPressure(
pressureIn, elevationFt, currentTempF,
Round(((Round(currentTempF - 0.01) + Round(temp12HrsAgoF - 0.01)) / 2) - 0.01),
humidity, 'paDavisVp')
return Result
if __name__ == "__main__":
import doctest
if not doctest.testmod().failed:
print "PASSED"
| 47.574669
| 197
| 0.575635
|
19f212e38771cc1b741997b9c0bb954be88c1967
| 3,351
|
py
|
Python
|
bot/constants.py
|
superik032/save-media-bot-1
|
f1da929ce3fa345311e3dfa39ea95f777151b9c7
|
[
"MIT"
] | 1
|
2021-11-29T12:28:59.000Z
|
2021-11-29T12:28:59.000Z
|
bot/constants.py
|
superik032/save-media-bot-1
|
f1da929ce3fa345311e3dfa39ea95f777151b9c7
|
[
"MIT"
] | null | null | null |
bot/constants.py
|
superik032/save-media-bot-1
|
f1da929ce3fa345311e3dfa39ea95f777151b9c7
|
[
"MIT"
] | null | null | null |
from telegram import InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup
USER_AGENT = (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)'
' AppleWebKit/605.1.15 (KHTML, like Gecko) '
'Version/14.1.1 Safari/605.1.15'
)
class States:
prepare_mailing = 1
received_mailing = 2
class CallbackData:
statistics = 'statistics'
mailing = 'mailing'
backup = 'backup'
class ReplyButtons:
how_to_use = '💬 Как использовать бота'
send_mailing = 'Отправить'
preview_mailing = 'Предпросмотр'
cancel_mailing = 'Отмена'
class Keyboard:
main = ReplyKeyboardMarkup([
[ReplyButtons.how_to_use]
], resize_keyboard=True)
admin = InlineKeyboardMarkup([
[InlineKeyboardButton('Посмотреть статистику', callback_data=CallbackData.statistics)],
[InlineKeyboardButton('Создать рассылку', callback_data=CallbackData.mailing)],
[InlineKeyboardButton('Копировать базу данных', callback_data=CallbackData.backup)]
])
mailing = ReplyKeyboardMarkup([
[ReplyButtons.send_mailing],
[ReplyButtons.preview_mailing, ReplyButtons.cancel_mailing]
], resize_keyboard=True)
class Message:
start = (
'Привет! Отправь мне ссылку на то, что надо скачать, и через мгновение я тебе всё отправлю. Сейчас '
'поддерживается:\n\n'
'<b>Instagram</b>: фото, видео и карусели.\n'
'<b>TikTok</b>: видео.'
)
how_to_use = (
'Для скачивания фото, видео или карусели из Instagram пришлите ссылку следующего вида:\n\n'
'<code>https://www.instagram.com/p/BYvh3Yel9iL/</code>\n\n'
'Для скачивания видео из TikTok пришлите ссылку следующего вида:\n\n'
'<code>https://vm.tiktok.com/ZSJvpWXK4/</code> или '
'<code>https://www.tiktok.com/@therock/video/6824918631965576454</code>'
)
not_subscribed = 'Чтобы пользоваться ботом, нужна подписка на канал — {}'
instagram_post_caption = (
'<b>Лайки: {likes}</b>\n\n{caption}'
'Рад был помочь! Ваш, <a href="https://t.me/{username1}?start=share">@{username2}</a>'
)
tiktok_video_caption = (
'<b>Просмотры: {views}\nЛайки: {likes}</b>\n\n{caption}'
'Рад был помочь! Ваш, <a href="https://t.me/{username1}?start=share">@{username2}</a>'
)
invalid_instagram_post = '💬 Не удалось получить пост в Instagram'
invalid_tiktok_video = '💬 Не удалось получить видео в TikTok'
invalid_link = '💬 Вы прислали нерабочую ссылку, убедитесь в правильности ввода'
admin = 'Добро пожаловать в админскую панель!'
statistics = (
'💬 <b>Статистика бота</b>\n\n'
'Количество пользователей: <b>{total_users}</b>\n'
'Из них активных: <b>{active_users}</b>\n\n'
'Запросов за всё время: <b>{total_requests}</b>'
)
sources = '💬 Статистика по источникам:\n\n'
mailing = 'Отправьте сообщение для рассылки'
received_mailing = 'Сообщение получено. Что дальше?'
mailing_canceled = 'Рассылка отменена'
mailing_started = 'Рассылка началась'
mailing_finished = (
'💬 Сообщение отправлено успешно:\n\n'
'Получившие пользователи: {sent_count}'
)
unexpected_error = '<code>Telegram Error: {error}.\n\n{update}</code>'
backup = 'Бэкап базы данных ({})'
database_not_found = '💬 База данных не найдена'
| 30.463636
| 108
| 0.661594
|
8e2d1a69917e4ef158c0c6332da1d9565ffdf60b
| 4,649
|
py
|
Python
|
otcextensions/sdk/anti_ddos/v1/status.py
|
zsoltn/python-otcextensions
|
4c0fa22f095ebd5f9636ae72acbae5048096822c
|
[
"Apache-2.0"
] | 9
|
2020-04-09T21:13:18.000Z
|
2022-02-13T11:24:41.000Z
|
otcextensions/sdk/anti_ddos/v1/status.py
|
zsoltn/python-otcextensions
|
4c0fa22f095ebd5f9636ae72acbae5048096822c
|
[
"Apache-2.0"
] | 208
|
2020-02-10T08:27:46.000Z
|
2022-03-29T15:24:21.000Z
|
otcextensions/sdk/anti_ddos/v1/status.py
|
zsoltn/python-otcextensions
|
4c0fa22f095ebd5f9636ae72acbae5048096822c
|
[
"Apache-2.0"
] | 15
|
2020-04-01T20:45:54.000Z
|
2022-03-23T12:45:43.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from otcextensions.common import format
class TaskStatus(resource.Resource):
base_path = '/query_task_status'
# capabilities
allow_fetch = True
_query_mapping = resource.QueryParameters('task_id')
# Properties
#: Status of task
#: Valid statuses are `success`, `failed`, `waiting`, `running`
task_status = resource.Body('task_status')
#: Additional task status message
task_msg = resource.Body('task_msg')
class FloatingIPStatus(resource.Resource):
base_path = '/antiddos/%(floating_ip_id)s/status'
# capabilities
allow_get = True
# Properties
floating_ip_id = resource.URI('floating_ip_id')
#: Status of Anti-DDoS
#: Valid statuses are `normal`, `configging`, `notConfig`,
#: `packetcleaning`, `packetdropping`
status = resource.Body('status')
class FloatingIPEvent(resource.Resource):
resources_key = 'logs'
base_path = '/antiddos/%(floating_ip_id)s/logs'
_query_mapping = resource.QueryParameters('limit', 'offset', 'sort_dir')
# capabilities
allow_list = True
# Properties
floating_ip_id = resource.URI('floating_ip_id')
#: start time
#: *Type: int*
start_time = resource.Body('start_time', type=format.TimeTMsStr)
#: end time
#: *Type: int*
end_time = resource.Body('end_time', type=format.TimeTMsStr)
#: Anti-DDoS status
#: Defense status, the possible value of which is one of the following:
#: * 1: indicates that traffic cleaning is underway.
#: * 2: indicates that traffic is discarded.
#: *Type: int*
status = resource.Body('status', type=int)
#: Trigger bps (bit/s)
#: *Type: int*
trigger_bps = resource.Body('trigger_bps', type=int)
#: Trigger package per second
#: *Type: int*
trigger_pps = resource.Body('trigger_pps', type=int)
#: Trigger http requests
#: *Type: int*
trigger_http_pps = resource.Body('trigger_http_pps', type=int)
class FloatingIPDayStat(resource.Resource):
resources_key = 'data'
base_path = '/antiddos/%(floating_ip_id)s/daily'
# capabilities
allow_list = True
# Properties
#: Data start time
#: *Type: int*
period_start = resource.Body('period_start', type=format.TimeTMsStr)
#: In (bit/s)
#: *Type: int*
bps_in = resource.Body('bps_in', type=int)
#: Attack (bit/s)
#: *Type: int*
bps_attack = resource.Body('bps_attack', type=int)
#: Total data (bit/s)
#: *Type: int*
total_bps = resource.Body('total_bps', type=int)
#: Inbound packet rate (/s)
#: *Type: int*
pps_in = resource.Body('pps_in', type=int)
#: Attack packet rate (/s)
#: *Type: int*
pps_attack = resource.Body('pps_attack', type=int)
#: Total packet rate (/s)
#: *Type: int*
total_pps = resource.Body('total_pps', type=int)
class FloatingIPWeekStatData(resource.Resource):
# Properties
#: Intercept time in one week
#: *Type: int*
ddos_intercept_times = resource.Body('ddos_intercept_times', type=int)
#: *Type: int*
ddos_blackhole_times = resource.Body('ddos_blackhole_times', type=int)
#: *Type: int*
max_attack_bps = resource.Body('max_attack_bps', type=int)
#: *Type: int*
max_attack_conns = resource.Body('max_attack_conns', type=int)
#: Data start time
#: *Type: int*
period_start_date = resource.Body('period_start_date',
type=format.TimeTMsStr)
class FloatingIPWeekStat(resource.Resource):
base_path = '/antiddos/weekly'
# capabilities
allow_get = True
_query_mapping = resource.QueryParameters('period_start_date')
# Properties
#: Intercept time in one week
#: *Type: int*
ddos_intercept_times = resource.Body('ddos_intercept_times', type=int)
#: A list of data in one week
#: *Type: list*
weekdata = resource.Body('weekdata', type=list,
list_type=FloatingIPWeekStatData)
#: Top 10 ip address in one week
#: *Type: list*
top10 = resource.Body('top10', type=list)
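# Usage sketch (not part of the original module): a hedged illustration of how
# the declarative Body mappings above expose API payload fields as attributes.
# It assumes openstacksdk/otcextensions are installed; the values are hypothetical.
if __name__ == '__main__':
    sample = FloatingIPDayStat(bps_in=120, bps_attack=30, total_bps=150,
                               pps_in=40, pps_attack=10, total_pps=50)
    print(sample.bps_in, sample.total_pps)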
| 30.385621
| 76
| 0.666595
|
6f168b02bb5efbf92750fbb2bf54c5435c68ff58
| 18,031
|
py
|
Python
|
tests/test_lstm.py
|
kadeng/tensorflow-onnx
|
db91f5b25cc2a053f46af3b2c04b65a679cff03b
|
[
"MIT"
] | null | null | null |
tests/test_lstm.py
|
kadeng/tensorflow-onnx
|
db91f5b25cc2a053f46af3b2c04b65a679cff03b
|
[
"MIT"
] | null | null | null |
tests/test_lstm.py
|
kadeng/tensorflow-onnx
|
db91f5b25cc2a053f46af3b2c04b65a679cff03b
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""Unit Tests for lstm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from backend_test_base import Tf2OnnxBackendTestBase
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
class LSTMTests(Tf2OnnxBackendTestBase):
def test_test_single_dynamic_lstm_state_is_tuple(self):
self.internal_test_single_dynamic_lstm(True)
def test_test_single_dynamic_lstm_state_is_not_tuple(self):
self.internal_test_single_dynamic_lstm(False)
def internal_test_single_dynamic_lstm(self, state_is_tuple):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_single_dynamic_lstm_seq_length_is_const(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
state_is_tuple = True
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 3, 4, 5, 2, 1])
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_single_dynamic_lstm_seq_length_is_not_const(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
state_is_tuple = True
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np.int32)
seq_length = tf.placeholder(tf.int32, y_val.shape, name="input_2")
# no scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=tf.identity(seq_length))
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_single_dynamic_lstm_placeholder_input(self):
units = 5
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * 6)
state_is_tuple = True
x = tf.placeholder(tf.float32, shape=(None, 4, 2), name="input_1")
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32) # by default zero initializer is used
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_single_dynamic_lstm_ch_zero_state_initializer(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
state_is_tuple = True
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
# no scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
# defining initial state
initial_state = cell.zero_state(batch_size, dtype=tf.float32)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
initial_state=initial_state,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_single_dynamic_lstm_consume_one_of_ch_tuple(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
state_is_tuple = True
# no scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state.c, name="cell_state_c")
_ = tf.identity(cell_state.h, name="cell_state_h")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state_c:0", "cell_state_h:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@unittest.skip("FIXME: disable for now for accuracy problem")
def test_single_dynamic_lstm_random_weights(self, state_is_tuple=True):
hidden_size = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = tf.random_uniform_initializer(-1.0, 1.0)
# no scope
cell = rnn.LSTMCell(
hidden_size,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, 0.0001)
@unittest.skip("FIXME: disable for now for accuracy problem")
def test_single_dynamic_lstm_random_weights2(self, state_is_tuple=True):
hidden_size = 128
batch_size = 1
x_val = np.random.randn(1, 133).astype('f')
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = tf.random_uniform_initializer(0.0, 1.0)
# no scope
cell = rnn.LSTMCell(
hidden_size,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, 0.01)
def test_multiple_dynamic_lstm_state_is_tuple(self):
self.internal_test_multiple_dynamic_lstm_with_parameters(True)
def test_multiple_dynamic_lstm_state_is_not_tuple(self):
self.internal_test_multiple_dynamic_lstm_with_parameters(False)
def internal_test_multiple_dynamic_lstm_with_parameters(self, state_is_tuple):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
_ = tf.placeholder(tf.float32, x_val.shape, name="input_2")
initializer = init_ops.constant_initializer(0.5)
lstm_output_list = []
lstm_cell_state_list = []
if True:
# no scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32)
lstm_output_list.append(outputs)
lstm_cell_state_list.append(cell_state)
if True:
# given scope
cell = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
with variable_scope.variable_scope("root1") as scope:
outputs, cell_state = tf.nn.dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 4, 4, 4, 4, 4],
scope=scope)
lstm_output_list.append(outputs)
lstm_cell_state_list.append(cell_state)
_ = tf.identity(lstm_output_list, name="output")
_ = tf.identity(lstm_cell_state_list, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_dynamic_basiclstm(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
cell1 = rnn.BasicLSTMCell(
units,
state_is_tuple=True)
outputs, cell_state = tf.nn.dynamic_rnn(
cell1,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, 0.0001)
def test_dynamic_lstm_output_consumed_only(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
cell1 = rnn.LSTMCell(
units,
state_is_tuple=True)
outputs, _ = tf.nn.dynamic_rnn(
cell1,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, 0.0001)
def test_dynamic_lstm_state_consumed_only(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
cell1 = rnn.LSTMCell(
units,
state_is_tuple=True)
_, cell_state = tf.nn.dynamic_rnn(
cell1,
x,
dtype=tf.float32)
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, 0.0001)
def test_dynamic_bilstm_state_is_tuple(self):
self.internal_test_dynamic_bilstm_with_parameters(True)
def test_dynamic_bilstm_state_is_not_tuple(self):
self.internal_test_dynamic_bilstm_with_parameters(False)
def internal_test_dynamic_bilstm_with_parameters(self, state_is_tuple):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
if True:
# bilstm, no scope
cell1 = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple) # state_is_tuple will impact Pack node (for cell_state)'s usage pattern
cell2 = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, cell_state = tf.nn.bidirectional_dynamic_rnn(
cell1,
cell2,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_dynamic_bilstm_output_consumed_only(self, state_is_tuple=True):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
if True:
# bilstm, no scope
cell1 = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple) # state_is_tuple will impact Pack node (for cell_state)'s usage pattern
cell2 = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(
cell1,
cell2,
x,
dtype=tf.float32)
_ = tf.identity(outputs, name="output")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
def test_dynamic_bilstm_state_consumed_only(self, state_is_tuple=True):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
x = tf.placeholder(tf.float32, x_val.shape, name="input_1")
initializer = init_ops.constant_initializer(0.5)
if True:
# bilstm, no scope
cell1 = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple) # state_is_tuple will impact Pack node (for cell_state)'s usage pattern
cell2 = rnn.LSTMCell(
units,
initializer=initializer,
state_is_tuple=state_is_tuple)
_, cell_state = tf.nn.bidirectional_dynamic_rnn(
cell1,
cell2,
x,
dtype=tf.float32)
_ = tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["cell_state:0"]
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
if __name__ == '__main__':
Tf2OnnxBackendTestBase.trigger(LSTMTests)
| 36.797959
| 119
| 0.603294
|
383f001dc15f860710c28fcbb9f65be4685c766f
| 2,783
|
py
|
Python
|
testsuite/driver/src/target_parser/common_parse.py
|
openmaple/MapleCompiler
|
1648e63144766563f1ec44a25e0b618415648627
|
[
"MulanPSL-1.0"
] | 5
|
2019-09-02T04:44:52.000Z
|
2021-11-08T12:23:51.000Z
|
testsuite/driver/src/target_parser/common_parse.py
|
venshine/OpenArkCompiler
|
264cd4463834356658154f0d254672ef559f245f
|
[
"MulanPSL-1.0"
] | 2
|
2020-07-21T01:22:01.000Z
|
2021-12-06T08:07:16.000Z
|
testsuite/driver/src/target_parser/common_parse.py
|
venshine/OpenArkCompiler
|
264cd4463834356658154f0d254672ef559f245f
|
[
"MulanPSL-1.0"
] | 4
|
2019-09-02T04:46:52.000Z
|
2020-09-10T11:30:03.000Z
|
#
# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
#
# OpenArkCompiler is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#
import os
import re
import sys
from env_var import EnvVar
from mode_table import ModeTable
class CommonParse(object):
case_regx = re.compile("^[A-Z]{1,9}[0-9]{3,10}-[a-zA-Z0-9_.]")
module_regx = re.compile("^[a-z0-9_]{1,20}_test$")
def __init__(self, input: dict):
self.target = input["target"]
self.mode = input["mode"]
if os.path.exists(os.path.join(EnvVar.CONFIG_FILE_PATH, self.target + ".conf")):
self.mode_table = ModeTable(os.path.join(EnvVar.CONFIG_FILE_PATH, self.target + ".conf"))
else:
self.mode_table = ModeTable(os.path.join(EnvVar.CONFIG_FILE_PATH, "testall.conf"))
self.cases = {}
def parser_cases(self):
targets = [self.target]
if os.path.exists(os.path.join(EnvVar.CONFIG_FILE_PATH, self.target + ".conf")):
targets = self.mode_table.get_targets()
for single_target in targets:
if not os.path.exists(os.path.join(EnvVar.TEST_SUITE_ROOT, single_target)):
print("Target " + single_target + " doesn't exist !")
sys.exit(1)
if CommonParse.case_regx.match(single_target.split('/')[-1]):
self.cases[single_target] = self.mode_table.get_case_mode_set(single_target)
elif CommonParse.module_regx.match(single_target.split('/')[-1]):
subtarget_list = [single_target]
while subtarget_list:
subtarget = subtarget_list.pop(0)
for dir in os.listdir(os.path.join(EnvVar.TEST_SUITE_ROOT, subtarget)):
if CommonParse.case_regx.match(dir):
self.cases[os.path.join(subtarget, dir)] = self.mode_table.get_case_mode_set(os.path.join(subtarget, dir))
elif CommonParse.module_regx.match(dir):
subtarget_list.append(os.path.join(subtarget, dir))
def execute(self):
self.parser_cases()
if self.mode is not None:
for case in list(self.cases.keys()):
if self.mode in self.cases[case]:
self.cases[case] = {self.mode}
else:
del self.cases[case]
def get_output(self):
return self.cases
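# Illustration (not part of the original module): a hedged sketch of how the two
# class-level regexes above classify directory names; the sample names are hypothetical.
if __name__ == '__main__':
    for name in ("APP0001-hello.world", "arith_test", "README"):
        kind = ("case" if CommonParse.case_regx.match(name)
                else "module" if CommonParse.module_regx.match(name)
                else "ignored")
        print(name, "->", kind)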
| 41.537313
| 134
| 0.620913
|
c06213aeb84aaf7311a3a838d2adc1bd3e423e04
| 6,413
|
py
|
Python
|
xsdata/formats/dataclass/context.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
xsdata/formats/dataclass/context.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
xsdata/formats/dataclass/context.py
|
nimish/xsdata
|
7afe2781b66982428cc1731f53c065086acd35c1
|
[
"MIT"
] | null | null | null |
import sys
from dataclasses import dataclass
from dataclasses import Field
from dataclasses import field
from dataclasses import fields
from dataclasses import is_dataclass
from dataclasses import MISSING
from typing import Any
from typing import Callable
from typing import Dict
from typing import get_type_hints
from typing import Iterator
from typing import List
from typing import Optional
from typing import Type
from lxml.etree import QName
from xsdata.exceptions import XmlContextError
from xsdata.formats.converters import sort_types
from xsdata.formats.dataclass.models.constants import XmlType
from xsdata.formats.dataclass.models.elements import XmlMeta
from xsdata.formats.dataclass.models.elements import XmlVar
from xsdata.models.enums import NamespaceType
@dataclass
class XmlContext:
name_generator: Callable = field(default=lambda x: x)
cache: Dict[Type, XmlMeta] = field(default_factory=dict)
def fetch(
self,
clazz: Type,
parent_ns: Optional[str] = None,
xsi_type: Optional[QName] = None,
) -> XmlMeta:
meta = self.build(clazz, parent_ns)
subclass = self.find_subclass(clazz, xsi_type) if xsi_type else None
if subclass:
meta = self.build(subclass, parent_ns)
return meta
def find_subclass(self, clazz: Type, xsi_type: QName) -> Optional[Type]:
for subclass in clazz.__subclasses__():
if self.match_class_source_qname(subclass, xsi_type):
return subclass
for base in clazz.__bases__:
if not is_dataclass(base):
continue
if self.match_class_source_qname(base, xsi_type):
return base
sibling = self.find_subclass(base, xsi_type)
if sibling:
return sibling
return None
def match_class_source_qname(self, clazz: Type, xsi_type: QName) -> bool:
if is_dataclass(clazz):
meta = self.build(clazz)
return meta.source_qname == xsi_type
return False
def build(self, clazz: Type, parent_ns: Optional[str] = None) -> XmlMeta:
if clazz not in self.cache:
if not is_dataclass(clazz):
raise XmlContextError(f"Object {clazz} is not a dataclass.")
meta = getattr(clazz, "Meta", None)
if meta and meta.__qualname__ != f"{clazz.__name__}.Meta":
meta = None
name = getattr(meta, "name", self.name_generator(clazz.__name__))
nillable = getattr(meta, "nillable", False)
namespace = getattr(meta, "namespace", parent_ns)
module = sys.modules[clazz.__module__]
source_namespace = getattr(module, "__NAMESPACE__", None)
self.cache[clazz] = XmlMeta(
name=name,
clazz=clazz,
qname=QName(namespace, name),
source_qname=QName(source_namespace, name),
nillable=nillable,
vars=list(self.get_type_hints(clazz, namespace)),
)
return self.cache[clazz]
def get_type_hints(self, clazz: Type, parent_ns: Optional[str]) -> Iterator[XmlVar]:
type_hints = get_type_hints(clazz)
for var in fields(clazz):
type_hint = type_hints[var.name]
types = self.real_types(type_hint)
xml_type = var.metadata.get("type")
xml_clazz = XmlType.to_xml_class(xml_type)
namespace = var.metadata.get("namespace")
namespaces = self.resolve_namespaces(xml_type, namespace, parent_ns)
local_name = var.metadata.get("name") or self.name_generator(var.name)
is_class = any(is_dataclass(clazz) for clazz in types)
first_namespace = (
namespaces[0]
if len(namespaces) > 0 and namespaces[0] and namespaces[0][0] != "#"
else None
)
yield xml_clazz(
name=var.name,
qname=QName(first_namespace, local_name),
namespaces=namespaces,
init=var.init,
nillable=var.metadata.get("nillable", False),
dataclass=is_class,
sequential=var.metadata.get("sequential", False),
types=types,
default=self.default_value(var),
)
@staticmethod
def resolve_namespaces(
xml_type: Optional[str],
namespace: Optional[str],
parent_namespace: Optional[str],
) -> List[str]:
if xml_type in (XmlType.ELEMENT, XmlType.WILDCARD) and namespace is None:
namespace = parent_namespace
if not namespace:
return []
result = set()
for ns in namespace.split(" "):
ns = ns.strip()
if not ns:
continue
ns_type = NamespaceType.get_enum(ns)
if ns_type == NamespaceType.TARGET:
result.add(parent_namespace or NamespaceType.ANY.value)
elif ns_type == NamespaceType.LOCAL:
result.add("")
elif ns_type == NamespaceType.OTHER:
result.add(f"!{parent_namespace or ''}")
else:
result.add(ns)
return list(result)
@staticmethod
def default_value(var: Field) -> Any:
if var.default_factory is not MISSING: # type: ignore
return var.default_factory # type: ignore
if var.default is not MISSING:
return var.default
return None
@staticmethod
def real_types(type_hint: Any) -> List:
types = []
if type_hint is Dict:
types.append(type_hint)
elif hasattr(type_hint, "__origin__"):
while len(type_hint.__args__) == 1 and hasattr(
type_hint.__args__[0], "__origin__"
):
type_hint = type_hint.__args__[0]
types = [
x for x in type_hint.__args__ if x is not None.__class__ # type: ignore
]
else:
types.append(type_hint)
return sort_types(types)
@classmethod
def is_derived(cls, obj: Any, clazz: Type) -> bool:
if isinstance(obj, clazz):
return True
return any(
base is not object and isinstance(obj, base) for base in clazz.__bases__
)
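# Usage sketch (not part of the original module): a hedged example of how
# XmlContext.build derives an XmlMeta from a dataclass. The Person class, its
# namespace, and the field metadata below are hypothetical and only illustrate
# the usual field-metadata conventions handled by get_type_hints() above.
if __name__ == '__main__':
    @dataclass
    class Person:
        class Meta:
            name = "person"
            namespace = "http://example.com/ns"
        first_name: Optional[str] = field(default=None, metadata={"type": "Element"})
    ctx = XmlContext()
    meta = ctx.build(Person)
    print(meta.qname, [var.name for var in meta.vars])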
| 33.227979
| 88
| 0.601279
|
6abb2fb142bbad67e8c8ec340b4561a5d5657baa
| 2,997
|
py
|
Python
|
templates/template.py
|
cisaacstern/when-rad
|
ea97d77be0ff227494ce5eb168872f397ee4f064
|
[
"BSD-3-Clause"
] | null | null | null |
templates/template.py
|
cisaacstern/when-rad
|
ea97d77be0ff227494ce5eb168872f397ee4f064
|
[
"BSD-3-Clause"
] | null | null | null |
templates/template.py
|
cisaacstern/when-rad
|
ea97d77be0ff227494ce5eb168872f397ee4f064
|
[
"BSD-3-Clause"
] | null | null | null |
template = """
{% extends base %}
<!-- goes in body -->
{% block postamble %}
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css">
<link rel='stylesheet' href='//cdn.jsdelivr.net/npm/hack-font@3.3.0/build/web/hack-subset.css'>
<link rel="stylesheet" type="text/css" href="//fonts.googleapis.com/css?family=Nunito">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.2/css/all.min.css">
{% endblock %}
<!-- goes in body -->
{% block contents %}
<div id='content'>
<a href="https://github.com/cisaacstern/{{ app_title }}" target="_blank" class="github-corner" aria-label="View source on GitHub"><svg width="80" height="80" viewBox="0 0 250 250" style="fill:rgb(127, 255, 212, 0.5); color:#292929; position: absolute; top: 0; border: 0; right: 0;" aria-hidden="true"><path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path><path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path></svg></a><style>.github-corner:hover .octo-arm{animation:octocat-wave 560ms ease-in-out}@keyframes octocat-wave{0%,100%{transform:rotate(0)}20%,60%{transform:rotate(-25deg)}40%,80%{transform:rotate(10deg)}}@media (max-width:500px){.github-corner:hover .octo-arm{animation:none}.github-corner .octo-arm{animation:octocat-wave 560ms ease-in-out}}</style>
<h1>{{ app_title }}</h1>
<p>{{ description }}</p>
<blockquote>{{ blockquote | safe }}</blockquote>
<br>
<div class="container">
<div class="row centered">
<div class="col-sm-auto">
{{ embed(roots.A) }}
</div>
<div class="col-sm-auto">
{{ embed(roots.B) }}
</div>
</div>
<br>
<div class="row centered">
<p>{{ extras }}</p>
</div>
<br>
<div id="download" class="row centered">
{{ embed(roots.C) }}
</div>
<div class="row centered">
<br>
<blockquote>{{ returns }}</blockquote>
<br>
</div>
</div>
</div>
{#
<div id="sun-modal" class="invisible">
<div class="modal-content">
<span id="close-sun" class="close">×</span>
<h3>Here's the Sun Plot</h3>
{{ embed(roots.D) }}
</div>
</div>
#}
{{ js }}
{% endblock %}
"""
| 51.672414
| 1,588
| 0.635636
|
100c781247ba83e3b1a1a8a1aa7b426840b7a936
| 2,717
|
py
|
Python
|
analyze/visualizer.py
|
wtrnoguchi/superposition
|
a3ce049000ea8b014c4cac1bfe84d8b399862ef9
|
[
"MIT"
] | 2
|
2022-03-09T21:33:46.000Z
|
2022-03-12T06:14:41.000Z
|
analyze/visualizer.py
|
wtrnoguchi/superposition
|
a3ce049000ea8b014c4cac1bfe84d8b399862ef9
|
[
"MIT"
] | null | null | null |
analyze/visualizer.py
|
wtrnoguchi/superposition
|
a3ce049000ea8b014c4cac1bfe84d8b399862ef9
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy
import analyze_util
class Visualizer(object):
def __init__(self, field_size):
self.field_size = field_size
self.colormap = analyze_util.make2Dcolormap(size=self.field_size * 2)
def save_png(self, f_base):
self.fig.savefig(f_base + '.png')
def save_eps(self, f_base):
self.fig.savefig(f_base + '.eps')
def save_pdf(self, f_base):
self.fig.savefig(f_base + '.pdf', rasterized=True, dpi=300)
def put_label(self, x_lab, y_lab, fontsize=32):
plt.xlabel(x_lab, fontsize=fontsize)
plt.ylabel(y_lab, fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
def init_plot(self):
self.fig = plt.figure(figsize=(6, 6))
self.ax = self.fig.add_subplot(111)
self.ax.set_rasterization_zorder(1)
self.ax.tick_params(direction='in', length=10)
plt.xticks(numpy.arange(-10, 10, 1))
plt.yticks(numpy.arange(-10, 10, 1))
def plot(
self,
hidden,
position,
plot_type,
lim,
):
ds = []
for n in range(hidden.shape[0]):
h = hidden[n, :]
p = position[n, :]
d = self.plot_hidden(h, p, plot_type=plot_type, lim=lim)
ds.append(d)
return ds
def set_transparent(self):
self.fig.patch.set_visible(False)
self.ax.patch.set_visible(False)
plt.axis('off')
def close_plot(self):
plt.close()
def show_plot(self):
plt.show()
def plot_hidden(self, hidden, position, plot_type, lim=None):
p1 = hidden[:, 0]
p2 = hidden[:, 1]
if lim is not None:
plt.xlim(lim[0][0], lim[0][1])
plt.ylim(lim[1][0], lim[1][1])
if plot_type in ['self_position']:
t1 = position[:, 0]
t2 = position[:, 1]
t_idx1 = (t1 + self.field_size).astype(int)
t_idx2 = (t2 + self.field_size).astype(int)
color = self.colormap[t_idx1, t_idx2]
d = self.ax.scatter(p1, p2, c=color, alpha=0.9, zorder=0)
elif plot_type in ['other_position']:
t1 = position[:, 0]
t2 = position[:, 1]
t_idx1 = (t1 + self.field_size).astype(int)
t_idx2 = (t2 + self.field_size).astype(int)
color = self.colormap[t_idx1, t_idx2]
d = self.ax.scatter(p1, p2, c=color, alpha=0.9, zorder=0)
elif plot_type == 'point':
d = self.ax.scatter(p1, p2, c='w', edgecolor='k', s=200, zorder=0)
else:
assert (False)
return d
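# Usage sketch (not part of the original module): a hedged example of driving the
# Visualizer with random data. The array shapes (trajectories, steps, 2) follow
# what plot()/plot_hidden() above expect; the output file name is hypothetical,
# and analyze_util.make2Dcolormap is assumed to be available as imported above.
if __name__ == '__main__':
    viz = Visualizer(field_size=10)
    viz.init_plot()
    hidden = numpy.random.randn(3, 50, 2)  # 3 trajectories of 50 hidden states
    position = numpy.random.randint(-9, 9, size=(3, 50, 2))  # matching 2-D positions
    viz.plot(hidden, position, plot_type='self_position', lim=[(-3, 3), (-3, 3)])
    viz.put_label('h1', 'h2', fontsize=16)
    viz.save_png('hidden_projection')
    viz.close_plot()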
| 30.188889
| 78
| 0.556864
|
8acc3efba07efe16df72c1564e3cb2c66530c0e8
| 896
|
py
|
Python
|
translate.py
|
aqaqsubin/AMR2Text-summ
|
163548c0dbb3e4ca4f83cd59b74b8e5515898d1b
|
[
"MIT"
] | 24
|
2018-08-29T08:27:49.000Z
|
2021-07-18T16:25:37.000Z
|
translate.py
|
aqaqsubin/AMR2Text-summ
|
163548c0dbb3e4ca4f83cd59b74b8e5515898d1b
|
[
"MIT"
] | 2
|
2019-09-10T09:00:41.000Z
|
2020-12-17T09:10:54.000Z
|
translate.py
|
aqaqsubin/AMR2Text-summ
|
163548c0dbb3e4ca4f83cd59b74b8e5515898d1b
|
[
"MIT"
] | 6
|
2018-09-01T14:14:12.000Z
|
2021-07-20T07:13:24.000Z
|
#!/usr/bin/env python
from __future__ import division, unicode_literals
import argparse
from onmt.translate.Translator import make_translator
import onmt.io
import onmt.translate
import onmt
import onmt.ModelConstructor
import onmt.modules
import onmt.opts
def main(opt):
translator = make_translator(opt, report_score=True)
translator.translate(opt.src_dir, opt.src, opt.tgt, opt.phrase_table, opt.global_phrase_table,
opt.batch_size, opt.attn_debug, opt.side_src, opt.side_tgt, opt.oracle, opt.lower,
psi=opt.psi, theta=opt.theta, k=opt.k)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='translate.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
onmt.opts.add_md_help_argument(parser)
onmt.opts.translate_opts(parser)
opt = parser.parse_args()
main(opt)
| 28.903226
| 107
| 0.726563
|
28ecc59bc042e3f4b5ed20c0ca164be66ec2f770
| 27,790
|
py
|
Python
|
tests/fruit_test_common.py
|
TinkerBoard-Android/external-google-fruit
|
57123c8a2477a4d99cb68c53d195e9fb428dd535
|
[
"Apache-2.0"
] | null | null | null |
tests/fruit_test_common.py
|
TinkerBoard-Android/external-google-fruit
|
57123c8a2477a4d99cb68c53d195e9fb428dd535
|
[
"Apache-2.0"
] | null | null | null |
tests/fruit_test_common.py
|
TinkerBoard-Android/external-google-fruit
|
57123c8a2477a4d99cb68c53d195e9fb428dd535
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import textwrap
import re
import sys
import shlex
import itertools
import subprocess
from absl.testing import parameterized
from fruit_test_config import *
from absl.testing import absltest
run_under_valgrind = RUN_TESTS_UNDER_VALGRIND.lower() not in ('false', 'off', 'no', '0', '')
def pretty_print_command(command, env):
return 'cd %s; env -i %s %s' % (
shlex.quote(env['PWD']),
' '.join('%s=%s' % (var_name, shlex.quote(value)) for var_name, value in env.items() if var_name != 'PWD'),
' '.join(shlex.quote(x) for x in command))
def multiple_parameters(*param_lists):
param_lists = [[params if isinstance(params, tuple) else (params,)
for params in param_list]
for param_list in param_lists]
result = param_lists[0]
for param_list in param_lists[1:]:
result = [(*args1, *args2)
for args1 in result
for args2 in param_list]
return parameterized.parameters(*result)
def multiple_named_parameters(*param_lists):
result = param_lists[0]
for param_list in param_lists[1:]:
result = [(name1 + ', ' + name2, *args1, *args2)
for name1, *args1 in result
for name2, *args2 in param_list]
return parameterized.named_parameters(*result)
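# Hedged illustration (not part of the original helpers): multiple_parameters()
# flattens two per-axis parameter lists into one cross-product list of combined
# tuples before handing it to parameterized.parameters(). The values are hypothetical.
_axis_1 = [('variant_a',), ('variant_b',)]
_axis_2 = [(True,), (False,)]
assert [(*a, *b) for a in _axis_1 for b in _axis_2] == [
    ('variant_a', True), ('variant_a', False),
    ('variant_b', True), ('variant_b', False)]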
class CommandFailedException(Exception):
def __init__(self, command, env, stdout, stderr, error_code):
self.command = command
self.env = env
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Exit code {error_code}
Stdout:
{stdout}
Stderr:
{stderr}
''').format(command=pretty_print_command(self.command, self.env), error_code=self.error_code, stdout=self.stdout, stderr=self.stderr)
def run_command(executable, args=[], modify_env=lambda env: env):
command = [executable] + args
modified_env = modify_env(os.environ)
print('Executing command:', pretty_print_command(command, modified_env))
try:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=modified_env)
(stdout, stderr) = p.communicate()
except Exception as e:
raise Exception("While executing: %s" % command)
if p.returncode != 0:
raise CommandFailedException(command, modified_env, stdout, stderr, p.returncode)
print('Execution successful.')
print('stdout:')
print(stdout)
print('')
print('stderr:')
print(stderr)
print('')
return (stdout, stderr)
def run_compiled_executable(executable):
if run_under_valgrind:
args = VALGRIND_FLAGS.split() + [executable]
run_command('valgrind', args = args, modify_env = modify_env_for_compiled_executables)
else:
run_command(executable, modify_env = modify_env_for_compiled_executables)
class CompilationFailedException(Exception):
def __init__(self, command, env, error_message):
self.command = command
self.env = env
self.error_message = error_message
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Error message:
{error_message}
''').format(command=pretty_print_command(self.command, self.env), error_message=self.error_message)
class PosixCompiler:
def __init__(self):
self.executable = CXX
self.name = CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['-c', source, '-o', os.path.devnull]
self._compile(include_dirs, args=args)
except CommandFailedException as e:
raise CompilationFailedException(e.command, e.env, e.stderr)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ ADDITIONAL_LINKER_FLAGS.split()
+ args
+ ['-o', output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
FRUIT_COMPILE_FLAGS.split()
+ include_flags
+ ['-g0', '-Werror']
+ args
)
run_command(self.executable, args)
def get_disable_deprecation_warning_flags(self):
return ['-Wno-deprecated-declarations']
def get_disable_all_warnings_flags(self):
return ['-Wno-error']
class MsvcCompiler:
def __init__(self):
self.executable = CXX
self.name = CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['/c', source]
self._compile(include_dirs, args = args)
except CommandFailedException as e:
# Note that we use stdout here, unlike above. MSVC reports compilation warnings and errors on stdout.
raise CompilationFailedException(e.command, e.env, e.stdout)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ ADDITIONAL_LINKER_FLAGS.split()
+ args
+ ['/Fe' + output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
FRUIT_COMPILE_FLAGS.split()
+ include_flags
+ ['/WX']
+ args
)
run_command(self.executable, args)
def get_disable_deprecation_warning_flags(self):
return ['/wd4996']
def get_disable_all_warnings_flags(self):
return ['/WX:NO']
if CXX_COMPILER_NAME == 'MSVC':
compiler = MsvcCompiler()
if PATH_TO_COMPILED_FRUIT_LIB.endswith('.dll'):
path_to_fruit_lib = PATH_TO_COMPILED_FRUIT_LIB[:-4] + '.lib'
else:
path_to_fruit_lib = PATH_TO_COMPILED_FRUIT_LIB
fruit_tests_linker_flags = [path_to_fruit_lib]
fruit_error_message_extraction_regex = 'error C2338: (.*)'
else:
compiler = PosixCompiler()
fruit_tests_linker_flags = [
'-lfruit',
'-L' + PATH_TO_COMPILED_FRUIT,
'-Wl,-rpath,' + PATH_TO_COMPILED_FRUIT,
]
fruit_error_message_extraction_regex = 'static.assert(.*)'
fruit_tests_include_dirs = ADDITIONAL_INCLUDE_DIRS.splitlines() + [
PATH_TO_FRUIT_TEST_HEADERS,
PATH_TO_FRUIT_STATIC_HEADERS,
PATH_TO_FRUIT_GENERATED_HEADERS,
]
_assert_helper = unittest.TestCase()
def modify_env_for_compiled_executables(env):
env = env.copy()
path_to_fruit_lib_dir = os.path.dirname(PATH_TO_COMPILED_FRUIT_LIB)
print('PATH_TO_COMPILED_FRUIT_LIB:', PATH_TO_COMPILED_FRUIT_LIB)
print('Adding directory to PATH:', path_to_fruit_lib_dir)
env["PATH"] += os.pathsep + path_to_fruit_lib_dir
return env
def _create_temporary_file(file_content, file_name_suffix=''):
file_descriptor, file_name = tempfile.mkstemp(text=True, suffix=file_name_suffix)
file = os.fdopen(file_descriptor, mode='w')
file.write(file_content)
file.close()
return file_name
def _cap_to_lines(s, n):
lines = s.splitlines()
if len(lines) <= n:
return s
else:
return '\n'.join(lines[0:n] + ['...'])
def _replace_using_test_params(s, test_params):
for var_name, value in test_params.items():
if isinstance(value, str):
s = re.sub(r'\b%s\b' % var_name, value, s)
return s
def _construct_final_source_code(setup_source_code, source_code, test_params):
setup_source_code = textwrap.dedent(setup_source_code)
source_code = textwrap.dedent(source_code)
source_code = _replace_using_test_params(source_code, test_params)
return setup_source_code + source_code
def try_remove_temporary_file(filename):
try:
os.remove(filename)
except:
# When running Fruit tests on Windows using Appveyor, the remove command fails for temporary files sometimes.
# This shouldn't cause the tests to fail, so we ignore the exception and go ahead.
pass
def normalize_error_message_lines(lines):
# Different compilers output a different number of spaces when pretty-printing types.
# When using libc++, sometimes std::foo identifiers are reported as std::__1::foo.
return [line.replace(' ', '').replace('std::__1::', 'std::') for line in lines]
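# Hedged illustration (not part of the original helpers): the normalization above
# strips spaces and the libc++ inline namespace so equivalent diagnostics from
# different standard libraries compare equal. The sample line is hypothetical.
assert normalize_error_message_lines(
    ['std::__1::vector<int, std::__1::allocator<int> >']
) == ['std::vector<int,std::allocator<int>>']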
def expect_compile_error_helper(
check_error_fun,
setup_source_code,
source_code,
test_params={},
ignore_deprecation_warnings=False,
ignore_warnings=False):
source_code = _construct_final_source_code(setup_source_code, source_code, test_params)
source_file_name = _create_temporary_file(source_code, file_name_suffix='.cpp')
try:
args = []
if ignore_deprecation_warnings:
args += compiler.get_disable_deprecation_warning_flags()
if ignore_warnings:
args += compiler.get_disable_all_warnings_flags()
if ENABLE_COVERAGE:
# When collecting coverage these arguments are enabled by default; however we must disable them in tests
# expected to fail at compile-time because GCC would otherwise fail with an error like:
# /tmp/tmp4m22cey7.cpp:1:0: error: cannot open /dev/null.gcno
args += ['-fno-profile-arcs', '-fno-test-coverage']
compiler.compile_discarding_output(
source=source_file_name,
include_dirs=fruit_tests_include_dirs,
args=args)
raise Exception('The test should have failed to compile, but it compiled successfully')
except CompilationFailedException as e1:
e = e1
error_message = e.error_message
error_message_lines = error_message.splitlines()
error_message_head = _cap_to_lines(error_message, 40)
check_error_fun(e, error_message_lines, error_message_head)
try_remove_temporary_file(source_file_name)
def apply_any_error_context_replacements(error_string, following_lines):
if CXX_COMPILER_NAME == 'MSVC':
# MSVC errors are of the form:
#
# C:\Path\To\header\foo.h(59): note: see reference to class template instantiation 'fruit::impl::NoBindingFoundError<fruit::Annotated<Annotation,U>>' being compiled
# with
# [
# Annotation=Annotation1,
# U=std::function<std::unique_ptr<ScalerImpl,std::default_delete<ScalerImpl>> (double)>
# ]
#
# So we need to parse the following few lines and use them to replace the placeholder types in the Fruit error type.
replacement_lines = []
if len(following_lines) >= 4 and following_lines[0].strip() == 'with':
assert following_lines[1].strip() == '[', 'Line was: ' + following_lines[1]
for line in itertools.islice(following_lines, 2, None):
line = line.strip()
if line == ']':
break
if line.endswith(','):
line = line[:-1]
replacement_lines.append(line)
for replacement_line in replacement_lines:
match = re.search('([A-Za-z0-9_-]*)=(.*)', replacement_line)
if not match:
raise Exception('Failed to parse replacement line: %s' % replacement_line)
(type_variable, type_expression) = match.groups()
error_string = re.sub(r'\b' + type_variable + r'\b', type_expression, error_string)
return error_string
def expect_generic_compile_error(expected_error_regex, setup_source_code, source_code, test_params={}):
"""
Tests that the given source produces the expected error during compilation.
:param expected_error_regex: A regex used to match the compiler's error output.
Any identifiers contained in the regex will be replaced using test_params (where a replacement is defined).
:param setup_source_code: The first part of the source code. This is dedented separately from source_code and it's
*not* subject to test_params, unlike source_code.
:param source_code: The second part of the source code. Any identifiers will be replaced using test_params
(where a replacement is defined). This will be dedented.
:param test_params: A dict containing the definition of some identifiers. Each identifier in
expected_error_regex and source_code will be replaced (textually) with its definition (if a definition
was provided).
"""
expected_error_regex = _replace_using_test_params(expected_error_regex, test_params)
expected_error_regex = expected_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head):
error_message_lines_with_replacements = [
apply_any_error_context_replacements(line, error_message_lines[line_number + 1:])
for line_number, line in enumerate(error_message_lines)]
normalized_error_message_lines = normalize_error_message_lines(error_message_lines_with_replacements)
for line in normalized_error_message_lines:
if re.search(expected_error_regex, line):
return
raise Exception(textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain that.
Compiler command line: {compiler_command}
Error message was:
{error_message}
''').format(expected_error = expected_error_regex, compiler_command=e.command, error_message = error_message_head))
expect_compile_error_helper(check_error, setup_source_code, source_code, test_params)
def expect_compile_error(
expected_fruit_error_regex,
expected_fruit_error_desc_regex,
setup_source_code,
source_code,
test_params={},
ignore_deprecation_warnings=False,
ignore_warnings=False,
disable_error_line_number_check=False):
"""
Tests that the given source produces the expected error during compilation.
:param expected_fruit_error_regex: A regex used to match the Fruit error type,
e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
Any identifiers contained in the regex will be replaced using test_params (where a replacement is defined).
:param expected_fruit_error_desc_regex: A regex used to match the Fruit error description,
e.g. 'No explicit binding was found for C, and C is an abstract class'.
:param setup_source_code: The first part of the source code. This is dedented separately from source_code and it's
*not* subject to test_params, unlike source_code.
:param source_code: The second part of the source code. Any identifiers will be replaced using test_params
(where a replacement is defined). This will be dedented.
:param test_params: A dict containing the definition of some identifiers. Each identifier in
expected_fruit_error_regex and source_code will be replaced (textually) with its definition (if a definition
was provided).
:param ignore_deprecation_warnings: A boolean. If True, deprecation warnings will be ignored.
:param ignore_warnings: A boolean. If True, all warnings will be ignored.
:param disable_error_line_number_check: A boolean. If True, the test will not fail if there are other diagnostic
lines before the expected error.
"""
if '\n' in expected_fruit_error_regex:
raise Exception('expected_fruit_error_regex should not contain newlines')
if '\n' in expected_fruit_error_desc_regex:
raise Exception('expected_fruit_error_desc_regex should not contain newlines')
expected_fruit_error_regex = _replace_using_test_params(expected_fruit_error_regex, test_params)
expected_fruit_error_regex = expected_fruit_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head):
normalized_error_message_lines = normalize_error_message_lines(error_message_lines)
for line_number, line in enumerate(normalized_error_message_lines):
match = re.search('fruit::impl::(.*Error<.*>)', line)
if match:
actual_fruit_error_line_number = line_number
actual_fruit_error = match.groups()[0]
actual_fruit_error = apply_any_error_context_replacements(actual_fruit_error, normalized_error_message_lines[line_number + 1:])
break
else:
raise Exception(textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain user-facing Fruit errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
''').format(expected_error = expected_fruit_error_regex, compiler_command = e.command, error_message = error_message_head))
for line_number, line in enumerate(error_message_lines):
match = re.search(fruit_error_message_extraction_regex, line)
if match:
actual_static_assert_error_line_number = line_number
actual_static_assert_error = match.groups()[0]
break
else:
raise Exception(textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain static_assert errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
''').format(expected_error = expected_fruit_error_regex, compiler_command=e.command, error_message = error_message_head))
try:
regex_search_result = re.search(expected_fruit_error_regex, actual_fruit_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_fruit_error_regex) from e
if not regex_search_result:
raise Exception(textwrap.dedent('''\
The compilation failed as expected, but with a different error type.
Expected Fruit error type: {expected_fruit_error_regex}
Error type was: {actual_fruit_error}
Expected static assert error: {expected_fruit_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message was:
{error_message}
'''.format(
expected_fruit_error_regex = expected_fruit_error_regex,
actual_fruit_error = actual_fruit_error,
expected_fruit_error_desc_regex = expected_fruit_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
error_message = error_message_head)))
try:
regex_search_result = re.search(expected_fruit_error_desc_regex, actual_static_assert_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_fruit_error_desc_regex) from e
if not regex_search_result:
raise Exception(textwrap.dedent('''\
The compilation failed as expected, but with a different error message.
Expected Fruit error type: {expected_fruit_error_regex}
Error type was: {actual_fruit_error}
Expected static assert error: {expected_fruit_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message:
{error_message}
'''.format(
expected_fruit_error_regex = expected_fruit_error_regex,
actual_fruit_error = actual_fruit_error,
expected_fruit_error_desc_regex = expected_fruit_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
error_message = error_message_head)))
# 6 is just a constant that works for both g++ (<=6.0.0 at least) and clang++ (<=4.0.0 at least).
# It might need to be changed.
if not disable_error_line_number_check and (actual_fruit_error_line_number > 6 or actual_static_assert_error_line_number > 6):
raise Exception(textwrap.dedent('''\
The compilation failed with the expected message, but the error message contained too many lines before the relevant ones.
The error type was reported on line {actual_fruit_error_line_number} of the message (should be <=6).
The static assert was reported on line {actual_static_assert_error_line_number} of the message (should be <=6).
Error message:
{error_message}
'''.format(
actual_fruit_error_line_number = actual_fruit_error_line_number,
actual_static_assert_error_line_number = actual_static_assert_error_line_number,
error_message = error_message_head)))
for line in error_message_lines[:max(actual_fruit_error_line_number, actual_static_assert_error_line_number)]:
if re.search('fruit::impl::meta', line):
raise Exception(
'The compilation failed with the expected message, but the error message contained some metaprogramming types in the output (besides Error). Error message:\n%s' % error_message_head)
expect_compile_error_helper(check_error, setup_source_code, source_code, test_params, ignore_deprecation_warnings, ignore_warnings)
def expect_runtime_error(
expected_error_regex,
setup_source_code,
source_code,
test_params={},
ignore_deprecation_warnings=False):
"""
Tests that the given source (compiles successfully and) produces the expected error at runtime.
:param expected_error_regex: A regex used to match the content of stderr.
Any identifiers contained in the regex will be replaced using test_params (where a replacement is defined).
:param setup_source_code: The first part of the source code. This is dedented separately from source_code and it's
*not* subject to test_params, unlike source_code.
:param source_code: The second part of the source code. Any identifiers will be replaced using test_params
(where a replacement is defined). This will be dedented.
:param test_params: A dict containing the definition of some identifiers. Each identifier in
expected_error_regex and source_code will be replaced (textually) with its definition (if a definition
was provided).
"""
expected_error_regex = _replace_using_test_params(expected_error_regex, test_params)
source_code = _construct_final_source_code(setup_source_code, source_code, test_params)
source_file_name = _create_temporary_file(source_code, file_name_suffix='.cpp')
executable_suffix = {'posix': '', 'nt': '.exe'}[os.name]
output_file_name = _create_temporary_file('', executable_suffix)
args = fruit_tests_linker_flags.copy()
if ignore_deprecation_warnings:
args += compiler.get_disable_deprecation_warning_flags()
compiler.compile_and_link(
source=source_file_name,
include_dirs=fruit_tests_include_dirs,
output_file_name=output_file_name,
args=args)
try:
run_compiled_executable(output_file_name)
raise Exception('The test should have failed at runtime, but it ran successfully')
except CommandFailedException as e1:
e = e1
stderr = e.stderr
stderr_head = _cap_to_lines(stderr, 40)
if '\n' in expected_error_regex:
regex_flags = re.MULTILINE
else:
regex_flags = 0
try:
regex_search_result = re.search(expected_error_regex, stderr, flags=regex_flags)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_error_regex) from e
if not regex_search_result:
raise Exception(textwrap.dedent('''\
The test failed as expected, but with a different message.
Expected: {expected_error_regex}
Was:
{stderr}
'''.format(expected_error_regex = expected_error_regex, stderr = stderr_head)))
# Note that we don't delete the temporary files if the test failed. This is intentional, keeping them around helps debugging the failure.
if not ENABLE_COVERAGE:
try_remove_temporary_file(source_file_name)
try_remove_temporary_file(output_file_name)
def expect_success(setup_source_code, source_code, test_params={}, ignore_deprecation_warnings=False):
"""
Tests that the given source compiles and runs successfully.
:param setup_source_code: The first part of the source code. This is dedented separately from source_code and it's
*not* subject to test_params, unlike source_code.
:param source_code: The second part of the source code. Any identifiers will be replaced using test_params
(where a replacement is defined). This will be dedented.
:param test_params: A dict containing the definition of some identifiers. Each identifier in
source_code will be replaced (textually) with its definition (if a definition was provided).
"""
source_code = _construct_final_source_code(setup_source_code, source_code, test_params)
if 'main(' not in source_code:
source_code += textwrap.dedent('''
int main() {
}
''')
source_file_name = _create_temporary_file(source_code, file_name_suffix='.cpp')
executable_suffix = {'posix': '', 'nt': '.exe'}[os.name]
output_file_name = _create_temporary_file('', executable_suffix)
args = fruit_tests_linker_flags.copy()
if ignore_deprecation_warnings:
args += compiler.get_disable_deprecation_warning_flags()
compiler.compile_and_link(
source=source_file_name,
include_dirs=fruit_tests_include_dirs,
output_file_name=output_file_name,
args=args)
run_compiled_executable(output_file_name)
# Note that we don't delete the temporary files if the test failed. This is intentional, keeping them around helps debugging the failure.
if not ENABLE_COVERAGE:
try_remove_temporary_file(source_file_name)
try_remove_temporary_file(output_file_name)
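# Hedged sketch (not part of the original helpers) of how a test_*.py file
# typically drives expect_success(); the C++ snippet is hypothetical, and the
# call is wrapped in a function so nothing is compiled at import time.
def _expect_success_usage_sketch():
    common_definitions = '''
        #include <fruit/fruit.h>
        '''
    expect_success(
        common_definitions,
        '''
        fruit::Component<> getEmptyComponent() {
          return fruit::createComponent();
        }
        int main() {
          return 0;
        }
        ''')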
# Note: this is not the main function of this file, it's meant to be used as main function from test_*.py files.
def main():
absltest.main(*sys.argv)
| 44.894992
| 202
| 0.680461
|
0809ee7a2936ce5c74b442b27ae28e550567e78e
| 17,898
|
py
|
Python
|
supervisor/addons/validate.py
|
mories76/supervisor
|
fb7bc6bd970fada6c75e3c58c71725fdf44cd1e3
|
[
"Apache-2.0"
] | 24
|
2020-03-08T21:13:00.000Z
|
2020-03-11T06:18:43.000Z
|
supervisor/addons/validate.py
|
mories76/supervisor
|
fb7bc6bd970fada6c75e3c58c71725fdf44cd1e3
|
[
"Apache-2.0"
] | null | null | null |
supervisor/addons/validate.py
|
mories76/supervisor
|
fb7bc6bd970fada6c75e3c58c71725fdf44cd1e3
|
[
"Apache-2.0"
] | null | null | null |
"""Validate add-ons options schema."""
import logging
import re
import secrets
from typing import Any, Dict, List
import uuid
import voluptuous as vol
from ..const import (
ARCH_ALL,
ATTR_ACCESS_TOKEN,
ATTR_ADVANCED,
ATTR_APPARMOR,
ATTR_ARCH,
ATTR_ARGS,
ATTR_AUDIO,
ATTR_AUDIO_INPUT,
ATTR_AUDIO_OUTPUT,
ATTR_AUTH_API,
ATTR_AUTO_UART,
ATTR_AUTO_UPDATE,
ATTR_BOOT,
ATTR_BUILD_FROM,
ATTR_DESCRIPTON,
ATTR_DEVICES,
ATTR_DEVICETREE,
ATTR_DISCOVERY,
ATTR_DOCKER_API,
ATTR_ENVIRONMENT,
ATTR_FULL_ACCESS,
ATTR_GPIO,
ATTR_HASSIO_API,
ATTR_HASSIO_ROLE,
ATTR_HOMEASSISTANT,
ATTR_HOMEASSISTANT_API,
ATTR_HOST_DBUS,
ATTR_HOST_IPC,
ATTR_HOST_NETWORK,
ATTR_HOST_PID,
ATTR_IMAGE,
ATTR_INGRESS,
ATTR_INGRESS_ENTRY,
ATTR_INGRESS_PANEL,
ATTR_INGRESS_PORT,
ATTR_INGRESS_TOKEN,
ATTR_INIT,
ATTR_KERNEL_MODULES,
ATTR_LEGACY,
ATTR_LOCATON,
ATTR_MACHINE,
ATTR_MAP,
ATTR_NAME,
ATTR_NETWORK,
ATTR_OPTIONS,
ATTR_PANEL_ADMIN,
ATTR_PANEL_ICON,
ATTR_PANEL_TITLE,
ATTR_PORTS,
ATTR_PORTS_DESCRIPTION,
ATTR_PRIVILEGED,
ATTR_PROTECTED,
ATTR_REPOSITORY,
ATTR_SCHEMA,
ATTR_SERVICES,
ATTR_SLUG,
ATTR_SNAPSHOT_EXCLUDE,
ATTR_SQUASH,
ATTR_STAGE,
ATTR_STARTUP,
ATTR_STATE,
ATTR_STDIN,
ATTR_SYSTEM,
ATTR_TIMEOUT,
ATTR_TMPFS,
ATTR_UDEV,
ATTR_URL,
ATTR_USER,
ATTR_UUID,
ATTR_VERSION,
ATTR_VIDEO,
ATTR_WEBUI,
BOOT_AUTO,
BOOT_MANUAL,
PRIVILEGED_ALL,
ROLE_ALL,
ROLE_DEFAULT,
STARTUP_ALL,
STARTUP_APPLICATION,
STARTUP_SERVICES,
STATE_STARTED,
STATE_STOPPED,
AddonStages,
)
from ..coresys import CoreSys
from ..discovery.validate import valid_discovery_service
from ..validate import (
DOCKER_PORTS,
DOCKER_PORTS_DESCRIPTION,
network_port,
token,
uuid_match,
)
_LOGGER: logging.Logger = logging.getLogger(__name__)
RE_VOLUME = re.compile(r"^(config|ssl|addons|backup|share)(?::(rw|ro))?$")
RE_SERVICE = re.compile(r"^(?P<service>mqtt|mysql):(?P<rights>provide|want|need)$")
V_STR = "str"
V_INT = "int"
V_FLOAT = "float"
V_BOOL = "bool"
V_PASSWORD = "password"
V_EMAIL = "email"
V_URL = "url"
V_PORT = "port"
V_MATCH = "match"
V_LIST = "list"
RE_SCHEMA_ELEMENT = re.compile(
r"^(?:"
r"|bool|email|url|port"
r"|str(?:\((?P<s_min>\d+)?,(?P<s_max>\d+)?\))?"
r"|password(?:\((?P<p_min>\d+)?,(?P<p_max>\d+)?\))?"
r"|int(?:\((?P<i_min>\d+)?,(?P<i_max>\d+)?\))?"
r"|float(?:\((?P<f_min>[\d\.]+)?,(?P<f_max>[\d\.]+)?\))?"
r"|match\((?P<match>.*)\)"
r"|list\((?P<list>.+)\)"
r")\??$"
)
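# A few option schema strings accepted by RE_SCHEMA_ELEMENT above (the trailing "?"
# marks an option as optional):
#
#   "bool"                   boolean value
#   "int(0,100)"             integer restricted to the range 0..100
#   "password(8,)?"          optional password with a minimum length of 8
#   "match([a-z]+)"          value validated against a custom regular expression
#   "list(low|medium|high)"  value restricted to one of the listed choices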
_SCHEMA_LENGTH_PARTS = (
"i_min",
"i_max",
"f_min",
"f_max",
"s_min",
"s_max",
"p_min",
"p_max",
)
RE_DOCKER_IMAGE = re.compile(r"^([a-zA-Z\-\.:\d{}]+/)*?([\-\w{}]+)/([\-\w{}]+)$")
RE_DOCKER_IMAGE_BUILD = re.compile(
r"^([a-zA-Z\-\.:\d{}]+/)*?([\-\w{}]+)/([\-\w{}]+)(:[\.\-\w{}]+)?$"
)
SCHEMA_ELEMENT = vol.Match(RE_SCHEMA_ELEMENT)
MACHINE_ALL = [
"intel-nuc",
"odroid-c2",
"odroid-n2",
"odroid-xu",
"qemuarm-64",
"qemuarm",
"qemux86-64",
"qemux86",
"raspberrypi",
"raspberrypi2",
"raspberrypi3-64",
"raspberrypi3",
"raspberrypi4-64",
"raspberrypi4",
"tinker",
]
def _simple_startup(value):
"""Simple startup schema."""
if value == "before":
return STARTUP_SERVICES
if value == "after":
return STARTUP_APPLICATION
return value
# pylint: disable=no-value-for-parameter
SCHEMA_ADDON_CONFIG = vol.Schema(
{
vol.Required(ATTR_NAME): vol.Coerce(str),
vol.Required(ATTR_VERSION): vol.Coerce(str),
vol.Required(ATTR_SLUG): vol.Coerce(str),
vol.Required(ATTR_DESCRIPTON): vol.Coerce(str),
vol.Required(ATTR_ARCH): [vol.In(ARCH_ALL)],
vol.Optional(ATTR_MACHINE): [vol.In(MACHINE_ALL)],
vol.Optional(ATTR_URL): vol.Url(),
vol.Required(ATTR_STARTUP): vol.All(_simple_startup, vol.In(STARTUP_ALL)),
vol.Required(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_INIT, default=True): vol.Boolean(),
vol.Optional(ATTR_ADVANCED, default=False): vol.Boolean(),
vol.Optional(ATTR_STAGE, default=AddonStages.STABLE): vol.Coerce(AddonStages),
vol.Optional(ATTR_PORTS): DOCKER_PORTS,
vol.Optional(ATTR_PORTS_DESCRIPTION): DOCKER_PORTS_DESCRIPTION,
vol.Optional(ATTR_WEBUI): vol.Match(
r"^(?:https?|\[PROTO:\w+\]):\/\/\[HOST\]:\[PORT:\d+\].*$"
),
vol.Optional(ATTR_INGRESS, default=False): vol.Boolean(),
vol.Optional(ATTR_INGRESS_PORT, default=8099): vol.Any(
network_port, vol.Equal(0)
),
vol.Optional(ATTR_INGRESS_ENTRY): vol.Coerce(str),
vol.Optional(ATTR_PANEL_ICON, default="mdi:puzzle"): vol.Coerce(str),
vol.Optional(ATTR_PANEL_TITLE): vol.Coerce(str),
vol.Optional(ATTR_PANEL_ADMIN, default=True): vol.Boolean(),
vol.Optional(ATTR_HOMEASSISTANT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_HOST_NETWORK, default=False): vol.Boolean(),
vol.Optional(ATTR_HOST_PID, default=False): vol.Boolean(),
vol.Optional(ATTR_HOST_IPC, default=False): vol.Boolean(),
vol.Optional(ATTR_HOST_DBUS, default=False): vol.Boolean(),
vol.Optional(ATTR_DEVICES): [vol.Match(r"^(.*):(.*):([rwm]{1,3})$")],
vol.Optional(ATTR_AUTO_UART, default=False): vol.Boolean(),
vol.Optional(ATTR_UDEV, default=False): vol.Boolean(),
vol.Optional(ATTR_TMPFS): vol.Match(r"^size=(\d)*[kmg](,uid=\d{1,4})?(,rw)?$"),
vol.Optional(ATTR_MAP, default=list): [vol.Match(RE_VOLUME)],
vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r"\w*"): vol.Coerce(str)},
vol.Optional(ATTR_PRIVILEGED): [vol.In(PRIVILEGED_ALL)],
vol.Optional(ATTR_APPARMOR, default=True): vol.Boolean(),
vol.Optional(ATTR_FULL_ACCESS, default=False): vol.Boolean(),
vol.Optional(ATTR_AUDIO, default=False): vol.Boolean(),
vol.Optional(ATTR_VIDEO, default=False): vol.Boolean(),
vol.Optional(ATTR_GPIO, default=False): vol.Boolean(),
vol.Optional(ATTR_DEVICETREE, default=False): vol.Boolean(),
vol.Optional(ATTR_KERNEL_MODULES, default=False): vol.Boolean(),
vol.Optional(ATTR_HASSIO_API, default=False): vol.Boolean(),
vol.Optional(ATTR_HASSIO_ROLE, default=ROLE_DEFAULT): vol.In(ROLE_ALL),
vol.Optional(ATTR_HOMEASSISTANT_API, default=False): vol.Boolean(),
vol.Optional(ATTR_STDIN, default=False): vol.Boolean(),
vol.Optional(ATTR_LEGACY, default=False): vol.Boolean(),
vol.Optional(ATTR_DOCKER_API, default=False): vol.Boolean(),
vol.Optional(ATTR_AUTH_API, default=False): vol.Boolean(),
vol.Optional(ATTR_SERVICES): [vol.Match(RE_SERVICE)],
vol.Optional(ATTR_DISCOVERY): [valid_discovery_service],
vol.Optional(ATTR_SNAPSHOT_EXCLUDE): [vol.Coerce(str)],
vol.Required(ATTR_OPTIONS): dict,
vol.Required(ATTR_SCHEMA): vol.Any(
vol.Schema(
{
vol.Coerce(str): vol.Any(
SCHEMA_ELEMENT,
[
vol.Any(
SCHEMA_ELEMENT,
{
vol.Coerce(str): vol.Any(
SCHEMA_ELEMENT, [SCHEMA_ELEMENT]
)
},
)
],
vol.Schema(
{vol.Coerce(str): vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])}
),
)
}
),
False,
),
vol.Optional(ATTR_IMAGE): vol.Match(RE_DOCKER_IMAGE),
vol.Optional(ATTR_TIMEOUT, default=10): vol.All(
vol.Coerce(int), vol.Range(min=10, max=120)
),
},
extra=vol.REMOVE_EXTRA,
)
# pylint: disable=no-value-for-parameter
SCHEMA_BUILD_CONFIG = vol.Schema(
{
vol.Optional(ATTR_BUILD_FROM, default=dict): vol.Schema(
{vol.In(ARCH_ALL): vol.Match(RE_DOCKER_IMAGE_BUILD)}
),
vol.Optional(ATTR_SQUASH, default=False): vol.Boolean(),
vol.Optional(ATTR_ARGS, default=dict): vol.Schema(
{vol.Coerce(str): vol.Coerce(str)}
),
},
extra=vol.REMOVE_EXTRA,
)
# pylint: disable=no-value-for-parameter
SCHEMA_ADDON_USER = vol.Schema(
{
vol.Required(ATTR_VERSION): vol.Coerce(str),
vol.Optional(ATTR_IMAGE): vol.Coerce(str),
vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): uuid_match,
vol.Optional(ATTR_ACCESS_TOKEN): token,
vol.Optional(ATTR_INGRESS_TOKEN, default=secrets.token_urlsafe): vol.Coerce(
str
),
vol.Optional(ATTR_OPTIONS, default=dict): dict,
vol.Optional(ATTR_AUTO_UPDATE, default=False): vol.Boolean(),
vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_NETWORK): DOCKER_PORTS,
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_PROTECTED, default=True): vol.Boolean(),
vol.Optional(ATTR_INGRESS_PANEL, default=False): vol.Boolean(),
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_ADDON_SYSTEM = SCHEMA_ADDON_CONFIG.extend(
{
vol.Required(ATTR_LOCATON): vol.Coerce(str),
vol.Required(ATTR_REPOSITORY): vol.Coerce(str),
}
)
SCHEMA_ADDONS_FILE = vol.Schema(
{
vol.Optional(ATTR_USER, default=dict): {vol.Coerce(str): SCHEMA_ADDON_USER},
vol.Optional(ATTR_SYSTEM, default=dict): {vol.Coerce(str): SCHEMA_ADDON_SYSTEM},
}
)
SCHEMA_ADDON_SNAPSHOT = vol.Schema(
{
vol.Required(ATTR_USER): SCHEMA_ADDON_USER,
vol.Required(ATTR_SYSTEM): SCHEMA_ADDON_SYSTEM,
vol.Required(ATTR_STATE): vol.In([STATE_STARTED, STATE_STOPPED]),
vol.Required(ATTR_VERSION): vol.Coerce(str),
},
extra=vol.REMOVE_EXTRA,
)
def validate_options(coresys: CoreSys, raw_schema: Dict[str, Any]):
"""Validate schema."""
def validate(struct):
"""Create schema validator for add-ons options."""
options = {}
# read options
for key, value in struct.items():
# Ignore unknown options / remove from list
if key not in raw_schema:
_LOGGER.warning("Unknown options %s", key)
continue
typ = raw_schema[key]
try:
if isinstance(typ, list):
# nested value list
options[key] = _nested_validate_list(coresys, typ[0], value, key)
elif isinstance(typ, dict):
# nested value dict
options[key] = _nested_validate_dict(coresys, typ, value, key)
else:
# normal value
options[key] = _single_validate(coresys, typ, value, key)
except (IndexError, KeyError):
raise vol.Invalid(f"Type error for {key}") from None
_check_missing_options(raw_schema, options, "root")
return options
return validate
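# Minimal illustration (not part of the original module): for a raw schema such as
# {"ssl": "bool", "port": "port"} the returned closure coerces and validates a
# user-supplied options dict. The values are made up; a real call needs a CoreSys
# instance (it is used for "!secret" lookups).
#
#   validator = validate_options(coresys, {"ssl": "bool", "port": "port"})
#   validator({"ssl": "true", "port": 8123})   # -> {"ssl": True, "port": 8123}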
# pylint: disable=no-value-for-parameter
# pylint: disable=inconsistent-return-statements
def _single_validate(coresys: CoreSys, typ: str, value: Any, key: str):
"""Validate a single element."""
# if required argument
if value is None:
raise vol.Invalid(f"Missing required option '{key}'")
# Lookup secret
if str(value).startswith("!secret "):
secret: str = value.partition(" ")[2]
value = coresys.secrets.get(secret)
if value is None:
raise vol.Invalid(f"Unknown secret {secret}")
# parse extend data from type
match = RE_SCHEMA_ELEMENT.match(typ)
# prepare range
range_args = {}
for group_name in _SCHEMA_LENGTH_PARTS:
group_value = match.group(group_name)
if group_value:
range_args[group_name[2:]] = float(group_value)
if typ.startswith(V_STR) or typ.startswith(V_PASSWORD):
return vol.All(str(value), vol.Range(**range_args))(value)
elif typ.startswith(V_INT):
return vol.All(vol.Coerce(int), vol.Range(**range_args))(value)
elif typ.startswith(V_FLOAT):
return vol.All(vol.Coerce(float), vol.Range(**range_args))(value)
elif typ.startswith(V_BOOL):
return vol.Boolean()(value)
elif typ.startswith(V_EMAIL):
return vol.Email()(value)
elif typ.startswith(V_URL):
return vol.Url()(value)
elif typ.startswith(V_PORT):
return network_port(value)
elif typ.startswith(V_MATCH):
return vol.Match(match.group("match"))(str(value))
elif typ.startswith(V_LIST):
return vol.In(match.group("list").split("|"))(str(value))
raise vol.Invalid(f"Fatal error for {key} type {typ}")
def _nested_validate_list(coresys, typ, data_list, key):
"""Validate nested items."""
options = []
for element in data_list:
# Nested?
if isinstance(typ, dict):
c_options = _nested_validate_dict(coresys, typ, element, key)
options.append(c_options)
else:
options.append(_single_validate(coresys, typ, element, key))
return options
def _nested_validate_dict(coresys, typ, data_dict, key):
"""Validate nested items."""
options = {}
for c_key, c_value in data_dict.items():
# Ignore unknown options / remove from list
if c_key not in typ:
_LOGGER.warning("Unknown options %s", c_key)
continue
# Nested?
if isinstance(typ[c_key], list):
options[c_key] = _nested_validate_list(
coresys, typ[c_key][0], c_value, c_key
)
else:
options[c_key] = _single_validate(coresys, typ[c_key], c_value, c_key)
_check_missing_options(typ, options, key)
return options
def _check_missing_options(origin, exists, root):
"""Check if all options are exists."""
missing = set(origin) - set(exists)
for miss_opt in missing:
if isinstance(origin[miss_opt], str) and origin[miss_opt].endswith("?"):
continue
raise vol.Invalid(f"Missing option {miss_opt} in {root}")
def schema_ui_options(raw_schema: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Generate UI schema."""
ui_schema = []
# read options
for key, value in raw_schema.items():
if isinstance(value, list):
# nested value list
_nested_ui_list(ui_schema, value, key)
elif isinstance(value, dict):
# nested value dict
_nested_ui_dict(ui_schema, value, key)
else:
# normal value
_single_ui_option(ui_schema, value, key)
return ui_schema
def _single_ui_option(
ui_schema: List[Dict[str, Any]], value: str, key: str, multiple: bool = False
) -> None:
"""Validate a single element."""
ui_node = {"name": key}
# If multiple
if multiple:
ui_node["multiple"] = True
# Parse extend data from type
match = RE_SCHEMA_ELEMENT.match(value)
# Prepare range
for group_name in _SCHEMA_LENGTH_PARTS:
group_value = match.group(group_name)
if not group_value:
continue
if group_name[2:] == "min":
ui_node["lengthMin"] = float(group_value)
elif group_name[2:] == "max":
ui_node["lengthMax"] = float(group_value)
# If required
if value.endswith("?"):
ui_node["optional"] = True
else:
ui_node["required"] = True
# Data types
if value.startswith(V_STR):
ui_node["type"] = "string"
elif value.startswith(V_PASSWORD):
ui_node["type"] = "string"
ui_node["format"] = "password"
elif value.startswith(V_INT):
ui_node["type"] = "integer"
elif value.startswith(V_FLOAT):
ui_node["type"] = "float"
elif value.startswith(V_BOOL):
ui_node["type"] = "boolean"
elif value.startswith(V_EMAIL):
ui_node["type"] = "string"
ui_node["format"] = "email"
elif value.startswith(V_URL):
ui_node["type"] = "string"
ui_node["format"] = "url"
elif value.startswith(V_PORT):
ui_node["type"] = "integer"
elif value.startswith(V_MATCH):
ui_node["type"] = "string"
elif value.startswith(V_LIST):
ui_node["type"] = "select"
ui_node["options"] = match.group("list").split("|")
ui_schema.append(ui_node)
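# For example, a schema entry of "int(1,65535)?" for the key "port" yields a UI node
# roughly like:
#   {"name": "port", "optional": True, "type": "integer",
#    "lengthMin": 1.0, "lengthMax": 65535.0}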
def _nested_ui_list(
ui_schema: List[Dict[str, Any]], option_list: List[Any], key: str
) -> None:
"""UI nested list items."""
try:
element = option_list[0]
except IndexError:
_LOGGER.error("Invalid schema %s", key)
return
if isinstance(element, dict):
_nested_ui_dict(ui_schema, element, key, multiple=True)
else:
_single_ui_option(ui_schema, element, key, multiple=True)
def _nested_ui_dict(
ui_schema: List[Dict[str, Any]],
option_dict: Dict[str, Any],
key: str,
multiple: bool = False,
) -> None:
"""UI nested dict items."""
ui_node = {"name": key, "type": "schema", "optional": True, "multiple": multiple}
nested_schema = []
for c_key, c_value in option_dict.items():
# Nested?
if isinstance(c_value, list):
_nested_ui_list(nested_schema, c_value, c_key)
else:
_single_ui_option(nested_schema, c_value, c_key)
ui_node["schema"] = nested_schema
ui_schema.append(ui_node)
| 31.126957
| 88
| 0.611018
|
b9c1c83a1855e6dff9f1e177e93ee2ff75ce2dc4
| 7,057
|
py
|
Python
|
lmnet/configs/core/object_detection/yolo_v2_pascalvoc_2007_2012.py
|
progrunner17/blueoil
|
5cbe8b2ceebaaa7a6582a377031ae92855bed0aa
|
[
"Apache-2.0"
] | null | null | null |
lmnet/configs/core/object_detection/yolo_v2_pascalvoc_2007_2012.py
|
progrunner17/blueoil
|
5cbe8b2ceebaaa7a6582a377031ae92855bed0aa
|
[
"Apache-2.0"
] | null | null | null |
lmnet/configs/core/object_detection/yolo_v2_pascalvoc_2007_2012.py
|
progrunner17/blueoil
|
5cbe8b2ceebaaa7a6582a377031ae92855bed0aa
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from easydict import EasyDict
import tensorflow as tf
from lmnet.common import Tasks
from lmnet.networks.object_detection.yolo_v2 import YoloV2
from lmnet.datasets.pascalvoc_2007_2012 import Pascalvoc20072012
from lmnet.data_processor import Sequence
from lmnet.pre_processor import (
ResizeWithGtBoxes,
DivideBy255,
)
from lmnet.post_processor import (
FormatYoloV2,
ExcludeLowScoreBox,
NMS,
)
from lmnet.data_augmentor import (
Brightness,
Color,
Contrast,
FlipLeftRight,
Hue,
SSDRandomCrop,
)
IS_DEBUG = False
NETWORK_CLASS = YoloV2
DATASET_CLASS = Pascalvoc20072012
IMAGE_SIZE = [416, 416]
BATCH_SIZE = 8
DATA_FORMAT = "NHWC"
TASK = Tasks.OBJECT_DETECTION
CLASSES = DATASET_CLASS.classes
MAX_STEPS = 1000000
SAVE_STEPS = 50000
TEST_STEPS = 10000
SUMMARISE_STEPS = 1000
# distributed training
IS_DISTRIBUTION = False
# for debug
# IS_DEBUG = True
# SUMMARISE_STEPS = 1
# pretrain
IS_PRETRAIN = True
PRETRAIN_VARS = [
'block_1/conv/kernel:0',
'block_1/bn/beta:0',
'block_1/bn/gamma:0',
'block_1/bn/moving_mean:0',
'block_1/bn/moving_variance:0',
'block_2/conv/kernel:0',
'block_2/bn/beta:0',
'block_2/bn/gamma:0',
'block_2/bn/moving_mean:0',
'block_2/bn/moving_variance:0',
'block_3/conv/kernel:0',
'block_3/bn/beta:0',
'block_3/bn/gamma:0',
'block_3/bn/moving_mean:0',
'block_3/bn/moving_variance:0',
'block_4/conv/kernel:0',
'block_4/bn/beta:0',
'block_4/bn/gamma:0',
'block_4/bn/moving_mean:0',
'block_4/bn/moving_variance:0',
'block_5/conv/kernel:0',
'block_5/bn/beta:0',
'block_5/bn/gamma:0',
'block_5/bn/moving_mean:0',
'block_5/bn/moving_variance:0',
'block_6/conv/kernel:0',
'block_6/bn/beta:0',
'block_6/bn/gamma:0',
'block_6/bn/moving_mean:0',
'block_6/bn/moving_variance:0',
'block_7/conv/kernel:0',
'block_7/bn/beta:0',
'block_7/bn/gamma:0',
'block_7/bn/moving_mean:0',
'block_7/bn/moving_variance:0',
'block_8/conv/kernel:0',
'block_8/bn/beta:0',
'block_8/bn/gamma:0',
'block_8/bn/moving_mean:0',
'block_8/bn/moving_variance:0',
'block_9/conv/kernel:0',
'block_9/bn/beta:0',
'block_9/bn/gamma:0',
'block_9/bn/moving_mean:0',
'block_9/bn/moving_variance:0',
'block_10/conv/kernel:0',
'block_10/bn/beta:0',
'block_10/bn/gamma:0',
'block_10/bn/moving_mean:0',
'block_10/bn/moving_variance:0',
'block_11/conv/kernel:0',
'block_11/bn/beta:0',
'block_11/bn/gamma:0',
'block_11/bn/moving_mean:0',
'block_11/bn/moving_variance:0',
'block_12/conv/kernel:0',
'block_12/bn/beta:0',
'block_12/bn/gamma:0',
'block_12/bn/moving_mean:0',
'block_12/bn/moving_variance:0',
'block_13/conv/kernel:0',
'block_13/bn/beta:0',
'block_13/bn/gamma:0',
'block_13/bn/moving_mean:0',
'block_13/bn/moving_variance:0',
'block_14/conv/kernel:0',
'block_14/bn/beta:0',
'block_14/bn/gamma:0',
'block_14/bn/moving_mean:0',
'block_14/bn/moving_variance:0',
'block_15/conv/kernel:0',
'block_15/bn/beta:0',
'block_15/bn/gamma:0',
'block_15/bn/moving_mean:0',
'block_15/bn/moving_variance:0',
'block_16/conv/kernel:0',
'block_16/bn/beta:0',
'block_16/bn/gamma:0',
'block_16/bn/moving_mean:0',
'block_16/bn/moving_variance:0',
'block_17/conv/kernel:0',
'block_17/bn/beta:0',
'block_17/bn/gamma:0',
'block_17/bn/moving_mean:0',
'block_17/bn/moving_variance:0',
'block_18/conv/kernel:0',
'block_18/bn/beta:0',
'block_18/bn/gamma:0',
'block_18/bn/moving_mean:0',
'block_18/bn/moving_variance:0',
'block_19/conv/kernel:0',
'block_19/bn/beta:0',
'block_19/bn/gamma:0',
'block_19/bn/moving_mean:0',
'block_19/bn/moving_variance:0',
'block_20/conv/kernel:0',
'block_20/bn/beta:0',
'block_20/bn/gamma:0',
'block_20/bn/moving_mean:0',
'block_20/bn/moving_variance:0',
'block_21/conv/kernel:0',
'block_21/bn/beta:0',
'block_21/bn/gamma:0',
'block_21/bn/moving_mean:0',
'block_21/bn/moving_variance:0',
'block_22/conv/kernel:0',
'block_22/bn/beta:0',
'block_22/bn/gamma:0',
'block_22/bn/moving_mean:0',
'block_22/bn/moving_variance:0',
# 'conv_23/kernel:0',
# 'conv_23/bias:0',
]
PRETRAIN_DIR = "saved/convert_weight_from_darknet/yolo_v2/checkpoints"
PRETRAIN_FILE = "save.ckpt"
PRE_PROCESSOR = Sequence([
ResizeWithGtBoxes(size=IMAGE_SIZE),
DivideBy255()
])
anchors = [
(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071)
]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
FormatYoloV2(
image_size=IMAGE_SIZE,
classes=CLASSES,
anchors=anchors,
data_format=DATA_FORMAT,
),
ExcludeLowScoreBox(threshold=score_threshold),
NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,),
])
NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9, "learning_rate": 1e-4}
# NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
# In the original YOLOv2 paper, the learning rate starts at 1e-3 and is divided by 10 at 60 and 90 epochs.
# Train data num per epoch is 16551
# NETWORK.LEARNING_RATE_KWARGS = {
# "values": [1e-4, 1e-4],
# "boundaries": [10000],
# }
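# Sketch of the paper-style schedule with the settings above (BATCH_SIZE = 8 and
# ~16551 images per epoch give ~2069 steps per epoch, so 60 and 90 epochs land near
# steps 124000 and 186000). Kept commented out, like the block above, because this
# config trains with a fixed 1e-4 learning rate by default:
# NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
# NETWORK.LEARNING_RATE_KWARGS = {
#     "values": [1e-3, 1e-4, 1e-5],
#     "boundaries": [124000, 186000],
# }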
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.OBJECT_SCALE = 5.0
NETWORK.NO_OBJECT_SCALE = 1.0
NETWORK.CLASS_SCALE = 1.0
NETWORK.COORDINATE_SCALE = 1.0
NETWORK.LOSS_IOU_THRESHOLD = 0.6
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.SCORE_THRESHOLD = score_threshold
NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold
NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size
NETWORK.SEEN_THRESHOLD = 12800
# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
FlipLeftRight(is_bounding_box=True),
Brightness((0.75, 1.25)),
Color((0.75, 1.25)),
Contrast((0.75, 1.25)),
Hue((-10, 10)),
SSDRandomCrop(min_crop_ratio=0.7),
])
DATASET.ENABLE_PREFETCH = True
| 28.686992
| 109
| 0.687544
|
4a116616f1dc8b47b1cfbd310e858127e6794149
| 898
|
py
|
Python
|
mythirdpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
mythirdpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
mythirdpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'tangcheng'
__mtime__ = '12/12/2017'
"""
import urllib.request
import re
from db.DBUtil import persist
def getContent(url):
headers = ("User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
urllib.request.install_opener(opener)
urlopen = urllib.request.urlopen(url)
data = urlopen.read().decode("utf-8")
# print(data)
userpat = '<title>(.*?)<'
titleList = re.compile(userpat, re.S).findall(data)
print("titleList-->", titleList)
for title in titleList:
print("data:", title)
persist(url,title)
if __name__ == '__main__':
getContent("http://jd.com")
getContent("http://chaojihao.net")
| 24.944444
| 132
| 0.640312
|
5dcdf04c37cbf0a856bf2b8b55f46b8b9fa56ac3
| 9,951
|
py
|
Python
|
harmony/http.py
|
bilts/harmony-service-lib-py
|
6eb358539adab9510d75158737de2b55fba85f91
|
[
"Apache-2.0"
] | null | null | null |
harmony/http.py
|
bilts/harmony-service-lib-py
|
6eb358539adab9510d75158737de2b55fba85f91
|
[
"Apache-2.0"
] | null | null | null |
harmony/http.py
|
bilts/harmony-service-lib-py
|
6eb358539adab9510d75158737de2b55fba85f91
|
[
"Apache-2.0"
] | null | null | null |
"""
Utility functions to download data from backend data sources so it can be operated on
locally.
When downloading from an EDL-token aware data source, this module uses EDL shared /
federated token authentication. It includes an optional fallback authentication that
uses an EDL user to download data when the feature is enabled.
This module relies on the harmony.util.config and its environment variables to be
set for correct operation. See that module and the project README for details.
"""
from functools import lru_cache
import json
from urllib.parse import urlencode, urlparse
import requests
from harmony.earthdata import EarthdataAuth, EarthdataSession
from harmony.exceptions import ForbiddenException
from harmony.logging import build_logger
# Timeout in seconds. Per requests docs, this is not a time limit on
# the entire response download; rather, an exception is raised if the
# server has not issued a response for timeout seconds (more
# precisely, if no bytes have been received on the underlying socket
# for timeout seconds). See:
# https://2.python-requests.org/en/master/user/quickstart/#timeouts
TIMEOUT = 60
def is_http(url: str) -> bool:
"""Predicate to determine if the url is an http endpoint.
Parameters
----------
url : str
The URL to check
Returns
-------
bool
Whether the URL is an http endpoint.
"""
return url is not None and urlparse(url).scheme in ['http', 'https']
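# For instance, is_http('https://example.com/granule.nc4') and
# is_http('http://localhost:3000/data') return True, while
# is_http('s3://my-bucket/key') and is_http(None) return False.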
def localhost_url(url, local_hostname):
"""Return a version of the url optimized for local development.
If the url includes the string `localhost`, it will be replaced by
the `local_hostname`.
Parameters
----------
url : str
The url to check
Returns
-------
str : The url, possibly converted to use a different local hostname
"""
return url.replace('localhost', local_hostname)
def _is_eula_error(body: str) -> bool:
"""
Tries to determine if the exception is due to a EULA that the user needs to
approve, and if so, returns a response with the url where they can do so.
Parameters
----------
body: The body JSON string that may contain the EULA details.
Returns
-------
A boolean indicating if the body contains a EULA error
"""
try:
json_object = json.loads(body)
return "error_description" in json_object and "resolution_url" in json_object
except Exception:
return False
def _eula_error_message(body: str) -> str:
"""
Constructs a user-friendly error indicating the required EULA
acceptance and the URL where the user can do so.
Parameters
----------
body: The body JSON string that may contain the EULA details.
Returns
-------
The string with the EULA message
"""
json_object = json.loads(body)
return (f"Request could not be completed because you need to agree to the EULA "
f"at {json_object['resolution_url']}")
@lru_cache(maxsize=128)
def _valid(oauth_host: str, oauth_client_id: str, access_token: str) -> bool:
"""
Validates the user access token with Earthdata Login.
Parameters
----------
oauth_host: The Earthdata Login hostname
oauth_client_id: The EDL application's client id
access_token: The user's access token to validate
Returns
-------
Boolean indicating a valid or invalid user access token
"""
url = f'{oauth_host}/oauth/tokens/user?token={access_token}&client_id={oauth_client_id}'
response = requests.post(url, timeout=TIMEOUT)
if response.ok:
return True
raise Exception(response.json())
@lru_cache(maxsize=128)
def _earthdata_session():
"""Constructs an EarthdataSession for use to download one or more files."""
return EarthdataSession()
def _download(config, url: str, access_token: str, data):
"""Implements the download functionality.
Using the EarthdataSession and EarthdataAuth extensions to the
`requests` module, this function will download the given url and
perform any necessary Earthdata Login OAuth handshakes.
Parameters
----------
config : harmony.util.Config
The configuration for the current runtime environment.
url : str
The url for the resource to download
access_token : str
A shared EDL access token created from the user's access token
and the app identity.
data : dict or Tuple[str, str]
Optional parameter for additional data to send to the server
when making an HTTP POST request. These data will be URL
encoded to a query string containing a series of `key=value`
pairs, separated by ampersands. If None (the default), the
request will be sent with an HTTP GET request.
Returns
-------
requests.Response with the download result
"""
auth = EarthdataAuth(config.oauth_uid, config.oauth_password, access_token)
with _earthdata_session() as session:
session.auth = auth
if data is None:
return session.get(url, timeout=TIMEOUT)
else:
# Including this header since the stdlib does by default,
# but we've switched to `requests` which does not.
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return session.post(url, headers=headers, data=data, timeout=TIMEOUT)
def _download_with_fallback_authn(config, url: str, data):
"""Downloads the given url using Basic authentication as a fallback
mechanism should the normal EDL Oauth handshake fail.
This function requires the `edl_username` and `edl_password`
attributes in the config object to be populated with valid
credentials.
Parameters
----------
config : harmony.util.Config
The configuration for the current runtime environment.
url : str
The url for the resource to download
data : dict or Tuple[str, str]
Optional parameter for additional data to send to the server
when making an HTTP POST request. These data will be URL
encoded to a query string containing a series of `key=value`
pairs, separated by ampersands. If None (the default), the
request will be sent with an HTTP GET request.
Returns
-------
requests.Response with the download result
"""
auth = requests.auth.HTTPBasicAuth(config.edl_username, config.edl_password)
if data is None:
return requests.get(url, timeout=TIMEOUT, auth=auth)
else:
return requests.post(url, data=data, timeout=TIMEOUT, auth=auth)
def download(config, url: str, access_token: str, data, destination_file):
"""Downloads the given url using the provided EDL user access token
and writes it to the provided file-like object.
Exception cases:
1. No user access token
2. Invalid user access token
3. Unable to authenticate the user with Earthdata Login
       a. User credentials (could happen even after token validation)
b. Application credentials
4. Error response when downloading
5. Data requires EULA acceptance by user
6. If fallback authentication enabled, the application credentials are
invalid, or do not have permission to download the data.
Parameters
----------
config : harmony.util.Config
The configuration for the current runtime environment.
url : str
The url for the resource to download
access_token : str
A shared EDL access token created from the user's access token
and the app identity.
data : dict or Tuple[str, str]
Optional parameter for additional data to send to the server
when making an HTTP POST request. These data will be URL
encoded to a query string containing a series of `key=value`
pairs, separated by ampersands. If None (the default), the
request will be sent with an HTTP GET request.
destination_file : file-like
The destination file where the data will be written. Must be
a file-like object opened for binary write.
Returns
-------
requests.Response with the download result
Side-effects
------------
Will write to provided destination_file
"""
response = None
logger = build_logger(config)
logger.info('Downloading %s', url)
if data is not None:
logger.info('Query parameters supplied, will use POST method.')
data = urlencode(data).encode('utf-8')
if access_token is not None and _valid(config.oauth_host, config.oauth_client_id, access_token):
response = _download(config, url, access_token, data)
if response.ok:
destination_file.write(response.content)
logger.info(f'Completed {url}')
return response
if config.fallback_authn_enabled:
        msg = ('No valid user access token in request or EDL OAuth authentication failed. '
               'Fallback authentication enabled: retrying with Basic auth.')
logger.warning(msg)
response = _download_with_fallback_authn(config, url, data)
if response.ok:
destination_file.write(response.content)
logger.info(f'Completed {url}')
return response
if _is_eula_error(response.content):
msg = _eula_error_message(response.content)
logger.info(f'{msg} due to: {response.content}')
raise ForbiddenException(msg)
if response.status_code in (401, 403):
msg = f'Forbidden: Unable to download {url}'
logger.info(f'{msg} due to: {response.content}')
raise ForbiddenException(msg)
if response.status_code == 500:
logger.info(f'Unable to download (500) due to: {response.content}')
raise Exception('Unable to download.')
logger.info(f'Unable to download (unknown error) due to: {response.content}')
raise Exception('Unable to download: unknown error.')
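# Illustrative usage sketch (not part of the original module). The URL and token are
# placeholders; per the module docstring, the Config comes from harmony.util.config
# and its environment variables:
#
#   from harmony.util import config
#
#   cfg = config()
#   with open('granule.nc4', 'wb') as f:
#       download(cfg, 'https://data.example.com/granule.nc4',
#                access_token='<shared EDL token>', data=None, destination_file=f)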
| 34.195876
| 100
| 0.685157
|
65a3315d93d1fe6017094910d4a47f8d7a0c69e8
| 13,185
|
py
|
Python
|
models/model_wgan_bicubic256.py
|
opteroncx/DGDMLSR
|
c54d60ff6f5feb8f8ecba73e78bcc70fc533035f
|
[
"Apache-2.0"
] | 2
|
2020-09-11T10:22:45.000Z
|
2021-07-14T01:56:17.000Z
|
models/model_wgan_bicubic256.py
|
opteroncx/DGDMLSR
|
c54d60ff6f5feb8f8ecba73e78bcc70fc533035f
|
[
"Apache-2.0"
] | 1
|
2021-07-14T02:09:47.000Z
|
2021-07-14T05:48:21.000Z
|
models/model_wgan_bicubic256.py
|
opteroncx/DGDMLSR
|
c54d60ff6f5feb8f8ecba73e78bcc70fc533035f
|
[
"Apache-2.0"
] | 1
|
2022-01-15T15:06:35.000Z
|
2022-01-15T15:06:35.000Z
|
import torch
import torch.nn as nn
import numpy as np
import math
def get_upsample_filter(size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
filter = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
return torch.from_numpy(filter).float()
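# For example, get_upsample_filter(4) returns the outer product of
# [0.25, 0.75, 0.75, 0.25] with itself, i.e. the standard 4x4 bilinear
# interpolation kernel used below to initialise ConvTranspose2d weights.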
class upsample_block(nn.Module):
def __init__(self,in_channels,out_channels):
super(upsample_block,self).__init__()
self.conv = nn.Conv2d(in_channels,out_channels,3,stride=1,padding=1)
self.shuffler = nn.PixelShuffle(2)
self.prelu = nn.PReLU()
def forward(self,x):
return self.prelu(self.shuffler(self.conv(x)))
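# upsample_block performs sub-pixel (PixelShuffle) upsampling: with
# upsample_block(64, 64 * 4) the conv maps 64 -> 256 channels and PixelShuffle(2)
# rearranges them into 64 channels at twice the spatial resolution, followed by PReLU.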
class _Conv_Block(nn.Module):
def __init__(self):
super(_Conv_Block, self).__init__()
self.conv_block = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
)
def forward(self, x):
output = self.conv_block(x)
return output
class _netGH(nn.Module):
def __init__(self):
super(_netGH, self).__init__()
self.conv_input = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.LeakyReLU(0.2, inplace=True)
self.conv_input2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
# self.upb = nn.ConvTranspose2d(in_channels=1, out_channels=1, kernel_size=4, stride=2, padding=1, bias=False)
self.up = upsample_block(64,64*4)
self.upb = nn.Upsample(scale_factor=2, mode='bicubic')
# self.uim1 = upsample_block(64,64*4)
self.convt_R1R = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
# self.convt_R1I = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
self.convt_F1 = self.make_layer(_Conv_Block)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
c1, c2, h, w = m.weight.data.size()
weight = get_upsample_filter(h)
m.weight.data = weight.view(1, 1, h, w).repeat(c1, c2, 1, 1)
if m.bias is not None:
m.bias.data.zero_()
def make_layer(self, block):
layers = []
layers.append(block())
return nn.Sequential(*layers)
def forward(self, x):
out = self.relu(self.conv_input(x))
# conv1 = self.conv_input2(out)
res = self.convt_F1(out)
convt_I1 = self.upb(x)
convt_F1 = self.up(res)
convt_R1 = self.convt_R1R(convt_F1)
HR = convt_I1 + convt_R1
# return HR,convt_R1,convt_I1
return HR
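# _netGH uses global residual learning: the trunk (conv_input -> convt_F1 -> up ->
# convt_R1R) predicts a single-channel residual at 2x resolution, which is added to
# a bicubic 2x upsample of the input (self.upb) to form the HR output.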
class _netGL(nn.Module):
def __init__(self):
super(_netGL, self).__init__()
self.conv_input = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.LeakyReLU(0.2, inplace=True)
# self.convt_I1 = nn.ConvTranspose2d(in_channels=1, out_channels=1, kernel_size=4, stride=2, padding=1, bias=False)
self.convt_R1 = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv_block = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
)
self.down = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
c1, c2, h, w = m.weight.data.size()
weight = get_upsample_filter(h)
m.weight.data = weight.view(1, 1, h, w).repeat(c1, c2, 1, 1)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.relu(self.conv_input(x))
out = self.down(out)
LR_2x = self.conv_block(out)
LR_2x = self.convt_R1(LR_2x)
return LR_2x
class L1_Charbonnier_loss(nn.Module):
"""L1 Charbonnierloss."""
def __init__(self):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-6
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt( diff * diff + self.eps )
loss = torch.sum(error)
return loss
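# Hedged usage sketch: the criterion behaves like a smoothed L1 and returns the *sum*
# (not the mean) over all elements.
#
#   criterion = L1_Charbonnier_loss()
#   loss = criterion(sr_batch, hr_batch)   # tensors of identical shape
#   loss.backward()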
class _netDH(nn.Module):
def __init__(self):
super(_netDH, self).__init__()
self.features = nn.Sequential(
            # input is (1) x 256 x 256 (fc1 below assumes a 16 x 16 map after four stride-2 convs)
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64) x 128 x 128
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64) x 128 x 128
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False),
#nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64) x 64 x 64
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# state size. (128) x 64 x 64
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
#nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
# state size. (256) x 32 x 32
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
# state size. (256) x 16 x 16
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False),
#nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
# state size. (512) x 16 x 16
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
)
self.LeakyReLU = nn.LeakyReLU(0.2, inplace=True)
self.fc1 = nn.Linear(512 * 16 * 16, 1024)
self.fc2 = nn.Linear(1024, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
#self.sigmoid = nn.Sigmoid()
def forward(self, input):
out = self.features(input)
# state size. (512) x 8 x 8
out = out.view(out.size(0), -1)
# state size. (512 x 8 x 8)
out = self.fc1(out)
# state size. (1024)
out = self.LeakyReLU(out)
out = self.fc2(out)
# state size. (1)
out = out.mean(0)
#out = self.sigmoid(out)
return out.view(1)
class _netDL(nn.Module):
def __init__(self):
super(_netDL, self).__init__()
self.features = nn.Sequential(
# input is (1) x 128 x 128
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64) x 128 x 128
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64) x 128 x 128
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False),
#nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64) x 64 x 64
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# state size. (128) x 64 x 64
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
#nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
# state size. (256) x 32 x 32
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
# state size. (256) x 16 x 16
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False),
#nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
# state size. (512) x 16 x 16
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=4, stride=2, padding=1, bias=False),
#nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
)
self.LeakyReLU = nn.LeakyReLU(0.2, inplace=True)
self.fc1 = nn.Linear(512 * 8 * 8, 1024)
self.fc2 = nn.Linear(1024, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
#self.sigmoid = nn.Sigmoid()
def forward(self, input):
out = self.features(input)
# print(out.shape)
# state size. (512) x 8 x 8
out = out.view(out.size(0), -1)
# state size. (512 x 8 x 8)
out = self.fc1(out)
# state size. (1024)
out = self.LeakyReLU(out)
out = self.fc2(out)
# state size. (1)
out = out.mean(0)
#out = self.sigmoid(out)
return out.view(1)
| 40.947205
| 123
| 0.571104
|
1a007f4d95b4972cfa3fbf28bd88ac00321c3813
| 3,353
|
py
|
Python
|
ckanext-hdx_service_checker/setup.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 58
|
2015-01-11T09:05:15.000Z
|
2022-03-17T23:44:07.000Z
|
ckanext-hdx_service_checker/setup.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 1,467
|
2015-01-01T16:47:44.000Z
|
2022-02-28T16:51:20.000Z
|
ckanext-hdx_service_checker/setup.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 17
|
2015-05-06T14:04:21.000Z
|
2021-11-11T19:58:16.000Z
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='''ckanext-hdx_service_checker''',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1.0',
description='''Customizable extension for checking the status of the network services that CKAN depends upon''',
long_description=long_description,
# The project's main homepage.
url='https://github.com/alexandru-m-g/ckanext-hdx_service_checker',
# Author details
author='''alexandru-m-g''',
author_email='''''',
# Choose your license
license='AGPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
# What does your project relate to?
keywords='''CKAN checker network services''',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=[],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
include_package_data=True,
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points='''
[ckan.plugins]
hdx_service_checker=ckanext.hdx_service_checker.plugin:HdxServiceCheckerPlugin
''',
)
| 38.988372
| 116
| 0.690725
|
39c3360de5ed5436c13f0b5c11ff3ff8f4c1e5e8
| 935
|
py
|
Python
|
python3/max_area_of_island.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | 1
|
2020-10-08T09:17:40.000Z
|
2020-10-08T09:17:40.000Z
|
python3/max_area_of_island.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | null | null | null |
python3/max_area_of_island.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | null | null | null |
# Space : O(n)
# Time : O(m*n)
from typing import List
class Solution:
def crawl(self, grid, x, y):
def bfs(dx, dy):
nonlocal area
if grid[dy][dx] == 1:
area += 1
grid[dy][dx] = 0
elif grid[dy][dx] == 0:
return
for ax, ay in c:
if 0 <= dy + ay < row and 0 <= dx + ax < col:
if grid[dy+ay][dx+ax] == 1:
bfs(dx+ax, dy+ay)
row = len(grid)
col = len(grid[0])
c = [(0, 1), (0, -1), (1, 0), (-1, 0)]
area = 0
bfs(x, y)
return area
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
row = len(grid)
col = len(grid[0])
ans = 0
for y in range(row):
for x in range(col):
if grid[y][x] == 1:
ans = max(ans, self.crawl(grid, x, y))
return ans
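# Illustrative usage (note that crawl() zeroes visited cells, mutating the grid):
#
#   grid = [[0, 0, 1, 0],
#           [0, 1, 1, 0],
#           [0, 1, 0, 0]]
#   Solution().maxAreaOfIsland(grid)   # -> 4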
| 25.972222
| 61
| 0.37754
|
da4ac95bf0af04b68e4aba2a40f2badc99ff9cb4
| 2,023
|
py
|
Python
|
openpype/helpers/deploy_project.py
|
pypeclub/openpype4-backend
|
a0abe2ed66887c6529b01bbb9cb00278bbff41e4
|
[
"Apache-2.0"
] | 2
|
2022-03-09T08:02:52.000Z
|
2022-03-15T00:34:01.000Z
|
openpype/helpers/deploy_project.py
|
pypeclub/openpype4-backend
|
a0abe2ed66887c6529b01bbb9cb00278bbff41e4
|
[
"Apache-2.0"
] | 1
|
2022-03-08T16:22:34.000Z
|
2022-03-08T16:22:34.000Z
|
openpype/helpers/deploy_project.py
|
pypeclub/openpype4-backend
|
a0abe2ed66887c6529b01bbb9cb00278bbff41e4
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any
from openpype.entities.project import ProjectEntity
from openpype.settings.anatomy import Anatomy
async def create_project_from_anatomy(
name: str,
code: str,
anatomy: Anatomy,
library: bool = False,
) -> None:
"""Deploy a project."""
task_types = {}
for task_type in anatomy.task_types:
task_types[task_type.name] = {
k: v for k, v in task_type.dict().items() if k != "name"
}
folder_types = {}
for folder_type in anatomy.folder_types:
folder_types[folder_type.name] = {
k: v for k, v in folder_type.dict().items() if k != "name"
}
#
# Config
#
config: dict[str, Any] = {}
config["roots"] = {}
for root in anatomy.roots:
config["roots"][root.name] = {
"windows": root.windows,
"linux": root.linux,
"darwin": root.darwin,
}
config["templates"] = {
"common": {
"version_padding": anatomy.templates.version_padding,
"version": anatomy.templates.version,
"frame_padding": anatomy.templates.frame_padding,
"frame": anatomy.templates.frame,
}
}
for template_type in ["work", "publish", "hero", "delivery", "others"]:
template_group = anatomy.templates.dict().get(template_type, [])
if not template_group:
continue
config["templates"][template_type] = {}
for template in template_group:
config["templates"][template_type][template["name"]] = {
k: template[k] for k in template.keys() if k != "name"
}
#
# Create a project entity
#
project = ProjectEntity(
payload={
"name": name,
"code": code,
"library": library,
"task_types": task_types,
"folder_types": folder_types,
"attrib": anatomy.attributes,
"config": config,
}
)
await project.save()
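# Hedged usage sketch (names and values are illustrative; it assumes Anatomy() can be
# constructed with its defaults):
#
#   anatomy = Anatomy()
#   await create_project_from_anatomy(
#       name="demo_project",
#       code="demo",
#       anatomy=anatomy,
#       library=False,
#   )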
| 26.973333
| 75
| 0.554622
|
b061d41f3827ee5e1a10da7afe6efefd3f47d402
| 32,841
|
py
|
Python
|
nmt/nmt.py
|
adorableChowhound/nmt
|
35483dd9a9935dc0e9b1a450efa304dfdfc2fa76
|
[
"Apache-2.0"
] | 4
|
2020-10-13T07:24:15.000Z
|
2021-12-31T02:00:43.000Z
|
nmt/nmt.py
|
adorableChowhound/nmt
|
35483dd9a9935dc0e9b1a450efa304dfdfc2fa76
|
[
"Apache-2.0"
] | null | null | null |
nmt/nmt.py
|
adorableChowhound/nmt
|
35483dd9a9935dc0e9b1a450efa304dfdfc2fa76
|
[
"Apache-2.0"
] | 1
|
2020-10-14T04:59:24.000Z
|
2020-10-14T04:59:24.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
# Entry point file for the whole project
# main()->run_main(train_fn, inference_fn)
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
from . import inference
from . import train
from .utils import evaluation_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
utils.check_tensorflow_version()
FLAGS = None
INFERENCE_KEYS = ["src_max_len_infer", "tgt_max_len_infer", "subword_option",
"infer_batch_size", "beam_width",
"length_penalty_weight", "coverage_penalty_weight",
"sampling_temperature", "num_translations_per_input",
"infer_mode"]
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument("--num_units", type=int, default=32, help="Network size.") # 网络节点数量
parser.add_argument("--num_layers", type=int, default=2,
help="Network depth.") # 网络的层数,即网络深度
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.") # 编码器的网络层数
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.") # 解码器的网络层数
parser.add_argument("--encoder_type", type=str, default="uni", help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""") # 编码器的类型,uni, bi, gnmt三者之一,编码器的类型会对结果有较大影响
parser.add_argument("--residual", type="bool", nargs="?", const=True,
default=False,
help="Whether to add residual connections.") # 是否采残差网络
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
  # Whether to use time-major mode; if so, an extra matrix transpose happens during computation
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.") # 词嵌入的分片数量
# attention mechanisms
parser.add_argument("--attention", type=str, default="", help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""") # attention机制的类型,可选项 luong|scaled_luong|bahdanau|normed_bahdanau|
parser.add_argument(
"--attention_architecture",
type=str,
default="standard",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""") # attention架构,可选standard|gnmt|gnmt_v2
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
Only used in standard attention_architecture. Whether use attention as
the cell output at each timestep.
.\
""") # 是否在输出单元使用attention,只有standard架构的attention能够使用
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""") # 是否将编码器的隐藏状态传递给解码器,只有在attention机制模型可用
# optimizer
parser.add_argument("--optimizer", type=str, default="sgd", help="sgd | adam")
  # Optimizer, sgd or adam; the default is sgd (stochastic gradient descent)
parser.add_argument("--learning_rate", type=float, default=1.0,
help="Learning rate. Adam: 0.001 | 0.0001")
  # Learning rate, default 1.0; with the adam optimizer use 0.001 or 0.0001
parser.add_argument("--warmup_steps", type=int, default=0,
help="How many steps we inverse-decay learning.") # 预热学习率的步数
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""") # 预热学习率的方式,默认是t2t即tensor2tensor的方式
parser.add_argument(
"--decay_scheme", type=str, default="", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""") # 学习率衰减方式,可选luong234|luong5|luong10
parser.add_argument(
"--num_train_steps", type=int, default=12000, help="Num steps to train.") # 训练的轮数
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument("--src", type=str, default=None,
help="Source suffix, e.g., en.") # 训练数据中,源数据的文件后缀名
parser.add_argument("--tgt", type=str, default=None,
help="Target suffix, e.g., de.") # 训练数据中,目标数据的文件后缀名
parser.add_argument("--train_prefix", type=str, default=None,
help="Train prefix, expect files with src/tgt suffixes.") # train数据文件的前缀
parser.add_argument("--dev_prefix", type=str, default=None,
help="Dev prefix, expect files with src/tgt suffixes.") # dev数据文件的前缀
parser.add_argument("--test_prefix", type=str, default=None,
help="Test prefix, expect files with src/tgt suffixes.") # test数据文件的前缀
parser.add_argument("--out_dir", type=str, default=None,
help="Store log/model files.") # 模型的保存路径
# Vocab
parser.add_argument("--vocab_prefix", type=str, default=None, help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""") # 词典文件的前缀
parser.add_argument("--embed_prefix", type=str, default=None, help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formated txt files.\
""") # 已经训练好的embedding文件,必须是Glove文件格式。如果没有,使用默认值None
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.") # 句子开始的标记,默认是<s>
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.") # 句子结束的标记,默认是</s>
parser.add_argument("--share_vocab", type="bool", nargs="?", const=True,
default=False,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""") # 训练的源文件和目标文件是否使用一样的词典
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""") # 是否检查特殊标记
# Sequence lengths
parser.add_argument("--src_max_len", type=int, default=50,
help="Max length of src sequences during training.") # 源句子的最大词语数量
parser.add_argument("--tgt_max_len", type=int, default=50,
help="Max length of tgt sequences during training.") # 目标句子的最大词语数量
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference.") # 推断的源句子最大词语数量
parser.add_argument("--tgt_max_len_infer", type=int, default=None,
help="""\
Max length of tgt sequences during inference. Also use to restrict the
maximum decoding length.\
""") # 推断的目标句子最大词语数量
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
# RNN cell type for the encoder and decoder; one of lstm | gru | layer_norm_lstm | nas
parser.add_argument("--forget_bias", type=float, default=1.0,
help="Forget bias for BasicLSTMCell.") # 遗忘门的偏置,默认1.0
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)") # 丢弃率,有效防止过拟合
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.") # 将梯度剪裁到指定的标准
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
# Batch size; computing gradients on mini-batches is much faster than on the full dataset
parser.add_argument("--steps_per_stats", type=int, default=100,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats")) # 多少步输出一次状态
parser.add_argument("--max_train", type=int, default=0,
help="Limit on the size of training data (0: no limit).") # 限制训练的数量,一般不需要设置
parser.add_argument("--num_buckets", type=int, default=5,
help="Put data into similar-length buckets.") # 分桶数量
parser.add_argument("--num_sampled_softmax", type=int, default=0,
help=("Use sampled_softmax_loss if > 0."
"Otherwise, use full softmax loss."))
# SPM
parser.add_argument("--subword_option", type=str, default="",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
Whether to split each word or bpe into character, and then
generate the word-level representation from the character
representation.
""")
# Misc
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of gpus in each worker.") # GPU数量,用于分布式训练
parser.add_argument("--log_device_placement", type="bool", nargs="?",
const=True, default=False, help="Debug GPU allocation.") # 是否输出设备信息
parser.add_argument("--metrics", type=str, default="bleu",
help=("Comma-separated list of evaluations "
"metrics (bleu,rouge,accuracy)")) # 评分方式,默认BLEU
parser.add_argument("--steps_per_external_eval", type=int, default=None,
help="""\
How many training steps to do per external evaluation. Automatically set
based on data if None.\
""")
parser.add_argument("--scope", type=str, default=None,
help="scope to put variables under") # 变量的域,默认translate
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument("--random_seed", type=int, default=None,
help="Random seed (>0, set a specific seed).") # 随机种子,在对数据集乱序的时候有用,也可以不指定
parser.add_argument("--override_loaded_hparams", type="bool", nargs="?",
const=True, default=False,
help="Override loaded hparams with values specified")
parser.add_argument("--num_keep_ckpts", type=int, default=5,
help="Max number of checkpoints to keep.") # 保存最近的checkpoints的数量,默认5
parser.add_argument("--avg_ckpts", type="bool", nargs="?",
const=True, default=False, help=("""\
Average the last N checkpoints for external evaluation.
N can be controlled by setting --num_keep_ckpts.\
""")) # 是否均值保存点。可以提高性能
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
# Checkpoint to use for inference; defaults to the best-scoring one
parser.add_argument("--inference_input_file", type=str, default=None,
help="Set to the text to decode.") # 推断的输入文件
parser.add_argument("--inference_list", type=str, default=None,
help=("A comma-separated list of sentence indices "
"(0-based) to decode.")) # 指定输入文件的某些行,用来推断
parser.add_argument("--infer_batch_size", type=int, default=32,
help="Batch size for inference mode.") # 推断的批大小
parser.add_argument("--inference_output_file", type=str, default=None,
help="Output file to store decoding results.") # 推断的输出结果文件
parser.add_argument("--inference_ref_file", type=str, default=None,
help=("""\
Reference file to compute evaluation scores (if provided).\
""")) # 如果提供,用来计算推断结果的得分
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="greedy",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=0,
help=("""\
beam width when using beam search decoder. If 0 (default), use standard
decoder with greedy helper.\
""")) # beam search的宽度
parser.add_argument("--length_penalty_weight", type=float, default=0.0,
help="Length penalty for beam search.")
parser.add_argument("--coverage_penalty_weight", type=float, default=0.0,
help="Coverage penalty for beam search.")
parser.add_argument("--sampling_temperature", type=float,
default=0.0,
help=("""\
Softmax sampling temperature for inference decoding, 0.0 means greedy
decoding. This option is ignored when using beam search.\
"""))
parser.add_argument("--num_translations_per_input", type=int, default=1,
help=("""\
Number of translations generated for each sentence. This is only used for
inference.\
""")) # 每个句子输出推断结果的数量,即可以输出多个结果
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.") # 当前任务的id,用于分布式训练
parser.add_argument("--num_workers", type=int, default=1,
help="Number of workers (inference only).") # workers数量
parser.add_argument("--num_inter_threads", type=int, default=0,
help="number of inter_op_parallelism_threads")
parser.add_argument("--num_intra_threads", type=int, default=0,
help="number of intra_op_parallelism_threads")
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=flags.train_prefix,
dev_prefix=flags.dev_prefix,
test_prefix=flags.test_prefix,
vocab_prefix=flags.vocab_prefix,
embed_prefix=flags.embed_prefix,
out_dir=flags.out_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
num_train_steps=flags.num_train_steps,
batch_size=flags.batch_size,
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
num_sampled_softmax=flags.num_sampled_softmax,
# Data constraints
num_buckets=flags.num_buckets,
max_train=flags.max_train,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
sampling_temperature=flags.sampling_temperature,
num_translations_per_input=flags.num_translations_per_input,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
steps_per_external_eval=flags.steps_per_external_eval,
share_vocab=flags.share_vocab,
metrics=flags.metrics.split(","),
log_device_placement=flags.log_device_placement,
random_seed=flags.random_seed,
override_loaded_hparams=flags.override_loaded_hparams,
num_keep_ckpts=flags.num_keep_ckpts,
avg_ckpts=flags.avg_ckpts,
language_model=flags.language_model,
num_intra_threads=flags.num_intra_threads,
num_inter_threads=flags.num_inter_threads,
)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
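# Minimal usage sketch of _add_argument (illustrative, not part of the original script):
#   hparams = tf.contrib.training.HParams(beam_width=0)
#   _add_argument(hparams, "beam_width", 10)                # existing key: value is overwritten
#   _add_argument(hparams, "length_penalty_weight", 1.0)    # new key: added via add_hparam
#   _add_argument(hparams, "beam_width", 5, update=False)   # existing key, update=False: unchanged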
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
if hparams.infer_mode == "sample" and hparams.sampling_temperature <= 0.0:
raise ValueError("sampling_temperature must greater than 0.0 when using"
"sample decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
utils.print_out("Num encoder layer %d is different from num decoder layer"
" %d, so set pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have residual connection due to the input is
# the concatenation of fw_cell and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# Compatible for GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
# Language modeling
if getattr(hparams, "language_model", None):
hparams.attention = ""
hparams.attention_architecture = ""
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
check_special_token = getattr(hparams, "check_special_token", True)
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.out_dir,
check_special_token=check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.out_dir,
check_special_token=check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
num_embeddings_partitions = getattr(hparams, "num_embeddings_partitions", 0)
_add_argument(hparams, "num_enc_emb_partitions", num_embeddings_partitions)
_add_argument(hparams, "num_dec_emb_partitions", num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if getattr(hparams, "embed_prefix", None):
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
utils.print_out(" src_embed_file %s exist" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
# Evaluation
for metric in hparams.metrics:
best_metric_dir = os.path.join(hparams.out_dir, "best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "best_" + metric, 0, update=False)
_add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)
if getattr(hparams, "avg_ckpts", None):
best_metric_dir = os.path.join(hparams.out_dir, "avg_best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "avg_best_" + metric, 0, update=False)
_add_argument(hparams, "avg_best_" + metric + "_dir", best_metric_dir)
return hparams
def ensure_compatible_hparams(hparams, default_hparams, hparams_path=""):
"""Make sure the loaded hparams is compatible with new changes."""
default_hparams = utils.maybe_parse_standard_hparams(
default_hparams, hparams_path)
# Set num encoder/decoder layers (for old checkpoints)
if hasattr(hparams, "num_layers"):
if not hasattr(hparams, "num_encoder_layers"):
hparams.add_hparam("num_encoder_layers", hparams.num_layers)
if not hasattr(hparams, "num_decoder_layers"):
hparams.add_hparam("num_decoder_layers", hparams.num_layers)
# For compatibility reasons, if there are new fields in default_hparams,
# we add them to the current hparams
default_config = default_hparams.values()
config = hparams.values()
for key in default_config:
if key not in config:
hparams.add_hparam(key, default_config[key])
# Update all hparams' keys if override_loaded_hparams=True
if getattr(default_hparams, "override_loaded_hparams", None):
overwritten_keys = default_config.keys()
else:
# For inference
overwritten_keys = INFERENCE_KEYS
for key in overwritten_keys:
if getattr(hparams, key) != default_config[key]:
utils.print_out("# Updating hparams.%s: %s -> %s" %
(key, str(getattr(hparams, key)),
str(default_config[key])))
setattr(hparams, key, default_config[key])
return hparams
def create_or_load_hparams(
out_dir, default_hparams, hparams_path, save_hparams=True):
"""Create hparams or load hparams from out_dir."""
hparams = utils.load_hparams(out_dir)
if not hparams:
hparams = default_hparams
hparams = utils.maybe_parse_standard_hparams(
hparams, hparams_path)
else:
hparams = ensure_compatible_hparams(hparams, default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Save HParams
if save_hparams:
utils.save_hparams(out_dir, hparams)
for metric in hparams.metrics:
utils.save_hparams(getattr(hparams, "best_" + metric + "_dir"), hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, train_fn, inference_fn, target_session=""):
"""Run main."""
# Job
jobid = flags.jobid
num_workers = flags.num_workers
utils.print_out("# Job id %d" % jobid)
# GPU device
utils.print_out(
"# Devices visible to TensorFlow: %s" % repr(tf.Session().list_devices()))
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
# Model output directory
out_dir = flags.out_dir
if out_dir and not tf.gfile.Exists(out_dir):
utils.print_out("# Creating output directory %s ..." % out_dir)
tf.gfile.MakeDirs(out_dir)
# Load hparams.
loaded_hparams = False
if flags.ckpt: # Try to load hparams from the same directory as ckpt
ckpt_dir = os.path.dirname(flags.ckpt)
ckpt_hparams_file = os.path.join(ckpt_dir, "hparams")
if tf.gfile.Exists(ckpt_hparams_file) or flags.hparams_path:
hparams = create_or_load_hparams(
ckpt_dir, default_hparams, flags.hparams_path,
save_hparams=False)
loaded_hparams = True
if not loaded_hparams: # Try to load from out_dir
assert out_dir
hparams = create_or_load_hparams(
out_dir, default_hparams, flags.hparams_path,
save_hparams=(jobid == 0))
## Train / Decode
# Decide whether to follow the training path or the inference path
if flags.inference_input_file:
# Inference
# Take the latest checkpoint and run inference_fn
# Inference output directory
trans_file = flags.inference_output_file
assert trans_file
trans_dir = os.path.dirname(trans_file)
if not tf.gfile.Exists(trans_dir): tf.gfile.MakeDirs(trans_dir)
# Inference indices
hparams.inference_indices = None
if flags.inference_list:
(hparams.inference_indices) = (
[int(token) for token in flags.inference_list.split(",")])
# Inference
ckpt = flags.ckpt
if not ckpt:
ckpt = tf.train.latest_checkpoint(out_dir)
inference_fn(ckpt, flags.inference_input_file,
trans_file, hparams, num_workers, jobid)
# Evaluation
ref_file = flags.inference_ref_file
if ref_file and tf.gfile.Exists(trans_file):
for metric in hparams.metrics:
score = evaluation_utils.evaluate(
ref_file,
trans_file,
metric,
hparams.subword_option)
utils.print_out(" %s: %.1f" % (metric, score))
else:
# Train
# Run train() from train.py
train_fn(hparams, target_session=target_session)
def main(unused_argv):
default_hparams = create_hparams(FLAGS)
train_fn = train.train
inference_fn = inference.inference
run_main(FLAGS, default_hparams, train_fn, inference_fn)
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
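# Hypothetical invocation (the script name, data paths and language suffixes below are
# placeholders for illustration, not taken from this file):
#   python nmt.py --src=vi --tgt=en \
#       --vocab_prefix=/tmp/nmt_data/vocab \
#       --train_prefix=/tmp/nmt_data/train --dev_prefix=/tmp/nmt_data/dev \
#       --test_prefix=/tmp/nmt_data/test --out_dir=/tmp/nmt_model \
#       --num_train_steps=12000 --dropout=0.2 --metrics=bleu
# Passing --inference_input_file and --inference_output_file instead selects the decode path.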
| 45.48615 | 113 | 0.623885 |
2a5a1629bc3fe54e5aa2085a36d4e0d8fc7907a5 | 835 | py | Python | tweets/serializers.py | LuisManuelGlz/twitter-clone-server | a794f3322b9a02b480f026e4ce1342f9c4641d7d | ["MIT"] | null | null | null |
tweets/serializers.py | LuisManuelGlz/twitter-clone-server | a794f3322b9a02b480f026e4ce1342f9c4641d7d | ["MIT"] | null | null | null |
tweets/serializers.py | LuisManuelGlz/twitter-clone-server | a794f3322b9a02b480f026e4ce1342f9c4641d7d | ["MIT"] | null | null | null |
from rest_framework import serializers
from .models import Tweet
from users.serializers import UserSerializer
class TweetSimpleSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Tweet
fields = ['id', 'content', 'user', 'likes_total', 'created_at']
class TweetSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Tweet
fields = ['id', 'content', 'user', 'likes', 'created_at']
class TweetLikesSerializer(serializers.ModelSerializer):
likes = UserSerializer(many=True)
class Meta:
model = Tweet
fields = ['likes']
class TweetLikeSerializer(serializers.ModelSerializer):
class Meta:
model = Tweet
fields = ['likes_total']
read_only_fields = ['likes_total']
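# Minimal usage sketch (assumes an existing Tweet instance named `tweet`; `likes_total` is
# expected to be provided by the Tweet model, e.g. as a property or annotation):
#   TweetSimpleSerializer(tweet).data
#   # -> {'id': ..., 'content': ..., 'user': {...}, 'likes_total': ..., 'created_at': ...}
#   TweetLikesSerializer(tweet).data
#   # -> {'likes': [<each liking user rendered through UserSerializer>]}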
| 23.857143 | 67 | 0.705389 |
96104880265137ae45274e8e2254ea541f8f5f13 | 15,457 | py | Python | PINNFramework/PINN.py | albernsrya/NeuralSolvers | d044ffd6d74ab7814e2e86fc41949ad04688e06a | ["MIT"] | null | null | null |
PINNFramework/PINN.py | albernsrya/NeuralSolvers | d044ffd6d74ab7814e2e86fc41949ad04688e06a | ["MIT"] | null | null | null |
PINNFramework/PINN.py | albernsrya/NeuralSolvers | d044ffd6d74ab7814e2e86fc41949ad04688e06a | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
from itertools import chain
from torch.utils.data import DataLoader
from .InitalCondition import InitialCondition
from .BoundaryCondition import BoundaryCondition, PeriodicBC, DirichletBC, NeumannBC, RobinBC
from .PDELoss import PDELoss
from .JoinedDataset import JoinedDataset
from .HPMLoss import HPMLoss
try:
import horovod.torch as hvd
except ImportError:
print("Was not able to import Horovod. Thus Horovod support is not enabled")
class PINN(nn.Module):
def __init__(self, model: torch.nn.Module, input_dimension: int, output_dimension: int,
pde_loss: PDELoss, initial_condition: InitialCondition, boundary_condition,
use_gpu=True, use_horovod=False):
"""
Initializes a physics-informed neural network (PINN). A PINN consists of a model which represents the solution
of the underlying partial differential equation (PDE) u, three loss terms representing the initial condition (IC),
the boundary condition (BC) and the PDE residual, and a dataset which represents the bounded domain U.
Args:
model : is the model which is trained to represent the underlying PDE
input_dimension : represents the dimension of the input vector x
output_dimension : represents the dimension of the solution u
pde_loss: Instance of the PDELoss class. Represents the underlying PDE
initial_condition: Instance of the InitialCondition class. Represents the initial condition
boundary_condition (BoundaryCondition, list): Instance of the BoundaryCondition class or a list of instances
of the BoundaryCondition class
use_gpu: enables gpu usage
use_horovod: enables horovod support
"""
super(PINN, self).__init__()
# checking if the model is a torch module; more model checking could be added here
self.use_gpu = use_gpu
self.use_horovod = use_horovod
self.rank = 0 # initialize rank 0 by default in order to make the fit method more flexible
if self.use_horovod:
# Initialize Horovod
hvd.init()
# Pin GPU to be used to process local rank (one GPU per process)
torch.cuda.set_device(hvd.local_rank())
self.rank = hvd.rank()
if not isinstance(model, nn.Module):
raise TypeError("Only models of type torch.nn.Module are allowed")
self.model = model
if self.use_gpu:
self.model.cuda()
self.dtype = torch.cuda.FloatTensor
else:
self.dtype = torch.FloatTensor
# checking if the input dimension is well defined
if type(input_dimension) is not int:
raise TypeError("Only integers are allowed as input dimension")
elif input_dimension <= 0:
raise ValueError("Input dimension has to be greater than zero")
else:
self.input_dimension = input_dimension
# checking if the output dimension is well defined
if type(output_dimension) is not int:
raise TypeError("Only integers are allowed as output dimension")
elif output_dimension <= 0:
raise ValueError("Output dimension has to be greater than zero")
else:
self.output_dimension = output_dimension
if not isinstance(pde_loss, PDELoss):
raise TypeError("PDE loss has to be an instance of a PDE Loss class")
self.pde_loss = pde_loss
self.is_hpm = False
if isinstance(pde_loss,HPMLoss):
self.is_hpm = True
if isinstance(initial_condition, InitialCondition):
self.initial_condition = initial_condition
else:
raise TypeError("Initial condition has to be an instance of the InitialCondition class")
joined_datasets = {"Initial_Condition": initial_condition.dataset, "PDE": pde_loss.dataset}
if not self.is_hpm:
if type(boundary_condition) is list:
for bc in boundary_condition:
if not isinstance(bc, BoundaryCondition):
raise TypeError("Boundary Condition has to be an instance of the BoundaryCondition class ")
self.boundary_condition = boundary_condition
joined_datasets[bc.name] = bc.dataset
elif isinstance(boundary_condition, BoundaryCondition):
self.boundary_condition = boundary_condition
else:
raise TypeError("Boundary Condition has to be an instance of the BoundaryCondition class"
"or a list of instances of the BoundaryCondition class")
self.dataset = JoinedDataset(joined_datasets)
def forward(self, x):
"""
Predicting the solution at given position x
"""
return self.model(x)
def save_model(self, pinn_path, hpm_path=None):
"""
Saves the state dict of the models. Differs between HPM and Model
Args:
pinn_path: path where the PINN gets stored
hpm_path: path where the HPM gets stored
"""
if isinstance(self.pde_loss, HPMLoss):
if hpm_path is None:
raise ValueError("Saving path for the HPM has to be defined")
torch.save(self.model.state_dict(), pinn_path)
torch.save(self.pde_loss.hpm_model.state_dict(), hpm_path)
else:
torch.save(self.model.state_dict(), pinn_path)
def load_model(self, pinn_path, hpm_path=None):
"""
Load the state dict of the models. Differs between HPM and Model
Args:
pinn_path: path from where the PINN gets loaded
hpm_path: path from where the HPM gets loaded
"""
if isinstance(self.pde_loss, HPMLoss):
if hpm_path is None:
raise ValueError("Loading path for the HPM has to be defined")
self.model.load_state_dict(torch.load(pinn_path))
self.pde_loss.hpm_model.load_state_dict(torch.load(hpm_path))
else:
self.model.load_state_dict(torch.load(pinn_path))
def calculate_boundary_condition(self, boundary_condition: BoundaryCondition, training_data):
"""
This function classifies the boundary condition and calculates the satisfaction
Args:
boundary_condition (BoundaryCondition) : boundary condition to be calculated
training_data: training data used for evaluation
"""
if isinstance(boundary_condition, PeriodicBC):
# Periodic Boundary Condition
if isinstance(training_data, list) and len(training_data) == 2:
return boundary_condition(training_data[0][0].type(self.dtype),
training_data[1][0].type(self.dtype),
self.model)
else:
raise ValueError(
"The boundary condition {} has to be tuple of coordinates for lower and upper bound".
format(boundary_condition.name))
if isinstance(boundary_condition, DirichletBC):
# Dirichlet Boundary Condition
if not isinstance(training_data, list):
return boundary_condition(training_data.type(self.dtype)[0], self.model)
else:
raise ValueError("The boundary condition {} should be a tensor of coordinates not a tuple".
format(boundary_condition.name))
if isinstance(boundary_condition, NeumannBC):
# Neumann Boundary Condition
if not isinstance(training_data, list):
return boundary_condition(training_data.type(self.dtype)[0], self.model)
else:
raise ValueError("The boundary condition {} should be a tensor of coordinates not a tuple".
format(boundary_condition.name))
if isinstance(boundary_condition, RobinBC):
# Robin Boundary Condition
if isinstance(training_data, list) and len(training_data) == 2:
return boundary_condition(training_data[0][0].type(self.dtype),
training_data[1][0].type(self.dtype),
self.model)
else:
raise ValueError(
"The boundary condition {} has to be tuple of coordinates for lower and upper bound".
format(boundary_condition.name))
def pinn_loss(self, training_data):
"""
Function for calculating the PINN loss. The PINN Loss is a weighted sum of losses for initial and boundary
condition and the residual of the PDE
Args:
training_data (Dictionary): Training data for calculating the PINN loss, in the form of a dictionary. The
dictionary holds the training data for initial condition at the key "Initial_Condition" training data for
the PDE at the key "PDE" and the data for the boundary condition under the name of the boundary condition
"""
pinn_loss = 0
# unpack training data
if type(training_data["Initial_Condition"]) is not list:
raise ValueError("Training Data for initial condition is a tuple (x,y) with x the input coordinates"
" and ground truth values y")
# initial condition loss
if len(training_data["Initial_Condition"]) == 2:
pinn_loss += self.initial_condition(
training_data["Initial_Condition"][0][0].type(self.dtype),
self.model,
training_data["Initial_Condition"][1][0].type(self.dtype),
)
else:
raise ValueError("Training Data for initial condition is a tuple (x,y) with x the input coordinates"
" and ground truth values y")
if type(training_data["PDE"]) is not list:
pinn_loss += self.pde_loss(
training_data["PDE"][0].type(self.dtype), self.model
)
else:
raise ValueError("Training Data for PDE data is a single tensor consists of residual points ")
if not self.is_hpm:
if isinstance(self.boundary_condition, list):
for bc in self.boundary_condition:
pinn_loss += self.calculate_boundary_condition(bc, training_data[bc.name])
else:
pinn_loss += self.calculate_boundary_condition(
self.boundary_condition,
training_data[self.boundary_condition.name],
)
return pinn_loss
def fit(self, epochs, optimizer='Adam', learning_rate=1e-3, lbfgs_finetuning=True,
writing_cylcle= 30, save_model=True, pinn_path='best_model_pinn.pt', hpm_path='best_model_hpm.pt'):
"""
Function for optimizing the parameters of the PINN-Model
Args:
epochs (int) : number of epochs used for training
optimizer (String, torch.optim.Optimizer) : Optimizer used for training. At the moment only ADAM and LBFGS
are supported by string command. It is also possible to give instances of torch optimizers as a parameter
learning_rate: The learning rate of the optimizer
lbfgs_finetuning: Enables LBFGS finetuning after main training
writing_cylcle: defines the epoch cycle for writing model checkpoints
save_model: enables or disables checkpointing
pinn_path: defines the path where the pinn get stored
hpm_path: defines the path where the hpm get stored
"""
if isinstance(self.pde_loss, HPMLoss):
params = list(self.model.parameters()) + list(self.pde_loss.hpm_model.parameters())
named_parameters = chain(self.model.named_parameters(),self.pde_loss.hpm_model.named_parameters())
if self.use_horovod and lbfgs_finetuning:
raise ValueError("LBFGS Finetuning is not possible with horovod")
if optimizer == 'Adam':
optim = torch.optim.Adam(params, lr=learning_rate)
elif optimizer == 'LBFGS':
if self.use_horovod:
raise TypeError("LBFGS is not supported with Horovod")
else:
optim = torch.optim.LBFGS(params, lr=learning_rate)
else:
optim = optimizer
if lbfgs_finetuning and not self.use_horovod:
lbfgs_optim = torch.optim.LBFGS(params, lr=0.9)
def closure():
lbfgs_optim.zero_grad()
pinn_loss = self.pinn_loss(training_data)
pinn_loss.backward()
return pinn_loss
else:
named_parameters = self.model.named_parameters()
if optimizer == 'Adam':
optim = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
elif optimizer == 'LBFGS':
optim = torch.optim.LBFGS(self.model.parameters(), lr=learning_rate)
else:
optim = optimizer
if lbfgs_finetuning and not self.use_horovod:
lbfgs_optim = torch.optim.LBFGS(self.model.parameters(), lr=0.9)
def closure():
lbfgs_optim.zero_grad()
pinn_loss = self.pinn_loss(training_data)
pinn_loss.backward()
return pinn_loss
minimum_pinn_loss = float("inf")
if self.use_horovod:
# Partition dataset among workers using DistributedSampler
train_sampler = torch.utils.data.distributed.DistributedSampler(
self.dataset, num_replicas=hvd.size(), rank=hvd.rank())
data_loader = DataLoader(self.dataset, batch_size=1,sampler=train_sampler)
optim = hvd.DistributedOptimizer(optim, named_parameters=named_parameters)
# Broadcast parameters from rank 0 to all other processes.
hvd.broadcast_parameters(self.model.state_dict(), root_rank=0)
if isinstance(self.pde_loss, HPMLoss):
hvd.broadcast_parameters(self.pde_loss.hpm_model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optim, root_rank=0)
else:
data_loader = DataLoader(self.dataset, batch_size=1)
for epoch in range(epochs):
for training_data in data_loader:
training_data = training_data
optim.zero_grad()
pinn_loss = self.pinn_loss(training_data)
pinn_loss.backward()
if not self.rank:
print("PINN Loss {} Epoch {} from {}".format(pinn_loss, epoch, epochs))
optim.step()
if (pinn_loss < minimum_pinn_loss) and not (epoch % writing_cylcle) and save_model and not self.rank:
self.save_model(pinn_path, hpm_path)
minimum_pinn_loss = pinn_loss
if lbfgs_finetuning:
lbfgs_optim.step(closure)
print("After LBFGS-B: PINN Loss {} Epoch {} from {}".format(pinn_loss, epoch, epochs))
if (pinn_loss < minimum_pinn_loss) and not (epoch % writing_cylcle) and save_model:
self.save_model(pinn_path, hpm_path)
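# Rough usage sketch (all names below are placeholders; the concrete network, datasets and
# loss/condition instances depend on the PDE being solved):
#   model = torch.nn.Sequential(torch.nn.Linear(2, 50), torch.nn.Tanh(), torch.nn.Linear(50, 1))
#   pinn = PINN(model, input_dimension=2, output_dimension=1,
#               pde_loss=my_pde_loss,                     # a PDELoss instance with its residual dataset
#               initial_condition=my_initial_condition,   # an InitialCondition instance
#               boundary_condition=[lower_bc, upper_bc],  # one or more BoundaryCondition instances
#               use_gpu=False)
#   pinn.fit(epochs=1000, optimizer='Adam', learning_rate=1e-3,
#            lbfgs_finetuning=False, pinn_path='best_model_pinn.pt')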
| 47.56 | 120 | 0.620043 |
a144ef38167a114a0bb30209e21539d0a06f6275 | 3,407 | py | Python | tests/test_summary.py | jianxiongcai/tensorboardX | c2fa5959d08ee1d7d59f89f6f8fbe9788ca81a2d | ["MIT"] | null | null | null |
tests/test_summary.py | jianxiongcai/tensorboardX | c2fa5959d08ee1d7d59f89f6f8fbe9788ca81a2d | ["MIT"] | null | null | null |
tests/test_summary.py | jianxiongcai/tensorboardX | c2fa5959d08ee1d7d59f89f6f8fbe9788ca81a2d | ["MIT"] | null | null | null |
from tensorboardX import summary
from .expect_reader import compare_proto
import numpy as np
import pytest
import unittest
np.random.seed(0)
# compare_proto = write_proto # massive update expect
class SummaryTest(unittest.TestCase):
def test_uint8_image(self):
'''
Tests that uint8 image (pixel values in [0, 255]) is not changed
'''
test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
scale_factor = summary._calc_scale_factor(test_image)
assert scale_factor == 1, 'Values are already in [0, 255], scale factor should be 1'
def test_float32_image(self):
'''
Tests that float32 image (pixel values in [0, 1]) are scaled correctly
to [0, 255]
'''
test_image = np.random.rand(3, 32, 32).astype(np.float32)
scale_factor = summary._calc_scale_factor(test_image)
assert scale_factor == 255, 'Values are in [0, 1], scale factor should be 255'
def test_list_input(self):
with pytest.raises(Exception) as e_info:
summary.histogram('dummy', [1,3,4,5,6], 'tensorflow')
def test_empty_input(self):
print('expect error here:')
with pytest.raises(Exception) as e_info:
summary.histogram('dummy', np.ndarray(0), 'tensorflow')
def test_image_with_boxes(self):
compare_proto(summary.image_boxes('dummy',
np.random.rand(3, 32, 32).astype(np.float32),
np.array([[10, 10, 40, 40]])), self)
def test_image_with_one_channel(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(1, 8, 8).astype(np.float32), dataformats='CHW'), self)
def test_image_with_one_channel_batched(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(2, 1, 8, 8).astype(np.float32), dataformats='NCHW'), self)
def test_image_with_3_channel_batched(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(2, 3, 8, 8).astype(np.float32), dataformats='NCHW'), self)
def test_image_without_channel(self):
np.random.seed(0)
compare_proto(summary.image('dummy', np.random.rand(8, 8).astype(np.float32), dataformats='HW'), self)
def test_video(self):
try:
import moviepy
except ImportError:
return
np.random.seed(0)
compare_proto(summary.video('dummy', np.random.rand(4, 3, 1, 8, 8).astype(np.float32)), self)
summary.video('dummy', np.random.rand(16, 48, 1, 28, 28).astype(np.float32))
#summary.video('dummy', np.random.rand(20, 7, 1, 8, 8).astype(np.float32))
def test_audio(self):
np.random.seed(0)
compare_proto(summary.audio('dummy', np.random.rand(42)), self)
def test_text(self):
compare_proto(summary.text('dummy', 'text 123'), self)
def test_histogram_auto(self):
np.random.seed(0)
compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='auto', max_bins=5), self)
def test_histogram_fd(self):
np.random.seed(0)
compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='fd', max_bins=5), self)
def test_histogram_doane(self):
np.random.seed(0)
compare_proto(summary.histogram('dummy', np.random.rand(1024), bins='doane', max_bins=5), self)
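# Illustrative note (an assumption about how the scale factor is consumed, not asserted by
# these tests): multiplying an image by _calc_scale_factor brings float inputs into [0, 255]
# while leaving uint8 inputs untouched:
#   img = np.random.rand(3, 32, 32).astype(np.float32)
#   scaled = img * summary._calc_scale_factor(img)   # values now roughly in [0, 255]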
| 40.559524 | 118 | 0.643088 |
b3be691c031f960f5e3f0f813b928c8aae5fa63f | 447 | py | Python | users/migrations/0004_auto_20210311_1636.py | mdpe-ir/mdCms | 69aea3687a2b9d7846b196c00a0cd3866c54fd4c | ["BSD-3-Clause-Attribution"] | 1 | 2021-03-09T19:03:35.000Z | 2021-03-09T19:03:35.000Z |
users/migrations/0004_auto_20210311_1636.py | mdpe-ir/mdCms | 69aea3687a2b9d7846b196c00a0cd3866c54fd4c | ["BSD-3-Clause-Attribution"] | null | null | null |
users/migrations/0004_auto_20210311_1636.py | mdpe-ir/mdCms | 69aea3687a2b9d7846b196c00a0cd3866c54fd4c | ["BSD-3-Clause-Attribution"] | 1 | 2021-11-13T06:04:33.000Z | 2021-11-13T06:04:33.000Z |
# Generated by Django 3.1.7 on 2021-03-11 13:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20210310_1255'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default='default.jpg', upload_to='2021/03/11', verbose_name='تصویر پروفایل'),
),
]
| 23.526316 | 113 | 0.61745 |
d5e91ace10306bbe6a6503d1ae98cd15f9d5f6f1 | 5,271 | py | Python | src/scripts/retrieval/ner/get_ner.py | RahulRoyMattam/fever-baselines | 8977ffd939dbdb17bf4f190d45f11dabb7755668 | ["Apache-2.0"] | null | null | null |
src/scripts/retrieval/ner/get_ner.py | RahulRoyMattam/fever-baselines | 8977ffd939dbdb17bf4f190d45f11dabb7755668 | ["Apache-2.0"] | null | null | null |
src/scripts/retrieval/ner/get_ner.py | RahulRoyMattam/fever-baselines | 8977ffd939dbdb17bf4f190d45f11dabb7755668 | ["Apache-2.0"] | 1 | 2019-06-12T18:25:08.000Z | 2019-06-12T18:25:08.000Z |
import argparse
import json
import logging
import http.client
import urllib.parse
import datetime
import time
from tqdm import tqdm
from flair.models import SequenceTagger
from flair.data import Sentence
from common.dataset.reader import JSONLineReader
from common.util.random import SimpleRandom
from retrieval.fever_doc_db import FeverDocDB
from rte.riedel.data import FEVERLabelSchema, FEVERGoldFormatter
from nltk.corpus import stopwords
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#from allennlp.predictors.predictor import Predictor
# Replace the subscriptionKey string value with your valid subscription key.
subscriptionKey = '<enter valid subscription key>'
host = 'api.cognitive.microsoft.com'
path = '/bing/v7.0/entities'
def get_suggestions(query):
headers = {'Ocp-Apim-Subscription-Key': subscriptionKey}
conn = http.client.HTTPSConnection(host)
mkt = 'en-US'
params = '?mkt=' + mkt + '&q=' + urllib.parse.quote(query)
conn.request("GET", path + params, None, headers)
response = conn.getresponse()
return response.read()
class FEVERReader:
"""
Read full text for evidence sentences from fever db
"""
def __init__(self,
db: FeverDocDB) -> None:
self.db = db
self.formatter = FEVERGoldFormatter(set(self.db.get_doc_ids()), FEVERLabelSchema())
self.reader = JSONLineReader()
def get_doc_line(self,doc,line):
lines = self.db.get_doc_lines(doc)
if line > -1:
return lines.split("\n")[line].split("\t")[1]
else:
non_empty_lines = [line.split("\t")[1] for line in lines.split("\n") if len(line.split("\t"))>1 and len(line.split("\t")[1].strip())]
return non_empty_lines[SimpleRandom.get_instance().next_rand(0,len(non_empty_lines)-1)]
def get_evidence_text(self, evidence):
lines = set([self.get_doc_line(d[0], d[1]) for d in evidence])
premise = " ".join(lines)
return premise
def contains_word(s, w):
return f' {w} ' in f' {s} '
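# Example of the whitespace-delimited matching above (illustrative):
#   contains_word("Barack Obama was born in Hawaii", "Obama") -> True
#   contains_word("Obamacare was signed in 2010", "Obama") -> False (no surrounding spaces)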
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--db', type=str, help='/path/to/saved/db.db')
parser.add_argument('--split',type=str)
args = parser.parse_args()
db = FeverDocDB(args.db)
split = args.split
fever = FEVERReader(db)
#delete the below model from the cache
#predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/ner-model-2018.04.26.tar.gz")
tagger = SequenceTagger.load('ner')
throttle_start = datetime.datetime.now()
with open("data/fever/{0}.ns.pages.p1.jsonl".format(split),"r") as f:
with open("data/fever/{0}.ns.ner.pages.p1.jsonl".format(split),"w+") as f2:
for line in tqdm(f.readlines()):
js = json.loads(line)
fever_line = fever.formatter.format_line(js)
evidence = fever.get_evidence_text(fever_line["evidence"])
#tags = predictor.predict(
# sentence=evidence
#)
evidence_ner = Sentence(evidence)
claim_ner = Sentence(fever_line["claim"])
# predict NER tags
tagger.predict(evidence_ner)
tagger.predict(claim_ner)
evidence_tags = evidence_ner.to_dict(tag_type='ner')
claim_tags = claim_ner.to_dict(tag_type='ner')
missing_entity = False
for c_entity in claim_tags["entities"]:
if not contains_word(evidence, c_entity["text"]):
missing_entity = True
break
claim_tags_list = list([c["text"] for c in claim_tags["entities"]])
js["ner_claim"] = claim_tags_list
js["ner_evidence"] = list([c["text"] for c in evidence_tags["entities"]])
js["ner_missing"] = missing_entity
js["ner_related"] = []
js["fact"] = []
try:
throttle_now = datetime.datetime.now() - throttle_start
if throttle_now.total_seconds() < 0.25:
time.sleep(0.25)
bing_res = get_suggestions(' '.join(claim_tags_list))
throttle_start = datetime.datetime.now()
bing = json.loads(bing_res)
if 'entities' in bing:
for b_values in bing['entities']['value']:
for c in claim_tags_list:
if contains_word(b_values['description'], c):
js["ner_related"].append([b_values['name'], c])
for w in fever_line["claim"].split():
if w not in claim_tags_list and w not in stopwords.words('english'):
if contains_word(b_values['description'], w):
js["fact"].append([b_values['name'], w])
else:
js["ner_related"] = [["None"]]
except Exception as e:
print(e)
f2.write(json.dumps(js)+"\n")
| 37.382979 | 145 | 0.579586 |
2e3244fb0d34fd5953bbfd4a90e58f0cb60a0949 | 155,070 | py | Python | angr/analyses/cfg/cfg_fast.py | aeflores/angr | ac85a3f168375ed0ee20551b1b716c1bff4ac02b | ["BSD-2-Clause"] | 1 | 2020-11-18T16:39:11.000Z | 2020-11-18T16:39:11.000Z |
angr/analyses/cfg/cfg_fast.py | aeflores/angr | ac85a3f168375ed0ee20551b1b716c1bff4ac02b | ["BSD-2-Clause"] | null | null | null |
angr/analyses/cfg/cfg_fast.py | aeflores/angr | ac85a3f168375ed0ee20551b1b716c1bff4ac02b | ["BSD-2-Clause"] | 1 | 2020-11-18T16:39:13.000Z | 2020-11-18T16:39:13.000Z |
import itertools
import logging
import math
import re
import string
from collections import defaultdict, OrderedDict
from sortedcontainers import SortedDict
import claripy
import cle
import pyvex
from cle.address_translator import AT
from archinfo.arch_soot import SootAddressDescriptor
from archinfo.arch_arm import is_arm_arch, get_real_address_if_arm
from ...knowledge_plugins.cfg import CFGNode, MemoryDataSort, MemoryData
from ...knowledge_plugins.xrefs import XRef, XRefType
from ...misc.ux import deprecated
from ... import sim_options as o
from ...errors import (AngrCFGError, SimEngineError, SimMemoryError, SimTranslationError, SimValueError,
AngrUnsupportedSyscallError
)
from ...utils.constants import DEFAULT_STATEMENT
from ..forward_analysis import ForwardAnalysis, AngrSkipJobNotice
from .cfg_arch_options import CFGArchOptions
from .cfg_base import CFGBase
from .segment_list import SegmentList
VEX_IRSB_MAX_SIZE = 400
l = logging.getLogger(name=__name__)
class FunctionReturn:
"""
FunctionReturn describes a function call in a specific location and its return location. Hashable and equatable
"""
__slots__ = ('callee_func_addr', 'caller_func_addr', 'call_site_addr', 'return_to', )
def __init__(self, callee_func_addr, caller_func_addr, call_site_addr, return_to):
self.callee_func_addr = callee_func_addr
self.caller_func_addr = caller_func_addr
self.call_site_addr = call_site_addr
self.return_to = return_to
def __eq__(self, other):
"""
Comparison
:param FunctionReturn other: The other object
:return: True if equal, False otherwise
"""
return self.callee_func_addr == other.callee_func_addr and \
self.caller_func_addr == other.caller_func_addr and \
self.call_site_addr == other.call_site_addr and \
self.return_to == other.return_to
def __hash__(self):
return hash((self.callee_func_addr, self.caller_func_addr, self.call_site_addr, self.return_to))
class PendingJobs:
"""
A collection of pending jobs during CFG recovery.
"""
def __init__(self, functions, deregister_job_callback):
self._jobs = OrderedDict() # A mapping between function addresses and lists of pending jobs
self._functions = functions
self._deregister_job_callback = deregister_job_callback
self._returning_functions = set()
self._updated_functions = set() # Addresses of functions whose returning status has changed between two
# consecutive calls to cleanup().
self._job_count = 0
def __len__(self):
return self._job_count
def __bool__(self):
return self._job_count > 0
__nonzero__ = __bool__
def _pop_job(self, func_addr):
jobs = self._jobs[func_addr]
j = jobs.pop(-1)
if not jobs:
del self._jobs[func_addr]
self._job_count -= 1
return j
def add_job(self, job):
func_addr = job.returning_source
if func_addr not in self._jobs:
self._jobs[func_addr] = [ ]
self._jobs[func_addr].append(job)
self._job_count += 1
def pop_job(self, returning=True):
"""
Pop a job from the pending jobs list.
When returning == True, we prioritize the jobs whose functions are known to be returning (function.returning is
True). As an optimization, we are sorting the pending jobs list according to job.function.returning.
:param bool returning: Only pop a pending job if the corresponding function returns.
:return: A pending job if we can find one, or None if we cannot find any that satisfies the requirement.
:rtype: angr.analyses.cfg.cfg_fast.CFGJob
"""
if not self:
return None
if not returning:
return self._pop_job(next(reversed(self._jobs.keys())))
# Prioritize returning functions
for func_addr in reversed(self._jobs.keys()):
if func_addr not in self._returning_functions:
continue
return self._pop_job(func_addr)
return None
def cleanup(self):
"""
Remove those pending exits if:
a) they are the return exits of non-returning SimProcedures
b) they are the return exits of non-returning syscalls
c) they are the return exits of non-returning functions
:return: None
"""
pending_exits_to_remove = defaultdict(list)
for func_addr in self._updated_functions:
if func_addr not in self._jobs:
continue
jobs = self._jobs[func_addr]
for i, pe in enumerate(jobs):
if pe.returning_source is None:
# The original call failed. This pending exit must be followed.
continue
func = self._functions.function(pe.returning_source)
if func is None:
# Why does it happen?
l.warning("An expected function at %s is not found. Please report it to Fish.",
pe.returning_source if pe.returning_source is not None else 'None')
continue
if func.returning is False:
# Oops, it's not returning
# Remove this pending exit
pending_exits_to_remove[pe.returning_source].append(i)
for func_addr, indices in pending_exits_to_remove.items():
jobs = self._jobs[func_addr]
for index in reversed(indices):
job = jobs[index]
self._deregister_job_callback(job.func_addr, job)
del jobs[index]
self._job_count -= 1
if not jobs:
del self._jobs[func_addr]
self.clear_updated_functions()
def add_returning_function(self, func_addr):
"""
Mark a function as returning.
:param int func_addr: Address of the function that returns.
:return: None
"""
self._returning_functions.add(func_addr)
self._updated_functions.add(func_addr)
def add_nonreturning_function(self, func_addr):
"""
Mark a function as not returning.
:param int func_addr: Address of the function that does not return.
:return: None
"""
self._updated_functions.add(func_addr)
def clear_updated_functions(self):
"""
Clear the updated_functions set.
:return: None
"""
self._updated_functions.clear()
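# Rough illustration of the intended flow (variable names are placeholders, not from this module):
#   pending = PendingJobs(kb_functions, deregister_cb)
#   pending.add_job(job)                        # keyed by job.returning_source (the callee address)
#   pending.add_returning_function(0x400560)    # once that callee is known to return ...
#   next_job = pending.pop_job(returning=True)  # ... its pending return-site jobs are prioritized
#   pending.cleanup()                           # drops jobs whose callees turned out not to return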
#
# Descriptors of edges in individual function graphs
#
class FunctionEdge:
__slots__ = ('src_func_addr', 'stmt_idx', 'ins_addr',)
def apply(self, cfg):
raise NotImplementedError()
class FunctionTransitionEdge(FunctionEdge):
__slots__ = ('src_node', 'dst_addr', 'src_func_addr', 'to_outside', 'dst_func_addr')
def __init__(self, src_node, dst_addr, src_func_addr, to_outside=False, dst_func_addr=None, stmt_idx=None,
ins_addr=None):
self.src_node = src_node
self.dst_addr = dst_addr
self.src_func_addr = src_func_addr
self.to_outside = to_outside
self.dst_func_addr = dst_func_addr
self.stmt_idx = stmt_idx
self.ins_addr = ins_addr
def apply(self, cfg):
to_outside = self.to_outside
if not to_outside:
# is it jumping to outside? Maybe we are seeing more functions now.
dst_node = cfg.model.get_any_node(self.dst_addr, force_fastpath=True)
if dst_node is not None and dst_node.function_address != self.src_func_addr:
to_outside = True
return cfg._function_add_transition_edge(
self.dst_addr,
self.src_node,
self.src_func_addr,
to_outside=to_outside,
dst_func_addr=self.dst_func_addr,
stmt_idx=self.stmt_idx,
ins_addr=self.ins_addr,
)
class FunctionCallEdge(FunctionEdge):
__slots__ = ('src_node', 'dst_addr', 'ret_addr', 'syscall')
def __init__(self, src_node, dst_addr, ret_addr, src_func_addr, syscall=False, stmt_idx=None, ins_addr=None):
self.src_node = src_node
self.dst_addr = dst_addr
self.ret_addr = ret_addr
self.src_func_addr = src_func_addr
self.syscall = syscall
self.stmt_idx = stmt_idx
self.ins_addr = ins_addr
def apply(self, cfg):
return cfg._function_add_call_edge(
self.dst_addr,
self.src_node,
self.src_func_addr,
syscall=self.syscall,
stmt_idx=self.stmt_idx,
ins_addr=self.ins_addr,
)
class FunctionFakeRetEdge(FunctionEdge):
__slots__ = ('src_node', 'dst_addr', 'confirmed')
def __init__(self, src_node, dst_addr, src_func_addr, confirmed=None):
self.src_node = src_node
self.dst_addr = dst_addr
self.src_func_addr = src_func_addr
self.confirmed = confirmed
def apply(self, cfg):
return cfg._function_add_fakeret_edge(
self.dst_addr,
self.src_node,
self.src_func_addr,
confirmed=self.confirmed,
)
class FunctionReturnEdge(FunctionEdge):
__slots__ = ('ret_from_addr', 'ret_to_addr', 'dst_func_addr')
def __init__(self, ret_from_addr, ret_to_addr, dst_func_addr):
self.ret_from_addr = ret_from_addr
self.ret_to_addr = ret_to_addr
self.dst_func_addr = dst_func_addr
def apply(self, cfg):
return cfg._function_add_return_edge(
self.ret_from_addr,
self.ret_to_addr,
self.dst_func_addr
)
#
# CFGJob
#
class CFGJob:
"""
Defines a job to work on during the CFG recovery
"""
__slots__ = ('addr', 'func_addr', 'jumpkind', 'ret_target', 'last_addr', 'src_node', 'src_ins_addr', 'src_stmt_idx',
'returning_source', 'syscall', '_func_edges', 'job_type')
JOB_TYPE_NORMAL = "Normal"
JOB_TYPE_FUNCTION_PROLOGUE = "Function-prologue"
JOB_TYPE_COMPLETE_SCANNING = "Complete-scanning"
def __init__(self, addr, func_addr, jumpkind, ret_target=None, last_addr=None, src_node=None, src_ins_addr=None,
src_stmt_idx=None, returning_source=None, syscall=False, func_edges=None, job_type=JOB_TYPE_NORMAL):
self.addr = addr
self.func_addr = func_addr
self.jumpkind = jumpkind
self.ret_target = ret_target
self.last_addr = last_addr
self.src_node = src_node
self.src_ins_addr = src_ins_addr
self.src_stmt_idx = src_stmt_idx
self.returning_source = returning_source
self.syscall = syscall
self.job_type = job_type
self._func_edges = func_edges
def add_function_edge(self, edge):
if self._func_edges is None:
self._func_edges = [ ]
self._func_edges.append(edge)
def apply_function_edges(self, cfg, clear=False):
if not self._func_edges:
return
for edge in self._func_edges:
edge.apply(cfg)
if clear:
self._func_edges = None
def __repr__(self):
if isinstance(self.addr, SootAddressDescriptor):
return "<CFGJob {}>".format(self.addr)
else:
return "<CFGJob%s %#08x @ func %#08x>" % (" syscall" if self.syscall else "", self.addr, self.func_addr)
def __eq__(self, other):
return self.addr == other.addr and \
self.func_addr == other.func_addr and \
self.jumpkind == other.jumpkind and \
self.ret_target == other.ret_target and \
self.last_addr == other.last_addr and \
self.src_node == other.src_node and \
self.src_stmt_idx == other.src_stmt_idx and \
self.src_ins_addr == other.src_ins_addr and \
self.returning_source == other.returning_source and \
self.syscall == other.syscall
def __hash__(self):
return hash((self.addr, self.func_addr, self.jumpkind, self.ret_target, self.last_addr, self.src_node,
self.src_stmt_idx, self.src_ins_addr, self.returning_source, self.syscall)
)
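# Rough illustration of the descriptor pattern above (addresses are placeholders): function
# edges are recorded as lightweight objects during scanning and only materialized against the
# CFG later via .apply(cfg):
#   edge = FunctionTransitionEdge(src_node, dst_addr=0x401000, src_func_addr=0x400f00,
#                                 stmt_idx=3, ins_addr=0x400ffa)
#   job.add_function_edge(edge)         # queued on the CFGJob
#   job.apply_function_edges(cfg)       # applied once the CFG analysis object is available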
class CFGFast(ForwardAnalysis, CFGBase): # pylint: disable=abstract-method
"""
We find functions inside the given binary, and build a control-flow graph in a very fast manner: instead of
simulating program executions, keeping track of states, and performing expensive data-flow analysis, CFGFast will
only perform light-weight analyses combined with some heuristics, and with some strong assumptions.
In order to identify as many functions as possible, and as accurate as possible, the following operation sequence
is followed:
# Active scanning
- If the binary has "function symbols" (TODO: this term is not accurate enough), they are starting points of
the code scanning
- If the binary does not have any "function symbol", we will first perform a function prologue scanning on the
entire binary, and start from those places that look like function beginnings
- Otherwise, the binary's entry point will be the starting point for scanning
# Passive scanning
- After all active scans are done, we will go through the whole image and scan all code pieces
Due to the nature of those techniques that are used here, a base address is often not required to use this analysis
routine. However, with a correct base address, CFG recovery will almost always yield a much better result. A custom
analysis, called GirlScout, is specifically made to recover the base address of a binary blob. After the base
address is determined, you may want to reload the binary with the new base address by creating a new Project object,
and then re-recover the CFG.
"""
# TODO: Move arch_options to CFGBase, and add those logic to CFGEmulated as well.
PRINTABLES = string.printable.replace("\x0b", "").replace("\x0c", "").encode()
SPECIAL_THUNKS = {
'AMD64': {
bytes.fromhex('E807000000F3900FAEE8EBF9488D642408C3'): ('ret',),
bytes.fromhex('E807000000F3900FAEE8EBF948890424C3'): ('jmp', 'rax'),
}
}
tag = "CFGFast"
def __init__(self,
binary=None,
objects=None,
regions=None,
pickle_intermediate_results=False,
symbols=True,
function_prologues=True,
resolve_indirect_jumps=True,
force_segment=False,
force_complete_scan=True,
indirect_jump_target_limit=100000,
data_references=False,
cross_references=False,
normalize=False,
start_at_entry=True,
function_starts=None,
extra_memory_regions=None,
data_type_guessing_handlers=None,
arch_options=None,
indirect_jump_resolvers=None,
base_state=None,
exclude_sparse_regions=True,
skip_specific_regions=True,
heuristic_plt_resolving=None,
detect_tail_calls=False,
low_priority=False,
cfb=None,
model=None,
use_patches=False,
start=None, # deprecated
end=None, # deprecated
collect_data_references=None, # deprecated
extra_cross_references=None, # deprecated
**extra_arch_options
):
"""
:param binary: The binary to recover CFG on. By default the main binary is used.
:param objects: A list of objects to recover the CFG on. By default it will recover the CFG of
all loaded objects.
:param iterable regions: A list of tuples in the form of (start address, end address) describing memory
regions that the CFG should cover.
:param bool pickle_intermediate_results: If we want to store the intermediate results or not.
:param bool symbols: Get function beginnings from symbols in the binary.
:param bool function_prologues: Scan the binary for function prologues, and use those positions as function
beginnings
:param bool resolve_indirect_jumps: Try to resolve indirect jumps. This is necessary to resolve jump targets
from jump tables, etc.
:param bool force_segment: Force CFGFast to rely on binary segments instead of sections.
:param bool force_complete_scan: Perform a complete scan on the binary and maximize the number of identified
code blocks.
:param bool data_references: Enables the collection of references to data used by individual instructions.
This does not collect 'cross-references', particularly those that involve
multiple instructions. For that, see `cross_references`
:param bool cross_references: Whether CFGFast should collect "cross-references" from the entire program or
not. This will populate the knowledge base with references to and from each
recognizable address constant found in the code. Note that, because this
performs constant propagation on the entire program, it may be much slower and
consume more memory.
This option implies `data_references=True`.
:param bool normalize: Normalize the CFG as well as all function graphs after CFG recovery.
:param bool start_at_entry: Begin CFG recovery at the entry point of this project. Setting it to False
prevents CFGFast from viewing the entry point as one of the starting points of
code scanning.
:param list function_starts: A list of extra function starting points. CFGFast will try to resume scanning
from each address in the list.
        :param list extra_memory_regions: A list of 2-tuples (start-address, end-address) describing extra memory
                                          regions. Integer values falling inside these regions will be considered as
                                          pointers.
:param list indirect_jump_resolvers: A custom list of indirect jump resolvers. If this list is None or empty,
default indirect jump resolvers specific to this architecture and binary
types will be loaded.
:param base_state: A state to use as a backer for all memory loads
:param bool detect_tail_calls: Enable aggressive tail-call optimization detection.
:param int start: (Deprecated) The beginning address of CFG recovery.
:param int end: (Deprecated) The end address of CFG recovery.
:param CFGArchOptions arch_options: Architecture-specific options.
:param dict extra_arch_options: Any key-value pair in kwargs will be seen as an arch-specific option and will
be used to set the option value in self._arch_options.
Extra parameters that angr.Analysis takes:
:param progress_callback: Specify a callback function to get the progress during CFG recovery.
:param bool show_progressbar: Should CFGFast show a progressbar during CFG recovery or not.
:return: None
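        A hedged example of limiting recovery to specific memory ranges (the addresses below are made up):

            cfg = project.analyses.CFGFast(regions=[(0x401000, 0x402000), (0x403000, 0x404000)])

        For a single region this corresponds to the deprecated ``start=0x401000, end=0x402000`` form.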
"""
ForwardAnalysis.__init__(self, allow_merging=False)
CFGBase.__init__(
self,
'fast',
0,
normalize=normalize,
binary=binary,
force_segment=force_segment,
base_state=base_state,
iropt_level=1, # right now this is a must, since we rely on the VEX optimization to tell us
# the concrete jump targets of each block.
resolve_indirect_jumps=resolve_indirect_jumps,
indirect_jump_resolvers=indirect_jump_resolvers,
indirect_jump_target_limit=indirect_jump_target_limit,
detect_tail_calls=detect_tail_calls,
low_priority=low_priority,
model=model,
)
# necessary warnings
regions_not_specified = regions is None and binary is None and not objects
if self.project.loader._auto_load_libs is True and end is None and len(self.project.loader.all_objects) > 3 \
and regions_not_specified:
l.warning('"auto_load_libs" is enabled. With libraries loaded in project, CFGFast will cover libraries, '
'which may take significantly more time than expected. You may reload the binary with '
'"auto_load_libs" disabled, or specify "regions" to limit the scope of CFG recovery.'
)
if collect_data_references is not None:
l.warning('"collect_data_references" is deprecated and will be removed soon. Please use '
'"data_references" instead')
data_references = collect_data_references
if extra_cross_references is not None:
l.warning('"extra_cross_references" is deprecated and will be removed soon. Please use '
'"cross_references" instead')
cross_references = extra_cross_references
if start is not None or end is not None:
l.warning('"start" and "end" are deprecated and will be removed soon. Please use "regions" to specify one '
'or more memory regions instead.'
)
if regions is None:
regions = [ (start, end) ]
else:
l.warning('"regions", "start", and "end" are all specified. Ignoring "start" and "end".')
if binary is not None and not objects:
objects = [ binary ]
regions = regions if regions is not None else self._executable_memory_regions(objects=objects,
force_segment=force_segment
)
if exclude_sparse_regions:
new_regions = [ ]
for start_, end_ in regions:
if not self._is_region_extremely_sparse(start_, end_, base_state=base_state):
new_regions.append((start_, end_))
regions = new_regions
if skip_specific_regions:
if base_state is not None:
l.warning("You specified both base_state and skip_specific_regions. They may conflict with each other.")
new_regions = [ ]
for start_, end_ in regions:
if not self._should_skip_region(start_):
new_regions.append((start_, end_))
regions = new_regions
if not regions and self.project.arch.name != 'Soot':
raise AngrCFGError("Regions are empty or all regions are skipped. You may want to manually specify regions.")
# sort the regions
regions = sorted(regions, key=lambda x: x[0])
self._regions_size = sum((b - a) for a, b in regions)
        # initialize self._regions as a sorted dict
self._regions = SortedDict(regions)
self._pickle_intermediate_results = pickle_intermediate_results
self._use_symbols = symbols
self._use_function_prologues = function_prologues
self._force_complete_scan = force_complete_scan
if heuristic_plt_resolving is None:
# If unspecified, we only enable heuristic PLT resolving when there is at least one binary loaded with the
# ELF backend
self._heuristic_plt_resolving = len(self.project.loader.all_elf_objects) > 0
else:
self._heuristic_plt_resolving = heuristic_plt_resolving
self._start_at_entry = start_at_entry
self._extra_function_starts = function_starts
self._extra_memory_regions = extra_memory_regions
self._cross_references = cross_references
# You need data refs to get cross refs
self._collect_data_ref = data_references or self._cross_references
self._use_patches = use_patches
self._arch_options = arch_options if arch_options is not None else CFGArchOptions(
self.project.arch, **extra_arch_options)
self._data_type_guessing_handlers = [ ] if data_type_guessing_handlers is None else data_type_guessing_handlers
self._cfb = cfb
l.debug("CFG recovery covers %d regions:", len(self._regions))
for start_addr in self._regions:
l.debug("... %#x - %#x", start_addr, self._regions[start_addr])
# mapping to all known thunks
self._known_thunks = {}
self._initial_state = None
self._next_addr = None
# Create the segment list
self._seg_list = SegmentList()
self._read_addr_to_run = defaultdict(list)
self._write_addr_to_run = defaultdict(list)
self._function_prologue_addrs = None
self._remaining_function_prologue_addrs = None
#
# Variables used during analysis
#
self._pending_jobs = None
self._traced_addresses = None
self._function_returns = None
self._function_exits = None
# A mapping between address and the actual data in memory
# self._memory_data = { }
# A mapping between address of the instruction that's referencing the memory data and the memory data itself
# self.insn_addr_to_memory_data = { }
# self._graph = None
# Start working!
self._analyze()
def __getstate__(self):
d = dict(self.__dict__)
d['_progress_callback'] = None
return d
def __setstate__(self, d):
self.__dict__.update(d)
#
# Utils
#
@staticmethod
def _calc_entropy(data, size=None):
"""
Calculate the entropy of a piece of data
:param data: The target data to calculate entropy on
        :param size:  Size of the data. Optional.
:return: A float
"""
if not data:
return 0
entropy = 0
if size is None:
size = len(data)
data = bytes(pyvex.ffi.buffer(data, size))
for x in range(0, 256):
p_x = float(data.count(x)) / size
if p_x > 0:
entropy += - p_x * math.log(p_x, 2)
return entropy
#
# Properties
#
@property
def graph(self):
return self._model.graph
@property
def _insn_addr_to_memory_data(self):
l.warning('_insn_addr_to_memory_data has been made public and is deprecated. Please fix your code accordingly.')
return self._model.insn_addr_to_memory_data
@property
def _memory_data(self):
return self._model.memory_data
@property
def memory_data(self):
return self._model.memory_data
@property
def jump_tables(self):
return self._model.jump_tables
@property
def insn_addr_to_memory_data(self):
return self._model.insn_addr_to_memory_data
#
# Private methods
#
# Methods for determining scanning scope
def _inside_regions(self, address):
"""
Check if the address is inside any existing region.
:param int address: Address to check.
:return: True if the address is within one of the memory regions, False otherwise.
:rtype: bool
"""
try:
start_addr = next(self._regions.irange(maximum=address, reverse=True))
except StopIteration:
return False
else:
return address < self._regions[start_addr]
def _get_min_addr(self):
"""
Get the minimum address out of all regions. We assume self._regions is sorted.
:return: The minimum address.
:rtype: int
"""
if not self._regions:
if self.project.arch.name != "Soot":
l.error("self._regions is empty or not properly set.")
return None
return next(self._regions.irange())
def _next_address_in_regions(self, address):
"""
Return the next immediate address that is inside any of the regions.
:param int address: The address to start scanning.
:return: The next address that is inside one of the memory regions.
:rtype: int
"""
if self._inside_regions(address):
return address
try:
return next(self._regions.irange(minimum=address, reverse=False))
except StopIteration:
return None
# Methods for scanning the entire image
def _next_unscanned_addr(self, alignment=None):
"""
Find the next address that we haven't processed
        :param alignment: Ensures that the returned address is aligned to this number
:return: An address to process next, or None if all addresses have been processed
"""
# TODO: Take care of those functions that are already generated
if self._next_addr is None:
self._next_addr = self._get_min_addr()
curr_addr = self._next_addr
else:
curr_addr = self._next_addr + 1
if not self._inside_regions(curr_addr):
curr_addr = self._next_address_in_regions(curr_addr)
if curr_addr is None:
l.debug("All addresses within memory regions have been scanned.")
return None
if self._seg_list.has_blocks:
curr_addr = self._seg_list.next_free_pos(curr_addr)
if alignment is not None:
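            # Round the address up to the next multiple of `alignment` if it is not aligned already
            # (e.g. with alignment == 4, addresses 0x1001..0x1003 all become 0x1004).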
if curr_addr % alignment > 0:
curr_addr = curr_addr - (curr_addr % alignment) + alignment
# Make sure curr_addr exists in binary
accepted = False
for start, end in self._regions.items():
if start <= curr_addr < end:
# accept
accepted = True
break
if curr_addr < start:
# accept, but we are skipping the gap
accepted = True
curr_addr = start
break
if not accepted:
# No memory available!
return None
self._next_addr = curr_addr
if self._inside_regions(curr_addr):
l.debug("Returning a new recon address: %#x", curr_addr)
return curr_addr
l.debug("%#x is beyond the ending point. Returning None.", curr_addr)
return None
def _load_a_byte_as_int(self, addr):
if self._base_state is not None:
try:
val = self._base_state.mem_concrete(addr, 1, inspect=False, disable_actions=True)
except SimValueError:
# Not concretizable
l.debug("Address %#x is not concretizable!", addr)
return None
else:
val = self._fast_memory_load_byte(addr)
if val is None:
return None
return val
def _scan_for_printable_strings(self, start_addr):
addr = start_addr
sz = []
is_sz = True
# Get data until we meet a null-byte
while self._inside_regions(addr):
l.debug("Searching address %x", addr)
val = self._load_a_byte_as_int(addr)
if val is None:
break
if val == 0:
if len(sz) < 4:
is_sz = False
break
if val not in self.PRINTABLES:
is_sz = False
break
sz.append(val)
addr += 1
if sz and is_sz:
l.debug("Got a string of %d chars: [%s]", len(sz), bytes(sz).decode())
string_length = len(sz) + 1
return string_length
# no string is found
return 0
def _scan_for_repeating_bytes(self, start_addr, repeating_byte):
        assert isinstance(repeating_byte, int) and 0 <= repeating_byte <= 0xff  # a single byte value
addr = start_addr
repeating_length = 0
while self._inside_regions(addr):
val = self._load_a_byte_as_int(addr)
if val is None:
break
if val == repeating_byte:
repeating_length += 1
else:
break
addr += 1
if repeating_length > self.project.arch.bytes: # this is pretty random
return repeating_length
else:
return 0
def _next_code_addr_core(self):
"""
        Call _next_unscanned_addr() first to get the next address that has not been scanned. Then check whether the
        data at that address looks like code or not. If not, we continue with the next unscanned address.
"""
next_addr = self._next_unscanned_addr()
if next_addr is None:
return None
start_addr = next_addr
while True:
string_length = self._scan_for_printable_strings(start_addr)
if string_length:
self._seg_list.occupy(start_addr, string_length, "string")
start_addr += string_length
if self.project.arch.name in ('X86', 'AMD64'):
                cc_length = self._scan_for_repeating_bytes(start_addr, 0xcc)  # 0xcc is int3 padding on x86
if cc_length:
self._seg_list.occupy(start_addr, cc_length, "alignment")
start_addr += cc_length
else:
cc_length = 0
            zeros_length = self._scan_for_repeating_bytes(start_addr, 0x00)
if zeros_length:
self._seg_list.occupy(start_addr, zeros_length, "alignment")
start_addr += zeros_length
if string_length == 0 and cc_length == 0 and zeros_length == 0:
# umm now it's probably code
break
instr_alignment = self._initial_state.arch.instruction_alignment
if start_addr % instr_alignment > 0:
# occupy those few bytes
self._seg_list.occupy(start_addr, instr_alignment - (start_addr % instr_alignment), 'alignment')
start_addr = start_addr - start_addr % instr_alignment + \
instr_alignment
return start_addr
def _next_code_addr(self):
while True:
addr = self._next_code_addr_core()
if addr is None:
return None
# if the new address is already occupied
if not self._seg_list.is_occupied(addr):
return addr
# Overriden methods from ForwardAnalysis
def _job_key(self, job):
return job.addr
def _pre_analysis(self):
# Call _initialize_cfg() before self.functions is used.
self._initialize_cfg()
# Scan for __x86_return_thunk and friends
self._known_thunks = self._find_thunks()
# Initialize variables used during analysis
self._pending_jobs = PendingJobs(self.functions, self._deregister_analysis_job)
self._traced_addresses = set()
self._function_returns = defaultdict(set)
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
self._function_exits = defaultdict(set)
# Create an initial state. Store it to self so we can use it globally.
self._initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = self._initial_state.options - {o.TRACK_CONSTRAINTS} - o.refs
initial_options |= {o.SUPER_FASTPATH, o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS, o.SYMBOL_FILL_UNCONSTRAINED_MEMORY}
# initial_options.remove(o.COW_STATES)
self._initial_state.options = initial_options
starting_points = set()
# clear all existing functions
self.kb.functions.clear()
if self._use_symbols:
starting_points |= self._function_addresses_from_symbols
if self._extra_function_starts:
starting_points |= set(self._extra_function_starts)
# Sort it
starting_points = sorted(list(starting_points), reverse=True)
if self._start_at_entry and self.project.entry is not None and self._inside_regions(self.project.entry) and \
self.project.entry not in starting_points:
# make sure self.project.entry is inserted
starting_points += [ self.project.entry ]
# Create jobs for all starting points
for sp in starting_points:
job = CFGJob(sp, sp, 'Ijk_Boring')
self._insert_job(job)
# register the job to function `sp`
self._register_analysis_job(sp, job)
self._updated_nonreturning_functions = set()
if self._use_function_prologues and self.project.concrete_target is None:
self._function_prologue_addrs = sorted(self._func_addrs_from_prologues())
# make a copy of those prologue addresses, so that we can pop from the list
self._remaining_function_prologue_addrs = self._function_prologue_addrs[::]
# make function_prologue_addrs a set for faster lookups
self._function_prologue_addrs = set(self._function_prologue_addrs)
def _pre_job_handling(self, job): # pylint:disable=arguments-differ
"""
        Some pre-job-processing tasks, like updating the progress bar.
:param CFGJob job: The CFGJob instance.
:return: None
"""
if self._low_priority:
self._release_gil(len(self._nodes), 20, 0.0001)
# a new entry is picked. Deregister it
self._deregister_analysis_job(job.func_addr, job)
if not self._inside_regions(job.addr):
obj = self.project.loader.find_object_containing(job.addr)
if obj is not None and isinstance(obj, self._cle_pseudo_objects):
pass
else:
# it's outside permitted regions. skip.
raise AngrSkipJobNotice()
# Do not calculate progress if the user doesn't care about the progress at all
if self._show_progressbar or self._progress_callback:
max_percentage_stage_1 = 50.0
percentage = self._seg_list.occupied_size * max_percentage_stage_1 / self._regions_size
if percentage > max_percentage_stage_1:
percentage = max_percentage_stage_1
self._update_progress(percentage, cfg=self)
def _intra_analysis(self):
pass
def _get_successors(self, job): # pylint:disable=arguments-differ
# current_function_addr = job.func_addr
# addr = job.addr
# if current_function_addr != -1:
# l.debug("Tracing new exit %#x in function %#x", addr, current_function_addr)
# else:
# l.debug("Tracing new exit %#x", addr)
jobs = self._scan_block(job)
# l.debug("... got %d jobs: %s", len(jobs), jobs)
for job_ in jobs: # type: CFGJob
# register those jobs
self._register_analysis_job(job_.func_addr, job_)
return jobs
def _handle_successor(self, job, successor, successors):
return [ successor ]
def _merge_jobs(self, *jobs):
pass
def _widen_jobs(self, *jobs):
pass
def _post_process_successors(self, irsb, successors):
if is_arm_arch(self.project.arch) and irsb.addr % 2 == 1:
# we are in thumb mode. filter successors
successors = self._arm_thumb_filter_jump_successors(irsb,
successors,
lambda tpl: tpl[1],
lambda tpl: tpl[0],
lambda tpl: tpl[3],
)
return successors
def _post_job_handling(self, job, new_jobs, successors):
pass
def _job_queue_empty(self):
if self._pending_jobs:
# fastpath
# look for a job that comes from a function that must return
# if we can find one, just use it
job = self._pop_pending_job(returning=True)
if job is not None:
self._insert_job(job)
return
self._clean_pending_exits()
# did we finish analyzing any function?
# fill in self._completed_functions
self._make_completed_functions()
# analyze function features, most importantly, whether each function returns or not
self._analyze_all_function_features()
# Clear _changed_functions set
self._updated_nonreturning_functions = set()
if self._pending_jobs:
self._clean_pending_exits()
job = self._pop_pending_job(returning=True)
if job is not None:
self._insert_job(job)
return
job = self._pop_pending_job(returning=False)
if job is not None:
self._insert_job(job)
return
# Try to see if there is any indirect jump left to be resolved
if self._resolve_indirect_jumps and self._indirect_jumps_to_resolve:
self._process_unresolved_indirect_jumps()
if self._job_info_queue:
return
if self._use_function_prologues and self._remaining_function_prologue_addrs:
while self._remaining_function_prologue_addrs:
prolog_addr = self._remaining_function_prologue_addrs[0]
self._remaining_function_prologue_addrs = self._remaining_function_prologue_addrs[1:]
if self._seg_list.is_occupied(prolog_addr):
continue
job = CFGJob(prolog_addr, prolog_addr, 'Ijk_Boring')
self._insert_job(job)
self._register_analysis_job(prolog_addr, job)
return
if self._force_complete_scan:
addr = self._next_code_addr()
if addr is None:
l.debug("Force-scan jumping failed")
else:
l.debug("Force-scanning to %#x", addr)
if addr is not None:
# if this is ARM and addr % 4 != 0, it has to be THUMB
if is_arm_arch(self.project.arch) and addr % 2 == 0 and addr % 4 != 0:
addr |= 1
job = CFGJob(addr, addr, "Ijk_Boring", last_addr=None, job_type=CFGJob.JOB_TYPE_COMPLETE_SCANNING)
self._insert_job(job)
self._register_analysis_job(addr, job)
def _post_analysis(self):
self._make_completed_functions()
if self._normalize:
# Normalize the control flow graph first before rediscovering all functions
self.normalize()
if self.project.arch.name in ('X86', 'AMD64', 'MIPS32'):
self._remove_redundant_overlapping_blocks()
self._updated_nonreturning_functions = set()
# Revisit all edges and rebuild all functions to correctly handle returning/non-returning functions.
self.make_functions()
self._analyze_all_function_features(all_funcs_completed=True)
# Scan all functions, and make sure all fake ret edges are either confirmed or removed
for f in self.functions.values():
all_edges = f.transition_graph.edges(data=True)
callsites_to_functions = defaultdict(list) # callsites to functions mapping
for src, dst, data in all_edges:
if 'type' in data:
if data['type'] == 'call':
callsites_to_functions[src.addr].append(dst.addr)
edges_to_remove = [ ]
for src, dst, data in all_edges:
if 'type' in data:
if data['type'] == 'fake_return' and 'confirmed' not in data:
# Get all possible functions being called here
target_funcs = [ self.functions.function(addr=func_addr)
for func_addr in callsites_to_functions[src.addr]
]
if target_funcs and all(t is not None and t.returning is False for t in target_funcs):
# Remove this edge
edges_to_remove.append((src, dst))
else:
# Mark this edge as confirmed
f._confirm_fakeret(src, dst)
for edge in edges_to_remove:
f.transition_graph.remove_edge(*edge)
# Clear the cache
f._local_transition_graph = None
# Scan all functions, and make sure .returning for all functions are either True or False
for f in self.functions.values():
if f.returning is None:
f.returning = len(f.endpoints) > 0 # pylint:disable=len-as-condition
# Finally, mark endpoints of every single function
for function in self.kb.functions.values():
function.mark_nonreturning_calls_endpoints()
# optional: remove functions that must be alignments
self.mark_function_alignments()
# make return edges
self._make_return_edges()
if self.project.arch.name != 'Soot':
if self.project.loader.main_object.sections:
# this binary has sections
# make sure we have data entries assigned at the beginning of each data section
for sec in self.project.loader.main_object.sections:
if sec.memsize > 0 and not sec.is_executable and sec.is_readable:
for seg in self.project.loader.main_object.segments:
if seg.vaddr <= sec.vaddr < seg.vaddr + seg.memsize:
break
else:
continue
if sec.vaddr not in self.model.memory_data:
self.model.memory_data[sec.vaddr] = MemoryData(sec.vaddr, 0, MemoryDataSort.Unknown)
# If they asked for it, give it to them. All of it.
if self._cross_references:
self._do_full_xrefs()
r = True
while r:
r = self._tidy_data_references()
CFGBase._post_analysis(self)
self._finish_progress()
def _do_full_xrefs(self):
l.info("Building cross-references...")
# Time to make our CPU hurt
state = self.project.factory.blank_state()
for f_addr in self.functions:
f = None
try:
f = self.functions[f_addr]
if f.is_simprocedure:
continue
l.debug("\tFunction %s", f.name)
# constant prop
prop = self.project.analyses.Propagator(func=f, base_state=state)
# Collect all the refs
self.project.analyses.XRefs(func=f, replacements=prop.replacements)
except Exception: # pylint: disable=broad-except
if f is not None:
l.exception("Error collecting XRefs for function %s.", f.name, exc_info=True)
else:
l.exception("Error collecting XRefs for function %#x.", f_addr, exc_info=True)
# Methods to get start points for scanning
def _func_addrs_from_prologues(self):
"""
Scan the entire program image for function prologues, and start code scanning at those positions
:return: A list of possible function addresses
"""
# Pre-compile all regexes
regexes = list()
for ins_regex in self.project.arch.function_prologs:
r = re.compile(ins_regex)
regexes.append(r)
# EDG says: I challenge anyone bothering to read this to come up with a better
# way to handle CPU modes that affect instruction decoding.
# Since the only one we care about is ARM/Thumb right now
# we have this gross hack. Sorry about that.
thumb_regexes = list()
if hasattr(self.project.arch, 'thumb_prologs'):
for ins_regex in self.project.arch.thumb_prologs:
# Thumb prologues are found at even addrs, but their actual addr is odd!
# Isn't that great?
r = re.compile(ins_regex)
thumb_regexes.append(r)
# Construct the binary blob first
unassured_functions = [ ]
for start_, bytes_ in self._binary.memory.backers():
for regex in regexes:
# Match them!
for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
mapped_position = AT.from_rva(position, self._binary).to_mva()
if self._addr_in_exec_memory_regions(mapped_position):
unassured_functions.append(mapped_position)
# HACK part 2: Yes, i really have to do this
for regex in thumb_regexes:
# Match them!
for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
mapped_position = AT.from_rva(position, self._binary).to_mva()
if self._addr_in_exec_memory_regions(mapped_position):
unassured_functions.append(mapped_position+1)
l.info("Found %d functions with prologue scanning.", len(unassured_functions))
return unassured_functions
# Basic block scanning
def _scan_block(self, cfg_job):
"""
Scan a basic block starting at a specific address
:param CFGJob cfg_job: The CFGJob instance.
:return: a list of successors
:rtype: list
"""
addr = cfg_job.addr
current_func_addr = cfg_job.func_addr
# Fix the function address
# This is for rare cases where we cannot successfully determine the end boundary of a previous function, and
# as a consequence, our analysis mistakenly thinks the previous function goes all the way across the boundary,
        # resulting in the second function missing from the function manager.
if addr in self._function_addresses_from_symbols:
current_func_addr = addr
if self._addr_hooked_or_syscall(addr):
entries = self._scan_procedure(cfg_job, current_func_addr)
else:
entries = self._scan_irsb(cfg_job, current_func_addr)
return entries
def _scan_procedure(self, cfg_job, current_func_addr):
"""
        Checks the procedure hooked at this address (or the syscall at this address), searching for new static
        exit points to add to the successors (generating jobs for them)
        if this address has not been traced before. Updates previous CFG nodes
        with edges.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_func_addr: Address of the current function.
:return: List of successors
:rtype: list
"""
addr = cfg_job.addr
try:
if self.project.is_hooked(addr):
procedure = self.project.hooked_by(addr)
name = procedure.display_name
else:
procedure = self.project.simos.syscall_from_addr(addr)
name = procedure.display_name
if addr not in self._nodes:
cfg_node = CFGNode(addr, 0, self.model,
function_address=current_func_addr,
simprocedure_name=name,
no_ret=procedure.NO_RET,
block_id=addr,
)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
else:
cfg_node = self._nodes[addr]
except (SimMemoryError, SimEngineError):
return [ ]
self._graph_add_edge(cfg_node, cfg_job.src_node, cfg_job.jumpkind, cfg_job.src_ins_addr,
cfg_job.src_stmt_idx
)
self._function_add_node(cfg_node, current_func_addr)
# Add edges going to this node in function graphs
cfg_job.apply_function_edges(self, clear=True)
# If we have traced it before, don't trace it anymore
if addr in self._traced_addresses:
return [ ]
else:
# Mark the address as traced
self._traced_addresses.add(addr)
entries = [ ]
if procedure.ADDS_EXITS:
# Get two blocks ahead
if cfg_job.src_node is None:
l.warning("%s is supposed to yield new exits, but it fails to do so.", name)
return []
grandparent_nodes = list(self.graph.predecessors(cfg_job.src_node))
grandparent_node = grandparent_nodes[0] if grandparent_nodes else None
blocks_ahead = [ ]
if grandparent_node is not None:
blocks_ahead.append(self._lift(grandparent_node.addr).vex)
blocks_ahead.append(self._lift(cfg_job.src_node.addr).vex)
procedure.project = self.project
procedure.arch = self.project.arch
new_exits = procedure.static_exits(blocks_ahead)
for new_exit in new_exits:
addr_ = new_exit['address']
jumpkind = new_exit['jumpkind']
namehint = new_exit.get('namehint', None)
if isinstance(addr_, claripy.ast.BV) and not addr_.symbolic:
addr_ = addr_._model_concrete.value
if not isinstance(addr_, int):
continue
entries += self._create_jobs(addr_, jumpkind, current_func_addr, None, addr_, cfg_node, None,
None,
)
if namehint and addr_ not in self.kb.labels:
unique_label = self.kb.labels.get_unique_label(namehint)
self.kb.labels[addr_] = unique_label
if not procedure.NO_RET:
# it returns
cfg_node.has_return = True
self._function_exits[current_func_addr].add(addr)
self._function_add_return_site(addr, current_func_addr)
else:
# the procedure does not return
self._updated_nonreturning_functions.add(current_func_addr)
return entries
def _scan_irsb(self, cfg_job, current_func_addr):
"""
Generate a list of successors (generating them each as entries) to IRSB.
Updates previous CFG nodes with edges.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_func_addr: Address of the current function
:return: a list of successors
:rtype: list
"""
addr, function_addr, cfg_node, irsb = self._generate_cfgnode(cfg_job, current_func_addr)
# Add edges going to this node in function graphs
cfg_job.apply_function_edges(self, clear=True)
# function_addr and current_function_addr can be different. e.g. when tracing an optimized tail-call that jumps
# into another function that has been identified before.
if cfg_node is None:
# exceptions occurred, or we cannot get a CFGNode for other reasons
return [ ]
self._graph_add_edge(cfg_node, cfg_job.src_node, cfg_job.jumpkind, cfg_job.src_ins_addr,
cfg_job.src_stmt_idx
)
self._function_add_node(cfg_node, function_addr)
if self.functions.get_by_addr(function_addr).returning is not True:
self._updated_nonreturning_functions.add(function_addr)
# If we have traced it before, don't trace it anymore
real_addr = get_real_address_if_arm(self.project.arch, addr)
if real_addr in self._traced_addresses:
# the address has been traced before
return [ ]
else:
# Mark the address as traced
self._traced_addresses.add(real_addr)
# irsb cannot be None here
# assert irsb is not None
# IRSB is only used once per CFGNode. We should be able to clean up the CFGNode here in order to save memory
cfg_node.irsb = None
self._process_block_arch_specific(addr, irsb, function_addr)
# Scan the basic block to collect data references
if self._collect_data_ref:
self._collect_data_references(irsb, addr)
# Get all possible successors
irsb_next, jumpkind = irsb.next, irsb.jumpkind
successors = [ ]
last_ins_addr = None
ins_addr = addr
if irsb.statements:
for i, stmt in enumerate(irsb.statements):
if isinstance(stmt, pyvex.IRStmt.Exit):
successors.append((i,
last_ins_addr if self.project.arch.branch_delay_slot else ins_addr,
stmt.dst,
stmt.jumpkind
)
)
elif isinstance(stmt, pyvex.IRStmt.IMark):
last_ins_addr = ins_addr
ins_addr = stmt.addr + stmt.delta
else:
for ins_addr, stmt_idx, exit_stmt in irsb.exit_statements:
successors.append((
stmt_idx,
last_ins_addr if self.project.arch.branch_delay_slot else ins_addr,
exit_stmt.dst,
exit_stmt.jumpkind
))
successors.append((DEFAULT_STATEMENT,
last_ins_addr if self.project.arch.branch_delay_slot else ins_addr, irsb_next, jumpkind)
)
entries = [ ]
successors = self._post_process_successors(irsb, successors)
# Process each successor
for suc in successors:
stmt_idx, ins_addr, target, jumpkind = suc
entries += self._create_jobs(target, jumpkind, function_addr, irsb, addr, cfg_node, ins_addr,
stmt_idx
)
return entries
def _create_jobs(self, target, jumpkind, current_function_addr, irsb, addr, cfg_node, ins_addr, stmt_idx):
"""
        Given a node and the details of a successor, make a list of CFGJobs,
        and if it is a call or exit, mark it appropriately in the CFG.
:param int target: Destination of the resultant job
:param str jumpkind: The jumpkind of the edge going to this node
:param int current_function_addr: Address of the current function
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param int addr: The predecessor address
:param CFGNode cfg_node: The CFGNode of the predecessor node
:param int ins_addr: Address of the source instruction.
:param int stmt_idx: ID of the source statement.
:return: a list of CFGJobs
:rtype: list
"""
if type(target) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
target_addr = target.con.value
elif type(target) in (pyvex.IRConst.U8, pyvex.IRConst.U16, pyvex.IRConst.U32, pyvex.IRConst.U64): # pylint: disable=unidiomatic-typecheck
target_addr = target.value
elif type(target) is int: # pylint: disable=unidiomatic-typecheck
target_addr = target
else:
target_addr = None
if target_addr in self._known_thunks and jumpkind == 'Ijk_Boring':
thunk_kind = self._known_thunks[target_addr][0]
if thunk_kind == 'ret':
jumpkind = 'Ijk_Ret'
target_addr = None
elif thunk_kind == 'jmp':
pass # ummmmmm not sure about this one
else:
raise AngrCFGError("This shouldn't be possible")
jobs = [ ]
is_syscall = jumpkind.startswith("Ijk_Sys")
# Special handling:
# If a call instruction has a target that points to the immediate next instruction, we treat it as a boring jump
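        # (this pattern shows up, for example, in position-independent code that branches to the very next instruction
        # just to obtain the current program counter)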
if jumpkind == "Ijk_Call" and \
not self.project.arch.call_pushes_ret and \
cfg_node.instruction_addrs and \
ins_addr == cfg_node.instruction_addrs[-1] and \
target_addr == irsb.addr + irsb.size:
jumpkind = "Ijk_Boring"
if target_addr is None:
# The target address is not a concrete value
if jumpkind == "Ijk_Ret":
# This block ends with a return instruction.
if current_function_addr != -1:
self._function_exits[current_function_addr].add(addr)
self._function_add_return_site(addr, current_function_addr)
self.functions[current_function_addr].returning = True
self._pending_jobs.add_returning_function(current_function_addr)
cfg_node.has_return = True
elif self._resolve_indirect_jumps and \
(jumpkind in ('Ijk_Boring', 'Ijk_Call', 'Ijk_InvalICache') or jumpkind.startswith('Ijk_Sys')):
# This is an indirect jump. Try to resolve it.
# FIXME: in some cases, a statementless irsb will be missing its instr addresses
# and this next part will fail. Use the real IRSB instead
irsb = cfg_node.block.vex
cfg_node.instruction_addrs = irsb.instruction_addresses
resolved, resolved_targets, ij = self._indirect_jump_encountered(addr, cfg_node, irsb,
current_function_addr, stmt_idx)
if resolved:
for resolved_target in resolved_targets:
if jumpkind == 'Ijk_Call':
jobs += self._create_job_call(cfg_node.addr, irsb, cfg_node, stmt_idx, ins_addr,
current_function_addr, resolved_target, jumpkind)
else:
edge = FunctionTransitionEdge(cfg_node, resolved_target, current_function_addr,
to_outside=False, stmt_idx=stmt_idx, ins_addr=ins_addr,
)
ce = CFGJob(resolved_target, current_function_addr, jumpkind,
last_addr=resolved_target, src_node=cfg_node, src_stmt_idx=stmt_idx,
src_ins_addr=ins_addr, func_edges=[ edge ],
)
jobs.append(ce)
return jobs
if jumpkind in ("Ijk_Boring", 'Ijk_InvalICache'):
resolved_as_plt = False
if irsb and self._heuristic_plt_resolving:
# Test it on the initial state. Does it jump to a valid location?
# It will be resolved only if this is a .plt entry
resolved_as_plt = self._resolve_plt(addr, irsb, ij)
if resolved_as_plt:
jump_target = next(iter(ij.resolved_targets))
target_func_addr = jump_target # TODO: FIX THIS
edge = FunctionTransitionEdge(cfg_node, jump_target, current_function_addr,
to_outside=True, dst_func_addr=jump_target,
stmt_idx=stmt_idx, ins_addr=ins_addr,
)
ce = CFGJob(jump_target, target_func_addr, jumpkind, last_addr=jump_target,
src_node=cfg_node, src_stmt_idx=stmt_idx, src_ins_addr=ins_addr,
func_edges=[edge],
)
jobs.append(ce)
if resolved_as_plt:
# has been resolved as a PLT entry. Remove it from indirect_jumps_to_resolve
if ij.addr in self._indirect_jumps_to_resolve:
self._indirect_jumps_to_resolve.remove(ij.addr)
self._deregister_analysis_job(current_function_addr, ij)
else:
# add it to indirect_jumps_to_resolve
self._indirect_jumps_to_resolve.add(ij)
# register it as a job for the current function
self._register_analysis_job(current_function_addr, ij)
else: # jumpkind == "Ijk_Call" or jumpkind.startswith('Ijk_Sys')
self._indirect_jumps_to_resolve.add(ij)
self._register_analysis_job(current_function_addr, ij)
jobs += self._create_job_call(addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr, None,
jumpkind, is_syscall=is_syscall
)
elif target_addr is not None:
# This is a direct jump with a concrete target.
# pylint: disable=too-many-nested-blocks
if jumpkind in ('Ijk_Boring', 'Ijk_InvalICache'):
# if the target address is at another section, it has to be jumping to a new function
if not self._addrs_belong_to_same_section(addr, target_addr):
target_func_addr = target_addr
to_outside = True
else:
# it might be a jumpout
target_func_addr = None
real_target_addr = get_real_address_if_arm(self.project.arch, target_addr)
if real_target_addr in self._traced_addresses:
node = self.model.get_any_node(target_addr)
if node is not None:
target_func_addr = node.function_address
if target_func_addr is None:
target_func_addr = current_function_addr
to_outside = not target_func_addr == current_function_addr
edge = FunctionTransitionEdge(cfg_node, target_addr, current_function_addr,
to_outside=to_outside,
dst_func_addr=target_func_addr,
ins_addr=ins_addr,
stmt_idx=stmt_idx,
)
ce = CFGJob(target_addr, target_func_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_ins_addr=ins_addr, src_stmt_idx=stmt_idx, func_edges=[ edge ])
jobs.append(ce)
elif jumpkind == 'Ijk_Call' or jumpkind.startswith("Ijk_Sys"):
jobs += self._create_job_call(addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr,
target_addr, jumpkind, is_syscall=is_syscall
)
else:
# TODO: Support more jumpkinds
l.debug("Unsupported jumpkind %s", jumpkind)
l.debug("Instruction address: %#x", ins_addr)
return jobs
def _create_job_call(self, addr, irsb, cfg_node, stmt_idx, ins_addr, current_function_addr, target_addr, jumpkind,
is_syscall=False):
"""
        Generate a CFGJob for the target address, and also add a job to _pending_jobs
        for the return to the succeeding position (if the irsb argument is populated).
:param int addr: Address of the predecessor node
:param pyvex.IRSB irsb: IRSB of the predecessor node
:param CFGNode cfg_node: The CFGNode instance of the predecessor node
:param int stmt_idx: ID of the source statement
:param int ins_addr: Address of the source instruction
:param int current_function_addr: Address of the current function
:param int target_addr: Destination of the call
:param str jumpkind: The jumpkind of the edge going to this node
:param bool is_syscall: Is the jump kind (and thus this) a system call
:return: A list of CFGJobs
:rtype: list
"""
jobs = [ ]
if is_syscall:
# Fix the target_addr for syscalls
tmp_state = self.project.factory.blank_state(mode="fastpath", addr=cfg_node.addr)
# Find the first successor with a syscall jumpkind
succ = next(iter(succ for succ in self.project.factory.successors(tmp_state).flat_successors
if succ.history.jumpkind and succ.history.jumpkind.startswith("Ijk_Sys")), None)
if succ is None:
# For some reason, there is no such successor with a syscall jumpkind
target_addr = self._unresolvable_call_target_addr
else:
try:
syscall_stub = self.project.simos.syscall(succ)
                    if syscall_stub: # can be None if simos is not a subclass of SimUserland
syscall_addr = syscall_stub.addr
target_addr = syscall_addr
else:
target_addr = self._unresolvable_call_target_addr
except AngrUnsupportedSyscallError:
target_addr = self._unresolvable_call_target_addr
if isinstance(target_addr, SootAddressDescriptor):
new_function_addr = target_addr.method
else:
new_function_addr = target_addr
if irsb is None:
return_site = None
else:
if self.project.arch.name != 'Soot':
return_site = addr + irsb.size # We assume the program will always return to the succeeding position
else:
# For Soot, we return to the next statement, which is not necessarily the next block (as Shimple does
# not break blocks at calls)
assert isinstance(ins_addr, SootAddressDescriptor)
soot_block = irsb
return_block_idx = ins_addr.block_idx
if stmt_idx + 1 >= soot_block.label + len(soot_block.statements):
# tick the block ID
return_block_idx += 1
return_site = SootAddressDescriptor(ins_addr.method, return_block_idx, stmt_idx + 1)
edge = None
if new_function_addr is not None:
edge = FunctionCallEdge(cfg_node, new_function_addr, return_site, current_function_addr, syscall=is_syscall,
                                    ins_addr=ins_addr, stmt_idx=stmt_idx,
)
if new_function_addr is not None:
# Keep tracing from the call
ce = CFGJob(target_addr, new_function_addr, jumpkind, last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, syscall=is_syscall, func_edges=[ edge ]
)
jobs.append(ce)
callee_might_return = True
callee_function = None
if new_function_addr is not None:
if is_syscall or self.project.is_hooked(new_function_addr):
# we can create the function if it is a syscall or a SimProcedure and it does not exist yet. Note that
# syscalls are handled as SimProcedures anyway.
callee_function = self.kb.functions.function(addr=new_function_addr, syscall=is_syscall, create=True)
else:
callee_function = self.kb.functions.function(addr=new_function_addr, syscall=is_syscall)
if callee_function is not None:
callee_might_return = not (callee_function.returning is False)
if callee_might_return:
func_edges = [ ]
if return_site is not None:
if callee_function is not None and callee_function.returning is True:
fakeret_edge = FunctionFakeRetEdge(cfg_node, return_site, current_function_addr, confirmed=True)
func_edges.append(fakeret_edge)
ret_edge = FunctionReturnEdge(new_function_addr, return_site, current_function_addr)
func_edges.append(ret_edge)
# Also, keep tracing from the return site
ce = CFGJob(return_site, current_function_addr, 'Ijk_FakeRet', last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, returning_source=new_function_addr,
syscall=is_syscall, func_edges=func_edges)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
elif callee_function is not None and callee_function.returning is False:
pass # Don't go past a call that does not return!
else:
# HACK: We don't know where we are jumping. Let's assume we fakeret to the
# next instruction after the block
# TODO: FIXME: There are arch-specific hints to give the correct ret site
# Such as looking for constant values of LR in this block for ARM stuff.
fakeret_edge = FunctionFakeRetEdge(cfg_node, return_site, current_function_addr, confirmed=None)
func_edges.append(fakeret_edge)
fr = FunctionReturn(new_function_addr, current_function_addr, addr, return_site)
if fr not in self._function_returns[new_function_addr]:
self._function_returns[new_function_addr].add(fr)
ce = CFGJob(return_site, current_function_addr, 'Ijk_FakeRet', last_addr=addr, src_node=cfg_node,
src_stmt_idx=stmt_idx, src_ins_addr=ins_addr, returning_source=new_function_addr,
syscall=is_syscall, func_edges=func_edges)
self._pending_jobs.add_job(ce)
# register this job to this function
self._register_analysis_job(current_function_addr, ce)
return jobs
# Data reference processing
def _collect_data_references(self, irsb, irsb_addr):
"""
        Un-optimizes the IRSB and calls _add_data_reference for individual statements or
        for parts of statements (e.g. Store).
:param pyvex.IRSB irsb: Block to scan for data references
:param int irsb_addr: Address of block
:return: None
"""
if irsb.data_refs:
self._process_irsb_data_refs(irsb)
elif irsb.statements:
irsb = self._unoptimize_irsb(irsb)
# for each statement, collect all constants that are referenced or used.
self._collect_data_references_by_scanning_stmts(irsb, irsb_addr)
def _process_irsb_data_refs(self, irsb):
for ref in irsb.data_refs:
if ref.data_size:
self._seg_list.occupy(ref.data_addr, ref.data_size, "unknown")
self._add_data_reference(
irsb.addr,
ref.stmt_idx,
ref.ins_addr,
ref.data_addr,
data_size=ref.data_size,
data_type=ref.data_type_str
)
def _unoptimize_irsb(self, irsb):
if self.project.arch.name in ('X86', 'AMD64'):
# first pass to see if there are any cross-statement optimizations. if so, we relift the basic block with
            # optimization level 0 to preserve as many constant references as possible
empty_insn = False
all_statements = len(irsb.statements)
for i, stmt in enumerate(irsb.statements[:-1]):
if isinstance(stmt, pyvex.IRStmt.IMark) and (
isinstance(irsb.statements[i + 1], pyvex.IRStmt.IMark) or
(i + 2 < all_statements and isinstance(irsb.statements[i + 2], pyvex.IRStmt.IMark))
):
# this is a very bad check...
# the correct way to do it is to disable cross-instruction optimization in VEX
empty_insn = True
break
if empty_insn:
# make sure opt_level is 0
irsb = self._lift(addr=irsb.addr, size=irsb.size, opt_level=0, collect_data_refs=True).vex
return irsb
def _collect_data_references_by_scanning_stmts(self, irsb, irsb_addr):
# helper methods
def _process(stmt_idx_, data_, insn_addr, next_insn_addr, data_size=None, data_type=None):
"""
Helper method used for calling _add_data_reference after checking
for manipulation of constants
:param int stmt_idx_: Statement ID
:param data_: data manipulated by statement
:param int insn_addr: instruction address
:param int next_insn_addr: next instruction address
:param data_size: Size of the data being manipulated
:param str data_type: Type of the data being manipulated
:return: None
"""
if type(data_) is pyvex.expr.Const: # pylint: disable=unidiomatic-typecheck
val = data_.con.value
elif type(data_) is int:
val = data_
else:
return
if val != next_insn_addr:
if data_size:
# Mark the region as unknown so we won't try to create a code block covering this region in the
# future.
self._seg_list.occupy(val, data_size, "unknown")
self._add_data_reference(irsb_addr, stmt_idx_, insn_addr, val, data_size=data_size, data_type=data_type)
# get all instruction addresses
instr_addrs = irsb.instruction_addresses
# for each statement, collect all constants that are referenced or used.
instr_addr = None
next_instr_addr = None
for stmt_idx, stmt in enumerate(irsb.statements):
if type(stmt) is pyvex.IRStmt.IMark: # pylint: disable=unidiomatic-typecheck
instr_addr = stmt.addr + stmt.delta
# there can be weird cases sometimes... I've seen two IMarks with the exact same address showing up one
# after the other.
if instr_addrs and instr_addr == instr_addrs[0]:
instr_addr = instr_addrs[0]
instr_addrs = instr_addrs[1 : ]
next_instr_addr = instr_addrs[0] if instr_addrs else None
elif type(stmt) is pyvex.IRStmt.WrTmp: # pylint: disable=unidiomatic-typecheck
if type(stmt.data) is pyvex.IRExpr.Load: # pylint: disable=unidiomatic-typecheck
# load
# e.g. t7 = LDle:I64(0x0000000000600ff8)
size = stmt.data.result_size(irsb.tyenv) // 8 # convert to bytes
_process(stmt_idx, stmt.data.addr, instr_addr, next_instr_addr, data_size=size, data_type='integer')
elif type(stmt.data) in (pyvex.IRExpr.Binop, ): # pylint: disable=unidiomatic-typecheck
# rip-related addressing
if stmt.data.op in ('Iop_Add32', 'Iop_Add64') and \
all(type(arg) is pyvex.expr.Const for arg in stmt.data.args):
# perform the addition
loc = stmt.data.args[0].con.value + stmt.data.args[1].con.value
_process(stmt_idx, loc, instr_addr, next_instr_addr)
else:
# binary operation
for arg in stmt.data.args:
_process(stmt_idx, arg, instr_addr, next_instr_addr)
elif type(stmt.data) is pyvex.IRExpr.Const: # pylint: disable=unidiomatic-typecheck
_process(stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt.data) is pyvex.IRExpr.ITE:
for child_expr in stmt.data.child_expressions:
_process(stmt_idx, child_expr, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Put: # pylint: disable=unidiomatic-typecheck
# put
# e.g. PUT(rdi) = 0x0000000000400714
if stmt.offset not in (self._initial_state.arch.ip_offset, ):
_process(stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Store: # pylint: disable=unidiomatic-typecheck
# store addr
_process(stmt_idx, stmt.addr, instr_addr, next_instr_addr)
# store data
_process(stmt_idx, stmt.data, instr_addr, next_instr_addr)
elif type(stmt) is pyvex.IRStmt.Dirty:
_process(stmt_idx, stmt.mAddr, instr_addr, next_instr_addr, data_size=stmt.mSize, data_type='fp')
def _add_data_reference(self, irsb_addr, stmt_idx, insn_addr, data_addr, # pylint: disable=unused-argument
data_size=None, data_type=None):
"""
        Checks that the address is in a valid segment and creates or updates the
        MemoryData entry in _memory_data as appropriate, labelling it as a segment
        boundary or with its data type.
:param int irsb_addr: irsb address
:param int stmt_idx: Statement ID
:param int insn_addr: instruction address
:param data_addr: address of data manipulated by statement
:param data_size: Size of the data being manipulated
:param str data_type: Type of the data being manipulated
:return: None
"""
# Make sure data_addr is within a valid memory range
if not self.project.loader.find_segment_containing(data_addr):
# data might be at the end of some section or segment...
# let's take a look
for segment in self.project.loader.main_object.segments:
if segment.vaddr + segment.memsize == data_addr:
# yeah!
new_data = False
if data_addr not in self._memory_data:
data = MemoryData(data_addr, 0, MemoryDataSort.SegmentBoundary)
self._memory_data[data_addr] = data
new_data = True
if new_data or self._cross_references:
cr = XRef(ins_addr=insn_addr, block_addr=irsb_addr, stmt_idx=stmt_idx,
memory_data=self.model.memory_data[data_addr], xref_type=XRefType.Offset,
)
self.kb.xrefs.add_xref(cr)
break
return
new_data = False
if data_addr not in self._memory_data:
if data_type is not None and data_size is not None:
data = MemoryData(data_addr, data_size, data_type, max_size=data_size)
else:
data = MemoryData(data_addr, 0, MemoryDataSort.Unknown)
self._memory_data[data_addr] = data
new_data = True
if new_data or self._cross_references:
cr = XRef(ins_addr=insn_addr, block_addr=irsb_addr, stmt_idx=stmt_idx,
memory_data=self.model.memory_data[data_addr],
xref_type=XRefType.Offset,
)
self.kb.xrefs.add_xref(cr)
self.insn_addr_to_memory_data[insn_addr] = self._memory_data[data_addr]
def _tidy_data_references(self):
"""
:return: True if new data entries are found, False otherwise.
:rtype: bool
"""
# Make sure all memory data entries cover all data sections
keys = sorted(self._memory_data.keys())
for i, data_addr in enumerate(keys):
data = self._memory_data[data_addr]
if self._addr_in_exec_memory_regions(data.address):
# TODO: Handle data among code regions (or executable regions)
pass
else:
if i + 1 != len(keys):
next_data_addr = keys[i + 1]
else:
next_data_addr = None
# goes until the end of the section/segment
# TODO: the logic needs more testing
sec = self.project.loader.find_section_containing(data_addr)
next_sec_addr = None
if sec is not None:
last_addr = sec.vaddr + sec.memsize
else:
                    # it does not belong to any section. what's the next adjacent section? memory data cannot go
                    # beyond section boundaries
next_sec = self.project.loader.find_section_next_to(data_addr)
if next_sec is not None:
next_sec_addr = next_sec.vaddr
seg = self.project.loader.find_segment_containing(data_addr)
if seg is not None:
last_addr = seg.vaddr + seg.memsize
else:
# We got an address that is not inside the current binary...
l.warning('_tidy_data_references() sees an address %#08x that does not belong to any '
'section or segment.', data_addr
)
last_addr = None
if next_data_addr is None:
boundary = last_addr
elif last_addr is None:
boundary = next_data_addr
else:
boundary = min(last_addr, next_data_addr)
if next_sec_addr is not None:
boundary = min(boundary, next_sec_addr)
if boundary is not None:
data.max_size = boundary - data_addr
if data.max_size is None:
                    l.warning("Failed to determine the size bound for data at %#x.", data_addr)
keys = sorted(self._memory_data.keys())
new_data_found = False
i = 0
# pylint:disable=too-many-nested-blocks
while i < len(keys):
data_addr = keys[i]
i += 1
memory_data = self._memory_data[data_addr]
if memory_data.sort == MemoryDataSort.SegmentBoundary:
continue
content_holder = [ ]
# let's see what sort of data it is
if memory_data.sort in (MemoryDataSort.Unknown, MemoryDataSort.Unspecified) or \
(memory_data.sort == MemoryDataSort.Integer and memory_data.size == self.project.arch.bytes):
data_type, data_size = self._guess_data_type(data_addr, memory_data.max_size,
content_holder=content_holder)
else:
data_type, data_size = memory_data.sort, memory_data.size
if data_type is not None:
memory_data.size = data_size
memory_data.sort = data_type
if len(content_holder) == 1:
memory_data.content = content_holder[0]
if memory_data.max_size is not None and (0 < memory_data.size < memory_data.max_size):
# Create another memory_data object to fill the gap
new_addr = data_addr + memory_data.size
new_md = MemoryData(new_addr, None, None, max_size=memory_data.max_size - memory_data.size)
self._memory_data[new_addr] = new_md
# Make a copy of all old references
old_crs = self.kb.xrefs.get_xrefs_by_dst(data_addr)
crs = [ ]
for old_cr in old_crs:
cr = old_cr.copy()
cr.memory_data = new_md
crs.append(cr)
self.kb.xrefs.add_xrefs(crs)
keys.insert(i, new_addr)
if data_type == MemoryDataSort.PointerArray:
# make sure all pointers are identified
pointer_size = self.project.arch.bytes
old_crs = self.kb.xrefs.get_xrefs_by_dst(data_addr)
for j in range(0, data_size, pointer_size):
ptr = self._fast_memory_load_pointer(data_addr + j)
# is this pointer coming from the current binary?
obj = self.project.loader.find_object_containing(ptr, membership_check=False)
if obj is not self.project.loader.main_object:
# the pointer does not come from current binary. skip.
continue
if self._seg_list.is_occupied(ptr):
sort = self._seg_list.occupied_by_sort(ptr)
if sort == 'code':
continue
elif sort == 'pointer-array':
continue
# TODO: other types
if ptr not in self._memory_data:
new_md = MemoryData(ptr, 0, MemoryDataSort.Unknown, pointer_addr=data_addr + j)
self._memory_data[ptr] = new_md
# Make a copy of the old reference
crs = [ ]
for old_cr in old_crs:
cr = old_cr.copy()
cr.memory_data = new_md
crs.append(cr)
self.kb.xrefs.add_xrefs(crs)
new_data_found = True
else:
memory_data.size = memory_data.max_size
self._seg_list.occupy(data_addr, memory_data.size, memory_data.sort)
return new_data_found
def _guess_data_type(self, data_addr, max_size, content_holder=None):
"""
        Make a guess at the data type.
        Users can provide their own data type guessing code when initializing a CFGFast instance, and each guessing
handler will be called if this method fails to determine what the data is.
:param int data_addr: Address of the data.
:param int max_size: The maximum size this data entry can be.
:return: a tuple of (data type, size). (None, None) if we fail to determine the type or the size.
:rtype: tuple
"""
# quick check: if it's at the beginning of a binary, it might be the ELF header
elfheader_sort, elfheader_size = self._guess_data_type_elfheader(data_addr, max_size)
if elfheader_sort:
return elfheader_sort, elfheader_size
try:
ref = next(iter(self.kb.xrefs.get_xrefs_by_dst(data_addr))) # type: XRef
irsb_addr = ref.block_addr
stmt_idx = ref.stmt_idx
except StopIteration:
irsb_addr, stmt_idx = None, None
if max_size is None:
max_size = 0
if self._seg_list.is_occupied(data_addr) and self._seg_list.occupied_by_sort(data_addr) == 'code':
# it's a code reference
# TODO: Further check if it's the beginning of an instruction
return MemoryDataSort.CodeReference, 0
pointer_size = self.project.arch.bytes
# who's using it?
if isinstance(self.project.loader.main_object, cle.MetaELF):
plt_entry = self.project.loader.main_object.reverse_plt.get(irsb_addr, None)
if plt_entry is not None:
# IRSB is owned by plt!
return MemoryDataSort.GOTPLTEntry, pointer_size
# is it in a section with zero bytes, like .bss?
obj = self.project.loader.find_object_containing(data_addr)
section = obj.find_section_containing(data_addr)
if section is not None and section.only_contains_uninitialized_data:
# Nothing much you can do
return None, None
pointers_count = 0
max_pointer_array_size = min(512 * pointer_size, max_size)
for i in range(0, max_pointer_array_size, pointer_size):
ptr = self._fast_memory_load_pointer(data_addr + i)
if ptr is not None:
#if self._seg_list.is_occupied(ptr) and self._seg_list.occupied_by_sort(ptr) == 'code':
# # it's a code reference
# # TODO: Further check if it's the beginning of an instruction
# pass
if self.project.loader.find_section_containing(ptr) is not None or \
self.project.loader.find_segment_containing(ptr) is not None or \
(self._extra_memory_regions and
next(((a < ptr < b) for (a, b) in self._extra_memory_regions), None)
):
# it's a pointer of some sort
# TODO: Determine what sort of pointer it is
pointers_count += 1
else:
break
if pointers_count:
return MemoryDataSort.PointerArray, pointer_size * pointers_count
try:
data = self.project.loader.memory.load(data_addr, 1024)
except KeyError:
data = b''
        # Is it a unicode string?
        # TODO: Support unicode strings longer than the max length
if len(data) >= 4 and data[1] == 0 and data[3] == 0 and data[0] in self.PRINTABLES:
def can_decode(n):
try:
data[:n*2].decode('utf_16_le')
except UnicodeDecodeError:
return False
return True
if can_decode(4) or can_decode(5) or can_decode(6):
running_failures = 0
last_success = 4
for i in range(4, len(data) // 2):
if can_decode(i):
last_success = i
running_failures = 0
if data[i*2-2] == 0 and data[i*2-1] == 0:
break
else:
running_failures += 1
if running_failures > 3:
break
return MemoryDataSort.UnicodeString, last_success
if data:
try:
zero_pos = data.index(0)
except ValueError:
zero_pos = None
if (zero_pos is not None and zero_pos > 0 and all(c in self.PRINTABLES for c in data[:zero_pos])) or \
all(c in self.PRINTABLES for c in data):
# it's a string
# however, it may not be terminated
string_data = data if zero_pos is None else data[:zero_pos]
if content_holder is not None:
content_holder.append(string_data)
return MemoryDataSort.String, min(len(string_data) + 1, 1024)
for handler in self._data_type_guessing_handlers:
irsb = None if irsb_addr is None else self.model.get_any_node(irsb_addr).block.vex
sort, size = handler(self, irsb, irsb_addr, stmt_idx, data_addr, max_size)
if sort is not None:
return sort, size
return None, None
def _guess_data_type_elfheader(self, data_addr, max_size):
"""
Is the specified data chunk an ELF header?
:param int data_addr: Address of the data chunk
:param int max_size: Size of the data chunk.
:return: A tuple of ('elf-header', size) if it is, or (None, None) if it is not.
:rtype: tuple
"""
obj = self.project.loader.find_object_containing(data_addr)
if obj is None:
# it's not mapped
return None, None
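# The ELF magic is only 4 bytes and a chunk covering a whole ELF header is small, so the size
# bounds below avoid mislabeling either tiny or very large data chunks as an ELF header.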
if data_addr == obj.min_addr and 4 < max_size < 1000:
# Does it start with the ELF magic bytes?
try:
data = self.project.loader.memory.load(data_addr, 4)
except KeyError:
return None, None
if data == b"\x7fELF":
# yes!
return MemoryDataSort.ELFHeader, max_size
return None, None
# Indirect jumps processing
def _resolve_plt(self, addr, irsb, indir_jump):
"""
Determine if the IRSB at the given address is a PLT stub. If it is, concretely execute the basic block to
resolve the jump target.
:param int addr: Address of the block.
:param irsb: The basic block.
:param IndirectJump indir_jump: The IndirectJump instance.
:return: True if the IRSB represents a PLT stub and we successfully resolved the target.
False otherwise.
:rtype: bool
"""
# is the address identified by CLE as a PLT stub?
if self.project.loader.all_elf_objects:
# restrict this heuristics to ELF files only
if not any([ addr in obj.reverse_plt for obj in self.project.loader.all_elf_objects ]):
return False
# Make sure the IRSB has statements
if not irsb.has_statements:
irsb = self.project.factory.block(irsb.addr, size=irsb.size).vex
# try to resolve the jump target
simsucc = self.project.engines.default_engine.process(self._initial_state, irsb, force_addr=addr)
if len(simsucc.successors) == 1:
ip = simsucc.successors[0].ip
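# the check below passes only when the successor's instruction pointer evaluates to a single
# concrete value, i.e. concretely executing the stub fully resolved the jump target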
if ip._model_concrete is not ip:
target_addr = ip._model_concrete.value
if (self.project.loader.find_object_containing(target_addr, membership_check=False) is not
self.project.loader.main_object) \
or self.project.is_hooked(target_addr):
# resolved!
# Fill the IndirectJump object
indir_jump.resolved_targets.add(target_addr)
l.debug("Address %#x is resolved as a PLT entry, jumping to %#x", addr, target_addr)
return True
return False
def _indirect_jump_resolved(self, jump, jump_addr, resolved_by, targets):
"""
Called when an indirect jump is successfully resolved.
:param IndirectJump jump: The resolved indirect jump.
:param IndirectJumpResolver resolved_by: The resolver used to resolve this indirect jump.
:param list targets: List of indirect jump targets.
:return: None
"""
source_addr = jump.addr
if jump.jumptable:
# Fill in the jump_tables dict
self.jump_tables[jump.addr] = jump
# occupy the jump table region
self._seg_list.occupy(jump.jumptable_addr, jump.jumptable_size, "data")
jump.resolved_targets = targets
all_targets = set(targets)
for addr in all_targets:
to_outside = addr in self.functions or not self._addrs_belong_to_same_section(jump.addr, addr)
# TODO: get a better estimate of the function address
target_func_addr = jump.func_addr if not to_outside else addr
func_edge = FunctionTransitionEdge(self._nodes[source_addr], addr, jump.func_addr, to_outside=to_outside,
dst_func_addr=target_func_addr
)
job = CFGJob(addr, target_func_addr, jump.jumpkind,
last_addr=source_addr,
src_node=self._nodes[source_addr],
src_ins_addr=None,
src_stmt_idx=None,
func_edges=[func_edge],
)
self._insert_job(job)
self._register_analysis_job(target_func_addr, job)
self._deregister_analysis_job(jump.func_addr, jump)
CFGBase._indirect_jump_resolved(self, jump, jump.addr, resolved_by, targets)
def _indirect_jump_unresolved(self, jump):
"""
Called when we cannot resolve an indirect jump.
:param IndirectJump jump: The unresolved indirect jump.
:return: None
"""
# add a node from this node to UnresolvableJumpTarget or UnresolvalbeCallTarget node,
# depending on its jump kind
src_node = self._nodes[jump.addr]
if jump.jumpkind == 'Ijk_Boring':
unresolvable_target_addr = self._unresolvable_jump_target_addr
simprocedure_name = 'UnresolvableJumpTarget'
elif jump.jumpkind == 'Ijk_Call':
unresolvable_target_addr = self._unresolvable_call_target_addr
simprocedure_name = 'UnresolvableCallTarget'
else:
raise AngrCFGError('It should be impossible')
dst_node = CFGNode(unresolvable_target_addr, 0, self.model,
function_address=unresolvable_target_addr,
simprocedure_name=simprocedure_name,
block_id=unresolvable_target_addr,
)
# add the dst_node to self._nodes
if unresolvable_target_addr not in self._nodes:
self._nodes[unresolvable_target_addr] = dst_node
self._nodes_by_addr[unresolvable_target_addr].append(dst_node)
self._graph_add_edge(dst_node, src_node, jump.jumpkind, jump.ins_addr, jump.stmt_idx)
# mark it as a jumpout site for that function
self._function_add_transition_edge(unresolvable_target_addr, src_node, jump.func_addr,
to_outside=True,
dst_func_addr=unresolvable_target_addr,
ins_addr=jump.ins_addr,
stmt_idx=jump.stmt_idx,
)
self._deregister_analysis_job(jump.func_addr, jump)
CFGBase._indirect_jump_unresolved(self, jump)
# Removers
def _remove_redundant_overlapping_blocks(self):
"""
On some architectures there are sometimes garbage bytes (usually nops) between functions in order to properly
align the succeeding function. CFGFast does a linear sweeping which might create duplicated blocks for
function epilogues where one block starts before the garbage bytes and the other starts after the garbage bytes.
This method enumerates all blocks and removes overlapping blocks if one of them is aligned to 0x10 and the other
contains only garbage bytes.
:return: None
"""
sorted_nodes = sorted(self.graph.nodes(), key=lambda n: n.addr if n is not None else 0)
all_plt_stub_addrs = set(itertools.chain.from_iterable(obj.reverse_plt.keys() for obj in self.project.loader.all_objects if isinstance(obj, cle.MetaELF)))
# go over the list. for each node that is the beginning of a function and is not properly aligned, if its
# leading instruction is a single-byte or multi-byte nop, make sure another CFGNode starts right after the
# nop instruction
nodes_to_append = {}
# pylint:disable=too-many-nested-blocks
for a in sorted_nodes:
if a.addr in self.functions and a.addr not in all_plt_stub_addrs and \
not self._addr_hooked_or_syscall(a.addr):
all_in_edges = self.graph.in_edges(a, data=True)
if not any([data['jumpkind'] == 'Ijk_Call' for _, _, data in all_in_edges]):
# no one is calling it
# this function might be created from linear sweeping
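# lift only the bytes from this address up to the next 16-byte boundary: alignment padding
# inserted between functions normally ends at such a boundary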
try:
block = self._lift(a.addr, size=0x10 - (a.addr % 0x10), opt_level=1)
except SimTranslationError:
continue
nop_length = None
if self._is_noop_block(self.project.arch, block):
# fast path: in most cases, the entire block is a single byte or multi-byte nop, which VEX
# optimizer is able to tell
nop_length = block.size
else:
# this is not a no-op block. Determine where nop instructions terminate.
insns = block.capstone.insns
if insns:
nop_length = self._get_nop_length(insns)
if nop_length is None or nop_length <= 0:
continue
# leading nop for alignment.
next_node_addr = a.addr + nop_length
if nop_length < a.size and \
not (next_node_addr in self._nodes or next_node_addr in nodes_to_append):
# create a new CFGNode that starts there
next_node_size = a.size - nop_length
next_node = CFGNode(next_node_addr, next_node_size, self.model,
function_address=next_node_addr,
instruction_addrs=[i for i in a.instruction_addrs
if next_node_addr <= i
< next_node_addr + next_node_size
],
thumb=a.thumb,
byte_string=None if a.byte_string is None else a.byte_string[nop_length:],
block_id=next_node_addr,
)
self.graph.add_node(next_node)
# create edges accordingly
all_out_edges = self.graph.out_edges(a, data=True)
for _, dst, data in all_out_edges:
self.graph.add_edge(next_node, dst, **data)
nodes_to_append[next_node_addr] = next_node
# make sure there is a function begins there
try:
snippet = self._to_snippet(addr=next_node_addr, size=next_node_size,
base_state=self._base_state)
self.functions._add_node(next_node_addr, snippet)
except (SimEngineError, SimMemoryError):
continue
# append all new nodes to sorted nodes
if nodes_to_append:
sorted_nodes = sorted(sorted_nodes + list(nodes_to_append.values()), key=lambda n: n.addr if n is not None else 0)
removed_nodes = set()
a = None # it always holds the most recent non-removed node
for i in range(len(sorted_nodes)): # pylint:disable=consider-using-enumerate
if a is None:
a = sorted_nodes[0]
continue
b = sorted_nodes[i]
if self._addr_hooked_or_syscall(b.addr):
continue
if b in removed_nodes:
# skip all removed nodes
continue
if a.addr <= b.addr and \
(a.addr + a.size > b.addr):
# They are overlapping
try:
block = self.project.factory.fresh_block(a.addr, b.addr - a.addr, backup_state=self._base_state)
except SimTranslationError:
a = b
continue
if block.capstone.insns and all([ self._is_noop_insn(insn) for insn in block.capstone.insns ]):
# It's a big nop - no function starts with nop
# add b to indices
self._nodes[b.addr] = b
self._nodes_by_addr[b.addr].append(b)
# shrink a
self._shrink_node(a, b.addr - a.addr, remove_function=False)
a = b
continue
all_functions = self.kb.functions
# now things are a little harder
# if there is no incoming edge to b, we should replace b with a
# this is mostly because we misidentified the function beginning. In fact a is the function beginning,
# but somehow we thought b is the beginning
if a.addr + a.size == b.addr + b.size:
in_edges = len([ _ for _, _, data in self.graph.in_edges([b], data=True) ])
if in_edges == 0:
# we use node a to replace node b
# link all successors of b to a
for _, dst, data in self.graph.out_edges([b], data=True):
self.graph.add_edge(a, dst, **data)
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
# skip b
removed_nodes.add(b)
continue
# next case - if b is directly from function prologue detection, or a basic block that is a successor of
# a wrongly identified basic block, we might be totally misdecoding b
if b.instruction_addrs[0] not in a.instruction_addrs:
# use a, truncate b
new_b_addr = a.addr + a.size # b starts right after a terminates
new_b_size = b.addr + b.size - new_b_addr # this may not be the size we want, since b might be
# misdecoded
# totally remove b
if b.addr in self._nodes:
del self._nodes[b.addr]
if b.addr in self._nodes_by_addr and b in self._nodes_by_addr[b.addr]:
self._nodes_by_addr[b.addr].remove(b)
self.graph.remove_node(b)
if b.addr in all_functions:
del all_functions[b.addr]
removed_nodes.add(b)
if new_b_size > 0:
# there are still some parts left in node b - we don't want to lose it
dummy_job = CFGJob(new_b_addr, a.function_address, None)
self._scan_block(dummy_job)
continue
# for other cases, we'll let them be for now
a = b # update a
def _remove_node(self, node):
"""
Remove a CFGNode from self.graph as well as from the function manager (if it is the beginning of a function)
:param CFGNode node: The CFGNode to remove from the graph.
:return: None
"""
self.graph.remove_node(node)
if node.addr in self._nodes:
del self._nodes[node.addr]
# We wanna remove the function as well
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if node.addr in self.kb.functions.callgraph:
self.kb.functions.callgraph.remove_node(node.addr)
def _shrink_node(self, node, new_size, remove_function=True):
"""
Shrink the size of a node in CFG.
:param CFGNode node: The CFGNode to shrink
:param int new_size: The new size of the basic block
:param bool remove_function: If there is a function starting at `node`, should we remove that function or not.
:return: None
"""
# Generate the new node
new_node = CFGNode(node.addr, new_size, self.model,
function_address=None if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs
if node.addr <= i < node.addr + new_size
],
thumb=node.thumb,
byte_string=None if node.byte_string is None else node.byte_string[:new_size],
block_id=node.addr,
)
old_in_edges = self.graph.in_edges(node, data=True)
for src, _, data in old_in_edges:
self.graph.add_edge(src, new_node, **data)
successor_node_addr = node.addr + new_size
if successor_node_addr in self._nodes:
successor = self._nodes[successor_node_addr]
else:
successor_size = node.size - new_size
successor = CFGNode(successor_node_addr, successor_size, self.model,
function_address=successor_node_addr if remove_function else node.function_address,
instruction_addrs=[i for i in node.instruction_addrs if i >= node.addr + new_size],
thumb=node.thumb,
byte_string=None if node.byte_string is None else node.byte_string[new_size:]
)
self.graph.add_edge(new_node, successor, jumpkind='Ijk_Boring')
# if the node B already has resolved targets, we will skip all unresolvable successors when adding old out edges
# from node A to node B.
# this matters in cases where node B is resolved as a special indirect jump entry (like a PLT stub), but (node
# A + node B) wasn't properly resolved.
unresolvable_target_addrs = (self._unresolvable_jump_target_addr, self._unresolvable_call_target_addr)
has_resolved_targets = any([ node_.addr not in unresolvable_target_addrs
for node_ in self.graph.successors(successor) ]
)
old_out_edges = self.graph.out_edges(node, data=True)
for _, dst, data in old_out_edges:
if (has_resolved_targets and dst.addr not in unresolvable_target_addrs) or \
not has_resolved_targets:
self.graph.add_edge(successor, dst, **data)
# remove the old node from indices
if node.addr in self._nodes and self._nodes[node.addr] is node:
del self._nodes[node.addr]
if node.addr in self._nodes_by_addr and node in self._nodes_by_addr[node.addr]:
self._nodes_by_addr[node.addr].remove(node)
# remove the old node form the graph
self.graph.remove_node(node)
# add the new node to indices
self._nodes[new_node.addr] = new_node
self._nodes_by_addr[new_node.addr].append(new_node)
# the function starting at this point is probably totally incorrect
# hopefully a future call to `make_functions()` will correct everything
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if not remove_function:
# add functions back
self._function_add_node(node, node.addr)
successor_node = self.model.get_any_node(successor_node_addr)
if successor_node and successor_node.function_address == node.addr:
# if there are absolutely no predecessors to successor_node, we'd like to add it as a new function
# so that it will not be left behind
if not list(self.graph.predecessors(successor_node)):
self._function_add_node(successor_node, successor_node_addr)
#if node.addr in self.kb.functions.callgraph:
# self.kb.functions.callgraph.remove_node(node.addr)
def _analyze_all_function_features(self, all_funcs_completed=False):
"""
Iteratively analyze all changed functions, update their returning attribute, until a fix-point is reached (i.e.
no new returning/not-returning functions are found).
:return: None
"""
while True:
new_changes = self._iteratively_analyze_function_features(all_funcs_completed=all_funcs_completed)
new_returning_functions = new_changes['functions_return']
new_not_returning_functions = new_changes['functions_do_not_return']
if not new_returning_functions and not new_not_returning_functions:
break
for returning_function in new_returning_functions:
self._pending_jobs.add_returning_function(returning_function.addr)
if returning_function.addr in self._function_returns:
for fr in self._function_returns[returning_function.addr]:
# Confirm them all
if not self.kb.functions.contains_addr(fr.caller_func_addr):
# FIXME: A potential bug might arise here. After post processing (phase 2), if the function
# specified by fr.caller_func_addr has been merged to another function during phase 2, we
# will simply skip this FunctionReturn here. It might lead to unconfirmed fake_ret edges
# in the newly merged function. Fix this bug in the future when it becomes an issue.
continue
if self.kb.functions.get_by_addr(fr.caller_func_addr).returning is not True:
self._updated_nonreturning_functions.add(fr.caller_func_addr)
return_to_node = self._nodes.get(fr.return_to, None)
if return_to_node is None:
return_to_snippet = self._to_snippet(addr=fr.return_to, base_state=self._base_state)
else:
return_to_snippet = self._to_snippet(cfg_node=self._nodes[fr.return_to])
self.kb.functions._add_return_from_call(fr.caller_func_addr, fr.callee_func_addr,
return_to_snippet)
del self._function_returns[returning_function.addr]
for nonreturning_function in new_not_returning_functions:
self._pending_jobs.add_nonreturning_function(nonreturning_function.addr)
if nonreturning_function.addr in self._function_returns:
for fr in self._function_returns[nonreturning_function.addr]:
# Remove all those FakeRet edges
if self.kb.functions.contains_addr(fr.caller_func_addr) and \
self.kb.functions.get_by_addr(fr.caller_func_addr).returning is not True:
self._updated_nonreturning_functions.add(fr.caller_func_addr)
del self._function_returns[nonreturning_function.addr]
def _pop_pending_job(self, returning=True):
return self._pending_jobs.pop_job(returning=returning)
def _clean_pending_exits(self):
self._pending_jobs.cleanup()
#
# Graph utils
#
def _graph_add_edge(self, cfg_node, src_node, src_jumpkind, src_ins_addr, src_stmt_idx):
"""
Add edge between nodes, or add node if entry point
:param CFGNode cfg_node: node which is jumped to
:param CFGNode src_node: node which is jumped from; None if cfg_node is an entry point
:param str src_jumpkind: what type of jump the edge takes
:param int or str src_stmt_idx: source statements ID
:return: None
"""
if src_node is None:
self.graph.add_node(cfg_node)
else:
self.graph.add_edge(src_node, cfg_node, jumpkind=src_jumpkind, ins_addr=src_ins_addr,
stmt_idx=src_stmt_idx)
@staticmethod
def _get_return_endpoints(func):
all_endpoints = func.endpoints_with_type
return all_endpoints.get('return', [])
def _get_jumpout_targets(self, func):
jumpout_targets = set()
callgraph_outedges = self.functions.callgraph.out_edges(func.addr, data=True)
# find the ones whose type is transition
for _, dst, data in callgraph_outedges:
if data.get('type', None) == 'transition':
jumpout_targets.add(dst)
return jumpout_targets
def _get_return_sources(self, func):
# We will create a return edge for each returning point of this function
# Get all endpoints
all_endpoints = func.endpoints_with_type
# However, we do not want to create return edge if the endpoint is not a returning endpoint.
# For example, a PLT stub on x86/x64 always jump to the real library function, so we should create a return
# edge from that library function to the call site, instead of creating a return edge from the PLT stub to
# the call site.
if all_endpoints['transition']:
# it has jump outs
# it is, for example, a PLT stub
# we take the endpoints of the function it calls. this is not always correct, but it can handle many
# cases.
jumpout_targets = self._get_jumpout_targets(func)
jumpout_target_endpoints = set()
for jumpout_func_addr in jumpout_targets:
if jumpout_func_addr in self.functions:
jumpout_target_endpoints |= set(self._get_return_endpoints(self.functions[jumpout_func_addr]))
endpoints = jumpout_target_endpoints
else:
endpoints = set()
# then we take all return endpoints of the current function
endpoints |= all_endpoints.get('return', set())
return endpoints
def _make_return_edges(self):
"""
For each returning function, create return edges in self.graph.
:return: None
"""
for func_addr, func in self.functions.items():
if func.returning is False:
continue
# get the node on CFG
if func.startpoint is None:
l.warning('Function %#x does not have a startpoint (yet).', func_addr)
continue
startpoint = self.model.get_any_node(func.startpoint.addr)
if startpoint is None:
# weird...
l.warning('No CFGNode is found for function %#x in _make_return_edges().', func_addr)
continue
endpoints = self._get_return_sources(func)
# get all callers
callers = self.model.get_predecessors(startpoint, jumpkind='Ijk_Call')
# for each caller, since they all end with a call instruction, get the immediate successor
return_targets = itertools.chain.from_iterable(
self.model.get_successors(caller, excluding_fakeret=False, jumpkind='Ijk_FakeRet') for caller in callers
)
return_targets = set(return_targets)
for ep in endpoints:
src = self.model.get_any_node(ep.addr)
for rt in return_targets:
if not src.instruction_addrs:
ins_addr = None
else:
if self.project.arch.branch_delay_slot:
if len(src.instruction_addrs) > 1:
ins_addr = src.instruction_addrs[-2]
else:
l.error('At %s: expecting more than one instruction. Only got one.', src)
ins_addr = None
else:
ins_addr = src.instruction_addrs[-1]
self._graph_add_edge(rt, src, 'Ijk_Ret', ins_addr, DEFAULT_STATEMENT)
#
# Function utils
#
def _function_add_node(self, cfg_node, function_addr):
"""
Adds node to function manager, converting address to CodeNode if
possible
:param CFGNode cfg_node: A CFGNode instance.
:param int function_addr: Address of the current function.
:return: None
"""
snippet = self._to_snippet(cfg_node=cfg_node)
self.kb.functions._add_node(function_addr, snippet)
def _function_add_transition_edge(self, dst_addr, src_node, src_func_addr, to_outside=False, dst_func_addr=None,
stmt_idx=None, ins_addr=None):
"""
Add a transition edge to the function transition map.
:param int dst_addr: Address that the control flow transits to.
:param CFGNode src_node: The source node that the control flow transits from.
:param int src_func_addr: Function address.
:return: True if the edge is correctly added. False if any exception occurred (for example, the target address
does not exist)
:rtype: bool
"""
try:
target_node = self._nodes.get(dst_addr, None)
if target_node is None:
target_snippet = self._to_snippet(addr=dst_addr, base_state=self._base_state)
else:
target_snippet = self._to_snippet(cfg_node=target_node)
if src_node is None:
# Add this basic block into the function manager
self.kb.functions._add_node(src_func_addr, target_snippet)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
if not to_outside:
self.kb.functions._add_transition_to(src_func_addr, src_snippet, target_snippet, stmt_idx=stmt_idx,
ins_addr=ins_addr
)
else:
self.kb.functions._add_outside_transition_to(src_func_addr, src_snippet, target_snippet,
to_function_addr=dst_func_addr,
stmt_idx=stmt_idx, ins_addr=ins_addr
)
return True
except (SimMemoryError, SimEngineError):
return False
def _function_add_call_edge(self, addr, src_node, function_addr, syscall=False, stmt_idx=None, ins_addr=None):
"""
Add a call edge to the function transition map.
:param int addr: Address that is being called (callee).
:param CFGNode src_node: The source CFG node (caller).
:param int ret_addr: Address that returns to (in case the function returns).
:param int function_addr: Function address.
:param bool syscall: If this is a call to a syscall or not.
:param int or str stmt_idx: Statement ID of this call.
:param int or None ins_addr: Instruction address of this call.
:return: True if the edge is added. False if any exception occurred.
:rtype: bool
"""
try:
if src_node is None:
self.kb.functions._add_node(function_addr, addr, syscall=syscall)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
return_to_outside = False
ret_snippet = None
self.kb.functions._add_call_to(function_addr, src_snippet, addr, ret_snippet, syscall=syscall,
stmt_idx=stmt_idx, ins_addr=ins_addr,
return_to_outside=return_to_outside,
)
return True
except (SimMemoryError, SimEngineError):
return False
def _function_add_fakeret_edge(self, addr, src_node, src_func_addr, confirmed=None):
"""
Generate CodeNodes for target and source. If there is no source node, add a node
for the function; otherwise, create a fake-return edge in the function manager.
:param int addr: target address
:param angr.analyses.CFGNode src_node: source node
:param int src_func_addr: address of function
:param confirmed: used as attribute on eventual digraph
:return: None
"""
target_node = self._nodes.get(addr, None)
if target_node is None:
target_snippet = self._to_snippet(addr=addr, base_state=self._base_state)
else:
target_snippet = self._to_snippet(cfg_node=target_node)
if src_node is None:
self.kb.functions._add_node(src_func_addr, target_snippet)
else:
src_snippet = self._to_snippet(cfg_node=src_node)
self.kb.functions._add_fakeret_to(src_func_addr, src_snippet, target_snippet, confirmed=confirmed)
def _function_add_return_site(self, addr, function_addr):
"""
Generate a CodeNode for the target address and register it with the
function manager as a return site of the function.
:param int addr: target address
:param int function_addr: address of function
:return: None
"""
try:
target = self._to_snippet(self._nodes[addr])
except KeyError:
target = addr
self.kb.functions._add_return_from(function_addr, target)
def _function_add_return_edge(self, return_from_addr, return_to_addr, function_addr):
"""
Generate a CodeNode for return_to_addr and add a return edge from the call site
back to return_to_addr for this function in the function manager.
:param int return_from_addr: target address
:param int return_to_addr: target address
:param int function_addr: address of function
:return: None
"""
return_to_node = self._nodes.get(return_to_addr, None)
if return_to_node is None:
return_to_snippet = self._to_snippet(addr=return_to_addr, base_state=self._base_state)
to_outside = False
else:
return_to_snippet = self._to_snippet(cfg_node=return_to_node)
to_outside = return_to_node.function_address != function_addr
self.kb.functions._add_return_from_call(function_addr, return_from_addr, return_to_snippet,
to_outside=to_outside)
#
# Architecture-specific methods
#
def _arm_track_lr_on_stack(self, addr, irsb, function):
"""
At the beginning of the basic block, we check if the first instruction stores the LR register onto the stack.
If it does, we calculate the offset of that store, and record the offset in function.info.
For instance, here is the disassembly of a THUMB mode function:
000007E4 STR.W LR, [SP,#var_4]!
000007E8 MOV R2, R1
000007EA SUB SP, SP, #0xC
000007EC MOVS R1, #0
...
00000800 ADD SP, SP, #0xC
00000802 LDR.W PC, [SP+4+var_4],#4
The very last basic block has a jumpkind of Ijk_Boring, which is because VEX cannot do such complicated analysis
to determine the real jumpkind.
As we can see, instruction 7e4h stores LR at [sp-4], and at the end of this function, instruction 802 loads LR
from [sp], then increments sp by 4. We execute the first instruction, and track the following things:
- if the value from register LR is stored onto the stack.
- the difference between the offset of the LR store on stack, and the SP after the store.
If at the end of the function, the LR is read out from the stack at the exact same stack offset, we will change
the jumpkind of the final IRSB to Ijk_Ret.
This method can be enabled by setting "ret_jumpkind_heuristics", which is an architecture-specific option on
ARM, to True.
:param int addr: Address of the basic block.
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
"""
if irsb.statements is None:
return
if 'lr_saved_on_stack' in function.info:
return
# if it does, we log it down to the Function object.
lr_offset = self.project.arch.registers['lr'][0]
sp_offset = self.project.arch.sp_offset
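# track the first instruction with easily recognizable sentinel values for SP and LR: if the
# store below ends up writing the LR sentinel to an address derived from the SP sentinel, LR is
# saved on the stack and its offset relative to SP can be computed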
initial_sp = 0x7fff0000
initial_lr = 0xabcdef
tmps = {}
# pylint:disable=too-many-nested-blocks
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
if stmt.addr + stmt.delta != addr:
break
elif isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get):
if data.offset == sp_offset:
tmps[stmt.tmp] = initial_sp
elif data.offset == lr_offset:
tmps[stmt.tmp] = initial_lr
elif isinstance(data, pyvex.IRExpr.Binop):
if data.op == 'Iop_Sub32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] - arg1.con.value
elif isinstance(stmt, (pyvex.IRStmt.Store, pyvex.IRStmt.StoreG)):
data = stmt.data
storing_lr = False
if isinstance(data, pyvex.IRExpr.RdTmp):
if data.tmp in tmps:
val = tmps[data.tmp]
if val == initial_lr:
# we are storing LR to somewhere
storing_lr = True
if storing_lr:
if isinstance(stmt.addr, pyvex.IRExpr.RdTmp):
if stmt.addr.tmp in tmps:
storing_addr = tmps[stmt.addr.tmp]
function.info['lr_saved_on_stack'] = True
function.info['lr_on_stack_offset'] = storing_addr - initial_sp
break
if 'lr_saved_on_stack' not in function.info:
function.info['lr_saved_on_stack'] = False
def _arm_track_read_lr_from_stack(self, irsb, function): # pylint:disable=unused-argument
"""
At the end of a basic block, simulate the very last instruction to see if the return address is read from the
stack and written in PC. If so, the jumpkind of this IRSB will be set to Ijk_Ret. For detailed explanations,
please see the documentation of _arm_track_lr_on_stack().
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
"""
if 'lr_saved_on_stack' not in function.info or not function.info['lr_saved_on_stack']:
return
sp_offset = self.project.arch.sp_offset
initial_sp = 0x7fff0000
last_sp = None
tmps = {}
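# re-lift only the final instruction of the block and follow its temporaries, starting from a
# sentinel SP value, to see whether the new program counter is loaded from the same stack slot
# where LR was saved earlier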
tmp_irsb = self._lift(irsb.instruction_addresses[-1], opt_level=self._iropt_level).vex
# pylint:disable=too-many-nested-blocks
for stmt in tmp_irsb.statements:
if isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get) and data.offset == sp_offset:
# t0 = GET:I32(sp)
tmps[stmt.tmp] = initial_sp
elif isinstance(data, pyvex.IRExpr.Binop):
# only support Add
if data.op == 'Iop_Add32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] + arg1.con.value
elif isinstance(data, pyvex.IRExpr.Load):
if isinstance(data.addr, pyvex.IRExpr.RdTmp):
if data.addr.tmp in tmps:
tmps[stmt.tmp] = ('load', tmps[data.addr.tmp])
elif isinstance(stmt, pyvex.IRStmt.Put):
if stmt.offset == sp_offset and isinstance(stmt.data, pyvex.IRExpr.RdTmp):
if stmt.data.tmp in tmps:
# loading things into sp
last_sp = tmps[stmt.data.tmp]
if last_sp is not None and isinstance(tmp_irsb.next, pyvex.IRExpr.RdTmp):
val = tmps.get(tmp_irsb.next.tmp, None)
# val being None means there are statements that we do not handle
if isinstance(val, tuple) and val[0] == 'load':
# the value comes from memory
memory_addr = val[1]
if isinstance(last_sp, int):
lr_on_stack_offset = memory_addr - last_sp
else:
lr_on_stack_offset = memory_addr - last_sp[1]
if lr_on_stack_offset == function.info['lr_on_stack_offset']:
# the jumpkind should be Ret instead of boring
irsb.jumpkind = 'Ijk_Ret'
#
# Other methods
#
def _generate_cfgnode(self, cfg_job, current_function_addr):
"""
Generate a CFGNode that starts at `cfg_job.addr`.
Since lifting machine code to IRSBs is slow, self._nodes is used as a cache of CFGNodes.
If the current architecture is ARM, this method will try to lift the block in the mode specified by the address
(determined by the parity of the address: even for ARM, odd for THUMB), and in case of decoding failures, try
the other mode. If the basic block is successfully decoded in the other mode (different from the initial one),
`addr` and `current_function_addr` are updated.
:param CFGJob cfg_job: The CFGJob instance.
:param int current_function_addr: Address of the current function.
:return: A 4-tuple of (new address, new function address, CFGNode instance, IRSB object)
:rtype: tuple
"""
addr = cfg_job.addr
try:
if addr in self._nodes:
cfg_node = self._nodes[addr]
irsb = cfg_node.irsb
if cfg_node.function_address != current_function_addr:
# the node has been assigned to another function before.
# we should update the function address.
current_function_addr = cfg_node.function_address
return addr, current_function_addr, cfg_node, irsb
is_x86_x64_arch = self.project.arch.name in ('X86', 'AMD64')
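# on ARM, the least significant bit of the address encodes the instruction set (odd addresses
# mean THUMB mode), so mask it off to obtain the real block address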
if is_arm_arch(self.project.arch):
real_addr = addr & (~1)
else:
real_addr = addr
# if possible, check the distance between `addr` and the end of this section
distance = VEX_IRSB_MAX_SIZE
obj = self.project.loader.find_object_containing(addr, membership_check=False)
if obj:
# is there a section?
has_executable_section = len([ sec for sec in obj.sections if sec.is_executable ]) > 0 # pylint:disable=len-as-condition
section = self.project.loader.find_section_containing(addr)
if has_executable_section and section is None:
# the basic block should not exist here...
return None, None, None, None
if section is not None:
if not section.is_executable:
# the section is not executable...
return None, None, None, None
distance = section.vaddr + section.memsize - real_addr
distance = min(distance, VEX_IRSB_MAX_SIZE)
# TODO: handle segment information as well
# also check the distance between `addr` and the closest function.
# we don't want to have a basic block that spans across function boundaries
next_func = self.functions.ceiling_func(addr + 1)
if next_func is not None:
distance_to_func = (next_func.addr & (~1) if is_arm_arch(self.project.arch) else next_func.addr) - real_addr
if distance_to_func != 0:
if distance is None:
distance = distance_to_func
else:
distance = min(distance, distance_to_func)
# in the end, check the distance between `addr` and the closest occupied region in segment list
next_noncode_addr = self._seg_list.next_pos_with_sort_not_in(addr, { "code" }, max_distance=distance)
if next_noncode_addr is not None:
distance_to_noncode_addr = next_noncode_addr - real_addr
distance = min(distance, distance_to_noncode_addr)
# Let's try to create the pyvex IRSB directly, since it's much faster
nodecode = False
irsb = None
irsb_string = None
try:
lifted_block = self._lift(addr, size=distance, opt_level=self._iropt_level, collect_data_refs=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode') and \
is_arm_arch(self.project.arch) and \
self._arch_options.switch_mode_on_nodecode:
# maybe the current mode is wrong?
nodecode = False
if addr % 2 == 0:
addr_0 = addr + 1
else:
addr_0 = addr - 1
if addr_0 in self._nodes:
# it has been analyzed before
cfg_node = self._nodes[addr_0]
irsb = cfg_node.irsb
return addr_0, cfg_node.function_address, cfg_node, irsb
try:
lifted_block = self._lift(addr_0, size=distance, opt_level=self._iropt_level,
collect_data_refs=True)
irsb = lifted_block.vex_nostmt
irsb_string = lifted_block.bytes[:irsb.size]
except SimTranslationError:
nodecode = True
if not (nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode'):
# it is decodeable
if current_function_addr == addr:
current_function_addr = addr_0
addr = addr_0
if nodecode or irsb.size == 0 or irsb.jumpkind == 'Ijk_NoDecode':
# decoding error
# is the current location already occupied and marked as non-code?
# it happens in cases like the following:
#
# BL a_nonreturning_func (but we don't know it does not return)
# alignment (mov r8, r8)
# data_ref_0:
# DCD "type found!"
#
occupied_sort = self._seg_list.occupied_by_sort(real_addr)
if occupied_sort and occupied_sort != "code":
# no wonder we cannot decode it
return None, None, None, None
# we still occupy that location since it cannot be decoded anyways
if irsb is None:
irsb_size = 0
else:
irsb_size = irsb.size
# special handling for ud, ud1, and ud2 on x86 and x86-64
if is_x86_x64_arch \
and len(irsb_string) >= 2 \
and irsb_string[-2:] in {
b'\x0f\xff', # ud0
b'\x0f\xb9', # ud1
b'\x0f\x0b', # ud2
}:
# ud0, ud1, and ud2 are actually valid instructions.
valid_ins = True
nodecode_size = 2
else:
valid_ins = False
nodecode_size = 1
self._seg_list.occupy(addr, irsb_size, 'code')
self._seg_list.occupy(addr + irsb_size, nodecode_size, 'nodecode')
if not valid_ins:
l.error("Decoding error occurred at address %#x of function %#x.",
addr + irsb_size,
current_function_addr
)
return None, None, None, None
is_thumb = False
# Occupy the block in segment list
if irsb.size > 0:
if is_arm_arch(self.project.arch) and addr % 2 == 1:
# thumb mode
is_thumb=True
self._seg_list.occupy(real_addr, irsb.size, "code")
# Create a CFG node, and add it to the graph
cfg_node = CFGNode(addr, irsb.size, self.model,
function_address=current_function_addr,
block_id=addr,
irsb=irsb,
thumb=is_thumb,
byte_string=irsb_string,
)
if self._cfb is not None:
self._cfb.add_obj(addr, lifted_block)
self._nodes[addr] = cfg_node
self._nodes_by_addr[addr].append(cfg_node)
return addr, current_function_addr, cfg_node, irsb
except (SimMemoryError, SimEngineError):
return None, None, None, None
def _process_block_arch_specific(self, addr, irsb, func_addr): # pylint: disable=unused-argument
"""
Perform architecture-specific fixes, depending on the architecture type
(e.g. ARMEL, ARMHF, MIPS32/MIPS64).
For ARM, track whether the link register is saved on the stack
(see _arm_track_lr_on_stack).
For MIPS, simulate the first few instructions of the function with the
global pointer ($gp) set to a sentinel value; if a concrete value is
written into $gp, record it in the function's info (in the function
manager) so that a concrete global pointer can be used later on.
:param int addr: irsb address
:param pyvex.IRSB irsb: irsb
:param func_addr: function address
:return: None
"""
if is_arm_arch(self.project.arch):
if self._arch_options.ret_jumpkind_heuristics:
if addr == func_addr:
self._arm_track_lr_on_stack(addr, irsb, self.functions[func_addr])
elif 'lr_saved_on_stack' in self.functions[func_addr].info and \
self.functions[func_addr].info['lr_saved_on_stack'] and \
irsb.jumpkind == 'Ijk_Boring' and \
irsb.next is not None and \
isinstance(irsb.next, pyvex.IRExpr.RdTmp):
# do a bunch of checks to avoid unnecessary simulation from happening
self._arm_track_read_lr_from_stack(irsb, self.functions[func_addr])
elif self.project.arch.name in {"MIPS32", "MIPS64"}:
function = self.kb.functions.function(func_addr)
if addr >= func_addr and addr - func_addr < 15 * 4 and 'gp' not in function.info:
# check if gp is being written to
last_gp_setting_insn_id = None
insn_ctr = 0
if not irsb.statements:
# Get an IRSB with statements
irsb = self.project.factory.block(irsb.addr, size=irsb.size).vex
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
insn_ctr += 1
if insn_ctr >= 10:
break
elif isinstance(stmt, pyvex.IRStmt.Put) and stmt.offset == self.project.arch.registers['gp'][0]:
last_gp_setting_insn_id = insn_ctr
break
if last_gp_setting_insn_id is None:
return
# Prudently search for $gp values
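# 0xffffffff below is a sentinel: after executing up to the gp-setting instruction, if $gp is
# concrete and no longer equals the sentinel, the concrete value is recorded in
# function.info['gp']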
state = self.project.factory.blank_state(addr=addr, mode="fastpath",
remove_options={o.OPTIMIZE_IR}
)
state.regs.t9 = func_addr
state.regs.gp = 0xffffffff
succ = self.project.factory.successors(state, num_inst=last_gp_setting_insn_id + 1)
if not succ.flat_successors:
return
state = succ.flat_successors[0]
if not state.regs.gp.symbolic and state.solver.is_false(state.regs.gp == 0xffffffff):
function.info['gp'] = state.regs.gp._model_concrete.value
def _find_thunks(self):
if self.project.arch.name not in self.SPECIAL_THUNKS:
return {}
result = {}
for code, meaning in self.SPECIAL_THUNKS[self.project.arch.name].items():
for addr in self.project.loader.memory.find(code):
if self._addr_in_exec_memory_regions(addr):
result[addr] = meaning
return result
def _lift(self, addr, *args, **kwargs): # pylint:disable=arguments-differ
kwargs['extra_stop_points'] = set(self._known_thunks)
if self._use_patches:
# let's see if there is a patch at this location
all_patches = self.kb.patches.get_all_patches(addr, VEX_IRSB_MAX_SIZE)
if all_patches:
# Use bytes from patches instead
offset = addr
byte_string = b""
for p in all_patches:
if offset < p.addr:
byte_string += self._fast_memory_load_bytes(offset, p.addr - offset)
offset = p.addr
assert p.addr <= offset < p.addr + len(p)
byte_string += p.new_bytes[offset - p.addr: min(VEX_IRSB_MAX_SIZE - (offset-addr), p.addr + len(p) - offset)]
offset = p.addr + len(p)
kwargs['byte_string'] = byte_string
return super(CFGFast, self)._lift(addr, *args, **kwargs)
#
# Public methods
#
def copy(self):
n = CFGFast.__new__(CFGFast)
for attr, value in self.__dict__.items():
if attr.startswith('__') and attr.endswith('__'):
continue
setattr(n, attr, value)
n._exec_mem_regions = self._exec_mem_regions[::]
n._seg_list = self._seg_list.copy()
n._function_addresses_from_symbols = self._function_addresses_from_symbols.copy()
n._model = self._model.copy()
return n
def output(self):
s = "%s" % self._graph.edges(data=True)
return s
@deprecated(replacement="angr.analyses.CFB")
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = []
for cfg_node in self.graph.nodes():
size = cfg_node.size
lst.append((cfg_node.addr, size))
lst = sorted(lst, key=lambda x: x[0])
return lst
from angr.analyses import AnalysesHub
AnalysesHub.register_default('CFGFast', CFGFast)
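# A minimal usage sketch (the binary path below is a placeholder, not part of this module):
#
#     import angr
#     proj = angr.Project('/path/to/some/binary', auto_load_libs=False)
#     cfg = proj.analyses.CFGFast()
#     print(len(cfg.graph.nodes()), 'CFG nodes,', len(cfg.kb.functions), 'functions recovered')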
| 43.424811 | 162 | 0.576611 |
4c318031cf0012164ac6e0443fc731e0283d7cd6 | 3,552 | py | Python | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/create_parameters_for_import_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | ["Apache-2.0"] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/create_parameters_for_import_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | ["Apache-2.0"] | null | null | null | huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/create_parameters_for_import_request.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class CreateParametersForImportRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'version_id': 'str',
'body': 'GetParametersForImportRequestBody'
}
attribute_map = {
'version_id': 'version_id',
'body': 'body'
}
def __init__(self, version_id=None, body=None):
"""CreateParametersForImportRequest - a model defined in huaweicloud sdk"""
self._version_id = None
self._body = None
self.discriminator = None
self.version_id = version_id
if body is not None:
self.body = body
@property
def version_id(self):
"""Gets the version_id of this CreateParametersForImportRequest.
API version number
:return: The version_id of this CreateParametersForImportRequest.
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""Sets the version_id of this CreateParametersForImportRequest.
API version number
:param version_id: The version_id of this CreateParametersForImportRequest.
:type: str
"""
self._version_id = version_id
@property
def body(self):
"""Gets the body of this CreateParametersForImportRequest.
:return: The body of this CreateParametersForImportRequest.
:rtype: GetParametersForImportRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreateParametersForImportRequest.
:param body: The body of this CreateParametersForImportRequest.
:type: GetParametersForImportRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateParametersForImportRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
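# A minimal usage sketch (the field values below are placeholders, not part of this module):
#
#     req = CreateParametersForImportRequest(version_id='v1.0')
#     print(req.to_dict())   # {'version_id': 'v1.0', 'body': None}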
| 26.311111 | 83 | 0.569538 |
e2f0940599d79286ca9cd21789dd493026f48112 | 689 | py | Python | scripts/deployment/export/export_pretrained.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | ["Apache-2.0"] | 5,447 | 2018-04-25T18:02:51.000Z | 2022-03-31T00:59:49.000Z | scripts/deployment/export/export_pretrained.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | ["Apache-2.0"] | 1,566 | 2018-04-25T21:14:04.000Z | 2022-03-31T06:42:42.000Z | scripts/deployment/export/export_pretrained.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | ["Apache-2.0"] | 1,345 | 2018-04-25T18:44:13.000Z | 2022-03-30T19:32:53.000Z |
"""Script for export pre-trained models in GluonCV model zoo."""
from __future__ import print_function
import argparse
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
def parse_args():
parser = argparse.ArgumentParser("Export model helper.")
parser.add_argument('--model', '-m', required=True, type=str, help='Name of the model')
parser.add_argument('--no-preprocess', action='store_true', help='Do not include standard preprocess.')
args = parser.parse_args()
return args
args = parse_args()
net = gcv.model_zoo.get_model(args.model, pretrained=True)
gcv.utils.export_block(args.model, net, preprocess=(not args.no_preprocess), layout='HWC')
print('Done...')
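# Example invocation (the model name below is only an illustration):
#
#     python export_pretrained.py --model resnet50_v1
#
# which exports the chosen pretrained model to a deployable format.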
| 38.277778 | 107 | 0.7373 |
245f7f7c64201308624e4ce0bca706ffafbe0fa2 | 8,736 | py | Python | endesive/hsm.py | spitoglou/endesive | 36c4dadbdf1f297e72fd653b0f5f792a4d238857 | ["MIT"] | null | null | null | endesive/hsm.py | spitoglou/endesive | 36c4dadbdf1f297e72fd653b0f5f792a4d238857 | ["MIT"] | null | null | null | endesive/hsm.py | spitoglou/endesive | 36c4dadbdf1f297e72fd653b0f5f792a4d238857 | ["MIT"] | null | null | null |
#!/usr/bin/env vpython3
# coding: utf-8
import os
import sys
import binascii
import datetime
import PyKCS11
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from asn1crypto import x509 as asn1x509
from asn1crypto import keys as asn1keys
from asn1crypto import pem as asn1pem
class HSM:
def __init__(self, dllpath):
self.pkcs11 = PyKCS11.PyKCS11Lib()
self.pkcs11.load(dllpath)
self.session = None
def getSlot(self, label):
slots = self.pkcs11.getSlotList(tokenPresent=True)
for slot in slots:
info = self.pkcs11.getTokenInfo(slot)
try:
if info.label.split('\0')[0].strip() == label:
return slot
except AttributeError:
continue
return None
def create(self, label, pin, sopin):
slot = self.getSlot(label)
if slot is not None:
return
slot = self.pkcs11.getSlotList(tokenPresent=True)[-1]
self.pkcs11.initToken(slot, sopin, label)
session = self.pkcs11.openSession(slot, PyKCS11.CKF_SERIAL_SESSION | PyKCS11.CKF_RW_SESSION)
session.login(sopin, user_type=PyKCS11.CKU_SO)
session.initPin(pin)
session.logout()
session.closeSession()
def login(self, label, pin):
slot = self.getSlot(label)
if slot is None:
return
self.session = self.pkcs11.openSession(slot, PyKCS11.CKF_SERIAL_SESSION | PyKCS11.CKF_RW_SESSION)
self.session.login(pin)
def logout(self):
if self.session is not None:
self.session.logout()
self.session.closeSession()
self.session = None
def gen_privkey(self, label, key_id, key_length=2048):
# label - just a label for identifying objects
# key_id has to be the same for both objects, it will also be necessary
# when importing the certificate, to ensure it is linked with these keys.
# key_length - key-length in bits
public_template = [
(PyKCS11.CKA_CLASS, PyKCS11.CKO_PUBLIC_KEY),
(PyKCS11.CKA_TOKEN, PyKCS11.CK_TRUE),
(PyKCS11.CKA_PRIVATE, PyKCS11.CK_FALSE),
(PyKCS11.CKA_MODULUS_BITS, key_length),
# (PyKCS11.CKA_PUBLIC_EXPONENT, (0x01, 0x00, 0x01)),
(PyKCS11.CKA_ENCRYPT, PyKCS11.CK_TRUE),
(PyKCS11.CKA_VERIFY, PyKCS11.CK_TRUE),
(PyKCS11.CKA_VERIFY_RECOVER, PyKCS11.CK_TRUE),
(PyKCS11.CKA_WRAP, PyKCS11.CK_TRUE),
(PyKCS11.CKA_LABEL, label),
(PyKCS11.CKA_ID, key_id)
# (PyKCS11.CKA_KEY_TYPE, PyKCS11.CKK_RSA),
# (PyKCS11.CKA_SENSITIVE, PyKCS11.CK_FALSE),
]
private_template = [
(PyKCS11.CKA_CLASS, PyKCS11.CKO_PRIVATE_KEY),
(PyKCS11.CKA_TOKEN, PyKCS11.CK_TRUE),
(PyKCS11.CKA_PRIVATE, PyKCS11.CK_TRUE),
(PyKCS11.CKA_DECRYPT, PyKCS11.CK_TRUE),
(PyKCS11.CKA_SIGN, PyKCS11.CK_TRUE),
(PyKCS11.CKA_SIGN_RECOVER, PyKCS11.CK_TRUE),
(PyKCS11.CKA_UNWRAP, PyKCS11.CK_TRUE),
(PyKCS11.CKA_LABEL, label),
(PyKCS11.CKA_ID, key_id)
# (PyKCS11.CKA_SENSITIVE, PyKCS11.CK_TRUE),
]
self.session.generateKeyPair(public_template, private_template)
def cert_save(self, cert, label, subject, key_id):
cert_template = [
(PyKCS11.CKA_CLASS, PyKCS11.CKO_CERTIFICATE),
(PyKCS11.CKA_CERTIFICATE_TYPE, PyKCS11.CKC_X_509),
(PyKCS11.CKA_TOKEN, PyKCS11.CK_TRUE),
(PyKCS11.CKA_LABEL, label.encode('utf-8')),
(PyKCS11.CKA_ID, key_id), # must be set, and DER see Table 24, X.509 Certificate Object Attributes
(PyKCS11.CKA_SUBJECT, subject.encode('utf-8')), # must be set and DER, see Table 24, X.509 Certificate Object Attributes
#(PyKCS11.CKA_PRIVATE, PyKCS11.CK_FALSE),
#(PyKCS11.CKA_TRUSTED, PyKCS11.CK_TRUE),
#(PyKCS11.CKA_SENSITIVE, PyKCS11.CK_FALSE),
#(PyKCS11.CKA_ENCRYPT, PyKCS11.CK_TRUE),
#(PyKCS11.CKA_VERIFY, PyKCS11.CK_TRUE),
#(PyKCS11.CKA_MODIFIABLE, PyKCS11.CK_TRUE),
# (PyKCS11.CKA_ISSUER, cert.Issuer);
# (PyKCS11.CKA_SERIAL_NUMBER,cert.SerialNumber)
(PyKCS11.CKA_VALUE, cert), # must be BER-encoded
]
self.session.createObject(cert_template)
def cert_load(self, keyID):
rec = self.session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_CERTIFICATE), (PyKCS11.CKA_ID, keyID)])
if len(rec) == 0:
return None
value = bytes(rec[0].to_dict()['CKA_VALUE'])
return value
def certsign(self, sn, pubKey, subject, until, caprivKey):
tbs = asn1x509.TbsCertificate({
'version': 'v1',
'serial_number': sn,
'issuer': asn1x509.Name.build({
'common_name': 'CA',
}),
'subject': asn1x509.Name.build({
'common_name': subject,
}),
'signature': {
'algorithm': 'sha256_rsa',
'parameters': None,
},
'validity': {
'not_before': asn1x509.Time({
'utc_time': datetime.datetime.utcnow(),
}),
'not_after': asn1x509.Time({
'utc_time': until,
}),
},
'subject_public_key_info': {
'algorithm': {
'algorithm': 'rsa',
'parameters': None,
},
'public_key': pubKey
}
})
# Sign the TBS Certificate
data = tbs.dump()
value = self.session.sign(caprivKey, data, PyKCS11.Mechanism(PyKCS11.CKM_SHA256_RSA_PKCS, None))
value = bytes(bytearray(value))
cert = asn1x509.Certificate({
'tbs_certificate': tbs,
'signature_algorithm': {
'algorithm': 'sha256_rsa',
'parameters': None,
},
'signature_value': value,
})
return cert.dump()
def ca_gen(self, label, keyID, subject):
privKey = self.session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_PRIVATE_KEY), (PyKCS11.CKA_ID, keyID)])[0]
pubKey = self.session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_PUBLIC_KEY), (PyKCS11.CKA_ID, keyID)])[0]
modulus = self.session.getAttributeValue(pubKey, [PyKCS11.CKA_MODULUS])[0]
modulus = binascii.hexlify(bytearray(modulus)).decode("utf-8")
exponent = self.session.getAttributeValue(pubKey, [PyKCS11.CKA_PUBLIC_EXPONENT])[0]
exponent = binascii.hexlify(bytearray(exponent)).decode("utf-8")
pubKey = asn1keys.RSAPublicKey({
'modulus':int('0x'+modulus, 16),
'public_exponent':int('0x'+exponent, 16)
})
#pubKey = asn1keys.RSAPublicKey.load(pubKey.dump())
until = datetime.datetime.utcnow() + datetime.timedelta(days=365*10)
der_bytes = self.certsign(1, pubKey, subject, until, privKey)
self.cert_save(der_bytes, label, subject, keyID)
def ca_sign(self, keyID, label, sn, subject, days, cakeyID):
caprivKey = self.session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_PRIVATE_KEY), (PyKCS11.CKA_ID, cakeyID)])[0]
pubKey = self.session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_PUBLIC_KEY), (PyKCS11.CKA_ID, keyID)])[0]
modulus = self.session.getAttributeValue(pubKey, [PyKCS11.CKA_MODULUS])[0]
modulus = binascii.hexlify(bytearray(modulus)).decode("utf-8")
exponent = self.session.getAttributeValue(pubKey, [PyKCS11.CKA_PUBLIC_EXPONENT])[0]
exponent = binascii.hexlify(bytearray(exponent)).decode("utf-8")
pubKey = asn1keys.RSAPublicKey({
'modulus':int('0x'+modulus, 16),
'public_exponent':int('0x'+exponent, 16)
})
#pubKey = asn1keys.RSAPublicKey.load(pubKey.dump())
until = datetime.datetime.utcnow() + datetime.timedelta(days=days)
der_bytes = self.certsign(sn, pubKey, subject, until, caprivKey)
self.cert_save(der_bytes, label, subject, keyID)
def cert_export(self, fname, keyID):
der_bytes = self.cert_load(keyID)
pem_bytes = asn1pem.armor('CERTIFICATE', der_bytes)
open(fname+'.der', 'wb').write(der_bytes)
open(fname+'.pem', 'wb').write(pem_bytes)
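# A minimal usage sketch (the PKCS#11 module path, token label, PINs and key id below are
# placeholders, not part of this module):
#
#     hsm = HSM('/usr/lib/softhsm/libsofthsm2.so')
#     hsm.create('demo-token', '1234', '123456')
#     hsm.login('demo-token', '1234')
#     hsm.gen_privkey('demo CA', key_id=b'\x01')
#     hsm.ca_gen('demo CA', b'\x01', 'CA')
#     hsm.cert_export('ca', b'\x01')
#     hsm.logout()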
| 40.632558 | 133 | 0.610691 |
ce0c31c55afa89fc8de20cb8cab31ac0f2a98994 | 5,621 | py | Python | dask_image/ndfilters/_utils.py | akhalighi/dask-image | 8ff0f16dba8a874c7d8d3adf4e5e8bac5f4ee1bf | ["BSD-3-Clause"] | null | null | null | dask_image/ndfilters/_utils.py | akhalighi/dask-image | 8ff0f16dba8a874c7d8d3adf4e5e8bac5f4ee1bf | ["BSD-3-Clause"] | null | null | null | dask_image/ndfilters/_utils.py | akhalighi/dask-image | 8ff0f16dba8a874c7d8d3adf4e5e8bac5f4ee1bf | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division
import collections
import inspect
import numbers
import re
import numpy
from .._pycompat import irange, izip, strlike
def _get_docstring(func):
# Drop the output parameter from the docstring.
split_doc_params = lambda s: re.subn( # noqa: E731
"( [A-Za-z]+ : )", "\0\\1", s)[0].split("\0")
drop_doc_param = lambda s: not s.startswith(" output : ") # noqa: E731
func_doc = "" if func.__doc__ is None else func.__doc__
cleaned_docstring = "".join([
l for l in split_doc_params(func_doc) if drop_doc_param(l)
])
docstring = """
Wrapped copy of "{mod_name}.{func_name}"
Excludes the output parameter as it would not work with Dask arrays.
Original docstring:
{doc}
""".format(
mod_name=inspect.getmodule(func).__name__,
func_name=func.__name__,
doc=cleaned_docstring,
)
return docstring
def _update_wrapper(func):
def _updater(wrapper):
wrapper.__name__ = func.__name__
wrapper.__doc__ = _get_docstring(func)
return wrapper
return _updater
def _get_depth_boundary(ndim, depth, boundary=None):
if not isinstance(ndim, numbers.Integral):
raise TypeError("Expected integer value for `ndim`.")
if ndim <= 0:
raise ValueError("Expected positive value for `ndim`.")
if isinstance(depth, numbers.Number):
depth = ndim * (depth,)
if not isinstance(depth, collections.Sized):
raise TypeError("Unexpected type for `depth`.")
if len(depth) != ndim:
raise ValueError("Expected `depth` to have a length equal to `ndim`.")
if isinstance(depth, collections.Sequence):
depth = dict(izip(irange(ndim), depth))
if not isinstance(depth, collections.Mapping):
raise TypeError("Unexpected type for `depth`.")
if not all(map(lambda d: isinstance(d, numbers.Integral), depth.values())):
raise TypeError("Expected integer values for `depth`.")
if not all(map(lambda d: d >= 0, depth.values())):
raise ValueError("Expected positive semidefinite values for `depth`.")
depth = dict([(a, int(d)) for a, d in depth.items()])
if (boundary is None) or isinstance(boundary, strlike):
boundary = ndim * (boundary,)
if not isinstance(boundary, collections.Sized):
raise TypeError("Unexpected type for `boundary`.")
if len(boundary) != ndim:
raise ValueError(
"Expected `boundary` to have a length equal to `ndim`."
)
if isinstance(boundary, collections.Sequence):
boundary = dict(izip(irange(ndim), boundary))
if not isinstance(boundary, collections.Mapping):
raise TypeError("Unexpected type for `boundary`.")
type_check = lambda b: (b is None) or isinstance(b, strlike) # noqa: E731
if not all(map(type_check, boundary.values())):
raise TypeError("Expected string-like values for `boundary`.")
return depth, boundary
def _get_size(ndim, size):
if not isinstance(ndim, numbers.Integral):
raise TypeError("The ndim must be of integral type.")
if isinstance(size, numbers.Number):
size = ndim * (size,)
size = numpy.array(size)
if size.ndim != 1:
raise RuntimeError("The size must have only one dimension.")
if len(size) != ndim:
raise RuntimeError(
"The size must have a length equal to the number of dimensions."
)
if not issubclass(size.dtype.type, numbers.Integral):
raise TypeError("The size must be of integral type.")
size = tuple(size)
return size
def _get_origin(size, origin=0):
size = numpy.array(size)
ndim = len(size)
if isinstance(origin, numbers.Number):
origin = ndim * (origin,)
origin = numpy.array(origin)
if not issubclass(origin.dtype.type, numbers.Integral):
raise TypeError("The origin must be of integral type.")
# Validate dimensions.
if origin.ndim != 1:
raise RuntimeError("The origin must have only one dimension.")
if len(origin) != ndim:
raise RuntimeError(
"The origin must have the same length as the number of dimensions"
" as the array being filtered."
)
# Validate origin is bounded.
if not (origin < ((size + 1) // 2)).all():
raise ValueError("The origin must be within the footprint.")
origin = tuple(origin)
return origin
def _get_depth(size, origin=0):
origin = numpy.array(_get_origin(size, origin))
size = numpy.array(size)
half_size = size // 2
depth = half_size + abs(origin)
depth = tuple(depth)
return depth
def _get_footprint(ndim, size=None, footprint=None):
# Verify that we only got size or footprint.
if size is None and footprint is None:
raise RuntimeError("Must provide either size or footprint.")
if size is not None and footprint is not None:
raise RuntimeError("Provide either size or footprint, but not both.")
# Get a footprint based on the size.
if size is not None:
size = _get_size(ndim, size)
footprint = numpy.ones(size, dtype=bool)
# Validate the footprint.
if footprint.ndim != ndim:
raise RuntimeError(
"The footprint must have the same number of dimensions as"
" the array being filtered."
)
if footprint.size == 0:
raise RuntimeError("The footprint must have only non-zero dimensions.")
# Convert to Boolean.
footprint = (footprint != 0)
return footprint
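# A minimal usage sketch for the helpers above (values follow directly from the
# definitions of _get_footprint, _get_depth and _get_depth_boundary; runnable
# with `python -m dask_image.ndfilters._utils` when dask-image is installed).
if __name__ == "__main__":
    fp = _get_footprint(2, size=3)                    # 3x3 boolean array, all True
    depth = _get_depth(fp.shape)                      # (1, 1): half the footprint size
    depth, boundary = _get_depth_boundary(2, depth, "reflect")
    print(fp.shape, depth, boundary)                  # (3, 3) {0: 1, 1: 1} {0: 'reflect', 1: 'reflect'}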
avg_line_length: 30.22043 | max_line_length: 79 | alphanum_fraction: 0.644191

hexsha: 7ade909570889192a40a375c113ccd1a77e7fdc3 | size: 12,207 | ext: py | lang: Python
max_stars_repo_path: pointnet2/pointnet2_utils.py | max_stars_repo_name: StannisZhou/votenet | max_stars_repo_head_hexsha: 20c6de8f6c0ca32ceb1fccad2f8e5cfa936fdc2e | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1,479 | max_stars_repo_stars_event_min_datetime: 2019-08-22T20:10:42.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-29T01:36:03.000Z
max_issues_repo_path: pointnet2/pointnet2_utils.py | max_issues_repo_name: StannisZhou/votenet | max_issues_repo_head_hexsha: 20c6de8f6c0ca32ceb1fccad2f8e5cfa936fdc2e | max_issues_repo_licenses: ["MIT"] | max_issues_count: 145 | max_issues_repo_issues_event_min_datetime: 2019-08-23T08:03:39.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-26T09:54:17.000Z
max_forks_repo_path: pointnet2/pointnet2_utils.py | max_forks_repo_name: StannisZhou/votenet | max_forks_repo_head_hexsha: 20c6de8f6c0ca32ceb1fccad2f8e5cfa936fdc2e | max_forks_repo_licenses: ["MIT"] | max_forks_count: 351 | max_forks_repo_forks_event_min_datetime: 2019-08-23T02:39:17.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-06T13:22:49.000Z
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except ImportError:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
def backward(ctx, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of unknown features
known : torch.Tensor
(B, m, 3) tensor of known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
# type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
(B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
(B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
# type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
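# A minimal usage sketch, assuming the compiled pointnet2._ext CUDA extension and
# the repository's pytorch_utils module are importable and a CUDA device is
# available. Shapes follow the docstrings above: furthest point sampling picks
# 128 centroids, then QueryAndGroup gathers up to 32 neighbours within a 0.2 ball.
if __name__ == "__main__":
    xyz = torch.rand(2, 1024, 3).cuda()               # (B, N, 3) input points
    features = torch.rand(2, 64, 1024).cuda()         # (B, C, N) per-point features
    inds = furthest_point_sample(xyz, 128)            # (B, 128) sampled indices
    new_xyz = gather_operation(
        xyz.transpose(1, 2).contiguous(), inds
    ).transpose(1, 2).contiguous()                    # (B, 128, 3) centroids
    grouper = QueryAndGroup(radius=0.2, nsample=32, use_xyz=True)
    new_features = grouper(xyz, new_xyz, features)    # (B, 64 + 3, 128, 32)
    print(new_features.shape)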
avg_line_length: 28.65493 | max_line_length: 144 | alphanum_fraction: 0.579176

hexsha: c9542c54e83e65e20a247adc8bc860da9b9b720a | size: 2,979 | ext: py | lang: Python
max_stars_repo_path: monasca-events-api-0.3.0/monasca_events_api/app/healthcheck/kafka_check.py | max_stars_repo_name: scottwedge/OpenStack-Stein | max_stars_repo_head_hexsha: 7077d1f602031dace92916f14e36b124f474de15 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 13 | max_stars_repo_stars_event_min_datetime: 2016-09-14T22:02:36.000Z | max_stars_repo_stars_event_max_datetime: 2019-01-28T22:04:44.000Z
max_issues_repo_path: monasca-events-api-0.3.0/monasca_events_api/app/healthcheck/kafka_check.py | max_issues_repo_name: scottwedge/OpenStack-Stein | max_issues_repo_head_hexsha: 7077d1f602031dace92916f14e36b124f474de15 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 5 | max_issues_repo_issues_event_min_datetime: 2019-08-14T06:46:03.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-13T20:01:25.000Z
max_forks_repo_path: monasca-events-api-0.3.0/monasca_events_api/app/healthcheck/kafka_check.py | max_forks_repo_name: scottwedge/OpenStack-Stein | max_forks_repo_head_hexsha: 7077d1f602031dace92916f14e36b124f474de15 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2017-07-27T11:44:35.000Z | max_forks_repo_forks_event_max_datetime: 2018-07-10T12:11:39.000Z
content:
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from monasca_common.kafka_lib import client
from oslo_log import log
from monasca_events_api import conf
LOG = log.getLogger(__name__)
CONF = conf.CONF
CheckResult = collections.namedtuple('CheckResult', ['healthy', 'message'])
"""Result from the healthcheck, contains healthy(boolean) and message"""
class KafkaHealthCheck(object):
"""Evaluates kafka health
Healthcheck verifies if:
* kafka server is up and running
* there is a configured topic in kafka
If the following conditions are met, the healthcheck returns a healthy status.
Otherwise an unhealthy status is returned together with an explanation.
Example of middleware configuration:
.. code-block:: ini
[events_publisher]
kafka_url = localhost:8900
kafka_topics = events
Note:
It is possible to specify multiple topics if necessary.
Just separate them with a comma.
"""
def healthcheck(self):
url = CONF.events_publisher.kafka_url
try:
kafka_client = client.KafkaClient(hosts=url)
except client.KafkaUnavailableError as ex:
LOG.error(repr(ex))
error_str = 'Could not connect to kafka at %s' % url
return CheckResult(healthy=False, message=error_str)
result = self._verify_topics(kafka_client)
self._disconnect_gracefully(kafka_client)
return result
# noinspection PyMethodMayBeStatic
def _verify_topics(self, kafka_client):
topics = CONF.events_publisher.topics
for t in topics:
# kafka client loads metadata for topics as fast
# as possible (happens in __init__), therefore this
# topic_partitions is sure to be filled
for_topic = t in kafka_client.topic_partitions
if not for_topic:
error_str = 'Kafka: Topic %s not found' % t
LOG.error(error_str)
return CheckResult(healthy=False, message=error_str)
return CheckResult(healthy=True, message='OK')
# noinspection PyMethodMayBeStatic
def _disconnect_gracefully(self, kafka_client):
# at this point, client is connected so it must be closed
# regardless of topic existence
try:
kafka_client.close()
except Exception as ex:
# log that something went wrong and move on
LOG.error(repr(ex))
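# A minimal usage sketch, assuming the monasca-events-api configuration has been
# loaded beforehand (events_publisher.kafka_url and the topic list set as in the
# class docstring above).
if __name__ == "__main__":
    check = KafkaHealthCheck()
    result = check.healthcheck()
    print(result.healthy, result.message)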
avg_line_length: 31.357895 | max_line_length: 75 | alphanum_fraction: 0.684458

hexsha: 422b6bddc40d34b06604847f56bacf021190e919 | size: 42,614 | ext: py | lang: Python
max_stars_repo_path: cinder/volume/drivers/ibm/flashsystem_common.py | max_stars_repo_name: UbuntuEvangelist/cinder | max_stars_repo_head_hexsha: cbb55074de48176cbaa3f31a5b1d595b8aad7aa8 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: cinder/volume/drivers/ibm/flashsystem_common.py | max_issues_repo_name: UbuntuEvangelist/cinder | max_issues_repo_head_hexsha: cbb55074de48176cbaa3f31a5b1d595b8aad7aa8 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cinder/volume/drivers/ibm/flashsystem_common.py | max_forks_repo_name: UbuntuEvangelist/cinder | max_forks_repo_head_hexsha: cbb55074de48176cbaa3f31a5b1d595b8aad7aa8 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 15 | max_forks_repo_forks_event_min_datetime: 2017-01-12T10:35:10.000Z | max_forks_repo_forks_event_max_datetime: 2019-04-19T08:22:10.000Z
content:
# Copyright 2015 IBM Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for IBM FlashSystem storage systems.
Limitations:
1. Cinder driver only works when open_access_enabled=off.
"""
import re
import string
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
FLASHSYSTEM_VOLPOOL_NAME = 'mdiskgrp0'
FLASHSYSTEM_VOL_IOGRP = 0
flashsystem_opts = [
cfg.StrOpt('flashsystem_connection_protocol',
default='FC',
help='Connection protocol should be FC. '
'(Default is FC.)'),
cfg.BoolOpt('flashsystem_multihostmap_enabled',
default=True,
help='Allows vdisk to multi host mapping. '
'(Default is True)')
]
CONF = cfg.CONF
CONF.register_opts(flashsystem_opts)
class FlashSystemDriver(san.SanDriver):
"""IBM FlashSystem volume driver.
Version history:
1.0.0 - Initial driver
1.0.1 - Code clean up
1.0.2 - Add lock into vdisk map/unmap, connection
initialize/terminate
1.0.3 - Initial driver for iSCSI
1.0.4 - Split Flashsystem driver into common and FC
1.0.5 - Report capability of volume multiattach
1.0.6 - Fix bug #1469581, add I/T mapping check in
terminate_connection
"""
VERSION = "1.0.6"
def __init__(self, *args, **kwargs):
super(FlashSystemDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(flashsystem_opts)
self._storage_nodes = {}
self._protocol = None
self._context = None
self._system_name = None
self._system_id = None
def _ssh(self, ssh_cmd, check_exit_code=True):
try:
return self._run_ssh(ssh_cmd, check_exit_code)
except processutils.ProcessExecutionError as e:
msg = (_('CLI Exception output:\n command: %(cmd)s\n '
'stdout: %(out)s\n stderr: %(err)s')
% {'cmd': ssh_cmd, 'out': e.stdout,
'err': e.stderr})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _append_dict(self, dict_, key, value):
key, value = key.strip(), value.strip()
obj = dict_.get(key, None)
if obj is None:
dict_[key] = value
elif isinstance(obj, list):
obj.append(value)
dict_[key] = obj
else:
dict_[key] = [obj, value]
return dict_
def _assert_ssh_return(self, test, fun, ssh_cmd, out, err):
self._driver_assert(test,
(_('%(fun)s: Failed with unexpected CLI output.\n '
'Command: %(cmd)s\n stdout: %(out)s\n '
'stderr: %(err)s')
% {'fun': fun, 'cmd': ssh_cmd,
'out': six.text_type(out),
'err': six.text_type(err)}))
def _build_default_params(self):
return {'protocol': self.configuration.flashsystem_connection_protocol,
'multipath': self.configuration.flashsystem_multipath_enabled}
def _build_initiator_target_map(self, initiator_wwpns, target_wwpns):
map = {}
for i_wwpn in initiator_wwpns:
idx = six.text_type(i_wwpn)
map[idx] = []
for t_wwpn in target_wwpns:
map[idx].append(t_wwpn)
return map
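    # A minimal sketch of the mapping built above (WWPN strings are illustrative):
    # initiator WWPNs ['10:00:00:aa', '10:00:00:bb'] and target WWPNs
    # ['50:05:07:01', '50:05:07:02'] yield
    # {'10:00:00:aa': ['50:05:07:01', '50:05:07:02'],
    #  '10:00:00:bb': ['50:05:07:01', '50:05:07:02']},
    # i.e. every initiator is mapped to the full list of target ports.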
def _connector_to_hostname_prefix(self, connector):
"""Translate connector info to storage system host name.
Translate a host's name and IP to the prefix of its hostname on the
storage subsystem. We create a host name from the host and
IP address, replacing any invalid characters (at most 55 characters),
and adding a random 8-character suffix to avoid collisions. The total
length should be at most 63 characters.
"""
# Build cleanup translation tables for host names
invalid_ch_in_host = ''
for num in range(0, 128):
ch = six.text_type(chr(num))
if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
invalid_ch_in_host = invalid_ch_in_host + ch
host_name = connector['host']
if isinstance(host_name, six.text_type):
unicode_host_name_filter = {ord(six.text_type(char)): u'-'
for char in invalid_ch_in_host}
host_name = host_name.translate(unicode_host_name_filter)
elif isinstance(host_name, str):
string_host_name_filter = string.maketrans(
invalid_ch_in_host, '-' * len(invalid_ch_in_host))
host_name = host_name.translate(string_host_name_filter)
else:
msg = _('_create_host: Can not translate host name. Host name '
'is not unicode or string.')
LOG.error(msg)
raise exception.NoValidHost(reason=msg)
host_name = six.text_type(host_name)
# FlashSystem family doesn't like hostname that starts with number.
if not re.match('^[A-Za-z]', host_name):
host_name = '_' + host_name
return host_name[:55]
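    # A minimal sketch of the translation above: a connector['host'] of
    # 'compute-01.example.com' is kept as-is (letters, digits, '.', '-', '_' and
    # spaces are valid), while a name such as '9node/a' becomes '_9node-a'
    # ('/' replaced by '-' and a leading '_' added because the name must not
    # start with a digit).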
def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id,
dest_vdisk_name, dest_vdisk_id):
"""Copy data from src vdisk to dest vdisk.
To be able to copy data between vdisks, we must ensure that both
vdisks have been mapped to host. If vdisk has not been mapped,
it must be mapped firstly. When data copy completed, vdisk
should be restored to previous mapped or non-mapped status.
"""
LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.',
{'src': src_vdisk_name, 'dest': dest_vdisk_name})
connector = utils.brick_get_connector_properties()
(src_map, src_lun_id) = self._is_vdisk_map(
src_vdisk_name, connector)
(dest_map, dest_lun_id) = self._is_vdisk_map(
dest_vdisk_name, connector)
src_map_device = None
src_properties = None
dest_map_device = None
dest_properties = None
try:
if not src_map:
src_lun_id = self._map_vdisk_to_host(src_vdisk_name,
connector)
if not dest_map:
dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name,
connector)
src_properties = self._get_vdisk_map_properties(
connector, src_lun_id, src_vdisk_name,
src_vdisk_id, self._get_vdisk_params(None))
src_map_device = self._scan_device(src_properties)
dest_properties = self._get_vdisk_map_properties(
connector, dest_lun_id, dest_vdisk_name,
dest_vdisk_id, self._get_vdisk_params(None))
dest_map_device = self._scan_device(dest_properties)
src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name)
# vdisk capacity is bytes, translate into MB
size_in_mb = int(src_vdisk_attr['capacity']) / units.Mi
volume_utils.copy_volume(
src_map_device['path'],
dest_map_device['path'],
size_in_mb,
self.configuration.volume_dd_blocksize)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to copy %(src)s to %(dest)s.'),
{'src': src_vdisk_name, 'dest': dest_vdisk_name})
finally:
if not dest_map:
self._unmap_vdisk_from_host(dest_vdisk_name, connector)
self._remove_device(dest_properties, dest_map_device)
if not src_map:
self._unmap_vdisk_from_host(src_vdisk_name, connector)
self._remove_device(src_properties, src_map_device)
LOG.debug(
'leave: _copy_vdisk_data: %(src)s -> %(dest)s.',
{'src': src_vdisk_name, 'dest': dest_vdisk_name})
def _create_and_copy_vdisk_data(self, src_vdisk_name, src_vdisk_id,
dest_vdisk_name, dest_vdisk_id):
vdisk_attr = self._get_vdisk_attributes(src_vdisk_name)
self._driver_assert(
vdisk_attr is not None,
(_('_create_and_copy_vdisk_data: Failed to get attributes for '
'vdisk %s.') % src_vdisk_name))
self._create_vdisk(dest_vdisk_name, vdisk_attr['capacity'], 'b', None)
# create a timer to lock vdisk that will be used to data copy
timer = loopingcall.FixedIntervalLoopingCall(
self._set_vdisk_copy_in_progress,
[src_vdisk_name, dest_vdisk_name])
timer.start(interval=self._check_lock_interval).wait()
try:
self._copy_vdisk_data(src_vdisk_name, src_vdisk_id,
dest_vdisk_name, dest_vdisk_id)
finally:
self._unset_vdisk_copy_in_progress(
[src_vdisk_name, dest_vdisk_name])
def _create_vdisk(self, name, size, unit, opts):
"""Create a new vdisk."""
LOG.debug('enter: _create_vdisk: vdisk %s.', name)
ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp',
FLASHSYSTEM_VOLPOOL_NAME, '-iogrp',
six.text_type(FLASHSYSTEM_VOL_IOGRP),
'-size', size, '-unit', unit]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return(out.strip(), '_create_vdisk',
ssh_cmd, out, err)
# Ensure that the output is as expected
match_obj = re.search(
'Virtual Disk, id \[([0-9]+)\], successfully created', out)
self._driver_assert(
match_obj is not None,
(_('_create_vdisk %(name)s - did not find '
'success message in CLI output.\n '
'stdout: %(out)s\n stderr: %(err)s')
% {'name': name, 'out': six.text_type(out),
'err': six.text_type(err)}))
LOG.debug('leave: _create_vdisk: vdisk %s.', name)
def _delete_host(self, host_name):
"""Delete a host on the storage system."""
LOG.debug('enter: _delete_host: host %s.', host_name)
ssh_cmd = ['svctask', 'rmhost', host_name]
out, err = self._ssh(ssh_cmd)
# No output should be returned from rmhost
self._assert_ssh_return(
(not out.strip()),
'_delete_host', ssh_cmd, out, err)
LOG.debug('leave: _delete_host: host %s.', host_name)
def _delete_vdisk(self, name, force):
"""Deletes existing vdisks."""
LOG.debug('enter: _delete_vdisk: vdisk %s.', name)
# Try to delete volume only if found on the storage
vdisk_defined = self._is_vdisk_defined(name)
if not vdisk_defined:
LOG.warning(_LW('warning: Tried to delete vdisk %s but '
'it does not exist.'), name)
return
ssh_cmd = ['svctask', 'rmvdisk', '-force', name]
if not force:
ssh_cmd.remove('-force')
out, err = self._ssh(ssh_cmd)
# No output should be returned from rmvdisk
self._assert_ssh_return(
(not out.strip()),
('_delete_vdisk %(name)s') % {'name': name},
ssh_cmd, out, err)
LOG.debug('leave: _delete_vdisk: vdisk %s.', name)
def _driver_assert(self, assert_condition, exception_message):
"""Internal assertion mechanism for CLI output."""
if not assert_condition:
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
def _execute_command_and_parse_attributes(self, ssh_cmd):
"""Execute command on the FlashSystem and parse attributes.
Exception is raised if the information from the system
can not be obtained.
"""
LOG.debug(
'enter: _execute_command_and_parse_attributes: '
'command: %s.', six.text_type(ssh_cmd))
try:
out, err = self._ssh(ssh_cmd)
except processutils.ProcessExecutionError:
LOG.warning(_LW('Failed to run command: '
'%s.'), ssh_cmd)
# Does not raise exception when command encounters error.
# Only return and the upper logic decides what to do.
return None
self._assert_ssh_return(
out,
'_execute_command_and_parse_attributes', ssh_cmd, out, err)
attributes = {}
for attrib_line in out.split('\n'):
# If '!' not found, return the string and two empty strings
attrib_name, foo, attrib_value = attrib_line.partition('!')
if attrib_name is not None and attrib_name.strip():
self._append_dict(attributes, attrib_name, attrib_value)
LOG.debug(
'leave: _execute_command_and_parse_attributes: '
'command: %(cmd)s attributes: %(attr)s.',
{'cmd': six.text_type(ssh_cmd),
'attr': six.text_type(attributes)})
return attributes
def _get_hdr_dic(self, header, row, delim):
"""Return CLI row data as a dictionary indexed by names from header.
The strings are converted to columns using the delimiter in delim.
"""
attributes = header.split(delim)
values = row.split(delim)
self._driver_assert(
len(values) == len(attributes),
(_('_get_hdr_dic: attribute headers and values do not match.\n '
'Headers: %(header)s\n Values: %(row)s.')
% {'header': six.text_type(header), 'row': six.text_type(row)}))
dic = {a: v for a, v in zip(attributes, values)}
return dic
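    # A minimal sketch of the parsing above: header 'id!name!status' and row
    # '0!vdisk0!online' with delim '!' yield
    # {'id': '0', 'name': 'vdisk0', 'status': 'online'}.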
def _get_host_from_connector(self, connector):
"""List the hosts defined in the storage.
Return the host name with the given connection info, or None if there
is no host fitting that information.
"""
LOG.debug('enter: _get_host_from_connector: %s.', connector)
# Get list of host in the storage
ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
out, err = self._ssh(ssh_cmd)
if not out.strip():
return None
# If we have FC information, we have a faster lookup option
hostname = None
host_lines = out.strip().split('\n')
self._assert_ssh_return(
host_lines,
'_get_host_from_connector', ssh_cmd, out, err)
header = host_lines.pop(0).split('!')
self._assert_ssh_return(
'name' in header,
'_get_host_from_connector', ssh_cmd, out, err)
name_index = header.index('name')
hosts = map(lambda x: x.split('!')[name_index], host_lines)
hostname = self._find_host_exhaustive(connector, hosts)
LOG.debug('leave: _get_host_from_connector: host %s.', hostname)
return hostname
def _get_hostvdisk_mappings(self, host_name):
"""Return the defined storage mappings for a host."""
return_data = {}
ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host_name]
out, err = self._ssh(ssh_cmd)
mappings = out.strip().split('\n')
if mappings:
header = mappings.pop(0)
for mapping_line in mappings:
mapping_data = self._get_hdr_dic(header, mapping_line, '!')
return_data[mapping_data['vdisk_name']] = mapping_data
return return_data
def _get_node_data(self):
"""Get and verify node configuration."""
# Get storage system name and id
ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
attributes = self._execute_command_and_parse_attributes(ssh_cmd)
if not attributes or not ('name' in attributes):
msg = _('Could not get system name.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
self._system_name = attributes['name']
self._system_id = attributes['id']
# Validate value of open_access_enabled flag, for now only
# support when open_access_enabled is off
if not attributes or not ('open_access_enabled' in attributes) or (
attributes['open_access_enabled'] != 'off'):
msg = _('open_access_enabled is not off.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Validate that the array exists
pool = FLASHSYSTEM_VOLPOOL_NAME
ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
attributes = self._execute_command_and_parse_attributes(ssh_cmd)
if not attributes:
msg = _('Unable to parse attributes.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if not ('status' in attributes) or (
attributes['status'] == 'offline'):
msg = (_('Array does not exist or is offline. '
'Current status of array is %s.')
% attributes['status'])
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
# Get the iSCSI names of the FlashSystem nodes
ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return(
out.strip(), '_get_node_data', ssh_cmd, out, err)
nodes = out.strip().splitlines()
self._assert_ssh_return(nodes, '_get_node_data', ssh_cmd, out, err)
header = nodes.pop(0)
for node_line in nodes:
try:
node_data = self._get_hdr_dic(header, node_line, '!')
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
self._log_cli_output_error('_get_node_data',
ssh_cmd, out, err)
try:
node = {
'id': node_data['id'],
'name': node_data['name'],
'IO_group': node_data['IO_group_id'],
'WWNN': node_data['WWNN'],
'status': node_data['status'],
'WWPN': [],
'protocol': None,
'iscsi_name': node_data['iscsi_name'],
'config_node': node_data['config_node'],
'ipv4': [],
'ipv6': [],
}
if node['status'] == 'online':
self._storage_nodes[node['id']] = node
except KeyError:
self._handle_keyerror('lsnode', header)
def _get_vdisk_attributes(self, vdisk_name):
"""Return vdisk attributes
Exception is raised if the information from system can not be
parsed/matched to a single vdisk.
"""
ssh_cmd = [
'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_name]
return self._execute_command_and_parse_attributes(ssh_cmd)
def _get_vdiskhost_mappings(self, vdisk_name):
"""Return the defined storage mappings for a vdisk."""
return_data = {}
ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk_name]
out, err = self._ssh(ssh_cmd)
mappings = out.strip().split('\n')
if mappings:
header = mappings.pop(0)
for mapping_line in mappings:
mapping_data = self._get_hdr_dic(header, mapping_line, '!')
return_data[mapping_data['host_name']] = mapping_data
return return_data
def _get_vdisk_params(self, type_id):
params = self._build_default_params()
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
for k, value in specs.items():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# We generally do not look at capabilities in the driver, but
# protocol is a special case where the user asks for a given
# protocol and we want both the scheduler and the driver to act
# on the value.
if ((not scope or scope == 'capabilities') and
key == 'storage_protocol'):
scope = None
key = 'protocol'
# Anything keys that the driver should look at should have the
# 'drivers' scope.
if scope and scope != "drivers":
continue
if key in params:
this_type = type(params[key]).__name__
if this_type == 'int':
value = int(value)
elif this_type == 'bool':
value = strutils.bool_from_string(value)
params[key] = value
self._check_vdisk_params(params)
return params
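    # A minimal sketch of the scope handling above (the extra specs dict is
    # illustrative): a volume type with extra specs
    # {'drivers:multipath': 'True', 'capabilities:storage_protocol': 'FC'}
    # yields params with multipath=True and protocol='FC', while specs under
    # any other scope are skipped by the loop.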
def _handle_keyerror(self, function, header):
msg = (_('Did not find expected column in %(fun)s: %(hdr)s.')
% {'fun': function, 'hdr': header})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _is_vdisk_defined(self, vdisk_name):
"""Check if vdisk is defined."""
LOG.debug('enter: _is_vdisk_defined: vdisk %s.', vdisk_name)
vdisk_attributes = self._get_vdisk_attributes(vdisk_name)
LOG.debug(
'leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s.',
{'vol': vdisk_name, 'str': vdisk_attributes is not None})
if vdisk_attributes is None:
return False
else:
return True
def _is_vdisk_copy_in_progress(self, vdisk_name):
LOG.debug(
'_is_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.',
{'vdisk': vdisk_name,
'vdisk_in_progress':
six.text_type(self._vdisk_copy_in_progress)})
if vdisk_name not in self._vdisk_copy_in_progress:
LOG.debug(
'_is_vdisk_copy_in_progress: '
'vdisk copy is not in progress.')
raise loopingcall.LoopingCallDone(retvalue=True)
def _is_vdisk_map(self, vdisk_name, connector):
"""Check if vdisk is mapped.
If map, return True and lun id.
If not map, return False and expected lun id.
"""
LOG.debug('enter: _is_vdisk_map: %(src)s.', {'src': vdisk_name})
map_flag = False
result_lun = '-1'
host_name = self._get_host_from_connector(connector)
if host_name is None:
return (map_flag, int(result_lun))
mapping_data = self._get_hostvdisk_mappings(host_name)
if vdisk_name in mapping_data:
map_flag = True
result_lun = mapping_data[vdisk_name]['SCSI_id']
else:
lun_used = [int(v['SCSI_id']) for v in mapping_data.values()]
lun_used.sort()
# Start from 1 due to problems with lun id being 0.
result_lun = 1
for lun_id in lun_used:
if result_lun < lun_id:
break
elif result_lun == lun_id:
result_lun += 1
LOG.debug(
'leave: _is_vdisk_map: %(src)s '
'mapped %(map_flag)s %(result_lun)s.',
{'src': vdisk_name,
'map_flag': six.text_type(map_flag),
'result_lun': result_lun})
return (map_flag, int(result_lun))
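    # A minimal sketch of the lun selection above: if the host already uses
    # SCSI ids [1, 2, 4], the loop settles on 3 as the first free lun id for
    # the new mapping.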
def _log_cli_output_error(self, function, cmd, out, err):
LOG.error(_LE('%(fun)s: Failed with unexpected CLI output.\n '
'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n'),
{'fun': function,
'cmd': cmd,
'out': six.text_type(out),
'err': six.text_type(err)})
@utils.synchronized('flashsystem-map', external=True)
def _map_vdisk_to_host(self, vdisk_name, connector):
"""Create a mapping between a vdisk to a host."""
LOG.debug(
'enter: _map_vdisk_to_host: vdisk %(vdisk_name)s to '
'host %(host)s.',
{'vdisk_name': vdisk_name, 'host': connector})
# Check if a host object is defined for this host name
host_name = self._get_host_from_connector(connector)
if host_name is None:
# Host does not exist - add a new host to FlashSystem
host_name = self._create_host(connector)
# Verify that create_new_host succeeded
self._driver_assert(
host_name is not None,
(_('_create_host failed to return the host name.')))
(map_flag, result_lun) = self._is_vdisk_map(vdisk_name, connector)
# Volume is not mapped to host, create a new LUN
if not map_flag:
ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host_name,
'-scsi', six.text_type(result_lun), vdisk_name]
out, err = self._ssh(ssh_cmd, check_exit_code=False)
if err and err.startswith('CMMVC6071E'):
if not self.configuration.flashsystem_multihostmap_enabled:
msg = _('flashsystem_multihostmap_enabled is set '
'to False, not allow multi host mapping. '
'CMMVC6071E The VDisk-to-host mapping '
'was not created because the VDisk is '
'already mapped to a host.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
for i in range(len(ssh_cmd)):
if ssh_cmd[i] == 'mkvdiskhostmap':
ssh_cmd.insert(i + 1, '-force')
# try to map one volume to multiple hosts
out, err = self._ssh(ssh_cmd)
LOG.info(_LI('Volume %s is mapping to multiple hosts.'),
vdisk_name)
self._assert_ssh_return(
'successfully created' in out,
'_map_vdisk_to_host', ssh_cmd, out, err)
else:
self._assert_ssh_return(
'successfully created' in out,
'_map_vdisk_to_host', ssh_cmd, out, err)
LOG.debug(
('leave: _map_vdisk_to_host: LUN %(result_lun)s, vdisk '
'%(vdisk_name)s, host %(host_name)s.'),
{'result_lun': result_lun,
'vdisk_name': vdisk_name, 'host_name': host_name})
return int(result_lun)
def _port_conf_generator(self, cmd):
ssh_cmd = cmd + ['-delim', '!']
out, err = self._ssh(ssh_cmd)
if not out.strip():
return
port_lines = out.strip().split('\n')
if not port_lines:
return
header = port_lines.pop(0)
yield header
for portip_line in port_lines:
try:
port_data = self._get_hdr_dic(header, portip_line, '!')
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
self._log_cli_output_error('_port_conf_generator',
ssh_cmd, out, err)
yield port_data
def _remove_device(self, properties, device):
LOG.debug('enter: _remove_device')
if not properties or not device:
LOG.warning(_LW('_remove_device: invalid properties or device.'))
return
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = properties['driver_volume_type']
connector = utils.brick_get_connector(protocol,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
conn=properties)
connector.disconnect_volume(properties['data'], device)
LOG.debug('leave: _remove_device')
def _scan_device(self, properties):
LOG.debug('enter: _scan_device')
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = properties['driver_volume_type']
connector = utils.brick_get_connector(protocol,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
conn=properties)
device = connector.connect_volume(properties['data'])
host_device = device['path']
if not connector.check_valid_device(host_device):
msg = (_('Unable to access the backend storage '
'via the path %(path)s.') % {'path': host_device})
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug('leave: _scan_device')
return device
@utils.synchronized('flashsystem-unmap', external=True)
def _unmap_vdisk_from_host(self, vdisk_name, connector):
if 'host' in connector:
host_name = self._get_host_from_connector(connector)
self._driver_assert(
host_name is not None,
(_('_get_host_from_connector failed to return the host name '
'for connector.')))
else:
host_name = None
# Check if vdisk-host mapping exists, remove if it does. If no host
# name was given, but only one mapping exists, we can use that.
mapping_data = self._get_vdiskhost_mappings(vdisk_name)
if not mapping_data:
LOG.warning(_LW('_unmap_vdisk_from_host: No mapping of volume '
'%(vol_name)s to any host found.'),
{'vol_name': vdisk_name})
return
if host_name is None:
if len(mapping_data) > 1:
LOG.warning(_LW('_unmap_vdisk_from_host: Multiple mappings of '
'volume %(vdisk_name)s found, no host '
'specified.'),
{'vdisk_name': vdisk_name})
return
else:
host_name = list(mapping_data.keys())[0]
else:
if host_name not in mapping_data:
LOG.error(_LE('_unmap_vdisk_from_host: No mapping of volume '
'%(vol_name)s to host %(host_name)s found.'),
{'vol_name': vdisk_name, 'host_name': host_name})
return
# We have a valid host_name now
ssh_cmd = ['svctask', 'rmvdiskhostmap',
'-host', host_name, vdisk_name]
out, err = self._ssh(ssh_cmd)
# Verify CLI behaviour - no output is returned from rmvdiskhostmap
self._assert_ssh_return(
(not out.strip()),
'_unmap_vdisk_from_host', ssh_cmd, out, err)
# If this host has no more mappings, delete it
mapping_data = self._get_hostvdisk_mappings(host_name)
if not mapping_data:
self._delete_host(host_name)
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
data = {
'vendor_name': 'IBM',
'driver_version': self.VERSION,
'storage_protocol': self._protocol,
'total_capacity_gb': 0,
'free_capacity_gb': 0,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False,
'multiattach': True,
}
pool = FLASHSYSTEM_VOLPOOL_NAME
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = '%s_%s' % (self._system_name, pool)
data['volume_backend_name'] = backend_name
ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
attributes = self._execute_command_and_parse_attributes(ssh_cmd)
if not attributes:
msg = _('_update_volume_stats: Could not get storage pool data.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
data['total_capacity_gb'] = (
float(attributes['capacity']) / units.Gi)
data['free_capacity_gb'] = (
float(attributes['free_capacity']) / units.Gi)
data['easytier_support'] = False # Do not support easy tier
data['location_info'] = (
'FlashSystemDriver:%(sys_id)s:%(pool)s'
% {'sys_id': self._system_id, 'pool': pool})
self._stats = data
def _set_vdisk_copy_in_progress(self, vdisk_list):
LOG.debug(
'_set_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.',
{'vdisk': six.text_type(vdisk_list),
'vdisk_in_progress':
six.text_type(self._vdisk_copy_in_progress)})
get_lock = True
self._vdisk_copy_lock.acquire()
for vdisk in vdisk_list:
if vdisk in self._vdisk_copy_in_progress:
get_lock = False
break
if get_lock:
self._vdisk_copy_in_progress.update(vdisk_list)
self._vdisk_copy_lock.release()
if get_lock:
LOG.debug(
'_set_vdisk_copy_in_progress: %s.',
six.text_type(self._vdisk_copy_in_progress))
raise loopingcall.LoopingCallDone(retvalue=True)
def _unset_vdisk_copy_in_progress(self, vdisk_list):
LOG.debug(
'_unset_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.',
{'vdisk': six.text_type(vdisk_list),
'vdisk_in_progress':
six.text_type(self._vdisk_copy_in_progress)})
self._vdisk_copy_lock.acquire()
for vdisk in vdisk_list:
if vdisk in self._vdisk_copy_in_progress:
self._vdisk_copy_in_progress.remove(vdisk)
self._vdisk_copy_lock.release()
def _wait_vdisk_copy_completed(self, vdisk_name):
timer = loopingcall.FixedIntervalLoopingCall(
self._is_vdisk_copy_in_progress, vdisk_name)
timer.start(interval=self._check_lock_interval).wait()
def check_for_setup_error(self):
"""Ensure that the flags are set properly."""
LOG.debug('enter: check_for_setup_error')
# Check that we have the system ID information
if self._system_name is None:
msg = (
_('check_for_setup_error: Unable to determine system name.'))
raise exception.VolumeBackendAPIException(data=msg)
if self._system_id is None:
msg = _('check_for_setup_error: Unable to determine system id.')
raise exception.VolumeBackendAPIException(data=msg)
required_flags = ['san_ip', 'san_ssh_port', 'san_login']
for flag in required_flags:
if not self.configuration.safe_get(flag):
msg = (_('%s is not set.') % flag)
raise exception.InvalidInput(reason=msg)
# Ensure that either password or keyfile were set
if not (self.configuration.san_password or
self.configuration.san_private_key):
msg = _('check_for_setup_error: Password or SSH private key '
'is required for authentication: set either '
'san_password or san_private_key option.')
raise exception.InvalidInput(reason=msg)
params = self._build_default_params()
self._check_vdisk_params(params)
LOG.debug('leave: check_for_setup_error')
def create_volume(self, volume):
"""Create volume."""
vdisk_name = volume['name']
vdisk_params = self._get_vdisk_params(volume['volume_type_id'])
vdisk_size = six.text_type(volume['size'])
return self._create_vdisk(vdisk_name, vdisk_size, 'gb', vdisk_params)
def delete_volume(self, volume):
"""Delete volume."""
vdisk_name = volume['name']
self._wait_vdisk_copy_completed(vdisk_name)
self._delete_vdisk(vdisk_name, False)
def extend_volume(self, volume, new_size):
"""Extend volume."""
LOG.debug('enter: extend_volume: volume %s.', volume['name'])
vdisk_name = volume['name']
self._wait_vdisk_copy_completed(vdisk_name)
extend_amt = int(new_size) - volume['size']
ssh_cmd = (['svctask', 'expandvdisksize', '-size',
six.text_type(extend_amt), '-unit', 'gb', vdisk_name])
out, err = self._ssh(ssh_cmd)
# No output should be returned from expandvdisksize
self._assert_ssh_return(
(not out.strip()),
'extend_volume', ssh_cmd, out, err)
LOG.debug('leave: extend_volume: volume %s.', volume['name'])
def create_snapshot(self, snapshot):
"""Create snapshot from volume."""
LOG.debug(
'enter: create_snapshot: create %(snap)s from %(vol)s.',
{'snap': snapshot['name'], 'vol': snapshot['volume']['name']})
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = (_(
'create_snapshot: Volume status must be "available" or '
'"in-use" for snapshot. The invalid status is %s.') % status)
raise exception.InvalidVolume(msg)
self._create_and_copy_vdisk_data(snapshot['volume']['name'],
snapshot['volume']['id'],
snapshot['name'],
snapshot['id'])
LOG.debug(
'leave: create_snapshot: create %(snap)s from %(vol)s.',
{'snap': snapshot['name'], 'vol': snapshot['volume']['name']})
def delete_snapshot(self, snapshot):
"""Delete snapshot."""
LOG.debug(
'enter: delete_snapshot: delete %(snap)s.',
{'snap': snapshot['name']})
self._wait_vdisk_copy_completed(snapshot['name'])
self._delete_vdisk(snapshot['name'], False)
LOG.debug(
'leave: delete_snapshot: delete %(snap)s.',
{'snap': snapshot['name']})
def create_volume_from_snapshot(self, volume, snapshot):
"""Create volume from snapshot."""
LOG.debug(
'enter: create_volume_from_snapshot: create %(vol)s from '
'%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']})
if volume['size'] != snapshot['volume_size']:
msg = _('create_volume_from_snapshot: Volume size is different '
'from snapshot based volume.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
status = snapshot['status']
if status != 'available':
msg = (_('create_volume_from_snapshot: Snapshot status '
'must be "available" for creating volume. '
'The invalid status is: %s.') % status)
raise exception.InvalidSnapshot(msg)
self._create_and_copy_vdisk_data(snapshot['name'],
snapshot['id'],
volume['name'],
volume['id'])
LOG.debug(
'leave: create_volume_from_snapshot: create %(vol)s from '
'%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']})
def create_cloned_volume(self, volume, src_volume):
"""Create volume from a source volume."""
LOG.debug('enter: create_cloned_volume: create %(vol)s from %(src)s.',
{'src': src_volume['name'], 'vol': volume['name']})
if src_volume['size'] != volume['size']:
msg = _('create_cloned_volume: Source and destination '
'size differ.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self._create_and_copy_vdisk_data(src_volume['name'],
src_volume['id'],
volume['name'],
volume['id'])
LOG.debug('leave: create_cloned_volume: create %(vol)s from %(src)s.',
{'src': src_volume['name'], 'vol': volume['name']})
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If we haven't gotten stats yet or 'refresh' is True,
run update the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
avg_line_length: 39.384473 | max_line_length: 79 | alphanum_fraction: 0.575468

hexsha: d92d189ddc19307435bb2833bb300b5f392cfb29 | size: 7,332 | ext: py | lang: Python
max_stars_repo_path: env/lib/python3.5/site-packages/sklearn/externals/joblib/externals/loky/backend/spawn.py | max_stars_repo_name: Udolf15/recommedMeMovies | max_stars_repo_head_hexsha: be5ae74acd98e3f93beaaa5bb55623974fb24247 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 8 | max_stars_repo_stars_event_min_datetime: 2019-05-29T09:38:30.000Z | max_stars_repo_stars_event_max_datetime: 2021-01-20T03:36:59.000Z
max_issues_repo_path: venv/lib/python3.7/site-packages/sklearn/externals/joblib/externals/loky/backend/spawn.py | max_issues_repo_name: John1001Song/Big-Data-Robo-Adviser | max_issues_repo_head_hexsha: 9444dce96954c546333d5aecc92a06c3bfd19aa5 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 12 | max_issues_repo_issues_event_min_datetime: 2021-03-09T03:01:16.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-11T23:59:36.000Z
max_forks_repo_path: venv/lib/python3.7/site-packages/sklearn/externals/joblib/externals/loky/backend/spawn.py | max_forks_repo_name: John1001Song/Big-Data-Robo-Adviser | max_forks_repo_head_hexsha: 9444dce96954c546333d5aecc92a06c3bfd19aa5 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2019-06-13T07:10:54.000Z | max_forks_repo_forks_event_max_datetime: 2020-09-11T06:01:40.000Z
content:
###############################################################################
# Prepares and processes the data to set up the new process environment
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from multiprocessing/spawn.py (17/02/2017)
# * Improve logging data
#
import os
import sys
import runpy
import types
from multiprocessing import process, util
from sklearn.externals.joblib.externals.loky.backend import context
if sys.platform != 'win32':
WINEXE = False
WINSERVICE = False
else:
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def get_executable():
return _python_exe
def _check_not_importing_main():
if getattr(process.current_process(), '_inheriting', False):
raise RuntimeError('''
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.''')
def get_preparation_data(name, init_main_module=True):
'''
Return info about parent needed by child to unpickle process object
'''
_check_not_importing_main()
d = dict(
log_to_stderr=util._log_to_stderr,
authkey=bytes(process.current_process().authkey),
)
if util._logger is not None:
d['log_level'] = util._logger.getEffectiveLevel()
if len(util._logger.handlers) > 0:
h = util._logger.handlers[0]
d['log_fmt'] = h.formatter._fmt
sys_path = [p for p in sys.path]
try:
i = sys_path.index('')
except ValueError:
pass
else:
sys_path[i] = process.ORIGINAL_DIR
d.update(
name=name,
sys_path=sys_path,
sys_argv=sys.argv,
orig_dir=process.ORIGINAL_DIR,
dir=os.getcwd()
)
if sys.platform != "win32":
# Pass the semaphore_tracker pid to avoid re-spawning it in every child
from . import semaphore_tracker
semaphore_tracker.ensure_running()
d['tracker_pid'] = semaphore_tracker._semaphore_tracker._pid
# Figure out whether to initialise main in the subprocess as a module
# or through direct execution (or to leave it alone entirely)
if init_main_module:
main_module = sys.modules['__main__']
try:
main_mod_name = getattr(main_module.__spec__, "name", None)
except BaseException:
main_mod_name = None
if main_mod_name is not None:
d['init_main_from_name'] = main_mod_name
elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
main_path = getattr(main_module, '__file__', None)
if main_path is not None:
if (not os.path.isabs(main_path) and
process.ORIGINAL_DIR is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['init_main_from_path'] = os.path.normpath(main_path)
# Compat for python2.7
d['main_path'] = d['init_main_from_path']
return d
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process().authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'log_fmt' in data:
import logging
util.get_logger().handlers[0].setFormatter(
logging.Formatter(data['log_fmt'])
)
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'tracker_pid' in data:
from . import semaphore_tracker
semaphore_tracker._semaphore_tracker._pid = data["tracker_pid"]
if 'init_main_from_name' in data:
_fixup_main_from_name(data['init_main_from_name'])
elif 'init_main_from_path' in data:
_fixup_main_from_path(data['init_main_from_path'])
# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
# __main__.py files for packages, directories, zip archives, etc, run
# their "main only" code unconditionally, so we don't even try to
# populate anything in __main__, nor do we make any changes to
# __main__ attributes
current_main = sys.modules['__main__']
if mod_name == "__main__" or mod_name.endswith(".__main__"):
return
# If this process was forked, __main__ may already be populated
if getattr(current_main.__spec__, "name", None) == mod_name:
return
# Otherwise, __main__ may contain some non-main code where we need to
# support unpickling it properly. We rerun it as __mp_main__ and make
# the normal __main__ an alias to that
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_module(mod_name,
run_name="__mp_main__",
alter_sys=True)
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def _fixup_main_from_path(main_path):
# If this process was forked, __main__ may already be populated
current_main = sys.modules['__main__']
# Unfortunately, the main ipython launch script historically had no
# "if __name__ == '__main__'" guard, so we work around that
# by treating it like a __main__.py file
# See https://github.com/ipython/ipython/issues/4698
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == 'ipython':
return
# Otherwise, if __file__ already has the setting we expect,
# there's nothing more to do
if getattr(current_main, '__file__', None) == main_path:
return
# If the parent process has sent a path through rather than a module
# name we assume it is an executable script that may contain
# non-main code that needs to be executed
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_path(main_path,
run_name="__mp_main__")
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def import_main_path(main_path):
'''
Set sys.modules['__main__'] to module at main_path
'''
_fixup_main_from_path(main_path)
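# A minimal sketch of the parent/child handshake these helpers implement: the
# parent serialises its environment with get_preparation_data() and the spawned
# interpreter replays it with prepare() before unpickling the Process object.
#
#   data = get_preparation_data("LokyProcess-1")   # parent side
#   # ... pickle `data` and send it to the freshly spawned interpreter ...
#   prepare(data)                                  # child side: restores sys.path,
#                                                  # cwd, logging and __main__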
avg_line_length: 32.732143 | max_line_length: 79 | alphanum_fraction: 0.652209

hexsha: 2c0f7103056d35d30b31af61fbc60510eb9f97a5 | size: 6,249 | ext: py | lang: Python
max_stars_repo_path: release/stubs.min/Wms/RemotingObjects/BackgroundAgents.py | max_stars_repo_name: tranconbv/ironpython-stubs | max_stars_repo_head_hexsha: a601759e6c6819beff8e6b639d18a24b7e351851 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: release/stubs.min/Wms/RemotingObjects/BackgroundAgents.py | max_issues_repo_name: tranconbv/ironpython-stubs | max_issues_repo_head_hexsha: a601759e6c6819beff8e6b639d18a24b7e351851 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: release/stubs.min/Wms/RemotingObjects/BackgroundAgents.py | max_forks_repo_name: tranconbv/ironpython-stubs | max_forks_repo_head_hexsha: a601759e6c6819beff8e6b639d18a24b7e351851 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# encoding: utf-8
# module Wms.RemotingObjects.BackgroundAgents calls itself BackgroundAgents
# from Wms.RemotingObjects,Version=1.23.1.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no important
from __init__ import *
# no functions
# classes
class BackgroundAgent(DbObject):
""" BackgroundAgent() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BackgroundAgent()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __reduce_ex__(self,*args):
pass
BackgroundAgentPk=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: BackgroundAgentPk(self: BackgroundAgent) -> int
Set: BackgroundAgentPk(self: BackgroundAgent)=value
"""
Id=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Id(self: BackgroundAgent) -> str
Set: Id(self: BackgroundAgent)=value
"""
IsActive=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""True if LastSeen was updated within specified minutes.
Get: IsActive(self: BackgroundAgent) -> bool
"""
IsAuthorized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsAuthorized(self: BackgroundAgent) -> bool
Set: IsAuthorized(self: BackgroundAgent)=value
"""
LastSeen=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Last UTC time a ping was received
Get: LastSeen(self: BackgroundAgent) -> DateTime
Set: LastSeen(self: BackgroundAgent)=value
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Name(self: BackgroundAgent) -> str
Set: Name(self: BackgroundAgent)=value
"""
OS=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: OS(self: BackgroundAgent) -> str
Set: OS(self: BackgroundAgent)=value
"""
Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Type(self: BackgroundAgent) -> BackgroundAgentType
Set: Type(self: BackgroundAgent)=value
"""
class BackgroundAgents(List):
""" BackgroundAgents() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BackgroundAgents()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __reduce_ex__(self,*args):
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
class BackgroundAgentStatus(object):
""" BackgroundAgentStatus() """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BackgroundAgentStatus()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
ActiveAgents=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ActiveAgents(self: BackgroundAgentStatus) -> int
Set: ActiveAgents(self: BackgroundAgentStatus)=value
"""
InactiveAgents=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: InactiveAgents(self: BackgroundAgentStatus) -> int
Set: InactiveAgents(self: BackgroundAgentStatus)=value
"""
class BackgroundAgentType:
""" enum BackgroundAgentType,values: PrintAgent (1),Unknown (0) """
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return BackgroundAgentType()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
PrintAgent=None
Unknown=None
value__=None
class PingMessage(object):
"""
Send a ping message to let Boxwise know the listener is still active.
PingMessage()
"""
def ZZZ(self):
"""hardcoded/mock instance of the class"""
return PingMessage()
instance=ZZZ()
"""hardcoded/returns an instance of the class"""
AgentType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Type of the agent that sends this message
Get: AgentType(self: PingMessage) -> BackgroundAgentType
Set: AgentType(self: PingMessage)=value
"""
CreatedAt=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Datetime when this ping was created.
Get: CreatedAt(self: PingMessage) -> DateTime
Set: CreatedAt(self: PingMessage)=value
"""
ListenerId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Id of the listing agent (instance id).
Get: ListenerId(self: PingMessage) -> str
Set: ListenerId(self: PingMessage)=value
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Name of the listener to identify (e.g. Machinename on which it is running
Get: Name(self: PingMessage) -> str
Set: Name(self: PingMessage)=value
"""
OS=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Meta data about the OS the background agent is running on.
Get: OS(self: PingMessage) -> str
Set: OS(self: PingMessage)=value
"""
| 30.043269
| 215
| 0.68779
|
c1b8a9677878622f4e0a5a7a818a11957e70fd4c
| 1,435
|
py
|
Python
|
plotly/validators/layout/ternary/baxis/_tickfont.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 1
|
2018-07-16T01:51:47.000Z
|
2018-07-16T01:51:47.000Z
|
plotly/validators/layout/ternary/baxis/_tickfont.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/layout/ternary/baxis/_tickfont.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 1
|
2019-02-18T04:12:56.000Z
|
2019-02-18T04:12:56.000Z
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name='tickfont',
parent_name='layout.ternary.baxis',
**kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Tickfont',
data_docs="""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include *Arial*,
*Balto*, *Courier New*, *Droid Sans*, *Droid
Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
""",
**kwargs
)
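# Hedged usage sketch (attribute path taken from parent_name above; values are
# illustrative, and `fig` is assumed to be a plotly.graph_objs.Figure instance):
#
#     fig.layout.ternary.baxis.tickfont = dict(family='Arial', size=12, color='#444')
#
# Plotly instantiates this validator internally when that attribute is assigned.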
| 36.794872
| 72
| 0.560976
|
aa7aa94a876dcc6aa57aa83f2949de86347dc542
| 751
|
py
|
Python
|
client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningBlobDiff.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 835
|
2017-02-08T20:14:24.000Z
|
2020-03-12T17:37:49.000Z
|
client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningBlobDiff.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 651
|
2019-04-18T12:55:07.000Z
|
2022-03-31T23:45:09.000Z
|
client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningBlobDiff.py
|
stefan-petrov-toptal/modeldb
|
a8a9b9da6ed964c91351230b2f0d2703c75794de
|
[
"Apache-2.0"
] | 170
|
2017-02-13T14:49:22.000Z
|
2020-02-19T17:59:12.000Z
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class VersioningBlobDiff(BaseType):
def __init__(self, path=None, dataset=None):
required = {
"path": False,
"dataset": False,
}
self.path = path
self.dataset = dataset
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .VersioningDatasetDiff import VersioningDatasetDiff
tmp = d.get('path', None)
if tmp is not None:
d['path'] = tmp
tmp = d.get('dataset', None)
if tmp is not None:
d['dataset'] = VersioningDatasetDiff.from_json(tmp)
return VersioningBlobDiff(**d)
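# Hedged usage sketch (payload keys mirror the fields defined above; values are
# illustrative): incoming swagger JSON is converted with from_json, e.g.
#
#     diff = VersioningBlobDiff.from_json({"path": "data/train.csv"})
#     assert diff.path == "data/train.csv" and diff.dataset is None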
| 24.225806
| 62
| 0.647137
|
a91719a90068092138f19bbb74eecd515070a1cb
| 7,818
|
py
|
Python
|
pycaret/tests/test_classification.py
|
hanaseleb/pycaret
|
1fe6e1a6bee642351c4b6064d769f97294713f48
|
[
"MIT"
] | 1
|
2022-03-29T19:35:35.000Z
|
2022-03-29T19:35:35.000Z
|
pycaret/tests/test_classification.py
|
hanaseleb/pycaret
|
1fe6e1a6bee642351c4b6064d769f97294713f48
|
[
"MIT"
] | null | null | null |
pycaret/tests/test_classification.py
|
hanaseleb/pycaret
|
1fe6e1a6bee642351c4b6064d769f97294713f48
|
[
"MIT"
] | 1
|
2022-03-15T13:56:16.000Z
|
2022-03-15T13:56:16.000Z
|
import os, sys
sys.path.insert(0, os.path.abspath(".."))
import pandas as pd
import pytest
import pycaret.classification
import pycaret.datasets
from mlflow.tracking.client import MlflowClient
import uuid
@pytest.fixture(scope='module')
def juice_dataframe():
# loading dataset
return pycaret.datasets.get_data("juice")
@pytest.fixture(scope='module')
def tracking_api():
client = MlflowClient()
return client
def test(juice_dataframe):
assert isinstance(juice_dataframe, pd.core.frame.DataFrame)
# init setup
clf1 = pycaret.classification.setup(
juice_dataframe,
target="Purchase",
remove_multicollinearity=True,
multicollinearity_threshold=0.95,
log_experiment=True,
silent=True,
html=False,
session_id=123,
n_jobs=1,
)
# compare models
top3 = pycaret.classification.compare_models(errors="raise", n_select=100)[:3]
assert isinstance(top3, list)
# tune model
tuned_top3 = [pycaret.classification.tune_model(i, n_iter=3) for i in top3]
assert isinstance(tuned_top3, list)
pycaret.classification.tune_model(top3[0], n_iter=3, choose_better=True)
# ensemble model
bagged_top3 = [pycaret.classification.ensemble_model(i) for i in tuned_top3]
assert isinstance(bagged_top3, list)
# blend models
blender = pycaret.classification.blend_models(top3)
# stack models
stacker = pycaret.classification.stack_models(estimator_list=top3)
predict_holdout = pycaret.classification.predict_model(stacker)
# plot model
lr = pycaret.classification.create_model("lr")
pycaret.classification.plot_model(lr, save=True, scale=5)
# select best model
best = pycaret.classification.automl(optimize="MCC")
# hold out predictions
predict_holdout = pycaret.classification.predict_model(best)
assert isinstance(predict_holdout, pd.core.frame.DataFrame)
# predictions on new dataset
predict_holdout = pycaret.classification.predict_model(best, data=juice_dataframe)
assert isinstance(predict_holdout, pd.core.frame.DataFrame)
# calibrate model
calibrated_best = pycaret.classification.calibrate_model(best)
# finalize model
final_best = pycaret.classification.finalize_model(best)
# save model
pycaret.classification.save_model(best, "best_model_23122019")
# load model
saved_best = pycaret.classification.load_model("best_model_23122019")
# returns table of models
all_models = pycaret.classification.models()
assert isinstance(all_models, pd.core.frame.DataFrame)
# get config
X_train = pycaret.classification.get_config("X_train")
X_test = pycaret.classification.get_config("X_test")
y_train = pycaret.classification.get_config("y_train")
y_test = pycaret.classification.get_config("y_test")
assert isinstance(X_train, pd.core.frame.DataFrame)
assert isinstance(X_test, pd.core.frame.DataFrame)
assert isinstance(y_train, pd.core.series.Series)
assert isinstance(y_test, pd.core.series.Series)
# set config
pycaret.classification.set_config("seed", 124)
seed = pycaret.classification.get_config("seed")
assert seed == 124
assert 1 == 1
class TestClassificationExperimentCustomTags:
def test_classification_setup_fails_with_experiment_custom_tags(self, juice_dataframe):
with pytest.raises(TypeError):
# init setup
_ = pycaret.classification.setup(
juice_dataframe,
target="Purchase",
remove_multicollinearity=True,
multicollinearity_threshold=0.95,
log_experiment=True,
silent=True,
html=False,
session_id=123,
n_jobs=1,
experiment_name=uuid.uuid4().hex,
experiment_custom_tags='custom_tag'
)
@pytest.mark.parametrize('custom_tag', [1, ('pytest', 'True'), True, 1000.0])
def test_classification_setup_fails_with_experiment_custom_multiples_inputs(self, custom_tag):
with pytest.raises(TypeError):
# init setup
_ = pycaret.classification.setup(
pycaret.datasets.get_data("juice"),
target="Purchase",
remove_multicollinearity=True,
multicollinearity_threshold=0.95,
log_experiment=True,
silent=True,
html=False,
session_id=123,
n_jobs=1,
experiment_name=uuid.uuid4().hex,
experiment_custom_tags=custom_tag
)
def test_classification_compare_models_fails_with_experiment_custom_tags(self, juice_dataframe):
with pytest.raises(TypeError):
# init setup
_ = pycaret.classification.setup(
juice_dataframe,
target="Purchase",
remove_multicollinearity=True,
multicollinearity_threshold=0.95,
log_experiment=True,
silent=True,
html=False,
session_id=123,
n_jobs=1,
experiment_name=uuid.uuid4().hex,
experiment_custom_tags={'pytest' : 'awesome_framework'}
)
# compare models
_ = pycaret.classification.compare_models(errors="raise", n_select=100, experiment_custom_tags='invalid_tag')[:3]
def test_classification_finalize_models_fails_with_experiment_custom_tags(self, juice_dataframe):
with pytest.raises(TypeError):
# init setup
_ = pycaret.classification.setup(
juice_dataframe,
target="Purchase",
remove_multicollinearity=True,
multicollinearity_threshold=0.95,
log_experiment=True,
silent=True,
html=False,
session_id=123,
n_jobs=1,
experiment_name=uuid.uuid4().hex,
experiment_custom_tags={'pytest' : 'awesome_framework'}
)
# compare models
_ = pycaret.classification.compare_models(errors="raise", n_select=100)[:3]
# select best model
best = pycaret.classification.automl(optimize="MCC")
# finalize model
_ = pycaret.classification.finalize_model(best, experiment_custom_tags='pytest')
def test_classification_models_with_experiment_custom_tags(self, juice_dataframe, tracking_api):
# init setup
experiment_name = uuid.uuid4().hex
_ = pycaret.classification.setup(
juice_dataframe,
target="Purchase",
remove_multicollinearity=True,
multicollinearity_threshold=0.95,
log_experiment=True,
silent=True,
html=False,
session_id=123,
n_jobs=1,
experiment_name=experiment_name,
)
# compare models
_ = pycaret.classification.compare_models(errors="raise", n_select=100, experiment_custom_tags={'pytest' : 'testing'})[:3]
#get experiment data
experiment = [e for e in tracking_api.list_experiments() if e.name == experiment_name][0]
experiment_id = experiment.experiment_id
#get run's info
experiment_run = tracking_api.list_run_infos(experiment_id)[0]
#get run id
run_id = experiment_run.run_id
#get run data
run_data = tracking_api.get_run(run_id)
#assert that custom tag was inserted
assert 'testing' == run_data.to_dictionary().get('data').get("tags").get("pytest")
if __name__ == "__main__":
test()
TestClassificationExperimentCustomTags()
| 34.440529
| 130
| 0.647608
|
33f0ebb48427cb74ab5dbb7e7b345df772a88eba
| 2,222
|
py
|
Python
|
xgboost-sys/xgboost/tests/python/test_updaters.py
|
MashPlant/rust-xgboost
|
8a91943e284fece347f8b38fffca70325adbcdfd
|
[
"MIT"
] | 769
|
2015-01-02T03:15:00.000Z
|
2022-03-30T11:22:52.000Z
|
xgboost-sys/xgboost/tests/python/test_updaters.py
|
MashPlant/rust-xgboost
|
8a91943e284fece347f8b38fffca70325adbcdfd
|
[
"MIT"
] | 54
|
2015-01-01T01:12:39.000Z
|
2017-05-21T02:56:14.000Z
|
xgboost-sys/xgboost/tests/python/test_updaters.py
|
MashPlant/rust-xgboost
|
8a91943e284fece347f8b38fffca70325adbcdfd
|
[
"MIT"
] | 372
|
2015-01-03T21:10:27.000Z
|
2022-03-03T03:46:36.000Z
|
import testing as tm
import unittest
import xgboost as xgb
try:
from regression_test_utilities import run_suite, parameter_combinations, \
assert_results_non_increasing
except ImportError:
None
class TestUpdaters(unittest.TestCase):
def test_histmaker(self):
tm._skip_if_no_sklearn()
variable_param = {'updater': ['grow_histmaker'], 'max_depth': [2, 8]}
for param in parameter_combinations(variable_param):
result = run_suite(param)
assert_results_non_increasing(result, 1e-2)
def test_colmaker(self):
tm._skip_if_no_sklearn()
variable_param = {'updater': ['grow_colmaker'], 'max_depth': [2, 8]}
for param in parameter_combinations(variable_param):
result = run_suite(param)
assert_results_non_increasing(result, 1e-2)
def test_fast_histmaker(self):
tm._skip_if_no_sklearn()
variable_param = {'tree_method': ['hist'], 'max_depth': [2, 8], 'max_bin': [2, 256],
'grow_policy': ['depthwise', 'lossguide'], 'max_leaves': [64, 0],
'silent': [1]}
for param in parameter_combinations(variable_param):
result = run_suite(param)
assert_results_non_increasing(result, 1e-2)
# hist must be same as exact on all-categorical data
dpath = 'demo/data/'
ag_dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
ag_dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
ag_param = {'max_depth': 2,
'tree_method': 'hist',
'eta': 1,
'silent': 1,
'objective': 'binary:logistic',
'eval_metric': 'auc'}
hist_res = {}
exact_res = {}
xgb.train(ag_param, ag_dtrain, 10, [(ag_dtrain, 'train'), (ag_dtest, 'test')],
evals_result=hist_res)
ag_param["tree_method"] = "exact"
xgb.train(ag_param, ag_dtrain, 10, [(ag_dtrain, 'train'), (ag_dtest, 'test')],
evals_result=exact_res)
assert hist_res['train']['auc'] == exact_res['train']['auc']
assert hist_res['test']['auc'] == exact_res['test']['auc']
| 39.678571
| 92
| 0.589559
|
582c0f3286ffa21f251ad285c5e669e66b998bda
| 1,647
|
py
|
Python
|
core-python-basic-knowledge/file_io_and_resource_management/files.py
|
hassonor/core-python
|
92672aa72c1474061df5247a2dd4dfd9fab1642a
|
[
"MIT"
] | 1
|
2022-03-09T20:58:33.000Z
|
2022-03-09T20:58:33.000Z
|
core-python-basic-knowledge/file_io_and_resource_management/files.py
|
hassonor/core-python
|
92672aa72c1474061df5247a2dd4dfd9fab1642a
|
[
"MIT"
] | null | null | null |
core-python-basic-knowledge/file_io_and_resource_management/files.py
|
hassonor/core-python
|
92672aa72c1474061df5247a2dd4dfd9fab1642a
|
[
"MIT"
] | null | null | null |
"""
open()
-> Open a file for reading or writing
-> file: the path to the file (required)
-> mode: read, write, or append, plus binary or text
-> encoding: encoding to use in text mode
open() Modes
Mode 'r' -> open for reading
Mode 'w' -> open for writing
Mode 'a' -> open for appending
Selector 'b' -> binary mode
Selector 't' -> text mode
e.g.:
'wb' -> Open for writing in binary mode
'at' -> Open for appending in text mode
open() -> returns a file-like object.
write()
-> returns the number of codepoints written. Don't sum these values to determine file length.
"""
import sys
# Write File
f = open('write_to_file.txt', mode='wt', encoding='utf-8')
f.write('What are the roots that clutch, ')
f.write('what branches grow\n')
f.write('Out of this stony rubbish? ')
f.close()
# Read File
g = open('write_to_file.txt', mode='rt', encoding='utf-8')
print(g.read(32))
print(g.read())
g.seek(0)  # move the file pointer back to the start of the file
print(g.readline())
print(g.readline())
g.seek(0)
print(g.readlines()) # read to list of lines
g.close()
# Appending text File
h = open('write_to_file.txt', mode='at', encoding='utf-8')
h.writelines(
['Son of man, \n', 'You cannot say, or guess, ',
'for you know only,'
' \n', 'A heap of broken images, '
's', 'where the sun beats\n'])
h.close()
# File Iteration
test_file = open(sys.argv[1], mode='rt', encoding='utf-8')
for line in test_file:
sys.stdout.write(line)
test_file.close()
"""
with-block
-> Control flow structure for managing resources
-> Can be used with any objects - such as files - which support the context-manager protocol
"""
| 23.869565
| 93
| 0.672738
|
8effa1cff4085b4c9ac1890017c0a69a9f860c57
| 24,339
|
py
|
Python
|
test/test_trace_events.py
|
runtime-jupyter-safety/runtime-jupyter-safety
|
f62a24b5b4f44fed5111c31441bc6a105441e34c
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_trace_events.py
|
runtime-jupyter-safety/runtime-jupyter-safety
|
f62a24b5b4f44fed5111c31441bc6a105441e34c
|
[
"BSD-3-Clause"
] | 20
|
2020-04-17T02:32:50.000Z
|
2020-05-07T05:50:32.000Z
|
test/test_trace_events.py
|
runtime-jupyter-safety/runtime-jupyter-safety
|
f62a24b5b4f44fed5111c31441bc6a105441e34c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import difflib
import functools
import logging
from types import FrameType
from typing import List, Set, Union
import hypothesis.strategies as st
from hypothesis import example, given, settings
from nbsafety.safety import NotebookSafety
from nbsafety.singletons import tracer
from nbsafety.tracing.nbsafety_tracer import SafetyTracer
from pyccolo import TraceEvent
from .utils import make_safety_fixture, skipif_known_failing
logging.basicConfig(level=logging.INFO)
NotebookSafety.instance()
_ALL_EVENTS_WITH_HANDLERS = SafetyTracer.instance().events_with_registered_handlers
_RECORDED_EVENTS = []
def subsets(draw, elements):
return {e for e in elements if draw(st.booleans())}
def patched_emit_event_fixture():
_RECORDED_EVENTS.clear()
original_emit_event = SafetyTracer._emit_event
def _patched_emit_event(
self, evt: Union[str, TraceEvent], node_id: int, frame: FrameType, **kwargs
):
event = evt if isinstance(evt, TraceEvent) else TraceEvent(evt)
if frame.f_code.co_filename.startswith("<ipython-input"):
is_traced_lambda = frame.f_code.co_name == "<traced_lambda>"
if not (
(
event == TraceEvent.call
and (self.call_depth == 0 or is_traced_lambda)
)
or (
event == TraceEvent.return_
and (self.call_depth == 1 or is_traced_lambda)
)
):
if event in self.events_with_registered_handlers:
_RECORDED_EVENTS.append(event)
return original_emit_event(self, event, node_id, frame, **kwargs)
SafetyTracer._emit_event = _patched_emit_event
yield
SafetyTracer._emit_event = original_emit_event
# Reset dependency graph before each test
_safety_fixture, run_cell_ = make_safety_fixture(
extra_fixture=patched_emit_event_fixture,
# trace_messages_enabled=True,
)
_DIFFER = difflib.Differ()
def patch_events_with_registered_handlers_to_subset(testfunc):
@functools.wraps(testfunc)
@settings(max_examples=20, deadline=None)
@example(events=set(_ALL_EVENTS_WITH_HANDLERS))
def wrapped_testfunc(events):
events |= {
TraceEvent.before_subscript_load,
TraceEvent.after_subscript_load,
TraceEvent.before_subscript_store,
TraceEvent.before_subscript_del,
TraceEvent._load_saved_slice,
TraceEvent.before_load_complex_symbol,
TraceEvent.after_load_complex_symbol,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.before_attribute_store,
TraceEvent.before_attribute_del,
TraceEvent.before_call,
TraceEvent.after_call,
TraceEvent.argument,
TraceEvent.before_return,
TraceEvent.after_return,
TraceEvent.call,
TraceEvent.return_,
TraceEvent.exception,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_assign_rhs,
}
list_literal_related = {
TraceEvent.before_list_literal,
TraceEvent.after_list_literal,
TraceEvent.list_elt,
}
if events & list_literal_related:
events |= list_literal_related
set_literal_related = {
TraceEvent.before_set_literal,
TraceEvent.after_set_literal,
TraceEvent.set_elt,
}
if events & set_literal_related:
events |= set_literal_related
tuple_literal_related = {
TraceEvent.before_tuple_literal,
TraceEvent.after_tuple_literal,
TraceEvent.tuple_elt,
}
if events & tuple_literal_related:
events |= tuple_literal_related
dict_literal_related = {
TraceEvent.before_dict_literal,
TraceEvent.after_dict_literal,
TraceEvent.dict_key,
TraceEvent.dict_value,
}
if events & dict_literal_related:
events |= dict_literal_related
orig_handlers = tracer().events_with_registered_handlers
try:
tracer().events_with_registered_handlers = frozenset(events)
_RECORDED_EVENTS.clear()
testfunc(events)
finally:
tracer().events_with_registered_handlers = orig_handlers
return wrapped_testfunc
def filter_events_to_subset(
events: List[TraceEvent], subset: Set[TraceEvent]
) -> List[TraceEvent]:
return [evt for evt in events if evt in subset]
def throw_and_print_diff_if_recorded_not_equal_to(actual: List[TraceEvent]) -> None:
assert _RECORDED_EVENTS == actual, "\n".join(
_DIFFER.compare(
[evt.value for evt in _RECORDED_EVENTS], [evt.value for evt in actual]
)
)
_RECORDED_EVENTS.clear()
def run_cell(cell, **kwargs):
# print()
# print('*******************************************')
# print('running', cell)
# print('*******************************************')
# print()
run_cell_(cell, **kwargs)
@st.composite
def subsets(draw, elements):
return {e for e in elements if draw(st.booleans())} | {
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
TraceEvent.call,
TraceEvent.return_,
TraceEvent.after_for_loop_iter,
TraceEvent.after_while_loop_iter,
}
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_recorded_events_simple(events):
assert _RECORDED_EVENTS == []
run_cell('logging.info("foo")')
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.before_call,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_recorded_events_two_stmts(events):
assert _RECORDED_EVENTS == []
run_cell("x = [1, 2, 3]")
run_cell("logging.info(x)")
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.before_list_literal,
*([TraceEvent.list_elt] * 3),
TraceEvent.after_list_literal,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.before_call,
TraceEvent.load_name,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_nested_chains_no_call(events):
assert _RECORDED_EVENTS == []
run_cell('logging.info("foo is %s", logging.info("foo"))')
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.before_call,
TraceEvent.argument,
# next events correspond to `logging.info("foo")`
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.before_call,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_list_nested_in_dict(events):
assert _RECORDED_EVENTS == []
run_cell("x = {1: [2, 3, 4]}")
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.before_dict_literal,
TraceEvent.dict_key,
TraceEvent.before_list_literal,
*([TraceEvent.list_elt] * 3),
TraceEvent.after_list_literal,
TraceEvent.dict_value,
TraceEvent.after_dict_literal,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_function_call(events):
assert _RECORDED_EVENTS == []
run_cell(
"""
def foo(x):
return [x]
"""
)
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
run_cell("foo([42])")
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_call,
TraceEvent.before_list_literal,
TraceEvent.list_elt,
TraceEvent.after_list_literal,
TraceEvent.argument,
TraceEvent.call,
TraceEvent.before_function_body,
TraceEvent.before_stmt,
TraceEvent.before_return,
TraceEvent.before_list_literal,
TraceEvent.load_name,
TraceEvent.list_elt,
TraceEvent.after_list_literal,
TraceEvent.after_return,
TraceEvent.after_function_execution,
TraceEvent.return_,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_lambda_in_tuple(events):
assert _RECORDED_EVENTS == []
run_cell("x = (lambda: 42,)")
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.before_tuple_literal,
TraceEvent.before_lambda,
TraceEvent.after_lambda,
TraceEvent.tuple_elt,
TraceEvent.after_tuple_literal,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_fancy_slices(events):
assert _RECORDED_EVENTS == []
run_cell(
"""
import numpy as np
class Foo:
def __init__(self, x):
self.x = x
foo = Foo(1)
arr = np.zeros((3, 3, 3))
"""
)
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
# import numpy as np
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
# class Foo: ...
TraceEvent.before_stmt,
TraceEvent.call,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.return_,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
# foo = Foo(1)
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_call,
TraceEvent.argument,
TraceEvent.call,
TraceEvent.before_function_body,
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.load_name,
TraceEvent.after_assign_rhs,
TraceEvent.load_name,
TraceEvent.before_attribute_store,
TraceEvent.after_stmt,
TraceEvent.after_function_execution,
TraceEvent.return_,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
# arr = np.zeros((3, 3, 3))
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.before_call,
TraceEvent.before_tuple_literal,
TraceEvent.tuple_elt,
TraceEvent.tuple_elt,
TraceEvent.tuple_elt,
TraceEvent.after_tuple_literal,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
run_cell("logging.info(arr[foo.x:foo.x+1,...])")
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.before_call,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.after_load_complex_symbol,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_attribute_load,
TraceEvent.after_attribute_load,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_subscript_slice,
TraceEvent.before_subscript_load,
TraceEvent._load_saved_slice,
TraceEvent.after_subscript_load,
TraceEvent.after_load_complex_symbol,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_for_loop(events):
assert _RECORDED_EVENTS == []
run_cell(
"""
for i in range(10):
pass
"""
)
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_call,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
]
+ [
TraceEvent.before_for_loop_body,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_for_loop_iter,
# ] * 10 + [
]
* 1
+ [
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_while_loop(events):
assert _RECORDED_EVENTS == []
run_cell(
"""
i = 0
while i < 10:
i += 1
"""
)
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
TraceEvent.before_stmt,
]
+ [
TraceEvent.load_name,
TraceEvent.before_while_loop_body,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_while_loop_iter,
# ] * 10 + [
]
* 1
+ [
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_loop_with_continue(events):
assert _RECORDED_EVENTS == []
run_cell(
"""
for i in range(10):
continue
print("hi")
"""
)
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_call,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.before_for_loop_body,
TraceEvent.before_stmt,
TraceEvent.after_for_loop_iter,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_for_loop_nested_in_while_loop(events):
assert _RECORDED_EVENTS == []
run_cell(
"""
i = 0
while i < 10:
for j in range(2):
i += 1
"""
)
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
TraceEvent.before_stmt,
TraceEvent.before_assign_rhs,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
TraceEvent.before_stmt,
]
+ [
TraceEvent.load_name,
TraceEvent.before_while_loop_body,
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_call,
TraceEvent.argument,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.before_for_loop_body,
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_for_loop_iter,
# TraceEvent.before_stmt,
# TraceEvent.after_stmt,
# TraceEvent.after_loop_iter,
TraceEvent.after_stmt,
TraceEvent.after_while_loop_iter,
# ] * 5 + [
]
* 1
+ [
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))
@patch_events_with_registered_handlers_to_subset
def test_lambda_wrapping_call(events):
assert _RECORDED_EVENTS == []
run_cell(
"""
z = 42
def f():
return z
lam = lambda: f()
x = lam()
"""
)
throw_and_print_diff_if_recorded_not_equal_to(
filter_events_to_subset(
[
TraceEvent.init_module,
# z = 42
TraceEvent.before_stmt,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
# def f(): ...
TraceEvent.before_stmt,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
# lam = lambda: f()
TraceEvent.before_stmt,
TraceEvent.before_lambda,
TraceEvent.after_lambda,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
# x = lam()
TraceEvent.before_stmt,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_call,
TraceEvent.call,
TraceEvent.before_lambda_body,
TraceEvent.before_load_complex_symbol,
TraceEvent.load_name,
TraceEvent.before_call,
TraceEvent.call,
TraceEvent.before_function_body,
TraceEvent.before_stmt,
TraceEvent.before_return,
TraceEvent.load_name,
TraceEvent.after_return,
TraceEvent.after_function_execution,
TraceEvent.return_,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.return_,
TraceEvent.after_call,
TraceEvent.after_load_complex_symbol,
TraceEvent.after_assign_rhs,
TraceEvent.after_stmt,
TraceEvent.after_module_stmt,
],
events,
)
)
| 33.432692
| 84
| 0.57755
|
d19b38d1cfd2b800edd3b6748bd4f3877a27ce18
| 3,982
|
py
|
Python
|
lambda/lambda_function.py
|
alshapton/Space-X-Info-Alexa
|
05d2f7a3c0bf6151c6c3ccab358eb432e47a93a6
|
[
"MIT"
] | null | null | null |
lambda/lambda_function.py
|
alshapton/Space-X-Info-Alexa
|
05d2f7a3c0bf6151c6c3ccab358eb432e47a93a6
|
[
"MIT"
] | null | null | null |
lambda/lambda_function.py
|
alshapton/Space-X-Info-Alexa
|
05d2f7a3c0bf6151c6c3ccab358eb432e47a93a6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#import boto3
import logging
import ask_sdk_core.utils as ask_utils
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
#import pytz
# UI components
from ask_sdk_model import Response
# Import core intent handling classes
from CoreIntentHandlers.LaunchRequestHandler import LaunchRequestHandler #@ <BLANK>
from CoreIntentHandlers.CancelOrStopIntentHandler import CancelOrStopIntentHandler #@ <BLANK>
from CoreIntentHandlers.CatchAllExceptionHandler import CatchAllExceptionHandler #@ <BLANK>
from CoreIntentHandlers.IntentReflectorHandler import IntentReflectorHandler #@ <BLANK>
from CoreIntentHandlers.HelpIntentHandler import HelpIntentHandler #@ <BLANK>
from CoreIntentHandlers.SessionEndedRequestHandler import SessionEndedRequestHandler #@ <BLANK>
# Granular Help Handler
from CoreIntentHandlers.AssistanceIntentHandler import AssistHandler #@ Get more detailed help
# Shared Handlers
from FunctionalIntentHandlers.Shared.Handlers import ChangeUnitsHandler #@ Swap units of measure (Miles/Km)
# Import functional intent handling classes
# Roadster
from FunctionalIntentHandlers.Roadster.Handlers import RoadsterOrbitHandler #@ Information about the eliptical orbit of the Tesla roadster
from FunctionalIntentHandlers.Roadster.Handlers import RoadsterSpeedHandler #@ Find out how fast the vehicle is travelling
from FunctionalIntentHandlers.Roadster.Handlers import RoadsterLocationHandler #@ Find the location of the Tesla Roadster
from FunctionalIntentHandlers.Roadster.Handlers import RoadsterMarsHandler #@ Find the location of the Tesla Roadster with respect to Mars
from FunctionalIntentHandlers.Roadster.Handlers import RoadsterInfoHandler #@ Get the complete low-down on the Roadster
# Launches
from FunctionalIntentHandlers.Launches.Handlers import LaunchNextHandler #@ The next launch
from FunctionalIntentHandlers.Launches.Handlers import LaunchLastHandler #@ The most recent launch
# Landing Pads
from FunctionalIntentHandlers.LandingPads.Handlers import LandingPadsHandler #@ Find out about Space/X's landing pads, zones and drone ships
# Company
from FunctionalIntentHandlers.Info.Handlers import CompanyHandler #@ Get information about the Space/X company itself
# Set logging level
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# The SkillBuilder object acts as the entry point for the skill, routing all request and response
# payloads to the handlers above.
sb = SkillBuilder()
# Skill startup Handler
sb.add_request_handler(LaunchRequestHandler())
# Core Handlers
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(HelpIntentHandler())
# Roadster Handlers
sb.add_request_handler(RoadsterMarsHandler())
sb.add_request_handler(AssistHandler())
sb.add_request_handler(RoadsterOrbitHandler())
sb.add_request_handler(RoadsterLocationHandler())
sb.add_request_handler(RoadsterSpeedHandler())
sb.add_request_handler(RoadsterInfoHandler())
# Launch Handlers
sb.add_request_handler(LaunchNextHandler())
sb.add_request_handler(LaunchLastHandler())
# Shared Component Handler
sb.add_request_handler(ChangeUnitsHandler())
# Landing Pads Handler
sb.add_request_handler(LandingPadsHandler())
# Company Handler
sb.add_request_handler(CompanyHandler())
# Exception Handler to deal with mop up
sb.add_exception_handler(CatchAllExceptionHandler())
sb.add_request_handler(IntentReflectorHandler()) # This MUST be last so it doesn't override the custom intent handlers
lambda_handler = sb.lambda_handler()
# End of Lambda Function
| 40.632653
| 151
| 0.801105
|
392fd4d7d4ab0ef07297911fa104492a550448f4
| 10,015
|
py
|
Python
|
body/body_textEditor.py
|
XiantaoCheng/Structure
|
5a12452dbe03fd37baf3059578cd1dd10f25e161
|
[
"MIT"
] | 1
|
2020-01-15T02:02:59.000Z
|
2020-01-15T02:02:59.000Z
|
body/body_textEditor.py
|
XiantaoCheng/Structure
|
5a12452dbe03fd37baf3059578cd1dd10f25e161
|
[
"MIT"
] | null | null | null |
body/body_textEditor.py
|
XiantaoCheng/Structure
|
5a12452dbe03fd37baf3059578cd1dd10f25e161
|
[
"MIT"
] | null | null | null |
import sys, re
if __name__=='__main__':
sys.path.append(sys.path[0]+'\\..')
from body.bone import NetP
from body.soul import Karma
from body.body_motor import Motor
from body.body_pool import Pool
from body.body_brain import Brain
from body.body_debugger import Debugger
from tools import tools_sl, tools_basic
from PyQt5.QtWidgets import QTextEdit, QApplication, QMessageBox, QFontDialog
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QTextCursor, QFont
# import matlab.engine
class Editor(QTextEdit):
def __init__(self,name):
super().__init__()
self.m_self=None
self.m_pool=None
self.m_motor=None
self.m_debugger=None
self.m_screen=None
self.m_plainText=None
self.m_readPtr=None
self.m_currentFile=''
self.m_changed=False
self.textChanged.connect(self.changed)
self.m_systemMark='\n-----------系统-----------\n'  # "系统" means "system"; marks the system section of a saved file
def initialize(self,point):
if point==None:
point=NetP('editor')
self.m_self=point
point.m_dev=self
point.m_permission=0
pt_text=tools_basic.getPoint(point,'m_plainText','text')
pt_pool=tools_basic.getPoint(point,'m_pool','pool')
pt_motor=tools_basic.getPoint(point,'m_motor','compiler')
pt_debugger=tools_basic.getPoint(point,'m_debugger','debugger')
pt_screen=tools_basic.getPoint(point,'m_screen','screen')
self.modifyPtStruct(pt_debugger,pt_motor,pt_pool)
self.m_plainText=pt_text
self.setReadPtr(pt_text)
self.m_pool=Pool(pt_pool)
self.m_motor=Motor(pt_motor)
self.m_debugger=Debugger(pt_debugger)
self.m_screen=Brain(pt_screen)
self.m_pool.register(self.m_screen.m_self)
self.m_pool.register(self.m_debugger.m_self)
self.updateByPts()
self.setFont(QFont('宋体'))  # 宋体 = SimSun typeface
self.setStyleSheet('font: 20px;')
self.show()
def modifyPtStruct(self,pt_debugger,pt_motor,pt_pool):
tools_basic.setPoint(pt_debugger,'m_motor',pt_motor)
tools_basic.setPoint(pt_motor,'m_source',pt_pool)
pt_lib=tools_basic.getPoint(pt_pool,'m_lib')
tools_basic.setPoint(pt_lib,'m_motor',pt_motor)
def resizeEvent(self, QResizeEvent):
self.updateSysPts()
return super().resizeEvent(QResizeEvent)
def keyPressEvent(self, QKeyEvent):
modifier=QApplication.keyboardModifiers()
if modifier==Qt.ControlModifier:
if QKeyEvent.key()==Qt.Key_S:
self.saveAsFile()
elif QKeyEvent.key()==Qt.Key_R:
self.runCode()
elif QKeyEvent.key()==Qt.Key_T:
self.debugCode()
elif QKeyEvent.key()==Qt.Key_Q:
self.setReadPtr(self.m_plainText)
return super().keyPressEvent(QKeyEvent)
def openFile(self,fileName):
[text1,text2]=self.readFile(fileName)
if text1==None and text2==None:
return False
self.m_currentFile=fileName
self.loadText(text1,text2)
self.m_changed=False
self.updateState()
return True
def readFile(self,fileName):
try:
f=open(fileName,encoding='gbk')
except:
print("The file, "+fileName+", doesn't exist.")
return [None,None]
try:
textGbk=f.read()
except:
textGbk=None
f.close()
f=open(fileName,encoding='utf-8')
try:
textUtf=f.read()
except:
textUtf=None
f.close()
return [textGbk,textUtf]
def loadText(self,text1,text2):
head=None
if text1==None:
code,ni=self.fixFormat(text2)
elif text2==None:
code,ni=self.fixFormat(text1)
else:
code1,n1=self.fixFormat(text1)
code2,n2=self.fixFormat(text2)
if n1==-1:
code=code2
else:
code=code1
list_pt=tools_basic.buildPoints_tokener(code)
# for point in list_pt:
# point.m_permission=0
# if point.m_db[0]!=None or point.m_db[1]!=None:
# continue
# for con in point.m_con:
# if con.m_db[1]==point:
# break
# head=point
head=list_pt[0]
self.initialize(head)
# for point in list_pt:
# if point.m_name=='in':
# print(point.info(),point.m_permission)
def fixFormat(self,text):
ni=text.find(self.m_systemMark)
# old fashion
if ni!=0:
# code='editor(,);m_plainText(editor,text);text\"'+code+'\"(,);'
code=self.transferCode(text)
# new fashion
else:
code=text[len(self.m_systemMark):]
return code,ni
def transferCode(self,text):
plainText,sysPt,nrmPt=self.takeParts_oldFasion(text)
code='editor(,);m_plainText(editor,text);text\"'+plainText\
+'\"(,);m_pool(editor,pool);pool(,);m_contain(pool,points);'+\
'points\"'+nrmPt+'\"(,);'
return code
def takeParts_oldFasion(self,wholeText):
normalMark='\n----------普通----------\n'  # "普通" means "normal"
systemMark='\n----------系统----------\n'  # "系统" means "system"
n=wholeText.rfind(normalMark)
if n==-1:
return [wholeText,'','']
s=wholeText.rfind(systemMark,0,n)
if s==-1:
return [wholeText,'','']
return [wholeText[0:s],wholeText[s+len(systemMark):n],wholeText[n+len(normalMark):]]
def saveAsFile(self,fileName=None):
if fileName==None:
fileName=self.m_currentFile
if fileName=='':
QMessageBox.warning(self,"Save failed!","Warning: the file name can't be empty")  # QMessageBox.warning is the static method; QMessageBox.Warning is an icon enum
text=self.m_systemMark+self.saveText()
f=open(fileName,'+w')
f.write(text)
f.close()
self.m_currentFile=fileName
self.m_changed=False
self.updateState()
def saveText(self):
list_pt=tools_basic.getAllSystemPt(self.m_self)
return tools_basic.writeStdCode([],list_pt)
def updateState(self):
title=''
if self.m_changed==True:
title='*'
i=self.m_currentFile.rfind('\\')
if i+1==len(self.m_currentFile):
i=-1
title+=self.m_currentFile[i+1:]
if self.m_readPtr!=self.m_plainText:
title+=': '+self.m_readPtr.info(1)
self.setWindowTitle(title)
def changed(self):
self.m_changed=True
self.updateState()
if self.m_self!=None:
# pt_text=tools_basic.getPoint(self.m_self,'m_plainText')
# pt_text.m_text=self.toPlainText()
self.m_readPtr.m_text=self.toPlainText()
def runCode(self):
# complete the selection area
text=self.toPlainText()
cursor=self.textCursor()
s=cursor.selectionStart()
e=cursor.selectionEnd()
ns=text.rfind('\n',0,s)+1
ne=text.find('\n',e,-1)
cursor=self.selectText(ns,ne)
code=cursor.selectedText().replace("\u2029",'\n')
# operate code
operation_pool=self.m_motor.m_inputs
if self.m_self not in operation_pool:
operation_pool.append(self.m_self)
outputs=self.m_motor.runCode(code)
operation_pool.remove(self.m_self)
self.m_pool.input(outputs)
def debugCode(self):
# complete the selection area
text=self.toPlainText()
cursor=self.textCursor()
s=cursor.selectionStart()
e=cursor.selectionEnd()
ns=text.rfind('\n',0,s)+1
ne=text.find('\n',e,-1)
cursor=self.selectText(ns,ne)
code=cursor.selectedText().replace("\u2029",'\n')
#debug
if self.m_debugger.isVisible()==False:
self.m_debugger.setVisible(True)
self.m_debugger.reset(code)
def setReadPtr(self,pt_text):
self.m_readPtr=pt_text
self.setPlainText(pt_text.m_text)
def selectText(self,start,end):
cursor=self.textCursor()
cursor.movePosition(QTextCursor.Start)
cursor.movePosition(QTextCursor.Right,QTextCursor.MoveAnchor,start)
if end==-1:
cursor.movePosition(QTextCursor.End,QTextCursor.KeepAnchor)
else:
cursor.movePosition(QTextCursor.Right,QTextCursor.KeepAnchor,end-start)
self.setTextCursor(cursor)
return cursor
######## functions interact with points
def updateSysPts(self):
pt_x=tools_basic.getPoint(self.m_self,'m_x')
pt_y=tools_basic.getPoint(self.m_self,'m_y')
pt_height=tools_basic.getPoint(self.m_self,'m_height')
pt_width=tools_basic.getPoint(self.m_self,'m_width')
pt_x.m_name=str(self.geometry().x())
pt_y.m_name=str(self.geometry().y())
pt_width.m_name=str(self.geometry().width())
pt_height.m_name=str(self.geometry().height())
def updateByPts(self):
pt_x=tools_basic.getPoint(self.m_self,'m_x','300')
pt_y=tools_basic.getPoint(self.m_self,'m_y','300')
pt_height=tools_basic.getPoint(self.m_self,'m_height','600')
pt_width=tools_basic.getPoint(self.m_self,'m_width','300')
x=int(pt_x.m_name)
y=int(pt_y.m_name)
width=int(pt_width.m_name)
height=int(pt_height.m_name)
self.setGeometry(x,y,width,height)
if __name__=="__main__":
app=QApplication(sys.argv)
editor=Editor("editor")
if len(sys.argv)<2:
print("Invalid file name!")
else:
print(sys.argv[1])
editor.openFile(sys.argv[1])
sys.exit(app.exec_())
| 33.383333
| 93
| 0.582227
|
bf89386e68d59623351cdaf351f5250358ce932f
| 14,315
|
py
|
Python
|
marabou/train/src/models/cnn_classifier.py
|
mmarouen/marabou
|
2f3f7512714c94b8d8f3da12751895ea091fe5f6
|
[
"MIT"
] | 2
|
2020-05-24T12:15:33.000Z
|
2020-05-24T12:29:29.000Z
|
marabou/train/src/models/cnn_classifier.py
|
mmarouen/marabou
|
2f3f7512714c94b8d8f3da12751895ea091fe5f6
|
[
"MIT"
] | 11
|
2020-05-02T23:26:49.000Z
|
2022-02-27T08:10:59.000Z
|
marabou/train/src/models/cnn_classifier.py
|
mmarouen/marabou
|
2f3f7512714c94b8d8f3da12751895ea091fe5f6
|
[
"MIT"
] | null | null | null |
import os
import pickle
import re
import subprocess
import time
from itertools import compress
import numpy as np
from cv2 import cv2
from keras.applications.vgg16 import VGG16
from keras.models import Model, load_model
from keras.layers import Dense, Flatten, Dropout
from keras.utils import to_categorical
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from src.utils.config_loader import FashionClassifierConfigReader
class DataPreprocessor:
"""
Utility class performing several data preprocessing steps
"""
def __init__(self, config: FashionClassifierConfigReader):
self.validation_split = config.validation_split
self.image_height = config.image_height
self.image_width = config.image_width
def split_train_test(self, X, y):
"""
Wrapper method to split training data into a validation set and a training set
Args:
X: tokenized predictors
y: labels
Returns:
tuple consisting of training predictors, training labels, validation predictors, validation labels
"""
print("===========> data split")
unique_labels = list(set(y))
n_labels = len(unique_labels)
labels_to_idx = {t: i for i, t in enumerate(unique_labels)}
idx_to_labels = {i: t for i, t in enumerate(unique_labels)}
y = [labels_to_idx[i] for i in y]
y = to_categorical(y, num_classes=n_labels)
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, test_size=self.validation_split)
print("----> data split finish")
print('training features shape ', X_train.shape)
print('testing features shape ', X_test.shape)
print('training target shape ', np.asarray(y_train).shape)
print('testing target shape ', np.asarray(y_test).shape)
return X_train, X_test, np.asarray(y_train), np.asarray(y_test), idx_to_labels
def load_images(self, X):
"""
Loads an array containing training images ready to be injected in the CNN
Args:
X: list of image urls
Returns:
array having shape (n_images, image_height, image_width, 3)
"""
X_result = []
for image_url in X:
im = cv2.imread(image_url, 1)
im = cv2.resize(im, (self.image_width, self.image_height))
X_result.append(im)
X_result = np.asarray(X_result)
return X_result
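# Hedged usage sketch (config is assumed to be a FashionClassifierConfigReader;
# image paths and labels are illustrative):
#
#     pre = DataPreprocessor(config)
#     X = pre.load_images(["img/shirt_001.jpg", "img/shoe_042.jpg"])
#     X_tr, X_te, y_tr, y_te, idx_to_labels = pre.split_train_test(X, ["shirt", "shoe"])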
class CNNClothing:
"""
Handles the CNN model
"""
def __init__(self, *args, **kwargs):
self.use_pretrained_cnn = None
self.pretrained_network_path = None
self.pretrained_network_name = None
self.pretrained_layer = None
self.model = None
self.n_labels = None
self.idx_to_labels = None
self.batch_size = None
keys = kwargs.keys()
if 'config' in keys:
self.init_from_config_file(args[0], kwargs['config'])
else:
self.init_from_files(kwargs['h5_file'], kwargs['class_file'])
def init_from_files(self, h5_file, class_file):
"""
Initializes the class from a previously saved model
Args:
h5_file: url to a saved class
Return:
None
"""
self.model = load_model(h5_file)
with open(class_file, 'rb') as f:
self.image_height = pickle.load(f)
self.image_width = pickle.load(f)
self.idx_to_labels = pickle.load(f)
def init_from_config_file(self, idx_to_labels, config: FashionClassifierConfigReader):
"""
initialize the class for the first time from a given configuration file and data processor
Args:
idx_to_labels: conversion from indices to original labels
config: .json configuration reader
Return:
None
"""
self.use_pretrained_cnn = config.pre_trained_cnn
self.pretrained_network_name = config.pretrained_network_name  # stored under the name read by build-time checks below
self.model = None
self.n_iter = 10
self.image_height = config.image_height
self.image_width = config.image_width
self.idx_to_labels = idx_to_labels
self.batch_size = config.batch_size
self.n_labels = len(idx_to_labels)
if self.pretrained_network_name == "vgg16":
self.pretrained_network_path = config.pretrained_network_vgg
elif self.pretrained_network_name == "lenet":
self.pretrained_network_path = config.pretrained_network_lenet
self.model = self.build_model()
def build_model(self):
"""
Builds a CNN model according to a fixed architecture
Return:
None
"""
print("===========> build model")
vggmodel = VGG16(include_top=False, input_shape=(self.image_height, self.image_width, 3))
for layer in vggmodel.layers:
layer.trainable = False
x = vggmodel.layers[-1].output
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(self.n_labels, activation='softmax')(x)
# define new model
model = Model(inputs=vggmodel.inputs, outputs=x)
# summarize
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['acc'])
print(model.summary())
return model
def fit(self, X_train, y_train, X_test=None, y_test=None):
"""
Fits the model object to the data
Args:
X_train: numpy array containing encoded training features
y_train: numpy array containing training targets
X_test: numpy array containing encoded test features
y_test: numpy array containing test targets
Return:
history of metrics + classification report
"""
report = None
if (X_test is not None) and (y_test is not None):
history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter,
batch_size=self.batch_size, validation_data=(X_test, y_test),
verbose=2)
y_hat = self.predict(X_test)
y = np.argmax(y_test, axis=1)
y = [self.idx_to_labels[i] for i in y]
report = classification_report(y, y_hat, output_dict=True)
df = pd.DataFrame(report).transpose().round(2)
print(df)
else:
history = self.model.fit(x=X_train, y=y_train, epochs=self.n_iter, batch_size=self.batch_size, verbose=2)
return history, report
def predict(self, X_test):
"""
Inference method
Args:
X_test: predictors array
Return:
numpy array containing the class for token character in the sentence
"""
probs = self.model.predict(X_test)
labels = np.argmax(probs, axis=1)
labels = [self.idx_to_labels[i] for i in labels]
return labels
def predict_proba(self, X_test):
"""
Inference method
Args:
X_test: array of predictors
Return:
numpy array containing the probabilities of a positive review for each list entry
"""
probs = self.model.predict(X_test)
return probs
def save_model(self, file_name_prefix):
"""
Saves the trained model into an h5 file
Args:
file_name_prefix: a file name prefix having the following format 'sentiment_analysis_%Y%m%d_%H%M%S'
Return:
None
"""
root_dir = os.environ.get("MARABOU_HOME")
if not os.path.isdir(os.path.join(root_dir, "marabou/train/trained_models")):
os.mkdir(os.path.join(root_dir, "marabou/train/trained_models"))
model_folder = os.path.join(root_dir, "marabou/train/trained_models")
file_url_keras_model = os.path.join(model_folder, file_name_prefix + "_rnn_model.h5")
self.model.save(file_url_keras_model)
file_url_class = os.path.join(model_folder, file_name_prefix + "_rnn_class.pkl")
with open(file_url_class, 'wb') as handle:
pickle.dump(self.image_height, handle)
pickle.dump(self.image_width, handle)
pickle.dump(self.idx_to_labels, handle)
print("----> model saved to %s" % file_url_keras_model)
print("----> class saved to %s" % file_url_class)
def save_classification_report(self, report, file_name_prefix):
"""
Saves the classification report to a txt file
Args:
report: a classification report object
file_name_prefix: a file name prefix having the following format 'sentiment_analysis_%Y%m%d_%H%M%S'
Return:
None
"""
root_dir = os.environ.get("MARABOU_HOME")
if not os.path.isdir(os.path.join(root_dir, "marabou/train/perf")):
os.mkdir(os.path.join(root_dir, "marabou/train/perf"))
plot_folder = os.path.join(root_dir, "marabou/train/perf")
report_file_url = os.path.join(plot_folder, file_name_prefix + "_report.txt")
df = pd.DataFrame(report).transpose().round(2)
df['classes'] = df.index
f = open(report_file_url, "w")
line = "{:15} |{:10} |{:10} |{:10} |{:10}|\n".format("classes", "precision", "recall", "f1-score", "support")
f.write(line)
for _, row in df.iterrows():
line = "{:15} |{:10} |{:10} |{:10} |{:10}|\n".format(row[4], row[0], row[1], row[2], row[3])
f.write(line)
f.close()
print("----> classification report saved to %s" % report_file_url)
def save_learning_curve(self, history, file_name_prefix):
"""
Saves the learning curve plot
Args:
history: a dictionary object containing training and validation dataset loss function values and
objective function values for each training iteration
file_name_prefix: a file name prefix having the following format 'fashion_mnist_%Y%m%d_%H%M%S'
Return:
None
"""
root_dir = os.environ.get("MARABOU_HOME")
if not os.path.isdir(os.path.join(root_dir, "marabou/train/perf")):
os.mkdir(os.path.join(root_dir, "marabou/train/perf"))
plot_folder = os.path.join(root_dir, "marabou/train/perf")
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
fig, ax = plt.subplots(1, 2)
ax[0].plot(epochs, acc, 'bo', label='Training acc')
ax[0].plot(epochs, val_acc, 'b', label='Validation acc')
ax[0].set_title('Training and validation accuracy')
ax[0].legend()
fig.suptitle('model performance')
ax[1].plot(epochs, loss, 'bo', label='Training loss')
ax[1].plot(epochs, val_loss, 'b', label='Validation loss')
ax[1].set_title('Training and validation loss')
ax[1].legend()
plot_file_url = os.path.join(plot_folder, file_name_prefix + "_learning_curve.png")
plt.savefig(plot_file_url)
plt.close()
print("----> learning curve saved to %s" % plot_file_url)
@staticmethod
def load_model(h5_file_url=None, class_file_url=None, collect_from_gdrive=False):
"""
Extracts a model saved using the save_model function
Args:
h5_file_url: gdrive link for the trained model
class_file_url: gdrive link for the class file
collect_from_gdrive: whether to collect the model file from google drive
Return:
            trained model object, or None if no trained model could be loaded
"""
trained_model = None
root_dir = os.environ.get("MARABOU_HOME")
        if root_dir is None:
            return None
        if not os.path.isdir(os.path.join(root_dir, "marabou/evaluation/trained_models")):
            return None
model_dir = os.path.join(root_dir, "marabou/evaluation/trained_models")
if not collect_from_gdrive:
model_files_list = os.listdir(model_dir)
if len(model_files_list) > 0:
rnn_models_idx = [("fashion_imagenet" in f) and ("rnn" in f) for f in model_files_list]
if np.sum(rnn_models_idx) > 0:
rnn_model = list(compress(model_files_list, rnn_models_idx))
model_dates = [int(''.join(re.findall(r'\d+', f))) for f in rnn_model]
h5_file_name = rnn_model[np.argmax(model_dates)]
class_file = h5_file_name.replace("rnn_model.h5", "rnn_class.pkl")
if os.path.isfile(os.path.join(model_dir, class_file)):
trained_model = CNNClothing(h5_file=os.path.join(model_dir, h5_file_name),
class_file=os.path.join(model_dir, class_file))
return trained_model
return None
return None
return None
else:
bash_script_folder = os.path.join(root_dir, "marabou/train/bash_scripts")
print("===========> collecting model file from link")
script_path = os.path.join(bash_script_folder, "load_fashion_model_file.sh")
file_prefix = "fashion_imagenet_loaded_%s" % time.strftime("%Y%m%d_%H%M%S")
h5_file_name = file_prefix + "_rnn_model.h5"
class_file_name = h5_file_name.replace("rnn_model.h5", "rnn_class.pkl")
h5_file_local_url = os.path.join(model_dir, h5_file_name)
class_file_local_url = os.path.join(model_dir, class_file_name)
subprocess.call("%s %s %s %s %s" % (script_path, h5_file_url,
h5_file_local_url, class_file_url, class_file_local_url), shell=True)
if (os.path.isfile(h5_file_local_url) and os.path.isfile(class_file_local_url)):
trained_model = CNNClothing(h5_file=h5_file_local_url, class_file=class_file_local_url)
return trained_model
else:
return None
| 43.11747
| 117
| 0.617324
|
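For reference, the predict method of the classifier above reduces to an argmax over the softmax output followed by a label lookup; a tiny self-contained sketch of that decoding step (the label map is a made-up example, not the project's real idx_to_labels):
import numpy as np

idx_to_labels = {0: "t-shirt", 1: "trouser", 2: "dress"}   # hypothetical label map
probs = np.array([[0.10, 0.75, 0.15],                       # softmax output for two images
                  [0.60, 0.30, 0.10]])
labels = [idx_to_labels[i] for i in np.argmax(probs, axis=1)]
print(labels)   # ['trouser', 't-shirt']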
1c465512236dd5e487d4620bb11fe1ccf6b857ef
| 631
|
py
|
Python
|
pysoup/logger/__init__.py
|
illBeRoy/pysoup
|
742fd6630e1be27c275cb8dc6ee94412472cb20b
|
[
"MIT"
] | 4
|
2016-02-21T12:40:44.000Z
|
2019-06-13T13:23:19.000Z
|
pysoup/logger/__init__.py
|
illBeRoy/pysoup
|
742fd6630e1be27c275cb8dc6ee94412472cb20b
|
[
"MIT"
] | null | null | null |
pysoup/logger/__init__.py
|
illBeRoy/pysoup
|
742fd6630e1be27c275cb8dc6ee94412472cb20b
|
[
"MIT"
] | 1
|
2020-07-16T12:22:12.000Z
|
2020-07-16T12:22:12.000Z
|
import os.path
import pysoup.utils.assets
class Logger(object):
def __init__(self, cwd):
self._log = ''
self._cwd = cwd
def log(self, text):
self._log += '{0}\n'.format(text)
def log_dependency_results(self, failed_dependencies):
for dependency in failed_dependencies:
self.log('could not install {0}'.format(dependency))
def dump_to_file(self, filename='soup.log'):
if self._log != '':
with open(os.path.join(self._cwd, filename), 'wb') as f:
f.write(pysoup.utils.assets.LOGO)
f.write('\n{0}'.format(self._log))
| 27.434783
| 68
| 0.59588
|
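A minimal usage sketch for the Logger class above, assuming the pysoup package is importable; the directory and dependency names are placeholders:
from pysoup.logger import Logger

logger = Logger(cwd="/tmp")                            # directory where soup.log would be written
logger.log("starting installation")
logger.log_dependency_results(["libfoo", "libbar"])    # hypothetical failed dependencies
logger.dump_to_file()                                  # writes /tmp/soup.log only if something was logged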
2ebd7e224efda901f03494bb9963e5a47a5af332
| 561
|
py
|
Python
|
tests/basics/special_comparisons.py
|
sebastien-riou/micropython
|
116c15842fd48ddb77b0bc016341d936a0756573
|
[
"MIT"
] | 13,648
|
2015-01-01T01:34:51.000Z
|
2022-03-31T16:19:53.000Z
|
tests/basics/special_comparisons.py
|
sebastien-riou/micropython
|
116c15842fd48ddb77b0bc016341d936a0756573
|
[
"MIT"
] | 7,092
|
2015-01-01T07:59:11.000Z
|
2022-03-31T23:52:18.000Z
|
tests/basics/special_comparisons.py
|
sebastien-riou/micropython
|
116c15842fd48ddb77b0bc016341d936a0756573
|
[
"MIT"
] | 4,942
|
2015-01-02T11:48:50.000Z
|
2022-03-31T19:57:10.000Z
|
class A:
def __eq__(self, other):
print("A __eq__ called")
return True
class B:
def __ne__(self, other):
print("B __ne__ called")
return True
class C:
def __eq__(self, other):
print("C __eq__ called")
return False
class D:
def __ne__(self, other):
print("D __ne__ called")
return False
a = A()
b = B()
c = C()
d = D()
def test(s):
print(s)
print(eval(s))
for x in 'abcd':
for y in 'abcd':
test('{} == {}'.format(x,y))
test('{} != {}'.format(x,y))
| 16.5
| 36
| 0.509804
|
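The test above exercises how == and != dispatch to __eq__ and __ne__; a short standalone sketch of the CPython semantics being checked (these classes are illustrative, not taken from the test):
class OnlyEq:
    def __eq__(self, other):
        return True

class OnlyNe:
    def __ne__(self, other):
        return True

print(OnlyEq() == OnlyNe())   # True:  OnlyEq.__eq__ on the left operand answers
print(OnlyEq() != OnlyNe())   # False: with no __ne__, Python 3 negates __eq__
print(OnlyNe() != OnlyEq())   # True:  OnlyNe.__ne__ on the left operand answers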
e424708f0e8028fb143635d432fbe1c539e2a2d0
| 3,697
|
py
|
Python
|
jogo_da_velha.py
|
mateusmantoan/jogo_da_velha
|
669917331373375857ad1d3e893400b5f125aaa3
|
[
"CC0-1.0"
] | null | null | null |
jogo_da_velha.py
|
mateusmantoan/jogo_da_velha
|
669917331373375857ad1d3e893400b5f125aaa3
|
[
"CC0-1.0"
] | null | null | null |
jogo_da_velha.py
|
mateusmantoan/jogo_da_velha
|
669917331373375857ad1d3e893400b5f125aaa3
|
[
"CC0-1.0"
] | null | null | null |
from os import system, name
from time import sleep
from random import sample
import random
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
def print_table():
clear()
sleep(0.1)
if human == "X" or human == "O":
print("You chose", human, "\n")
print(table[0]," | ",table[1]," | ",table[2],"\n--|---|--\n", table[3]," | ", table[4]," | ", table[5], \
"\n--|---|--\n", table[6], " | ", table[7], " | ", table[8], "\n", sep="")
def restart_or_out():
x = input("Press 1 for play again, 2 for finish the game: ")
if x == "1":
start()
elif x == "2":
exit()
else:
print("invalid option", x, "is not 1 or 2")
restart_or_out()
def finish_game(p):
print_table()
if human == p:
print("congrats human, you win")
restart_or_out()
else:
print("you lose human")
restart_or_out()
def verify_win(p):
#verify line
if table[0] == p and table[1] == p and table[2] == p or table[3] == p and table[4] == p and table[5] == p \
or table[6] == p and table[7] == p and table[8] == p:
finish_game(p)
    # verify columns
    elif table[0] == p and table[3] == p and table[6] == p or table[1] == p and table[4] == p and table[7] == p \
or table[2] == p and table[5] == p and table[8] == p:
finish_game(p)
# verify diagonal
elif table[0] == p and table[4] == p and table[8] == p or table[2] == p and table[4] == p and table[6] == p:
finish_game(p)
else:
return None
def choose_player():
global human
global computer
p = input('The "X" player will start the game\nChoose 1 for X or 2 for O: ')
if p == "1":
human, computer = "X", "O"
elif p == "2":
human, computer = "O", "X"
else:
print(p, "is not 1 or 2, try again\n")
choose_player()
def choose_level():
global level
x = input("Press 1 to easy: ")
if x == "1":
level = "easy"
# elif x == "2":
# level = "normal"
# elif x == "3":
# level = "hard"
else:
print(p, "is not 1, 2 or 3, try again\n")
choose_level()
def human_select_field():
f = input("select a empty field: ")
if f not in table:
print("invalid option, try again\n")
human_select_field()
else:
table[int(f)] = human
verify_win(human)
def computer_select_field():
if level == "easy":
f = sample(table, 1)
f = f[0]
if f == computer or f == human:
computer_select_field()
else:
table[int(f)] = computer
verify_win(computer)
print_table()
#elif level == "normal":
# f = sample(table, 1)
# f = f[0]
# if f == computer or f == human:
# computer_select_field()
#else:
# table[int(f)] = computer
# verify_win(computer)
# print_table()
def start():
clear()
input("Tic Tac Toe\n\nPress ENTER to start")
global table, empty_table, human, computer, level
table = ["0", "1", "2", "3", "4", "5", "6", "7", "8"]
level = "easy"
clear()
choose_player()
choose_level()
print_table()
if human == "X":
for i in range(len(table)):
if i == 0 or i % 2 == 0:
human_select_field()
else:
computer_select_field()
if computer == "X":
for i in range(len(table)):
if i == 0 or i % 2 == 0:
computer_select_field()
else:
human_select_field()
print("Human and Computer loses")
restart_or_out()
start()
| 27.589552
| 113
| 0.505004
|
fc90444551cff36d3a561885e3f79b96e9b73dc6
| 894
|
py
|
Python
|
backend/website/squatofadt/urls.py
|
KiOui/squatofadt
|
78ddd629c49a0d6043ddfb0e5609bfc5842d986e
|
[
"MIT"
] | null | null | null |
backend/website/squatofadt/urls.py
|
KiOui/squatofadt
|
78ddd629c49a0d6043ddfb0e5609bfc5842d986e
|
[
"MIT"
] | null | null | null |
backend/website/squatofadt/urls.py
|
KiOui/squatofadt
|
78ddd629c49a0d6043ddfb0e5609bfc5842d986e
|
[
"MIT"
] | null | null | null |
"""squatofadt URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
path("oauth/", include("oauth2_provider.urls", namespace="oauth2_provider")),
path("api/", include("squatofadt.api.urls")),
]
| 37.25
| 81
| 0.705817
|
a1c34d095e3b9efcc7b467d8dfb03da0abf26351
| 4,738
|
py
|
Python
|
openmdao.util/src/openmdao/util/filexfer.py
|
OzanCKN/OpenMDAO-Framework
|
05e9d4b9bc41d0ec00a7073545146c925cd33b0b
|
[
"Apache-2.0"
] | 1
|
2015-11-05T11:14:45.000Z
|
2015-11-05T11:14:45.000Z
|
openmdao.util/src/openmdao/util/filexfer.py
|
janus/OpenMDAO-Framework
|
05e9d4b9bc41d0ec00a7073545146c925cd33b0b
|
[
"Apache-2.0"
] | null | null | null |
openmdao.util/src/openmdao/util/filexfer.py
|
janus/OpenMDAO-Framework
|
05e9d4b9bc41d0ec00a7073545146c925cd33b0b
|
[
"Apache-2.0"
] | 1
|
2020-07-15T02:45:54.000Z
|
2020-07-15T02:45:54.000Z
|
import fnmatch
import glob
import os
import sys
import zipfile
from openmdao.util.log import NullLogger
def filexfer(src_server, src_path, dst_server, dst_path, mode=''):
"""
Transfer a file from one place to another.
If `src_server` or `dst_server` is None, then the :mod:`os` module
is used for the source or destination respectively. Otherwise the
respective object must support :meth:`open`, :meth:`stat`, and
:meth:`chmod`.
After the copy has completed, permission bits from :meth:`stat` are set
via :meth:`chmod`.
src_server: Proxy
Host to get file from.
src_path: string
Path to file on `src_server`.
dst_server: Proxy
Host to put file to.
dst_path: string
Path to file on `dst_server`.
mode: string
Mode settings for :func:`open`, not including 'r' or 'w'.
"""
if src_server is None:
src_file = open(src_path, 'r'+mode)
else:
src_file = src_server.open(src_path, 'r'+mode)
try:
if dst_server is None:
dst_file = open(dst_path, 'w'+mode)
else:
dst_file = dst_server.open(dst_path, 'w'+mode)
if src_server is None and dst_server is None:
chunk = 1 << 20 # 1MB locally.
else:
chunk = 1 << 17 # 128KB over network.
try:
data = src_file.read(chunk)
while data:
dst_file.write(data)
data = src_file.read(chunk)
finally:
dst_file.close()
finally:
src_file.close()
if src_server is None:
mode = os.stat(src_path).st_mode
else:
mode = src_server.stat(src_path).st_mode
if dst_server is None:
os.chmod(dst_path, mode)
else:
dst_server.chmod(dst_path, mode)
def pack_zipfile(patterns, filename, logger=NullLogger):
"""
Create 'zip' file `filename` of files in `patterns`.
Returns ``(nfiles, nbytes)``.
patterns: list
List of :mod:`fnmatch` style patterns.
filename: string
Name of zip file to create.
logger: Logger
Used for recording progress.
"""
nfiles = 0
nbytes = 0
zipped = zipfile.ZipFile(filename, 'w')
try:
for pattern in patterns:
for path in glob.glob(pattern):
size = os.path.getsize(path)
logger.debug("packing '%s' (%d)...", path, size)
zipped.write(path)
nfiles += 1
nbytes += size
finally:
zipped.close()
return (nfiles, nbytes)
def unpack_zipfile(filename, logger=NullLogger, textfiles=None):
"""
Unpack 'zip' file `filename`.
Returns ``(nfiles, nbytes)``.
filename: string
Name of zip file to unpack.
logger: Logger
Used for recording progress.
textfiles: list
        List of :mod:`fnmatch` style patterns specifying which unpacked files
are text files possibly needing newline translation. If not supplied,
the first 4KB of each is scanned for a zero byte. If not found then the
file is assumed to be a text file.
"""
# ZipInfo.create_system code for local system.
local_system = 0 if sys.platform == 'win32' else 3
nfiles = 0
nbytes = 0
zipped = zipfile.ZipFile(filename, 'r')
try:
for info in zipped.infolist():
filename = info.filename
size = info.file_size
logger.debug('unpacking %r (%d)...', filename, size)
zipped.extract(info)
if info.create_system != local_system:
if textfiles is None:
with open(filename, 'rb') as inp:
data = inp.read(1 << 12)
if '\0' not in data:
logger.debug('translating %r...', filename)
translate_newlines(filename)
else:
for pattern in textfiles:
if fnmatch.fnmatch(filename, pattern):
logger.debug('translating %r...', filename)
translate_newlines(filename)
nfiles += 1
nbytes += size
finally:
zipped.close()
return (nfiles, nbytes)
def translate_newlines(filename):
"""
Translate the newlines of `filename` to the local standard.
filename: string
Name of the file to be translated.
The translated file will replace this file.
"""
with open(filename, 'rU') as inp:
with open('__translated__', 'w') as out:
for line in inp:
out.write(line)
os.remove(filename)
os.rename('__translated__', filename)
| 28.542169
| 79
| 0.573027
|
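A minimal local usage sketch for the zip helpers above; the file patterns and archive name are placeholders, and filexfer itself is omitted because it needs two server proxies:
from openmdao.util.filexfer import pack_zipfile, unpack_zipfile

# Pack every .dat file in the working directory into results.zip ...
nfiles, nbytes = pack_zipfile(["*.dat"], "results.zip")
print("packed %d files, %d bytes" % (nfiles, nbytes))

# ... and unpack it again, translating newlines only for files matching *.txt.
nfiles, nbytes = unpack_zipfile("results.zip", textfiles=["*.txt"])
print("unpacked %d files, %d bytes" % (nfiles, nbytes))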
2a5111cb2c898391500c7f39a9d9cb2e85eeb07c
| 1,163
|
py
|
Python
|
courses/fields.py
|
SergePogorelov/educa
|
c3b23b22f94788099f5b607d98dac957287b3923
|
[
"BSD-3-Clause"
] | null | null | null |
courses/fields.py
|
SergePogorelov/educa
|
c3b23b22f94788099f5b607d98dac957287b3923
|
[
"BSD-3-Clause"
] | 8
|
2021-03-19T11:28:02.000Z
|
2022-03-12T00:48:58.000Z
|
courses/fields.py
|
SergePogorelov/educa
|
c3b23b22f94788099f5b607d98dac957287b3923
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
class OrderField(models.PositiveIntegerField):
def __init__(self, for_fields=None, *args, **kwargs):
self.for_fields = for_fields
super(OrderField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
if getattr(model_instance, self.attname) is None:
            # The value is empty.
try:
qs = self.model.objects.all()
if self.for_fields:
                    # Filter objects that have the same values for the fields
                    # listed in "for_fields".
query = {field:getattr(model_instance, field) for field in self.for_fields}
qs = qs.filter(**query)
                # Get the order of the last object.
last_item = qs.latest(self.attname)
value = last_item.order + 1
except ObjectDoesNotExist:
value = 0
setattr(model_instance, self.attname, value)
return value
else:
return super(OrderField, self).pre_save(model_instance, add)
| 40.103448
| 95
| 0.590714
|
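A hedged sketch of how a field like OrderField above is typically wired into a model; the Module model and the courses.Course reference are illustrative, not taken from this record:
from django.db import models
from courses.fields import OrderField   # import path as in the snippet above

class Module(models.Model):
    # The related Course model is assumed to exist elsewhere in the project.
    course = models.ForeignKey('courses.Course', related_name='modules',
                               on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    # for_fields=['course'] makes the ordering restart at 0 for every course.
    order = OrderField(blank=True, for_fields=['course'])

    class Meta:
        ordering = ['order']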
acf59ca7d715f91f4c8078da84321bce9e3f6771
| 7,350
|
py
|
Python
|
manhuagui.py
|
q6806161/manhuagui
|
42e278d73c234f2015823a4ef3dadeebdc596d56
|
[
"MIT"
] | null | null | null |
manhuagui.py
|
q6806161/manhuagui
|
42e278d73c234f2015823a4ef3dadeebdc596d56
|
[
"MIT"
] | null | null | null |
manhuagui.py
|
q6806161/manhuagui
|
42e278d73c234f2015823a4ef3dadeebdc596d56
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Author:Lvcong Chen
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException,NoSuchElementException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from multiprocessing import Process, Queue
import requests
import urllib3
import socket
import time
import random
import imp
import re
import sys
import os
import winsound
imp.reload(sys)
requests.packages.urllib3.disable_warnings()
class One_Punch_Man_Spider(object):
def __init__(self):
self.pattern_maxpage = re.compile(r"""(
<h2>(.*?)</h2>
.*?<span\s+id=['|"]page['|"]>\d+</span>\W+(\d+)
)""",re.VERBOSE|re.S)
self.pattern_picture_download_url = re.compile(r"""(
(id=['|"]mangaFile['|"]\s+src=['|"](.*?)['|"]) # 图片下载地址提取
)""",re.VERBOSE|re.S)
self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Wi\
n64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.\
0.3729.108 Safari/537.36'}
self.s = requests.Session()
self.url_charpter_first_page_first = "https://www.manhuagui.com/comic/9637/438862.html"
#
def chrome_set(self):
"""chorm的selenium设置"""
chrome_options=Options()
chrome_options.add_argument('--ignore-certificate-errors')
# chrome_options.add_argument('--headless')
capa = DesiredCapabilities.CHROME
capa["pageLoadStrategy"] = "none"
driver = webdriver.Chrome(desired_capabilities=capa, options=chrome_options)
wait = WebDriverWait(driver,7)
return (driver,wait)
    # image download URL module
def picture_url_crawler(self,maxpage,driver,wait):
page_turn = 1
picture_url_list = []
check_time = 0
while page_turn <= int(maxpage) and check_time<=3:
try:
wait.until(EC.presence_of_element_located((By.ID,"mangaFile")))
html_text = driver.page_source
items = re.findall(self.pattern_picture_download_url,html_text)
picture_url = re.sub(';','&',re.sub('&', '', items[0][-1]))
picture_url_list.append(picture_url)
                page_next = wait.until(EC.element_to_be_clickable((By.ID,"next"))) # click to the next page
driver.execute_script("arguments[0].click();", page_next)
time.sleep(random.uniform(1,3))
page_turn += 1
except TimeoutException as e:
driver.refresh()
check_time +=1
self.alarm_sound(e)
continue
if check_time ==3:
sys.exit()
return picture_url_list
    # alarm sound module
def alarm_sound(self,e):
winsound.Beep(200, 3000)
print("元素不存在",e)
"""获取每话首页渲染后的html"""
def picture_url_list(self,q):
driver,wait = self.chrome_set()
try:
driver.get(self.url_charpter_first_page_first)
wait.until(EC.presence_of_element_located((By.ID,"tbBox")))
driver.execute_script('window.stop()')
end_flag = 1
check_time = 0
except TimeoutException as e:
self.alarm_sound(e)
else:
while end_flag!=0 and check_time<=3:
try:
url_now = driver.current_url
wait.until(EC.presence_of_element_located((By.ID,"mangaFile")))
html_text_maxpage = driver.page_source
maxpage = re.findall(self.pattern_maxpage,html_text_maxpage)[0][-1]
charpter = re.findall(self.pattern_maxpage,html_text_maxpage)[0][1]
referer = re.sub(r"#[p]{1}=\d+",'',driver.current_url)
if "卷" not in charpter:
print(f"{charpter}最大页数—{maxpage}")
picture_url_list = self.picture_url_crawler(maxpage,driver,wait)
time.sleep(2)
charpter_next = wait.until(EC.presence_of_element_located((By.CLASS_NAME,"nextC")))
driver.execute_script("arguments[0].click();", charpter_next) # 防止按键遮挡
try:
wait.until(EC.presence_of_element_located((By.CLASS_NAME,"tip-alert")))
end_flag = 0
print("全部爬取完毕,congratulations!")
except NoSuchElementException:
pass
while True:
if q.empty():
q.put((referer,charpter,picture_url_list,end_flag))
break
time.sleep(1)
except TimeoutException:
check_time += 1
driver.refresh()
continue
"""下载图片,并保存到文件夹中"""
def picture_download(self,q):
while True:
charpter_url_list_endflag = q.get(True)
picture_url_list = charpter_url_list_endflag[2]
charpter = charpter_url_list_endflag[1]
endflag = charpter_url_list_endflag[-1]
referer = charpter_url_list_endflag[0]
headers = {
"Referer":referer,
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Wi\
n64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.\
0.3729.108 Safari/537.36'}
page = 1
print(f"正在下载{charpter}")
for picture_url in picture_url_list:
reload_time = 0
while page <= len(picture_url_list) and reload_time <= 5:
try:
response = self.s.get(picture_url,headers=headers,timeout=5,verify=False)
os.makedirs(f"E:\黑色四叶操\{charpter}")
with open(f"E:\黑色四叶操\{charpter}\{page}.jpg","wb") as f:
writer = f.write(response.content)
break
                    except (requests.exceptions.ConnectionError,socket.timeout,urllib3.exceptions.ReadTimeoutError) as e:
                        print("image download failed", e)
time.sleep(2)
reload_time += 1
continue
except FileExistsError:
with open(f"E:\黑色四叶操\{charpter}\{page}.jpg","wb") as f:
writer = f.write(response.content)
break
page += 1
if endflag ==0:
return
if __name__=="__main__":
q = Queue()
one_punch_man_cartoon_downloader = One_Punch_Man_Spider()
picture_url_writer = Process(target=one_punch_man_cartoon_downloader.picture_url_list,args=(q,))
picture_save = Process(target=one_punch_man_cartoon_downloader.picture_download,args=(q,))
picture_url_writer.start()
picture_save.start()
    # wait for the picture_url_writer process to finish
picture_url_writer.join()
picture_save.join()
    # the picture_save process loops forever; terminate it to force an exit
# picture_save.terminate()
os.system(r'E:\KuGou\1.mp3')
| 40.384615
| 116
| 0.565306
|
e7dcb0448c7097d067edb298c3e52fb93d5e0fa5
| 15,178
|
py
|
Python
|
torchgeo/datasets/benin_cashews.py
|
remtav/torchgeo
|
d06b103f81edec4f4e0d13ccd621d318364679a2
|
[
"MIT"
] | null | null | null |
torchgeo/datasets/benin_cashews.py
|
remtav/torchgeo
|
d06b103f81edec4f4e0d13ccd621d318364679a2
|
[
"MIT"
] | null | null | null |
torchgeo/datasets/benin_cashews.py
|
remtav/torchgeo
|
d06b103f81edec4f4e0d13ccd621d318364679a2
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Smallholder Cashew Plantations in Benin dataset."""
import json
import os
from functools import lru_cache
from typing import Callable, Dict, Optional, Tuple
import matplotlib.pyplot as plt
import numpy as np
import rasterio
import rasterio.features
import torch
from rasterio.crs import CRS
from torch import Tensor
from .geo import VisionDataset
from .utils import check_integrity, download_radiant_mlhub_dataset, extract_archive
# TODO: read geospatial information from stac.json files
class BeninSmallHolderCashews(VisionDataset):
r"""Smallholder Cashew Plantations in Benin dataset.
This dataset contains labels for cashew plantations in a 120 km\ :sup:`2`\ area
in the center of Benin. Each pixel is classified for Well-managed plantation,
Poorly-managed plantation, No plantation and other classes. The labels are
generated using a combination of ground data collection with a handheld GPS device,
and final corrections based on Airbus Pléiades imagery. See
`this website <https://doi.org/10.34911/rdnt.hfv20i>`__ for dataset details.
Specifically, the data consists of Sentinel 2 imagery from a 120 km\ :sup:`2`\ area
in the center of Benin over 71 points in time from 11/05/2019 to 10/30/2020
and polygon labels for 6 classes:
0. No data
1. Well-managed plantation
    2. Poorly-managed plantation
3. Non-plantation
4. Residential
5. Background
6. Uncertain
If you use this dataset in your research, please cite the following:
* https://doi.org/10.34911/rdnt.hfv20i
.. note::
This dataset requires the following additional library to be installed:
* `radiant-mlhub <https://pypi.org/project/radiant-mlhub/>`_ to download the
imagery and labels from the Radiant Earth MLHub
"""
dataset_id = "ts_cashew_benin"
image_meta = {
"filename": "ts_cashew_benin_source.tar.gz",
"md5": "957272c86e518a925a4e0d90dab4f92d",
}
target_meta = {
"filename": "ts_cashew_benin_labels.tar.gz",
"md5": "f9d3f0c671427d852fae9b52a0ae0051",
}
dates = (
"2019_11_05",
"2019_11_10",
"2019_11_15",
"2019_11_20",
"2019_11_30",
"2019_12_05",
"2019_12_10",
"2019_12_15",
"2019_12_20",
"2019_12_25",
"2019_12_30",
"2020_01_04",
"2020_01_09",
"2020_01_14",
"2020_01_19",
"2020_01_24",
"2020_01_29",
"2020_02_08",
"2020_02_13",
"2020_02_18",
"2020_02_23",
"2020_02_28",
"2020_03_04",
"2020_03_09",
"2020_03_14",
"2020_03_19",
"2020_03_24",
"2020_03_29",
"2020_04_03",
"2020_04_08",
"2020_04_13",
"2020_04_18",
"2020_04_23",
"2020_04_28",
"2020_05_03",
"2020_05_08",
"2020_05_13",
"2020_05_18",
"2020_05_23",
"2020_05_28",
"2020_06_02",
"2020_06_07",
"2020_06_12",
"2020_06_17",
"2020_06_22",
"2020_06_27",
"2020_07_02",
"2020_07_07",
"2020_07_12",
"2020_07_17",
"2020_07_22",
"2020_07_27",
"2020_08_01",
"2020_08_06",
"2020_08_11",
"2020_08_16",
"2020_08_21",
"2020_08_26",
"2020_08_31",
"2020_09_05",
"2020_09_10",
"2020_09_15",
"2020_09_20",
"2020_09_25",
"2020_09_30",
"2020_10_10",
"2020_10_15",
"2020_10_20",
"2020_10_25",
"2020_10_30",
)
ALL_BANDS = (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B8A",
"B09",
"B11",
"B12",
"CLD",
)
RGB_BANDS = ("B04", "B03", "B02")
classes = [
"No data",
"Well-managed planatation",
"Poorly-managed planatation",
"Non-planatation",
"Residential",
"Background",
"Uncertain",
]
# Same for all tiles
tile_height = 1186
tile_width = 1122
def __init__(
self,
root: str = "data",
chip_size: int = 256,
stride: int = 128,
bands: Tuple[str, ...] = ALL_BANDS,
transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
download: bool = False,
api_key: Optional[str] = None,
checksum: bool = False,
verbose: bool = False,
) -> None:
"""Initialize a new Benin Smallholder Cashew Plantations Dataset instance.
Args:
root: root directory where dataset can be found
chip_size: size of chips
stride: spacing between chips, if less than chip_size, then there
will be overlap between chips
bands: the subset of bands to load
transforms: a function/transform that takes input sample and its target as
entry and returns a transformed version
download: if True, download dataset and store it in the root directory
api_key: a RadiantEarth MLHub API key to use for downloading the dataset
checksum: if True, check the MD5 of the downloaded files (may be slow)
verbose: if True, print messages when new tiles are loaded
Raises:
RuntimeError: if ``download=False`` but dataset is missing or checksum fails
"""
self._validate_bands(bands)
self.root = os.path.expanduser(root)
self.chip_size = chip_size
self.stride = stride
self.bands = bands
self.transforms = transforms
self.checksum = checksum
self.verbose = verbose
if download:
self._download(api_key)
if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. "
+ "You can use download=True to download it"
)
# Calculate the indices that we will use over all tiles
self.chips_metadata = []
for y in list(range(0, self.tile_height - self.chip_size, stride)) + [
self.tile_height - self.chip_size
]:
for x in list(range(0, self.tile_width - self.chip_size, stride)) + [
self.tile_width - self.chip_size
]:
self.chips_metadata.append((y, x))
def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
a dict containing image, mask, transform, crs, and metadata at index.
"""
y, x = self.chips_metadata[index]
img, transform, crs = self._load_all_imagery(self.bands)
labels = self._load_mask(transform)
img = img[:, :, y : y + self.chip_size, x : x + self.chip_size]
labels = labels[y : y + self.chip_size, x : x + self.chip_size]
sample = {
"image": img,
"mask": labels,
"x": torch.tensor(x),
"y": torch.tensor(y),
"transform": transform,
"crs": crs,
}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def __len__(self) -> int:
"""Return the number of chips in the dataset.
Returns:
length of the dataset
"""
return len(self.chips_metadata)
def _validate_bands(self, bands: Tuple[str, ...]) -> None:
"""Validate list of bands.
Args:
bands: user-provided tuple of bands to load
Raises:
AssertionError: if ``bands`` is not a tuple
ValueError: if an invalid band name is provided
"""
assert isinstance(bands, tuple), "The list of bands must be a tuple"
for band in bands:
if band not in self.ALL_BANDS:
raise ValueError(f"'{band}' is an invalid band name.")
@lru_cache(maxsize=128)
def _load_all_imagery(
self, bands: Tuple[str, ...] = ALL_BANDS
) -> Tuple[Tensor, rasterio.Affine, CRS]:
"""Load all the imagery (across time) for the dataset.
Optionally allows for subsetting of the bands that are loaded.
Args:
bands: tuple of bands to load
Returns:
imagery of shape (70, number of bands, 1186, 1122) where 70 is the number
of points in time, 1186 is the tile height, and 1122 is the tile width
rasterio affine transform, mapping pixel coordinates to geo coordinates
coordinate reference system of transform
"""
if self.verbose:
print("Loading all imagery")
img = torch.zeros(
len(self.dates),
len(bands),
self.tile_height,
self.tile_width,
dtype=torch.float32,
)
for date_index, date in enumerate(self.dates):
single_scene, transform, crs = self._load_single_scene(date, self.bands)
img[date_index] = single_scene
return img, transform, crs
@lru_cache(maxsize=128)
def _load_single_scene(
self, date: str, bands: Tuple[str, ...]
) -> Tuple[Tensor, rasterio.Affine, CRS]:
"""Load the imagery for a single date.
Optionally allows for subsetting of the bands that are loaded.
Args:
date: date of the imagery to load
bands: bands to load
Returns:
Tensor containing a single image tile, rasterio affine transform,
mapping pixel coordinates to geo coordinates, and coordinate
reference system of transform.
Raises:
AssertionError: if ``date`` is invalid
"""
assert date in self.dates
if self.verbose:
print(f"Loading imagery at {date}")
img = torch.zeros(
len(bands), self.tile_height, self.tile_width, dtype=torch.float32
)
for band_index, band_name in enumerate(self.bands):
filepath = os.path.join(
self.root,
"ts_cashew_benin_source",
f"ts_cashew_benin_source_00_{date}",
f"{band_name}.tif",
)
with rasterio.open(filepath) as src:
transform = src.transform # same transform for every bands
crs = src.crs
array = src.read().astype(np.float32)
img[band_index] = torch.from_numpy(array)
return img, transform, crs
@lru_cache()
def _load_mask(self, transform: rasterio.Affine) -> Tensor:
"""Rasterizes the dataset's labels (in geojson format)."""
# Create a mask layer out of the geojson
mask_geojson_fn = os.path.join(
self.root, "ts_cashew_benin_labels", "_common", "labels.geojson"
)
with open(mask_geojson_fn) as f:
geojson = json.load(f)
labels = [
(feature["geometry"], feature["properties"]["class"])
for feature in geojson["features"]
]
mask_data = rasterio.features.rasterize(
labels,
out_shape=(self.tile_height, self.tile_width),
fill=0, # nodata value
transform=transform,
all_touched=False,
dtype=np.uint8,
)
mask = torch.from_numpy(mask_data).long()
return mask
def _check_integrity(self) -> bool:
"""Check integrity of dataset.
Returns:
True if dataset files are found and/or MD5s match, else False
"""
images: bool = check_integrity(
os.path.join(self.root, self.image_meta["filename"]),
self.image_meta["md5"] if self.checksum else None,
)
targets: bool = check_integrity(
os.path.join(self.root, self.target_meta["filename"]),
self.target_meta["md5"] if self.checksum else None,
)
return images and targets
def _download(self, api_key: Optional[str] = None) -> None:
"""Download the dataset and extract it.
Args:
api_key: a RadiantEarth MLHub API key to use for downloading the dataset
Raises:
RuntimeError: if download doesn't work correctly or checksums don't match
"""
if self._check_integrity():
print("Files already downloaded and verified")
return
download_radiant_mlhub_dataset(self.dataset_id, self.root, api_key)
image_archive_path = os.path.join(self.root, self.image_meta["filename"])
target_archive_path = os.path.join(self.root, self.target_meta["filename"])
for fn in [image_archive_path, target_archive_path]:
extract_archive(fn, self.root)
def plot(
self,
sample: Dict[str, Tensor],
show_titles: bool = True,
time_step: int = 0,
suptitle: Optional[str] = None,
) -> plt.Figure:
"""Plot a sample from the dataset.
Args:
sample: a sample returned by :meth:`__getitem__`
show_titles: flag indicating whether to show titles above each panel
time_step: time step at which to access image, beginning with 0
suptitle: optional string to use as a suptitle
Returns:
a matplotlib Figure with the rendered sample
Raises:
ValueError: if the RGB bands are not included in ``self.bands``
.. versionadded:: 0.2
"""
rgb_indices = []
for band in self.RGB_BANDS:
if band in self.bands:
rgb_indices.append(self.bands.index(band))
else:
raise ValueError("Dataset doesn't contain some of the RGB bands")
num_time_points = sample["image"].shape[0]
assert time_step < num_time_points
image = np.rollaxis(sample["image"][time_step, rgb_indices].numpy(), 0, 3)
image = np.clip(image / 3000, 0, 1)
mask = sample["mask"].numpy()
num_panels = 2
showing_predictions = "prediction" in sample
if showing_predictions:
predictions = sample["prediction"].numpy()
num_panels += 1
fig, axs = plt.subplots(ncols=num_panels, figsize=(4 * num_panels, 4))
axs[0].imshow(image)
axs[0].axis("off")
if show_titles:
axs[0].set_title(f"t={time_step}")
axs[1].imshow(mask, vmin=0, vmax=6, interpolation="none")
axs[1].axis("off")
if show_titles:
axs[1].set_title("Mask")
if showing_predictions:
axs[2].imshow(predictions, vmin=0, vmax=6, interpolation="none")
axs[2].axis("off")
if show_titles:
axs[2].set_title("Predictions")
if suptitle is not None:
plt.suptitle(suptitle)
return fig
| 30.97551
| 88
| 0.579787
|
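A hedged usage sketch for the dataset class above; it assumes torchgeo is installed and that the two archives are already present under root (otherwise pass download=True together with a Radiant MLHub API key):
from torchgeo.datasets import BeninSmallHolderCashews

ds = BeninSmallHolderCashews(root="data", chip_size=256, stride=128,
                             bands=("B04", "B03", "B02"), download=False)
print(len(ds))                 # number of (y, x) chips tiled over the scene
sample = ds[0]                 # dict with "image", "mask", "x", "y", "transform", "crs"
print(sample["image"].shape)   # (time steps, bands, chip_size, chip_size)
fig = ds.plot(sample, time_step=0, suptitle="First chip")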
98458209f4c94cabac7a6153419d6373f308f9fd
| 13,417
|
py
|
Python
|
api/views.py
|
noutrela/jaikuenginepatch
|
ab723e5480f36c471faa8bca4c65c2f7487f0365
|
[
"Apache-2.0"
] | 1
|
2016-05-09T10:35:57.000Z
|
2016-05-09T10:35:57.000Z
|
api/views.py
|
noutrela/jaikuenginepatch
|
ab723e5480f36c471faa8bca4c65c2f7487f0365
|
[
"Apache-2.0"
] | null | null | null |
api/views.py
|
noutrela/jaikuenginepatch
|
ab723e5480f36c471faa8bca4c65c2f7487f0365
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import StringIO
from django import http
from django import template
from django.conf import settings
from django.core import serializers
from django.template import loader
import simplejson
from google.appengine.ext import db
from api import xmlrpc
from jaikucommon import api
from jaikucommon import decorator
from jaikucommon import exception
from jaikucommon import im
from jaikucommon import legacy
from jaikucommon import messages
from jaikucommon import oauth_util
from jaikucommon import sms
from jaikucommon import user
from jaikucommon import util
from jaikucommon import validate
from jaikucommon import views as common_views
from jaikucommon.protocol import xmpp
from jaikucommon.protocol import sms as sms_protocol
_XML_RPC_DISPATCHER = xmlrpc.XmlRpcDispatcher(api.PublicApi.methods)
@decorator.login_required
def api_keys(request):
# special case this because we want to redirect to the edit page
if 'oauth_generate_consumer' in request.POST:
action = 'oauth_generate_consumer'
(called, rv) = common_views.call_api_from_request(request, action)
return util.RedirectFlash(rv.url(), messages.flash(action))
handled = common_views.handle_view_action(
request,
{}
)
if handled:
return handled
  # Get list of consumer tokens for this actor
consumer_tokens = api.oauth_get_actor_consumers(request.user,
request.user.nick)
# TODO(termie): Get list of access tokens this actor has given others
access_tokens = []
# for templates
full_page = 'Keys'
page = 'keys'
area = 'api'
c = template.RequestContext(request, locals())
t = loader.get_template('keys.html')
return http.HttpResponse(t.render(c))
@decorator.login_required
def api_key(request, consumer_key):
handled = common_views.handle_view_action(
request,
{
'oauth_consumer_delete': '/api/keys',
'oauth_consumer_update': request.path,
}
)
if handled:
return handled
consumer_token_ref = api.oauth_get_consumer(request.user, consumer_key)
# for templates
full_page = 'Keys / %s' % consumer_key
page = 'key'
area = 'api'
OAUTH_WEB = 'web'
OAUTH_DESKTOP = 'desktop'
OAUTH_MOBILE = 'mobile'
c = template.RequestContext(request, locals())
t = loader.get_template('key.html')
return http.HttpResponse(t.render(c))
@decorator.login_required
def api_key_legacy(request):
if not settings.API_ALLOW_LEGACY_AUTH:
raise http.Http404()
key = legacy.generate_personal_key(request.user)
return http.HttpResponse(key)
def api_doc(request, doc):
content_template = loader.get_template('built_%s.html' % doc)
content = content_template.render(template.Context())
# for templates
full_page = 'Documentation'
page = 'docs'
area = 'api'
c = template.RequestContext(request, locals())
t = loader.get_template('doc.html')
return http.HttpResponse(t.render(c))
def api_docs(request):
# get the list of api methods for the index
methods = api.PublicApi.methods.keys()
api_methods = {}
for m in methods:
parts = m.split('_')
category = parts[0]
api_methods.setdefault(category, [])
api_methods[category].append(m)
api_methods = api_methods.items()
api_methods.sort()
# for templates
full_page = 'Documentation'
page = 'docs'
area = 'api'
c = template.RequestContext(request, locals())
t = loader.get_template('docs.html')
return http.HttpResponse(t.render(c))
@decorator.login_required
def api_tokens(request):
"""Show the user the set of tokens currently enabled, and allow them to
disable/delete them.
"""
handled = common_views.handle_view_action(
request,
{
'oauth_revoke_access_token': '/api/tokens',
}
)
if handled:
return handled
consumer_tokens = api.oauth_get_actor_tokens(request.user,
request.user.nick)
# for templates
full_page = 'Tokens'
page = 'tokens'
area = 'api'
c = template.RequestContext(request, locals())
t = loader.get_template('tokens.html')
return http.HttpResponse(t.render(c))
# OAuth stuff
def api_request_token(request):
"""
checks that the request is well formed
makes sure the consumer is valid
makes a new request token
returns the request token & secret
"""
token = oauth_util.handle_fetch_request_token(request)
return http.HttpResponse(token.to_string())
@decorator.login_required
def api_authorize(request):
"""
checks on the request token provided or ask the user enter one
allows the user to authorize this
if consumer style is web and a callback is provided redirect to it
otherwise suggest that the user notify their application that authorization
has completed
"""
oauth_token = request.REQUEST.get('oauth_token', None)
if not oauth_token:
# please enter token page
pass
oauth_token_ref = api.oauth_get_request_token(api.ROOT, oauth_token)
if not oauth_token_ref:
raise Exception("bad token")
oauth_consumer_ref = api.oauth_get_consumer(api.ROOT,
oauth_token_ref.consumer)
if not oauth_consumer_ref:
raise Exception("bad consumer")
if "active" != oauth_consumer_ref.status:
raise Exception("inactive consumer")
perms = request.REQUEST.get('perms', 'read')
if request.POST:
# we posted to this page to authorize
# TODO verify nonce
validate.nonce(request, "authorize_token")
api.oauth_authorize_request_token(api.ROOT, oauth_token_ref.key_,
actor=request.user.nick, perms=perms)
oauth_callback = request.POST.get("oauth_callback", None)
if oauth_callback and oauth_consumer_ref.type == "web":
return http.HttpResponseRedirect(oauth_callback)
c = template.RequestContext(request, locals())
t = loader.get_template('authorized.html')
return http.HttpResponse(t.render(c))
perms_pretty = {'read': 'view',
'write': 'view and update',
'delete': 'view, update and delete'}[perms]
c = template.RequestContext(request, locals())
t = loader.get_template('authorize.html')
return http.HttpResponse(t.render(c))
def api_access_token(request):
"""
checks that the request is well formed
checks that the request token provided has been authorized
if it has generate a new access token and return it
"""
token = oauth_util.handle_fetch_access_token(request)
return http.HttpResponse(token.to_string())
# Interface
def api_call(request, format="json"):
""" the public api
attempts to validate a request as a valid oauth request then
builds the appropriate api_user object and tries to dispatch
to the provided method
"""
servertime = api.utcnow()
try:
kwargs = oauth_util.get_method_kwargs(request)
json_params = kwargs.pop('json_params', None)
if json_params:
parsed = simplejson.loads(json_params)
# Turn the keys from unicode to str so that they can be used as method
# parameters.
kwargs.update(
dict([(str(k), v) for k, v in parsed.iteritems()]))
method = kwargs.pop('method', '').replace('.', '_')
if method == 'presence_send':
method = 'post'
if not method:
raise exception.ApiException(exception.NO_METHOD, "No method specified")
# Allows us to turn off authentication for testing purposes
if not settings.API_DISABLE_VERIFICATION:
api_user = request.user
else:
api_user = api.ROOT
method_ref = api.PublicApi.get_method(method, api_user)
if not method_ref:
raise exception.ApiException(exception.INVALID_METHOD,
'Invalid method: %s' % method)
if not api_user:
raise exception.ApiException(0x00, 'Invalid API user')
if getattr(api_user, 'legacy', None) and method == 'post':
kwargs['nick'] = api_user.nick
rv = method_ref(api_user, **kwargs)
if rv is None:
raise exception.ApiException(0x00, 'method %s returned None'%(method))
return render_api_response(rv, format, servertime=servertime)
except oauth_util.OAuthError, e:
exc = exception.ApiException(exception.OAUTH_ERROR, e.message)
return render_api_response(exc, format)
except exception.ApiException, e:
return render_api_response(e, format)
except TypeError, e:
exc = exception.ApiException(exception.INVALID_ARGUMENTS, str(e))
return render_api_response(exc, format)
except:
exception.handle_exception(request)
return render_api_response(request.errors[0], format)
# some error happened
return render_api_response(request.errors[0], format)
def api_xmlrpc(request):
return _XML_RPC_DISPATCHER.dispatch(request)
@decorator.debug_only
def api_loaddata(request):
""" this is a debug and testing api used to fill a test site with
initial data from fixtures, it should not be accessible on a non-debug
instance
"""
format = request.POST.get('format', 'json')
fixture = request.POST.get('fixture', '[]')
fixture_ref = StringIO.StringIO(fixture)
def _loaddata():
try:
count = 0
models = set()
objects = serializers.deserialize(format, fixture_ref)
for obj in objects:
count += 1
models.add(obj.object.__class__)
real_obj = obj.object
real_obj.put()
return count
except Exception, e:
raise
#count = db.run_in_transaction(_loaddata)
count = _loaddata()
return http.HttpResponse("Loaded %s items from fixture" % count)
@decorator.debug_only
def api_cleardata(request):
""" this is a debug api for testing, specifically it clears data from the
datastore, it should only be accessible from a debug instance
"""
kind = request.GET.get('kind', 'InboxEntry')
c = 0
from google.appengine.api import datastore
from google.appengine.runtime.apiproxy_errors import DeadlineExceededError
try:
q = datastore.Query(kind)
for o in q.Run():
c += 1
logging.debug(o)
datastore.Delete(o.key())
except Exception, e:
logging.error("Deadline Errorr %s" % e)
return http.HttpResponse("kind=%s&count=%s" % (kind, c))
def api_vendor_sms_receive(request, vendor_secret=None):
""" a one off implementation for receiving sms from IPX """
if vendor_secret != settings.SMS_VENDOR_SECRET:
raise exception.ApiException(0x00, "Invalid secret")
sms_message = sms_protocol.SmsMessage.from_request(request)
sms_service = sms.SmsService(sms_protocol.SmsConnection())
sms_service.init_handlers()
rv = sms_service.handle_message(sms_message.sender,
sms_message.target,
sms_message.message)
return http.HttpResponse(rv)
def api_vendor_xmpp_receive(request):
"""Receive any XMPP message, at the moment it expects the message to
already be parsed."""
if not settings.IM_ENABLED:
raise http.Http404()
xmpp_message = xmpp.XmppMessage.from_request(request)
if (settings.IM_TEST_ONLY and
xmpp_message.sender.base() not in settings.IM_TEST_JIDS):
raise http.Http404()
im_service = im.ImService(xmpp.XmppConnection())
im_service.init_handlers()
rv = im_service.handle_message(xmpp_message.sender,
xmpp_message.target,
xmpp_message.message)
return http.HttpResponse(rv)
def api_vendor_queue_process(request):
""" process a queue item, redirect to self if there were more """
secret = request.REQUEST.get('secret')
if secret != settings.QUEUE_VENDOR_SECRET:
raise exception.ApiException(0x00, "Invalid secret")
try:
rv = api.task_process_any(api.ROOT)
if rv:
return http.HttpResponseRedirect(request.get_full_path())
except exception.ApiNoTasks:
pass
return http.HttpResponse('')
def _model_to_dict(rv):
# TODO(mikie): This must be replaced with a to_dict() on the model object so
# that we can remove/add fields and change representations if needed.
o = {}
if not rv:
return o
if isinstance(rv, list):
o = []
for item in rv:
o.append(_model_to_dict(item))
return o
for prop in rv.properties().keys():
value = getattr(rv, prop)
if (isinstance(value, datetime.datetime)):
value = str(value)
o[prop] = value
return o
def render_api_response(rv, format="json", servertime=None):
if isinstance(rv, exception.ApiException):
o = {"status": "error"}
o.update(rv.to_dict())
elif isinstance(rv, exception.ValidationError):
o = {"status": "error", "msg": str(rv)}
else:
o = {"status": "ok"}
# TODO make this into something real
rv = {"rv": rv.to_api()}
o.update(rv)
if servertime:
o['servertime'] = str(servertime)
return http.HttpResponse(simplejson.dumps(o))
| 29.815556
| 79
| 0.701945
|
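render_api_response above always wraps results in a small JSON envelope; a standalone sketch of the two shapes it produces (plain json stands in for simplejson, and the error fields are illustrative because ApiException.to_dict() is not shown here):
import json

ok_envelope = {"status": "ok", "rv": {"entry": "example payload"},
               "servertime": "2009-06-01 12:00:00"}
error_envelope = {"status": "error", "msg": "example validation message"}

print(json.dumps(ok_envelope))
print(json.dumps(error_envelope))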
51c64d0dc842d8684b66b99014ee7328a8661ff1
| 2,381
|
py
|
Python
|
src/position/close.py
|
bbeale/v20-python-samples
|
99af42ebe927d81ded08884d775c7aeffe2b6149
|
[
"MIT"
] | 257
|
2016-11-03T16:41:01.000Z
|
2022-03-31T03:07:39.000Z
|
src/position/close.py
|
bbeale/v20-python-samples
|
99af42ebe927d81ded08884d775c7aeffe2b6149
|
[
"MIT"
] | 19
|
2016-12-19T04:52:18.000Z
|
2021-06-06T18:30:35.000Z
|
src/position/close.py
|
bbeale/v20-python-samples
|
99af42ebe927d81ded08884d775c7aeffe2b6149
|
[
"MIT"
] | 149
|
2016-11-19T14:53:59.000Z
|
2022-03-18T20:38:27.000Z
|
#!/usr/bin/env python
import argparse
import common.config
import common.view
import common.args
from order.view import print_order_create_response_transactions
def main():
"""
Close an open Trade in an Account
"""
parser = argparse.ArgumentParser()
#
# Add the command line argument to parse to the v20 config
#
common.config.add_argument(parser)
parser.add_argument(
"instrument",
type=common.args.instrument,
help=(
"The Instrument of the Position to close. If prepended "
"with an '@', this will be interpreted as a client Trade ID"
)
)
parser.add_argument(
"--long-units",
default=None,
help=(
"The amount of the long Position to close. Either the string "
"'ALL' indicating a full Position close, the string 'NONE', or "
"the number of units of the Position to close"
)
)
parser.add_argument(
"--short-units",
default=None,
help=(
"The amount of the short Position to close. Either the string "
"'ALL' indicating a full Position close, the string 'NONE', or "
"the number of units of the Position to close"
)
)
args = parser.parse_args()
account_id = args.config.active_account
#
# Create the api context based on the contents of the
# v20 config file
#
api = args.config.create_context()
if args.long_units is not None and args.short_units is not None:
response = api.position.close(
account_id,
args.instrument,
longUnits=args.long_units,
shortUnits=args.short_units
)
elif args.long_units is not None:
response = api.position.close(
account_id,
args.instrument,
longUnits=args.long_units
)
elif args.short_units is not None:
response = api.position.close(
account_id,
args.instrument,
shortUnits=args.short_units
)
else:
print("No units have been provided")
return
print(
"Response: {} ({})\n".format(
response.status,
response.reason
)
)
print_order_create_response_transactions(response)
if __name__ == "__main__":
main()
| 24.802083
| 76
| 0.589668
|
afbe20c818836adc5f843a79b2b9a95c32bdd04e
| 3,350
|
py
|
Python
|
education_math_homework_generator/gen_number_line.py
|
sourcery-ai-bot/education_math_homework_generator
|
e5545dcee4cca18f2480b88f9019bbff208202f6
|
[
"MIT"
] | null | null | null |
education_math_homework_generator/gen_number_line.py
|
sourcery-ai-bot/education_math_homework_generator
|
e5545dcee4cca18f2480b88f9019bbff208202f6
|
[
"MIT"
] | null | null | null |
education_math_homework_generator/gen_number_line.py
|
sourcery-ai-bot/education_math_homework_generator
|
e5545dcee4cca18f2480b88f9019bbff208202f6
|
[
"MIT"
] | null | null | null |
import argparse
from education_math_homework_generator.util import convert_latex_to_pdf
from education_math_homework_generator.util import remove_temporary_files
# Credits:
# example code for numberlines from stackexchange
# https://tex.stackexchange.com/questions/148252/help-drawing-a-very-simple-number-line-using-tikz
def generate_number_lines(number_of_lines=6, start=0, end=20):
"""
Generates number lines as a tool for practicing mathematics such as addition or subtraction.
:param number_of_lines: Specify the number of lines to have on the page
:param start: start value for the number line as an integer
:param end: end value for the number line as an integer
:return: contents of the latex document as a string
"""
lines = [r'\documentclass[letterpaper]{article}',
r'\usepackage{geometry}',
r'\geometry{landscape,a4paper,total={170mm,257mm},left=10mm,right=10mm,top=30mm}',
r'\usepackage{tikz}',
r'\usepackage{amsmath}',
r'\usetikzlibrary{arrows}',
r'\begin{document}',
r'\begin{LARGE}',
r'']
numbers = ','.join([str(x) for x in range(start, end + 1)])
for _ in range(number_of_lines):
lines.append(r'')
lines.append(r'{\Large $-$}')
lines.append(r'\begin{tikzpicture}')
lines.append(r'\draw[latex-latex, thick] ' + '({},0) -- ({},0) ;'.format(start - 1, end + 1))
lines.append(r'\foreach \x in {' + numbers + '}')
lines.append(r'\draw[shift={(\x,0)},color=black, thick] (0pt,3pt) -- (0pt,-3pt);')
lines.append(r'\foreach \x in {' + numbers + '}')
lines.append(r'\draw[shift={(\x,0)},color=black, thick] (0pt,0pt) -- (0pt,-3pt) node[below] ')
lines.append(r'{\textbf{\x}};')
lines.append(r'\end{tikzpicture}')
lines.append(r'{\Large $+$}')
lines.append(r'\\')
lines.append(r'\vspace*{50px}')
lines.append(r'')
lines.append(r'\end{LARGE}')
lines.append(r'\end{document}')
return '\n'.join(lines)
def parse_arguments():
"""
Parse user arguments to modify how the document is generated for number lines
:return: parsed args passed by the user or defaults defined below
"""
parser = argparse.ArgumentParser(description='Generate a numberline to practice Addition/Subtraction')
parser.add_argument('--start', default=0, type=int, help='integer to start the number line')
parser.add_argument('--end', default=20, type=int, help='integer to end the number line')
parser.add_argument('--numlines', default=5, metavar='N', type=int, help='number of lines to generate')
parser.add_argument('--filename', default='numberline_01.tex', help='filename to generate')
return parser.parse_args()
def generate_number_lines_pdf(args):
"""
Takes the parsed arguments, generates appropriate latex string, converts it a pdf, and cleans up any temporary files
:param args: parsed arguments that define how to generate the document
"""
contents = generate_number_lines(number_of_lines=args.numlines, start=args.start, end=args.end)
convert_latex_to_pdf(args.filename, contents=contents, view=True)
remove_temporary_files(args.filename)
if __name__ == "__main__":
generate_number_lines_pdf(parse_arguments())
| 43.506494
| 120
| 0.668358
|
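A minimal usage sketch for generate_number_lines above; it only builds the LaTeX source, so no TeX toolchain is required, and the output file name is a placeholder:
from education_math_homework_generator.gen_number_line import generate_number_lines

latex_source = generate_number_lines(number_of_lines=3, start=0, end=10)
with open("numberline_practice.tex", "w") as handle:   # hypothetical output path
    handle.write(latex_source)
print(latex_source.splitlines()[0])   # \documentclass[letterpaper]{article}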
945a9863cf38f8fd1f7de6f447f9e23dc2c34c0a
| 8,424
|
py
|
Python
|
tests/components/smhi/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/smhi/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/smhi/test_config_flow.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for SMHI config flow."""
from unittest.mock import Mock, patch
from smhi.smhi_lib import Smhi as SmhiApi, SmhiForecastException
from openpeerpower.components.smhi import config_flow
from openpeerpower.const import CONF_LATITUDE, CONF_LONGITUDE
# pylint: disable=protected-access
async def test_openpeerpower_location_exists() -> None:
"""Test if Open Peer Power location exists it should return True."""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
with patch.object(flow, "_check_location", return_value=True):
# Test exists
opp.config.location_name = "Home"
opp.config.latitude = 17.8419
opp.config.longitude = 59.3262
assert await flow._openpeerpower_location_exists() is True
# Test not exists
opp.config.location_name = None
opp.config.latitude = 0
opp.config.longitude = 0
assert await flow._openpeerpower_location_exists() is False
async def test_name_in_configuration_exists() -> None:
"""Test if home location exists in configuration."""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
# Test exists
opp.config.location_name = "Home"
opp.config.latitude = 17.8419
opp.config.longitude = 59.3262
# Check not exists
with patch.object(
config_flow,
"smhi_locations",
return_value={"test": "something", "test2": "something else"},
):
assert flow._name_in_configuration_exists("no_exist_name") is False
# Check exists
with patch.object(
config_flow,
"smhi_locations",
return_value={"test": "something", "name_exist": "config"},
):
assert flow._name_in_configuration_exists("name_exist") is True
def test_smhi_locations(opp) -> None:
"""Test return empty set."""
locations = config_flow.smhi_locations(opp)
assert not locations
async def test_show_config_form() -> None:
"""Test show configuration form."""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
result = await flow._show_config_form()
assert result["type"] == "form"
assert result["step_id"] == "user"
async def test_show_config_form_default_values() -> None:
"""Test show configuration form."""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
result = await flow._show_config_form(name="test", latitude="65", longitude="17")
assert result["type"] == "form"
assert result["step_id"] == "user"
async def test_flow_with_home_location(opp) -> None:
"""Test config flow .
Tests the flow when a default location is configured
then it should return a form with default values
"""
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
with patch.object(flow, "_check_location", return_value=True):
opp.config.location_name = "Home"
opp.config.latitude = 17.8419
opp.config.longitude = 59.3262
result = await flow.async_step_user()
assert result["type"] == "form"
assert result["step_id"] == "user"
async def test_flow_show_form() -> None:
"""Test show form scenarios first time.
Test when the form should show when no configurations exists
"""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
# Test show form when Open Peer Power config exists and
# home is already configured, then new config is allowed
with patch.object(
flow, "_show_config_form", return_value=None
) as config_form, patch.object(
flow, "_openpeerpower_location_exists", return_value=True
), patch.object(
config_flow,
"smhi_locations",
return_value={"test": "something", "name_exist": "config"},
):
await flow.async_step_user()
assert len(config_form.mock_calls) == 1
    # Test show form when the Open Peer Power config does not exist
    # and home is not configured
with patch.object(
flow, "_show_config_form", return_value=None
) as config_form, patch.object(
flow, "_openpeerpower_location_exists", return_value=False
), patch.object(
config_flow,
"smhi_locations",
return_value={"test": "something", "name_exist": "config"},
):
await flow.async_step_user()
assert len(config_form.mock_calls) == 1
async def test_flow_show_form_name_exists() -> None:
"""Test show form if name already exists.
Test when the form should show when no configurations exists
"""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
test_data = {"name": "home", CONF_LONGITUDE: "0", CONF_LATITUDE: "0"}
    # Test that the form is shown again when the chosen name
    # already exists in the configuration
with patch.object(
flow, "_show_config_form", return_value=None
) as config_form, patch.object(
flow, "_name_in_configuration_exists", return_value=True
), patch.object(
config_flow,
"smhi_locations",
return_value={"test": "something", "name_exist": "config"},
), patch.object(
flow, "_check_location", return_value=True
):
await flow.async_step_user(user_input=test_data)
assert len(config_form.mock_calls) == 1
assert len(flow._errors) == 1
async def test_flow_entry_created_from_user_input() -> None:
"""Test that create data from user input.
Test when the form should show when no configurations exists
"""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
test_data = {"name": "home", CONF_LONGITUDE: "0", CONF_LATITUDE: "0"}
    # Test that an entry is created when the user_input name does not exist
with patch.object(
flow, "_show_config_form", return_value=None
) as config_form, patch.object(
flow, "_name_in_configuration_exists", return_value=False
), patch.object(
flow, "_openpeerpower_location_exists", return_value=False
), patch.object(
config_flow,
"smhi_locations",
return_value={"test": "something", "name_exist": "config"},
), patch.object(
flow, "_check_location", return_value=True
):
result = await flow.async_step_user(user_input=test_data)
assert result["type"] == "create_entry"
assert result["data"] == test_data
assert not config_form.mock_calls
async def test_flow_entry_created_user_input_faulty() -> None:
"""Test that create data from user input and are faulty.
Test when the form should show when user puts faulty location
in the config gui. Then the form should show with error
"""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
test_data = {"name": "home", CONF_LONGITUDE: "0", CONF_LATITUDE: "0"}
    # Test that the form is shown again when the location check fails
with patch.object(flow, "_check_location", return_value=True), patch.object(
flow, "_show_config_form", return_value=None
) as config_form, patch.object(
flow, "_name_in_configuration_exists", return_value=False
), patch.object(
flow, "_openpeerpower_location_exists", return_value=False
), patch.object(
config_flow,
"smhi_locations",
return_value={"test": "something", "name_exist": "config"},
), patch.object(
flow, "_check_location", return_value=False
):
await flow.async_step_user(user_input=test_data)
assert len(config_form.mock_calls) == 1
assert len(flow._errors) == 1
async def test_check_location_correct() -> None:
"""Test check location when correct input."""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
with patch.object(
config_flow.aiohttp_client, "async_get_clientsession"
), patch.object(SmhiApi, "async_get_forecast", return_value=None):
assert await flow._check_location("58", "17") is True
async def test_check_location_faulty() -> None:
"""Test check location when faulty input."""
opp = Mock()
flow = config_flow.SmhiFlowHandler()
flow.opp = opp
with patch.object(
config_flow.aiohttp_client, "async_get_clientsession"
), patch.object(SmhiApi, "async_get_forecast", side_effect=SmhiForecastException()):
assert await flow._check_location("58", "17") is False
| 31.315985
| 88
| 0.668803
|
f5c2d0c2ae220d85ed56bf635e6ef93b7ba35fce
| 15,634
|
py
|
Python
|
neutron/tests/unit/agent/linux/test_tc_lib.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/agent/linux/test_tc_lib.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
neutron/tests/unit/agent/linux/test_tc_lib.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2016 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.services.qos import constants as qos_consts
from pyroute2.netlink import rtnl
from neutron.agent.linux import tc_lib
from neutron.common import constants
from neutron.common import utils
from neutron.privileged.agent.linux import tc_lib as priv_tc_lib
from neutron.tests import base
DEVICE_NAME = "tap_device"
KERNEL_HZ_VALUE = 1000
BW_LIMIT = 2000 # [kbps]
BURST = 100 # [kbit]
LATENCY = 50 # [ms]
TC_FILTERS_OUTPUT = (
'filter protocol all pref 49152 u32 \nfilter protocol all pref '
'49152 u32 fh 800: ht divisor 1 \nfilter protocol all pref 49152 u32 fh '
'800::800 order 2048 key ht 800 \n match 00000000/00000000 at 0\n '
'police 0x1e rate %(bw)skbit burst %(burst)skbit mtu 2Kb action \n'
'drop overhead 0b \n ref 1 bind 1'
) % {'bw': BW_LIMIT, 'burst': BURST}
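# Sample "tc filter" output with the configured rate and burst interpolated,
# presumably kept as reference data for the filter-parsing tests.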
class BaseUnitConversionTest(object):
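    # base_unit is supplied by the concrete subclasses below
    # (constants.SI_BASE and constants.IEC_BASE).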
def test_convert_to_kilobits_bare_value(self):
value = "1000"
expected_value = 8 # kbit
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_bytes_value(self):
value = "1000b"
expected_value = 8 # kbit
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_bits_value(self):
value = "1000bit"
expected_value = utils.bits_to_kilobits(1000, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_megabytes_value(self):
value = "1m"
expected_value = utils.bits_to_kilobits(
self.base_unit ** 2 * 8, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_megabits_value(self):
value = "1mbit"
expected_value = utils.bits_to_kilobits(
self.base_unit ** 2, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_bytes_wrong_unit(self):
value = "1Zbit"
self.assertRaises(
tc_lib.InvalidUnit,
tc_lib.convert_to_kilobits, value, self.base_unit
)
class TestSIUnitConversions(BaseUnitConversionTest, base.BaseTestCase):
base_unit = constants.SI_BASE
class TestIECUnitConversions(BaseUnitConversionTest, base.BaseTestCase):
base_unit = constants.IEC_BASE
class TestTcCommand(base.BaseTestCase):
def setUp(self):
super(TestTcCommand, self).setUp()
self.tc = tc_lib.TcCommand(DEVICE_NAME, KERNEL_HZ_VALUE)
self.mock_list_tc_qdiscs = mock.patch.object(tc_lib,
'list_tc_qdiscs').start()
self.mock_add_tc_qdisc = mock.patch.object(tc_lib,
'add_tc_qdisc').start()
self.mock_delete_tc_qdisc = mock.patch.object(
tc_lib, 'delete_tc_qdisc').start()
self.mock_list_tc_filters = mock.patch.object(
tc_lib, 'list_tc_filters').start()
self.mock_add_tc_filter_policy = mock.patch.object(
tc_lib, 'add_tc_filter_policy').start()
def test_check_kernel_hz_lower_then_zero(self):
self.assertRaises(
tc_lib.InvalidKernelHzValue,
tc_lib.TcCommand, DEVICE_NAME, 0
)
self.assertRaises(
tc_lib.InvalidKernelHzValue,
tc_lib.TcCommand, DEVICE_NAME, -100
)
def test_get_filters_bw_limits(self):
self.mock_list_tc_filters.return_value = [{'rate_kbps': BW_LIMIT,
'burst_kb': BURST}]
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertEqual(BW_LIMIT, bw_limit)
self.assertEqual(BURST, burst_limit)
def test_get_filters_bw_limits_no_filters(self):
self.mock_list_tc_filters.return_value = []
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertIsNone(bw_limit)
self.assertIsNone(burst_limit)
def test_get_filters_bw_limits_no_rate_info(self):
self.mock_list_tc_filters.return_value = [{'other_values': 1}]
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertIsNone(bw_limit)
self.assertIsNone(burst_limit)
def test_get_tbf_bw_limits(self):
self.mock_list_tc_qdiscs.return_value = [
{'qdisc_type': 'tbf', 'max_kbps': BW_LIMIT, 'burst_kb': BURST}]
self.assertEqual((BW_LIMIT, BURST), self.tc.get_tbf_bw_limits())
def test_get_tbf_bw_limits_when_wrong_qdisc(self):
self.mock_list_tc_qdiscs.return_value = [{'qdisc_type': 'other_type'}]
self.assertEqual((None, None), self.tc.get_tbf_bw_limits())
def test_set_tbf_bw_limit(self):
self.tc.set_tbf_bw_limit(BW_LIMIT, BURST, LATENCY)
self.mock_add_tc_qdisc.assert_called_once_with(
DEVICE_NAME, 'tbf', parent='root', max_kbps=BW_LIMIT,
burst_kb=BURST, latency_ms=LATENCY, kernel_hz=self.tc.kernel_hz,
namespace=self.tc.namespace)
def test_update_filters_bw_limit(self):
self.tc.update_filters_bw_limit(BW_LIMIT, BURST)
self.mock_add_tc_qdisc.assert_called_once_with(
self.tc.name, 'ingress', namespace=self.tc.namespace)
self.mock_delete_tc_qdisc.assert_called_once_with(
self.tc.name, is_ingress=True, raise_interface_not_found=False,
raise_qdisc_not_found=False, namespace=self.tc.namespace)
self.mock_add_tc_filter_policy.assert_called_once_with(
self.tc.name, tc_lib.INGRESS_QDISC_ID, BW_LIMIT, BURST,
tc_lib.MAX_MTU_VALUE, 'drop', priority=49)
def test_delete_filters_bw_limit(self):
self.tc.delete_filters_bw_limit()
self.mock_delete_tc_qdisc.assert_called_once_with(
DEVICE_NAME, is_ingress=True, raise_interface_not_found=False,
raise_qdisc_not_found=False, namespace=self.tc.namespace)
def test_delete_tbf_bw_limit(self):
self.tc.delete_tbf_bw_limit()
self.mock_delete_tc_qdisc.assert_called_once_with(
DEVICE_NAME, parent='root', raise_interface_not_found=False,
raise_qdisc_not_found=False, namespace=self.tc.namespace)
def test_get_ingress_qdisc_burst_value_burst_not_none(self):
self.assertEqual(
BURST, self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, BURST)
)
def test_get_ingress_qdisc_burst_no_burst_value_given(self):
expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
self.assertEqual(
expected_burst,
self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, None)
)
def test_get_ingress_qdisc_burst_burst_value_zero(self):
expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
self.assertEqual(
expected_burst,
self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, 0)
)
class TcTestCase(base.BaseTestCase):
def setUp(self):
super(TcTestCase, self).setUp()
self.mock_add_tc_qdisc = mock.patch.object(
priv_tc_lib, 'add_tc_qdisc').start()
self.namespace = 'namespace'
def test_add_tc_qdisc_htb(self):
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='1:',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='1:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='2',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='2:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle='3:12',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='3:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle=4,
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='4:0',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root',
namespace=self.namespace)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb',
namespace=self.namespace)
self.mock_add_tc_qdisc.reset_mock()
tc_lib.add_tc_qdisc('device', 'htb', parent='root', handle=5)
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='htb', handle='5:0',
namespace=None)
self.mock_add_tc_qdisc.reset_mock()
def test_add_tc_qdisc_tbf(self):
tc_lib.add_tc_qdisc('device', 'tbf', parent='root', max_kbps=10000,
burst_kb=1500, latency_ms=70, kernel_hz=250,
namespace=self.namespace)
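        # _get_tbf_burst_value() returns kbit; "* 1024 / 8" converts that to
        # the byte value the pyroute2-level call expects (likewise the rate is
        # kbps * 128 and the latency is converted from ms to us below).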
burst = tc_lib._get_tbf_burst_value(10000, 1500, 70) * 1024 / 8
self.mock_add_tc_qdisc.assert_called_once_with(
'device', parent=rtnl.TC_H_ROOT, kind='tbf', rate=10000 * 128,
burst=burst, latency=70000, namespace=self.namespace)
def test_add_tc_qdisc_tbf_missing_arguments(self):
self.assertRaises(
qos_exc.TcLibQdiscNeededArguments, tc_lib.add_tc_qdisc,
'device', 'tbf', parent='root')
def test_add_tc_qdisc_wrong_qdisc_type(self):
self.assertRaises(qos_exc.TcLibQdiscTypeError, tc_lib.add_tc_qdisc,
mock.ANY, 'wrong_qdic_type_name')
def test_list_tc_qdiscs_htb(self):
qdisc = {'index': 2, 'handle': 327680, 'parent': 4294967295,
'attrs': (('TCA_KIND', 'htb'), )}
with mock.patch.object(priv_tc_lib, 'list_tc_qdiscs') as \
mock_list_tc_qdiscs:
mock_list_tc_qdiscs.return_value = tuple([qdisc])
qdiscs = tc_lib.list_tc_qdiscs('device',
namespace=self.namespace)
self.assertEqual(1, len(qdiscs))
self.assertEqual('root', qdiscs[0]['parent'])
self.assertEqual('5:0', qdiscs[0]['handle'])
self.assertEqual('htb', qdiscs[0]['qdisc_type'])
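    # tick_in_usec is patched below so the raw TBF buffer/rate/limit values
    # convert back to the expected max_kbps / burst_kb / latency_ms
    # deterministically.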
@mock.patch('pyroute2.netlink.rtnl.tcmsg.common.tick_in_usec', 15.625)
def test_list_tc_qdiscs_tbf(self):
tca_tbf_params = {'buffer': 9375000,
'rate': 320000,
'limit': 208000}
qdisc = {'index': 2, 'handle': 327681, 'parent': 4294967295,
'attrs': (
('TCA_KIND', 'tbf'),
('TCA_OPTIONS', {'attrs': (
('TCA_TBF_PARMS', tca_tbf_params), )}))
}
with mock.patch.object(priv_tc_lib, 'list_tc_qdiscs') as \
mock_list_tc_qdiscs:
mock_list_tc_qdiscs.return_value = tuple([qdisc])
qdiscs = tc_lib.list_tc_qdiscs('device',
namespace=self.namespace)
self.assertEqual(1, len(qdiscs))
self.assertEqual('root', qdiscs[0]['parent'])
self.assertEqual('5:1', qdiscs[0]['handle'])
self.assertEqual('tbf', qdiscs[0]['qdisc_type'])
self.assertEqual(2500, qdiscs[0]['max_kbps'])
self.assertEqual(1500, qdiscs[0]['burst_kb'])
self.assertEqual(50, qdiscs[0]['latency_ms'])
def test__get_tbf_burst_value_when_burst_bigger_then_minimal(self):
result = tc_lib._get_tbf_burst_value(BW_LIMIT, BURST, KERNEL_HZ_VALUE)
self.assertEqual(BURST, result)
def test__get_tbf_burst_value_when_burst_smaller_then_minimal(self):
result = tc_lib._get_tbf_burst_value(BW_LIMIT, 0, KERNEL_HZ_VALUE)
self.assertEqual(2, result)
class TcPolicyClassTestCase(base.BaseTestCase):
def setUp(self):
super(TcPolicyClassTestCase, self).setUp()
self.mock_add_tc_policy_class = mock.patch.object(
priv_tc_lib, 'add_tc_policy_class').start()
self.mock_list_tc_policy_classes = mock.patch.object(
priv_tc_lib, 'list_tc_policy_classes').start()
self.namespace = 'namespace'
def test_add_tc_policy_class(self):
tc_lib.add_tc_policy_class(
'device', 'root', '1:10', 'qdisc_type', min_kbps=1000,
max_kbps=2000, burst_kb=1600, namespace=self.namespace)
self.mock_add_tc_policy_class.assert_called_once_with(
'device', rtnl.TC_H_ROOT, '1:10', 'qdisc_type', rate=1000 * 128,
ceil=2000 * 128, burst=1600 * 128, namespace=self.namespace)
@mock.patch('pyroute2.netlink.rtnl.tcmsg.common.tick_in_usec', 15.625)
def test_list_tc_policy_classes(self):
htb_params = {'buffer': 12500000, 'ceil': 256000, 'rate': 192000}
self.mock_list_tc_policy_classes.return_value = tuple([
{'index': 3, 'handle': 65537, 'parent': 4294967295,
'attrs': (
('TCA_KIND', 'htb'),
('TCA_OPTIONS', {
'attrs': tuple([('TCA_HTB_PARMS', htb_params)])}))
}])
_class = tc_lib.list_tc_policy_class('device',
namespace=self.namespace)[0]
reference = {'device': 'device',
'index': 3,
'namespace': self.namespace,
'parent': 'root',
'classid': '1:1',
'qdisc_type': 'htb',
'min_kbps': 1500,
'max_kbps': 2000,
'burst_kb': 1200}
self.assertEqual(reference, _class)
class TcFilterTestCase(base.BaseTestCase):
def test__mac_to_pyroute2_keys(self):
mac = '01:23:45:67:89:ab'
offset = 10
keys = tc_lib._mac_to_pyroute2_keys(mac, offset)
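        # The MAC is split into a 4-byte u32 key at the given offset and a
        # 2-byte key (mask 0xffff0000) four bytes later.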
high = {'value': 0x1234567,
'mask': 0xffffffff,
'offset': 10,
'key': '0x1234567/0xffffffff+10'}
low = {'value': 0x89ab0000,
'mask': 0xffff0000,
'offset': 14,
'key': '0x89ab0000/0xffff0000+14'}
self.assertEqual(high, keys[0])
self.assertEqual(low, keys[1])
| 41.25066
| 78
| 0.634323
|
c53dcbf41c5e737532fc2e93ce562e0f35050f54
| 2,013
|
py
|
Python
|
python/misc/table-html.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 1
|
2019-11-23T10:44:58.000Z
|
2019-11-23T10:44:58.000Z
|
python/misc/table-html.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 8
|
2020-07-16T07:14:12.000Z
|
2020-10-14T17:25:33.000Z
|
python/misc/table-html.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 1
|
2019-11-23T10:45:00.000Z
|
2019-11-23T10:45:00.000Z
|
#!/usr/bin/env python
'''
Copyright (C) 2012 Bryan Maupin <bmaupincode@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
''' Takes an input text file and spits out an html table
'''
import sys
# whether or not the first row in the input file is a table header row
header_row = False
# separator between table cells
cell_separator = '\t'
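# Example (hypothetical) input with the default tab separator:
#   Name<TAB>Colour
#   Alice<TAB>purple
# Each line becomes one <tr> and each tab-separated field one <td>
# (or <th> for the first line when header_row is True).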
def main():
table_contents = []
if len(sys.argv) < 2:
sys.exit('USAGE: %s input_file' % (sys.argv[0]))
infile = open(sys.argv[1])
for line in infile:
# remove newline
line = line.strip()
# skip blank lines
if line == '':
continue
table_contents.append(line.split(cell_separator))
infile.close()
# print table_contents
    print(to_table(table_contents))
def to_table(table_contents):
table_html = '<table>\n'
if header_row:
table_html += ' <tr>\n'
for cell in table_contents.pop(0):
table_html += ' <th>%s</th>\n' % (cell)
table_html += ' </tr>\n'
while len(table_contents) > 0:
table_html += ' <tr>\n'
for cell in table_contents.pop(0):
table_html += ' <td>%s</td>\n' % (cell)
table_html += ' </tr>\n'
table_html += '</table>\n'
return table_html
# calls the main() function when the script runs
if __name__ == '__main__':
main()
| 25.807692
| 70
| 0.631396
|
a3a0f224f8b217e652d212f671acd8587a81c605
| 7,887
|
py
|
Python
|
test/test_rivescript_coverage_plugin.py
|
snoopyjc/rivescript_coverage_plugin
|
dab9a36c0c48e040909c487cc7d96fe2b60ef267
|
[
"MIT"
] | 1
|
2020-01-17T22:08:47.000Z
|
2020-01-17T22:08:47.000Z
|
test/test_rivescript_coverage_plugin.py
|
snoopyjc/rivescript_coverage_plugin
|
dab9a36c0c48e040909c487cc7d96fe2b60ef267
|
[
"MIT"
] | 18
|
2020-01-17T22:07:29.000Z
|
2020-03-29T22:13:12.000Z
|
test/test_rivescript_coverage_plugin.py
|
snoopyjc/rivescript_coverage_plugin
|
dab9a36c0c48e040909c487cc7d96fe2b60ef267
|
[
"MIT"
] | null | null | null |
"""
Tests for `rivescript_coverage_plugin` module.
"""
#from rivescript_coverage_plugin import rivescript_coverage_plugin
import rive
import pytest
from rivescript import RiveScript
import os
USER='user' # v1.0.0: Issue 14
USER2='user2' # v1.0.0: Issue 14
@pytest.fixture(scope="session", autouse=True)
def initialize():
rive.rs_init()
def say(message):
return rive.reply(USER, message)
def say_u(user, message): # v1.0.0: Issue 14
return rive.reply(user, message) # v1.0.0: Issue 14
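# Thin wrappers around the shared bot created in rive.rs_init(): each sends a
# message on behalf of a user and returns the bot's reply.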
def test_issue_17(): # v1.1.0: Must be run first!
# v1.1.0: Issue 17: If you load all via rs.stream(), plug-in crashes
rs = RiveScript()
rs.stream("""
+ issue 17
- 17 reply
""")
rs.sort_replies()
assert rs.reply(USER, 'issue 17') == '17 reply'
# v1.1.0: Issue 15: Calls to rs.load_file() not tracked
rs = RiveScript()
rs.load_file('issue_15.rive')
rs.sort_replies()
assert rs.reply(USER, 'issue 15') == '15 reply'
def test_basic():
assert say('hi') == 'hey'
resp = say('hi')
assert resp in ('hey again!', 'I already said "hey"')
assert say('yo') == 'hey yo'
resp = say('hi there')
assert resp in ('You said hi there', 'Hi there back!')
assert say('bye') == 'Good bye!'
assert say('hey') == 'hey'
resp = say('hey')
assert resp in ('hey again!', 'I already said "hey"')
assert say('formal') == 'User'
assert say('green') == 'No, purple is the best color!'
assert say('purple') == 'Correct, purple is the best color!'
assert say('topic') == 'entered topic'
assert say('hi') == "You're in topic, type q to quit"
assert say('try in topic') == 'response in topic'
assert say('conditional in topic') == 'conditional in topic response'
assert say('q') == 'Exiting topic'
assert say('object') == 'result from object'
def test_star():
# v1.0.0: Issue 12 resp = say('star')
# v1.0.0: Issue 12 assert resp in ("I don't know what you mean", "Sorry, I don't know that")
# v1.0.0: Issue 12: Test both replies for 'star' as the one with weight wasn't being marked as executed
star_replies = ("I don't know what you mean", "Sorry, I don't know that")
star_replies_found = set()
while True:
resp = say('star')
star_replies_found.add(resp)
if len(star_replies_found) == len(star_replies):
break
assert say('xyzzy') == '[ERR: No Reply Matched]'
def test_bugs_1():
"""Test bugs found in the code"""
# v0.2.0: Issue #3: Plugin crashes if rive file is deleted by test
with open('brain/issue_3.rive', 'w') as d:
print('+ dynamic', file=d)
print('- dynomite!', file=d)
rive.rs.load_file('brain/issue_3.rive')
rive.rs.sort_replies()
assert say('dynamic') == 'dynomite!'
os.remove('brain/issue_3.rive')
# v0.2.2: Issue #6: Topic with inherits or includes don't show any coverage
assert say('issue 6') == 'Entering topic issue_6'
assert say('issue') == 'Check coverage!'
assert say('bye') == 'Good bye!'
assert say('issue 6a') == 'Entering topic issue_6a'
assert say('sixa') == 'Check sixa!'
assert say('exit') == 'Exiting topic'
# v0.2.3: Issue #7: Conditional with alternate response not covered
assert say('issue 7').startswith('issue_7 response')
# v0.2.3: Issue #8: Topic changed in object
assert say('issue_8') == 'result from issue_8_object'
assert say('hi') == "You're in issue_8, type q to quit"
assert say('try in issue_8') == 'response in issue_8'
assert say('prev in issue_8') == 'prev response in issue_8'
assert say('conditional in issue_8') == 'conditional response in issue_8'
assert say('q') == 'Exiting issue_8'
# v1.0.0: Issue #9: local concat
assert say('issue 9 default') == 'Issue 9default response without space'
assert say('issue 9 newline') == 'Issue 9\nresponse with newline'
assert say('issue 9 space') == 'Issue 9 response with space'
assert say('issue 9 none') == 'Issue 9response without space'
# v1.0.0: Issue 10: line numbers off due to whitespace at end of object
assert say('issue 10') == 'Issue 10 response'
def test_bugs_2():
"""Test bugs found in the code"""
# v1.0.0: Issue 11: 'else', 'nonlocal' marked as not executed in object
assert say('issue 11') == \
'Issue 11 response'
# v1.0.0: Issue 12: Responses with weights were not being marked as executed
issue_12_replies = (
'issue 12 response 1',
'issue 12 response 2',
'issue 12 response 3',
'issue 12 response 4',
'issue 12 response 5',
'issue 12 response 6',
'issue 12 response 7',
'issue 12 response 8 - unweighted',
)
issue_12_replies_found = set()
while True:
resp = say('issue 12')
assert resp in issue_12_replies
issue_12_replies_found.add(resp)
if len(issue_12_replies_found) == len(issue_12_replies):
break
# v1.0.0: Issue 13: Responses didn't match if the question contained a single quote
assert say("issue 13 single quote doesn't work") == "issue 13 single quote response"
assert say('issue 13 double quote "how about it?"') == "issue 13 double quote response"
assert say('issue 13 both quotes "don\'t break!"') == "issue 13 both quotes response"
# v1.0.0: Issue 14: Need separate topic for each user
for i in range(1, 11):
for u in (USER, USER2):
assert say_u(u, f"issue 14 {u} q{i}") == f"issue 14 {u} r{i}"
# v1.1.0: Issue 16: No coverage tracked for streams
rs = RiveScript()
for _ in range(2): # Loading the same thing more than once from the same line # shouldn't create additional _rs_stream_ files
rs.stream("""
+ issue 16
- 16 reply
""")
rs.sort_replies()
assert rs.reply(USER, 'issue 16') == '16 reply'
def i16a(): # Caller in same file should be omitted from _rs_stream_ filename
streams = ("""+ issue 16a
- 16a reply""",
"""+ issue 16b
- 16b reply""")
for stream in streams: # Loading different streams at the same line # should create 2 _rs_stream_ files
rs.stream(stream)
rs.sort_replies()
assert rs.reply(USER, 'issue 16a') == '16a reply'
assert rs.reply(USER2, 'issue 16b') == '16b reply'
i16a()
# v1.1.0: Issue 18: If you change topics with set_uservar outside an
# object, the coverage in the new topic is not tracked
rs.stream("""
> topic new_t
+ issue 18
- 18 reply
< topic
+ *
- star
""")
rs.sort_replies()
rs.set_uservar(USER2, "topic", "new_t")
assert rs.reply(USER2, 'issue 18') == '18 reply'
assert rs.reply(USER, 'issue 18') == 'star'
@pytest.mark.parametrize("debug", [False, True])
def test_rivescript_v1_15(debug, capsys): # pragma: no cover
"""Test that the debug option is preserved across prepare_brain_transplant calls"""
if hasattr(RiveScript, "prepare_brain_transplant"):
rs = RiveScript(debug=debug)
rs.load_file("brain/test.rive")
rs.sort_replies()
assert rs.reply(USER, 'hi') == 'hey'
captured = capsys.readouterr()
if debug:
assert '[RS]' in captured.out
else:
assert '[RS]' not in captured.out
rs.prepare_brain_transplant()
rs.load_file("brain/issue_10.rive") # Anything but test.rive
rs.sort_replies()
assert rs.reply(USER, 'issue 10') == 'Issue 10 response'
captured = capsys.readouterr()
if debug:
assert '[RS]' in captured.out
else:
assert '[RS]' not in captured.out
| 35.527027
| 134
| 0.605807
|