| Column | Type | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
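Each record below follows this schema. As a rough illustration only (not part of the original dump), the sketch below shows how rows with these columns could be filtered once exported to a JSON Lines file; the file name `records.jsonl` and the size/star thresholds are assumptions made for the example.

```python
import json

# Illustrative only: scan records exported as JSON Lines (assumed file name),
# keep small Python files that have at least one star, and print a summary line.
with open("records.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        small_python = row["ext"] == "py" and row["size"] < 10_000
        starred = (row["max_stars_count"] or 0) >= 1  # max_stars_count may be null
        if small_python and starred:
            print(f'{row["hexsha"][:8]} {row["max_stars_repo_name"]}:{row["max_stars_repo_path"]} '
                  f'stars={row["max_stars_count"]} avg_line_length={row["avg_line_length"]:.2f}')
```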
hexsha: 1abdb665b3f26fc44f71b7a52adc1fcb9d19d765 | size: 20,030 | ext: py | lang: Python
path: src/ask/sent_2_q.py | repo: ercgn/11411-proj | repo head: 10c7f68e3d800af5e86fd3f3b63e2a298fd21a8c | licenses: ["MIT"]
stars: null | issues: null | forks: null
#
# sent_2_q.py
#
# Given a sentence, this class can be used to turn the sentence into a question.
# It tries a variety of methods and returns after the first one succeeds.
#
# Rachel Kobayashi
# with
# Aaron Anderson
# Eric Gan
#
#
import util.rdrpos as rdrpos
import nltk, string
from functools import reduce  # reduce() is used in verbPostArr; explicit import keeps this working on Python 3
from util.combinations import Combine
from util.qutil import *
# cases to replace:
# dates -> when
# if -> why question with the if section as the clause
# noun / proper noun / pronoun replacement
# names / proper nouns
class ConstructQuestion(object):
def __init__(self, sentence):
self.c = Combine();
self.tokens = nltk.word_tokenize(sentence.strip());
self.tags = rdrpos.pos_tag(sentence.strip());
# check whether capitalization is necessary; otherwise lowercase the first token
self.nltkTags = nltk.pos_tag(self.tokens);
if (self.nltkTags[0] != self.tags[0]) and \
self.c.ID.isReplacablePronoun(self.tokens[0]):
self.tokens[0] = self.tokens[0].lower();
# remove end punctuation
self.tokens, self.tags = self.rmEndPunc(self.tokens, self.tags);
# returned question / question word
self.out = "";
self.qWord = None;
self.N = len(self.tokens);
self.make(sentence);
def reportionCommaLists(self, tokList, tagList, idxList=None):
rmList = [];
for i,item in enumerate(tagList[:-1]):
nextItem = tagList[i+1];
firstWord = nextItem[0];
lastTag = item[-1];
lastWord = None;
if self.c.ID.isEndPhrasePunc(lastTag) and len(item) >= 2:
lastWord = item[-2];
elif len(item) >= 2:
lastWord = item[-1];
if lastWord != None and\
(is_adj(lastWord) and is_adj(firstWord)):
tagList[i+1] = item + nextItem;
tokList[i+1] = tokList[i] + tokList[i+1];
rmList.append(i);
elif lastWord != None and\
(is_propN(lastWord) and is_propN(firstWord)):
second = (tokList[i+1][0],firstWord);
first = (tokList[i][-2],lastWord);
if self.c.ID.isPlace(first,second):
tagList[i+1] = item + nextItem;
tokList[i+1] = tokList[i] + tokList[i+1];
rmList.append(i);
offset = 0;
if rmList != []:
for idx in rmList:
adjI = idx-offset;
# tokList[adjI];
tokList.pop(adjI);
tagList.pop(adjI);
if idxList != None:
idxList.pop(adjI+1);
offset += 1;
if idxList != None:
return tokList, tagList,idxList;
else:
return tokList, tagList;
# Split the tokens and tags into phrases based on commas
# or other ending punc such as ;.?
# cannot just join and use split because the question word that has
# been replaced might be more than one word
# might be irrelevant depending on the sentence handed in
# hence the early check for ,
def splitComma(self):
tok = self.tokens;
pos = self.tags;
# not necessary to split, indicated by a -1 in the output
if ',' not in set(tok):
return tok,pos, (-1,0);
tokCommaList, tagCommaList, idxs = self.splitCommaBare(tok, pos, True);
# list of indices that indicate where the phrases were split in the original
idxs.append(len(tok));
(qIdx, qWord) = self.qWord;
# find the index of the comma phrase that contains the question word
for idx in range(len(idxs)-1):
if idxs[idx] <= qIdx and qIdx < idxs[idx+1]:
return (tokCommaList, tagCommaList, (idx, idxs[idx]));
return tokCommaList, tagCommaList, (0,0);
# splitCommaBare - split the input into list of components
# based on comma / end punctuation placement
# takes in the tok, associated pos tags, and
# idxFlag which indicates whether or not the output should include
# a list of where the original lists were split
def splitCommaBare(self, tok, pos, idxFlag):
if ',' not in set(tok) or (len(tok) != len(pos)):
if idxFlag: return tok, pos, False;
else: return tok,pos;
saveTag = [];
saveTok = [];
newTok = [];
newTag = [];
idxs = [];
incpEnd = False;
idxs.append(0);
for i,word in enumerate(tok):
# not comma, part of same phrase
if not self.c.ID.isEndPhrasePunc(word):
saveTok.append(word);
saveTag.append(pos[i]);
incpEnd = True;
# comma / end punctuation, add section to output list
# reset temp save
else:
idxs.append(i+1);
saveTok.append(word);
saveTag.append(pos[i]);
newTok.append(saveTok);
newTag.append(saveTag);
saveTok = [];
saveTag = [];
incpEnd = False;
# if the original input did not have final end punctuation
# ensures we get the last of the input included
if incpEnd:
newTok.append(saveTok);
newTag.append(saveTag);
# redo comma lists based on countries, adjective lists.
outTok, outTag, outIdxs = self.reportionCommaLists(newTok,newTag,idxs);
if idxFlag:
return outTok,outTag, idxs;
else:
return outTok,outTag;
# Arranges a question when the question word is preceded by a verb
def verbPreArr(self, tok, qIdx):
qTok = [];
beginning = tok[qIdx:];
qTok += makeList(beginning);
# qTok += [tok[qIdx-1]];
if qIdx-1 > 0:
tok[0] = wordToLower(tok[0]);
qTok += tok[0:qIdx-1];
return qTok;
# Arranges a question when the question word is followed by a verb
def verbPostArr(self, tok, qIdx, pos):
qTok = [];
qPart = tok[qIdx:];
beginning = tok[:qIdx];
beginTag = pos[:qIdx];
# check if the beginning of the sentence has a verb
# (indicates a somewhat complete thought,
# probably not necessary in question)
hasVerb = reduce(lambda x,y: x or y, map(is_verb, beginTag));
if isinstance(qPart,list):
qTok += [qPart[0]];
if not hasVerb and beginTag[-1] != "IN":
if beginTag[0] == "DT" and isinstance(beginning,list):
qTok += beginning[1:];
else:
qTok += beginning;
end = qPart[1:];
qTok += makeList(end);
else:
qTok += [qPart];
# does not copy the first word if "what" is the second word
# my socks are black today > my what are black today >
# what are black today
if qIdx == 1:
tok[0] = wordToLower(tok[0]);
qTok += tok[0:qIdx];
return qTok;
# checks the comma delineated sections for a given pos tag
# uses the key "NOUN" to indicate any noun and
# "VERB" to indicate any verb (avoids function pointers)
# returns the index into the comma list of the first find
# as well as the found item (a list);
def findTag(self,newTokList,newTagList, tagCode):
saveIdx = [];
found = False;
if newTokList == None or newTagList == None:
return None;
for i, phrase in enumerate(newTagList):
for tag in phrase:
# found condition, save phrase
if (tagCode == "NOUN" and is_noun(tag)) or\
(tagCode == "VERB" and is_verb(tag)) or\
(tag == tagCode):
found = True;
if found:
saveIdx.append(i);
found = False;
# no find, return None
if saveIdx == []:
return None;
# something found, return last find (closest to verb);
else:
return saveIdx[-1],makeList(newTokList[saveIdx[-1]]);
# rearrangeBV - rearrange a sentence when a being verb is present
# so that the question reads [verb] [beginning] [end] ?
# (Forms yes/no questions without adding / changing word tokens)
def rearrangeBV(self,vbIdx):
if vbIdx < 0:
return False;
pos = self.tags;
tok = self.tokens;
verb = tok[vbIdx];
preVerb = [];
postVerb = [];
qTok = [];
saveIdx = None;
newTok = None;
# start of sentence
if vbIdx > 0:
preVerb = makeList(tok[:vbIdx]);
preVerb[0] = wordToLower(preVerb[0]);
# break at commas if necessary
newTok, newTag= self.splitCommaBare(preVerb,pos[:vbIdx], False);
if newTok != preVerb and newTok != None and newTag != None:
out = self.findTag(newTok,newTag,"NOUN");
if out != None:
saveIdx, preVerb = out;
# end of sentence
if vbIdx < len(tok)-1:
postVerb = makeList(tok[vbIdx+1:]);
# put phrases w/o subject first
if newTok != None and saveIdx != None:
for phrase in newTok:
if phrase != newTok[saveIdx]:
qTok += phrase;
# "meat" of the sentence
qTok += [verb] + preVerb + postVerb;
# formatting output
self.joinQ(qTok);
return True;
# If the question is a "who" question,
# remove an article immediately before "who" in the output,
# checked for more general cases in formatQ
def removeLeadingArticle(self):
toks = self.tokens;
tags = self.tags;
if self.qWord != None:
(idx, word) = self.qWord;
if word == "who" and idx > 0:
if tags[idx-1] == "DT":
toks.pop(idx-1);
tags.pop(idx-1);
self.qWord = (idx-1,word);
return;
return;
# rmEndPunc - remove end punctuation from the given token list
# and pop the matching tag from the associated pos list
def rmEndPunc(self,tok, tag):
if isinstance(tok, list):
punc = tok[-1];
if self.c.ID.isEndPhrasePunc(punc):
x = tok.pop(-1);
y = tag.pop(-1);
return tok, tag;
# rearranges sentences to flow more naturally
def formatQuestion(self):
# split sentences by commas, keeping only the phrase
# with the question word
# PROS: simplifies question, easier to make grammatical
# CONS: ambiguity, possible eradication of important points
#### currently everything is reattached later
if self.qWord == None:
self.out = "";
return;
else:
self.removeLeadingArticle();
(phraseTok, phraseTag, (pSel,idxOffset)) = self.splitComma();
if pSel != -1:
tok = phraseTok[pSel];
pos = phraseTag[pSel];
else:
tok = phraseTok;
pos = phraseTag;
(qIdx, wrd) = self.qWord;
qIdx = qIdx - idxOffset;
if qIdx != 0:
# question word follows a verb
if qIdx > 1 and qIdx-1 < len(pos) and is_verb(pos[qIdx-1]):
qTok = self.verbPreArr(tok,qIdx);
# question word precedes a verb
elif qIdx < len(tok)-1 and \
(is_verb(pos[qIdx+1]) or pos[qIdx+1] == "MD"):
qTok = self.verbPostArr(tok,qIdx,pos);
# question word in preposition etc
else: qTok = tok;
# case: question word already in front,
# only need to change punctuation
else: qTok = tok;
# add other details back into the question
for i, phrase in enumerate(phraseTok):
if pSel != -1 and i != pSel:
qTok += ",";
addPhrase = phrase[0:-1];
if addPhrase != []:
tokTags = rdrpos.pos_tag("".join(addPhrase[0]));
if tokTags[0] != "NNP":
addPhrase[0] = addPhrase[0].lower();
if len(addPhrase) > 1:
qTok += addPhrase;
self.joinQ(qTok);
return;
# Turn the question from a list into a string question
# Set output and capitalization
def joinQ(self, qTok):
qTok += ['?'];
# special join function to remove extra spaces
question = self.c.sentJoin(qTok);
# capitalize first letter
self.out = question[0].upper() + question[1:];
return;
# creates question by replacing the first date
# replaces with "what" or "what date" instead of "when"
# because that seems to work better grammatically most of the time
# returns True on success, False on failure
def qFromDate(self):
tok = self.tokens;
pos = self.tags;
origN = self.N
if "#DATE" in set(pos):
idx = pos.index("#DATE");
# only year specified
if len(tok[idx]) == 4:
tok[idx] = "what year";
# preposition case
elif idx < len(tok)-1 and pos[idx+1] == "IN":
tok[idx] = "when";
# follows a verb
elif idx > 0 and is_verb(pos[idx-1]):
tok[idx] = "what";
else:
tok[idx] = "what date";
self.qWord = (idx, tok[idx]);
return True;
else: return False;
# create a question from a quantity value
# error prone
"""
def qFromQuant(self):
tok = self.tokens;
pos = self.tags;
if "CD" in set(pos):
idx = pos.index("CD");
phrase = [];
phrasetok = [];
i = idx;
token = None;
tag = None;
while(i < len(pos) and tag not in set(string.punctuation)):
if token not in set(string.punctuation) and token != None:
phrase.append(token);
phrasetok.append(tag);
if tag == "NNS":
break;
i += 1;
token = tok[i];
tag = pos[i];
if phrase != []:
print phrase, phrasetok;
"""
# replaces the noun at the given index, idx with the
# appropriate question word
# returns True on success, False on failure
def replaceNounWithQ(self, idx):
tok = self.tokens;
pos = self.tags;
nounTag = pos[idx];
word = tok[idx];
# error with input, no noun to replace, erroneous index
if (idx < 0 or idx > len(pos)) or \
(not is_noun(nounTag) and nounTag != "PRP"):
return False;
# proper noun replacement
if (len(nounTag) > 2 and nounTag[0:3] == "NNP"):
tok[idx] = "who or what";
# pronoun replacement
elif nounTag == "PRP":
pFlag = self.c.ID.isReplacablePronoun(word);
if pFlag == 1: #subject
tok[idx] = "who";
elif pFlag == -1: # object
tok[idx] = "whom";
elif pFlag == -2: # possessive
tok[idx] = "whose";
elif pFlag == 2: # cannot specify (it, there)
tok[idx] = "what";
else:
return False;
# common noun replacement
else:
tok[idx] = "what";
# remove leading determiner if present
if idx > 0 and pos[idx-1] == "DT":
pos.pop(idx-1);
tok.pop(idx-1);
idx = idx -1;
# save the index of the question word (used in rearranging the question)
self.qWord = (idx, tok[idx]);
return True;
# replace the first noun / proper noun / pronoun that precedes
# a verb with an appropriate question word
# returns True on successful replacement, False on failure
def qFromNoun(self):
pos = self.tags;
lastCandidate = None;
for i,tag in enumerate(pos):
# last noun / proper noun
if is_noun(tag):
lastCandidate = i;
elif tag == "PRP" and \
(self.c.ID.isReplacablePronoun(self.tokens[i]) != 0):
lastCandidate = i;
# found verb, take most recent candidate word
elif is_verb(tag):
if lastCandidate != None:
return self.replaceNounWithQ(lastCandidate);
return False;
# replace the first pronoun in the sentence with who
# not used because the version included in the noun replacement
# works more grammatically
"""
def qFromPronoun(self):
pos = self.tags;
tok = self.tokens;
for i,tag in enumerate(pos):
if tag == "PRP" and self.c.ID.isReplacablePronoun(tok[i]):
self.replaceNounWithQ(i);
return True;
return False;"""
# create simple questions by rearranging a sentence starting with if
# if "clause", [subset] -> why will [subset]
def ifQ(self):
pos = self.tags;
tok = self.tokens;
# fail if the sentence does not contain the expected comma construct
if "," not in set(pos):
return False;
# split on first comma (associated with "if")
idx = pos.index(",");
if idx < (len(pos)-1):
subTok = tok[idx+1:];
subPos = pos[idx+1:];
# find the first verb modifier to be used in question
if "MD" in set(subPos):
modVerbIdx = subPos.index("MD");
modVerb = subTok[modVerbIdx];
subset = subTok[:modVerbIdx];
if modVerbIdx < len(pos) - 1:
subset += subTok[modVerbIdx+1:];
# if a modifier cannot be found in the question,
# use general word "will"
else:
modVerb = "will";
subset = subTok;
subset = ["Why"] + [modVerb] + subset;
# combine tokens
self.joinQ(subset);
return True;
return False;
# create simple yes / no questions from a sentence
# by switching the placement of the subject and the being verb
def qYesNo(self):
tok = self.tokens;
pos = self.tags;
seenVerb = False;
for i,tag in enumerate(pos):
if is_verb(tag):
if self.c.ID.isBeingVerb(tok[i]) and seenVerb == False:
self.rearrangeBV(i);
return True;
seenVerb = True;
return False;
# overall algorithm for creating questions
# includes combining portions of the input together
# hierarchy of sentence constructions:
# if, yes/no, date, noun
def make(self,sentence):
combi = self.c;
toks = self.tokens;
pos = self.tags;
if toks == [] or pos == []:
return;
# find date locations and replace them in the given, toks, pos
# gives dates the tag "#DATE"
combi.dates(toks, pos);
# combine names into a single token,
# sort of an NER
combi.names(toks, pos);
# check for context based on timing (might require change of verb)
#timeFlag = combi.ID.isTimeDep(toks,0);
if toks[0].lower() == "if" and self.ifQ():
return;
if self.qYesNo():
return;
if self.N < 15 and self.qFromDate():
self.formatQuestion();
return;
if self.qFromNoun():
self.formatQuestion();
return;
return;
avg_line_length: 37.439252 | max_line_length: 79 | alphanum_fraction: 0.521618

hexsha: dbafcf10f68f91f2332890f439e9e5aeef38d178 | size: 1,449 | ext: py | lang: Python
path: setup.py | repo: ubernostrum/django-flashpolicies | repo head: f24fb80907a82c6894d08c58e25d085a18b11155 | licenses: ["BSD-3-Clause"]
stars: 7 (2015-04-07T22:18:02.000Z to 2022-01-26T16:02:14.000Z) | issues: 1 (2017-06-05T01:41:10.000Z to 2017-10-23T10:14:23.000Z) | forks: 2 (2016-09-24T18:36:17.000Z to 2017-08-07T14:26:49.000Z)
import os
from setuptools import find_packages, setup
setup(
name="django-flashpolicies",
zip_safe=False, # eggs are the devil.
version="1.13",
description="Flash cross-domain policies for Django sites",
long_description=open(os.path.join(os.path.dirname(__file__), "README.rst")).read(),
author="James Bennett",
author_email="james@b-list.org",
url="https://github.com/ubernostrum/django-flashpolicies/",
include_package_data=True,
package_dir={"": "src"},
packages=find_packages("src"),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.1",
"Framework :: Django :: 3.2",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Utilities",
],
python_requires=">=3.6",
install_requires=["Django>=2.2,!=3.0.*"],
)
avg_line_length: 35.341463 | max_line_length: 88 | alphanum_fraction: 0.608696

hexsha: 2b7c00929417369437da32838fd327c99fe638a1 | size: 3,458 | ext: py | lang: Python
path: google/cloud/apigeeconnect_v1/types/connection.py | repo: LaudateCorpus1/python-apigee-connect | repo head: ae944c10b2f63a682cf2f196d2122ccbed2dac48 | licenses: ["Apache-2.0"]
stars: null | issues: 30 (2021-06-14T17:02:41.000Z to 2022-03-08T02:01:47.000Z) | forks: 3 (2021-07-14T19:32:28.000Z to 2022-03-01T19:48:20.000Z)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.apigeeconnect.v1",
manifest={
"ListConnectionsRequest",
"ListConnectionsResponse",
"Connection",
"Cluster",
},
)
class ListConnectionsRequest(proto.Message):
r"""The request for [ListConnections][Management.ListConnections].
Attributes:
parent (str):
Required. Parent name of the form:
``projects/{project_number or project_id}/endpoints/{endpoint}``.
page_size (int):
The maximum number of connections to return.
The service may return fewer than this value. If
unspecified, at most 100 connections will be
returned. The maximum value is 1000; values
above 1000 will be coerced to 1000.
page_token (str):
A page token, received from a previous ``ListConnections``
call. Provide this to retrieve the subsequent page.
When paginating, all other parameters provided to
``ListConnections`` must match the call that provided the
page token.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
class ListConnectionsResponse(proto.Message):
r"""The response for [ListConnections][Management.ListConnections].
Attributes:
connections (Sequence[google.cloud.apigeeconnect_v1.types.Connection]):
A list of clients.
next_page_token (str):
A token that can be sent as ``page_token`` to retrieve the
next page. If this field is omitted, there are no subsequent
pages.
"""
@property
def raw_page(self):
return self
connections = proto.RepeatedField(proto.MESSAGE, number=1, message="Connection",)
next_page_token = proto.Field(proto.STRING, number=2,)
class Connection(proto.Message):
r"""
Attributes:
endpoint (str):
The endpoint that the connection is made against. Format:
``projects/{project_number}/endpoints/{endpoint}``
cluster (google.cloud.apigeeconnect_v1.types.Cluster):
Cluster information.
stream_count (int):
The count of streams.
"""
endpoint = proto.Field(proto.STRING, number=1,)
cluster = proto.Field(proto.MESSAGE, number=2, message="Cluster",)
stream_count = proto.Field(proto.INT32, number=3,)
class Cluster(proto.Message):
r"""
Attributes:
name (str):
The name of the cluster.
region (str):
The region of the cluster.
"""
name = proto.Field(proto.STRING, number=1,)
region = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
avg_line_length: 31.436364 | max_line_length: 85 | alphanum_fraction: 0.655292

hexsha: a04324b0cfab7ef0d9c49d32d2ebe8e6ecd38cbe | size: 317 | ext: py | lang: Python
path: conduct/__main__.py | repo: lamartine-sl/mlops_project | repo head: 4391d9639129bcd4d1cbc9cc6a64f696a4c7b88e | licenses: ["MIT"]
stars: null | issues: 5 (2022-02-21T04:36:06.000Z to 2022-03-31T04:36:53.000Z) | forks: null
import sys
import conduct.versioning.dvc_handler as dvc
def main():
"""The CLI entry point."""
args = sys.argv[1:]
print(f"{__name__} count of args {len(args)}")
for i, arg in enumerate(args):
print(f"{__name__} arg {i}: {arg}")
dvc.test(args)
if __name__ == "__main__":
main()
avg_line_length: 16.684211 | max_line_length: 50 | alphanum_fraction: 0.602524

hexsha: 62279865add6af67810178e6121ba399a08896c1 | size: 619 | ext: py | lang: Python
path: src/python_wiki_api/console.py | repo: Zazhka/python_wiki_api | repo head: e8fb15c2b6597c62f333ec1df98b7f3592d34e07 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""Command-line interface."""
import textwrap
import click
from . import __version__, wikipedia
API_URL: str = "https://en.wikipedia.org/api/rest_v1/page/random/summary"
@click.command()
@click.option(
"--language",
"-l",
default="en",
help="Language edition of Wikipedia",
metavar="LANG",
show_default=True,
)
@click.version_option(version=__version__)
def main(language: str) -> None:
"""The wikipedia API based on hypermodern Python project."""
page = wikipedia.random_page(language=language)
click.secho(page.title, fg="green")
click.echo(textwrap.fill(page.extract))
avg_line_length: 22.107143 | max_line_length: 73 | alphanum_fraction: 0.6979

hexsha: 0821318cf0d1b768a448097277705b7c1ef6897b | size: 6,705 | ext: py | lang: Python
path: 6_q-learning/Board.py | repo: scottfabini/machine-learning-perceptron | repo head: 2bc4b7b415871bc73ac8c033983df73719df9422 | licenses: ["Unlicense"]
stars: null | issues: null | forks: null
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import random
from enum import Enum
import os
# Enumeration of available actions
class Action(Enum):
P = 0
N = 1
S = 2
E = 3
W = 4
# Enumeration of available states
class State(Enum):
Empty = 0
Wall = 1
Can = 2
class Board:
# Constructor initialization
def __init__(self, steps_M,
reward_can, reward_crash, reward_pick_up_empty,
nu, gamma):
self.board = pd.DataFrame([[State.Empty for x in range(12)] for y in range(12)])
self.board[0] = State.Wall
self.board[11] = State.Wall
self.board.iloc[0] = State.Wall
self.board.iloc[11] = State.Wall
self.robot = (1,1)
self.qMatrix = np.zeros((5 ** 5, 5))
self.steps_M = steps_M
self.reward_can = reward_can
self.reward_crash = reward_crash
self.reward_pick_up_empty = reward_pick_up_empty
self.nu = nu
self.gamma = gamma
self.move_penalty = 0
self.move_score = np.zeros(len(Action))
self.qMatrixValues = [None] * len(Action)
self.sensed_state = np.zeros(len(Action))
# Randomize the board state.
def shuffleBoard(self):
for (i,j), element in np.ndenumerate(self.board):
if i != 0 and i != 11 and j != 0 and j != 11:
if random.random() > 0.5:
val = State.Can
else:
val = State.Empty
self.board.set_value(i, j, val)
i = random.randint(1,10)
j = random.randint(1,10)
self.robot = (i, j)
# Run the episode. Choose an action to take, perform the action,
# and update the QMatrix (if updateQ == True).
def runEpisode(self, episode, epsilon, updateQ):
reward = np.zeros(self.steps_M)
for i in range(self.steps_M):
currentState = self.senseCurrentState(self.robot)
# choose current action
if 1 - epsilon < random.random():
currentAction = random.choice(list(Action))
else:
currentAction = self.maximizeAction()
# perform the action
if currentAction == Action.P and self.board.get_value(self.robot[0], self.robot[1]) == State.Can:
self.board.set_value(self.robot[0], self.robot[1], State.Empty)
reward[i] = self.reward_can
nextState = self.senseCurrentState(self.robot)
elif currentAction == Action.P and self.board.get_value(self.robot[0], self.robot[1]) == State.Empty:
reward[i] = self.reward_pick_up_empty
nextState = self.senseCurrentState(self.robot)
elif self.isWall(self.robot, currentAction):
reward[i] = self.reward_crash
nextState = self.senseCurrentState(self.robot)
else:
self.robot = self.moveTo(self.robot, currentAction)
reward[i] = self.move_penalty
nextState = self.senseCurrentState(self.robot)
# update the qMatrix (if this is training data)
if updateQ:
self.updateQMatrix(reward[i], currentState, currentAction, nextState)
return np.sum(reward)
# Update the qMatrix according to the Q-learning algorithm.
def updateQMatrix(self, reward, currentState, currentAction, nextState):
for i, action in enumerate(Action):
self.qMatrixValues[i] = self.qMatrix[nextState, action.value]
if np.argmax(self.qMatrixValues) == 0 and self.qMatrixValues[0] == 0:
nextAction = random.randint(0, 4)
else:
nextAction = np.argmax(self.qMatrixValues)
self.qMatrix[currentState, currentAction.value] = self.qMatrix[currentState, currentAction.value] + self.nu * (reward + self.gamma * self.qMatrix[nextState, nextAction] - self.qMatrix[currentState, currentAction.value])
# Query the QMatrix to determine the best action to take.
def maximizeAction(self):
for i, action in enumerate(Action):
current_state = self.senseCurrentState(self.robot)
self.move_score[i] = self.qMatrix[current_state, i]
if np.argmax(self.move_score) == 0 and self.move_score[0] == 0:
return self.reverseIndex(random.randint(0, 4))
return self.reverseIndex(np.argmax(self.move_score))
# Query the board for the current state of the robot at the given location.
def senseCurrentState(self, location):
for i, action in enumerate(Action):
self.sensed_state[i] = 3 ** i * self.detect(location, action)
return int(np.sum(self.sensed_state))
''' Begin Helper Functions '''
# Display the board
def displayBoard(self):
for (i,j), element in np.ndenumerate(self.board):
if i == self.robot[0] and j == self.robot[1]:
print("*", end = ' ')
else:
print(element.value, end=' ')
if j == self.board.shape[1] - 1:
print()
print("\n\n")
# Detect the state of the adjacent square
def detect(self, robot, action):
move = self.moveTo(robot, action)
sensed_value = self.board.get_value(move[0], move[1]).value
return sensed_value
# Helper function to convert the move Enum into an index in the qMatrix
def reverseIndex(self, moveIndex):
if moveIndex == 0:
return Action.P
elif moveIndex == 1:
return Action.N
elif moveIndex == 2:
return Action.S
elif moveIndex == 3:
return Action.E
elif moveIndex == 4:
return Action.W
# Move the robot to a new location based on the action
def moveTo(self, robot, action):
if action == Action.P:
move = (0, 0)
elif action == Action.N:
move = (-1, 0)
elif action == Action.S:
move = (1, 0)
elif action == Action.E:
move = (0, 1)
elif action == Action.W:
move = (0, -1)
return tuple(map(sum, zip(robot, move)))
# Detect if the robot will run into a wall on the nextMove
def isWall(self, robot, nextMove):
i, j = self.moveTo(self.robot, nextMove)
if self.board.get_value(i, j) == State.Wall:
return True
else:
return False
# Sum the state of the board for debug
def sum(self):
sum = 0
for (i,j), element in np.ndenumerate(self.board):
sum += element.value
return sum
avg_line_length: 36.243243 | max_line_length: 227 | alphanum_fraction: 0.579717

hexsha: c0cde4c5478f3fdd2fd38ddaf4faad279432d9a6 | size: 614 | ext: py | lang: Python
path: portfolio/migrations/0005_auto_20200928_1604.py | repo: ElmanTr/simple-django-web | repo head: 07f9b938e42ade4b515cbb8b41364089a5f9f2b1 | licenses: ["MIT"]
stars: 1 (2020-10-07T14:03:13.000Z to 2020-10-07T14:03:13.000Z) | issues: null | forks: null
# Generated by Django 3.1.1 on 2020-09-28 12:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0004_data_category'),
]
operations = [
migrations.AlterModelOptions(
name='data',
options={'ordering': ['-date'], 'verbose_name': 'مقاله', 'verbose_name_plural': 'مقالات'},
),
migrations.AlterField(
model_name='data',
name='category',
field=models.ManyToManyField(blank=True, to='portfolio.Category', verbose_name='دسته بندی'),
),
]
avg_line_length: 26.695652 | max_line_length: 104 | alphanum_fraction: 0.596091

hexsha: 71751b2c92a5024137858ae7af0a15c9e7417718 | size: 752 | ext: py | lang: Python
path: setup.py | repo: weaponhsu/youliPyLib | repo head: ffb8a6f066df3aa4f8d1055f50f2520feb73dcef | licenses: ["MIT"]
stars: null | issues: null | forks: null
#! /usr/bin/python
# -*- coding:utf-8 -*-
# @author: weaponhsu
# @File: setup
# @Time: 2019/11/27 6:24 PM
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="youliPyLib-hsu0203",
version="0.0.1",
author="hsu0203",
author_email="huangxu4328@gmail.com",
description="py3 common package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/weaponhsu/youliPyLib",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
avg_line_length: 25.931034 | max_line_length: 50 | alphanum_fraction: 0.650266

hexsha: 02eaf93b910920970778a6770f0cb25c4e26d66c | size: 486 | ext: py | lang: Python
path: exercises/fr/test_04_04.py | repo: tuanducdesign/spacy-course | repo head: f8d092c5fa2997fccb3f367d174dce8667932b3d | licenses: ["MIT"]
stars: null | issues: null | forks: null
def test():
assert (
'spacy.blank("fr")' in __solution__
), "As-tu créé le modèle français vierge ?"
assert (
"DocBin(docs=docs)" in __solution__
), "As-tu créé correctement l'objet DocBin ?"
assert "doc_bin.to_disk(" in __solution__, "As-tu utilisé la méthode to_disk?"
assert "train.spacy" in __solution__, "As-tu bien nommé le fichier correctement ?"
__msg__.good(
"Bien joué ! Maintenant nous pouvons entraîner le modèle."
)
avg_line_length: 34.714286 | max_line_length: 86 | alphanum_fraction: 0.652263

hexsha: e37b78ec7c5b651e763eeb64acbb646137772ec0 | size: 1,439 | ext: py | lang: Python
path: oelint_adv/rule_base/rule_var_src_uri_file.py | repo: Rahix/oelint-adv | repo head: b9dc381b181a8bdc7300bb5070f80bf90950efbd | licenses: ["BSD-2-Clause"]
stars: 22 (2019-06-10T00:40:07.000Z to 2022-01-18T19:59:47.000Z) | issues: 274 (2019-03-07T06:00:27.000Z to 2022-03-27T10:22:10.000Z) | forks: 17 (2019-08-24T23:04:39.000Z to 2021-11-02T19:18:19.000Z)
from oelint_parser.cls_item import Variable
from oelint_adv.cls_rule import Rule
from oelint_parser.helper_files import get_scr_components
from oelint_parser.parser import INLINE_BLOCK
class VarSRCUriGitTag(Rule):
def __init__(self):
super().__init__(id="oelint.vars.srcurifile",
severity="warning",
message="First item of SRC_URI should not be a file:// fetcher, if multiple fetcher are used")
def check(self, _file, stash):
res = []
items = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
attribute=Variable.ATTR_VAR, attributeValue="SRC_URI")
_fetcher = []
for i in items:
if any([i.Flag.endswith(x) for x in ["md5sum", "sha256sum"]]):
# These are just the hashes
continue
lines = [y.strip('"') for y in i.get_items() if y]
for x in lines:
if x == INLINE_BLOCK:
_fetcher.append(("inline", i.InFileLine))
continue
_url = get_scr_components(x)
if _url["scheme"]:
_fetcher.append((_url["scheme"], i.InFileLine))
if _fetcher:
if any(x[0] != "file" for x in _fetcher) and _fetcher[0][0] == "file":
res += self.finding(i.Origin, _fetcher[0][1])
return res
avg_line_length: 41.114286 | max_line_length: 119 | alphanum_fraction: 0.558721

hexsha: 232feea67e9e6ce0afd3c439e4e1ca671e399489 | size: 11,473 | ext: py | lang: Python
path: tests/test_api_v1.py | repo: userid/acoustid-server | repo head: 7c15ea1d4639f04a6b420e70be181b725055605b | licenses: ["MIT"]
stars: null | issues: null | forks: null
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from nose.tools import *
from tests import (prepare_database, with_database, assert_json_equals,
TEST_1_LENGTH,
TEST_1_FP,
TEST_1_FP_RAW,
TEST_2_LENGTH,
TEST_2_FP,
TEST_2_FP_RAW,
)
from werkzeug.wrappers import Request
from werkzeug.test import EnvironBuilder
from werkzeug.datastructures import MultiDict
from acoustid import tables
from acoustid.api import errors
from acoustid.api.v1 import (
LookupHandler,
LookupHandlerParams,
SubmitHandler,
SubmitHandlerParams,
APIHandler,
)
from acoustid.utils import provider
def test_ok():
handler = APIHandler()
resp = handler._ok({'tracks': [{'id': 1, 'name': 'Track 1'}]})
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = '<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n<response status="ok"><tracks><track><id>1</id><name>Track 1</name></track></tracks></response>'
assert_equals(expected, resp.data)
assert_equals('200 OK', resp.status)
def test_error():
handler = APIHandler()
resp = handler._error(123, 'something is wrong')
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = '<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n<response status="error"><error>something is wrong</error></response>'
assert_equals(expected, resp.data)
assert_equals('400 BAD REQUEST', resp.status)
resp = handler._error(234, 'oops', status=500)
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = '<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n<response status="error"><error>oops</error></response>'
assert_equals(expected, resp.data)
assert_equals('500 INTERNAL SERVER ERROR', resp.status)
@with_database
def test_lookup_handler_params(conn):
# missing client
values = MultiDict({})
params = LookupHandlerParams()
assert_raises(errors.MissingParameterError, params.parse, values, conn)
# invalid client
values = MultiDict({'client': 'N/A'})
params = LookupHandlerParams()
assert_raises(errors.InvalidAPIKeyError, params.parse, values, conn)
# missing length
values = MultiDict({'client': 'app1key'})
params = LookupHandlerParams()
assert_raises(errors.MissingParameterError, params.parse, values, conn)
# missing fingerprint
values = MultiDict({'client': 'app1key', 'length': str(TEST_1_LENGTH)})
params = LookupHandlerParams()
assert_raises(errors.MissingParameterError, params.parse, values, conn)
# invalid fingerprint
values = MultiDict({'client': 'app1key', 'length': str(TEST_1_LENGTH), 'fingerprint': '...'})
params = LookupHandlerParams()
assert_raises(errors.InvalidFingerprintError, params.parse, values, conn)
# all ok
values = MultiDict({'client': 'app1key', 'length': str(TEST_1_LENGTH), 'fingerprint': TEST_1_FP})
params = LookupHandlerParams()
params.parse(values, conn)
assert_equals(1, params.application_id)
assert_equals(TEST_1_LENGTH, params.fingerprints[0]['duration'])
assert_equals(TEST_1_FP_RAW, params.fingerprints[0]['fingerprint'])
@with_database
def test_lookup_handler(conn):
values = {'client': 'app1key', 'length': str(TEST_1_LENGTH), 'fingerprint': TEST_1_FP}
builder = EnvironBuilder(method='POST', data=values)
handler = LookupHandler(connect=provider(conn))
# no matches
handler = LookupHandler(connect=provider(conn))
resp = handler.handle(Request(builder.get_environ()))
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<response><status>ok</status><results /></response>"
assert_equals(expected, resp.data)
assert_equals('200 OK', resp.status)
# one exact match
prepare_database(conn, """
INSERT INTO fingerprint (length, fingerprint, track_id, submission_count)
VALUES (%s, %s, 1, 1);
""", (TEST_1_LENGTH, TEST_1_FP_RAW))
handler = LookupHandler(connect=provider(conn))
resp = handler.handle(Request(builder.get_environ()))
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<response><status>ok</status><results><result><score>1.0</score><id>eb31d1c3-950e-468b-9e36-e46fa75b1291</id></result></results></response>"
assert_equals(expected, resp.data)
assert_equals('200 OK', resp.status)
# one exact match with MBIDs
values = {'client': 'app1key', 'length': str(TEST_1_LENGTH), 'fingerprint': TEST_1_FP, 'meta': '1'}
builder = EnvironBuilder(method='POST', data=values)
handler = LookupHandler(connect=provider(conn))
resp = handler.handle(Request(builder.get_environ()))
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<response><status>ok</status><results><result><tracks><track><id>b81f83ee-4da4-11e0-9ed8-0025225356f3</id></track></tracks><score>1.0</score><id>eb31d1c3-950e-468b-9e36-e46fa75b1291</id></result></results></response>"
assert_equals(expected, resp.data)
assert_equals('200 OK', resp.status)
# one exact match with MBIDs (no extra metadata in v1)
values = {'client': 'app1key', 'length': str(TEST_1_LENGTH), 'fingerprint': TEST_1_FP, 'meta': '2'}
builder = EnvironBuilder(method='POST', data=values)
handler = LookupHandler(connect=provider(conn))
resp = handler.handle(Request(builder.get_environ()))
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<response><status>ok</status><results><result><tracks><track><id>b81f83ee-4da4-11e0-9ed8-0025225356f3</id></track></tracks><score>1.0</score><id>eb31d1c3-950e-468b-9e36-e46fa75b1291</id></result></results></response>"
#expected = "<?xml version='1.0' encoding='UTF-8'?>\n<response><status>ok</status><results><result><tracks><track><length>123</length><artist><id>a64796c0-4da4-11e0-bf81-0025225356f3</id><name>Artist A</name></artist><id>b81f83ee-4da4-11e0-9ed8-0025225356f3</id><releases><release><track_num>1</track_num><id>dd6c2cca-a0e9-4cc4-9a5f-7170bd098e23</id><track_count>2</track_count><name>Album A</name></release></releases><name>Track A</name></track></tracks><score>1.0</score><id>1</id></result></results></response>"
assert_equals(expected, resp.data)
assert_equals('200 OK', resp.status)
@with_database
def test_submit_handler_params(conn):
# missing client
values = MultiDict({})
params = SubmitHandlerParams()
assert_raises(errors.MissingParameterError, params.parse, values, conn)
# invalid client
values = MultiDict({'client': 'N/A'})
params = SubmitHandlerParams()
assert_raises(errors.InvalidAPIKeyError, params.parse, values, conn)
# missing user
values = MultiDict({'client': 'app1key'})
params = SubmitHandlerParams()
assert_raises(errors.MissingParameterError, params.parse, values, conn)
# invalid user
values = MultiDict({'client': 'app1key', 'user': 'N/A'})
params = SubmitHandlerParams()
assert_raises(errors.InvalidUserAPIKeyError, params.parse, values, conn)
# missing fingerprint
values = MultiDict({'client': 'app1key', 'user': 'user1key'})
params = SubmitHandlerParams()
assert_raises(errors.MissingParameterError, params.parse, values, conn)
# missing duration
values = MultiDict({'client': 'app1key', 'user': 'user1key',
'mbid': ['4d814cb1-20ec-494f-996f-f31ca8a49784', '66c0f5cc-67b6-4f51-80cd-ab26b5aaa6ea'],
'puid': '4e823498-c77d-4bfb-b6cc-85b05c2783cf',
'fingerprint': TEST_1_FP,
'bitrate': '192',
'format': 'MP3'
})
params = SubmitHandlerParams()
assert_raises(errors.MissingParameterError, params.parse, values, conn)
# all ok (single submission)
values = MultiDict({'client': 'app1key', 'user': 'user1key',
'mbid': ['4d814cb1-20ec-494f-996f-f31ca8a49784', '66c0f5cc-67b6-4f51-80cd-ab26b5aaa6ea'],
'puid': '4e823498-c77d-4bfb-b6cc-85b05c2783cf',
'length': str(TEST_1_LENGTH),
'fingerprint': TEST_1_FP,
'bitrate': '192',
'format': 'MP3'
})
params = SubmitHandlerParams()
params.parse(values, conn)
assert_equals(1, len(params.submissions))
assert_equals(['4d814cb1-20ec-494f-996f-f31ca8a49784', '66c0f5cc-67b6-4f51-80cd-ab26b5aaa6ea'], params.submissions[0]['mbids'])
assert_equals('4e823498-c77d-4bfb-b6cc-85b05c2783cf', params.submissions[0]['puid'])
assert_equals(TEST_1_LENGTH, params.submissions[0]['duration'])
assert_equals(TEST_1_FP_RAW, params.submissions[0]['fingerprint'])
assert_equals(192, params.submissions[0]['bitrate'])
assert_equals('MP3', params.submissions[0]['format'])
# all ok (single submission)
values = MultiDict({'client': 'app1key', 'user': 'user1key',
'mbid.0': '4d814cb1-20ec-494f-996f-f31ca8a49784',
'puid.0': '4e823498-c77d-4bfb-b6cc-85b05c2783cf',
'length.0': str(TEST_1_LENGTH),
'fingerprint.0': TEST_1_FP,
'bitrate.0': '192',
'format.0': 'MP3',
'mbid.1': '66c0f5cc-67b6-4f51-80cd-ab26b5aaa6ea',
'puid.1': '57b202a3-242b-4896-a79c-cac34bbca0b6',
'length.1': str(TEST_2_LENGTH),
'fingerprint.1': TEST_2_FP,
'bitrate.1': '500',
'format.1': 'FLAC',
})
params = SubmitHandlerParams()
params.parse(values, conn)
assert_equals(2, len(params.submissions))
assert_equals(['4d814cb1-20ec-494f-996f-f31ca8a49784'], params.submissions[0]['mbids'])
assert_equals('4e823498-c77d-4bfb-b6cc-85b05c2783cf', params.submissions[0]['puid'])
assert_equals(TEST_1_LENGTH, params.submissions[0]['duration'])
assert_equals(TEST_1_FP_RAW, params.submissions[0]['fingerprint'])
assert_equals(192, params.submissions[0]['bitrate'])
assert_equals('MP3', params.submissions[0]['format'])
assert_equals(['66c0f5cc-67b6-4f51-80cd-ab26b5aaa6ea'], params.submissions[1]['mbids'])
assert_equals('57b202a3-242b-4896-a79c-cac34bbca0b6', params.submissions[1]['puid'])
assert_equals(TEST_2_LENGTH, params.submissions[1]['duration'])
assert_equals(TEST_2_FP_RAW, params.submissions[1]['fingerprint'])
assert_equals(500, params.submissions[1]['bitrate'])
assert_equals('FLAC', params.submissions[1]['format'])
@with_database
def test_submit_handler(conn):
values = {'client': 'app1key', 'user': 'user1key',
'length': str(TEST_1_LENGTH), 'fingerprint': TEST_1_FP, 'bitrate': 192,
'mbid': 'b9c05616-1874-4d5d-b30e-6b959c922d28', 'format': 'FLAC'}
builder = EnvironBuilder(method='POST', data=values)
handler = SubmitHandler(connect=provider(conn))
resp = handler.handle(Request(builder.get_environ()))
assert_equals('text/xml; charset=UTF-8', resp.content_type)
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<response><status>ok</status></response>"
assert_equals(expected, resp.data)
assert_equals('200 OK', resp.status)
query = tables.submission.select().order_by(tables.submission.c.id.desc()).limit(1)
submission = conn.execute(query).fetchone()
assert_equals('b9c05616-1874-4d5d-b30e-6b959c922d28', submission['mbid'])
assert_equals(1, submission['format_id'])
assert_equals(192, submission['bitrate'])
assert_equals(TEST_1_FP_RAW, submission['fingerprint'])
assert_equals(TEST_1_LENGTH, submission['length'])
avg_line_length: 50.54185 | max_line_length: 519 | alphanum_fraction: 0.700776

hexsha: c3112011f0499470bae5c814ef0cd4da75056cb7 | size: 492 | ext: py | lang: Python
path: context/app/test_routes_notebooks.py | repo: mccalluc/flask-data-portal | repo head: 2ad5c7085c59d0e53c7704b1d3ad95d20ab3680d | licenses: ["MIT"]
stars: null | issues: 22 (2019-10-10T23:45:05.000Z to 2019-10-17T15:33:19.000Z) | forks: null
import pytest
from .main import create_app
@pytest.fixture
def client():
app = create_app(testing=True)
with app.test_client() as client:
yield client
@pytest.mark.parametrize(
'entity_type',
['donors', 'samples', 'datasets']
)
def test_notebook(client, entity_type, mocker):
mocker.patch('app.api.client.ApiClient.get_files')
response = client.post(f'/notebooks/{entity_type}.ipynb', json={'uuids': ['fake-uuid']})
assert response.status == '200 OK'
avg_line_length: 23.428571 | max_line_length: 92 | alphanum_fraction: 0.686992

hexsha: 10322562e72b453dc7c132ed2303da3615f1fe54 | size: 7,936 | ext: py | lang: Python
path: mysite/ct/urls.py | repo: rimikt/socraticqs2 | repo head: 21f964cdc00c33e0b2482b463ca1795fcde9180c | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from django.conf.urls import patterns, include, url
from ct.views import *
urlpatterns = patterns('',
url(r'^$', main_page, name='home'),
url(r'^about/$', about, name='about'),
url(r'^people/(?P<user_id>\d+)/$', person_profile, name='person_profile'),
# instructor UI
# course tabs
url(r'^teach/courses/(?P<course_id>\d+)/$', course_view, name='course'),
url(r'^teach/courses/(?P<course_id>\d+)/edit/$',
edit_course, name='edit_course'),
# courselet tabs
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/$',
unit_tasks, name='unit_tasks'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/$',
unit_concepts, name='unit_concepts'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/$',
unit_lessons, name='unit_lessons'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/resources/$',
unit_resources, name='unit_resources'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/edit/$',
edit_unit, name='edit_unit'),
# lesson tabs
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/$',
ul_teach, name='ul_teach'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/tasks/$',
ul_tasks, name='ul_tasks'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/concepts/$',
ul_concepts, name='ul_concepts'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/errors/$',
ul_errors, name='ul_errors'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/faq/$',
ul_faq_student, name='ul_faq'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/faq/(?P<resp_id>\d+)/$',
ul_thread_student, name='ul_thread'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/edit/$',
edit_lesson, name='edit_lesson'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/live/$',
live_question, name='live_question'),
# concept tabs
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/$',
study_concept, name='concept_teach'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/tasks/$',
ul_tasks, name='concept_tasks'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/concepts/$',
concept_concepts, name='concept_concepts'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/lessons/$',
concept_lessons, name='concept_lessons'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/errors/$',
concept_errors, name='concept_errors'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/faq/$',
ul_faq_student, name='concept_faq'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/faq/(?P<resp_id>\d+)/$',
ul_thread_student, name='concept_thread'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/edit/$',
edit_lesson, name='edit_concept'),
# error tabs
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/$',
resolutions, name='resolutions'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/resources/$',
error_resources, name='error_resources'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/faq/$',
ul_faq_student, name='error_faq'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/faq/(?P<resp_id>\d+)/$',
ul_thread_student, name='error_thread'),
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/edit/$',
edit_lesson, name='edit_error'),
# responses
url(r'^teach/courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/responses/(?P<resp_id>\d+)/assess/$',
assess, name='assess_teach'),
# student UI
# course tabs
url(r'^courses/(?P<course_id>\d+)/$', course_view, name='course_student'),
# unit tabs
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/$',
study_unit, name='study_unit'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/tasks/$',
unit_tasks_student, name='unit_tasks_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/$',
unit_lessons_student, name='unit_lessons_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/$',
unit_concepts_student, name='unit_concepts_student'),
# lesson tabs
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/tasks/$',
ul_tasks_student, name='ul_tasks_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/concepts/$',
ul_concepts, name='ul_concepts_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/errors/$',
ul_errors_student, name='ul_errors_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/faq/$',
ul_faq_student, name='ul_faq_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/faq/(?P<resp_id>\d+)/$',
ul_thread_student, name='ul_thread_student'),
# concept tabs
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/$',
study_concept, name='study_concept'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/tasks/$',
ul_tasks_student, name='concept_tasks_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/lessons/$',
concept_lessons_student, name='concept_lessons_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/faq/$',
ul_faq_student, name='concept_faq_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/concepts/(?P<ul_id>\d+)/faq/(?P<resp_id>\d+)/$',
ul_thread_student, name='concept_thread_student'),
# error tabs
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/$',
resolutions_student, name='resolutions_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/resources/$',
error_resources, name='error_resources_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/faq/$',
ul_faq_student, name='error_faq_student'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/errors/(?P<ul_id>\d+)/faq/(?P<resp_id>\d+)/$',
ul_thread_student, name='error_thread_student'),
# study pages
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/$',
lesson, name='lesson'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/ask/$',
ul_respond, name='ul_respond'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/responses/(?P<resp_id>\d+)/assess/$',
assess, name='assess'),
url(r'^courses/(?P<course_id>\d+)/units/(?P<unit_id>\d+)/lessons/(?P<ul_id>\d+)/responses/(?P<resp_id>\d+)/errors/$',
assess_errors, name='assess_errors'),
# FSM node pages
url(r'^nodes/(?P<node_id>\d+)/$', fsm_node, name='fsm_node'),
url(r'^nodes/$', fsm_status, name='fsm_status'),
)
avg_line_length: 60.121212 | max_line_length: 127 | alphanum_fraction: 0.613281

hexsha: bc65a488eda8f026bc764aa57b6eb3b4a75890c3 | size: 2,660 | ext: py | lang: Python
path: src/baxter_dataflow/signals.py | repo: GII/baxter_interface | repo head: 9979c30d3451fb9dbdd1e5e9d39c37bf834c8a90 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: 2 (2022-03-03T20:46:45.000Z to 2022-03-09T22:31:07.000Z)
from __future__ import absolute_import
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import inspect
from weakref import WeakKeyDictionary
try:
from weakref import WeakSet
except ImportError:
from .weakrefset import WeakSet
class Signal(object):
def __init__(self):
self._functions = WeakSet()
self._methods = WeakKeyDictionary()
def __call__(self, *args, **kargs):
for f in self._functions:
f(*args, **kargs)
for obj, functions in self._methods.items():
for f in functions:
f(obj, *args, **kargs)
def connect(self, slot):
if inspect.ismethod(slot):
if not slot.__self__ in self._methods:
self._methods[slot.__self__] = set()
self._methods[slot.__self__].add(slot.__func__)
else:
self._functions.add(slot)
def disconnect(self, slot):
if inspect.ismethod(slot):
if slot.__self__ in self._methods:
self._methods[slot.__self__].remove(slot.__func__)
else:
if slot in self._functions:
self._functions.remove(slot)
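# --- Usage sketch (editorial addition, not part of the original module) ---
# The names `Listener`, `on_update` and `log_update` are hypothetical; they
# only illustrate how the Signal class above is meant to be wired up.
class Listener(object):
    def on_update(self, value):
        print("listener got %s" % value)

def log_update(value):
    print("logged %s" % value)

def _signal_demo():
    sig = Signal()
    listener = Listener()
    sig.connect(listener.on_update)   # bound method: keyed by `listener` in the WeakKeyDictionary
    sig.connect(log_update)           # plain function: kept in the WeakSet
    sig(42)                           # emit: both slots receive 42
    sig.disconnect(log_update)        # only the bound method remains connected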
| 40.30303
| 77
| 0.711654
|
c0a6107c800a66a04c60aac44d02c1c6c6e0baef
| 1,876
|
py
|
Python
|
Day_25/part1.py
|
Uklusi/AdventOfCode2021
|
3d22ace832bfd6c9855b2ebad3bf7f10c4751982
|
[
"MIT"
] | null | null | null |
Day_25/part1.py
|
Uklusi/AdventOfCode2021
|
3d22ace832bfd6c9855b2ebad3bf7f10c4751982
|
[
"MIT"
] | null | null | null |
Day_25/part1.py
|
Uklusi/AdventOfCode2021
|
3d22ace832bfd6c9855b2ebad3bf7f10c4751982
|
[
"MIT"
] | null | null | null |
from AoCUtils import *
writeToLog = False
# writeToLog = True
useExample = False
# useExample = True
result = 0
partNumber = "1"
if writeToLog:
logFile = open("log" + partNumber + ".txt", "w")
else:
logFile = "stdout"
printLog = printLogFactory(logFile)
timer_start(partNumber)
rightMoving = set()
downMoving = set()
inputFileName = ("example.txt" if useExample else "input.txt")
with open(inputFileName, "r") as inputFile:
lines = inputFile.read().strip().split("\n")
for (y, line) in enumerate(lines):
line = line.strip()
for (x, char) in enumerate(line):
if char == ">":
rightMoving.add((x, y))
elif char == "v":
downMoving.add((x, y))
xLimit = len(lines[0])
yLimit = len(lines)
def step(rightMoving, downMoving):
newRightMoving = set()
newDownMoving = set()
hasMoved = False
for (x, y) in rightMoving:
newX = x + 1
if newX == xLimit:
newX = 0
newPos = (newX, y)
if newPos in rightMoving or newPos in downMoving:
newRightMoving.add((x, y))
else:
newRightMoving.add(newPos)
hasMoved = True
for (x, y) in downMoving:
newY = y + 1
if newY == yLimit:
newY = 0
newPos = (x, newY)
if newPos in newRightMoving or newPos in downMoving:
newDownMoving.add((x, y))
else:
newDownMoving.add(newPos)
hasMoved = True
return (newRightMoving, newDownMoving, hasMoved)
flag = True
n = 0
while flag:
n += 1
(rightMoving, downMoving, flag) = step(rightMoving, downMoving)
result = n
timer_stop(partNumber)
with open("output" + partNumber + ".txt", "w") as outputFile:
outputFile.write(str(result))
print(str(result))
if writeToLog:
cast(TextIOWrapper, logFile).close()
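# Worked micro-example (editorial addition, not part of the original solution):
# assuming a 3-wide, 1-tall grid (xLimit = 3, yLimit = 1) with a single
# east-facing cucumber at (2, 0) and no south-facing cucumbers,
# step({(2, 0)}, set()) wraps the cucumber around to (0, 0) and reports that
# something moved, i.e. it returns ({(0, 0)}, set(), True).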
| 23.160494
| 67
| 0.590085
|
d00cd4a6c9eb0410ed12180bd091df6d89ce25c2
| 552
|
py
|
Python
|
example/zoo_gsom.py
|
CDAC-lab/pygsom
|
d81aff2d9163b05a30b74d90ba35a37c42e9274a
|
[
"MIT"
] | 3
|
2020-05-08T01:54:04.000Z
|
2020-08-26T22:54:22.000Z
|
example/zoo_gsom.py
|
CDAC-lab/pygsom
|
d81aff2d9163b05a30b74d90ba35a37c42e9274a
|
[
"MIT"
] | null | null | null |
example/zoo_gsom.py
|
CDAC-lab/pygsom
|
d81aff2d9163b05a30b74d90ba35a37c42e9274a
|
[
"MIT"
] | 2
|
2020-12-16T01:51:25.000Z
|
2021-12-07T17:33:11.000Z
|
import numpy as np
import pandas as pd
import gsom
data_filename = "data/zoo.txt".replace('\\', '/')
if __name__ == '__main__':
np.random.seed(1)
df = pd.read_csv(data_filename)
print(df.shape)
data_training = df.iloc[:, 1:17]
gsom_map = gsom.GSOM(.83, 16, max_radius=4)
gsom_map.fit(data_training.to_numpy(), 100, 50)
df = df.drop(columns=["label"])
map_points = gsom_map.predict(df,"Name")
gsom.plot(map_points, "Name", gsom_map=gsom_map)
map_points.to_csv("gsom.csv", index=False)
print("complete")
| 24
| 52
| 0.65942
|
55c8875257b1ce5df31c2b6fc2f43320a040db50
| 14,757
|
py
|
Python
|
contrib/gitian-build.py
|
funexcoin/funexcoin
|
149c9328f420e3cd711ee63b91f2d51308445da2
|
[
"MIT"
] | null | null | null |
contrib/gitian-build.py
|
funexcoin/funexcoin
|
149c9328f420e3cd711ee63b91f2d51308445da2
|
[
"MIT"
] | null | null | null |
contrib/gitian-build.py
|
funexcoin/funexcoin
|
149c9328f420e3cd711ee63b91f2d51308445da2
|
[
"MIT"
] | 1
|
2020-09-15T08:52:14.000Z
|
2020-09-15T08:52:14.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/funexcoin-core/gitian.sigs.git'])
if not os.path.isdir('funexcoin-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/funexcoin-core/funexcoin-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('funexcoin'):
subprocess.check_call(['git', 'clone', 'https://github.com/funexcoin/funexcoin.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
sys.exit(0)
def build():
global args, workdir
os.makedirs('funexcoin-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-O', 'inputs/osslsigncode-2.0.tar.gz', 'https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz'])
subprocess.check_call(["echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../funexcoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'funexcoin='+args.commit, '--url', 'funexcoin='+args.url, '../funexcoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../funexcoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/funexcoin-*.tar.gz build/out/src/funexcoin-*.tar.gz ../funexcoin-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'funexcoin='+args.commit, '--url', 'funexcoin='+args.url, '../funexcoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../funexcoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/funexcoin-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/funexcoin-*.zip build/out/funexcoin-*.exe build/out/src/funexcoin-*.tar.gz ../funexcoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'funexcoin='+args.commit, '--url', 'funexcoin='+args.url, '../funexcoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../funexcoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/funexcoin-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/funexcoin-*.tar.gz build/out/funexcoin-*.dmg build/out/src/funexcoin-*.tar.gz ../funexcoin-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/funexcoin-' + args.version + '-win-unsigned.tar.gz inputs/funexcoin-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../funexcoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../funexcoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/funexcoin-*win64-setup.exe ../funexcoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/funexcoin-' + args.version + '-osx-unsigned.tar.gz inputs/funexcoin-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../funexcoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../funexcoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/funexcoin-osx-signed.dmg ../funexcoin-binaries/'+args.version+'/funexcoin-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../funexcoin/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../funexcoin/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../funexcoin/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../funexcoin/contrib/gitian-descriptors/gitian-win-signer.yml']):
print('Verifying v'+args.version+' Signed Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../funexcoin/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/funexcoin/funexcoin', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
setup()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.14.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('funexcoin')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/funexcoin')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
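# Illustrative invocations (editorial addition, not part of the original
# script); "satoshi" and "1.0.0" are placeholders for the positional `signer`
# and `version` arguments defined above:
#   ./gitian-build.py --setup satoshi 1.0.0       # prepare the build environment
#   ./gitian-build.py -b --docker satoshi 1.0.0   # unsigned build inside Docker
#   ./gitian-build.py --verify satoshi 1.0.0      # verify sigs in gitian.sigs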
| 56.110266
| 233
| 0.65352
|
befc2ee8ce0b1d7d16a8f38132fc07539b72b66a
| 1,152
|
py
|
Python
|
test/gtest-1.10.0/googlemock/scripts/generator/cpp/utils.py
|
EliSchleifer/yaml-cpp
|
c83fa7ce38fb94a7cb5b85f38d9aef05cbe9ebde
|
[
"MIT"
] | null | null | null |
test/gtest-1.10.0/googlemock/scripts/generator/cpp/utils.py
|
EliSchleifer/yaml-cpp
|
c83fa7ce38fb94a7cb5b85f38d9aef05cbe9ebde
|
[
"MIT"
] | null | null | null |
test/gtest-1.10.0/googlemock/scripts/generator/cpp/utils.py
|
EliSchleifer/yaml-cpp
|
c83fa7ce38fb94a7cb5b85f38d9aef05cbe9ebde
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = "nnorwitz@google.com (Neal Norwitz)"
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
"""Returns the contents of a file."""
try:
fp = open(filename)
try:
return fp.read()
finally:
fp.close()
except IOError:
if print_error:
print("Error reading %s: %s" % (filename, sys.exc_info()[1]))
return None
| 28.097561
| 74
| 0.681424
|
060e073de7cf30a9218cc4dcaf7f2388f2ebc947
| 1,025
|
py
|
Python
|
var/spack/repos/builtin/packages/r-crayon/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/r-crayon/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/r-crayon/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCrayon(RPackage):
"""Colored Terminal Output.
Colored terminal output on terminals that support 'ANSI' color and
highlight codes. It also works in 'Emacs' 'ESS'. 'ANSI' color support is
automatically detected. Colors and highlighting can be combined and nested.
New styles can also be created easily. This package was inspired by the
'chalk' 'JavaScript' project."""
cran = "crayon"
version('1.4.2', sha256='ee34397f643e76e30588068d4c93bd3c9afd2193deacccacb3bffcadf141b857')
version('1.4.1', sha256='08b6e42e748d096960b2f32b7ffe690c25742e29fe14c19d1834cd6ff43029c7')
version('1.3.4', sha256='fc6e9bf990e9532c4fcf1a3d2ce22d8cf12d25a95e4779adfa17713ed836fa68')
version('1.3.2', sha256='9a6b75d63c05fe64baf222f1921330ceb727924bcc5fc2753ff0528d42555e68')
| 42.708333
| 95
| 0.772683
|
a5d1ff8f5003e484f1335a68ed4a4d2be9789fe0
| 2,046
|
py
|
Python
|
maml_rl/envs/navigation.py
|
dragen1860/MAML-Pytorch-RL
|
22c5072b3c58ff8fa97d37380f3493b7be885397
|
[
"MIT"
] | 9
|
2019-10-18T01:30:07.000Z
|
2021-11-29T07:04:51.000Z
|
maml_rl/envs/navigation.py
|
dragen1860/MAML-Pytorch-RL
|
22c5072b3c58ff8fa97d37380f3493b7be885397
|
[
"MIT"
] | 1
|
2019-12-09T13:49:30.000Z
|
2019-12-09T13:49:30.000Z
|
maml_rl/envs/navigation.py
|
dragen1860/MAML-Pytorch-RL
|
22c5072b3c58ff8fa97d37380f3493b7be885397
|
[
"MIT"
] | 8
|
2019-03-21T15:23:31.000Z
|
2021-11-08T13:30:28.000Z
|
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class Navigation2DEnv(gym.Env):
"""2D navigation problems, as described in [1]. The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/maml_examples/point_env_randgoal.py
At each time step, the 2D agent takes an action (its velocity, clipped in
[-0.1, 0.1]), and receives a penalty equal to its L2 distance to the goal
    position (i.e. the reward is `-distance`). The 2D navigation tasks are
generated by sampling goal positions from the uniform distribution
on [-0.5, 0.5]^2.
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
"""
def __init__(self, task={}):
super(Navigation2DEnv, self).__init__()
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(2,), dtype=np.float32)
self.action_space = spaces.Box(low=-0.1, high=0.1,
shape=(2,), dtype=np.float32)
self._task = task
self._goal = task.get('goal', np.zeros(2, dtype=np.float32))
self._state = np.zeros(2, dtype=np.float32)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_tasks(self, num_tasks):
goals = self.np_random.uniform(-0.5, 0.5, size=(num_tasks, 2))
tasks = [{'goal': goal} for goal in goals]
return tasks
def reset_task(self, task):
self._task = task
self._goal = task['goal']
def reset(self, env=True):
self._state = np.zeros(2, dtype=np.float32)
return self._state
def step(self, action):
action = np.clip(action, -0.1, 0.1)
assert self.action_space.contains(action)
self._state = self._state + action
x = self._state[0] - self._goal[0]
y = self._state[1] - self._goal[1]
reward = -np.sqrt(x ** 2 + y ** 2)
done = ((np.abs(x) < 0.01) and (np.abs(y) < 0.01))
return self._state, reward, done, self._task
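# --- Rollout sketch (editorial addition, not part of the original module) ---
# Samples one goal, installs it on the environment and steps with random
# actions; each reward is the negative L2 distance to the sampled goal.
if __name__ == '__main__':
    env = Navigation2DEnv()
    task = env.sample_tasks(num_tasks=1)[0]
    env.reset_task(task)
    state = env.reset()
    for _ in range(100):
        action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        if done:
            break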
| 31.96875
| 116
| 0.675464
|
16e34c3e5a541843393f3d223ee19106a2ef5ce8
| 4,527
|
py
|
Python
|
modules/opnfv/utils/SSHUtils.py
|
kkltcjk/reporting
|
460731b8b2da037159649b02ffed798656dad8a9
|
[
"Apache-2.0"
] | null | null | null |
modules/opnfv/utils/SSHUtils.py
|
kkltcjk/reporting
|
460731b8b2da037159649b02ffed798656dad8a9
|
[
"Apache-2.0"
] | null | null | null |
modules/opnfv/utils/SSHUtils.py
|
kkltcjk/reporting
|
460731b8b2da037159649b02ffed798656dad8a9
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
# Authors: George Paraskevopoulos (geopar@intracom-telecom.com)
# Jose Lausuch (jose.lausuch@ericsson.com)
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import paramiko
import opnfv.utils.OPNFVLogger as OPNFVLogger
import os
logger = OPNFVLogger.Logger('SSHUtils').getLogger()
def get_ssh_client(hostname, username, password=None, proxy=None):
client = None
try:
if proxy is None:
client = paramiko.SSHClient()
else:
client = ProxyHopClient()
client.configure_jump_host(proxy['ip'],
proxy['username'],
proxy['password'])
if client is None:
raise Exception('Could not connect to client')
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname,
username=username,
password=password)
return client
    except Exception as e:
logger.error(e)
return None
def get_file(ssh_conn, src, dest):
try:
sftp = ssh_conn.open_sftp()
sftp.get(src, dest)
return True
    except Exception as e:
logger.error("Error [get_file(ssh_conn, '%s', '%s']: %s" %
(src, dest, e))
return None
def put_file(ssh_conn, src, dest):
try:
sftp = ssh_conn.open_sftp()
sftp.put(src, dest)
return True
    except Exception as e:
logger.error("Error [put_file(ssh_conn, '%s', '%s']: %s" %
(src, dest, e))
return None
class ProxyHopClient(paramiko.SSHClient):
'''
Connect to a remote server using a proxy hop
'''
def __init__(self, *args, **kwargs):
self.logger = OPNFVLogger.Logger("ProxyHopClient").getLogger()
self.proxy_ssh = None
self.proxy_transport = None
self.proxy_channel = None
self.proxy_ip = None
self.proxy_ssh_key = None
self.local_ssh_key = os.path.join(os.getcwd(), 'id_rsa')
super(ProxyHopClient, self).__init__(*args, **kwargs)
def configure_jump_host(self, jh_ip, jh_user, jh_pass,
jh_ssh_key='/root/.ssh/id_rsa'):
self.proxy_ip = jh_ip
self.proxy_ssh_key = jh_ssh_key
self.proxy_ssh = paramiko.SSHClient()
self.proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.proxy_ssh.connect(jh_ip,
username=jh_user,
password=jh_pass)
self.proxy_transport = self.proxy_ssh.get_transport()
def connect(self, hostname, port=22, username='root', password=None,
pkey=None, key_filename=None, timeout=None, allow_agent=True,
look_for_keys=True, compress=False, sock=None, gss_auth=False,
gss_kex=False, gss_deleg_creds=True, gss_host=None,
banner_timeout=None):
try:
if self.proxy_ssh is None:
raise Exception('You must configure the jump '
'host before calling connect')
get_file_res = get_file(self.proxy_ssh,
self.proxy_ssh_key,
self.local_ssh_key)
if get_file_res is None:
                raise Exception('Couldn\'t fetch SSH key from jump host')
proxy_key = (paramiko.RSAKey
.from_private_key_file(self.local_ssh_key))
self.proxy_channel = self.proxy_transport.open_channel(
"direct-tcpip",
(hostname, 22),
(self.proxy_ip, 22))
self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
super(ProxyHopClient, self).connect(hostname,
username=username,
pkey=proxy_key,
sock=self.proxy_channel)
os.remove(self.local_ssh_key)
        except Exception as e:
self.logger.error(e)
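# --- Usage sketch (editorial addition, not part of the original module) ---
# All host addresses, credentials and file paths below are placeholders.
if __name__ == '__main__':
    jump_host = {'ip': '192.0.2.1', 'username': 'jumpuser', 'password': 'jumppass'}
    conn = get_ssh_client('192.0.2.10', 'root', password='secret', proxy=jump_host)
    if conn is not None:
        put_file(conn, '/tmp/local.conf', '/tmp/remote.conf')
        get_file(conn, '/var/log/messages', '/tmp/messages')
        conn.close()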
| 37.413223
| 78
| 0.550254
|
771a0179b84c04caa8d6e45f8796178e92ebd3fc
| 2,755
|
py
|
Python
|
freq_matched_noise.py
|
sunjerry019/RandomCodes
|
4402604aaeee63bb1ce6fa962c496b438bb17e50
|
[
"MIT"
] | null | null | null |
freq_matched_noise.py
|
sunjerry019/RandomCodes
|
4402604aaeee63bb1ce6fa962c496b438bb17e50
|
[
"MIT"
] | null | null | null |
freq_matched_noise.py
|
sunjerry019/RandomCodes
|
4402604aaeee63bb1ce6fa962c496b438bb17e50
|
[
"MIT"
] | null | null | null |
"""
Measure the frequencies coming in through the microphone
Mashup of wire_full.py from pyaudio tests and spectrum.py from Chaco examples
http://healthyalgorithms.com/2013/08/22/dsp-in-python-active-noise-reduction-with-pyaudio/
"""
import pyaudio
import numpy as np
import scipy.signal
CHUNK = 1024*2
WIDTH = 2
DTYPE = np.int16
MAX_INT = 32768.0
CHANNELS = 1
RATE = 11025*1
RECORD_SECONDS = 20
j = complex(0, 1)
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(WIDTH),
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK)
print("* recording")
# initialize filter variables
fir = np.zeros(CHUNK * 2)
fir[:(2*CHUNK)] = 1.
fir /= fir.sum()
fir_last = fir
avg_freq_buffer = np.zeros(CHUNK)
obj = -np.inf
t = 10
# initialize sample buffer
buffer = np.zeros(CHUNK * 2)
#for i in np.arange(RATE / CHUNK * RECORD_SECONDS):
try:
while True:
# read audio
string_audio_data = stream.read(CHUNK)
        audio_data = np.frombuffer(string_audio_data, dtype=DTYPE)
normalized_data = audio_data / MAX_INT
freq_data = np.fft.fft(normalized_data)
# synthesize audio
buffer[CHUNK:] = np.random.randn(CHUNK)
freq_buffer = np.fft.fft(buffer)
freq_fir = np.fft.fft(fir)
freq_synth = freq_fir * freq_buffer
synth = np.real(np.fft.ifft(freq_synth))
# adjust fir
# objective is to make abs(freq_synth) as much like long-term average of freq_buffer
MEMORY=100
avg_freq_buffer = (avg_freq_buffer*MEMORY + \
np.abs(freq_data)) / (MEMORY+1)
obj_last = obj
obj = np.real(np.dot(avg_freq_buffer[1:51], np.abs(freq_synth[1:100:2])) / np.dot(freq_synth[1:100:2], np.conj(freq_synth[1:100:2])))
if obj > obj_last:
fir_last = fir
fir = fir_last.copy()
# adjust filter in frequency space
freq_fir = np.fft.fft(fir)
#t += np.clip(np.random.randint(3)-1, 0, 64)
t = np.random.randint(100)
freq_fir[t] += np.random.randn()*.05
# transform frequency space filter to time space, click-free
fir = np.real(np.fft.ifft(freq_fir))
fir[:CHUNK] *= np.linspace(1., 0., CHUNK)**.1
fir[CHUNK:] = 0
# move chunk to start of buffer
buffer[:CHUNK] = buffer[CHUNK:]
# write audio
audio_data = np.array(np.round_(synth[CHUNK:] * MAX_INT), dtype=DTYPE)
        string_audio_data = audio_data.tobytes()
stream.write(string_audio_data, CHUNK)
except KeyboardInterrupt:
print("\n* done")
stream.stop_stream()
stream.close()
p.terminate()
| 26.747573
| 141
| 0.623956
|
e310209035272a71ae4ef099b05063936c4b31f5
| 2,453
|
py
|
Python
|
test/functional/wallet-encryption.py
|
xagau/placeh
|
bd10a67a39dca5b7d1619c29267b291a410b30ac
|
[
"MIT"
] | 2
|
2018-11-16T18:21:38.000Z
|
2020-12-06T15:23:42.000Z
|
test/functional/wallet-encryption.py
|
xagau/placeh
|
bd10a67a39dca5b7d1619c29267b291a410b30ac
|
[
"MIT"
] | null | null | null |
test/functional/wallet-encryption.py
|
xagau/placeh
|
bd10a67a39dca5b7d1619c29267b291a410b30ac
|
[
"MIT"
] | 1
|
2018-12-10T18:41:56.000Z
|
2018-12-10T18:41:56.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import placehTestFramework
from test_framework.util import (
assert_equal,
assert_raises_jsonrpc,
)
class WalletEncryptionTest(placehTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")
assert_equal(len(privkey), 52)
# Encrypt the wallet
self.nodes[0].node_encrypt_wallet(passphrase)
self.start_node(0)
# Test that the wallet is encrypted
assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_jsonrpc(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_jsonrpc(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
if __name__ == '__main__':
WalletEncryptionTest().main()
| 39.564516
| 136
| 0.709743
|
7a1eaaaae95ca2423447d60e7a8f6a4a17f037d2
| 6,398
|
py
|
Python
|
examples/hardware/plotting.py
|
StanfordASL/soft-robot-control
|
29ade9b7b952e25e639b42767a4f09c87a0e824a
|
[
"MIT"
] | 5
|
2021-03-07T11:42:11.000Z
|
2022-02-28T09:46:05.000Z
|
examples/hardware/plotting.py
|
StanfordASL/soft-robot-control
|
29ade9b7b952e25e639b42767a4f09c87a0e824a
|
[
"MIT"
] | null | null | null |
examples/hardware/plotting.py
|
StanfordASL/soft-robot-control
|
29ade9b7b952e25e639b42767a4f09c87a0e824a
|
[
"MIT"
] | 3
|
2021-01-23T11:09:40.000Z
|
2022-03-02T11:54:57.000Z
|
from os.path import dirname, abspath, join
import numpy as np
from matplotlib import patches
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
import pdb
from sofacontrol.utils import load_data
path = dirname(abspath(__file__))
#############################################
# Problem 1, Figure 8 with constraints
#############################################
# M = 3
# T = 10
# N = 500
# t_target = np.linspace(0, M*T, M*N)
# th = np.linspace(0, M * 2 * np.pi, M*N)
# zf_target = np.zeros((M*N, 6))
# zf_target[:, 3] = -15. * np.sin(th)
# zf_target[:, 4] = 15. * np.sin(2 * th)
# y_ub = 5
# name = 'figure8'
##############################################
# Problem 2, Circle on side
##############################################
M = 3
T = 5
N = 1000
r = 10
t_target = np.linspace(0, M*T, M*N)
th = np.linspace(0, M*2*np.pi, M*N)
x_target = np.zeros(M*N)
y_target = r * np.sin(th)
z_target = r - r * np.cos(th) + 107
zf_target = np.zeros((M*N, 6))
zf_target[:, 3] = x_target
zf_target[:, 4] = y_target
zf_target[:, 5] = z_target
name = 'circle'
# Load SCP data
scp_simdata_file = join(path, name + '_scp.pkl')
scp_data = load_data(scp_simdata_file)
idx = np.argwhere(scp_data['t'] >= 3)[0][0]
t_scp = scp_data['t'][idx:] - scp_data['t'][idx]
z_scp = scp_data['z'][idx:, :]
zhat = scp_data['z_hat'][idx:, :]
u_scp = scp_data['u'][idx:, :]
solve_times_scp = scp_data['info']['solve_times']
real_time_limit_scp = scp_data['info']['rollout_time']
# Load ROMPC data
rompc_simdata_file = join(path, name + '_rompc.pkl')
rompc_data = load_data(rompc_simdata_file)
idx = np.argwhere(rompc_data['t'] >= 3)[0][0]
t_rompc = rompc_data['t'][idx:] - rompc_data['t'][idx]
z_rompc = rompc_data['z'][idx:, :]
u_rompc = rompc_data['u'][idx:, :]
solve_times_rompc = rompc_data['info']['solve_times']
real_time_limit_rompc = rompc_data['info']['rollout_time']
# Load Koopman data
koop_simdata_file = join(path, name + '_koopman.pkl')
koop_data = load_data(koop_simdata_file)
idx = np.argwhere(koop_data['t'] >= 3)[0][0]
t_koop = koop_data['t'][idx:] - koop_data['t'][idx]
z_koop = koop_data['z'][idx:, :]
solve_times_koop = koop_data['info']['solve_times']
real_time_limit_koop = koop_data['info']['rollout_time']
m_w = 30
##################################################
# Plot trajectory as function of time
##################################################
fig2 = plt.figure(figsize=(14, 12), facecolor='w', edgecolor='k')
ax2 = fig2.add_subplot(211)
if name == 'figure8':
ax2.plot(t_rompc, z_rompc[:, 3], 'tab:green', marker='x', markevery=m_w, label='Linear ROMPC', linewidth=1)
ax2.plot(t_koop, z_koop[:, 3], 'tab:orange', marker='^', markevery=m_w, label='Koopman MPC', linewidth=1)
ax2.plot(t_scp, z_scp[:, 3], 'tab:blue', label='Nonlinear ROMPC', linewidth=3)
ax2.plot(t_target, zf_target[:, 3], '--k', alpha=1, linewidth=1, label='Target')
plt.ylabel(r'$x_{ee}$ [mm]', fontsize=14)
else:
ax2.plot(t_rompc, z_rompc[:, 4], 'tab:green', marker='x', markevery=m_w, label='Linear ROMPC', linewidth=1)
ax2.plot(t_koop, z_koop[:, 4], 'tab:orange', marker='^', markevery=m_w, label='Koopman MPC', linewidth=1)
ax2.plot(t_scp, z_scp[:, 4], 'tab:blue', label='Nonlinear ROMPC', linewidth=3)
ax2.plot(t_target, zf_target[:, 4], '--k', alpha=1, linewidth=1, label='Target')
plt.ylabel(r'$y_{ee}$ [mm]', fontsize=14)
ax2.set_xlim([0, 10])
plt.xlabel(r'$t$ [s]', fontsize=14)
plt.legend(loc='best', prop={'size': 14})
ax3 = fig2.add_subplot(212)
if name == 'figure8':
ax3.plot(t_target, y_ub * np.ones_like(t_target), 'r', label='Constraint')
ax3.plot(t_rompc, z_rompc[:, 4], 'tab:green', marker='x', markevery=m_w, label='Linear ROMPC', linewidth=1)
ax3.plot(t_koop, z_koop[:, 4], 'tab:orange', marker='^', markevery=m_w, label='Koopman MPC', linewidth=1)
ax3.plot(t_scp, z_scp[:, 4], 'tab:blue', label='Nonlinear ROMPC', linewidth=3)
ax3.plot(t_target, zf_target[:, 4], '--k', alpha=1, linewidth=1, label='Target')
plt.ylabel(r'$y_{ee}$ [mm]', fontsize=14)
else:
ax3.plot(t_rompc, z_rompc[:, 5], 'tab:green', marker='x', markevery=m_w, label='Linear ROMPC', linewidth=1)
ax3.plot(t_koop, z_koop[:, 5], 'tab:orange', marker='^', markevery=m_w, label='Koopman MPC', linewidth=1)
ax3.plot(t_scp, z_scp[:, 5], 'tab:blue', label='Nonlinear ROMPC', linewidth=3)
ax3.plot(t_target, zf_target[:, 5], '--k', alpha=1, linewidth=1, label='Target')
plt.ylabel(r'$z_{ee}$ [mm]', fontsize=14)
ax3.set_xlim([0, 10])
plt.xlabel(r'$t$ [s]', fontsize=14)
plt.legend(loc='best', prop={'size': 14})
figure_file = join(path, name + '.png')
plt.savefig(figure_file, dpi=300, bbox_inches='tight')
# MSE calculations
# Calculation of desired trajectory
if name == 'figure8':
zf_desired = zf_target.copy()
zf_desired[:, 4] = np.minimum(y_ub, zf_target[:,4])
else:
zf_desired = zf_target.copy()
f = interp1d(t_target, zf_desired, axis=0)
zd_koop = f(t_koop)
zd_scp = f(t_scp)
zd_rompc = f(t_rompc)
if name == 'figure8':
err_koop = (z_koop - zd_koop)[:,3:5]
err_scp = (z_scp - zd_scp)[:,3:5]
err_rompc = (z_rompc - zd_rompc)[:,3:5]
else:
err_koop = (z_koop - zd_koop)[:,4:6]
err_scp = (z_scp - zd_scp)[:,4:6]
err_rompc = (z_rompc - zd_rompc)[:,4:6]
# the inner norm gives the per-sample Euclidean distance; the squared outer norm divided by the number of samples gives the MSE
mse_koop = np.linalg.norm(np.linalg.norm(err_koop, axis=1))**2 / err_koop.shape[0]
mse_rompc = np.linalg.norm(np.linalg.norm(err_rompc, axis=1))**2 / err_rompc.shape[0]
mse_scp = np.linalg.norm(np.linalg.norm(err_scp, axis=1))**2 / err_scp.shape[0]
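# Editorial sanity check (not in the original script): the nested-norm
# expression above equals the mean per-sample squared Euclidean error.
assert np.isclose(mse_scp, np.mean(np.sum(err_scp ** 2, axis=1)))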
print('------ Mean Squared Errors (MSEs)----------')
print('Ours (SCP): {}'.format(mse_scp))
print('Koopman: {}'.format(mse_koop))
print('ROMPC: {}'.format(mse_rompc))
print('-------------Solve times ---------------')
print('Ours: Min: {}, Mean: {} ms, Max: {} s'.format(np.min(solve_times_scp), np.mean(solve_times_scp),
np.max(solve_times_scp)))
print('ROMPC: Min: {}, Mean: {} ms, Max: {} s'.format(np.min(solve_times_rompc), np.mean(solve_times_rompc),
np.max(solve_times_rompc)))
print('Koopman: Min: {}, Mean: {} ms, Max: {} s'.format(np.min(solve_times_koop), np.mean(solve_times_koop),
np.max(solve_times_koop)))
plt.show()
| 39.493827
| 111
| 0.613473
|
84ef9039c5b07a3e7b33deee37ea54b7b4639eca
| 7,779
|
py
|
Python
|
docs/conf.py
|
coagulant/django-waffle
|
4311becf83486cd006797fe4e3edbcfdfc1c75c4
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
coagulant/django-waffle
|
4311becf83486cd006797fe4e3edbcfdfc1c75c4
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
coagulant/django-waffle
|
4311becf83486cd006797fe4e3edbcfdfc1c75c4
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# django-waffle documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 1 17:45:05 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-waffle'
copyright = u'2012, James Socol'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-waffledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-waffle.tex', u'django-waffle Documentation',
u'James Socol', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-waffle', u'django-waffle Documentation',
[u'James Socol'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-waffle', u'django-waffle Documentation',
u'James Socol', 'django-waffle', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.012346
| 80
| 0.714359
|
9adf1032999ed2a2138a49fa9371aba21e618847
| 10,795
|
py
|
Python
|
tests/test_segment.py
|
MarkHoo/rich
|
51889e2e663bbda8d59b2a68f23322f578681cea
|
[
"MIT"
] | 4,407
|
2022-01-05T11:54:59.000Z
|
2022-03-31T23:59:07.000Z
|
tests/test_segment.py
|
MarkHoo/rich
|
51889e2e663bbda8d59b2a68f23322f578681cea
|
[
"MIT"
] | 244
|
2022-01-05T11:50:13.000Z
|
2022-03-31T18:57:55.000Z
|
tests/test_segment.py
|
MarkHoo/rich
|
51889e2e663bbda8d59b2a68f23322f578681cea
|
[
"MIT"
] | 177
|
2022-01-05T12:54:46.000Z
|
2022-03-30T19:57:51.000Z
|
from io import StringIO
import pytest
from rich.segment import ControlType, Segment, SegmentLines, Segments
from rich.style import Style
def test_repr():
assert repr(Segment("foo")) == "Segment('foo')"
home = (ControlType.HOME, 0)
assert (
repr(Segment("foo", None, [home]))
== "Segment('foo', None, [(<ControlType.HOME: 3>, 0)])"
)
def test_line():
assert Segment.line() == Segment("\n")
def test_apply_style():
segments = [Segment("foo"), Segment("bar", Style(bold=True))]
assert Segment.apply_style(segments, None) is segments
assert list(Segment.apply_style(segments, Style(italic=True))) == [
Segment("foo", Style(italic=True)),
Segment("bar", Style(italic=True, bold=True)),
]
def test_split_lines():
lines = [Segment("Hello\nWorld")]
assert list(Segment.split_lines(lines)) == [[Segment("Hello")], [Segment("World")]]
def test_split_and_crop_lines():
assert list(
Segment.split_and_crop_lines([Segment("Hello\nWorld!\n"), Segment("foo")], 4)
) == [
[Segment("Hell"), Segment("\n", None)],
[Segment("Worl"), Segment("\n", None)],
[Segment("foo"), Segment(" ")],
]
def test_adjust_line_length():
line = [Segment("Hello", "foo")]
assert Segment.adjust_line_length(line, 10, style="bar") == [
Segment("Hello", "foo"),
Segment(" ", "bar"),
]
line = [Segment("H"), Segment("ello, World!")]
assert Segment.adjust_line_length(line, 5) == [Segment("H"), Segment("ello")]
line = [Segment("Hello")]
assert Segment.adjust_line_length(line, 5) == line
def test_get_line_length():
assert Segment.get_line_length([Segment("foo"), Segment("bar")]) == 6
def test_get_shape():
assert Segment.get_shape([[Segment("Hello")]]) == (5, 1)
assert Segment.get_shape([[Segment("Hello")], [Segment("World!")]]) == (6, 2)
def test_set_shape():
assert Segment.set_shape([[Segment("Hello")]], 10) == [
[Segment("Hello"), Segment(" ")]
]
assert Segment.set_shape([[Segment("Hello")]], 10, 2) == [
[Segment("Hello"), Segment(" ")],
[Segment(" " * 10)],
]
def test_simplify():
assert list(
Segment.simplify([Segment("Hello"), Segment(" "), Segment("World!")])
) == [Segment("Hello World!")]
assert list(
Segment.simplify(
[Segment("Hello", "red"), Segment(" ", "red"), Segment("World!", "blue")]
)
) == [Segment("Hello ", "red"), Segment("World!", "blue")]
assert list(Segment.simplify([])) == []
def test_filter_control():
control_code = (ControlType.HOME, 0)
segments = [Segment("foo"), Segment("bar", None, (control_code,))]
assert list(Segment.filter_control(segments)) == [Segment("foo")]
assert list(Segment.filter_control(segments, is_control=True)) == [
Segment("bar", None, (control_code,))
]
def test_strip_styles():
segments = [Segment("foo", Style(bold=True))]
assert list(Segment.strip_styles(segments)) == [Segment("foo", None)]
def test_strip_links():
segments = [Segment("foo", Style(bold=True, link="https://www.example.org"))]
assert list(Segment.strip_links(segments)) == [Segment("foo", Style(bold=True))]
def test_remove_color():
segments = [
Segment("foo", Style(bold=True, color="red")),
Segment("bar", None),
]
assert list(Segment.remove_color(segments)) == [
Segment("foo", Style(bold=True)),
Segment("bar", None),
]
def test_is_control():
assert Segment("foo", Style(bold=True)).is_control == False
assert Segment("foo", Style(bold=True), []).is_control == True
assert Segment("foo", Style(bold=True), [(ControlType.HOME, 0)]).is_control == True
def test_segments_renderable():
segments = Segments([Segment("foo")])
assert list(segments.__rich_console__(None, None)) == [Segment("foo")]
segments = Segments([Segment("foo")], new_lines=True)
assert list(segments.__rich_console__(None, None)) == [
Segment("foo"),
Segment.line(),
]
def test_divide():
bold = Style(bold=True)
italic = Style(italic=True)
segments = [
Segment("Hello", bold),
Segment(" World!", italic),
]
assert list(Segment.divide(segments, [])) == []
assert list(Segment.divide([], [1])) == [[]]
assert list(Segment.divide(segments, [1])) == [[Segment("H", bold)]]
assert list(Segment.divide(segments, [1, 2])) == [
[Segment("H", bold)],
[Segment("e", bold)],
]
assert list(Segment.divide(segments, [1, 2, 12])) == [
[Segment("H", bold)],
[Segment("e", bold)],
[Segment("llo", bold), Segment(" World!", italic)],
]
assert list(Segment.divide(segments, [4, 20])) == [
[Segment("Hell", bold)],
[Segment("o", bold), Segment(" World!", italic)],
]
# https://github.com/willmcgugan/rich/issues/1755
def test_divide_complex():
MAP = (
"[on orange4] [on green]XX[on orange4] \n"
" \n"
" \n"
" \n"
" [bright_red on black]Y[on orange4] \n"
"[on green]X[on orange4] [on green]X[on orange4] \n"
" [on green]X[on orange4] [on green]X\n"
"[on orange4] \n"
" [on green]XX[on orange4] \n"
)
from rich.console import Console
from rich.text import Text
text = Text.from_markup(MAP)
console = Console(
color_system="truecolor", width=30, force_terminal=True, file=StringIO()
)
console.print(text)
result = console.file.getvalue()
print(repr(result))
expected = "\x1b[48;5;94m \x1b[0m\x1b[42mXX\x1b[0m\x1b[48;5;94m \x1b[0m\n\x1b[48;5;94m \x1b[0m\n\x1b[48;5;94m \x1b[0m\n\x1b[48;5;94m \x1b[0m\n\x1b[48;5;94m \x1b[0m\x1b[91;40mY\x1b[0m\x1b[91;48;5;94m \x1b[0m\n\x1b[91;42mX\x1b[0m\x1b[91;48;5;94m \x1b[0m\x1b[91;42mX\x1b[0m\x1b[91;48;5;94m \x1b[0m\n\x1b[91;48;5;94m \x1b[0m\x1b[91;42mX\x1b[0m\x1b[91;48;5;94m \x1b[0m\x1b[91;42mX\x1b[0m\n\x1b[91;48;5;94m \x1b[0m\n\x1b[91;48;5;94m \x1b[0m\x1b[91;42mXX\x1b[0m\x1b[91;48;5;94m \x1b[0m\n\n"
assert result == expected
def test_divide_emoji():
bold = Style(bold=True)
italic = Style(italic=True)
segments = [
Segment("Hello", bold),
Segment("💩💩💩", italic),
]
assert list(Segment.divide(segments, [7])) == [
[Segment("Hello", bold), Segment("💩", italic)],
]
assert list(Segment.divide(segments, [8])) == [
[Segment("Hello", bold), Segment("💩 ", italic)],
]
assert list(Segment.divide(segments, [9])) == [
[Segment("Hello", bold), Segment("💩💩", italic)],
]
assert list(Segment.divide(segments, [8, 11])) == [
[Segment("Hello", bold), Segment("💩 ", italic)],
[Segment(" 💩", italic)],
]
assert list(Segment.divide(segments, [9, 11])) == [
[Segment("Hello", bold), Segment("💩💩", italic)],
[Segment("💩", italic)],
]
def test_divide_edge():
segments = [Segment("foo"), Segment("bar"), Segment("baz")]
result = list(Segment.divide(segments, [1, 3, 9]))
print(result)
assert result == [
[Segment("f")],
[Segment("oo")],
[Segment("bar"), Segment("baz")],
]
def test_divide_edge_2():
segments = [
Segment("╭─"),
Segment(
"────── Placeholder ───────",
),
Segment(
"─╮",
),
]
result = list(Segment.divide(segments, [30, 60]))
expected = [segments, []]
print(repr(result))
assert result == expected
@pytest.mark.parametrize(
"text,split,result",
[
("XX", 4, (Segment("XX"), Segment(""))),
("X", 1, (Segment("X"), Segment(""))),
("💩", 1, (Segment(" "), Segment(" "))),
("XY", 1, (Segment("X"), Segment("Y"))),
("💩X", 1, (Segment(" "), Segment(" X"))),
("💩💩", 1, (Segment(" "), Segment(" 💩"))),
("X💩Y", 2, (Segment("X "), Segment(" Y"))),
("X💩YZ", 2, (Segment("X "), Segment(" YZ"))),
("X💩💩Z", 2, (Segment("X "), Segment(" 💩Z"))),
("X💩💩Z", 3, (Segment("X💩"), Segment("💩Z"))),
("X💩💩Z", 4, (Segment("X💩 "), Segment(" Z"))),
("X💩💩Z", 5, (Segment("X💩💩"), Segment("Z"))),
("X💩💩Z", 6, (Segment("X💩💩Z"), Segment(""))),
("XYZABC💩💩", 6, (Segment("XYZABC"), Segment("💩💩"))),
("XYZABC💩💩", 7, (Segment("XYZABC "), Segment(" 💩"))),
("XYZABC💩💩", 8, (Segment("XYZABC💩"), Segment("💩"))),
("XYZABC💩💩", 9, (Segment("XYZABC💩 "), Segment(" "))),
("XYZABC💩💩", 10, (Segment("XYZABC💩💩"), Segment(""))),
("💩💩💩💩💩", 3, (Segment("💩 "), Segment(" 💩💩💩"))),
("💩💩💩💩💩", 4, (Segment("💩💩"), Segment("💩💩💩"))),
("💩X💩Y💩Z💩A💩", 4, (Segment("💩X "), Segment(" Y💩Z💩A💩"))),
("XYZABC", 4, (Segment("XYZA"), Segment("BC"))),
("XYZABC", 5, (Segment("XYZAB"), Segment("C"))),
],
)
def test_split_cells_emoji(text, split, result):
assert Segment(text).split_cells(split) == result
def test_segment_lines_renderable():
lines = [[Segment("hello"), Segment(" "), Segment("world")], [Segment("foo")]]
segment_lines = SegmentLines(lines)
assert list(segment_lines.__rich_console__(None, None)) == [
Segment("hello"),
Segment(" "),
Segment("world"),
Segment("foo"),
]
segment_lines = SegmentLines(lines, new_lines=True)
assert list(segment_lines.__rich_console__(None, None)) == [
Segment("hello"),
Segment(" "),
Segment("world"),
Segment("\n"),
Segment("foo"),
Segment("\n"),
]
def test_align_top():
lines = [[Segment("X")]]
assert Segment.align_top(lines, 3, 1, Style()) == lines
assert Segment.align_top(lines, 3, 3, Style()) == [
[Segment("X")],
[Segment(" ", Style())],
[Segment(" ", Style())],
]
def test_align_middle():
lines = [[Segment("X")]]
assert Segment.align_middle(lines, 3, 1, Style()) == lines
assert Segment.align_middle(lines, 3, 3, Style()) == [
[Segment(" ", Style())],
[Segment("X")],
[Segment(" ", Style())],
]
def test_align_bottom():
lines = [[Segment("X")]]
assert Segment.align_bottom(lines, 3, 1, Style()) == lines
assert Segment.align_bottom(lines, 3, 3, Style()) == [
[Segment(" ", Style())],
[Segment(" ", Style())],
[Segment("X")],
]
| 32.613293
| 671
| 0.533858
|
65efa9649bc7e74ae1b32cfc366dd78155eefab1
| 439
|
py
|
Python
|
python/py_refresh/errors.py
|
star-junk/references
|
5bf8f4eb710ebf953131722efea55d998ea98ed2
|
[
"MIT"
] | null | null | null |
python/py_refresh/errors.py
|
star-junk/references
|
5bf8f4eb710ebf953131722efea55d998ea98ed2
|
[
"MIT"
] | null | null | null |
python/py_refresh/errors.py
|
star-junk/references
|
5bf8f4eb710ebf953131722efea55d998ea98ed2
|
[
"MIT"
] | null | null | null |
def divide(dividend, divisor):
if (divisor == 0):
raise ZeroDivisionError("Divisor cannot be zero ! Ha Ha Ha")
return dividend/divisor
print("welcome to the grade average program")
grades = []
try:
average = divide(sum(grades), len(grades))
except ZeroDivisionError as e:
print("There are no grades yet !", e)
else:
print(f"The average grade is {average}")
finally:
print("See you later !")
| 16.259259
| 68
| 0.651481
|
c9e228b760f3a05d1a2aad4ddc100c66ad2ea340
| 6,456
|
py
|
Python
|
src/pypirun/cli.py
|
yahoo/pypirun
|
77e0cd4d7b7350b6d33879f37fa5c2b04156fa6d
|
[
"BSD-3-Clause"
] | 2
|
2019-08-01T14:27:47.000Z
|
2020-03-15T09:05:34.000Z
|
src/pypirun/cli.py
|
yahoo/pypirun
|
77e0cd4d7b7350b6d33879f37fa5c2b04156fa6d
|
[
"BSD-3-Clause"
] | 14
|
2019-06-06T02:44:38.000Z
|
2019-11-13T18:29:00.000Z
|
src/pypirun/cli.py
|
yahoo/pypirun
|
77e0cd4d7b7350b6d33879f37fa5c2b04156fa6d
|
[
"BSD-3-Clause"
] | 4
|
2020-07-31T17:32:01.000Z
|
2021-09-25T14:23:05.000Z
|
# Copyright 2019, Oath Inc.
# Licensed under the terms of the BSD 3 Clause license. See LICENSE file in project root for terms.
"""
pypirun command line utility
"""
import os
import shutil
import subprocess # nosec
import sys
import tempfile
from .arguments import parse_arguments, ParseError
from .utility import which
def interpreter_version(interpreter):
"""
Get the version from the python interpreter
"""
version = subprocess.check_output([interpreter, '--version'], stderr=subprocess.STDOUT).decode(errors='ignore').strip().split()[-1] # nosec
return version
def interpreter_parent(interpreter):
"""
    Get the parent python interpreter for interpreter (i.e. the interpreter that created the virtualenv if it is an
    interpreter in a virtualenv).
Returns
=======
str:
parent interpreter
"""
try:
real_prefix = subprocess.check_output([interpreter, '-c', 'import sys;print(sys.real_prefix)'], stderr=subprocess.DEVNULL).decode(errors='ignore').strip() # nosec
except subprocess.CalledProcessError: # pragma: no cover
return interpreter
try: # pragma: no cover
major_minor = subprocess.check_output([interpreter, '-c', 'import sys;print(f"{sys.version_info.major}.{sys.version_info.minor}")'], stderr=subprocess.DEVNULL).decode(errors='ignore').strip() # nosec
basename = f'python{major_minor}'
except subprocess.CalledProcessError: # pragma: no cover
basename = 'python3'
bin_dir = os.path.join(real_prefix, 'bin')
interpreter = os.path.join(bin_dir, basename)
return interpreter
def install_and_run(package, command, interpreter, debug=False, no_cache_dir=False, upgrade_setuptools=False, upgrade_pip=False):
"""
Install a package and run a command in a temporary Python virtualenv
Parameters
==========
package: str
        A string containing a comma separated list of packages to install.
command: str
The command to run in the shell once the package is installed
interpreter: str
The python interpreter executable to use to create the virtualenv
debug: bool, optional
Print more useful debug output. Default: False
no_cache_dir: bool, optional
If True, pass the no-cache-dir flag to the pip command to disable pip package caching. Default: False
upgrade_setuptools: bool, optional
Upgrade setuptools after creating the virtualenv but before installing packages. Default: False
upgrade_pip: bool, optional
Upgrade pip after creating the virtualenv but before installing packages. Default: False
"""
packages = package.split(',')
with tempfile.TemporaryDirectory() as tempdir:
venv_dir = os.path.join(tempdir, '.venv')
venv_bin = os.path.join(venv_dir, 'bin')
venv_python = os.path.join(venv_bin, 'python')
venv_pip = os.path.join(venv_bin, 'pip')
pip_args = []
if no_cache_dir: # pragma: no cover
pip_args = ['--no-cache-dir']
# Create venv
try:
output = subprocess.check_output([interpreter, '-m', 'venv', venv_dir]) # nosec
except subprocess.CalledProcessError: # pragma: no cover
print('Failed to create temporary virtualenv using the', interpreter, 'python interpreter')
return 1
if debug:
if output.decode().strip(): # pragma: no cover
print(output.decode().strip())
if not os.path.exists(venv_pip): # pragma: no cover
output = subprocess.check_output([venv_python, '-m', 'pip', 'install', '--force-reinstall'] + pip_args + ['pip'], stderr=subprocess.STDOUT) # nosec
if debug: # pragma: no cover
print(output.decode())
if upgrade_pip: # pragma: no cover
output = subprocess.check_output([venv_pip, 'install', '--upgrade'] + pip_args + ['pip'], stderr=subprocess.STDOUT) # nosec
if debug: # pragma: no cover
print(output.decode())
if upgrade_setuptools: # pragma: no cover
output = subprocess.check_output([venv_pip, 'install', '--upgrade'] + pip_args + ['setuptools'], stderr=subprocess.STDOUT) # nosec
if debug: # pragma: no cover
print(output.decode())
venv_bin_before = set(os.listdir(venv_bin))
# Install the package
try:
output = subprocess.check_output([venv_pip, 'install'] + pip_args + packages, stderr=subprocess.STDOUT) # nosec
except subprocess.CalledProcessError as error: # pragma: no cover
print(error.output.decode(), file=sys.stdout)
return error.returncode
venv_bin_after = set(os.listdir(venv_bin))
if debug: # pragma: no cover
print(output.decode())
print(f'Installed files: {list(venv_bin_after-venv_bin_before)}')
# Run the command
try:
subprocess.check_call(f'{venv_dir}/bin/{command}', shell=True) # nosec
except subprocess.CalledProcessError as error: # pragma: no cover
return error.returncode
return exit_ok() # pragma: no cover
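# Hedged usage sketch (the package and command below are illustrative assumptions,
# not taken from this project's documentation):
#
#     rc = install_and_run(
#         package='black,isort',        # comma separated, as documented above
#         command='black --version',    # executed via the throwaway virtualenv's bin/
#         interpreter=sys.executable,
#         debug=True,
#     )
#     sys.exit(rc)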
def exit_ok():
"""A python implementation of /bin/true that can be used for testing"""
return 0
def main():
"""
Command line interface entrypoint
Returns
=======
int:
return code from running the command
"""
try:
args = parse_arguments()
except ParseError:
return 1
interpreter = args.interpreter
command_file = args.command[0]
command = ' '.join(args.command)
if not interpreter: # pragma: no cover
interpreter = interpreter_parent(sys.executable)
if not interpreter: # pragma: no cover
interpreter = which('python3')
if not interpreter: # pragma: no cover
print('Unable to find python3 interpreter')
return 1
if args.always_install or not shutil.which(command_file):
return install_and_run(package=args.package, command=command, interpreter=interpreter, debug=args.debug, no_cache_dir=args.no_cache_dir, upgrade_setuptools=args.upgrade_setuptools, upgrade_pip=args.upgrade_pip)
try:
subprocess.check_call(command, shell=True) # nosec
except subprocess.CalledProcessError as error:
return error.returncode
return 0 # pragma: no cover
| 37.534884
| 218
| 0.655204
|
15e536b1b3eea1eebce7a628e2270200018ef6ba
| 4,452
|
py
|
Python
|
ex/parser/scanner.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 6
|
2017-04-01T05:30:08.000Z
|
2017-04-05T14:17:40.000Z
|
ex/parser/scanner.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 1
|
2017-04-04T06:47:13.000Z
|
2017-04-04T14:26:32.000Z
|
ex/parser/scanner.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | null | null | null |
'''
Tokenization for the Vim command line.
'''
from VintageousPlus.ex.ex_error import ERR_UNKNOWN_COMMAND
from VintageousPlus.ex.ex_error import VimError
from . import subscanners
from .state import EOF
from .state import ScannerState
from .tokens import TokenComma
from .tokens import TokenDigits
from .tokens import TokenDollar
from .tokens import TokenDot
from .tokens import TokenEof
from .tokens import TokenMark
from .tokens import TokenOffset
from .tokens import TokenPercent
from .tokens import TokenSearchBackward
from .tokens import TokenSearchForward
from .tokens import TokenSemicolon
# TODO: make this a function. We don't need state.
class Scanner(object):
'''
Produces ex command-line tokens from a string.
'''
def __init__(self, source):
self.state = ScannerState(source)
def scan(self):
'''
Generates ex command-line tokens for `source`.
The scanner works its way through the source string by passing the
current state to the next scanning function.
'''
next_func = scan_range
while True:
# We return multiple tokens so that we can work around cyclic imports:
# functions that need to, return TokenEof without having to call
# a function in this module from a separate module.
#
# Keep scanning while we get a scanning function.
(next_func, items) = next_func(self.state)
yield from items
if not next_func:
break
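# Hedged usage sketch (the command line and the exact token sequence are illustrative;
# the scan_* functions below define the precise behaviour):
#
#     tokens = list(Scanner("'a,'b").scan())
#     # roughly: [TokenMark('a'), TokenComma(), TokenMark('b'), TokenEof()]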
def scan_range(state):
'''
Produces tokens found in a command line range.
http://vimdoc.sourceforge.net/htmldoc/cmdline.html#cmdline-ranges
'''
c = state.consume()
if c == EOF:
return None, [TokenEof()]
if c == '.':
state.emit()
return scan_range, [TokenDot()]
if c == '$':
state.emit()
return scan_range, [TokenDollar()]
if c in ',;':
token = TokenComma if c == ',' else TokenSemicolon
state.emit()
return scan_range, [token()]
if c == "'":
return scan_mark(state)
if c in '/?':
return scan_search(state)
if c in '+-':
return scan_offset(state)
if c == '%':
state.emit()
return scan_range, [TokenPercent()]
if c in '\t ':
state.skip_run(' \t')
state.ignore()
if c.isdigit():
return scan_digits(state)
state.backup()
return scan_command, []
def scan_mark(state):
c = state.expect_match(r'[a-zA-Z\[\]()<>]')
return scan_range, [TokenMark(c.group(0))]
def scan_digits(state):
while True:
c = state.consume()
if not c.isdigit():
if c == EOF:
return None, [TokenDigits(state.emit()), TokenEof()]
state.backup()
break
return scan_range, [TokenDigits(state.emit())]
def scan_search(state):
delim = state.source[state.position - 1]
while True:
c = state.consume()
if c == delim:
state.start += 1
state.backup()
content = state.emit()
state.consume()
token = TokenSearchForward if c == '/' else TokenSearchBackward
            return scan_range, [token(content)]
elif c == EOF:
raise ValueError('unclosed search pattern: {0}'.format(state.source))
def scan_offset(state):
offsets = []
to_int = lambda x: int(x, 10)
sign = '-' if state.source[state.position - 1] == '-' else ''
digits = state.expect_match(r'\s*(\d+)')
offsets.append(sign + digits.group(1))
while True:
c = state.consume()
if c == EOF:
state.ignore()
return None, [TokenOffset(list(map(to_int, offsets))), TokenEof()]
if c == '+' or c == '-':
digits = state.expect_match(r'\s*(\d+)')
sign = '-' if state.source[state.position - 1] == '-' else ''
offsets.append(sign + digits.group(1))
continue
if not c.isdigit():
state.backup()
state.ignore()
return scan_range, [TokenOffset(list(map(to_int, offsets)))]
def scan_command(state):
for (pattern, subscanner) in subscanners.patterns.items():
if state.match(pattern):
state.ignore()
return subscanner(state)
state.expect(EOF, lambda: VimError(ERR_UNKNOWN_COMMAND))
return None, [TokenEof()]
| 26.5
| 82
| 0.59389
|
227b465fa7e6b6e412115b3805d1fcc5357f55b2
| 16,748
|
py
|
Python
|
models/rank/bert4rec/net.py
|
LinJayan/DCN_V2_Paddle
|
7dbf99eecf33ee0280908ec25ffe069f33c4e284
|
[
"Apache-2.0"
] | null | null | null |
models/rank/bert4rec/net.py
|
LinJayan/DCN_V2_Paddle
|
7dbf99eecf33ee0280908ec25ffe069f33c4e284
|
[
"Apache-2.0"
] | null | null | null |
models/rank/bert4rec/net.py
|
LinJayan/DCN_V2_Paddle
|
7dbf99eecf33ee0280908ec25ffe069f33c4e284
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT4Rec model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import math
import paddle
import paddle.nn as nn
class BertModel(nn.Layer):
def __init__(self, _emb_size, _n_layer, _n_head, _voc_size,
_max_position_seq_len, _sent_types, hidden_act, _dropout,
_attention_dropout, initializer_range):
super(BertModel, self).__init__()
self._emb_size = _emb_size
self._n_layer = _n_layer
self._n_head = _n_head
self._voc_size = _voc_size
self._max_position_seq_len = _max_position_seq_len
self._sent_types = _sent_types
        if hidden_act == "gelu":
self._hidden_act = nn.GELU()
else:
self._hidden_act = nn.ReLU()
self._dropout = _dropout
self._attention_dropout = _attention_dropout
self._word_emb_name = "word_embedding"
self._pos_emb_name = "pos_embedding"
self._sent_emb_name = "sent_embedding"
self._dtype = "float32"
self._param_initializer = nn.initializer.TruncatedNormal(
std=initializer_range)
self.word_emb = nn.Embedding(
num_embeddings=self._voc_size,
embedding_dim=self._emb_size,
name=self._word_emb_name,
weight_attr=paddle.ParamAttr(initializer=self._param_initializer),
sparse=False)
self.position_emb = nn.Embedding(
num_embeddings=self._max_position_seq_len,
embedding_dim=self._emb_size,
weight_attr=paddle.ParamAttr(
name=self._pos_emb_name, initializer=self._param_initializer),
sparse=False)
self.sent_emb = nn.Embedding(
num_embeddings=self._sent_types,
embedding_dim=self._emb_size,
weight_attr=paddle.ParamAttr(
name=self._sent_emb_name, initializer=self._param_initializer),
sparse=False)
self.enc_pre_process_layer = NormalizeDropLayer(
self._dropout, self._emb_size, name='pre_encoder')
self._enc_out_layer = Encoder(
n_layer=self._n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
attention_dropout=self._attention_dropout,
hidden_act=self._hidden_act,
param_initializer=self._param_initializer,
name='encoder')
self.mask_trans_feat = nn.Linear(
in_features=self._emb_size,
out_features=self._emb_size,
weight_attr=paddle.ParamAttr(
name="mask_lm_trans_fc.w_0",
initializer=self._param_initializer),
bias_attr=paddle.ParamAttr(name='mask_lm_trans_fc.b_0'))
self.mask_trans_act = self._hidden_act
self.mask_post_process_layer = NormalizeLayer(
self._emb_size, name='mask_lm_trans')
self.mask_lm_out_bias = self.create_parameter(
shape=[self._voc_size],
dtype=self._dtype,
attr=paddle.ParamAttr(
name="mask_lm_out_fc.b_0",
initializer=paddle.nn.initializer.Constant(value=0.0)),
is_bias=True)
def forward(self, src_ids, position_ids, sent_ids, input_mask, mask_pos):
emb_out = self.word_emb(src_ids)
position_embs_out = self.position_emb(position_ids)
emb_out = emb_out + position_embs_out
sent_emb_out = self.sent_emb(sent_ids)
emb_out = emb_out + sent_emb_out
emb_out = self.enc_pre_process_layer(emb_out)
if self._dtype == "float16":
input_mask = paddle.cast(x=input_mask, dtype=self._dtype)
else:
input_mask = paddle.cast(x=input_mask, dtype='float32')
self_attn_mask = paddle.matmul(
x=input_mask, y=input_mask, transpose_y=True)
self_attn_mask = paddle.scale(
x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
n_head_self_attn_mask = paddle.stack(
x=[self_attn_mask] * self._n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
self._enc_out = self._enc_out_layer(
enc_input=emb_out, attn_bias=n_head_self_attn_mask)
mask_pos = paddle.cast(x=mask_pos, dtype='int32')
reshaped_emb_out = paddle.reshape(
x=self._enc_out, shape=[-1, self._emb_size])
mask_feat = paddle.gather(x=reshaped_emb_out, index=mask_pos, axis=0)
mask_trans_feat_out = self.mask_trans_feat(mask_feat)
mask_trans_feat_out = self.mask_trans_act(mask_trans_feat_out)
mask_trans_feat_out = self.mask_post_process_layer(
out=mask_trans_feat_out)
for name, param in self.named_parameters():
if name == "word_emb.weight":
y_tensor = param
break
fc_out = paddle.matmul(
x=mask_trans_feat_out, y=y_tensor, transpose_y=True)
fc_out += self.mask_lm_out_bias
return fc_out
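# Hedged construction sketch (toy hyper-parameters chosen for illustration; real values
# would come from a BertConfig JSON like the one parsed at the bottom of this file):
#
#     model = BertModel(
#         _emb_size=64, _n_layer=2, _n_head=4, _voc_size=1000,
#         _max_position_seq_len=200, _sent_types=2, hidden_act="gelu",
#         _dropout=0.1, _attention_dropout=0.1, initializer_range=0.02)
#     fc_out = model(src_ids, position_ids, sent_ids, input_mask, mask_pos)
#     # fc_out has shape [number_of_masked_positions, _voc_size]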
class MultiHeadAttention(nn.Layer):
def __init__(self,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.,
param_initializer=None,
name='multi_head_att'):
super(MultiHeadAttention, self).__init__()
self.q_linear = nn.Linear(
in_features=d_model,
out_features=d_key * n_head,
weight_attr=paddle.ParamAttr(
name=name + '_query_fc.w_0', initializer=param_initializer),
bias_attr=name + '_query_fc.b_0')
self.k_linear = nn.Linear(
in_features=d_model,
out_features=d_key * n_head,
weight_attr=paddle.ParamAttr(
name=name + '_key_fc.w_0', initializer=param_initializer),
bias_attr=name + '_key_fc.b_0')
self.v_linear = nn.Linear(
in_features=d_model,
out_features=d_value * n_head,
weight_attr=paddle.ParamAttr(
name=name + '_value_fc.w_0', initializer=param_initializer),
bias_attr=name + '_value_fc.b_0')
self.out_linear = nn.Linear(
in_features=d_key * n_head,
out_features=d_model,
weight_attr=paddle.ParamAttr(
name=name + '_output_fc.w_0', initializer=param_initializer),
bias_attr=name + '_output_fc.b_0')
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
def forward(self, queries, keys, values, attn_bias):
keys = queries if keys is None else keys
values = keys if values is None else values
q = self.q_linear(queries)
k = self.k_linear(keys)
v = self.v_linear(values)
hidden_size = q.shape[-1]
q = paddle.reshape(
x=q, shape=[0, 0, self.n_head, hidden_size // self.n_head])
q = paddle.transpose(
x=q, perm=[0, 2, 1, 3]
) # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
k = paddle.reshape(
x=k, shape=[0, 0, self.n_head, hidden_size // self.n_head])
k = paddle.transpose(
x=k, perm=[0, 2, 1, 3]
) # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
v = paddle.reshape(
x=v, shape=[0, 0, self.n_head, hidden_size // self.n_head])
v = paddle.transpose(
x=v, perm=[0, 2, 1, 3]
) # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
# scale dot product attention
attention_scores = paddle.matmul(x=q, y=k, transpose_y=True)
product = paddle.multiply(
attention_scores,
paddle.to_tensor(
1.0 / math.sqrt(float(self.d_key)), dtype='float32'))
if attn_bias is not None:
product += attn_bias
weights = nn.functional.softmax(product)
if self.dropout_rate:
weights = nn.functional.dropout(
weights,
p=self.dropout_rate,
mode="upscale_in_train",
training=self.training)
out = paddle.matmul(weights, v)
out = paddle.transpose(out, perm=[0, 2, 1, 3])
out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
out = self.out_linear(out)
return out
class NormalizeLayer(nn.Layer):
def __init__(self, norm_shape=768, name=''):
super(NormalizeLayer, self).__init__()
self.name = name
self.LayerNormal = nn.LayerNorm(
norm_shape,
epsilon=1e-05,
weight_attr=paddle.ParamAttr(
name=self.name + '_layer_norm_scale',
initializer=nn.initializer.Constant(1.)),
bias_attr=paddle.ParamAttr(
name=self.name + '_layer_norm_bias',
initializer=nn.initializer.Constant(0.)))
def forward(self, out):
out_dtype = out.dtype
if out_dtype == paddle.fluid.core.VarDesc.VarType.FP16:
out = paddle.cast(x=out, dtype="float32")
out = self.LayerNormal(out)
if out_dtype == paddle.fluid.core.VarDesc.VarType.FP16:
out = paddle.cast(x=out, dtype="float16")
return out
class NormalizeDropLayer(nn.Layer):
def __init__(self, dropout_rate=0., norm_shape=768, name=''):
super(NormalizeDropLayer, self).__init__()
self.name = name
self.dropout_rate = dropout_rate
self.dropout = nn.Dropout(p=dropout_rate, mode="upscale_in_train")
self.LayerNormal = nn.LayerNorm(
norm_shape,
epsilon=1e-05,
weight_attr=paddle.ParamAttr(
name=self.name + '_layer_norm_scale',
initializer=nn.initializer.Constant(1.)),
bias_attr=paddle.ParamAttr(
name=self.name + '_layer_norm_bias',
initializer=nn.initializer.Constant(0.)))
def forward(self, out):
out_dtype = out.dtype
if out_dtype == paddle.fluid.core.VarDesc.VarType.FP16:
out = paddle.cast(x=out, dtype="float32")
out = self.LayerNormal(out)
if out_dtype == paddle.fluid.core.VarDesc.VarType.FP16:
out = paddle.cast(x=out, dtype="float16")
if self.dropout_rate:
out = self.dropout(out)
return out
class DropResidualNormalizeLayer(nn.Layer):
def __init__(self, dropout_rate=0., norm_shape=768, name=''):
super(DropResidualNormalizeLayer, self).__init__()
self.name = name
self.dropout_rate = dropout_rate
self.dropout = nn.Dropout(p=dropout_rate, mode="upscale_in_train")
self.LayerNormal = nn.LayerNorm(
norm_shape,
epsilon=1e-05,
weight_attr=paddle.ParamAttr(
name=self.name + '_layer_norm_scale',
initializer=nn.initializer.Constant(1.)),
bias_attr=paddle.ParamAttr(
name=self.name + '_layer_norm_bias',
initializer=nn.initializer.Constant(0.)))
def forward(self, out, prev_out=None):
if self.dropout_rate:
out = self.dropout(out)
if prev_out is not None:
out = out + prev_out
out_dtype = out.dtype
if out_dtype == paddle.fluid.core.VarDesc.VarType.FP16:
out = paddle.cast(x=out, dtype="float32")
out = self.LayerNormal(out)
if out_dtype == paddle.fluid.core.VarDesc.VarType.FP16:
out = paddle.cast(x=out, dtype="float16")
return out
class FFN(nn.Layer):
def __init__(self,
d_inner_hid,
d_hid,
hidden_act,
param_initializer=None,
name='ffn'):
super(FFN, self).__init__()
self.fc1 = nn.Linear(
in_features=d_hid,
out_features=d_inner_hid,
weight_attr=paddle.ParamAttr(
name=name + '_fc_0.w_0', initializer=param_initializer),
bias_attr=name + '_fc_0.b_0')
self.hidden_act = hidden_act
self.fc2 = nn.Linear(
in_features=d_inner_hid,
out_features=d_hid,
weight_attr=paddle.ParamAttr(
name=name + '_fc_1.w_0', initializer=param_initializer),
bias_attr=name + '_fc_1.b_0')
def forward(self, x):
hidden = self.fc1(x)
hidden = self.hidden_act(hidden)
out = self.fc2(hidden)
return out
class EncoderLayer(nn.Layer):
def __init__(self,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
attention_dropout,
hidden_act,
param_initializer=None,
name=''):
super(EncoderLayer, self).__init__()
self.multi_head_attn = MultiHeadAttention(
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att')
self.drop_residual_normalize_layer_1 = DropResidualNormalizeLayer(
attention_dropout, norm_shape=d_model, name=name + '_post_att')
self.positionwise_feed_layer = FFN(d_inner_hid,
d_model,
hidden_act,
param_initializer,
name=name + '_ffn')
self.drop_residual_normalize_layer_2 = DropResidualNormalizeLayer(
attention_dropout, norm_shape=d_model, name=name + '_post_ffn')
def forward(self, enc_input, attn_bias):
multi_output = self.multi_head_attn(
queries=enc_input, keys=None, values=None, attn_bias=attn_bias)
attn_output = self.drop_residual_normalize_layer_1(
prev_out=enc_input, out=multi_output)
ffd_output = self.positionwise_feed_layer(attn_output)
out = self.drop_residual_normalize_layer_2(
prev_out=attn_output, out=ffd_output)
return out
class Encoder(nn.Layer):
def __init__(self,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
attention_dropout,
hidden_act,
param_initializer=None,
name=''):
super(Encoder, self).__init__()
self.encoder_layer = nn.LayerList([
EncoderLayer(n_head, d_key, d_value, d_model, d_inner_hid,
attention_dropout, hidden_act, param_initializer,
name + '_layer_' + str(i)) for i in range(n_layer)
])
def forward(self, enc_input, attn_bias):
enc_output = None
for enc in self.encoder_layer:
enc_output = enc(enc_input, attn_bias)
enc_input = enc_output
return enc_output
class BertConfig(object):
""" 根据config_path来读取网络的配置 """
def __init__(self, config_path):
self._config_dict = self._parse(config_path)
def _parse(self, config_path):
try:
with open(config_path) as json_file:
config_dict = json.load(json_file)
except Exception:
raise IOError("Error in parsing bert model config file '%s'" %
config_path)
else:
return config_dict
def __getitem__(self, key):
return self._config_dict[key]
def print_config(self):
for arg, value in sorted(six.iteritems(self._config_dict)):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
| 38.412844
| 79
| 0.595713
|
3db374fa9bae374aee6b7291c7d6712ffc3457e7
| 8,923
|
bzl
|
Python
|
repositories.bzl
|
jiangtaoli2016/grpc-java
|
512e55444e4176602f27fa39c41223e5b6074d59
|
[
"Apache-2.0"
] | null | null | null |
repositories.bzl
|
jiangtaoli2016/grpc-java
|
512e55444e4176602f27fa39c41223e5b6074d59
|
[
"Apache-2.0"
] | null | null | null |
repositories.bzl
|
jiangtaoli2016/grpc-java
|
512e55444e4176602f27fa39c41223e5b6074d59
|
[
"Apache-2.0"
] | null | null | null |
"""External dependencies for grpc-java."""
def grpc_java_repositories(
omit_com_google_api_grpc_google_common_protos=False,
omit_com_google_auth_google_auth_library_credentials=False,
omit_com_google_code_findbugs_jsr305=False,
omit_com_google_code_gson=False,
omit_com_google_errorprone_error_prone_annotations=False,
omit_com_google_guava=False,
omit_com_google_protobuf=False,
omit_com_google_protobuf_java=False,
omit_com_google_protobuf_nano_protobuf_javanano=False,
omit_com_google_truth_truth=False,
omit_com_squareup_okhttp=False,
omit_com_squareup_okio=False,
omit_io_netty_buffer=False,
omit_io_netty_common=False,
omit_io_netty_transport=False,
omit_io_netty_codec=False,
omit_io_netty_codec_socks=False,
omit_io_netty_codec_http=False,
omit_io_netty_codec_http2=False,
omit_io_netty_handler=False,
omit_io_netty_handler_proxy=False,
omit_io_netty_resolver=False,
omit_io_netty_tcnative_boringssl_static=False,
omit_io_opencensus_api=False,
omit_io_opencensus_grpc_metrics=False,
omit_junit_junit=False):
"""Imports dependencies for grpc-java."""
if not omit_com_google_api_grpc_google_common_protos:
com_google_api_grpc_google_common_protos()
if not omit_com_google_auth_google_auth_library_credentials:
com_google_auth_google_auth_library_credentials()
if not omit_com_google_code_findbugs_jsr305:
com_google_code_findbugs_jsr305()
if not omit_com_google_code_gson:
com_google_code_gson()
if not omit_com_google_errorprone_error_prone_annotations:
com_google_errorprone_error_prone_annotations()
if not omit_com_google_guava:
com_google_guava()
if not omit_com_google_protobuf:
com_google_protobuf()
if omit_com_google_protobuf_java:
fail("omit_com_google_protobuf_java is no longer supported and must be not be passed to grpc_java_repositories()")
if not omit_com_google_protobuf_nano_protobuf_javanano:
com_google_protobuf_nano_protobuf_javanano()
if not omit_com_google_truth_truth:
com_google_truth_truth()
if not omit_com_squareup_okhttp:
com_squareup_okhttp()
if not omit_com_squareup_okio:
com_squareup_okio()
if not omit_io_netty_buffer:
io_netty_buffer()
if not omit_io_netty_common:
io_netty_common()
if not omit_io_netty_transport:
io_netty_transport()
if not omit_io_netty_codec:
io_netty_codec()
if not omit_io_netty_codec_socks:
io_netty_codec_socks()
if not omit_io_netty_codec_http:
io_netty_codec_http()
if not omit_io_netty_codec_http2:
io_netty_codec_http2()
if not omit_io_netty_handler:
io_netty_handler()
if not omit_io_netty_handler_proxy:
io_netty_handler_proxy()
if not omit_io_netty_resolver:
io_netty_resolver()
if not omit_io_netty_tcnative_boringssl_static:
io_netty_tcnative_boringssl_static()
if not omit_io_opencensus_api:
io_opencensus_api()
if not omit_io_opencensus_grpc_metrics:
io_opencensus_grpc_metrics()
if not omit_junit_junit:
junit_junit()
native.bind(
name = "guava",
actual = "@com_google_guava_guava//jar",
)
native.bind(
name = "gson",
actual = "@com_google_code_gson_gson//jar",
)
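# Hedged WORKSPACE sketch (the repository label io_grpc_grpc_java and the omitted
# dependency are assumptions for illustration, not prescribed by this file):
#
#     load("@io_grpc_grpc_java//:repositories.bzl", "grpc_java_repositories")
#     grpc_java_repositories(
#         omit_com_google_protobuf = True,  # e.g. when the workspace already defines protobuf
#     )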
def com_google_api_grpc_google_common_protos():
native.maven_jar(
name = "com_google_api_grpc_proto_google_common_protos",
artifact = "com.google.api.grpc:proto-google-common-protos:1.0.0",
sha1 = "86f070507e28b930e50d218ee5b6788ef0dd05e6",
)
def com_google_auth_google_auth_library_credentials():
native.maven_jar(
name = "com_google_auth_google_auth_library_credentials",
artifact = "com.google.auth:google-auth-library-credentials:0.9.0",
sha1 = "8e2b181feff6005c9cbc6f5c1c1e2d3ec9138d46",
)
def com_google_code_findbugs_jsr305():
native.maven_jar(
name = "com_google_code_findbugs_jsr305",
artifact = "com.google.code.findbugs:jsr305:3.0.0",
sha1 = "5871fb60dc68d67da54a663c3fd636a10a532948",
)
def com_google_code_gson():
native.maven_jar(
name = "com_google_code_gson_gson",
artifact = "com.google.code.gson:gson:jar:2.7",
sha1 = "751f548c85fa49f330cecbb1875893f971b33c4e",
)
def com_google_errorprone_error_prone_annotations():
native.maven_jar(
name = "com_google_errorprone_error_prone_annotations",
artifact = "com.google.errorprone:error_prone_annotations:2.1.2",
sha1 = "6dcc08f90f678ac33e5ef78c3c752b6f59e63e0c",
)
def com_google_guava():
native.maven_jar(
name = "com_google_guava_guava",
artifact = "com.google.guava:guava:19.0",
sha1 = "6ce200f6b23222af3d8abb6b6459e6c44f4bb0e9",
)
def com_google_protobuf():
# proto_library rules implicitly depend on @com_google_protobuf//:protoc,
# which is the proto-compiler.
# This statement defines the @com_google_protobuf repo.
native.http_archive(
name = "com_google_protobuf",
sha256 = "1f8b9b202e9a4e467ff0b0f25facb1642727cdf5e69092038f15b37c75b99e45",
strip_prefix = "protobuf-3.5.1",
urls = ["https://github.com/google/protobuf/archive/v3.5.1.zip"],
)
def com_google_protobuf_nano_protobuf_javanano():
native.maven_jar(
name = "com_google_protobuf_nano_protobuf_javanano",
artifact = "com.google.protobuf.nano:protobuf-javanano:3.0.0-alpha-5",
sha1 = "357e60f95cebb87c72151e49ba1f570d899734f8",
)
def com_google_truth_truth():
native.maven_jar(
name = "com_google_truth_truth",
artifact = "com.google.truth:truth:0.36",
sha1 = "7485219d2c1d341097a19382c02bde07e69ff5d2",
)
def com_squareup_okhttp():
native.maven_jar(
name = "com_squareup_okhttp_okhttp",
artifact = "com.squareup.okhttp:okhttp:2.5.0",
sha1 = "4de2b4ed3445c37ec1720a7d214712e845a24636",
)
def com_squareup_okio():
native.maven_jar(
name = "com_squareup_okio_okio",
artifact = "com.squareup.okio:okio:1.13.0",
sha1 = "a9283170b7305c8d92d25aff02a6ab7e45d06cbe",
)
def io_netty_codec_http2():
native.maven_jar(
name = "io_netty_netty_codec_http2",
artifact = "io.netty:netty-codec-http2:4.1.17.Final",
sha1 = "f9844005869c6d9049f4b677228a89fee4c6eab3",
)
def io_netty_buffer():
native.maven_jar(
name = "io_netty_netty_buffer",
artifact = "io.netty:netty-buffer:4.1.17.Final",
sha1 = "fdd68fb3defd7059a7392b9395ee941ef9bacc25",
)
def io_netty_common():
native.maven_jar(
name = "io_netty_netty_common",
artifact = "io.netty:netty-common:4.1.17.Final",
sha1 = "581c8ee239e4dc0976c2405d155f475538325098",
)
def io_netty_transport():
native.maven_jar(
name = "io_netty_netty_transport",
artifact = "io.netty:netty-transport:4.1.17.Final",
sha1 = "9585776b0a8153182412b5d5366061ff486914c1",
)
def io_netty_codec():
native.maven_jar(
name = "io_netty_netty_codec",
artifact = "io.netty:netty-codec:4.1.17.Final",
sha1 = "1d00f56dc9e55203a4bde5aae3d0828fdeb818e7",
)
def io_netty_codec_socks():
native.maven_jar(
name = "io_netty_netty_codec_socks",
artifact = "io.netty:netty-codec-socks:4.1.17.Final",
sha1 = "a159bf1f3d5019e0d561c92fbbec8400967471fa",
)
def io_netty_codec_http():
native.maven_jar(
name = "io_netty_netty_codec_http",
artifact = "io.netty:netty-codec-http:4.1.17.Final",
sha1 = "251d7edcb897122b9b23f24ff793cd0739056b9e",
)
def io_netty_handler():
native.maven_jar(
name = "io_netty_netty_handler",
artifact = "io.netty:netty-handler:4.1.17.Final",
sha1 = "18c40ffb61a1d1979eca024087070762fdc4664a",
)
def io_netty_handler_proxy():
native.maven_jar(
name = "io_netty_netty_handler_proxy",
artifact = "io.netty:netty-handler-proxy:4.1.17.Final",
sha1 = "9330ee60c4e48ca60aac89b7bc5ec2567e84f28e",
)
def io_netty_resolver():
native.maven_jar(
name = "io_netty_netty_resolver",
artifact = "io.netty:netty-resolver:4.1.17.Final",
sha1 = "8f386c80821e200f542da282ae1d3cde5cad8368",
)
def io_netty_tcnative_boringssl_static():
native.maven_jar(
name = "io_netty_netty_tcnative_boringssl_static",
artifact = "io.netty:netty-tcnative-boringssl-static:2.0.5.Final",
sha1 = "321c1239ceb3faec04531ffcdeb1bc8e85408b12",
)
def io_opencensus_api():
native.maven_jar(
name = "io_opencensus_opencensus_api",
artifact = "io.opencensus:opencensus-api:0.11.0",
sha1 = "c1ff1f0d737a689d900a3e2113ddc29847188c64",
)
def io_opencensus_grpc_metrics():
native.maven_jar(
name = "io_opencensus_opencensus_contrib_grpc_metrics",
artifact = "io.opencensus:opencensus-contrib-grpc-metrics:0.11.0",
sha1 = "d57b877f1a28a613452d45e35c7faae5af585258",
)
def junit_junit():
native.maven_jar(
name = "junit_junit",
artifact = "junit:junit:4.12",
sha1 = "2973d150c0dc1fefe998f834810d68f278ea58ec",
)
| 32.926199
| 118
| 0.750981
|
e582863d4c0dea4eb0c033514bd736878628baec
| 4,559
|
py
|
Python
|
tests/py3/unit/test_type_hint.py
|
cesartalves/python-cdi
|
a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2
|
[
"BSD-3-Clause"
] | 10
|
2017-02-02T19:23:12.000Z
|
2020-11-18T05:37:10.000Z
|
tests/py3/unit/test_type_hint.py
|
cesartalves/python-cdi
|
a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2
|
[
"BSD-3-Clause"
] | 34
|
2017-07-29T21:03:20.000Z
|
2021-07-01T13:35:31.000Z
|
tests/py3/unit/test_type_hint.py
|
cesartalves/python-cdi
|
a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2
|
[
"BSD-3-Clause"
] | 1
|
2019-06-05T14:45:36.000Z
|
2019-06-05T14:45:36.000Z
|
# -*- encoding: utf-8 -*-
import unittest
from pycdi.core import CDIContainer, DEFAULT_CONTAINER
from pycdi import Producer, Inject
from pycdi.shortcuts import new, call
SOME_STRING = 'some_string'
ANOTHER_STRING = 'another_string'
@Producer()
def get_some_string() -> str:
return SOME_STRING
@Producer(_context='another')
def get_another() -> str:
return ANOTHER_STRING
@Inject()
def function_with_injection(some_string: str) -> str:
return some_string
@Inject(some_string='another')
def another_function_with_injection(some_string: str) -> str:
return some_string
class SimpleCDITest(unittest.TestCase):
def test_default_str_producer(self):
expected = SOME_STRING
result = new(str)
self.assertEqual(expected, result)
def test_another_str_producer(self):
expected = ANOTHER_STRING
result = new(str, context='another')
self.assertEqual(expected, result)
def test_injected_string(self):
expected = SOME_STRING
result = call(function_with_injection)
self.assertEqual(expected, result)
def test_another_injected_string(self):
expected = ANOTHER_STRING
result = call(another_function_with_injection)
self.assertEqual(expected, result)
class BaseClass(object):
def do_something(self):
raise NotImplementedError()
class SubclassA(BaseClass):
def do_something(self):
pass
class SubclassB(BaseClass):
def do_something(self):
pass
@Producer(BaseClass)
def get_subclass_a() -> SubclassA:
return SubclassA()
@Producer(_context='another')
def get_subclass_b() -> SubclassB:
return SubclassB()
class CDITest(unittest.TestCase):
def test_base_class(self):
with self.assertRaises(NotImplementedError):
base = BaseClass()
base.do_something()
def test_default_subclass(self):
expected = SubclassA
result = type(new(BaseClass))
self.assertEqual(expected, result)
def test_another_subclass(self):
expected = SubclassB
result = type(new(BaseClass, context='another'))
self.assertEqual(expected, result)
@Inject(another_string='another', b='another')
class ComplexClass(object):
def __init__(self, some_string: str, another_string: str, a: BaseClass, b: BaseClass):
self.a = a
self.b = b
self.some_string = some_string
self.another_string = another_string
@Inject()
def method_with_injection(self, test: unittest.TestCase):
test.assertEqual(self.some_string, SOME_STRING)
test.assertEqual(self.another_string, ANOTHER_STRING)
class ClassInjectTest(unittest.TestCase):
def test_init_complex_class(self):
complex_class = new(ComplexClass)
self.assertIsInstance(complex_class, ComplexClass)
self.assertIsNotNone(complex_class)
self.assertEqual(type(complex_class.a), SubclassA)
self.assertEqual(type(complex_class.b), SubclassB)
self.assertEqual(complex_class.some_string, SOME_STRING)
self.assertEqual(complex_class.another_string, ANOTHER_STRING)
complex_class.a.do_something()
complex_class.b.do_something()
def test_method_with_injection(self):
DEFAULT_CONTAINER.register_instance(self)
complex_class = new(ComplexClass)
call(complex_class.method_with_injection)
class SelfInjectTest(unittest.TestCase):
def test_simple_function(self):
@Inject()
def function(container: CDIContainer):
self.assertIsNotNone(container)
self.assertIsInstance(container, CDIContainer)
DEFAULT_CONTAINER.call(function)
def test_simple_class(self):
@Inject()
class Class(object):
def __init__(self, container: CDIContainer):
self.container = container
obj = DEFAULT_CONTAINER.produce(Class)
self.assertIsNotNone(obj.container)
self.assertIsInstance(obj.container, CDIContainer)
self.assertEqual(DEFAULT_CONTAINER, obj.container)
def test_subclass(self):
@Inject()
class WithContainer(object):
def __init__(self, container: CDIContainer):
self.cdi = container
class Subclass(WithContainer):
def __init__(self, *args, **kwargs):
super(Subclass, self).__init__(*args, **kwargs)
obj = DEFAULT_CONTAINER.produce(Subclass)
self.assertIsNotNone(obj.cdi)
self.assertIsInstance(obj.cdi, CDIContainer)
| 28.49375
| 90
| 0.689625
|
48c9e222654d89a8db03854738cd428ed78c21e6
| 9,630
|
py
|
Python
|
bdd100k/eval/mot.py
|
Celeven1996/bdd100k
|
3b49d1b25f903ea9f2465590b3b7be115b6f0cb5
|
[
"BSD-3-Clause"
] | null | null | null |
bdd100k/eval/mot.py
|
Celeven1996/bdd100k
|
3b49d1b25f903ea9f2465590b3b7be115b6f0cb5
|
[
"BSD-3-Clause"
] | null | null | null |
bdd100k/eval/mot.py
|
Celeven1996/bdd100k
|
3b49d1b25f903ea9f2465590b3b7be115b6f0cb5
|
[
"BSD-3-Clause"
] | null | null | null |
"""BDD100K tracking evaluation with CLEAR MOT metrics."""
import time
from multiprocessing import Pool
from typing import List, Tuple, Union
import motmetrics as mm
import numpy as np
import pandas as pd
from ..common.logger import logger
from ..common.typing import DictAny
METRIC_MAPS = {
"idf1": "IDF1",
"mota": "MOTA",
"motp": "MOTP",
"num_false_positives": "FP",
"num_misses": "FN",
"num_switches": "IDSw",
"mostly_tracked": "MT",
"partially_tracked": "PT",
"mostly_lost": "ML",
"num_fragmentations": "FM",
}
SUPER_CLASSES = {
"HUMAN": ["pedestrian", "rider"],
"VEHICLE": ["car", "truck", "bus", "train"],
"BIKE": ["motorcycle", "bicycle"],
}
CLASSES = [c for cs in SUPER_CLASSES.values() for c in cs]
IGNORE_CLASSES = ["trailer", "other person", "other vehicle"]
def parse_objects(objects: List[DictAny]) -> List[np.ndarray]:
"""Parse objects under Scalable formats."""
bboxes, labels, ids, ignore_bboxes = [], [], [], []
for obj in objects:
bbox = [
obj["box2d"]["x1"],
obj["box2d"]["y1"],
obj["box2d"]["x2"] - obj["box2d"]["x1"],
obj["box2d"]["y2"] - obj["box2d"]["y1"],
]
if obj["category"] in CLASSES:
if "attributes" in obj and obj["attributes"].get("Crowd", False):
ignore_bboxes.append(bbox)
else:
bboxes.append(bbox)
labels.append(CLASSES.index(obj["category"]))
ids.append(obj["id"])
elif obj["category"] in IGNORE_CLASSES:
ignore_bboxes.append(bbox)
else:
raise KeyError("Unknown category.")
return list(map(np.array, [bboxes, labels, ids, ignore_bboxes]))
def intersection_over_area(preds: np.ndarray, gts: np.ndarray) -> np.ndarray:
"""Returns the intersection over the area of the predicted box."""
out = np.zeros((len(preds), len(gts)))
for i, p in enumerate(preds):
for j, g in enumerate(gts):
x1, x2 = max(p[0], g[0]), min(p[0] + p[2], g[0] + g[2])
y1, y2 = max(p[1], g[1]), min(p[1] + p[3], g[1] + g[3])
out[i][j] = max(x2 - x1, 0) * max(y2 - y1, 0) / float(p[2] * p[3])
return out
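# Worked example (illustrative numbers): for a predicted box (0, 0, 10, 10) and an
# ignore region (0, 0, 20, 20), both in (x, y, w, h) form, the intersection area is
# 100 and the prediction's own area is 100, so the entry is 1.0; disjoint boxes give 0.0.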
def acc_single_video(
gts: List[DictAny],
results: List[DictAny],
iou_thr: float = 0.5,
ignore_iof_thr: float = 0.5,
) -> List[mm.MOTAccumulator]:
"""Accumulate results for one video."""
num_classes = len(CLASSES)
assert len(gts) == len(results)
gts = sorted(gts, key=lambda x: int(x["index"]))
results = sorted(results, key=lambda x: int(x["index"]))
accs = [mm.MOTAccumulator(auto_id=True) for i in range(num_classes)]
for gt, result in zip(gts, results):
assert gt["index"] == result["index"]
gt_bboxes, gt_labels, gt_ids, gt_ignores = parse_objects(gt["labels"])
pred_bboxes, pred_labels, pred_ids, _ = parse_objects(result["labels"])
for i in range(num_classes):
gt_inds, pred_inds = gt_labels == i, pred_labels == i
gt_bboxes_c, gt_ids_c = gt_bboxes[gt_inds], gt_ids[gt_inds]
pred_bboxes_c, pred_ids_c = (
pred_bboxes[pred_inds],
pred_ids[pred_inds],
)
if gt_bboxes_c.shape[0] == 0 and pred_bboxes_c.shape[0] != 0:
distances = np.full((0, pred_bboxes_c.shape[0]), np.nan)
elif gt_bboxes_c.shape[0] != 0 and pred_bboxes_c.shape[0] == 0:
distances = np.full((gt_bboxes_c.shape[0], 0), np.nan)
else:
distances = mm.distances.iou_matrix(
gt_bboxes_c, pred_bboxes_c, max_iou=1 - iou_thr
)
if gt_ignores.shape[0] > 0:
# 1. assign gt and preds
                fps = np.ones(pred_bboxes_c.shape[0]).astype(bool)
le, ri = mm.lap.linear_sum_assignment(distances)
for m, n in zip(le, ri):
if not np.isfinite(distances[m, n]):
continue
fps[n] = False
# 2. ignore by iof
iofs = intersection_over_area(pred_bboxes_c, gt_ignores)
ignores = (iofs > ignore_iof_thr).any(axis=1)
# 3. filter preds
valid_inds = ~(fps & ignores)
pred_ids_c = pred_ids_c[valid_inds]
distances = distances[:, valid_inds]
if distances.shape != (0, 0):
accs[i].update(gt_ids_c, pred_ids_c, distances)
return accs
def aggregate_accs(
accumulators: List[List[mm.MOTAccumulator]],
) -> Tuple[List[List[str]], List[List[mm.MOTAccumulator]], List[str]]:
"""Aggregate the results of the entire dataset."""
# accs for each class
items = CLASSES.copy()
    names: List[List[str]] = [[] for _ in CLASSES]
    accs: List[List[mm.MOTAccumulator]] = [[] for _ in CLASSES]
for video_ind, _accs in enumerate(accumulators):
for cls_ind, acc in enumerate(_accs):
if (
len(acc._events["Type"]) # pylint: disable=protected-access
== 0
):
continue
name = f"{CLASSES[cls_ind]}_{video_ind}"
names[cls_ind].append(name)
accs[cls_ind].append(acc)
# super categories
for super_cls, classes in SUPER_CLASSES.items():
items.append(super_cls)
names.append([n for c in classes for n in names[CLASSES.index(c)]])
accs.append([a for c in classes for a in accs[CLASSES.index(c)]])
# overall
items.append("OVERALL")
names.append([n for name in names[: len(CLASSES)] for n in name])
accs.append([a for acc in accs[: len(CLASSES)] for a in acc])
return names, accs, items
def evaluate_single_class(
names: List[str], accs: List[mm.MOTAccumulator]
) -> List[Union[float, int]]:
"""Evaluate results for one class."""
mh = mm.metrics.create()
summary = mh.compute_many(
accs, names=names, metrics=METRIC_MAPS.keys(), generate_overall=True
)
results = [v["OVERALL"] for k, v in summary.to_dict().items()]
motp_ind = list(METRIC_MAPS).index("motp")
if np.isnan(results[motp_ind]):
num_dets = mh.compute_many(
accs,
names=names,
metrics=["num_detections"],
generate_overall=True,
)
sum_motp = (summary["motp"] * num_dets["num_detections"]).sum()
motp = mm.math_util.quiet_divide(
sum_motp, num_dets["num_detections"]["OVERALL"]
)
results[motp_ind] = float(1 - motp)
return results
def render_results(
summaries: List[List[Union[float, int]]],
items: List[str],
metrics: List[str],
) -> DictAny:
"""Render the evaluation results."""
eval_results = pd.DataFrame(columns=metrics)
# category, super-category and overall results
for i, item in enumerate(items):
eval_results.loc[item] = summaries[i]
dtypes = {m: type(d) for m, d in zip(metrics, summaries[0])}
# average results
avg_results: List[Union[int, float]] = []
for i, m in enumerate(metrics):
v = np.array([s[i] for s in summaries[: len(CLASSES)]])
v = np.nan_to_num(v, nan=0)
if dtypes[m] == int:
avg_results.append(int(v.sum()))
elif dtypes[m] == float:
avg_results.append(float(v.mean()))
else:
raise TypeError()
eval_results.loc["AVERAGE"] = avg_results
eval_results = eval_results.astype(dtypes)
strsummary = mm.io.render_summary(
eval_results,
formatters=mm.metrics.create().formatters,
namemap=METRIC_MAPS,
)
strsummary = strsummary.split("\n")
assert len(strsummary) == len(CLASSES) + len(SUPER_CLASSES) + 3
split_line = "-" * len(strsummary[0])
strsummary.insert(1, split_line)
strsummary.insert(2 + len(CLASSES), split_line)
strsummary.insert(3 + len(CLASSES) + len(SUPER_CLASSES), split_line)
strsummary = "".join([f"{s}\n" for s in strsummary])
strsummary = "\n" + strsummary
logger.info(strsummary)
outputs: DictAny = dict()
for i, item in enumerate(items[len(CLASSES) :], len(CLASSES)):
outputs[item] = dict()
for j, metric in enumerate(METRIC_MAPS.values()):
outputs[item][metric] = summaries[i][j]
outputs["OVERALL"]["mIDF1"] = eval_results.loc["AVERAGE"]["idf1"]
outputs["OVERALL"]["mMOTA"] = eval_results.loc["AVERAGE"]["mota"]
outputs["OVERALL"]["mMOTP"] = eval_results.loc["AVERAGE"]["motp"]
return outputs
def evaluate_mot(
gts: List[List[DictAny]],
results: List[List[DictAny]],
iou_thr: float = 0.5,
ignore_iof_thr: float = 0.5,
nproc: int = 4,
) -> DictAny:
"""Evaluate CLEAR MOT metrics for BDD100K."""
logger.info("BDD100K tracking evaluation with CLEAR MOT metrics.")
t = time.time()
assert len(gts) == len(results)
metrics = list(METRIC_MAPS.keys())
logger.info("accumulating...")
pool = Pool(nproc)
accs = pool.starmap(
acc_single_video,
zip(
gts,
results,
[iou_thr for _ in range(len(gts))],
[ignore_iof_thr for _ in range(len(gts))],
),
)
names, accs, items = aggregate_accs(accs)
logger.info("evaluating...")
summaries = pool.starmap(evaluate_single_class, zip(names, accs))
pool.close()
logger.info("rendering...")
eval_results = render_results(summaries, items, metrics)
t = time.time() - t
logger.info("evaluation finishes with %.1f s.", t)
return eval_results
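# Hedged usage sketch (how gts/results are loaded is an assumption; each entry just
# needs to be one list of per-frame label dicts per video, in the format parsed by
# parse_objects above):
#
#     metrics = evaluate_mot(gts, results, iou_thr=0.5, nproc=8)
#     print(metrics["OVERALL"]["MOTA"], metrics["OVERALL"]["mMOTA"])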
| 36.203008
| 79
| 0.58785
|
df6b339b53bd915863d54449fc91d62f17b8892d
| 3,797
|
py
|
Python
|
py/server/tests/test_wrapper.py
|
lbooker42/deephaven-core
|
2d04563f18ae914754b28041475c02770e57af15
|
[
"MIT"
] | null | null | null |
py/server/tests/test_wrapper.py
|
lbooker42/deephaven-core
|
2d04563f18ae914754b28041475c02770e57af15
|
[
"MIT"
] | null | null | null |
py/server/tests/test_wrapper.py
|
lbooker42/deephaven-core
|
2d04563f18ae914754b28041475c02770e57af15
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
import unittest
import jpy
from deephaven._wrapper import JObjectWrapper
from tests.testbase import BaseTestCase
def alpha():
return MyObject(0, "ALPHA")
def beta():
return MyObject(1, "BETA")
def charlie():
return MyObject(2, "CHARLIE")
def delta():
return MyObject(3, "DELTA")
def other():
return Other(0, "ALPHA")
class MyObject(JObjectWrapper):
j_object_type = jpy.get_type("io.deephaven.integrations.pyserver.wrapper.MyObject")
def __init__(self, hash : int, s : str):
self._j_my_object = MyObject.j_object_type(hash, s)
@property
def j_object(self) -> jpy.JType:
return self._j_my_object
class Other(JObjectWrapper):
j_object_type = jpy.get_type("io.deephaven.integrations.pyserver.wrapper.MyObject")
def __init__(self, hash : int, s : str):
self._j_my_object = Other.j_object_type(hash, s)
@property
def j_object(self) -> jpy.JType:
return self._j_my_object
class WrapperTestCase(BaseTestCase):
def test_repr(self):
regex = r"^tests.test_wrapper.MyObject\(io.deephaven.integrations.pyserver.wrapper.MyObject\(objectRef=0x.+\)\)$"
self.assertRegex(repr(alpha()), regex)
self.assertRegex(repr(beta()), regex)
self.assertRegex(repr(charlie()), regex)
self.assertRegex(repr(delta()), regex)
def test_str(self):
self.assertEqual(str(alpha()), "ALPHA")
self.assertEqual(str(beta()), "BETA")
self.assertEqual(str(charlie()), "CHARLIE")
self.assertEqual(str(delta()), "DELTA")
def test_hash(self):
self.assertEqual(hash(alpha()), 0)
self.assertEqual(hash(beta()), 1)
self.assertEqual(hash(charlie()), 2)
self.assertEqual(hash(delta()), 3)
def test_eq(self):
self.assertTrue(alpha() == alpha())
self.assertTrue(beta() == beta())
self.assertTrue(charlie() == charlie())
self.assertTrue(delta() == delta())
def test_ne(self):
self.assertFalse(alpha() != alpha())
self.assertTrue(alpha() != beta())
self.assertTrue(alpha() != charlie())
self.assertTrue(alpha() != delta())
def test_lt(self):
self.assertFalse(alpha() < alpha())
self.assertTrue(alpha() < beta())
self.assertTrue(beta() < charlie())
self.assertTrue(charlie() < delta())
def test_le(self):
self.assertTrue(alpha() <= alpha())
self.assertTrue(beta() <= beta())
self.assertTrue(charlie() <= charlie())
self.assertTrue(delta() <= delta())
self.assertTrue(alpha() <= beta())
self.assertTrue(beta() <= charlie())
self.assertTrue(charlie() <= delta())
def test_gt(self):
self.assertFalse(alpha() > alpha())
self.assertFalse(alpha() > beta())
self.assertFalse(beta() > charlie())
self.assertFalse(charlie() > delta())
def test_ge(self):
self.assertTrue(alpha() >= alpha())
self.assertTrue(beta() >= beta())
self.assertTrue(charlie() >= charlie())
self.assertTrue(delta() >= delta())
self.assertFalse(alpha() >= beta())
self.assertFalse(beta() >= charlie())
self.assertFalse(charlie() >= delta())
def test_incompatible_types(self):
self.assertFalse(alpha() == other())
self.assertTrue(alpha() != other())
with self.assertRaises(TypeError):
_ = alpha() < other()
with self.assertRaises(TypeError):
_ = alpha() <= other()
with self.assertRaises(TypeError):
_ = alpha() > other()
with self.assertRaises(TypeError):
_ = alpha() >= other()
if __name__ == "__main__":
unittest.main()
| 29.434109
| 121
| 0.609692
|
9ad7d35224902d6de1c71d12280f9898461eb700
| 385
|
py
|
Python
|
tamu/asgi.py
|
BlessedAssurance/RecipeBe
|
cd61ce2f6336e817ead77f16e7660f9ec70a71ca
|
[
"MIT"
] | null | null | null |
tamu/asgi.py
|
BlessedAssurance/RecipeBe
|
cd61ce2f6336e817ead77f16e7660f9ec70a71ca
|
[
"MIT"
] | 6
|
2021-03-30T13:56:55.000Z
|
2021-09-22T19:21:32.000Z
|
tamu/asgi.py
|
Mantongash/tamu_backend
|
542affccfe0f53e729acbf7ce5931c22e2b6892f
|
[
"MIT"
] | 1
|
2020-06-17T18:51:47.000Z
|
2020-06-17T18:51:47.000Z
|
"""
ASGI config for tamu project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tamu.settings')
application = get_asgi_application()
| 22.647059
| 78
| 0.781818
|
fe5412d2dbd5e49bd7565e72a7ba88a428dc6333
| 499
|
py
|
Python
|
yandex-contest/backend-school-2022/a.py
|
kirilllapushinskiy/code
|
8deea136de8d4559f7f2dad26005611b0e51790b
|
[
"MIT"
] | null | null | null |
yandex-contest/backend-school-2022/a.py
|
kirilllapushinskiy/code
|
8deea136de8d4559f7f2dad26005611b0e51790b
|
[
"MIT"
] | null | null | null |
yandex-contest/backend-school-2022/a.py
|
kirilllapushinskiy/code
|
8deea136de8d4559f7f2dad26005611b0e51790b
|
[
"MIT"
] | null | null | null |
S = input()
Q = input()
rate = {}
answers = {}
for i in range(len(S)):
if S[i] not in rate:
rate[S[i]] = 0
if S[i] == Q[i]:
answers[i] = 'correct'
else:
rate[S[i]] += 1
answers[i] = ''
for i in range(len(Q)):
if answers[i]:
continue
else:
if Q[i] in rate and rate[Q[i]]:
answers[i] = 'present'
rate[Q[i]] -= 1
else:
answers[i] = 'absent'
for j in range(len(S)):
print(answers[j])
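# Worked example: with the secret S = "abcde" and the guess Q = "edcba", position 2
# ('c') is marked correct in the first pass; every other guessed letter still has a
# positive count in rate, so the printed answers are present, present, correct,
# present, present.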
| 18.481481
| 39
| 0.44489
|
3d395b009903566caffa6fd8bc41ffb49ad06e80
| 2,208
|
py
|
Python
|
nadine/management/commands/billing_batch_run.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | 1
|
2019-08-15T00:10:38.000Z
|
2019-08-15T00:10:38.000Z
|
nadine/management/commands/billing_batch_run.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | null | null | null |
nadine/management/commands/billing_batch_run.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from django.core.management.base import BaseCommand
from nadine.models.billing import BillingBatch, UserBill
class Command(BaseCommand):
requires_system_checks = True
help = "Runs the billing batch."
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument(
'--delete-open',
action='store_true',
dest='delete-open',
default=False,
help='Delete all open bills',
)
parser.add_argument(
'--start',
default=None,
help='Start date for batch run',
)
parser.add_argument(
'--end',
default=None,
help='End date for batch run',
)
def handle(self, *args, **options):
start_date = end_date = None
if options['delete-open']:
open_bills = UserBill.objects.open().order_by('period_start')
if open_bills:
print("Deleting Open Bills...")
start_date = open_bills.first().period_start
for bill in open_bills:
print("Deleting %s" % bill)
bill.delete()
if options['start']:
start_date = datetime.strptime(options['start'], "%Y-%m-%d").date()
if options['end']:
end_date = datetime.strptime(options['end'], "%Y-%m-%d").date()
print("Running Batch: start=%s end=%s" % (start_date, end_date))
batch = BillingBatch.objects.run(start_date, end_date)
print("%d Bills" % batch.bills.count())
# Copyright 2018 Office Nomads LLC (http://officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
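# Hedged invocation sketch (dates are placeholders; the command name follows this
# file's location under nadine/management/commands/):
#
#     python manage.py billing_batch_run --start 2018-01-01 --end 2018-02-01
#     python manage.py billing_batch_run --delete-open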
| 39.428571
| 579
| 0.619112
|
8eb49a05b6b3036dfe231ecd0bb42c25fb744e82
| 553
|
py
|
Python
|
kNN.py
|
zxhaijm/-
|
2448b05d95e188ed35812e2a0030454f6190f212
|
[
"Unlicense"
] | null | null | null |
kNN.py
|
zxhaijm/-
|
2448b05d95e188ed35812e2a0030454f6190f212
|
[
"Unlicense"
] | null | null | null |
kNN.py
|
zxhaijm/-
|
2448b05d95e188ed35812e2a0030454f6190f212
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import operator
def kNN(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
diffMat = np.tile(inX, (dataSetSize,1)) - dataSet
sqDiffMat = diffMat**2
sqDistance = sqDiffMat.sum(axis=1)
distances = sqDistance**0.5
sortedDistIndicies = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
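# Hedged usage sketch (toy data; relies on the imports added at the top of this file):
#
#     group = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
#     labels = ['A', 'A', 'B', 'B']
#     kNN([0.0, 0.0], group, labels, 3)   # -> 'B' (two of the three nearest points are 'B')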
| 39.5
| 91
| 0.681736
|
01f46813c24eb72d17e7473237ce843aed94ffa2
| 13,244
|
py
|
Python
|
main.py
|
fab-jul/ppfin
|
f3e51583d42590eceb6d3920a351f8f2639792c1
|
[
"MIT"
] | null | null | null |
main.py
|
fab-jul/ppfin
|
f3e51583d42590eceb6d3920a351f8f2639792c1
|
[
"MIT"
] | null | null | null |
main.py
|
fab-jul/ppfin
|
f3e51583d42590eceb6d3920a351f8f2639792c1
|
[
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('otp.log')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
import argparse
import urwid
import data_controller
import symbol_values
_BACKGROUND = urwid.SolidFill(u'\N{MEDIUM SHADE}')
_BASE_CURRENCY = 'CHF'
_main_event_loop = urwid.AsyncioEventLoop()
_PALETTE = [
('brand', 'bold,underline,dark blue', ''),
('underline', 'underline', ''),
('bold', 'bold', ''),
('err', 'dark red,bold', ''),
('reversed', 'standout', ''),
('up', 'dark green', ''),
('upbold', 'dark green,bold', ''),
('neutral', '', ''),
('neutralbold', 'bold', ''),
('down', 'dark red', ''),
('downbold', 'dark red,bold', ''),
]
_STYLES = {palette_entry[0] for palette_entry in _PALETTE}
_BOLD_MAP = {key: key + 'bold'
             for key in _STYLES if key + 'bold' in _STYLES}
class Controller:
def __init__(self):
self.stack = [_BACKGROUND]
self.view = urwid.Padding(self.stack[-1], left=1, right=1)
def unhandled_input(self, key):
try:
self.stack[-1].unhandled_input(key)
except AttributeError:
pass
def _update(self):
self.view.original_widget = self.stack[-1]
def push(self, w):
self.stack.append(w)
self._update()
def pop(self):
self.stack.pop()
try:
self.stack[-1].refresh()
except AttributeError:
pass
self._update()
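# Hedged navigation sketch (wiring is illustrative; SummaryView is defined further
# down and dc would be a data_controller.DataController instance):
#
#     controller = Controller()
#     controller.push(SummaryView(dc, controller))   # show the summary on top of the background
#     controller.pop()                               # drop back to the previous widget and refresh it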
def make_button(title, callback_fn):
button = urwid.Button(title)
urwid.connect_signal(button, 'click', callback_fn)
return urwid.AttrMap(button, None, focus_map='reversed')
def boldify(w):
return urwid.AttrMap(w, attr_map=_BOLD_MAP)
def on_main(fn):
def callback():
_main_event_loop.alarm(0, lambda: fn())
return callback
class Header(urwid.WidgetWrap):
_ALIGNS = {'l': 'left', 'r': 'right'}
def __init__(self, *titles, aligns=None):
titles = [('underline', title) if not isinstance(title, tuple) else title
for title in titles]
if not aligns:
aligns = ''.join('l' for _ in titles)
aligns = [Header._ALIGNS[align] for align in aligns]
if len(aligns) != len(titles):
raise ValueError
super().__init__(
urwid.Columns([urwid.Text(title, align=align)
for title, align in zip(titles, aligns)]))
class SummaryView(urwid.WidgetWrap):
def __init__(self, dc: data_controller.DataController, controller: Controller):
self.dc = dc
self.controller = controller
self.focus_walker = None
self._last_focus = None
symbol_values.Ticker.register_callback(
'SummaryView',
on_main(self.refresh))
# lambda: controller.main_loop.event_loop.alarm(0, lambda *_: self.refresh()))
with self.dc.connect():
super(SummaryView, self).__init__(self._get_menu())
def unhandled_input(self, key):
if key == 'r':
self.refresh()
def refresh(self):
logger.info('***\nREFRESH\n***')
with self.dc.connect():
self._set_w(self._get_menu())
def __del__(self):
symbol_values.Ticker.remove_callback('SummaryView')
def _get_menu(self):
body = [urwid.Text(('brand', 'ppfin')), urwid.Divider()]
# Normal (category-0) Accounts
accs = self.dc.get_all_accounts(category=0)
body += [Header('Account', 'Diff', 'Balance', aligns='lrr')]
for acc in accs:
body.append(urwid.Columns([
make_button(acc.name, lambda btn: self._show_account(btn.get_label())),
urwid.Text(acc.get_diff_to_last().attr_str(), align='right'),
urwid.Text(str(acc.get_balance()), align='right')]))
total_diff = sum(acc.get_diff_to_last() for acc in accs).attr_str()
total = sum(acc.get_balance() for acc in accs)
# Special (category-1) Accounts
accs = self.dc.get_all_accounts(category=1)
if accs:
for acc in accs:
body.append(urwid.Columns([
make_button(acc.name, lambda btn: self._show_account(btn.get_label())),
urwid.Text(''),
urwid.Text(str(acc.get_balance()), align='right')]))
total += acc.get_balance()
body += [urwid.Columns([
urwid.Text(('bold', 'Total')),
boldify(urwid.Text(total_diff, align='right')),
urwid.Text(('bold', str(total)), align='right')])]
body += [urwid.Divider(),
make_button('Update Balances', self._update_balances),
make_button('Add Account', self._add_account),
urwid.Divider()]
# Shares
symbol_overviews = self.dc.get_all_symbol_overviews()
if not symbol_overviews:
body += [urwid.Text('No Shares!')]
else:
body += [Header('Symbol', 'Shares', 'Gain', 'Possession', aligns='lrrr')]
for so in symbol_overviews:
body.append(urwid.Columns([
make_button(so.symbol, self._update_share),
urwid.Text(str(so.quantity), align='right'),
urwid.Text(so.get_current_total_gain().attr_str(),
align='right'),
urwid.Text(str(so.get_current_total_value()),
align='right')]))
total_gain = sum(
so.get_current_total_gain(currency=_BASE_CURRENCY)
for so in symbol_overviews)
total_share_value = sum(
so.get_current_total_value(currency=_BASE_CURRENCY)
for so in symbol_overviews)
body += [
urwid.Columns([
urwid.Text(('bold', 'Total')),
urwid.Text(''),
urwid.Text(('bold', str(total_gain)), align='right'),
urwid.Text(('bold', str(total_share_value)), align='right'),
])
]
body += [urwid.Divider(),
make_button('Update Shares', self._update_shares),
make_button('Add Share', self._add_share),
urwid.Divider()]
self.focus_walker = urwid.SimpleFocusListWalker(body)
urwid.connect_signal(self.focus_walker, 'modified',
lambda: self._cache_focus_value())
if self._last_focus is not None:
self.focus_walker.set_focus(self._last_focus)
return urwid.ListBox(self.focus_walker)
def _show_account(self, account_name):
self.controller.push(AccountDetailView(
self.dc, self.controller, account_name))
def _cache_focus_value(self):
self._last_focus = self.focus_walker.focus
def _update_share(self, k):
raise ValueError(k.get_label())
def _update_shares(self, _):
pass
def _add_share(self, _):
def done(_):
name = name_edit.get_edit_text()
currency = cur_edit.get_edit_text()
try:
self.dc.add_stock_symbol(name, currency)
except data_controller.SymbolExistsException:
pass # TODO: maybe handle
self.controller.pop()
header = urwid.Text('Add Share')
name_edit = urwid.Edit("Symbol: ")
cur_edit = urwid.Edit("Currency: ")
widget = urwid.Pile([
header,
name_edit,
cur_edit,
make_button('Done', done),
make_button('Cancel', lambda _: self.controller.pop()),
])
self.controller.push(urwid.Filler(widget, 'top'))
def _update_balances(self, _):
self.controller.push(UpdateView(self.dc, self.controller))
def _add_account(self, _):
def done(_):
name, _ = name_edit.get_text()
name = name.replace('Name: ', '')
self.dc.create_account(name, _BASE_CURRENCY) # TODO
self.controller.pop()
name_edit = urwid.Edit("Name: ")
header = urwid.Text('Add Account')
widget = urwid.Pile([
header,
name_edit,
make_button('Done', done),
make_button('Cancel', lambda _: self.controller.pop()),
])
self.controller.push(urwid.Filler(widget, 'top'))
class AccountDetailView(urwid.WidgetWrap):
def __init__(self,
dc: data_controller.DataController,
controller: Controller,
account_name: str):
self.dc = dc
self.controller = controller
self.account_name = account_name
super().__init__(self._get())
def _get(self):
transactions = self.dc.get_account_transactions(self.account_name)
body = [Header('Date', 'Info', 'Amount', aligns='llr')]
for t in transactions:
body.append(urwid.Columns([
urwid.Text(t.date),
urwid.Text(t.info),
urwid.Text(t.value.attr_str(), align='right'),
]))
body += [
urwid.Divider(),
make_button('Done', lambda _: self.controller.pop())]
return urwid.ListBox(urwid.SimpleFocusListWalker(body))
class UpdateView(urwid.WidgetWrap):
def __init__(self,
dc: data_controller.DataController,
controller: Controller):
self.dc = dc
self.controller = controller
self.done_button: urwid.AttrMap = None
self.focus_walker: urwid.SimpleFocusListWalker = None
self.accs = None
super(UpdateView, self).__init__(self._get_menu())
def refresh(self):
self._set_w(self._get_menu())
def unhandled_input(self, key):
if key == 'enter':
# is_ok = self._validate()
current_idx = self.focus_walker.focus
# current_widget = self.focus_walker[current_idx]
next_position = self.focus_walker.next_position(current_idx)
if isinstance(self.focus_walker[next_position], urwid.Divider):
next_position += 1
# if not isinstance(current_widget, urwid.Edit):
# return
self.focus_walker.set_focus(next_position)
def _get_menu(self):
body = [urwid.Text('Update'), urwid.Divider()]
self.accs = self.dc.get_all_accounts(category=0)
if not self.accs:
            raise NotImplementedError
indent = max(len(acc.name) for acc in self.accs) + 5
for acc in self.accs:
label = acc.name + ':'
indent_acc = (indent - len(label)) * ' '
body.append(urwid.Edit(f"{label}{indent_acc}"))
# make_button(acc.name, lambda _:...),
# urwid.Text(acc.get_formatted_balance(), align='right')]))
def done(_):
all_ok = self._validate()
if all_ok:
self._commit()
self.controller.pop()
self.done_button = make_button('Done', done)
body += [urwid.Divider(),
self.done_button,
make_button('Cancel', lambda _: self.controller.pop()),
]
self.focus_walker = urwid.SimpleFocusListWalker(body)
urwid.connect_signal(self.focus_walker, 'modified',
lambda: self._validate())
return urwid.ListBox(self.focus_walker)
def _commit(self):
edit_fields = [e for e in self.focus_walker
if isinstance(e, urwid.Edit)]
assert len(edit_fields) == len(self.accs)
with self.dc.connect():
for e, acc in zip(edit_fields, self.accs):
assert acc.name in e.caption
value = e.get_edit_text()
if not value:
continue
value = float(value)
diff = value - acc.get_balance()
self.dc.add_transaction(acc.name, diff)
def _validate(self):
all_ok = True
for i, e in enumerate(self.focus_walker):
if not isinstance(e, urwid.Edit):
continue
value = e.get_edit_text()
if not value:
continue
try:
float(value)
is_ok = True
except ValueError:
is_ok = False
caption = e.caption
if is_ok and '!' in caption:
caption = caption.replace('!', ':')
e.set_caption(caption)
if not is_ok and '!' not in caption:
caption = caption.replace(':', '!')
e.set_caption(('err', caption))
all_ok = all_ok and is_ok
if not all_ok:
self.done_button.set_attr_map({None: 'err'})
self.done_button.original_widget.set_label(
'Errors: All values must be floats!')
else:
self.done_button.set_attr_map({None: None})
self.done_button.original_widget.set_label(
'Done')
return all_ok
class MainWindow:
def __init__(self, dc: data_controller.DataController):
self.dc = dc
self.controller = Controller()
self.controller.push(SummaryView(dc, self.controller))
self.main_loop = None
def make_main_loop(self):
self.main_loop = urwid.MainLoop(self.draw(),
palette=_PALETTE,
unhandled_input=self.controller.unhandled_input,
event_loop=_main_event_loop)
return self.main_loop
def draw(self):
top = urwid.Overlay(self.controller.view, _BACKGROUND,
align='center', width=('relative', 80),
valign='middle', height=('relative', 80),
min_width=20, min_height=9)
return top
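# A minimal, self-contained urwid sketch (not part of ppfin): it illustrates the same
# MainLoop + palette + unhandled_input pattern that MainWindow.make_main_loop() wires up,
# reusing the module-level `import urwid` above. The widget tree and the 'q' key binding
# are hypothetical placeholders.
def _urwid_pattern_sketch():
    txt = urwid.Text(('bold', 'press q to quit'))
    fill = urwid.Filler(txt, 'middle')
    def on_key(key):
        if key in ('q', 'Q'):
            raise urwid.ExitMainLoop()
    urwid.MainLoop(fill, palette=[('bold', 'bold', '')], unhandled_input=on_key).run()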
def item_chosen(button, choice):
    raise urwid.ExitMainLoop()
    # NOTE: everything below the raise above is unreachable (dead code).
    response = urwid.Text([u'You chose ', choice, u'\n'])
    done = urwid.Button(u'Ok')
    urwid.connect_signal(done, 'click', exit_program)
    main.original_widget = urwid.Filler(urwid.Pile([response,
                                        urwid.AttrMap(done, None, focus_map='reversed')]))
def exit_program(button):
raise urwid.ExitMainLoop()
def main():
p = argparse.ArgumentParser()
p.add_argument('--database', '-db', required=True)
flags = p.parse_args()
dc = data_controller.DataController(flags.database)
mw = MainWindow(dc)
loop = mw.make_main_loop()
loop.run()
if __name__ == '__main__':
main()
| 30.168565
| 100
| 0.627001
|
c1924d3beeb90136baff1d41c8c4b08473a73264
| 2,656
|
py
|
Python
|
qoc/standard/costs/forbidstates.py
|
sriharikrishna/qoc
|
823d48966892fe71c828c31de8737582f36bdd5e
|
[
"MIT"
] | null | null | null |
qoc/standard/costs/forbidstates.py
|
sriharikrishna/qoc
|
823d48966892fe71c828c31de8737582f36bdd5e
|
[
"MIT"
] | null | null | null |
qoc/standard/costs/forbidstates.py
|
sriharikrishna/qoc
|
823d48966892fe71c828c31de8737582f36bdd5e
|
[
"MIT"
] | null | null | null |
"""
forbidstates.py - This module defines a cost function that penalizes
the occupation of a set of forbidden states.
"""
import jax
import jax.numpy as jnp
from qoc.models import Cost
from qoc.standard.functions.convenience import conjugate_transpose
class ForbidStates(Cost):
"""
This cost penalizes the occupation of a set of forbidden states.
Fields:
cost_multiplier
cost_normalization_constant
forbidden_states_count
forbidden_states_dagger
name
    requires_step_evaluation
"""
name = "forbid_states"
requires_step_evaluation = True
def __init__(self, forbidden_states,
system_eval_count,
cost_eval_step=1,
cost_multiplier=1.,):
"""
See class fields for arguments not listed here.
Arguments:
cost_eval_step
forbidden_states
system_eval_count
"""
super().__init__(cost_multiplier=cost_multiplier)
state_count = forbidden_states.shape[0]
cost_evaluation_count, _ = jnp.divmod(system_eval_count - 1, cost_eval_step)
self.cost_normalization_constant = cost_evaluation_count * state_count
self.forbidden_states_count = jnp.array([forbidden_states_.shape[0]
for forbidden_states_
in forbidden_states])
self.forbidden_states_dagger = conjugate_transpose(forbidden_states)
def cost(self, controls, states, system_eval_step):
"""
Compute the penalty.
Arguments:
controls
states
system_eval_step
Returns:
cost
"""
# The cost is the overlap (fidelity) of the evolved state and each
# forbidden state.
cost = 0
for i, forbidden_states_dagger_ in enumerate(self.forbidden_states_dagger):
state = states[i]
state_cost = 0
for forbidden_state_dagger in forbidden_states_dagger_:
inner_product = jnp.matmul(forbidden_state_dagger, state)[0, 0]
fidelity = jnp.real(inner_product * jnp.conjugate(inner_product))
state_cost = state_cost + fidelity
#ENDFOR
state_cost_normalized = state_cost / self.forbidden_states_count[i]
cost = cost + state_cost_normalized
#ENDFOR
# Normalize the cost for the number of evolving states
# and the number of times the cost is computed.
cost_normalized = cost / self.cost_normalization_constant
return cost_normalized * self.cost_multiplier
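# A minimal standalone sketch (not part of qoc) of the per-state penalty computed above:
# the fidelity |<forbidden|state>|^2 between an evolved state and one forbidden state.
# It reuses the module's `import jax.numpy as jnp`; the two toy column vectors are hypothetical.
def _overlap_fidelity_sketch():
    state = jnp.array([[1.0 + 0j], [0.0 + 0j]])                      # evolved state |psi>
    forbidden = jnp.array([[1.0 + 0j], [1.0 + 0j]]) / jnp.sqrt(2.0)  # forbidden state |phi>
    forbidden_dagger = jnp.conjugate(forbidden).T                    # <phi|
    inner_product = jnp.matmul(forbidden_dagger, state)[0, 0]
    return jnp.real(inner_product * jnp.conjugate(inner_product))    # 0.5 for this pair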
| 32.790123
| 84
| 0.638178
|
d6c74f3eadbc12e3b78bc73d2ba5f1965bd8dd97
| 6,097
|
py
|
Python
|
games/algorithms.py
|
RamboWu/counterfactual-regret-minimization
|
f6883e1ffae13e133a71655b67a1ce4fa7f3753d
|
[
"MIT"
] | 141
|
2018-09-23T15:30:43.000Z
|
2022-02-12T01:23:39.000Z
|
games/algorithms.py
|
bimingda100/counterfactual-regret-minimization
|
4a223131e652c3541058f7b39b2338cde1f0dc00
|
[
"MIT"
] | 2
|
2018-10-24T06:28:58.000Z
|
2021-12-27T05:17:06.000Z
|
games/algorithms.py
|
bimingda100/counterfactual-regret-minimization
|
4a223131e652c3541058f7b39b2338cde1f0dc00
|
[
"MIT"
] | 41
|
2018-09-24T04:34:35.000Z
|
2021-12-19T09:20:41.000Z
|
from common.constants import A
from common.utils import init_sigma, init_empty_node_maps
class CounterfactualRegretMinimizationBase:
def __init__(self, root, chance_sampling = False):
self.root = root
self.sigma = init_sigma(root)
self.cumulative_regrets = init_empty_node_maps(root)
self.cumulative_sigma = init_empty_node_maps(root)
self.nash_equilibrium = init_empty_node_maps(root)
self.chance_sampling = chance_sampling
def _update_sigma(self, i):
rgrt_sum = sum(filter(lambda x : x > 0, self.cumulative_regrets[i].values()))
for a in self.cumulative_regrets[i]:
self.sigma[i][a] = max(self.cumulative_regrets[i][a], 0.) / rgrt_sum if rgrt_sum > 0 else 1. / len(self.cumulative_regrets[i].keys())
def compute_nash_equilibrium(self):
self.__compute_ne_rec(self.root)
def __compute_ne_rec(self, node):
if node.is_terminal():
return
i = node.inf_set()
if node.is_chance():
self.nash_equilibrium[i] = {a:node.chance_prob() for a in node.actions}
else:
sigma_sum = sum(self.cumulative_sigma[i].values())
self.nash_equilibrium[i] = {a: self.cumulative_sigma[i][a] / sigma_sum for a in node.actions}
# go to subtrees
for k in node.children:
self.__compute_ne_rec(node.children[k])
def _cumulate_cfr_regret(self, information_set, action, regret):
self.cumulative_regrets[information_set][action] += regret
def _cumulate_sigma(self, information_set, action, prob):
self.cumulative_sigma[information_set][action] += prob
def run(self, iterations):
raise NotImplementedError("Please implement run method")
def value_of_the_game(self):
return self.__value_of_the_game_state_recursive(self.root)
def _cfr_utility_recursive(self, state, reach_a, reach_b):
children_states_utilities = {}
if state.is_terminal():
# evaluate terminal node according to the game result
return state.evaluation()
if state.is_chance():
if self.chance_sampling:
                # if node is a chance node, let's sample one child node and proceed normally
return self._cfr_utility_recursive(state.sample_one(), reach_a, reach_b)
else:
chance_outcomes = {state.play(action) for action in state.actions}
return state.chance_prob() * sum([self._cfr_utility_recursive(outcome, reach_a, reach_b) for outcome in chance_outcomes])
# sum up all utilities for playing actions in our game state
value = 0.
for action in state.actions:
child_reach_a = reach_a * (self.sigma[state.inf_set()][action] if state.to_move == A else 1)
child_reach_b = reach_b * (self.sigma[state.inf_set()][action] if state.to_move == -A else 1)
# value as if child state implied by chosen action was a game tree root
child_state_utility = self._cfr_utility_recursive(state.play(action), child_reach_a, child_reach_b)
# value computation for current node
value += self.sigma[state.inf_set()][action] * child_state_utility
# values for chosen actions (child nodes) are kept here
children_states_utilities[action] = child_state_utility
        # we are computing regrets for both players simultaneously, therefore we need to relate reach/reach_opponent
        # to the player acting in the current node; for player A it is different than for player B
(cfr_reach, reach) = (reach_b, reach_a) if state.to_move == A else (reach_a, reach_b)
for action in state.actions:
# we multiply regret by -1 for player B, this is because value is computed from player A perspective
# again we need that perspective switch
action_cfr_regret = state.to_move * cfr_reach * (children_states_utilities[action] - value)
self._cumulate_cfr_regret(state.inf_set(), action, action_cfr_regret)
self._cumulate_sigma(state.inf_set(), action, reach * self.sigma[state.inf_set()][action])
if self.chance_sampling:
# update sigma according to cumulative regrets - we can do it here because we are using chance sampling
            # and so we only visit a single game_state from an information set (chance is sampled once)
self._update_sigma(state.inf_set())
return value
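    # Worked numeric illustration (editor's note, not from the original module), assuming the
    # constant A equals 1: if player A acts, cfr_reach (the opponent's reach) is 0.5, the node
    # value is 0.2 and the chosen action's utility is 0.8, then the accumulated counterfactual
    # regret for that action is state.to_move * cfr_reach * (0.8 - 0.2) = 1 * 0.5 * 0.6 = 0.3.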
def __value_of_the_game_state_recursive(self, node):
value = 0.
if node.is_terminal():
return node.evaluation()
for action in node.actions:
value += self.nash_equilibrium[node.inf_set()][action] * self.__value_of_the_game_state_recursive(node.play(action))
return value
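# A minimal standalone sketch (not part of this module) of the regret-matching rule applied
# by _update_sigma(): each action's probability is proportional to the positive part of its
# cumulative regret, falling back to a uniform strategy when no regret is positive.
# The toy action names and regret values below are hypothetical.
def _regret_matching_sketch(cumulative_regrets={'BET': 2.0, 'CHECK': -1.0}):
    positive_sum = sum(r for r in cumulative_regrets.values() if r > 0)
    if positive_sum > 0:
        return {a: max(r, 0.) / positive_sum for a, r in cumulative_regrets.items()}
    return {a: 1. / len(cumulative_regrets) for a in cumulative_regrets}
# For the default toy regrets this returns {'BET': 1.0, 'CHECK': 0.0}.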
class VanillaCFR(CounterfactualRegretMinimizationBase):
def __init__(self, root):
super().__init__(root = root, chance_sampling = False)
def run(self, iterations = 1):
for _ in range(0, iterations):
self._cfr_utility_recursive(self.root, 1, 1)
            # since we do not update sigmas in each information set while traversing, we need to
            # traverse the tree again now to update them
self.__update_sigma_recursively(self.root)
def __update_sigma_recursively(self, node):
# stop traversal at terminal node
if node.is_terminal():
return
# omit chance
if not node.is_chance():
self._update_sigma(node.inf_set())
# go to subtrees
for k in node.children:
self.__update_sigma_recursively(node.children[k])
class ChanceSamplingCFR(CounterfactualRegretMinimizationBase):
def __init__(self, root):
super().__init__(root = root, chance_sampling = True)
def run(self, iterations = 1):
for _ in range(0, iterations):
self._cfr_utility_recursive(self.root, 1, 1)
| 48.776
| 146
| 0.654584
|
0f9205f2ded1d1e5435f8ccf767c648c912cfdd3
| 36,676
|
py
|
Python
|
erfa_generator.py
|
Stefan-Heimersheim/pyerfa
|
16a0ab2e66aed29c4b7eb67876fef5ca680d7b8b
|
[
"BSD-3-Clause"
] | 20
|
2020-05-06T07:43:31.000Z
|
2022-01-05T03:49:37.000Z
|
erfa_generator.py
|
Stefan-Heimersheim/pyerfa
|
16a0ab2e66aed29c4b7eb67876fef5ca680d7b8b
|
[
"BSD-3-Clause"
] | 62
|
2020-05-05T01:26:45.000Z
|
2021-11-05T16:38:25.000Z
|
erfa_generator.py
|
avalentino/pyerfa
|
efc2ba6300e4edac21c51cd891694bee8f91eb3b
|
[
"BSD-3-Clause"
] | 15
|
2020-05-05T19:26:42.000Z
|
2022-03-10T07:45:41.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module's main purpose is to act as a script to create new versions
of ufunc.c when ERFA is updated (or this generator is enhanced).
`Jinja2 <http://jinja.pocoo.org/>`_ must be installed for this
module/script to function.
Note that this does *not* currently automate the process of creating structs
or dtypes for those structs. They should be added manually in the template file.
"""
import re
import os.path
from collections import OrderedDict
DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0], 'liberfa/erfa/src')
DEFAULT_TEMPLATE_LOC = os.path.join(os.path.split(__file__)[0], 'erfa')
NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])")
.replace(r'\.\*', '.*')
.replace(r'\<', '(')
.replace(r'\>', ')'))
class FunctionDoc:
def __init__(self, doc):
self.doc = doc.replace("**", " ").replace("/*\n", "").replace("*/", "")
self.doc = self.doc.replace("/*+\n", "") # accommodate eraLdn
self.doc = self.doc.replace("* ", " " * 2) # accommodate eraAticqn
self.doc = self.doc.replace("*\n", "\n") # accommodate eraAticqn
self.__input = None
self.__output = None
self.__ret_info = None
def _get_arg_doc_list(self, doc_lines):
"""Parse input/output doc section lines, getting arguments from them.
Ensure all elements of eraASTROM and eraLDBODY are left out, as those
are not input or output arguments themselves. Also remove the nb
        argument in front of eraLDBODY, as we infer nb from the python array.
"""
doc_list = []
skip = []
for d in doc_lines:
arg_doc = ArgumentDoc(d)
if arg_doc.name is not None:
if skip:
if skip[0] == arg_doc.name:
skip.pop(0)
continue
else:
raise RuntimeError("We whould be skipping {} "
"but {} encountered."
.format(skip[0], arg_doc.name))
if arg_doc.type.startswith('eraLDBODY'):
# Special-case LDBODY: for those, the previous argument
# is always the number of bodies, but we don't need it
# as an input argument for the ufunc since we're going
# to determine this from the array itself. Also skip
# the description of its contents; those are not arguments.
doc_list.pop()
skip = ['bm', 'dl', 'pv']
elif arg_doc.type.startswith('eraASTROM'):
# Special-case ASTROM: need to skip the description
# of its contents; those are not arguments.
skip = ['pmt', 'eb', 'eh', 'em', 'v', 'bm1',
'bpn', 'along', 'xpl', 'ypl', 'sphi',
'cphi', 'diurab', 'eral', 'refa', 'refb']
doc_list.append(arg_doc)
return doc_list
@property
def input(self):
if self.__input is None:
self.__input = []
for regex in ("Given([^\n]*):.*?\n(.+?) \n",
"Given and returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__input += self._get_arg_doc_list(doc_lines)
return self.__input
@property
def output(self):
if self.__output is None:
self.__output = []
for regex in ("Given and returned([^\n]*):\n(.+?) \n",
"Returned([^\n]*):.*?\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__output += self._get_arg_doc_list(doc_lines)
return self.__output
@property
def ret_info(self):
if self.__ret_info is None:
ret_info = []
result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?) \n",
self.doc, re.DOTALL)
if result is not None:
ret_info.append(ReturnDoc(result.group(2)))
if len(ret_info) == 0:
self.__ret_info = ''
elif len(ret_info) == 1:
self.__ret_info = ret_info[0]
else:
raise ValueError("Multiple C return sections found in this doc:\n"
+ self.doc)
return self.__ret_info
@property
def title(self):
# Used for the docstring title.
lines = [line.strip() for line in self.doc.split('\n')[4:10]]
# Always include the first line, then stop at either an empty
# line or at the end of a sentence.
description = lines[:1]
for line in lines[1:]:
if line == '':
break
if '. ' in line:
line = line[:line.index('. ')+1]
description.append(line)
if line.endswith('.'):
break
return '\n '.join(description)
def __repr__(self):
return '\n'.join([(ln.rstrip() if ln.strip() else '')
for ln in self.doc.split('\n')])
class ArgumentDoc:
def __init__(self, doc):
match = re.search("^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)", doc)
if match is not None:
self.name = match.group(1)
if self.name.startswith('*'): # Easier than getting the regex to behave...
self.name = self.name.replace('*', '')
self.type = match.group(2)
self.doc = match.group(3)
else:
self.name = None
self.type = None
self.doc = None
def __repr__(self):
return f" {self.name:15} {self.type:15} {self.doc}"
class Variable:
"""Properties shared by Argument and Return."""
@property
def npy_type(self):
"""Predefined type used by numpy ufuncs to indicate a given ctype.
        E.g., NPY_DOUBLE for double.
"""
return "NPY_" + self.ctype.upper()
@property
def dtype(self):
"""Name of dtype corresponding to the ctype.
Specifically,
double : dt_double
int : dt_int
double[3]: dt_vector
double[2][3] : dt_pv
double[2] : dt_pvdpv
double[3][3] : dt_matrix
        int[4] : dt_ymdf | dt_hmsf | dt_dmsf, depending on name
eraASTROM: dt_eraASTROM
eraLDBODY: dt_eraLDBODY
char : dt_sign
char[] : dt_type
The corresponding dtypes are defined in ufunc.c, where they are
used for the loop definitions. In core.py, they are also used
to view-cast regular arrays to these structured dtypes.
"""
if self.ctype == 'const char':
return 'dt_type'
elif self.ctype == 'char':
return 'dt_sign'
elif self.ctype == 'int' and self.shape == (4,):
return 'dt_' + self.name[1:]
elif self.ctype == 'double' and self.shape == (3,):
return 'dt_double'
elif self.ctype == 'double' and self.shape == (2, 3):
return 'dt_pv'
elif self.ctype == 'double' and self.shape == (2,):
return 'dt_pvdpv'
elif self.ctype == 'double' and self.shape == (3, 3):
return 'dt_double'
elif not self.shape:
return 'dt_' + self.ctype
else:
raise ValueError("ctype {} with shape {} not recognized."
.format(self.ctype, self.shape))
@property
def view_dtype(self):
"""Name of dtype corresponding to the ctype for viewing back as array.
E.g., dt_double for double, dt_double33 for double[3][3].
The types are defined in core.py, where they are used for view-casts
of structured results as regular arrays.
"""
if self.ctype == 'const char':
return 'dt_bytes12'
elif self.ctype == 'char':
return 'dt_bytes1'
else:
raise ValueError('Only char ctype should need view back!')
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
size = 1
for s in self.shape:
size *= s
return size
@property
def cshape(self):
return ''.join([f'[{s}]' for s in self.shape])
@property
def signature_shape(self):
if self.ctype == 'eraLDBODY':
return '(n)'
elif self.ctype == 'double' and self.shape == (3,):
return '(3)'
elif self.ctype == 'double' and self.shape == (3, 3):
return '(3, 3)'
else:
return '()'
class Argument(Variable):
def __init__(self, definition, doc):
self.definition = definition
self.doc = doc
self.__inout_state = None
self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1)
if "*" == ptr_name_arr[0]:
self.is_ptr = True
name_arr = ptr_name_arr[1:]
else:
self.is_ptr = False
name_arr = ptr_name_arr
if "[]" in ptr_name_arr:
self.is_ptr = True
name_arr = name_arr[:-2]
if "[" in name_arr:
self.name, arr = name_arr.split("[", 1)
self.shape = tuple([int(size) for size in arr[:-1].split("][")])
else:
self.name = name_arr
self.shape = ()
@property
def inout_state(self):
if self.__inout_state is None:
self.__inout_state = ''
for i in self.doc.input:
if self.name in i.name.split(','):
self.__inout_state = 'in'
for o in self.doc.output:
if self.name in o.name.split(','):
if self.__inout_state == 'in':
self.__inout_state = 'inout'
else:
self.__inout_state = 'out'
return self.__inout_state
@property
def name_for_call(self):
"""How the argument should be used in the call to the ERFA function.
This takes care of ensuring that inputs are passed by value,
as well as adding back the number of bodies for any LDBODY argument.
The latter presumes that in the ufunc inner loops, that number is
called 'nb'.
"""
if self.ctype == 'eraLDBODY':
assert self.name == 'b'
return 'nb, _' + self.name
elif self.is_ptr:
return '_'+self.name
else:
return '*_'+self.name
def __repr__(self):
return (f"Argument('{self.definition}', name='{self.name}', "
f"ctype='{self.ctype}', inout_state='{self.inout_state}')")
class ReturnDoc:
def __init__(self, doc):
self.doc = doc
self.infoline = doc.split('\n')[0].strip()
self.type = self.infoline.split()[0]
self.descr = self.infoline.split()[1]
if self.descr.startswith('status'):
self.statuscodes = statuscodes = {}
code = None
for line in doc[doc.index(':')+1:].split('\n'):
ls = line.strip()
if ls != '':
if ' = ' in ls:
code, msg = ls.split(' = ')
if code != 'else':
code = int(code)
statuscodes[code] = msg
elif code is not None:
statuscodes[code] += ls
else:
self.statuscodes = None
def __repr__(self):
return f"Return value, type={self.type:15}, {self.descr}, {self.doc}"
class Return(Variable):
def __init__(self, ctype, doc):
self.name = 'c_retval'
self.inout_state = 'stat' if ctype == 'int' else 'ret'
self.ctype = ctype
self.shape = ()
self.doc = doc
def __repr__(self):
return f"Return(name='{self.name}', ctype='{self.ctype}', inout_state='{self.inout_state}')"
@property
def doc_info(self):
return self.doc.ret_info
class Function:
"""
A class representing a C function.
Parameters
----------
name : str
The name of the function
source_path : str
Either a directory, which means look for the function in a
stand-alone file (like for the standard ERFA distribution), or a
file, which means look for the function in that file.
match_line : str, optional
If given, searching of the source file will skip until it finds
a line matching this string, and start from there.
"""
def __init__(self, name, source_path, match_line=None):
self.name = name
self.pyname = name.split('era')[-1].lower()
self.filename = self.pyname+".c"
if os.path.isdir(source_path):
self.filepath = os.path.join(os.path.normpath(source_path), self.filename)
else:
self.filepath = source_path
with open(self.filepath) as f:
if match_line:
line = f.readline()
while line != '':
if line.startswith(match_line):
filecontents = '\n' + line + f.read()
break
line = f.readline()
else:
msg = ('Could not find the match_line "{0}" in '
'the source file "{1}"')
raise ValueError(msg.format(match_line, self.filepath))
else:
filecontents = f.read()
pattern = fr"\n([^\n]+{name} ?\([^)]+\)).+?(/\*.+?\*/)"
p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE)
search = p.search(filecontents)
self.cfunc = " ".join(search.group(1).split())
self.doc = FunctionDoc(search.group(2))
self.args = []
for arg in re.search(r"\(([^)]+)\)", self.cfunc).group(1).split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.search(f"^(.*){name}", self.cfunc).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def args_by_inout(self, inout_filter, prop=None, join=None):
"""
Gives all of the arguments and/or returned values, depending on whether
they are inputs, outputs, etc.
The value for `inout_filter` should be a string containing anything
that arguments' `inout_state` attribute produces. Currently, that can be:
* "in" : input
* "out" : output
* "inout" : something that's could be input or output (e.g. a struct)
* "ret" : the return value of the C function
* "stat" : the return value of the C function if it is a status code
It can also be a "|"-separated string giving inout states to OR
together.
"""
result = []
for arg in self.args:
if arg.inout_state in inout_filter.split('|'):
if prop is None:
result.append(arg)
else:
result.append(getattr(arg, prop))
if join is not None:
return join.join(result)
else:
return result
@property
def user_dtype(self):
"""The non-standard dtype, if any, needed by this function's ufunc.
This would be any structured array for any input or output, but
we give preference to LDBODY, since that also decides that the ufunc
should be a generalized ufunc.
"""
user_dtype = None
for arg in self.args_by_inout('in|inout|out'):
if arg.ctype == 'eraLDBODY':
return arg.dtype
elif user_dtype is None and arg.dtype not in ('dt_double',
'dt_int'):
user_dtype = arg.dtype
return user_dtype
@property
def signature(self):
"""Possible signature, if this function should be a gufunc."""
if all(arg.signature_shape == '()'
for arg in self.args_by_inout('in|inout|out')):
return None
return '->'.join(
[','.join([arg.signature_shape for arg in args])
for args in (self.args_by_inout('in|inout'),
self.args_by_inout('inout|out|ret|stat'))])
@property
def python_call(self):
out = ', '.join([arg.name for arg in self.args_by_inout('inout|out|stat|ret')])
args = ', '.join([arg.name for arg in self.args_by_inout('in|inout')])
result = '{out} = {func}({args})'.format(out=out,
func='ufunc.' + self.pyname,
args=args)
if len(result) < 75:
return result
if result.index('(') < 75:
return result.replace('(', '(\n ')
split_point = result[:75].rfind(',') + 1
return ('(' + result[:split_point] + '\n '
+ result[split_point:].replace(' =', ') ='))
def __repr__(self):
return (f"Function(name='{self.name}', pyname='{self.pyname}', "
f"filename='{self.filename}', filepath='{self.filepath}')")
class Constant:
def __init__(self, name, value, doc):
self.name = name.replace("ERFA_", "")
self.value = value.replace("ERFA_", "")
self.doc = doc
class ExtraFunction(Function):
"""
An "extra" function - e.g. one not following the SOFA/ERFA standard format.
Parameters
----------
cname : str
The name of the function in C
prototype : str
The prototype for the function (usually derived from the header)
pathfordoc : str
The path to a file that contains the prototype, with the documentation
as a multiline string *before* it.
"""
def __init__(self, cname, prototype, pathfordoc):
self.name = cname
self.pyname = cname.split('era')[-1].lower()
self.filepath, self.filename = os.path.split(pathfordoc)
self.prototype = prototype.strip()
if prototype.endswith('{') or prototype.endswith(';'):
self.prototype = prototype[:-1].strip()
incomment = False
lastcomment = None
with open(pathfordoc, 'r') as f:
for ln in f:
if incomment:
if ln.lstrip().startswith('*/'):
incomment = False
lastcomment = ''.join(lastcomment)
else:
if ln.startswith('**'):
ln = ln[2:]
lastcomment.append(ln)
else:
if ln.lstrip().startswith('/*'):
incomment = True
lastcomment = []
if ln.startswith(self.prototype):
self.doc = lastcomment
break
else:
raise ValueError('Did not find prototype {} in file '
'{}'.format(self.prototype, pathfordoc))
self.args = []
argset = re.search(fr"{self.name}\(([^)]+)?\)",
self.prototype).group(1)
if argset is not None:
for arg in argset.split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.match(f"^(.*){self.name}",
self.prototype).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def __repr__(self):
r = super().__repr__()
if r.startswith('Function'):
r = 'Extra' + r
return r
class TestFunction:
"""Function holding information about a test in t_erfa_c.c"""
def __init__(self, name, t_erfa_c, nin, ninout, nout):
self.name = name
# Get lines that test the given erfa function: capture everything
# between a line starting with '{' after the test function definition
# and the first line starting with '}' or ' }'.
pattern = fr"\nstatic void t_{name}\(" + r".+?(^\{.+?^\s?\})"
search = re.search(pattern, t_erfa_c, flags=re.DOTALL | re.MULTILINE)
self.lines = search.group(1).split('\n')
# Number of input, inplace, and output arguments.
self.nin = nin
self.ninout = ninout
self.nout = nout
# Dict of dtypes for variables, filled by define_arrays().
self.var_dtypes = {}
@classmethod
def from_function(cls, func, t_erfa_c):
"""Initialize from a function definition."""
return cls(name=func.pyname, t_erfa_c=t_erfa_c,
nin=len(func.args_by_inout('in')),
ninout=len(func.args_by_inout('inout')),
nout=len(func.args_by_inout('out')))
def xfail(self):
"""Whether the python test produced for this function will fail.
Right now this will be true for functions without inputs such
as eraIr.
"""
if self.nin + self.ninout == 0:
if self.name == 'zpv':
# Works on newer numpy
return "np.__version__ < '1.21', reason='needs numpy >= 1.21'"
else:
return "reason='do not yet support no-input ufuncs'"
else:
return None
def pre_process_lines(self):
"""Basic pre-processing.
Combine multi-part lines, strip braces, semi-colons, empty lines.
"""
lines = []
line = ''
for part in self.lines:
part = part.strip()
if part in ('', '{', '}'):
continue
line += part + ' '
if part.endswith(';'):
lines.append(line.strip()[:-1])
line = ''
return lines
def define_arrays(self, line):
"""Check variable definition line for items also needed in python.
E.g., creating an empty astrom structured array.
"""
defines = []
# Split line in type and variables.
# E.g., "double x, y, z" will give ctype='double; variables='x, y, z'
ctype, _, variables = line.partition(' ')
for var in variables.split(','):
var = var.strip()
# Is variable an array?
name, _, rest = var.partition('[')
# If not, or one of iymdf or ihmsf, ignore (latter are outputs only).
if not rest or rest[:2] == '4]':
continue
if ctype == 'eraLDBODY':
# Special case, since this should be recarray for access similar
# to C struct.
v_dtype = 'dt_eraLDBODY'
v_shape = rest[:rest.index(']')]
extra = ".view(np.recarray)"
else:
# Temporarily create an Argument, so we can use its attributes.
# This translates, e.g., double pv[2][3] to dtype dt_pv.
v = Argument(ctype + ' ' + var.strip(), '')
v_dtype = v.dtype
v_shape = v.shape if v.signature_shape != '()' else '()'
extra = ""
self.var_dtypes[name] = v_dtype
if v_dtype == 'dt_double':
v_dtype = 'float'
else:
v_dtype = 'erfa_ufunc.' + v_dtype
defines.append(f"{name} = np.empty({v_shape}, {v_dtype}){extra}")
return defines
def to_python(self):
"""Lines defining the body of a python version of the test function."""
# TODO: this is quite hacky right now! Would be good to let function
# calls be understood by the Function class.
# Name of the erfa C function, so that we can recognize it.
era_name = 'era' + self.name.capitalize()
# Collect actual code lines, without ";", braces, etc.
lines = self.pre_process_lines()
out = []
for line in lines:
# In ldn ufunc, the number of bodies is inferred from the array size,
# so no need to keep the definition.
if line == 'n = 3' and self.name == 'ldn':
continue
# Are we dealing with a variable definition that also sets it?
# (hack: only happens for double).
if line.startswith('double') and '=' in line:
# Complete hack for single occurrence.
if line.startswith('double xyz[] = {'):
out.append(f"xyz = np.array([{line[16:-1]}])")
else:
# Put each definition on a separate line.
out.extend([part.strip() for part in line[7:].split(',')])
continue
# Variable definitions: add empty array definition as needed.
if line.startswith(('double', 'int', 'char', 'eraASTROM', 'eraLDBODY')):
out.extend(self.define_arrays(line))
continue
# Actual function. Start with basic replacements.
line = (line
.replace('ERFA_', 'erfa.')
.replace('(void)', '')
.replace('(int)', '')
.replace("pv[0]", "pv['p']")
.replace("pv[1]", "pv['v']")
.replace("s, '-'", "s[0], b'-'") # Rather hacky...
.replace("s, '+'", "s[0], b'+'") # Rather hacky...
.strip())
# Call of test function vvi or vvd.
if line.startswith('v'):
line = line.replace(era_name, self.name)
# Can call simple functions directly. Those need little modification.
if self.name + '(' in line:
line = line.replace(self.name + '(', f"erfa_ufunc.{self.name}(")
# Call of function that is being tested.
elif era_name in line:
line = line.replace(era_name, f"erfa_ufunc.{self.name}")
# correct for LDBODY (complete hack!)
line = line.replace('3, b', 'b').replace('n, b', 'b')
# Split into function name and call arguments.
start, _, arguments = line.partition('(')
# Get arguments, stripping excess spaces and, for numbers, remove
# leading zeros since python cannot deal with items like '01', etc.
args = []
for arg in arguments[:-1].split(','):
arg = arg.strip()
while arg[0] == '0' and len(arg) > 1 and arg[1] in '0123456789':
arg = arg[1:]
args.append(arg)
# Get input and output arguments.
in_args = [arg.replace('&', '') for arg in args[:self.nin+self.ninout]]
out_args = ([arg.replace('&', '') for arg in args[-self.nout-self.ninout:]]
if len(args) > self.nin else [])
# If the call assigned something, that will have been the status.
# Prepend any arguments assigned in the call.
if '=' in start:
line = ', '.join(out_args+[start])
else:
line = ', '.join(out_args) + ' = ' + start
line = line + '(' + ', '.join(in_args) + ')'
if 'astrom' in out_args:
out.append(line)
line = 'astrom = astrom.view(np.recarray)'
# In some test functions, there are calls to other ERFA functions.
# Deal with those in a super hacky way for now.
elif line.startswith('eraA'):
line = line.replace('eraA', 'erfa_ufunc.a')
start, _, arguments = line.partition('(')
args = [arg.strip() for arg in arguments[:-1].split(',')]
in_args = [arg for arg in args if '&' not in arg]
out_args = [arg.replace('&', '') for arg in args if '&' in arg]
line = (', '.join(out_args) + ' = '
+ start + '(' + ', '.join(in_args) + ')')
if 'atioq' in line or 'atio13' in line or 'apio13' in line:
line = line.replace(' =', ', j =')
# And the same for some other functions, which always have a
# 2-element time as inputs.
elif line.startswith('eraS'):
line = line.replace('eraS', 'erfa_ufunc.s')
start, _, arguments = line.partition('(')
args = [arg.strip() for arg in arguments[:-1].split(',')]
in_args = args[:2]
out_args = args[2:]
line = (', '.join(out_args) + ' = '
+ start + '(' + ', '.join(in_args) + ')')
# Input number setting.
elif '=' in line:
# Small clean-up.
line = line.replace('= ', '= ')
# Hack to make astrom element assignment work.
if line.startswith('astrom'):
out.append('astrom = np.zeros((), erfa_ufunc.dt_eraASTROM).view(np.recarray)')
# Change access to p and v elements for double[2][3] pv arrays
# that were not caught by the general replacement above (e.g.,
# with names not equal to 'pv')
name, _, rest = line.partition('[')
if (rest and rest[0] in '01' and name in self.var_dtypes
and self.var_dtypes[name] == 'dt_pv'):
line = name + "[" + ("'p'" if rest[0] == "0" else "'v'") + rest[1:]
out.append(line)
return out
def main(srcdir=DEFAULT_ERFA_LOC, templateloc=DEFAULT_TEMPLATE_LOC, verbose=True):
from jinja2 import Environment, FileSystemLoader
outfn = 'core.py'
ufuncfn = 'ufunc.c'
testdir = 'tests'
testfn = 'test_ufunc.py'
if verbose:
print_ = print
else:
def print_(*args, **kwargs):
return None
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(templateloc))
def prefix(a_list, pre):
return [pre+f'{an_element}' for an_element in a_list]
def postfix(a_list, post):
return [f'{an_element}'+post for an_element in a_list]
def surround(a_list, pre, post):
return [pre+f'{an_element}'+post for an_element in a_list]
env.filters['prefix'] = prefix
env.filters['postfix'] = postfix
env.filters['surround'] = surround
erfa_c_in = env.get_template(ufuncfn + '.templ')
erfa_py_in = env.get_template(outfn + '.templ')
# Prepare the jinja2 test templating environment
env2 = Environment(loader=FileSystemLoader(os.path.join(templateloc, testdir)))
test_py_in = env2.get_template(testfn + '.templ')
# Extract all the ERFA function names from erfa.h
if os.path.isdir(srcdir):
erfahfn = os.path.join(srcdir, 'erfa.h')
t_erfa_c_fn = os.path.join(srcdir, 't_erfa_c.c')
multifilserc = True
else:
erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h')
t_erfa_c_fn = os.path.join(os.path.split(srcdir)[0], 't_erfa_c.c')
multifilserc = False
with open(erfahfn, "r") as f:
erfa_h = f.read()
print_("read erfa header")
with open(t_erfa_c_fn, "r") as f:
t_erfa_c = f.read()
print_("read C tests")
funcs = OrderedDict()
section_subsection_functions = re.findall(
r'/\* (\w*)/(\w*) \*/\n(.*?)\n\n', erfa_h,
flags=re.DOTALL | re.MULTILINE)
for section, subsection, functions in section_subsection_functions:
print_(f"{section}.{subsection}")
if True:
func_names = re.findall(r' (\w+)\(.*?\);', functions,
flags=re.DOTALL)
for name in func_names:
print_(f"{section}.{subsection}.{name}...")
if multifilserc:
# easy because it just looks in the file itself
cdir = (srcdir if section != 'Extra' else
templateloc or '.')
funcs[name] = Function(name, cdir)
else:
# Have to tell it to look for a declaration matching
# the start of the header declaration, otherwise it
# might find a *call* of the function instead of the
# definition
for line in functions.split(r'\n'):
if name in line:
# [:-1] is to remove trailing semicolon, and
# splitting on '(' is because the header and
# C files don't necessarily have to match
# argument names and line-breaking or
# whitespace
match_line = line[:-1].split('(')[0]
funcs[name] = Function(name, cdir, match_line)
break
else:
raise ValueError("A name for a C file wasn't "
"found in the string that "
"spawned it. This should be "
"impossible!")
test_funcs = [TestFunction.from_function(funcs[name], t_erfa_c)
for name in sorted(funcs.keys())]
funcs = funcs.values()
# Extract all the ERFA constants from erfam.h
erfamhfn = os.path.join(srcdir, 'erfam.h')
with open(erfamhfn, 'r') as f:
erfa_m_h = f.read()
constants = []
for chunk in erfa_m_h.split("\n\n"):
result = re.findall(r"#define (ERFA_\w+?) (.+?)$", chunk,
flags=re.DOTALL | re.MULTILINE)
if result:
doc = re.findall(r"/\* (.+?) \*/\n", chunk, flags=re.DOTALL)
for (name, value) in result:
constants.append(Constant(name, value, doc))
# TODO: re-enable this when const char* return values and
# non-status code integer rets are possible
# #Add in any "extra" functions from erfaextra.h
# erfaextrahfn = os.path.join(srcdir, 'erfaextra.h')
# with open(erfaextrahfn, 'r') as f:
# for l in f:
# ls = l.strip()
# match = re.match('.* (era.*)\(', ls)
# if match:
# print_("Extra: {0} ...".format(match.group(1)))
# funcs.append(ExtraFunction(match.group(1), ls, erfaextrahfn))
print_("Rendering template")
erfa_c = erfa_c_in.render(funcs=funcs)
erfa_py = erfa_py_in.render(funcs=funcs, constants=constants)
test_py = test_py_in.render(test_funcs=test_funcs)
if outfn is not None:
print_(f"Saving to {outfn}, {ufuncfn} and {testfn}")
with open(os.path.join(templateloc, outfn), "w") as f:
f.write(erfa_py)
with open(os.path.join(templateloc, ufuncfn), "w") as f:
f.write(erfa_c)
with open(os.path.join(templateloc, testdir, testfn), "w") as f:
f.write(test_py)
print_("Done!")
return erfa_c, erfa_py, funcs, test_py, test_funcs
if __name__ == '__main__':
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?',
help='Directory where the ERFA c and header files '
'can be found or to a single erfa.c file '
'(which must be in the same directory as '
'erfa.h). Default: "{}"'.format(DEFAULT_ERFA_LOC))
ap.add_argument('-t', '--template-loc',
default=DEFAULT_TEMPLATE_LOC,
help='the location where the "core.py.templ" and '
'"ufunc.c.templ templates can be found.')
ap.add_argument('-q', '--quiet', action='store_false', dest='verbose',
help='Suppress output normally printed to stdout.')
args = ap.parse_args()
main(args.srcdir, args.template_loc, args.verbose)
| 38.444444
| 100
| 0.516441
|
0bbb40f94295f53f0dfa15ea38cc629e924e0dc6
| 25,172
|
py
|
Python
|
Apps/phpolyswarm/polyswarm_connector.py
|
phantom-dkhorasani/phantom-apps
|
6d7e6edcfef3cb97f6cbf1fa5e13a0b2a9f63ab1
|
[
"Apache-2.0"
] | null | null | null |
Apps/phpolyswarm/polyswarm_connector.py
|
phantom-dkhorasani/phantom-apps
|
6d7e6edcfef3cb97f6cbf1fa5e13a0b2a9f63ab1
|
[
"Apache-2.0"
] | null | null | null |
Apps/phpolyswarm/polyswarm_connector.py
|
phantom-dkhorasani/phantom-apps
|
6d7e6edcfef3cb97f6cbf1fa5e13a0b2a9f63ab1
|
[
"Apache-2.0"
] | null | null | null |
# File: polyswarm_connector.py
# Copyright (c) PolySwarm, 2019.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from phantom.vault import Vault
from polyswarm_consts import *
import os
import time
import requests
import json
import logging
import uuid
# Set Debug level
# logging.basicConfig(level=logging.DEBUG)
class Polyswarm_API:
def __init__(self, config):
"""
__init__
:param config: config with api key for connection
:return:
"""
self.config = config
self.headers = {'Authorization': self.config['polyswarm_api_key']}
def _http_request(self, method, path_url, data=None, files=None):
"""
Send HTTP Request
:param method: [get|post]
:param path_url: URL for request
:param data: data for 'post' or 'get' request
:param files: for uploading files with 'post'
:return: tuple (status_code, content)
"""
r = None
# set full URL for request
try:
full_url = '{base_url}{path_url}'.format(base_url=self.config['base_url'],
path_url=path_url)
except:
            logging.error('Error occurred while making HTTP Request. {0}'.format(POLYSWARM_CONFIG_PARAMS_ERR_MSG))
return phantom.APP_ERROR, None
logging.info('[{method}] URL: {full_url} - params/data: {data} - files: {files} - headers: {headers}'.
format(method=method.upper(),
full_url=full_url,
data=data,
files=files,
headers=self.headers))
if method.lower() == "get":
r = requests.get(full_url,
params=data,
headers=self.headers)
elif method.lower() == "post":
r = requests.post(full_url,
data=data,
files=files,
headers=self.headers)
r.raise_for_status()
logging.info('[Response] Status code: {status_code} - Content: {response}'.
format(status_code=r.status_code,
response=r.content))
return (r.status_code, r.content)
def _get_hash_type(self, hash):
"""
Return Hash Type
:param hash: hash string
:return: hash type string; empty if failed
"""
if len(hash) == 40:
return 'sha1'
elif len(hash) == 64:
return 'sha256'
elif len(hash) == 32:
return 'md5'
return ''
def search_hash(self, hash):
"""
Search Hash
:param hash: hash
:return: tuple (status_code, response)
"""
hash_type = self._get_hash_type(hash)
params = {'type': hash_type,
'with_instances': 'true',
'hash': hash}
return self._http_request('get', '/search', params)
def scan_url(self, url):
"""
Upload URL for scan
:param url: string
:return: tuple (status_code, response)
"""
path_url = '/consumer/{polyswarm_community}'.format(polyswarm_community=self.config['polyswarm_community'])
params = {'url': url,
'artifact-type': 'url'}
return self._http_request('post', path_url, params)
def lookup(self, uuid):
"""
UUID Lookup
:param uuid: string
:return: tuple (status_code, response)
"""
path_url = '/consumer/{polyswarm_community}/uuid/{uuid}'.format(polyswarm_community=self.config['polyswarm_community'],
uuid=uuid)
status_code, response = self._http_request('get', path_url)
window_closed = json.loads(response)['result']['files'][0]['window_closed']
# we got the results at first shot
if window_closed:
return (status_code, response)
        # we don't have the results yet - wait for the bounty to complete
# and try again
time.sleep(30)
while not window_closed:
status_code, response = self._http_request('get', path_url)
window_closed = json.loads(response)['result']['files'][0]['window_closed']
time.sleep(1)
return (status_code, response)
def search_url(self, url):
"""
Scan URL and return scan results
:param url: string
:return: (status_code, response, uuid)
"""
status_code, response = self.scan_url(url)
uuid = json.loads(response)['result']
status_code, response = self.lookup(uuid)
return (status_code, response, uuid)
def get_file(self, hash):
"""
Download file by hash
:param hash: File Hash for Download
:return: tuple (status_code, response)
"""
hash_type = self._get_hash_type(hash)
logging.info('[get_file] Hash type: {hash_type}'.
format(hash_type=hash_type))
return self._http_request('get', '/download/{hash_type}/{hash}'.
format(hash_type=hash_type, hash=hash))
def detonate_file(self, file_name, file_path):
"""
Upload File to Polyswarm and get the scan results
:param file_name: file name
:param file_path: complete path from the file to upload
:return: (status_code, response, uuid)
"""
path_url = '/consumer/{polyswarm_community}'.format(polyswarm_community=self.config['polyswarm_community'])
files = { 'file': (file_name, open(file_path, 'rb')) }
# Force re-scans if file was already submitted
# params = { 'force': 'true' }
params = {}
status_code, response = self._http_request('post', path_url, params, files)
uuid = json.loads(response)['result']
status_code, response = self.lookup(uuid)
return (status_code, response, uuid)
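# A minimal standalone sketch (not part of this connector) of the length-based hash typing
# that Polyswarm_API._get_hash_type relies on: hex digests of md5, sha1 and sha256 are 32,
# 40 and 64 characters long. The sample payload below is hypothetical.
def _hash_length_sketch(payload=b'example'):
    import hashlib
    return {
        'md5': len(hashlib.md5(payload).hexdigest()),        # 32
        'sha1': len(hashlib.sha1(payload).hexdigest()),      # 40
        'sha256': len(hashlib.sha256(payload).hexdigest()),  # 64
    }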
class PolyswarmConnector(BaseConnector):
def __init__(self, cli=False):
# Call the BaseConnectors init first
super(PolyswarmConnector, self).__init__()
self.polyswarm_api = None
self._state = None
self._base_url = None
        # flag indicating whether we were invoked from the command line
self.cli = cli
def initialize(self):
# Load the state in initialize, use it to store data
# that needs to be accessed across actions
self._state = self.load_state()
# get the asset config
config = self.get_config()
# setup polyswarm_api object
self.polyswarm_api = Polyswarm_API(config)
# Access action parameters passed in the 'param' dictionary
try:
self.save_progress('Base URL is: {base_url} - Community: {polyswarm_community}'.
format(base_url=config['base_url'],
polyswarm_community=config['polyswarm_community']))
except:
self.save_progress(POLYSWARM_DEBUG_ERROR_MSG)
return phantom.APP_ERROR
return phantom.APP_SUCCESS
def finalize(self):
# Save the state, this data is saved across actions and app upgrades
self.save_state(self._state)
return phantom.APP_SUCCESS
def _update_results(self, action_result, total_scans, positives, uuid):
# update result_data -> summary
action_result.update_summary({'total_scans': str(total_scans)})
action_result.update_summary({'scan_uuid': uuid})
action_result.update_summary({'positives': str(positives)})
# update result_data -> data
data = { 'total': str(total_scans),
'permalink': '{url_results}/{uuid}'.
format(url_results=POLYSWARM_URL_RESULTS,
uuid=uuid),
'positives': str(positives),
'scan_uuid': uuid }
action_result.add_data(data)
def _handle_test_connectivity(self, param):
EICAR_HASH = '131f95c51cc819465fa1797f6ccacf9d494aaaff46fa3eac73ae63ffbdfd8267'
        # Polyswarm API Response
# HTTP Response
# status code
# response
status_code = 0
response = ''
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
try:
status_code, response = self.polyswarm_api.search_hash(EICAR_HASH)
if (phantom.is_fail(status_code)):
# the call to the 3rd party device or service failed, action result should contain all the error details
# for now the return is commented out, but after implementation, return from here
return action_result.get_status()
except requests.exceptions.HTTPError as err:
return action_result.\
set_status(phantom.APP_ERROR,
'Error with endpoint: {err}'.
format(err=err))
self.save_progress("Connection successful")
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_file_reputation(self, param):
self.save_progress('In action handler for: {0}'.
format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector)
# to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# default values
total_scans = 0
positives = 0
        # Polyswarm API Response
# HTTP Response
# status code
# response
# uuid = uuid from Polyswarm
status_code = 0
response = ''
uuid = 'null'
try:
status_code, response = self.polyswarm_api.search_hash(param['hash'])
if (phantom.is_fail(status_code)):
# the call to the 3rd party device or service failed, action result should contain all the error details
# for now the return is commented out, but after implementation, return from here
return action_result.get_status()
# load json response for iteration
try:
artifact_instances = json.loads(response)['result'][0]['artifact_instances']
except:
return action_result.set_status(phantom.APP_ERROR,
'Error in response. Details: ' + (str(response)))
uuid = artifact_instances[0]['bounty_result']['uuid']
assertions = artifact_instances[0]['bounty_result']['files'][0]['assertions']
for assertion in assertions:
for k, v in assertion.items():
if k == 'verdict' and v:
positives += 1
total_scans += 1
self.debug_print('Positives: {positives} - Total Scans: {total_scans}'.
format(positives=positives, total_scans=total_scans))
except requests.exceptions.HTTPError as err:
if err.response.status_code == 404:
# sample not found
# returning default values == 0
pass
else:
# we got another err - report it
return action_result.\
set_status(phantom.APP_ERROR,
'Error with endpoint: {err}'.
format(err=err))
self._update_results(action_result,
total_scans,
positives,
uuid)
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_get_file(self, param):
self.save_progress('In action handler for: {0}'.
format(self.get_action_identifier()))
action_result = self.add_action_result(ActionResult(dict(param)))
        # Polyswarm API Response
# HTTP Response
# status code
# response
status_code = 0
response = ''
try:
status_code, response = self.polyswarm_api.get_file(param['hash'])
if (phantom.is_fail(status_code)):
return action_result.get_status()
if hasattr(Vault, 'get_vault_tmp_dir'):
temp_dir = Vault.get_vault_tmp_dir()
else:
                temp_dir = '/opt/phantom/vault/tmp'
temp_dir = temp_dir + '/{}'.format(uuid.uuid4())
os.makedirs(temp_dir)
file_path = os.path.join(temp_dir, param['hash'])
with open(file_path, 'wb') as f:
f.write(response)
if self.cli:
container_id = 1
else:
container_id = self.get_container_id()
self.debug_print('file_path: {file_path}'.format(file_path=file_path))
self.debug_print('container_id: {container_id}'.
format(container_id=container_id))
vault_response = Vault.add_attachment(file_location=file_path,
container_id=container_id,
file_name=param['hash'])
self.debug_print(vault_response)
if vault_response['succeeded']:
file_info = Vault.get_file_info(file_name=param['hash'])[0]
self.debug_print('Vault File Info: {file_info}'.
format(file_info=file_info))
action_result.update_summary(file_info)
action_result.add_data(file_info)
return action_result.set_status(phantom.APP_SUCCESS,
'File Downloaded Successfully')
else:
return action_result.set_status(phantom.APP_ERROR,
vault_response['message'])
except requests.exceptions.HTTPError as err:
if err.response.status_code == 404:
# sample not found
return action_result.set_status(phantom.APP_ERROR,
'File Not Found')
else:
# we got another err - report it
return action_result.\
set_status(phantom.APP_ERROR,
'Error with endpoint: {err}'.
format(err=err))
def _handle_detonate_file(self, param):
self.save_progress('In action handler for: {0}'.
format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# default values
total_scans = 0
positives = 0
file_info = None
        # Polyswarm API Response
# HTTP Response
# status_code
# response
# Result
# uuid = uuid from Polyswarm
status_code = 0
response = ''
uuid = ''
vault_id = param['vault_id']
try:
file_info = Vault.get_file_info(vault_id=vault_id)[0]
self.debug_print(file_info)
except:
if not file_info:
return action_result.set_status(phantom.APP_ERROR,
'Error: File not found in Vault')
try:
status_code, response, uuid = self.polyswarm_api.detonate_file(file_info['name'],
file_info['path'])
if (phantom.is_fail(status_code)):
return action_result.get_status()
# load json response for iteration
try:
assertions = json.loads(response)['result']['files'][0]['assertions']
except:
return action_result.set_status(phantom.APP_ERROR,
'Error in response. Details: ' + (str(response)))
# iterate for getting positives and total_scan number
for assertion in assertions:
for k, v in assertion.items():
if k == 'verdict' and v:
positives += 1
total_scans += 1
self.debug_print('Positives: {positives} - Total Scans: {total_scans}'.
format(positives=positives, total_scans=total_scans))
except requests.exceptions.HTTPError as err:
# we got another err - report it
return action_result.\
set_status(phantom.APP_ERROR,
'Error with endpoint: {err}'.
format(err=err))
self._update_results(action_result,
total_scans,
positives,
uuid)
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_url_reputation(self, param, artifact):
self.save_progress('In action handler for: {0}'.
format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# default values
total_scans = 0
positives = 0
        # Polyswarm API Response
# HTTP Response
# status_code
# response
# Result
# uuid = uuid from Polyswarm
status_code = 0
response = ''
uuid = ''
try:
status_code, response, uuid = self.polyswarm_api.search_url(param[artifact])
if (phantom.is_fail(status_code)):
                # the call to the 3rd party device or service failed; return the error details
return action_result.get_status()
# load json response for iteration
try:
assertions = json.loads(response)['result']['files'][0]['assertions']
except:
return action_result.set_status(phantom.APP_ERROR,
'Error in response. Details: ' + (str(response)))
# iterate for getting positives and total_scan number
for assertion in assertions:
for k, v in assertion.items():
if k == 'verdict' and v:
positives += 1
total_scans += 1
except requests.exceptions.HTTPError as err:
# err
return action_result.\
set_status(phantom.APP_ERROR,
'Error with endpoint: {err}'.
format(err=err))
self._update_results(action_result,
total_scans,
positives,
uuid)
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_get_report(self, param):
self.save_progress('In action handler for: {0}'.
format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# default values
total_scans = 0
positives = 0
        # Polyswarm API Response
# HTTP Response
# status_code
# response
status_code = 0
response = ''
try:
status_code, response = self.polyswarm_api.lookup(param['scan_uuid'])
if (phantom.is_fail(status_code)):
                # the call to the 3rd party device or service failed; return the error details
return action_result.get_status()
# load json response for iteration
try:
assertions = json.loads(response)['result']['files'][0]['assertions']
except:
return action_result.set_status(phantom.APP_ERROR,
'Error in response. Details: ' + (str(response)))
# iterate for getting positives and total_scan number
for assertion in assertions:
for k, v in assertion.items():
if k == 'verdict' and v:
positives += 1
total_scans += 1
except requests.exceptions.HTTPError as err:
# err
return action_result.\
set_status(phantom.APP_ERROR,
'Error with endpoint: {err}'.
format(err=err))
self._update_results(action_result,
total_scans,
positives,
param['scan_uuid'])
return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
ret_val = phantom.APP_SUCCESS
# Get the action that we are supposed to execute for this App Run
action_id = self.get_action_identifier()
self.debug_print('action_id', self.get_action_identifier())
if action_id == 'test_connectivity':
ret_val = self._handle_test_connectivity(param)
elif action_id == 'file_reputation':
ret_val = self._handle_file_reputation(param)
elif action_id == 'get_file':
ret_val = self._handle_get_file(param)
elif action_id == 'detonate_file':
ret_val = self._handle_detonate_file(param)
elif action_id == 'url_reputation':
ret_val = self._handle_url_reputation(param, 'url')
elif action_id == 'ip_reputation':
ret_val = self._handle_url_reputation(param, 'ip')
elif action_id == 'domain_reputation':
ret_val = self._handle_url_reputation(param, 'domain')
elif action_id == 'detonate_url':
ret_val = self._handle_url_reputation(param, 'url')
elif action_id == 'get_report':
ret_val = self._handle_get_report(param)
return ret_val
# standalone
if __name__ == '__main__':
    # import pudb
import argparse
# pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument('input_test_json', help='Input Test JSON file')
argparser.add_argument('-u', '--username', help='username', required=False)
argparser.add_argument('-p', '--password', help='password', required=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
if (username is not None and password is None):
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass('Password: ')
if (username and password):
try:
login_url = PolyswarmConnector._get_phantom_base_url() + '/login'
print ('Accessing the Login page')
r = requests.get(login_url, verify=False)
csrftoken = r.cookies['csrftoken']
data = dict()
data['username'] = username
data['password'] = password
data['csrfmiddlewaretoken'] = csrftoken
headers = dict()
headers['Cookie'] = 'csrftoken=' + csrftoken
headers['Referer'] = login_url
print ('Logging into Platform to get the session id')
r2 = requests.post(login_url, verify=False, data=data, headers=headers)
session_id = r2.cookies['sessionid']
except Exception as e:
print ('Unable to get session id from the platform. Error: {e}'.
format(e=str(e)))
exit(1)
with open(args.input_test_json) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = PolyswarmConnector(cli=True)
connector.print_progress_message = True
if (session_id is not None):
in_json['user_session_token'] = session_id
connector._set_csrf_info(csrftoken, headers['Referer'])
ret_val = connector._handle_action(json.dumps(in_json), None)
print (json.dumps(json.loads(ret_val), indent=4))
exit(0)
| 35.453521
| 127
| 0.559868
|
c3c5767d492ac42d94b70b85e050d5ded8962c69
| 8,409
|
py
|
Python
|
django/contrib/sitemaps/tests/test_http.py
|
dnozay/django
|
5dcdbe95c749d36072f527e120a8cb463199ae0d
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T17:16:56.000Z
|
2019-01-31T17:16:56.000Z
|
django/contrib/sitemaps/tests/test_http.py
|
rmutter/django
|
5d044339037be879a11b03fe8bd8c3ef1d520b1a
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/sitemaps/tests/test_http.py
|
rmutter/django
|
5d044339037be879a11b03fe8bd8c3ef1d520b1a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import os
from datetime import date
from unittest import skipUnless
from django.apps import apps
from django.conf import settings
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
from django.utils.formats import localize
from django.utils._os import upath
from django.utils.translation import activate, deactivate
from .base import TestModel, SitemapTestsBase
class HTTPSitemapTests(SitemapTestsBase):
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_sitemap_last_modified(self):
"Tests that Last-Modified header is set correctly"
response = self.client.get('/lastmod/sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')
def test_sitemap_last_modified_missing(self):
"Tests that Last-Modified header is missing when sitemap has no lastmod"
response = self.client.get('/generic/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemap_last_modified_mixed(self):
"Tests that Last-Modified header is omitted when lastmod not on all items"
response = self.client.get('/lastmod-mixed/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception.
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@skipUnless(apps.is_installed('django.contrib.sites'),
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
test_sitemap = GenericSitemap({'queryset': TestModel.objects.all()})
def is_testmodel(url):
return isinstance(url['item'], TestModel)
item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
Check that a cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_x_robots_sitemap(self):
response = self.client.get('/simple/index.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
def test_empty_sitemap(self):
response = self.client.get('/empty/sitemap.xml')
self.assertEqual(response.status_code, 200)
@override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese')))
def test_simple_i18nsitemap_index(self):
"A simple i18n sitemap index can be rendered"
response = self.client.get('/simple/i18n.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""".format(self.base_url, self.i18n_model.pk)
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
| 45.454054
| 204
| 0.692829
|
dcfb050c8bb645de527e6088c93d56750205e7c6
| 3,128
|
py
|
Python
|
bokeh/core/property/any.py
|
Jaok-ku/bokeh
|
9330ed3afd22712baa152e2848c74d7843e74645
|
[
"BSD-3-Clause"
] | 1
|
2021-10-30T00:32:00.000Z
|
2021-10-30T00:32:00.000Z
|
bokeh/core/property/any.py
|
Jaok-ku/bokeh
|
9330ed3afd22712baa152e2848c74d7843e74645
|
[
"BSD-3-Clause"
] | 1
|
2021-02-12T19:50:48.000Z
|
2021-02-12T23:32:21.000Z
|
bokeh/core/property/any.py
|
Jaok-ku/bokeh
|
9330ed3afd22712baa152e2848c74d7843e74645
|
[
"BSD-3-Clause"
] | 2
|
2021-01-12T18:22:24.000Z
|
2021-10-30T00:32:02.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Provide wildcard properties.
The Any and AnyRef properties can be used to hold values without performing
any validation.
"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .bases import Property
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Any',
'AnyRef'
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Any(Property):
""" Accept all values.
The ``Any`` property does not do any validation or transformation.
Args:
default (obj or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
Example:
.. code-block:: python
>>> class AnyModel(HasProps):
... prop = Any()
...
>>> m = AnyModel()
>>> m.prop = True
>>> m.prop = 10
>>> m.prop = 3.14
>>> m.prop = "foo"
>>> m.prop = [1, 2, 3]
"""
class AnyRef(Property):
""" Accept all values and force reference discovery. """
@property
def has_ref(self):
return True
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 30.368932
| 78
| 0.356777
|
9f15ec3e595e537d83146381242aa66460e42f11
| 197
|
py
|
Python
|
Contest/ABC064/d/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC064/d/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
Contest/ABC064/d/main.py
|
mpses/AtCoder
|
9c101fcc0a1394754fcf2385af54b05c30a5ae2a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
input()
s = input()
L = R = 0
for t in s:
if t == "(":
R += 1
else:
if R:
R -= 1
else:
L += 1
print("("*L + s + ")"*R)
| 15.153846
| 24
| 0.329949
|
ea019b4e3a69ed6d00ebb2a624c0f55704669fce
| 725
|
py
|
Python
|
var/spack/repos/builtin/packages/r-scrime/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-scrime/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/r-scrime/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RScrime(RPackage):
"""Analysis of High-Dimensional Categorical Data Such as SNP Data.
Tools for the analysis of high-dimensional data developed/implemented at
the group "Statistical Complexity Reduction In Molecular Epidemiology"
(SCRIME). Main focus is on SNP data. But most of the functions can also be
applied to other types of categorical data."""
cran = "scrime"
version('1.3.5', sha256='5d97d3e57d8eb30709340fe572746029fd139456d7a955421c4e3aa75d825578')
| 36.25
| 95
| 0.764138
|
12dcdc6ceeee060647e272c39b26adfb1ab55d4f
| 11,915
|
py
|
Python
|
porterStemmer.py
|
jcob-sikorski/searchEngine
|
db8b6295f81bc617e81abf5f8a8280f1f2073fd8
|
[
"MIT"
] | null | null | null |
porterStemmer.py
|
jcob-sikorski/searchEngine
|
db8b6295f81bc617e81abf5f8a8280f1f2073fd8
|
[
"MIT"
] | null | null | null |
porterStemmer.py
|
jcob-sikorski/searchEngine
|
db8b6295f81bc617e81abf5f8a8280f1f2073fd8
|
[
"MIT"
] | null | null | null |
import sys
class PorterStemmer:
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
readjusted downwards as the stemming progresses. Zero termination is
not in fact used in the algorithm.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
def cons(self, i):
"""cons(i) is TRUE <=> b[i] is a consonant."""
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
def m(self):
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
n = 0
i = self.k0
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
while 1:
while 1:
if i > self.j:
return n
if self.cons(i):
break
i = i + 1
i = i + 1
n = n + 1
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
def vowelinstem(self):
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
return 1
return 0
def doublec(self, j):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j-1]):
return 0
return self.cons(j)
def cvc(self, i):
"""cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant
and also if the second c is not w,x or y. this is used when trying to
        restore an e at the end of a short word, e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):
return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1
def ends(self, s):
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1
def setto(self, s):
"""setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
length = len(s)
self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
self.k = self.j + length
def r(self, s):
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != 's':
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"): self.setto("ate")
elif self.ends("bl"): self.setto("ble")
elif self.ends("iz"): self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == 'l' or ch == 's' or ch == 'z':
self.k = self.k + 1
elif (self.m() == 1 and self.cvc(self.k)):
self.setto("e")
def step1c(self):
"""step1c() turns terminal y to i when there is another vowel in the stem."""
if (self.ends("y") and self.vowelinstem()):
self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]
def step2(self):
"""step2() maps double suffices to single ones.
so -ization ( = -ize plus -ation) maps to -ize etc. note that the
string before the suffix must give m() > 0.
"""
if self.b[self.k - 1] == 'a':
if self.ends("ational"): self.r("ate")
elif self.ends("tional"): self.r("tion")
elif self.b[self.k - 1] == 'c':
if self.ends("enci"): self.r("ence")
elif self.ends("anci"): self.r("ance")
elif self.b[self.k - 1] == 'e':
if self.ends("izer"): self.r("ize")
elif self.b[self.k - 1] == 'l':
if self.ends("bli"): self.r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self.ends("abli"): self.r("able")
elif self.ends("alli"): self.r("al")
elif self.ends("entli"): self.r("ent")
elif self.ends("eli"): self.r("e")
elif self.ends("ousli"): self.r("ous")
elif self.b[self.k - 1] == 'o':
if self.ends("ization"): self.r("ize")
elif self.ends("ation"): self.r("ate")
elif self.ends("ator"): self.r("ate")
elif self.b[self.k - 1] == 's':
if self.ends("alism"): self.r("al")
elif self.ends("iveness"): self.r("ive")
elif self.ends("fulness"): self.r("ful")
elif self.ends("ousness"): self.r("ous")
elif self.b[self.k - 1] == 't':
if self.ends("aliti"): self.r("al")
elif self.ends("iviti"): self.r("ive")
elif self.ends("biliti"): self.r("ble")
elif self.b[self.k - 1] == 'g': # --DEPARTURE--
if self.ends("logi"): self.r("log")
# To match the published algorithm, delete this phrase
def step3(self):
"""step3() dels with -ic-, -full, -ness etc. similar strategy to step2."""
if self.b[self.k] == 'e':
if self.ends("icate"): self.r("ic")
elif self.ends("ative"): self.r("")
elif self.ends("alize"): self.r("al")
elif self.b[self.k] == 'i':
if self.ends("iciti"): self.r("ic")
elif self.b[self.k] == 'l':
if self.ends("ical"): self.r("ic")
elif self.ends("ful"): self.r("")
elif self.b[self.k] == 's':
if self.ends("ness"): self.r("")
def step4(self):
"""step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
if self.b[self.k - 1] == 'a':
if self.ends("al"): pass
else: return
elif self.b[self.k - 1] == 'c':
if self.ends("ance"): pass
elif self.ends("ence"): pass
else: return
elif self.b[self.k - 1] == 'e':
if self.ends("er"): pass
else: return
elif self.b[self.k - 1] == 'i':
if self.ends("ic"): pass
else: return
elif self.b[self.k - 1] == 'l':
if self.ends("able"): pass
elif self.ends("ible"): pass
else: return
elif self.b[self.k - 1] == 'n':
if self.ends("ant"): pass
elif self.ends("ement"): pass
elif self.ends("ment"): pass
elif self.ends("ent"): pass
else: return
elif self.b[self.k - 1] == 'o':
if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
elif self.ends("ou"): pass
# takes care of -ous
else: return
elif self.b[self.k - 1] == 's':
if self.ends("ism"): pass
else: return
elif self.b[self.k - 1] == 't':
if self.ends("ate"): pass
elif self.ends("iti"): pass
else: return
elif self.b[self.k - 1] == 'u':
if self.ends("ous"): pass
else: return
elif self.b[self.k - 1] == 'v':
if self.ends("ive"): pass
else: return
elif self.b[self.k - 1] == 'z':
if self.ends("ize"): pass
else: return
else:
return
if self.m() > 1:
self.k = self.j
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k-1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k -1
def stem(self, p, i, j):
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
stemmer adjusts the characters p[i] ... p[j] and returns the new
end-point of the string, k. Stemming never increases word length, so
i <= k <= j. To turn the stemmer into a module, declare 'stem' as
extern, and delete the remainder of this file.
"""
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.step1ab()
self.step1c()
self.step2()
self.step3()
self.step4()
self.step5()
return self.b[self.k0:self.k+1]
if __name__ == '__main__':
p = PorterStemmer()
if len(sys.argv) > 1:
for f in sys.argv[1:]:
infile = open(f, 'r')
while 1:
output = ''
word = ''
line = infile.readline()
if line == '':
break
for c in line:
if c.isalpha():
word += c.lower()
else:
if word:
output += p.stem(word, 0,len(word)-1)
word = ''
output += c.lower()
print(output, end=" ")
infile.close()
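# Minimal interactive usage sketch (illustrative only; input is assumed to be
# lower case, as required by the class docstring):
#   p = PorterStemmer()
#   p.stem("caresses", 0, len("caresses") - 1)   # -> "caress"
#   p.stem("ponies", 0, len("ponies") - 1)       # -> "poni"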
| 35.356083
| 108
| 0.450441
|
50259feea42b50c6b199d86006f2937571486739
| 10,148
|
py
|
Python
|
spikeinterface/sortingcomponents/motion_correction.py
|
chyumin/spikeinterface
|
12b7863684b705e3d0ae8165bb3009143c70530c
|
[
"MIT"
] | null | null | null |
spikeinterface/sortingcomponents/motion_correction.py
|
chyumin/spikeinterface
|
12b7863684b705e3d0ae8165bb3009143c70530c
|
[
"MIT"
] | null | null | null |
spikeinterface/sortingcomponents/motion_correction.py
|
chyumin/spikeinterface
|
12b7863684b705e3d0ae8165bb3009143c70530c
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.interpolate
import sklearn
from tqdm import tqdm
import sklearn.metrics
from spikeinterface.toolkit.preprocessing.basepreprocessor import BasePreprocessor, BasePreprocessorSegment
try:
import numba
HAVE_NUMBA = True
except ImportError:
HAVE_NUMBA = False
def correct_motion_on_peaks(peaks, peak_locations, times,
motion, temporal_bins, spatial_bins,
direction='y', progress_bar=False):
"""
Given the output of estimate_motion(), apply inverse motion on peak location.
Parameters
----------
peaks: np.array
peaks vector
peak_locations: np.array
peaks location vector
times: np.array
times vector of recording
motion: np.array 2D
motion.shape[0] equal temporal_bins.shape[0]
        motion.shape[1] equal 1 when "rigid" motion
                        equal spatial_bins.shape[0] when "non-rigid"
temporal_bins: np.array
Temporal bins in second.
spatial_bins: None or np.array
Bins for non-rigid motion. If None, rigid motion is used
Returns
-------
corrected_peak_locations: np.array
Motion-corrected peak locations
"""
corrected_peak_locations = peak_locations.copy()
if spatial_bins is None:
# rigid motion interpolation 1D
sample_bins = np.searchsorted(times, temporal_bins)
f = scipy.interpolate.interp1d(sample_bins, motion[:, 0], bounds_error=False, fill_value="extrapolate")
shift = f(peaks['sample_ind'])
corrected_peak_locations[direction] -= shift
else:
# non rigid motion = interpolation 2D
sample_bins = np.searchsorted(times, temporal_bins)
f = scipy.interpolate.RegularGridInterpolator((sample_bins, spatial_bins), motion,
method='linear', bounds_error=False, fill_value=None)
shift = f(list(zip(peaks['sample_ind'], peak_locations[direction])))
corrected_peak_locations[direction] -= shift
return corrected_peak_locations
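# Illustrative rigid-motion call (hypothetical values; field names follow the
# structured dtypes used above):
#   peaks = np.zeros(2, dtype=[('sample_ind', 'int64')])
#   peaks['sample_ind'] = [1000, 20000]
#   peak_locations = np.zeros(2, dtype=[('y', 'float64')])
#   times = np.arange(30000) / 30000.
#   temporal_bins = np.array([0., 0.5, 1.0])
#   motion = np.array([[0.], [5.], [10.]])  # rigid drift per temporal bin
#   corrected = correct_motion_on_peaks(peaks, peak_locations, times,
#                                       motion, temporal_bins, None)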
def correct_motion_on_traces(traces, times, channel_locations, motion, temporal_bins, spatial_bins, direction=1,):
"""
Apply inverse motion with spatial interpolation on traces.
Traces can be full traces, but also waveforms snippets.
Parameters
----------
traces : np.array
Trace snippet (num_samples, num_channels)
channel_location: np.array 2d
Channel location with shape (n, 2) or (n, 3)
motion: np.array 2D
motion.shape[0] equal temporal_bins.shape[0]
        motion.shape[1] equal 1 when "rigid" motion
                        equal spatial_bins.shape[0] when "non-rigid"
temporal_bins: np.array
Temporal bins in second.
spatial_bins: None or np.array
Bins for non-rigid motion. If None, rigid motion is used
direction: int in (0, 1, 2)
Dimension of shift in channel_locations.
Returns
-------
    traces_corrected: np.array
        Motion-corrected traces with the same shape as the input traces
"""
assert HAVE_NUMBA
assert times.shape[0] == traces.shape[0]
traces_corrected = np.zeros_like(traces)
# print(traces_corrected.shape)
if spatial_bins is None:
# rigid motion interpolation 1D
raise NotImplementedError
else:
# non rigid motion = interpolation 2D
        # regroup times by closest temporal_bins
bin_inds = _get_closest_ind(temporal_bins, times)
        # interpolation kernel will be the same per temporal bin
for bin_ind in np.unique(bin_inds):
# Step 1 : interpolation channel motion for this temporal bin
f = scipy.interpolate.interp1d(spatial_bins, motion[bin_ind, :], kind='linear',
axis=0, bounds_error=False, fill_value="extrapolate")
locs = channel_locations[:, direction]
channel_motions = f(locs)
channel_locations_moved = channel_locations.copy()
channel_locations_moved[:, direction] += channel_motions
# Step 2 : interpolate trace
# interpolation is done with Inverse Distance Weighted
# because it is simple to implement
            # Instead we should use the convex hull / Delaunay triangulation http://www.qhull.org/
# scipy.interpolate.LinearNDInterpolator and qhull.Delaunay should help for this
distances = sklearn.metrics.pairwise_distances(channel_locations_moved, channel_locations,
metric='euclidean')
num_chans = channel_locations.shape[0]
num_closest = 3
closest_chans = np.zeros((num_chans, num_closest), dtype='int64')
weights = np.zeros((num_chans, num_closest), dtype='float32')
for c in range(num_chans):
ind_sorted = np.argsort(distances[c, ])
closest_chans[c, :] = ind_sorted[:num_closest]
dists = distances[c, ind_sorted[:num_closest]]
if dists[0] == 0.:
                    # no interpolation: the closest channel is at zero distance
weights[c, :] = 0
weights[c, 0] = 1
else:
# Inverse Distance Weighted
w = 1 / dists
w /= np.sum(w)
weights[c, :] = w
my_inverse_weighted_distance_interpolation(traces, traces_corrected, closest_chans, weights)
return traces_corrected
if HAVE_NUMBA:
@numba.jit(parallel=False)
def my_inverse_weighted_distance_interpolation(traces, traces_corrected, closest_chans, weights):
num_sample = traces.shape[0]
num_chan = traces.shape[1]
num_closest = closest_chans.shape[1]
for sample_ind in range(num_sample):
for chan_ind in range(num_chan):
v = 0
for i in range(num_closest):
other_chan = closest_chans[chan_ind, i]
v += weights[chan_ind, i] * traces[sample_ind, other_chan]
traces_corrected[sample_ind, chan_ind] = v
def _get_closest_ind(array, values):
# https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
# get insert positions
idxs = np.searchsorted(array, values, side="left")
# find indexes where previous index is closer
prev_idx_is_less = ((idxs == len(array)) | (np.fabs(values - array[np.maximum(idxs-1, 0)]) <
np.fabs(values - array[np.minimum(idxs, len(array)-1)])))
idxs[prev_idx_is_less] -= 1
return idxs
class CorrectMotionRecording(BasePreprocessor):
"""
Recording that corrects motion on-the-fly given a rigid or non-rigid
motion vector estimation.
    For every time bin, this internally applies an inverse distance weighted
    interpolation on the original traces after reversing the motion.
This is still experimental at the moment.
    estimate_motion() must be called before this to get the motion vector.
Parameters
----------
recording: Recording
The parent recording.
motion: np.array 2D
motion.shape[0] equal temporal_bins.shape[0]
        motion.shape[1] equal 1 when "rigid" motion
                        equal spatial_bins.shape[0] when "non-rigid"
temporal_bins: np.array
Temporal bins in second.
spatial_bins: None or np.array
Bins for non-rigid motion. If None, rigid motion is used
direction: int in (0, 1, 2)
Dimension of shift in channel_locations.
Returns
-------
Corrected_recording: CorrectMotionRecording
Recording after motion correction
"""
name = 'correct_motion'
def __init__(self, recording, motion, temporal_bins, spatial_bins, direction=1):
        assert recording.get_num_segments() == 1, 'motion correction is handled for only one segment at the moment'
BasePreprocessor.__init__(self, recording)
channel_locations = recording.get_channel_locations()
for parent_segment in recording._recording_segments:
rec_segment = CorrectMotionRecordingSegment(parent_segment, channel_locations,
motion, temporal_bins, spatial_bins, direction)
self.add_recording_segment(rec_segment)
self._kwargs = dict(recording=recording.to_dict(), motion=motion, temporal_bins=temporal_bins,
spatial_bins=spatial_bins, direction=direction)
# self.is_dumpable= False
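# Sketch of the intended wiring (hypothetical variable names; motion,
# temporal_bins and spatial_bins are assumed to come from estimate_motion()):
#   rec_corrected = CorrectMotionRecording(recording, motion,
#                                          temporal_bins, spatial_bins,
#                                          direction=1)
#   # corrected traces are then read through the usual Recording API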
class CorrectMotionRecordingSegment(BasePreprocessorSegment):
def __init__(self, parent_recording_segment, channel_locations, motion, temporal_bins, spatial_bins, direction):
BasePreprocessorSegment.__init__(self, parent_recording_segment)
self.channel_locations = channel_locations
self.motion = motion
self.temporal_bins = temporal_bins
self.spatial_bins = spatial_bins
self.direction = direction
def get_traces(self, start_frame, end_frame, channel_indices):
if self.time_vector is not None:
times = np.asarray(self.time_vector[start_frame:end_frame])
else:
times = np.arange(end_frame - start_frame, dtype='float64')
times /= self.sampling_frequency
t0 = start_frame / self.sampling_frequency
if self.t_start is not None:
t0 = t0 + self.t_start
times += t0
traces = self.parent_recording_segment.get_traces(start_frame, end_frame, channel_indices=None)
# print(traces.shape, times.shape, self.channel_locations, self.motion, self.temporal_bins, self.spatial_bins)
trace2 = correct_motion_on_traces(traces, times, self.channel_locations, self.motion,
self.temporal_bins, self.spatial_bins, direction=self.direction)
if trace2 is not None:
trace2 = trace2[:, channel_indices]
return trace2
| 39.640625
| 118
| 0.642787
|
aa1144058ab1a707dc8a629c2b87c94bb797cad6
| 2,984
|
py
|
Python
|
problem_3.py
|
johangenis/problems_vs_algorithms
|
9925d7319de849fd7814cf87050232c22d8c2a96
|
[
"MIT"
] | null | null | null |
problem_3.py
|
johangenis/problems_vs_algorithms
|
9925d7319de849fd7814cf87050232c22d8c2a96
|
[
"MIT"
] | null | null | null |
problem_3.py
|
johangenis/problems_vs_algorithms
|
9925d7319de849fd7814cf87050232c22d8c2a96
|
[
"MIT"
] | null | null | null |
def heapsort(arr):
arr_len = len(arr)
for i in range(arr_len - 1, -1, -1):
heapify(arr, len(arr), i)
# Swap the top element in heap with last element in array
arr[0], arr[i] = arr[i], arr[0]
def heapify(arr, n, i):
"""
:param: arr - array to heapify
n -- number of elements in the array
i -- index of the current node
    Converts arr[0..i] (in place) into a max heap, a complete binary tree with the largest values at the top
"""
for i in range(1, i + 1):
# Perform heapify processing
data_index = i
while data_index > 0:
parent_index = (data_index - 1) // 2
if arr[data_index] > arr[parent_index]:
arr[data_index], arr[parent_index] = arr[parent_index], arr[data_index]
data_index = parent_index
else:
break
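# Illustrative behaviour of the two helpers above (in place, ascending order):
#   data = [4, 6, 2, 5, 9, 8]
#   heapsort(data)
#   data  # -> [2, 4, 5, 6, 8, 9]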
def rearrange_digits(input_list):
"""
    Rearrange Array Elements so as to form two numbers such that their sum is maximum.
Args:
input_list(list): Input List
Returns:
       (int),(int): Two numbers whose sum is the maximum possible
"""
# Handle empty input list
if len(input_list) == 0:
return []
# Step 1 - perform heap sort on the input list
heapsort(input_list)
    # Step 2 - based on the sorted list, construct the 2 numbers so their sum is maximum
number_1_list = list()
number_2_list = list()
input_list_len = len(input_list)
    # If the no. of digits is odd, give the largest digit to the first number
if input_list_len % 2 == 1:
digit = input_list.pop()
number_1_list.append(digit)
    # Append the digits in the input list to the 2 numbers in an interleaved manner
input_list_len = len(input_list)
for i in range(input_list_len, 0, -1):
digit = input_list.pop()
if i % 2 == 0:
number_1_list.append(digit)
else:
number_2_list.append(digit)
# Convert the 2 list of digits into a string
number_1_str = ''.join(str(n) for n in number_1_list)
number_2_str = ''.join(str(n) for n in number_2_list)
# Convert the number string to int
number_1 = int(number_1_str)
number_2 = int(number_2_str)
return [number_1, number_2]
def test_function(test_case):
output = rearrange_digits(test_case[0])
solution = test_case[1]
if sum(output) == sum(solution):
print("Pass")
else:
print("Fail")
# Test case 1 - un-sorted array as input
print("Calling function with un-sorted array: [4, 6, 2, 5, 9, 8]")
test_case_1 = [[4, 6, 2, 5, 9, 8], [964, 852]]
# Should print pass as the output should be [964, 852]
test_function(test_case_1)
# Test case 2 - sorted array as input
test_case_2 = [[1, 2, 3, 4, 5], [542, 31]]
# Should print pass as the output should be [542, 31]
test_function(test_case_2)
# Test case 3 - empty array as input
test_case_3 = [[], []]
# Should print pass as the output should be []
test_function(test_case_3)
| 29.544554
| 112
| 0.62567
|
22ec2b60ed48f41c8cf56978f53627d7dbc08996
| 869
|
py
|
Python
|
tests/unit/test_vcs_mercurial.py
|
rogerhil/pip
|
7616583dbb2dcbda5a19d78873642d6751fbf017
|
[
"MIT"
] | 7,089
|
2015-01-01T10:48:04.000Z
|
2022-03-31T08:47:02.000Z
|
tests/unit/test_vcs_mercurial.py
|
rogerhil/pip
|
7616583dbb2dcbda5a19d78873642d6751fbf017
|
[
"MIT"
] | 8,417
|
2015-01-01T13:03:16.000Z
|
2022-03-31T17:40:27.000Z
|
tests/unit/test_vcs_mercurial.py
|
rogerhil/pip
|
7616583dbb2dcbda5a19d78873642d6751fbf017
|
[
"MIT"
] | 2,663
|
2015-01-02T04:02:12.000Z
|
2022-03-30T02:30:46.000Z
|
"""
Contains functional tests of the Mercurial class.
"""
import configparser
import os
from pip._internal.utils.misc import hide_url
from pip._internal.vcs.mercurial import Mercurial
from tests.lib import need_mercurial
from tests.lib.path import Path
@need_mercurial
def test_mercurial_switch_updates_config_file_when_found(tmpdir: Path) -> None:
hg = Mercurial()
options = hg.make_rev_options()
hg_dir = os.path.join(tmpdir, ".hg")
os.mkdir(hg_dir)
config = configparser.RawConfigParser()
config.add_section("paths")
config.set("paths", "default", "old_url")
hgrc_path = os.path.join(hg_dir, "hgrc")
with open(hgrc_path, "w") as f:
config.write(f)
hg.switch(tmpdir, hide_url("new_url"), options)
config.read(hgrc_path)
default_path = config.get("paths", "default")
assert default_path == "new_url"
| 25.558824
| 79
| 0.715765
|
0907706108a486018d923d966cbe435c34c93499
| 158
|
py
|
Python
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_Hour_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_Hour_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_Hour_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['Seasonal_Hour'] , ['SVR'] );
| 39.5
| 80
| 0.746835
|
362f9a0dac1296db6e42d55e75d8a22d5b411754
| 4,461
|
py
|
Python
|
leo/plugins/leo_babel/tests/idle_time.py
|
leonidborisenko/leo-editor
|
db55bd00c94fb8501795284453891ad64ce12af9
|
[
"MIT"
] | null | null | null |
leo/plugins/leo_babel/tests/idle_time.py
|
leonidborisenko/leo-editor
|
db55bd00c94fb8501795284453891ad64ce12af9
|
[
"MIT"
] | null | null | null |
leo/plugins/leo_babel/tests/idle_time.py
|
leonidborisenko/leo-editor
|
db55bd00c94fb8501795284453891ad64ce12af9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#coding=utf-8
#@+leo-ver=5-thin
#@+node:bob.20180206123613.1: * @file leo_babel/tests/idle_time.py
#@@first
#@@first
#@@language python
#@@tabwidth -4
#@+<< imports >>
#@+node:bob.20180206123613.2: ** << imports >>
import os
import time
# import traceback
import leo.core.leoGlobals as leoG
assert leoG
#@-<< imports >>
#@+<< version >>
#@+node:bob.20180206123613.3: ** << version >>
version = '1.0'
#@-<< version >>
#@+others
#@+node:bob.20180206123725.1: ** class IdleTime
class IdleTime:
""" This is an implementation of the Leo-Editor
class IdleTime() for use with Leo-Bridge.
"""
#@+others
#@+node:bob.20180206125732.1: *3* Class Parameters
list_active = list()
list_inactive = list()
#@+node:bob.20180206123842.1: *3* __init__()
def __init__(self, handler, delay=500, tag=None):
""" Create an Idle Time Object Instance
Arguments:
handler: Function to execute when idle
delay: Minimum time in milliseconds between
calls to handler
tag: Identifier for the purpose of the handler
Returns:
None
"""
self._handler = handler
self._delay = delay / 1000.
self._tag = tag
self._active = False
IdleTime.list_inactive.append(self)
#traceStk = [lix.strip() for lix in traceback.format_stack()]
#leoG.trace('Trace: {0}'.format(traceStk[-2]))
#leoG.trace('IdleTime() {0}'.format(id(self)))
#@+node:bob.20180206124140.1: *3* start()
def start(self):
""" Start an Idle Time Instance
Arguments:
self: IdleTime instance
Returns:
None
"""
#leoG.trace(id(self))
IdleTime.list_inactive.remove(self)
self._nexttime = time.clock()
IdleTime.list_active.insert(0, self)
self._active = True
#@+node:bob.20180206125022.1: *3* stop()
def stop(self):
""" Stop an Idle Time Instance
Arguments:
self: IdleTime instance
Returns:
None
"""
#leoG.trace(id(self))
if self._active:
IdleTime.list_active.remove(self)
IdleTime.list_inactive.append(self)
self._active = False
#@+node:bob.20180206123934.1: *3* idle() Class method
@classmethod
def idle(cls):
""" Application idle -- Except for Idle Time
handler execution
Arguments:
cls: The IdleTime class object
Returns:
None
"""
#traceStk = [lix.strip() for lix in traceback.format_stack()]
#leoG.trace('Trace: {0}'.format(traceStk[-2]))
itoLast = 0
while True:
if not cls.list_active:
break
# pylint: disable=no-member
os.sched_yield()
timeCur = time.clock()
idleTimeObj = cls.list_active.pop(0)
#leoG.trace('Popped {0} leaving {1}'.format(id(idleTimeObj), [id(ent) for ent in cls.list_active]))
if timeCur >= idleTimeObj._nexttime:
nexttime = timeCur + idleTimeObj._delay
idleTimeObj._nexttime = nexttime
for idx, idleTimeObj2 in enumerate(cls.list_active):
if nexttime < idleTimeObj2._nexttime:
#leoG.trace('Insert at {0}'.format(idx))
cls.list_active.insert(idx, idleTimeObj)
break
else:
#leoG.trace('Append')
cls.list_active.append(idleTimeObj)
if itoLast != idleTimeObj:
itoLast = idleTimeObj
#leoG.trace('Run {0} cls.list_active={1}'.format(id(idleTimeObj), [id(ent) for ent in cls.list_active]))
idleTimeObj._handler(idleTimeObj)
#leoG.trace('Handler return. cls.list_active={0}'.format([id(ent) for ent in cls.list_active]))
else:
# Nothing to run yet
cls.list_active.insert(0, idleTimeObj)
#leoG.trace('Exiting cls.list_active={0}'.format([id(ent) for ent in cls.list_active]))
#@-others
#@+node:bob.20180206123613.16: ** main()
def main():
""" Command Line Program Entry point
"""
raise NotImplementedError('{0} is not a command line program.'.format(__file__))
#@-others
if __name__ == "__main__":
main()
#@-leo
| 30.979167
| 124
| 0.566465
|
151de6f670ffc75d29ac25cd6f2024aa0810efaa
| 6,048
|
py
|
Python
|
utils.py
|
ethanluoyc/compile-jax
|
65bea174b0b546b27ef17774bca90e0f25b11f7d
|
[
"MIT"
] | null | null | null |
utils.py
|
ethanluoyc/compile-jax
|
65bea174b0b546b27ef17774bca90e0f25b11f7d
|
[
"MIT"
] | null | null | null |
utils.py
|
ethanluoyc/compile-jax
|
65bea174b0b546b27ef17774bca90e0f25b11f7d
|
[
"MIT"
] | null | null | null |
"""Utility functions."""
import numpy as np
import jax
import jax.numpy as jnp
from jax import nn, random
EPS = 1e-17
NEG_INF = -1e30
def cross_entropy(logits, labels, reduction='none'):
sequence_length, batch_size = logits.shape[:2]
targets = jax.nn.one_hot(labels, logits.shape[-1])
return -jnp.sum(targets * jax.nn.log_softmax(logits, -1), -1)
def gumbel_sample(rng, shape):
"""Sample Gumbel noise."""
uniform = random.uniform(rng, shape=shape)
return -jnp.log(EPS - jnp.log(uniform + EPS))
def gumbel_softmax_sample(rng, logits, temp=1.):
"""Sample from the Gumbel softmax / concrete distribution."""
gumbel_noise = gumbel_sample(rng, logits.shape)
return nn.softmax((logits + gumbel_noise) / temp, axis=-1)
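# Illustrative draw of a relaxed one-hot sample (shapes are hypothetical):
#   key = random.PRNGKey(0)
#   soft_onehot = gumbel_softmax_sample(key, jnp.zeros((2, 5)), temp=0.5)
#   soft_onehot.shape  # -> (2, 5); each row sums to 1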
def gaussian_sample(rng, mu, log_var):
"""Sample from Gaussian distribution."""
gaussian_noise = random.normal(rng, mu.shape)
return mu + jnp.exp(log_var * 0.5) * gaussian_noise
def kl_gaussian(mu, log_var):
"""KL divergence between Gaussian posterior and standard normal prior."""
return -0.5 * jnp.sum(1 + log_var - jnp.square(mu) - jnp.exp(log_var), axis=1)
def kl_categorical_uniform(preds):
"""KL divergence between categorical distribution and uniform prior."""
kl_div = preds * jnp.log(preds + EPS) # Constant term omitted.
return kl_div.sum(1)
def kl_categorical(preds, log_prior):
"""KL divergence between two categorical distributions."""
kl_div = preds * (jnp.log(preds + EPS) - log_prior)
return kl_div.sum(1)
def poisson_categorical_log_prior(length, rate):
"""Categorical prior populated with log probabilities of Poisson dist."""
rate = jnp.array(rate, dtype=jnp.float32)
values = jnp.expand_dims(jnp.arange(1, length + 1, dtype=jnp.float32), 0)
    log_prob_unnormalized = (jnp.log(rate) * values - rate
                             - jax.lax.lgamma(values + 1))
# TODO(tkipf): Length-sensitive normalization.
return nn.log_softmax(log_prob_unnormalized, axis=1) # Normalize.
def log_cumsum(probs, axis=1):
"""Calculate log of inclusive cumsum."""
return jnp.log(jnp.cumsum(probs, axis=axis) + EPS)
def generate_toy_data(num_symbols=5, num_segments=3, max_segment_len=5):
"""Generate toy data sample with repetition of symbols (EOS symbol: 0)."""
seq = []
symbols = np.random.choice(
np.arange(1, num_symbols + 1), num_segments, replace=False)
for seg_id in range(num_segments):
segment_len = np.random.choice(np.arange(1, max_segment_len))
seq += [symbols[seg_id]] * segment_len
seq += [0]
return np.array(seq, dtype=jnp.int64)
def get_lstm_initial_state(batch_size, hidden_dim):
"""Get empty (zero) initial states for LSTM."""
hidden_state = jnp.zeros((batch_size, hidden_dim))
cell_state = jnp.zeros((batch_size, hidden_dim))
return hidden_state, cell_state
def get_segment_probs(all_b_samples, all_masks, segment_id):
"""Get segment probabilities for a particular segment ID."""
neg_cumsum = 1 - jnp.cumsum(all_b_samples[segment_id], axis=1)
if segment_id > 0:
return neg_cumsum * all_masks[segment_id - 1]
else:
return neg_cumsum
def get_losses(
inputs,
outputs,
args,
beta_b=.1,
beta_z=.1,
prior_rate=3.,
):
"""Get losses (NLL, KL divergences and neg. ELBO).
Args:
inputs: Padded input sequences.
outputs: CompILE model output tuple.
args: Argument dict from `ArgumentParser`.
beta_b: Scaling factor for KL term of boundary variables (b).
beta_z: Scaling factor for KL term of latents (z).
prior_rate: Rate (lambda) for Poisson prior.
"""
targets = inputs.reshape(-1)
all_encs, all_recs, all_masks, all_b, all_z = outputs
input_dim = args.num_symbols + 1
nll = 0.
kl_z = 0.
for seg_id in range(args.num_segments):
seg_prob = get_segment_probs(all_b['samples'], all_masks, seg_id)
preds = all_recs[seg_id].reshape(-1, input_dim)
seg_loss = cross_entropy(
preds, targets, reduction='none').reshape(-1, inputs.shape[1])
# print(seg_loss.shape, seg_prob.shape)
# Ignore EOS token (last sequence element) in loss.
nll += (seg_loss[:, :-1] * seg_prob[:, :-1]).sum(1).mean(0)
# KL divergence on z.
if args.latent_dist == 'gaussian':
mu, log_var = jnp.split(all_z['logits'][seg_id], 2, axis=1)
kl_z += kl_gaussian(mu, log_var).mean(0)
elif args.latent_dist == 'concrete':
kl_z += kl_categorical_uniform(
nn.softmax(all_z['logits'][seg_id], axis=-1)).mean(0)
else:
raise ValueError('Invalid argument for `latent_dist`.')
# KL divergence on b (first segment only, ignore first time step).
# TODO(tkipf): Implement alternative prior on soft segment length.
probs_b = nn.softmax(all_b['logits'][0], axis=-1)
log_prior_b = poisson_categorical_log_prior(probs_b.shape[1], prior_rate)
kl_b = args.num_segments * kl_categorical(probs_b[:, 1:],
log_prior_b[:, 1:]).mean(0)
loss = nll + beta_z * kl_z + beta_b * kl_b
return loss, nll, kl_z, kl_b
def get_reconstruction_accuracy(inputs, outputs, args):
"""Calculate reconstruction accuracy (averaged over sequence length)."""
all_encs, all_recs, all_masks, all_b, all_z = outputs
batch_size = inputs.shape[0]
rec_seq = []
rec_acc = 0.
for sample_idx in range(batch_size):
prev_boundary_pos = 0
rec_seq_parts = []
for seg_id in range(args.num_segments):
boundary_pos = jnp.argmax(all_b['samples'][seg_id], axis=-1)[sample_idx]
if prev_boundary_pos > boundary_pos:
boundary_pos = prev_boundary_pos
seg_rec_seq = jnp.argmax(all_recs[seg_id], axis=-1)
rec_seq_parts.append(seg_rec_seq[sample_idx,
prev_boundary_pos:boundary_pos])
prev_boundary_pos = boundary_pos
rec_seq.append(jnp.concatenate(rec_seq_parts))
cur_length = rec_seq[sample_idx].shape[0]
matches = rec_seq[sample_idx] == inputs[sample_idx, :cur_length]
rec_acc += matches.astype(jnp.float32).mean()
rec_acc /= batch_size
return rec_acc, rec_seq
| 33.787709
| 80
| 0.689319
|
d23ae359ed05a2f17804adca3b27a947d9003f6c
| 499
|
py
|
Python
|
plotly/validators/mesh3d/_scene.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/mesh3d/_scene.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/mesh3d/_scene.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class SceneValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(self, plotly_name='scene', parent_name='mesh3d', **kwargs):
super(SceneValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop('dflt', 'scene'),
edit_type=kwargs.pop('edit_type', 'calc+clearAxisTypes'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 33.266667
| 76
| 0.641283
|
85d391f7bec3296b0d2909766e165b7e58ed9c44
| 59
|
py
|
Python
|
sis-inf.py
|
jorgegene/sis-inf-project
|
ed97162a828abb5c3c2013c33c6511e2ae0b6efd
|
[
"MIT"
] | null | null | null |
sis-inf.py
|
jorgegene/sis-inf-project
|
ed97162a828abb5c3c2013c33c6511e2ae0b6efd
|
[
"MIT"
] | null | null | null |
sis-inf.py
|
jorgegene/sis-inf-project
|
ed97162a828abb5c3c2013c33c6511e2ae0b6efd
|
[
"MIT"
] | null | null | null |
from app import app, db
from app.models import User, Poster
| 29.5
| 35
| 0.79661
|
6a2c86f09a1cd21b272da4436eeae50fb0865cd2
| 17,527
|
py
|
Python
|
Modelling Scenarios/scenario forecast folders/Epidemic Modelling v0710/metrics/COVID19_r0.py
|
ec-jrc/COVID-19
|
c9ef6ca3ae69edc8ba77b9f99d4a6875416136aa
|
[
"CC-BY-4.0"
] | 43
|
2020-04-23T08:46:49.000Z
|
2022-03-29T07:49:50.000Z
|
Modelling Scenarios/scenario forecast folders/Epidemic Modelling v0710/metrics/COVID19_r0.py
|
ebuitragod/COVID-19
|
b6989fe4cf9d10bdb47be7f840036f597d293e21
|
[
"CC-BY-4.0"
] | 44
|
2020-05-05T08:35:20.000Z
|
2022-01-13T03:29:28.000Z
|
Modelling Scenarios/scenario forecast folders/Epidemic Modelling v0710/metrics/COVID19_r0.py
|
ebuitragod/COVID-19
|
b6989fe4cf9d10bdb47be7f840036f597d293e21
|
[
"CC-BY-4.0"
] | 24
|
2020-05-19T16:45:01.000Z
|
2021-12-14T06:00:59.000Z
|
import os, sys, getopt
import pandas as pd
import numpy as np
import datetime
from scipy import stats as sps
from scipy.interpolate import interp1d
# Gamma is 1/serial interval
# https://wwwnc.cdc.gov/eid/article/26/7/20-0282_article
# https://www.nejm.org/doi/full/10.1056/NEJMoa2001316
GAMMA = 1/7
def highest_density_interval(pmf, p=.9, debug=False):
# If we pass a DataFrame, just call this recursively on the columns
if(isinstance(pmf, pd.DataFrame)):
return pd.DataFrame([highest_density_interval(pmf[col], p=p) for col in pmf],
index=pmf.columns)
cumsum = np.cumsum(pmf.values)
# N x N matrix of total probability mass for each low, high
total_p = cumsum - cumsum[:, None]
# Return all indices with total_p > p
lows, highs = (total_p > p).nonzero()
# Find the smallest range (highest density)
try:
best = (highs - lows).argmin()
except:
best=highs
low = pmf.index[lows[best]]
high = pmf.index[highs[best]]
return pd.Series([low, high],
index=[f'Low_{p*100:.0f}',
f'High_{p*100:.0f}'])
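# e.g. for a hypothetical posterior pmf (pd.Series indexed by candidate R_t values):
#   hdi = highest_density_interval(posterior_pmf, p=.9)
#   # -> Series with 'Low_90' / 'High_90' bounds of the densest 90% interval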
def prepare_cases(cases, cutoff=25,step=14):
new_cases = cases.diff()
smoothed = new_cases.rolling(step,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2).round()
idx_start = np.searchsorted(smoothed, cutoff)
smoothed = smoothed.iloc[idx_start:]
original = new_cases.loc[smoothed.index]
#if len(smoothed)==0:
# if step==7:
# original,smoothed=prepare_cases(cases,cutoff,1)
# else:
# original=cases
# smoothed=cases
if len(smoothed)==0:
cutoff -=1
#print (cutoff)
if cutoff>0:
original,smoothed=prepare_cases(cases,cutoff,step)
else:
original=cases
smoothed=cases
return original, smoothed
def get_posteriors(sr, sigma=0.15):
# We create an array for every possible value of Rt
R_T_MAX = 12
r_t_range = np.linspace(0, R_T_MAX, R_T_MAX*100+1)
# (1) Calculate Lambda
lam = sr[:-1].values * np.exp(GAMMA * (r_t_range[:, None] - 1))
# (2) Calculate each day's likelihood
likelihoods = pd.DataFrame(
data = sps.poisson.pmf(sr[1:].values, lam),
index = r_t_range,
columns = sr.index[1:])
# (3) Create the Gaussian Matrix
process_matrix = sps.norm(loc=r_t_range,
scale=sigma
).pdf(r_t_range[:, None])
# (3a) Normalize all rows to sum to 1
process_matrix /= process_matrix.sum(axis=0)
# (4) Calculate the initial prior
#prior0 = sps.gamma(a=4).pdf(r_t_range)
prior0 = np.ones_like(r_t_range)/len(r_t_range)
prior0 /= prior0.sum()
# Create a DataFrame that will hold our posteriors for each day
# Insert our prior as the first posterior.
#try:
posteriors = pd.DataFrame(
index=r_t_range,
columns=sr.index,
data={sr.index[0]: prior0}
)
#except:
# print ('error in data')
# We said we'd keep track of the sum of the log of the probability
# of the data for maximum likelihood calculation.
log_likelihood = 0.0
# (5) Iteratively apply Bayes' rule
for previous_day, current_day in zip(sr.index[:-1], sr.index[1:]):
#(5a) Calculate the new prior
current_prior = process_matrix @ posteriors[previous_day]
#(5b) Calculate the numerator of Bayes' Rule: P(k|R_t)P(R_t)
numerator = likelihoods[current_day] * current_prior
        #(5c) Calculate the denominator of Bayes' Rule P(k)
denominator = np.sum(numerator)
# Execute full Bayes' Rule
posteriors[current_day] = numerator/denominator
# Add to the running sum of log likelihoods
log_likelihood += np.log(denominator)
return posteriors, log_likelihood
def calcR0s(state_name,states,reg=''):
# if states=='':
# url = 'https://github.com/ec-jrc/COVID-19/raw/master/data-by-country/jrc-covid-19-all-days-by-country.csv'
# states = pd.read_csv(url,
# usecols=['Date', 'CountryName', 'CumulativePositive'],
# parse_dates=['Date'],
# index_col=['CountryName', 'Date'],
# squeeze=True).sort_index()
# state_name = 'Belgium'
if reg=='':
cases0 = states.xs(state_name).rename(f"{state_name} cases")
else:
cases0 = states.xs(state_name).xs(reg).rename(f"{state_name+' '+reg} cases")
cases = cases0[0:len(cases0)]
#print (cases)
original, smoothed = prepare_cases(cases)
# Note that we're fixing sigma to a value just for the example
posteriors, log_likelihood = get_posteriors(smoothed, sigma=.25)
# Note that this takes a while to execute - it's not the most efficient algorithm
hdis = highest_density_interval(posteriors, p=.9)
most_likely = posteriors.idxmax().rename('ML')
# Look into why you shift -1
result = pd.concat([most_likely, hdis], axis=1)
csv=result.to_csv()
rows=csv.replace('\r','').split('\n')
x=[];r0=[];rl=[];rh=[]
for r in rows:
if r.startswith('Date'):continue
if r=='':continue
p=r.split(',')
d=datetime.datetime.strptime(p[0],'%Y-%m-%d')
try:
r0f=float(p[1]);rlf=float(p[2]);rhf=float(p[3])
r0.append(r0f)
rl.append(rlf)
rh.append(rhf)
x.append(d)
except:
#print ('error in data for ',d,r)
r=r
#print ('AA',np.shape(x),np.shape(r0),p)
return x,r0,rl,rh, csv
#result.to_csv(state_name+'rt.csv')
def calcR0(cou):
if cou=='Czech_Republic': cou=cou.replace("_"," ") #!!! PB
url = 'https://github.com/ec-jrc/COVID-19/raw/master/data-by-country/jrc-covid-19-all-days-by-country.csv'
states = pd.read_csv(url, usecols=['Date', 'CountryName', 'CumulativePositive'], parse_dates=['Date'], index_col=['CountryName', 'Date'], squeeze=True).sort_index()
x,r0,rl,rh, csv=calcR0s(cou,states)
return x,r0,rl,rh,csv
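# Elapsed time between two datetimes as a fractional number of days.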
def gd(a1,a0):
return (a1-a0).days+(a1-a0).seconds/3600./24.
def calcR0_JRC(cou,reg=''):
if cou=='Czech_Republic': cou=cou.replace("_"," ") #!!! PB
if reg=='':
url = 'https://github.com/ec-jrc/COVID-19/raw/master/data-by-country/jrc-covid-19-all-days-by-country.csv'
states = pd.read_csv(url, usecols=['Date', 'CountryName', 'CumulativePositive'], parse_dates=['Date'], index_col=['CountryName', 'Date'], squeeze=True).sort_index()
else:
url='https://raw.githubusercontent.com/ec-jrc/COVID-19/master/data-by-region/jrc-covid-19-all-days-by-regions.csv'
states = pd.read_csv(url,usecols=['Date', 'CountryName','Region','CumulativePositive'], parse_dates=['Date'], index_col=['CountryName','Region','Date'],squeeze=True).sort_index()
x,r0,rl,rh=calcR0_JRCs(cou,states,reg)
return x,r0,rl,rh
def calcR0_7d(cou,reg='',ndays=7):
if cou=='Czech_Republic': cou=cou.replace("_"," ") #!!! PB
if reg=='':
url = 'https://github.com/ec-jrc/COVID-19/raw/master/data-by-country/jrc-covid-19-all-days-by-country.csv'
states = pd.read_csv(url, usecols=['Date', 'CountryName', 'CumulativePositive'], parse_dates=['Date'], index_col=['CountryName', 'Date'], squeeze=True).sort_index()
else:
url='https://raw.githubusercontent.com/ec-jrc/COVID-19/master/data-by-region/jrc-covid-19-all-days-by-regions.csv'
states = pd.read_csv(url,usecols=['Date', 'CountryName','Region','CumulativePositive'], parse_dates=['Date'], index_col=['CountryName','Region','Date'],squeeze=True).sort_index()
x,r0=calcR0_7ds(cou,states,reg,ndays)
return x,r0
def calcR0_RKI(cou,reg='',ndays=4):
if cou=='Czech_Republic': cou=cou.replace("_"," ") #!!! PB
if reg=='':
url = 'https://github.com/ec-jrc/COVID-19/raw/master/data-by-country/jrc-covid-19-all-days-by-country.csv'
states = pd.read_csv(url, usecols=['Date', 'CountryName', 'CumulativePositive'], parse_dates=['Date'], index_col=['CountryName', 'Date'], squeeze=True).sort_index()
else:
url='https://raw.githubusercontent.com/ec-jrc/COVID-19/master/data-by-region/jrc-covid-19-all-days-by-regions.csv'
states = pd.read_csv(url,usecols=['Date', 'CountryName','Region','CumulativePositive'], parse_dates=['Date'], index_col=['CountryName','Region','Date'],squeeze=True).sort_index()
x,r0=calcR0_RKIs(cou,states,reg,ndays)
return x,r0
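# RKI-style estimate: ratio of smoothed new cases summed over two consecutive windows of 'ndays' days.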
def calcR0_RKIs(state_name,states,reg='',ndays=4):
if reg=='':
cases0 = states.xs(state_name).rename(f"{state_name} cases")
else:
cases0 = states.xs(state_name).xs(reg).rename(f"{state_name+' '+reg} cases")
cases = cases0[0:len(cases0)]
original, smoothed = prepare_cases(cases,10,5)
sdif=pd.array(smoothed)
dd=pd.array(smoothed.index)
dates=[];r0=[];rmin=[];rmax=[]
for k in range(2*ndays,sdif.shape[0]):
week1=0.0
week2=0.0
for j in range(0,ndays):
week1 += sdif[k-2*ndays+j]
week2 += sdif[k-ndays+j]
if week1 !=0:
rrki=week2/week1
else:
rrki=0.0
dates.append(datetime.datetime.strptime(format(dd[k],'%Y-%m-%d'),'%Y-%m-%d'))
r0.append(rrki)
return dates,r0
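# Same windowed-ratio estimate as calcR0_RKIs, with a 7-day window by default.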
def calcR0_7ds(state_name,states,reg='',ndays=7):
if reg=='':
cases0 = states.xs(state_name).rename(f"{state_name} cases")
else:
cases0 = states.xs(state_name).xs(reg).rename(f"{state_name+' '+reg} cases")
cases = cases0[0:len(cases0)]
original, smoothed = prepare_cases(cases,10,5)
sdif=pd.array(smoothed)
dd=pd.array(smoothed.index)
dates=[];r0=[];rmin=[];rmax=[]
for k in range(2*ndays,sdif.shape[0]):
week1=0.0
week2=0.0
for j in range(0,ndays):
week1 += sdif[k-2*ndays+j]
week2 += sdif[k-ndays+j]
if week1 !=0:
rrki=week2/week1
else:
rrki=0.0
dates.append(datetime.datetime.strptime(format(dd[k],'%Y-%m-%d'),'%Y-%m-%d'))
r0.append(rrki)
return dates,r0
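# JRC growth-rate estimate: R derived from the 7-day log-growth of smoothed cases with a 7-day
# generation time (+/- 1 day for the min/max bounds).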
def calcR0_JRCs(state_name,states,reg=''):
if reg=='':
cases0 = states.xs(state_name).rename(f"{state_name} cases")
else:
cases0 = states.xs(state_name).xs(reg).rename(f"{state_name+' '+reg} cases")
cases = cases0[0:len(cases0)]
original, smoothed = prepare_cases(cases,10,5)
sdif=pd.array(smoothed)
dd=pd.array(smoothed.index)
genTime=7.0
dates=[];r0=[];rmin=[];rmax=[]
for k in range(8,sdif.shape[0]):
if sdif[k]>0 and sdif[k-7]>0:
vr0=(np.log(sdif[k])-np.log(sdif[k-7]))*genTime/gd(dd[k],dd[k-7])+1
vrmax=(np.log(sdif[k])-np.log(sdif[k-7]))*(genTime+1)/gd(dd[k],dd[k-7])+1
vrmin=(np.log(sdif[k])-np.log(sdif[k-7]))*(genTime-1)/gd(dd[k],dd[k-7])+1
#print(r0,sdif[k],sdif[k-7],gd(dd[k],dd[k-7]),np.log(sdif[k]),np.log(sdif[k-7]))
else:
vr0=0.0
vrmin=0.0
vrmax=0.0
dates.append( datetime.datetime.strptime(format(dd[k],'%Y-%m-%d'),'%Y-%m-%d'))
r0.append(vr0)
rmin.append(vrmin)
rmax.append(vrmax)
return dates,r0,rmin,rmax
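# Wrapper around an external R script (calcR0.R); results are cached as CSV files in readFolder and
# regenerated when missing or older than ~12 hours.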
def calcR0_CRAN(cou,readFolder,reg='',reg0=''):
print(' calcR0_cran' ,cou,reg,readFolder)
if not os.path.exists(readFolder):
os.makedirs(readFolder)
create=False
sreg=''
if reg!='':
sreg=' '+reg
print('checking '+readFolder+'\\'+cou+sreg+' _R0.csv')
if not (os.path.exists(readFolder+'\\'+cou+sreg+' _R0.csv') and os.path.exists(readFolder+'\\'+cou+sreg+' _R0_COU.csv') and os.path.exists(readFolder+'\\'+cou+sreg+' _R0_confint.csv')):
print('file not existing, to be created '+readFolder+'\\'+cou+reg+' _R0.csv')
create=True
else:
st=os.stat(readFolder+'\\'+cou+sreg+' _R0.csv')
fdate=datetime.datetime.fromtimestamp(st.st_mtime)
age=datetime.datetime.now()-fdate
#print(age.days+age.seconds/(3600.0*24.0))
if age.days+age.seconds/(3600.0*24.0)>0.5:
print('file existing but old, to be created')
create=True
print('>'+cou+'<>'+reg+'<')
if create:
if (reg==''):
cmd=r'"C:\Program Files\R\R-4.0.0\bin\Rscript.exe"'+' calcR0.R -mode NAT -o '+readFolder+' -c '+cou.replace(" ","_")
else:
cmd=r'"C:\Program Files\R\R-4.0.0\bin\Rscript.exe"'+' calcR0.R -mode REG -o '+readFolder+' -c '+cou.replace(" ","_") +' -r '+reg.replace(" ","_")
print('calcR0_CRAN command= ' + cmd)
os.system(cmd)
dates=[];r0=[];rlow=[];rhigh=[]
fname=readFolder+'\\'+cou+sreg+' _R0.csv'
if not os.path.exists(fname):
reg=reg0
fname=readFolder+'\\'+cou+sreg+' _R0.csv'
if not os.path.exists(fname):
return dates,r0,rlow,rhigh
fname=readFolder+'\\'+cou+sreg+' _R0.csv';f=open(fname,'r');textR0=f.read();f.close()
fname=readFolder+'\\'+cou+sreg+' _R0_COU.csv';f=open(fname,'r',encoding='UTF-8');textCOU=f.read();f.close()
fname=readFolder+'\\'+cou+sreg+' _R0_confint.csv';f=open(fname,'r');textCI=f.read();f.close()
lines=textCOU.split('\n')
for k in range(2,len(lines)-1):
p=lines[k].split(',')
#print(p)
#print(p[len(p)-2])
d=datetime.datetime.strptime(p[len(p)-2],'%Y-%m-%d')
dates.append(d)
lines1=textR0.split('\n')
lines2=textCI.split('\n')
r0.append(0);rlow.append(0);rhigh.append(0)
#r0.append(0);rlow.append(0);rhigh.append(0)
#r0.append(0);rlow.append(0);rhigh.append(0)
for k in range(2,len(lines1)-2):
if lines1[k]=='':continue
p=lines2[k].split(',')
r0.append(float(lines1[k]))
rlow.append(float(p[0]))
rhigh.append(float(p[1]))
# print (len(r0),len(dates))
# for k in range(len(dates)):
# print(dates[k],r0[k])
return dates,r0,rlow,rhigh
if __name__ == "__main__":
opts, args = getopt.getopt(sys.argv[1:],"c:o:i:h",["cou=","odir=","idir=","help="])
if len(opts)<1:
print ('INPUT ERROR')
print ('COVID19_r0.py -c <country|ALL> -o <outputdir> ')
print (' example:')
print ('python D:\mnt\output\COVID-19\scripts_py3\COVID19_r0.py -c Italy -o D:\mnt\output\COVID-19\pdfOut\ ')
sys.exit()
    readFolder = ''  # default so the later 'if readFolder' check works when -i/--idir is not supplied
    for opt, arg in opts:
if opt in ("-h", "--help"):
print ('python D:\mnt\output\COVID-19\scripts\COVID19_r0.py -c Italy -o D:\mnt\output\COVID-19\pdfOut\ ')
sys.exit()
elif opt in ("-o", "--odir"):
dirout = arg
elif opt in ("-i", "--idir"):
readFolder = arg
elif opt in ("-c", "--cou"):
cou = arg
else:
print ('error parameter not available: '+opt)
sys.exit()
url = 'https://github.com/ec-jrc/COVID-19/raw/master/data-by-country/jrc-covid-19-all-days-by-country.csv'
states = pd.read_csv(url,usecols=['Date', 'CountryName', 'CumulativePositive'],parse_dates=['Date'],index_col=['CountryName', 'Date'],squeeze=True).sort_index()
now=datetime.datetime.now().strftime("%Y%m%d")
    rowLast="country,date,r0_ML,Low_90,High_90\n"
print (cou)
if cou=="ALL":
ecList = "Austria, Belgium, Bulgaria, Croatia, Cyprus, Czech Republic, Denmark, Estonia, Finland, France, Germany, Greece, Hungary, Ireland, Italy, Latvia, Lithuania, Luxembourg, Malta, Netherlands, Poland, Portugal, Romania, Slovakia, Slovenia, Spain, Sweden".split(', ')
eucmParticip = "Iceland,Montenegro,North Macedonia,Norway,Turkey,United Kingdom,Switzerland".split(',')
for cou in ecList:
print(cou)
try:
x,r0,rl,rh,csv=calcR0s(cou,states)
f=open(dirout+"\\"+now+"_"+cou+"_r0.csv","w")
f.write(csv.replace("\n",""))
f.close()
rowLast +=cou+','+format(x[len(x)-1])+','+"%.2f" % r0[len(r0)-1]+','+"%.2f" % rl[len(rl)-1]+','+"%.2f" % rh[len(rh)-1]+'\n'
except:
rowLast +=cou+",,,,\n"
for cou in eucmParticip:
print(cou)
try:
x,r0,rl,rh,csv=calcR0s(cou,states)
f=open(dirout+"\\"+now+"_"+cou+"_r0.csv","w")
f.write(csv.replace("\n",""))
f.close()
rowLast +=cou+','+format(x[len(x)-1])+','+"%.2f" % r0[len(r0)-1]+','+"%.2f" % rl[len(rl)-1]+','+"%.2f" % rh[len(rh)-1]+'\n'
except:
rowLast +=cou+",,,,\n"
else:
#try:
x,r0,rl,rh,csv=calcR0s(cou,states)
#print(r0)
f=open(dirout+"\\"+now+"_"+cou+"_r0.csv","w")
f.write(csv.replace("\n",""))
f.close()
rowLast +=cou+','+format(x[len(x)-1])+','+"%.2f" % r0[len(r0)-1]+','+"%.2f" % rl[len(rl)-1]+','+"%.2f" % rh[len(rh)-1]+'\n'
if readFolder !='':
            x,r0,rl,rh=calcR0_CRAN(cou,readFolder)
# except:
# print('Error calling calcR0')
# rowLast +=cou+",,,,"
f=open(dirout+"\\"+now+"_r0_last.csv","w")
f.write(rowLast)
f.close()
# #x,r0,rl,rh=calcR0('Italy')
# x,r0,rl,rh=calcR0('Austria')
# print(r0)
| 38.019523
| 280
| 0.579791
|
6e57882d06037ef3c5e93993bd8588cf5a550f61
| 7,485
|
py
|
Python
|
ingest/prepare_scripts/alos/alos2_prepare.py
|
ChetanKhanna/NE-GeoCloud
|
bad907045729cd9ffd086ede034ef1805eeecc8b
|
[
"Apache-2.0"
] | 1
|
2019-07-22T05:24:40.000Z
|
2019-07-22T05:24:40.000Z
|
ingest/prepare_scripts/alos/alos2_prepare.py
|
SivaramakrishnanKN/NE-GeoCloud
|
affcae49e0ccd7d29360a2771a9517147ed56590
|
[
"Apache-2.0"
] | 1
|
2019-06-06T18:31:29.000Z
|
2019-06-06T18:31:29.000Z
|
ingest/prepare_scripts/alos/alos2_prepare.py
|
SivaramakrishnanKN/NE-GeoCloud
|
affcae49e0ccd7d29360a2771a9517147ed56590
|
[
"Apache-2.0"
] | 5
|
2019-06-05T07:26:13.000Z
|
2019-06-08T06:53:11.000Z
|
# coding=utf-8
"""
Ingest data from the command-line.
"""
from __future__ import absolute_import, division
from __future__ import print_function
import logging
import uuid
from xml.etree import ElementTree
import re
from pathlib import Path
import yaml
from dateutil import parser
from datetime import timedelta
import rasterio.warp
import click
from osgeo import osr
import os
_STATIONS = {
'023': 'TKSC',
'022': 'SGS',
'010': 'GNC',
'011': 'HOA',
'012': 'HEOC',
'013': 'IKR',
'014': 'KIS',
'015': 'LGS',
'016': 'MGR',
'017': 'MOR',
'032': 'LGN',
'019': 'MTI',
'030': 'KHC',
'031': 'MLK',
'018': 'MPS',
'003': 'BJC',
'002': 'ASN',
'001': 'AGS',
'007': 'DKI',
'006': 'CUB',
'005': 'CHM',
'004': 'BKT',
'009': 'GLC',
'008': 'EDC',
'029': 'JSA',
'028': 'COA',
'021': 'PFS',
'020': 'PAC'
}
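# Map an ALOS-2 tif filename to the layer/band name recorded in the dataset metadata.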
def band_name(path):
name = path.stem
layername = name
# position = name.find('_')
if 'HH' in str(path):
layername = 'hh_gamma0'
if 'HV' in str(path):
layername = 'hv_gamma0'
if 'date' in str(path):
layername = 'observation_date'
if 'linci' in str(path):
layername = 'incidence_angle'
if 'mask' in str(path):
layername = 'mask'
# if position == -1:
# raise ValueError('Unexpected tif image in eods: %r' % path)
# if re.match(r"[Bb]\d+", name[position+1:]):
# layername = name[position+2:]
# else:
# layername = name[position+1:]
return layername
def get_projection(path):
with rasterio.open(str(path)) as img:
left, bottom, right, top = img.bounds
return {
'spatial_reference': str(str(getattr(img, 'crs_wkt', None) or img.crs.wkt)),
'geo_ref_points': {
'ul': {
'x': left,
'y': top
},
'ur': {
'x': right,
'y': top
},
'll': {
'x': left,
'y': bottom
},
'lr': {
'x': right,
'y': bottom
},
}
}
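# Transform the projected corner points into geographic lon/lat coordinates.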
def get_coords(geo_ref_points, spatial_ref):
spatial_ref = osr.SpatialReference(spatial_ref)
t = osr.CoordinateTransformation(spatial_ref, spatial_ref.CloneGeogCS())
def transform(p):
lon, lat, z = t.TransformPoint(p['x'], p['y'])
return {'lon': lon, 'lat': lat}
return {key: transform(p) for key, p in geo_ref_points.items()}
def populate_coord(doc):
proj = doc['grid_spatial']['projection']
doc['extent']['coord'] = get_coords(proj['geo_ref_points'], proj['spatial_reference'])
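# Parse a timestamp, tolerating a seconds field of '60' by rolling over into the next minute.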
def crazy_parse(timestr):
try:
return parser.parse(timestr)
except ValueError:
if not timestr[-2:] == "60":
raise
return parser.parse(timestr[:-2] + '00') + timedelta(minutes=1)
def prep_dataset(fields, path):
# for file in os.listdir(str(path)):
# if file.endswith(".xml") and (not file.endswith('aux.xml')):
# metafile = file
# Parse xml ElementTree gives me a headache so using lxml
# doc = ElementTree.parse(os.path.join(str(path), metafile))
    # TODO root method doesn't work here - need to include xmlns...
# for global_metadata in doc.findall('{http://espa.cr.usgs.gov/v1.2}global_metadata'):
# satellite = (global_metadata.find('{http://espa.cr.usgs.gov/v1.2}satellite')).text
# instrument = (global_metadata.find('{http://espa.cr.usgs.gov/v1.2}instrument')).text
# acquisition_date = str((global_metadata
# .find('{http://espa.cr.usgs.gov/v1.2}acquisition_date')).text).replace("-","")
# scene_center_time = (global_metadata.find('{http://espa.cr.usgs.gov/v1.2}scene_center_time')).text[:8]
# center_dt = crazy_parse(acquisition_date+"T"+scene_center_time)
# aos = crazy_parse(acquisition_date+"T"+scene_center_time)-timedelta(seconds=(24/2))
# los = aos + timedelta(seconds=24)
# lpgs_metadata_file = (global_metadata.find('{http://espa.cr.usgs.gov/v1.2}lpgs_metadata_file')).text
# groundstation = lpgs_metadata_file[16:19]
# fields.update({'instrument': instrument, 'satellite': satellite})
aos = crazy_parse('20' + fields['mosaic_year'] + '-01-01T00:00:00')
los = aos
fields['creation_dt'] = aos
fields['satellite'] = 'ALOS_2'
images = {
band_name(im_path): {
'path': str(im_path.relative_to(path))
}
for im_path in path.glob('*.tif') if "RGB" not in str(im_path)
}
doc = {
'id': str(uuid.uuid4()),
'processing_level': "terrain",
'product_type': "gamma0",
'creation_dt': aos,
'platform': {
'code': 'ALOS_2'
},
'instrument': {
'name': 'PALSAR'
},
'acquisition': {
'groundstation': {
'code': '023',
'aos': str(aos),
'los': str(los)
}
},
'extent': {
'from_dt': str(aos),
'to_dt': str(aos),
'center_dt': str(aos)
},
'format': {
'name': 'GeoTiff'
},
'grid_spatial': {
'projection': get_projection(path / next(iter(images.values()))['path'])
},
'image': {
# 'satellite_ref_point_start': {'path': 0, 'row': 0},
# 'satellite_ref_point_end': {'path': 0, 'row': 0},
'bands': images
},
# TODO include 'lineage': {'source_datasets': {'lpgs_metadata_file': lpgs_metadata_file}}
'lineage': {
'source_datasets': {}
}
}
populate_coord(doc)
return doc
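# Build the dataset folder name from the parsed metadata fields.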
def dataset_folder(fields):
fmt_str = "{vehicle}_{instrument}_{type}_{level}_GA{type}{product}-{groundstation}_{path}_{row}_{date}"
return fmt_str.format(**fields)
# INPUT path is parsed for elements - below hardcoded for testing
def prepare_datasets(alos2_path):
print(alos2_path)
fields = re.match((r"(?P<latitude_dir>N|S)"
r"(?P<latitude>[0-9]{2})"
r"(?P<longitude_dir>E|W)"
r"(?P<longitude>[0-9]{3})"
"_"
r"(?P<mosaic_year>[0-9]{2})"), alos2_path.stem).groupdict()
# timedelta(days=int(fields["julianday"]))
# , 'creation_dt': ((crazy_parse(fields["productyear"]+'0101T00:00:00'))+timedelta(days=int(fields["julianday"])))})
fields.update({'level': 'gamma0', 'type': 'intensity'})
alos2 = prep_dataset(fields, alos2_path)
return (alos2, alos2_path)
@click.command(help="Prepare ALOS2 PALSAR dataset for ingestion into the Data Cube.")
@click.argument('datasets', type=click.Path(exists=True, readable=True, writable=True), nargs=-1)
def main(datasets):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
for dataset in datasets:
path = Path(dataset)
logging.info("Processing %s", path)
documents = prepare_datasets(path)
dataset, folder = documents
yaml_path = str(folder.joinpath('agdc-metadata.yaml'))
logging.info("Writing %s", yaml_path)
with open(yaml_path, 'w') as stream:
yaml.dump(dataset, stream)
if __name__ == "__main__":
main()
| 30.303644
| 120
| 0.551637
|
64d8b91ffc54d7be0d117e747508e9449f29b37e
| 5,263
|
py
|
Python
|
honeybee_schema/energy/construction.py
|
MingboPeng/honeybee-schema
|
84bfea4c4ed038e3cf71ae2d708b937cb98334d5
|
[
"BSD-3-Clause"
] | null | null | null |
honeybee_schema/energy/construction.py
|
MingboPeng/honeybee-schema
|
84bfea4c4ed038e3cf71ae2d708b937cb98334d5
|
[
"BSD-3-Clause"
] | null | null | null |
honeybee_schema/energy/construction.py
|
MingboPeng/honeybee-schema
|
84bfea4c4ed038e3cf71ae2d708b937cb98334d5
|
[
"BSD-3-Clause"
] | null | null | null |
"""Construction Schema"""
from pydantic import Field, constr
from typing import List, Union
from ._base import IDdEnergyBaseModel
from .material import EnergyMaterial, EnergyMaterialNoMass, \
EnergyWindowMaterialGas, EnergyWindowMaterialGasCustom, \
EnergyWindowMaterialGasMixture, EnergyWindowMaterialSimpleGlazSys, \
EnergyWindowMaterialBlind, EnergyWindowMaterialGlazing, EnergyWindowMaterialShade
from .schedule import ScheduleRuleset, ScheduleFixedInterval
class WindowConstructionAbridged(IDdEnergyBaseModel):
"""Construction for window objects (Aperture, Door)."""
type: constr(regex='^WindowConstructionAbridged$') = 'WindowConstructionAbridged'
layers: List[constr(min_length=1, max_length=100)] = Field(
...,
description='List of strings for material identifiers. The order of the '
'materials is from exterior to interior.',
min_items=1,
max_items=8
)
class WindowConstruction(WindowConstructionAbridged):
"""Construction for window objects (Aperture, Door)."""
type: constr(regex='^WindowConstruction$') = 'WindowConstruction'
materials: List[
Union[
EnergyWindowMaterialGas, EnergyWindowMaterialGasCustom, EnergyWindowMaterialGasMixture,
EnergyWindowMaterialSimpleGlazSys, EnergyWindowMaterialBlind,
EnergyWindowMaterialGlazing, EnergyWindowMaterialShade
]
] = Field(
...,
description='List of materials. The order of the materials is from outside '
'to inside.',
min_items=1,
max_items=8
)
class OpaqueConstructionAbridged(IDdEnergyBaseModel):
"""Construction for opaque objects (Face, Shade, Door)."""
type: constr(regex='^OpaqueConstructionAbridged$') = 'OpaqueConstructionAbridged'
layers: List[constr(min_length=1, max_length=100)] = Field(
...,
description='List of strings for material identifiers. The order of the materials '
'is from exterior to interior.',
min_items=1,
max_items=10
)
class OpaqueConstruction(OpaqueConstructionAbridged):
"""Construction for opaque objects (Face, Shade, Door)."""
type: constr(regex='^OpaqueConstruction$') = 'OpaqueConstruction'
materials: List[Union[EnergyMaterial, EnergyMaterialNoMass]] = Field(
...,
description='List of materials. The order of the materials is from outside to'
' inside.',
min_items=1,
max_items=10
)
class ShadeConstruction(IDdEnergyBaseModel):
"""Construction for Shade objects."""
type: constr(regex='^ShadeConstruction$') = 'ShadeConstruction'
solar_reflectance: float = Field(
0.2,
ge=0,
le=1,
description=' A number for the solar reflectance of the construction.'
)
visible_reflectance: float = Field(
0.2,
ge=0,
le=1,
description=' A number for the visible reflectance of the construction.'
)
is_specular: bool = Field(
default=False,
description='Boolean to note whether the reflection off the shade is diffuse '
'(False) or specular (True). Set to True if the construction is '
'representing a glass facade or a mirror material.'
)
class AirBoundaryConstructionAbridged(IDdEnergyBaseModel):
"""Construction for Air Boundary objects."""
type: constr(regex='^AirBoundaryConstructionAbridged$') = \
'AirBoundaryConstructionAbridged'
air_mixing_per_area: float = Field(
0.1,
ge=0,
description='A positive number for the amount of air mixing between Rooms '
'across the air boundary surface [m3/s-m2]. Default: 0.1 corresponds '
'to average indoor air speeds of 0.1 m/s (roughly 20 fpm), which is '
'typical of what would be induced by a HVAC system.'
)
air_mixing_schedule: str = Field(
...,
min_length=1,
max_length=100,
description='Identifier of a fractional schedule for the air mixing schedule '
'across the construction.'
)
class AirBoundaryConstruction(AirBoundaryConstructionAbridged):
"""Construction for Air Boundary objects."""
type: constr(regex='^AirBoundaryConstruction$') = 'AirBoundaryConstruction'
air_mixing_schedule: Union[ScheduleRuleset, ScheduleFixedInterval] = Field(
...,
description='A fractional schedule as a ScheduleRuleset or '
'ScheduleFixedInterval for the air mixing schedule across '
'the construction.'
)
class Config:
@staticmethod
def schema_extra(schema, model):
schema['properties']['air_mixing_schedule']['anyOf'] = [
{"$ref": "#/components/schemas/ScheduleRuleset"},
{"$ref": "#/components/schemas/ScheduleFixedInterval"}
]
if __name__ == '__main__':
print(WindowConstructionAbridged.schema_json(indent=2))
print(WindowConstruction.schema_json(indent=2))
print(OpaqueConstructionAbridged.schema_json(indent=2))
print(OpaqueConstruction.schema_json(indent=2))
print(ShadeConstruction.schema_json(indent=2))
print(AirBoundaryConstruction.schema_json(indent=2))
| 34.398693
| 99
| 0.67832
|
e600482d981d9faf663eb9739276f889dfa0775e
| 2,999
|
py
|
Python
|
configs/config_BiCLSTM_dunet.py
|
dkswxd/unetpp_pytorch_qiu
|
df439b07d13c5d8c87975f0cca4dd7a5ff19f8c2
|
[
"Apache-2.0"
] | null | null | null |
configs/config_BiCLSTM_dunet.py
|
dkswxd/unetpp_pytorch_qiu
|
df439b07d13c5d8c87975f0cca4dd7a5ff19f8c2
|
[
"Apache-2.0"
] | null | null | null |
configs/config_BiCLSTM_dunet.py
|
dkswxd/unetpp_pytorch_qiu
|
df439b07d13c5d8c87975f0cca4dd7a5ff19f8c2
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
config_name = 'BiCLSTM_dunet'
config_dataset = OrderedDict([
('dataset', 'hyper'),
('channels', 60),
('n_class', 2),
('norm_kwargs', {'type':'data'}),
('npy_dir', '../cancer/npy/'),
('label_dir', '../cancer/label/'),
('train_split', '../cancer/split/split_0_train.txt'),
('val_split', '../cancer/split/split_0_val.txt'),
('test_split', '../cancer/split/split_0_test.txt'),
('batch_size', 1),
('channel_transform', '5:45:2'),
('aug_config', 'none')
])
config_model = OrderedDict([
('model', config_name),
('layers', 4),
('feature_root', 32),
('BiCLSTM_feature_root', 8),
('conv_repeat', 2),
('use_bn', True),
('track_running_stats', False),
# ('track_running_stats', True),
('bn_momentum', 0.1),
('use_gn', False),
('num_groups', 16),
('deform_CLSTM', False),
('epoch', 50),
('save_interval', 5),
('restore', False), # TODO: restore training not implemented!
('modulated', True),
('use_deform', ['down3_conv0', 'down3_conv1', 'down2_conv0', 'down2_conv1', 'up2_conv0', 'up2_conv1']),
])
config_optimizer = OrderedDict([
('loss', 'BCE'),
('optimizer', 'Adam'),
('learning_rate', 0.001),
('weight_decay', 0.001),
('scheduler', [40]),
('scheduler_rate', 0.1),
])
config_utils = OrderedDict([
('workdir', '../cancer/workdir/{}_public/'.format(config_name)),
('work_phase', 'train-val-test'),
# ('work_phase', 'train-test'),
# ('work_phase', 'test'),
])
config_public = OrderedDict()
config_public.update(config_dataset)
config_public.update(config_model)
config_public.update(config_optimizer)
config_public.update(config_utils)
################################################## split configs
config_split_all = []
for i in range(5):
config_split_all.append(config_public.copy())
config_split_all[-1]['train_split'] = '../cancer/split/split_{}_train.txt'.format(i)
config_split_all[-1]['val_split'] = '../cancer/split/split_{}_val.txt'.format(i)
config_split_all[-1]['test_split'] = '../cancer/split/split_{}_test.txt'.format(i)
config_split_all[-1]['workdir'] = '../cancer/workdir/{}_split_{}/'.format(config_name, i)
################################################## split configs
################################################## split configs
config_split_all = []
for i in range(5):
config_split_all.append(config_public.copy())
# config_split_all[-1]['deform_CLSTM'] = True
config_split_all[-1]['train_split'] = '../cancer/split/split_{}_train.txt'.format(i)
config_split_all[-1]['val_split'] = '../cancer/split/split_{}_val.txt'.format(i)
config_split_all[-1]['test_split'] = '../cancer/split/split_{}_test.txt'.format(i)
config_split_all[-1]['workdir'] = '../cancer/workdir/{}_deform_split_{}/'.format(config_name, i)
################################################## split configs
all_configs = config_split_all
# all_configs = []
| 34.471264
| 107
| 0.598533
|
93a5510b7ca805ea8519bb98e84cc5a3c5e509ea
| 7,927
|
py
|
Python
|
code/brain_pipeline.py
|
Delta-Sigma/brain_segmentation
|
4462bde26e8434391c91cf3d9a8ac53fd6d494bc
|
[
"MIT"
] | 315
|
2016-04-19T20:17:51.000Z
|
2022-03-22T05:48:13.000Z
|
code/brain_pipeline.py
|
Delta-Sigma/brain_segmentation
|
4462bde26e8434391c91cf3d9a8ac53fd6d494bc
|
[
"MIT"
] | 29
|
2016-10-16T13:32:18.000Z
|
2021-08-30T01:40:34.000Z
|
code/brain_pipeline.py
|
Delta-Sigma/brain_segmentation
|
4462bde26e8434391c91cf3d9a8ac53fd6d494bc
|
[
"MIT"
] | 206
|
2016-06-16T06:29:39.000Z
|
2022-02-16T12:38:03.000Z
|
import numpy as np
import subprocess
import random
import progressbar
from glob import glob
from skimage import io
np.random.seed(5) # for reproducibility
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
class BrainPipeline(object):
'''
A class for processing brain scans for one patient
INPUT: (1) filepath 'path': path to directory of one patient. Contains following mha files:
flair, t1, t1c, t2, ground truth (gt)
(2) bool 'n4itk': True to use n4itk normed t1 scans (defaults to True)
(3) bool 'n4itk_apply': True to apply and save n4itk filter to t1 and t1c scans for given patient. This will only work if the
'''
def __init__(self, path, n4itk = True, n4itk_apply = False):
self.path = path
self.n4itk = n4itk
self.n4itk_apply = n4itk_apply
self.modes = ['flair', 't1', 't1c', 't2', 'gt']
# slices=[[flair x 155], [t1], [t1c], [t2], [gt]], 155 per modality
self.slices_by_mode, n = self.read_scans()
# [ [slice1 x 5], [slice2 x 5], ..., [slice155 x 5]]
self.slices_by_slice = n
self.normed_slices = self.norm_slices()
def read_scans(self):
'''
goes into each modality in patient directory and loads individual scans.
transforms scans of same slice into strip of 5 images
'''
print 'Loading scans...'
slices_by_mode = np.zeros((5, 155, 240, 240))
slices_by_slice = np.zeros((155, 5, 240, 240))
flair = glob(self.path + '/*Flair*/*.mha')
t2 = glob(self.path + '/*_T2*/*.mha')
gt = glob(self.path + '/*more*/*.mha')
t1s = glob(self.path + '/**/*T1*.mha')
t1_n4 = glob(self.path + '/*T1*/*_n.mha')
t1 = [scan for scan in t1s if scan not in t1_n4]
scans = [flair[0], t1[0], t1[1], t2[0], gt[0]] # directories to each image (5 total)
if self.n4itk_apply:
            print '-> Applying bias correction...'
for t1_path in t1:
self.n4itk_norm(t1_path) # normalize files
scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
elif self.n4itk:
scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
for scan_idx in xrange(5):
# read each image directory, save to self.slices
slices_by_mode[scan_idx] = io.imread(scans[scan_idx], plugin='simpleitk').astype(float)
for mode_ix in xrange(slices_by_mode.shape[0]): # modes 1 thru 5
for slice_ix in xrange(slices_by_mode.shape[1]): # slices 1 thru 155
slices_by_slice[slice_ix][mode_ix] = slices_by_mode[mode_ix][slice_ix] # reshape by slice
return slices_by_mode, slices_by_slice
def norm_slices(self):
'''
normalizes each slice in self.slices_by_slice, excluding gt
subtracts mean and div by std dev for each slice
clips top and bottom one percent of pixel intensities
if n4itk == True, will apply n4itk bias correction to T1 and T1c images
'''
print 'Normalizing slices...'
normed_slices = np.zeros((155, 5, 240, 240))
for slice_ix in xrange(155):
normed_slices[slice_ix][-1] = self.slices_by_slice[slice_ix][-1]
for mode_ix in xrange(4):
normed_slices[slice_ix][mode_ix] = self._normalize(self.slices_by_slice[slice_ix][mode_ix])
print 'Done.'
return normed_slices
def _normalize(self, slice):
'''
INPUT: (1) a single slice of any given modality (excluding gt)
(2) index of modality assoc with slice (0=flair, 1=t1, 2=t1c, 3=t2)
OUTPUT: normalized slice
'''
b, t = np.percentile(slice, (0.5,99.5))
slice = np.clip(slice, b, t)
if np.std(slice) == 0:
return slice
else:
return (slice - np.mean(slice)) / np.std(slice)
def save_patient(self, reg_norm_n4, patient_num):
'''
INPUT: (1) int 'patient_num': unique identifier for each patient
(2) string 'reg_norm_n4': 'reg' for original images, 'norm' normalized images, 'n4' for n4 normalized images
OUTPUT: saves png in Norm_PNG directory for normed, Training_PNG for reg
'''
print 'Saving scans for patient {}...'.format(patient_num)
progress.currval = 0
if reg_norm_n4 == 'norm': #saved normed slices
for slice_ix in progress(xrange(155)): # reshape to strip
strip = self.normed_slices[slice_ix].reshape(1200, 240)
if np.max(strip) != 0: # set values < 1
strip /= np.max(strip)
if np.min(strip) <= -1: # set values > -1
strip /= abs(np.min(strip))
# save as patient_slice.png
io.imsave('Norm_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
elif reg_norm_n4 == 'reg':
for slice_ix in progress(xrange(155)):
strip = self.slices_by_slice[slice_ix].reshape(1200, 240)
if np.max(strip) != 0:
strip /= np.max(strip)
io.imsave('Training_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
else:
for slice_ix in progress(xrange(155)): # reshape to strip
strip = self.normed_slices[slice_ix].reshape(1200, 240)
if np.max(strip) != 0: # set values < 1
strip /= np.max(strip)
if np.min(strip) <= -1: # set values > -1
strip /= abs(np.min(strip))
# save as patient_slice.png
io.imsave('n4_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
def n4itk_norm(self, path, n_dims=3, n_iters='[20,20,10,5]'):
'''
INPUT: (1) filepath 'path': path to mha T1 or T1c file
(2) directory 'parent_dir': parent directory to mha file
OUTPUT: writes n4itk normalized image to parent_dir under orig_filename_n.mha
'''
output_fn = path[:-4] + '_n.mha'
# run n4_bias_correction.py path n_dim n_iters output_fn
subprocess.call('python n4_bias_correction.py ' + path + ' ' + str(n_dims) + ' ' + n_iters + ' ' + output_fn, shell = True)
def save_patient_slices(patients, type):
'''
INPUT (1) list 'patients': paths to any directories of patients to save. for example- glob("Training/HGG/**")
(2) string 'type': options = reg (non-normalized), norm (normalized, but no bias correction), n4 (bias corrected and normalized)
    saves strips of patient slices to appropriate directory (Training_PNG/, Norm_PNG/ or n4_PNG/) as patient-num_slice-num
'''
for patient_num, path in enumerate(patients):
a = BrainPipeline(path)
a.save_patient(type, patient_num)
def s3_dump(directory, bucket):
'''
dump files from a given directory to an s3 bucket
INPUT (1) string 'directory': directory containing files to save
          (2) string 'bucket': name of s3 bucket to dump files
'''
    subprocess.call('aws s3 cp' + ' ' + directory + ' ' + 's3://' + bucket + ' ' + '--recursive', shell=True)
def save_labels(fns):
'''
INPUT list 'fns': filepaths to all labels
'''
progress.currval = 0
    for label_idx in progress(xrange(len(fns))):
        slices = io.imread(fns[label_idx], plugin = 'simpleitk')
for slice_idx in xrange(len(slices)):
io.imsave('Labels/{}_{}L.png'.format(label_idx, slice_idx), slices[slice_idx])
if __name__ == '__main__':
labels = glob('Original_Data/Training/HGG/**/*more*/**.mha')
save_labels(labels)
# patients = glob('Training/HGG/**')
# save_patient_slices(patients, 'reg')
# save_patient_slices(patients, 'norm')
# save_patient_slices(patients, 'n4')
# s3_dump('Graveyard/Training_PNG/', 'orig-training-png')
| 46.629412
| 140
| 0.59783
|
8f97a33bf65c5c1152061e1d471f109c80327372
| 2,192
|
py
|
Python
|
setup.py
|
remram44/find_projections
|
f3988a206c4902e039ea75d832d2c3ef695c1f91
|
[
"MIT"
] | 1
|
2021-03-29T18:23:57.000Z
|
2021-03-29T18:23:57.000Z
|
setup.py
|
remram44/find_projections
|
f3988a206c4902e039ea75d832d2c3ef695c1f91
|
[
"MIT"
] | null | null | null |
setup.py
|
remram44/find_projections
|
f3988a206c4902e039ea75d832d2c3ef695c1f91
|
[
"MIT"
] | 2
|
2021-03-18T19:39:48.000Z
|
2022-01-05T16:25:44.000Z
|
import os
import sys
from distutils.core import Extension
from setuptools import setup
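# Make the user-level site-packages directory importable before numpy is imported below.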
home_folder = os.path.expanduser("~")
user_site_packages_folder = "{0}/.local/lib/python{1}.{2}/site-packages".format(home_folder, sys.version_info[0],
sys.version_info[1])
if user_site_packages_folder not in sys.path:
sys.path.append(user_site_packages_folder)
import numpy as np
NAME = 'find_projections'
VERSION = '2.3.1'
REQUIRES = ['numpy >= 1.13']
find_projections_module = Extension('libfind_projections',
sources=['find_projections/binary_tree.cpp', 'find_projections/projection.cpp',
'find_projections/search.cpp', 'find_projections/helper.cpp',
'find_projections/numeric_binary_tree.cpp',
'find_projections/discrete_binary_tree.cpp', 'find_projections/datset.cpp',
'find_projections/pyfind_projections.cpp'],
include_dirs=[np.get_include()],
extra_compile_args=['-pthread', '-std=c++14'],
extra_link_args=['-shared', '-pthread', '-lboost_python-py36']
)
setup(
name=NAME,
version=VERSION,
url='http://autonlab.org',
author='Saswati Ray',
author_email='sray@cs.cmu.edu',
description='Search for 2-d projection boxes separating out classes/quantiles of output',
keywords='d3m_primitive',
license='MIT',
ext_modules=[find_projections_module],
packages=['find_projections'],
entry_points={
'd3m.primitives': [
'classification.search.Find_projections = find_projections:Search',
'regression.search_numeric.Find_projections = find_projections:SearchNumeric',
'classification.search_hybrid.Find_projections = find_projections:SearchHybrid',
'regression.search_hybrid_numeric.Find_projections = find_projections:SearchHybridNumeric',
]
}
)
| 42.980392
| 120
| 0.593978
|
10720d1808c8f355c4a5f13de6ca92d051cedb4c
| 46
|
py
|
Python
|
bitmovin/services/manifests/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 44
|
2016-12-12T17:37:23.000Z
|
2021-03-03T09:48:48.000Z
|
bitmovin/services/manifests/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 38
|
2017-01-09T14:45:45.000Z
|
2022-02-27T18:04:33.000Z
|
bitmovin/services/manifests/__init__.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 27
|
2017-02-02T22:49:31.000Z
|
2019-11-21T07:04:57.000Z
|
from .manifest_service import ManifestService
| 23
| 45
| 0.891304
|
3de44afd8d8da955f0f6fdc847cd1cee0ff44303
| 4,753
|
py
|
Python
|
tests/test_rule_building.py
|
zStupan/NiaARM
|
3ade6c5f89a22da7f1e7309cb4fec227bb913e6b
|
[
"MIT"
] | null | null | null |
tests/test_rule_building.py
|
zStupan/NiaARM
|
3ade6c5f89a22da7f1e7309cb4fec227bb913e6b
|
[
"MIT"
] | 14
|
2022-03-02T07:38:34.000Z
|
2022-03-15T11:18:50.000Z
|
tests/test_rule_building.py
|
zStupan/NiaARM
|
3ade6c5f89a22da7f1e7309cb4fec227bb913e6b
|
[
"MIT"
] | 1
|
2022-03-01T14:41:07.000Z
|
2022-03-01T14:41:07.000Z
|
"""
Test intended for testing the main procedure for building
an association rule from candidate solutions.
"""
from unittest import TestCase
from niaarm.niaarm import NiaARM
from niaarm.dataset import Dataset
from niaarm.feature import Feature
import os
class TestBuildRuleA(TestCase):
# let's borrow a test case from Wikipedia:
# https://en.wikipedia.org/wiki/Lift_(data_mining)
def setUp(self):
data = Dataset(os.path.join(os.path.dirname(__file__), 'test_data', 'wiki_test_case.csv'))
self.features = data.features
self.transactions = data.transactions
self.oper = NiaARM(data.dimension, data.features, data.transactions, ('support',))
def test_threshold_move(self):
move = self.oper.threshold_move(0)
move2 = self.oper.threshold_move(1)
self.assertEqual(move, 1)
self.assertEqual(move2, 2)
def test_vector_position(self):
"""Important test for checking the position of feature in vector
        Categorical features consist of two vector elements, while
each numerical feature consists of three vector elements.
"""
position1 = self.oper.feature_position(0)
position2 = self.oper.feature_position(1)
self.assertEqual(position1, 0)
self.assertEqual(position2, 2)
def test_build_rule(self):
"""Test procedure for building rules"""
rule1 = self.oper.build_rule([0.45328107,
0.13655004,
0.6860223,
0.78527931,
0.96291945,
0.18117294,
0.50567635])
rule2 = self.oper.build_rule([0.95328107,
0.13655004,
0.6860223,
0.78527931,
0.96291945,
0.18117294,
0.50567635])
rule3 = self.oper.build_rule([0.95328107,
0.98655004,
0.6860223,
0.78527931,
0.96291945,
0.18117294,
0.50567635])
rule4 = self.oper.build_rule([0.45328107,
0.20655004,
0.6860223,
0.78527931,
0.10291945,
0.18117294,
0.50567635])
rule5 = self.oper.build_rule([0.45328107,
0.20655004,
0.2060223,
0.79527931,
0.10291945,
0.18117294,
0.50567635])
rule6 = self.oper.build_rule([0.45328107,
0.20655004,
0.2060223,
0.19727931,
0.10291945,
0.18117294,
0.50567635])
rule7 = self.oper.build_rule([0.95328107,
0.20655004,
0.2060223,
0.19727931,
0.10291945,
0.18117294,
0.50567635])
self.assertEqual(rule1, [Feature('Feat1', dtype='cat', categories=["A"]), None])
self.assertEqual(rule2, [Feature('Feat1', dtype='cat', categories=["B"]), None])
self.assertEqual(rule3, [None, None])
self.assertEqual(rule4, [Feature('Feat1', dtype='cat', categories=["A"]),
Feature('Feat2', dtype='int', min_val=1, max_val=1)])
self.assertEqual(rule5, [Feature('Feat1', dtype='cat', categories=["A"]),
Feature('Feat2', dtype='int', min_val=0, max_val=1)])
self.assertEqual(rule6, [Feature('Feat1', dtype='cat', categories=["A"]),
Feature('Feat2', dtype='int', min_val=0, max_val=0)])
self.assertEqual(rule7, [Feature('Feat1', dtype='cat', categories=["B"]),
Feature('Feat2', dtype='int', min_val=0, max_val=0)])
| 45.701923
| 98
| 0.432779
|
63eb059266786adf59e3541fe6bfe0ca075a37bf
| 6,912
|
py
|
Python
|
tni.py
|
AvidDabbler/TitleVI-Analysis
|
bc447ead902c4c6bf4fbfbb92f4dfbb75f22b5a6
|
[
"MIT"
] | 1
|
2019-10-28T03:06:44.000Z
|
2019-10-28T03:06:44.000Z
|
tni.py
|
AvidDabbler/TitleVI-Analysis
|
bc447ead902c4c6bf4fbfbb92f4dfbb75f22b5a6
|
[
"MIT"
] | 5
|
2019-10-28T15:28:33.000Z
|
2019-10-28T15:33:10.000Z
|
tni.py
|
AvidDabbler/TitleVI-Analysis
|
bc447ead902c4c6bf4fbfbb92f4dfbb75f22b5a6
|
[
"MIT"
] | null | null | null |
# ! are the imports needed if they are defined in main?
import arcpy
import os
import shutil
from helpers import *
# ! are we using tni at all?
# Compute Transit Need Index (TNI) based on the 2003 service standards for each census blockgroup.
# Use the minority, income, age and car ownership data computed in prior functions as inputs, and
# add a feature class indicating TNI to the final output gdb (final_gdb_loc)
def tni(year, root_dir, final_gdb_loc):
arcpy.env.overwriteOutput = True
# set a working gdb
gdb = f"TransitNeedIndex{year}.gdb"
replaceGDB(root_dir, gdb)
gdb_loc = os.path.join(root_dir,gdb)
# define input feature classes, generated from prior functions
minority_fc = os.path.join(final_gdb_loc, f'Minority{year}_final')
medhhinc_fc = os.path.join(final_gdb_loc, f'MedHHInc{year}_final')
senior_fc = os.path.join(final_gdb_loc, f'Senior{year}_final')
NoCar_fc = os.path.join(final_gdb_loc, f"NoCar{year}_Final")
arcpy.env.workspace = os.path.join(root_dir, gdb) # -----> Change Year # ! what is this comment?
arcpy.ClearWorkspaceCache_management()
# MAke a working feature class from a copy of the minority fc. Define minority TNI fields and calculate them
TNI_Minority = arcpy.conversion.FeatureClassToFeatureClass(in_features=minority_fc, out_path=arcpy.env.workspace, out_name=f"TNI_Minority{year}")
arcpy.management.AddFields(in_table=TNI_Minority, field_description=[["TNI_Minority", "DOUBLE"],["PopDens", "DOUBLE"],["RegPopDens", "DOUBLE"],["TNI_Pop", "DOUBLE"]])
# ! should this use percentage rather than density? If we use this later on I can adjust (if it should be adjusted)
# Process: Calculate Field (6) (Calculate Field) (management)
arcpy.management.CalculateField(in_table=TNI_Minority, field="PopDens", expression="!TPOP! / !SqMiles!", expression_type="PYTHON3", code_block="", field_type="TEXT")
arcpy.management.CalculateField(in_table=TNI_Minority, field="RegPopDens", expression="!RegTPOP! / !RegSqMiles!", expression_type="PYTHON3", code_block="", field_type="TEXT")
arcpy.management.CalculateField(in_table=TNI_Minority, field="TNI_Minority", expression="!MinorityDens! / !RegMinorityDens!", expression_type="PYTHON3", code_block="", field_type="TEXT")
arcpy.management.CalculateField(in_table=TNI_Minority, field="TNI_Pop", expression="!PopDens! / !RegPopDens!", expression_type="PYTHON3", code_block="", field_type="TEXT")
# copy income fc, define TNI fields, and join to minority working fc.
# note that median income is used directly in TNI calcs.
TNI_MedHHInc = arcpy.conversion.FeatureClassToFeatureClass(in_features=os.path.join(final_gdb_loc, medhhinc_fc), out_path=gdb_loc, out_name=f"TNI_MedHHInc{year}")[0]
arcpy.management.AddFields(in_table=TNI_MedHHInc,field_description=[["TNI_MedInc", "DOUBLE"],["TNI_MedInc", "DOUBLE"]])
TNI_Minority_MedHHInc_Join = arcpy.management.JoinField(in_data=TNI_Minority, in_field="GEOID", join_table=TNI_MedHHInc, join_field="GEOID", fields=["RegMedHHInc", "MedHHInc", "TNI_MedInc"])[0]
# same as above, with senior
TNI_Senior = arcpy.conversion.FeatureClassToFeatureClass(in_features=os.path.join(final_gdb_loc, senior_fc), out_path=gdb_loc, out_name=f"TNI_Senior{year}")[0]
arcpy.management.AddField(in_table=TNI_Senior, field_name="TNI_Senior", field_type="DOUBLE")
arcpy.management.CalculateField(in_table=TNI_Senior, field="TNI_Senior", expression="!SeniorDens! / !RegSeniorDens!", expression_type="PYTHON3", code_block="", field_type="DOUBLE")
TNI_Join = arcpy.management.JoinField(in_data=TNI_Minority_MedHHInc_Join, in_field="GEOID", join_table=TNI_Senior, join_field="GEOID", fields=["TSenior", "SeniorDens", "RegSeniorDens", "TNI_Senior"])[0]
# Same as above, with zero car households
TNI_NoCar = arcpy.conversion.FeatureClassToFeatureClass(in_features=os.path.join(final_gdb_loc, NoCar_fc), out_path=gdb_loc, out_name="TNI_NoCar",)[0]
arcpy.management.AddField(in_table=TNI_NoCar, field_name="TNI_NoCar", field_type="DOUBLE")[0]
arcpy.management.AddField(in_table=TNI_NoCar, field_name="TNI_LowCar", field_type="DOUBLE")[0]
arcpy.management.CalculateField(in_table=TNI_NoCar, field="TNI_NoCar", expression="!NoCarDens! / !RegNoCarDens!", expression_type="PYTHON3", field_type="DOUBLE")[0]
arcpy.management.CalculateField(in_table=TNI_NoCar, field="TNI_LowCar", expression="!LowCarDens! / !RegLowCarDens!", expression_type="PYTHON3", code_block="", field_type="TEXT")[0]
TNI_Join = arcpy.management.JoinField(in_data=TNI_Join, in_field="GEOID", join_table=TNI_NoCar, join_field="GEOID", fields=["TNoCar", "NoCarDens", "RegNoCarDens", "TNI_NoCar", "TLowCar", "LowCarDens", "RegLowCarDens", "TNI_LowCar"])[0]
# Create and calculate the TNI
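    # Weighted sum: median income (3.5), population (2), low-car households (1.5), minority (1), seniors (1)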
arcpy.management.AddField(in_table=TNI_Join, field_name="TNI", field_type="DOUBLE")
arcpy.management.CalculateField(in_table=TNI_Join, field="TNI", expression="(!TNI_MedInc!*3.5)+(!TNI_Minority!*1)+(!TNI_Senior!*1)+(!TNI_LowCar!*1.5)+(!TNI_Pop!*2)", expression_type="PYTHON3", field_type="TEXT")
# compare each blockgroup's TNI to the regional TNI
# Determine the regional mean and standard deviation TNI, then join to each blockgroup.
# Finally, define each regions need (Very Low to High) based on how it compares to regional TNI
TNI_Join_Dissolve = arcpy.management.Dissolve(in_features=TNI_Join, out_feature_class=f"{TNI_Join}_dissolve", dissolve_field=[], statistics_fields=[["TNI", "STD"], ["TNI", "MEAN"]], multi_part="MULTI_PART", unsplit_lines="DISSOLVE_LINES")[0]
TNI_Join_Dissolve_SpJoin = arcpy.analysis.SpatialJoin(target_features=TNI_Join, join_features=TNI_Join_Dissolve, out_feature_class=f'{TNI_Join_Dissolve}_SpJoin', join_operation="JOIN_ONE_TO_ONE", join_type="KEEP_ALL")[0]
arcpy.management.AddField(in_table=TNI_Join_Dissolve_SpJoin, field_name="Propensity", field_type="DOUBLE")[0]
arcpy.management.CalculateField(in_table=TNI_Join_Dissolve_SpJoin, field="Propensity", expression="ifBlock(!TNI!,!STD_TNI!,!MEAN_TNI!)", expression_type="PYTHON3", code_block='''def ifBlock(TNI, STD_TNI, MEAN_TNI):
if TNI < (MEAN_TNI-(STD_TNI*1.5)):
return \"VL\"
elif TNI > (MEAN_TNI-(STD_TNI*1.5)) and TNI < (MEAN_TNI-(STD_TNI*.5)):
return \"L\"
elif TNI > (MEAN_TNI-(STD_TNI*.5)) and TNI < (MEAN_TNI+(STD_TNI*.5)):
return \"A\"
elif TNI > (MEAN_TNI+(STD_TNI*.5)) and TNI < (MEAN_TNI+(STD_TNI*1.5)):
return \"H\"
elif TNI > (MEAN_TNI+(STD_TNI*1.5)):
return \"VH\"
else:
return \"ERROR\"
''', field_type="TEXT")[0]
# create TNI feature classes within output gdb's
arcpy.conversion.FeatureClassToFeatureClass(in_features=TNI_Join_Dissolve_SpJoin, out_path=gdb_loc, out_name=f"TNI{year}_Final")[0]
arcpy.conversion.FeatureClassToFeatureClass(in_features=TNI_Join_Dissolve_SpJoin, out_path=final_gdb_loc, out_name=f"TNI{year}_Final")[0]
| 65.207547
| 244
| 0.754051
|
6e0fce2d79b732da02e8ec7fbf23e9b5b8e1b453
| 7,769
|
py
|
Python
|
_test/test_indentation.py
|
zvxr/ruamel-yaml
|
0f5e5dd4c650ee0d9faa7c49a5cbc0a3f3488759
|
[
"MIT"
] | 8
|
2020-08-10T11:59:25.000Z
|
2022-03-07T19:12:08.000Z
|
_test/test_indentation.py
|
zvxr/ruamel-yaml
|
0f5e5dd4c650ee0d9faa7c49a5cbc0a3f3488759
|
[
"MIT"
] | 2
|
2022-01-12T10:02:57.000Z
|
2022-02-22T21:11:29.000Z
|
_test/test_indentation.py
|
zvxr/ruamel-yaml
|
0f5e5dd4c650ee0d9faa7c49a5cbc0a3f3488759
|
[
"MIT"
] | 4
|
2020-12-11T21:26:42.000Z
|
2022-02-01T13:36:21.000Z
|
# coding: utf-8
import pytest # NOQA
from roundtrip import round_trip, round_trip_load, round_trip_dump, dedent, YAML
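# Round-trip helper: load and dump again, normalising the result to end with a single newline.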
def rt(s):
res = round_trip_dump(round_trip_load(s))
return res.strip() + '\n'
class TestIndent:
def test_roundtrip_inline_list(self):
s = 'a: [a, b, c]\n'
output = rt(s)
assert s == output
def test_roundtrip_mapping_of_inline_lists(self):
s = dedent("""\
a: [a, b, c]
j: [k, l, m]
""")
output = rt(s)
assert s == output
def test_roundtrip_mapping_of_inline_lists_comments(self):
s = dedent("""\
# comment A
a: [a, b, c]
# comment B
j: [k, l, m]
""")
output = rt(s)
assert s == output
def test_roundtrip_mapping_of_inline_sequence_eol_comments(self):
s = dedent("""\
# comment A
a: [a, b, c] # comment B
j: [k, l, m] # comment C
""")
output = rt(s)
assert s == output
# first test by explicitly setting flow style
def test_added_inline_list(self):
s1 = dedent("""
a:
- b
- c
- d
""")
s = 'a: [b, c, d]\n'
data = round_trip_load(s1)
val = data['a']
val.fa.set_flow_style()
# print(type(val), '_yaml_format' in dir(val))
output = round_trip_dump(data)
assert s == output
# ############ flow mappings
def test_roundtrip_flow_mapping(self):
s = dedent("""\
- {a: 1, b: hallo}
- {j: fka, k: 42}
""")
data = round_trip_load(s)
output = round_trip_dump(data)
assert s == output
def test_roundtrip_sequence_of_inline_mappings_eol_comments(self):
s = dedent("""\
# comment A
- {a: 1, b: hallo} # comment B
- {j: fka, k: 42} # comment C
""")
output = rt(s)
assert s == output
def test_indent_top_level(self):
inp = """
- a:
- b
"""
round_trip(inp, indent=4)
def test_set_indent_5_block_list_indent_1(self):
inp = """
a:
- b: c
- 1
- d:
- 2
"""
round_trip(inp, indent=5, block_seq_indent=1)
def test_set_indent_4_block_list_indent_2(self):
inp = """
a:
- b: c
- 1
- d:
- 2
"""
round_trip(inp, indent=4, block_seq_indent=2)
def test_set_indent_3_block_list_indent_0(self):
inp = """
a:
- b: c
- 1
- d:
- 2
"""
round_trip(inp, indent=3, block_seq_indent=0)
def Xtest_set_indent_3_block_list_indent_2(self):
inp = """
a:
-
b: c
-
1
-
d:
-
2
"""
round_trip(inp, indent=3, block_seq_indent=2)
def test_set_indent_3_block_list_indent_2(self):
inp = """
a:
- b: c
- 1
- d:
- 2
"""
round_trip(inp, indent=3, block_seq_indent=2)
def Xtest_set_indent_2_block_list_indent_2(self):
inp = """
a:
-
b: c
-
1
-
d:
-
2
"""
round_trip(inp, indent=2, block_seq_indent=2)
# this is how it should be: block_seq_indent stretches the indent
def test_set_indent_2_block_list_indent_2(self):
inp = """
a:
- b: c
- 1
- d:
- 2
"""
round_trip(inp, indent=2, block_seq_indent=2)
# have to set indent!
def test_roundtrip_four_space_indents(self):
# fmt: off
s = (
'a:\n'
'- foo\n'
'- bar\n'
)
# fmt: on
round_trip(s, indent=4)
def test_roundtrip_four_space_indents_no_fail(self):
inp = """
a:
- foo
- bar
"""
exp = """
a:
- foo
- bar
"""
assert round_trip_dump(round_trip_load(inp)) == dedent(exp)
class TestYpkgIndent:
def test_00(self):
inp = """
name : nano
version : 2.3.2
release : 1
homepage : http://www.nano-editor.org
source :
- http://www.nano-editor.org/dist/v2.3/nano-2.3.2.tar.gz : ff30924807ea289f5b60106be8
license : GPL-2.0
summary : GNU nano is an easy-to-use text editor
builddeps :
- ncurses-devel
description: |
GNU nano is an easy-to-use text editor originally designed
as a replacement for Pico, the ncurses-based editor from the non-free mailer
package Pine (itself now available under the Apache License as Alpine).
"""
round_trip(
inp, indent=4, block_seq_indent=2, top_level_colon_align=True, prefix_colon=' '
)
def guess(s):
from ruamel.yaml.util import load_yaml_guess_indent
x, y, z = load_yaml_guess_indent(dedent(s))
return y, z
class TestGuessIndent:
def test_guess_20(self):
inp = """\
a:
- 1
"""
assert guess(inp) == (2, 0)
def test_guess_42(self):
inp = """\
a:
- 1
"""
assert guess(inp) == (4, 2)
def test_guess_42a(self):
# block seq indent prevails over nested key indent level
inp = """\
b:
a:
- 1
"""
assert guess(inp) == (4, 2)
def test_guess_3None(self):
inp = """\
b:
a: 1
"""
assert guess(inp) == (3, None)
class TestSeparateMapSeqIndents:
# using uncommon 6 indent with 3 push in as 2 push in automatically
# gets you 4 indent even if not set
def test_00(self):
# old style
yaml = YAML()
yaml.indent = 6
yaml.block_seq_indent = 3
inp = """
a:
- 1
- [1, 2]
"""
yaml.round_trip(inp)
def test_01(self):
yaml = YAML()
yaml.indent(sequence=6)
yaml.indent(offset=3)
inp = """
a:
- 1
- {b: 3}
"""
yaml.round_trip(inp)
def test_02(self):
yaml = YAML()
yaml.indent(mapping=5, sequence=6, offset=3)
inp = """
a:
b:
- 1
- [1, 2]
"""
yaml.round_trip(inp)
def test_03(self):
inp = """
a:
b:
c:
- 1
- [1, 2]
"""
round_trip(inp, indent=4)
def test_04(self):
yaml = YAML()
yaml.indent(mapping=5, sequence=6)
inp = """
a:
b:
- 1
- [1, 2]
- {d: 3.14}
"""
yaml.round_trip(inp)
def test_issue_51(self):
yaml = YAML()
# yaml.map_indent = 2 # the default
yaml.indent(sequence=4, offset=2)
yaml.preserve_quotes = True
yaml.round_trip("""
role::startup::author::rsyslog_inputs:
imfile:
- ruleset: 'AEM-slinglog'
File: '/opt/aem/author/crx-quickstart/logs/error.log'
startmsg.regex: '^[-+T.:[:digit:]]*'
tag: 'error'
- ruleset: 'AEM-slinglog'
File: '/opt/aem/author/crx-quickstart/logs/stdout.log'
startmsg.regex: '^[-+T.:[:digit:]]*'
tag: 'stdout'
""")
# ############ indentation
| 23.191045
| 95
| 0.460677
|
e30d5101f2fc8f75c04560c168376fce1539be29
| 5,150
|
py
|
Python
|
tests/test_add_option_enqueue.py
|
benhowes/loguru
|
b576d9767e32129c6c04869c054f20a739d6ce5c
|
[
"MIT"
] | 1
|
2020-05-30T10:00:36.000Z
|
2020-05-30T10:00:36.000Z
|
tests/test_add_option_enqueue.py
|
benhowes/loguru
|
b576d9767e32129c6c04869c054f20a739d6ce5c
|
[
"MIT"
] | null | null | null |
tests/test_add_option_enqueue.py
|
benhowes/loguru
|
b576d9767e32129c6c04869c054f20a739d6ce5c
|
[
"MIT"
] | null | null | null |
from loguru import logger
import pytest
import time
import re
import sys
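# Helper classes that deliberately fail during pickling, unpickling or writing, used to exercise
# the enqueue error handling in the tests below.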
class NotPicklable:
def __getstate__(self):
raise RuntimeError("You shall not serialize me!")
def __setstate__(self, state):
pass
class NotUnpicklable:
def __getstate__(self):
return "..."
def __setstate__(self, state):
raise RuntimeError("You shall not de-serialize me!")
class NotWritable:
def write(self, message):
if "fail" in message.record["extra"]:
raise RuntimeError("You asked me to fail...")
print(message, end="")
def test_enqueue():
x = []
def sink(message):
time.sleep(0.1)
x.append(message)
logger.add(sink, format="{message}", enqueue=True)
logger.debug("Test")
assert len(x) == 0
time.sleep(0.2)
assert len(x) == 1
assert x[0] == "Test\n"
def test_enqueue_with_exception():
x = []
def sink(message):
time.sleep(0.1)
x.append(message)
logger.add(sink, format="{message}", enqueue=True)
try:
1 / 0
except ZeroDivisionError:
logger.exception("Error")
assert len(x) == 0
time.sleep(0.2)
assert len(x) == 1
lines = x[0].splitlines()
assert lines[0] == "Error"
assert lines[-1] == "ZeroDivisionError: division by zero"
def test_caught_exception_queue_put(writer, capsys):
logger.add(writer, enqueue=True, catch=True, format="{message}")
logger.info("It's fine")
logger.bind(broken=NotPicklable()).info("Bye bye...")
logger.info("It's fine again")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\nIt's fine again\n"
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
assert lines[-2] == "RuntimeError: You shall not serialize me!"
assert lines[-1] == "--- End of logging error ---"
def test_caught_exception_queue_get(writer, capsys):
logger.add(writer, enqueue=True, catch=True, format="{message}")
logger.info("It's fine")
logger.bind(broken=NotUnpicklable()).info("Bye bye...")
logger.info("It's fine again")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\nIt's fine again\n"
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert lines[1] == "Record was: None"
assert lines[-2] == "RuntimeError: You shall not de-serialize me!"
assert lines[-1] == "--- End of logging error ---"
def test_caught_exception_sink_write(capsys):
logger.add(NotWritable(), enqueue=True, catch=True, format="{message}")
logger.info("It's fine")
logger.bind(fail=True).info("Bye bye...")
logger.info("It's fine again")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == "It's fine\nIt's fine again\n"
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
assert lines[-2] == "RuntimeError: You asked me to fail..."
assert lines[-1] == "--- End of logging error ---"
def test_not_caught_exception_queue_put(writer, capsys):
logger.add(writer, enqueue=True, catch=False, format="{message}")
logger.info("It's fine")
with pytest.raises(RuntimeError, match=r"You shall not serialize me!"):
logger.bind(broken=NotPicklable()).info("Bye bye...")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\n"
assert out == ""
assert err == ""
def test_not_caught_exception_queue_get(writer, capsys):
logger.add(writer, enqueue=True, catch=False, format="{message}")
logger.info("It's fine")
logger.bind(broken=NotUnpicklable()).info("Bye bye...")
logger.info("It's not fine")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\n"
assert out == ""
assert lines[0].startswith("Exception")
assert lines[-1] == "RuntimeError: You shall not de-serialize me!"
def test_not_caught_exception_sink_write(capsys):
logger.add(NotWritable(), enqueue=True, catch=False, format="{message}")
logger.info("It's fine")
logger.bind(fail=True).info("Bye bye...")
logger.info("It's not fine")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == "It's fine\n"
assert lines[0].startswith("Exception")
assert lines[-1] == "RuntimeError: You asked me to fail..."
def test_wait_for_all_messages_enqueued(capsys):
def slow_sink(message):
time.sleep(0.01)
sys.stderr.write(message)
logger.add(slow_sink, enqueue=True, catch=False, format="{message}")
for i in range(10):
logger.info(i)
logger.complete()
out, err = capsys.readouterr()
assert out == ""
assert err == "".join("%d\n" % i for i in range(10))
| 27.688172 | 76 | 0.629709 |
4089d5a55890ee41005c2ca825c7644b9733f297 | 753 | py | Python | src/dockerjango/dockerjango/urls.py | adamappsdev/Dockerjango | 76ecc537a4d8ef7efe41c4a8c69a5214edd23403 | ["MIT"] | null | null | null | src/dockerjango/dockerjango/urls.py | adamappsdev/Dockerjango | 76ecc537a4d8ef7efe41c4a8c69a5214edd23403 | ["MIT"] | null | null | null | src/dockerjango/dockerjango/urls.py | adamappsdev/Dockerjango | 76ecc537a4d8ef7efe41c4a8c69a5214edd23403 | ["MIT"] | null | null | null |
"""dockerjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 34.227273 | 77 | 0.710491 |
31358cf08831bc34d6a22910e63bb5a0259f59d3 | 420 | py | Python | run_demo.py | natchapolt/face-orientation-prep | 378b25fa78c7429d007cf384410c39791cbd317c | ["MIT"] | null | null | null | run_demo.py | natchapolt/face-orientation-prep | 378b25fa78c7429d007cf384410c39791cbd317c | ["MIT"] | 3 | 2021-03-19T01:39:32.000Z | 2022-01-13T01:22:13.000Z | run_demo.py | natchapolt/face-orientation-prep | 378b25fa78c7429d007cf384410c39791cbd317c | ["MIT"] | null | null | null |
import cv2
import sys
from faceorientationprep.faceorientationfixer import FaceOrientationFixer
def main():
if len(sys.argv) == 1:
msg = """Usage:
python run_demo.py \"<image-path>\""""
print(msg)
return
f = FaceOrientationFixer()
im = cv2.imread(sys.argv[1])
cv2.imshow("s", f.fixOrientation(im))
cv2.waitKey(0)
return
if __name__ == '__main__':
main()
| 19.090909 | 73 | 0.619048 |
dcdb4a5b552f18aa140b708e3701bf1f940c8660 | 4,167 | py | Python | source/lambda/capture_news_feed/test/test_stream_helper.py | knihit/discovering-hot-topics-using-machine-learning | a7d2d87bedee54d18d6885d472f758b0bacf9db8 | ["Apache-2.0"] | null | null | null | source/lambda/capture_news_feed/test/test_stream_helper.py | knihit/discovering-hot-topics-using-machine-learning | a7d2d87bedee54d18d6885d472f758b0bacf9db8 | ["Apache-2.0"] | 21 | 2021-07-22T19:02:25.000Z | 2022-02-14T16:28:18.000Z | source/lambda/ingestion-youtube/test/test_stream_helper.py | aassadza-org/discovering-hot-topics-using-machine-learning | ef5f9d00a14b6b2024c9e0f9dfb915a6e632074d | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import json
import os
import unittest
from datetime import datetime
from moto import mock_kinesis
from shared_util.service_helper import get_service_client
from util.stream_helper import buffer_data_into_stream
@mock_kinesis
def stream_setup(stream_name):
kds_client = get_service_client("kinesis")
kds_client.create_stream(StreamName=stream_name, ShardCount=1)
return kds_client
def delete_stream_setup(kds_client, stream_name):
kds_client.delete_stream(StreamName=stream_name)
@mock_kinesis
class TestStreamBuffer(unittest.TestCase):
def setUp(self):
self.stream_name = os.environ["STREAM_NAME"]
self.kds_client = stream_setup(self.stream_name)
def tearDown(self):
delete_stream_setup(self.kds_client, self.stream_name)
def test_buffer_data_into_stream(self):
data = {
"account_name": "fakeaccount",
"platform": "fakeplatform",
"search_query": "query_str",
"feed": {
"created_at": datetime.now().timestamp(),
"id": "fakeid",
"id_str": "fakeid",
"text": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.",
"entities": {"media": [{"media_url_https": "https://fakeimageurl", "type": "image/jpeg"}]},
"extended_entities": {"media": [{"media_url_https": "https://fakeimageurl", "type": "image/jpeg"}]},
"lang": "en",
"metadata": {"website": "fakeurl.com", "country": "US", "topic": "faketopic"},
},
}
self.assertEqual(buffer_data_into_stream(data)["ResponseMetadata"]["HTTPStatusCode"], 200)
# To verify the data read the data from the mock stream
response = self.kds_client.describe_stream(StreamName=self.stream_name)
shard_id = response["StreamDescription"]["Shards"][0]["ShardId"]
self.assertEqual(shard_id, "shardId-000000000000")
shard_iterator = self.kds_client.get_shard_iterator(
StreamName=self.stream_name, ShardId=shard_id, ShardIteratorType="TRIM_HORIZON"
)
shard_iterator = shard_iterator["ShardIterator"]
records = self.kds_client.get_records(ShardIterator=shard_iterator, Limit=1)
self.assertEqual(json.loads(records["Records"][0]["Data"]), data)
| 56.310811 | 472 | 0.552676 |
598544731ee469685aaff8bb7b149b02adf21435 | 2,039 | py | Python | examples/custom_eval.py | zangy17/OpenAttack | 9114a8af12680f14684d2bf1bc6a5c5e34f8932c | ["MIT"] | 1 | 2020-09-27T23:10:14.000Z | 2020-09-27T23:10:14.000Z | examples/custom_eval.py | zangy17/OpenAttack | 9114a8af12680f14684d2bf1bc6a5c5e34f8932c | ["MIT"] | null | null | null | examples/custom_eval.py | zangy17/OpenAttack | 9114a8af12680f14684d2bf1bc6a5c5e34f8932c | ["MIT"] | 1 | 2020-09-01T11:14:42.000Z | 2020-09-01T11:14:42.000Z |
'''
This example code shows how to design a customized attack evaluation metric, namely BLEU score.
'''
import OpenAttack
from nltk.translate.bleu_score import sentence_bleu
class CustomAttackEval(OpenAttack.DefaultAttackEval):
def __init__(self, attacker, clsf, processor=OpenAttack.DefaultTextProcessor(), **kwargs):
super().__init__(attacker, clsf, processor=processor, **kwargs)
self.__processor = processor
# We extend :py:class:`.DefaultAttackEval` and use ``processor`` option to specify
# the :py:class:`.TextProcessor` used in our ``CustomAttackEval``.
def measure(self, x_orig, x_adv):
# Invoke the original ``measure`` method to get measurements
info = super().measure(x_orig, x_adv)
if info["Succeed"]:
            # Add the ``Bleu`` score, calculated with the **NLTK toolkit**, if the attack succeeded.
            token_orig = [token for token, pos in self.__processor.get_tokens(x_orig)]
            token_adv = [token for token, pos in self.__processor.get_tokens(x_adv)]
            info["Bleu"] = sentence_bleu([token_orig], token_adv)
return info
def update(self, info):
info = super().update(info)
if info["Succeed"]:
# Add bleu score that we just calculated to the total result.
self.__result["bleu"] += info["Bleu"]
return info
def clear(self):
super().clear()
self.__result = { "bleu": 0 }
# Clear results
def get_result(self):
result = super().get_result()
# Calculate average bleu scores and return.
result["Avg. Bleu"] = self.__result["bleu"] / result["Successful Instances"]
return result
def main():
clsf = OpenAttack.load("Victim.BiLSTM.SST")
dataset = OpenAttack.load("Dataset.SST.sample")[:10]
attacker = OpenAttack.attackers.GeneticAttacker()
attack_eval = CustomAttackEval(attacker, clsf)
attack_eval.eval(dataset, visualize=True)
if __name__ == "__main__":
main()
| 38.471698 | 95 | 0.641981 |
9b075415b437ccaa3a7d86cc2ef16be39ef9c0af | 5,001 | py | Python | src/clients/ctm_api_client/models/host_group_data.py | IceT-M/ctm-python-client | 0ef1d8a3c9a27a01c088be1cdf5d177d25912bac | ["BSD-3-Clause"] | 5 | 2021-12-01T18:40:00.000Z | 2022-03-04T10:51:44.000Z | src/clients/ctm_api_client/models/host_group_data.py | IceT-M/ctm-python-client | 0ef1d8a3c9a27a01c088be1cdf5d177d25912bac | ["BSD-3-Clause"] | 3 | 2022-02-21T20:08:32.000Z | 2022-03-16T17:41:03.000Z | src/clients/ctm_api_client/models/host_group_data.py | IceT-M/ctm-python-client | 0ef1d8a3c9a27a01c088be1cdf5d177d25912bac | ["BSD-3-Clause"] | 7 | 2021-12-01T11:59:16.000Z | 2022-03-01T18:16:40.000Z |
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class HostGroupData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"hostgroup": "str",
"tag": "str",
"agentslist": "list[AgentInGroupParams]",
}
attribute_map = {"hostgroup": "hostgroup", "tag": "tag", "agentslist": "agentslist"}
def __init__(
self, hostgroup=None, tag=None, agentslist=None, _configuration=None
): # noqa: E501
"""HostGroupData - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._hostgroup = None
self._tag = None
self._agentslist = None
self.discriminator = None
if hostgroup is not None:
self.hostgroup = hostgroup
if tag is not None:
self.tag = tag
if agentslist is not None:
self.agentslist = agentslist
@property
def hostgroup(self):
"""Gets the hostgroup of this HostGroupData. # noqa: E501
Host Group name # noqa: E501
:return: The hostgroup of this HostGroupData. # noqa: E501
:rtype: str
"""
return self._hostgroup
@hostgroup.setter
def hostgroup(self, hostgroup):
"""Sets the hostgroup of this HostGroupData.
Host Group name # noqa: E501
:param hostgroup: The hostgroup of this HostGroupData. # noqa: E501
:type: str
"""
self._hostgroup = hostgroup
@property
def tag(self):
"""Gets the tag of this HostGroupData. # noqa: E501
Host Group tag # noqa: E501
:return: The tag of this HostGroupData. # noqa: E501
:rtype: str
"""
return self._tag
@tag.setter
def tag(self, tag):
"""Sets the tag of this HostGroupData.
Host Group tag # noqa: E501
:param tag: The tag of this HostGroupData. # noqa: E501
:type: str
"""
self._tag = tag
@property
def agentslist(self):
"""Gets the agentslist of this HostGroupData. # noqa: E501
Agents list # noqa: E501
:return: The agentslist of this HostGroupData. # noqa: E501
:rtype: list[AgentInGroupParams]
"""
return self._agentslist
@agentslist.setter
def agentslist(self, agentslist):
"""Sets the agentslist of this HostGroupData.
Agents list # noqa: E501
:param agentslist: The agentslist of this HostGroupData. # noqa: E501
:type: list[AgentInGroupParams]
"""
self._agentslist = agentslist
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(HostGroupData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HostGroupData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, HostGroupData):
return True
return self.to_dict() != other.to_dict()
| 27.478022 | 88 | 0.565487 |
8a873f00dcfbdb93174d03b2016b4767148bbd9e | 154 | py | Python | tests/odm/test_sqlite.py | quantmind/lux | 7318fcd86c77616aada41d8182a04339680a554c | ["BSD-3-Clause"] | 21 | 2015-03-28T23:27:43.000Z | 2020-11-23T13:24:10.000Z | tests/odm/test_sqlite.py | quantmind/lux | 7318fcd86c77616aada41d8182a04339680a554c | ["BSD-3-Clause"] | 195 | 2015-02-18T17:22:28.000Z | 2017-12-01T23:01:16.000Z | tests/odm/test_sqlite.py | quantmind/lux | 7318fcd86c77616aada41d8182a04339680a554c | ["BSD-3-Clause"] | 16 | 2015-03-31T23:15:38.000Z | 2017-04-18T11:59:43.000Z |
import tests.odm.test_postgresql as postgresql
from tests.odm.utils import SqliteMixin
class TestSql(SqliteMixin, postgresql.TestPostgreSql):
pass
| 19.25 | 54 | 0.818182 |
4e8d13c8d1d568534d5ea1b9a820686c3ea45080 | 501 | py | Python | tests/system/test_base.py | kussj/cassandrabeat | feaf39f9eddc905780a59bb281983ec5ea28acd8 | ["Apache-2.0"] | null | null | null | tests/system/test_base.py | kussj/cassandrabeat | feaf39f9eddc905780a59bb281983ec5ea28acd8 | ["Apache-2.0"] | null | null | null | tests/system/test_base.py | kussj/cassandrabeat | feaf39f9eddc905780a59bb281983ec5ea28acd8 | ["Apache-2.0"] | null | null | null |
from cassandrabeat import BaseTest
import os
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Cassandrabeat normally
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
cassandrabeat_proc = self.start_beat()
self.wait_until( lambda: self.log_contains("cassandrabeat is running"))
exit_code = cassandrabeat_proc.kill_and_wait()
assert exit_code == 0
| 25.05 | 79 | 0.638723 |
1dfee333e6fa0523611657da2908d959068d44f8 | 4,911 | py | Python | tests/test_fallback_expressions.py | Symmetry-International/m2cgen | 3157e0cbd5bd1ee7e044a992223c60224e2b7709 | ["MIT"] | 1 | 2021-05-28T06:59:21.000Z | 2021-05-28T06:59:21.000Z | tests/test_fallback_expressions.py | Symmetry-International/m2cgen | 3157e0cbd5bd1ee7e044a992223c60224e2b7709 | ["MIT"] | null | null | null | tests/test_fallback_expressions.py | Symmetry-International/m2cgen | 3157e0cbd5bd1ee7e044a992223c60224e2b7709 | ["MIT"] | null | null | null |
from m2cgen import ast
from m2cgen.interpreters import CInterpreter, PythonInterpreter
from tests.utils import assert_code_equal
def test_abs_fallback_expr():
expr = ast.AbsExpr(ast.NumVal(-2.0))
interpreter = CInterpreter()
interpreter.abs_function_name = NotImplemented
expected_code = """
double score(double * input) {
double var0;
double var1;
var1 = -2.0;
if ((var1) < (0.0)) {
var0 = (0.0) - (var1);
} else {
var0 = var1;
}
return var0;
}
"""
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_tanh_fallback_expr():
expr = ast.TanhExpr(ast.NumVal(2.0))
interpreter = PythonInterpreter()
interpreter.tanh_function_name = NotImplemented
expected_code = """
import math
def score(input):
var1 = 2.0
if (var1) > (44.0):
var0 = 1.0
else:
if (var1) < (-44.0):
var0 = -1.0
else:
var0 = (1.0) - ((2.0) / ((math.exp((2.0) * (var1))) + (1.0)))
return var0
"""
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_sqrt_fallback_expr():
expr = ast.SqrtExpr(ast.NumVal(2.0))
interpreter = PythonInterpreter()
interpreter.sqrt_function_name = NotImplemented
expected_code = """
import math
def score(input):
return math.pow(2.0, 0.5)
"""
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_exp_fallback_expr():
expr = ast.ExpExpr(ast.NumVal(2.0))
interpreter = PythonInterpreter()
interpreter.exponent_function_name = NotImplemented
expected_code = """
import math
def score(input):
return math.pow(2.718281828459045, 2.0)
"""
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_log1p_fallback_expr():
expr = ast.Log1pExpr(ast.NumVal(2.0))
interpreter = PythonInterpreter()
interpreter.log1p_function_name = NotImplemented
expected_code = """
import math
def score(input):
var1 = 2.0
var2 = (1.0) + (var1)
var3 = (var2) - (1.0)
if (var3) == (0.0):
var0 = var1
else:
var0 = ((var1) * (math.log(var2))) / (var3)
return var0
"""
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_atan_fallback_expr():
expr = ast.AtanExpr(ast.NumVal(2.0))
interpreter = PythonInterpreter()
interpreter.atan_function_name = NotImplemented
expected_code = (
"""
def score(input):
var1 = 2.0
var2 = abs(var1)
if (var2) > (2.414213562373095):
var0 = (1.0) / (var2)
else:
if (var2) > (0.66):
var0 = ((var2) - (1.0)) / ((var2) + (1.0))
else:
var0 = var2
var3 = var0
var4 = (var3) * (var3)
if (var2) > (2.414213562373095):
var5 = -1.0
else:
var5 = 1.0
if (var2) <= (0.66):
var6 = 0.0
else:
if (var2) > (2.414213562373095):
var6 = 1.5707963267948968
else:
var6 = 0.7853981633974484
if (var1) < (0.0):
var7 = -1.0
else:
var7 = 1.0
return (((((var3) * ((var4) * ((((var4) * (((var4) * (((var4) * """
"""(((var4) * (-0.8750608600031904)) - (16.157537187333652))) - """
"""(75.00855792314705))) - (122.88666844901361))) - """
"""(64.85021904942025)) / ((194.5506571482614) + ((var4) * """
"""((485.3903996359137) + ((var4) * ((432.88106049129027) + """
"""((var4) * ((165.02700983169885) + ((var4) * """
"""((24.858464901423062) + (var4))))))))))))) + (var3)) * """
"""(var5)) + (var6)) * (var7)""")
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_softmax_fallback_expr():
expr = ast.SoftmaxExpr([ast.NumVal(2.0), ast.NumVal(3.0)])
class InterpreterWithoutSoftmax(PythonInterpreter):
softmax_function_name = NotImplemented
def interpret_softmax_expr(self, expr, **kwargs):
return super(PythonInterpreter, self).interpret_softmax_expr(
expr, **kwargs)
interpreter = InterpreterWithoutSoftmax()
expected_code = """
import math
def score(input):
var0 = math.exp(2.0)
var1 = math.exp(3.0)
var2 = (var0) + (var1)
return [(var0) / (var2), (var1) / (var2)]
"""
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_sigmoid_fallback_expr():
expr = ast.SigmoidExpr(ast.NumVal(2.0))
class InterpreterWithoutSigmoid(PythonInterpreter):
sigmoid_function_name = NotImplemented
def interpret_sigmoid_expr(self, expr, **kwargs):
return super(PythonInterpreter, self).interpret_sigmoid_expr(
expr, **kwargs)
interpreter = InterpreterWithoutSigmoid()
expected_code = """
import math
def score(input):
return (1.0) / ((1.0) + (math.exp((0.0) - (2.0))))
"""
assert_code_equal(interpreter.interpret(expr), expected_code)
| 25.184615 | 75 | 0.604358 |
10252181dfd15e2891365d5cf56448ee9a38231d | 1,887 | py | Python | autokeras/hyper_preprocessors.py | jwliou/autokeras | f9b4cdf445368d69f776317f372b09b66f45b69e | ["Apache-2.0"] | 4,704 | 2017-12-03T02:40:27.000Z | 2019-12-19T23:23:34.000Z | autokeras/hyper_preprocessors.py | jwliou/autokeras | f9b4cdf445368d69f776317f372b09b66f45b69e | ["Apache-2.0"] | 541 | 2018-02-13T21:59:58.000Z | 2019-04-01T15:21:57.000Z | autokeras/hyper_preprocessors.py | jwliou/autokeras | f9b4cdf445368d69f776317f372b09b66f45b69e | ["Apache-2.0"] | 738 | 2018-02-07T03:01:13.000Z | 2019-12-19T23:23:36.000Z |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from autokeras import preprocessors
from autokeras.engine import hyper_preprocessor
def serialize(encoder):
return keras.utils.serialize_keras_object(encoder)
def deserialize(config, custom_objects=None):
return keras.utils.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="preprocessors",
)
class DefaultHyperPreprocessor(hyper_preprocessor.HyperPreprocessor):
"""HyperPreprocessor without Hyperparameters to tune.
It would always return the same preprocessor. No hyperparameters to be
tuned.
# Arguments
preprocessor: The Preprocessor to return when calling build.
"""
def __init__(self, preprocessor, *args, **kwargs):
super().__init__(*args, **kwargs)
self.preprocessor = preprocessor
def build(self, hp, dataset):
return self.preprocessor
def get_config(self):
config = super().get_config()
config.update({"preprocessor": preprocessors.serialize(self.preprocessor)})
return config
@classmethod
def from_config(cls, config):
config["preprocessor"] = preprocessors.deserialize(config["preprocessor"])
return super().from_config(config)
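# Hedged usage sketch (not part of the original module): wrapping an existing
# Preprocessor instance so the tuner sees no extra hyperparameters. The
# `my_preprocessor` name below is a placeholder, not an AutoKeras API.
#
#     hyper_pp = DefaultHyperPreprocessor(my_preprocessor)
#     same_pp = hyper_pp.build(hp=None, dataset=None)  # always returns my_preprocessor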
| 31.983051 | 83 | 0.72973 |
fe3c7a998c371a89535d41f098a60635a861c1d9 | 13,331 | py | Python | autokeras/image_supervised.py | Bennnun/autokeras | 8cd0b8dd5627e41ba31ea1f099a7c2ae469f902f | ["MIT"] | null | null | null | autokeras/image_supervised.py | Bennnun/autokeras | 8cd0b8dd5627e41ba31ea1f099a7c2ae469f902f | ["MIT"] | null | null | null | autokeras/image_supervised.py | Bennnun/autokeras | 8cd0b8dd5627e41ba31ea1f099a7c2ae469f902f | ["MIT"] | null | null | null |
import csv
import os
import pickle
import time
from abc import abstractmethod
from functools import reduce
import numpy as np
from scipy import ndimage
import torch
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from autokeras.loss_function import classification_loss, regression_loss
from autokeras.supervised import Supervised
from autokeras.constant import Constant
from autokeras.metric import Accuracy, MSE
from autokeras.preprocessor import OneHotEncoder, DataTransformer
from autokeras.search import Searcher, train
from autokeras.utils import ensure_dir, has_file, pickle_from_file, pickle_to_file, temp_folder_generator
def _validate(x_train, y_train):
"""Check `x_train`'s type and the shape of `x_train`, `y_train`."""
try:
x_train = x_train.astype('float64')
except ValueError:
raise ValueError('x_train should only contain numerical data.')
if len(x_train.shape) < 2:
raise ValueError('x_train should at least has 2 dimensions.')
if x_train.shape[0] != y_train.shape[0]:
raise ValueError('x_train and y_train should have the same number of instances.')
def run_searcher_once(train_data, test_data, path, timeout):
if Constant.LIMIT_MEMORY:
pass
searcher = pickle_from_file(os.path.join(path, 'searcher'))
searcher.search(train_data, test_data, timeout)
def read_csv_file(csv_file_path):
"""Read the csv file and returns two separate list containing files name and their labels.
Args:
csv_file_path: Path to the CSV file.
Returns:
file_names: List containing files names.
file_label: List containing their respective labels.
"""
file_names = []
file_labels = []
with open(csv_file_path, 'r') as files_path:
path_list = csv.DictReader(files_path)
fieldnames = path_list.fieldnames
for path in path_list:
file_names.append(path[fieldnames[0]])
file_labels.append(path[fieldnames[1]])
return file_names, file_labels
def read_images(img_file_names, images_dir_path):
"""Read the images from the path and return their numpy.ndarray instance.
Return a numpy.ndarray instance containing the training data.
Args:
        img_file_names: List containing the image file names.
images_dir_path: Path to the directory containing images.
"""
x_train = []
if os.path.isdir(images_dir_path):
for img_file in img_file_names:
img_path = os.path.join(images_dir_path, img_file)
if os.path.exists(img_path):
img = ndimage.imread(fname=img_path)
if len(img.shape) < 3:
img = img[..., np.newaxis]
x_train.append(img)
else:
raise ValueError("%s image does not exist" % img_file)
else:
raise ValueError("Directory containing images does not exist")
return np.asanyarray(x_train)
def load_image_dataset(csv_file_path, images_path):
"""Load images from the files and labels from a csv file.
    The dataset is a set of images and the labels are in a CSV file.
The CSV file should contain two columns whose names are 'File Name' and 'Label'.
The file names in the first column should match the file names of the images with extensions,
e.g., .jpg, .png.
The path to the CSV file should be passed through the `csv_file_path`.
The path to the directory containing all the images should be passed through `image_path`.
Args:
csv_file_path: CSV file path.
images_path: Path where images exist.
Returns:
x: Four dimensional numpy.ndarray. The channel dimension is the last dimension.
y: The labels.
"""
img_file_name, y = read_csv_file(csv_file_path)
x = read_images(img_file_name, images_path)
return np.array(x), np.array(y)
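# Illustrative call for the CSV + image-directory layout described in the
# docstring above (paths are hypothetical placeholders):
#
#     x, y = load_image_dataset(csv_file_path='train/labels.csv',
#                               images_path='train/images')
#     # x is a 4-D numpy array (num_images, height, width, channels); y holds the labels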
class ImageSupervised(Supervised):
"""The image classifier class.
It is used for image classification. It searches convolutional neural network architectures
for the best configuration for the dataset.
Attributes:
path: A path to the directory to save the classifier.
y_encoder: An instance of OneHotEncoder for `y_train` (array of categorical labels).
verbose: A boolean value indicating the verbosity mode.
searcher: An instance of BayesianSearcher. It searches different
neural architecture to find the best model.
searcher_args: A dictionary containing the parameters for the searcher's __init__ function.
augment: A boolean value indicating whether the data needs augmentation.
"""
def __init__(self, verbose=False, path=None, resume=False, searcher_args=None, augment=None):
"""Initialize the instance.
The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
Otherwise it would create a new one.
Args:
verbose: A boolean of whether the search process will be printed to stdout.
path: A string. The path to a directory, where the intermediate results are saved.
resume: A boolean. If True, the classifier will continue to previous work saved in path.
Otherwise, the classifier will start a new search.
augment: A boolean value indicating whether the data needs augmentation.
"""
super().__init__(verbose)
if searcher_args is None:
searcher_args = {}
if path is None:
path = temp_folder_generator()
if augment is None:
augment = Constant.DATA_AUGMENTATION
if has_file(os.path.join(path, 'classifier')) and resume:
classifier = pickle_from_file(os.path.join(path, 'classifier'))
self.__dict__ = classifier.__dict__
self.path = path
else:
self.y_encoder = None
self.data_transformer = None
self.verbose = verbose
self.searcher = False
self.path = path
self.searcher_args = searcher_args
self.augment = augment
ensure_dir(path)
@property
@abstractmethod
def metric(self):
pass
@property
@abstractmethod
def loss(self):
pass
def fit(self, x_train=None, y_train=None, time_limit=None):
"""Find the best neural architecture and train it.
Based on the given dataset, the function will find the best neural architecture for it.
The dataset is in numpy.ndarray format.
        So the training data should be passed through `x_train`, `y_train`.
Args:
x_train: A numpy.ndarray instance containing the training data.
y_train: A numpy.ndarray instance containing the label of the training data.
time_limit: The time limit for the search in seconds.
"""
if y_train is None:
y_train = []
if x_train is None:
x_train = []
x_train = np.array(x_train)
y_train = np.array(y_train).flatten()
_validate(x_train, y_train)
y_train = self.transform_y(y_train)
# Transform x_train
if self.data_transformer is None:
self.data_transformer = DataTransformer(x_train, augment=self.augment)
# Create the searcher and save on disk
if not self.searcher:
input_shape = x_train.shape[1:]
self.searcher_args['n_output_node'] = self.get_n_output_node()
self.searcher_args['input_shape'] = input_shape
self.searcher_args['path'] = self.path
self.searcher_args['metric'] = self.metric
self.searcher_args['loss'] = self.loss
self.searcher_args['verbose'] = self.verbose
searcher = Searcher(**self.searcher_args)
self.save_searcher(searcher)
self.searcher = True
# Divide training data into training and testing data.
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train,
test_size=min(Constant.VALIDATION_SET_SIZE,
int(len(y_train) * 0.2)),
random_state=42)
# Wrap the data into DataLoaders
train_data = self.data_transformer.transform_train(x_train, y_train)
test_data = self.data_transformer.transform_test(x_test, y_test)
# Save the classifier
pickle.dump(self, open(os.path.join(self.path, 'classifier'), 'wb'))
pickle_to_file(self, os.path.join(self.path, 'classifier'))
if time_limit is None:
time_limit = 24 * 60 * 60
start_time = time.time()
time_remain = time_limit
try:
while time_remain > 0:
run_searcher_once(train_data, test_data, self.path, int(time_remain))
if len(self.load_searcher().history) >= Constant.MAX_MODEL_NUM:
break
time_elapsed = time.time() - start_time
time_remain = time_limit - time_elapsed
# if no search executed during the time_limit, then raise an error
if time_remain <= 0:
raise TimeoutError
except TimeoutError:
if len(self.load_searcher().history) == 0:
raise TimeoutError("Search Time too short. No model was found during the search time.")
elif self.verbose:
print('Time is out.')
@abstractmethod
def get_n_output_node(self):
pass
def transform_y(self, y_train):
return y_train
def predict(self, x_test):
"""Return predict results for the testing data.
Args:
x_test: An instance of numpy.ndarray containing the testing data.
Returns:
A numpy.ndarray containing the results.
"""
if Constant.LIMIT_MEMORY:
pass
test_loader = self.data_transformer.transform_test(x_test)
model = self.load_searcher().load_best_model().produce_model()
model.eval()
outputs = []
with torch.no_grad():
for index, inputs in enumerate(test_loader):
outputs.append(model(inputs).numpy())
output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
return self.inverse_transform_y(output)
def inverse_transform_y(self, output):
return output
def evaluate(self, x_test, y_test):
"""Return the accuracy score between predict value and `y_test`."""
y_predict = self.predict(x_test)
return accuracy_score(y_test, y_predict)
def save_searcher(self, searcher):
pickle.dump(searcher, open(os.path.join(self.path, 'searcher'), 'wb'))
def load_searcher(self):
return pickle_from_file(os.path.join(self.path, 'searcher'))
def final_fit(self, x_train, y_train, x_test, y_test, trainer_args=None, retrain=False):
"""Final training after found the best architecture.
Args:
x_train: A numpy.ndarray of training data.
y_train: A numpy.ndarray of training targets.
x_test: A numpy.ndarray of testing data.
y_test: A numpy.ndarray of testing targets.
trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.
retrain: A boolean of whether reinitialize the weights of the model.
"""
if trainer_args is None:
trainer_args = {'max_no_improvement_num': 30}
y_train = self.transform_y(y_train)
y_test = self.transform_y(y_test)
train_data = self.data_transformer.transform_train(x_train, y_train)
test_data = self.data_transformer.transform_test(x_test, y_test)
searcher = self.load_searcher()
graph = searcher.load_best_model()
if retrain:
graph.weighted = False
_, _1, graph = train((graph, train_data, test_data, trainer_args, None, self.metric, self.loss, self.verbose))
def get_best_model_id(self):
""" Return an integer indicating the id of the best model."""
return self.load_searcher().get_best_model_id()
class ImageClassifier(ImageSupervised):
@property
def loss(self):
return classification_loss
def transform_y(self, y_train):
# Transform y_train.
if self.y_encoder is None:
self.y_encoder = OneHotEncoder()
self.y_encoder.fit(y_train)
y_train = self.y_encoder.transform(y_train)
return y_train
def inverse_transform_y(self, output):
return self.y_encoder.inverse_transform(output)
def get_n_output_node(self):
return self.y_encoder.n_classes
@property
def metric(self):
return Accuracy
class ImageRegressor(ImageSupervised):
@property
def loss(self):
return regression_loss
@property
def metric(self):
return MSE
def get_n_output_node(self):
return 1
def transform_y(self, y_train):
return y_train.flatten().reshape(len(y_train), 1)
def inverse_transform_y(self, output):
return output.flatten()
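# Hedged end-to-end sketch of the API defined above (x_train/y_train/x_test/y_test
# are placeholders for user-supplied numpy arrays, not provided here):
#
#     clf = ImageClassifier(verbose=True)
#     clf.fit(x_train, y_train, time_limit=60 * 60)
#     clf.final_fit(x_train, y_train, x_test, y_test, retrain=True)
#     print(clf.evaluate(x_test, y_test))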
| 36.225543 | 118 | 0.647963 |
97d5ae1f5dc76405bc0f01803d647534988f1616 | 3,229 | py | Python | research/object_detection/generate_tfrecord.py | stesha2016/models | addb5baa31875232b6b8f5b61d0522de56a79f08 | ["Apache-2.0"] | 1 | 2021-07-17T11:33:57.000Z | 2021-07-17T11:33:57.000Z | research/object_detection/generate_tfrecord.py | stesha2016/models | addb5baa31875232b6b8f5b61d0522de56a79f08 | ["Apache-2.0"] | null | null | null | research/object_detection/generate_tfrecord.py | stesha2016/models | addb5baa31875232b6b8f5b61d0522de56a79f08 | ["Apache-2.0"] | 1 | 2019-07-24T03:52:18.000Z | 2019-07-24T03:52:18.000Z |
""" Usage: # From tensorflow/models/
# Create train data: python generate_tfrecord.py
--csv_input=images/train_labels.csv
--image_dir=images/train
--output_path=train.record
    # Create test data: python generate_tfrecord.py
        --csv_input=images/test_labels.csv
        --image_dir=images/test
        --output_path=test.record """
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string('dataset', 'boxes', 'dataset name such as boxes')
FLAGS = flags.FLAGS
def class_text_to_int(row_label):
if row_label == 'box':
return 1
    else:
        # Labels other than 'box' are not mapped to a class id.
        return None
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
def create_tf_example(group, path):
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(class_text_to_int(row['class']))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def main(_):
for set in ['train', 'test']:
output_path = './project_images/{}/{}.tfrecord'.format(FLAGS.dataset, set)
image_dir = 'project_images/{}/{}'.format(FLAGS.dataset, set)
csv_input = './project_images/{}/{}_labels.csv'.format(FLAGS.dataset, set)
writer = tf.python_io.TFRecordWriter(output_path)
path = os.path.join(os.getcwd(), image_dir)
examples = pd.read_csv(csv_input)
grouped = split(examples, 'filename')
for group in grouped:
tf_example = create_tf_example(group, path)
writer.write(tf_example.SerializeToString())
writer.close()
print('Successfully created the TFRecords: {}'.format(output_path))
if __name__ == '__main__':
tf.app.run()
| 34.72043 | 136 | 0.743574 |
769801b46e9f88419eea9f92be1c8656bf986912 | 2,364 | py | Python | MH.py | EricBlythe/RSA | 25b87c17ec56a21488c3f84dcc77a847f2dc5ef2 | ["MIT"] | 1 | 2021-07-01T13:57:29.000Z | 2021-07-01T13:57:29.000Z | MH.py | EricBlythe/RSA | 25b87c17ec56a21488c3f84dcc77a847f2dc5ef2 | ["MIT"] | null | null | null | MH.py | EricBlythe/RSA | 25b87c17ec56a21488c3f84dcc77a847f2dc5ef2 | ["MIT"] | null | null | null |
def gcd(a,b):
a,b=abs(a), abs(b)
if a<b:
t=a
a=b
b=t
if a%b==0:
return b
else:
return gcd(a%b, b)
class Fraction:
def __init__(self,a,b):
if a==0:
self.a=0
self.b=1
return
if a*b<0:
a=-abs(a)
b=abs(b)
d=gcd(a,b)
a=int(a/d)
b=int(b/d)
self.a=a
self.b=b
def __repr__(self):
return str(self.a)+'/'+str(self.b)
def __str__(self):
return str(self.a)+'/'+str(self.b)
def __add__(self,other):
return Fraction(self.a*other.b+self.b*other.a, self.b*other.b)
def __sub__(self, other):
return Fraction(self.a*other.b-self.b*other.a, self.b*other.b)
def __neg__(self):
return Fraction(0,1)-self
def __mul__(self, other):
return Fraction(self.a*other.a,self.b*other.b)
def __truediv__(self, other):
return self*Fraction(other.b,other.a)
def __pow__(self, other):
return (self.a/self.b)**other
def __eq__(self,other):
if self.a==other.a and self.b==other.b:
return True
return False
def __gt__(self, other):
if (self-other).a>0:
return True
return False
def Euc(a,b):
x=0
if a<b:
t=a
a=b
b=t
x=1
if a%b==0:
M=1
N=-a//b+1
if x==1:
t=M
M=N
N=t
return [M,N]
else:
r=a%b
q=(a-a%b)//b
m=Euc(r,b)[0]
n=Euc(r,b)[1]
M=m
N=n-q*m
if x==1:
t=M
M=N
N=t
return [M,N]
def inv(a,p):
x=Euc(a,p)[0]
x=x%p
return x
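# Worked example for the helpers above: Euc(3, 7) returns the Bezout pair
# [5, -2] (3*5 + 7*(-2) == 1), so inv(3, 7) == 5 because 3*5 % 7 == 1.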
def prime(N):
N=N-2
A=2
P=[]
P.append(A)
A=3
P.append(A)
while A<=N:
A=A+2
t=2+N**0.5
t=int(t)
for p in P:
if p>t:
break
if A%p==0:
break
if A%p==0:
continue
P.append(A)
return P
def RIntegral(f, lower, higher):
step=0.001
a=lower
result=0
while a<higher:
result+=step*(f(a)+f(a+step))/2
a+=step
return result
| 20.556522 | 71 | 0.416244 |
f8431aca0f5b810642bb4e9ef2d4ce581a0d28a8 | 537 | py | Python | mmdet/models/detectors/solov2.py | Joxis/mmdetection | ad14c6e820f7516c3b31514fea271ba5c46e8fdb | ["Apache-2.0"] | null | null | null | mmdet/models/detectors/solov2.py | Joxis/mmdetection | ad14c6e820f7516c3b31514fea271ba5c46e8fdb | ["Apache-2.0"] | null | null | null | mmdet/models/detectors/solov2.py | Joxis/mmdetection | ad14c6e820f7516c3b31514fea271ba5c46e8fdb | ["Apache-2.0"] | null | null | null |
from .single_stage_ins import SingleStageInsDetector
from ..builder import DETECTORS
@DETECTORS.register_module
class SOLOv2(SingleStageInsDetector):
def __init__(self,
backbone,
neck,
bbox_head,
mask_feat_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SOLOv2, self).__init__(backbone, neck, bbox_head, mask_feat_head,
train_cfg, test_cfg, pretrained)
| 29.833333 | 79 | 0.56797 |
2a5cd013563e07934fdfe94f81b94a324ca01984 | 101 | py | Python | python/rpdk/python/__init__.py | RG4421/cloudformation-cli-python-plugin | a53dc519764f7b876e1cc3cef4b9daea70822887 | ["Apache-2.0"] | null | null | null | python/rpdk/python/__init__.py | RG4421/cloudformation-cli-python-plugin | a53dc519764f7b876e1cc3cef4b9daea70822887 | ["Apache-2.0"] | null | null | null | python/rpdk/python/__init__.py | RG4421/cloudformation-cli-python-plugin | a53dc519764f7b876e1cc3cef4b9daea70822887 | ["Apache-2.0"] | 2 | 2020-09-01T16:42:30.000Z | 2021-09-28T05:20:36.000Z |
import logging
__version__ = "2.1.0"
logging.getLogger(__name__).addHandler(logging.NullHandler())
| 16.833333 | 61 | 0.782178 |
a5aa0be7303ab8e928d16b283a18bcd25cac9c51 | 5,638 | py | Python | app/model/utils.py | othmanKisha/In-Door-Face-Mask-Inspector | d094ff80f8ed16285d668905a081c237d4345919 | ["MIT"] | 3 | 2020-12-06T17:03:57.000Z | 2021-01-01T13:11:39.000Z | app/model/utils.py | Alnasser0/In-Door-Face-Mask-Inspector-1 | d094ff80f8ed16285d668905a081c237d4345919 | ["MIT"] | null | null | null | app/model/utils.py | Alnasser0/In-Door-Face-Mask-Inspector-1 | d094ff80f8ed16285d668905a081c237d4345919 | ["MIT"] | 1 | 2021-01-21T18:55:05.000Z | 2021-01-21T18:55:05.000Z |
# -*- encoding=utf-8
# Code from this Repository: https://github.com/AIZOOTech/FaceMaskDetection
import numpy as np
def generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios, offset=0.5):
'''
generate anchors.
:param feature_map_sizes: list of list, for example: [[40,40], [20,20]]
:param anchor_sizes: list of list, for example: [[0.05, 0.075], [0.1, 0.15]]
:param anchor_ratios: list of list, for example: [[1, 0.5], [1, 0.5]]
:param offset: default to 0.5
:return:
'''
anchor_bboxes = []
for idx, feature_size in enumerate(feature_map_sizes):
cx = (np.linspace(0, feature_size[0] - 1, feature_size[0]) + 0.5) / feature_size[0]
cy = (np.linspace(0, feature_size[1] - 1, feature_size[1]) + 0.5) / feature_size[1]
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid_expend = np.expand_dims(cx_grid, axis=-1)
cy_grid_expend = np.expand_dims(cy_grid, axis=-1)
center = np.concatenate((cx_grid_expend, cy_grid_expend), axis=-1)
num_anchors = len(anchor_sizes[idx]) + len(anchor_ratios[idx]) - 1
center_tiled = np.tile(center, (1, 1, 2* num_anchors))
anchor_width_heights = []
# different scales with the first aspect ratio
for scale in anchor_sizes[idx]:
ratio = anchor_ratios[idx][0] # select the first ratio
width = scale * np.sqrt(ratio)
height = scale / np.sqrt(ratio)
anchor_width_heights.extend([-width / 2.0, -height / 2.0, width / 2.0, height / 2.0])
# the first scale, with different aspect ratios (except the first one)
for ratio in anchor_ratios[idx][1:]:
s1 = anchor_sizes[idx][0] # select the first scale
width = s1 * np.sqrt(ratio)
height = s1 / np.sqrt(ratio)
anchor_width_heights.extend([-width / 2.0, -height / 2.0, width / 2.0, height / 2.0])
bbox_coords = center_tiled + np.array(anchor_width_heights)
bbox_coords_reshape = bbox_coords.reshape((-1, 4))
anchor_bboxes.append(bbox_coords_reshape)
anchor_bboxes = np.concatenate(anchor_bboxes, axis=0)
return anchor_bboxes
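# Minimal sketch of a call using the example shapes from the docstring above
# (values are illustrative only):
#
#     anchors = generate_anchors(feature_map_sizes=[[40, 40], [20, 20]],
#                                anchor_sizes=[[0.05, 0.075], [0.1, 0.15]],
#                                anchor_ratios=[[1, 0.5], [1, 0.5]])
#     # anchors has shape (N, 4); each row is [xmin, ymin, xmax, ymax]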
def decode_bbox(anchors, raw_outputs, variances=[0.1, 0.1, 0.2, 0.2]):
'''
Decode the actual bbox according to the anchors.
    The anchor value order is: [xmin, ymin, xmax, ymax].
:param anchors: numpy array with shape [batch, num_anchors, 4]
:param raw_outputs: numpy array with the same shape with anchors
:param variances: list of float, default=[0.1, 0.1, 0.2, 0.2]
:return:
'''
anchor_centers_x = (anchors[:, :, 0:1] + anchors[:, :, 2:3]) / 2
anchor_centers_y = (anchors[:, :, 1:2] + anchors[:, :, 3:]) / 2
anchors_w = anchors[:, :, 2:3] - anchors[:, :, 0:1]
anchors_h = anchors[:, :, 3:] - anchors[:, :, 1:2]
raw_outputs_rescale = raw_outputs * np.array(variances)
predict_center_x = raw_outputs_rescale[:, :, 0:1] * anchors_w + anchor_centers_x
predict_center_y = raw_outputs_rescale[:, :, 1:2] * anchors_h + anchor_centers_y
predict_w = np.exp(raw_outputs_rescale[:, :, 2:3]) * anchors_w
predict_h = np.exp(raw_outputs_rescale[:, :, 3:]) * anchors_h
predict_xmin = predict_center_x - predict_w / 2
predict_ymin = predict_center_y - predict_h / 2
predict_xmax = predict_center_x + predict_w / 2
predict_ymax = predict_center_y + predict_h / 2
predict_bbox = np.concatenate([predict_xmin, predict_ymin, predict_xmax, predict_ymax], axis=-1)
return predict_bbox
def single_class_non_max_suppression(bboxes, confidences, conf_thresh=0.2, iou_thresh=0.5, keep_top_k=-1):
'''
do nms on single class.
Hint: for the specific class, given the bbox and its confidence,
1) sort the bbox according to the confidence from top to down, we call this a set
2) select the bbox with the highest confidence, remove it from set, and do IOU calculate with the rest bbox
3) remove the bbox whose IOU is higher than the iou_thresh from the set,
    4) loop step 2 and 3, until the set is empty.
:param bboxes: numpy array of 2D, [num_bboxes, 4]
:param confidences: numpy array of 1D. [num_bboxes]
:param conf_thresh:
:param iou_thresh:
:param keep_top_k:
:return:
'''
if len(bboxes) == 0: return []
conf_keep_idx = np.where(confidences > conf_thresh)[0]
bboxes = bboxes[conf_keep_idx]
confidences = confidences[conf_keep_idx]
pick = []
xmin = bboxes[:, 0]
ymin = bboxes[:, 1]
xmax = bboxes[:, 2]
ymax = bboxes[:, 3]
area = (xmax - xmin + 1e-3) * (ymax - ymin + 1e-3)
idxs = np.argsort(confidences)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# keep top k
if keep_top_k != -1:
if len(pick) >= keep_top_k:
break
overlap_xmin = np.maximum(xmin[i], xmin[idxs[:last]])
overlap_ymin = np.maximum(ymin[i], ymin[idxs[:last]])
overlap_xmax = np.minimum(xmax[i], xmax[idxs[:last]])
overlap_ymax = np.minimum(ymax[i], ymax[idxs[:last]])
overlap_w = np.maximum(0, overlap_xmax - overlap_xmin)
overlap_h = np.maximum(0, overlap_ymax - overlap_ymin)
overlap_area = overlap_w * overlap_h
overlap_ratio = overlap_area / (area[idxs[:last]] + area[i] - overlap_area)
need_to_be_deleted_idx = np.concatenate(([last], np.where(overlap_ratio > iou_thresh)[0]))
idxs = np.delete(idxs, need_to_be_deleted_idx)
# if the number of final bboxes is less than keep_top_k, we need to pad it.
# TODO
return conf_keep_idx[pick]
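# Sketch of how the NMS routine above is typically driven (bboxes/confidences
# are placeholders for detector outputs, not defined in this module):
#
#     keep = single_class_non_max_suppression(bboxes, confidences,
#                                             conf_thresh=0.5, iou_thresh=0.4)
#     kept_boxes = bboxes[keep]  # returned indices refer back to the original bboxes array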
| 43.038168 | 111 | 0.643313 |
502422d75e51be247286f1aa47b5ade27441fea5 | 11,649 | py | Python | MetamorphicTests/all_mutants/sales_forecasting_file/155.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | ["Apache-2.0"] | null | null | null | MetamorphicTests/all_mutants/sales_forecasting_file/155.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | ["Apache-2.0"] | null | null | null | MetamorphicTests/all_mutants/sales_forecasting_file/155.py | anuragbms/Sales-forecasting-with-RNNs | 22b4639ecbb48381af53326ace94a3538201b586 | ["Apache-2.0"] | 1 | 2022-02-06T14:59:43.000Z | 2022-02-06T14:59:43.000Z |
def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=1.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
print('the forecast of first 5 original scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'loss': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis')
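# Illustrative only, not part of the original script: a minimal sketch of how the SavedModel written
# above by tf.saved_model.simple_save could be restored for inference in TensorFlow 1.x. The sequence
# length and feature count below are assumptions and must match the training input placeholder; the
# signature keys ('inputSequencePlaceholder', 'forecast_originalScale') are the ones passed to simple_save.
import numpy as np
import tensorflow as tf

ASSUMED_SEQUENCE_LENGTH = 30   # assumption: must equal the training sequence length
ASSUMED_NUM_FEATURES = 1       # assumption: must equal the per-step feature count

with tf.Session(graph=tf.Graph()) as sess:
    # simple_save stores the graph under the SERVING tag with a 'serving_default' signature
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], TRAINED_MODEL_PATH)
    signature = meta_graph.signature_def['serving_default']
    input_name = signature.inputs['inputSequencePlaceholder'].name
    output_name = signature.outputs['forecast_originalScale'].name
    dummy_batch = np.zeros((1, ASSUMED_SEQUENCE_LENGTH, ASSUMED_NUM_FEATURES), dtype=np.float32)
    prediction = sess.run(output_name, feed_dict={input_name: dummy_batch})
    print('forecast (original scale):', prediction)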
| 31.230563 | 232 | 0.629668 |
bef213c633a620d295b147bfa27439b22c57ecc4 | 6,782 | py | Python | glue/core/tests/test_hub.py | bsipocz/glue | 7b7e4879b4c746b2419a0eca2a17c2d07a3fded3 | ["BSD-3-Clause"] | null | null | null | glue/core/tests/test_hub.py | bsipocz/glue | 7b7e4879b4c746b2419a0eca2a17c2d07a3fded3 | ["BSD-3-Clause"] | null | null | null | glue/core/tests/test_hub.py | bsipocz/glue | 7b7e4879b4c746b2419a0eca2a17c2d07a3fded3 | ["BSD-3-Clause"] | null | null | null |
#pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import pytest
from mock import MagicMock
from ..exceptions import InvalidSubscriber, InvalidMessage
from ..message import SubsetMessage, Message
from ..hub import Hub, HubListener
from ..subset import Subset
from ..data import Data
from ..data_collection import DataCollection
class TestHub(object):
def setup_method(self, method):
self.hub = Hub()
def get_subscription(self):
msg = Message
handler = MagicMock()
subscriber = MagicMock(spec_set=HubListener)
return msg, handler, subscriber
def test_subscribe(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
assert self.hub.is_subscribed(subscriber, msg)
assert self.hub.get_handler(subscriber, msg) == handler
def test_get_handler(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
assert self.hub.get_handler(subscriber, msg) == handler
assert self.hub.get_handler(subscriber, None) is None
assert self.hub.get_handler(None, msg) is None
def test_unsubscribe(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
self.hub.unsubscribe(subscriber, msg)
assert not self.hub.is_subscribed(subscriber, msg)
assert self.hub.get_handler(subscriber, msg) is None
def test_unsubscribe_all(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber, msg2, handler)
self.hub.unsubscribe_all(subscriber)
assert not self.hub.is_subscribed(subscriber, msg)
assert not self.hub.is_subscribed(subscriber, msg2)
def test_unsubscribe_specific_to_message(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber, msg2, handler)
self.hub.unsubscribe(subscriber, msg)
assert not self.hub.is_subscribed(subscriber, msg)
assert self.hub.is_subscribed(subscriber, msg2)
def test_broadcast(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
handler.assert_called_once_with(msg_instance)
def test_unsubscribe_halts_broadcast(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
self.hub.unsubscribe(subscriber, msg)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
assert handler.call_count == 0
def test_subscription_is_specific_to_message(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg2, handler)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
assert handler.call_count == 0
def test_subscription_catches_message_subclasses(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
msg_instance = msg2(MagicMock(spec_set=Subset))
self.hub.broadcast(msg_instance)
handler.assert_called_once_with(msg_instance)
def test_handler_ignored_if_subset_handler_present(self):
msg, handler, subscriber = self.get_subscription()
handler2 = MagicMock()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber, msg2, handler2)
msg_instance = SubsetMessage(Subset(None))
self.hub.broadcast(msg_instance)
handler2.assert_called_once_with(msg_instance)
assert handler.call_count == 0
def test_filter(self):
msg, handler, subscriber = self.get_subscription()
filter = lambda x: False
self.hub.subscribe(subscriber, msg, handler, filter)  # the filter rejects everything
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
assert handler.call_count == 0
def test_broadcast_sends_to_all_subscribers(self):
msg, handler, subscriber = self.get_subscription()
msg, handler2, subscriber2 = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber2, msg, handler2)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
handler.assert_called_once_with(msg_instance)
handler2.assert_called_once_with(msg_instance)
def test_invalid_unsubscribe_ignored(self):
msg, handler, subscriber = self.get_subscription()
self.hub.unsubscribe(handler, subscriber)
def test_invalid_subscribe(self):
msg, handler, subscriber = self.get_subscription()
with pytest.raises(InvalidSubscriber) as exc:
self.hub.subscribe(None, msg, handler)
assert exc.value.args[0].startswith("Subscriber must be a HubListener")
with pytest.raises(InvalidMessage) as exc:
self.hub.subscribe(subscriber, None, handler)
assert exc.value.args[0].startswith("message class must be "
"a subclass of glue.Message")
def test_default_handler(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
subscriber.notify.assert_called_once_with(msg_instance)
def test_autosubscribe(self):
l = MagicMock(spec_set=HubListener)
d = MagicMock(spec_set=Data)
s = MagicMock(spec_set=Subset)
dc = MagicMock(spec_set=DataCollection)
hub = Hub(l, d, s, dc)
l.register_to_hub.assert_called_once_with(hub)
d.register_to_hub.assert_called_once_with(hub)
dc.register_to_hub.assert_called_once_with(hub)
s.register.assert_called_once_with()
def test_invalid_init(self):
with pytest.raises(TypeError) as exc:
Hub(None)
assert exc.value.args[0] == ("Inputs must be HubListener, data, "
"subset, or data collection objects")
class TestHubListener(object):
"""This is a dumb test, I know. Fixated on code coverage"""
def test_unimplemented(self):
hl = HubListener()
with pytest.raises(NotImplementedError):
hl.register_to_hub(None)
with pytest.raises(NotImplementedError):
hl.notify(None)
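# Illustrative only, not part of the test module above: a minimal concrete HubListener, showing the
# register_to_hub/notify contract that the tests exercise through MagicMock(spec_set=HubListener).
class PrintingListener(HubListener):
    """Toy listener: subscribes to the base Message class and prints whatever arrives."""

    def register_to_hub(self, hub):
        # subclass messages are delivered too (see test_subscription_catches_message_subclasses)
        hub.subscribe(self, Message, handler=self.notify)

    def notify(self, message):
        print('received:', message)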
| 38.534091 | 79 | 0.680183 |
9e5e2e5abb5a02221b2950ed094527ad8d7acf32 | 18,030 | py | Python | test/test_LensModel/test_Profiles/test_chameleon.py | DarthLazar/lenstronomy | 64f72a89bb08ef19d3641b7e5e048238632e9094 | ["MIT"] | null | null | null | test/test_LensModel/test_Profiles/test_chameleon.py | DarthLazar/lenstronomy | 64f72a89bb08ef19d3641b7e5e048238632e9094 | ["MIT"] | 1 | 2022-02-26T21:04:47.000Z | 2022-02-26T21:04:47.000Z | test/test_LensModel/test_Profiles/test_chameleon.py | DarthLazar/lenstronomy | 64f72a89bb08ef19d3641b7e5e048238632e9094 | ["MIT"] | 1 | 2022-02-08T20:31:45.000Z | 2022-02-08T20:31:45.000Z |
import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.LensModel.Profiles.nie import NIE
from lenstronomy.LensModel.Profiles.chameleon import Chameleon, DoubleChameleon, DoubleChameleonPointMass, TripleChameleon
from lenstronomy.LightModel.Profiles.chameleon import DoubleChameleon as DoubleChameleonLight
from lenstronomy.LightModel.Profiles.chameleon import TripleChameleon as TripleChameleonLight
import lenstronomy.Util.param_util as param_util
class TestChameleon(object):
"""
class to test the Chameleon profile
"""
def setup(self):
self.chameleon = Chameleon()
self.nie = NIE()
def test_theta_E_convert(self):
w_c, w_t = 2, 1
theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=w_c, w_t=w_t, e1=0, e2=0)
assert w_c == 1
assert w_t == 2
assert theta_E_convert == 0
def test_function(self):
"""
:return:
"""
x = np.linspace(0.1, 10, 10)
w_c, w_t = 0.5, 1.
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=0.5, w_t=1., e1=e1, e2=e2)
kwargs_1 = {'theta_E': theta_E_convert, 's_scale': s_scale_1, 'e1': e1, 'e2': e2}
kwargs_2 = {'theta_E': theta_E_convert, 's_scale': s_scale_2, 'e1': e1, 'e2': e2}
f_ = self.chameleon.function(x=x, y=1., **kwargs_light)
f_1 = self.nie.function(x=x, y=1., **kwargs_1)
f_2 = self.nie.function(x=x, y=1., **kwargs_2)
npt.assert_almost_equal(f_, (f_1 - f_2), decimal=5)
def test_derivatives(self):
"""
:return:
"""
x = np.linspace(0.1, 10, 10)
w_c, w_t = 0.5, 1.
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=0.5, w_t=1., e1=e1, e2=e2)
kwargs_1 = {'theta_E': theta_E_convert, 's_scale': s_scale_1, 'e1': e1, 'e2': e2}
kwargs_2 = {'theta_E': theta_E_convert, 's_scale': s_scale_2, 'e1': e1, 'e2': e2}
f_x, f_y = self.chameleon.derivatives(x=x, y=1., **kwargs_light)
f_x_1, f_y_1 = self.nie.derivatives(x=x, y=1., **kwargs_1)
f_x_2, f_y_2 = self.nie.derivatives(x=x, y=1., **kwargs_2)
npt.assert_almost_equal(f_x, (f_x_1 - f_x_2), decimal=5)
npt.assert_almost_equal(f_y, (f_y_1 - f_y_2), decimal=5)
f_x, f_y = self.chameleon.derivatives(x=1, y=0., **kwargs_light)
npt.assert_almost_equal(f_x, 1, decimal=1)
def test_hessian(self):
"""
:return:
"""
x = np.linspace(0.1, 10, 10)
w_c, w_t = 0.5, 1.
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
theta_E_convert, w_c, w_t, s_scale_1, s_scale_2 = self.chameleon.param_convert(alpha_1=1, w_c=0.5, w_t=1., e1=e1, e2=e2)
kwargs_1 = {'theta_E': theta_E_convert, 's_scale': s_scale_1, 'e1': e1, 'e2': e2}
kwargs_2 = {'theta_E': theta_E_convert, 's_scale': s_scale_2, 'e1': e1, 'e2': e2}
f_xx, f_xy, f_yx, f_yy = self.chameleon.hessian(x=x, y=1., **kwargs_light)
f_xx_1, f_xy_1, f_yx_1, f_yy_1 = self.nie.hessian(x=x, y=1., **kwargs_1)
f_xx_2, f_xy_2, f_yx_2, f_yy_2 = self.nie.hessian(x=x, y=1., **kwargs_2)
npt.assert_almost_equal(f_xx, (f_xx_1 - f_xx_2), decimal=5)
npt.assert_almost_equal(f_yy, (f_yy_1 - f_yy_2), decimal=5)
npt.assert_almost_equal(f_xy, (f_xy_1 - f_xy_2), decimal=5)
npt.assert_almost_equal(f_yx, (f_yx_1 - f_yx_2), decimal=5)
def test_static(self):
x, y = 1., 1.
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
f_ = self.chameleon.function(x, y, **kwargs_light)
self.chameleon.set_static(**kwargs_light)
f_static = self.chameleon.function(x, y, **kwargs_light)
npt.assert_almost_equal(f_, f_static, decimal=8)
self.chameleon.set_dynamic()
kwargs_light = {'alpha_1': 2., 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
f_dyn = self.chameleon.function(x, y, **kwargs_light)
assert f_dyn != f_static
class TestDoubleChameleon(object):
"""
class to test the DoubleChameleon profile
"""
def setup(self):
pass
def test_param_name(self):
chameleon = DoubleChameleon()
names = chameleon.param_names
assert names[0] == 'alpha_1'
def test_function(self):
"""
:return:
"""
doublechameleon = DoubleChameleon()
chameleon = Chameleon()
x = np.linspace(0.1, 10, 10)
phi_G, q = 0.3, 0.8
theta_E = 1.
ratio = 2.
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'ratio': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
kwargs_1 = {'alpha_1': theta_E / (1 + 1. / ratio), 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
kwargs_2 = {'alpha_1': theta_E / (1 + ratio), 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
flux = doublechameleon.function(x=x, y=1., **kwargs_light)
flux1 = chameleon.function(x=x, y=1., **kwargs_1)
flux2 = chameleon.function(x=x, y=1., **kwargs_2)
npt.assert_almost_equal(flux, flux1 + flux2, decimal=8)
def test_derivatives(self):
"""
:return:
"""
doublechameleon = DoubleChameleon()
chameleon = Chameleon()
x = np.linspace(0.1, 10, 10)
phi_G, q = 0.3, 0.8
theta_E = 1.
ratio = 2.
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'ratio': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
kwargs_1 = {'alpha_1': theta_E / (1 + 1. / ratio), 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
kwargs_2 = {'alpha_1': theta_E / (1 + ratio), 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
f_x, f_y = doublechameleon.derivatives(x=x, y=1., **kwargs_light)
f_x1, f_y1 = chameleon.derivatives(x=x, y=1., **kwargs_1)
f_x2, f_y2 = chameleon.derivatives(x=x, y=1., **kwargs_2)
npt.assert_almost_equal(f_x, f_x1 + f_x2, decimal=8)
npt.assert_almost_equal(f_y, f_y1 + f_y2, decimal=8)
def test_hessian(self):
"""
:return:
"""
doublechameleon = DoubleChameleon()
chameleon = Chameleon()
x = np.linspace(0.1, 10, 10)
phi_G, q = 0.3, 0.8
theta_E = 1.
ratio = 2.
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_lens = {'alpha_1': theta_E, 'ratio': ratio, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
kwargs_light = {'amp': theta_E, 'ratio': ratio, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
kwargs_1 = {'alpha_1': theta_E / (1 + 1./ratio), 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
kwargs_2 = {'alpha_1': theta_E / (1 + ratio), 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=x, y=1., **kwargs_lens)
f_xx1, f_xy1, f_yx1, f_yy1 = chameleon.hessian(x=x, y=1., **kwargs_1)
f_xx2, f_xy2, f_yx2, f_yy2 = chameleon.hessian(x=x, y=1., **kwargs_2)
npt.assert_almost_equal(f_xx, f_xx1 + f_xx2, decimal=8)
npt.assert_almost_equal(f_yy, f_yy1 + f_yy2, decimal=8)
npt.assert_almost_equal(f_xy, f_xy1 + f_xy2, decimal=8)
npt.assert_almost_equal(f_yx, f_yx1 + f_yx2, decimal=8)
light = DoubleChameleonLight()
f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_lens)
kappa = 1./2 * (f_xx + f_yy)
kappa_norm = kappa / np.mean(kappa)
flux = light.function(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_light)
flux_norm = flux / np.mean(flux)
npt.assert_almost_equal(kappa_norm, flux_norm, decimal=5)
def test_static(self):
doublechameleon = DoubleChameleon()
x, y = 1., 1.
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1, 'ratio': 0.5, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
f_ = doublechameleon.function(x, y, **kwargs_light)
doublechameleon.set_static(**kwargs_light)
f_static = doublechameleon.function(x, y, **kwargs_light)
npt.assert_almost_equal(f_, f_static, decimal=8)
doublechameleon.set_dynamic()
kwargs_light = {'alpha_1': 2, 'ratio': 0.5, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
f_dyn = doublechameleon.function(x, y, **kwargs_light)
assert f_dyn != f_static
class TestDoubleChameleonPointMass(object):
"""
class to test the DoubleChameleonPointMass profile
"""
def setup(self):
pass
def test_param_name(self):
chameleon = DoubleChameleonPointMass()
names = chameleon.param_names
assert names[0] == 'alpha_1'
def test_function(self):
"""
:return:
"""
doublechameleon = DoubleChameleonPointMass()
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'ratio_pointmass': 3, 'ratio_chameleon': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
flux = doublechameleon.function(x=1, y=1., **kwargs_light)
npt.assert_almost_equal(flux, 1.2767176964863585, decimal=4)
def test_derivatives(self):
"""
:return:
"""
doublechameleon = DoubleChameleonPointMass()
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'ratio_pointmass': 3, 'ratio_chameleon': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1,
'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
f_x, f_y = doublechameleon.derivatives(x=1, y=1., **kwargs_light)
npt.assert_almost_equal(f_x, 0.4348690461571936, decimal=4)
npt.assert_almost_equal(f_y, 0.4530081649948411, decimal=4)
def test_hessian(self):
"""
:return:
"""
doublechameleon = DoubleChameleonPointMass()
phi_G, q = 0.3, 0.8
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'ratio_pointmass': 3, 'ratio_chameleon': 2, 'w_c1': .5, 'w_t1': 1., 'e11': e1,
'e21': e2, 'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2}
f_xx, f_xy, f_yx, f_yy = doublechameleon.hessian(x=1, y=1., **kwargs_light)
npt.assert_almost_equal(f_xx, 0.0633838122066912, decimal=4)
npt.assert_almost_equal(f_xy, -0.3986532840628945, decimal=4)
npt.assert_almost_equal(f_yx, -0.3986532840628945, decimal=4)
npt.assert_almost_equal(f_yy, 0.04802318253385707, decimal=4)
class TestTripleChameleon(object):
"""
class to test the TripleChameleon profile
"""
def setup(self):
pass
def test_param_name(self):
chameleon = TripleChameleon()
names = chameleon.param_names
assert names[0] == 'alpha_1'
def test_function(self):
"""
:return:
"""
triplechameleon = TripleChameleon()
chameleon = Chameleon()
x = np.linspace(0.1, 10, 10)
phi_G, q = 0.3, 0.8
ratio12 = 2.
ratio13 = 3
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1, 'e21': e2,
'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2,
'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2
}
amp1 = 1. / (1. + 1. / ratio12 + 1. / ratio13)
amp2 = amp1 / ratio12
amp3 = amp1 / ratio13
kwargs_1 = {'alpha_1': amp1, 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
kwargs_2 = {'alpha_1': amp2, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
kwargs_3 = {'alpha_1': amp3, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
flux = triplechameleon.function(x=x, y=1., **kwargs_light)
flux1 = chameleon.function(x=x, y=1., **kwargs_1)
flux2 = chameleon.function(x=x, y=1., **kwargs_2)
flux3 = chameleon.function(x=x, y=1., **kwargs_3)
npt.assert_almost_equal(flux, flux1 + flux2 + flux3, decimal=8)
def test_derivatives(self):
"""
:return:
"""
triplechameleon = TripleChameleon()
chameleon = Chameleon()
x = np.linspace(0.1, 10, 10)
phi_G, q = 0.3, 0.8
ratio12 = 2.
ratio13 = 3
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_light = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1,
'e21': e2,
'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2,
'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2
}
amp1 = 1. / (1. + 1. / ratio12 + 1. / ratio13)
amp2 = amp1 / ratio12
amp3 = amp1 / ratio13
kwargs_1 = {'alpha_1': amp1, 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
kwargs_2 = {'alpha_1': amp2, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
kwargs_3 = {'alpha_1': amp3, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
f_x, f_y = triplechameleon.derivatives(x=x, y=1., **kwargs_light)
f_x1, f_y1 = chameleon.derivatives(x=x, y=1., **kwargs_1)
f_x2, f_y2 = chameleon.derivatives(x=x, y=1., **kwargs_2)
f_x3, f_y3 = chameleon.derivatives(x=x, y=1., **kwargs_3)
npt.assert_almost_equal(f_x, f_x1 + f_x2 + f_x3, decimal=8)
npt.assert_almost_equal(f_y, f_y1 + f_y2 + f_y3, decimal=8)
def test_hessian(self):
"""
:return:
"""
triplechameleon = TripleChameleon()
chameleon = Chameleon()
x = np.linspace(0.1, 10, 10)
phi_G, q = 0.3, 0.8
ratio12 = 2.
ratio13 = 3
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_lens = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1,
'e21': e2,
'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2,
'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2
}
kwargs_light = {'amp': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1,
'e21': e2,
'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2,
'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2
}
amp1 = 1. / (1. + 1. / ratio12 + 1. / ratio13)
amp2 = amp1 / ratio12
amp3 = amp1 / ratio13
kwargs_1 = {'alpha_1': amp1, 'w_c': .5, 'w_t': 1., 'e1': e1, 'e2': e2}
kwargs_2 = {'alpha_1': amp2, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
kwargs_3 = {'alpha_1': amp3, 'w_c': .1, 'w_t': .5, 'e1': e1, 'e2': e2}
f_xx, f_xy, f_yx, f_yy = triplechameleon.hessian(x=x, y=1., **kwargs_lens)
f_xx1, f_xy1, f_yx1, f_yy1 = chameleon.hessian(x=x, y=1., **kwargs_1)
f_xx2, f_xy2, f_yx2, f_yy2 = chameleon.hessian(x=x, y=1., **kwargs_2)
f_xx3, f_xy3, f_yx3, f_yy3 = chameleon.hessian(x=x, y=1., **kwargs_3)
npt.assert_almost_equal(f_xx, f_xx1 + f_xx2 + f_xx3, decimal=8)
npt.assert_almost_equal(f_yy, f_yy1 + f_yy2 + f_yy3, decimal=8)
npt.assert_almost_equal(f_xy, f_xy1 + f_xy2 + f_xy3, decimal=8)
npt.assert_almost_equal(f_yx, f_yx1 + f_yx2 + f_yx3, decimal=8)
light = TripleChameleonLight()
f_xx, f_xy, f_yx, f_yy = triplechameleon.hessian(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_lens)
kappa = 1./2 * (f_xx + f_yy)
kappa_norm = kappa / np.mean(kappa)
flux = light.function(x=np.linspace(0, 1, 10), y=np.zeros(10), **kwargs_light)
flux_norm = flux / np.mean(flux)
npt.assert_almost_equal(kappa_norm, flux_norm, decimal=5)
def test_static(self):
triplechameleon = TripleChameleon()
x, y = 1., 1.
phi_G, q = 0.3, 0.8
ratio12 = 2.
ratio13 = 3
e1, e2 = param_util.phi_q2_ellipticity(phi_G, q)
kwargs_lens = {'alpha_1': 1., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1,
'e21': e2,
'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2,
'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2
}
f_ = triplechameleon.function(x, y, **kwargs_lens)
triplechameleon.set_static(**kwargs_lens)
f_static = triplechameleon.function(x, y, **kwargs_lens)
npt.assert_almost_equal(f_, f_static, decimal=8)
triplechameleon.set_dynamic()
kwargs_lens = {'alpha_1': 2., 'ratio12': ratio12, 'ratio13': ratio13, 'w_c1': .5, 'w_t1': 1., 'e11': e1,
'e21': e2,
'w_c2': .1, 'w_t2': .5, 'e12': e1, 'e22': e2,
'w_c3': .1, 'w_t3': .5, 'e13': e1, 'e23': e2
}
f_dyn = triplechameleon.function(x, y, **kwargs_lens)
assert f_dyn != f_static
if __name__ == '__main__':
pytest.main()
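# Illustrative only, not part of the original test module: a compact standalone restatement of the
# decomposition the tests above verify -- a Chameleon deflection equals the difference of two NIE
# deflections built from the parameters returned by Chameleon.param_convert. Values are arbitrary.
def _chameleon_equals_nie_difference():
    chameleon, nie = Chameleon(), NIE()
    e1, e2 = param_util.phi_q2_ellipticity(0.3, 0.8)
    theta_E, w_c, w_t, s1, s2 = chameleon.param_convert(alpha_1=1., w_c=0.5, w_t=1., e1=e1, e2=e2)
    f_x, f_y = chameleon.derivatives(x=1., y=0.5, alpha_1=1., w_c=0.5, w_t=1., e1=e1, e2=e2)
    f_x1, f_y1 = nie.derivatives(x=1., y=0.5, theta_E=theta_E, s_scale=s1, e1=e1, e2=e2)
    f_x2, f_y2 = nie.derivatives(x=1., y=0.5, theta_E=theta_E, s_scale=s2, e1=e1, e2=e2)
    npt.assert_almost_equal(f_x, f_x1 - f_x2, decimal=5)
    npt.assert_almost_equal(f_y, f_y1 - f_y2, decimal=5)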
| 43.341346 | 174 | 0.553356 |
0e6178979d3b16aadc68fd67e1409ab958dc6e6f | 7,082 | py | Python | metrics/algorithmic.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | ["MIT"] | 3 | 2020-02-21T21:35:07.000Z | 2020-09-29T15:20:00.000Z | metrics/algorithmic.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | ["MIT"] | 27 | 2020-02-20T21:00:23.000Z | 2020-05-22T15:23:25.000Z | metrics/algorithmic.py | cassianobecker/dnn | bb2ea04f77733de9df10f795bb049ac3b9d30478 | ["MIT"] | null | null | null |
import math
from fwk.metrics import Metric
from fwk.config import Config
class EpochCounter(Metric):
def __init__(self) -> None:
super().__init__()
self.epoch = None
self.total_epochs = None
def on_before_epoch(self, local_variables):
self.epoch = local_variables['epoch']
self.total_epochs = int(Config.config['ALGORITHM']['epochs'])
self.print_metric()
def text_record(self):
record_str = f'\n---- epoch {self.epoch + 1:03d} of {self.total_epochs:03d} --------------------\n'
return record_str
class TrainBatchCounter(Metric):
def __init__(self) -> None:
super().__init__()
self.number_of_subjects = None
self.batch_idx = None
self.subjects_per_batch = None
self.number_of_batches = None
self.regime = 'train'
def on_before_train_batch(self, local_variables):
self.number_of_subjects = len(local_variables['self'].data_loaders[self.regime].dataset.subjects)
self.subjects_per_batch = int(Config.config['ALGORITHM'][f'{self.regime}_batch_size'])
self.batch_idx = local_variables['batch_idx']
self.number_of_batches = math.ceil(self.number_of_subjects / self.subjects_per_batch)
self.print_metric()
def text_record(self):
text_record = f'\n batch {self.batch_idx + 1:03d} of {self.number_of_batches:03d} ({self.regime})\n'
return text_record
class TestBatchCounter(Metric):
def __init__(self) -> None:
super().__init__()
self.number_of_subjects = None
self.batch_idx = None
self.subjects_per_batch = None
self.number_of_batches = None
self.regime = 'test'
def on_before_test_batch(self, local_variables):
self.number_of_subjects = len(local_variables['self'].data_loaders[self.regime].dataset.subjects)
self.subjects_per_batch = int(Config.config['ALGORITHM'][f'{self.regime}_batch_size'])
self.batch_idx = local_variables['batch_idx']
self.number_of_batches = math.ceil(self.number_of_subjects / self.subjects_per_batch)
self.print_metric()
def text_record(self):
text_record = f'\n batch {self.batch_idx + 1:03d} of {self.number_of_batches:03d} ({self.regime})\n'
return text_record
class ImageTrainBatchCounter(Metric):
def __init__(self) -> None:
super().__init__()
self.number_of_images = None
self.batch_idx = None
self.images_per_batch = None
self.number_of_batches = None
self.regime = 'train'
def on_before_train_batch(self, local_variables):
self.number_of_images = len(local_variables['self'].data_loaders[self.regime].dataset.images)
self.images_per_batch = int(Config.config['ALGORITHM'][f'{self.regime}_batch_size'])
self.batch_idx = local_variables['batch_idx']
self.number_of_batches = int(self.number_of_images / self.images_per_batch)
self.print_metric()
def text_record(self):
text_record = f'\n batch {self.batch_idx + 1:03d} of {self.number_of_batches:03d} ({self.regime})\n'
return text_record
class ImageTestBatchCounter(Metric):
def __init__(self) -> None:
super().__init__()
self.number_of_images = None
self.batch_idx = None
self.images_per_batch = None
self.number_of_batches = None
self.regime = 'test'
def on_before_test_batch(self, local_variables):
self.number_of_images = len(local_variables['self'].data_loaders[self.regime].dataset.images)
self.images_per_batch = int(Config.config['ALGORITHM'][f'{self.regime}_batch_size'])
self.batch_idx = local_variables['batch_idx']
self.number_of_batches = int(self.number_of_images / self.images_per_batch)
self.print_metric()
def text_record(self):
text_record = f'\n batch {self.batch_idx + 1:03d} of {self.number_of_batches:03d} ({self.regime})\n'
return text_record
class BatchLoss(Metric):
def __init__(self) -> None:
super().__init__()
self.loss = None
def on_after_train_batch(self, local_variables):
self.loss = local_variables['loss'].item()
self.print_metric()
def text_record(self):
text_record = f' batch loss: {self.loss:.3e}\n'
return text_record
class EpochLoss(Metric):
def __init__(self) -> None:
super().__init__()
self.loss = None
def on_after_train_batch(self, local_variables):
self.loss += local_variables['loss'].item()
def on_before_epoch(self, local_variables):
self.loss = 0
def on_after_epoch(self, local_variables):
self.print_metric()
def text_record(self):
text_record = f'\n epoch loss: {self.loss:.3e}\n'
return text_record
def numpy_record(self, records=None):
if 'epoch_loss' not in records.keys():
records['epoch_loss'] = list()
records['epoch_loss'].append(self.loss)
return records
class GradientMetrics(Metric):
def __init__(self) -> None:
super().__init__()
self.gradient_norm = None
self.batch_idx = None
self.epoch = None
def on_after_train_batch(self, local_variables):
model = local_variables['self'].model
self.batch_idx = local_variables['batch_idx']
self.epoch = local_variables['epoch']
self.gradient_norm = self._compute_norm(model)
self.print_metric()
def text_record(self):
text_record = f' gradient norm: {self.gradient_norm:.3e}\n'
return text_record
def numpy_record(self, records=None):
if 'gradient_norm' not in records.keys():
records['gradient_norm'] = []
if self.batch_idx == 0:
records['gradient_norm'].append([self.gradient_norm])
else:
records['gradient_norm'][self.epoch].append(self.gradient_norm)
return records
@staticmethod
def _compute_norm(model):
total_norm = 0
for p in model.parameters():
param_norm = p.grad.norm(2).item() if p.grad is not None else 0
total_norm += param_norm ** 2
total_norm = total_norm ** (1. / 2)
return total_norm
class NumberOfParameters(Metric):
def __init__(self) -> None:
super().__init__()
self.model = None
def on_after_setup(self, local_variables):
self.model = local_variables['self'].model
self.print_metric()
def text_record(self):
total_str = f'number of parameters {self._total_parameters(self.model):1.3e}\n'
total_trainable_str = f'number of trainable parameters {self._total_trainable_parameters(self.model):1.3e}\n'
return total_str + total_trainable_str
@staticmethod
def _total_parameters(model):
return sum(p.numel() for p in model.parameters())
@staticmethod
def _total_trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
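# Illustrative only, not part of the original module: a toy dispatcher showing how these hooks appear
# to be driven -- each on_* callback receives the caller's locals(), which is why the metrics above
# index local_variables['self'], ['epoch'], ['batch_idx'] and ['loss']. This dispatcher is an
# assumption for illustration, not the actual fwk.metrics implementation.
class _ToyMetricDispatcher:
    def __init__(self, metrics):
        self.metrics = metrics

    def fire(self, event, local_variables):
        # e.g. fire('on_after_train_batch', locals()) from inside the training loop
        for metric in self.metrics:
            handler = getattr(metric, event, None)
            if handler is not None:
                handler(local_variables)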
| 32.045249 | 117 | 0.655888 |
dc3d2b6d0f91f2b2a25bbb50c3fea732c8baed81 | 2,296 | py | Python | GetTheFieldsAndSubfields.py | WissamAntoun/LebaneseEngineers | c0cc7e85ead4e7ec6677882f80a5a7bcd4d80e87 | ["MIT"] | 1 | 2021-02-12T21:00:53.000Z | 2021-02-12T21:00:53.000Z | GetTheFieldsAndSubfields.py | WissamAntoun/LebaneseEngineers | c0cc7e85ead4e7ec6677882f80a5a7bcd4d80e87 | ["MIT"] | null | null | null | GetTheFieldsAndSubfields.py | WissamAntoun/LebaneseEngineers | c0cc7e85ead4e7ec6677882f80a5a7bcd4d80e87 | ["MIT"] | null | null | null |
import pandas as pd
import numpy as np
import requests
import sys
import codecs
sys.stdout.reconfigure(encoding="utf-8")
if __name__ == "__main__":
subfields_df = pd.read_csv("subfields.csv", encoding="utf-8", index_col=0)
fields_df = pd.read_csv("fields.csv", encoding="utf-8", index_col=0)
fields = np.delete(
fields_df.index.to_numpy(), np.where(fields_df.index.to_numpy() == -1)
)
subfields = np.delete(
subfields_df.index.to_numpy(), np.where(subfields_df.index.to_numpy() == -1)
)
print("Hello")
# "fstname":,
# "lstname":,
# "fatname":,
# "numb":,
with codecs.open("TheCatogories.txt", "w", encoding="utf-8") as file1:
for field in fields:
for subfield in subfields:
# print(field, subfield)
# subfield = -1
# field = -1
parameters = {
"PageID": 112,
"CurrPage": 1,
"spec": field,
"spec1": subfield,
"searchoption": "And",
"rand": 0.055286690143709905,
}
r = requests.get(
"https://www.oea.org.lb/Arabic/GetMembers.aspx", params=parameters
)
response = r.text
if "لا يوجد أي نتيجة" in response:
print("wrong issue")
else:
print(
field,
fields_df.loc[field].Field,
subfield,
subfields_df.loc[subfield].Subfield,
sep=", ",
)
# Writing data to a file
file1.write(
", ".join(
map(
str,
[
field,
fields_df.loc[field].Field,
subfield,
subfields_df.loc[subfield].Subfield,
],
)
)
)
file1.write("\n")
| 31.888889 | 86 | 0.39939 |
a3ecb9050652368eccb195869c30f26e32c10fe4 | 38,248 | py | Python | visualizations/plot_utils.py | afogarty85/typos | 30b79a4ab4cd7c996fb394524906a6bffab3a2ab | ["MIT"] | null | null | null | visualizations/plot_utils.py | afogarty85/typos | 30b79a4ab4cd7c996fb394524906a6bffab3a2ab | ["MIT"] | 1 | 2021-04-15T16:41:31.000Z | 2021-04-15T16:41:31.000Z | visualizations/plot_utils.py | afogarty85/typos | 30b79a4ab4cd7c996fb394524906a6bffab3a2ab | ["MIT"] | 1 | 2021-04-22T14:29:36.000Z | 2021-04-22T14:29:36.000Z |
############################################################################
# IMPORTS
############################################################################
import numpy as np
import seaborn as sns
import pandas as pd
import altair as alt
from collections import OrderedDict
from vega_datasets import data
import matplotlib as mpl
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'Times New Roman'
from matplotlib import pyplot as plt
import matplotlib.ticker as tck
############################################################################
# Plotting Utilities, Constants, Methods for W209 arXiv project
############################################################################
#---------------------------------------------------------------------------
## Plotting Palette
#
# Create a dict object containing U.C. Berkeley official school colors for plot palette
# reference : https://brand.berkeley.edu/colors/
# secondary reference : https://alumni.berkeley.edu/brand/color-palette
# Class Initialization
#---------------------------------------------------------------------------
berkeley_palette = OrderedDict({
'berkeley_blue' : '#003262',
'california_gold' : '#fdb515',
'founders_rock' : '#3b7ea1',
'medalist' : '#c4820e',
'bay_fog' : '#ddd5c7',
'lawrence' : '#00b0da',
'sather_gate' : '#b9d3b6',
'pacific' : '#46535e',
'soybean' : '#859438',
'south_hall' : '#6c3302',
'wellman_tile' : '#D9661F',
'rose_garden' : '#ee1f60',
'golden_gate' : '#ed4e33',
'lap_lane' : '#00a598',
'ion' : '#cfdd45',
'stone_pine' : '#584f29',
'grey' : '#eeeeee',
'web_grey' : '#888888',
# alum only colors
'metallic_gold' : '#BC9B6A',
'california_purple' : '#5C3160',
# standard web colors
'white' : '#FFFFFF',
'black' : '#000000'
})
#---------------------------------------------------------------------------
## Altair custom "Cal" theme
#---------------------------------------------------------------------------
def cal_theme():
font = "Lato"
return {
"config": {
"title": {
"fontSize": 30,
"font": font,
"anchor": "middle",
"align":"center",
"color": berkeley_palette['berkeley_blue'],
"subtitleFontSize": 20,
"subtitleFont": font,
"subtitleAcchor": "middle",
"subtitleAlign": "center",
"subtitleColor": berkeley_palette['berkeley_blue']
},
"axisX": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end",
"titlePadding": 20
},
"axisY": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end",
"titlePadding": 20
},
"headerRow": {
"labelFont": font,
"titleFont": font,
"titleFontSize": 15,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
},
"legend": {
"labelFont": font,
"labelFontSize": 15,
"labelColor": berkeley_palette['stone_pine'],
"symbolType": "stroke",
"symbolStrokeWidth": 3,
"symbolOpacity": 1.0,
"symbolSize": 500,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue']
},
"view": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
},
"facet": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
},
"row": {
"labelFont": font,
"labelColor": berkeley_palette['pacific'],
"labelFontSize": 15,
"titleFont": font,
"titleFontSize": 20,
"titleColor": berkeley_palette['berkeley_blue'],
"titleAlign": "right",
"titleAnchor": "end"
}
}
}
alt.themes.register("my_cal_theme", cal_theme)
alt.themes.enable("my_cal_theme")
###################################################################################
###################################################################################
## DIVERGENCE DATA PREP
###################################################################################
###################################################################################
def get_divergence_data(df):
df2_effective = df.groupby(by=['Treatment','Prompt','Effective']).ROWID.count().reset_index().sort_values(by=['Prompt','Effective','Treatment'])
df2_effective.columns = ['treatment', 'prompt','rank','total']
df2_effective['question'] = 'effective'
df2_intelligence = df.groupby(by=['Treatment','Prompt','Intelligence']).ROWID.count().reset_index().sort_values(by=['Prompt','Intelligence','Treatment'])
df2_intelligence.columns = ['treatment', 'prompt','rank','total']
df2_intelligence['question'] = 'intelligence'
df2_writing = df.groupby(by=['Treatment','Prompt','Writing']).ROWID.count().reset_index().sort_values(by=['Prompt','Writing','Treatment'])
df2_writing.columns = ['treatment', 'prompt','rank','total']
df2_writing['question'] = 'writing'
df2 = pd.concat([df2_effective, df2_intelligence, df2_writing], axis=0, ignore_index=True)
gt = df2.groupby(by=['treatment','prompt','question']).agg({'total':'sum'}).reset_index()
gt.columns = ['treatment','prompt','question','grand_total']
df2 = df2.merge(gt, on=['treatment','prompt','question'], how='inner')
df2['pct_of_total'] = (df2.total / df2.grand_total) * 100.
df2['pct_start'] = np.nan
df2['pct_end'] = np.nan
# fill in any missing votes as 0 percent votes
x = [(a, b, c, d) for a in df2.treatment.unique() for b in df2.prompt.unique() for c in df2['rank'].unique() for d in df2.question.unique()]
x = pd.DataFrame(x, columns=['treatment','prompt','rank','question'])
x = x.merge(df2[['treatment','prompt','rank','question','pct_of_total']], how='left', on=['treatment','prompt','rank','question'])
x = x[(x.pct_of_total.isna()==True)]
x.pct_of_total = np.float32(0.0)
df2 = pd.concat([df2,x], axis=0, ignore_index=True)
# set baseline in the middle
df2.loc[(df2['rank'] == 4), 'pct_start'] = df2.loc[(df2['rank'] == 4), 'pct_of_total']/2 * -1
df2['pct_end'] = df2['pct_start'] * -1
# calculate ranks 1-3 and 5-7
for r,t,p,q in [(a,b,c,d) for a in [3,2,1] for b in df2.treatment.unique() for c in df2.prompt.unique() for d in df2.question.unique()]:
# get starting value for negative percentages, this becomes the "end" value for the next rank down
pct_start = np.float32(df2[((df2['rank'] == (r+1)) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q))].pct_start)
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_end'] = pct_start
pct_new_start = np.float32(df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_of_total'] * -1) + pct_start
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_start'] = pct_new_start
for r,t,p,q in [(a,b,c,d) for a in [5,6,7] for b in df2.treatment.unique() for c in df2.prompt.unique() for d in df2.question.unique()]:
pct_start = np.float32(df2[((df2['rank'] == (r-1)) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q))].pct_end)
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_start'] = pct_start
pct_end = np.float32(df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_of_total']) + pct_start
df2.loc[((df2['rank'] == r) & (df2.treatment == t) & (df2.prompt == p) & (df2.question == q)), 'pct_end'] = pct_end
return df2
###################################################################################
###################################################################################
## DIVERGENCE PLOTS (LIKERT)
###################################################################################
###################################################################################
def diverge_plot(data, question):
color_scale = alt.Scale(
domain=["1","2","3","4","5","6","7"],
range=[berkeley_palette["rose_garden"],
berkeley_palette["medalist"],
berkeley_palette["california_gold"],
berkeley_palette["bay_fog"],
berkeley_palette["lawrence"],
berkeley_palette["founders_rock"],
berkeley_palette["berkeley_blue"]]
)
select = alt.selection_multi(fields=['rank'])
p = alt.Chart()\
.transform_filter(alt.datum.question == question)\
.mark_bar().encode(
x=alt.X('pct_start:Q'),
x2=alt.X2('pct_end:Q'),
y=alt.Y('prompt:N', axis=alt.Axis(title=None, ticks=False, domain=False, offset=5, minExtent=60)),
color=alt.Color(
'rank:O',
legend=None,
scale=color_scale),
tooltip=[alt.Tooltip('treatment:N', title='Assignment'),
alt.Tooltip('question:N', title='Question'),
alt.Tooltip('rank:O', title='Rank (1-7)'),
alt.Tooltip('pct_of_total:Q', title='% of Total', format='.2f')],
opacity=alt.condition(select, alt.OpacityValue(1.0), alt.OpacityValue(0.5))
).properties(height=150,width=650,title={'text':''}).add_selection(select)
l = alt.Chart(pd.DataFrame({'X':[0]})).mark_rule(size=3, color=berkeley_palette["pacific"], strokeDash=[10,5])\
.encode(x=alt.X('X', type='quantitative', title=None))
return alt.layer(p, l)
def macro_diverge_plot(data, question, title):
c = diverge_plot(data, question)\
.facet(
row=alt.Row('treatment:N',
sort=alt.SortArray(['Control','Typographical','Phonological']),
header=alt.Header(
labelColor=berkeley_palette['pacific'],
labelFontSize=20,
labelFont='Lato',
title=""
)
),
title=title,
data=data)\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return c
###################################################################################
###################################################################################
## PARTICIPANT COUNT PLOTS (send in only with [treatment, total] columns)
###################################################################################
###################################################################################
def participant_count_plot(data):
b = alt.Chart().mark_bar(line={'color':berkeley_palette['web_grey']}).encode(
x = alt.X('treatment:O', sort=['Control', 'Typographical', 'Phonological'],
axis = alt.Axis(title = 'Assignment Group', labelAngle=0, labelPadding=10, labelFontSize=20, titleFontSize=25)),
y = alt.Y('total:Q', axis = alt.Axis(title = "Participants Assigned", labelPadding=10, labelFontSize=20, titleFontSize=25),
scale=alt.Scale(domain=[0,14])),
color = alt.Color('treatment:O', legend = None,
scale=alt.Scale(range = [berkeley_palette['pacific'], berkeley_palette['berkeley_blue'], berkeley_palette['founders_rock']]))
)
t = alt.Chart().mark_text(
color = berkeley_palette['white'],
size = 20,
align='center',
baseline='middle',
dy = 20).encode(
x = alt.X('treatment:O', axis=None, sort=['Control', 'Typographical','Phonological']),
y = alt.Y('total:Q'),
text = alt.Text('total:Q')
)
p = alt.layer(b, t, data = data)\
.properties(height=300,width=650,title={'text':'Pilot Participation'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
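# Illustrative only: as the banner above notes, participant_count_plot expects a frame with exactly
# [treatment, total] columns. The counts below are made up for the example.
_pilot_counts = pd.DataFrame({
    'treatment': ['Control', 'Typographical', 'Phonological'],
    'total': [10, 11, 9]})
_pilot_chart = participant_count_plot(_pilot_counts)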
def participant_count_plot_live(data):
df2 = data[['Start Date','Treatment','ROWID']].copy()
df2['Start Date'] = df2['Start Date'].dt.normalize()
df2 = df2.drop_duplicates().groupby(by=['Start Date','Treatment']).agg({'ROWID':'count'}).reset_index()
df2.columns = ['date','branch','total']
df2['display_date'] = df2.date.dt.strftime('%b %d')
df2['source'] = 'Amazon'
df2.loc[(df2.date > '2021-04-05'), 'source'] = 'XLab'
df2 = df2.groupby(by=['branch','source']).agg({'total':'sum'}).reset_index().rename(columns={'branch':'treatment'})
base = alt.Chart().mark_bar().encode(
x=alt.X('total:Q', axis=alt.Axis(title = 'Participants Assigned', labelPadding=10, labelFontSize=20, titleFontSize=25)),
y = alt.X('treatment:O', axis=alt.Axis(title = '', labelAngle=0, labelPadding=10, labelFontSize=20, titleFontSize=25), sort=['Control', 'Typographical','Phonological']),
color = alt.Color('treatment:O', legend = None,
scale=alt.Scale(range = [berkeley_palette['pacific'], berkeley_palette['berkeley_blue'], berkeley_palette['founders_rock']]))
).properties(width=650, height=150)
txt = base.mark_text(dx=-15, size=15).encode(
text='total:Q',
color=alt.value('white')
)
p = alt.layer(base, txt).properties(width=600, height=150, title={'text':''})\
.facet(
row=alt.Row('source:N',
sort=alt.SortArray(['XLab','Amazon']),
header=alt.Header(labelColor=berkeley_palette['pacific'], labelFontSize=25,labelFont='Lato',title='')
),
data=df2,
title='Live Study Participation'
).configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
###################################################################################
###################################################################################
## MISSING DEMOGRAPHICS DATA (HTML w/ PANDAS STYLER)
###################################################################################
###################################################################################
def color_all_missing(val):
color = 'white' if val == 0 else 'black'
return 'color: %s' % color
def highlight_missing(s):
is_max = s == 0
return ['background-color: black' if v else '' for v in is_max]
def highlight_missing_max(s):
is_max = s == s.max()
return ['background-color: black' if v else '' for v in is_max]
def color_all_missing_max(val):
color = 'white' if val == df.shape[0] else 'black'
return 'color: %s' % color
def get_missing_demographics(df):
cm = sns.light_palette("#0067B0", as_cmap=True)
cols = [c for c in df.columns if c in ['Year','Gender','English','Race',
'Country','State','Student','Degree']]
rend = pd.DataFrame({'% Missing Values' : round(df[cols].isnull().mean() * 100, 2),
'Missing Values (Count)' : df[cols].isnull().sum(),
'Non-Null Values' : df[cols].notnull().sum(),
'Density' : 1 / df[cols].nunique()})\
.style.bar(color = "#22a7f0", align = 'left', subset=['% Missing Values'])\
.background_gradient(cmap=cm, subset=['Density'])\
.apply(highlight_missing, subset=['Non-Null Values'])\
.apply(highlight_missing_max, subset=['Missing Values (Count)'])\
.set_caption('Distribution of Missing Demographic Values')\
.set_precision(2)
return rend
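# Illustrative only: get_missing_demographics returns a pandas Styler intended for notebook rendering.
# The toy frame below only needs a subset of the demographic columns; the values are invented.
_demo_toy = pd.DataFrame({
    'Year': ['1990', None, '1985'],
    'Gender': ['F', 'M', None],
    'Country': ['United States of America', None, 'Canada']})
_missing_table = get_missing_demographics(_demo_toy)   # display(_missing_table) in a notebook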
###################################################################################
###################################################################################
## DEMOGRAPHICS : YEAR DISTRIBUTION (GOOD DATA ONLY)
###################################################################################
###################################################################################
def get_good_demographic_year(df):
df2 = df.copy()
df2.Year = df2.Year.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Year']).size()\
.reset_index()[['ROWID','Year']].Year.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'year', 'Year':'count'}).sort_values(by='year')
strange_values = ['19996','25','26','54','<MISSING>','Los Angeles','Mumbai, India','US','2020']
good = df2[(~df2.year.isin(strange_values))].copy()
good['year'] = good['year'].astype(int)
p = alt.Chart(good).mark_bar(size=15, color=berkeley_palette['pacific'], line={'color':berkeley_palette['web_grey']})\
.encode(
x = alt.X('year:Q', bin=False,
axis=alt.Axis(format='.0f', labelAngle=-45),
scale=alt.Scale(domain=[min(good.year), max(good.year)]),
title='Year of Birth'
),
y = alt.Y('count:Q',
axis=alt.Axis(title='Frequency')
)
).properties(height=300, width=650, title={'text':'Distribution of Birth Year', 'subtitle':'Valid Data Only'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-10)
return p
###################################################################################
###################################################################################
## DEMOGRAPHICS : GENDER DISTRIBUTION PLOT
###################################################################################
###################################################################################
def get_demographic_gender(df):
df2 = df.copy()
df2.Gender = df2.Gender.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Gender']).size()\
.reset_index()[['ROWID','Gender']].Gender.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'gender', 'Gender':'count'}).sort_values(by='gender')
b = alt.Chart()\
.mark_bar(
color=berkeley_palette['rose_garden'], opacity=0.85,
stroke=berkeley_palette['berkeley_blue'],
strokeWidth=1
).encode(
x=alt.X('gender:N',
axis=alt.Axis(labelAngle=-45, labelFontSize=20, title='Participant Gender', titleFontSize=25)),
y=alt.Y('count:Q',
axis = alt.Axis(title='Frequency', titleFontSize=25))
)
t = alt.Chart().mark_text(
color = berkeley_palette['pacific'],
size = 20,
align='center',
baseline='middle',
dy = -20
).encode(
x = alt.X('gender:N', axis=None),
y = alt.Y('count:Q', axis=None),
text = alt.Text('count:Q')
)
p = alt.layer(b, t, data=df2)\
.properties(height=300,width=700,title={'text':'Distribution of Gender'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
###################################################################################
###################################################################################
## DEMOGRAPHICS : COUNTRY DISTRIBUTION
###################################################################################
###################################################################################
def get_demographic_country(df):
df2 = df.copy()
df2.Country = df2.Country.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Country']).size()\
.reset_index()[['ROWID','Country']].Country.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'country', 'Country':'count'}).sort_values(by='country')
ctry = pd.DataFrame({
'country':['<MISSING>', 'Afghanistan', 'Canada', 'China', 'France',
'Hong Kong (S.A.R.)', 'India', 'Italy', 'Mexico', 'New Zealand',
'Portugal', 'Singapore', 'United Kingdom of Great Britain and Northern Ireland',
'United States of America'],
'id':[0, 4, 124, 156, 250, 344, 356, 380, 484, 554, 620, 702, 826, 840]})
df2 = df2.merge(ctry, how='inner', on='country')
source = alt.topo_feature(data.world_110m.url, "countries")
background = alt.Chart(source).mark_geoshape(fill="white")
foreground = (
alt.Chart(source)
.mark_geoshape(stroke=berkeley_palette['bay_fog'], strokeWidth=0.25)
.encode(
color=alt.Color(
"count:N", scale=alt.Scale(range=[berkeley_palette['pacific'], berkeley_palette['lawrence'],
berkeley_palette['lap_lane'], berkeley_palette['founders_rock'],
berkeley_palette['founders_rock'], berkeley_palette['berkeley_blue']]), legend=None,
),
tooltip=[
alt.Tooltip("country:N", title="Country"),
alt.Tooltip("count:Q", title="Participants"),
],
)
.transform_lookup(
lookup="id",
from_=alt.LookupData(df2, "id", ["count", "country"]),
)
)
final_map = alt.layer(background, foreground)\
.properties(width=700, height=400, title={'text':'Distribution of Country'})\
.configure_title(anchor='middle')\
.configure_title(dy=-10)\
.project("naturalEarth1")\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_view(stroke=None, strokeWidth=0)\
.configure_axis(grid=False)
return final_map
###################################################################################
###################################################################################
## DEMOGRAPHICS : STATE DISTRIBUTION
###################################################################################
###################################################################################
def get_demographic_state(df):
df2 = df.copy()
df2.State = df2.State.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','State']).size()\
.reset_index()[['ROWID','State']].State.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'state', 'State':'count'}).sort_values(by='state')
codes = pd.DataFrame({'state':['Alabama','Alaska','Arizona','Arkansas','California',
'Colorado','Connecticut','Delaware','District of Columbia','Florida','Georgia',
'Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana',
'Maine','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri',
'Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York',
'North Carolina','North Dakota','Ohio','Oklahoma','Oregon','Pennsylvania','Rhode Island',
'South Carolina','South Dakota','Tennessee','Texas','Utah','Vermont','Virginia',
'Washington','West Virginia','Wisconsin','Wyoming','Puerto Rico'],
'id':[1,2,4,5,6,8,9,10,11,12,13,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,44,45,46,47,48,49,50,51,53,54,55,56,72]})
df2 = df2.merge(codes, how='left', on='state').fillna(-99)
df2.id = df2.id.astype(int)
states = alt.topo_feature(data.us_10m.url, 'states')
b = alt.Chart(states).mark_geoshape(stroke=berkeley_palette['white'], strokeWidth=0.25).encode(
color=alt.Color(
"count:N", scale=alt.Scale(range=[berkeley_palette['pacific'], "#00b0da",
"#009dcb", "#008aba", "#0077aa", "#006598", "#005386", "#004274", "#003262"]), legend=None),
tooltip=[
alt.Tooltip("state:N", title="U.S. State"),
alt.Tooltip("count:Q", title="Participants")]
).transform_lookup(
lookup='id',
from_=alt.LookupData(df2, 'id', ["count","state"]))\
.project(type='albersUsa')\
.properties(width=700, height=400, title={'text':'Distribution of U.S. State'})\
.configure_title(anchor='middle')\
.configure_title(dy=-10)\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_view(stroke=None, strokeWidth=0)\
.configure_axis(grid=False)
return b
###################################################################################
###################################################################################
## DEMOGRAPHICS : STUDENT STATUS DISTRIBUTION
###################################################################################
###################################################################################
def get_demographic_student_status(df):
df2 = df.copy()
df2.Student = df2.Student.fillna('<MISSING>')
df2 = pd.DataFrame(df2.groupby(by=['ROWID','Student']).size()\
.reset_index()[['ROWID','Student']].Student.value_counts(dropna=False))\
.reset_index().rename(columns={'index':'student', 'Student':'count'}).sort_values(by='student')
df2 = df2.sort_values(by = ['count','student'], ascending=False)
y = df2['count'].values
x = df2.student.values
x_label = 'Student Status'
y_label = 'Frequency'
y_label2 = '% of Total'
title = 'Distribution of Student Status'
show_pct_y = True
tot = df2['count'].sum()
pct_format='{0:.0%}'
def my_format(num, x):
return (str(num*100)[:4 + (x-1)] + '%').replace('.','')
# build the pareto chart
fig = plt.figure(figsize=(10, 7), dpi = 100)
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
bars = ax1.bar(x = x, height = y, width = 0.9, align = 'center', edgecolor = berkeley_palette['berkeley_blue'],
color = '#0078D4', linewidth = 1, alpha = 0.8)
ax1.set_xticks(range(df2.shape[0]))
ax1.set_xticklabels(x, rotation = 45, fontsize=12)
for xtick in ax1.get_xticklabels():
xtick.set_color(berkeley_palette['black'])
ax1.get_yaxis().set_major_formatter(
tck.FuncFormatter(lambda x, p: format(int(x), ',')))
ax1.tick_params(axis = 'y', labelsize = 10)
ax1.tick_params(axis = 'y', labelcolor = berkeley_palette['pacific'])
if x_label:
ax1.set_xlabel(x_label, fontsize = 20, horizontalalignment = 'right', x = 1.0,
color = berkeley_palette['pacific'], labelpad=10)
if y_label:
ax1.set_ylabel(y_label, fontsize = 20, horizontalalignment = 'right', y = 1.0,
color = berkeley_palette['pacific'], labelpad=20)
if title:
plt.title(title, fontsize = 25, fontweight = 'semibold', color = berkeley_palette['berkeley_blue'], pad = 30, loc='center')
weights = y / tot
cumsum = weights.cumsum()
cumsum = [0.999999999 if x >= 1.0 else x for x in cumsum]
cumsum[len(cumsum)-1] = 1.0
ax2.plot(x, cumsum, color =berkeley_palette['black'], label = 'Cumulative Distribution', alpha = 1)
ax2.scatter(x, cumsum, color = berkeley_palette['rose_garden'], marker = 'D', s = 15)
ax2.set_ylabel('', color = berkeley_palette['berkeley_blue'])
ax2.tick_params('y', colors = berkeley_palette['web_grey'])
ax2.set_ylim(0, 1.01)
vals = ax2.get_yticks()
ax2.set_yticks(vals.tolist())
ax2.set_yticklabels([pct_format.format(x) for x in vals], fontsize = 10)
# hide y-labels on right side
if not show_pct_y:
ax2.set_yticks([])
else:
if y_label2:
ax2.set_ylabel(y_label2, fontsize = 20, horizontalalignment = 'right', y = 1.0,
color = berkeley_palette['pacific'], labelpad = 20)
ax2.set_yticklabels([])
ax2.set_yticks([])
#formatted_weights = [pct_format.format(x) for x in cumsum]
formatted_weights = [my_format(x, 0) for x in cumsum]
for i, txt in enumerate(formatted_weights):
ax2.annotate(text = txt, xy = (x[i], cumsum[i] + .05), fontweight = 'bold', color = berkeley_palette['black'], fontsize=15)
if '<MISSING>' in df2.student.values:
yy = df2[(df2.student.values=='<MISSING>')].values[0][1]
b = bars.get_children()[len(bars.get_children())-1]
xx = (b.get_x() + b.get_width() / 2) - 0.05
ax1.annotate(text = str(yy), xy = (xx, yy+5), fontweight = 'bold', color = berkeley_palette['rose_garden'], fontsize=15)
# Adjust the plot spine borders to be lighter
for ax in [ax1, ax2]:
for p, v in zip(["top", "bottom", "right", "left"], [0.0, 0.3, 0.0, 0.3]):
ax.spines[p].set_alpha(v)
    # Set the Y-axis grid-lines to dim.
plt.grid(axis='y', alpha=.3)
plt.tight_layout()
#plt.show()
return plt
###################################################################################
###################################################################################
## DESCRIPTIVE STATISTICS STYLER (PANDAS)
###################################################################################
###################################################################################
def get_descriptive_statistics(df, cols = None):
if not cols:
cols = df.columns
rend = df[cols].describe()\
.T.style.background_gradient(cmap=sns.light_palette("#0067B0", as_cmap=True))\
.set_precision(2)
return rend
###################################################################################
###################################################################################
## LIKERT SCALE ANSWER VARIANCE PLOT
###################################################################################
###################################################################################
def get_likert_variance(df):
df2 = df.copy()
df2['likert_var'] = np.var(df2[['Interest','Effective','Intelligence','Writing','Meet']], axis=1)
df2['group'] = 'XLab'
df2.loc[(df2['Start Date'] < "2021-04-05"), 'group'] = 'Amazon'
at = alt.Chart(df2).transform_density('likert_var', as_=['likert_var','Density'], groupby=['group'])\
.mark_area(opacity=0.5, stroke=berkeley_palette['black'], strokeWidth=2)\
.encode(
x = alt.X('likert_var:Q',
axis=alt.Axis(values=list(np.arange(0.0, 9.5, 0.5)), tickCount=19), title="Variance"),
y = alt.Y('Density:Q'),
color = alt.Color('group:N',
scale=alt.Scale(domain=df2.group.unique(),
range=[berkeley_palette['berkeley_blue'], berkeley_palette['california_gold']]),
legend = alt.Legend(title="Participant Group", padding=10,
symbolType="square", symbolStrokeWidth=1, orient="right", offset=-170)))\
.properties(height=250, width=650, title={'text':'Distribution of Variance', 'subtitle':'for Likert Scale Answers'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-5)
return at
###################################################################################
###################################################################################
## LIKERT SCALE UNIFORM RESPONSE COUNTS BY GROUP
###################################################################################
###################################################################################
def get_likert_counts_by_group(df):
df2 = df.copy()
df2['likert_var'] = np.var(df2[['Interest','Effective','Intelligence','Writing','Meet']], axis=1)
df2['group'] = 'XLab'
df2.loc[(df2['Start Date'] < "2021-04-05"), 'group'] = 'Amazon'
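    # Per participant, tally total responses vs. responses where all five Likert items
    # got the same rating (zero variance), then aggregate those tallies per group.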
tot = df2.groupby(by=['group','ROWID']).size().reset_index().rename(columns={'ROWID':'participant_id',0:'total_responses'})
lik = df2[(df2.likert_var == 0.0)].groupby(by=['group','ROWID']).size().reset_index().rename(columns={'ROWID':'participant_id',0:'uniform_responses'})
tot = tot.merge(lik, how='inner', on=['group','participant_id'])
tot['pct_uniform'] = tot.uniform_responses / tot.total_responses
tot.groupby(by=['group','uniform_responses']).size().reset_index().rename(columns={0:'count'})
base = alt.Chart().mark_bar(stroke=berkeley_palette['pacific'], strokeWidth=0.5).encode(
x=alt.X('count:Q', axis=alt.Axis(title = 'Frequency', labelPadding=10, labelFontSize=20, titleFontSize=25)),
y = alt.Y('uniform_responses:O', axis=alt.Axis(title = '', labelAngle=0, labelPadding=10, labelFontSize=20,
titleFontSize=25, values=[1,2,3,4,5,6], tickCount=6), sort=[1,2,3,4,5,6]),
color = alt.Color('uniform_responses:O', legend = None,
scale=alt.Scale(range = [berkeley_palette['bay_fog'], "#00b0da", "#004274", berkeley_palette['golden_gate'], berkeley_palette['rose_garden']]))
).properties(width=650, height=150)
txt = base.mark_text(dx=-15, size=15).encode(
text='count:Q',
color=alt.value('white')
)
p = alt.layer(base, txt).properties(width=600, height=150, title={'text':''})\
.facet(
row=alt.Row('group:N',
sort=alt.SortArray(['XLab','Amazon']),
header=alt.Header(labelColor=berkeley_palette['pacific'], labelFontSize=25,labelFont='Lato', title='')
),
data=tot.groupby(by=['group','uniform_responses']).size().reset_index().rename(columns={0:'count'}),
        title='Uniform Likert Responses'
).configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-20)
return p
###################################################################################
###################################################################################
## RESPONSE TIME (WORDS PER MINUTE) DISTRIBUTION
###################################################################################
###################################################################################
def get_wpm_plot(df):
df2 = df.copy()
df2['likert_var'] = np.var(df2[['Interest','Effective','Intelligence','Writing','Meet']], axis=1)
df2['group'] = 'XLab'
df2.loc[(df2['Start Date'] < "2021-04-05"), 'group'] = 'Amazon'
p = alt.Chart(df2).mark_bar(opacity=0.8, stroke=berkeley_palette['black'], strokeWidth=0.5).encode(
x = alt.X('wpm:Q', bin=alt.Bin(maxbins=100), title="Words per Minute (bin=100)"),
y = alt.Y('count()', title='Frequency'),
color=alt.Color('group:N',
scale=alt.Scale(range = [berkeley_palette['berkeley_blue'], berkeley_palette['california_gold']]),
legend = alt.Legend(title="Participant Group", padding=10,
symbolType="square", symbolStrokeWidth=1, orient="right", offset=-170))
).properties(height=300,width=650, title={'text':'Distribution of Response Time', 'subtitle':'Evaluated in Words per Minute'})\
.configure(padding={'top':20, 'left':20, 'right':20,'bottom':20})\
.configure_facet(spacing=10)\
.configure_view(stroke=None)\
.configure_title(anchor='middle')\
.configure_axis(grid=False)\
.configure_title(dy=-5)
return p
| 44.892019
| 177
| 0.501935
|
a342a453e598c5666a65e62341c4df474c7b78af
| 737
|
py
|
Python
|
pymc3/examples/arbitrary_stochastic.py
|
vpolisky/pymc3
|
87cdd712c86321121c2ed3150764f3d847f5083c
|
[
"Apache-2.0"
] | 15
|
2016-03-29T17:22:45.000Z
|
2021-05-05T06:28:06.000Z
|
pymc3/examples/arbitrary_stochastic.py
|
taku-y/pymc3
|
70e3ca5e137b67aac0390c7e3979ec16842c4aed
|
[
"Apache-2.0"
] | 1
|
2019-08-17T06:58:38.000Z
|
2019-08-17T06:58:38.000Z
|
pymc3/examples/arbitrary_stochastic.py
|
rsumner31/pymc3-23
|
539c0fc04c196679a1cdcbf4bc2dbea4dee10080
|
[
"Apache-2.0"
] | 6
|
2016-06-30T08:58:16.000Z
|
2019-01-26T16:50:54.000Z
|
import numpy as np
import pymc3 as pm
import theano.tensor as tt
def build_model():
with pm.Model() as model:
lam = pm.Exponential('lam', 1)
failure = np.array([0, 1])
value = np.array([1, 0])
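        # Custom log-likelihood handed to DensityDist below: each observation
        # contributes failure * log(lam) - lam * value (an exponential survival-style term).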
def logp(failure, value):
return tt.sum(failure * np.log(lam) - lam * value)
pm.DensityDist('x', logp, observed={'failure': failure, 'value': value})
return model
def run(n_samples=3000):
model = build_model()
start = model.test_point
h = pm.find_hessian(start, model=model)
step = pm.Metropolis(model.vars, h, blocked=True, model=model)
trace = pm.sample(n_samples, step=step, start=start, model=model)
return trace
if __name__ == "__main__":
run()
| 26.321429
| 80
| 0.626866
|
9dc4ac1939f203f6dd514e3f7bfdde55b7e22e06
| 4,433
|
py
|
Python
|
tests/unit/plugins/openstack/context/network/test_network.py
|
aforalee/rallyALi
|
8050ca08b0e253aeb19a1cec34f33c648f00136a
|
[
"Apache-2.0"
] | 2
|
2015-02-06T11:03:12.000Z
|
2015-03-02T10:39:44.000Z
|
tests/unit/plugins/openstack/context/network/test_network.py
|
aforalee/rallyALi
|
8050ca08b0e253aeb19a1cec34f33c648f00136a
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/plugins/openstack/context/network/test_network.py
|
aforalee/rallyALi
|
8050ca08b0e253aeb19a1cec34f33c648f00136a
|
[
"Apache-2.0"
] | 2
|
2016-03-16T03:52:13.000Z
|
2020-10-02T07:58:50.000Z
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from rally.plugins.openstack.context.network import networks as network_context
from tests.unit import test
NET = "rally.plugins.openstack.wrappers.network."
class NetworkTestCase(test.TestCase):
def get_context(self, **kwargs):
return {"task": {"uuid": "foo_task"},
"admin": {"endpoint": "foo_admin"},
"config": {"network": kwargs},
"users": [{"id": "foo_user", "tenant_id": "foo_tenant"},
{"id": "bar_user", "tenant_id": "bar_tenant"}],
"tenants": {"foo_tenant": {"networks": [{"id": "foo_net"}]},
"bar_tenant": {"networks": [{"id": "bar_net"}]}}}
def test_START_CIDR_DFLT(self):
netaddr.IPNetwork(network_context.Network.DEFAULT_CONFIG["start_cidr"])
@mock.patch("rally.osclients.Clients")
@mock.patch(NET + "wrap", return_value="foo_service")
def test__init__default(self, mock_wrap, mock_clients):
context = network_context.Network(self.get_context())
self.assertEqual(context.config["networks_per_tenant"], 1)
self.assertEqual(context.config["start_cidr"],
network_context.Network.DEFAULT_CONFIG["start_cidr"])
@mock.patch("rally.osclients.Clients")
@mock.patch(NET + "wrap", return_value="foo_service")
def test__init__explicit(self, mock_wrap, mock_clients):
context = network_context.Network(
self.get_context(start_cidr="foo_cidr", networks_per_tenant=42,
network_create_args={"fakearg": "fake"}))
self.assertEqual(context.config["networks_per_tenant"], 42)
self.assertEqual(context.config["start_cidr"], "foo_cidr")
self.assertDictEqual(context.config["network_create_args"],
{"fakearg": "fake"})
@mock.patch(NET + "wrap")
@mock.patch("rally.plugins.openstack.context.network.networks.utils")
@mock.patch("rally.osclients.Clients")
def test_setup(self, mock_clients, mock_utils, mock_wrap):
mock_utils.iterate_per_tenants.return_value = [
("foo_user", "foo_tenant"),
("bar_user", "bar_tenant")]
mock_create = mock.Mock(side_effect=lambda t, **kw: t + "-net")
mock_utils.generate_random_name = mock.Mock()
mock_wrap.return_value = mock.Mock(create_network=mock_create)
nets_per_tenant = 2
net_context = network_context.Network(
self.get_context(networks_per_tenant=nets_per_tenant,
network_create_args={"fakearg": "fake"}))
net_context.setup()
create_calls = [
mock.call(tenant, add_router=True,
subnets_num=1, network_create_args={"fakearg": "fake"})
for user, tenant in mock_utils.iterate_per_tenants.return_value]
mock_create.assert_has_calls(create_calls)
mock_utils.iterate_per_tenants.assert_called_once_with(
net_context.context["users"])
expected_networks = ["bar_tenant-net",
"foo_tenant-net"] * nets_per_tenant
actual_networks = []
for tenant_id, tenant_ctx in net_context.context["tenants"].items():
actual_networks.extend(tenant_ctx["networks"])
self.assertSequenceEqual(sorted(expected_networks),
sorted(actual_networks))
@mock.patch("rally.osclients.Clients")
@mock.patch(NET + "wrap")
def test_cleanup(self, mock_wrap, mock_clients):
net_context = network_context.Network(self.get_context())
net_context.cleanup()
mock_wrap().delete_network.assert_has_calls(
[mock.call({"id": "foo_net"}), mock.call({"id": "bar_net"})],
any_order=True)
| 45.234694
| 79
| 0.643582
|
a0c393565bb26f88c2ae74f20002c1cffa8d8639
| 2,462
|
py
|
Python
|
emcap-compress.py
|
rpp0/emma
|
fab81e1c66b8a88d14e68b8878ddbb5ee6528de2
|
[
"MIT"
] | 36
|
2019-01-08T12:49:36.000Z
|
2022-03-31T08:11:48.000Z
|
emcap-compress.py
|
rpp0/emma
|
fab81e1c66b8a88d14e68b8878ddbb5ee6528de2
|
[
"MIT"
] | 6
|
2020-01-28T22:59:05.000Z
|
2022-02-10T00:14:43.000Z
|
emcap-compress.py
|
rpp0/emma
|
fab81e1c66b8a88d14e68b8878ddbb5ee6528de2
|
[
"MIT"
] | 3
|
2019-02-12T11:55:42.000Z
|
2020-08-12T23:30:05.000Z
|
#!/usr/bin/python
"""
This program compresses datasets captured with emcap using either PCA or an autoencoder. It looks for a manifest.emcap
inside the dataset directory and applies the compression to the trace set given as an argument. The program is called from
emcap to mitigate the fact that Python 3 is not supported by GNU Radio (Python 3 is required for calling EMMA DSP and ML functions).
After the release of GNU Radio 3.8, the compress_dataset function can be applied directly to the numpy array in emcap itself.
"""
import argparse
import os
import pickle
import emma.io.io as emio
from emma.processing import ops
from emma.utils.utils import EMMAException, conf_delete_action
from emma.io.emresult import EMResult
from emma.processing.action import Action
def compress_trace_set(trace_set_path):
if trace_set_path.endswith('.npy'):
parent_dataset_path = os.path.dirname(trace_set_path)
manifest_path = os.path.join(parent_dataset_path, 'manifest.emcap')
if os.path.exists(manifest_path):
# Open manifest
with open(manifest_path, 'rb') as manifest_file:
manifest = pickle.load(manifest_file)
conf = manifest['conf']
# Load trace set
trace_set = emio.get_trace_set(trace_set_path, 'cw', remote=False)
conf_delete_action(conf, 'optimize_capture') # Make sure there is no optimize_capture action anymore
# Add appropriate actions
if 'pca' in manifest:
conf.actions.append(Action('pca[%s]' % manifest_path))
elif 'autoenc' in manifest:
conf.actions.append(Action('corrtest[autoenc]'))
# Perform compression
result = EMResult()
ops.process_trace_set(result, trace_set, conf, keep_trace_sets=True)
processed_trace_set = result.trace_sets[0]
# Save compressed trace set
processed_trace_set.save(os.path.abspath(parent_dataset_path), dry=False)
else:
raise EMMAException("No manifest.emcap in %s, so don't know how to compress." % parent_dataset_path)
else:
raise EMMAException("Not a valid traceset_path in numpy format")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='EMCap compress')
parser.add_argument('trace_set_path', type=str, help="Trace set to compress")
args = parser.parse_args()
compress_trace_set(args.trace_set_path)
| 41.033333
| 121
| 0.6974
|
ff639141c6df908b8b14b6f18ec91167ca534325
| 7,409
|
py
|
Python
|
qa/rpc-tests/test_framework/test_framework.py
|
dnoiz1/ruxcoin
|
07e30a2b5ebc624ac8a2d92be435e895ede5deae
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/test_framework.py
|
dnoiz1/ruxcoin
|
07e30a2b5ebc624ac8a2d92be435e895ede5deae
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/test_framework/test_framework.py
|
dnoiz1/ruxcoin
|
07e30a2b5ebc624ac8a2d92be435e895ede5deae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Ruxcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
wait_ruxcoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class RuxcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_ruxcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_ruxcoinds()
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave ruxcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop ruxcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing ruxcoind/ruxcoin-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
self.options.tmpdir += '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
wait_ruxcoinds()
else:
print("Note: ruxcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some ruxcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
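# Illustrative invocation of a ComparisonTestFramework subclass (hypothetical script name
# and binary paths):
#   some_p2p_test.py --testbinary=/path/to/patched/ruxcoind --refbinary=/path/to/release/ruxcoind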
class ComparisonTestFramework(RuxcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("RUXCOIND", "ruxcoind"),
help="ruxcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("RUXCOIND", "ruxcoind"),
help="ruxcoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| 34.460465
| 139
| 0.613848
|
86e0b1ede63cdfbc23a29baa3671e3de0fa8fd4c
| 246
|
py
|
Python
|
instaread/instapaper/example.py
|
anhdat/instaread
|
c554c58ae32119d3d80c8db1a163a1712c3f4f90
|
[
"MIT"
] | null | null | null |
instaread/instapaper/example.py
|
anhdat/instaread
|
c554c58ae32119d3d80c8db1a163a1712c3f4f90
|
[
"MIT"
] | null | null | null |
instaread/instapaper/example.py
|
anhdat/instaread
|
c554c58ae32119d3d80c8db1a163a1712c3f4f90
|
[
"MIT"
] | null | null | null |
import instapaper
I = instapaper.Instapaper("<oauth_consumer_key>", "<oauth_consumer_secret>")
I.login("<user_name>", "<password>")
b = instapaper.Bookmark(I, {"url": "https://www.biblegateway.com/passage/?search=John+1&version=NIV"})
b.save()
| 30.75
| 102
| 0.723577
|
758678e10d95a793dcf123dd81afd7a6fccf5711
| 326
|
py
|
Python
|
2017/day21/part1.py
|
dcabezas98/advent-of-code
|
c3a1e376bfea877a5af3b4472bb1ca6a5807b52e
|
[
"MIT"
] | null | null | null |
2017/day21/part1.py
|
dcabezas98/advent-of-code
|
c3a1e376bfea877a5af3b4472bb1ca6a5807b52e
|
[
"MIT"
] | null | null | null |
2017/day21/part1.py
|
dcabezas98/advent-of-code
|
c3a1e376bfea877a5af3b4472bb1ca6a5807b52e
|
[
"MIT"
] | null | null | null |
pattern = [['.','#','.'],['.','.','#'],['#','#','#']]
rules_book = {}
with open("input.txt") as f:
    for line in f:
        # Each line maps an input pattern to its enhanced output, e.g. "../.# => ##./#../..."
        i, o = line.rstrip('\n').split(" => ")
        i = list(map(list, i.split("/")))
        o = list(map(list, o.split("/")))
        # Key each rule by its (hashable) input grid so it can be looked up during enhancement
        rules_book[tuple(map(tuple, i))] = o
| 20.375
| 53
| 0.407975
|
1ad9d0054f90f20f4055ba5af3fb3c2bb931f247
| 15,932
|
py
|
Python
|
tensorflow/python/keras/engine/sequential.py
|
Byambaa0325/tensorflow
|
dc001fd87b0661436d090af8fd51253c8264679f
|
[
"Apache-2.0"
] | 1
|
2020-03-03T13:36:29.000Z
|
2020-03-03T13:36:29.000Z
|
tensorflow/python/keras/engine/sequential.py
|
Byambaa0325/tensorflow
|
dc001fd87b0661436d090af8fd51253c8264679f
|
[
"Apache-2.0"
] | 2
|
2021-08-25T15:59:18.000Z
|
2022-02-10T02:00:23.000Z
|
tensorflow/python/keras/engine/sequential.py
|
Byambaa0325/tensorflow
|
dc001fd87b0661436d090af8fd51253c8264679f
|
[
"Apache-2.0"
] | 1
|
2022-03-18T04:26:38.000Z
|
2022-03-18T04:26:38.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of the `Sequential` model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import keras_export
SINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '
'a single output tensor. For multi-output '
'layers, use the functional API.')
@keras_export('keras.Sequential', 'keras.models.Sequential')
class Sequential(training.Model):
"""`Sequential` groups a linear stack of layers into a `tf.keras.Model`.
`Sequential` provides training and inference features on this model.
Examples:
>>> # Optionally, the first layer can receive an `input_shape` argument:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> # Afterwards, we do automatic shape inference:
>>> model.add(tf.keras.layers.Dense(4))
>>> # This is identical to the following:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_dim=16))
>>> # And to the following:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, batch_input_shape=(None, 16)))
>>> # Note that you can also omit the `input_shape` argument.
>>> # In that case the model doesn't have any weights until the first call
>>> # to a training/evaluation method (since it isn't yet built):
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> # model.weights not created yet
>>> # Whereas if you specify the input shape, the model gets built
>>> # continuously as you are adding layers:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> model.add(tf.keras.layers.Dense(4))
>>> len(model.weights)
4
>>> # When using the delayed-build pattern (no input shape specified), you can
>>> # choose to manually build your model by calling
>>> # `build(batch_input_shape)`:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> model.build((None, 16))
>>> len(model.weights)
4
```python
# Note that when using the delayed-build pattern (no input shape specified),
# the model gets built the first time you call `fit` (or other training and
# evaluation methods).
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
```
"""
@trackable.no_automatic_dependency_tracking
def __init__(self, layers=None, name=None):
"""Creates a `Sequential` model instance.
Args:
layers: Optional list of layers to add to the model.
name: Optional name for the model.
"""
super(Sequential, self).__init__(name=name, autocast=False)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
self._auto_track_sub_layers = False
self._layer_call_argspecs = {}
# Add to the model any layers passed to the constructor.
if layers:
if not isinstance(layers, (list, tuple)):
layers = [layers]
tf_utils.assert_no_legacy_layers(layers)
for layer in layers:
self.add(layer)
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
# `Trackable` manages the `_layers` attributes and does filtering
# over it.
layers = super(Sequential, self).layers
if layers and isinstance(layers[0], input_layer.InputLayer):
return layers[1:]
return layers[:]
@property
@trackable_layer_utils.cache_recursive_attribute('dynamic')
def dynamic(self):
return any(layer.dynamic for layer in self.layers)
@trackable.no_automatic_dependency_tracking
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
Arguments:
layer: layer instance.
Raises:
TypeError: If `layer` is not a layer instance.
ValueError: In case the `layer` argument does not
know its input shape.
ValueError: In case the `layer` argument has
multiple output tensors, or is already connected
somewhere else (forbidden in `Sequential` models).
"""
# If we are passed a Keras tensor created by keras.Input(), we can extract
# the input layer from its keras history and use that without any loss of
# generality.
if hasattr(layer, '_keras_history'):
origin_layer = layer._keras_history[0]
if isinstance(origin_layer, input_layer.InputLayer):
layer = origin_layer
if not isinstance(layer, base_layer.Layer):
raise TypeError('The added layer must be '
'an instance of class Layer. '
'Found: ' + str(layer))
tf_utils.assert_no_legacy_layers([layer])
# This allows the added layer to broadcast mutations to the current
# layer, which is necessary to ensure cache correctness.
layer._attribute_sentinel.add_parent(self._attribute_sentinel)
self.built = False
set_inputs = False
if not self._layers:
if isinstance(layer, input_layer.InputLayer):
# Corner case where the user passes an InputLayer layer via `add`.
assert len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) == 1
set_inputs = True
else:
batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
if batch_shape:
# Instantiate an input layer.
x = input_layer.Input(
batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
# This will build the current layer
# and create the node connecting the current layer
# to the input layer we just created.
layer(x)
set_inputs = True
if set_inputs:
# If an input layer (placeholder) is available.
if len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = [
nest.flatten(layer._inbound_nodes[-1].output_tensors)[0]
]
self.inputs = layer_utils.get_source_inputs(self.outputs[0])
elif self.outputs:
# If the model is being built continuously on top of an input layer:
# refresh its output.
output_tensor = layer(self.outputs[0])
if len(nest.flatten(output_tensor)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = [output_tensor]
if self.outputs:
# True if set_inputs or self._is_graph_network or if adding a layer
# to an already built deferred seq model.
self.built = True
if set_inputs or self._is_graph_network:
self._init_graph_network(self.inputs, self.outputs, name=self.name)
else:
self._layers.append(layer)
self._handle_deferred_layer_dependencies([layer])
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
# Different Model types add to `._layers` in different ways, so for safety
# we do a cache invalidation to make sure the changes are reflected.
self._attribute_sentinel.invalidate_all()
@trackable.no_automatic_dependency_tracking
def pop(self):
"""Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model.
"""
if not self.layers:
raise TypeError('There are no layers in the model.')
layer = self._layers.pop()
self._layer_call_argspecs.pop(layer)
self._attribute_sentinel.invalidate_all()
if not self.layers:
self.outputs = None
self.inputs = None
self.built = False
elif self._is_graph_network:
self.layers[-1]._outbound_nodes = []
self.outputs = [self.layers[-1].output]
self._init_graph_network(self.inputs, self.outputs, name=self.name)
self.built = True
@base_layer_utils.default
def build(self, input_shape=None):
if self._is_graph_network:
self._init_graph_network(self.inputs, self.outputs, name=self.name)
else:
if input_shape is None:
raise ValueError('You must provide an `input_shape` argument.')
input_shape = tuple(input_shape)
self._build_input_shape = input_shape
super(Sequential, self).build(input_shape)
self.built = True
def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name
if self._build_input_shape is None:
input_shapes = nest.map_structure(_get_shape_tuple, inputs)
self._build_input_shape = input_shapes
if self._is_graph_network:
if not self.built:
self._init_graph_network(self.inputs, self.outputs, name=self.name)
return super(Sequential, self).call(inputs, training=training, mask=mask)
outputs = inputs # handle the corner case where self.layers is empty
for layer in self.layers:
# During each iteration, `inputs` are the inputs to `layer`, and `outputs`
# are the outputs of `layer` applied to `inputs`. At the end of each
# iteration `inputs` is set to `outputs` to prepare for the next layer.
kwargs = {}
argspec = self._layer_call_argspecs[layer].args
if 'mask' in argspec:
kwargs['mask'] = mask
if 'training' in argspec:
kwargs['training'] = training
outputs = layer(inputs, **kwargs)
if len(nest.flatten(outputs)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
# `outputs` will be the inputs to the next layer.
inputs = outputs
mask = outputs._keras_mask
return outputs
def compute_output_shape(self, input_shape):
shape = input_shape
for layer in self.layers:
shape = layer.compute_output_shape(shape)
return shape
def compute_mask(self, inputs, mask):
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
outputs = self.call(inputs, mask=mask)
return outputs._keras_mask
@deprecated('2021-01-01', 'Please use `model.predict()` instead.')
def predict_proba(self, x, batch_size=32, verbose=0):
"""Generates class probability predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of probability predictions.
"""
preds = self.predict(x, batch_size, verbose)
if preds.min() < 0. or preds.max() > 1.:
logging.warning('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
@deprecated('2021-01-01',
'Please use instead:'
'* `np.argmax(model.predict(x), axis=-1)`, '
' if your model does multi-class classification '
' (e.g. if it uses a `softmax` last-layer activation).'
'* `(model.predict(x) > 0.5).astype("int32")`, '
' if your model does binary classification '
' (e.g. if it uses a `sigmoid` last-layer activation).')
def predict_classes(self, x, batch_size=32, verbose=0):
"""Generate class predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
"""
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def get_config(self):
layer_configs = []
for layer in self.layers:
layer_configs.append(generic_utils.serialize_keras_object(layer))
# When constructed using an `InputLayer` the first non-input layer may not
# have the shape information to reconstruct `Sequential` as a graph network.
if (self._is_graph_network and layer_configs and
'batch_input_shape' not in layer_configs[0]['config'] and
isinstance(self._layers[0], input_layer.InputLayer)):
batch_input_shape = self._layers[0]._batch_input_shape
layer_configs[0]['config']['batch_input_shape'] = batch_input_shape
config = {
'name': self.name,
'layers': copy.deepcopy(layer_configs)
}
if self._build_input_shape is not None:
config['build_input_shape'] = self._build_input_shape
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'name' in config:
name = config['name']
build_input_shape = config.get('build_input_shape')
layer_configs = config['layers']
else:
name = None
build_input_shape = None
layer_configs = config
model = cls(name=name)
for layer_config in layer_configs:
layer = layer_module.deserialize(layer_config,
custom_objects=custom_objects)
model.add(layer)
if (not model.inputs and build_input_shape and
isinstance(build_input_shape, (tuple, list))):
model.build(build_input_shape)
return model
@property
def input_spec(self):
if self.layers and hasattr(self.layers[0], 'input_spec'):
return self.layers[0].input_spec
return None
@property
def _trackable_saved_model_saver(self):
return model_serialization.SequentialSavedModelSaver(self)
def _get_shape_tuple(t):
if hasattr(t, 'shape'):
shape = t.shape
if shape.rank is not None:
return tuple(shape.as_list())
return None
return None
| 37.575472
| 91
| 0.682651
|
13599f1abb1b715b6c351d091033137d9fcbb2df
| 902
|
py
|
Python
|
jobs/delete_unsaved_candidates.py
|
jialin-wu-02/skyportal
|
29d606ad8567b2230fb0553b18dd3cb9d3ab2d84
|
[
"BSD-3-Clause"
] | null | null | null |
jobs/delete_unsaved_candidates.py
|
jialin-wu-02/skyportal
|
29d606ad8567b2230fb0553b18dd3cb9d3ab2d84
|
[
"BSD-3-Clause"
] | 156
|
2019-10-17T19:35:22.000Z
|
2021-08-01T13:23:47.000Z
|
jobs/delete_unsaved_candidates.py
|
jialin-wu-02/skyportal
|
29d606ad8567b2230fb0553b18dd3cb9d3ab2d84
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import datetime
from skyportal.models import init_db, Candidate, Source, Obj, DBSession
from baselayer.app.env import load_env
env, cfg = load_env()
init_db(**cfg["database"])
try:
n_days = int(cfg["misc.days_to_keep_unsaved_candidates"])
except ValueError:
raise ValueError("Invalid (non-integer) value provided for "
"days_to_keep_unsaved_candidates in config file.")
if not 1 <= n_days <= 30:
raise ValueError("days_to_keep_unsaved_candidates must be an integer between 1 and 30")
cutoff_datetime = datetime.datetime.now() - datetime.timedelta(days=n_days)
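# Delete Objs that exist only as unsaved Candidates (never saved as Sources)
# and were created on or before the cutoff date.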
n_deleted = (
Obj.query
.filter(Obj.id.in_(DBSession.query(Candidate.obj_id)))
.filter(Obj.id.notin_(DBSession.query(Source.obj_id)))
.filter(Obj.created_at <= cutoff_datetime)
.delete()
)
DBSession.commit()
print(f"Deleted {n_deleted} unsaved candidates.")
| 27.333333
| 91
| 0.727273
|
43777880f7de832c2dea7ab11d6cd56f586c7559
| 14,268
|
py
|
Python
|
Python/Vague/piddletest3.py
|
joel-s/portfolio
|
4b45f0c9bbf1647ef7865e0d2616b78030a27389
|
[
"FSFAP"
] | null | null | null |
Python/Vague/piddletest3.py
|
joel-s/portfolio
|
4b45f0c9bbf1647ef7865e0d2616b78030a27389
|
[
"FSFAP"
] | null | null | null |
Python/Vague/piddletest3.py
|
joel-s/portfolio
|
4b45f0c9bbf1647ef7865e0d2616b78030a27389
|
[
"FSFAP"
] | null | null | null |
"""piddletest.py
This module puts the various PIDDLE backends through their paces.
"""
import pagesizes
from piddle import *
import string
import sys
import math
from math import pi
from math import sin
from math import cos
backends = ['piddlePDF','piddlePIL','piddleVCR','piddleTK',
'piddlePS','piddleAI','piddleQD','piddleGL', 'piddleWX',
'piddleGTK']
backends.sort()
#----------------------------------------------------------------------
# note, these tests do not flush() the canvas
#----------------------------------------------------------------------
def minimal(canvasClass):
"""Just a very basic test of line drawing and canvas size."""
canvas = canvasClass(pagesizes.A6, "testA") # A6 is a quarter page
drawMinimal(canvas)
return canvas
def drawMinimal(canvas):
saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
size = canvas.size # (actual size *may* differ from requested size)
canvas.defaultLineColor = green
canvas.drawLine(1,1,size[0]-1,size[1]-1)
canvas.drawLine(1,size[1]-1,size[0]-1,1)
canvas.drawRect(1,1,size[0]-1,size[1]-1, edgeWidth=5)
return canvas
#----------------------------------------------------------------------
def basics(canvasClass):
"""A general test of most of the drawing primitives except images and strings."""
canvas = canvasClass((400,400), "test-basics")
return drawBasics(canvas)
def drawTree(canvas, x,y, scale, t, tscale, i):
xp = x + cos(t)*scale/9
yp = y + sin(t)*scale/9
canvas.drawLine(x, y, xp, yp)
if i > 0:
drawTree(canvas, xp,yp, scale*8/9, t - tscale, tscale*6/7, i-1)
drawTree(canvas, xp,yp, scale*8/9, t + tscale, tscale*6/7, i-1)
def drawBasics(canvas):
saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
canvas.defaultLineColor = black
canvas.defaultLineWidth = 2
for i in range(6):
drawTree(canvas, 200.0,200.0, 250.0, i*pi/3, pi/16.5, 10)
# canvas.drawCurve( 20,20, 100,50, 50,100, 160,160 )
return canvas
#----------------------------------------------------------------------
def advanced(canvasClass):
"""A general test of most of the drawing primitives except images and strings."""
canvas = canvasClass((400,400), "test-advanced")
return drawAdvanced(canvas)
def drawBranch(canvas, x,y, t,ts, r,rs, i):
x0 = x + cos(t)*r
y0 = y + sin(t)*r
rp = r+rs
tp1 = t - ts/2
tp2 = t + ts/2
canvas.drawCurve(x0,y0, x0,y0, x+cos(tp1)*r,y+sin(tp1)*r,
x + cos(tp1)*rp, y + sin(tp1)*rp)
canvas.drawCurve(x0,y0, x0,y0, x+cos(tp2)*r,y+sin(tp2)*r,
x + cos(tp2)*rp, y + sin(tp2)*rp)
if i > 0:
drawBranch(canvas, x,y, tp1,ts/2, rp,rs*2/3, i-1)
drawBranch(canvas, x,y, tp2,ts/2, rp,rs*2/3, i-1)
def drawAdvanced(canvas):
saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
canvas.defaultLineColor = black
canvas.defaultLineWidth = 2
for i in range(3):
if i == 2: canvas.defaultLineColor = green
drawBranch(canvas, 200.0,200.0, i*2*pi/3,pi/3, 200.0,-40.0, 8)
# canvas.drawCurve( 20,20, 100,50, 50,100, 160,160 )
return canvas
def drawAdvanced2(canvas):
saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
canvas.defaultLineColor = black
canvas.defaultLineWidth = 2
for i in range(6):
drawBranch(canvas, 200.0,200.0,250.0, i*2*pi/3,pi/6, 200.0,4/5.0, 6)
# canvas.drawCurve( 20,20, 100,50, 50,100, 160,160 )
return canvas
#----------------------------------------------------------------------
## def advanced(canvasClass):
## """A test of figures and images."""
## canvas = canvasClass((300,300), "test-advanced")
## return drawAdvanced(canvas)
## def drawAdvanced(canvas):
## saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
## figure = [
## ( figureCurve, 20,20, 100,50, 50,100, 160,160 ),
## ( figureLine, 200,200, 250,150 ),
## ( figureArc, 50,10, 250,150, 10,90 ) ]
## canvas.drawFigure(figure, fillColor=yellow, edgeWidth=4)
## try:
## import Image
## except:
## canvas.drawString("PIL not available!", 20,200)
## Image = None
## if Image:
## img = Image.open("python.gif")
## canvas.drawImage( img, 120,50,120+32,50+64 );
## canvas.drawImage( img, 0,210,300,210+32 );
## return canvas
#----------------------------------------------------------------------
def bluefunc(x): return 1.0 / (1.0 + math.exp(-10*(x-0.6)))
def redfunc(x): return 1.0 / (1.0 + math.exp(10*(x-0.5)))
def greenfunc(x): return 1 - pow(redfunc(x+0.2),2) - bluefunc(x-0.3)
def spectrum(canvasClass):
canvas = canvasClass((300,300), "test-spectrum")
return drawSpectrum(canvas)
def drawSpectrum(canvas):
"""Generates a spectrum plot; illustrates colors and useful application."""
saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
def plot(f,canvas,offset=0):
for i in range(0,100):
x = float(i)/100
canvas.drawLine(i*3+offset,250, i*3+offset,250-100*f(x))
def genColors(n=100):
out = [None]*n;
for i in range(n):
x = float(i)/n
out[i] = Color(redfunc(x), greenfunc(x), bluefunc(x));
return out
colors = genColors(300)
# draw a black background for the spectrum
canvas.drawRect( 0,0,300,100, edgeColor=black, fillColor=black )
# draw the spectrum
for i in range(len(colors)):
canvas.drawLine(i,20,i,80, colors[i])
# plot the components of the spectrum
canvas.defaultLineColor = red
plot(redfunc, canvas)
canvas.defaultLineColor = blue
plot(bluefunc, canvas, 1)
canvas.defaultLineColor = green
plot(greenfunc, canvas, 2)
return canvas
#----------------------------------------------------------------------
def strings(canvasClass):
canvas = canvasClass( size=(400,400), name="test-strings" )
return drawStrings(canvas)
def CenterAndBox(canvas, s, cx=200, y=40):
"tests string positioning, stringWidth, fontAscent, and fontDescent"
canvas.drawLine(cx,y-30, cx,y+30, color=yellow)
    w = canvas.stringWidth(s)
canvas.drawLine(cx-w/2, y, cx+w/2, y, color=red)
canvas.drawString(s, cx-w/2, y )
canvas.defaultLineColor = Color(0.7,0.7,1.0) # light blue
canvas.drawLine(cx-w/2, y-20, cx-w/2, y+20) # left
canvas.drawLine(cx+w/2, y-20, cx+w/2, y+20) # right
asc, desc = canvas.fontAscent(), canvas.fontDescent()
canvas.drawLine(cx-w/2-20, y-asc, cx+w/2+20, y-asc) # top
canvas.drawLine(cx-w/2-20, y+desc, cx+w/2+20, y+desc) # bottom
def drawStrings(canvas):
"""Checks font metrics, and also illustrates the standard fonts."""
saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
def Write(canvas, s, font, curs):
if font: canvas.defaultFont = font
text = s
while text and text[-1] == '\n': text = text[:-1]
canvas.drawString(text, x=curs[0], y=curs[1])
if s[-1] == '\n':
curs[0] = 10
curs[1] = curs[1] + canvas.fontHeight() + canvas.fontDescent()
else:
curs[0] = curs[0] + canvas.stringWidth(s)
def StandardFonts(canvas, Write):
canvas.defaultLineColor = black
curs = [10,70]
for size in (12, 18):
for fontname in ("times", "courier", "helvetica", "symbol",
"monospaced", "serif", "sansserif"):
curs[0] = 10
curs[1] = curs[1] + size*1.5
Write(canvas, "%s %d " % (fontname,size), Font(face=fontname, size=size), curs)
Write(canvas, "bold ", Font(face=fontname, size=size, bold=1), curs)
Write(canvas, "italic ", Font(face=fontname, size=size, italic=1), curs)
Write(canvas, "underline", Font(face=fontname, size=size, underline=1), curs)
CenterAndBox(canvas, "spam, spam, spam, baked beans, and spam!")
StandardFonts(canvas, Write)
return canvas
#----------------------------------------------------------------------
def rotstring(canvasClass):
canvas = canvasClass( (450,300), name='test-rotstring' )
return drawRotstring(canvas)
def drawRotstring(canvas):
"""Draws rotated strings."""
saver = StateSaver(canvas) # leave canvas state as you found it, restores state when leaves scope
canvas.defaultFont = Font(bold=1)
canvas.defaultLineColor = (blue + white)/2
canvas.drawLine(0,150, 300,150)
canvas.drawLine(150,0, 150,300)
s = " __albatros at "
w = canvas.stringWidth(s)
canvas.drawEllipse(150-w,150-w, 150+w, 150+w, fillColor=transparent)
colors = [red,orange,yellow,green,blue,purple]
cnum = 0
for ang in range(0, 359, 30):
canvas.defaultLineColor = colors[cnum]
s2 = s + str(ang)
canvas.drawString(s2, 150, 150, angle=ang)
cnum = (cnum+1) % len(colors)
canvas.drawString( "This is a\nrotated\nmulti-line string!!!", 350, 100, angle= -90, font=Font(underline=1) )
#canvas.drawString( "This is a\nrotated\nmulti-line string!!!", 400, 175, angle= -45, font=Font(underline=1) )
return canvas
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def tkTest(testfunc):
# piddleTK tests are called from here because need TK's event loop
try :
import piddleTK
import Tkinter
except:
print "A module needed for piddleTK is not available, select another backend"
return
root = Tkinter.Tk()
frame = Tkinter.Frame(root) # label='piddletestTK'
#tkcanvas = piddleTK.TKCanvas(size=(400,400), name='piddletestTK', master = frame)
# try new Tk canvas
tkcanvas = piddleTK.TKCanvas(size=(400,400), name='piddletestTK', master = frame)
bframe = Tkinter.Frame(root)
minimalB=Tkinter.Button(bframe, text='minimal test',
command= lambda c=tkcanvas : (c.clear(),drawMinimal(c), c.flush())).pack(side=Tkinter.LEFT)
basicB = Tkinter.Button(bframe, text='basic test',
command= lambda c=tkcanvas: (c.clear(),drawBasics(c),c.flush()) ).pack(side=Tkinter.LEFT)
spectB =Tkinter.Button(bframe, text='spectrum test',
command= lambda c=tkcanvas: (c.clear(),drawSpectrum(c),c.flush()) ).pack(side=Tkinter.LEFT)
stringsB = Tkinter.Button(bframe, text='strings test',
command= lambda c=tkcanvas:(c.clear(),drawStrings(c),c.flush()) ).pack(side=Tkinter.LEFT)
rotstrB = Tkinter.Button(bframe, text='rotated strings test',
command= lambda c=tkcanvas:(c.clear(), drawRotstring(c),c.flush()) ).pack(side=Tkinter.LEFT)
advancedB = Tkinter.Button(bframe, text='advanced test',
command= lambda c=tkcanvas:(c.clear(), drawAdvanced(c),c.flush() ) ).pack(side=Tkinter.LEFT)
bframe.pack(side=Tkinter.TOP)
frame.pack()
# try to draw before running mainloop
if testfunc== minimal:
drawMinimal(tkcanvas)
elif testfunc == basics:
drawBasics(tkcanvas)
elif testfunc == advanced :
drawAdvanced(tkcanvas)
elif testfunc == spectrum :
drawSpectrum(tkcanvas)
elif testfunc == strings :
drawStrings(tkcanvas)
elif testfunc == rotstring :
drawRotstring(tkcanvas)
else :
print "Illegal testfunc handed to tkTest"
raise "Unsupported testfunc"
tkcanvas.flush()
root.mainloop()
root.destroy()
#----------------------------------------------------------------------
def wxTest(testfunc):
try :
import piddleWX
from wxPython.wx import wxApp
except:
print "A module needed for piddleWX is not available, select another backend"
return
global wx_app
if not globals().has_key("wx_app"):
class CanvasApp(wxApp):
"The wxApp that runs canvas. Initializes windows, and handles redrawing"
def OnInit(self):
return 1
wx_app = CanvasApp(0)
# run the test, passing the canvas class and returning the canvas
canvas = testfunc(piddleWX.WXCanvas)
canvas.flush()
# Run the main loop
wx_app.MainLoop()
def runtest(backend, testfunc):
# special cases:
if backend=='piddleTK':
tkTest(testfunc) # takes care of import, etc.
return
if backend=='piddleWX':
wxTest(testfunc) # takes care of import, etc.
return
# import the relevant module
module = __import__(backend)
# figure out the canvas class name (e.g., "PILCanvas") and get that
canvasClass = getattr(module, backend[6:]+"Canvas")
# run the test, passing the canvas class and returning the canvas
canvas = testfunc(canvasClass)
# do post-test cleanup
canvas.flush()
# handle save's here
if backend == 'piddlePIL':
canvas.save(format='png') # save as a PNG file
elif backend == 'piddleVCR':
filename = canvas.name + ".vcr"
canvas.save(filename)
print filename, "saved"
else: # if backend == 'piddlePS' or backend== 'piddlePDF':
canvas.save() # should be "pass'ed" by Canvas's that don't use save
def mainLoop():
global tests, backends
backend = None
test = None
if len(sys.argv) == 2:
runtest('piddleTK', advanced)
sys.exit(0)
while 1:
# print backends on left, tests on right, indicate chosen one of each
i = 0
while i < len(backends) or i < len(tests):
try: bstr = str(i+1) + '. ' + backends[i]
except: bstr = ''
try: tstr = chr(65+i) + '. ' + tests[i].__name__
except: tstr = ''
if i == backend: bflag = '==>'
else: bflag = ''
if i == test: tflag = '==>'
else: tflag = ''
print "%10s %-20s %10s %-20s" % (bflag, bstr, tflag, tstr)
i = i+1
print
inp = raw_input("Selection (0 to exit): ")
print
if inp == '0': return
if inp:
testinp = ''
if inp[-1] in string.letters: testinp = inp[-1]
elif inp[0] in string.letters: testinp = inp[0]
backinp = string.join(filter(lambda x:x in '0123456789',inp),'')
if backinp:
backend = int(backinp)-1
if backend < len(backends):
docstr = __import__(backends[backend]).__doc__
if docstr: print docstr
else: print "<no doc string>"
else: backend = None
if testinp:
test = ord(string.upper(testinp[0])) - ord('A')
if test >= 0 and test < len(tests):
docstr = tests[test].__doc__
if docstr:
print docstr
else: test = None
print
# now, if we have a valid backend and test, run it
if backend != None and test != None:
runtest(backends[backend], tests[test])
tests = (minimal, basics, advanced, spectrum, strings, rotstring)
if __name__=='__main__':
mainLoop()
| 31.706667
| 119
| 0.629591
|
dce453660a898f30d149906116f2416c01f2cf85
| 6,482
|
py
|
Python
|
tests/test_np.py
|
k7hoven/np
|
1d0ba711c5b679c2d1c1204c2f6ae50d2f5a5e92
|
[
"0BSD"
] | 6
|
2015-10-21T18:10:56.000Z
|
2021-08-11T13:48:17.000Z
|
tests/test_np.py
|
artificialligence/np
|
1d0ba711c5b679c2d1c1204c2f6ae50d2f5a5e92
|
[
"0BSD"
] | null | null | null |
tests/test_np.py
|
artificialligence/np
|
1d0ba711c5b679c2d1c1204c2f6ae50d2f5a5e92
|
[
"0BSD"
] | 2
|
2016-08-31T13:51:10.000Z
|
2021-07-01T07:39:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 19:49:23 2016
@author: Koos Zevenhoven
"""
import unittest
import unittest.mock
import np
import sys
class NPTestCase(unittest.TestCase):
def assertIdenticalArray(self, arr1, arr2):
self.assertTrue((arr1 == arr2).all())
self.assertEqual(arr1.dtype, arr2.dtype)
self.assertEqual(arr1.shape, arr2.shape)
class QuickSubscriptArray(NPTestCase):
def test_1D(self):
self.assertIdenticalArray(np[1,3,4,5,6,9], np.array([1,3,4,5,6,9]))
def test_mixed_values(self):
self.assertIdenticalArray(np[1,2.3,4,5.6], np.array([1,2.3,4,5.6]))
def test_float_values(self):
self.assertIdenticalArray(np[1.0,2.0], np.array([1.0, 2.0]))
def test_2D(self):
a2d = np.arange(12).reshape((3,4))
self.assertIdenticalArray(np[[0,1,2,3],[4,5,6,7],[8,9,10,11]],
np.array(a2d))
def test_3D(self):
a3d = np.arange(12).reshape((2,3,2))
self.assertIdenticalArray(np[[[ 0, 1], [ 2, 3], [ 4, 5]],
[[ 6, 7], [ 8, 9], [10, 11]]],
np.array(a3d))
class QuickSubscriptMatrix(NPTestCase):
def test_row(self):
self.assertIdenticalArray(np.m[1,2,3], np.array([[1,2,3]]))
def test_matrix_singlecolon(self):
self.assertIdenticalArray(np.m[1,2 : 3,4 : 5,6], np.array([[1,2],[3,4],[5,6]]))
def test_matrix_doublecolon(self):
self.assertIdenticalArray(np.m[1,2:
:3,4:
:5,6], np.array([[1,2],[3,4],[5,6]]))
def test_mixed_values(self):
self.assertIdenticalArray(np.m[1,2.3:4,5.6], np.array([[1,2.3],[4,5.6]]))
def test_float_values(self):
self.assertIdenticalArray(np.m[1.0, 2.0: 3.0, 4.0], np.array([[1.0, 2.0],[3.0,4.0]]))
class QuickArray(NPTestCase):
def test_0D(self):
self.assertIdenticalArray(np(3), np.array(3))
def test_1D(self):
self.assertIdenticalArray(np([1,3,4,5,6,9]), np.array([1,3,4,5,6,9]))
def test_mixed_values(self):
self.assertIdenticalArray(np([1,2.3,4,5.6]), np.array([1,2.3,4,5.6]))
def test_float_values(self):
self.assertIdenticalArray(np([1.0, 2.0]), np.array([1.0,2.0]))
def test_2D(self):
a2d = np.arange(12).reshape((3,4))
self.assertIdenticalArray(np(a2d), np.array(a2d))
def test_3D(self):
a3d = np.arange(12).reshape((2,3,2))
self.assertIdenticalArray(np(a3d), np.array(a3d))
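# The decorators below re-run a test once per dtype shortcut registered in
# np.np_quick_types, passing the shortcut object and its expected dtype into the test.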
def for_dtype_shortcuts(test_method):
def test_for_all_shortcuts(self):
for shortcut, dtype in np.np_quick_types.items():
test_method(self, getattr(np, shortcut), dtype)
return test_for_all_shortcuts
def for_dtype_matrix_shortcuts(test_method):
def test_for_all_shortcuts(self):
for shortcut, dtype in np.np_quick_types.items():
test_method(self, getattr(np.m, shortcut), dtype)
return test_for_all_shortcuts
class QuickTypeSubscriptArray(NPTestCase):
@for_dtype_shortcuts
def test_1D(self, sc, dtype):
self.assertIdenticalArray(sc[1,3,4,5,6,9], np.array([1,3,4,5,6,9], dtype=dtype))
@for_dtype_shortcuts
def test_mixed_values(self, sc, dtype):
self.assertIdenticalArray(sc[1,2.3,4,5.6], np.array([1,2.3,4,5.6], dtype=dtype))
@for_dtype_shortcuts
def test_float_values(self, sc, dtype):
self.assertIdenticalArray(sc[1.0,2.0], np.array([1.0, 2.0], dtype=dtype))
@for_dtype_shortcuts
def test_2D(self, sc, dtype):
a2d = np.arange(12).reshape((3,4))
self.assertIdenticalArray(sc[[0,1,2,3],[4,5,6,7],[8,9,10,11]],
np.array(a2d, dtype=dtype))
@for_dtype_shortcuts
def test_3D(self, sc, dtype):
a3d = np.arange(12).reshape((2,3,2))
self.assertIdenticalArray(sc[[[ 0, 1], [ 2, 3], [ 4, 5]],
[[ 6, 7], [ 8, 9], [10, 11]]],
np.array(a3d, dtype=dtype))
@unittest.skip("Skipping dtyped subscript matrices (not yet implemented)")
class QuickTypeSubscriptMatrix(NPTestCase):
@for_dtype_matrix_shortcuts
def test_row(self, sc, dtype):
self.assertIdenticalArray(sc[1,2,3], np.array([[1,2,3]], dtype=dtype))
@for_dtype_matrix_shortcuts
def test_matrix_singlecolon(self, sc, dtype):
self.assertIdenticalArray(sc[1,2 : 3,4 : 5,6], np.array([[1,2],[3,4],[5,6]], dtype=dtype))
@for_dtype_matrix_shortcuts
def test_matrix_doublecolon(self, sc, dtype):
self.assertIdenticalArray(sc[1,2:
:3,4:
:5,6], np.array([[1,2],[3,4],[5,6]], dtype=dtype))
@for_dtype_matrix_shortcuts
def test_mixed_values(self, sc, dtype):
self.assertIdenticalArray(sc[1,2.3:4,5.6], np.array([[1,2.3],[4,5.6]], dtype=dtype))
@for_dtype_matrix_shortcuts
def test_float_values(self, sc, dtype):
self.assertIdenticalArray(sc[1.0, 2.0: 3.0, 4.0], np.array([[1.0, 2.0],[3.0,4.0]], dtype=dtype))
class QuickTypeArray(NPTestCase):
@for_dtype_shortcuts
def test_0D(self, sc, dtype):
self.assertIdenticalArray(sc(3), np.array(3, dtype=dtype))
@for_dtype_shortcuts
def test_1D(self, sc, dtype):
self.assertIdenticalArray(sc([1,3,4,5,6,9]), np.array([1,3,4,5,6,9], dtype=dtype))
@for_dtype_shortcuts
def test_mixed_values(self, sc, dtype):
self.assertIdenticalArray(sc([1,2.3,4,5.6]), np.array([1,2.3,4,5.6], dtype=dtype))
@for_dtype_shortcuts
def test_float_values(self, sc, dtype):
self.assertIdenticalArray(sc([1.0, 2.0]), np.array([1.0,2.0], dtype=dtype))
@for_dtype_shortcuts
def test_2D(self, sc, dtype):
a2d = np.arange(12).reshape((3,4))
self.assertIdenticalArray(sc(a2d), np.array(a2d, dtype=dtype))
@for_dtype_shortcuts
def test_3D(self, sc, dtype):
a3d = np.arange(12).reshape((2,3,2))
self.assertIdenticalArray(sc(a3d), np.array(a3d, dtype=dtype))
if __name__ == "__main__":
unittest.main()
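The suite above pins down the quick array-creation syntax of the np package under test. The sketch below restates those assertions as plain expressions; it is a summary of the tests, not additional test code, and assumes only behaviour that the assertions above already exercise.
# Equivalences asserted by the tests above (illustrative summary only).
import np
np[1, 3, 4, 5, 6, 9]      # same array as np.array([1, 3, 4, 5, 6, 9])
np([1, 2.3, 4, 5.6])      # same array as np.array([1, 2.3, 4, 5.6])
np.m[1, 2: 3, 4: 5, 6]    # same array as np.array([[1, 2], [3, 4], [5, 6]])
# For every (name, dtype) pair in np.np_quick_types, getattr(np, name)
# accepts the same subscript/call forms and yields the dtyped equivalent,
# e.g. getattr(np, name)[1, 2] matches np.array([1, 2], dtype=dtype).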
| 37.252874
| 104
| 0.573743
|
8c34cf54735c671cea4917cf8bc9e72219e36613
| 263
|
py
|
Python
|
coinmarketcap/__init__.py
|
tfrizza/coinmarketcap-4.1.1
|
6013f3ef893bd98b9e3168f82908d7ad9429239e
|
[
"Apache-2.0"
] | null | null | null |
coinmarketcap/__init__.py
|
tfrizza/coinmarketcap-4.1.1
|
6013f3ef893bd98b9e3168f82908d7ad9429239e
|
[
"Apache-2.0"
] | null | null | null |
coinmarketcap/__init__.py
|
tfrizza/coinmarketcap-4.1.1
|
6013f3ef893bd98b9e3168f82908d7ad9429239e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = 'coinmarketcap'
__version__ = '4.1.1'
__author__ = 'Martin Simon <me@martinsimon.me>'
__repo__ = 'https://github.com/mrsmn/coinmarketcap-api'
__license__ = 'Apache v2.0 License'
from .core import Market
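For orientation, a minimal usage sketch of the package above follows. It assumes the re-exported Market class behaves as in the upstream coinmarketcap-api project (a ticker() method returning parsed JSON); treat the method name and return shape as assumptions, not guarantees of this snapshot.
# Minimal usage sketch (assumes the upstream Market API; see note above).
from coinmarketcap import Market
market = Market()
data = market.ticker('bitcoin')  # assumed method from the upstream project
print(data)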
| 23.909091
| 55
| 0.711027
|
df6726a3fff69f96fb1bbe9ab76b475ca59236d9
| 7,286
|
py
|
Python
|
deepcell/utils/testing_utils.py
|
jackstellwagen/deepcell-tf
|
d9326b8aceb2f25637e0d3934646da8f6a9f9539
|
[
"Apache-2.0"
] | null | null | null |
deepcell/utils/testing_utils.py
|
jackstellwagen/deepcell-tf
|
d9326b8aceb2f25637e0d3934646da8f6a9f9539
|
[
"Apache-2.0"
] | null | null | null |
deepcell/utils/testing_utils.py
|
jackstellwagen/deepcell-tf
|
d9326b8aceb2f25637e0d3934646da8f6a9f9539
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2019 David Van Valen at California Institute of Technology
# (Caltech), with support from the Paul Allen Family Foundation, Google,
# & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities testing Keras layers"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import tensor_shape
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.util import tf_inspect
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, custom_objects=None):
"""Test routine for a layer with a single input and single output.
Args:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
        expected_output: Numpy array of the expected output data.
        expected_output_dtype: Data type expected for the output.
        custom_objects: Custom objects needed to deserialize custom layers.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
"""
if input_data is None:
assert input_shape
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
    # test get_weights, set_weights at layer level
weights = layer.get_weights()
layer.set_weights(weights)
    # test instantiation from weights
if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if keras.backend.dtype(y) != expected_output_dtype:
raise AssertionError(
'When testing layer %s, for input %s, found output dtype=%s but '
'expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
keras.backend.dtype(y),
expected_output_dtype,
kwargs))
# check shape inference
model = keras.models.Model(x, y)
expected_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
expected_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
# Edited to add custom_objects to model.from_config
recovered_model = keras.models.Model.from_config(
model_config, custom_objects=custom_objects)
# End Edits
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3)
# test training mode (e.g. useful for dropout tests)
model.compile(RMSPropOptimizer(0.01), 'mse')
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
model = keras.models.Sequential()
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
expected_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
# Edited to add custom_objects to model.from_config
recovered_model = keras.models.Sequential.from_config(
model_config, custom_objects=custom_objects)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3)
# for further checks in the caller function
return actual_output
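As a usage illustration for the routine above, the call below runs layer_test against a stock Keras Dense layer; the layer choice and kwargs are illustrative assumptions, not part of deepcell, and any of deepcell's custom layers could be substituted along with its custom_objects mapping.
# Illustrative invocation only: the layer class and kwargs below are
# assumptions chosen for the example, not part of deepcell itself.
if __name__ == '__main__':
    layer_test(keras.layers.Dense,
               kwargs={'units': 3},
               input_shape=(2, 4))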
| 40.703911
| 80
| 0.666072
|